diff --git a/cogrouping-streams/kstreams/.gitattributes b/cogrouping-streams/kstreams/.gitattributes
deleted file mode 100644
index 097f9f98..00000000
--- a/cogrouping-streams/kstreams/.gitattributes
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# https://help.github.com/articles/dealing-with-line-endings/
-#
-# Linux start script should use lf
-/gradlew text eol=lf
-
-# These are Windows script files and should use crlf
-*.bat text eol=crlf
-
diff --git a/cogrouping-streams/kstreams/gradle.properties b/cogrouping-streams/kstreams/gradle.properties
deleted file mode 100644
index 18f452c7..00000000
--- a/cogrouping-streams/kstreams/gradle.properties
+++ /dev/null
@@ -1,6 +0,0 @@
-# This file was generated by the Gradle 'init' task.
-# https://docs.gradle.org/current/userguide/build_environment.html#sec:gradle_configuration_properties
-
-org.gradle.parallel=true
-org.gradle.caching=true
-
diff --git a/column-difference/ksql/README.md b/column-difference/ksql/README.md
new file mode 100644
index 00000000..7132aa15
--- /dev/null
+++ b/column-difference/ksql/README.md
@@ -0,0 +1,175 @@
+# Column difference
+
+This tutorial demonstrates how to calculate the difference between two columns.
+
+## Setup
+
+The first thing we do is create a stream named `PURCHASE_STREAM`:
+
+```sql
+CREATE STREAM PURCHASE_STREAM (
+ ID VARCHAR,
+ PREVIOUS_PURCHASE DOUBLE,
+ CURRENT_PURCHASE DOUBLE,
+ TXN_TS VARCHAR,
+ FIRST_NAME VARCHAR,
+ LAST_NAME VARCHAR)
+
+ WITH (KAFKA_TOPIC='customer_purchases',
+ VALUE_FORMAT='JSON',
+ PARTITIONS=1);
+```
+
+## Calculate the difference between two columns
+
+Now create a persistent query that computes the difference between the two purchase columns:
+
+```sql
+CREATE STREAM PURCHASE_HISTORY_STREAM AS
+ SELECT FIRST_NAME,
+ LAST_NAME,
+ CURRENT_PURCHASE - PREVIOUS_PURCHASE as PURCHASE_DIFF
+FROM PURCHASE_STREAM;
+```
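+
+If you'd like to inspect results interactively before creating the persistent stream, you can run a transient version of the same query (a sketch based on the stream defined above; `EMIT CHANGES` streams results as rows arrive):
+
+```sql
+SELECT FIRST_NAME,
+       LAST_NAME,
+       CURRENT_PURCHASE - PREVIOUS_PURCHASE AS PURCHASE_DIFF
+FROM PURCHASE_STREAM
+EMIT CHANGES
+LIMIT 4;
+```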
+
+## Running the example
+
+### ksqlDB CLI
+
+#### Prerequisites
+
+* Docker running via [Docker Desktop](https://docs.docker.com/desktop/) or [Docker Engine](https://docs.docker.com/engine/install/)
+* [Docker Compose](https://docs.docker.com/compose/install/). Ensure that the command `docker compose version` succeeds.
+
+#### Run the commands
+
+First, start ksqlDB and Kafka:
+
+ ```shell
+ docker compose -f ./docker/docker-compose-ksqldb.yml up -d
+ ```
+Next, open the ksqlDB CLI:
+
+ ```shell
+ docker exec -it ksqldb-cli ksql http://ksqldb-server:8088
+ ```
+
+Finally, run the following SQL statements to create the `PURCHASE_STREAM` stream backed by Kafka running in Docker, populate it with
+test data, and run the query that calculates the column difference.
+
+```sql
+ CREATE STREAM PURCHASE_STREAM (
+ ID VARCHAR,
+ PREVIOUS_PURCHASE DOUBLE,
+ CURRENT_PURCHASE DOUBLE,
+ TXN_TS VARCHAR,
+ FIRST_NAME VARCHAR,
+ LAST_NAME VARCHAR)
+
+ WITH (KAFKA_TOPIC='customer_purchases',
+ VALUE_FORMAT='JSON',
+ PARTITIONS=1);
+```
+
+Before we get too far, let’s set the `auto.offset.reset` configuration parameter to `earliest`. This means all new ksqlDB queries will
+automatically compute their results from the beginning of a stream, rather than the end. This isn’t always what you’ll want to do in
+production, but it makes query results much easier to see in examples like this.
+
+`SET 'auto.offset.reset' = 'earliest';`
+
+And let's adjust the column width so we can easily see the results:
+
+`SET CLI COLUMN-WIDTH 20`
+
+```sql
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('1', 8000.54, 5004.89, '2020-12-04 02:35:43', 'Art', 'Vandeley');
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('2', 500.33, 1000.89, '2020-12-04 02:35:44', 'Nick', 'Fury');
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('3', 333.18, 804.89, '2020-12-04 02:35:45', 'Natasha', 'Romanov');
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('4', 72848.11, 60040.89, '2020-12-04 02:35:46', 'Wanda', 'Maximoff');
+```
+
+```sql
+CREATE STREAM PURCHASE_HISTORY_STREAM AS
+SELECT FIRST_NAME,
+ LAST_NAME,
+ CURRENT_PURCHASE - PREVIOUS_PURCHASE as PURCHASE_DIFF
+FROM PURCHASE_STREAM;
+```
+
+```sql
+SELECT * FROM PURCHASE_HISTORY_STREAM;
+```
+
+The query output should look something like this:
+
+```plaintext
++--------------------+--------------------+--------------------+
+|FIRST_NAME |LAST_NAME |PURCHASE_DIFF |
++--------------------+--------------------+--------------------+
+|Art |Vandeley |-2995.6499999999996 |
+|Nick |Fury |500.56 |
+|Natasha |Romanov |471.71 |
+|Wanda |Maximoff |-12807.220000000001 |
+```
+When you are finished, clean up the containers used for this tutorial by running:
+
+ ```shell
+ docker compose -f ./docker/docker-compose-ksqldb.yml down -v
+ ```
+### Confluent Cloud
+
+#### Prerequisites
+
+* A [Confluent Cloud](https://confluent.cloud/signup) account
+* A ksqlDB cluster created in Confluent Cloud. Follow [this quick start](https://docs.confluent.io/cloud/current/get-started/index.html#section-2-add-ksql-cloud-to-the-cluster) to create one.
+
+#### Run the commands
+
+In the Confluent Cloud Console, navigate to your environment and then click the `ksqlDB` link in the left-side menu. Then click the
+name of the ksqlDB cluster you created.
+
+Finally, run the following SQL statements in the ksqlDB UI `Editor` tab to create the `PURCHASE_STREAM` stream, populate it with
+test data, and run the column difference query.
+
+```sql
+CREATE STREAM PURCHASE_STREAM (
+ ID VARCHAR,
+ PREVIOUS_PURCHASE DOUBLE,
+ CURRENT_PURCHASE DOUBLE,
+ TXN_TS VARCHAR,
+ FIRST_NAME VARCHAR,
+ LAST_NAME VARCHAR)
+
+ WITH (KAFKA_TOPIC='customer_purchases',
+ VALUE_FORMAT='JSON',
+ PARTITIONS=1);
+```
+
+
+```sql
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('1', 8000.54, 5004.89, '2020-12-04 02:35:43', 'Art', 'Vandeley');
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('2', 500.33, 1000.89, '2020-12-04 02:35:44', 'Nick', 'Fury');
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('3', 333.18, 804.89, '2020-12-04 02:35:45', 'Natasha', 'Romanov');
+INSERT INTO PURCHASE_STREAM (ID, PREVIOUS_PURCHASE, CURRENT_PURCHASE, TXN_TS, FIRST_NAME, LAST_NAME) VALUES ('4', 72848.11, 60040.89, '2020-12-04 02:35:46', 'Wanda', 'Maximoff');
+```
+
+```sql
+CREATE STREAM PURCHASE_HISTORY_STREAM AS
+SELECT FIRST_NAME,
+ LAST_NAME,
+ CURRENT_PURCHASE - PREVIOUS_PURCHASE as PURCHASE_DIFF
+FROM PURCHASE_STREAM;
+```
+
+```sql
+SELECT * FROM PURCHASE_HISTORY_STREAM;
+```
+
+The query output should look like this:
+
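+```plaintext
++--------------------+--------------------+--------------------+
+|FIRST_NAME          |LAST_NAME           |PURCHASE_DIFF       |
++--------------------+--------------------+--------------------+
+|Art                 |Vandeley            |-2995.6499999999996 |
+|Nick                |Fury                |500.56              |
+|Natasha             |Romanov             |471.71              |
+|Wanda               |Maximoff            |-12807.220000000001 |
+```
+
+The values match the ksqlDB CLI run above, since the same test records are inserted.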
+
+Note that the `-` operator expects numerical values, so if you have columns where the numbers are stored as `VARCHAR`, you'll need to use a `CAST` operation to convert them to a numerical type; otherwise you'll get an error in your query.
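+
+As a sketch of such a conversion (the `*_STR` columns and stream name here are hypothetical and not part of this tutorial's schema):
+
+```sql
+SELECT CAST(CURRENT_PURCHASE_STR AS DOUBLE) - CAST(PREVIOUS_PURCHASE_STR AS DOUBLE) AS PURCHASE_DIFF
+FROM SOME_STREAM_WITH_VARCHAR_AMOUNTS
+EMIT CHANGES;
+```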
diff --git a/column-difference/ksql/markup/dev/create-financial-transaction-stream.adoc b/column-difference/ksql/markup/dev/create-financial-transaction-stream.adoc
deleted file mode 100644
index 770b4ec0..00000000
--- a/column-difference/ksql/markup/dev/create-financial-transaction-stream.adoc
+++ /dev/null
@@ -1,8 +0,0 @@
-The first thing we do is to create a stream named `PURCHASE_STREAM`. This statement creates the `customer_purchases` topic, since it doesn't already exist. For more details check out the https://docs.ksqldb.io/en/latest/developer-guide/ksqldb-reference/create-stream/#create-strea[ksqlDB documentation on the CREATE STREAM] statement. The data contained in the topic is just plain, schemaless JSON.
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/create-activity-stream.sql %}
-+++++
-
-
-Go ahead and create the stream now by pasting this statement into the ksqlDB window you opened at the beginning of this step. After you've created the stream, quit the ksqlDB CLI for now by typing `exit`.
diff --git a/column-difference/ksql/markup/dev/init.adoc b/column-difference/ksql/markup/dev/init.adoc
deleted file mode 100644
index dbd821b2..00000000
--- a/column-difference/ksql/markup/dev/init.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-To get started, make a new directory anywhere you'd like for this project:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/init.sh %}
-+++++
\ No newline at end of file
diff --git a/column-difference/ksql/markup/dev/make-dirs.adoc b/column-difference/ksql/markup/dev/make-dirs.adoc
deleted file mode 100644
index e4a3ab72..00000000
--- a/column-difference/ksql/markup/dev/make-dirs.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Then make the following directories to set up its structure:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/make-dirs.sh %}
-+++++
diff --git a/column-difference/ksql/markup/dev/make-docker-compose.adoc b/column-difference/ksql/markup/dev/make-docker-compose.adoc
deleted file mode 100644
index 190f96ba..00000000
--- a/column-difference/ksql/markup/dev/make-docker-compose.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Next, create the following `docker-compose.yml` file to obtain Confluent Platform (for Kafka in the cloud, see https://www.confluent.io/confluent-cloud/tryfree/[Confluent Cloud]):
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/docker-compose.yml %}
-+++++
diff --git a/column-difference/ksql/markup/dev/make-src-file.adoc b/column-difference/ksql/markup/dev/make-src-file.adoc
deleted file mode 100644
index 2aa07617..00000000
--- a/column-difference/ksql/markup/dev/make-src-file.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Now that you have a series of statements that's doing the right thing, the last step is to put them into a file so that they can be used outside the CLI session. Create a file at `src/statements.sql` with the following content:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/src/statements.sql %}
-+++++
diff --git a/column-difference/ksql/markup/dev/run-producer.adoc b/column-difference/ksql/markup/dev/run-producer.adoc
deleted file mode 100644
index 7e22c754..00000000
--- a/column-difference/ksql/markup/dev/run-producer.adoc
+++ /dev/null
@@ -1,14 +0,0 @@
-Now let's produce some records for the `PURCHASE_STREAM` stream
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/console-producer.sh %}
-+++++
-
-After starting the console producer it will wait for your input.
-To send all send all the stock transactions click on the clipboard icon on the right, then paste the following into the terminal and press enter:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/input-events.json %}
-+++++
-
-After you've sent the records above, you can close the console producer with `Ctrl-C`.
diff --git a/column-difference/ksql/markup/dev/set-properties.adoc b/column-difference/ksql/markup/dev/set-properties.adoc
deleted file mode 100644
index 51d58497..00000000
--- a/column-difference/ksql/markup/dev/set-properties.adoc
+++ /dev/null
@@ -1,14 +0,0 @@
-Set ksqlDB to process data from the beginning of each Kafka topic.
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/set-properties.sql %}
-+++++
-
-Then let's adjust the column width so we can easily see the results of the query
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/set-column-width.sql %}
-+++++
-
-
-
diff --git a/column-difference/ksql/markup/dev/start-cli.adoc b/column-difference/ksql/markup/dev/start-cli.adoc
deleted file mode 100644
index e0129125..00000000
--- a/column-difference/ksql/markup/dev/start-cli.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-To begin developing interactively, open up the ksqlDB CLI:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/start-cli.sh %}
-+++++
diff --git a/column-difference/ksql/markup/dev/start-compose.adoc b/column-difference/ksql/markup/dev/start-compose.adoc
deleted file mode 100644
index ff0aeed7..00000000
--- a/column-difference/ksql/markup/dev/start-compose.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-And launch it by running:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/docker-compose-up.sh %}
-+++++
\ No newline at end of file
diff --git a/column-difference/ksql/markup/dev/transient-reporting.adoc b/column-difference/ksql/markup/dev/transient-reporting.adoc
deleted file mode 100644
index 35c3a6db..00000000
--- a/column-difference/ksql/markup/dev/transient-reporting.adoc
+++ /dev/null
@@ -1,33 +0,0 @@
-Now we write a query to concatenate multiple columns. To achieve this, we will use the `-` operator to calculate the difference between two columns.
-
-[source, sql]
-----
-
-SELECT FIRST_NAME,
- LAST_NAME,
- CURRENT_PURCHASE - PREVIOUS_PURCHASE as PURCHASE_DIFF <1>
-FROM PURCHASE_STREAM
-EMIT CHANGES
-LIMIT 4;
-
-
-----
-
-<1> Using the `-` operator to calculate the difference between two columns.
-
-NOTE: The `-` operator expects numerical values. So if have columns where the numbers are stored as `VARCHAR` you'll have to use a `CAST` operation to convert them to a numerical type, otherwise you'll get an error in your query.
-
-
-This query should produce the following output:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/expected-transient-reporting.log %}
-+++++
-
-Now that the reporting query works, let's update it to create a continuous query for your reporting scenario:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/dev/continuous-reporting.sql %}
-+++++
-
-We're done with the ksqlDB CLI for now so go ahead and type `exit` to quit.
diff --git a/column-difference/ksql/markup/test/make-test-input.adoc b/column-difference/ksql/markup/test/make-test-input.adoc
deleted file mode 100644
index 7706c96f..00000000
--- a/column-difference/ksql/markup/test/make-test-input.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Create a file at `test/input.json` with the inputs for testing:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/test/input.json %}
-+++++
diff --git a/column-difference/ksql/markup/test/make-test-output.adoc b/column-difference/ksql/markup/test/make-test-output.adoc
deleted file mode 100644
index 2b501cc3..00000000
--- a/column-difference/ksql/markup/test/make-test-output.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Create a file at `test/output.json` with the expected outputs:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/test/output.json %}
-+++++
diff --git a/column-difference/ksql/markup/test/run-tests.adoc b/column-difference/ksql/markup/test/run-tests.adoc
deleted file mode 100644
index 072eb0c0..00000000
--- a/column-difference/ksql/markup/test/run-tests.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-Invoke the tests using the ksqlDB test runner and the statements file that you created earlier:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/test/run-tests.sh %}
-+++++
-
-Which should pass:
-
-+++++
-{% include_raw tutorials/column-difference/ksql/code/tutorial-steps/test/expected-results.log %}
-+++++
diff --git a/concatenation/ksql/README.md b/concatenation/ksql/README.md
new file mode 100644
index 00000000..5a0ae525
--- /dev/null
+++ b/concatenation/ksql/README.md
@@ -0,0 +1,199 @@
+# Concatenation
+
+In this tutorial, we'll show how to use the concatenation operator to create a single value from multiple columns.
+
+
+## Setup
+
+The first thing we do is create a stream named `ACTIVITY_STREAM`, which simulates stock purchases and serves as our example for concatenating multiple columns into a single value.
+
+```sql
+CREATE STREAM ACTIVITY_STREAM (
+ ID VARCHAR,
+ NUM_SHARES INT,
+ AMOUNT DOUBLE,
+ TXN_TS VARCHAR,
+ FIRST_NAME VARCHAR,
+ LAST_NAME VARCHAR,
+ SYMBOL VARCHAR )
+
+ WITH (KAFKA_TOPIC='stock_purchases',
+ VALUE_FORMAT='JSON',
+ PARTITIONS=1);
+```
+
+## Concatenating columns
+
+Now let's create a stream that concatenates several columns to create a summary of activity.
+
+```sql
+CREATE STREAM SUMMARY_RESULTS AS
+ SELECT FIRST_NAME + ' ' + LAST_NAME +
+ ' purchased ' +
+ CAST(NUM_SHARES AS VARCHAR) +
+ ' shares of ' +
+ SYMBOL AS SUMMARY
+FROM ACTIVITY_STREAM;
+```
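+
+To inspect the concatenated values interactively, you can also run a transient query against the new stream (a sketch; it assumes test records have been inserted into `ACTIVITY_STREAM`):
+
+```sql
+SELECT SUMMARY FROM SUMMARY_RESULTS EMIT CHANGES LIMIT 4;
+```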
+
+Note that concatenation only works with `STRING` values, so you'll have to use a `CAST` operation on non-string fields, as demonstrated above with `NUM_SHARES`; otherwise your query will result in an error.
+
+## Running the example
+
+To run this example, follow the same steps as in the other ksqlDB tutorials in this repository: start Kafka and ksqlDB (in Docker or in Confluent Cloud), open the ksqlDB CLI or the Confluent Cloud ksqlDB editor, run the statements above, and insert a few test records into `ACTIVITY_STREAM`.
diff --git a/concatenation/ksql/markup/dev/create-financial-transaction-stream.adoc b/concatenation/ksql/markup/dev/create-financial-transaction-stream.adoc
deleted file mode 100644
index 9588d4df..00000000
--- a/concatenation/ksql/markup/dev/create-financial-transaction-stream.adoc
+++ /dev/null
@@ -1,8 +0,0 @@
-The first thing we do is to create a stream named `ACTIVITY_STREAM`. This statement creates the `stock_purchases` topic, since it doesn't already exist. For more details check out the https://docs.ksqldb.io/en/latest/developer-guide/ksqldb-reference/create-stream/#create-strea[ksqlDB documentation on the CREATE STREAM] statement. The data contained in the topic is just plain, schemaless JSON.
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/create-activity-stream.sql %}
-+++++
-
-
-Go ahead and create the stream now by pasting this statement into the ksqlDB window you opened at the beginning of this step. After you've created the stream, quit the ksqlDB CLI for now by typing `exit`.
diff --git a/concatenation/ksql/markup/dev/init.adoc b/concatenation/ksql/markup/dev/init.adoc
deleted file mode 100644
index 1392caf3..00000000
--- a/concatenation/ksql/markup/dev/init.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-To get started, make a new directory anywhere you'd like for this project:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/init.sh %}
-+++++
\ No newline at end of file
diff --git a/concatenation/ksql/markup/dev/make-dirs.adoc b/concatenation/ksql/markup/dev/make-dirs.adoc
deleted file mode 100644
index 70b83616..00000000
--- a/concatenation/ksql/markup/dev/make-dirs.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Then make the following directories to set up its structure:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/make-dirs.sh %}
-+++++
diff --git a/concatenation/ksql/markup/dev/make-docker-compose.adoc b/concatenation/ksql/markup/dev/make-docker-compose.adoc
deleted file mode 100644
index 76465246..00000000
--- a/concatenation/ksql/markup/dev/make-docker-compose.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Next, create the following `docker-compose.yml` file to obtain Confluent Platform (for Kafka in the cloud, see https://www.confluent.io/confluent-cloud/tryfree/[Confluent Cloud]):
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/docker-compose.yml %}
-+++++
diff --git a/concatenation/ksql/markup/dev/make-src-file.adoc b/concatenation/ksql/markup/dev/make-src-file.adoc
deleted file mode 100644
index c5a5d492..00000000
--- a/concatenation/ksql/markup/dev/make-src-file.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Now that you have a series of statements that's doing the right thing, the last step is to put them into a file so that they can be used outside the CLI session. Create a file at `src/statements.sql` with the following content:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/src/statements.sql %}
-+++++
diff --git a/concatenation/ksql/markup/dev/run-producer.adoc b/concatenation/ksql/markup/dev/run-producer.adoc
deleted file mode 100644
index 441e6924..00000000
--- a/concatenation/ksql/markup/dev/run-producer.adoc
+++ /dev/null
@@ -1,14 +0,0 @@
-Now let's produce some records for the `ACTIVITY_STREAM` stream
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/console-producer.sh %}
-+++++
-
-After starting the console producer it will wait for your input.
-To send all send all the stock transactions click on the clipboard icon on the right, then paste the following into the terminal and press enter:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/input-events.json %}
-+++++
-
-After you've sent the records above, you can close the console producer with `Ctrl-C`.
diff --git a/concatenation/ksql/markup/dev/set-properties.adoc b/concatenation/ksql/markup/dev/set-properties.adoc
deleted file mode 100644
index eeecfee1..00000000
--- a/concatenation/ksql/markup/dev/set-properties.adoc
+++ /dev/null
@@ -1,14 +0,0 @@
-Set ksqlDB to process data from the beginning of each Kafka topic.
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/set-properties.sql %}
-+++++
-
-Then let's adjust the column width so we can easily see the results of the query
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/set-column-width.sql %}
-+++++
-
-
-
diff --git a/concatenation/ksql/markup/dev/start-cli.adoc b/concatenation/ksql/markup/dev/start-cli.adoc
deleted file mode 100644
index dc274ce0..00000000
--- a/concatenation/ksql/markup/dev/start-cli.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-To begin developing interactively, open up the ksqlDB CLI:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/start-cli.sh %}
-+++++
diff --git a/concatenation/ksql/markup/dev/start-compose.adoc b/concatenation/ksql/markup/dev/start-compose.adoc
deleted file mode 100644
index b7766887..00000000
--- a/concatenation/ksql/markup/dev/start-compose.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-And launch it by running:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/docker-compose-up.sh %}
-+++++
\ No newline at end of file
diff --git a/concatenation/ksql/markup/dev/transient-reporting.adoc b/concatenation/ksql/markup/dev/transient-reporting.adoc
deleted file mode 100644
index bcfd57de..00000000
--- a/concatenation/ksql/markup/dev/transient-reporting.adoc
+++ /dev/null
@@ -1,35 +0,0 @@
-Now we write a query to concatenate multiple columns. To achieve this, we will use the `+` operator between the fields in our `SELECT` statement rather than a comma.
-
-[source, sql]
-----
-
-SELECT FIRST_NAME + ' ' + LAST_NAME +
- ' purchased ' +
- CAST(NUM_SHARES AS VARCHAR) + <1>
- ' shares of ' +
- SYMBOL AS SUMMARY
-FROM ACTIVITY_STREAM
-EMIT CHANGES
-LIMIT 4;
-
-
-----
-
-<1> The NUM_SHARES field is an `INT` so we need to cast it to a `VARCHAR` as `concatenate` only works with `STRING` types
-
-NOTE: You can also SELECT fields you don't want to concatenate. In that case you use a comma to separate the field from those you concatenate. For example, you can SELECT individual fields `field_1` and `field_2` at the same time that you concatenate `field_3` with `field_4`. For example
-`SELECT field_1, field_2, field_3 + field_4`
-
-This query should produce the following output:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/expected-transient-reporting.log %}
-+++++
-
-Now that the reporting query works, let's update it to create a continuous query for your reporting scenario
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/dev/continuous-reporting.sql %}
-+++++
-
-We're done with the ksqlDB CLI for now so go ahead and type `exit` to quit.
diff --git a/concatenation/ksql/markup/test/make-test-input.adoc b/concatenation/ksql/markup/test/make-test-input.adoc
deleted file mode 100644
index 8324eaac..00000000
--- a/concatenation/ksql/markup/test/make-test-input.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Create a file at `test/input.json` with the inputs for testing:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/test/input.json %}
-+++++
diff --git a/concatenation/ksql/markup/test/make-test-output.adoc b/concatenation/ksql/markup/test/make-test-output.adoc
deleted file mode 100644
index 050577a4..00000000
--- a/concatenation/ksql/markup/test/make-test-output.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Create a file at `test/output.json` with the expected outputs:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/test/output.json %}
-+++++
diff --git a/concatenation/ksql/markup/test/run-tests.adoc b/concatenation/ksql/markup/test/run-tests.adoc
deleted file mode 100644
index ca3025a7..00000000
--- a/concatenation/ksql/markup/test/run-tests.adoc
+++ /dev/null
@@ -1,11 +0,0 @@
-Invoke the tests using the ksqlDB test runner and the statements file that you created earlier:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/test/run-tests.sh %}
-+++++
-
-Which should pass:
-
-+++++
-{% include_raw tutorials/concatenation/ksql/code/tutorial-steps/test/expected-results.log %}
-+++++
diff --git a/confluent-parallel-consumer-application/.gitignore b/confluent-parallel-consumer-application/.gitignore
deleted file mode 100644
index 096b979e..00000000
--- a/confluent-parallel-consumer-application/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-kafka/code/tutorial-steps/dev/outputs/
-consumer-records.out
diff --git a/confluent-parallel-consumer-application/confluent/code/configuration/dev.properties b/confluent-parallel-consumer-application/confluent/code/configuration/dev.properties
deleted file mode 100644
index 33294831..00000000
--- a/confluent-parallel-consumer-application/confluent/code/configuration/dev.properties
+++ /dev/null
@@ -1,10 +0,0 @@
-# Consumer properties
-key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
-value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
-max.poll.interval.ms=300000
-enable.auto.commit=false
-auto.offset.reset=earliest
-
-# Application-specific properties
-input.topic.name=parallel-consumer-input-topic
-file.path=topic-output.txt
diff --git a/confluent-parallel-consumer-application/confluent/markup/dev/ccloud-run-producer.adoc b/confluent-parallel-consumer-application/confluent/markup/dev/ccloud-run-producer.adoc
deleted file mode 100644
index d98c9f04..00000000
--- a/confluent-parallel-consumer-application/confluent/markup/dev/ccloud-run-producer.adoc
+++ /dev/null
@@ -1,13 +0,0 @@
-Using a terminal window, run the following command to start a Confluent CLI producer:
-
-```plaintext
-confluent kafka topic produce parallel-consumer-input-topic --parse-key
-```
-
-Each line represents input data for the Confluent Parallel Consumer application. To send all of the events below, paste the following into the prompt and press enter:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/input.txt %}
-+++++
-
-Enter `Ctrl-C` to exit.
diff --git a/confluent-parallel-consumer-application/confluent/markup/dev/make-config-file.adoc b/confluent-parallel-consumer-application/confluent/markup/dev/make-config-file.adoc
deleted file mode 100644
index e799a647..00000000
--- a/confluent-parallel-consumer-application/confluent/markup/dev/make-config-file.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Then create a development configuration file at `configuration/dev.properties`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/confluent/code/configuration/dev.properties %}
-+++++
diff --git a/confluent-parallel-consumer-application/confluent/markup/dev/make-topic.adoc b/confluent-parallel-consumer-application/confluent/markup/dev/make-topic.adoc
deleted file mode 100644
index edf03071..00000000
--- a/confluent-parallel-consumer-application/confluent/markup/dev/make-topic.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-In this step we’re going to create a topic for use during this tutorial. Use the following command to create the topic:
-
-```plaintext
-confluent kafka topic create parallel-consumer-input-topic
-```
\ No newline at end of file
diff --git a/confluent-parallel-consumer-application/confluent/markup/perftest/ccloud-run-producer.adoc b/confluent-parallel-consumer-application/confluent/markup/perftest/ccloud-run-producer.adoc
deleted file mode 100644
index e7fef46b..00000000
--- a/confluent-parallel-consumer-application/confluent/markup/perftest/ccloud-run-producer.adoc
+++ /dev/null
@@ -1,8 +0,0 @@
-Using a terminal window, run the following command to write 10,000 small dummy records to the input topic:
-
-```
-seq 1 10000 | confluent kafka topic produce perftest-parallel-consumer-input-topic
-```
-
-Let's kick off this command and let it run. It'll take a few minutes to produce all 10,000 records.
-In the meantime, let's continue with the tutorial.
diff --git a/confluent-parallel-consumer-application/confluent/markup/perftest/make-topic.adoc b/confluent-parallel-consumer-application/confluent/markup/perftest/make-topic.adoc
deleted file mode 100644
index 5b4870ea..00000000
--- a/confluent-parallel-consumer-application/confluent/markup/perftest/make-topic.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Use the following command to create a topic that we'll use for performance testing:
-
-```plaintext
-confluent kafka topic create perftest-parallel-consumer-input-topic
-```
\ No newline at end of file
diff --git a/confluent-parallel-consumer-application/kafka/README.md b/confluent-parallel-consumer-application/kafka/README.md
new file mode 100644
index 00000000..5a50698b
--- /dev/null
+++ b/confluent-parallel-consumer-application/kafka/README.md
@@ -0,0 +1,39 @@
+# Confluent parallel consumer
+
+The Confluent Parallel Consumer is an open-source Apache 2.0-licensed Java library that enables you to consume from a Kafka topic with more parallelism than the number of partitions. In an Apache Kafka consumer group, the number of partitions is the parallelism limit.
+Increasing the level of parallelism beyond the partition count is desirable in many situations: for example, when the partition count is fixed for reasons beyond your control, or when you need to make a high-latency call out to a database or microservice while consuming and want to increase throughput.
+
+In this tutorial, you'll build a small "hello world" application that uses the Confluent Parallel Consumer library. There are also larger-scale performance tests, which you can explore on your own, that compare the Confluent Parallel Consumer against a baseline built with a vanilla Apache Kafka consumer group.
+
+## ParallelStreamProcessor
+
+For parallel record consuming, you'll use the [ParallelStreamProcessor](https://javadoc.io/doc/io.confluent.parallelconsumer/parallel-consumer-core/latest/io/confluent/parallelconsumer/ParallelStreamProcessor.html) which wraps a [KafkaConsumer](https://kafka.apache.org/36/javadoc/org/apache/kafka/clients/consumer/KafkaConsumer.html).
+
+You create a new instance of a `KafkaConsumer`, create a `ParallelConsumerOptions` configuration object, then use the configuration to create a new `ParallelStreamProcessor` instance:
+
+```java
+// "consumerProperties" (name assumed) holds standard consumer config: bootstrap servers, deserializers, group.id, etc.
+final Consumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerProperties);
+```
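+
+A minimal sketch of the remaining setup, assuming the `kafkaConsumer` above; the ordering mode and concurrency value shown here are illustrative choices, not prescribed by the tutorial:
+
+```java
+final ParallelConsumerOptions<String, String> options = ParallelConsumerOptions.<String, String>builder()
+        .ordering(ParallelConsumerOptions.ProcessingOrder.KEY) // parallelize by key while preserving per-key order
+        .maxConcurrency(16)                                    // may exceed the topic's partition count
+        .consumer(kafkaConsumer)
+        .build();
+
+final ParallelStreamProcessor<String, String> parallelConsumer =
+        ParallelStreamProcessor.createEosStreamProcessor(options);
+
+// Subscribe and process: poll() invokes the handler on the processor's worker threads.
+parallelConsumer.subscribe(Collections.singletonList("parallel-consumer-input-topic"));
+parallelConsumer.poll(context -> {
+    final ConsumerRecord<String, String> record = context.getSingleConsumerRecord();
+    // handle the record here
+});
+```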
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/create-topic.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/create-topic.adoc
deleted file mode 100644
index 400481d7..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/create-topic.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-
-In this step we're going to create a topic for use during this tutorial.
-
-But first, you're going to open a shell on the broker docker container.
-
-Open a new terminal and window then run this command:
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/open-docker-shell.sh %}
-+++++
-
-Now use the following command to create the topic:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/create-topic.sh %}
-+++++
-
-Keep this terminal window open as you'll need to run a console-producer in a few steps.
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/explain-properties.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/explain-properties.adoc
deleted file mode 100644
index c2da3154..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/explain-properties.adoc
+++ /dev/null
@@ -1,9 +0,0 @@
-Let's do a quick overview of some of the more important properties here:
-
-The `key.deserializer` and `value.deserializer` properties provide a class implementing the `Deserializer` interface for converting `byte` arrays into the expected object type of the key and value respectively.
-
-The `max.poll.interval.ms` is the maximum amount of time a consumer may take between calls to `Consumer.poll()`. If a consumer instance takes longer than the specified time, it's considered non-responsive and removed from the consumer-group triggering a rebalance.
-
-Setting `enable.auto.commit` configuration to `false` is required because the Confluent Parallel Consumer handles committing offsets in order to achieve fault tolerance.
-
-`auto.offset.reset` - If a consumer instance can't locate any offsets for its topic-partition assignment(s), it will resume processing from the _**earliest**_ available offset.
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/init.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/init.adoc
deleted file mode 100644
index abb9e137..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/init.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Make a local directory anywhere you'd like for this project:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/init.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/make-build-file.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/make-build-file.adoc
deleted file mode 100644
index 5bae1813..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/make-build-file.adoc
+++ /dev/null
@@ -1,9 +0,0 @@
-In order to build the project, first https://gradle.org/install/[install Gradle] 7.5 or later if you don't already have it.
-Create the following Gradle build file, named `build.gradle` for the project. Note the `parallel-consumer-core` dependency,
-which is available in Maven Central. This artifact includes the Confluent Parallel Consumer's core API.
-There are also separate modules for using the Confluent Parallel Consumer with reactive API frameworks like Vert.x (`parallel-consumer-vertx`)
-and Reactor (`parallel-consumer-reactor`). These modules are out of scope for this introductory tutorial.
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/build.gradle %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/make-config-dir.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/make-config-dir.adoc
deleted file mode 100644
index ab3fcbc5..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/make-config-dir.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Next, create a directory for configuration data:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/make-configuration-dir.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/make-config-file.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/make-config-file.adoc
deleted file mode 100644
index 965cb899..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/make-config-file.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Then create a development configuration file at `configuration/dev.properties`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/configuration/dev.properties %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/make-consumer-app.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/make-consumer-app.adoc
deleted file mode 100644
index d174a3cb..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/make-consumer-app.adoc
+++ /dev/null
@@ -1,79 +0,0 @@
-
-To complete this introductory application, you'll build a main application class and a couple of supporting classes.
-
-
-First, you'll create the main application,`ParallelConsumerApplication`, which is the focal point of this tutorial; consuming records from a Kafka topic using the Confluent Parallel Consumer.
-
-Go ahead and copy the following into a file `src/main/java/io/confluent/developer/ParallelConsumerApplication.java`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ParallelConsumerApplication.java %}
-+++++
-
-Let's go over some of the key parts of the `ParallelConsumerApplication` starting with the constructor:
-
-[source, java]
-.ParallelConsumerApplication constructor
-----
- public ParallelConsumerApplication(final ParallelStreamProcessor{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/docker-compose.yml %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/make-gradle-wrapper.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/make-gradle-wrapper.adoc
deleted file mode 100644
index 26afc9e5..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/make-gradle-wrapper.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-And be sure to run the following command to obtain the Gradle wrapper, which we will use to execute the build. The Gradle wrapper is a best practice ancillary build script that enables developers to more easily collaborate on Gradle projects by ensuring that developers all use the same correct Gradle version for the project (downloading Gradle at build time if necessary).
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/gradle-wrapper.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/make-src-dir.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/make-src-dir.adoc
deleted file mode 100644
index dc0a105a..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/make-src-dir.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Create a directory for the Java files in this project:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/make-src-dir.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/make-supporting-classes.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/make-supporting-classes.adoc
deleted file mode 100644
index 1f4d5605..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/make-supporting-classes.adoc
+++ /dev/null
@@ -1,45 +0,0 @@
-To complete this tutorial, you'll need to also create an abstract class that we will extend to process messages as we consume them. This
-abstract class, `ConsumerRecordHandler`, encapsulates tracking the number of records processed, which will be useful later on when we run
-performance tests and want to terminate the test application after consuming an expected number of records.
-
-First create the abstract class at `src/main/java/io/confluent/developer/ConsumerRecordHandler.java`
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ConsumerRecordHandler.java %}
-+++++
-
-Using this abstract class will make it easier to change how you want to work with a `ConsumerRecord` without having to modify all of your existing code.
-
-Next you'll extend the `ConsumerRecordHandler` abstract class with a concrete class named `FileWritingRecordHandler`. Copy the following into file `src/main/java/io/confluent/developer/FileWritingRecordHandler.java`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/FileWritingRecordHandler.java %}
-+++++
-
-Let's take a peek under the hood at this class's `processRecordImpl` method, which gets calls for each record consumed:
-
-[source, java]
-.FileWritingRecordHandler.processRecordImpl
-----
- @Override
- protected void processRecordImpl(final ConsumerRecord{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/PropertiesUtil.java %}
-+++++
\ No newline at end of file
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/print-consumer-file-results.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/print-consumer-file-results.adoc
deleted file mode 100644
index 7c6be1ff..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/print-consumer-file-results.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-Your Confluent Parallel Consumer application should have consumed all the records sent and written them out to a file.
-
-In a new terminal, run this command to print the results to the console:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/print-consumer-file-results.sh %}
-+++++
-
-You should see something like this:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/expected-output.txt %}
-+++++
-
-Note that because we configured the Confluent Parallel Consumer to use `KEY` ordering, `Go to Current` appears before `Go to Kafka Summit`
-because these values have the same `event-promo` key. Similarly, `All streams lead to Kafka` appears before `Consume gently down the stream`
-because these values have the same `fun-line` key.
-
-At this point you can stop the Confluent Parallel Consumer application with `Ctrl-C` in the terminal window where it's running.
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/run-dev-app.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/run-dev-app.adoc
deleted file mode 100644
index 96d14a14..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/run-dev-app.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Now that you have an uberjar for the `ParallelConsumerApplication`, you can launch it locally. When you run the following, the prompt won't return, because the application will run until you exit it. There is always another message to process, so streaming applications don't exit until you force them.
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/run-dev-app.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/run-producer.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/run-producer.adoc
deleted file mode 100644
index b4dab05b..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/run-producer.adoc
+++ /dev/null
@@ -1,22 +0,0 @@
-////
- Example content file for how to include a console producer(s) in the tutorial.
- Usually you'll include a line referencing the script to run the console producer and also include some content
- describing how to input data as shown below.
-
- Again modify this file as you need for your tutorial, as this is just sample content. You also may have more than one
- console producer to run depending on how you structure your tutorial
-
-////
-
-Using the terminal window you opened in step three, run the following command to start a console-producer:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/console-producer.sh %}
-+++++
-
-
-Each line represents input data for the Confluent Parallel Consumer application. To send all of the events below, paste the following into the prompt and press enter:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/input.txt %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/dev/start-compose.adoc b/confluent-parallel-consumer-application/kafka/markup/dev/start-compose.adoc
deleted file mode 100644
index 0d9b9a3e..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/dev/start-compose.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-And launch it by running:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/docker-compose-up.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/append-ccloud-config.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/append-ccloud-config.adoc
deleted file mode 100644
index f51e63ca..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/append-ccloud-config.adoc
+++ /dev/null
@@ -1,7 +0,0 @@
-Using the command below, append the contents of `configuration/ccloud.properties` (with your Confluent Cloud configuration)
-to `configuration/perftest-kafka-consumer.properties` and `configuration/perftest-parallel-consumer.properties`:
-
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/perftest/ccloud-cat-config.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/append-kafka-config.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/append-kafka-config.adoc
deleted file mode 100644
index 732138e0..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/append-kafka-config.adoc
+++ /dev/null
@@ -1,7 +0,0 @@
-Using the command below, append the contents of `configuration/dev.properties`
-to `configuration/perftest-kafka-consumer.properties` and `configuration/perftest-parallel-consumer.properties`:
-
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/perftest/kafka-cat-config.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/build-uberjar.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/build-uberjar.adoc
deleted file mode 100644
index 40313061..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/build-uberjar.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Let's rebuild the uberjar to include this performance test. In your terminal, run:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/build-uberjar.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/create-perftest-topic.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/create-perftest-topic.adoc
deleted file mode 100644
index 0bc8090e..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/create-perftest-topic.adoc
+++ /dev/null
@@ -1,17 +0,0 @@
-
-In this step we're going to create a topic for use during this tutorial.
-
-But first, you're going to open a shell on the broker docker container.
-
-Open a new terminal and window then run this command:
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/dev/open-docker-shell.sh %}
-+++++
-
-Now use the following command to create the topic:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/perftest/create-perftest-topic.sh %}
-+++++
-
-Keep this terminal window open as you'll need to run a console-producer in a few steps.
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/make-config-file.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/make-config-file.adoc
deleted file mode 100644
index 55bac717..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/make-config-file.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-Then create two performance test configuration files. The first is for performance testing a multi-threaded `KafkaConsumer`-based
-performance test that we'll use to set a baseline. Create this file at `configuration/perftest-kafka-consumer.properties`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/configuration/perftest-kafka-consumer.properties %}
-+++++
-
-Then create this file at `configuration/perftest-parallel-consumer.properties`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/configuration/perftest-parallel-consumer.properties %}
-+++++
-
-Let's look at some of the more important properties in these configuration files:
-
-. We specify `fetch.min.bytes` to be 100000 in order to https://docs.confluent.io/cloud/current/client-apps/optimizing/throughput.html#consumer-fetching[optimize for consumer throughput]
-. The application-specific property `records.to.consume` is set to `10000` to match the number of records that we produced in the previous step. This will cause the application to terminate upon consuming this many records.
-. The application-specific property `record.handler.sleep.ms` is used to simulate a nontrivial amount of work to perform per record. In this case, we sleep for 20ms to simulate a low-but-nontrivial latency operation like a call to a database or REST API.
-
-In the configuration file for the Confluent Parallel Consumer performance test, there are a few Confluent Parallel Consumer-specific properties.
-
-. `parallel.consumer.max.concurrency` is set to `256`, much higher than the number of partitions in our topic
-. We use `UNORDERED` ordering, `PERIODIC_CONSUMER_ASYNCHRONOUS` offset commit mode, and a high `parallel.consumer.seconds.between.commits` value of 60 seconds.
- Together, these values optimize for throughput. This keeps our test analogous to the `KafkaConsumer`-based baseline. You may have noticed that,
- because we are aiming to maximize throughput in these performance tests while ignoring the overhead of offsets handling, the baseline doesn't even commit offsets!
\ No newline at end of file
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/make-kafka-consumer-perftest.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/make-kafka-consumer-perftest.adoc
deleted file mode 100644
index 75e2e43e..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/make-kafka-consumer-perftest.adoc
+++ /dev/null
@@ -1,20 +0,0 @@
-Here you'll build a performance test application and supporting classes that implement
-multi-threaded consuming (one `KafkaConsumer` per-partition to maximize parallelism).
-
-First, you'll create the main performance test application, `src/main/java/io/confluent/developer/MultithreadedKafkaConsumerPerfTest.java`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/MultithreadedKafkaConsumerPerfTest.java %}
-+++++
-
-Second, create the class that implements multi-threaded consuming, `src/main/java/io/confluent/developer/MultithreadedKafkaConsumer.java`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/MultithreadedKafkaConsumer.java %}
-+++++
-
-Finally, create the record handler that sleeps 20ms per record consumed, `src/main/java/io/confluent/developer/SleepingRecordHandler.java`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/SleepingRecordHandler.java %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/make-parallel-consumer-perftest.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/make-parallel-consumer-perftest.adoc
deleted file mode 100644
index c2faf9f3..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/make-parallel-consumer-perftest.adoc
+++ /dev/null
@@ -1,27 +0,0 @@
-Here you'll build a performance test application based on the Confluent Parallel Consumer. This test reuses a couple of classes
-that we created previously: `PropertiesUtil` for loading consumer and application-specific properties, and `SleepingRecordHandler`
-for simulating a nontrivial workload per-record just as we did in `MultithreadedKafkaConsumerPerfTest`. Please rewind
-and create these if you skipped the parts of the tutorial that create these two classes.
-
-Because the Confluent Parallel Consumer API is much lighter weight than the lift required to multi-thread `KafkaConsumer` instances
-per partition, let's knock out the entire thing in one class. Create the file `src/main/java/io/confluent/developer/ParallelConsumerPerfTest.java`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ParallelConsumerPerfTest.java %}
-+++++
-
-Take a look at the code and note the simplicity. Most of the code is for properties file handling and tracking progress. The interesting part relevant to the Confluent Parallel Consumer
-is in the four-line `runConsume()` method:
-
-[source, java]
-.ParallelConsumerPerfTest.runConsume
-----
- private void runConsume(final Properties appProperties) {
- parallelConsumer.subscribe(Collections.singletonList(appProperties.getProperty("input.topic.name")));
- parallelConsumer.poll(context -> {
- recordHandler.processRecord(context.getSingleConsumerRecord());
- });
- }
-----
-
-Bellisimo!
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/perf-test-extensions.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/perf-test-extensions.adoc
deleted file mode 100644
index 690a1c80..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/perf-test-extensions.adoc
+++ /dev/null
@@ -1,19 +0,0 @@
-In this section of the tutorial, we created a performance test for the Confluent Parallel Consumer, and a `KafkaConsumer` baseline to which to compare.
-
-This gave us a couple of data points, but only for one specific test context: each test aimed to consume records as quickly as possible in a single JVM while simulating a 20ms workload per-record.
-
-We can turn a few knobs and pull some levers to gather more performance test results in other application contexts. Since we used helper classes and parameterized configuration in this tutorial, you can easily choose other performance test adventures.
-Some questions you might explore:
-
-. How does performance compare if we increase or decrease the simulated workload time?
-. What if we commit offsets more frequently or even synchronously or transactionally in each test?
- In the case of the Confluent Parallel Consumer, this entails setting `parallel.consumer.seconds.between.commits` to a value lower than 60 seconds,
- and using a `parallel.consumer.commit.mode` of `PERIODIC_CONSUMER_SYNC` or `PERIODIC_TRANSACTIONAL_PRODUCER`.
- These commit modes better simulate an application designed to more easily pick up where it left off when recovering from an error.
-. What if we change the properties of the `KafkaConsumer` instance(s) most relevant to throughput (`fetch.min.bytes` and `max.poll.records`)?
-. What if we use `KEY` or `PARTITION` ordering when configuring the Confluent Parallel Consumer (as opposed to `UNORDERED`)?
-. How does the throughput comparison change if we create `perftest-parallel-consumer-input-topic` with more (or fewer) partitions?
-. What if we use larger, more realistic records and not just integers from 1 to 10,000? What if we also play with different
- key spaces?
-
-Have fun with it!
\ No newline at end of file
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/run-kafka-consumer-perftest.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/run-kafka-consumer-perftest.adoc
deleted file mode 100644
index 10d25c56..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/run-kafka-consumer-perftest.adoc
+++ /dev/null
@@ -1,25 +0,0 @@
-Now that you have an uberjar containing `MultithreadedKafkaConsumerPerfTest`, you can launch it locally.
-This will run until the expected 10,000 records have been consumed. Ensure that the `seq` command that you ran previously to
-produce 10,000 records has completed before running this so that we can accurately test consumption throughput.
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/perftest/run-kafka-consumer-perftest.sh %}
-+++++
-
-While the performance test runs, take a few sips of the beverage that you previously poured. It will take a minute or
-two to complete, and the final line output will show you the latency for consuming all 10,000 records, e.g.:
-
-+++++
-[main] INFO io.confluent.developer.MultithreadedKafkaConsumer - Total time to consume 10000 records: 40.46 seconds
-+++++
-
-Before we build and run a Confluent Parallel Consumer analogue to this `KafkaConsumer` baseline, let's summarize what we've seen so far:
-
-. We populated a topic with default properties and produced 10,000 small records to it
-. We maxed out the size of our consumer group by running a `KafkaConsumer` per partition, with each instance explicitly assigned to one partition
-. We optimized each `KafkaConsumer` for throughput by setting high values for `max.poll.records` and `fetch.min.bytes` (see the sketch below)
-. We struck a balance between latency-measurement accuracy and the instrumentation overhead needed to track progress and
 stop when expected, by using a 0.5-second `poll` timeout. (We want to report consumption latency shortly after consumption finishes,
 but we also want to minimize busy waiting by the `KafkaConsumer` instances that finish first.)
-. We scratched our heads writing some tricky multi-threaded code. By the way, is any multi-threaded code not tricky?
-. The reported performance test latency was *40.46 seconds* in our case (your number is surely different).
\ No newline at end of file
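As a rough companion to this summary, here is a minimal single-consumer sketch of a throughput-oriented configuration and a 0.5-second poll loop. It is not the tutorial's `MultithreadedKafkaConsumerPerfTest`, which runs one explicitly assigned consumer per partition across multiple threads; the class name, broker address, and property values here are illustrative assumptions, and the loop assumes the 10,000 records have already been produced.

```java
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class ThroughputTunedConsumerSketch {

  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed local broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "perftest-baseline-sketch");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    // Throughput-oriented settings: pull more records per poll and wait for larger fetches.
    // Illustrative values only; the tutorial's actual values live in its perftest properties file.
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 10_000);
    props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 100_000);

    int consumed = 0;
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(Collections.singletonList("perftest-parallel-consumer-input-topic"));
      long start = System.currentTimeMillis();
      while (consumed < 10_000) {
        // A 0.5-second timeout keeps the latency measurement tight without busy waiting.
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));
        consumed += records.count();
      }
      System.out.printf("Consumed %d records in %.2f seconds%n",
          consumed, (System.currentTimeMillis() - start) / 1000.0);
    }
  }
}
```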
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/run-parallel-consumer-perftest.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/run-parallel-consumer-perftest.adoc
deleted file mode 100644
index 48ea8076..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/run-parallel-consumer-perftest.adoc
+++ /dev/null
@@ -1,18 +0,0 @@
-Now that you have an uberjar containing `ParallelConsumerPerfTest`, you can launch it locally.
-This will run until the expected 10,000 records have been consumed. Ensure that the `seq` command that you ran previously to
-produce 10,000 records has completed before running this, so that we can accurately measure consumption throughput.
-
-As you kick this off, bear in mind the latency that you recorded when you ran `MultithreadedKafkaConsumerPerfTest` (40.46 seconds in the run performed for the tutorial).
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/perftest/run-parallel-consumer-perftest.sh %}
-+++++
-
-While the performance test runs, take a few sips of the beverage... actually, never mind. It's done:
-
-+++++
-[main] INFO io.confluent.developer.ParallelConsumerPerfTest - Time to consume 10000 records: 1.78 seconds
-+++++
-
-Your latency will surely be different from the `1.78 seconds` shown here. But, assuming you are running the test on reasonable hardware and don't have any
-extremely noisy neighbors running on your machine, it should be just a few seconds.
diff --git a/confluent-parallel-consumer-application/kafka/markup/perftest/run-producer.adoc b/confluent-parallel-consumer-application/kafka/markup/perftest/run-producer.adoc
deleted file mode 100644
index acd5dbe2..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/perftest/run-producer.adoc
+++ /dev/null
@@ -1,8 +0,0 @@
-Using the terminal window you opened in step three, run the following command to write 10,000 small dummy records to the input topic:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/perftest/console-producer.sh %}
-+++++
-
-Let's kick off this command and let it run. It'll take a few minutes to produce all 10,000 records.
-In the meantime, let's continue with the tutorial.
\ No newline at end of file
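The console-producer command above is all the tutorial needs. Purely as an illustration of what that step does, a Java equivalent that writes the integers 1 through 10,000 to the input topic might look like the following sketch (the class name and broker address are assumptions):

```java
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class DummyRecordProducerSketch {

  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed local broker
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
      // Mirrors `seq 1 10000` piped into the console producer: small, unkeyed integer-valued records.
      for (int i = 1; i <= 10_000; i++) {
        producer.send(new ProducerRecord<>("perftest-parallel-consumer-input-topic", String.valueOf(i)));
      }
      producer.flush();
    }
  }
}
```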
diff --git a/confluent-parallel-consumer-application/kafka/markup/test/invoke-tests.adoc b/confluent-parallel-consumer-application/kafka/markup/test/invoke-tests.adoc
deleted file mode 100644
index ddeca7f8..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/test/invoke-tests.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Now run the test, which is as simple as:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/test/invoke-tests.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/test/make-consumer-application-test.adoc b/confluent-parallel-consumer-application/kafka/markup/test/make-consumer-application-test.adoc
deleted file mode 100644
index 5742aa76..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/test/make-consumer-application-test.adoc
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-Testing a Confluent Parallel Consumer application is not too complicated thanks to the https://github.com/confluentinc/parallel-consumer/blob/master/parallel-consumer-core/src/test/java/io/confluent/csid/utils/LongPollingMockConsumer.java[LongPollingMockConsumer] that is based on Apache Kafka's https://javadoc.io/doc/org.apache.kafka/kafka-clients/latest/org/apache/kafka/clients/consumer/MockConsumer.html[MockConsumer]. Since the Confluent Parallel Consumer's https://github.com/confluentinc/parallel-consumer[codebase] is well tested, we don't need a _live_ consumer and Kafka broker to test our application. We can simply use a mock consumer to process data that we feed into it.
-
-
-There is only one method in `ParallelConsumerApplicationTest` annotated with `@Test`, and that is `consumerTest()`. This method actually runs your `ParallelConsumerApplication` with the mock consumer.
-
-
-Now create the following file at `src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java`:
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java %}
-+++++
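The tutorial's actual test is the file included above, and it builds on `LongPollingMockConsumer`. As a simpler, generic illustration of the underlying idea, here is a self-contained sketch using Apache Kafka's plain `MockConsumer`; the topic name, keys, and values are made up for the example.

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.junit.jupiter.api.Test;

import java.time.Duration;
import java.util.List;
import java.util.Map;

import static org.junit.jupiter.api.Assertions.assertEquals;

class MockConsumerSketchTest {

  @Test
  void mockConsumerDeliversSeededRecordsWithoutABroker() {
    MockConsumer<String, String> mockConsumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

    // Assign a single partition and seed its beginning offset so poll() has a valid position.
    TopicPartition partition = new TopicPartition("input-topic", 0);
    mockConsumer.assign(List.of(partition));
    mockConsumer.updateBeginningOffsets(Map.of(partition, 0L));

    // Feed records directly into the mock; this replaces producing to and consuming from a broker.
    mockConsumer.addRecord(new ConsumerRecord<>("input-topic", 0, 0, "key-1", "value-1"));
    mockConsumer.addRecord(new ConsumerRecord<>("input-topic", 0, 1, "key-2", "value-2"));

    // The code under test polls the mock exactly as it would a real KafkaConsumer.
    ConsumerRecords<String, String> records = mockConsumer.poll(Duration.ofMillis(100));
    assertEquals(2, records.count());
  }
}
```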
diff --git a/confluent-parallel-consumer-application/kafka/markup/test/make-consumer-record-handler-test.adoc b/confluent-parallel-consumer-application/kafka/markup/test/make-consumer-record-handler-test.adoc
deleted file mode 100644
index 04c22219..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/test/make-consumer-record-handler-test.adoc
+++ /dev/null
@@ -1,8 +0,0 @@
-
-Now let's build a test for the `ConsumerRecordHandler` implementation used in your application. Even though we have a test for the `ParallelConsumerApplication`, it's
-important that you can test this helper class in isolation.
-
-Create the following file at `src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java`:
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java %}
-+++++
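The real handler test is the file included above. To show the general shape of testing a record handler in isolation, here is a self-contained sketch in which a hypothetical handler class stands in for the tutorial's `FileWritingRecordHandler` (whose actual constructor and method names may differ):

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.List;

import static org.junit.jupiter.api.Assertions.assertEquals;

class RecordHandlerIsolationSketchTest {

  /** Hypothetical stand-in for the tutorial's handler: appends each record's value to a file. */
  static class ValueAppendingHandler {
    private final Path path;

    ValueAppendingHandler(Path path) {
      this.path = path;
    }

    void processRecord(ConsumerRecord<String, String> record) throws IOException {
      Files.writeString(path, record.value() + System.lineSeparator(),
          StandardOpenOption.CREATE, StandardOpenOption.APPEND);
    }
  }

  @Test
  void handlerWritesEachRecordValueToTheFile() throws IOException {
    Path tempFile = Files.createTempFile("handler-test", ".out");
    try {
      ValueAppendingHandler handler = new ValueAppendingHandler(tempFile);

      // Hand the handler a couple of records directly; no consumer or broker is involved.
      handler.processRecord(new ConsumerRecord<>("test-topic", 0, 0, "key", "first value"));
      handler.processRecord(new ConsumerRecord<>("test-topic", 0, 1, "key", "second value"));

      assertEquals(List.of("first value", "second value"), Files.readAllLines(tempFile));
    } finally {
      Files.deleteIfExists(tempFile);
    }
  }
}
```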
diff --git a/confluent-parallel-consumer-application/kafka/markup/test/make-test-dir.adoc b/confluent-parallel-consumer-application/kafka/markup/test/make-test-dir.adoc
deleted file mode 100644
index 65c58d30..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/test/make-test-dir.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-Create a directory for the tests to live in:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/tutorial-steps/test/make-test-dir.sh %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/markup/test/make-test-file.adoc b/confluent-parallel-consumer-application/kafka/markup/test/make-test-file.adoc
deleted file mode 100644
index ee814ebe..00000000
--- a/confluent-parallel-consumer-application/kafka/markup/test/make-test-file.adoc
+++ /dev/null
@@ -1,5 +0,0 @@
-First, create a test file at `configuration/test.properties`:
-
-+++++
-{% include_raw tutorials/confluent-parallel-consumer-application/kafka/code/configuration/test.properties %}
-+++++
diff --git a/confluent-parallel-consumer-application/kafka/settings.gradle b/confluent-parallel-consumer-application/kafka/settings.gradle
new file mode 100644
index 00000000..f118aff1
--- /dev/null
+++ b/confluent-parallel-consumer-application/kafka/settings.gradle
@@ -0,0 +1,11 @@
+/*
+ * This file was generated by the Gradle 'init' task.
+ *
+ * The settings file is used to specify which projects to include in your build.
+ * For more detailed information on multi-project builds, please refer to https://docs.gradle.org/8.5/userguide/multi_project_builds.html in the Gradle documentation.
+ * This project uses @Incubating APIs which are subject to change.
+ */
+
+rootProject.name = 'parallel-consumer'
+include ':common'
+project(':common').projectDir = file('../../common')
\ No newline at end of file
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ConsumerRecordHandler.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/ConsumerRecordHandler.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ConsumerRecordHandler.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/ConsumerRecordHandler.java
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/FileWritingRecordHandler.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/FileWritingRecordHandler.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/FileWritingRecordHandler.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/FileWritingRecordHandler.java
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/MultithreadedKafkaConsumer.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/MultithreadedKafkaConsumer.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/MultithreadedKafkaConsumer.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/MultithreadedKafkaConsumer.java
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/MultithreadedKafkaConsumerPerfTest.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/MultithreadedKafkaConsumerPerfTest.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/MultithreadedKafkaConsumerPerfTest.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/MultithreadedKafkaConsumerPerfTest.java
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ParallelConsumerApplication.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/ParallelConsumerApplication.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ParallelConsumerApplication.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/ParallelConsumerApplication.java
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ParallelConsumerPerfTest.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/ParallelConsumerPerfTest.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/ParallelConsumerPerfTest.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/ParallelConsumerPerfTest.java
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/PropertiesUtil.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/PropertiesUtil.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/PropertiesUtil.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/PropertiesUtil.java
diff --git a/confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/SleepingRecordHandler.java b/confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/SleepingRecordHandler.java
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/src/main/java/io/confluent/developer/SleepingRecordHandler.java
rename to confluent-parallel-consumer-application/kafka/src/main/java/io/confluent/developer/SleepingRecordHandler.java
diff --git a/confluent-parallel-consumer-application/kafka/code/configuration/dev.properties b/confluent-parallel-consumer-application/kafka/src/main/resources/dev.properties
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/configuration/dev.properties
rename to confluent-parallel-consumer-application/kafka/src/main/resources/dev.properties
diff --git a/confluent-parallel-consumer-application/kafka/code/configuration/perftest-kafka-consumer.properties b/confluent-parallel-consumer-application/kafka/src/main/resources/perftest-kafka-consumer.properties
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/configuration/perftest-kafka-consumer.properties
rename to confluent-parallel-consumer-application/kafka/src/main/resources/perftest-kafka-consumer.properties
diff --git a/confluent-parallel-consumer-application/kafka/code/configuration/perftest-parallel-consumer.properties b/confluent-parallel-consumer-application/kafka/src/main/resources/perftest-parallel-consumer.properties
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/configuration/perftest-parallel-consumer.properties
rename to confluent-parallel-consumer-application/kafka/src/main/resources/perftest-parallel-consumer.properties
diff --git a/confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java b/confluent-parallel-consumer-application/kafka/src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java
similarity index 96%
rename from confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java
rename to confluent-parallel-consumer-application/kafka/src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java
index dca24d41..14403510 100644
--- a/confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java
+++ b/confluent-parallel-consumer-application/kafka/src/test/java/io/confluent/developer/FileWritingRecordHandlerTest.java
@@ -2,7 +2,7 @@
import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.nio.file.Files;
diff --git a/confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java b/confluent-parallel-consumer-application/kafka/src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java
similarity index 95%
rename from confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java
rename to confluent-parallel-consumer-application/kafka/src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java
index 8f1ec072..6266f96f 100644
--- a/confluent-parallel-consumer-application/kafka/code/src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java
+++ b/confluent-parallel-consumer-application/kafka/src/test/java/io/confluent/developer/ParallelConsumerApplicationTest.java
@@ -7,7 +7,7 @@
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.awaitility.Awaitility;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -19,7 +19,7 @@
import static io.confluent.parallelconsumer.ParallelConsumerOptions.ProcessingOrder.KEY;
import static java.util.concurrent.TimeUnit.SECONDS;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.*;
/**
@@ -27,7 +27,7 @@
*/
public class ParallelConsumerApplicationTest {
- private static final String TEST_CONFIG_FILE = "configuration/test.properties";
+ private static final String TEST_CONFIG_FILE = "src/test/resources/test.properties";
/**
* Test the app end to end with a few records consumable via a mock consumer. The app
diff --git a/confluent-parallel-consumer-application/kafka/code/configuration/test.properties b/confluent-parallel-consumer-application/kafka/src/test/resources/test.properties
similarity index 100%
rename from confluent-parallel-consumer-application/kafka/code/configuration/test.properties
rename to confluent-parallel-consumer-application/kafka/src/test/resources/test.properties
diff --git a/settings.gradle b/settings.gradle
index 8dddd8e7..1d55864e 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -15,6 +15,7 @@ include 'aggregating-minmax:kstreams'
include 'aggregating-minmax:flinksql'
include 'aggregating-sum:kstreams'
include 'cogrouping-streams:kstreams'
+include 'confluent-parallel-consumer-application:kafka'
include 'common'
include 'cumulating-windows:flinksql'
include 'filtering:flinksql'