From b0b87d46932b1da9424d94e69e88011b34f794a0 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Wed, 17 Aug 2022 17:25:10 +0200 Subject: [PATCH 01/10] wip --- docs/antora.yml | 1 + .../examples/code/getting-started.sh | 154 ++++++++++++++++ .../examples/code/getting-started.sh.j2 | 82 +++++++++ .../examples/code/kafka-znode.yaml | 8 + .../getting_started/examples/code/kafka.yaml | 24 +++ .../examples/code/zookeeper.yaml | 14 ++ docs/modules/getting_started/nav.adoc | 3 + .../getting_started/pages/first_steps.adoc | 168 ++++++++++++++++++ docs/modules/getting_started/pages/index.adoc | 18 ++ .../getting_started/pages/installation.adoc | 54 ++++++ docs/templating_vars.yaml | 4 + 11 files changed, 530 insertions(+) create mode 100755 docs/modules/getting_started/examples/code/getting-started.sh create mode 100755 docs/modules/getting_started/examples/code/getting-started.sh.j2 create mode 100644 docs/modules/getting_started/examples/code/kafka-znode.yaml create mode 100644 docs/modules/getting_started/examples/code/kafka.yaml create mode 100644 docs/modules/getting_started/examples/code/zookeeper.yaml create mode 100644 docs/modules/getting_started/nav.adoc create mode 100644 docs/modules/getting_started/pages/first_steps.adoc create mode 100644 docs/modules/getting_started/pages/index.adoc create mode 100644 docs/modules/getting_started/pages/installation.adoc create mode 100644 docs/templating_vars.yaml diff --git a/docs/antora.yml b/docs/antora.yml index f169cad4..ef527255 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -2,5 +2,6 @@ name: kafka version: "nightly" title: Stackable Operator for Apache Kafka nav: + - modules/getting_started/nav.adoc - modules/ROOT/nav.adoc prerelease: true diff --git a/docs/modules/getting_started/examples/code/getting-started.sh b/docs/modules/getting_started/examples/code/getting-started.sh new file mode 100755 index 00000000..d5edd8b9 --- /dev/null +++ b/docs/modules/getting_started/examples/code/getting-started.sh @@ -0,0 +1,154 @@ +#!/usr/bin/env bash +set -euo pipefail + +# The getting started guide script +# It uses tagged regions which are included in the documentation +# https://docs.asciidoctor.org/asciidoc/latest/directives/include-tagged-regions/ +# +# There are two variants to go through the guide - using stackablectl or helm +# The script takes either 'stackablectl' or 'helm' as an argument +# +# The script can be run as a test as well, to make sure that the tutorial works +# It includes some assertions throughout, and at the end especially. + +if [ $# -eq 0 ] +then + echo "Installation method argument ('helm' or 'stackablectl') required." 
+ exit 1 +fi + +case "$1" in +"helm") +echo "Adding 'stackable-dev' Helm Chart repository" +# tag::helm-add-repo[] +helm repo add stackable-dev https://repo.stackable.tech/repository/helm-dev/ +# end::helm-add-repo[] +echo "Installing Operators with Helm" +# tag::helm-install-operators[] +helm install --wait commons-operator stackable-dev/commons-operator --version 0.3.0-nightly +helm install --wait secret-operator stackable-dev/secret-operator --version 0.6.0-nightly +helm install --wait zookeeper-operator stackable-dev/zookeeper-operator --version 0.11.0-nightly +helm install --wait hdfs-operator stackable-dev/hdfs-operator --version 0.5.0-nightly +helm install --wait druid-operator stackable-dev/druid-operator --version 0.7.0-nightly +# end::helm-install-operators[] +;; +"stackablectl") +echo "installing Operators with stackablectl" +# tag::stackablectl-install-operators[] +stackablectl operator install \ + commons=0.3.0-nightly \ + secret=0.6.0-nightly \ + zookeeper=0.11.0-nightly \ + hdfs=0.5.0-nightly \ + druid=0.7.0-nightly +# end::stackablectl-install-operators[] +;; +*) +echo "Need to give 'helm' or 'stackablectl' as an argument for which installation method to use!" +exit 1 +;; +esac + +echo "Installing ZooKeeper from zookeeper.yaml" +# tag::install-zookeeper[] +kubectl apply -f zookeeper.yaml +# end::install-zookeeper[] + +sleep 5 + +echo "Awaiting ZooKeeper rollout finish" +# tag::watch-zookeeper-rollout[] +kubectl rollout status --watch statefulset/simple-zk-server-default +# end::watch-zookeeper-rollout[] + +echo "Installing HDFS from hdfs.yaml" +# tag::install-hdfs[] +kubectl apply -f hdfs.yaml +# end::install-hdfs[] + +sleep 5 + +echo "Awaiting HDFS rollout finish" +# tag::watch-hdfs-rollout[] +kubectl rollout status --watch statefulset/simple-hdfs-datanode-default +kubectl rollout status --watch statefulset/simple-hdfs-journalnode-default +kubectl rollout status --watch statefulset/simple-hdfs-namenode-default +# end::watch-hdfs-rollout[] + +echo "Install DruidCluster from druid.yaml" +# tag::install-druid[] +kubectl apply -f druid.yaml +# end::install-druid[] + +sleep 5 + +echo "Awaiting Druid rollout finish" +# tag::watch-druid-rollout[] +kubectl rollout status --watch statefulset/simple-druid-broker-default +kubectl rollout status --watch statefulset/simple-druid-coordinator-default +kubectl rollout status --watch statefulset/simple-druid-historical-default +kubectl rollout status --watch statefulset/simple-druid-middlemanager-default +kubectl rollout status --watch statefulset/simple-druid-router-default +# end::watch-druid-rollout[] + +echo "Starting port-forwarding of port 8888" +# tag::port-forwarding[] +kubectl port-forward svc/simple-druid-router 8888 2>&1 >/dev/null & +# end::port-forwarding[] +PORT_FORWARD_PID=$! +trap "kill $PORT_FORWARD_PID" EXIT +sleep 5 + +submit_job() { +# tag::submit-job[] +curl -s -X 'POST' -H 'Content-Type:application/json' -d @ingestion_spec.json http://localhost:8888/druid/indexer/v1/task +# end::submit-job[] +} + +echo "Submitting job" +task_id=$(submit_job | sed -e 's/.*":"\([^"]\+\).*/\1/g') + +request_job_status() { + curl -s "http://localhost:8888/druid/indexer/v1/task/${task_id}/status" | sed -e 's/.*statusCode":"\([^"]\+\).*/\1/g' +} + +while [ "$(request_job_status)" == "RUNNING" ]; do + echo "Task still running..." + sleep 5 +done + +task_status=$(request_job_status) + +if [ "$task_status" == "SUCCESS" ]; then + echo "Task finished successfully!" 
+else + echo "Task not successful: $task_status" + exit 1 +fi + +segment_load_status() { + curl -s http://localhost:8888/druid/coordinator/v1/loadstatus | sed -e 's/.*wikipedia":\([0-9\.]\+\).*/\1/g' +} + +while [ "$(segment_load_status)" != "100.0" ]; do + echo "Segments still loading..." + sleep 5 +done + +query_data() { +# tag::query-data[] +curl -s -X 'POST' -H 'Content-Type:application/json' -d @query.json http://localhost:8888/druid/v2/sql +# end::query-data[] +} + +echo "Querying data..." +query_result=$(query_data) + +if [ "$query_result" == "$(cat expected_query_result.json)" ]; then + echo "Query result is as expected!" +else + echo "Query result differs from expected result." + echo "Query: $query_result" + echo "Expected: $(cat expected_query_result.json)" + exit 1 +fi diff --git a/docs/modules/getting_started/examples/code/getting-started.sh.j2 b/docs/modules/getting_started/examples/code/getting-started.sh.j2 new file mode 100755 index 00000000..cdf1b075 --- /dev/null +++ b/docs/modules/getting_started/examples/code/getting-started.sh.j2 @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +set -euo pipefail + +# The getting started guide script +# It uses tagged regions which are included in the documentation +# https://docs.asciidoctor.org/asciidoc/latest/directives/include-tagged-regions/ +# +# There are two variants to go through the guide - using stackablectl or helm +# The script takes either 'stackablectl' or 'helm' as an argument +# +# The script can be run as a test as well, to make sure that the tutorial works +# It includes some assertions throughout, and at the end especially. + +if [ $# -eq 0 ] +then + echo "Installation method argument ('helm' or 'stackablectl') required." + exit 1 +fi + +case "$1" in +"helm") +echo "Adding '{{ helm.repo_name }}' Helm Chart repository" +# tag::helm-add-repo[] +helm repo add {{ helm.repo_name }} {{ helm.repo_url }} +# end::helm-add-repo[] +echo "Installing Operators with Helm" +# tag::helm-install-operators[] +helm install --wait commons-operator {{ helm.repo_name }}/commons-operator --devel +helm install --wait secret-operator {{ helm.repo_name }}/secret-operator --devel +helm install --wait zookeeper-operator {{ helm.repo_name }}/zookeeper-operator --devel +helm install --wait kafka-operator {{ helm.repo_name }}/kafka-operator --devel +# end::helm-install-operators[] +;; +"stackablectl") +echo "installing Operators with stackablectl" +# tag::stackablectl-install-operators[] +stackablectl operator install commons secret zookeeper kafka +# end::stackablectl-install-operators[] +;; +*) +echo "Need to provide 'helm' or 'stackablectl' as an argument for which installation method to use!" 
+exit 1 +;; +esac + +echo "Installing ZooKeeper from zookeeper.yaml" +# tag::install-zookeeper[] +kubectl apply -f zookeeper.yaml +# end::install-zookeeper[] + +echo "Installing ZNode from kafka-znode.yaml" +# tag::install-znode[] +kubectl apply -f kafka-znode.yaml +# end::install-znode[] + +sleep 5 + +echo "Awaiting ZooKeeper rollout finish" +# tag::watch-zookeeper-rollout[] +kubectl rollout status --watch statefulset/simple-zk-server-default +# end::watch-zookeeper-rollout[] + +echo "Install KafkaCluster from kafka.yaml" +# tag::install-kafka[] +kubectl apply -f kafka.yaml +# end::install-kafka[] + +sleep 5 + +echo "Awaiting Kafka rollout finish" +# tag::watch-kafka-rollout[] +kubectl rollout status --watch statefulset/simple-kafka-broker-default +# end::watch-kafka-rollout[] + +echo "Starting port-forwarding of port 9092" +# tag::port-forwarding[] +kubectl port-forward svc/simple-kafka-broker 9092 2>&1 >/dev/null & +# end::port-forwarding[] +PORT_FORWARD_PID=$! +trap "kill $PORT_FORWARD_PID" EXIT + +sleep 5 diff --git a/docs/modules/getting_started/examples/code/kafka-znode.yaml b/docs/modules/getting_started/examples/code/kafka-znode.yaml new file mode 100644 index 00000000..5fdc3f59 --- /dev/null +++ b/docs/modules/getting_started/examples/code/kafka-znode.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: simple-kafka-znode +spec: + clusterRef: + name: simple-zk diff --git a/docs/modules/getting_started/examples/code/kafka.yaml b/docs/modules/getting_started/examples/code/kafka.yaml new file mode 100644 index 00000000..f4e5b8d1 --- /dev/null +++ b/docs/modules/getting_started/examples/code/kafka.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: kafka.stackable.tech/v1alpha1 +kind: KafkaCluster +metadata: + name: simple-kafka +spec: + version: 3.2.0-stackable0.1.0 + zookeeperConfigMapName: simple-kafka-znode + config: + tls: null + brokers: + config: + resources: + storage: + logDirs: + capacity: '2Gi' + cpu: + max: '500m' + min: '250m' + memory: + limit: '1Gi' + roleGroups: + default: + replicas: 3 diff --git a/docs/modules/getting_started/examples/code/zookeeper.yaml b/docs/modules/getting_started/examples/code/zookeeper.yaml new file mode 100644 index 00000000..a594993d --- /dev/null +++ b/docs/modules/getting_started/examples/code/zookeeper.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperCluster +metadata: + name: simple-zk +spec: + version: 3.8.0-stackable0.7.1 + servers: + roleGroups: + default: + selector: + matchLabels: + kubernetes.io/os: linux + replicas: 3 diff --git a/docs/modules/getting_started/nav.adoc b/docs/modules/getting_started/nav.adoc new file mode 100644 index 00000000..7a35d5ba --- /dev/null +++ b/docs/modules/getting_started/nav.adoc @@ -0,0 +1,3 @@ +* xref:index.adoc[] +** xref:installation.adoc[] +** xref:first_steps.adoc[] diff --git a/docs/modules/getting_started/pages/first_steps.adoc b/docs/modules/getting_started/pages/first_steps.adoc new file mode 100644 index 00000000..bd22e5b7 --- /dev/null +++ b/docs/modules/getting_started/pages/first_steps.adoc @@ -0,0 +1,168 @@ += First steps + +After going through the xref:installation.adoc[] section and having installed all the Operators, you will now deploy a Druid cluster and it's dependencies. Afterwards you can <<_verify_that_it_works, verify that it works>> by ingesting example data and subsequently query it. 
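+
+Before starting, it can be worth a quick check that the Operators from the previous step are actually running. One way to do that (assuming they were installed into the currently active namespace) is to list the Deployments:
+
+[source,bash]
+----
+kubectl get deployments
+----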
+ +== Setup + +Three things need to be installed to have a Druid cluster: + +* A ZooKeeper instance for internal use by Druid +* An HDFS instance to be used as a backend for deep storage +* The Druid cluster itself + +We will create them in this order, each one is created by applying a manifest file. The Operators you just installed will then create the resources according to the manifest. + +=== ZooKeeper + +Create a file named `zookeeper.yaml` with the following content: + +[source,yaml] + +---- +include::example$code/zookeeper.yaml[] +---- + +Then create the resources by applying the manifest file + +[source,bash] +---- +include::example$code/getting-started.sh[tag=install-zookeeper] +---- + +=== HDFS + +Create `hdfs.yaml` with the following contents: + +[source,yaml] +---- +include::example$code/hdfs.yaml[] +---- + +And apply it: + +---- +include::example$code/getting-started.sh[tag=install-hdfs] +---- + +=== Druid + +Create a file named `druid.yaml` with the following contents: + +[source,yaml] +---- +include::example$code/druid.yaml[] +---- + +And apply it: + +---- +include::example$code/getting-started.sh[tag=install-druid] +---- + +This will create the actual druid instance. + +== Verify that it works + +Next you will submit an ingestion job and then query the ingested data - either through the web interface or the API. + +First, make sure that all the Pods in the StatefulSets are ready: + +[source,bash] +---- +kubectl get statefulset +---- + +The output should show all pods ready: + +---- +NAME READY AGE +simple-druid-broker-default 1/1 5m +simple-druid-coordinator-default 1/1 5m +simple-druid-historical-default 1/1 5m +simple-druid-middlemanager-default 1/1 5m +simple-druid-router-default 1/1 5m +simple-hdfs-datanode-default 1/1 6m +simple-hdfs-journalnode-default 1/1 6m +simple-hdfs-namenode-default 2/2 6m +simple-zk-server-default 3/3 7m +---- + +Then, create a port-forward for the Druid Router: + +---- +include::example$code/getting-started.sh[tag=port-forwarding] +---- + +=== Ingest example data + +Next, we will ingest some example data using the web interface. If you prefer to use the command line instead, follow the instructions in the collapsed section below. + + +[#ingest-cmd-line] +.Alternative: Using the command line +[%collapsible] +==== + +If you prefer to not use the web interface and instead interact with the API, create a file `ingestion_spec.json` with the following contents: + +[source,json] +include::example$code/ingestion_spec.json[] + +Submit the file with the following `curl` command: + +[source,bash] +include::example$code/getting-started.sh[tag=submit-job] + +Continue with the <<_query_the_data,next section>>. + +==== + +To open the web interface navigate your browser to https://localhost:8888/ to find the dashboard: + +image::dashboard.png[] + +Now load the example data: + +image::load_example.png[] + +Click through all pages of the load process. You can also follow the https://druid.apache.org/docs/latest/tutorials/index.html#step-4-load-data[Druid Quickstart Guide]. 
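+
+While you click through the dialog, the ingestion task can also be watched from the terminal. The test script above polls the task status endpoint for exactly this purpose; with the task id returned by the submission response (shown here as a placeholder), that looks like:
+
+[source,bash]
+----
+curl -s "http://localhost:8888/druid/indexer/v1/task/<task-id>/status"
+----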
+ +Once you finished the ingestion dialog you should see the ingestion overview with the job, which will eventually show SUCCESS: + +image::load_success.png[] + + + +=== Query the data + +Query from the user interface by navigating to the "Query" interface in the menu and query the `wikipedia` table: + +[#query-cmd-line] +.Alternative: Using the command line +[%collapsible] +==== + +To query from the commandline, create a file called `query.json` with the query: + +[source,json] +include::example$code/query.json[] + +and execute it: + +[source,bash] +include::example$code/getting-started.sh[tag=query-data] + +The result should be similar to: + +[source,json] +include::example$code/expected_query_result.json[] + +==== + +image::query.png[] + +Great! You've set up your first Druid cluster, ingested some data and queried it in the web interface! + +== What's next + +Have a look at the xref:ROOT:usage.adoc[] page to find out more about the features of the Operator, such as S3 backed deep storage or OPA based authorization. diff --git a/docs/modules/getting_started/pages/index.adoc b/docs/modules/getting_started/pages/index.adoc new file mode 100644 index 00000000..0f93f440 --- /dev/null +++ b/docs/modules/getting_started/pages/index.adoc @@ -0,0 +1,18 @@ += Getting started + +This guide will get you started with Apache Kafka using the Stackable Operator. It will guide you through the installation of the Operator and its dependencies, setting up your first Kafka instance and create, write to and read from a topic. + +== Prerequisites + +You will need: + +* a Kubernetes cluster +* kubectl +* optional: Helm + +== What's next + +The Guide is divided into two steps: + +* xref:installation.adoc[Installing the Operators]. +* xref:first_steps.adoc[Setting up the Kafka instance]. diff --git a/docs/modules/getting_started/pages/installation.adoc b/docs/modules/getting_started/pages/installation.adoc new file mode 100644 index 00000000..142a0911 --- /dev/null +++ b/docs/modules/getting_started/pages/installation.adoc @@ -0,0 +1,54 @@ += Installation + +On this page you will install the Stackable Operator for Apache Kafka and Operators for its dependencies - ZooKeeper - as well as the commons and secret operator which are required by all Stackable Operators. + +== Stackable Operators + +There are 2 ways to run Stackable Operators + +1. Using xref:stackablectl::index.adoc[] + +1. Using Helm + +=== stackablectl + +stackablectl is the command line tool to interact with Stackable operators and our recommended way to install Operators. +Follow the xref:stackablectl::installation.adoc[installation steps] for your platform. + +After you have installed stackablectl run the following command to install all Operators necessary for Druid: + +[source,bash] +---- +include::example$code/getting-started.sh[tag=stackablectl-install-operators] +---- + +The tool will show + +---- +[INFO ] Installing commons operator +[INFO ] Installing secret operator +[INFO ] Installing zookeeper operator +[INFO ] Installing kafka operator +---- + +TIP: Consult the xref:stackablectl::quickstart.adoc[] to learn more about how to use stackablectl. + +=== Helm + +You can also use Helm to install the Operators. 
Add the Stackable Helm repository: +[source,bash] +---- +include::example$code/getting-started.sh[tag=helm-add-repo] +---- + +Then install the Stackable Operators: +[source,bash] +---- +include::example$code/getting-started.sh[tag=helm-install-operators] +---- + +Helm will deploy the Operators in a Kubernetes Deployment and apply the CRDs for the Apache Kafka service (as well as the CRDs for the required operators). You are now ready to deploy Apache Kafka in Kubernetes. + +== What's next + +xref:first_steps.adoc[Set up a Kafka cluster] and its dependencies. diff --git a/docs/templating_vars.yaml b/docs/templating_vars.yaml new file mode 100644 index 00000000..8cc93cf6 --- /dev/null +++ b/docs/templating_vars.yaml @@ -0,0 +1,4 @@ +--- +helm: + repo_name: stackable-dev + repo_url: https://repo.stackable.tech/repository/helm-dev/ From 47195f5adf418dcbae77ce70c2caee9b0ab10511 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 12:47:55 +0200 Subject: [PATCH 02/10] stackablectl and helm scripts working --- .../examples/code/getting-started.sh | 130 ++++++----------- .../examples/code/getting-started.sh.j2 | 42 +++++- .../examples/code/install-operator-output.txt | 6 + .../code/install-operator-output.txt.j2 | 6 + .../getting_started/pages/first_steps.adoc | 133 ++++++------------ docs/modules/getting_started/pages/index.adoc | 3 +- .../getting_started/pages/installation.adoc | 25 ++-- docs/templating_vars.yaml | 5 + 8 files changed, 154 insertions(+), 196 deletions(-) create mode 100644 docs/modules/getting_started/examples/code/install-operator-output.txt create mode 100644 docs/modules/getting_started/examples/code/install-operator-output.txt.j2 diff --git a/docs/modules/getting_started/examples/code/getting-started.sh b/docs/modules/getting_started/examples/code/getting-started.sh index d5edd8b9..dda343b5 100755 --- a/docs/modules/getting_started/examples/code/getting-started.sh +++ b/docs/modules/getting_started/examples/code/getting-started.sh @@ -28,8 +28,7 @@ echo "Installing Operators with Helm" helm install --wait commons-operator stackable-dev/commons-operator --version 0.3.0-nightly helm install --wait secret-operator stackable-dev/secret-operator --version 0.6.0-nightly helm install --wait zookeeper-operator stackable-dev/zookeeper-operator --version 0.11.0-nightly -helm install --wait hdfs-operator stackable-dev/hdfs-operator --version 0.5.0-nightly -helm install --wait druid-operator stackable-dev/druid-operator --version 0.7.0-nightly +helm install --wait kafka-operator stackable-dev/kafka-operator --version 0.7.0-nightly # end::helm-install-operators[] ;; "stackablectl") @@ -39,12 +38,11 @@ stackablectl operator install \ commons=0.3.0-nightly \ secret=0.6.0-nightly \ zookeeper=0.11.0-nightly \ - hdfs=0.5.0-nightly \ - druid=0.7.0-nightly + kafka=0.7.0-nightly # end::stackablectl-install-operators[] ;; *) -echo "Need to give 'helm' or 'stackablectl' as an argument for which installation method to use!" +echo "Need to provide 'helm' or 'stackablectl' as an argument for which installation method to use!" 
exit 1 ;; esac @@ -54,6 +52,11 @@ echo "Installing ZooKeeper from zookeeper.yaml" kubectl apply -f zookeeper.yaml # end::install-zookeeper[] +echo "Installing ZNode from kafka-znode.yaml" +# tag::install-znode[] +kubectl apply -f kafka-znode.yaml +# end::install-znode[] + sleep 5 echo "Awaiting ZooKeeper rollout finish" @@ -61,94 +64,49 @@ echo "Awaiting ZooKeeper rollout finish" kubectl rollout status --watch statefulset/simple-zk-server-default # end::watch-zookeeper-rollout[] -echo "Installing HDFS from hdfs.yaml" -# tag::install-hdfs[] -kubectl apply -f hdfs.yaml -# end::install-hdfs[] - -sleep 5 - -echo "Awaiting HDFS rollout finish" -# tag::watch-hdfs-rollout[] -kubectl rollout status --watch statefulset/simple-hdfs-datanode-default -kubectl rollout status --watch statefulset/simple-hdfs-journalnode-default -kubectl rollout status --watch statefulset/simple-hdfs-namenode-default -# end::watch-hdfs-rollout[] - -echo "Install DruidCluster from druid.yaml" -# tag::install-druid[] -kubectl apply -f druid.yaml -# end::install-druid[] +echo "Install KafkaCluster from kafka.yaml" +# tag::install-kafka[] +kubectl apply -f kafka.yaml +# end::install-kafka[] sleep 5 -echo "Awaiting Druid rollout finish" -# tag::watch-druid-rollout[] -kubectl rollout status --watch statefulset/simple-druid-broker-default -kubectl rollout status --watch statefulset/simple-druid-coordinator-default -kubectl rollout status --watch statefulset/simple-druid-historical-default -kubectl rollout status --watch statefulset/simple-druid-middlemanager-default -kubectl rollout status --watch statefulset/simple-druid-router-default -# end::watch-druid-rollout[] +echo "Awaiting Kafka rollout finish" +# tag::watch-kafka-rollout[] +kubectl rollout status --watch statefulset/simple-kafka-broker-default +# end::watch-kafka-rollout[] -echo "Starting port-forwarding of port 8888" +echo "Starting port-forwarding of port 9092" # tag::port-forwarding[] -kubectl port-forward svc/simple-druid-router 8888 2>&1 >/dev/null & +kubectl port-forward svc/simple-kafka 9092 2>&1 >/dev/null & # end::port-forwarding[] PORT_FORWARD_PID=$! trap "kill $PORT_FORWARD_PID" EXIT -sleep 5 - -submit_job() { -# tag::submit-job[] -curl -s -X 'POST' -H 'Content-Type:application/json' -d @ingestion_spec.json http://localhost:8888/druid/indexer/v1/task -# end::submit-job[] -} - -echo "Submitting job" -task_id=$(submit_job | sed -e 's/.*":"\([^"]\+\).*/\1/g') -request_job_status() { - curl -s "http://localhost:8888/druid/indexer/v1/task/${task_id}/status" | sed -e 's/.*statusCode":"\([^"]\+\).*/\1/g' -} - -while [ "$(request_job_status)" == "RUNNING" ]; do - echo "Task still running..." - sleep 5 -done - -task_status=$(request_job_status) - -if [ "$task_status" == "SUCCESS" ]; then - echo "Task finished successfully!" -else - echo "Task not successful: $task_status" - exit 1 -fi +sleep 5 -segment_load_status() { - curl -s http://localhost:8888/druid/coordinator/v1/loadstatus | sed -e 's/.*wikipedia":\([0-9\.]\+\).*/\1/g' -} - -while [ "$(segment_load_status)" != "100.0" ]; do - echo "Segments still loading..." - sleep 5 -done - -query_data() { -# tag::query-data[] -curl -s -X 'POST' -H 'Content-Type:application/json' -d @query.json http://localhost:8888/druid/v2/sql -# end::query-data[] -} - -echo "Querying data..." -query_result=$(query_data) - -if [ "$query_result" == "$(cat expected_query_result.json)" ]; then - echo "Query result is as expected!" -else - echo "Query result differs from expected result." 
- echo "Query: $query_result" - echo "Expected: $(cat expected_query_result.json)" - exit 1 -fi +echo "Creating test data" +# tag::kcat-create-data[] +echo "some test data" > data +# end::kcat-create-data[] + +echo "Writing test data" +# tag::kcat-write-data[] +kafkacat -b localhost:9092 -t test-data-topic -P data +# end::kcat-write-data[] + +echo "Reading test data" +# tag::kcat-read-data[] +kafkacat -b localhost:9092 -t test-data-topic -C -e > read-data +# end::kcat-read-data[] + +echo "Check contents" +# tag::kcat-check-data[] +cat read-data | grep "some test data" +# end::kcat-check-data[] + +echo "Cleanup" +# tag::kcat-cleanup-data[] +rm data +rm read-data +# end::kcat-cleanup-data[] diff --git a/docs/modules/getting_started/examples/code/getting-started.sh.j2 b/docs/modules/getting_started/examples/code/getting-started.sh.j2 index cdf1b075..1774ccbd 100755 --- a/docs/modules/getting_started/examples/code/getting-started.sh.j2 +++ b/docs/modules/getting_started/examples/code/getting-started.sh.j2 @@ -25,16 +25,20 @@ helm repo add {{ helm.repo_name }} {{ helm.repo_url }} # end::helm-add-repo[] echo "Installing Operators with Helm" # tag::helm-install-operators[] -helm install --wait commons-operator {{ helm.repo_name }}/commons-operator --devel -helm install --wait secret-operator {{ helm.repo_name }}/secret-operator --devel -helm install --wait zookeeper-operator {{ helm.repo_name }}/zookeeper-operator --devel -helm install --wait kafka-operator {{ helm.repo_name }}/kafka-operator --devel +helm install --wait commons-operator {{ helm.repo_name }}/commons-operator --version {{ versions.commons }} +helm install --wait secret-operator {{ helm.repo_name }}/secret-operator --version {{ versions.secret }} +helm install --wait zookeeper-operator {{ helm.repo_name }}/zookeeper-operator --version {{ versions.zookeeper }} +helm install --wait kafka-operator {{ helm.repo_name }}/kafka-operator --version {{ versions.kafka }} # end::helm-install-operators[] ;; "stackablectl") echo "installing Operators with stackablectl" # tag::stackablectl-install-operators[] -stackablectl operator install commons secret zookeeper kafka +stackablectl operator install \ + commons={{ versions.commons }} \ + secret={{ versions.secret }} \ + zookeeper={{ versions.zookeeper }} \ + kafka={{ versions.kafka }} # end::stackablectl-install-operators[] ;; *) @@ -74,9 +78,35 @@ kubectl rollout status --watch statefulset/simple-kafka-broker-default echo "Starting port-forwarding of port 9092" # tag::port-forwarding[] -kubectl port-forward svc/simple-kafka-broker 9092 2>&1 >/dev/null & +kubectl port-forward svc/simple-kafka 9092 2>&1 >/dev/null & # end::port-forwarding[] PORT_FORWARD_PID=$! 
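+# "$!" above captures the PID of the backgrounded port-forward; the trap on
+# the next line kills it when the script exits, so a failing assertion later
+# does not leave a stray kubectl process behind.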
trap "kill $PORT_FORWARD_PID" EXIT sleep 5 + +echo "Creating test data" +# tag::kcat-create-data[] +echo "some test data" > data +# end::kcat-create-data[] + +echo "Writing test data" +# tag::kcat-write-data[] +kafkacat -b localhost:9092 -t test-data-topic -P data +# end::kcat-write-data[] + +echo "Reading test data" +# tag::kcat-read-data[] +kafkacat -b localhost:9092 -t test-data-topic -C -e > read-data +# end::kcat-read-data[] + +echo "Check contents" +# tag::kcat-check-data[] +cat read-data | grep "some test data" +# end::kcat-check-data[] + +echo "Cleanup" +# tag::kcat-cleanup-data[] +rm data +rm read-data +# end::kcat-cleanup-data[] diff --git a/docs/modules/getting_started/examples/code/install-operator-output.txt b/docs/modules/getting_started/examples/code/install-operator-output.txt new file mode 100644 index 00000000..9d22d2b9 --- /dev/null +++ b/docs/modules/getting_started/examples/code/install-operator-output.txt @@ -0,0 +1,6 @@ +# tag::stackablectl-install-operators-output[] +[INFO ] Installing commons operator in version 0.3.0-nightly +[INFO ] Installing secret operator in version 0.6.0-nightly +[INFO ] Installing zookeeper operator in version 0.11.0-nightly +[INFO ] Installing kafka operator in version 0.7.0-nightly +# end::stackablectl-install-operators-output[] diff --git a/docs/modules/getting_started/examples/code/install-operator-output.txt.j2 b/docs/modules/getting_started/examples/code/install-operator-output.txt.j2 new file mode 100644 index 00000000..9a56a44c --- /dev/null +++ b/docs/modules/getting_started/examples/code/install-operator-output.txt.j2 @@ -0,0 +1,6 @@ +# tag::stackablectl-install-operators-output[] +[INFO ] Installing commons operator in version {{ versions.commons }} +[INFO ] Installing secret operator in version {{ versions.secret }} +[INFO ] Installing zookeeper operator in version {{ versions.zookeeper }} +[INFO ] Installing kafka operator in version {{ versions.kafka }} +# end::stackablectl-install-operators-output[] diff --git a/docs/modules/getting_started/pages/first_steps.adoc b/docs/modules/getting_started/pages/first_steps.adoc index bd22e5b7..58f68f90 100644 --- a/docs/modules/getting_started/pages/first_steps.adoc +++ b/docs/modules/getting_started/pages/first_steps.adoc @@ -1,16 +1,15 @@ = First steps -After going through the xref:installation.adoc[] section and having installed all the Operators, you will now deploy a Druid cluster and it's dependencies. Afterwards you can <<_verify_that_it_works, verify that it works>> by ingesting example data and subsequently query it. +After going through the xref:installation.adoc[] section and having installed all the operators, you will now deploy a Kafka cluster and it's dependencies. Afterwards you can <<_verify_that_it_works, verify that it works>> by ingesting example data and subsequently query it. == Setup -Three things need to be installed to have a Druid cluster: +Two things need to be installed to create a Kafka cluster: -* A ZooKeeper instance for internal use by Druid -* An HDFS instance to be used as a backend for deep storage -* The Druid cluster itself +* A ZooKeeper instance for internal use by Kafka +* The Kafka cluster itself -We will create them in this order, each one is created by applying a manifest file. The Operators you just installed will then create the resources according to the manifest. +We will create them in this order, each one is created by applying a manifest file. The operators you just installed will then create the resources according to the manifest. 
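+
+If you are curious which custom resource types the operators registered, one way to list them (assuming `kubectl` is pointed at the same cluster) is to filter `kubectl api-resources` by API group:
+
+[source,bash]
+----
+kubectl api-resources --api-group=zookeeper.stackable.tech
+kubectl api-resources --api-group=kafka.stackable.tech
+----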
=== ZooKeeper @@ -22,48 +21,47 @@ Create a file named `zookeeper.yaml` with the following content: include::example$code/zookeeper.yaml[] ---- -Then create the resources by applying the manifest file +and apply it: [source,bash] ---- include::example$code/getting-started.sh[tag=install-zookeeper] ---- -=== HDFS +Create a file `kafka-znode.yaml` with the following content: -Create `hdfs.yaml` with the following contents: - -[source,yaml] +[source,bash] ---- -include::example$code/hdfs.yaml[] +include::example$code/kafka-znode.yaml[] ---- -And apply it: +and apply it: +[source,bash] ---- -include::example$code/getting-started.sh[tag=install-hdfs] +include::example$code/getting-started.sh[tag=install-znode] ---- -=== Druid +=== Kafka -Create a file named `druid.yaml` with the following contents: +Create a file named `kafka.yaml` with the following contents: [source,yaml] ---- -include::example$code/druid.yaml[] +include::example$code/kafka.yaml[] ---- -And apply it: +and apply it: ---- -include::example$code/getting-started.sh[tag=install-druid] +include::example$code/getting-started.sh[tag=install-kafka] ---- -This will create the actual druid instance. +This will create the actual Kafka instance. == Verify that it works -Next you will submit an ingestion job and then query the ingested data - either through the web interface or the API. +Next you will produce data into a topic and read it via https://github.com/edenhill/kcat#install[kcat]. Depending on your platform you may need to replace `kafkacat` in the commands below with `kcat`. First, make sure that all the Pods in the StatefulSets are ready: @@ -76,93 +74,48 @@ The output should show all pods ready: ---- NAME READY AGE -simple-druid-broker-default 1/1 5m -simple-druid-coordinator-default 1/1 5m -simple-druid-historical-default 1/1 5m -simple-druid-middlemanager-default 1/1 5m -simple-druid-router-default 1/1 5m -simple-hdfs-datanode-default 1/1 6m -simple-hdfs-journalnode-default 1/1 6m -simple-hdfs-namenode-default 2/2 6m +simple-kafka-broker-default 3/3 5m simple-zk-server-default 3/3 7m ---- -Then, create a port-forward for the Druid Router: +Then, create a port-forward for the Kafka Broker: ---- include::example$code/getting-started.sh[tag=port-forwarding] ---- -=== Ingest example data - -Next, we will ingest some example data using the web interface. If you prefer to use the command line instead, follow the instructions in the collapsed section below. - - -[#ingest-cmd-line] -.Alternative: Using the command line -[%collapsible] -==== - -If you prefer to not use the web interface and instead interact with the API, create a file `ingestion_spec.json` with the following contents: - -[source,json] -include::example$code/ingestion_spec.json[] - -Submit the file with the following `curl` command: - -[source,bash] -include::example$code/getting-started.sh[tag=submit-job] - -Continue with the <<_query_the_data,next section>>. - -==== - -To open the web interface navigate your browser to https://localhost:8888/ to find the dashboard: - -image::dashboard.png[] - -Now load the example data: - -image::load_example.png[] +Create a file containing some data: -Click through all pages of the load process. You can also follow the https://druid.apache.org/docs/latest/tutorials/index.html#step-4-load-data[Druid Quickstart Guide]. 
- -Once you finished the ingestion dialog you should see the ingestion overview with the job, which will eventually show SUCCESS: - -image::load_success.png[] - - - -=== Query the data - -Query from the user interface by navigating to the "Query" interface in the menu and query the `wikipedia` table: - -[#query-cmd-line] -.Alternative: Using the command line -[%collapsible] -==== +---- +include::example$code/getting-started.sh[tag=kcat-create-data] +---- -To query from the commandline, create a file called `query.json` with the query: +Write that data: -[source,json] -include::example$code/query.json[] +---- +include::example$code/getting-started.sh[tag=kcat-write-data] +---- -and execute it: +Read that data: -[source,bash] -include::example$code/getting-started.sh[tag=query-data] +---- +include::example$code/getting-started.sh[tag=kcat-read-data] +---- -The result should be similar to: +Check the content: -[source,json] -include::example$code/expected_query_result.json[] +---- +include::example$code/getting-started.sh[tag=kcat-check-data] +---- -==== +And clean up: -image::query.png[] +---- +include::example$code/getting-started.sh[tag=kcat-cleanup-data] +---- -Great! You've set up your first Druid cluster, ingested some data and queried it in the web interface! +You successfully created a Kafka cluster and produced and consumed data. == What's next -Have a look at the xref:ROOT:usage.adoc[] page to find out more about the features of the Operator, such as S3 backed deep storage or OPA based authorization. +Have a look at the xref:ROOT:usage.adoc[] page to find out more about the features of the Operator such as encryption and authentication or how to connect to OPA. diff --git a/docs/modules/getting_started/pages/index.adoc b/docs/modules/getting_started/pages/index.adoc index 0f93f440..c00b1d4c 100644 --- a/docs/modules/getting_started/pages/index.adoc +++ b/docs/modules/getting_started/pages/index.adoc @@ -8,7 +8,8 @@ You will need: * a Kubernetes cluster * kubectl -* optional: Helm +* Helm +* https://github.com/edenhill/kcat#install[kcat] for testing == What's next diff --git a/docs/modules/getting_started/pages/installation.adoc b/docs/modules/getting_started/pages/installation.adoc index 142a0911..35be85f3 100644 --- a/docs/modules/getting_started/pages/installation.adoc +++ b/docs/modules/getting_started/pages/installation.adoc @@ -1,21 +1,20 @@ = Installation -On this page you will install the Stackable Operator for Apache Kafka and Operators for its dependencies - ZooKeeper - as well as the commons and secret operator which are required by all Stackable Operators. +On this page you will install the Stackable Operator for Apache Kafka and operators for its dependencies - ZooKeeper - as well as the commons and secret operator which are required by all Stackable Operators. == Stackable Operators -There are 2 ways to run Stackable Operators +There are 2 ways to install Stackable Operators: -1. Using xref:stackablectl::index.adoc[] +1. Using xref:stackablectl::index.adoc[_stackablectl_] -1. Using Helm +2. Using Helm === stackablectl -stackablectl is the command line tool to interact with Stackable operators and our recommended way to install Operators. -Follow the xref:stackablectl::installation.adoc[installation steps] for your platform. +The _stackablectl_ command line tool is the recommended way to interact with operators and dependencies. Follow the xref:stackablectl::installation.adoc[installation steps] for your platform if you choose to work with _stackablectl_. 
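+
+Once installed, a quick sanity check that the binary is on your PATH (the flag is assumed from common CLI conventions) is:
+
+[source,bash]
+----
+stackablectl --version
+----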
-After you have installed stackablectl run the following command to install all Operators necessary for Druid: +After you have installed stackablectl, run the following command to install all operators necessary for Kafka: [source,bash] ---- @@ -24,30 +23,30 @@ include::example$code/getting-started.sh[tag=stackablectl-install-operators] The tool will show +[source,bash] ---- -[INFO ] Installing commons operator -[INFO ] Installing secret operator -[INFO ] Installing zookeeper operator -[INFO ] Installing kafka operator +include::example$code/install-operator-output.txt[tag=stackablectl-install-operators-output] ---- TIP: Consult the xref:stackablectl::quickstart.adoc[] to learn more about how to use stackablectl. === Helm -You can also use Helm to install the Operators. Add the Stackable Helm repository: +You can also use Helm to install the operators. Add the Stackable Helm repository: + [source,bash] ---- include::example$code/getting-started.sh[tag=helm-add-repo] ---- Then install the Stackable Operators: + [source,bash] ---- include::example$code/getting-started.sh[tag=helm-install-operators] ---- -Helm will deploy the Operators in a Kubernetes Deployment and apply the CRDs for the Apache Kafka service (as well as the CRDs for the required operators). You are now ready to deploy Apache Kafka in Kubernetes. +Helm will deploy the operators in a Kubernetes Deployment and apply the CRDs for the Apache Kafka service (as well as the CRDs for the required operators). You are now ready to deploy Apache Kafka in Kubernetes. == What's next diff --git a/docs/templating_vars.yaml b/docs/templating_vars.yaml index 8cc93cf6..e65915ad 100644 --- a/docs/templating_vars.yaml +++ b/docs/templating_vars.yaml @@ -2,3 +2,8 @@ helm: repo_name: stackable-dev repo_url: https://repo.stackable.tech/repository/helm-dev/ +versions: + commons: 0.3.0-nightly + secret: 0.6.0-nightly + zookeeper: 0.11.0-nightly + kafka: 0.7.0-nightly From 588c7920a96ee36b9ebb254efd9754838acef230 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 12:51:32 +0200 Subject: [PATCH 03/10] fixed ingestion data to produce --- docs/modules/getting_started/pages/first_steps.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/getting_started/pages/first_steps.adoc b/docs/modules/getting_started/pages/first_steps.adoc index 58f68f90..76e4042d 100644 --- a/docs/modules/getting_started/pages/first_steps.adoc +++ b/docs/modules/getting_started/pages/first_steps.adoc @@ -1,6 +1,6 @@ = First steps -After going through the xref:installation.adoc[] section and having installed all the operators, you will now deploy a Kafka cluster and it's dependencies. Afterwards you can <<_verify_that_it_works, verify that it works>> by ingesting example data and subsequently query it. +After going through the xref:installation.adoc[] section and having installed all the operators, you will now deploy a Kafka cluster and the required dependencies. Afterwards you can <<_verify_that_it_works, verify that it works>> by producing test data into a topic and consuming it. 
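+
+The verification steps later on this page drive Kafka with the `kafkacat`/`kcat` client. To confirm up front that the client is available (assuming it is on your PATH under one of its two names), you can print its version:
+
+[source,bash]
+----
+kafkacat -V
+----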
== Setup From d6c0f13a62234c54fbc792492b288d0c16e04aa3 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 13:35:39 +0200 Subject: [PATCH 04/10] fixed clippy --- rust/crd/src/lib.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 38b95e35..c0b23fab 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -123,7 +123,7 @@ pub struct KafkaClusterSpec { pub stopped: Option, } -#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct GlobalKafkaConfig { /// Only affects client connections. This setting controls: @@ -162,13 +162,13 @@ impl Default for GlobalKafkaConfig { } } -#[derive(Clone, Default, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[derive(Clone, Default, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct ClientAuthenticationClass { pub authentication_class: String, } -#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct TlsSecretClass { pub secret_class: String, From cb7d461973b8ad49c9f13b566d743d122b39214f Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 13:35:49 +0200 Subject: [PATCH 05/10] _stackablectl_ -> stackablectl --- docs/modules/getting_started/pages/installation.adoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/modules/getting_started/pages/installation.adoc b/docs/modules/getting_started/pages/installation.adoc index 35be85f3..828f00c1 100644 --- a/docs/modules/getting_started/pages/installation.adoc +++ b/docs/modules/getting_started/pages/installation.adoc @@ -6,13 +6,13 @@ On this page you will install the Stackable Operator for Apache Kafka and operat There are 2 ways to install Stackable Operators: -1. Using xref:stackablectl::index.adoc[_stackablectl_] +1. Using xref:stackablectl::index.adoc[stackablectl] 2. Using Helm === stackablectl -The _stackablectl_ command line tool is the recommended way to interact with operators and dependencies. Follow the xref:stackablectl::installation.adoc[installation steps] for your platform if you choose to work with _stackablectl_. +The stackablectl command line tool is the recommended way to interact with operators and dependencies. Follow the xref:stackablectl::installation.adoc[installation steps] for your platform if you choose to work with stackablectl. 
After you have installed stackablectl, run the following command to install all operators necessary for Kafka: From 08b45aeb306fbcfab60bc6528369806565d48141 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 15:57:19 +0200 Subject: [PATCH 06/10] removed old installation.adoc --- docs/modules/ROOT/nav.adoc | 1 - docs/modules/ROOT/pages/installation.adoc | 60 ----------------------- 2 files changed, 61 deletions(-) delete mode 100644 docs/modules/ROOT/pages/installation.adoc diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index f6349474..3e62b35a 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,5 +1,4 @@ * xref:dependencies.adoc[] -* xref:installation.adoc[] * xref:configuration.adoc[] * xref:usage.adoc[] * Concepts diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc deleted file mode 100644 index 75b20e46..00000000 --- a/docs/modules/ROOT/pages/installation.adoc +++ /dev/null @@ -1,60 +0,0 @@ -= Installation - -There are three ways to run the Kafka Operator: - -1. Helm managed Docker container deployment on Kubernetes - -2. As a Docker container - -3. Build from source. - - -== Helm -Helm allows you to download and deploy Stackable operators on Kubernetes and is by far the easiest installation method. First ensure that you have installed the Stackable Operators Helm repository: -[source,bash] ----- -$ helm repo add stackable https://repo.stackable.tech/repository/helm-dev/ ----- -Then install the Stackable Operator for Apache Kafka -[source,bash] ----- -$ helm install kafka-operator stackable/kafka-operator ----- - -Helm will deploy the operator in a Kubernetes container and apply the CRDs for the Apache Kafka service. You're now ready to deploy Apache Kafka in Kubernetes. - -== Docker - -This Operator is published as a Docker image: - -[source] ----- -docker.stackable.tech/stackable/kafka-operator ----- - -When installing manually with Docker you will need to install the Stackable CRDs for Apache Kafka in your Kubernetes environment. These are available on the -https://github.com/stackabletech/kafka-operator/tree/main/deploy/crd[Stackable GitHub repository] for this operator. -[source] ----- -$ sudo kubectl apply -f kafkacluster.crd.yaml ----- - -To run it straight from Docker you can use this command: -[source,bash] ----- -docker run \ - --name kafka-operator \ - --network host \ - --env KUBECONFIG=/home/stackable/.kube/config \ - --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \ - docker.stackable.tech/stackable/kafka-operator:latest ----- - -== Building the operator from source - -This operator is written in Rust and is developed against the latest stable Rust release (1.56 at the time of writing). 
- -[source] ----- -cargo build ----- From c97439225c2bba94b33c524306d3d69f41e73688 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 15:57:33 +0200 Subject: [PATCH 07/10] removed simple example that is covered in getting started guide --- examples/simple-kafka-cluster.yaml | 45 ------------------------------ 1 file changed, 45 deletions(-) delete mode 100644 examples/simple-kafka-cluster.yaml diff --git a/examples/simple-kafka-cluster.yaml b/examples/simple-kafka-cluster.yaml deleted file mode 100644 index 744a6560..00000000 --- a/examples/simple-kafka-cluster.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- -apiVersion: zookeeper.stackable.tech/v1alpha1 -kind: ZookeeperCluster -metadata: - name: simple-zk -spec: - version: 3.8.0-stackable0.7.1 - servers: - roleGroups: - default: - selector: - matchLabels: - kubernetes.io/os: linux - replicas: 3 - config: {} ---- -apiVersion: zookeeper.stackable.tech/v1alpha1 -kind: ZookeeperZnode -metadata: - name: simple-kafka-znode -spec: - clusterRef: - name: simple-zk ---- -apiVersion: kafka.stackable.tech/v1alpha1 -kind: KafkaCluster -metadata: - name: simple-kafka -spec: - version: 3.2.0-stackable0.1.0 - zookeeperConfigMapName: simple-kafka-znode - brokers: - config: - resources: - storage: - logDirs: - capacity: '2Gi' - cpu: - max: '500m' - min: '250m' - memory: - limit: '1Gi' - roleGroups: - default: - replicas: 3 From 7e4ded78a6fd52f01bf6a9efdd386e49d3736806 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 16:41:17 +0200 Subject: [PATCH 08/10] Apply suggestions from code review Co-authored-by: Felix Hennig --- docs/modules/getting_started/examples/code/zookeeper.yaml | 3 --- docs/modules/getting_started/pages/first_steps.adoc | 2 +- docs/modules/getting_started/pages/index.adoc | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/docs/modules/getting_started/examples/code/zookeeper.yaml b/docs/modules/getting_started/examples/code/zookeeper.yaml index a594993d..ba0be48c 100644 --- a/docs/modules/getting_started/examples/code/zookeeper.yaml +++ b/docs/modules/getting_started/examples/code/zookeeper.yaml @@ -8,7 +8,4 @@ spec: servers: roleGroups: default: - selector: - matchLabels: - kubernetes.io/os: linux replicas: 3 diff --git a/docs/modules/getting_started/pages/first_steps.adoc b/docs/modules/getting_started/pages/first_steps.adoc index 76e4042d..22efb1fc 100644 --- a/docs/modules/getting_started/pages/first_steps.adoc +++ b/docs/modules/getting_started/pages/first_steps.adoc @@ -118,4 +118,4 @@ You successfully created a Kafka cluster and produced and consumed data. == What's next -Have a look at the xref:ROOT:usage.adoc[] page to find out more about the features of the Operator such as encryption and authentication or how to connect to OPA. +Have a look at the xref:ROOT:usage.adoc[] page to find out more about the features of the Kafka Operator. 
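+
+The deployed cluster itself can also be inspected like any other Kubernetes object, which is a handy starting point for exploring those features (resource kind and name assumed from the manifests above):
+
+[source,bash]
+----
+kubectl get kafkaclusters
+kubectl describe kafkacluster simple-kafka
+----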
diff --git a/docs/modules/getting_started/pages/index.adoc b/docs/modules/getting_started/pages/index.adoc index c00b1d4c..37cc2016 100644 --- a/docs/modules/getting_started/pages/index.adoc +++ b/docs/modules/getting_started/pages/index.adoc @@ -8,7 +8,7 @@ You will need: * a Kubernetes cluster * kubectl -* Helm +* optional: Helm * https://github.com/edenhill/kcat#install[kcat] for testing == What's next From c601dc33442831a56fe2b9abd17751256d7e50cf Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 16:42:01 +0200 Subject: [PATCH 09/10] removed resources from example --- docs/modules/getting_started/examples/code/kafka.yaml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/docs/modules/getting_started/examples/code/kafka.yaml b/docs/modules/getting_started/examples/code/kafka.yaml index f4e5b8d1..61662476 100644 --- a/docs/modules/getting_started/examples/code/kafka.yaml +++ b/docs/modules/getting_started/examples/code/kafka.yaml @@ -9,16 +9,6 @@ spec: config: tls: null brokers: - config: - resources: - storage: - logDirs: - capacity: '2Gi' - cpu: - max: '500m' - min: '250m' - memory: - limit: '1Gi' roleGroups: default: replicas: 3 From 877386e1903b444e1c0ef8ba5bfc7e37ab5b2472 Mon Sep 17 00:00:00 2001 From: Malte Sander Date: Thu, 18 Aug 2022 17:20:51 +0200 Subject: [PATCH 10/10] Update docs/modules/getting_started/pages/installation.adoc Co-authored-by: Felix Hennig --- docs/modules/getting_started/pages/installation.adoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/modules/getting_started/pages/installation.adoc b/docs/modules/getting_started/pages/installation.adoc index 828f00c1..a0085b43 100644 --- a/docs/modules/getting_started/pages/installation.adoc +++ b/docs/modules/getting_started/pages/installation.adoc @@ -23,7 +23,7 @@ include::example$code/getting-started.sh[tag=stackablectl-install-operators] The tool will show -[source,bash] +[source] ---- include::example$code/install-operator-output.txt[tag=stackablectl-install-operators-output] ----
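
Taken together, the series leaves `docs/modules/getting_started/examples/code/getting-started.sh` as a self-testing version of the guide: as its header comments note, it can be run end to end with either installation method, assuming a reachable Kubernetes cluster and the chosen tooling installed. A minimal invocation would be:

[source,bash]
----
cd docs/modules/getting_started/examples/code
./getting-started.sh stackablectl   # or: ./getting-started.sh helm
----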