diff --git a/docs/antora.yml b/docs/antora.yml index f169cad4..ef527255 100644 --- a/docs/antora.yml +++ b/docs/antora.yml @@ -2,5 +2,6 @@ name: kafka version: "nightly" title: Stackable Operator for Apache Kafka nav: + - modules/getting_started/nav.adoc - modules/ROOT/nav.adoc prerelease: true diff --git a/docs/modules/ROOT/nav.adoc b/docs/modules/ROOT/nav.adoc index f6349474..3e62b35a 100644 --- a/docs/modules/ROOT/nav.adoc +++ b/docs/modules/ROOT/nav.adoc @@ -1,5 +1,4 @@ * xref:dependencies.adoc[] -* xref:installation.adoc[] * xref:configuration.adoc[] * xref:usage.adoc[] * Concepts diff --git a/docs/modules/ROOT/pages/installation.adoc b/docs/modules/ROOT/pages/installation.adoc deleted file mode 100644 index 75b20e46..00000000 --- a/docs/modules/ROOT/pages/installation.adoc +++ /dev/null @@ -1,60 +0,0 @@ -= Installation - -There are three ways to run the Kafka Operator: - -1. Helm managed Docker container deployment on Kubernetes - -2. As a Docker container - -3. Build from source. - - -== Helm -Helm allows you to download and deploy Stackable operators on Kubernetes and is by far the easiest installation method. First ensure that you have installed the Stackable Operators Helm repository: -[source,bash] ----- -$ helm repo add stackable https://repo.stackable.tech/repository/helm-dev/ ----- -Then install the Stackable Operator for Apache Kafka -[source,bash] ----- -$ helm install kafka-operator stackable/kafka-operator ----- - -Helm will deploy the operator in a Kubernetes container and apply the CRDs for the Apache Kafka service. You're now ready to deploy Apache Kafka in Kubernetes. - -== Docker - -This Operator is published as a Docker image: - -[source] ----- -docker.stackable.tech/stackable/kafka-operator ----- - -When installing manually with Docker you will need to install the Stackable CRDs for Apache Kafka in your Kubernetes environment. 
These are available on the -https://github.com/stackabletech/kafka-operator/tree/main/deploy/crd[Stackable GitHub repository] for this operator. -[source] ----- -$ sudo kubectl apply -f kafkacluster.crd.yaml ----- - -To run it straight from Docker you can use this command: -[source,bash] ----- -docker run \ - --name kafka-operator \ - --network host \ - --env KUBECONFIG=/home/stackable/.kube/config \ - --mount type=bind,source="$HOME/.kube/config",target="/home/stackable/.kube/config" \ - docker.stackable.tech/stackable/kafka-operator:latest ----- - -== Building the operator from source - -This operator is written in Rust and is developed against the latest stable Rust release (1.56 at the time of writing). - -[source] ----- -cargo build ----- diff --git a/docs/modules/getting_started/examples/code/getting-started.sh b/docs/modules/getting_started/examples/code/getting-started.sh new file mode 100755 index 00000000..dda343b5 --- /dev/null +++ b/docs/modules/getting_started/examples/code/getting-started.sh @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +set -euo pipefail + +# The getting started guide script +# It uses tagged regions which are included in the documentation +# https://docs.asciidoctor.org/asciidoc/latest/directives/include-tagged-regions/ +# +# There are two variants to go through the guide - using stackablectl or helm +# The script takes either 'stackablectl' or 'helm' as an argument +# +# The script can be run as a test as well, to make sure that the tutorial works +# It includes some assertions throughout, and at the end especially. + +if [ $# -eq 0 ] +then + echo "Installation method argument ('helm' or 'stackablectl') required." 
+ exit 1 +fi + +case "$1" in +"helm") +echo "Adding 'stackable-dev' Helm Chart repository" +# tag::helm-add-repo[] +helm repo add stackable-dev https://repo.stackable.tech/repository/helm-dev/ +# end::helm-add-repo[] +echo "Installing Operators with Helm" +# tag::helm-install-operators[] +helm install --wait commons-operator stackable-dev/commons-operator --version 0.3.0-nightly +helm install --wait secret-operator stackable-dev/secret-operator --version 0.6.0-nightly +helm install --wait zookeeper-operator stackable-dev/zookeeper-operator --version 0.11.0-nightly +helm install --wait kafka-operator stackable-dev/kafka-operator --version 0.7.0-nightly +# end::helm-install-operators[] +;; +"stackablectl") +echo "installing Operators with stackablectl" +# tag::stackablectl-install-operators[] +stackablectl operator install \ + commons=0.3.0-nightly \ + secret=0.6.0-nightly \ + zookeeper=0.11.0-nightly \ + kafka=0.7.0-nightly +# end::stackablectl-install-operators[] +;; +*) +echo "Need to provide 'helm' or 'stackablectl' as an argument for which installation method to use!" 
+exit 1 +;; +esac + +echo "Installing ZooKeeper from zookeeper.yaml" +# tag::install-zookeeper[] +kubectl apply -f zookeeper.yaml +# end::install-zookeeper[] + +echo "Installing ZNode from kafka-znode.yaml" +# tag::install-znode[] +kubectl apply -f kafka-znode.yaml +# end::install-znode[] + +sleep 5 + +echo "Awaiting ZooKeeper rollout finish" +# tag::watch-zookeeper-rollout[] +kubectl rollout status --watch statefulset/simple-zk-server-default +# end::watch-zookeeper-rollout[] + +echo "Install KafkaCluster from kafka.yaml" +# tag::install-kafka[] +kubectl apply -f kafka.yaml +# end::install-kafka[] + +sleep 5 + +echo "Awaiting Kafka rollout finish" +# tag::watch-kafka-rollout[] +kubectl rollout status --watch statefulset/simple-kafka-broker-default +# end::watch-kafka-rollout[] + +echo "Starting port-forwarding of port 9092" +# tag::port-forwarding[] +kubectl port-forward svc/simple-kafka 9092 2>&1 >/dev/null & +# end::port-forwarding[] +PORT_FORWARD_PID=$! +trap "kill $PORT_FORWARD_PID" EXIT + +sleep 5 + +echo "Creating test data" +# tag::kcat-create-data[] +echo "some test data" > data +# end::kcat-create-data[] + +echo "Writing test data" +# tag::kcat-write-data[] +kafkacat -b localhost:9092 -t test-data-topic -P data +# end::kcat-write-data[] + +echo "Reading test data" +# tag::kcat-read-data[] +kafkacat -b localhost:9092 -t test-data-topic -C -e > read-data +# end::kcat-read-data[] + +echo "Check contents" +# tag::kcat-check-data[] +cat read-data | grep "some test data" +# end::kcat-check-data[] + +echo "Cleanup" +# tag::kcat-cleanup-data[] +rm data +rm read-data +# end::kcat-cleanup-data[] diff --git a/docs/modules/getting_started/examples/code/getting-started.sh.j2 b/docs/modules/getting_started/examples/code/getting-started.sh.j2 new file mode 100755 index 00000000..1774ccbd --- /dev/null +++ b/docs/modules/getting_started/examples/code/getting-started.sh.j2 @@ -0,0 +1,112 @@ +#!/usr/bin/env bash +set -euo pipefail + +# The getting started guide script +# 
It uses tagged regions which are included in the documentation +# https://docs.asciidoctor.org/asciidoc/latest/directives/include-tagged-regions/ +# +# There are two variants to go through the guide - using stackablectl or helm +# The script takes either 'stackablectl' or 'helm' as an argument +# +# The script can be run as a test as well, to make sure that the tutorial works +# It includes some assertions throughout, and at the end especially. + +if [ $# -eq 0 ] +then + echo "Installation method argument ('helm' or 'stackablectl') required." + exit 1 +fi + +case "$1" in +"helm") +echo "Adding '{{ helm.repo_name }}' Helm Chart repository" +# tag::helm-add-repo[] +helm repo add {{ helm.repo_name }} {{ helm.repo_url }} +# end::helm-add-repo[] +echo "Installing Operators with Helm" +# tag::helm-install-operators[] +helm install --wait commons-operator {{ helm.repo_name }}/commons-operator --version {{ versions.commons }} +helm install --wait secret-operator {{ helm.repo_name }}/secret-operator --version {{ versions.secret }} +helm install --wait zookeeper-operator {{ helm.repo_name }}/zookeeper-operator --version {{ versions.zookeeper }} +helm install --wait kafka-operator {{ helm.repo_name }}/kafka-operator --version {{ versions.kafka }} +# end::helm-install-operators[] +;; +"stackablectl") +echo "installing Operators with stackablectl" +# tag::stackablectl-install-operators[] +stackablectl operator install \ + commons={{ versions.commons }} \ + secret={{ versions.secret }} \ + zookeeper={{ versions.zookeeper }} \ + kafka={{ versions.kafka }} +# end::stackablectl-install-operators[] +;; +*) +echo "Need to provide 'helm' or 'stackablectl' as an argument for which installation method to use!" 
+exit 1 +;; +esac + +echo "Installing ZooKeeper from zookeeper.yaml" +# tag::install-zookeeper[] +kubectl apply -f zookeeper.yaml +# end::install-zookeeper[] + +echo "Installing ZNode from kafka-znode.yaml" +# tag::install-znode[] +kubectl apply -f kafka-znode.yaml +# end::install-znode[] + +sleep 5 + +echo "Awaiting ZooKeeper rollout finish" +# tag::watch-zookeeper-rollout[] +kubectl rollout status --watch statefulset/simple-zk-server-default +# end::watch-zookeeper-rollout[] + +echo "Install KafkaCluster from kafka.yaml" +# tag::install-kafka[] +kubectl apply -f kafka.yaml +# end::install-kafka[] + +sleep 5 + +echo "Awaiting Kafka rollout finish" +# tag::watch-kafka-rollout[] +kubectl rollout status --watch statefulset/simple-kafka-broker-default +# end::watch-kafka-rollout[] + +echo "Starting port-forwarding of port 9092" +# tag::port-forwarding[] +kubectl port-forward svc/simple-kafka 9092 2>&1 >/dev/null & +# end::port-forwarding[] +PORT_FORWARD_PID=$! +trap "kill $PORT_FORWARD_PID" EXIT + +sleep 5 + +echo "Creating test data" +# tag::kcat-create-data[] +echo "some test data" > data +# end::kcat-create-data[] + +echo "Writing test data" +# tag::kcat-write-data[] +kafkacat -b localhost:9092 -t test-data-topic -P data +# end::kcat-write-data[] + +echo "Reading test data" +# tag::kcat-read-data[] +kafkacat -b localhost:9092 -t test-data-topic -C -e > read-data +# end::kcat-read-data[] + +echo "Check contents" +# tag::kcat-check-data[] +cat read-data | grep "some test data" +# end::kcat-check-data[] + +echo "Cleanup" +# tag::kcat-cleanup-data[] +rm data +rm read-data +# end::kcat-cleanup-data[] diff --git a/docs/modules/getting_started/examples/code/install-operator-output.txt b/docs/modules/getting_started/examples/code/install-operator-output.txt new file mode 100644 index 00000000..9d22d2b9 --- /dev/null +++ b/docs/modules/getting_started/examples/code/install-operator-output.txt @@ -0,0 +1,6 @@ +# tag::stackablectl-install-operators-output[] +[INFO ] 
Installing commons operator in version 0.3.0-nightly +[INFO ] Installing secret operator in version 0.6.0-nightly +[INFO ] Installing zookeeper operator in version 0.11.0-nightly +[INFO ] Installing kafka operator in version 0.7.0-nightly +# end::stackablectl-install-operators-output[] diff --git a/docs/modules/getting_started/examples/code/install-operator-output.txt.j2 b/docs/modules/getting_started/examples/code/install-operator-output.txt.j2 new file mode 100644 index 00000000..9a56a44c --- /dev/null +++ b/docs/modules/getting_started/examples/code/install-operator-output.txt.j2 @@ -0,0 +1,6 @@ +# tag::stackablectl-install-operators-output[] +[INFO ] Installing commons operator in version {{ versions.commons }} +[INFO ] Installing secret operator in version {{ versions.secret }} +[INFO ] Installing zookeeper operator in version {{ versions.zookeeper }} +[INFO ] Installing kafka operator in version {{ versions.kafka }} +# end::stackablectl-install-operators-output[] diff --git a/docs/modules/getting_started/examples/code/kafka-znode.yaml b/docs/modules/getting_started/examples/code/kafka-znode.yaml new file mode 100644 index 00000000..5fdc3f59 --- /dev/null +++ b/docs/modules/getting_started/examples/code/kafka-znode.yaml @@ -0,0 +1,8 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperZnode +metadata: + name: simple-kafka-znode +spec: + clusterRef: + name: simple-zk diff --git a/docs/modules/getting_started/examples/code/kafka.yaml b/docs/modules/getting_started/examples/code/kafka.yaml new file mode 100644 index 00000000..61662476 --- /dev/null +++ b/docs/modules/getting_started/examples/code/kafka.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: kafka.stackable.tech/v1alpha1 +kind: KafkaCluster +metadata: + name: simple-kafka +spec: + version: 3.2.0-stackable0.1.0 + zookeeperConfigMapName: simple-kafka-znode + config: + tls: null + brokers: + roleGroups: + default: + replicas: 3 diff --git a/docs/modules/getting_started/examples/code/zookeeper.yaml 
b/docs/modules/getting_started/examples/code/zookeeper.yaml new file mode 100644 index 00000000..ba0be48c --- /dev/null +++ b/docs/modules/getting_started/examples/code/zookeeper.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: zookeeper.stackable.tech/v1alpha1 +kind: ZookeeperCluster +metadata: + name: simple-zk +spec: + version: 3.8.0-stackable0.7.1 + servers: + roleGroups: + default: + replicas: 3 diff --git a/docs/modules/getting_started/nav.adoc b/docs/modules/getting_started/nav.adoc new file mode 100644 index 00000000..7a35d5ba --- /dev/null +++ b/docs/modules/getting_started/nav.adoc @@ -0,0 +1,3 @@ +* xref:index.adoc[] +** xref:installation.adoc[] +** xref:first_steps.adoc[] diff --git a/docs/modules/getting_started/pages/first_steps.adoc b/docs/modules/getting_started/pages/first_steps.adoc new file mode 100644 index 00000000..22efb1fc --- /dev/null +++ b/docs/modules/getting_started/pages/first_steps.adoc @@ -0,0 +1,121 @@ += First steps + +After going through the xref:installation.adoc[] section and having installed all the operators, you will now deploy a Kafka cluster and the required dependencies. Afterwards you can <<_verify_that_it_works, verify that it works>> by producing test data into a topic and consuming it. + +== Setup + +Two things need to be installed to create a Kafka cluster: + +* A ZooKeeper instance for internal use by Kafka +* The Kafka cluster itself + +We will create them in this order, each one is created by applying a manifest file. The operators you just installed will then create the resources according to the manifest. 
+ +=== ZooKeeper + +Create a file named `zookeeper.yaml` with the following content: + + +[source,yaml] +---- +include::example$code/zookeeper.yaml[] +---- + +and apply it: + +[source,bash] +---- +include::example$code/getting-started.sh[tag=install-zookeeper] +---- + +Create a file `kafka-znode.yaml` with the following content: + +[source,yaml] +---- +include::example$code/kafka-znode.yaml[] +---- + +and apply it: + +[source,bash] +---- +include::example$code/getting-started.sh[tag=install-znode] +---- + +=== Kafka + +Create a file named `kafka.yaml` with the following contents: + +[source,yaml] +---- +include::example$code/kafka.yaml[] +---- + +and apply it: + +---- +include::example$code/getting-started.sh[tag=install-kafka] +---- + +This will create the actual Kafka instance. + +== Verify that it works + +Next you will produce data into a topic and read it via https://github.com/edenhill/kcat#install[kcat]. Depending on your platform you may need to replace `kafkacat` in the commands below with `kcat`. + +First, make sure that all the Pods in the StatefulSets are ready: + +[source,bash] +---- +kubectl get statefulset +---- + +The output should show all pods ready: + +---- +NAME READY AGE +simple-kafka-broker-default 3/3 5m +simple-zk-server-default 3/3 7m +---- + +Then, create a port-forward for the Kafka Broker: + +---- +include::example$code/getting-started.sh[tag=port-forwarding] +---- + +Create a file containing some data: + +---- +include::example$code/getting-started.sh[tag=kcat-create-data] +---- + +Write that data: + +---- +include::example$code/getting-started.sh[tag=kcat-write-data] +---- + +Read that data: + +---- +include::example$code/getting-started.sh[tag=kcat-read-data] +---- + +Check the content: + +---- +include::example$code/getting-started.sh[tag=kcat-check-data] +---- + +And clean up: + +---- +include::example$code/getting-started.sh[tag=kcat-cleanup-data] +---- + +You successfully created a Kafka cluster and produced and consumed data. 
+ +== What's next + +Have a look at the xref:ROOT:usage.adoc[] page to find out more about the features of the Kafka Operator. diff --git a/docs/modules/getting_started/pages/index.adoc b/docs/modules/getting_started/pages/index.adoc new file mode 100644 index 00000000..37cc2016 --- /dev/null +++ b/docs/modules/getting_started/pages/index.adoc @@ -0,0 +1,19 @@ += Getting started + +This guide will get you started with Apache Kafka using the Stackable Operator. It will guide you through the installation of the Operator and its dependencies, setting up your first Kafka instance and creating, writing to and reading from a topic. + +== Prerequisites + +You will need: + +* a Kubernetes cluster +* kubectl +* optional: Helm +* https://github.com/edenhill/kcat#install[kcat] for testing + +== What's next + +The guide is divided into two steps: + +* xref:installation.adoc[Installing the Operators]. +* xref:first_steps.adoc[Setting up the Kafka instance]. diff --git a/docs/modules/getting_started/pages/installation.adoc b/docs/modules/getting_started/pages/installation.adoc new file mode 100644 index 00000000..a0085b43 --- /dev/null +++ b/docs/modules/getting_started/pages/installation.adoc @@ -0,0 +1,53 @@ += Installation + +On this page you will install the Stackable Operator for Apache Kafka and operators for its dependencies - ZooKeeper - as well as the commons and secret operator which are required by all Stackable Operators. + +== Stackable Operators + +There are 2 ways to install Stackable Operators: + +1. Using xref:stackablectl::index.adoc[stackablectl] + +2. Using Helm + +=== stackablectl + +The stackablectl command line tool is the recommended way to interact with operators and dependencies. Follow the xref:stackablectl::installation.adoc[installation steps] for your platform if you choose to work with stackablectl. 
+ +After you have installed stackablectl, run the following command to install all operators necessary for Kafka: + +[source,bash] +---- +include::example$code/getting-started.sh[tag=stackablectl-install-operators] +---- + +The tool will show + +[source] +---- +include::example$code/install-operator-output.txt[tag=stackablectl-install-operators-output] +---- + +TIP: Consult the xref:stackablectl::quickstart.adoc[] to learn more about how to use stackablectl. + +=== Helm + +You can also use Helm to install the operators. Add the Stackable Helm repository: + +[source,bash] +---- +include::example$code/getting-started.sh[tag=helm-add-repo] +---- + +Then install the Stackable Operators: + +[source,bash] +---- +include::example$code/getting-started.sh[tag=helm-install-operators] +---- + +Helm will deploy the operators in a Kubernetes Deployment and apply the CRDs for the Apache Kafka service (as well as the CRDs for the required operators). You are now ready to deploy Apache Kafka in Kubernetes. + +== What's next + +xref:first_steps.adoc[Set up a Kafka cluster] and its dependencies. 
diff --git a/docs/templating_vars.yaml b/docs/templating_vars.yaml new file mode 100644 index 00000000..e65915ad --- /dev/null +++ b/docs/templating_vars.yaml @@ -0,0 +1,9 @@ +--- +helm: + repo_name: stackable-dev + repo_url: https://repo.stackable.tech/repository/helm-dev/ +versions: + commons: 0.3.0-nightly + secret: 0.6.0-nightly + zookeeper: 0.11.0-nightly + kafka: 0.7.0-nightly diff --git a/examples/simple-kafka-cluster.yaml b/examples/simple-kafka-cluster.yaml deleted file mode 100644 index 744a6560..00000000 --- a/examples/simple-kafka-cluster.yaml +++ /dev/null @@ -1,45 +0,0 @@ ---- -apiVersion: zookeeper.stackable.tech/v1alpha1 -kind: ZookeeperCluster -metadata: - name: simple-zk -spec: - version: 3.8.0-stackable0.7.1 - servers: - roleGroups: - default: - selector: - matchLabels: - kubernetes.io/os: linux - replicas: 3 - config: {} ---- -apiVersion: zookeeper.stackable.tech/v1alpha1 -kind: ZookeeperZnode -metadata: - name: simple-kafka-znode -spec: - clusterRef: - name: simple-zk ---- -apiVersion: kafka.stackable.tech/v1alpha1 -kind: KafkaCluster -metadata: - name: simple-kafka -spec: - version: 3.2.0-stackable0.1.0 - zookeeperConfigMapName: simple-kafka-znode - brokers: - config: - resources: - storage: - logDirs: - capacity: '2Gi' - cpu: - max: '500m' - min: '250m' - memory: - limit: '1Gi' - roleGroups: - default: - replicas: 3 diff --git a/rust/crd/src/lib.rs b/rust/crd/src/lib.rs index 38b95e35..c0b23fab 100644 --- a/rust/crd/src/lib.rs +++ b/rust/crd/src/lib.rs @@ -123,7 +123,7 @@ pub struct KafkaClusterSpec { pub stopped: Option, } -#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct GlobalKafkaConfig { /// Only affects client connections. 
This setting controls: @@ -162,13 +162,13 @@ impl Default for GlobalKafkaConfig { } } -#[derive(Clone, Default, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[derive(Clone, Default, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct ClientAuthenticationClass { pub authentication_class: String, } -#[derive(Clone, Debug, Deserialize, JsonSchema, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, Eq, JsonSchema, PartialEq, Serialize)] #[serde(rename_all = "camelCase")] pub struct TlsSecretClass { pub secret_class: String,