diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 20cdfa02f..392974af5 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -2,7 +2,7 @@ /cc -/assign +/assign /cherry-pick diff --git a/docs/HACKING.md b/docs/HACKING.md index 120e6797a..8abbc6d06 100644 --- a/docs/HACKING.md +++ b/docs/HACKING.md @@ -2,134 +2,48 @@ ## Prerequisites -- Cluster administrator must set `vm.max_map_count` sysctl to 262144 on the host level of each node in your cluster prior to running the operator. -- In case hostmounted volume is used, the directory on the host must have 777 permissions and the following selinux labels (TODO). -- In case secure cluster is used, the certificates must be pre-generated and uploaded to the secret `-certs` +In the case this operator is being used without the Cluster Logging Operator or Jaeger Operator: -## Kubernetes +- the namespace where the Elasticsearch cluster will be deployed into needs to be created. By default our make targets use `openshift-logging`, however they can be overridden by setting `DEPLOYMENT_NAMESPACE`. -Make sure certificates are pre-generated and deployed as secret. -Upload the Custom Resource Definition to your Kubernetes cluster: - - $ kubectl create -f deploy/crd.yaml - -Deploy the required roles to the cluster: - - $ kubectl create -f deploy/rbac.yaml - -Deploy custom resource and the Deployment resource of the operator: - - $ kubectl create -f deploy/cr.yaml - $ kubectl create -f deploy/operator.yaml +- certificates must be provided in the form of a secret where the name matches the pattern `-secret`. For the convenience of local development the make target `deploy-example-secret` can be used to do this. ## OpenShift -As a cluster admin apply the template with the roles and permissions: - - $ oc process -f deploy/openshift/admin-elasticsearch-template.yaml | oc apply -f - - -The template deploys CRD, roles and rolebindings. 
You can pass variables: - -- `NAMESPACE` to specify which namespace's default ServiceAccount will be allowed to manage the Custom Resource. -- `ELASTICSEARCH_ADMIN_USER` to specify which user of OpenShift will be allowed to manage the Custom Resource. - -For example: - - $ oc process NAMESPACE=myproject ELASTICSEARCH_ADMIN_USER=developer -f deploy/openshift/admin-elasticsearch-template.yaml | oc apply -f - - -Grant permissions to extra users by giving them the role `elasticsearch-operator`. - -As the user which was specified as `ELASTICSEARCH_ADMIN_USER` on previous step: - -Make sure the secret with Elasticsearch certificates exists and is named `-certs` - -Then process the following template: - - $ oc process -f deploy/openshift/elasticsearch-template.yaml | oc apply -f - - -The template deploys the Custom Resource and the operator deployment. You can pass the following variables to the template: - -- `NAMESPACE` - namespace where the Elasticsearch cluster will be deployed. Must be the same as the one specified by admin -- `ELASTICSEARCH_CLUSTER_NAME` - name of the Elasticsearch cluster to be deployed - -For example: - - $ oc process NAMESPACE=myproject ELASTICSEARCH_CLUSTER_NAME=elastic1 -f deploy/openshift/elasticsearch-template.yaml | oc apply -f - - -## Openshift Alternatives - Make targets -If you are using an image built in a different node, you can specify to use a remote registry by setting -the environment variable `REMOTE_REGISTRY=true` before running any of the targets below. See `hack/deploy-image.sh` -and `hack/deploy.sh` for more details. - -* `REMOTE_REGISTRY` Set to `true` if you are running the cluster on a different machine - than the one you are developing on. For example, if you are running a cluster in a - local libvirt or minishift environment, you may want to build the image on the host - and push them to the cluster running in the VM. - You will need a username with a password (i.e. not the default `system:admin` user). 
- If your cluster was deployed with the `allow_all` identity provider, you can create - a user like this: `oc login --username=admin --password=admin`, then assign it rights: - `oc login --username=system:admin` - `oc adm policy add-cluster-role-to-user cluster-admin admin` - If you used the new `openshift-installer`, it created a user named `kubeadmin` - with the password in the file `installer/auth/kubeadmin_password`. - `oc login --username=kubeadmin --password=$( cat ../installer/auth/kubeadmin_password )` - The user should already have `cluster-admin` rights. - -* To build the image on your dev machine, base images need to be pulled from the openshift - CI registry. First, request a token from `https://api.ci.openshift.org/oauth/token/request` - and use the token to login the registry cluster: oc login --token= --server=https://api.ci.openshift.org. Retrieve the registry secret to a local file by - `oc registry login --to=pull-secret.txt`. Copy the `pull-secret.txt` to the configuration - file of docker registry, e.g., `cp pull-secret.txt $HOME/.docker/config.json`. Then `make image` - -It is additionally possible to deploy the operator to an Openshift cluster using the provided make targets. These -targets assume you have cluster admin access. Following are a few of these targets: - -### deploy -Deploy the resources for the operator, build the operator image, push the image to the Openshift registry - -### deploy-setup -Deploy the pre-requirements for the operator to function (i.e. CRD, RBAC, sample secret) - -### deploy-example -Install the operator and deploy an example custom resource for a single node -Elasticsearch cluster in the default namespace `openshift-logging`. To deploy -in a different namespace: `DEPLOYMENT_NAMESPACE=newproject make deploy-example`. 
+To build a local Elasticsearch Operator image and deploy the example CR you can use: +``` +make deploy-example +``` -### elasticsearch-cleanup -Remove all deployed resources +Alternatively, if you just want to deploy the Elasticsearch Operator and provide your own CR afterwards you can use: +``` +make deploy +``` -### run -Deploy the example cluster and start running the operator. The end result is that there will be an -`elasticsearch` custom resource, and an elasticsearch pod running. You can view the operator log by -looking at the log file specified by `$(RUN_LOG)` (default `elasticsearch-operator.log`). The command -is run in the background - when finished, kill the process by killing the pid, which is written to the -file `$(RUN_PID)` (default `elasticsearch-operator.pid`) e.g. `kill $(cat elasticsearch-operator.pid)` +There is an example CR provided in the `hack` directory that can be created with: +``` +oc create -f hack/cr.yaml +``` # Customize your cluster ## Image customization The operator is designed to work with `quay.io/openshift/origin-logging-elasticsearch6` image. To use -a different image, edit `manifests/image-references` before deployment, or edit the elasticsearch -cr after deployment e.g. `oc edit elasticsearch elasticsearch`. +a different image, edit `manifests/image-references` before deployment, or edit the environment variable in the Elasticsearch Operator csv after deployment. e.g. `oc edit csv -n openshift-operators-redhat elasticsearch-operator.v4.6.0`. ## Storage configuration Storage is configurable per individual node type. Possible configuration options: -- Hostmounted directory - Empty directory -- Existing PersistentVolume -- New PersistentVolume generated by StorageClass +- PersistentVolume generated by StorageClass (if storage class is left off the cluster default is used) ## Elasticsearch cluster topology customization Decide how many nodes you want to run. 
-## Elasticsearch node configuration customization - -TODO ## Exposing elasticsearch service with a route @@ -146,68 +60,47 @@ You do not need to set the spec.tls.key, spec.tls.certificate and spec.tls.caCer Kubernetes TBD+ and OpenShift TBD+ are supported. -- [x] SSL-secured deployment (using Searchguard) +- [x] SSL-secured deployment (using Opendistro) - [x] Insecure deployment (requires different image) - [x] Index per tenant -- [x] Logging to a file or to console -- [ ] Elasticsearch 6.x support -- [x] Elasticsearch 5.6.x support +- [x] Elasticsearch 6.x support - [x] Master role - [x] Client role - [x] Data role - [x] Clientdata role - [x] Clientdatamaster role -- [ ] Elasticsearch snapshots - [x] Prometheus monitoring -- [ ] Status monitoring -- [ ] Rolling restarts +- [x] Status monitoring +- [x] Rolling restarts # Testing In a real deployment OpenShift monitoring will be installed. However for testing purposes, you should install the monitoring CRDs: ``` -[REMOTE_REGISTRY=true] make deploy-setup +make deploy ``` -Use `REMOTE_REGISTRY=true make deploy-image` to build the image and copy it -to the remote registry. - ## E2E Testing -To run the e2e tests, install the above CRDs and from the repo directory, run: +To run the e2e tests run: ``` -make test-e2e +make test-e2e-olm ``` This assumes: -* the operator-sdk installed (e.g. `make operator-sdk`) -* the operator image is built (e.g. `make image`) and available to the OKD cluster - -**Note:** It is necessary to set the `IMAGE_ELASTICSEARCH_OPERATOR` environment variable to a valid pull spec in order to run this test against local changes to the `elasticsearch-operator`. 
For example: -``` -make deploy-image && \ -IMAGE_ELASTICSEARCH_OPERATOR=quay.io/openshift/origin-elasticsearch-operator:latest make test-e2e -``` +* the Elasticsearch Operator pod is already deployed and running +* the Elasticsearch Operator Registry pod is already deployed and running ## Dev Testing -You should first ensure that you have commands such as `imagebuilder` and `operator-sdk` -available by using something like `https://github.com/openshift/origin-aggregated-logging/blob/master/hack/sdk_setup.sh`. +You should first ensure that you have `podman` available. -To set up your local environment based on what will be provided by OLM, run: +To test on an OKD cluster, you can run: ``` -sudo sysctl -w vm.max_map_count=262144 -ELASTICSEARCH_OPERATOR=$GOPATH/src/github.com/openshift/elasticsearch-operator -[REMOTE_REGISTRY=true] make deploy-setup -[REMOTE_REGISTRY=true] make deploy-example +make deploy ``` - -To test on an OKD cluster, you can run: - - make run - To remove created API objects: ``` -make undeploy +make uninstall ``` ## Building a Universal Base Image (UBI) based image diff --git a/manifests/4.7/elasticsearch-operator.v4.7.0.clusterserviceversion.yaml b/manifests/4.7/elasticsearch-operator.v4.7.0.clusterserviceversion.yaml index ad3327ca3..168150df4 100644 --- a/manifests/4.7/elasticsearch-operator.v4.7.0.clusterserviceversion.yaml +++ b/manifests/4.7/elasticsearch-operator.v4.7.0.clusterserviceversion.yaml @@ -16,14 +16,14 @@ metadata: capabilities: "Seamless Upgrades" certified: "false" description: |- - The Elasticsearch Operator for OKD provides a means for configuring and managing an Elasticsearch cluster for tracing and cluster logging. + The Elasticsearch Operator for OCP provides a means for configuring and managing an Elasticsearch cluster for tracing and cluster logging. 
## Prerequisites and Requirements ### Elasticsearch Operator Namespace The Elasticsearch Operator must be deployed to the global operator group namespace ### Memory Considerations Elasticsearch is a memory intensive application. The initial - set of OKD nodes may not be large enough to support the Elasticsearch cluster. Additional OKD nodes must be added - to the OKD cluster if you desire to run with the recommended (or better) memory. Each ES node can operate with a + set of OCP nodes may not be large enough to support the Elasticsearch cluster. Additional OCP nodes must be added + to the OCP cluster if you desire to run with the recommended (or better) memory. Each ES node can operate with a lower memory setting though this is not recommended for production deployments. containerImage: quay.io/openshift/origin-elasticsearch-operator:latest createdAt: 2019-02-20T08:00:00Z @@ -31,54 +31,84 @@ metadata: olm.skipRange: ">=4.5.0-0 <4.7.0" alm-examples: |- [ - { - "apiVersion": "logging.openshift.io/v1", - "kind": "Elasticsearch", - "metadata": { - "name": "elasticsearch" - }, - "spec": { - "managementState": "Managed", - "nodeSpec": { - "image": "quay.io/openshift/origin-logging-elasticsearch6:latest", - "resources": { - "limits": { - "memory": "1Gi" - }, - "requests": { - "memory": "512Mi" - } - } - }, - "redundancyPolicy": "SingleRedundancy", - "nodes": [ - { - "nodeCount": 1, - "roles": ["client","data","master"] - } - ] - } + { + "apiVersion": "logging.openshift.io/v1", + "kind": "Elasticsearch", + "metadata": { + "name": "elasticsearch" }, - { - "apiVersion": "logging.openshift.io/v1", - "kind": "Kibana", - "metadata": { - "name": "kibana" - }, - "spec": { - "managementState": "Managed", - "replicas": 1, - "nodeSelector": {}, + "spec": { + "managementState": "Managed", + "nodeSpec": { "resources": { "limits": { - "memory": "512Mi" + "memory": "1Gi" }, "requests": { - "memory": "512Mi" + "cpu": "100m", + "memory": "1Gi" + } + } + }, + "nodes": [ + { + "nodeCount": 
1, + "roles": ["client", "data", "master"], + "storage": { + "size": "20G" } } + ], + "redundancyPolicy": "ZeroRedundancy", + "indexManagement": { + "policies": [ + { + "name": "infra-policy", + "pollInterval": "30m", + "phases": { + "hot": { + "actions": { + "rollover": { + "maxAge": "8h" + } + } + }, + "delete": { + "minAge": "2d" + } + } + } + ], + "mappings": [ + { + "name": "infra", + "policyRef": "infra-policy", + "aliases": ["infra", "logs.infra"] + } + ] + } + } + }, + { + "apiVersion": "logging.openshift.io/v1", + "kind": "Kibana", + "metadata": { + "name": "kibana" + }, + "spec": { + "managementState": "Managed", + "replicas": 1, + "nodeSelector": {}, + "resources": { + "limits": { + "memory": "512Mi" + }, + "requests": { + "memory": "512Mi" + } } } + } ] spec: version: 4.7.0 @@ -86,9 +116,9 @@ spec: minKubeVersion: 1.18.3 description: | - The Elasticsearch Operator for OKD provides a means for configuring and managing an Elasticsearch cluster for use in tracing and cluster logging. - This operator only supports OKD Cluster Logging and Jaeger. It is tightly coupled to each and is not currently capable of - being used as a general purpose manager of Elasticsearch clusters running on OKD. + The Elasticsearch Operator for OCP provides a means for configuring and managing an Elasticsearch cluster for use in tracing and cluster logging. + This operator only supports OCP Cluster Logging and Jaeger. It is tightly coupled to each and is not currently capable of + being used as a general purpose manager of Elasticsearch clusters running on OCP. Once installed, the operator provides the following features: * **Create/Destroy**: Deploy an Elasticsearch cluster to the same namespace in which the Elasticsearch custom resource is created. @@ -100,7 +130,7 @@ spec: email: aos-logging@redhat.com provider: - name: Red Hat, Inc + name: Red Hat links: - name: Elastic