From 681bf5b7920226b672be6b143c7fc5a967ad8515 Mon Sep 17 00:00:00 2001 From: Eric Wolinetz Date: Thu, 14 Feb 2019 14:51:38 -0600 Subject: [PATCH] Refactoring elasticsearch operator to improve code readability --- cmd/elasticsearch-operator/main.go | 6 +- hack/test-e2e.sh | 5 + pkg/apis/elasticsearch/v1alpha1/types.go | 30 +- pkg/k8shandler/cluster.go | 647 ++++++-------------- pkg/k8shandler/common.go | 496 +++++++++++++++ pkg/k8shandler/configmaps.go | 244 +++++--- pkg/k8shandler/defaults.go | 18 +- pkg/k8shandler/deployment.go | 621 ++++++++++++++----- pkg/k8shandler/desirednodestate.go | 731 ----------------------- pkg/k8shandler/elasticsearch.go | 328 ++++++++++ pkg/k8shandler/nodetypefactory.go | 118 ++-- pkg/k8shandler/persistentvolumeclaims.go | 6 +- pkg/k8shandler/prometheus_rule.go | 7 +- pkg/k8shandler/rbac.go | 97 ++- pkg/k8shandler/render_config.go | 69 --- pkg/k8shandler/secret.go | 52 ++ pkg/k8shandler/service.go | 145 +++++ pkg/k8shandler/service_monitor.go | 7 +- pkg/k8shandler/serviceaccount.go | 33 +- pkg/k8shandler/services.go | 110 ---- pkg/k8shandler/statefulset.go | 495 +++++++++++++-- pkg/k8shandler/status.go | 298 +++++---- pkg/k8shandler/util.go | 430 +------------ pkg/k8shandler/util_test.go | 172 ------ pkg/stub/handler.go | 63 +- pkg/utils/exec.go | 68 --- pkg/utils/exec_util.go | 112 ---- pkg/utils/utils.go | 265 -------- test/e2e/elasticsearch_test.go | 42 +- test/e2e/main_test.go | 3 +- test/files/ca.crt | 44 +- test/files/elasticsearch.crt | 58 +- test/files/elasticsearch.key | 76 ++- test/files/logging-es.crt | 62 +- test/files/logging-es.key | 76 ++- test/files/system.admin.crt | 47 +- test/files/system.admin.key | 76 ++- test/utils/utils.go | 64 ++ 38 files changed, 3115 insertions(+), 3106 deletions(-) create mode 100644 pkg/k8shandler/common.go delete mode 100644 pkg/k8shandler/desirednodestate.go create mode 100644 pkg/k8shandler/elasticsearch.go delete mode 100644 pkg/k8shandler/render_config.go create mode 100644 pkg/k8shandler/secret.go create mode 100644 pkg/k8shandler/service.go delete mode 100644 pkg/k8shandler/services.go delete mode 100644 pkg/k8shandler/util_test.go delete mode 100644 pkg/utils/exec.go delete mode 100644 pkg/utils/exec_util.go create mode 100644 test/utils/utils.go diff --git a/cmd/elasticsearch-operator/main.go b/cmd/elasticsearch-operator/main.go index edf5c7e4d..79495e424 100644 --- a/cmd/elasticsearch-operator/main.go +++ b/cmd/elasticsearch-operator/main.go @@ -10,13 +10,13 @@ import ( "strings" "time" - stub "github.com/openshift/elasticsearch-operator/pkg/stub" "github.com/openshift/elasticsearch-operator/pkg/utils" + "github.com/sirupsen/logrus" + + stub "github.com/openshift/elasticsearch-operator/pkg/stub" sdk "github.com/operator-framework/operator-sdk/pkg/sdk" k8sutil "github.com/operator-framework/operator-sdk/pkg/util/k8sutil" sdkVersion "github.com/operator-framework/operator-sdk/version" - - "github.com/sirupsen/logrus" ) const ( diff --git a/hack/test-e2e.sh b/hack/test-e2e.sh index e14fb01b4..c31b355a0 100755 --- a/hack/test-e2e.sh +++ b/hack/test-e2e.sh @@ -6,6 +6,11 @@ if [ -n "${DEBUG:-}" ]; then fi IMAGE_ELASTICSEARCH_OPERATOR=${IMAGE_ELASTICSEARCH_OPERATOR:-quay.io/openshift/origin-elasticsearch-operator:latest} + +if [ -n "${IMAGE_FORMAT:-}" ] ; then + IMAGE_ELASTICSEARCH_OPERATOR=$(sed -e "s,\${component},elasticsearch-operator," <(echo $IMAGE_FORMAT)) +fi + KUBECONFIG=${KUBECONFIG:-$HOME/.kube/config} repo_dir="$(dirname $0)/.." 
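The IMAGE_FORMAT handling added to hack/test-e2e.sh substitutes CI's ${component} placeholder to derive the operator image. A minimal sketch of the same substitution expressed in Go, for illustration only; the default image and env var mirror the script, and nothing in this sketch is part of the patch itself:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Mirrors the sed substitution in hack/test-e2e.sh:
	//   IMAGE_ELASTICSEARCH_OPERATOR=$(sed -e "s,\${component},elasticsearch-operator," <(echo $IMAGE_FORMAT))
	image := "quay.io/openshift/origin-elasticsearch-operator:latest"
	if format := os.Getenv("IMAGE_FORMAT"); format != "" {
		image = strings.Replace(format, "${component}", "elasticsearch-operator", 1)
	}
	fmt.Println(image)
}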
diff --git a/pkg/apis/elasticsearch/v1alpha1/types.go b/pkg/apis/elasticsearch/v1alpha1/types.go index a931c9cfa..df96e56e2 100644 --- a/pkg/apis/elasticsearch/v1alpha1/types.go +++ b/pkg/apis/elasticsearch/v1alpha1/types.go @@ -64,7 +64,7 @@ type ElasticsearchNode struct { } type ElasticsearchStorageSpec struct { - StorageClassName string `json:"storageClassName,omitempty"` + StorageClassName *string `json:"storageClassName,omitempty"` Size *resource.Quantity `json:"size,omitempty"` } @@ -80,8 +80,10 @@ type ElasticsearchNodeStatus struct { } type ElasticsearchNodeUpgradeStatus struct { - UnderUpgrade UpgradeStatus `json:"underUpgrade,omitempty"` - UpgradePhase ElasticsearchUpgradePhase `json:"upgradePhase,omitempty"` + ScheduledForUpgrade v1.ConditionStatus `json:"scheduledUpgrade,omitempty"` + ScheduledForRedeploy v1.ConditionStatus `json:"scheduledRedeploy,omitempty"` + UnderUpgrade v1.ConditionStatus `json:"underUpgrade,omitempty"` + UpgradePhase ElasticsearchUpgradePhase `json:"upgradePhase,omitempty"` } type ElasticsearchUpgradePhase string @@ -98,13 +100,6 @@ type ElasticsearchNodeSpec struct { Resources v1.ResourceRequirements `json:"resources"` } -type UpgradeStatus string - -const ( - UnderUpgradeTrue UpgradeStatus = "True" - UnderUpgradeFalse UpgradeStatus = "False" -) - type ElasticsearchRequiredAction string const ( @@ -127,8 +122,9 @@ const ( type ShardAllocationState string const ( - ShardAllocationTrue ShardAllocationState = "True" - ShardAllocationFalse ShardAllocationState = "False" + ShardAllocationAll ShardAllocationState = "all" + ShardAllocationNone ShardAllocationState = "none" + ShardAllocationUnknown ShardAllocationState = "shard allocation unknown" ) // ElasticsearchStatus represents the status of Elasticsearch cluster @@ -165,7 +161,7 @@ type ClusterCondition struct { // Type is the type of the condition. Type ClusterConditionType `json:"type"` // Status is the status of the condition. - Status ConditionStatus `json:"status"` + Status v1.ConditionStatus `json:"status"` // Last time the condition transitioned from one status to another. LastTransitionTime metav1.Time `json:"lastTransitionTime"` // Unique, one-word, CamelCase reason for the condition's last transition. 
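Changing StorageClassName from string to *string lets the operator distinguish an unset storage class (nil) from an explicitly empty one; newVolumeSource later in this patch only builds a PersistentVolumeClaim when both the storage class and size are set, and otherwise falls back to emptyDir. A minimal sketch of that nil check, using an illustrative stand-in type rather than the real ElasticsearchStorageSpec:

package main

import "fmt"

// storageSpec is an illustrative stand-in; the real ElasticsearchStorageSpec
// also carries Size *resource.Quantity, and the PVC branch requires both fields.
type storageSpec struct {
	StorageClassName *string
}

func describe(s storageSpec) string {
	// nil means "not requested": newVolumeSource falls back to an emptyDir volume.
	// A non-nil value (together with a size) selects a PersistentVolumeClaim.
	if s.StorageClassName == nil {
		return "no storage class set: fall back to emptyDir"
	}
	return fmt.Sprintf("storage class %q: use a PersistentVolumeClaim", *s.StorageClassName)
}

func main() {
	gp2 := "gp2"
	fmt.Println(describe(storageSpec{}))
	fmt.Println(describe(storageSpec{StorageClassName: &gp2}))
}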
@@ -185,14 +181,6 @@ const ( Restarting ClusterConditionType = "Restarting" ) -type ConditionStatus string - -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - type ClusterEvent string const ( diff --git a/pkg/k8shandler/cluster.go b/pkg/k8shandler/cluster.go index ce04c5f66..dfe0517ef 100644 --- a/pkg/k8shandler/cluster.go +++ b/pkg/k8shandler/cluster.go @@ -2,40 +2,32 @@ package k8shandler import ( "fmt" + "reflect" - apps "k8s.io/api/apps/v1" + "github.com/operator-framework/operator-sdk/pkg/sdk" + "github.com/sirupsen/logrus" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/openshift/elasticsearch-operator/pkg/utils" - "github.com/operator-framework/operator-sdk/pkg/sdk" - "github.com/sirupsen/logrus" ) -// ClusterState struct represents the state of the cluster -type ClusterState struct { - Nodes []*nodeState - DanglingStatefulSets *apps.StatefulSetList - DanglingDeployments *apps.DeploymentList - DanglingReplicaSets *apps.ReplicaSetList - DanglingPods *v1.PodList +var wrongConfig bool +var nodes map[string][]NodeTypeInterface + +func FlushNodes(clusterName, namespace string) { + nodes[nodeMapKey(clusterName, namespace)] = []NodeTypeInterface{} } -var wrongConfig bool +func nodeMapKey(clusterName, namespace string) string { + return fmt.Sprintf("%v-%v", clusterName, namespace) +} // CreateOrUpdateElasticsearchCluster creates an Elasticsearch deployment -func CreateOrUpdateElasticsearchCluster(dpl *v1alpha1.Elasticsearch, configMapName, serviceAccountName string) error { - - cState, err := NewClusterState(dpl, configMapName, serviceAccountName) - if err != nil { - return err - } +func CreateOrUpdateElasticsearchCluster(cluster *v1alpha1.Elasticsearch) error { // Verify that we didn't scale up too many masters - err = isValidConf(dpl) + err := isValidConf(cluster) if err != nil { // if wrongConfig=true then we've already print out error message // don't flood the stderr of the operator with the same message @@ -47,524 +39,259 @@ func CreateOrUpdateElasticsearchCluster(dpl *v1alpha1.Elasticsearch, configMapNa } wrongConfig = false - action, err := cState.getRequiredAction(dpl) - if err != nil { - return err - } - - switch { - case action == v1alpha1.ElasticsearchActionNewClusterNeeded: - err = cState.buildNewCluster(dpl, asOwner(dpl)) - if err != nil { - return err - } - case action == v1alpha1.ElasticsearchActionScaleDownNeeded: - // TODO: provide documentation for manual scale down - return fmt.Errorf("Scale down operation requested but is not supported by the operator. 
For manual scale down, follow this document %s", "") - // err = cState.removeStaleNodes(dpl) - // if err != nil { - // return err - // } - case action == v1alpha1.ElasticsearchActionRollingRestartNeeded: - if err = cState.restartCluster(dpl, asOwner(dpl)); err != nil { - return err - } - case action == v1alpha1.ElasticsearchActionNone: - if dpl.Spec.ManagementState == v1alpha1.ManagementStateManaged { - // Make sure that the deployments are Paused - if err = cState.pauseCluster(dpl, asOwner(dpl)); err != nil { - return err - } - } - default: - return fmt.Errorf("Unknown cluster action requested: %v", action) - } + getNodes(cluster) - // Determine if a change to cluster size was made, - // if yes, update variables in config map and also - // reload live configuration - if err = updateClusterSettings(dpl); err != nil { - return err - } - // Scrape cluster health from elasticsearch every time - err = cState.UpdateStatus(dpl) - if err != nil { - return err - } - return nil -} + // if there is a node currently being upgraded, work on that first + upgradeInProgressNode := getNodeUpgradeInProgress(cluster) + scheduledUpgradeNodes := getScheduledUpgradeNodes(cluster) + if upgradeInProgressNode != nil { -// NewClusterState func generates ClusterState for the current cluster -func NewClusterState(dpl *v1alpha1.Elasticsearch, configMapName, serviceAccountName string) (ClusterState, error) { - nodes := []*nodeState{} - cState := ClusterState{ - Nodes: nodes, - } + clusterStatus := cluster.Status.DeepCopy() + index, nodeStatus := getNodeStatus(upgradeInProgressNode.name(), clusterStatus) - numMasters := getMasterCount(dpl) - numDatas := getDataCount(dpl) + if _, ok := containsNodeTypeInterface(upgradeInProgressNode, scheduledUpgradeNodes); ok { + logrus.Debugf("Continuing update for %v", upgradeInProgressNode.name()) + upgradeInProgressNode.update(nodeStatus) + } else { + logrus.Debugf("Continuing restart for %v", upgradeInProgressNode.name()) + upgradeInProgressNode.restart(nodeStatus) + } - var i int32 - for nodeNum, node := range dpl.Spec.Nodes { + nodeState := upgradeInProgressNode.state() - for i = 1; i <= node.NodeCount; i++ { - nodeCfg, err := constructNodeSpec(dpl, node, configMapName, serviceAccountName, int32(nodeNum), i, numMasters, numDatas) - if err != nil { - return cState, fmt.Errorf("Unable to construct ES node config %v", err) - } + nodeStatus.UpgradeStatus.ScheduledForUpgrade = nodeState.UpgradeStatus.ScheduledForUpgrade + nodeStatus.UpgradeStatus.ScheduledForRedeploy = nodeState.UpgradeStatus.ScheduledForRedeploy - node := nodeState{ - Desired: nodeCfg, - } - cState.Nodes = append(cState.Nodes, &node) + if index == NOT_FOUND_INDEX { + clusterStatus.Nodes = append(clusterStatus.Nodes, *nodeStatus) + } else { + clusterStatus.Nodes[index] = *nodeStatus } - } - err := cState.amendDeployments(dpl) - if err != nil { - return cState, fmt.Errorf("Unable to amend Deployments to status: %v", err) - } + updateNodeStatus(cluster, *clusterStatus) - err = cState.amendStatefulSets(dpl) - if err != nil { - return cState, fmt.Errorf("Unable to amend StatefulSets to status: %v", err) - } + } else { - err = cState.amendReplicaSets(dpl) - if err != nil { - return cState, fmt.Errorf("Unable to amend ReplicaSets to status: %v", err) - } + if len(scheduledUpgradeNodes) > 0 { + for _, node := range scheduledUpgradeNodes { + logrus.Debugf("Perform a update for %v", node.name()) + clusterStatus := cluster.Status.DeepCopy() + index, nodeStatus := getNodeStatus(node.name(), clusterStatus) - err = 
cState.amendPods(dpl) - if err != nil { - return cState, fmt.Errorf("Unable to amend Pods to status: %v", err) - } + err := node.update(nodeStatus) + nodeState := node.state() - return cState, nil -} - -// getRequiredAction checks the desired state against what's present in current -// deployments/statefulsets/pods -func (cState *ClusterState) getRequiredAction(dpl *v1alpha1.Elasticsearch) (v1alpha1.ElasticsearchRequiredAction, error) { - // TODO: Add condition that if an operation is currently in progress - // not to try to queue another action. Instead return ElasticsearchActionInProgress which - // is noop. + nodeStatus.UpgradeStatus.ScheduledForUpgrade = nodeState.UpgradeStatus.ScheduledForUpgrade + nodeStatus.UpgradeStatus.ScheduledForRedeploy = nodeState.UpgradeStatus.ScheduledForRedeploy - // TODO: Handle failures. Maybe introduce some ElasticsearchCondition which says - // what action was attempted last, when, how many tries and what the result is. + if index == NOT_FOUND_INDEX { + clusterStatus.Nodes = append(clusterStatus.Nodes, *nodeStatus) + } else { + clusterStatus.Nodes[index] = *nodeStatus + } - if dpl.Spec.ManagementState == v1alpha1.ManagementStateManaged { + updateNodeStatus(cluster, *clusterStatus) - for _, node := range cState.Nodes { - if node.Actual.Deployment == nil && node.Actual.StatefulSet == nil { - return v1alpha1.ElasticsearchActionNewClusterNeeded, nil + if err != nil { + logrus.Warnf("Error occurred while updating node %v: %v", node.name(), err) + } } - } - if node := upgradeInProgress(dpl); node != nil { - return v1alpha1.ElasticsearchActionRollingRestartNeeded, nil - } - for _, node := range cState.Nodes { - if node.Desired.IsUpdateNeeded() { - return v1alpha1.ElasticsearchActionRollingRestartNeeded, nil - } - } + } else { - // If some deployments exist that are not specified in CR, they'll be in DanglingDeployments - // we need to remove those to comply with the desired cluster structure. 
- if cState.DanglingDeployments != nil { - return v1alpha1.ElasticsearchActionScaleDownNeeded, nil - } - } + scheduledRedeployNodes := getScheduledRedeployOnlyNodes(cluster) + if len(scheduledRedeployNodes) > 0 { + // get all nodes that need only a rollout + // TODO: ready cluster for a pod restart first + for _, node := range scheduledRedeployNodes { + logrus.Debugf("Perform a redeploy for %v", node.name()) + clusterStatus := cluster.Status.DeepCopy() + index, nodeStatus := getNodeStatus(node.name(), clusterStatus) - return v1alpha1.ElasticsearchActionNone, nil -} + node.restart(nodeStatus) + nodeState := node.state() -func (cState *ClusterState) pauseCluster(dpl *v1alpha1.Elasticsearch, owner metav1.OwnerReference) error { + nodeStatus.UpgradeStatus.ScheduledForUpgrade = nodeState.UpgradeStatus.ScheduledForUpgrade + nodeStatus.UpgradeStatus.ScheduledForRedeploy = nodeState.UpgradeStatus.ScheduledForRedeploy - // check if the node is Paused: false - for _, currentNode := range cState.Nodes { - if currentNode.Desired.IsPauseNeeded() { - currentNode.Desired.PauseNode(owner) - } - } + if index == NOT_FOUND_INDEX { + clusterStatus.Nodes = append(clusterStatus.Nodes, *nodeStatus) + } else { + clusterStatus.Nodes[index] = *nodeStatus + } - return nil -} + updateNodeStatus(cluster, *clusterStatus) + } -func (cState *ClusterState) buildNewCluster(dpl *v1alpha1.Elasticsearch, owner metav1.OwnerReference) error { - // Mark the operation in case of operator failure - if err := utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionTrue, utils.UpdateScalingUpCondition); err != nil { - return fmt.Errorf("Unable to update Elasticsearch cluster status: %v", err) - } - if err := utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionTrue, utils.UpdateUpdatingSettingsCondition); err != nil { - return fmt.Errorf("Unable to update Elasticsearch cluster status: %v", err) - } - // Create the new nodes - for _, node := range cState.Nodes { - err := node.Desired.CreateNode(owner) - if err != nil { - return fmt.Errorf("Unable to create Elasticsearch node: %v", err) - } - } - return nil -} + } else { + + for _, node := range nodes[nodeMapKey(cluster.Name, cluster.Namespace)] { + clusterStatus := cluster.Status.DeepCopy() + index, nodeStatus := getNodeStatus(node.name(), clusterStatus) + + // Verify that we didn't scale up too many masters + err := isValidConf(cluster) + if err != nil { + // if wrongConfig=true then we've already print out error message + // don't flood the stderr of the operator with the same message + if wrongConfig { + return nil + } + wrongConfig = true + return err + } -// list existing StatefulSets and delete those unmanaged by the operator -func (cState *ClusterState) removeStaleNodes(dpl *v1alpha1.Elasticsearch) error { - // Set 'ScalingDown' condition to True before beggining the actual scale event - if err := utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionTrue, utils.UpdateScalingDownCondition); err != nil { - return fmt.Errorf("Unable to update Elasticsearch cluster status: %v", err) - } - if err := utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionTrue, utils.UpdateUpdatingSettingsCondition); err != nil { - return fmt.Errorf("Unable to update Elasticsearch cluster status: %v", err) - } - // Prepare the cluster for the scale down event - if err := updateClusterSettings(dpl); err != nil { - return err - } - // Remove extra Deployments - for _, node := range cState.DanglingDeployments.Items { - // the returned deployment doesn't have TypeMeta, so we're adding it. 
- node.TypeMeta = metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - } - err := sdk.Delete(&node) - if err != nil { - return fmt.Errorf("Unable to delete resource %v: ", err) - } - } - return nil -} + node.create() + nodeState := node.state() -func (cState *ClusterState) restartCluster(dpl *v1alpha1.Elasticsearch, owner metav1.OwnerReference) error { - nodeUnderUpgrade := upgradeInProgress(dpl) - // find a pod that can handle requests - // in a single-master deployment the operator will complain about not having any master pods - masterPod, err := getRunningMasterPod(dpl.Name, dpl.Namespace) - if err != nil { - return nil - } - if nodeUnderUpgrade == nil { - // don't attempt to restart the cluster unless cluster health is green - if ok := canRestartCluster(dpl); !ok { - logrus.Warnf("Cluster Rolling Restart requested but cluster isn't ready.") - return nil - } - nodeUnderUpgrade, err = cState.beginUpgrade(dpl, owner) - if err != nil { - // try to revert shard allocation settings, best-effort operation - masterPod, err = getRunningMasterPod(dpl.Name, dpl.Namespace) - if err != nil { - return nil - } - enableShardAllocation(dpl, masterPod) - return err - } - logrus.Infof("Rolling restart: began upgrading node: %v", nodeUnderUpgrade.DeploymentName) - } + nodeStatus.UpgradeStatus.ScheduledForUpgrade = nodeState.UpgradeStatus.ScheduledForUpgrade + nodeStatus.UpgradeStatus.ScheduledForRedeploy = nodeState.UpgradeStatus.ScheduledForRedeploy + + if index == NOT_FOUND_INDEX { + // this is a new status, just append + nodeStatus.DeploymentName = nodeState.DeploymentName + nodeStatus.StatefulSetName = nodeState.StatefulSetName + clusterStatus.Nodes = append(clusterStatus.Nodes, *nodeStatus) + } else { + // this is an existing status, update in place + clusterStatus.Nodes[index] = *nodeStatus + } + + // update status here + updateNodeStatus(cluster, *clusterStatus) + + updateMinMasters(cluster) + } - // wait for node to start and rejoin the cluster - masterPod, err = getRunningMasterPod(dpl.Name, dpl.Namespace) - if err != nil { - return nil - } - if rejoined := nodeRejoinedCluster(dpl, masterPod); !rejoined { - if nodeUnderUpgrade.UpgradeStatus.UpgradePhase != v1alpha1.NodeRestarting { - logrus.Infof("Rolling restart: waiting for node '%s' to rejoin the cluster...", nodeUnderUpgrade.DeploymentName) - if retryErr := utils.UpdateNodeUpgradeStatusWithRetry(dpl, nodeUnderUpgrade.DeploymentName, utils.NodeRestarting()); retryErr != nil { - return err - } - } - return nil - } - // enable shard allocation - masterPod, err = getRunningMasterPod(dpl.Name, dpl.Namespace) - if err != nil { - return nil - } - if err = enableShardAllocation(dpl, masterPod); err != nil { - return err - } - // wait for rebalancing to finish - if health := clusterHealth(dpl); health != "green" { - if nodeUnderUpgrade.UpgradeStatus.UpgradePhase != v1alpha1.RecoveringData { - logrus.Infof("Rolling restart: node '%s' rejoined cluster, recovering its data...", nodeUnderUpgrade.PodName) - if retryErr := utils.UpdateNodeUpgradeStatusWithRetry(dpl, nodeUnderUpgrade.DeploymentName, utils.NodeRecoveringData()); retryErr != nil { - return err } } - return nil } - // node upgraded - logrus.Debugf("Rolling restart: marked node %s as upgraded", nodeUnderUpgrade.DeploymentName) - return utils.UpdateNodeUpgradeStatusWithRetry(dpl, nodeUnderUpgrade.DeploymentName, utils.NodeNormalOperation()) -} -func canRestartCluster(dpl *v1alpha1.Elasticsearch) bool { - health := clusterHealth(dpl) - if health == "green" { - return true - } - 
return false + // Scrape cluster health from elasticsearch every time + return UpdateClusterStatus(cluster) } -func nodeRejoinedCluster(dpl *v1alpha1.Elasticsearch, masterPod *v1.Pod) bool { - desiredNumberOfNodes := int(getNodeCount(dpl)) - actualNumberOfNodes := utils.NumberOfNodes(masterPod) - logrus.Debugf("NodeRejoinedCluster = desired: %d, actual %d", desiredNumberOfNodes, actualNumberOfNodes) - return desiredNumberOfNodes == actualNumberOfNodes -} +func updateMinMasters(cluster *v1alpha1.Elasticsearch) { + // do as best effort -- whenever we create a node update min masters (if required) -func (cState *ClusterState) amendStatefulSets(dpl *v1alpha1.Elasticsearch) error { - statefulSets, err := listStatefulSets(dpl.Name, dpl.Namespace) + currentMasterCount, err := GetMinMasterNodes(cluster.Name, cluster.Namespace) if err != nil { - return fmt.Errorf("Unable to list Elasticsearch's StatefulSets: %v", err) + logrus.Debugf("Unable to get current min master count for cluster %v", cluster.Name) } - var element apps.StatefulSet - var ok bool + desiredMasterCount := getMasterCount(cluster) + currentNodeCount, err := GetClusterNodeCount(cluster.Name, cluster.Namespace) - for _, node := range cState.Nodes { - statefulSets, element, ok = popStatefulSet(statefulSets, node.Desired) - if ok { - node.setStatefulSet(element) + // check that we have the required number of master nodes in the cluster... + if currentNodeCount >= desiredMasterCount { + if currentMasterCount != desiredMasterCount { + if _, setErr := SetMinMasterNodes(cluster.Name, cluster.Namespace, desiredMasterCount); setErr != nil { + logrus.Debugf("Unable to set min master count to %d for cluster %v", desiredMasterCount, cluster.Name) + } } } - if len(statefulSets.Items) != 0 { - cState.DanglingStatefulSets = statefulSets - } - return nil } -func (cState *ClusterState) amendDeployments(dpl *v1alpha1.Elasticsearch) error { - deployments, err := listDeployments(dpl.Name, dpl.Namespace) - if err != nil { - return fmt.Errorf("Unable to list Elasticsearch's Deployments: %v", err) - } - - var element apps.Deployment - var ok bool - - for _, node := range cState.Nodes { - deployments, element, ok = popDeployment(deployments, node.Desired) - if ok { - node.setDeployment(element) +func getNodeUpgradeInProgress(cluster *v1alpha1.Elasticsearch) NodeTypeInterface { + for _, node := range cluster.Status.Nodes { + if node.UpgradeStatus.UnderUpgrade == v1.ConditionTrue { + for _, nodeTypeInterface := range nodes[nodeMapKey(cluster.Name, cluster.Namespace)] { + if node.DeploymentName == nodeTypeInterface.name() || + node.StatefulSetName == nodeTypeInterface.name() { + return nodeTypeInterface + } + } } } - if len(deployments.Items) != 0 { - cState.DanglingDeployments = deployments - } + return nil } -func (cState *ClusterState) amendReplicaSets(dpl *v1alpha1.Elasticsearch) error { - replicaSets, err := listReplicaSets(dpl.Name, dpl.Namespace) - if err != nil { - return fmt.Errorf("Unable to list Elasticsearch's ReplicaSets: %v", err) - } - var replicaSet apps.ReplicaSet +func getNodes(cluster *v1alpha1.Elasticsearch) { - for _, node := range cState.Nodes { - var ok bool - replicaSets, replicaSet, ok = popReplicaSet(replicaSets, node.Actual) - if ok { - node.setReplicaSet(replicaSet) - } + if nodes == nil { + nodes = make(map[string][]NodeTypeInterface) } - if len(replicaSets.Items) != 0 { - cState.DanglingReplicaSets = replicaSets - } - return nil -} + // get list of client only nodes, and collapse node info into the node (self field) if needed + 
for index, node := range cluster.Spec.Nodes { + // build the NodeTypeInterface list + for _, nodeTypeInterface := range GetNodeTypeInterface(index, node, cluster) { -func (cState *ClusterState) amendPods(dpl *v1alpha1.Elasticsearch) error { - pods, err := listPods(dpl.Name, dpl.Namespace) - if err != nil { - return fmt.Errorf("Unable to list Elasticsearch's Pods: %v", err) - } - var pod v1.Pod + nodeIndex, ok := containsNodeTypeInterface(nodeTypeInterface, nodes[nodeMapKey(cluster.Name, cluster.Namespace)]) + if !ok { + nodes[nodeMapKey(cluster.Name, cluster.Namespace)] = append(nodes[nodeMapKey(cluster.Name, cluster.Namespace)], nodeTypeInterface) + } else { + nodes[nodeMapKey(cluster.Name, cluster.Namespace)][nodeIndex].updateReference(nodeTypeInterface) + } - for _, node := range cState.Nodes { - var ok bool - pods, pod, ok = popPod(pods, node.Actual) - if ok { - node.setPod(pod) } } - - if len(pods.Items) != 0 { - cState.DanglingPods = pods - } - return nil } -func upgradeInProgress(dpl *v1alpha1.Elasticsearch) *v1alpha1.ElasticsearchNodeStatus { - for _, node := range dpl.Status.Nodes { - if node.UpgradeStatus.UnderUpgrade == v1alpha1.UnderUpgradeTrue { - return &node - } - } - return nil -} +func getScheduledUpgradeNodes(cluster *v1alpha1.Elasticsearch) []NodeTypeInterface { + upgradeNodes := []NodeTypeInterface{} -func (cState *ClusterState) selectNodeForUpgrade(dpl *v1alpha1.Elasticsearch) (*desiredNodeState, *v1alpha1.ElasticsearchNodeStatus) { - for _, nodeStatus := range dpl.Status.Nodes { - // find a node which isn't under upgrade right now - if nodeStatus.UpgradeStatus.UnderUpgrade == v1alpha1.UnderUpgradeFalse { - // check if the node has old image - for _, currentNode := range cState.Nodes { - if currentNode.Desired.DeployName == nodeStatus.DeploymentName { - if currentNode.Desired.IsUpdateNeeded() { - return ¤tNode.Desired, &nodeStatus - } + for _, node := range cluster.Status.Nodes { + if node.UpgradeStatus.ScheduledForUpgrade == v1.ConditionTrue { + for _, nodeTypeInterface := range nodes[nodeMapKey(cluster.Name, cluster.Namespace)] { + if node.DeploymentName == nodeTypeInterface.name() || + node.StatefulSetName == nodeTypeInterface.name() { + upgradeNodes = append(upgradeNodes, nodeTypeInterface) } } } } - return nil, nil -} -func (cState *ClusterState) beginUpgrade(dpl *v1alpha1.Elasticsearch, owner metav1.OwnerReference) (*v1alpha1.ElasticsearchNodeStatus, error) { - masterPod, err := getRunningMasterPod(dpl.Name, dpl.Namespace) - if err != nil { - return nil, err - } - if err = disableShardAllocation(dpl, masterPod); err != nil { - return nil, err - } - if err = utils.PerformSyncedFlush(masterPod); err != nil { - return nil, err - } - return cState.upgradeNode(dpl, owner) + return upgradeNodes } -func (cState *ClusterState) upgradeNode(dpl *v1alpha1.Elasticsearch, owner metav1.OwnerReference) (*v1alpha1.ElasticsearchNodeStatus, error) { - nodeForUpgrade, nodeStatus := cState.selectNodeForUpgrade(dpl) - if nodeForUpgrade == nil { - // upgrade requested, but there are no nodes for upgrade? - return nil, fmt.Errorf("Upgrade requested but no nodes for upgrade found") - } - // TODO: maybe first mark the node 'underUpgrade' and revert that - // if the upgrade fails? 
- if err := nodeForUpgrade.UpdateNode(owner); err != nil { - return nil, fmt.Errorf("Unable to create Elasticsearch node: %v", err) - } - if err := utils.UpdateNodeUpgradeStatusWithRetry(dpl, nodeForUpgrade.DeployName, utils.NodeControllerUpdated()); err != nil { - return nil, err - } - nodeStatus = nil - for i, node := range dpl.Status.Nodes { - if node.DeploymentName == nodeForUpgrade.DeployName { - nodeStatus = &dpl.Status.Nodes[i] - logrus.Debugf("Rolling restart: marked node %s as under upgrade", node.DeploymentName) - break +func getScheduledRedeployOnlyNodes(cluster *v1alpha1.Elasticsearch) []NodeTypeInterface { + redeployNodes := []NodeTypeInterface{} + + for _, node := range cluster.Status.Nodes { + if node.UpgradeStatus.ScheduledForRedeploy == v1.ConditionTrue && + node.UpgradeStatus.ScheduledForUpgrade == v1.ConditionFalse { + for _, nodeTypeInterface := range nodes[nodeMapKey(cluster.Name, cluster.Namespace)] { + if node.DeploymentName == nodeTypeInterface.name() || + node.StatefulSetName == nodeTypeInterface.name() { + redeployNodes = append(redeployNodes, nodeTypeInterface) + } + } } } - if nodeStatus == nil { - // Deployment controller was deleted (not by this operator, but let's be prepared for it) - return nil, fmt.Errorf("Couldn't find Deployment %s", nodeForUpgrade.DeployName) - } - return nodeStatus, nil -} - -func disableShardAllocation(dpl *v1alpha1.Elasticsearch, masterPod *v1.Pod) error { - return setShardAllocation(dpl, masterPod, v1alpha1.ShardAllocationFalse) -} -func enableShardAllocation(dpl *v1alpha1.Elasticsearch, masterPod *v1.Pod) error { - return setShardAllocation(dpl, masterPod, v1alpha1.ShardAllocationTrue) + return redeployNodes } -func setShardAllocation(dpl *v1alpha1.Elasticsearch, masterPod *v1.Pod, enabled v1alpha1.ShardAllocationState) error { - if enabled == dpl.Status.ShardAllocationEnabled { +func updateNodeStatus(cluster *v1alpha1.Elasticsearch, status v1alpha1.ElasticsearchStatus) error { + // if there is nothing to update, don't + if reflect.DeepEqual(cluster.Status, status) { return nil } - if err := utils.SetShardAllocation(masterPod, enabled); err != nil { - return err - } + + nretries := -1 retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if getErr := sdk.Get(dpl); getErr != nil { + nretries++ + if getErr := sdk.Get(cluster); getErr != nil { + logrus.Debugf("Could not get Elasticsearch %v: %v", cluster.Name, getErr) return getErr } - if dpl.Status.ShardAllocationEnabled == enabled { - return nil - } - dpl.Status.ShardAllocationEnabled = enabled - return sdk.Update(dpl) - }) - // TODO: should we revert shard allocation? 
- // if retryErr != nil { - // if err := utils.SetShardAllocation(masterPod, enabled); err != nil { - // return err - // } - // } - logrus.Debugf("Set cluster shard allocation to: %s", enabled) - return retryErr -} -func updateClusterSettings(dpl *v1alpha1.Elasticsearch) error { - masterPods, err := listRunningMasterPods(dpl.Name, dpl.Namespace) - if err != nil { - return err - } + cluster.Status = status - // no running elasticsearch masters were found - // config map already has the latest configuration - // all nodes spawned later will read the config map - if len(masterPods.Items) == 0 { - // in case ClusterSettingsUpdate had been requested and all master pods disapeared cancel the request - if err = utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionFalse, utils.UpdateUpdatingSettingsCondition); err != nil { - return fmt.Errorf("Unable to update Elasticsearch cluster status: %v", err) + if updateErr := sdk.Update(cluster); updateErr != nil { + logrus.Debugf("Failed to update Elasticsearch %s status. Reason: %v. Trying again...", cluster.Name, updateErr) + return updateErr } - return nil - } - masterPod := &masterPods.Items[0] - switch getClusterEvent(dpl, masterPod) { - case v1alpha1.UpdateClusterSettings: - if err := execClusterSettingsUpdate(dpl, masterPod); err != nil { - return err - } - err = utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionFalse, utils.UpdateUpdatingSettingsCondition) - case v1alpha1.ScaledDown: - err = utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionFalse, utils.UpdateScalingDownCondition) - case v1alpha1.ScaledUp: - if err := execClusterSettingsUpdate(dpl, masterPod); err != nil { - return err - } - err = utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionFalse, utils.UpdateUpdatingSettingsCondition) - err = utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionFalse, utils.UpdateScalingUpCondition) - case v1alpha1.NoEvent: + logrus.Debugf("Updated Elasticsearch %v after %v retries", cluster.Name, nretries) return nil - } - return err -} + }) -func getClusterEvent(dpl *v1alpha1.Elasticsearch, pod *v1.Pod) v1alpha1.ClusterEvent { - desiredNumberOfNodes := int(getNodeCount(dpl)) - actualNumberOfNodes := utils.NumberOfNodes(pod) - if utils.IsUpdatingSettings(&dpl.Status) { - // it is very unlikely that the pods would disapear so quickly, but it could still happen... 
- if utils.IsClusterScalingDown(&dpl.Status) { - return v1alpha1.UpdateClusterSettings - } - // scalingUp and all pods joined the cluster => scaled up - if utils.IsClusterScalingUp(&dpl.Status) && desiredNumberOfNodes == actualNumberOfNodes { - return v1alpha1.ScaledUp - } - } else { - // settings is up-to-date and all extra nodes left the cluster - if utils.IsClusterScalingDown(&dpl.Status) && desiredNumberOfNodes == actualNumberOfNodes { - return v1alpha1.ScaledDown - } + if retryErr != nil { + return fmt.Errorf("Error: could not update status for Elasticsearch %v after %v retries: %v", cluster.Name, nretries, retryErr) } - return v1alpha1.NoEvent -} -func execClusterSettingsUpdate(dpl *v1alpha1.Elasticsearch, pod *v1.Pod) error { - masterNodesQuorum := int(getMasterCount(dpl))/2 + 1 - return utils.UpdateClusterSettings(pod, masterNodesQuorum) + return nil } diff --git a/pkg/k8shandler/common.go b/pkg/k8shandler/common.go new file mode 100644 index 000000000..f63a14c95 --- /dev/null +++ b/pkg/k8shandler/common.go @@ -0,0 +1,496 @@ +package k8shandler + +import ( + "fmt" + "github.com/sirupsen/logrus" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "sort" + "strconv" + + "github.com/openshift/elasticsearch-operator/pkg/utils" + + api "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// addOwnerRefToObject appends the desired OwnerReference to the object +func addOwnerRefToObject(o metav1.Object, r metav1.OwnerReference) { + if (metav1.OwnerReference{}) != r { + o.SetOwnerReferences(append(o.GetOwnerReferences(), r)) + } +} + +func getImage(commonImage string) string { + image := commonImage + if image == "" { + image = elasticsearchDefaultImage + } + return image +} + +func getNodeRoleMap(node api.ElasticsearchNode) map[api.ElasticsearchNodeRole]bool { + isClient := false + isData := false + isMaster := false + + for _, role := range node.Roles { + if role == api.ElasticsearchRoleClient { + isClient = true + } + + if role == api.ElasticsearchRoleData { + isData = true + } + + if role == api.ElasticsearchRoleMaster { + isMaster = true + } + } + return map[api.ElasticsearchNodeRole]bool{ + api.ElasticsearchRoleClient: isClient, + api.ElasticsearchRoleData: isData, + api.ElasticsearchRoleMaster: isMaster, + } +} + +// getOwnerRef returns an owner reference set as the vault cluster CR +func getOwnerRef(v *api.Elasticsearch) metav1.OwnerReference { + trueVar := true + return metav1.OwnerReference{ + APIVersion: api.SchemeGroupVersion.String(), + Kind: v.Kind, + Name: v.Name, + UID: v.UID, + Controller: &trueVar, + } +} + +func isOnlyClientNode(node api.ElasticsearchNode) bool { + for _, role := range node.Roles { + if role != api.ElasticsearchRoleClient { + return false + } + } + + return true +} + +func isClientNode(node api.ElasticsearchNode) bool { + for _, role := range node.Roles { + if role == api.ElasticsearchRoleClient { + return true + } + } + + return false +} + +func isOnlyMasterNode(node api.ElasticsearchNode) bool { + for _, role := range node.Roles { + if role != api.ElasticsearchRoleMaster { + return false + } + } + + return true +} + +func isMasterNode(node api.ElasticsearchNode) bool { + for _, role := range node.Roles { + if role == api.ElasticsearchRoleMaster { + return true + } + } + + return false +} + +func isDataNode(node api.ElasticsearchNode) bool { + for _, role := range node.Roles { + if role == api.ElasticsearchRoleData { + return true + } + } + + return false +} + 
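getNodeRoleMap above collapses a node's Roles list into per-role booleans that the rest of common.go turns into pod labels (es-node-client/data/master), the IS_MASTER and HAS_DATA env vars, and the anti-affinity terms that follow. A small sketch of that pattern with local stand-in types; the role names and label keys are copied from the patch, everything else is illustrative:

package main

import (
	"fmt"
	"strconv"
)

type role string

const (
	roleClient role = "client"
	roleData   role = "data"
	roleMaster role = "master"
)

// roleMap mirrors the shape returned by getNodeRoleMap in common.go:
// one boolean per role, later rendered into pod labels and env vars.
func roleMap(roles []role) map[role]bool {
	m := map[role]bool{roleClient: false, roleData: false, roleMaster: false}
	for _, r := range roles {
		m[r] = true
	}
	return m
}

func main() {
	m := roleMap([]role{roleMaster, roleData})
	// Comparable to newLabels/newEnvVars: the booleans become string-valued labels.
	labels := map[string]string{
		"es-node-client": strconv.FormatBool(m[roleClient]),
		"es-node-data":   strconv.FormatBool(m[roleData]),
		"es-node-master": strconv.FormatBool(m[roleMaster]),
	}
	fmt.Println(labels)
}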
+func newAffinity(roleMap map[api.ElasticsearchNodeRole]bool) *v1.Affinity { + + labelSelectorReqs := []metav1.LabelSelectorRequirement{} + if roleMap[api.ElasticsearchRoleClient] { + labelSelectorReqs = append(labelSelectorReqs, metav1.LabelSelectorRequirement{ + Key: "es-node-client", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"true"}, + }) + } + if roleMap[api.ElasticsearchRoleData] { + labelSelectorReqs = append(labelSelectorReqs, metav1.LabelSelectorRequirement{ + Key: "es-node-data", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"true"}, + }) + } + if roleMap[api.ElasticsearchRoleMaster] { + labelSelectorReqs = append(labelSelectorReqs, metav1.LabelSelectorRequirement{ + Key: "es-node-master", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"true"}, + }) + } + + return &v1.Affinity{ + PodAntiAffinity: &v1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ + { + Weight: 100, + PodAffinityTerm: v1.PodAffinityTerm{ + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: labelSelectorReqs, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + } +} + +func newElasticsearchContainer(imageName string, envVars []v1.EnvVar, resourceRequirements v1.ResourceRequirements) v1.Container { + + return v1.Container{ + Name: "elasticsearch", + Image: imageName, + ImagePullPolicy: "IfNotPresent", + Env: envVars, + Ports: []v1.ContainerPort{ + v1.ContainerPort{ + Name: "cluster", + ContainerPort: 9300, + Protocol: v1.ProtocolTCP, + }, + v1.ContainerPort{ + Name: "restapi", + ContainerPort: 9200, + Protocol: v1.ProtocolTCP, + }, + }, + ReadinessProbe: &v1.Probe{ + TimeoutSeconds: 30, + InitialDelaySeconds: 10, + PeriodSeconds: 5, + Handler: v1.Handler{ + Exec: &v1.ExecAction{ + Command: []string{ + "/usr/share/elasticsearch/probe/readiness.sh", + }, + }, + }, + }, + VolumeMounts: []v1.VolumeMount{ + v1.VolumeMount{ + Name: "elasticsearch-storage", + MountPath: "/elasticsearch/persistent", + }, + v1.VolumeMount{ + Name: "elasticsearch-config", + MountPath: elasticsearchConfigPath, + }, + v1.VolumeMount{ + Name: "certificates", + MountPath: elasticsearchCertsPath, + }, + }, + Resources: resourceRequirements, + } +} + +func newProxyContainer(imageName, clusterName string) (v1.Container, error) { + proxyCookieSecret, err := utils.RandStringBase64(16) + if err != nil { + return v1.Container{}, err + } + container := v1.Container{ + Name: "proxy", + Image: imageName, + ImagePullPolicy: "IfNotPresent", + Ports: []v1.ContainerPort{ + v1.ContainerPort{ + Name: "metrics", + ContainerPort: 60000, + Protocol: v1.ProtocolTCP, + }, + }, + VolumeMounts: []v1.VolumeMount{ + v1.VolumeMount{ + Name: fmt.Sprintf("%s-%s", clusterName, "metrics"), + MountPath: "/etc/proxy/secrets", + }, + v1.VolumeMount{ + Name: "certificates", + MountPath: "/etc/proxy/elasticsearch", + }, + }, + Args: []string{ + "--https-address=:60000", + "--provider=openshift", + "--upstream=https://127.0.0.1:9200", + "--tls-cert=/etc/proxy/secrets/tls.crt", + "--tls-key=/etc/proxy/secrets/tls.key", + "--upstream-ca=/etc/proxy/elasticsearch/admin-ca", + "--openshift-service-account=elasticsearch", + `-openshift-sar={"resource": "namespaces", "verb": "get"}`, + `-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get"}}`, + "--pass-user-bearer-token", + fmt.Sprintf("--cookie-secret=%s", proxyCookieSecret), + }, + } + return container, nil +} + +func newEnvVars(nodeName, clusterName, instanceRam string, roleMap 
map[api.ElasticsearchNodeRole]bool) []v1.EnvVar { + + return []v1.EnvVar{ + v1.EnvVar{ + Name: "DC_NAME", + Value: nodeName, + }, + v1.EnvVar{ + Name: "NAMESPACE", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + v1.EnvVar{ + Name: "KUBERNETES_TRUST_CERT", + Value: "true", + }, + v1.EnvVar{ + Name: "SERVICE_DNS", + Value: fmt.Sprintf("%s-cluster", clusterName), + }, + v1.EnvVar{ + Name: "CLUSTER_NAME", + Value: clusterName, + }, + v1.EnvVar{ + Name: "INSTANCE_RAM", + Value: instanceRam, + }, + v1.EnvVar{ + Name: "HEAP_DUMP_LOCATION", + Value: heapDumpLocation, + }, + v1.EnvVar{ + Name: "RECOVER_AFTER_TIME", + Value: "5m", + }, + v1.EnvVar{ + Name: "READINESS_PROBE_TIMEOUT", + Value: "30", + }, + v1.EnvVar{ + Name: "POD_LABEL", + Value: fmt.Sprintf("cluster=%s", clusterName), + }, + v1.EnvVar{ + Name: "IS_MASTER", + Value: strconv.FormatBool(roleMap[api.ElasticsearchRoleMaster]), + }, + v1.EnvVar{ + Name: "HAS_DATA", + Value: strconv.FormatBool(roleMap[api.ElasticsearchRoleData]), + }, + } +} + +func newLabels(clusterName string, roleMap map[api.ElasticsearchNodeRole]bool) map[string]string { + + return map[string]string{ + "es-node-client": strconv.FormatBool(roleMap[api.ElasticsearchRoleClient]), + "es-node-data": strconv.FormatBool(roleMap[api.ElasticsearchRoleData]), + "es-node-master": strconv.FormatBool(roleMap[api.ElasticsearchRoleMaster]), + "cluster-name": clusterName, + "component": clusterName, + "tuned.openshift.io/elasticsearch": "true", + } +} + +func newLabelSelector(clusterName string, roleMap map[api.ElasticsearchNodeRole]bool) map[string]string { + + return map[string]string{ + "es-node-client": strconv.FormatBool(roleMap[api.ElasticsearchRoleClient]), + "es-node-data": strconv.FormatBool(roleMap[api.ElasticsearchRoleData]), + "es-node-master": strconv.FormatBool(roleMap[api.ElasticsearchRoleMaster]), + "cluster-name": clusterName, + } +} + +func newPodTemplateSpec(nodeName, clusterName, namespace string, node api.ElasticsearchNode, commonSpec api.ElasticsearchNodeSpec, labels map[string]string, roleMap map[api.ElasticsearchNodeRole]bool) v1.PodTemplateSpec { + + resourceRequirements := newResourceRequirements(node.Resources, commonSpec.Resources) + proxyImage := utils.LookupEnvWithDefault("PROXY_IMAGE", "quay.io/openshift/origin-oauth-proxy:latest") + proxyContainer, _ := newProxyContainer(proxyImage, clusterName) + + return v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: v1.PodSpec{ + Affinity: newAffinity(roleMap), + Containers: []v1.Container{ + newElasticsearchContainer( + getImage(commonSpec.Image), + newEnvVars(nodeName, clusterName, resourceRequirements.Limits.Memory().String(), roleMap), + resourceRequirements, + ), + proxyContainer, + }, + NodeSelector: node.NodeSelector, + ServiceAccountName: clusterName, + Volumes: newVolumes(clusterName, nodeName, namespace, node), + }, + } +} + +func newResourceRequirements(nodeResRequirements, commonResRequirements v1.ResourceRequirements) v1.ResourceRequirements { + limitCPU := nodeResRequirements.Limits.Cpu() + if limitCPU.IsZero() { + if commonResRequirements.Limits.Cpu().IsZero() { + CPU, _ := resource.ParseQuantity(defaultCPULimit) + limitCPU = &CPU + } else { + limitCPU = commonResRequirements.Limits.Cpu() + } + } + limitMem := nodeResRequirements.Limits.Memory() + if limitMem.IsZero() { + if commonResRequirements.Limits.Memory().IsZero() { + Mem, _ := resource.ParseQuantity(defaultMemoryLimit) + limitMem = &Mem + 
} else { + limitMem = commonResRequirements.Limits.Memory() + } + + } + requestCPU := nodeResRequirements.Requests.Cpu() + if requestCPU.IsZero() { + if commonResRequirements.Requests.Cpu().IsZero() { + CPU, _ := resource.ParseQuantity(defaultCPURequest) + requestCPU = &CPU + } else { + requestCPU = commonResRequirements.Requests.Cpu() + } + } + requestMem := nodeResRequirements.Requests.Memory() + if requestMem.IsZero() { + if commonResRequirements.Requests.Memory().IsZero() { + Mem, _ := resource.ParseQuantity(defaultMemoryRequest) + requestMem = &Mem + } else { + requestMem = commonResRequirements.Requests.Memory() + } + } + + return v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "cpu": *limitCPU, + "memory": *limitMem, + }, + Requests: v1.ResourceList{ + "cpu": *requestCPU, + "memory": *requestMem, + }, + } +} + +func newVolumes(clusterName, nodeName, namespace string, node api.ElasticsearchNode) []v1.Volume { + return []v1.Volume{ + v1.Volume{ + Name: "elasticsearch-config", + VolumeSource: v1.VolumeSource{ + ConfigMap: &v1.ConfigMapVolumeSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: clusterName, + }, + }, + }, + }, + v1.Volume{ + Name: "elasticsearch-storage", + VolumeSource: newVolumeSource(clusterName, nodeName, namespace, node), + }, + v1.Volume{ + Name: "certificates", + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: clusterName, + }, + }, + }, + v1.Volume{ + Name: fmt.Sprintf("%s-%s", clusterName, "metrics"), + VolumeSource: v1.VolumeSource{ + Secret: &v1.SecretVolumeSource{ + SecretName: fmt.Sprintf("%s-%s", clusterName, "metrics"), + }, + }, + }, + } +} + +func newVolumeSource(clusterName, nodeName, namespace string, node api.ElasticsearchNode) v1.VolumeSource { + + specVol := node.Storage + volSource := v1.VolumeSource{} + + switch { + case specVol.StorageClassName != nil && specVol.Size != nil: + claimName := fmt.Sprintf("%s-%s", clusterName, nodeName) + volSource.PersistentVolumeClaim = &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: claimName, + } + + volSpec := v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: *specVol.Size, + }, + }, + StorageClassName: specVol.StorageClassName, + } + + err := createOrUpdatePersistentVolumeClaim(volSpec, claimName, namespace) + if err != nil { + logrus.Errorf("Unable to create PersistentVolumeClaim: %v", err) + } + + case specVol.Size != nil: + volSource.EmptyDir = &v1.EmptyDirVolumeSource{ + SizeLimit: specVol.Size, + } + + default: + volSource.EmptyDir = &v1.EmptyDirVolumeSource{} + } + + return volSource +} + +func sortDataHashKeys(dataHash map[string][32]byte) []string { + keys := []string{} + for key, _ := range dataHash { + keys = append(keys, key) + } + + sort.Strings(keys) + + return keys +} diff --git a/pkg/k8shandler/configmaps.go b/pkg/k8shandler/configmaps.go index b37a4958a..63e848a09 100644 --- a/pkg/k8shandler/configmaps.go +++ b/pkg/k8shandler/configmaps.go @@ -4,17 +4,18 @@ import ( "bytes" "crypto/sha256" "fmt" + "html/template" + "io" "strconv" + "github.com/operator-framework/operator-sdk/pkg/sdk" + "github.com/sirupsen/logrus" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/util/retry" v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/openshift/elasticsearch-operator/pkg/utils" - 
"github.com/operator-framework/operator-sdk/pkg/sdk" - "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( @@ -23,115 +24,131 @@ const ( indexSettingsConfig = "index_settings" ) -// CreateOrUpdateConfigMaps ensures the existence of ConfigMaps with Elasticsearch configuration -func CreateOrUpdateConfigMaps(dpl *v1alpha1.Elasticsearch) (string, error) { - owner := asOwner(dpl) - configMapName := v1alpha1.ConfigMapName +// esYmlStruct is used to render esYmlTmpl to a proper elasticsearch.yml format +type esYmlStruct struct { + KibanaIndexMode string + EsUnicastHost string + NodeQuorum string + RecoverExpectedShards string +} + +type log4j2PropertiesStruct struct { + RootLogger string +} +type indexSettingsStruct struct { + PrimaryShards string + ReplicaShards string +} + +// CreateOrUpdateConfigMaps ensures the existence of ConfigMaps with Elasticsearch configuration +func CreateOrUpdateConfigMaps(dpl *v1alpha1.Elasticsearch) (err error) { kibanaIndexMode, err := kibanaIndexMode("") if err != nil { - return "", err + return err } dataNodeCount := int((getDataCount(dpl))) masterNodeCount := int((getMasterCount(dpl))) - primaryShardsCount := strconv.Itoa(dataNodeCount) - replicaShardsCount := strconv.Itoa(calculateReplicaCount(dpl)) - recoverExpectedShards := strconv.Itoa(dataNodeCount) - nodeQuorum := strconv.Itoa(masterNodeCount/2 + 1) - - esUnicastHost := esUnicastHost(dpl.Name) - rootLogger := rootLogger() + configmap := newConfigMap( + dpl.Name, + dpl.Namespace, + dpl.Labels, + kibanaIndexMode, + esUnicastHost(dpl.Name, dpl.Namespace), + rootLogger(), + strconv.Itoa(masterNodeCount/2+1), + strconv.Itoa(dataNodeCount), + strconv.Itoa(dataNodeCount), + strconv.Itoa(calculateReplicaCount(dpl)), + ) - err = createOrUpdateConfigMap(dpl, configMapName, dpl.Namespace, dpl.Name, kibanaIndexMode, esUnicastHost, rootLogger, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount, owner, dpl.Labels) - if err != nil { - return configMapName, fmt.Errorf("Failure creating ConfigMap %v", err) - } - return configMapName, nil -} + addOwnerRefToObject(configmap, getOwnerRef(dpl)) -func createOrUpdateConfigMap(dpl *v1alpha1.Elasticsearch, configMapName, namespace, clusterName, kibanaIndexMode, esUnicastHost, rootLogger, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount string, - owner metav1.OwnerReference, labels map[string]string) error { - elasticsearchCM, err := createConfigMap(configMapName, namespace, clusterName, kibanaIndexMode, esUnicastHost, rootLogger, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount, labels) + err = sdk.Create(configmap) if err != nil { - return err - } - addOwnerRefToObject(elasticsearchCM, owner) - err = sdk.Create(elasticsearchCM) - if err != nil && !errors.IsAlreadyExists(err) { - return fmt.Errorf("Failure constructing Elasticsearch ConfigMap: %v", err) - } else if errors.IsAlreadyExists(err) { - // Get existing configMap to check if it is same as what we want - existingCM := configMap(configMapName, namespace, labels) - err = sdk.Get(existingCM) - if err != nil { - return fmt.Errorf("Unable to get Elasticsearch cluster configMap: %v", err) + if !errors.IsAlreadyExists(err) { + return fmt.Errorf("Failure constructing Elasticsearch ConfigMap: %v", err) } - if configMapContentChanged(existingCM, elasticsearchCM) { - // Cluster settings has changed, make sure it doesnt go unnoticed - if err := utils.UpdateConditionWithRetry(dpl, v1alpha1.ConditionTrue, 
utils.UpdateUpdatingSettingsCondition); err != nil { - return fmt.Errorf("Unable to update Elasticsearch cluster status: %v", err) + if errors.IsAlreadyExists(err) { + // Get existing configMap to check if it is same as what we want + current := configmap.DeepCopy() + err = sdk.Get(current) + if err != nil { + return fmt.Errorf("Unable to get Elasticsearch cluster configMap: %v", err) } - return retry.RetryOnConflict(retry.DefaultRetry, func() error { - if getErr := sdk.Get(existingCM); getErr != nil { - logrus.Debugf("Could not get Elasticsearch %v: %v", dpl.Name, getErr) - return getErr + if configMapContentChanged(current, configmap) { + // Cluster settings has changed, make sure it doesnt go unnoticed + if err := updateConditionWithRetry(dpl, v1.ConditionTrue, updateUpdatingSettingsCondition); err != nil { + return err } - existingCM.Data[esConfig] = elasticsearchCM.Data[esConfig] - existingCM.Data[log4jConfig] = elasticsearchCM.Data[log4jConfig] - existingCM.Data[indexSettingsConfig] = elasticsearchCM.Data[indexSettingsConfig] + return retry.RetryOnConflict(retry.DefaultRetry, func() error { + if getErr := sdk.Get(current); getErr != nil { + logrus.Debugf("Could not get Elasticsearch configmap %v: %v", configmap.Name, getErr) + return getErr + } - if updateErr := sdk.Update(existingCM); updateErr != nil { - logrus.Debugf("Failed to update Elasticsearch %v status: %v", dpl.Name, updateErr) - return updateErr - } - return nil - }) + current.Data = configmap.Data + if updateErr := sdk.Update(current); updateErr != nil { + logrus.Debugf("Failed to update Elasticsearch configmap %v: %v", configmap.Name, updateErr) + return updateErr + } + return nil + }) + } } } + return nil } -func createConfigMap(configMapName, namespace, clusterName, kibanaIndexMode, esUnicastHost, rootLogger, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount string, - labels map[string]string) (*v1.ConfigMap, error) { - cm := configMap(configMapName, namespace, labels) - cm.Data = map[string]string{} +func renderData(kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount, rootLogger string) (error, map[string]string) { + + data := map[string]string{} buf := &bytes.Buffer{} if err := renderEsYml(buf, kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedShards); err != nil { - return cm, err + return err, data } - cm.Data[esConfig] = buf.String() + data[esConfig] = buf.String() buf = &bytes.Buffer{} if err := renderLog4j2Properties(buf, rootLogger); err != nil { - return cm, err + return err, data } - cm.Data[log4jConfig] = buf.String() + data[log4jConfig] = buf.String() buf = &bytes.Buffer{} if err := renderIndexSettings(buf, primaryShardsCount, replicaShardsCount); err != nil { - return cm, err + return err, data } - cm.Data[indexSettingsConfig] = buf.String() + data[indexSettingsConfig] = buf.String() - return cm, nil + return nil, data } -// configMap returns a v1.ConfigMap object -func configMap(configMapName string, namespace string, labels map[string]string) *v1.ConfigMap { +// newConfigMap returns a v1.ConfigMap object +func newConfigMap(configMapName, namespace string, labels map[string]string, + kibanaIndexMode, esUnicastHost, rootLogger, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount string) *v1.ConfigMap { + + err, data := renderData(kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedShards, primaryShardsCount, replicaShardsCount, rootLogger) + if err != nil { + return nil + } + return 
&v1.ConfigMap{ TypeMeta: metav1.TypeMeta{ Kind: "ConfigMap", - APIVersion: "v1", + APIVersion: v1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: configMapName, Namespace: namespace, Labels: labels, }, + Data: data, } } @@ -159,3 +176,94 @@ func configMapContentChanged(old, new *v1.ConfigMap) bool { return false } + +func renderEsYml(w io.Writer, kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedShards string) error { + t := template.New("elasticsearch.yml") + config := esYmlTmpl + t, err := t.Parse(config) + if err != nil { + return err + } + esy := esYmlStruct{ + KibanaIndexMode: kibanaIndexMode, + EsUnicastHost: esUnicastHost, + NodeQuorum: nodeQuorum, + RecoverExpectedShards: recoverExpectedShards, + } + + return t.Execute(w, esy) +} + +func renderLog4j2Properties(w io.Writer, rootLogger string) error { + t := template.New("log4j2.properties") + t, err := t.Parse(log4j2PropertiesTmpl) + if err != nil { + return err + } + + log4jProp := log4j2PropertiesStruct{ + RootLogger: rootLogger, + } + + return t.Execute(w, log4jProp) +} + +func renderIndexSettings(w io.Writer, primaryShardsCount, replicaShardsCount string) error { + t := template.New("index_settings") + t, err := t.Parse(indexSettingsTmpl) + if err != nil { + return err + } + + indexSettings := indexSettingsStruct{ + PrimaryShards: primaryShardsCount, + ReplicaShards: replicaShardsCount, + } + + return t.Execute(w, indexSettings) +} + +func getConfigmap(configmapName, namespace string) *v1.ConfigMap { + + configMap := v1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + Kind: "ConfigMap", + APIVersion: v1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: configmapName, + Namespace: namespace, + }, + } + + err := sdk.Get(&configMap) + + if err != nil { + // check if doesn't exist + } + + return &configMap +} + +func getConfigmapDataHash(configmapName, namespace string) string { + + hash := "" + + configMap := getConfigmap(configmapName, namespace) + + dataHashes := make(map[string][32]byte) + + for key, data := range configMap.Data { + if key != "index_settings" { + dataHashes[key] = sha256.Sum256([]byte(data)) + } + } + + sortedKeys := sortDataHashKeys(dataHashes) + + for _, key := range sortedKeys { + hash = fmt.Sprintf("%s%s", hash, dataHashes[key]) + } + + return hash +} diff --git a/pkg/k8shandler/defaults.go b/pkg/k8shandler/defaults.go index 150967e46..64dd424be 100644 --- a/pkg/k8shandler/defaults.go +++ b/pkg/k8shandler/defaults.go @@ -10,6 +10,20 @@ const ( modeUnique = "unique" modeSharedOps = "shared_ops" defaultMode = modeSharedOps + + defaultMasterCPULimit = "100m" + defaultMasterCPURequest = "100m" + defaultCPULimit = "4000m" + defaultCPURequest = "100m" + defaultMemoryLimit = "4Gi" + defaultMemoryRequest = "1Gi" + elasticsearchDefaultImage = "quay.io/openshift/origin-logging-elasticsearch5" + + maxMasterCount = 3 + + elasticsearchCertsPath = "/etc/openshift/elasticsearch/secret" + elasticsearchConfigPath = "/usr/share/java/elasticsearch/config" + heapDumpLocation = "/elasticsearch/persistent/heapdump.hprof" ) func kibanaIndexMode(mode string) (string, error) { @@ -22,8 +36,8 @@ func kibanaIndexMode(mode string) (string, error) { return "", fmt.Errorf("invalid kibana index mode provided [%s]", mode) } -func esUnicastHost(clusterName string) string { - return clusterName + "-cluster" +func esUnicastHost(clusterName, namespace string) string { + return fmt.Sprintf("%v-cluster.%v.svc", clusterName, namespace) } func rootLogger() string { diff --git 
a/pkg/k8shandler/deployment.go b/pkg/k8shandler/deployment.go index 605c6c061..682392b0c 100644 --- a/pkg/k8shandler/deployment.go +++ b/pkg/k8shandler/deployment.go @@ -2,223 +2,534 @@ package k8shandler import ( "fmt" + "time" - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" + "github.com/operator-framework/operator-sdk/pkg/sdk" "github.com/sirupsen/logrus" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" - "github.com/operator-framework/operator-sdk/pkg/sdk" + v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ) type deploymentNode struct { - resource apps.Deployment + self apps.Deployment + // prior hash for configmap content + configmapHash string + // prior hash for secret content + secretHash string + + clusterName string + + currentRevision string + + clusterSize int32 } -func (node *deploymentNode) getResource() runtime.Object { - return &node.resource +func (deploymentNode *deploymentNode) populateReference(nodeName string, node v1alpha1.ElasticsearchNode, cluster *v1alpha1.Elasticsearch, roleMap map[v1alpha1.ElasticsearchNodeRole]bool, replicas int32) { + + labels := newLabels(cluster.Name, roleMap) + + deployment := apps.Deployment{ + TypeMeta: metav1.TypeMeta{ + Kind: "Deployment", + APIVersion: apps.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: cluster.Namespace, + Labels: labels, + }, + } + + progressDeadlineSeconds := int32(1800) + + deployment.Spec = apps.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: newLabelSelector(cluster.Name, roleMap), + }, + Strategy: apps.DeploymentStrategy{ + Type: "Recreate", + }, + ProgressDeadlineSeconds: &progressDeadlineSeconds, + Paused: false, + Template: newPodTemplateSpec(nodeName, cluster.Name, cluster.Namespace, node, cluster.Spec.Spec, labels, roleMap), + } + + addOwnerRefToObject(&deployment, getOwnerRef(cluster)) + + deploymentNode.self = deployment + deploymentNode.clusterName = cluster.Name +} + +func (current *deploymentNode) updateReference(node NodeTypeInterface) { + current.self = node.(*deploymentNode).self } -func (node *deploymentNode) getRevision(cfg *desiredNodeState) (string, error) { - val, ok := node.resource.ObjectMeta.Annotations["deployment.kubernetes.io/revision"] +func (node *deploymentNode) name() string { + return node.self.Name +} - if ok { - return val, nil +func (node *deploymentNode) state() v1alpha1.ElasticsearchNodeStatus { + + rolloutForReload := v1.ConditionFalse + rolloutForUpdate := v1.ConditionFalse + + // see if we need to update the deployment object + if node.isChanged() { + rolloutForUpdate = v1.ConditionTrue + } + + // check if the configmapHash changed + /*newConfigmapHash := getConfigmapDataHash(node.clusterName, node.self.Namespace) + if newConfigmapHash != node.configmapHash { + rolloutForReload = v1.ConditionTrue + }*/ + + // check if the secretHash changed + newSecretHash := getSecretDataHash(node.clusterName, node.self.Namespace) + if newSecretHash != node.secretHash { + rolloutForReload = v1.ConditionTrue } - return "", fmt.Errorf("Unable to find revision annotation value for %v", cfg.DeployName) + return v1alpha1.ElasticsearchNodeStatus{ + DeploymentName: node.self.Name, + UpgradeStatus: v1alpha1.ElasticsearchNodeUpgradeStatus{ + 
ScheduledForUpgrade: rolloutForUpdate, + ScheduledForRedeploy: rolloutForReload, + }, + } } -func (node *deploymentNode) awaitingRollout(cfg *desiredNodeState, currentRevision string) (bool, error) { +func (node *deploymentNode) create() error { - actualRevision, err := node.getRevision(cfg) + err := sdk.Create(&node.self) if err != nil { - return true, err + if !errors.IsAlreadyExists(err) { + return fmt.Errorf("Could not create node resource: %v", err) + } else { + // node already exists, make sure the deployment is paused + return node.pause() + } } - return actualRevision == currentRevision, nil + // created unpaused, pause after deployment... + // wait until we have a revision annotation... + if err = node.waitForInitialRollout(); err != nil { + return err + } + + if err = node.pause(); err != nil { + return err + } + + // update the hashmaps + node.configmapHash = getConfigmapDataHash(node.clusterName, node.self.Namespace) + node.secretHash = getSecretDataHash(node.clusterName, node.self.Namespace) + + return nil } -// needsPause determines whether a Deployment needs to be paused -// A Deployment doesn't need to be paused if it is already paused. -// Otherwise it need to be paused. -func (node *deploymentNode) needsPause(cfg *desiredNodeState) (bool, error) { +func (node *deploymentNode) waitForInitialRollout() error { + err := wait.Poll(time.Second*1, time.Second*30, func() (done bool, err error) { + if getErr := sdk.Get(&node.self); getErr != nil { + logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, getErr) + return false, getErr + } + + _, ok := node.self.ObjectMeta.Annotations["deployment.kubernetes.io/revision"] + if ok { + return true, nil + } + + return false, nil + }) + return err +} + +func (node *deploymentNode) nodeRevision() string { + val, ok := node.self.ObjectMeta.Annotations["deployment.kubernetes.io/revision"] - if node.resource.Spec.Paused == false { - logrus.Debugf("Deployment %v is not currently paused.", node.resource.Name) - return true, nil + if ok { + return val } - return false, nil + return "" +} + +func (node *deploymentNode) waitForNodeRollout(currentRevision string) error { + err := wait.Poll(time.Second*1, time.Second*30, func() (done bool, err error) { + if getErr := sdk.Get(&node.self); getErr != nil { + logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, getErr) + return false, getErr + } + + revision := node.nodeRevision() + + return (revision != currentRevision), nil + }) + return err +} + +func (node *deploymentNode) pause() error { + return node.setPaused(true) +} + +func (node *deploymentNode) unpause() error { + return node.setPaused(false) } -// Since this is called as part of doing an upgrade we check if deployments need to be -// paused again as a separate call to avoid unnecessary rollouts -func (node *deploymentNode) isDifferent(cfg *desiredNodeState) (bool, error) { +func (node *deploymentNode) setPaused(paused bool) error { + nretries := -1 + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + nretries++ + if getErr := sdk.Get(&node.self); getErr != nil { + logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, getErr) + return getErr + } + + if node.self.Spec.Paused == paused { + return nil + } + + node.self.Spec.Paused = paused - // Check replicas number - actualReplicas := *node.resource.Spec.Replicas - if cfg.getReplicas() != actualReplicas { - logrus.Debugf("Different number of replicas detected, updating deployment %q", 
cfg.DeployName) - return true, nil + if updateErr := sdk.Update(&node.self); updateErr != nil { + logrus.Debugf("Failed to update node resource %v: %v", node.self.Name, updateErr) + return updateErr + } + return nil + }) + if retryErr != nil { + return fmt.Errorf("Error: could not update Elasticsearch node %v after %v retries: %v", node.self.Name, nretries, retryErr) } - // Check if labels are correct - for label, value := range cfg.Labels { - val, ok := node.resource.Labels[label] - if !ok || val != value { - logrus.Debugf("Different labels detected, updating deployment %q", node.resource.GetName()) - return true, nil + return nil +} + +func (node *deploymentNode) setReplicaCount(replicas int32) error { + nretries := -1 + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + nretries++ + if getErr := sdk.Get(&node.self); getErr != nil { + logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, getErr) + return getErr + } + + if *node.self.Spec.Replicas == replicas { + return nil } + + node.self.Spec.Replicas = &replicas + + if updateErr := sdk.Update(&node.self); updateErr != nil { + logrus.Debugf("Failed to update node resource %v: %v", node.self.Name, updateErr) + return updateErr + } + return nil + }) + if retryErr != nil { + return fmt.Errorf("Error: could not update Elasticsearch node %v after %v retries: %v", node.self.Name, nretries, retryErr) } - // Check if the Variables are the desired ones - envVars := node.resource.Spec.Template.Spec.Containers[0].Env - desiredVars := cfg.EnvVars - if len(envVars) != len(desiredVars) { - logrus.Debugf("Different environmental variables detected, updating deployment %q", node.resource.GetName()) - return true, nil + return nil +} + +func (node *deploymentNode) replicaCount() (error, int32) { + if err := sdk.Get(&node.self); err != nil { + logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, err) + return err, -1 } - for index, value := range envVars { - if value.ValueFrom == nil { - if desiredVars[index] != value { - logrus.Debugf("Different environmental variables detected, updating deployment %q", node.resource.GetName()) - return true, nil - } - } else { - if desiredVars[index].ValueFrom.FieldRef.FieldPath != value.ValueFrom.FieldRef.FieldPath { - logrus.Debugf("Different environmental variables detected, updating deployment %q", node.resource.GetName()) - return true, nil - } + + return nil, node.self.Status.Replicas +} + +func (node *deploymentNode) waitForNodeRejoinCluster() (error, bool) { + err := wait.Poll(time.Second*1, time.Second*60, func() (done bool, err error) { + clusterSize, getErr := GetClusterNodeCount(node.clusterName, node.self.Namespace) + if err != nil { + logrus.Warnf("Unable to get cluster size waiting for %v to rejoin cluster", node.name()) + return false, getErr } - } - // Check that storage configuration is the same - // Maybe this needs to be split into a separate method since this - // may indicate that we need a new cluster spin up, not rolling restart - for _, volume := range node.resource.Spec.Template.Spec.Volumes { - if volume.Name == "elasticsearch-storage" { - switch { - case volume.PersistentVolumeClaim != nil && &cfg.ESNodeSpec.Storage.StorageClassName != nil: - desiredClaimName := fmt.Sprintf("%s-%s", cfg.ClusterName, cfg.DeployName) - if volume.PersistentVolumeClaim.ClaimName == desiredClaimName { - return false, nil - } + return (node.clusterSize <= clusterSize), nil + }) - logrus.Warn("Detected change in storage") - return true, 
nil - case volume.EmptyDir != nil && cfg.ESNodeSpec.Storage == v1alpha1.ElasticsearchStorageSpec{}: - return false, nil - default: - logrus.Warn("Detected change in storage") - return true, nil - } + return err, (err == nil) +} + +func (node *deploymentNode) waitForNodeLeaveCluster() (error, bool) { + err := wait.Poll(time.Second*1, time.Second*60, func() (done bool, err error) { + clusterSize, getErr := GetClusterNodeCount(node.clusterName, node.self.Namespace) + if err != nil { + logrus.Warnf("Unable to get cluster size waiting for %v to leave cluster", node.name()) + return false, getErr } - } - return false, nil + + return (node.clusterSize > clusterSize), nil + }) + + return err, (err == nil) } -// isUpdateNeeded compares existing Deployment resource and its desired state. -// If a difference is found the existing resource will be updated. This triggers -// a new rollout, which is handeled by the operator in rolling fashion. -// Currently only Image and CPU & memory Resources trigger rolling restart -func (node *deploymentNode) isUpdateNeeded(cfg *desiredNodeState) bool { - // Only Image and Resources (CPU & memory) differences trigger rolling restart - for _, container := range node.resource.Spec.Template.Spec.Containers { - if container.Name == "elasticsearch" { - // Check image of Elasticsearch container - if container.Image != cfg.ESImage { - logrus.Debugf("Resource '%s' has different container image than desired", node.resource.Name) - return true - } - // Check CPU limits - if cfg.ESNodeSpec.Resources.Limits.Cpu().Cmp(*container.Resources.Limits.Cpu()) != 0 { - logrus.Debugf("Resource '%s' has different CPU limit than desired", node.resource.Name) - return true - } - // Check memory limits - if cfg.ESNodeSpec.Resources.Limits.Memory().Cmp(*container.Resources.Limits.Memory()) != 0 { - logrus.Debugf("Resource '%s' has different Memory limit than desired", node.resource.Name) - return true +func (node *deploymentNode) restart(upgradeStatus *v1alpha1.ElasticsearchNodeStatus) { + + if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue { + if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace); status != "green" { + logrus.Infof("Waiting for cluster to be fully recovered before restarting %v: %v / green", node.name(), status) + return + } + + size, err := GetClusterNodeCount(node.clusterName, node.self.Namespace) + if err != nil { + logrus.Warnf("Unable to get cluster size prior to restart for %v", node.name()) + return + } + node.clusterSize = size + upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionTrue + } + + if upgradeStatus.UpgradeStatus.UpgradePhase == "" || + upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.ControllerUpdated { + + err, replicas := node.replicaCount() + if err != nil { + logrus.Warnf("Unable to get replica count for %v", node.name()) + } + + if replicas > 0 { + if ok, err := DoSynchronizedFlush(node.clusterName, node.self.Namespace); !ok { + logrus.Warnf("Unable to perform synchronized flush: %v", err) + return } - // Check CPU requests - if cfg.ESNodeSpec.Resources.Requests.Cpu().Cmp(*container.Resources.Requests.Cpu()) != 0 { - logrus.Debugf("Resource '%s' has different CPU Request than desired", node.resource.Name) - return true + + // disable shard allocation + if ok, err := SetShardAllocation(node.clusterName, node.self.Namespace, v1alpha1.ShardAllocationNone); !ok { + logrus.Warnf("Unable to disable shard allocation: %v", err) + return } - // Check memory requests - if 
cfg.ESNodeSpec.Resources.Requests.Memory().Cmp(*container.Resources.Requests.Memory()) != 0 { - logrus.Debugf("Resource '%s' has different Memory Request than desired", node.resource.Name) - return true + + // check for available replicas empty + // node.self.Status.Replicas + // if we aren't at 0, then we need to scale down to 0 + if err = node.setReplicaCount(0); err != nil { + logrus.Warnf("Unable to scale down %v", node.name()) + return } } + + if err, _ = node.waitForNodeLeaveCluster(); err != nil { + logrus.Infof("Timed out waiting for %v to leave the cluster", node.name()) + return + } + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.NodeRestarting } - return false -} -func (node *deploymentNode) query() error { - err := sdk.Get(&node.resource) - return err + if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.NodeRestarting { + + if err := node.setReplicaCount(1); err != nil { + logrus.Warnf("Unable to scale up %v", node.name()) + return + } + + if err, _ := node.waitForNodeRejoinCluster(); err != nil { + logrus.Infof("Timed out waiting for %v to rejoin cluster", node.name()) + return + } + + node.refreshHashes() + + // reenable shard allocation + if ok, err := SetShardAllocation(node.clusterName, node.self.Namespace, v1alpha1.ShardAllocationAll); !ok { + logrus.Warnf("Unable to enable shard allocation: %v", err) + return + } + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.RecoveringData + } + + if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.RecoveringData { + + if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace); status != "green" { + logrus.Infof("Waiting for cluster to complete recovery: %v / green", status) + return + } + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.ControllerUpdated + upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionFalse + } } -// constructNodeDeployment creates the deployment for the node -func (node *deploymentNode) constructNodeResource(cfg *desiredNodeState, owner metav1.OwnerReference) (runtime.Object, error) { +func (node *deploymentNode) update(upgradeStatus *v1alpha1.ElasticsearchNodeStatus) error { + + // set our state to being under upgrade + if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue { + if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace); status != "green" { + logrus.Infof("Waiting for cluster to be fully recovered before upgrading %v: %v / green", node.name(), status) + return fmt.Errorf("Cluster not in green state before beginning upgrade: %v", status) + } + + size, err := GetClusterNodeCount(node.clusterName, node.self.Namespace) + if err != nil { + logrus.Warnf("Unable to get cluster size prior to update for %v", node.name()) + } + node.clusterSize = size + upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionTrue + } - // Check if deployment exists + // use UpgradePhase to gate what we work on, update phase when we complete a task + if upgradeStatus.UpgradeStatus.UpgradePhase == "" || + upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.ControllerUpdated { - // FIXME: remove hardcode + if ok, err := DoSynchronizedFlush(node.clusterName, node.self.Namespace); !ok { + logrus.Warnf("Unable to perform synchronized flush: %v", err) + return err + } - replicas := cfg.getReplicas() + // disable shard allocation + if ok, err := SetShardAllocation(node.clusterName, node.self.Namespace, v1alpha1.ShardAllocationNone); !ok { + logrus.Warnf("Unable to disable shard allocation: %v", err) + return err + } - // deployment := node.resource - deployment 
:= apps.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: cfg.DeployName, - Namespace: cfg.Namespace, - }, + // see if we need to update the deployment object and verify we have latest to update + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // isChanged() will get the latest revision from the apiserver + // and return false if there is nothing to change and will update the node object if required + if node.isChanged() { + if updateErr := sdk.Update(&node.self); updateErr != nil { + logrus.Debugf("Failed to update node resource %v: %v", node.self.Name, updateErr) + return updateErr + } + + return nil + } else { + return nil + } + }) + + if retryErr != nil { + return retryErr + } + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.NodeRestarting + node.currentRevision = node.nodeRevision() } - progressDeadlineSeconds := int32(1800) - deployment.ObjectMeta.Labels = cfg.getLabels() - podTemplate, err := cfg.constructPodTemplateSpec() - if err != nil { - return nil, err + if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.NodeRestarting { + + // do a unpause, wait, and pause again + node.unpause() + + // wait for rollout + if err := node.waitForNodeRollout(node.currentRevision); err != nil { + logrus.Infof("Timed out waiting for node %v to rollout", node.name()) + return err + } + + // pause again + node.pause() + + // once we've restarted this is taken care of + node.refreshHashes() + + // wait for node to rejoin cluster + if err, _ := node.waitForNodeRejoinCluster(); err != nil { + logrus.Infof("Timed out waiting for %v to rejoin cluster", node.name()) + return fmt.Errorf("Node %v has not rejoined cluster %v yet", node.name(), node.clusterName) + } + + // reenable shard allocation + if ok, err := SetShardAllocation(node.clusterName, node.self.Namespace, v1alpha1.ShardAllocationAll); !ok { + logrus.Warnf("Unable to enable shard allocation: %v", err) + return err + } + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.RecoveringData } - deployment.Spec = apps.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: cfg.getLabelSelector(), - }, - Strategy: apps.DeploymentStrategy{ - Type: "Recreate", - }, - ProgressDeadlineSeconds: &progressDeadlineSeconds, - Template: podTemplate, - Paused: cfg.Paused, + + if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.RecoveringData { + + if status, err := GetClusterHealth(node.clusterName, node.self.Namespace); status != "green" { + logrus.Infof("Waiting for cluster to complete recovery: %v / green", status) + return err + } + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.ControllerUpdated + upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionFalse } - // if storageClass != "default" { - // deployment.Spec.VolumeClaimTemplates[0].Annotations = map[string]string{ - // "volume.beta.kubernetes.io/storage-class": storageClass, - // } - // } - // sset, _ := json.Marshal(deployment) - // s := string(sset[:]) + return nil +} - addOwnerRefToObject(&deployment, owner) +func (node *deploymentNode) refreshHashes() { + newConfigmapHash := getConfigmapDataHash(node.clusterName, node.self.Namespace) + if newConfigmapHash != node.configmapHash { + node.configmapHash = newConfigmapHash + } - return &deployment, nil + newSecretHash := getSecretDataHash(node.clusterName, node.self.Namespace) + if newSecretHash != node.secretHash { + node.secretHash = newSecretHash + } } -func (node *deploymentNode) 
delete() error { - err := sdk.Delete(&node.resource) +func (node *deploymentNode) isChanged() bool { + + changed := false + + desired := node.self.DeepCopy() + err := sdk.Get(&node.self) + // error check that it exists, etc if err != nil { - return fmt.Errorf("Unable to delete Deployment %v: ", err) + // if it doesn't exist, return true + return false } - return nil + + // Only Image and Resources (CPU & memory) differences trigger rolling restart + // we will only have one container, no need to do range + nodeContainer := node.self.Spec.Template.Spec.Containers[0] + desiredContainer := desired.Spec.Template.Spec.Containers[0] + + // check that both exist + + if nodeContainer.Image != desiredContainer.Image { + logrus.Debugf("Resource '%s' has different container image than desired", node.self.Name) + nodeContainer.Image = desiredContainer.Image + changed = true + } + + if desiredContainer.Resources.Limits.Cpu().Cmp(*nodeContainer.Resources.Limits.Cpu()) != 0 { + logrus.Debugf("Resource '%s' has different CPU limit than desired", node.self.Name) + nodeContainer.Resources.Limits[v1.ResourceCPU] = *desiredContainer.Resources.Limits.Cpu() + changed = true + } + // Check memory limits + if desiredContainer.Resources.Limits.Memory().Cmp(*nodeContainer.Resources.Limits.Memory()) != 0 { + logrus.Debugf("Resource '%s' has different Memory limit than desired", node.self.Name) + nodeContainer.Resources.Limits[v1.ResourceMemory] = *desiredContainer.Resources.Limits.Memory() + changed = true + } + // Check CPU requests + if desiredContainer.Resources.Requests.Cpu().Cmp(*nodeContainer.Resources.Requests.Cpu()) != 0 { + logrus.Debugf("Resource '%s' has different CPU Request than desired", node.self.Name) + nodeContainer.Resources.Requests[v1.ResourceCPU] = *desiredContainer.Resources.Requests.Cpu() + changed = true + } + // Check memory requests + if desiredContainer.Resources.Requests.Memory().Cmp(*nodeContainer.Resources.Requests.Memory()) != 0 { + logrus.Debugf("Resource '%s' has different Memory Request than desired", node.self.Name) + nodeContainer.Resources.Requests[v1.ResourceMemory] = *desiredContainer.Resources.Requests.Memory() + changed = true + } + + node.self.Spec.Template.Spec.Containers[0] = nodeContainer + + return changed } diff --git a/pkg/k8shandler/desirednodestate.go b/pkg/k8shandler/desirednodestate.go deleted file mode 100644 index c5ca0551b..000000000 --- a/pkg/k8shandler/desirednodestate.go +++ /dev/null @@ -1,731 +0,0 @@ -package k8shandler - -import ( - "fmt" - "os" - "sort" - "strconv" - "strings" - "time" - - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/openshift/elasticsearch-operator/pkg/utils" - "github.com/operator-framework/operator-sdk/pkg/sdk" - "github.com/sirupsen/logrus" - apps "k8s.io/api/apps/v1" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/util/retry" -) - -const ( - elasticsearchCertsPath = "/etc/openshift/elasticsearch/secret" - elasticsearchConfigPath = "/usr/share/java/elasticsearch/config" - elasticsearchDefaultImage = "quay.io/openshift/origin-logging-elasticsearch5" - heapDumpLocation = "/elasticsearch/persistent/heapdump.hprof" - proxyImageEnv = "PROXY_IMAGE" -) - -type nodeState struct { - Desired desiredNodeState - Actual actualNodeState -} - -type desiredNodeState struct { - ClusterName string - Namespace string - DeployName string - Roles []v1alpha1.ElasticsearchNodeRole - 
ESNodeSpec v1alpha1.ElasticsearchNode - ESImage string - ProxyImage string - SecretName string - NodeNum int32 - ReplicaNum int32 - ServiceAccountName string - ConfigMapName string - Labels map[string]string - MasterNum int32 - DataNum int32 - EnvVars []v1.EnvVar - Paused bool -} - -type actualNodeState struct { - StatefulSet *apps.StatefulSet - Deployment *apps.Deployment - ReplicaSet *apps.ReplicaSet - Pod *v1.Pod -} - -func constructNodeSpec(dpl *v1alpha1.Elasticsearch, esNode v1alpha1.ElasticsearchNode, configMapName, serviceAccountName string, nodeNum int32, replicaNum int32, masterNum, dataNum int32) (desiredNodeState, error) { - nodeCfg := desiredNodeState{ - ClusterName: dpl.Name, - Namespace: dpl.Namespace, - Roles: esNode.Roles, - ESNodeSpec: esNode, - SecretName: v1alpha1.SecretName, - NodeNum: nodeNum, - ReplicaNum: replicaNum, - ServiceAccountName: serviceAccountName, - ConfigMapName: configMapName, - Labels: dpl.Labels, - MasterNum: masterNum, - DataNum: dataNum, - Paused: true, - } - deployName, err := constructDeployName(dpl.Name, esNode.Roles, nodeNum, replicaNum) - if err != nil { - return nodeCfg, err - } - nodeCfg.DeployName = deployName - nodeCfg.EnvVars = nodeCfg.getEnvVars() - - nodeCfg.ESNodeSpec.Resources = getResourceRequirements(dpl.Spec.Spec.Resources, esNode.Resources) - nodeCfg.ESImage = dpl.Spec.Spec.Image - // proxyImage isn't part of the CRD, because we may not need the proxy forever - value, ok := os.LookupEnv(proxyImageEnv) - if !ok { - return nodeCfg, fmt.Errorf("proxy Image not specified, use %s environmental variable", proxyImageEnv) - } - nodeCfg.ProxyImage = value - return nodeCfg, nil -} - -func constructDeployName(name string, roles []v1alpha1.ElasticsearchNodeRole, nodeNum int32, replicaNum int32) (string, error) { - if len(roles) == 0 { - return "", fmt.Errorf("No node roles specified for a node in cluster %s", name) - } - if len(roles) == 1 && roles[0] == "master" { - return fmt.Sprintf("%s-master-%d", name, nodeNum), nil - } - var nodeType []string - for _, role := range roles { - if role != "client" && role != "data" && role != "master" { - return "", fmt.Errorf("Unknown node's role: %s", role) - } - nodeType = append(nodeType, string(role)) - } - - sort.Strings(nodeType) - - return fmt.Sprintf("%s-%s-%d-%d", name, strings.Join(nodeType, ""), nodeNum, replicaNum), nil -} - -// getReplicas returns the desired number of replicas in the deployment/statefulset -// if this is a data deployment, we always want to create separate deployment per replica -// so we'll return 1. if this is not a data node, we can simply scale existing replica. 
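// Illustrative sketch, not part of this patch: the "one deployment per data replica" rule in the
// deleted comment above is kept by the refactor, but it moves out of getReplicas() and into
// GetNodeTypeInterface in nodetypefactory.go, which loops the replica index from 1 to NodeCount
// for data nodes and builds a single-replica deployment for each, while non-data roles stay a
// single statefulset scaled to NodeCount. The helper below is a hypothetical restatement of that
// split, not operator API:
func workloadSplit(isDataNode bool, nodeCount int32) (workloads int32, replicasEach int32) {
	if isDataNode {
		// one single-replica Deployment per data replica, named <cluster>-<roles>-<index>-<replica>
		return nodeCount, 1
	}
	// non-data roles remain one StatefulSet scaled to NodeCount, named <cluster>-<roles>-<index>
	return 1, nodeCount
}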
-func (cfg *desiredNodeState) getReplicas() int32 { - if cfg.isNodeData() { - return 1 - } - return cfg.ESNodeSpec.NodeCount -} - -func (cfg *desiredNodeState) isNodeMaster() bool { - for _, role := range cfg.Roles { - if role == "master" { - return true - } - } - return false -} - -func (cfg *desiredNodeState) isNodePureMaster() bool { - if len(cfg.Roles) > 1 { - return false - } - for _, role := range cfg.Roles { - if role != v1alpha1.ElasticsearchRoleMaster { - return false - } - } - return true -} - -func (cfg *desiredNodeState) isNodeData() bool { - for _, role := range cfg.Roles { - if role == "data" { - return true - } - } - return false -} - -func (cfg *desiredNodeState) isNodeClient() bool { - for _, role := range cfg.Roles { - if role == "client" { - return true - } - } - return false -} - -func (cfg *desiredNodeState) getLabels() map[string]string { - labels := cfg.Labels - if labels == nil { - labels = make(map[string]string) - } - labels["es-node-client"] = strconv.FormatBool(cfg.isNodeClient()) - labels["es-node-data"] = strconv.FormatBool(cfg.isNodeData()) - labels["es-node-master"] = strconv.FormatBool(cfg.isNodeMaster()) - labels["cluster-name"] = cfg.ClusterName - labels["component"] = cfg.ClusterName - labels["tuned.openshift.io/elasticsearch"] = "true" - return labels -} - -func (cfg *desiredNodeState) getLabelSelector() map[string]string { - return map[string]string{ - "es-node-client": strconv.FormatBool(cfg.isNodeClient()), - "es-node-data": strconv.FormatBool(cfg.isNodeData()), - "es-node-master": strconv.FormatBool(cfg.isNodeMaster()), - "cluster-name": cfg.ClusterName, - } -} - -func (cfg *desiredNodeState) getNode() NodeTypeInterface { - if cfg.isNodeData() { - return NewDeploymentNode(cfg.DeployName, cfg.Namespace) - } - return NewStatefulSetNode(cfg.DeployName, cfg.Namespace) -} - -func (cfg *desiredNodeState) CreateNode(owner metav1.OwnerReference) error { - node := cfg.getNode() - err := node.query() - if err != nil { - // Node's resource doesn't exist, we can construct one - cfg.Paused = false - logrus.Infof("Constructing new resource %v", cfg.DeployName) - dep, err := node.constructNodeResource(cfg, owner) - if err != nil { - return fmt.Errorf("Could not construct node resource: %v", err) - } - err = sdk.Create(dep) - if err != nil && !errors.IsAlreadyExists(err) { - return fmt.Errorf("Could not create node resource: %v", err) - } - } - return nil -} - -func (cfg *desiredNodeState) PauseNode(owner metav1.OwnerReference) error { - node := cfg.getNode() - err := node.query() - - // TODO: what is allowed to be changed in the StatefulSet ? 
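// Illustrative sketch, not part of this patch: the PauseNode/UpdateNode flow being removed here is
// replaced by deploymentNode.pause()/unpause()/setPaused() in deployment.go, which wrap the usual
// get-mutate-update cycle in retry.RetryOnConflict so a conflicting writer cannot wedge the
// reconcile loop. The function name below is hypothetical; it assumes the apps, retry, and sdk
// imports already present in deployment.go:
func setPausedSketch(deployment *apps.Deployment, paused bool) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		// refetch the latest revision before mutating so the update applies to current state
		if err := sdk.Get(deployment); err != nil {
			return err
		}
		deployment.Spec.Paused = paused
		// conflicts on Update are retried with the DefaultRetry backoff
		return sdk.Update(deployment)
	})
}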
- // Validate Elasticsearch cluster parameters - needsPause, err := node.needsPause(cfg) - if err != nil { - return fmt.Errorf("Failed to see if the node resource is different from what's needed: %v", err) - } - - if needsPause { - // set it back to being paused - cfg.Paused = true - nretries := -1 - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - nretries++ - if getErr := node.query(); getErr != nil { - logrus.Debugf("Could not get Elasticsearch node resource %v: %v", cfg.DeployName, getErr) - return getErr - } - dep, updateErr := node.constructNodeResource(cfg, owner) - if updateErr != nil { - return fmt.Errorf("Could not construct node resource %v for update: %v", cfg.DeployName, updateErr) - } - if nretries == 0 { - logrus.Infof("Updating node resource to be paused again %v", cfg.DeployName) - } - if updateErr = sdk.Update(dep); updateErr != nil { - logrus.Debugf("Failed to update node resource %v: %v", cfg.DeployName, updateErr) - } - return updateErr - }) - if retryErr != nil { - return fmt.Errorf("Error: could not update status for Elasticsearch %v after %v retries: %v", cfg.DeployName, nretries, retryErr) - } - logrus.Debugf("Updated Elasticsearch %v after %v retries", cfg.DeployName, nretries) - } - return nil -} - -func (cfg *desiredNodeState) UpdateNode(owner metav1.OwnerReference) error { - node := cfg.getNode() - if err := node.query(); err != nil { - return err - } - - // TODO: what is allowed to be changed in the StatefulSet ? - // Validate Elasticsearch cluster parameters - diff := node.isUpdateNeeded(cfg) - - if diff { - cfg.Paused = false - nretries := -1 - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - nretries++ - if getErr := node.query(); getErr != nil { - logrus.Debugf("Could not get Elasticsearch node resource %v: %v", cfg.DeployName, getErr) - return getErr - } - dep, updateErr := node.constructNodeResource(cfg, owner) - if updateErr != nil { - return fmt.Errorf("Could not construct node resource %v for update: %v", cfg.DeployName, updateErr) - } - logrus.Infof("Updating node resource %v", cfg.DeployName) - if updateErr = sdk.Update(dep); updateErr != nil { - logrus.Debugf("Failed to update node resource %v: %v", cfg.DeployName, updateErr) - } - return updateErr - }) - if retryErr != nil { - return fmt.Errorf("Error: could not update status for Elasticsearch %v after %v retries: %v", cfg.DeployName, nretries, retryErr) - } - logrus.Debugf("Updated Elasticsearch %v after %v retries", cfg.DeployName, nretries) - - currentRevision, err := node.getRevision(cfg) - if err != nil { - return err - } - - err = wait.Poll(time.Second*1, time.Second*10, func() (done bool, err error) { - if getErr := node.query(); getErr != nil { - logrus.Debugf("Could not get Elasticsearch node resource %v: %v", cfg.DeployName, getErr) - return false, getErr - } - - if awaitingRollout, _ := node.awaitingRollout(cfg, currentRevision); !awaitingRollout { - err = cfg.PauseNode(owner) - return err == nil, err - } - - return false, nil - }) - return err - } - return nil -} - -func (cfg *desiredNodeState) IsPauseNeeded() bool { - // FIXME: to be refactored. 
query() must not exist here, since - // we already have information in clusterState - node := cfg.getNode() - err := node.query() - if err != nil { - // resource doesn't exist, so the pause is not needed - return false - } - - unpaused, err := node.needsPause(cfg) - if err != nil { - logrus.Errorf("Failed to obtain if there is a pause needed for resource: %v", err) - return false - } - - return unpaused -} - -func (cfg *desiredNodeState) IsUpdateNeeded() bool { - // FIXME: to be refactored. query() must not exist here, since - // we already have information in clusterState - node := cfg.getNode() - err := node.query() - if err != nil { - // resource doesn't exist, so the update is needed - return true - } - return node.isUpdateNeeded(cfg) -} - -func (node *nodeState) setStatefulSet(statefulSet apps.StatefulSet) { - node.Actual.StatefulSet = &statefulSet -} - -func (node *nodeState) setDeployment(deployment apps.Deployment) { - node.Actual.Deployment = &deployment -} - -func (node *nodeState) setReplicaSet(replicaSet apps.ReplicaSet) { - node.Actual.ReplicaSet = &replicaSet -} - -func (node *nodeState) setPod(pod v1.Pod) { - node.Actual.Pod = &pod -} - -func (cfg *desiredNodeState) getAffinity() v1.Affinity { - labelSelectorReqs := []metav1.LabelSelectorRequirement{} - if cfg.isNodeClient() { - labelSelectorReqs = append(labelSelectorReqs, metav1.LabelSelectorRequirement{ - Key: "es-node-client", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"true"}, - }) - } - if cfg.isNodeData() { - labelSelectorReqs = append(labelSelectorReqs, metav1.LabelSelectorRequirement{ - Key: "es-node-data", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"true"}, - }) - } - if cfg.isNodeMaster() { - labelSelectorReqs = append(labelSelectorReqs, metav1.LabelSelectorRequirement{ - Key: "es-node-master", - Operator: metav1.LabelSelectorOpIn, - Values: []string{"true"}, - }) - } - - return v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ - { - Weight: 100, - PodAffinityTerm: v1.PodAffinityTerm{ - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: labelSelectorReqs, - }, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - }, - } -} - -func (cfg *desiredNodeState) getEnvVars() []v1.EnvVar { - return []v1.EnvVar{ - v1.EnvVar{ - Name: "DC_NAME", - Value: cfg.DeployName, - }, - v1.EnvVar{ - Name: "NAMESPACE", - ValueFrom: &v1.EnvVarSource{ - FieldRef: &v1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - v1.EnvVar{ - Name: "KUBERNETES_TRUST_CERTIFICATES", - Value: "true", - }, - v1.EnvVar{ - Name: "SERVICE_DNS", - Value: fmt.Sprintf("%s-cluster", cfg.ClusterName), - }, - v1.EnvVar{ - Name: "CLUSTER_NAME", - Value: cfg.ClusterName, - }, - v1.EnvVar{ - Name: "INSTANCE_RAM", - Value: cfg.getInstanceRAM(), - }, - v1.EnvVar{ - Name: "HEAP_DUMP_LOCATION", - Value: heapDumpLocation, - }, - v1.EnvVar{ - Name: "RECOVER_AFTER_TIME", - Value: "5m", - }, - v1.EnvVar{ - Name: "READINESS_PROBE_TIMEOUT", - Value: "30", - }, - v1.EnvVar{ - Name: "POD_LABEL", - Value: fmt.Sprintf("cluster=%s", cfg.ClusterName), - }, - v1.EnvVar{ - Name: "IS_MASTER", - Value: strconv.FormatBool(cfg.isNodeMaster()), - }, - v1.EnvVar{ - Name: "HAS_DATA", - Value: strconv.FormatBool(cfg.isNodeData()), - }, - } -} - -func (cfg *desiredNodeState) getInstanceRAM() string { - memory := cfg.ESNodeSpec.Resources.Limits.Memory() - if !memory.IsZero() { - return memory.String() - } - return defaultMemoryLimit -} - -func (cfg 
*desiredNodeState) getESContainer() v1.Container { - probe := getReadinessProbe() - return v1.Container{ - Name: "elasticsearch", - Image: cfg.ESImage, - ImagePullPolicy: "IfNotPresent", - Env: cfg.getEnvVars(), - Ports: []v1.ContainerPort{ - v1.ContainerPort{ - Name: "cluster", - ContainerPort: 9300, - Protocol: v1.ProtocolTCP, - }, - v1.ContainerPort{ - Name: "restapi", - ContainerPort: 9200, - Protocol: v1.ProtocolTCP, - }, - }, - ReadinessProbe: &probe, - VolumeMounts: cfg.getVolumeMounts(), - Resources: cfg.ESNodeSpec.Resources, - } -} - -func (cfg *desiredNodeState) getProxyContainer() (v1.Container, error) { - proxyCookieSecret, err := utils.RandStringBase64(16) - if err != nil { - return v1.Container{}, err - } - container := v1.Container{ - Name: "proxy", - Image: cfg.ProxyImage, - ImagePullPolicy: "IfNotPresent", - Ports: []v1.ContainerPort{ - v1.ContainerPort{ - Name: "metrics", - ContainerPort: 60000, - Protocol: v1.ProtocolTCP, - }, - }, - VolumeMounts: []v1.VolumeMount{ - v1.VolumeMount{ - Name: fmt.Sprintf("%s-%s", cfg.ClusterName, "metrics"), - MountPath: "/etc/proxy/secrets", - }, - v1.VolumeMount{ - Name: "certificates", - MountPath: "/etc/proxy/elasticsearch", - }, - }, - Args: []string{ - "--https-address=:60000", - "--provider=openshift", - "--upstream=https://127.0.0.1:9200", - "--tls-cert=/etc/proxy/secrets/tls.crt", - "--tls-key=/etc/proxy/secrets/tls.key", - "--upstream-ca=/etc/proxy/elasticsearch/admin-ca", - "--openshift-service-account=elasticsearch", - `-openshift-sar={"resource": "namespaces", "verb": "get"}`, - `-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get"}}`, - "--pass-user-bearer-token", - fmt.Sprintf("--cookie-secret=%s", proxyCookieSecret), - }, - } - return container, nil -} - -func (cfg *desiredNodeState) getVolumeMounts() []v1.VolumeMount { - return []v1.VolumeMount{ - v1.VolumeMount{ - Name: "elasticsearch-storage", - MountPath: "/elasticsearch/persistent", - }, - v1.VolumeMount{ - Name: "elasticsearch-config", - MountPath: elasticsearchConfigPath, - }, - v1.VolumeMount{ - Name: "certificates", - MountPath: elasticsearchCertsPath, - }, - } -} - -// generateMasterPVC method builds PVC for pure master nodes to be used in -// volumeClaimTemplate in StatefulSet spec -func (cfg *desiredNodeState) generateMasterPVC() (v1.PersistentVolumeClaim, bool, error) { - specVol := cfg.ESNodeSpec.Storage - if &specVol.StorageClassName != nil && - specVol.Size != nil { - pvc := v1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "elasticsearch-storage", - Labels: cfg.getLabels(), - }, - Spec: v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadWriteOnce, - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: *specVol.Size, - }, - }, - StorageClassName: &specVol.StorageClassName, - }, - } - return pvc, true, nil - } else if (specVol == v1alpha1.ElasticsearchStorageSpec{}) { - return v1.PersistentVolumeClaim{}, false, nil - } - - return v1.PersistentVolumeClaim{}, false, fmt.Errorf("Unsupported volume configuration for master in cluster %s", cfg.ClusterName) -} - -func (cfg *desiredNodeState) generatePersistentStorage() v1.VolumeSource { - volSource := v1.VolumeSource{} - specVol := cfg.ESNodeSpec.Storage - - switch { - /* - case specVol.PersistentVolumeClaim != nil: - volSource.PersistentVolumeClaim = specVol.PersistentVolumeClaim - */ - - case &specVol.StorageClassName != nil && specVol.Size != nil: - claimName := fmt.Sprintf("%s-%s", cfg.ClusterName, 
cfg.DeployName) - volSource.PersistentVolumeClaim = &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: claimName, - } - - volSpec := v1.PersistentVolumeClaimSpec{ - AccessModes: []v1.PersistentVolumeAccessMode{ - v1.ReadWriteOnce, - }, - Resources: v1.ResourceRequirements{ - Requests: v1.ResourceList{ - v1.ResourceStorage: *specVol.Size, - }, - }, - StorageClassName: &specVol.StorageClassName, - } - - err := createOrUpdatePersistentVolumeClaim(volSpec, claimName, cfg.Namespace) - if err != nil { - logrus.Errorf("Unable to create PersistentVolumeClaim: %v", err) - } - - default: - logrus.Debugf("Defaulting volume source to emptyDir for node %q", cfg.DeployName) - volSource.EmptyDir = &v1.EmptyDirVolumeSource{} - } - - return volSource -} - -func (cfg *desiredNodeState) getVolumes() []v1.Volume { - vols := []v1.Volume{ - v1.Volume{ - Name: "elasticsearch-config", - VolumeSource: v1.VolumeSource{ - ConfigMap: &v1.ConfigMapVolumeSource{ - LocalObjectReference: v1.LocalObjectReference{ - Name: cfg.ConfigMapName, - }, - }, - }, - }, - v1.Volume{ - Name: fmt.Sprintf("%s-%s", cfg.ClusterName, "metrics"), - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: fmt.Sprintf("%s-%s", cfg.ClusterName, "metrics"), - }, - }, - }, - } - - if !cfg.isNodePureMaster() { - vols = append(vols, v1.Volume{ - Name: "elasticsearch-storage", - VolumeSource: cfg.generatePersistentStorage(), - }) - } - - secretName := cfg.SecretName - if cfg.SecretName == "" { - secretName = cfg.ClusterName - } - vols = append(vols, v1.Volume{ - Name: "certificates", - VolumeSource: v1.VolumeSource{ - Secret: &v1.SecretVolumeSource{ - SecretName: secretName, - }, - }, - }) - return vols -} - -func (cfg *desiredNodeState) getSelector() (map[string]string, bool) { - if len(cfg.ESNodeSpec.NodeSelector) == 0 { - return nil, false - } - return cfg.ESNodeSpec.NodeSelector, true -} - -func (actualState *actualNodeState) isStatusUpdateNeeded(nodesInStatus v1alpha1.ElasticsearchStatus) bool { - if actualState.Deployment == nil { - return false - } - for _, node := range nodesInStatus.Nodes { - if actualState.Deployment.Name == node.DeploymentName { - if actualState.ReplicaSet == nil { - return false - } - // This is the proper item in the array of node statuses - if actualState.ReplicaSet.Name != node.ReplicaSetName { - return true - } - - if actualState.Pod == nil { - return false - } - - if actualState.Pod.Name != node.PodName || string(actualState.Pod.Status.Phase) != node.Status { - return true - } - return false - - } - } - - // no corresponding nodes in status - return true -} - -func (cfg *desiredNodeState) constructPodTemplateSpec() (v1.PodTemplateSpec, error) { - affinity := cfg.getAffinity() - - proxyPodSpec, err := cfg.getProxyContainer() - if err != nil { - return v1.PodTemplateSpec{}, err - } - - template := v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: cfg.getLabels(), - }, - Spec: v1.PodSpec{ - Affinity: &affinity, - Containers: []v1.Container{ - cfg.getESContainer(), - proxyPodSpec, - }, - Volumes: cfg.getVolumes(), - ServiceAccountName: cfg.ServiceAccountName, - }, - } - nodeSelector, ok := cfg.getSelector() - if ok { - template.Spec.NodeSelector = nodeSelector - } - return template, nil -} diff --git a/pkg/k8shandler/elasticsearch.go b/pkg/k8shandler/elasticsearch.go new file mode 100644 index 000000000..4c98b62b5 --- /dev/null +++ b/pkg/k8shandler/elasticsearch.go @@ -0,0 +1,328 @@ +package k8shandler + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + 
"io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + + "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" + "github.com/operator-framework/operator-sdk/pkg/sdk" + "github.com/sirupsen/logrus" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + certLocalPath = "/tmp/" +) + +type esCurlStruct struct { + Method string // use net/http constants https://golang.org/pkg/net/http/#pkg-constants + Uri string + RequestBody string + StatusCode int + ResponseBody map[string]interface{} + Error error +} + +func SetShardAllocation(clusterName, namespace string, state v1alpha1.ShardAllocationState) (bool, error) { + + payload := &esCurlStruct{ + Method: http.MethodPut, + Uri: "_cluster/settings", + RequestBody: fmt.Sprintf("{%q:{%q:%q}}", "transient", "cluster.routing.allocation.enable", state), + } + + curlESService(clusterName, namespace, payload) + + acknowledged := false + if payload.ResponseBody["acknowledged"] != nil { + acknowledged = payload.ResponseBody["acknowledged"].(bool) + } + return (payload.StatusCode == 200 && acknowledged), payload.Error +} + +func GetShardAllocation(clusterName, namespace string) (string, error) { + + payload := &esCurlStruct{ + Method: http.MethodGet, + Uri: "_cluster/settings", + } + + curlESService(clusterName, namespace, payload) + + allocation := "" + if payload.ResponseBody["transient"] != nil { + transientBody := payload.ResponseBody["transient"].(map[string]interface{}) + if transientBody["cluster"] != nil { + clusterBody := transientBody["cluster"].(map[string]interface{}) + if clusterBody["routing"] != nil { + routingBody := clusterBody["routing"].(map[string]interface{}) + if routingBody["allocation"] != nil { + allocationBody := routingBody["allocation"].(map[string]interface{}) + if allocationBody["enable"] != nil { + allocation = allocationBody["enable"].(string) + } + } + } + } + } + + return allocation, payload.Error +} + +func SetMinMasterNodes(clusterName, namespace string, numberMasters int32) (bool, error) { + + payload := &esCurlStruct{ + Method: http.MethodPut, + Uri: "_cluster/settings", + RequestBody: fmt.Sprintf("{%q:{%q:%d}}", "persistent", "discovery.zen.minimum_master_nodes", numberMasters), + } + + curlESService(clusterName, namespace, payload) + + acknowledged := false + if payload.ResponseBody["acknowledged"] != nil { + acknowledged = payload.ResponseBody["acknowledged"].(bool) + } + + return (payload.StatusCode == 200 && acknowledged), payload.Error +} + +func GetMinMasterNodes(clusterName, namespace string) (int32, error) { + + payload := &esCurlStruct{ + Method: http.MethodGet, + Uri: "_cluster/settings", + } + + curlESService(clusterName, namespace, payload) + + masterCount := int32(0) + if payload.ResponseBody["persistent"] != nil { + persistentBody := payload.ResponseBody["persistent"].(map[string]interface{}) + if persistentBody["discovery.zen.minimum_master_nodes"] != nil { + masterCount = int32(persistentBody["discovery.zen.minimum_master_nodes"].(float64)) + } + } + + return masterCount, payload.Error +} + +func GetClusterHealth(clusterName, namespace string) (string, error) { + + payload := &esCurlStruct{ + Method: http.MethodGet, + Uri: "_cluster/health", + } + + curlESService(clusterName, namespace, payload) + + status := "" + if payload.ResponseBody["status"] != nil { + status = payload.ResponseBody["status"].(string) + } + + return status, payload.Error +} + +func GetClusterNodeCount(clusterName, namespace string) 
(int32, error) { + + payload := &esCurlStruct{ + Method: http.MethodGet, + Uri: "_cluster/health", + } + + curlESService(clusterName, namespace, payload) + + nodeCount := int32(0) + if payload.ResponseBody["number_of_nodes"] != nil { + nodeCount = int32(payload.ResponseBody["number_of_nodes"].(float64)) + } + + return nodeCount, payload.Error +} + +// TODO: also check that the number of shards in the response > 0? +func DoSynchronizedFlush(clusterName, namespace string) (bool, error) { + + payload := &esCurlStruct{ + Method: http.MethodPost, + Uri: "_flush/synced", + } + + curlESService(clusterName, namespace, payload) + + return (payload.StatusCode == 200), payload.Error +} + +// This will curl the ES service and provide the certs required for doing so +// it will also return the http and string response +func curlESService(clusterName, namespace string, payload *esCurlStruct) { + + urlString := fmt.Sprintf("https://%s.%s.svc:9200/%s", clusterName, namespace, payload.Uri) + urlURL, err := url.Parse(urlString) + + if err != nil { + logrus.Warnf("Unable to parse URL %v: %v", urlString, err) + return + } + + request := &http.Request{ + Method: payload.Method, + URL: urlURL, + } + + switch payload.Method { + case http.MethodGet: + // no more to do to request... + case http.MethodPost: + if payload.RequestBody != "" { + // add to the request + request.Header = map[string][]string{ + "Content-Type": []string{ + "application/json", + }, + } + request.Body = ioutil.NopCloser(bytes.NewReader([]byte(payload.RequestBody))) + } + + case http.MethodPut: + if payload.RequestBody != "" { + // add to the request + request.Header = map[string][]string{ + "Content-Type": []string{ + "application/json", + }, + } + request.Body = ioutil.NopCloser(bytes.NewReader([]byte(payload.RequestBody))) + } + + default: + // unsupported method -- do nothing + return + } + + client := getClient(clusterName, namespace) + resp, err := client.Do(request) + + if resp != nil { + payload.StatusCode = resp.StatusCode + payload.ResponseBody = getMapFromBody(resp.Body) + } + payload.Error = err +} + +func getRootCA(clusterName, namespace string) *x509.CertPool { + certPool := x509.NewCertPool() + + // load cert into []byte + caPem, err := ioutil.ReadFile(path.Join(certLocalPath, clusterName, "admin-ca")) + if err != nil { + logrus.Errorf("Unable to read file to get contents: %v", err) + return nil + } + + certPool.AppendCertsFromPEM(caPem) + + return certPool +} + +func getMapFromBody(body io.ReadCloser) map[string]interface{} { + buf := new(bytes.Buffer) + buf.ReadFrom(body) + + var results map[string]interface{} + err := json.Unmarshal([]byte(buf.String()), &results) + if err != nil { + + } + + return results +} + +func getClientCertificates(clusterName, namespace string) []tls.Certificate { + certificate, err := tls.LoadX509KeyPair( + path.Join(certLocalPath, clusterName, "admin-cert"), + path.Join(certLocalPath, clusterName, "admin-key"), + ) + if err != nil { + return []tls.Certificate{} + } + + return []tls.Certificate{ + certificate, + } +} + +func getClient(clusterName, namespace string) *http.Client { + + // get the contents of the secret + extractSecret(clusterName, namespace) + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: false, + RootCAs: getRootCA(clusterName, namespace), + Certificates: getClientCertificates(clusterName, namespace), + }, + }, + } +} + +func extractSecret(secretName, namespace string) { + secret := &v1.Secret{ + TypeMeta: metav1.TypeMeta{ 
+ Kind: "Secret", + APIVersion: v1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + } + if err := sdk.Get(secret); err != nil { + if errors.IsNotFound(err) { + //return err + logrus.Errorf("Unable to find secret %v: %v", secretName, err) + } + + logrus.Errorf("Error reading secret %v: %v", secretName, err) + //return fmt.Errorf("Unable to extract secret to file: %v", secretName, err) + } + + // make sure that the dir === secretName exists + if _, err := os.Stat(path.Join(certLocalPath, secretName)); os.IsNotExist(err) { + err = os.MkdirAll(path.Join(certLocalPath, secretName), 0755) + if err != nil { + logrus.Errorf("Error creating dir %v: %v", path.Join(certLocalPath, secretName), err) + } + } + + for _, key := range []string{"admin-ca", "admin-cert", "admin-key"} { + + value, ok := secret.Data[key] + + // check to see if the map value exists + if !ok { + logrus.Errorf("Error secret key %v not found", key) + //return fmt.Errorf("No secret data \"%s\" found", key) + } + + if err := ioutil.WriteFile(path.Join(certLocalPath, secretName, key), value, 0644); err != nil { + //return fmt.Errorf("Unable to write to working dir: %v", err) + logrus.Errorf("Error writing %v to %v: %v", value, path.Join(certLocalPath, secretName, key), err) + } + } +} diff --git a/pkg/k8shandler/nodetypefactory.go b/pkg/k8shandler/nodetypefactory.go index 866afa3dc..0671e20f6 100644 --- a/pkg/k8shandler/nodetypefactory.go +++ b/pkg/k8shandler/nodetypefactory.go @@ -1,55 +1,97 @@ package k8shandler import ( - apps "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "fmt" + "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" ) // NodeTypeInterface interace represents individual Elasticsearch node type NodeTypeInterface interface { - getResource() runtime.Object - isDifferent(cfg *desiredNodeState) (bool, error) - isUpdateNeeded(cfg *desiredNodeState) bool - needsPause(cfg *desiredNodeState) (bool, error) - awaitingRollout(cfg *desiredNodeState, currentRevision string) (bool, error) - getRevision(cfg *desiredNodeState) (string, error) - constructNodeResource(cfg *desiredNodeState, owner metav1.OwnerReference) (runtime.Object, error) - delete() error - query() error + populateReference(nodeName string, node v1alpha1.ElasticsearchNode, cluster *v1alpha1.Elasticsearch, roleMap map[v1alpha1.ElasticsearchNodeRole]bool, replicas int32) + state() v1alpha1.ElasticsearchNodeStatus // this will get the current -- used for status + create() error // this will create the node in the case where it is new + update(upgradeStatus *v1alpha1.ElasticsearchNodeStatus) error // this will handle updates + restart(upgradeStatus *v1alpha1.ElasticsearchNodeStatus) + name() string + updateReference(node NodeTypeInterface) } // NodeTypeFactory is a factory to construct either statefulset or deployment type NodeTypeFactory func(name, namespace string) NodeTypeInterface -// NewDeploymentNode constructs deploymentNode struct for data nodes -func NewDeploymentNode(name, namespace string) NodeTypeInterface { - depl := apps.Deployment{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, +// this can potentially return a list if we have replicas > 1 for a data node +func GetNodeTypeInterface(nodeIndex int, node v1alpha1.ElasticsearchNode, cluster *v1alpha1.Elasticsearch) []NodeTypeInterface { + + nodes 
:= []NodeTypeInterface{} + + roleMap := getNodeRoleMap(node) + + // common spec => cluster.Spec.Spec + nodeName := fmt.Sprintf("%s-%s", cluster.Name, getNodeSuffix(nodeIndex, roleMap)) + + // if we have a data node then we need to create one deployment per replica + if isDataNode(node) { + // for loop from 1 to replica as replicaIndex + // it is 1 instead of 0 because of legacy code + for replicaIndex := int32(1); replicaIndex <= node.NodeCount; replicaIndex++ { + dataNodeName := addDataNodeSuffix(nodeName, replicaIndex) + node := newDeploymentNode(dataNodeName, node, cluster, roleMap) + nodes = append(nodes, node) + } + } else { + node := newStatefulSetNode(nodeName, node, cluster, roleMap) + nodes = append(nodes, node) } - node := deploymentNode{resource: depl} - return &node + + return nodes } -// NewStatefulSetNode constructs statefulSetNode struct for non-data nodes -func NewStatefulSetNode(name, namespace string) NodeTypeInterface { - depl := apps.StatefulSet{ - TypeMeta: metav1.TypeMeta{ - Kind: "StatefulSet", - APIVersion: "apps/v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, +func getNodeSuffix(nodeIndex int, roleMap map[v1alpha1.ElasticsearchNodeRole]bool) string { + + suffix := "" + if roleMap[v1alpha1.ElasticsearchRoleClient] { + suffix = fmt.Sprintf("%s%s", suffix, "client") } - ss := statefulSetNode{resource: depl} - return &ss + + if roleMap[v1alpha1.ElasticsearchRoleData] { + suffix = fmt.Sprintf("%s%s", suffix, "data") + } + + if roleMap[v1alpha1.ElasticsearchRoleMaster] { + suffix = fmt.Sprintf("%s%s", suffix, "master") + } + + return fmt.Sprintf("%s-%d", suffix, nodeIndex) +} + +func addDataNodeSuffix(nodeName string, replicaNumber int32) string { + return fmt.Sprintf("%s-%d", nodeName, replicaNumber) +} + +// newDeploymentNode constructs deploymentNode struct for data nodes +func newDeploymentNode(nodeName string, node v1alpha1.ElasticsearchNode, cluster *v1alpha1.Elasticsearch, roleMap map[v1alpha1.ElasticsearchNodeRole]bool) NodeTypeInterface { + deploymentNode := deploymentNode{} + + deploymentNode.populateReference(nodeName, node, cluster, roleMap, int32(1)) + + return &deploymentNode +} + +// newStatefulSetNode constructs statefulSetNode struct for non-data nodes +func newStatefulSetNode(nodeName string, node v1alpha1.ElasticsearchNode, cluster *v1alpha1.Elasticsearch, roleMap map[v1alpha1.ElasticsearchNodeRole]bool) NodeTypeInterface { + statefulSetNode := statefulSetNode{} + + statefulSetNode.populateReference(nodeName, node, cluster, roleMap, node.NodeCount) + + return &statefulSetNode +} + +func containsNodeTypeInterface(node NodeTypeInterface, list []NodeTypeInterface) (int, bool) { + for index, nodeTypeInterface := range list { + if nodeTypeInterface.name() == node.name() { + return index, true + } + } + + return -1, false } diff --git a/pkg/k8shandler/persistentvolumeclaims.go b/pkg/k8shandler/persistentvolumeclaims.go index 3e64b2b89..f94253967 100644 --- a/pkg/k8shandler/persistentvolumeclaims.go +++ b/pkg/k8shandler/persistentvolumeclaims.go @@ -3,11 +3,11 @@ package k8shandler import ( "fmt" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/operator-framework/operator-sdk/pkg/sdk" "github.com/sirupsen/logrus" + "k8s.io/api/core/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func createOrUpdatePersistentVolumeClaim(pvc v1.PersistentVolumeClaimSpec, newName string, namespace string) error { diff --git a/pkg/k8shandler/prometheus_rule.go b/pkg/k8shandler/prometheus_rule.go 
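// Illustrative sketch, not part of this patch: how the nodetypefactory helpers above compose
// workload names. getNodeSuffix concatenates the roles in client/data/master order and appends the
// node index, and addDataNodeSuffix appends the replica index (starting at 1) for data nodes. The
// function below is a hypothetical standalone restatement, not operator API; it assumes the fmt
// import already present in nodetypefactory.go:
func exampleNodeNames(clusterName, roleSuffix string, nodeIndex int, dataReplicas int32) []string {
	// e.g. clusterName "elasticsearch", roleSuffix "clientdata", nodeIndex 1
	base := fmt.Sprintf("%s-%s-%d", clusterName, roleSuffix, nodeIndex)
	if dataReplicas < 1 {
		// non-data node: a single StatefulSet named after the base, e.g. elasticsearch-master-2
		return []string{base}
	}
	names := []string{}
	for replica := int32(1); replica <= dataReplicas; replica++ {
		// data node: one Deployment per replica, e.g. elasticsearch-clientdata-1-1 ... -1-3
		names = append(names, fmt.Sprintf("%s-%d", base, replica))
	}
	return names
}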
index f10cddc95..2eee69837 100644 --- a/pkg/k8shandler/prometheus_rule.go +++ b/pkg/k8shandler/prometheus_rule.go @@ -6,11 +6,12 @@ import ( "io/ioutil" "os" - monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1" - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" "github.com/openshift/elasticsearch-operator/pkg/utils" "github.com/operator-framework/operator-sdk/pkg/sdk" "k8s.io/apimachinery/pkg/api/errors" + + monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1" + v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sYAML "k8s.io/apimachinery/pkg/util/yaml" ) @@ -22,7 +23,7 @@ const ( func CreateOrUpdatePrometheusRules(dpl *v1alpha1.Elasticsearch) error { ruleName := fmt.Sprintf("%s-%s", dpl.Name, "prometheus-rules") - owner := asOwner(dpl) + owner := getOwnerRef(dpl) promRule, err := buildPrometheusRule(ruleName, dpl.Namespace, dpl.Labels) if err != nil { diff --git a/pkg/k8shandler/rbac.go b/pkg/k8shandler/rbac.go index 40ee75788..0f7ce5f72 100644 --- a/pkg/k8shandler/rbac.go +++ b/pkg/k8shandler/rbac.go @@ -3,31 +3,32 @@ package k8shandler import ( "fmt" - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/openshift/elasticsearch-operator/pkg/utils" "github.com/operator-framework/operator-sdk/pkg/sdk" "github.com/sirupsen/logrus" + "k8s.io/client-go/util/retry" + + v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" rbac "k8s.io/api/rbac/v1" errors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/util/retry" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func CreateOrUpdateRBAC(dpl *v1alpha1.Elasticsearch) error { - owner := asOwner(dpl) + owner := getOwnerRef(dpl) // elasticsearch RBAC - elasticsearchRole := utils.NewClusterRole( + elasticsearchRole := newClusterRole( "elasticsearch-metrics", - utils.NewPolicyRules( - utils.NewPolicyRule( + newPolicyRules( + newPolicyRule( []string{""}, []string{"pods", "services", "endpoints"}, []string{}, []string{"list", "watch"}, []string{}, ), - utils.NewPolicyRule( + newPolicyRule( []string{}, []string{}, []string{}, @@ -43,17 +44,17 @@ func CreateOrUpdateRBAC(dpl *v1alpha1.Elasticsearch) error { return err } - subject := utils.NewSubject( + subject := newSubject( "ServiceAccount", "prometheus-k8s", "openshift-monitoring", ) subject.APIGroup = "" - elasticsearchRoleBinding := utils.NewClusterRoleBinding( + elasticsearchRoleBinding := newClusterRoleBinding( "elasticsearch-metrics", "elasticsearch-metrics", - utils.NewSubjects( + newSubjects( subject, ), ) @@ -65,17 +66,17 @@ func CreateOrUpdateRBAC(dpl *v1alpha1.Elasticsearch) error { } // proxy RBAC - proxyRole := utils.NewClusterRole( + proxyRole := newClusterRole( "elasticsearch-proxy", - utils.NewPolicyRules( - utils.NewPolicyRule( + newPolicyRules( + newPolicyRule( []string{"authentication.k8s.io"}, []string{"tokenreviews"}, []string{}, []string{"create"}, []string{}, ), - utils.NewPolicyRule( + newPolicyRule( []string{"authorization.k8s.io"}, []string{"subjectaccessreviews"}, []string{}, @@ -91,17 +92,17 @@ func CreateOrUpdateRBAC(dpl *v1alpha1.Elasticsearch) error { return err } - subject = utils.NewSubject( + subject = newSubject( "ServiceAccount", - "elasticsearch", + dpl.Name, dpl.Namespace, ) subject.APIGroup = "" - proxyRoleBinding := utils.NewClusterRoleBinding( + proxyRoleBinding := newClusterRoleBinding( 
"elasticsearch-proxy", "elasticsearch-proxy", - utils.NewSubjects( + newSubjects( subject, ), ) @@ -152,3 +153,61 @@ func createOrUpdateClusterRoleBinding(roleBinding *rbac.ClusterRoleBinding) erro } return nil } + +func newPolicyRule(apiGroups, resources, resourceNames, verbs, urls []string) rbac.PolicyRule { + return rbac.PolicyRule{ + APIGroups: apiGroups, + Resources: resources, + ResourceNames: resourceNames, + Verbs: verbs, + NonResourceURLs: urls, + } +} + +func newPolicyRules(rules ...rbac.PolicyRule) []rbac.PolicyRule { + return rules +} + +func newClusterRole(roleName string, rules []rbac.PolicyRule) *rbac.ClusterRole { + return &rbac.ClusterRole{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRole", + APIVersion: rbac.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: roleName, + }, + Rules: rules, + } +} + +func newSubject(kind, name, namespace string) rbac.Subject { + return rbac.Subject{ + Kind: kind, + Name: name, + Namespace: namespace, + APIGroup: rbac.GroupName, + } +} + +func newSubjects(subjects ...rbac.Subject) []rbac.Subject { + return subjects +} + +func newClusterRoleBinding(bindingName, roleName string, subjects []rbac.Subject) *rbac.ClusterRoleBinding { + return &rbac.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterRoleBinding", + APIVersion: rbac.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: bindingName, + }, + RoleRef: rbac.RoleRef{ + Kind: "ClusterRole", + Name: roleName, + APIGroup: rbac.GroupName, + }, + Subjects: subjects, + } +} diff --git a/pkg/k8shandler/render_config.go b/pkg/k8shandler/render_config.go deleted file mode 100644 index 54b745d2c..000000000 --- a/pkg/k8shandler/render_config.go +++ /dev/null @@ -1,69 +0,0 @@ -package k8shandler - -import ( - "html/template" - "io" -) - -// esYmlStruct is used to render esYmlTmpl to a proper elasticsearch.yml format -type esYmlStruct struct { - KibanaIndexMode string - EsUnicastHost string - NodeQuorum string - RecoverExpectedShards string -} - -func renderEsYml(w io.Writer, kibanaIndexMode, esUnicastHost, nodeQuorum, recoverExpectedShards string) error { - t := template.New("elasticsearch.yml") - config := esYmlTmpl - t, err := t.Parse(config) - if err != nil { - return err - } - esy := esYmlStruct{ - KibanaIndexMode: kibanaIndexMode, - EsUnicastHost: esUnicastHost, - NodeQuorum: nodeQuorum, - RecoverExpectedShards: recoverExpectedShards, - } - - return t.Execute(w, esy) -} - -type log4j2PropertiesStruct struct { - RootLogger string -} - -func renderLog4j2Properties(w io.Writer, rootLogger string) error { - t := template.New("log4j2.properties") - t, err := t.Parse(log4j2PropertiesTmpl) - if err != nil { - return err - } - - log4jProp := log4j2PropertiesStruct{ - RootLogger: rootLogger, - } - - return t.Execute(w, log4jProp) -} - -type indexSettingsStruct struct { - PrimaryShards string - ReplicaShards string -} - -func renderIndexSettings(w io.Writer, primaryShardsCount, replicaShardsCount string) error { - t := template.New("index_settings") - t, err := t.Parse(indexSettingsTmpl) - if err != nil { - return err - } - - indexSettings := indexSettingsStruct{ - PrimaryShards: primaryShardsCount, - ReplicaShards: replicaShardsCount, - } - - return t.Execute(w, indexSettings) -} diff --git a/pkg/k8shandler/secret.go b/pkg/k8shandler/secret.go new file mode 100644 index 000000000..bc0a27d00 --- /dev/null +++ b/pkg/k8shandler/secret.go @@ -0,0 +1,52 @@ +package k8shandler + +import ( + "crypto/sha256" + "fmt" + + 
"github.com/operator-framework/operator-sdk/pkg/sdk" + "k8s.io/api/core/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func getSecret(secretName, namespace string) *v1.Secret { + secret := v1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: v1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + } + + err := sdk.Get(&secret) + + if err != nil { + // check if doesn't exist + } + + return &secret +} + +func getSecretDataHash(secretName, namespace string) string { + hash := "" + + secret := getSecret(secretName, namespace) + + dataHashes := make(map[string][32]byte) + + for key, data := range secret.Data { + dataHashes[key] = sha256.Sum256([]byte(data)) + } + + sortedKeys := sortDataHashKeys(dataHashes) + + for _, key := range sortedKeys { + hash = fmt.Sprintf("%s%s", hash, dataHashes[key]) + } + + return hash +} diff --git a/pkg/k8shandler/service.go b/pkg/k8shandler/service.go new file mode 100644 index 000000000..9221e4cbb --- /dev/null +++ b/pkg/k8shandler/service.go @@ -0,0 +1,145 @@ +package k8shandler + +import ( + "fmt" + + "github.com/operator-framework/operator-sdk/pkg/sdk" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/util/retry" + + v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CreateOrUpdateServices ensures the existence of the services for Elasticsearch cluster +func CreateOrUpdateServices(dpl *v1alpha1.Elasticsearch) error { + ownerRef := getOwnerRef(dpl) + annotations := make(map[string]string) + + err := createOrUpdateService( + fmt.Sprintf("%s-%s", dpl.Name, "cluster"), + dpl.Namespace, + dpl.Name, + "cluster", + 9300, + selectorForES("es-node-master", dpl.Name), + annotations, + true, + ownerRef, + ) + if err != nil { + return fmt.Errorf("Failure creating service %v", err) + } + + err = createOrUpdateService( + dpl.Name, + dpl.Namespace, + dpl.Name, + "restapi", + 9200, + selectorForES("es-node-client", dpl.Name), + annotations, + false, + ownerRef, + ) + if err != nil { + return fmt.Errorf("Failure creating service %v", err) + } + + annotations["service.alpha.openshift.io/serving-cert-secret-name"] = fmt.Sprintf("%s-%s", dpl.Name, "metrics") + err = createOrUpdateService( + fmt.Sprintf("%s-%s", dpl.Name, "metrics"), + dpl.Namespace, + dpl.Name, + "metrics", + 9200, + selectorForES("es-node-client", dpl.Name), + annotations, + false, + ownerRef, + ) + if err != nil { + return fmt.Errorf("Failure creating service %v", err) + } + return nil +} + +func createOrUpdateService(serviceName, namespace, clusterName, targetPortName string, port int32, selector, annotations map[string]string, publishNotReady bool, owner metav1.OwnerReference) error { + + labels := appendDefaultLabel(clusterName, map[string]string{}) + + service := newService( + serviceName, + namespace, + clusterName, + targetPortName, + port, + selector, + annotations, + labels, + publishNotReady, + ) + addOwnerRefToObject(service, owner) + + err := sdk.Create(service) + if err != nil { + if !errors.IsAlreadyExists(err) { + return fmt.Errorf("Failure constructing %v service: %v", service.Name, err) + } + + current := service.DeepCopy() + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + if err = sdk.Get(current); err != nil { + if errors.IsNotFound(err) { + // the object doesn't exist -- it was likely culled + // recreate it on 
the next time through if necessary + return nil + } + return fmt.Errorf("Failed to get %v service: %v", service.Name, err) + } + + current.Spec.Ports = service.Spec.Ports + current.Spec.Selector = service.Spec.Selector + current.Spec.PublishNotReadyAddresses = service.Spec.PublishNotReadyAddresses + current.Labels = service.Labels + if err = sdk.Update(current); err != nil { + return err + } + return nil + }) + if retryErr != nil { + return retryErr + } + } + + return nil +} + +func newService(serviceName, namespace, clusterName, targetPortName string, port int32, selector, annotations, labels map[string]string, publishNotReady bool) *v1.Service { + return &v1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: v1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: namespace, + Labels: labels, + Annotations: annotations, + }, + Spec: v1.ServiceSpec{ + Selector: selector, + Ports: []v1.ServicePort{ + v1.ServicePort{ + Port: port, + Protocol: "TCP", + TargetPort: intstr.FromString(targetPortName), + Name: clusterName, + }, + }, + PublishNotReadyAddresses: publishNotReady, + }, + } +} diff --git a/pkg/k8shandler/service_monitor.go b/pkg/k8shandler/service_monitor.go index b920ac254..292638928 100644 --- a/pkg/k8shandler/service_monitor.go +++ b/pkg/k8shandler/service_monitor.go @@ -3,10 +3,11 @@ package k8shandler import ( "fmt" - monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1" - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" "github.com/operator-framework/operator-sdk/pkg/sdk" "k8s.io/apimachinery/pkg/api/errors" + + monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1" + v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -17,7 +18,7 @@ const ( // CreateOrUpdateServiceMonitors ensures the existence of ServiceMonitors for Elasticsearch cluster func CreateOrUpdateServiceMonitors(dpl *v1alpha1.Elasticsearch) error { serviceMonitorName := fmt.Sprintf("monitor-%s-%s", dpl.Name, "cluster") - owner := asOwner(dpl) + owner := getOwnerRef(dpl) labelsWithDefault := appendDefaultLabel(dpl.Name, dpl.Labels) diff --git a/pkg/k8shandler/serviceaccount.go b/pkg/k8shandler/serviceaccount.go index c18c51839..67b99f0f7 100644 --- a/pkg/k8shandler/serviceaccount.go +++ b/pkg/k8shandler/serviceaccount.go @@ -3,46 +3,45 @@ package k8shandler import ( "fmt" + "github.com/operator-framework/operator-sdk/pkg/sdk" "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/api/errors" v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/operator-framework/operator-sdk/pkg/sdk" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CreateOrUpdateServiceAccount ensures the existence of the serviceaccount for Elasticsearch cluster -func CreateOrUpdateServiceAccount(dpl *v1alpha1.Elasticsearch) (string, error) { - serviceAccountName := v1alpha1.ServiceAccountName +func CreateOrUpdateServiceAccount(dpl *v1alpha1.Elasticsearch) (err error) { - owner := asOwner(dpl) - - err := createOrUpdateServiceAccount(serviceAccountName, dpl.Namespace, owner) + err = createOrUpdateServiceAccount(dpl.Name, dpl.Namespace, getOwnerRef(dpl)) if err != nil { - return serviceAccountName, fmt.Errorf("Failure creating ServiceAccount %v", err) + return fmt.Errorf("Failure creating ServiceAccount %v", err) } - return 
serviceAccountName, nil + return nil } -func createOrUpdateServiceAccount(serviceAccountName, namespace string, owner metav1.OwnerReference) error { - elasticsearchSA := serviceAccount(serviceAccountName, namespace) - addOwnerRefToObject(elasticsearchSA, owner) - err := sdk.Get(elasticsearchSA) +func createOrUpdateServiceAccount(serviceAccountName, namespace string, ownerRef metav1.OwnerReference) error { + serviceAccount := newServiceAccount(serviceAccountName, namespace) + addOwnerRefToObject(serviceAccount, ownerRef) + + err := sdk.Create(serviceAccount) if err != nil { - err = sdk.Create(elasticsearchSA) - if err != nil { + if !errors.IsAlreadyExists(err) { return fmt.Errorf("Failure constructing serviceaccount for the Elasticsearch cluster: %v", err) } } + return nil } // serviceAccount returns a v1.ServiceAccount object -func serviceAccount(serviceAccountName string, namespace string) *v1.ServiceAccount { +func newServiceAccount(serviceAccountName string, namespace string) *v1.ServiceAccount { return &v1.ServiceAccount{ TypeMeta: metav1.TypeMeta{ Kind: "ServiceAccount", - APIVersion: "v1", + APIVersion: v1.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Name: serviceAccountName, diff --git a/pkg/k8shandler/services.go b/pkg/k8shandler/services.go deleted file mode 100644 index 613bb6c46..000000000 --- a/pkg/k8shandler/services.go +++ /dev/null @@ -1,110 +0,0 @@ -package k8shandler - -import ( - "fmt" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/operator-framework/operator-sdk/pkg/sdk" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// CreateOrUpdateServices ensures the existence of the services for Elasticsearch cluster -func CreateOrUpdateServices(dpl *v1alpha1.Elasticsearch) error { - elasticsearchClusterSvcName := fmt.Sprintf("%s-%s", dpl.Name, "cluster") - elasticsearchRestSvcName := dpl.Name - metricsSvcName := fmt.Sprintf("%s-%s", dpl.Name, "metrics") - owner := asOwner(dpl) - - labelsWithDefault := appendDefaultLabel(dpl.Name, dpl.Labels) - - err := createOrUpdateService(elasticsearchClusterSvcName, dpl.Namespace, dpl.Name, - "cluster", 9300, selectorForES("es-node-master", dpl.Name), map[string]string{}, - labelsWithDefault, true, owner) - - if err != nil { - return fmt.Errorf("Failure creating service %v", err) - } - - err = createOrUpdateService(elasticsearchRestSvcName, dpl.Namespace, dpl.Name, - "restapi", 9200, selectorForES("es-node-client", dpl.Name), map[string]string{}, - labelsWithDefault, false, owner) - - if err != nil { - return fmt.Errorf("Failure creating service %v", err) - } - - annotations := map[string]string{ - "service.alpha.openshift.io/serving-cert-secret-name": metricsSvcName, - } - - err = createOrUpdateService(metricsSvcName, dpl.Namespace, metricsSvcName, - "metrics", 60000, selectorForES("es-node-client", dpl.Name), annotations, - labelsWithDefault, false, owner) - - if err != nil { - return fmt.Errorf("Failure creating service %v", err) - } - - return nil -} - -func createOrUpdateService(serviceName, namespace, clusterName, targetPortName string, port int32, selector, annotations, labels map[string]string, publishNotReady bool, owner metav1.OwnerReference) error { - - elasticsearchSvc := createService(serviceName, namespace, clusterName, - targetPortName, port, selector, annotations, - labels, publishNotReady) - - addOwnerRefToObject(elasticsearchSvc, owner) - err 
:= sdk.Create(elasticsearchSvc) - if err != nil && !errors.IsAlreadyExists(err) { - return fmt.Errorf("Failure constructing Elasticsearch service: %v", err) - } else if errors.IsAlreadyExists(err) { - // Get existing service to check if it is same as what we want - existingSvc := service(serviceName, namespace) - err = sdk.Get(existingSvc) - if err != nil { - return fmt.Errorf("Unable to get Elasticsearch cluster service: %v", err) - } - - // TODO: Compare existing service labels, selectors and port - // TODO: use retry.RetryOnConflict for Updates - } - return nil -} - -func createService(serviceName, namespace, clusterName, targetPortName string, port int32, selector, annotations, labels map[string]string, publishNotReady bool) *v1.Service { - svc := service(serviceName, namespace) - svc.Annotations = annotations - svc.Labels = labels - svc.Spec = v1.ServiceSpec{ - Selector: selector, - Ports: []v1.ServicePort{ - v1.ServicePort{ - Port: port, - Protocol: "TCP", - TargetPort: intstr.FromString(targetPortName), - Name: clusterName, - }, - }, - PublishNotReadyAddresses: publishNotReady, - } - return svc -} - -// service returns a v1.Service object -func service(serviceName string, namespace string) *v1.Service { - return &v1.Service{ - TypeMeta: metav1.TypeMeta{ - Kind: "Service", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: namespace, - }, - } -} diff --git a/pkg/k8shandler/statefulset.go b/pkg/k8shandler/statefulset.go index 7a7728bca..963861af7 100644 --- a/pkg/k8shandler/statefulset.go +++ b/pkg/k8shandler/statefulset.go @@ -2,98 +2,491 @@ package k8shandler import ( "fmt" + "time" "github.com/operator-framework/operator-sdk/pkg/sdk" - apps "k8s.io/api/apps/v1" + "github.com/sirupsen/logrus" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + + v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" + apps "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ) type statefulSetNode struct { - resource apps.StatefulSet + self apps.StatefulSet + // prior hash for configmap content + configmapHash string + // prior hash for secret content + secretHash string + + clusterName string + clusterSize int32 + priorReplicaCount int32 } -func (node *statefulSetNode) getResource() runtime.Object { - return &node.resource +func (statefulSetNode *statefulSetNode) populateReference(nodeName string, node v1alpha1.ElasticsearchNode, cluster *v1alpha1.Elasticsearch, roleMap map[v1alpha1.ElasticsearchNodeRole]bool, replicas int32) { + + labels := newLabels(cluster.Name, roleMap) + + statefulSet := apps.StatefulSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "StatefulSet", + APIVersion: apps.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: nodeName, + Namespace: cluster.Namespace, + Labels: labels, + }, + } + + partition := int32(0) + + statefulSet.Spec = apps.StatefulSetSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: newLabelSelector(cluster.Name, roleMap), + }, + Template: newPodTemplateSpec(nodeName, cluster.Name, cluster.Namespace, node, cluster.Spec.Spec, labels, roleMap), + UpdateStrategy: apps.StatefulSetUpdateStrategy{ + Type: apps.RollingUpdateStatefulSetStrategyType, + RollingUpdate: &apps.RollingUpdateStatefulSetStrategy{ + Partition: &partition, + }, + }, + } + statefulSet.Spec.Template.Spec.Containers[0].ReadinessProbe = nil + + 
addOwnerRefToObject(&statefulSet, getOwnerRef(cluster))
+
+	statefulSetNode.self = statefulSet
+	statefulSetNode.clusterName = cluster.Name
 }
 
-func (node *statefulSetNode) getRevision(cfg *desiredNodeState) (string, error) {
-	return "", nil
+func (current *statefulSetNode) updateReference(desired NodeTypeInterface) {
+	current.self = desired.(*statefulSetNode).self
 }
 
-func (node *statefulSetNode) awaitingRollout(cfg *desiredNodeState, currentRevision string) (bool, error) {
-	return false, nil
+func (node *statefulSetNode) state() v1alpha1.ElasticsearchNodeStatus {
+	rolloutForReload := v1.ConditionFalse
+	rolloutForUpdate := v1.ConditionFalse
+
+	// see if we need to update the deployment object
+	if node.isChanged() {
+		rolloutForUpdate = v1.ConditionTrue
+	}
+
+	// check if the configmapHash changed
+	/*newConfigmapHash := getConfigmapDataHash(node.clusterName, node.self.Namespace)
+	if newConfigmapHash != node.configmapHash {
+		rolloutForReload = v1.ConditionTrue
+	}*/
+
+	// check if the secretHash changed
+	newSecretHash := getSecretDataHash(node.clusterName, node.self.Namespace)
+	if newSecretHash != node.secretHash {
+		rolloutForReload = v1.ConditionTrue
+	}
+
+	return v1alpha1.ElasticsearchNodeStatus{
+		StatefulSetName: node.self.Name,
+		UpgradeStatus: v1alpha1.ElasticsearchNodeUpgradeStatus{
+			ScheduledForUpgrade: rolloutForUpdate,
+			ScheduledForRedeploy: rolloutForReload,
+		},
+	}
+}
+
+func (node *statefulSetNode) name() string {
+	return node.self.Name
 }
 
-func (node *statefulSetNode) needsPause(cfg *desiredNodeState) (bool, error) {
-	return false, nil
+func (node *statefulSetNode) waitForNodeRejoinCluster() (error, bool) {
+	err := wait.Poll(time.Second*1, time.Second*60, func() (done bool, err error) {
+		clusterSize, getErr := GetClusterNodeCount(node.clusterName, node.self.Namespace)
+		// check getErr here: the closure's named return "err" is still nil at this point
+		if getErr != nil {
+			logrus.Warnf("Unable to get cluster size waiting for %v to rejoin cluster", node.name())
+			return false, getErr
+		}
+
+		return (node.clusterSize <= clusterSize), nil
+	})
+
+	return err, (err == nil)
 }
 
-func (node *statefulSetNode) isDifferent(cfg *desiredNodeState) (bool, error) {
-	// Check replicas number
-	if cfg.getReplicas() != *node.resource.Spec.Replicas {
-		return true, nil
+func (node *statefulSetNode) waitForNodeLeaveCluster() (error, bool) {
+	err := wait.Poll(time.Second*1, time.Second*60, func() (done bool, err error) {
+		clusterSize, getErr := GetClusterNodeCount(node.clusterName, node.self.Namespace)
+		// check getErr here: the closure's named return "err" is still nil at this point
+		if getErr != nil {
+			logrus.Warnf("Unable to get cluster size waiting for %v to leave cluster", node.name())
+			return false, getErr
+		}
+
+		return (node.clusterSize > clusterSize), nil
+	})
+
+	return err, (err == nil)
+}
+
+func (node *statefulSetNode) setPartition(partitions int32) error {
+	nretries := -1
+	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		nretries++
+		if getErr := sdk.Get(&node.self); getErr != nil {
+			logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, getErr)
+			return getErr
+		}
+
+		if *node.self.Spec.UpdateStrategy.RollingUpdate.Partition == partitions {
+			return nil
+		}
+
+		node.self.Spec.UpdateStrategy.RollingUpdate.Partition = &partitions
+
+		if updateErr := sdk.Update(&node.self); updateErr != nil {
+			logrus.Debugf("Failed to update node resource %v: %v", node.self.Name, updateErr)
+			return updateErr
+		}
+		return nil
+	})
+	if retryErr != nil {
+		return fmt.Errorf("Error: could not update Elasticsearch node %v after %v retries: %v", node.self.Name, nretries, retryErr)
 	}
 
-	// Check if 
the Variables are the desired ones
+	return nil
+}
 
-	return false, nil
+func (node *statefulSetNode) partition() (error, int32) {
+	if err := sdk.Get(&node.self); err != nil {
+		logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, err)
+		return err, -1
+	}
+
+	return nil, *node.self.Spec.UpdateStrategy.RollingUpdate.Partition
 }
 
-// isUpdateNeeded returns true if update is needed
-func (node *statefulSetNode) isUpdateNeeded(cfg *desiredNodeState) bool {
-	// This operator doesn't update nodes managed by StatefulSets in rolling fashion
-	return false
+func (node *statefulSetNode) setReplicaCount(replicas int32) error {
+	nretries := -1
+	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		nretries++
+		if getErr := sdk.Get(&node.self); getErr != nil {
+			logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, getErr)
+			return getErr
+		}
+
+		if *node.self.Spec.Replicas == replicas {
+			return nil
+		}
+
+		node.self.Spec.Replicas = &replicas
+
+		if updateErr := sdk.Update(&node.self); updateErr != nil {
+			logrus.Debugf("Failed to update node resource %v: %v", node.self.Name, updateErr)
+			return updateErr
+		}
+		return nil
+	})
+	if retryErr != nil {
+		return fmt.Errorf("Error: could not update Elasticsearch node %v after %v retries: %v", node.self.Name, nretries, retryErr)
+	}
+
+	return nil
 }
 
-func (node *statefulSetNode) query() error {
-	err := sdk.Get(&node.resource)
-	return err
+func (node *statefulSetNode) replicaCount() (error, int32) {
+	if err := sdk.Get(&node.self); err != nil {
+		logrus.Debugf("Could not get Elasticsearch node resource %v: %v", node.self.Name, err)
+		return err, -1
+	}
+
+	return nil, node.self.Status.Replicas
 }
 
-// constructNodeStatefulSet creates the StatefulSet for the node
-func (node *statefulSetNode) constructNodeResource(cfg *desiredNodeState, owner metav1.OwnerReference) (runtime.Object, error) {
+func (node *statefulSetNode) restart(upgradeStatus *v1alpha1.ElasticsearchNodeStatus) {
 
-	replicas := cfg.getReplicas()
+	if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue {
+		if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace); status != "green" {
+			logrus.Infof("Waiting for cluster to be fully recovered before restarting %v: %v / green", node.name(), status)
+			return
+		}
 
-	statefulSet := node.resource
-	//statefulSet(cfg.DeployName, node.resource.ObjectMeta.Namespace)
-	statefulSet.ObjectMeta.Labels = cfg.getLabels()
+		size, err := GetClusterNodeCount(node.clusterName, node.self.Namespace)
+		if err != nil {
+			logrus.Warnf("Unable to get cluster size prior to restart for %v", node.name())
+			return
+		}
+		node.clusterSize = size
 
-	podTemplate, err := cfg.constructPodTemplateSpec()
-	if err != nil {
-		return nil, err
+		err, replicas := node.replicaCount()
+		if err != nil {
+			logrus.Warnf("Unable to get number of replicas prior to restart for %v", node.name())
+			return
+		}
+
+		node.setPartition(replicas)
+		upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionTrue
 	}
 
-	statefulSet.Spec = apps.StatefulSetSpec{
-		Replicas: &replicas,
-		ServiceName: cfg.DeployName,
-		Selector: &metav1.LabelSelector{
-			MatchLabels: cfg.getLabels(),
-		},
-		Template: podTemplate,
+	if upgradeStatus.UpgradeStatus.UpgradePhase == "" ||
+		upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.ControllerUpdated {
+
+		// nothing to do here -- just maintaining a framework structure
+
+		upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.NodeRestarting
 	}
 
-	pvc, ok, err := cfg.generateMasterPVC()
+	
if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.NodeRestarting { + + err, ordinal := node.partition() + if err != nil { + logrus.Infof("Unable to get node ordinal value: %v", err) + return + } + + for index := ordinal; index > 0; index-- { + // get podName based on ordinal index and node.name() + podName := fmt.Sprintf("%v-%v", node.name(), index) + + // make sure we have all nodes in the cluster first -- always + if err, _ := node.waitForNodeRejoinCluster(); err != nil { + logrus.Infof("Timed out waiting for %v pods to rejoin cluster", node.name()) + return + } + + // delete the pod + if err := DeletePod(podName, node.self.Namespace); err != nil { + logrus.Infof("Unable to delete pod %v for restart", podName) + return + } + + // wait for node to leave cluster + if err, _ := node.waitForNodeLeaveCluster(); err != nil { + logrus.Infof("Timed out waiting for %v to leave the cluster", podName) + return + } + + // used for tracking in case of timeout + node.setPartition(index - 1) + } + + if err, _ := node.waitForNodeRejoinCluster(); err != nil { + logrus.Infof("Timed out waiting for %v pods to rejoin cluster", node.name()) + return + } + + node.refreshHashes() + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.RecoveringData + } + + if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.RecoveringData { + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.ControllerUpdated + upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionFalse + } +} + +func (node *statefulSetNode) create() error { + err := sdk.Create(&node.self) if err != nil { - return &statefulSet, err + if !errors.IsAlreadyExists(err) { + return fmt.Errorf("Could not create node resource: %v", err) + } else { + node.scale() + } } - if ok { - statefulSet.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{ - pvc, + + // update the hashmaps + node.configmapHash = getConfigmapDataHash(node.clusterName, node.self.Namespace) + node.secretHash = getSecretDataHash(node.clusterName, node.self.Namespace) + + return nil +} + +func (node *statefulSetNode) update(upgradeStatus *v1alpha1.ElasticsearchNodeStatus) error { + if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue { + if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace); status != "green" { + logrus.Infof("Waiting for cluster to be fully recovered before restarting %v: %v / green", node.name(), status) + return fmt.Errorf("Waiting for cluster to be fully recovered before restarting %v: %v / green", node.name(), status) + } + + size, err := GetClusterNodeCount(node.clusterName, node.self.Namespace) + if err != nil { + logrus.Warnf("Unable to get cluster size prior to restart for %v", node.name()) } + node.clusterSize = size + + err, replicas := node.replicaCount() + if err != nil { + logrus.Warnf("Unable to get number of replicas prior to restart for %v", node.name()) + return fmt.Errorf("Unable to get number of replicas prior to restart for %v", node.name()) + } + + node.setPartition(replicas) + upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionTrue + } + + if upgradeStatus.UpgradeStatus.UpgradePhase == "" || + upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.ControllerUpdated { + + // see if we need to update the deployment object and verify we have latest to update + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // isChanged() will get the latest revision from the apiserver + // and return false if there is nothing to change and will update the node object if required + if node.isChanged() { + if 
updateErr := sdk.Update(&node.self); updateErr != nil { + logrus.Debugf("Failed to update node resource %v: %v", node.self.Name, updateErr) + return updateErr + } + + return nil + } else { + return nil + } + }) + + if retryErr != nil { + return retryErr + } + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.NodeRestarting } - addOwnerRefToObject(&statefulSet, owner) + if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.NodeRestarting { + + err, ordinal := node.partition() + if err != nil { + logrus.Infof("Unable to get node ordinal value: %v", err) + return err + } + + // start partition at replicas and incrementally update it to 0 + // making sure nodes rejoin between each one + for index := ordinal; index > 0; index-- { + + // make sure we have all nodes in the cluster first -- always + if err, _ := node.waitForNodeRejoinCluster(); err != nil { + logrus.Infof("Timed out waiting for %v to rejoin cluster", node.name()) + return fmt.Errorf("Timed out waiting for %v to rejoin cluster", node.name()) + } - return &statefulSet, nil + // update partition to cause next pod to be updated + node.setPartition(index - 1) + + // wait for the node to leave the cluster + if err, _ := node.waitForNodeLeaveCluster(); err != nil { + logrus.Infof("Timed out waiting for %v to leave the cluster", node.name()) + return fmt.Errorf("Timed out waiting for %v to leave the cluster", node.name()) + } + } + + // this is here again because we need to make sure all nodes have rejoined + // before we move on and say we're done + if err, _ := node.waitForNodeRejoinCluster(); err != nil { + logrus.Infof("Timed out waiting for %v to rejoin cluster", node.name()) + return fmt.Errorf("Timed out waiting for %v to rejoin cluster", node.name()) + } + + node.refreshHashes() + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.RecoveringData + } + + if upgradeStatus.UpgradeStatus.UpgradePhase == v1alpha1.RecoveringData { + + upgradeStatus.UpgradeStatus.UpgradePhase = v1alpha1.ControllerUpdated + upgradeStatus.UpgradeStatus.UnderUpgrade = v1.ConditionFalse + } + + return nil +} + +func (node *statefulSetNode) refreshHashes() { + newConfigmapHash := getConfigmapDataHash(node.clusterName, node.self.Namespace) + if newConfigmapHash != node.configmapHash { + node.configmapHash = newConfigmapHash + } + + newSecretHash := getSecretDataHash(node.clusterName, node.self.Namespace) + if newSecretHash != node.secretHash { + node.secretHash = newSecretHash + } } -func (node *statefulSetNode) delete() error { - err := sdk.Delete(&node.resource) +func (node *statefulSetNode) scale() { + + desired := node.self.DeepCopy() + err := sdk.Get(&node.self) + // error check that it exists, etc if err != nil { - return fmt.Errorf("Unable to delete StatefulSet %v: ", err) + // if it doesn't exist, return true + return } - return nil + + logrus.Infof("Replica for desired: %d ; replica for current: %d", *desired.Spec.Replicas, *node.self.Spec.Replicas) + + if *desired.Spec.Replicas != *node.self.Spec.Replicas { + node.self.Spec.Replicas = desired.Spec.Replicas + logrus.Infof("Resource '%s' has different container replicas than desired", node.self.Name) + + node.setReplicaCount(*node.self.Spec.Replicas) + } +} + +func (node *statefulSetNode) isChanged() bool { + + changed := false + + desired := node.self.DeepCopy() + err := sdk.Get(&node.self) + // error check that it exists, etc + if err != nil { + // if it doesn't exist, return true + return false + } + + // we will only have one container, no need to do range + nodeContainer := 
node.self.Spec.Template.Spec.Containers[0] + desiredContainer := desired.Spec.Template.Spec.Containers[0] + + // check that both exist + + if nodeContainer.Image != desiredContainer.Image { + logrus.Debugf("Resource '%s' has different container image than desired", node.self.Name) + nodeContainer.Image = desiredContainer.Image + changed = true + } + + if desiredContainer.Resources.Limits.Cpu().Cmp(*nodeContainer.Resources.Limits.Cpu()) != 0 { + logrus.Debugf("Resource '%s' has different CPU limit than desired", node.self.Name) + nodeContainer.Resources.Limits[v1.ResourceCPU] = *desiredContainer.Resources.Limits.Cpu() + changed = true + } + // Check memory limits + if desiredContainer.Resources.Limits.Memory().Cmp(*nodeContainer.Resources.Limits.Memory()) != 0 { + logrus.Debugf("Resource '%s' has different Memory limit than desired", node.self.Name) + nodeContainer.Resources.Limits[v1.ResourceMemory] = *desiredContainer.Resources.Limits.Memory() + changed = true + } + // Check CPU requests + if desiredContainer.Resources.Requests.Cpu().Cmp(*nodeContainer.Resources.Requests.Cpu()) != 0 { + logrus.Debugf("Resource '%s' has different CPU Request than desired", node.self.Name) + nodeContainer.Resources.Requests[v1.ResourceCPU] = *desiredContainer.Resources.Requests.Cpu() + changed = true + } + // Check memory requests + if desiredContainer.Resources.Requests.Memory().Cmp(*nodeContainer.Resources.Requests.Memory()) != 0 { + logrus.Debugf("Resource '%s' has different Memory Request than desired", node.self.Name) + nodeContainer.Resources.Requests[v1.ResourceMemory] = *desiredContainer.Resources.Requests.Memory() + changed = true + } + + node.self.Spec.Template.Spec.Containers[0] = nodeContainer + + return changed } diff --git a/pkg/k8shandler/status.go b/pkg/k8shandler/status.go index bfa81d4ee..2b5e8237a 100644 --- a/pkg/k8shandler/status.go +++ b/pkg/k8shandler/status.go @@ -2,158 +2,82 @@ package k8shandler import ( "fmt" + "reflect" - "k8s.io/client-go/util/retry" - - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/openshift/elasticsearch-operator/pkg/utils" "github.com/operator-framework/operator-sdk/pkg/sdk" "github.com/sirupsen/logrus" "k8s.io/api/core/v1" + "k8s.io/client-go/util/retry" + + v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const healthUnknown = "cluster health unknown" +const NOT_FOUND_INDEX = -1 -// UpdateStatus updates the status of Elasticsearch CRD -func (cState *ClusterState) UpdateStatus(dpl *v1alpha1.Elasticsearch) error { - // TODO: only update this when is different from current... 
- nretries := -1 - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - nretries++ - if getErr := sdk.Get(dpl); getErr != nil { - logrus.Debugf("Could not get Elasticsearch %v: %v", dpl.Name, getErr) - return getErr - } - dpl.Status.ClusterHealth = clusterHealth(dpl) - if dpl.Status.ShardAllocationEnabled == "" { - dpl.Status.ShardAllocationEnabled = v1alpha1.ShardAllocationTrue - } +func UpdateClusterStatus(cluster *v1alpha1.Elasticsearch) error { - nodes := []v1alpha1.ElasticsearchNodeStatus{} - for _, node := range cState.Nodes { - nodes = append(nodes, *updateNodeStatus(node, &dpl.Status)) - } - dpl.Status.Nodes = nodes - updateStatusConditions(&dpl.Status) - dpl.Status.Pods = rolePodStateMap(dpl.Namespace, dpl.Name) - if updateErr := sdk.Update(dpl); updateErr != nil { - logrus.Debugf("Failed to update Elasticsearch %s status. Reason: %v. Trying again...", dpl.Name, updateErr) - return updateErr - } - return nil - }) - - if retryErr != nil { - return fmt.Errorf("Error: could not update status for Elasticsearch %v after %v retries: %v", dpl.Name, nretries, retryErr) - } - logrus.Debugf("Updated Elasticsearch %v after %v retries", dpl.Name, nretries) - return nil -} - -func updateNodeStatus(node *nodeState, status *v1alpha1.ElasticsearchStatus) *v1alpha1.ElasticsearchNodeStatus { - if status.Nodes == nil { - status.Nodes = []v1alpha1.ElasticsearchNodeStatus{} - } - - _, nodeStatus := statusExists(node, status) - if nodeStatus == nil { - nodeStatus = &v1alpha1.ElasticsearchNodeStatus{} - nodeStatus.UpgradeStatus = *utils.NodeNormalOperation() - } - if node.Actual.Deployment != nil { - nodeStatus.DeploymentName = node.Actual.Deployment.Name - } - - if node.Actual.ReplicaSet != nil { - nodeStatus.ReplicaSetName = node.Actual.ReplicaSet.Name - } + clusterStatus := cluster.Status.DeepCopy() - if node.Actual.Pod != nil { - nodeStatus.PodName = node.Actual.Pod.Name - nodeStatus.Status = string(node.Actual.Pod.Status.Phase) - } - - if node.Actual.StatefulSet != nil { - nodeStatus.StatefulSetName = node.Actual.StatefulSet.Name - } + health, err := GetClusterHealth(cluster.Name, cluster.Namespace) + if err != nil { + health = healthUnknown + } + clusterStatus.ClusterHealth = health + + allocation, err := GetShardAllocation(cluster.Name, cluster.Namespace) + switch { + case allocation == "none": + clusterStatus.ShardAllocationEnabled = v1alpha1.ShardAllocationNone + case err != nil: + clusterStatus.ShardAllocationEnabled = v1alpha1.ShardAllocationUnknown + default: + clusterStatus.ShardAllocationEnabled = v1alpha1.ShardAllocationAll + } + + clusterStatus.Pods = rolePodStateMap(cluster.Namespace, cluster.Name) + updateStatusConditions(clusterStatus) + + if !reflect.DeepEqual(clusterStatus, cluster.Status) { + nretries := -1 + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + nretries++ + if getErr := sdk.Get(cluster); getErr != nil { + logrus.Debugf("Could not get Elasticsearch %v: %v", cluster.Name, getErr) + return getErr + } - if node.Desired.Roles != nil { - nodeStatus.Roles = node.Desired.Roles - } - return nodeStatus -} + cluster.Status.ClusterHealth = clusterStatus.ClusterHealth + cluster.Status.Conditions = clusterStatus.Conditions + cluster.Status.Pods = clusterStatus.Pods + cluster.Status.ShardAllocationEnabled = clusterStatus.ShardAllocationEnabled -func statusExists(node *nodeState, status *v1alpha1.ElasticsearchStatus) (int, *v1alpha1.ElasticsearchNodeStatus) { - var deploymentName string - if node.Actual.Deployment != nil { - deploymentName = 
node.Actual.Deployment.Name - } - if node.Actual.StatefulSet != nil { - deploymentName = node.Actual.StatefulSet.Name - } - if deploymentName == "" { - return -1, nil - } + if updateErr := sdk.Update(cluster); updateErr != nil { + logrus.Debugf("Failed to update Elasticsearch %s status. Reason: %v. Trying again...", cluster.Name, updateErr) + return updateErr + } + return nil + }) - for index, nodeStatus := range status.Nodes { - if deploymentName == nodeStatus.DeploymentName || - deploymentName == nodeStatus.StatefulSetName { - return index, &nodeStatus + if retryErr != nil { + return fmt.Errorf("Error: could not update status for Elasticsearch %v after %v retries: %v", cluster.Name, nretries, retryErr) } + logrus.Debugf("Updated Elasticsearch %v after %v retries", cluster.Name, nretries) } - return -1, nil -} -func updateStatusConditions(status *v1alpha1.ElasticsearchStatus) { - if status.Conditions == nil { - status.Conditions = make([]v1alpha1.ClusterCondition, 0, 4) - } - if _, condition := utils.GetESNodeCondition(status, v1alpha1.UpdatingSettings); condition == nil { - utils.UpdateUpdatingSettingsCondition(status, v1alpha1.ConditionFalse) - } - if _, condition := utils.GetESNodeCondition(status, v1alpha1.ScalingUp); condition == nil { - utils.UpdateScalingUpCondition(status, v1alpha1.ConditionFalse) - } - if _, condition := utils.GetESNodeCondition(status, v1alpha1.ScalingDown); condition == nil { - utils.UpdateScalingDownCondition(status, v1alpha1.ConditionFalse) - } - if _, condition := utils.GetESNodeCondition(status, v1alpha1.Restarting); condition == nil { - utils.UpdateRestartingCondition(status, v1alpha1.ConditionFalse) - } + return nil } -func clusterHealth(dpl *v1alpha1.Elasticsearch) string { - pods, err := listRunningPods(dpl.Name, dpl.Namespace) - if err != nil { - return healthUnknown - } - - // no running elasticsearch pods were found - if len(pods.Items) == 0 { - return "" - } - - // use arbitrary pod - pod := pods.Items[0] - - clusterHealth, err := utils.ClusterHealth(&pod) - if err != nil { - return healthUnknown - } - - status, present := clusterHealth["status"] - if !present { - logrus.Debug("response from elasticsearch health API did not contain 'status' field") - return healthUnknown - } - - // convert from type interface{} to string - health, ok := status.(string) - if !ok { - return healthUnknown +// if a status doesn't exist, provide a new one +func getNodeStatus(name string, status *v1alpha1.ElasticsearchStatus) (int, *v1alpha1.ElasticsearchNodeStatus) { + for index, status := range status.Nodes { + if status.DeploymentName == name || status.StatefulSetName == name { + return index, &status + } } - return health + return NOT_FOUND_INDEX, &v1alpha1.ElasticsearchNodeStatus{} } func rolePodStateMap(namespace string, clusterName string) map[v1alpha1.ElasticsearchNodeRole]v1alpha1.PodStateMap { @@ -205,3 +129,113 @@ func isPodReady(pod v1.Pod) bool { return true } + +func updateStatusConditions(status *v1alpha1.ElasticsearchStatus) { + if status.Conditions == nil { + status.Conditions = make([]v1alpha1.ClusterCondition, 0, 4) + } + if _, condition := getESNodeCondition(status, v1alpha1.UpdatingSettings); condition == nil { + updateUpdatingSettingsCondition(status, v1.ConditionFalse) + } + if _, condition := getESNodeCondition(status, v1alpha1.ScalingUp); condition == nil { + updateScalingUpCondition(status, v1.ConditionFalse) + } + if _, condition := getESNodeCondition(status, v1alpha1.ScalingDown); condition == nil { + updateScalingDownCondition(status, 
v1.ConditionFalse) + } + if _, condition := getESNodeCondition(status, v1alpha1.Restarting); condition == nil { + updateRestartingCondition(status, v1.ConditionFalse) + } +} + +func getESNodeCondition(status *v1alpha1.ElasticsearchStatus, conditionType v1alpha1.ClusterConditionType) (int, *v1alpha1.ClusterCondition) { + if status == nil { + return -1, nil + } + for i := range status.Conditions { + if status.Conditions[i].Type == conditionType { + return i, &status.Conditions[i] + } + } + return -1, nil +} + +func updateESNodeCondition(status *v1alpha1.ElasticsearchStatus, condition *v1alpha1.ClusterCondition) bool { + condition.LastTransitionTime = metav1.Now() + // Try to find this node condition. + conditionIndex, oldCondition := getESNodeCondition(status, condition.Type) + + if oldCondition == nil { + // We are adding new node condition. + status.Conditions = append(status.Conditions, *condition) + return true + } + // We are updating an existing condition, so we need to check if it has changed. + if condition.Status == oldCondition.Status { + condition.LastTransitionTime = oldCondition.LastTransitionTime + } + + isEqual := condition.Status == oldCondition.Status && + condition.Reason == oldCondition.Reason && + condition.Message == oldCondition.Message && + condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) + + status.Conditions[conditionIndex] = *condition + // Return true if one of the fields have changed. + return !isEqual +} + +func updateConditionWithRetry(dpl *v1alpha1.Elasticsearch, value v1.ConditionStatus, + executeUpdateCondition func(*v1alpha1.ElasticsearchStatus, v1.ConditionStatus) bool) error { + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + if getErr := sdk.Get(dpl); getErr != nil { + logrus.Debugf("Could not get Elasticsearch %v: %v", dpl.Name, getErr) + return getErr + } + + executeUpdateCondition(&dpl.Status, value) + + if updateErr := sdk.Update(dpl); updateErr != nil { + logrus.Debugf("Failed to update Elasticsearch %v status: %v", dpl.Name, updateErr) + return updateErr + } + return nil + }) + return retryErr +} + +func updateUpdatingSettingsCondition(status *v1alpha1.ElasticsearchStatus, value v1.ConditionStatus) bool { + var message string + if value == v1.ConditionTrue { + message = "Config Map is different" + } else { + message = "Config Map is up to date" + } + return updateESNodeCondition(status, &v1alpha1.ClusterCondition{ + Type: v1alpha1.UpdatingSettings, + Status: value, + Reason: "ConfigChange", + Message: message, + }) +} + +func updateScalingUpCondition(status *v1alpha1.ElasticsearchStatus, value v1.ConditionStatus) bool { + return updateESNodeCondition(status, &v1alpha1.ClusterCondition{ + Type: v1alpha1.ScalingUp, + Status: value, + }) +} + +func updateScalingDownCondition(status *v1alpha1.ElasticsearchStatus, value v1.ConditionStatus) bool { + return updateESNodeCondition(status, &v1alpha1.ClusterCondition{ + Type: v1alpha1.ScalingDown, + Status: value, + }) +} + +func updateRestartingCondition(status *v1alpha1.ElasticsearchStatus, value v1.ConditionStatus) bool { + return updateESNodeCondition(status, &v1alpha1.ClusterCondition{ + Type: v1alpha1.Restarting, + Status: value, + }) +} diff --git a/pkg/k8shandler/util.go b/pkg/k8shandler/util.go index e9f223c22..c217a3baf 100644 --- a/pkg/k8shandler/util.go +++ b/pkg/k8shandler/util.go @@ -3,43 +3,13 @@ package k8shandler import ( "fmt" - api "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" 
"github.com/operator-framework/operator-sdk/pkg/sdk" - - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - apps "k8s.io/api/apps/v1" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" -) -const ( - defaultMasterCPULimit = "100m" - defaultMasterCPURequest = "100m" - defaultCPULimit = "4000m" - defaultCPURequest = "100m" - defaultMemoryLimit = "4Gi" - defaultMemoryRequest = "1Gi" - maxMasterCount = 3 + api "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// addOwnerRefToObject appends the desired OwnerReference to the object -func addOwnerRefToObject(o metav1.Object, r metav1.OwnerReference) { - if (metav1.OwnerReference{}) != r { - o.SetOwnerReferences(append(o.GetOwnerReferences(), r)) - } -} - -func isOwner(subject metav1.ObjectMeta, ownerMeta metav1.ObjectMeta) bool { - for _, ref := range subject.GetOwnerReferences() { - if ref.UID == ownerMeta.UID { - return true - } - } - return false -} - func selectorForES(nodeRole string, clusterName string) map[string]string { return map[string]string{ @@ -66,301 +36,6 @@ func appendDefaultLabel(clusterName string, labels map[string]string) map[string return labels } -// asOwner returns an owner reference set as the vault cluster CR -func asOwner(v *api.Elasticsearch) metav1.OwnerReference { - trueVar := true - return metav1.OwnerReference{ - APIVersion: api.SchemeGroupVersion.String(), - Kind: v.Kind, - Name: v.Name, - UID: v.UID, - Controller: &trueVar, - } -} - -func getReadinessProbe() v1.Probe { - return v1.Probe{ - TimeoutSeconds: 30, - InitialDelaySeconds: 10, - PeriodSeconds: 5, - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - Command: []string{ - "/usr/share/elasticsearch/probe/readiness.sh", - }, - }, - }, - } -} - -func getResourceRequirements(commonResRequirements, nodeResRequirements v1.ResourceRequirements) v1.ResourceRequirements { - limitCPU := nodeResRequirements.Limits.Cpu() - if limitCPU.IsZero() { - if commonResRequirements.Limits.Cpu().IsZero() { - CPU, _ := resource.ParseQuantity(defaultCPULimit) - limitCPU = &CPU - } else { - limitCPU = commonResRequirements.Limits.Cpu() - } - } - limitMem := nodeResRequirements.Limits.Memory() - if limitMem.IsZero() { - if commonResRequirements.Limits.Memory().IsZero() { - Mem, _ := resource.ParseQuantity(defaultMemoryLimit) - limitMem = &Mem - } else { - limitMem = commonResRequirements.Limits.Memory() - } - - } - requestCPU := nodeResRequirements.Requests.Cpu() - if requestCPU.IsZero() { - if commonResRequirements.Requests.Cpu().IsZero() { - CPU, _ := resource.ParseQuantity(defaultCPURequest) - requestCPU = &CPU - } else { - requestCPU = commonResRequirements.Requests.Cpu() - } - } - requestMem := nodeResRequirements.Requests.Memory() - if requestMem.IsZero() { - if commonResRequirements.Requests.Memory().IsZero() { - Mem, _ := resource.ParseQuantity(defaultMemoryRequest) - requestMem = &Mem - } else { - requestMem = commonResRequirements.Requests.Memory() - } - } - - return v1.ResourceRequirements{ - Limits: v1.ResourceList{ - "cpu": *limitCPU, - "memory": *limitMem, - }, - Requests: v1.ResourceList{ - "cpu": *requestCPU, - "memory": *requestMem, - }, - } - -} - -func listDeployments(clusterName, namespace string) (*apps.DeploymentList, error) { - list := deploymentList() - labelSelector := labels.SelectorFromSet(labelsForESCluster(clusterName)).String() - listOps := 
&metav1.ListOptions{LabelSelector: labelSelector} - err := sdk.List(namespace, list, sdk.WithListOptions(listOps)) - if err != nil { - return list, fmt.Errorf("Unable to list deployments: %v", err) - } - - return list, nil -} - -func listReplicaSets(clusterName, namespace string) (*apps.ReplicaSetList, error) { - list := replicaSetList() - labelSelector := labels.SelectorFromSet(labelsForESCluster(clusterName)).String() - listOps := &metav1.ListOptions{LabelSelector: labelSelector} - err := sdk.List(namespace, list, sdk.WithListOptions(listOps)) - if err != nil { - return list, fmt.Errorf("Unable to list ReplicaSets: %v", err) - } - - return list, nil -} - -func listStatefulSets(clusterName, namespace string) (*apps.StatefulSetList, error) { - list := statefulSetList() - labelSelector := labels.SelectorFromSet(labelsForESCluster(clusterName)).String() - listOps := &metav1.ListOptions{LabelSelector: labelSelector} - err := sdk.List(namespace, list, sdk.WithListOptions(listOps)) - if err != nil { - return list, fmt.Errorf("Unable to list StatefulSets: %v", err) - } - - return list, nil -} - -func statefulSetList() *apps.StatefulSetList { - return &apps.StatefulSetList{ - TypeMeta: metav1.TypeMeta{ - Kind: "StatefulSet", - APIVersion: "apps/v1", - }, - } -} - -func deploymentList() *apps.DeploymentList { - return &apps.DeploymentList{ - TypeMeta: metav1.TypeMeta{ - Kind: "Deployment", - APIVersion: "apps/v1", - }, - } -} - -func popDeployment(deployments *apps.DeploymentList, cfg desiredNodeState) (*apps.DeploymentList, apps.Deployment, bool) { - var deployment apps.Deployment - var index = -1 - for i, dpl := range deployments.Items { - if dpl.Name == cfg.DeployName { - deployment = dpl - index = i - break - } - } - if index == -1 { - return deployments, deployment, false - } - dpls := deploymentList() - deployments.Items[index] = deployments.Items[len(deployments.Items)-1] - dpls.Items = deployments.Items[:len(deployments.Items)-1] - return dpls, deployment, true -} - -func replicaSetList() *apps.ReplicaSetList { - return &apps.ReplicaSetList{ - TypeMeta: metav1.TypeMeta{ - Kind: "ReplicaSet", - APIVersion: "apps/v1", - }, - } -} - -func popReplicaSet(replicaSets *apps.ReplicaSetList, cfg actualNodeState) (*apps.ReplicaSetList, apps.ReplicaSet, bool) { - var replicaSet apps.ReplicaSet - var index = -1 - if cfg.Deployment == nil { - return replicaSets, replicaSet, false - } - for i, rsItem := range replicaSets.Items { - // multiple ReplicaSets managed by single Deployment can exist, before they're GC'd - desiredReplicas := *rsItem.Spec.Replicas - if desiredReplicas == 0 { - // ignore old ReplicaSets - continue - } - if isOwner(rsItem.ObjectMeta, cfg.Deployment.ObjectMeta) { - replicaSet = rsItem - index = i - break - } - } - if index == -1 { - return replicaSets, replicaSet, false - } - rsList := replicaSetList() - replicaSets.Items[index] = replicaSets.Items[len(replicaSets.Items)-1] - rsList.Items = replicaSets.Items[:len(replicaSets.Items)-1] - return rsList, replicaSet, true -} - -func popPod(pods *v1.PodList, cfg actualNodeState) (*v1.PodList, v1.Pod, bool) { - var ( - pod v1.Pod - index = -1 - parentObjectMeta metav1.ObjectMeta - ) - if cfg.ReplicaSet != nil { - parentObjectMeta = cfg.ReplicaSet.ObjectMeta - } else if cfg.StatefulSet != nil { - parentObjectMeta = cfg.StatefulSet.ObjectMeta - } else { - return pods, pod, false - } - for i, podItem := range pods.Items { - if isOwner(podItem.ObjectMeta, parentObjectMeta) { - pod = podItem - index = i - break - } - } - if index == -1 { - 
return pods, pod, false - } - podList := podList() - pods.Items[index] = pods.Items[len(pods.Items)-1] - podList.Items = pods.Items[:len(pods.Items)-1] - return podList, pod, true - -} - -// podList returns a v1.PodList object -func podList() *v1.PodList { - return &v1.PodList{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - } -} - -func listPods(clusterName, namespace string) (*v1.PodList, error) { - podList := podList() - labelSelector := labels.SelectorFromSet(labelsForESCluster(clusterName)).String() - listOps := &metav1.ListOptions{LabelSelector: labelSelector} - err := sdk.List(namespace, podList, sdk.WithListOptions(listOps)) - if err != nil { - return podList, fmt.Errorf("failed to list pods: %v", err) - } - return podList, nil -} - -func listRunningPods(clusterName, namespace string) (*v1.PodList, error) { - pods, err := listPods(clusterName, namespace) - if err != nil { - return nil, err - } - // empty slice with memory allocated for len(pods.Items) v1.Pod objects - runningPods := make([]v1.Pod, 0, len(pods.Items)) - for _, pod := range pods.Items { - if pod.Status.Phase == v1.PodRunning { - podReady := true - for _, cs := range pod.Status.ContainerStatuses { - if !cs.Ready { - podReady = false - break - } - } - if podReady { - runningPods = append(runningPods, pod) - } - } - } - result := podList() - result.Items = runningPods - return result, nil -} - -func listRunningMasterPods(clusterName, namespace string) (*v1.PodList, error) { - pods, err := listRunningPods(clusterName, namespace) - if err != nil { - return nil, err - } - var masterPods []v1.Pod - for _, pod := range pods.Items { - for _, envVar := range pod.Spec.Containers[0].Env { - if envVar.Name == "IS_MASTER" && envVar.Value == "true" { - masterPods = append(masterPods, pod) - } - } - } - result := podList() - result.Items = masterPods - return result, nil -} - -func getRunningMasterPod(clusterName, namespace string) (*v1.Pod, error) { - pods, err := listRunningMasterPods(clusterName, namespace) - if err != nil { - return nil, err - } - if len(pods.Items) == 0 { - return nil, fmt.Errorf("no running master pods found") - } - return &pods.Items[0], nil -} - // getPodNames returns the pod names of the array of pods passed in func getPodNames(pods []v1.Pod) []string { var podNames []string @@ -370,42 +45,21 @@ func getPodNames(pods []v1.Pod) []string { return podNames } -func popStatefulSet(statefulSets *apps.StatefulSetList, cfg desiredNodeState) (*apps.StatefulSetList, apps.StatefulSet, bool) { - var statefulSet apps.StatefulSet - var index = -1 - for i, ss := range statefulSets.Items { - if ss.Name == cfg.DeployName { - statefulSet = ss - index = i - break - } - } - if index == -1 { - return statefulSets, statefulSet, false - } - dpls := statefulSetList() - statefulSets.Items[index] = statefulSets.Items[len(statefulSets.Items)-1] - dpls.Items = statefulSets.Items[:len(statefulSets.Items)-1] - return dpls, statefulSet, true -} - -func getMasterCount(dpl *v1alpha1.Elasticsearch) int32 { +func getMasterCount(dpl *api.Elasticsearch) int32 { masterCount := int32(0) - for _, node := range dpl.Spec.Nodes { - if isNodeMaster(&node) { - masterCount = masterCount + node.NodeCount + if isMasterNode(node) { + masterCount += node.NodeCount } } return masterCount } -func getDataCount(dpl *v1alpha1.Elasticsearch) int32 { +func getDataCount(dpl *api.Elasticsearch) int32 { dataCount := int32(0) - for _, node := range dpl.Spec.Nodes { - if isNodeData(&node) { + if isDataNode(node) { dataCount = dataCount + 
node.NodeCount } } @@ -413,11 +67,10 @@ func getDataCount(dpl *v1alpha1.Elasticsearch) int32 { return dataCount } -func getClientCount(dpl *v1alpha1.Elasticsearch) int32 { +func getClientCount(dpl *api.Elasticsearch) int32 { clientCount := int32(0) - for _, node := range dpl.Spec.Nodes { - if isNodeClient(&node) { + if isClientNode(node) { clientCount = clientCount + node.NodeCount } } @@ -425,58 +78,22 @@ func getClientCount(dpl *v1alpha1.Elasticsearch) int32 { return clientCount } -func getNodeCount(dpl *v1alpha1.Elasticsearch) int32 { - nodeCount := int32(0) - - for _, node := range dpl.Spec.Nodes { - nodeCount = nodeCount + node.NodeCount - } - return nodeCount -} - -func isNodeMaster(node *v1alpha1.ElasticsearchNode) bool { - for _, role := range node.Roles { - if role == v1alpha1.ElasticsearchRoleMaster { - return true - } - } - return false -} - -func isNodeData(node *v1alpha1.ElasticsearchNode) bool { - for _, role := range node.Roles { - if role == v1alpha1.ElasticsearchRoleData { - return true - } - } - return false -} - -func isNodeClient(node *v1alpha1.ElasticsearchNode) bool { - for _, role := range node.Roles { - if role == v1alpha1.ElasticsearchRoleClient { - return true - } - } - return false -} - -func isValidMasterCount(dpl *v1alpha1.Elasticsearch) bool { +func isValidMasterCount(dpl *api.Elasticsearch) bool { masterCount := int(getMasterCount(dpl)) - return (masterCount <= maxMasterCount) + return (masterCount <= maxMasterCount && masterCount > 0) } -func isValidDataCount(dpl *v1alpha1.Elasticsearch) bool { +func isValidDataCount(dpl *api.Elasticsearch) bool { dataCount := int(getDataCount(dpl)) return dataCount > 0 } -func isValidRedundancyPolicy(dpl *v1alpha1.Elasticsearch) bool { +func isValidRedundancyPolicy(dpl *api.Elasticsearch) bool { dataCount := int(getDataCount(dpl)) - return !(dataCount == 1 && dpl.Spec.RedundancyPolicy == v1alpha1.SingleRedundancy) + return !(dataCount == 1 && dpl.Spec.RedundancyPolicy == api.SingleRedundancy) } -func isValidConf(dpl *v1alpha1.Elasticsearch) error { +func isValidConf(dpl *api.Elasticsearch) error { if !isValidMasterCount(dpl) { return fmt.Errorf("Invalid master nodes count. 
Please ensure there are no more than %v total nodes with master roles", maxMasterCount) } @@ -489,6 +106,23 @@ func isValidConf(dpl *v1alpha1.Elasticsearch) error { return nil } +func DeletePod(podName, namespace string) error { + pod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: v1.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: namespace, + }, + } + + err := sdk.Delete(pod) + + return err +} + func GetPodList(namespace string, selector string) (*v1.PodList, error) { list := &v1.PodList{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/k8shandler/util_test.go b/pkg/k8shandler/util_test.go deleted file mode 100644 index f4876f38f..000000000 --- a/pkg/k8shandler/util_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package k8shandler - -import ( - "fmt" - "reflect" - "testing" - - v1alpha1 "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -func TestGetReadinessProbe(t *testing.T) { - goodProbe := v1.Probe{ - TimeoutSeconds: 30, - InitialDelaySeconds: 10, - FailureThreshold: 15, - Handler: v1.Handler{ - TCPSocket: &v1.TCPSocketAction{ - Port: intstr.FromInt(9300), - }, - }, - } - if !reflect.DeepEqual(goodProbe, getReadinessProbe()) { - t.Errorf("Probe was incorrect: %v", getReadinessProbe()) - } -} - -func TestGetAffinity(t *testing.T) { - rolesArray := [][]string{{"master"}, {"client", "data", "master"}, - {"client", "data"}, {"data"}, {"client"}} - goodAffinities := []v1.Affinity{} - for _, roles := range rolesArray { - labelSelectorReqs := []metav1.LabelSelectorRequirement{} - for _, role := range roles { - labelSelectorReqs = append(labelSelectorReqs, metav1.LabelSelectorRequirement{ - Key: fmt.Sprintf("es-node-%s", role), - Operator: metav1.LabelSelectorOpIn, - Values: []string{"true"}, - }) - } - aff := v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ - { - Weight: 100, - PodAffinityTerm: v1.PodAffinityTerm{ - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: labelSelectorReqs, - }, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - }, - } - goodAffinities = append(goodAffinities, aff) - } - - for i, roles := range rolesArray { - ndRoles := []v1alpha1.ElasticsearchNodeRole{} - for _, role := range roles { - ndRoles = append(ndRoles, v1alpha1.ElasticsearchNodeRole(role)) - } - cfg := desiredNodeState{ - Roles: ndRoles, - } - if !reflect.DeepEqual(goodAffinities[i], cfg.getAffinity()) { - t.Errorf("Incorrect v1.Affinity constructed for role setb: %v", roles) - - } - } -} - -func TestGetResourceRequirements(t *testing.T) { - CPU1, _ := resource.ParseQuantity("110m") - CPU2, _ := resource.ParseQuantity("210m") - Mem1, _ := resource.ParseQuantity("257Mi") - Mem2, _ := resource.ParseQuantity("513Mi") - defMemLim, _ := resource.ParseQuantity(defaultMemoryLimit) - defCPUReq, _ := resource.ParseQuantity(defaultCPURequest) - defMemReq, _ := resource.ParseQuantity(defaultMemoryRequest) - defCPULim, _ := resource.ParseQuantity(defaultCPULimit) - - resList1 := v1.ResourceList{ - "cpu": CPU1, - } - resList2 := v1.ResourceList{ - "memory": Mem1, - } - resList3 := v1.ResourceList{ - "cpu": CPU2, - "memory": Mem2, - } - req1 := v1.ResourceRequirements{ - Limits: resList1, - Requests: resList2, - } - req2 := v1.ResourceRequirements{ - Limits: resList2, - Requests: 
resList1, - } - req3 := v1.ResourceRequirements{ - Limits: resList3, - Requests: resList3, - } - req4 := v1.ResourceRequirements{} - resReq1 := v1.ResourceRequirements{ - Limits: v1.ResourceList{ - "cpu": CPU1, - "memory": Mem1, - }, - Requests: v1.ResourceList{ - "cpu": CPU1, - "memory": Mem1, - }, - } - resReq2 := v1.ResourceRequirements{ - Limits: v1.ResourceList{ - "cpu": CPU1, - "memory": Mem2, - }, - Requests: v1.ResourceList{ - "cpu": CPU2, - "memory": Mem1, - }, - } - resReq3 := v1.ResourceRequirements{ - Limits: v1.ResourceList{ - "cpu": CPU1, - "memory": defMemLim, - }, - Requests: v1.ResourceList{ - "cpu": defCPUReq, - "memory": Mem1, - }, - } - resReq4 := v1.ResourceRequirements{ - Limits: v1.ResourceList{ - "cpu": defCPULim, - "memory": Mem1, - }, - Requests: v1.ResourceList{ - "cpu": CPU1, - "memory": defMemReq, - }, - } - - var table = []struct { - commonRequirements v1.ResourceRequirements - nodeRequirements v1.ResourceRequirements - result v1.ResourceRequirements - }{ - {req1, req2, resReq1}, - {req2, req1, resReq1}, - {req1, req3, req3}, - {req3, req1, resReq2}, - {req1, req4, resReq3}, - {req4, req1, resReq3}, - {req2, req4, resReq4}, - } - - for _, tt := range table { - actual := getResourceRequirements(tt.commonRequirements, tt.nodeRequirements) - if !reflect.DeepEqual(actual, tt.result) { - t.Errorf("Incorrect v1.ResourceRequirements constructed: %v, should be: %v", actual, tt.result) - - } - } -} diff --git a/pkg/stub/handler.go b/pkg/stub/handler.go index 293a3d359..f28f1a88c 100644 --- a/pkg/stub/handler.go +++ b/pkg/stub/handler.go @@ -7,68 +7,75 @@ import ( "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" "github.com/openshift/elasticsearch-operator/pkg/k8shandler" "github.com/operator-framework/operator-sdk/pkg/sdk" + + "github.com/sirupsen/logrus" ) func NewHandler() sdk.Handler { return &Handler{} } -type Handler struct { - // Fill me -} +type Handler struct{} func (h *Handler) Handle(ctx context.Context, event sdk.Event) error { - if event.Deleted { - return nil - } switch o := event.Object.(type) { case *v1alpha1.Elasticsearch: + if event.Deleted { + Flush(o) + return nil + } + return Reconcile(o) } return nil } +func Flush(cluster *v1alpha1.Elasticsearch) { + logrus.Infof("Flushing nodes for cluster %v in %v", cluster.Name, cluster.Namespace) + k8shandler.FlushNodes(cluster.Name, cluster.Namespace) +} + // Reconcile reconciles the cluster's state to the spec specified -func Reconcile(es *v1alpha1.Elasticsearch) (err error) { - err = k8shandler.CreateOrUpdateServices(es) - if err != nil { - return fmt.Errorf("Failed to reconcile Services for Elasticsearch cluster: %v", err) - } +func Reconcile(cluster *v1alpha1.Elasticsearch) (err error) { - // Ensure existence of clusterroles and clusterrolebindings - if err := k8shandler.CreateOrUpdateRBAC(es); err != nil { - return fmt.Errorf("Failed to reconcile Roles and RoleBindings for Elasticsearch cluster: %v", err) + if cluster.Spec.ManagementState == v1alpha1.ManagementStateUnmanaged { + return nil } // Ensure existence of servicesaccount - serviceAccountName, err := k8shandler.CreateOrUpdateServiceAccount(es) - if err != nil { + if err = k8shandler.CreateOrUpdateServiceAccount(cluster); err != nil { return fmt.Errorf("Failed to reconcile ServiceAccount for Elasticsearch cluster: %v", err) } + // Ensure existence of clusterroles and clusterrolebindings + if err := k8shandler.CreateOrUpdateRBAC(cluster); err != nil { + return fmt.Errorf("Failed to reconcile Roles and RoleBindings for 
Elasticsearch cluster: %v", err) + } + // Ensure existence of config maps - configMapName, err := k8shandler.CreateOrUpdateConfigMaps(es) - if err != nil { + if err = k8shandler.CreateOrUpdateConfigMaps(cluster); err != nil { return fmt.Errorf("Failed to reconcile ConfigMaps for Elasticsearch cluster: %v", err) } - // Ensure existence of prometheus rules - if err = k8shandler.CreateOrUpdatePrometheusRules(es); err != nil { - return fmt.Errorf("Failed to reconcile PrometheusRules for Elasticsearch cluster: %v", err) + if err = k8shandler.CreateOrUpdateServices(cluster); err != nil { + return fmt.Errorf("Failed to reconcile Services for Elasticsearch cluster: %v", err) + } + + // Ensure Elasticsearch cluster itself is up to spec + //if err = k8shandler.CreateOrUpdateElasticsearchCluster(cluster, "elasticsearch", "elasticsearch"); err != nil { + if err = k8shandler.CreateOrUpdateElasticsearchCluster(cluster); err != nil { + return fmt.Errorf("Failed to reconcile Elasticsearch deployment spec: %v", err) } // Ensure existence of service monitors - if err = k8shandler.CreateOrUpdateServiceMonitors(es); err != nil { + if err = k8shandler.CreateOrUpdateServiceMonitors(cluster); err != nil { return fmt.Errorf("Failed to reconcile Service Monitors for Elasticsearch cluster: %v", err) } - // TODO: Ensure existence of storage? - - // Ensure Elasticsearch cluster itself is up to spec - err = k8shandler.CreateOrUpdateElasticsearchCluster(es, configMapName, serviceAccountName) - if err != nil { - return fmt.Errorf("Failed to reconcile Elasticsearch deployment spec: %v", err) + // Ensure existence of prometheus rules + if err = k8shandler.CreateOrUpdatePrometheusRules(cluster); err != nil { + return fmt.Errorf("Failed to reconcile PrometheusRules for Elasticsearch cluster: %v", err) } return nil diff --git a/pkg/utils/exec.go b/pkg/utils/exec.go deleted file mode 100644 index 403bc855a..000000000 --- a/pkg/utils/exec.go +++ /dev/null @@ -1,68 +0,0 @@ -package utils - -import ( - "bytes" - "fmt" - - "github.com/operator-framework/operator-sdk/pkg/k8sclient" - "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/remotecommand" -) - -type ExecConfig struct { - Pod *v1.Pod - ContainerName string - Command []string - KubeConfigPath string - MasterURL string - StdOut bool - StdErr bool - Tty bool -} - -func PodExec(config *ExecConfig) (*bytes.Buffer, *bytes.Buffer, error) { - var ( - execOut bytes.Buffer - execErr bytes.Buffer - ) - - esPod := config.Pod - if esPod.Status.Phase != v1.PodRunning { - return nil, nil, fmt.Errorf("elasticsearch pod [%s] found but isn't running", esPod.Name) - } - - client := k8sclient.GetKubeClient() - execRequest := client.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(esPod.Name). - Namespace(esPod.Namespace). 
- SubResource("exec") - - execRequest.VersionedParams(&v1.PodExecOptions{ - Container: config.ContainerName, - Command: config.Command, - Stdout: config.StdOut, - Stderr: config.StdErr, - }, scheme.ParameterCodec) - - restClientConfig, err := clientcmd.BuildConfigFromFlags(config.MasterURL, config.KubeConfigPath) - if err != nil { - return nil, nil, fmt.Errorf("error when creating rest client command: %v", err) - } - exec, err := remotecommand.NewSPDYExecutor(restClientConfig, "POST", execRequest.URL()) - if err != nil { - return nil, nil, fmt.Errorf("error when creating remote command executor: %v", err) - } - err = exec.Stream(remotecommand.StreamOptions{ - Stdout: &execOut, - Stderr: &execErr, - Tty: config.Tty, - }) - if err != nil { - return nil, nil, fmt.Errorf("remote execution failed: %v", err) - } - - return &execOut, &execErr, nil -} diff --git a/pkg/utils/exec_util.go b/pkg/utils/exec_util.go deleted file mode 100644 index a4a47386a..000000000 --- a/pkg/utils/exec_util.go +++ /dev/null @@ -1,112 +0,0 @@ -package utils - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - - "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/sirupsen/logrus" - "k8s.io/api/core/v1" -) - -func ElasticsearchExec(pod *v1.Pod, command []string) (*bytes.Buffer, *bytes.Buffer, error) { - // when running in a pod, use the values provided for the sa - // this is primarily used when testing - kubeConfigPath := LookupEnvWithDefault("KUBERNETES_CONFIG", "") - masterURL := "https://kubernetes.default.svc" - if kubeConfigPath == "" { - // ExecConfig requires both are "", or both have a real value - masterURL = "" - } - config := &ExecConfig{ - Pod: pod, - ContainerName: "elasticsearch", - Command: command, - KubeConfigPath: kubeConfigPath, - MasterURL: masterURL, - StdOut: true, - StdErr: true, - Tty: false, - } - return PodExec(config) -} - -func UpdateClusterSettings(pod *v1.Pod, quorum int) error { - command := []string{"sh", "-c", - fmt.Sprintf("es_util --query=_cluster/settings -H 'Content-Type: application/json' -X PUT -d '{\"persistent\":{%s}}'", - minimumMasterNodesCommand(quorum))} - - _, _, err := ElasticsearchExec(pod, command) - - return err -} - -func ClusterHealth(pod *v1.Pod) (map[string]interface{}, error) { - command := []string{"es_util", "--query=_cluster/health?pretty=true"} - execOut, _, err := ElasticsearchExec(pod, command) - if err != nil { - logrus.Debug(err) - return nil, err - } - - var result map[string]interface{} - - err = json.Unmarshal(execOut.Bytes(), &result) - if err != nil { - logrus.Debug("could not unmarshal: %v", err) - return nil, err - } - return result, nil -} - -func NumberOfNodes(pod *v1.Pod) int { - healthResponse, err := ClusterHealth(pod) - if err != nil { - // logrus.Debugf("failed to get _cluster/health: %v", err) - return -1 - } - - // is it present? - value, present := healthResponse["number_of_nodes"] - if !present { - return -1 - } - - // json numbers are represented as floats - // so let's convert from type interface{} to float - numberofNodes, ok := value.(float64) - if !ok { - return -1 - } - - // wow that's a lot of boilerplate... 
- return int(numberofNodes) -} - -func PerformSyncedFlush(pod *v1.Pod) error { - command := []string{"sh", "-c", "es_util --query=_flush/synced -X POST"} - - _, _, err := ElasticsearchExec(pod, command) - - return err -} - -func SetShardAllocation(pod *v1.Pod, enabled v1alpha1.ShardAllocationState) error { - command := []string{"sh", "-c", - fmt.Sprintf("es_util --query=_cluster/settings -H 'Content-Type: application/json' -X PUT -d '{\"transient\":{%s}}'", - shardAllocationCommand(enabled))} - - _, _, err := ElasticsearchExec(pod, command) - - return err -} - -func shardAllocationCommand(shardAllocation v1alpha1.ShardAllocationState) string { - return fmt.Sprintf("%s:%s", strconv.Quote("cluster.routing.allocation.enable"), strconv.Quote(string(shardAllocation))) -} - -func minimumMasterNodesCommand(nodes int) string { - return fmt.Sprintf("%s:%d", strconv.Quote("discovery.zen.minimum_master_nodes"), nodes) -} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 8ac3f680b..b0aef2fd3 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -4,43 +4,9 @@ import ( "crypto/rand" "encoding/base64" "fmt" - "io/ioutil" "os" - - api "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" - "github.com/operator-framework/operator-sdk/pkg/sdk" - "github.com/sirupsen/logrus" - "k8s.io/api/core/v1" - rbac "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" ) -func GetFileContents(filePath string) []byte { - contents, err := ioutil.ReadFile(filePath) - if err != nil { - logrus.Errorf("Unable to read file to get contents: %v", err) - return nil - } - - return contents -} - -func Secret(secretName string, namespace string, data map[string][]byte) *v1.Secret { - return &v1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: v1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: namespace, - }, - Type: "Opaque", - Data: data, - } -} - func LookupEnvWithDefault(envName, defaultValue string) string { if value, ok := os.LookupEnv(envName); ok { return value @@ -48,179 +14,6 @@ func LookupEnvWithDefault(envName, defaultValue string) string { return defaultValue } -func GetESNodeCondition(status *api.ElasticsearchStatus, conditionType api.ClusterConditionType) (int, *api.ClusterCondition) { - if status == nil { - return -1, nil - } - for i := range status.Conditions { - if status.Conditions[i].Type == conditionType { - return i, &status.Conditions[i] - } - } - return -1, nil -} - -func UpdateESNodeCondition(status *api.ElasticsearchStatus, condition *api.ClusterCondition) bool { - condition.LastTransitionTime = metav1.Now() - // Try to find this node condition. - conditionIndex, oldCondition := GetESNodeCondition(status, condition.Type) - - if oldCondition == nil { - // We are adding new node condition. - status.Conditions = append(status.Conditions, *condition) - return true - } - // We are updating an existing condition, so we need to check if it has changed. - if condition.Status == oldCondition.Status { - condition.LastTransitionTime = oldCondition.LastTransitionTime - } - - isEqual := condition.Status == oldCondition.Status && - condition.Reason == oldCondition.Reason && - condition.Message == oldCondition.Message && - condition.LastTransitionTime.Equal(&oldCondition.LastTransitionTime) - - status.Conditions[conditionIndex] = *condition - // Return true if one of the fields have changed. 
- return !isEqual -} - -func UpdateConditionWithRetry(dpl *api.Elasticsearch, value api.ConditionStatus, - executeUpdateCondition func(*api.ElasticsearchStatus, api.ConditionStatus) bool) error { - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if getErr := sdk.Get(dpl); getErr != nil { - logrus.Debugf("Could not get Elasticsearch %v: %v", dpl.Name, getErr) - return getErr - } - - executeUpdateCondition(&dpl.Status, value) - - if updateErr := sdk.Update(dpl); updateErr != nil { - logrus.Debugf("Failed to update Elasticsearch %v status: %v", dpl.Name, updateErr) - return updateErr - } - return nil - }) - return retryErr -} - -func UpdateUpdatingSettingsCondition(status *api.ElasticsearchStatus, value api.ConditionStatus) bool { - var message string - if value == api.ConditionTrue { - message = "Config Map is different" - } else { - message = "Config Map is up to date" - } - return UpdateESNodeCondition(status, &api.ClusterCondition{ - Type: api.UpdatingSettings, - Status: value, - Reason: "ConfigChange", - Message: message, - }) -} - -func UpdateScalingUpCondition(status *api.ElasticsearchStatus, value api.ConditionStatus) bool { - return UpdateESNodeCondition(status, &api.ClusterCondition{ - Type: api.ScalingUp, - Status: value, - }) -} - -func UpdateScalingDownCondition(status *api.ElasticsearchStatus, value api.ConditionStatus) bool { - return UpdateESNodeCondition(status, &api.ClusterCondition{ - Type: api.ScalingDown, - Status: value, - }) -} - -func UpdateRestartingCondition(status *api.ElasticsearchStatus, value api.ConditionStatus) bool { - return UpdateESNodeCondition(status, &api.ClusterCondition{ - Type: api.Restarting, - Status: value, - }) -} - -func IsUpdatingSettings(status *api.ElasticsearchStatus) bool { - _, settingsUpdateCondition := GetESNodeCondition(status, api.UpdatingSettings) - if settingsUpdateCondition != nil && settingsUpdateCondition.Status == api.ConditionTrue { - return true - } - return false -} - -func IsClusterScalingUp(status *api.ElasticsearchStatus) bool { - _, scaleUpCondition := GetESNodeCondition(status, api.ScalingUp) - if scaleUpCondition != nil && scaleUpCondition.Status == api.ConditionTrue { - return true - } - return false -} - -func IsClusterScalingDown(status *api.ElasticsearchStatus) bool { - _, scaleDownCondition := GetESNodeCondition(status, api.ScalingDown) - if scaleDownCondition != nil && scaleDownCondition.Status == api.ConditionTrue { - return true - } - return false -} - -func IsRestarting(status *api.ElasticsearchStatus) bool { - _, restartingCondition := GetESNodeCondition(status, api.Restarting) - if restartingCondition != nil && restartingCondition.Status == api.ConditionTrue { - return true - } - return false -} - -func UpdateNodeUpgradeStatusWithRetry(dpl *api.Elasticsearch, deployName string, value *api.ElasticsearchNodeUpgradeStatus) error { - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - if getErr := sdk.Get(dpl); getErr != nil { - logrus.Debugf("Could not get Elasticsearch %v: %v", dpl.Name, getErr) - return getErr - } - - for i, node := range dpl.Status.Nodes { - if node.DeploymentName == deployName { - dpl.Status.Nodes[i].UpgradeStatus = *value - } - } - - if updateErr := sdk.Update(dpl); updateErr != nil { - logrus.Debugf("Failed to update Elasticsearch %v status: %v", dpl.Name, updateErr) - return updateErr - } - return nil - }) - return retryErr -} - -func NodeRestarting() *api.ElasticsearchNodeUpgradeStatus { - return &api.ElasticsearchNodeUpgradeStatus{ - UnderUpgrade: 
api.UnderUpgradeTrue, - UpgradePhase: api.NodeRestarting, - } -} - -func NodeRecoveringData() *api.ElasticsearchNodeUpgradeStatus { - return &api.ElasticsearchNodeUpgradeStatus{ - UnderUpgrade: api.UnderUpgradeTrue, - UpgradePhase: api.RecoveringData, - } -} - -func NodeControllerUpdated() *api.ElasticsearchNodeUpgradeStatus { - return &api.ElasticsearchNodeUpgradeStatus{ - UnderUpgrade: api.UnderUpgradeTrue, - UpgradePhase: api.ControllerUpdated, - } -} - -func NodeNormalOperation() *api.ElasticsearchNodeUpgradeStatus { - return &api.ElasticsearchNodeUpgradeStatus{ - UnderUpgrade: api.UnderUpgradeFalse, - } -} - func RandStringBase64(length int) (string, error) { if length <= 0 { return "", fmt.Errorf("Can't generate random strings of length: %d", length) @@ -237,61 +30,3 @@ func RandStringBase64(length int) (string, error) { return randStringBase64, nil } - -func NewPolicyRule(apiGroups, resources, resourceNames, verbs, urls []string) rbac.PolicyRule { - return rbac.PolicyRule{ - APIGroups: apiGroups, - Resources: resources, - ResourceNames: resourceNames, - Verbs: verbs, - NonResourceURLs: urls, - } -} - -func NewPolicyRules(rules ...rbac.PolicyRule) []rbac.PolicyRule { - return rules -} - -func NewClusterRole(roleName string, rules []rbac.PolicyRule) *rbac.ClusterRole { - return &rbac.ClusterRole{ - TypeMeta: metav1.TypeMeta{ - Kind: "ClusterRole", - APIVersion: rbac.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: roleName, - }, - Rules: rules, - } -} - -func NewSubject(kind, name, namespace string) rbac.Subject { - return rbac.Subject{ - Kind: kind, - Name: name, - Namespace: namespace, - APIGroup: rbac.GroupName, - } -} - -func NewSubjects(subjects ...rbac.Subject) []rbac.Subject { - return subjects -} - -func NewClusterRoleBinding(bindingName, roleName string, subjects []rbac.Subject) *rbac.ClusterRoleBinding { - return &rbac.ClusterRoleBinding{ - TypeMeta: metav1.TypeMeta{ - Kind: "ClusterRoleBinding", - APIVersion: rbac.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: bindingName, - }, - RoleRef: rbac.RoleRef{ - Kind: "ClusterRole", - Name: roleName, - APIGroup: rbac.GroupName, - }, - Subjects: subjects, - } -} diff --git a/test/e2e/elasticsearch_test.go b/test/e2e/elasticsearch_test.go index 1e2a781c5..51f619958 100644 --- a/test/e2e/elasticsearch_test.go +++ b/test/e2e/elasticsearch_test.go @@ -1,17 +1,17 @@ package e2e import ( - goctx "context" "fmt" "testing" "time" - "github.com/openshift/elasticsearch-operator/pkg/utils" + "github.com/openshift/elasticsearch-operator/test/utils" "github.com/operator-framework/operator-sdk/pkg/test/e2eutil" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" + goctx "context" + v1 "k8s.io/api/core/v1" elasticsearch "github.com/openshift/elasticsearch-operator/pkg/apis/elasticsearch/v1alpha1" framework "github.com/operator-framework/operator-sdk/pkg/test" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,6 +22,7 @@ var ( timeout = time.Second * 300 cleanupRetryInterval = time.Second * 1 cleanupTimeout = time.Second * 5 + elasticsearchCRName = "example-elasticsearch" ) func TestElasticsearch(t *testing.T) { @@ -49,7 +50,7 @@ func createRequiredSecret(f *framework.Framework, ctx *framework.TestCtx) error } elasticsearchSecret := utils.Secret( - "elasticsearch", + elasticsearchCRName, namespace, map[string][]byte{ "elasticsearch.key": utils.GetFileContents("test/files/elasticsearch.key"), @@ -79,7 +80,7 @@ func elasticsearchFullClusterTest(t 
*testing.T, f *framework.Framework, ctx *fra cpuValue, _ := resource.ParseQuantity("500m") memValue, _ := resource.ParseQuantity("2Gi") - esNode := elasticsearch.ElasticsearchNode{ + esDataNode := elasticsearch.ElasticsearchNode{ Roles: []elasticsearch.ElasticsearchNodeRole{ elasticsearch.ElasticsearchRoleClient, elasticsearch.ElasticsearchRoleData, @@ -89,6 +90,15 @@ func elasticsearchFullClusterTest(t *testing.T, f *framework.Framework, ctx *fra Storage: elasticsearch.ElasticsearchStorageSpec{}, } + esNonDataNode := elasticsearch.ElasticsearchNode{ + Roles: []elasticsearch.ElasticsearchNodeRole{ + elasticsearch.ElasticsearchRoleClient, + elasticsearch.ElasticsearchRoleMaster, + }, + NodeCount: int32(1), + Storage: elasticsearch.ElasticsearchStorageSpec{}, + } + // create clusterlogging custom resource exampleElasticsearch := &elasticsearch.Elasticsearch{ TypeMeta: metav1.TypeMeta{ @@ -96,12 +106,12 @@ func elasticsearchFullClusterTest(t *testing.T, f *framework.Framework, ctx *fra APIVersion: elasticsearch.SchemeGroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ - Name: "example-elasticsearch", + Name: elasticsearchCRName, Namespace: namespace, }, Spec: elasticsearch.ElasticsearchSpec{ Spec: elasticsearch.ElasticsearchNodeSpec{ - Image: "openshift/origin-logging-elasticsearch5:latest", + Image: "", Resources: v1.ResourceRequirements{ Limits: v1.ResourceList{ v1.ResourceCPU: cpuValue, @@ -114,7 +124,7 @@ func elasticsearchFullClusterTest(t *testing.T, f *framework.Framework, ctx *fra }, }, Nodes: []elasticsearch.ElasticsearchNode{ - esNode, + esDataNode, }, ManagementState: elasticsearch.ManagementStateManaged, }, @@ -132,7 +142,7 @@ func elasticsearchFullClusterTest(t *testing.T, f *framework.Framework, ctx *fra // Scale up current node // then look for example-elasticsearch-clientdatamaster-0-2 and prior node - exampleName := types.NamespacedName{Name: "example-elasticsearch", Namespace: namespace} + exampleName := types.NamespacedName{Name: elasticsearchCRName, Namespace: namespace} if err = f.Client.Get(goctx.TODO(), exampleName, exampleElasticsearch); err != nil { return fmt.Errorf("failed to get exampleElasticsearch: %v", err) } @@ -156,7 +166,7 @@ func elasticsearchFullClusterTest(t *testing.T, f *framework.Framework, ctx *fra if err = f.Client.Get(goctx.TODO(), exampleName, exampleElasticsearch); err != nil { return fmt.Errorf("failed to get exampleElasticsearch: %v", err) } - exampleElasticsearch.Spec.Nodes = append(exampleElasticsearch.Spec.Nodes, esNode) + exampleElasticsearch.Spec.Nodes = append(exampleElasticsearch.Spec.Nodes, esNonDataNode) err = f.Client.Update(goctx.TODO(), exampleElasticsearch) if err != nil { return fmt.Errorf("could not update exampleElasticsearch with an additional node: %v", err) @@ -174,11 +184,11 @@ func elasticsearchFullClusterTest(t *testing.T, f *framework.Framework, ctx *fra return fmt.Errorf("timed out waiting for Deployment example-elasticsearch-clientdatamaster-0-2: %v", err) } - err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-elasticsearch-clientdatamaster-1-1", 1, retryInterval, timeout) + err = utils.WaitForStatefulset(t, f.KubeClient, namespace, "example-elasticsearch-clientmaster-1", 1, retryInterval, timeout) if err != nil { - return fmt.Errorf("timed out waiting for Deployment example-elasticsearch-clientdatamaster-1-1: %v", err) + return fmt.Errorf("timed out waiting for Statefulset example-elasticsearch-clientmaster-1: %v", err) } - t.Log("Created 3 deployments") + t.Log("Created non-data statefulset") // 
Incorrect scale up and verify we don't see a 4th master created if err = f.Client.Get(goctx.TODO(), exampleName, exampleElasticsearch); err != nil { @@ -187,12 +197,12 @@ func elasticsearchFullClusterTest(t *testing.T, f *framework.Framework, ctx *fra exampleElasticsearch.Spec.Nodes[1].NodeCount = int32(2) err = f.Client.Update(goctx.TODO(), exampleElasticsearch) if err != nil { - return fmt.Errorf("could not update exampleElasticsearch with an additional node and replica: %v", err) + return fmt.Errorf("could not update exampleElasticsearch with an additional statefulset replica: %v", err) } - err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "example-elasticsearch-clientdatamaster-1-2", 0, retryInterval, time.Second*30) + err = utils.WaitForStatefulset(t, f.KubeClient, namespace, "example-elasticsearch-clientmaster-1", 2, retryInterval, time.Second*30) if err == nil { - return fmt.Errorf("unexpected deployment example-elasticsearch-clientdatamaster-1-2 found") + return fmt.Errorf("unexpected statefulset replica count for example-elasticsearch-clientmaster-1 found") } t.Log("Finished successfully") diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index e8df1719d..d990450af 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -1,8 +1,9 @@ package e2e import ( - framework "github.com/operator-framework/operator-sdk/pkg/test" "testing" + + framework "github.com/operator-framework/operator-sdk/pkg/test" ) func TestMain(m *testing.M) { diff --git a/test/files/ca.crt b/test/files/ca.crt index dad127f54..045c6f3fc 100644 --- a/test/files/ca.crt +++ b/test/files/ca.crt @@ -1,18 +1,30 @@ -----BEGIN CERTIFICATE----- -MIIC6jCCAdKgAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDDBtvcGVu -c2hpZnQtc2lnbmVyQDE1NDA0NzY0NTUwHhcNMTgxMDI1MTQwNzM1WhcNMjMxMDI0 -MTQwNzM2WjAmMSQwIgYDVQQDDBtvcGVuc2hpZnQtc2lnbmVyQDE1NDA0NzY0NTUw -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDQ1+8Z00Ovc93IpUjOsWs2 -ueNfcaDpDYW7P2y50jqy4XGdRRdHiLQKQv7S3Zqy/wTlnaDpk8TLMp6YSZ41p1Qy -5HhIbXMviR9Bqg1JziSx9HaFvo5w79gn7YbICpftKpsi1L2KBk2ekgnTnt+pw+Ml -L8N/ELtWtK8pu5cNZJIkJptxTWJzPyeUSVfDDPbEXP0VSDw2MOSoIxqcH1C4mR42 -Wr/6Crxtr2oyvAzLZvAtWCtUEY9fkmJVTGMjzfwRnrvxsfMguwSHKyyrRdDLoQMp -sFn8cRBu0omTIbyMQlHo+QVgdQ+P6LnV9Dbjn5fX7+5ithyu4iTrJFqjDsd47vBN -AgMBAAGjIzAhMA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MA0GCSqG -SIb3DQEBCwUAA4IBAQBIzSylEYbVIl/KzelTWz0FZznczPMJnBlJAa4OGUHt8VuY -viUcItOdjrGhzwCzJn+IfbLtuv7d5YHYKEZZFNbUIPyvRByAM+iNKXXFSSnGBelz -J3onnrM4kMD8TwNZA4AJGpWy7Ntmtw/ie2BZ1AgBR9AhpxLC1Md7zxly59QGmahT -ym2aYUfblwQRm0AgRlccRgyVZdf3fCU5/BxLZkRT/hOKMQbAPPlxIGnpk/uJJcD4 -ZxKXypiQ2tzsKId8VybBYmgdG482bKrd6q/bsJGZx4B1++hHrvIPDaJYrmJG3UMP -x8z5unyK1BzrXFcyID1Dw998m4dhspjJeYPVNd2a +MIIFLDCCAxSgAwIBAgIJANr0Zw8HvJE2MA0GCSqGSIb3DQEBCwUAMCsxKTAnBgNV +BAMMIG9wZW5zaGlmdC1jbHVzdGVyLWxvZ2dpbmctc2lnbmVyMB4XDTE5MDMwODIy +NDU1MFoXDTI0MDMwNjIyNDU1MFowKzEpMCcGA1UEAwwgb3BlbnNoaWZ0LWNsdXN0 +ZXItbG9nZ2luZy1zaWduZXIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQCzVpeu4xdWE4/fu1DNw+nUMC1MbwPpBadcrFH2x2vXfDoQ2tVf0RGNTyWK/loV +2RIIb8uxJv8JmZ4zdsS+nW5b9BvnSqv/rD4Z5Ei4SFPWbsAqCk9gXWMUNDBQHLmd +40AxWhVdFkk2YEbY28KpVeaxRyFiJ0aKBOwhSQoGrd6rRs3NI833ZSZVgtVKl9zU +2pj/htxVOOGdZs3tmQTyNaJaRZQeyu7P6K9tzI736ZDTd8GtvIC3Li+pFCnOmCRS +egLxUvGD0XNxmTrWC9JTHkOMY03d933LY6xFlbs5+qtUfXkJ33pW0hIlFe0ZuBC+ +HJg4l4/EJnxuvod/ONxT991Qfs6LDF1ZJPKlbG1JhUetsrWkRh2tceZ/2T5uMPUZ +qnxCEm2zHpEyzD7rylNKy8t6TbPXog3nbbQQ2kyYL6mP0PRJMijxoo1jQxiv56Qg +1takfgEZCKVCtKOuHESLKv2iDpooiTvmRtBT155DL/zAqdtjkU8VOolw1kwmD9es 
+xtUCw8a4utJnf+ammlOojM1ks90TURNJ5ZC0j2YMYPxeNn5p0SA3xeI2ifoAZOgX +9P8pS4gdf7CfrPf4A86uUwFQqAQURtDGBGDSTr6mpzRayj+itoFMF5nb0uQG704x +56pXHLZL4xQFRnYPfdP3KSR5Kkn9X0OBKDVNmua7d3UpowIDAQABo1MwUTAdBgNV +HQ4EFgQUUs7ay1V2V+BH6EaYr/UkxxPymkcwHwYDVR0jBBgwFoAUUs7ay1V2V+BH +6EaYr/UkxxPymkcwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEA +pTnNCxvoevCkPtxmXhwFhl7DbSd6LMawS69uoxplxAcjj+BQ5CDsh4RaLUhD0NTd +yr0dvuQzLv2s/tzysNlcoug3FjTOS650KYhqjuIQTxv4g7RN/MGEprpvOw7JufXf +zkfzzGJ7jTdBVAh48RNTzDUzsBrTV4mmDNsbBNzeLm1hODyafVl0pvtbBQo6cH6a +QbSHvV855795uSP5Jca1FFcozlm4TSv4nXGFwoELSUwPsuqsWE5Ob7C/fEBT4Tpy +XcCq0an67En1ak99O+DP7a/9F8QFfBKGfsCj9L+t3ITgDVUvxHGAI7g8FMeo/cew +PRAeptXh1BQGSJww25p5fOQOj81f/9hWPUvdcwO9X/XoYZC1IU1RbkhLWBxuLT6j +KuNTCGIjrUlcOfxupzd0ln1TcYebwjMPZQVAFEakXyZIDNkE6SuVWckPcLivIo+F +7qiUCCSa7bLhZR5oYoZqDKc6n0lJUx/HHE3x8aX5LFYmjskEQwcIO/74+JHeOwqp +j+WE3SqIWmEEJN6pihQdXjt83KQZcAYhTSyTPegxXWGfMKcDpySvnKaOc3RmZciy +JrBC16Yl/Fjw1PqM5+j6dTqO67CKDz9nHRqBWhdwicRp7o9q1+wc1vPF20kDivTJ +HYk3ZEdecd1YK6eGO7IhWY1H0qGZZx8+i6jT/ovXutU= -----END CERTIFICATE----- diff --git a/test/files/elasticsearch.crt b/test/files/elasticsearch.crt index 827d83ceb..84f4b88c7 100644 --- a/test/files/elasticsearch.crt +++ b/test/files/elasticsearch.crt @@ -1,22 +1,40 @@ -----BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIBBDANBgkqhkiG9w0BAQUFADAmMSQwIgYDVQQDDBtvcGVu -c2hpZnQtc2lnbmVyQDE1NDA0NzY0NTUwHhcNMTgxMDI1MTQwODEwWhcNMjAxMDI0 -MTQwODEwWjA+MRAwDgYDVQQKDAdMb2dnaW5nMRIwEAYDVQQLDAlPcGVuU2hpZnQx -FjAUBgNVBAMMDWVsYXN0aWNzZWFyY2gwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQCuwB2EC2WbuK/KzVPC7fz5A4TWpLQeYODWIrXd1TIZwE5kMvA0vAtw -09xTka8RiaMrNfu/rOEGg2AZmeqr2O2XluvAUr9ToikTGg+V3pSqOpLD1YIWAI4c -gaN+0oKcn7uNsFj9UUz2D9FLEGcLhvmeErNuyX77LrFoqrSLHvQevhG2tHahO2l/ -SWlxERyxqi/FT3jMgMR0HY1ELZhh+HBScIzA3PFEOAyiMEyLgjbxrMID+yN/zDxD -80/F1YVNwGOQVqnmLOeZHkxachKP+kRoCIHjkPESsrKyKDsOoE9KTpr3bWHvVRa5 -owImvfME+BKIavIJDpuS56FfS4hFdlgDAgMBAAGjgcIwgb8wDgYDVR0PAQH/BAQD -AgWgMAkGA1UdEwQCMAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0G -A1UdDgQWBBRjYOX4tTczh1qY3N1eRU8vbFNwkzAJBgNVHSMEAjAAMFkGA1UdEQRS -MFCHBH8AAAGCCWxvY2FsaG9zdIINZWxhc3RpY3NlYXJjaIITZWxhc3RpY3NlYXJj -aC1pbmZyYYISZWxhc3RpY3NlYXJjaC1hcHBziAUqAwQFBTANBgkqhkiG9w0BAQUF -AAOCAQEApAoIeSKLavhDMZ8v/ilWF/2Q4/OvwLHMf1Tcy8nt2zBAuCWPVxGBnxFT -WcB+udC+ZdaPLg0uYZf3wSAX23WtpDth96yBTOi/6Vp40O4s3K5Miqc//jfWCHc5 -3bbIvI/MNMX8gauPDxYKZazYAWl8bpt4BPnfUbjyMtbb1zDD4Ziv3CYLniQSM+c8 -gb9jxGHdAOfJEibqBdFI1KVwH3HDwG4i/LmZ/ltv7BsHHKtBnzMIS1dMscwScjkT -bJx0pUVsFHmxTP/xfslc1sdKKQi6ebV+HmCi8LM5V8zkA2ZGIG3mYpLe/6d5hMXN -7UfbiC6TwG5KjT+TE4OPq/Gz2gdOtA== +MIIG9TCCBN2gAwIBAgIBBjANBgkqhkiG9w0BAQ0FADArMSkwJwYDVQQDDCBvcGVu +c2hpZnQtY2x1c3Rlci1sb2dnaW5nLXNpZ25lcjAeFw0xOTAzMDgyMjQ1NTNaFw0y +MTAzMDcyMjQ1NTNaMD4xEDAOBgNVBAoMB0xvZ2dpbmcxEjAQBgNVBAsMCU9wZW5T +aGlmdDEWMBQGA1UEAwwNZWxhc3RpY3NlYXJjaDCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBALjD3jvlx4rujhgvQHCk0T3Znj3hGw9c6Tlr3hltpmFs+JXh +z9smk6no/MrVmPuFP8UBLDibQCNZ0DlF2fH6yReD6phYkSw82qTNPdcgCnMJnch1 +3PQJTzx3SaGw+M5whblenXE4HW0fW+bVQloQBw52uvJs9tTkw0v10JlSntnUUnc0 +m2stUii4Omt1XA9ZIrbHa6b8sIYNFQXPnC7MVAFAyg+XN42o8UdOxFbHY+P1Gf4R +nmK2aMDq6PjHNktbaG0WJfoI/BQ/oQtV8zH3/Np8FlKXVB+EjVZTnYYgLNBQZ6tN +92Jw2F2ngIqHUbgC+buIKdYgQE/97cYwjtdQgAQzGRTHfBjDkvgLjx7YCVV9AUDR +tBX5BORD73bNJMPJKxliLUMgXAL5SrFRXn3oNUsjs90BhTT1uoer2iOnfBCcreI+ +KKFbDF/Bss3xv4ierUfRHA6nmv2M8YA9WPsiTYiIekla6o+iTSGleR615s6MD1La +B6QPNr+oyOYuLdnuoLZVhnr4LyVb93k5JjQ0gxeFE/ahM06o2RlYhQLNBcTO1Zrb +/jx8EtGZYzYHBm3vKWOGCCROKAxmyqUxi5cU8kM3KITvBh9TsN212rXZze+1XG3p 
+YiGGK1Bs7MADl54umA7jYEkvvJaJpXIzKdn6OUBYktlxgBGLM72heinSbkl1AgMB +AAGjggIPMIICCzAOBgNVHQ8BAf8EBAMCBaAwCQYDVR0TBAIwADAdBgNVHSUEFjAU +BggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFLGvE/vSnSEFNITtXAhYqFiP +9sYkMB8GA1UdIwQYMBaAFFLO2stVdlfgR+hGmK/1JMcT8ppHMIIBjQYDVR0RBIIB +hDCCAYCHBH8AAAGCCWxvY2FsaG9zdIIVZXhhbXBsZS1lbGFzdGljc2VhcmNogiNl +eGFtcGxlLWVsYXN0aWNzZWFyY2guY2x1c3Rlci5sb2NhbIIrZXhhbXBsZS1lbGFz +dGljc2VhcmNoLm9wZW5zaGlmdC1sb2dnaW5nLnN2Y4I5ZXhhbXBsZS1lbGFzdGlj +c2VhcmNoLm9wZW5zaGlmdC1sb2dnaW5nLnN2Yy5jbHVzdGVyLmxvY2Fsgh1leGFt +cGxlLWVsYXN0aWNzZWFyY2gtY2x1c3RlcoIrZXhhbXBsZS1lbGFzdGljc2VhcmNo +LWNsdXN0ZXIuY2x1c3Rlci5sb2NhbIIzZXhhbXBsZS1lbGFzdGljc2VhcmNoLWNs +dXN0ZXIub3BlbnNoaWZ0LWxvZ2dpbmcuc3ZjgkFleGFtcGxlLWVsYXN0aWNzZWFy +Y2gtY2x1c3Rlci5vcGVuc2hpZnQtbG9nZ2luZy5zdmMuY2x1c3Rlci5sb2NhbIgF +KgMEBQUwDQYJKoZIhvcNAQENBQADggIBAHrfw7A2DVxYtq+OW8ScGx3fEAkfuIy/ +tLc9Rr6Fot0jETz7sB01BnoYtD1NWV6h61Fox39BplYJFmumrjEdPxD3/X3Vf6YS +se1aHcmPdIKklUPBOFPrxnsPDZ1TrPQXiR9RIlmMQHIjsa4tiBYm7CvyPY7V21h/ +0owIEMVgqoM3TzZ5FzafbORaGw3MOqU81KBLP2LZA5mIQYA6v1R8kNvEk9Gp/ICk +OxBdLQYyT9y/U9hw8jGlOZl8U+ztSshYOj3+8/qrh/kUGhAYpFdnDDXgPRelp3vk +I+fPqXgQOQLUjemj1/aKFo8cFqNCi4sniP7A3vlPD1iF3GuEl7pkK8CifkJFsko8 +jNaMIPpmbitsrHhIhLahKE6jukijiloVUeB2hro99dW5IB7aDf9ruebk+MzyMjWn +McCZ0AtNynvOI/DkESzRTodQz6W3p2B20Uskx5QlrMeKNetH9B0nCrgCCllJ7E2d +NqVyamJL7TgkFTSZBCPLsFW0FfdK6wc225OPfDEIMbfUC4ii9jhkt7rlWcqdHcV1 +Vk+R1sPLfDs7ydUsTircZlVX3T1ITGq7cZkmAsGDD1Q9t9HCxebuLxV5l0ubCsvc +uDcl5cwT5dssQ5B7EmLMLiyJvw6SzVU8oHbXJ4/ilH4nWUMWNTh1QZpWCGhf9eaa +V7B/mXvB0B+W -----END CERTIFICATE----- diff --git a/test/files/elasticsearch.key b/test/files/elasticsearch.key index d2daa0e7e..44ac6481b 100644 --- a/test/files/elasticsearch.key +++ b/test/files/elasticsearch.key @@ -1,28 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCuwB2EC2WbuK/K -zVPC7fz5A4TWpLQeYODWIrXd1TIZwE5kMvA0vAtw09xTka8RiaMrNfu/rOEGg2AZ -meqr2O2XluvAUr9ToikTGg+V3pSqOpLD1YIWAI4cgaN+0oKcn7uNsFj9UUz2D9FL -EGcLhvmeErNuyX77LrFoqrSLHvQevhG2tHahO2l/SWlxERyxqi/FT3jMgMR0HY1E -LZhh+HBScIzA3PFEOAyiMEyLgjbxrMID+yN/zDxD80/F1YVNwGOQVqnmLOeZHkxa -chKP+kRoCIHjkPESsrKyKDsOoE9KTpr3bWHvVRa5owImvfME+BKIavIJDpuS56Ff -S4hFdlgDAgMBAAECggEAEnnLZReTYZhwNZ/p2DUzRtyhYdQ+GuwhOWKoGdEdn+1e -HGfqT5kGGKK9q7Q+bzs/HiVx/Xz4uaiQM4S7c/nWfO1+9XGKOvxtHVDnDvBehXT1 -6XCcdVkPrWHjeFgnaQnWWY+oiPfcjDN17FrKmkRmBdlyUSdKnqH8LdXGiuL16n3/ -RHxYQEk4gwDYzCjYLtuDgFQikRWwRLPxm/uNqFFiTn9fUBePITHjN7DMfdYEEkw5 -Q2calBSi+jAZU4P8kMvFwN8k5RKpnEC0t4gR2RagNha7JMfNGJatxY8k23duT32t -tZ8MfTwGvq8Wg09obzLJZu4gOQhrMvualSdKUvZnwQKBgQDW0SMvq+faACglwhSf -c6wK5zZq7rNgzfHQngAca9HqEQ/begz0WTXOulHoOU5/ZXGgKJZUUxQRILlAqJ0r -1x13WaUnf28DGaHbD2DEJUnKo9n+hrVkVmHZqh3RV1y0Gh2bmP0/xkHnvHVj8iUv -eYFqlQaRQqkinMTwmS6sN5FwYwKBgQDQQJSJxUdpp8tfNUyAgSKYvJBuXJeVJHth -ndJjZacJfheVQRcszzAKMloBzJX/SSA295H1S9tpuxwTosTaTYC6KiwkcHAh8U45 -WRP9RvHlcCGcPtv/ZvEIdiS0B203QHK3pTLo2LKuUPD7Z+r6f48EsjMWEGA3sx9Y -L5lG0cF74QKBgAauHj//Qoh+9MCn1mhW+oSkSAbvQVjDTr/EChDYMYDr4uhaVUjL -9WZmWOG11oETsILggDxDER6vO9OM6sZQPDRX71DZH6SjCWt8gO/YkQDiM4c8vStP -nF9ecOrvooxfO4h5K3buPWJtc+EVyJ4/UwzzcW7W+dd/tIge21qzp6wtAoGBAKwr -mCJGzYGWTdcK05KyMI4h5y62skOFg9PyjMvMcMqySoEE7UoIiOkFM9Ck+aWzKUUI -bM1lx6KkJNQ/D4yD4tcGorQFmU8Wlttmv9X332kWyVLJzUxcKQ4bzsBgG3VAfRdD -JtHGxgLDvFe9VFGBQBZb1Ux6gXHLTzCzOX+JTvNBAoGAbZEdqIFyW2JB/I4wwak2 -e6TMrrvEBTXtE2uCiggGoJjwM0VtQu7XE8jWm0V1uLUvlqEzRtoaP8USmUZhoUpa -v0DCKCn3qCMEuHpc8QYCgrOcUwjTrORZ7fJaT5jOVOoApQr6Oj1uuIcr4Xto9tWg -FN9L7EfoA9oUt/fEoUB/zv4= +MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQC4w9475ceK7o4Y 
+L0BwpNE92Z494RsPXOk5a94ZbaZhbPiV4c/bJpOp6PzK1Zj7hT/FASw4m0AjWdA5 +Rdnx+skXg+qYWJEsPNqkzT3XIApzCZ3Iddz0CU88d0mhsPjOcIW5Xp1xOB1tH1vm +1UJaEAcOdrrybPbU5MNL9dCZUp7Z1FJ3NJtrLVIouDprdVwPWSK2x2um/LCGDRUF +z5wuzFQBQMoPlzeNqPFHTsRWx2Pj9Rn+EZ5itmjA6uj4xzZLW2htFiX6CPwUP6EL +VfMx9/zafBZSl1QfhI1WU52GICzQUGerTfdicNhdp4CKh1G4Avm7iCnWIEBP/e3G +MI7XUIAEMxkUx3wYw5L4C48e2AlVfQFA0bQV+QTkQ+92zSTDySsZYi1DIFwC+Uqx +UV596DVLI7PdAYU09bqHq9ojp3wQnK3iPiihWwxfwbLN8b+Inq1H0RwOp5r9jPGA +PVj7Ik2IiHpJWuqPok0hpXketebOjA9S2gekDza/qMjmLi3Z7qC2VYZ6+C8lW/d5 +OSY0NIMXhRP2oTNOqNkZWIUCzQXEztWa2/48fBLRmWM2BwZt7yljhggkTigMZsql +MYuXFPJDNyiE7wYfU7Ddtdq12c3vtVxt6WIhhitQbOzAA5eeLpgO42BJL7yWiaVy +MynZ+jlAWJLZcYARizO9oXop0m5JdQIDAQABAoICAEJj9y30sg/lCl/8Up5nStx7 +ntXDVlLd4twEO4lNkjR90JEi0+p+YR7znipQOClgxvpGIpqwhoptUlnSFK9TmwB9 +IUXexUNtFm+TZD4xwC937B9E4sE5uyQSBP45th2P5y0lp1Mwg7pbQ02dobr0+WQk +G6bSqprzHI3l5S/CtVu2ZG97EsGfRl9lW3CTeiH1uHiPDcqyD8gLWVt+LzPNrf3J +RxH1Fzwq0Y/66Kf5+5XE07Msp4n224s+nboO8x5+2PYhuEGxCstH1dlZ6AozfsMP +RYIl6E9u9M5pOzgfxZqQ2b9/Q3426Dg24QRl/WIuh4fwNIBBlHpp1w+ZESqjPWwo +LfOCJn+rQoHIJpyvCsxY4aSgQ8eFNpvgo9yvqIKGAOVH1FYf2NdXskDNWOgAsfVp +Zs9nGzJ9+DdUc1Bg1RmO4YDOz6wAjjaYbYF46bixhzKtCHnpuIjYUFmSVDrAMLVD +we5isQ3Jx21YT61AD4wqKcSLO0T2bCzUsXD0AKGss09juGByRvNnJY4Rk87DglV9 +LR+BlXdLpuT8Ur8aZwydDRXAKNiQUAaOuMltuv0L/xr2oKVIpuR88foA4Wk4VZd9 +lsF5YpPSJDIgzQ5RD1qXW1z6iox30Wr9yTqikwLoS9roo3/0uFB8U6WtCFadxREQ +jZq97/vZG3wCwEF3ALuhAoIBAQDp6TkcDS2uIWcbScJjf9/kmgOn12pN4FDrTadQ +BunSbx6Sn4SCPFQJOjrFPhyO5BhAI1THelRlxxKuctsWQaQMRJskbS8TulcJX1Pw +nfmT9WgZLqRmx3HqDsceFn64cxewviG8C7/mwGRWdlHnydH5VR2oz/XMBB9v7iAC +t0qyBqdfrguYJ7ymvrIhX72G/PV4PhabOctHFRaeipO8Q8N8P01dISeEZN/5lY5Q +5Cp8zPekYmbfqVs7jT9/Han2t/RIUxD5JCaA+SCmFbJkO2taW2KM54CWgY7L2wMA +MiZvJTfa4YgybUjk5WMT1SmR6FaQnH0CXpf9MiNJYT2Mfc2dAoIBAQDKNowPgtuK +enVJXW5lY3NFvZ0tP9Osd7zDB2VfKj4JQ8QDZsMKR3rWm+NyLkI4BRcQ9ZGhiczG +fmU1eAQGPOjGmi28NEdvB5lZ3gcl9bLhdAw+TTMXhdKoZ+0Wt6FnG9ty9Yp8L++m +y3apQuZgX4k/E3579RKBwjIcllANVVdmYQdI/TAcJXtGQGgiOTxA/4owM+8awp3A +5/nG/mLgpC+cKjycPcVpg36DtYUdTjTNwF6OemBwMyBm+Hyi4SKumJVG4sTqN7V7 +CxgSvwiAi3Gd+wHiAEw5yxmB+dmr1a7vskqeXlSJ23thrM58uRYMuIw61tqV6RFb +DO3c8sZBMo+5AoIBAEH+VOh/J1YrgmWGh9t+pnJeqY1fD1TtZqccf4nqiWmfhCal +7sK3tpXr4czoWzJNVDI0RaUJ9GnKopCpQvqihmAXsxWx4EhWmFvCk3Idf96orDf+ +ms+Mka0RgkgQ3Ku5fQOWgPoG7ptxyF1EgJM+s4j+5KFOGvD2cNAphMp/YAmeKvap +qgfBnk4FG0ijNFuzXqYQDly5D4r8fic4vbmt/Kc/TNprkjSeKTrHYSGdXgdb5Lpy +MMgFhgHlNfbtLZi4CammFfHUqzBUGUwCbxQsV3tksQdEAVl1MA5/ufcCLynIfFu6 +qaxgfOJnfW6JA3Nw84tVO0fB3GvJC3+WecD7dVkCggEAdYdssptBFOSGBOUsA/vg +hNlnYGRnmCdj7AsBbWV09xeO9tckZ3YSPGgQs02VqU+0D2Rbh4M7JOdT1dbyp9mB +BeWYzXmpRywogmYcy4BZvtYfQ5rFJlfej4kP8RM72V0EmHWETxfhd5VsW4aJtdvx +PlsLOerAHfEMBTeMQaOnj9a3UEHwU3upgbRvkeyoS7L603dr8qbI39U0hOdX+u8S +yPZ3kewJMIF/5/d57gWFhUzY6IDSSoQ8wfyNHHI0ITyIDtJA100XyvMww7yKqYPi +voqkloHLzA9yvOvGCyzAZ4q9+fwjsJiDxsqUnUNKQIyG3LACDf2P29bQ7Ymhl0VW +6QKCAQEAjJPpgVZAnJ3dtTRBl/APAT41SiZMABt4HyT1LR05HKDkTm8IA4DSycUc +EpMMayjIFK62W6N952Uk8q9G/pvnJ/6jfvwnln2L3KuXJpyzb/SRjWB07cM8S/Ub +nxdB1pJ6PXaX47YfQzBrcEy0o9+ALlZsIoDYVSGFi7eFHe2c0iVTw6q3SKDjmV5/ +mc6j3d/0ITuIjknx4917VFOXg82Ub6LlwOLMuqnRDUg+1AIWfjqWdqVH2du77jIg +60Qej/jSGFvr58Vo+iwXrW23TEkkTuUq3l6oOhOzCzRontF9s8DCRthLWB7f8cv2 +xKBAD3AaNWNH3GEQS9rRQ6cjuvpc+A== -----END PRIVATE KEY----- diff --git a/test/files/logging-es.crt b/test/files/logging-es.crt index bc763565d..41f2d4fa8 100644 --- a/test/files/logging-es.crt +++ b/test/files/logging-es.crt @@ -1,31 +1,35 @@ -----BEGIN CERTIFICATE----- -MIIFUDCCBDigAwIBAgIBBTANBgkqhkiG9w0BAQUFADAmMSQwIgYDVQQDDBtvcGVu 
-c2hpZnQtc2lnbmVyQDE1NDA0NzY0NTUwHhcNMTgxMDI1MTQwODEwWhcNMjAxMDI0 -MTQwODEwWjA7MRAwDgYDVQQKDAdMb2dnaW5nMRIwEAYDVQQLDAlPcGVuU2hpZnQx -EzARBgNVBAMMCmxvZ2dpbmctZXMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDQmLn0sHTQFLoatj7MEixdlLo6Y/sNeMfes0dADSRdN998wrF9SB9klxbd -jDubqYKDGzMczGM3gHTNQqYC9CqpeV20MeoTnTM0HGai9rp7hCOo46aPEyy+e4pb -c4+TDYoFjnrMo5xTOIhKEjylci+pP3s2fVMibqVwy/+wzbqg9tOjRaDgpOxqBAl7 -U1DAh3jqZKJROCtL6f6f0fMQogjdPQ6F5tVhugItgzwQ/saSBfbKEod/b7AcFhSc -S1ReehU3WSuBTx1cwDQtwbwzwBrZMnqJj0oLEwGHmp4hURcwkXfeYGvLvAsMUbnt -8Mrare//huk1ttgYXr3vSQ43aIkJAgMBAAGjggJyMIICbjAOBgNVHQ8BAf8EBAMC -BaAwCQYDVR0TBAIwADAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYD -VR0OBBYEFJTooKTIdEJg5sQmzUimdDEMskxuMAkGA1UdIwQCMAAwggIGBgNVHREE -ggH9MIIB+YcEfwAAAYIJbG9jYWxob3N0gg1lbGFzdGljc2VhcmNogjFlbGFzdGlj -c2VhcmNoLm9wZW5zaGlmdC1sb2dnaW5nLnN2Yy5jbHVzdGVyLmxvY2FsghVlbGFz -dGljc2VhcmNoLWNsdXN0ZXKCOWVsYXN0aWNzZWFyY2gtY2x1c3Rlci5vcGVuc2hp -ZnQtbG9nZ2luZy5zdmMuY2x1c3Rlci5sb2NhbIITZWxhc3RpY3NlYXJjaC1pbmZy -YYI3ZWxhc3RpY3NlYXJjaC1pbmZyYS5vcGVuc2hpZnQtbG9nZ2luZy5zdmMuY2x1 -c3Rlci5sb2NhbIIbZWxhc3RpY3NlYXJjaC1pbmZyYS1jbHVzdGVygj9lbGFzdGlj -c2VhcmNoLWluZnJhLWNsdXN0ZXIub3BlbnNoaWZ0LWxvZ2dpbmcuc3ZjLmNsdXN0 -ZXIubG9jYWyCEmVsYXN0aWNzZWFyY2gtYXBwc4I2ZWxhc3RpY3NlYXJjaC1hcHBz -Lm9wZW5zaGlmdC1sb2dnaW5nLnN2Yy5jbHVzdGVyLmxvY2FsghplbGFzdGljc2Vh -cmNoLWFwcHMtY2x1c3RlcoI+ZWxhc3RpY3NlYXJjaC1hcHBzLWNsdXN0ZXIub3Bl -bnNoaWZ0LWxvZ2dpbmcuc3ZjLmNsdXN0ZXIubG9jYWwwDQYJKoZIhvcNAQEFBQAD -ggEBAFfSHugsd+3xa7vq3rwW0C3bGRKYBfjcS8ihMUnYhTv8yqertpgawC7tCo2A -EIGPHvg7LvJrJBZup7AajIjm2O7vYBEKDPyuvAweX3j4PI5HCcEQonQ4HsDEHiHa -Ws2tnst6K9wS/nx+2k6TnHTj5phml0jf9YPJkPbpABP5nwaEtXIGjKt43xZbcta+ -QG8KAeTg7HrIbRLQjdIxefJcgLpcNSRHgSjxVXtZ4ioTwRyjQyo5PopaEuAazw4K -hvlZCKM1EiVU2dyimi/5WZCAMOKeeCGlsO6iqNeMnfSxhoc1pAWon07To1WUzJLT -KO691P2qz/k9qgTzIY7tr00Y3cU= +MIIGJDCCBAygAwIBAgIBBzANBgkqhkiG9w0BAQ0FADArMSkwJwYDVQQDDCBvcGVu +c2hpZnQtY2x1c3Rlci1sb2dnaW5nLXNpZ25lcjAeFw0xOTAzMDgyMjQ1NTNaFw0y +MTAzMDcyMjQ1NTNaMDsxEDAOBgNVBAoMB0xvZ2dpbmcxEjAQBgNVBAsMCU9wZW5T +aGlmdDETMBEGA1UEAwwKbG9nZ2luZy1lczCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBALAQOpMbLbAtWkSY0xY0y7yJGLkjf5B5YIDlJRT7VqiYgiMLJ/dZ +RM6TIM+lXFUY09kKaLrDgLwf5BkT5nWQ4umDqCdvkyg3qse3AbQKGQFZp+J2nLKq +2bxhJfQtUwA//5NJfQPKJRbR5qACeAgwz2Mi6OQ/cpI+oL7UF5AfD2UnhXNIzKS7 +APbgkQGkhnRZQnUkL0rCy2RbpbWey3iVRk1o+TV3mFGm5xLZM+rIl9WCsmaBnyRc +GUwUHnj97tObX6VhWBT5tSorXl7hsR2hwSGsoOCmARZNNqf3UGYMnuEGdTxNTQn/ +f61qnVZ6V2QK0sj+eNV4VSn9KJfRBZdp94uF5hr9WhJqcFiV8f2Q8r8VvByWV/0F +x+sOv52ySXZBBAL0t4vcHnkjk+Hk1WfETz5JdN9TNBG4o+1cw5sEQD+wUrv4036O +ire1TFkxG2+ESvZlhiEI5JeLTzNDWVHE9gHFvZJalQpaYg9bgrORrm+Sd5ZTHx3T +wP1vfkkkB1NjMGjqj89DtE0D9aI728Qk4XvnHoX76RM8eVI8yS3NMSEpHnsyQDaR +SF5jYzlG32//BeiN+2n3+yzndxF577eZ3IaKoGpmpdNHhjJp2qOdg8dn6ikQ2I22 +MYnbC4tvYbQbNTJni5WtCfHYPwXdobf7BebZ79Rb6d2HZLLf1eIxzVDLAgMBAAGj +ggFBMIIBPTAOBgNVHQ8BAf8EBAMCBaAwCQYDVR0TBAIwADAdBgNVHSUEFjAUBggr +BgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFOmoJ6cfU96XoeRzgwERiqTVIaqL +MB8GA1UdIwQYMBaAFFLO2stVdlfgR+hGmK/1JMcT8ppHMIHABgNVHREEgbgwgbWH +BH8AAAGCCWxvY2FsaG9zdIIVZXhhbXBsZS1lbGFzdGljc2VhcmNogiNleGFtcGxl +LWVsYXN0aWNzZWFyY2guY2x1c3Rlci5sb2NhbIIrZXhhbXBsZS1lbGFzdGljc2Vh +cmNoLm9wZW5zaGlmdC1sb2dnaW5nLnN2Y4I5ZXhhbXBsZS1lbGFzdGljc2VhcmNo +Lm9wZW5zaGlmdC1sb2dnaW5nLnN2Yy5jbHVzdGVyLmxvY2FsMA0GCSqGSIb3DQEB +DQUAA4ICAQBY8/71A7nl6sm0zzCwbzOmWDLpK4fxmDZyUckKab8PZHukBiJQ7/M4 +Yp3eV++uRoztl9aLgSxNQ0klzPixVFoIfq9EQ70ZF4tgDcVyYVSUsQV9+VTzi/0S +MRnFQ/kBdn0VVfp3WmA2osvQ9H2OFq/GHxfhn7N/YNmBpmY2LcNyO5c10PfOCsA5 +xa+3pZMdsGmsv1BVYJGjUcsbJLYavvTDLGO+l3zAnlTXfSVPKsaeCQePIAnbPj1Q 
+r2f10KsLZH+BnOabTNFN5gz3eslVw11A9lK4DxcBPVDTFp2NT6F22lQMB9AD9AAZ +FJx77OYvtlbinr39/p4awJ7XQPJ9ZS4GkU7vEdznRZE0FOskhNfHu17yedjIBkKZ +Gf1GYo6kcK6BlSEsDtZOBGGJUVfu+kjLYi4375wzd2LejwcBFhZqZ6uDXH4d1L10 +rBq1FfKNmaQ6ULnwget6sBWdFqZxBBr6guwziB9ekpgiXTeqs3OSFLRFVDxoxXQo +mpAXoPwqaLc4TUtJOmYQtE7HBPbpS9OOGyX2V+oXFvszZHQ5NRrGO7M3rUAa/SlX +h/AktX7kLD1FNJLYO2KLBCGtlmj4SCuVhuGEHFlh1HVntoRajmQxhI0wB5wC8WUf +Dll9Hhb6bBVa2QIcz3HSHtR234SPwZ8Pwg9uUNjsQcQCpI41fPLnIA== -----END CERTIFICATE----- diff --git a/test/files/logging-es.key b/test/files/logging-es.key index 36da622b2..04933813d 100644 --- a/test/files/logging-es.key +++ b/test/files/logging-es.key @@ -1,28 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDQmLn0sHTQFLoa -tj7MEixdlLo6Y/sNeMfes0dADSRdN998wrF9SB9klxbdjDubqYKDGzMczGM3gHTN -QqYC9CqpeV20MeoTnTM0HGai9rp7hCOo46aPEyy+e4pbc4+TDYoFjnrMo5xTOIhK -Ejylci+pP3s2fVMibqVwy/+wzbqg9tOjRaDgpOxqBAl7U1DAh3jqZKJROCtL6f6f -0fMQogjdPQ6F5tVhugItgzwQ/saSBfbKEod/b7AcFhScS1ReehU3WSuBTx1cwDQt -wbwzwBrZMnqJj0oLEwGHmp4hURcwkXfeYGvLvAsMUbnt8Mrare//huk1ttgYXr3v -SQ43aIkJAgMBAAECggEBALe6ja+T7I+KchEtDEq6hxE1+oGLpojLJWBmEA++g5UH -rXU6FxL9/4id+6nP5oDI4QeRYYewDoPIRRi153sajKkRGROqEz6La5oumchYARDh -5nbPbNkATpzIO6kpNACWIEeco1SzlvFGDeUQAxLGbGxqWaA46oydYGLli6Syz/sN -VLHNYV3KdBW3q1Qlx69RAhpXoV6+lQN5ocudX4G5/UlZw9IReKPeBd6P+cbtxp+N -904VBNrm7AKmsLOXdilWfzKScq/OVIRgMuldBHLBZxydsDH7B9b/XmFaYqRNktI6 -950CwKdDimOiT9DSVSemPJt7DwgOQBiwKefA7VwJnfECgYEA9XEOtpcc3sBn2ltg -nali5UL1iXrLUyq5enr1S8WlR6eTk3A3wuIyl/tugDgHY15NaNSTBrfS+09W+2iI -ET6t0vcVDyTBK0HKGvRxhocXZ3B3kSShoYi3fH8rUKFk31l6PfRoo2oB00QY9xps -wsnjHb2sUKnbtdlE+/Fe2/Re8uMCgYEA2ZHpD1A84eXxitbbJGNm/MypY/1dIMgp -GGx2bJQxJOQrqpwb1uMf/D45dAZYoQlNjexcWQ+nUcCwd1YI738J790YLpZ8ykSX -zExQD0l3/1NZsfWiNCVw6FF3G7FYIcXkpHnKPOLsiav1nn/YqId0f9TdA9tlBd4r -nCviHrDZnCMCgYBidkdzwm1ngwJmm6/Hr/Mni9QaofM9Wu9rjjYnfpOOilBunl8Q -RFQNmmU69L8qAuPFATo2QLpX7P01ADtNx7pN3qdJGvKPWuYsKBH+6OTfr0OYK3zX -45PQUufagdSIsUmT3x+3JsiyqyfkwWoOPpVpEfU1qt6kNhQxiiQG0yLgBwKBgGuy -n3RWGp0TzvoUGwwrLFqtEJiPXV8R1c3iOsjKnn0NeVQNzHYVncqmHpiBX7PvP/Wl -75yidAey42dfLjMnZorWndbJ5WHWjHGzwNmgW4cHAlpmJfG8KuTFFrmdkuu84s5L -/0Eeb0pC1eWskVnzG0b/3pu/KYW7EUlQK7FXh5CTAoGBAO4rP678+ARN+BTlMqMW -w3zlYCfqNbphe7LkxVaAoqYDpWqdZLs6Z+7cprRhW25b9+NBD20sPwFA2b4UJso0 -smyDLeYBkZttABRelAlX6WCAu6cM20bxnRaQ9MRUcm3NMCQ4ffwwEZqETfQvWDIY -xf1Z2d0DgDMfD77oT6TmYCnE +MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQCwEDqTGy2wLVpE +mNMWNMu8iRi5I3+QeWCA5SUU+1aomIIjCyf3WUTOkyDPpVxVGNPZCmi6w4C8H+QZ +E+Z1kOLpg6gnb5MoN6rHtwG0ChkBWafidpyyqtm8YSX0LVMAP/+TSX0DyiUW0eag +AngIMM9jIujkP3KSPqC+1BeQHw9lJ4VzSMykuwD24JEBpIZ0WUJ1JC9KwstkW6W1 +nst4lUZNaPk1d5hRpucS2TPqyJfVgrJmgZ8kXBlMFB54/e7Tm1+lYVgU+bUqK15e +4bEdocEhrKDgpgEWTTan91BmDJ7hBnU8TU0J/3+tap1WeldkCtLI/njVeFUp/SiX +0QWXafeLheYa/VoSanBYlfH9kPK/Fbwcllf9BcfrDr+dskl2QQQC9LeL3B55I5Ph +5NVnxE8+SXTfUzQRuKPtXMObBEA/sFK7+NN+joq3tUxZMRtvhEr2ZYYhCOSXi08z +Q1lRxPYBxb2SWpUKWmIPW4Kzka5vkneWUx8d08D9b35JJAdTYzBo6o/PQ7RNA/Wi +O9vEJOF75x6F++kTPHlSPMktzTEhKR57MkA2kUheY2M5Rt9v/wXojftp9/ss53cR +ee+3mdyGiqBqZqXTR4YyadqjnYPHZ+opENiNtjGJ2wuLb2G0GzUyZ4uVrQnx2D8F +3aG3+wXm2e/UW+ndh2Sy39XiMc1QywIDAQABAoICABLwi/yLLe0H0/ARKJP49fnA +HcK8yNo31GPJQqXxK69TSJyQdKotFjPKq/rqZPZah1+PuRhuM4kJPTZdZ6s5/M71 +9L7ZR9FJu7tkOnCtrWbxyBeaftzReD7FjJmpzoX2XH3xp9sigb7ksZSA73yiE40s +kjULhj2pMw2ULzy3uqY9DCQMrscBvubP3Yl5s2UMvRNw4w19yvduzHKvNGAyXq+x +HS3dQat28uCaMPWOwpabSqmwQ/hbV/VRLL4DQw/MZQubXyhSfGSe6bX3PWBcqDQB +KAVClldZ0wlal3GC5gx6712fSyxKKefz0h0daA1BBM/OYB1GppKvwxAh0iIpqhFQ +gpGeHGr26Hf79Ucrs9UfVefcjGiXyOwtPdpSpbWx/3Lu2FHaeqNfkpsVZki+ptI3 
+dzgRyTFw3jSHJE3Jb0oNP+UomOpYU9cNVGtz4D8lS6+wVkWT6+KmPr6uTR0hHANF +96m3O7pluH9K1FL/10GB42QiEsIx2XvE5IB1HQVMDTbuX7o6goWwm5FXbmj0R6Av +UNrGbNq91GDNpGPEZvLl4yEz4s0tOxjmZjLUViLEkzc8B/qiHrvhffu1FyYBpuoT +8R58AdtVouNkOZ91EYHwmZ+/iF7QigqzAwTovLcFL0+aYTUXMfZ12eIKNMmq66oa +/+QqEwJyj/96bY6aWjYBAoIBAQDhNHJg274eCoa8JWz0KqyvqlPUTXr5nQdYW6FF +dgD5Zdk+gkD1anAxW9Ay4rZkydrMYteLz0pF+tL7TgHvEaSfAqUxzAo59Mxitefi +up4bPPuQ0u681kv2vl4TK7FGpOjORMg9bwoK6+I0GUvjL3eo2rjR0dMt1C65AoLZ +7uXOQMKSG0CXBDDZdP/9QmhG/rKHEeRXWAiR5wcdm78Tl0Z2XW5xd3DZVYW5b8K3 +dlwgfdnL1CYc8e05dipwVQwe4MdFbkCNfiKDzNwtmtjPIG4AIMyKiNSgMpHPJdXf +sgru5vGyHz23wncg7nJuHblFDL55jjz7G4ANNRS9m6OjMhhLAoIBAQDII4dTJE8Y +DmunNPUWrlU+O+9zO+xvUSnvG+LX3228yqXQ0NP8i5azZmElTE5HpLsQFQZczuk3 +RFKdj9qr+7cCIubmeQEaesfEVhG42ycNAngt4kKO0SAAShAxkMeOnaEjOuzeRrIK +p3ONGXO7MifgI5ULsOwRyJT2hzM9VSB9GMq+wj5FY6EsII3NBiLCVx0TnGyCFWSf +a1gMUxLBDiah+hfBSuBMPLNH3luGTCIT6Sm6s5+Thjwc5825r8N9a06+lHsleHjv +XOvyIsEMqh8DAAjkbY8TfuM82y59QNhh4+cHdYNyAlqM48n5G7YY6VxsgnKhsH9o +ha0BlOoZN1mBAoIBAQCZfwLaq8vYc/pDsQqjHZcYIHMEyE6iZfpEqiewzW56joXV +Cji2TXbs3ZR5qncBGWgtWM1reL06F2zIZvIAfbkDvGzWFSl/OTA5s5y6t9Hd5OHe +YzbftPyP0E8Up/ormWkodk85OD7TFNXYBsnnrknT6EiCko5qtS2nYmPHoQI3Y4J6 +zWJnzC5zeUCl4SZsBVuvnm1RbypgL+R0tNw2wSwAr0wAwJVFPPppWxiPrXe7mavi +NtZHcknrmXPxnrYlMbYAx9Xt2uQxi2cjGmDeRE9VfQWNAxhRnEYvt60fzB2Rmg1x +B8QsRLqn0n5iOZY2zyngdidwS90qo1xgo/2T1SgjAoIBAQCPV/sFjnzj0vhB0wkz +THubTlwrIaEu/WfWbRek288CJ/ZpQBiEygmOxmYPy75JS+/7DcP31u5seg/d7/mD +so6aBhtwuPwUVhocQjUBPF2U0M74thRLq/aKnoFIrtRvDfEqhXq/nMzKGrMFPVSJ +n9u1imam7/m0pwAiLiWB6SWS053q0L6+/iOislI9pQZiQPh/YkrJRL9D+yMd+KX3 +Bcafvrsi4xkRIWyareTJpF/H+Pi9UweZJsyJO3E6bGvMuX21vC0TYlzju572VvQD +uRjrEixMRzfxk7D3lliOt1IDkrKWwhD6KDtoq4GuXK2o3AMpaKjnq70lRJDsEBc3 +JlCBAoIBAQDd4E1cVyT0J5B3dX5KTWlhzUCqAetrpcKp1VNQ59D+GU8sPL8Cqhw7 +wqzd0KIn0ztK5m7pNParlhy6RTN818TjTMVc9vJUWXUX5kHwwFQWmJ9deaNC05e3 ++7yv6LLfNvourE1UYIkBCxtTGzFNVsr+ekU5RpfAOOoBx6lgygOGnMfrB8o/skd9 +QztK28LWeKB4Dc8HT9Q7EWiNKKZhpNYxiZdsmopg2xLGnUYSfm2iw2SkD7RoLUAX +PX9o36p0npgnQk/8MK2g4ovbcRJv2GIDU7bZiI4n2U1dEPDj7I64ivn1MGP4bDKh +QrzsamhY4r/rtblZdYABajuM87+gzslt -----END PRIVATE KEY----- diff --git a/test/files/system.admin.crt b/test/files/system.admin.crt index 8b8cb3e84..c41220de4 100644 --- a/test/files/system.admin.crt +++ b/test/files/system.admin.crt @@ -1,20 +1,31 @@ -----BEGIN CERTIFICATE----- -MIIDRDCCAiygAwIBAgIBAzANBgkqhkiG9w0BAQUFADAmMSQwIgYDVQQDDBtvcGVu -c2hpZnQtc2lnbmVyQDE1NDA0NzY0NTUwHhcNMTgxMDI1MTQwODEwWhcNMjAxMDI0 -MTQwODEwWjA9MRAwDgYDVQQKDAdMb2dnaW5nMRIwEAYDVQQLDAlPcGVuU2hpZnQx -FTATBgNVBAMMDHN5c3RlbS5hZG1pbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBALU3uDje/90jJpteGrCphlyhJV3Ln9r0f2Ri1v9vv6uESexW/gm7ng96 -LjHXGOifjY6CsXT00JAoq5R/6stu3Ey6zBgFmoFGPOheJM0wBciSNZvoid5bdt5C -D0RRUiXUHyPdMrxVRrXGPQMnR6iGgz5FdwWJhznIMiMJ45UfH9WJzcAP/nTy4nZF -fmhKUj+MgLWP1D3Agr6gapEPLfSqFmJRK+kWSr/8HH4FwFJ8L20JumLayjfULUlK -wU5wVP+05NqyrjIt8RvHrItwxvXQaC8GYddaEhxJAo1+fqlTaCz0YZCtTahSzUNi -IWPMRy4U/RiaBrALnEaUMOIm9G1g/PsCAwEAAaNmMGQwDgYDVR0PAQH/BAQDAgWg -MAkGA1UdEwQCMAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1Ud -DgQWBBT5J1FePw4bhAa9gAMZzudpYB6n3DAJBgNVHSMEAjAAMA0GCSqGSIb3DQEB -BQUAA4IBAQAtkF0Rxbo8en/r/fJdz0Z0zKmufdnJ2mprB42OLDdZSj6khDuO6BHa -oDFdJ8wT6iMSeMMPwow3/9CAH+NMfwy+fRHu1BMVeH11D7c4pL4UgdfNrK8CCsqA -XYd5jalEsznho6W4ILAP6/9JtSyJlK59+q7qIV+YJcmI8SVCr1YxBZnls8xTk1mR -09Xgil07q32QPLnhPgtTa9IB2Ph+F4Ffa37v5bP0ijtkSrp9MRilzVPG72NNpOQt -KjHq7aN4irLJeT/GoAJ0uYRBVGjg9sr7zVnhaYYgdMHob2Bz+BDqlLQnHrH9HNEu -6XoKw17fmSk6lCs0GYxi4TyzkukHWb34 +MIIFXzCCA0egAwIBAgIBBDANBgkqhkiG9w0BAQ0FADArMSkwJwYDVQQDDCBvcGVu 
+c2hpZnQtY2x1c3Rlci1sb2dnaW5nLXNpZ25lcjAeFw0xOTAzMDgyMjQ1NTJaFw0y +MTAzMDcyMjQ1NTJaMD0xEDAOBgNVBAoMB0xvZ2dpbmcxEjAQBgNVBAsMCU9wZW5T +aGlmdDEVMBMGA1UEAwwMc3lzdGVtLmFkbWluMIICIjANBgkqhkiG9w0BAQEFAAOC +Ag8AMIICCgKCAgEAsdS2urpvLCQ0jwcJC7sxhx9vOgFGWmVUCc3q5UDUXrJ4pi2x +ZkvaM/X7cy7/Q/uEthAWBmpTB3kQg/w5Fg2FGS8cwBU3G6lVGqseCxH9RVNAd7Lm +h5Ot1yRUB3V6vh8X9c9c49IHBI66MscefYMN/tPYMPp3ebDKDPaMswqp/eButhUi +3Dq6fwZlZWvf7MsAcl5rUzP8B5EGd4E8hRKxUB19F1k+1nz3gP7Pg4KUljNwjZYj +/BISb5E84HyL1newW5AYyAfhIjbkwDlYRUQQo+skHAkxQxbZhezv4CcUO4i8Q9IX +NTf0Yy7XR6G9CNMFPjFfdwIUf/2GfPtn2b5GgqqVz06yytrPr3k92RArkrbgpHLK +ssoZTTz9GzT1QCOywOBE2f6mcdkquzY69PN4HmkYmM3c736cFRO9Nrl54HQJ9L0F +hVSN0XuwfSuqbkU2/2iMcWf01ZkQJQTNKvEEENn/rfrbsgC3GATrasEXIKffWPkf +fuYWqDKtKZjAN0wiBoUniNUxGxqK3wM0LWj1mpxdDFlqzVMAD9EQa/V1dYGGU+tU +OQbtsj2riVA/T9R5gSRMwbLlQmd7zMmTvD4PzASwE1mK7d2DXT7vJFGug9nulLXH +rdjaUhFNYBWkjhWW4AjqaJ4tMEruN7UMjljupLyWSMmSlpU+Lo4yQFlzyZMCAwEA +AaN8MHowDgYDVR0PAQH/BAQDAgWgMAkGA1UdEwQCMAAwHQYDVR0lBBYwFAYIKwYB +BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQ+IZwmNLTNKTk4VWDC6Tmwfe/UvTAf +BgNVHSMEGDAWgBRSztrLVXZX4EfoRpiv9STHE/KaRzANBgkqhkiG9w0BAQ0FAAOC +AgEAWvRn6X6EFpiRGEYD87BQ/5fn/u8oiBKh/lmhkOhbC/UBVn0jaBqOkTDUdc2x +Xlfqiowb2RuySTXjgRHKBZGyTDc/bCptP8vRl0Nd5XpeuIdbsuPrW0oapZ/mWdUP +L5WPIX/uByGcEuldKa7mjep58F+4wLFQeOfZ63MSumGs7dKKKXHKVb2CXMWfeKa2 +UIyYQM44AT6xq70sEjzTUutbwjwdFOuf/TXNnjLQXFxL42tS35Xd5b4uIUeMjUHM +Wj/HfcBkQPsPRqRuoiqu3cWaO/+gRiOdTiqC/E8bw0C+pkrc1OeGfjeLbbEzVNOZ +KogJFLcaJwqL6D8jqyHCSMEJrzJ26+rmZ2kmjfFtaOBwFD//IIA+NyMx4hsfAeHx +KuWk5E+EGHEFwj2VupW7c0mRzNt8kwEwmSmc2Sz+CZDrFzTmsFXhLHagi8YepdQ7 +Q+THUhATBpURZxx2mdKKbZqcUO262aFtnsNnGulLxw7lE+3Fq/MD4A/v1j+2Y1C/ +6kQfdw7AXrWkENqTzrXTIqURJDzPQPbJbckhdMke9vSSqJ/ZyubEl1Jr8MIhQUYM +Qcl8d5UCx3TYm6fKTfWD4a/u2ctopS3w6cSnD4YW9jcZ/BXNfanfsSHnYMmO45lJ +tgi5JpGppFODDtELjwJtClSknajTp/Kt6y/BfW/WvN9xaIE= -----END CERTIFICATE----- diff --git a/test/files/system.admin.key b/test/files/system.admin.key index 41e328400..a6b1f8bea 100644 --- a/test/files/system.admin.key +++ b/test/files/system.admin.key @@ -1,28 +1,52 @@ -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC1N7g43v/dIyab -XhqwqYZcoSVdy5/a9H9kYtb/b7+rhEnsVv4Ju54Pei4x1xjon42OgrF09NCQKKuU -f+rLbtxMuswYBZqBRjzoXiTNMAXIkjWb6IneW3beQg9EUVIl1B8j3TK8VUa1xj0D -J0eohoM+RXcFiYc5yDIjCeOVHx/Vic3AD/508uJ2RX5oSlI/jIC1j9Q9wIK+oGqR -Dy30qhZiUSvpFkq//Bx+BcBSfC9tCbpi2so31C1JSsFOcFT/tOTasq4yLfEbx6yL -cMb10GgvBmHXWhIcSQKNfn6pU2gs9GGQrU2oUs1DYiFjzEcuFP0YmgawC5xGlDDi -JvRtYPz7AgMBAAECggEAPMQqaiec9LJMx9yOZp7yBy2iMgmN2QYTkb3zCkSWqRT7 -FN024lyfa+E0jFHscFTQSvFYQDe3mB+LKYibWwwZo0EgHw4B+UdsNR8ciBGKXu9o -OpeHY6CYA39zTx1VxomSx5vs32pZA6st2ljZlpFEgSUZ/ClZxu5yoX03a1G8gwXZ -znDwoaOtIgpQBk1VZU8/GRs6ykQMMDSNwBmHpyfBK+ySmo1I8THQ39ggO4YO/V4T -2bG2iAqZKbU0QezPDwHjvwVEgwAIRzJm6W/YWMN3gsPmS9nxJVxSsB34I/48Jy18 -VCFGS9ARlwnwmnNKpgHX/03ncHHhuVbezyUGHvq/wQKBgQDl6UaFW/QOwHqD/xJH -eMEAKNNr4EDHkH/bBjbCX06WzaCKe+Mjj/v6ho3djWZ51YnM5XDAf8g0Tt8AdhDp -Hkl0HmBX5lWpYnNcR+CCTgm8YKgsgyA+M/kDXc3C3quFPZgk2Z/vQGRZ+UGFuX2R -oss9XrilZ5qIKWZpR11jk2zi3QKBgQDJx+/47YR6vOHJSpbeerIaOF6cTXu97SnP -gu6yNyWJOuDr5crUc0Dgno0KJdauj/w7fqdzAZADLeHaHEw2V/MYwFEteYypraYo -nMNo/k66pPNlsugiAs8Eiiw18bjwYob4wkNkZ3wKRZzuXn8cIk0IrrjDP2L7Mayc -8hBsjYaFtwKBgF6VDib0wj2DX6OHvqxaeAOpx/hcJCo1PvQK5WInNTyC0WXHO5U4 -ua/BxQD9F8hNWchKJ415xBII1GLrL1/Hzu4PRRFEN79cOhsfkrPoiMbPeL//HbeZ -R/rjjy94zVUeFsYqJUqW0fHVwefSfF76euP/YLTjH0aeyXvMGRZK4wOdAoGBAJ+P -a0P9DnXARwtV4FetzojNaoNPAjr9/8IIV5i14wjRdmsUG4BEY1k8B2uu4ETJtzuU -u/VFlwvsjFxdZCbVBXbdHVRIeeLzXiFfVWcBMmCFb7NL9iRu9CUy3iMKMqRkRTcQ 
-5xnOe1fkFGSqdPlaZnAnOhOrXCom7cht4m70uxuhAoGAELJB9PwxgIFiBm7aJPQY -22bKHYrhGq2b4xAjqXENHGjbtjBteFwwEi6yi14AvxLOIeXepYodK5JtfnBjflGm -PUlbissZ7KoU7epmPyfvoHQIcIUfwK++AiI4aZWpDzteBfIfvCi3eaFzMAdoekkO -7Ehx0rSkWA4nWIrGCDjY/QQ= +MIIJQQIBADANBgkqhkiG9w0BAQEFAASCCSswggknAgEAAoICAQCx1La6um8sJDSP +BwkLuzGHH286AUZaZVQJzerlQNResnimLbFmS9oz9ftzLv9D+4S2EBYGalMHeRCD +/DkWDYUZLxzAFTcbqVUaqx4LEf1FU0B3suaHk63XJFQHdXq+Hxf1z1zj0gcEjroy +xx59gw3+09gw+nd5sMoM9oyzCqn94G62FSLcOrp/BmVla9/sywByXmtTM/wHkQZ3 +gTyFErFQHX0XWT7WfPeA/s+DgpSWM3CNliP8EhJvkTzgfIvWd7BbkBjIB+EiNuTA +OVhFRBCj6yQcCTFDFtmF7O/gJxQ7iLxD0hc1N/RjLtdHob0I0wU+MV93AhR//YZ8 ++2fZvkaCqpXPTrLK2s+veT3ZECuStuCkcsqyyhlNPP0bNPVAI7LA4ETZ/qZx2Sq7 +Njr083geaRiYzdzvfpwVE702uXngdAn0vQWFVI3Re7B9K6puRTb/aIxxZ/TVmRAl +BM0q8QQQ2f+t+tuyALcYBOtqwRcgp99Y+R9+5haoMq0pmMA3TCIGhSeI1TEbGorf +AzQtaPWanF0MWWrNUwAP0RBr9XV1gYZT61Q5Bu2yPauJUD9P1HmBJEzBsuVCZ3vM +yZO8Pg/MBLATWYrt3YNdPu8kUa6D2e6Utcet2NpSEU1gFaSOFZbgCOponi0wSu43 +tQyOWO6kvJZIyZKWlT4ujjJAWXPJkwIDAQABAoICAFQnm8nGDHJRN+YvqCI7Ffch +8xr6G3cP2LNDFVQkV9vwjZPmr7r/TmWklLgvl3Fuh6E4/5NNobk5m406QTGkeEYw +u2RTJd8bRUD3laIbg0XZXfrHWLz0MCJN+M4G1G1AfbA/z2+optWLTaZWAKHY8TiU +vAyBmySlexijXHYmJ8gS+5GDcxnRWQxf1IAoirGeZ7m34QZg5XYXNX48VI2NCQgm +zFnOTCRowx/ydfWASBzEfxEh6imRy9OsYajCh9KYlYbfLDsNL4dnft1QxesRiOZp +ko8J8pwTJiFwvRvQooyB6sYVmBIBRs/hDNPDQJf6dNK3vrus6lKfgZOHzd+HYgoN +6wjnQ8CoFeOwKjNKiUZXzAjmljPjRQ+GC7U2CAay/8wK1EfJsfjcEFi6dzuJxvW7 ++G3858Lhpey2JHSJl2Pvu7B39q6CFVhZzhkzdxk83wV1NHMJq+qvQ2IYlaTRWQga +kkHx5XfdsCZeR74vgjXSB6CoTQO4PCCDgdIJf9IOzPHELlC4bxbT6JhCe1DH7Dfs +AaMk/2epel12409NMYkjlJYFc8aBOEFHlWra083AOQMzVLABYq4sHZkRfLj2un4e +VXRjgBAbr9ycT56CiEm/TxpRRsXUBCWlSoyH3XK0FHl6A3vfoRWtM2Lj86K3wKoy +V6l5x3m8JwUDk/rQ9sNRAoIBAQDc7BOI/KcGz4ktJAWELV5/uyxB6iOXJW3YM0n0 +tyVdXCTgtT5VC8uadwguY36UHGvEhTKqwwsGx4GD2zogtdYwGEuFlc1LnKxTNbky +YdO/yUWBdhQFiXKbx3MpesDCf4768paT0RboxiDBSFbXjpyVia2wz4QQIZW5dfVZ +Fzu1x+x9dukW9H/CUxW7CSMOZXuJc/RWytYuj42+B9vBF0HWU0Q4pISc3Hlgw4AI +qJEvRQsgU+HUQewmeACY8kJrqy/sNtPMDbRbBDinLusbZaKI+t11uplOJvJRscrh +Iyv1m1gcMxN0r8VcWfiR5lfoivu/lzBTCFIQNCEAiR+tqE+1AoIBAQDOERZnB2E5 +qA+p1oqfOI4L+RrcvWiStixU3SN/0uQn1Z5WXGSfb84H0TGcyIFokOBUybq3bwNP +qHqRPkwvRqTkk5OPXl1Hyq0lpyHtoEJEHkAur63H235/5mhLWhijHp7MOm+9OLdt +6xacUhlRFsKawBhF+Iy6pzpDLhQcpXhWllIVjnoDvWWmcNa+fUJ9Um4EOLBpwADX +l2UO6pk20iO2T+OFPVl58T2eZhfFsXNytxDdMbgmMo2mh0/8e0me/U7YPe03T2ZZ +cJip8QhpQcM5Z1RC1QH2iRkczC+TBIo+sgm6dLAmEJCzkX0qdVESk1qlc79SIXEL +8SG41tIEFjEnAoIBAGYiFd3sv+McKxTdZFd5CgkPJSTL8+w6d4/OFlC2IuYIZVwl +Lk3vCA+/G98mTCx4/zF5SrU7OmvF7BBjV3or1nuhn7iASsq8AcbuPVIXe+dcS2/Q +gO0WYA+4o3r3rwm8IwPNBuQLdAXlItt4b/1zhtxzLANjzHAQtsFo0SaA4S/m32QZ +hyT/n9jcxF0VmklbjUM8gidl2qLn5uWFiIi6Ecvd7too5M4H921OtHBMTeKGjuBB +J1QTrfMS42PSC/buy1bu+feKFmlFiFNyE2s1D3E4WQ3GWH2S78/o3Bw9QGNpj9Bp +pIL7h8hsJ8h7rwRozH4EjWi1ngqzrtYqjqSf/UkCggEAdYYXUcFKuPhZnpJmUvKN +SNTUdipsNqhBVlEcyuz7BKD3geeF18yLI7gyZGmSLJHb719x5uYgbFD5PlbaWmge +6OIl2TGHX8d+wqe0WPL6eCEMl9PH9+D+H3HH75m9zWJMTMvTKIbtTnoyVgsYtz+U +029VKKrdkCKJvwDLpZ7VqARpYjP08KcXDzrxroh/4Duf1TaDnnxLvqlwkHZJ1ZM5 +nOOpLjdUDZEBhJJTYoOXBUjVDGIr1VUlpmErxCKIVW3AhuGipdXZ2I638swt8OH5 +toZw7wDO8s00DlGBnIaNVb5yf+3G/J6b+F20lf98smxp2UzPQ8cg0x++DM4vzvbd +JwKCAQAoPDj4AVyYq22aOqldzdNrBKR1BRYS//o36zBxiOWpK0lU053dKd8/+aqY +tvvrzT5Esugbdp0vVN2Y3wg1RLBHLpq7hEc2IE/Qk60mkfTgT2Pxr61Vp/99jU4A +LsiRH+42U3/WszgmJE3cjArFujxQVRYPcZyHvthcsOgQej3sIv3IOwfNNvgFAlaz +CnGrL+YH7sdKLxMDj64UCtxR+O4ktbThu7W6XpermPgJcOtH/fYMaYEKLXuFdP3e +iaX/RplC9JzarbBdA+Sp5pwy+jO9umjX11lkgJyD8zxhI7p2222Ntuv6G+m9Bbjz +DTZvgTswB+AEWfMHRQtVhVkuysA3 -----END PRIVATE KEY----- diff --git a/test/utils/utils.go 
b/test/utils/utils.go
new file mode 100644
index 000000000..3af31d49e
--- /dev/null
+++ b/test/utils/utils.go
@@ -0,0 +1,67 @@
+package utils
+
+import (
+	"io/ioutil"
+	"testing"
+	"time"
+
+	"github.com/sirupsen/logrus"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// GetFileContents reads the file at filePath and returns its contents, or nil if the file cannot be read.
+func GetFileContents(filePath string) []byte {
+	contents, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		logrus.Errorf("Unable to read file to get contents: %v", err)
+		return nil
+	}
+
+	return contents
+}
+
+// Secret returns an Opaque v1.Secret with the given name, namespace, and data.
+func Secret(secretName string, namespace string, data map[string][]byte) *v1.Secret {
+	return &v1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Secret",
+			APIVersion: v1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secretName,
+			Namespace: namespace,
+		},
+		Type: "Opaque",
+		Data: data,
+	}
+}
+
+// WaitForStatefulset polls until the named statefulset reports the expected number of ready replicas or the timeout expires.
+func WaitForStatefulset(t *testing.T, kubeclient kubernetes.Interface, namespace, name string, replicas int, retryInterval, timeout time.Duration) error {
+	err := wait.Poll(retryInterval, timeout, func() (done bool, err error) {
+		statefulset, err := kubeclient.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{IncludeUninitialized: true})
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				t.Logf("Waiting for availability of %s statefulset\n", name)
+				return false, nil
+			}
+			return false, err
+		}
+
+		if int(statefulset.Status.ReadyReplicas) == replicas {
+			return true, nil
+		}
+		t.Logf("Waiting for full availability of %s statefulset (%d/%d)\n", name, statefulset.Status.ReadyReplicas, replicas)
+		return false, nil
+	})
+	if err != nil {
+		return err
+	}
+	t.Logf("Statefulset available (%d/%d)\n", replicas, replicas)
+	return nil
+}
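
Note: the following is a minimal, illustrative sketch of how these new helpers might be consumed from an e2e test; it is not part of the patch. It assumes a plain kubeconfig-based client rather than the operator-sdk test framework, and the test name, kubeconfig path, namespace, secret name, data keys, and statefulset name are hypothetical placeholders. Only the test/files/system.admin.* paths correspond to the fixtures updated above.

package e2e

import (
	"testing"
	"time"

	testutils "github.com/openshift/elasticsearch-operator/test/utils"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// TestStatefulsetBecomesReady sketches the intended flow: build a client,
// seed a certificate secret from test fixtures, then wait for a statefulset.
func TestStatefulsetBecomesReady(t *testing.T) {
	// Hypothetical kubeconfig path; an e2e harness would normally supply this.
	config, err := clientcmd.BuildConfigFromFlags("", "/tmp/admin.kubeconfig")
	if err != nil {
		t.Fatalf("failed to load kubeconfig: %v", err)
	}
	kubeclient, err := kubernetes.NewForConfig(config)
	if err != nil {
		t.Fatalf("failed to build client: %v", err)
	}

	namespace := "openshift-logging" // hypothetical namespace

	// Bundle test certificates into a secret using the helpers; the secret
	// name and data keys here are illustrative, not taken from the operator.
	secret := testutils.Secret("test-certs", namespace, map[string][]byte{
		"admin-cert": testutils.GetFileContents("test/files/system.admin.crt"),
		"admin-key":  testutils.GetFileContents("test/files/system.admin.key"),
	})
	if _, err := kubeclient.CoreV1().Secrets(namespace).Create(secret); err != nil {
		t.Fatalf("failed to create secret: %v", err)
	}

	// Wait up to three minutes for a single-replica statefulset to be ready.
	if err := testutils.WaitForStatefulset(t, kubeclient, namespace, "example-es-node", 1, 5*time.Second, 3*time.Minute); err != nil {
		t.Fatalf("statefulset never became ready: %v", err)
	}
}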