diff --git a/Gopkg.lock b/Gopkg.lock index 8bca278e5e..87fa20c549 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -372,13 +372,13 @@ version = "v3.9.0" [[projects]] - branch = "master-post-release" + branch = "master" name = "github.com/openshift/elasticsearch-operator" packages = [ "pkg/apis", "pkg/apis/logging/v1" ] - revision = "8277848ad2e56dd1c660fa889f6e3df32b2395ec" + revision = "e40983a4e4b9a0506a758b1912b989a840362d51" [[projects]] name = "github.com/operator-framework/operator-sdk" @@ -1058,6 +1058,6 @@ [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "f83b834b36d9ad1334604ffc4a2a7114e81badd97287c3765a5dbcc3c9cc2fef" + inputs-digest = "c50b635f491130c2c9ef10f305a7ff28c321842f8e48a415fb7e645a11e13ba3" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index c12957f8c1..12f1e1f9e0 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -64,7 +64,7 @@ required = [ [[constraint]] name = "github.com/openshift/elasticsearch-operator" - branch = "master-post-release" + branch = "master" [prune] go-tests = true diff --git a/Makefile b/Makefile index 793a957130..0468591cfd 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,7 @@ export IMAGE_TAG MAIN_PKG=cmd/manager/main.go export CSV_FILE=$(CURPATH)/manifests/latest export NAMESPACE?=openshift-logging -export EO_CSV_FILE=$(CURPATH)/vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearch-operator.v4.1.0.clusterserviceversion.yaml +export EO_CSV_FILE=$(CURPATH)/vendor/github.com/openshift/elasticsearch-operator/manifests/latest PKGS=$(shell go list ./... 
| grep -v -E '/vendor/|/test|/examples') diff --git a/pkg/apis/logging/v1/clusterlogging_types.go b/pkg/apis/logging/v1/clusterlogging_types.go index 5ff37f33d2..cfc7e4cf47 100644 --- a/pkg/apis/logging/v1/clusterlogging_types.go +++ b/pkg/apis/logging/v1/clusterlogging_types.go @@ -27,11 +27,9 @@ type ClusterLoggingStatus struct { LogStore LogStoreStatus `json:"logStore"` Collection CollectionStatus `json:"collection"` Curation CurationStatus `json:"curation"` - Message string `json:"message"` + Conditions []ClusterCondition `json:"clusterConditions,omitempty"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // This is the struct that will contain information pertinent to Log visualization (Kibana) type VisualizationSpec struct { Type VisualizationType `json:"type"` @@ -100,6 +98,7 @@ type CuratorSpec struct { Schedule string `json:"schedule"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterLogging is the Schema for the clusterloggings API // +k8s:openapi-gen=true type ClusterLogging struct { @@ -115,10 +114,11 @@ type VisualizationStatus struct { } type KibanaStatus struct { - Replicas int32 `json:"replicas"` - Deployment string `json:"deployment"` - ReplicaSets []string `json:"replicaSets"` - Pods PodStateMap `json:"pods"` + Replicas int32 `json:"replicas"` + Deployment string `json:"deployment"` + ReplicaSets []string `json:"replicaSets"` + Pods PodStateMap `json:"pods"` + Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` } type LogStoreStatus struct { @@ -126,13 +126,17 @@ type LogStoreStatus struct { } type ElasticsearchStatus struct { - ClusterName string `json:"clusterName"` - NodeCount int32 `json:"nodeCount"` - ReplicaSets []string `json:"replicaSets"` - Deployments []string `json:"deployments"` - StatefulSets []string `json:"statefulSets"` - ClusterHealth string `json:"clusterHealth"` - Pods map[ElasticsearchRoleType]PodStateMap `json:"pods"` + ClusterName string 
`json:"clusterName"` + NodeCount int32 `json:"nodeCount"` + ReplicaSets []string `json:"replicaSets,omitempty"` + Deployments []string `json:"deployments,omitempty"` + StatefulSets []string `json:"statefulSets,omitempty"` + ClusterHealth string `json:"clusterHealth,omitempty"` + Cluster elasticsearch.ClusterHealth `json:"cluster"` + Pods map[ElasticsearchRoleType]PodStateMap `json:"pods"` + ShardAllocationEnabled elasticsearch.ShardAllocationState `json:"shardAllocationEnabled"` + ClusterConditions []elasticsearch.ClusterCondition `json:"clusterConditions,omitempty"` + NodeConditions map[string][]elasticsearch.ClusterCondition `json:"nodeConditions,omitempty"` } type CollectionStatus struct { @@ -148,21 +152,24 @@ type EventCollectionStatus struct { } type FluentdCollectorStatus struct { - DaemonSet string `json:"daemonSet"` - Nodes map[string]string `json:"nodes"` - Pods PodStateMap `json:"pods"` + DaemonSet string `json:"daemonSet"` + Nodes map[string]string `json:"nodes"` + Pods PodStateMap `json:"pods"` + Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` } type RsyslogCollectorStatus struct { - DaemonSet string `json:"daemonSet"` - Nodes map[string]string `json:"Nodes"` - Pods PodStateMap `json:"pods"` + DaemonSet string `json:"daemonSet"` + Nodes map[string]string `json:"Nodes"` + Pods PodStateMap `json:"pods"` + Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` } type FluentdNormalizerStatus struct { - Replicas int32 `json:"replicas"` - ReplicaSets []string `json:"replicaSets"` - Pods PodStateMap `json:"pods"` + Replicas int32 `json:"replicas"` + ReplicaSets []string `json:"replicaSets"` + Pods PodStateMap `json:"pods"` + Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` } type NormalizerStatus struct { @@ -174,9 +181,10 @@ type CurationStatus struct { } type CuratorStatus struct { - CronJob string `json:"cronJobs"` - Schedule string `json:"schedules"` - Suspended bool 
`json:"suspended"` + CronJob string `json:"cronJobs"` + Schedule string `json:"schedules"` + Suspended bool `json:"suspended"` + Conditions map[string][]ClusterCondition `json:"clusterCondition,omitempty"` } type PodStateMap map[PodStateType][]string @@ -236,6 +244,30 @@ const ( ManagementStateUnmanaged ManagementState = "Unmanaged" ) +// ClusterCondition contains details for the current condition of this cluster. +// Status: the status of the condition. +// LastTransitionTime: Last time the condition transitioned from one status to another. +// Reason: Unique, one-word, CamelCase reason for the condition's last transition. +// Message: Human-readable message indicating details about last transition. +type ClusterCondition struct { + Type ClusterConditionType `json:"type"` + Status v1.ConditionStatus `json:"status"` + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// ClusterConditionType is a valid value for ClusterCondition.Type +type ClusterConditionType string + +const ( + IncorrectCRName ClusterConditionType = "IncorrectCRName" + ContainerWaiting ClusterConditionType = "ContainerWaiting" + ContainerTerminated ClusterConditionType = "ContainerTerminated" + Unschedulable ClusterConditionType = "Unschedulable" + NodeStorage ClusterConditionType = "NodeStorage" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterLoggingList contains a list of ClusterLogging diff --git a/pkg/apis/logging/v1/zz_generated.deepcopy.go b/pkg/apis/logging/v1/zz_generated.deepcopy.go index 0c0ac2a558..b90622f586 100644 --- a/pkg/apis/logging/v1/zz_generated.deepcopy.go +++ b/pkg/apis/logging/v1/zz_generated.deepcopy.go @@ -5,16 +5,35 @@ package v1 import ( + loggingv1 "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" + corev1 "k8s.io/api/core/v1" runtime 
"k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCondition) DeepCopyInto(out *ClusterCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCondition. +func (in *ClusterCondition) DeepCopy() *ClusterCondition { + if in == nil { + return nil + } + out := new(ClusterCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterLogging) DeepCopyInto(out *ClusterLogging) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) return } @@ -72,6 +91,10 @@ func (in *ClusterLoggingList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterLoggingSpec) DeepCopyInto(out *ClusterLoggingSpec) { *out = *in + in.Visualization.DeepCopyInto(&out.Visualization) + in.LogStore.DeepCopyInto(&out.LogStore) + in.Collection.DeepCopyInto(&out.Collection) + in.Curation.DeepCopyInto(&out.Curation) return } @@ -88,6 +111,17 @@ func (in *ClusterLoggingSpec) DeepCopy() *ClusterLoggingSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterLoggingStatus) DeepCopyInto(out *ClusterLoggingStatus) { *out = *in + in.Visualization.DeepCopyInto(&out.Visualization) + in.LogStore.DeepCopyInto(&out.LogStore) + in.Collection.DeepCopyInto(&out.Collection) + in.Curation.DeepCopyInto(&out.Curation) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -100,3 +134,771 @@ func (in *ClusterLoggingStatus) DeepCopy() *ClusterLoggingStatus { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionSpec) DeepCopyInto(out *CollectionSpec) { + *out = *in + in.Logs.DeepCopyInto(&out.Logs) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionSpec. +func (in *CollectionSpec) DeepCopy() *CollectionSpec { + if in == nil { + return nil + } + out := new(CollectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CollectionStatus) DeepCopyInto(out *CollectionStatus) { + *out = *in + in.Logs.DeepCopyInto(&out.Logs) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CollectionStatus. +func (in *CollectionStatus) DeepCopy() *CollectionStatus { + if in == nil { + return nil + } + out := new(CollectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CurationSpec) DeepCopyInto(out *CurationSpec) { + *out = *in + in.CuratorSpec.DeepCopyInto(&out.CuratorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurationSpec. 
+func (in *CurationSpec) DeepCopy() *CurationSpec { + if in == nil { + return nil + } + out := new(CurationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CurationStatus) DeepCopyInto(out *CurationStatus) { + *out = *in + if in.CuratorStatus != nil { + in, out := &in.CuratorStatus, &out.CuratorStatus + *out = make([]CuratorStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CurationStatus. +func (in *CurationStatus) DeepCopy() *CurationStatus { + if in == nil { + return nil + } + out := new(CurationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CuratorSpec) DeepCopyInto(out *CuratorSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CuratorSpec. +func (in *CuratorSpec) DeepCopy() *CuratorSpec { + if in == nil { + return nil + } + out := new(CuratorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CuratorStatus) DeepCopyInto(out *CuratorStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(map[string][]ClusterCondition, len(*in)) + for key, val := range *in { + var outVal []ClusterCondition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CuratorStatus. +func (in *CuratorStatus) DeepCopy() *CuratorStatus { + if in == nil { + return nil + } + out := new(CuratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ElasticsearchSpec) DeepCopyInto(out *ElasticsearchSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Storage.DeepCopyInto(&out.Storage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchSpec. +func (in *ElasticsearchSpec) DeepCopy() *ElasticsearchSpec { + if in == nil { + return nil + } + out := new(ElasticsearchSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus) { + *out = *in + if in.ReplicaSets != nil { + in, out := &in.ReplicaSets, &out.ReplicaSets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Deployments != nil { + in, out := &in.Deployments, &out.Deployments + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.StatefulSets != nil { + in, out := &in.StatefulSets, &out.StatefulSets + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Cluster = in.Cluster + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make(map[ElasticsearchRoleType]PodStateMap, len(*in)) + for key, val := range *in { + var outVal map[PodStateType][]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + (*out)[key] = outVal + } + } + if in.ClusterConditions != nil { + in, out := &in.ClusterConditions, &out.ClusterConditions + *out = make([]loggingv1.ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NodeConditions != nil { + in, out := &in.NodeConditions, &out.NodeConditions + *out = make(map[string][]loggingv1.ClusterCondition, len(*in)) + for key, val := range *in { + var outVal []loggingv1.ClusterCondition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]loggingv1.ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ElasticsearchStatus. 
+func (in *ElasticsearchStatus) DeepCopy() *ElasticsearchStatus { + if in == nil { + return nil + } + out := new(ElasticsearchStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventCollectionSpec) DeepCopyInto(out *EventCollectionSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventCollectionSpec. +func (in *EventCollectionSpec) DeepCopy() *EventCollectionSpec { + if in == nil { + return nil + } + out := new(EventCollectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventCollectionStatus) DeepCopyInto(out *EventCollectionStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventCollectionStatus. +func (in *EventCollectionStatus) DeepCopy() *EventCollectionStatus { + if in == nil { + return nil + } + out := new(EventCollectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FluentdCollectorStatus) DeepCopyInto(out *FluentdCollectorStatus) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(map[string][]ClusterCondition, len(*in)) + for key, val := range *in { + var outVal []ClusterCondition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdCollectorStatus. +func (in *FluentdCollectorStatus) DeepCopy() *FluentdCollectorStatus { + if in == nil { + return nil + } + out := new(FluentdCollectorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FluentdNormalizerStatus) DeepCopyInto(out *FluentdNormalizerStatus) { + *out = *in + if in.ReplicaSets != nil { + in, out := &in.ReplicaSets, &out.ReplicaSets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(map[string][]ClusterCondition, len(*in)) + for key, val := range *in { + var outVal []ClusterCondition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdNormalizerStatus. +func (in *FluentdNormalizerStatus) DeepCopy() *FluentdNormalizerStatus { + if in == nil { + return nil + } + out := new(FluentdNormalizerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FluentdSpec) DeepCopyInto(out *FluentdSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FluentdSpec. 
+func (in *FluentdSpec) DeepCopy() *FluentdSpec { + if in == nil { + return nil + } + out := new(FluentdSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KibanaSpec) DeepCopyInto(out *KibanaSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.ProxySpec.DeepCopyInto(&out.ProxySpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KibanaSpec. +func (in *KibanaSpec) DeepCopy() *KibanaSpec { + if in == nil { + return nil + } + out := new(KibanaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KibanaStatus) DeepCopyInto(out *KibanaStatus) { + *out = *in + if in.ReplicaSets != nil { + in, out := &in.ReplicaSets, &out.ReplicaSets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(map[string][]ClusterCondition, len(*in)) + for key, val := range *in { + var outVal []ClusterCondition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KibanaStatus. +func (in *KibanaStatus) DeepCopy() *KibanaStatus { + if in == nil { + return nil + } + out := new(KibanaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogCollectionSpec) DeepCopyInto(out *LogCollectionSpec) { + *out = *in + in.FluentdSpec.DeepCopyInto(&out.FluentdSpec) + in.RsyslogSpec.DeepCopyInto(&out.RsyslogSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogCollectionSpec. +func (in *LogCollectionSpec) DeepCopy() *LogCollectionSpec { + if in == nil { + return nil + } + out := new(LogCollectionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LogCollectionStatus) DeepCopyInto(out *LogCollectionStatus) { + *out = *in + in.FluentdStatus.DeepCopyInto(&out.FluentdStatus) + in.RsyslogStatus.DeepCopyInto(&out.RsyslogStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogCollectionStatus. +func (in *LogCollectionStatus) DeepCopy() *LogCollectionStatus { + if in == nil { + return nil + } + out := new(LogCollectionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogStoreSpec) DeepCopyInto(out *LogStoreSpec) { + *out = *in + in.ElasticsearchSpec.DeepCopyInto(&out.ElasticsearchSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogStoreSpec. +func (in *LogStoreSpec) DeepCopy() *LogStoreSpec { + if in == nil { + return nil + } + out := new(LogStoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogStoreStatus) DeepCopyInto(out *LogStoreStatus) { + *out = *in + if in.ElasticsearchStatus != nil { + in, out := &in.ElasticsearchStatus, &out.ElasticsearchStatus + *out = make([]ElasticsearchStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogStoreStatus. +func (in *LogStoreStatus) DeepCopy() *LogStoreStatus { + if in == nil { + return nil + } + out := new(LogStoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NormalizerStatus) DeepCopyInto(out *NormalizerStatus) { + *out = *in + if in.FluentdStatus != nil { + in, out := &in.FluentdStatus, &out.FluentdStatus + *out = make([]FluentdNormalizerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NormalizerStatus. +func (in *NormalizerStatus) DeepCopy() *NormalizerStatus { + if in == nil { + return nil + } + out := new(NormalizerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in PodStateMap) DeepCopyInto(out *PodStateMap) { + { + in := &in + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStateMap. +func (in PodStateMap) DeepCopy() PodStateMap { + if in == nil { + return nil + } + out := new(PodStateMap) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxySpec) DeepCopyInto(out *ProxySpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec. +func (in *ProxySpec) DeepCopy() *ProxySpec { + if in == nil { + return nil + } + out := new(ProxySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RsyslogCollectorStatus) DeepCopyInto(out *RsyslogCollectorStatus) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make(PodStateMap, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(map[string][]ClusterCondition, len(*in)) + for key, val := range *in { + var outVal []ClusterCondition + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]ClusterCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RsyslogCollectorStatus. +func (in *RsyslogCollectorStatus) DeepCopy() *RsyslogCollectorStatus { + if in == nil { + return nil + } + out := new(RsyslogCollectorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RsyslogSpec) DeepCopyInto(out *RsyslogSpec) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RsyslogSpec. 
+func (in *RsyslogSpec) DeepCopy() *RsyslogSpec { + if in == nil { + return nil + } + out := new(RsyslogSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VisualizationSpec) DeepCopyInto(out *VisualizationSpec) { + *out = *in + in.KibanaSpec.DeepCopyInto(&out.KibanaSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationSpec. +func (in *VisualizationSpec) DeepCopy() *VisualizationSpec { + if in == nil { + return nil + } + out := new(VisualizationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VisualizationStatus) DeepCopyInto(out *VisualizationStatus) { + *out = *in + if in.KibanaStatus != nil { + in, out := &in.KibanaStatus, &out.KibanaStatus + *out = make([]KibanaStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationStatus. 
+func (in *VisualizationStatus) DeepCopy() *VisualizationStatus { + if in == nil { + return nil + } + out := new(VisualizationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/k8shandler/status.go b/pkg/k8shandler/status.go index 317306edd6..4f52f515b1 100644 --- a/pkg/k8shandler/status.go +++ b/pkg/k8shandler/status.go @@ -5,6 +5,7 @@ import ( logging "github.com/openshift/cluster-logging-operator/pkg/apis/logging/v1" elasticsearch "github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1" + core "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -30,6 +31,8 @@ func (clusterRequest *ClusterLoggingRequest) getCuratorStatus() ([]logging.Curat Suspended: *cronjob.Spec.Suspend, } + curatorStatus.Conditions = clusterRequest.getPodConditions("curator") + status = append(status, curatorStatus) } @@ -63,6 +66,8 @@ func (clusterRequest *ClusterLoggingRequest) getFluentdCollectorStatus() (loggin } fluentdStatus.Pods = podStateMap(podList.Items) fluentdStatus.Nodes = podNodeMap + + fluentdStatus.Conditions = clusterRequest.getPodConditions("fluentd") } return fluentdStatus, nil @@ -95,6 +100,8 @@ func (clusterRequest *ClusterLoggingRequest) getRsyslogCollectorStatus() (loggin } rsyslogStatus.Pods = podStateMap(podList.Items) rsyslogStatus.Nodes = podNodeMap + + rsyslogStatus.Conditions = clusterRequest.getPodConditions("rsyslog") } return rsyslogStatus, nil @@ -131,6 +138,8 @@ func (clusterRequest *ClusterLoggingRequest) getKibanaStatus() ([]logging.Kibana podList, _ := clusterRequest.GetPodList(selector) kibanaStatus.Pods = podStateMap(podList.Items) + kibanaStatus.Conditions = clusterRequest.getPodConditions("kibana") + status = append(status, kibanaStatus) } @@ -156,18 +165,40 @@ func (clusterRequest *ClusterLoggingRequest) getElasticsearchStatus() ([]logging } if len(esList.Items) != 0 { - for _, node := range esList.Items { + for _, cluster := range esList.Items { + + nodeConditions := 
make(map[string][]elasticsearch.ClusterCondition) nodeStatus := logging.ElasticsearchStatus{ - ClusterName: node.Name, - NodeCount: node.Spec.Nodes[0].NodeCount, - Deployments: getDeploymentNames(node.Status), - ReplicaSets: getReplicaSetNames(node.Status), - StatefulSets: getStatefulSetNames(node.Status), - Pods: getPodMap(node.Status), - ClusterHealth: node.Status.ClusterHealth, + ClusterName: cluster.Name, + NodeCount: cluster.Spec.Nodes[0].NodeCount, + ClusterHealth: cluster.Status.ClusterHealth, + Cluster: cluster.Status.Cluster, + Pods: getPodMap(cluster.Status), + ClusterConditions: cluster.Status.Conditions, + ShardAllocationEnabled: cluster.Status.ShardAllocationEnabled, + } + + for _, node := range cluster.Status.Nodes { + nodeName := "" + + if node.DeploymentName != "" { + nodeName = node.DeploymentName + } + + if node.StatefulSetName != "" { + nodeName = node.StatefulSetName + } + + if node.Conditions != nil { + nodeConditions[nodeName] = node.Conditions + } else { + nodeConditions[nodeName] = []elasticsearch.ClusterCondition{} + } } + nodeStatus.NodeConditions = nodeConditions + status = append(status, nodeStatus) } } @@ -258,3 +289,71 @@ func isPodReady(pod v1.Pod) bool { return true } + +func (clusterRequest *ClusterLoggingRequest) getPodConditions(component string) map[string][]logging.ClusterCondition { + // Get all pods based on status.Nodes[] and check their conditions + // get pod with label 'node-name=node.getName()' + podConditions := make(map[string][]logging.ClusterCondition) + + nodePodList := &core.PodList{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: core.SchemeGroupVersion.String(), + }, + } + + clusterRequest.List( + map[string]string{ + "component": component, + }, + nodePodList, + ) + + for _, nodePod := range nodePodList.Items { + + conditions := []logging.ClusterCondition{} + + isUnschedulable := false + for _, podCondition := range nodePod.Status.Conditions { + if podCondition.Type == v1.PodScheduled && 
podCondition.Status == v1.ConditionFalse { + conditions = append(conditions, logging.ClusterCondition{ + Type: logging.Unschedulable, + Status: v1.ConditionTrue, + Reason: podCondition.Reason, + Message: podCondition.Message, + LastTransitionTime: podCondition.LastTransitionTime, + }) + isUnschedulable = true + } + } + + if !isUnschedulable { + for _, containerStatus := range nodePod.Status.ContainerStatuses { + if containerStatus.State.Waiting != nil { + conditions = append(conditions, logging.ClusterCondition{ + Type: logging.ContainerWaiting, + Status: v1.ConditionTrue, + Reason: containerStatus.State.Waiting.Reason, + Message: containerStatus.State.Waiting.Message, + LastTransitionTime: metav1.Now(), + }) + } + if containerStatus.State.Terminated != nil { + conditions = append(conditions, logging.ClusterCondition{ + Type: logging.ContainerTerminated, + Status: v1.ConditionTrue, + Reason: containerStatus.State.Terminated.Reason, + Message: containerStatus.State.Terminated.Message, + LastTransitionTime: metav1.Now(), + }) + } + } + } + + if len(conditions) > 0 { + podConditions[nodePod.Name] = conditions + } + } + + return podConditions +} diff --git a/vendor/github.com/openshift/elasticsearch-operator/Dockerfile b/vendor/github.com/openshift/elasticsearch-operator/Dockerfile index a229835733..b8ab488500 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/Dockerfile +++ b/vendor/github.com/openshift/elasticsearch-operator/Dockerfile @@ -4,13 +4,15 @@ COPY . . 
RUN make FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base +ARG CSV=4.1 ENV ALERTS_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_alerts.yml" ENV RULES_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_rules.yml" COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/_output/bin/elasticsearch-operator /usr/bin/ -COPY files/ /etc/elasticsearch-operator/files/ -ADD controller-manifests /manifests +COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/files/ /etc/elasticsearch-operator/files/ +COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/manifests/$CSV /manifests/$CSV +COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml /manifests/ WORKDIR /usr/bin ENTRYPOINT ["elasticsearch-operator"] diff --git a/vendor/github.com/openshift/elasticsearch-operator/Dockerfile.rhel7 b/vendor/github.com/openshift/elasticsearch-operator/Dockerfile.rhel7 index 0dcfa4e5b0..3dc8916c57 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/Dockerfile.rhel7 +++ b/vendor/github.com/openshift/elasticsearch-operator/Dockerfile.rhel7 @@ -4,13 +4,15 @@ COPY . . 
RUN make FROM registry.svc.ci.openshift.org/ocp/4.0:base +ARG CSV=4.1 ENV ALERTS_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_alerts.yml" ENV RULES_FILE_PATH="/etc/elasticsearch-operator/files/prometheus_rules.yml" COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/_output/bin/elasticsearch-operator /usr/bin/ -COPY files/ /etc/elasticsearch-operator/files/ -ADD controller-manifests /manifests +COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/files/ /etc/elasticsearch-operator/files/ +COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/manifests/$CSV /manifests/$CSV +COPY --from=builder /go/src/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml /manifests/ WORKDIR /usr/bin ENTRYPOINT ["elasticsearch-operator"] diff --git a/vendor/github.com/openshift/elasticsearch-operator/Makefile b/vendor/github.com/openshift/elasticsearch-operator/Makefile index 25f81baffc..fc6b1f38c4 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/Makefile +++ b/vendor/github.com/openshift/elasticsearch-operator/Makefile @@ -10,7 +10,8 @@ IMAGE_BUILD=$(IMAGE_BUILDER) export IMAGE_TAGGER?=docker tag export APP_NAME=elasticsearch-operator -export IMAGE_TAG=quay.io/openshift/origin-$(APP_NAME):latest +IMAGE_TAG?=quay.io/openshift/origin-$(APP_NAME):latest +export IMAGE_TAG APP_REPO=github.com/openshift/$(APP_NAME) TARGET=$(TARGET_DIR)/bin/$(APP_NAME) KUBECONFIG?=$(HOME)/.kube/config diff --git a/vendor/github.com/openshift/elasticsearch-operator/cmd/manager/main.go b/vendor/github.com/openshift/elasticsearch-operator/cmd/manager/main.go index 54259572d2..e0aca01de5 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/cmd/manager/main.go +++ b/vendor/github.com/openshift/elasticsearch-operator/cmd/manager/main.go @@ -13,6 +13,7 @@ import ( "github.com/openshift/elasticsearch-operator/pkg/apis" "github.com/openshift/elasticsearch-operator/pkg/controller" + 
monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" "github.com/operator-framework/operator-sdk/pkg/k8sutil" "github.com/operator-framework/operator-sdk/pkg/leader" "github.com/operator-framework/operator-sdk/pkg/log/zap" @@ -23,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/runtime/signals" - monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" ) // Change below variables to serve metrics on different host or port. diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh index 4f801f3f9d..1a420467a2 100755 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy-setup.sh @@ -39,13 +39,6 @@ load_manifest() { load_manifest ${repo_dir} ${NAMESPACE} -#hack openshift-monitoring -pushd vendor/github.com/coreos/prometheus-operator/example/prometheus-operator-crd - for file in prometheusrule.crd.yaml servicemonitor.crd.yaml; do - oc create -n ${NAMESPACE} -f ${file} ||: - done -popd - oc create -f hack/prometheus-operator-crd-cluster-roles.yaml ||: oc create clusterrolebinding elasticsearch-operator-prometheus-rolebinding \ diff --git a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh index d59b2a307f..c42f6a22ca 100755 --- a/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh +++ b/vendor/github.com/openshift/elasticsearch-operator/hack/deploy.sh @@ -7,8 +7,17 @@ source "$(dirname $0)/common" if [ $REMOTE_REGISTRY = false ] ; then oc create -n ${NAMESPACE} -f manifests/05-deployment.yaml else + if [ -n "${IMAGE_OVERRIDE:-}" ] ; then + replace_image() { + sed -e "s, image:.*\$, image: ${IMAGE_OVERRIDE}," + } + else + replace_image() { 
+ sed -e "s,${IMAGE_TAG},${registry_host}:5000/${image_tag}," + } + fi image_tag=$( echo "$IMAGE_TAG" | sed -e 's,quay.io/,,' ) cat manifests/05-deployment.yaml | \ - sed -e "s,${IMAGE_TAG},${registry_host}:5000/${image_tag}," | \ + replace_image | \ oc create -n ${NAMESPACE} -f - fi diff --git a/vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearch-operator.v4.1.0.clusterserviceversion.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/elasticsearch-operator.v4.1.0.clusterserviceversion.yaml similarity index 98% rename from vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearch-operator.v4.1.0.clusterserviceversion.yaml rename to vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/elasticsearch-operator.v4.1.0.clusterserviceversion.yaml index dbee15c3bf..699a1ea4db 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearch-operator.v4.1.0.clusterserviceversion.yaml +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/elasticsearch-operator.v4.1.0.clusterserviceversion.yaml @@ -182,14 +182,12 @@ spec: valueFrom: fieldRef: fieldPath: metadata.annotations['olm.targetNamespaces'] - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - name: OPERATOR_NAME value: "elasticsearch-operator" - name: PROXY_IMAGE value: "quay.io/openshift/origin-oauth-proxy:latest" + - name: ELASTICSEARCH_IMAGE + value: "quay.io/openshift/origin-logging-elasticsearch5:latest" version: 4.1.0 customresourcedefinitions: owned: diff --git a/vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearches.crd.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/elasticsearches.crd.yaml similarity index 100% rename from vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearches.crd.yaml rename to 
vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/elasticsearches.crd.yaml diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/image-references b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/image-references similarity index 72% rename from vendor/github.com/openshift/elasticsearch-operator/manifests/image-references rename to vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/image-references index be1d08eaaa..caa47ea916 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/manifests/image-references +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.1/image-references @@ -4,10 +4,14 @@ apiVersion: image.openshift.io/v1 spec: tags: - name: elasticsearch-operator - from: + from: kind: DockerImage name: quay.io/openshift/origin-elasticsearch-operator:latest - name: logging-elasticsearch5 from: kind: DockerImage name: quay.io/openshift/origin-logging-elasticsearch5:latest + - name: oauth-proxy + from: + kind: DockerImage + name: quay.io/openshift/origin-oauth-proxy:latest diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/elasticsearch-operator.v4.2.0.clusterserviceversion.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/elasticsearch-operator.v4.2.0.clusterserviceversion.yaml new file mode 100644 index 0000000000..82493abca5 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/elasticsearch-operator.v4.2.0.clusterserviceversion.yaml @@ -0,0 +1,281 @@ +#! validate-crd: deploy/chart/templates/0000_30_02-clusterserviceversion.crd.yaml +#! 
parse-kind: ClusterServiceVersion +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + name: elasticsearch-operator.v4.2.0 + namespace: placeholder + annotations: + categories: "OpenShift Optional, Logging & Tracing" + capabilities: "Seamless Upgrades" + certified: "false" + description: |- + The Elasticsearch Operator for OKD provides a means for configuring and managing an Elasticsearch cluster for tracing and cluster logging. + ## Prerequisites and Requirements + ### Elasticsearch Operator Namespace + The Elasticsearch Operator must be deployed to the global operator group namespace + ### Memory Considerations + Elasticsearch is a memory intensive application. The initial + set of OKD nodes may not be large enough to support the Elasticsearch cluster. Additional OKD nodes must be added + to the OKD cluster if you desire to run with the recommended (or better) memory. Each ES node can operate with a + lower memory setting though this is not recommended for production deployments. + containerImage: quay.io/openshift/origin-elasticsearch-operator:latest + createdAt: 2019-02-20T08:00:00Z + support: AOS Cluster Logging, Jaeger + alm-examples: |- + [ + { + "apiVersion": "logging.openshift.io/v1", + "kind": "Elasticsearch", + "metadata": { + "name": "elasticsearch" + }, + "spec": { + "managementState": "Managed", + "nodeSpec": { + "image": "quay.io/openshift/origin-logging-elasticsearch5:latest", + "resources": { + "limits": { + "memory": "1Gi" + }, + "requests": { + "memory": "512Mi" + } + } + }, + "nodes": [ + { + "nodeCount": 1, + "roles": ["client","data","master"], + "redundancyPolicy": "SingleRedundancy" + } + ] + } + } + ] +spec: + displayName: Elasticsearch Operator + + replaces: elasticsearch-operator.v4.1.0 + + description: | + The Elasticsearch Operator for OKD provides a means for configuring and managing an Elasticsearch cluster for use in tracing and cluster logging. 
+ This operator only supports OKD Cluster Logging and Jaeger. It is tightly coupled to each and is not currently capable of + being used as a general purpose manager of Elasticsearch clusters running on OKD. + + It is recommended this operator be deployed to the **openshift-operators** namespace to properly support the Cluster Logging and Jaeger use cases. + + Once installed, the operator provides the following features: + * **Create/Destroy**: Deploy an Elasticsearch cluster to the same namespace in which the Elasticsearch custom resource is created. + + keywords: ['elasticsearch', 'jaeger'] + + maintainers: + - name: Red Hat, AOS Logging + email: aos-logging@redhat.com + + provider: + name: Red Hat, Inc + + links: + - name: Elastic + url: https://www.elastic.co/ + - name: Elasticsearch Operator + url: https://github.com/openshift/elasticsearch-operator + installModes: + - type: OwnNamespace + supported: true + - type: SingleNamespace + supported: false + - type: MultiNamespace + supported: false + - type: AllNamespaces + supported: true + install: + strategy: deployment + spec: + clusterPermissions: + - serviceAccountName: elasticsearch-operator + rules: + - apiGroups: + - logging.openshift.io + resources: + - "*" + verbs: + - "*" + - apiGroups: + - "" + resources: + - pods + - pods/exec + - services + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - serviceaccounts + verbs: + - "*" + - apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - "*" + - apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + - servicemonitors + verbs: + - "*" + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + - clusterrolebindings + verbs: + - "*" + - nonResourceURLs: + - "/metrics" + verbs: + - get + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + - subjectaccessreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + 
resources: + - subjectaccessreviews + verbs: + - create + deployments: + - name: elasticsearch-operator + spec: + replicas: 1 + selector: + matchLabels: + name: elasticsearch-operator + template: + metadata: + labels: + name: elasticsearch-operator + spec: + serviceAccountName: elasticsearch-operator + containers: + - name: elasticsearch-operator + image: quay.io/openshift/origin-elasticsearch-operator:latest + imagePullPolicy: IfNotPresent + command: + - elasticsearch-operator + ports: + - containerPort: 60000 + name: metrics + env: + - name: WATCH_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.annotations['olm.targetNamespaces'] + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: OPERATOR_NAME + value: "elasticsearch-operator" + - name: PROXY_IMAGE + value: "quay.io/openshift/origin-oauth-proxy:latest" + - name: ELASTICSEARCH_IMAGE + value: "quay.io/openshift/origin-logging-elasticsearch5:latest" + version: 4.2.0 + customresourcedefinitions: + owned: + - name: elasticsearches.logging.openshift.io + version: v1 + kind: Elasticsearch + displayName: Elasticsearch + description: An Elasticsearch cluster instance + resources: + - kind: Deployment + version: v1 + - kind: StatefulSet + version: v1 + - kind: ReplicaSet + version: v1 + - kind: Pod + version: v1 + - kind: ConfigMap + version: v1 + - kind: Service + version: v1 + - kind: Route + version: v1 + specDescriptors: + - description: Limits describes the minimum/maximum amount of compute resources required/allowed + displayName: Resource Requirements + path: nodeSpec.resources + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:resourceRequirements' + statusDescriptors: + - description: The current Status of the Elasticsearch Cluster + displayName: Status + path: cluster.status + x-descriptors: + - 'urn:alm:descriptor:io.kubernetes.phase' + - description: The number of Active Primary Shards for the Elasticsearch Cluster + displayName: Active Primary Shards + path: 
cluster.activePrimShards + x-descriptors: + - 'urn:alm:descriptor:text' + - description: The number of Active Shards for the Elasticsearch Cluster + displayName: Active Shards + path: cluster.activeShards + x-descriptors: + - 'urn:alm:descriptor:text' + - description: The number of Initializing Shards for the Elasticsearch Cluster + displayName: Initializing Shards + path: cluster.initializingShards + x-descriptors: + - 'urn:alm:descriptor:text' + - description: The number of Data Nodes for the Elasticsearch Cluster + displayName: Number of Data Nodes + path: cluster.numDataNodes + x-descriptors: + - 'urn:alm:descriptor:text' + - description: The number of Nodes for the Elasticsearch Cluster + displayName: Number of Nodes + path: cluster.numNodes + x-descriptors: + - 'urn:alm:descriptor:text' + - description: The number of Relocating Shards for the Elasticsearch Cluster + displayName: Relocating Shards + path: cluster.relocatingShards + x-descriptors: + - 'urn:alm:descriptor:text' + - description: The number of Unassigned Shards for the Elasticsearch Cluster + displayName: Unassigned Shards + path: cluster.unassignedShards + x-descriptors: + - 'urn:alm:descriptor:text' + - description: The status for each of the Elasticsearch pods with the Client role + displayName: Elasticsearch Client Status + path: pods.client + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:podStatuses' + - description: The status for each of the Elasticsearch pods with the Data role + displayName: Elasticsearch Data Status + path: pods.data + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:podStatuses' + - description: The status for each of the Elasticsearch pods with the Master role + displayName: Elasticsearch Master Status + path: pods.master + x-descriptors: + - 'urn:alm:descriptor:com.tectonic.ui:podStatuses' diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/elasticsearches.crd.yaml 
b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/elasticsearches.crd.yaml new file mode 100644 index 0000000000..1f2f7ffd40 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/elasticsearches.crd.yaml @@ -0,0 +1,98 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: elasticsearches.logging.openshift.io +spec: + group: logging.openshift.io + names: + kind: Elasticsearch + listKind: ElasticsearchList + plural: elasticsearches + singular: elasticsearch + scope: Namespaced + version: v1 + validation: + openAPIV3Schema: + properties: + spec: + description: Specification of the desired behavior of the Elasticsearch cluster + properties: + managementState: + description: Indicator if the resource is 'Managed' or 'Unmanaged' by the operator + type: string + enum: + - "Managed" + - "Unmanaged" + redundancyPolicy: + description: The policy towards data redundancy to specify the number of redundant primary shards + type: string + enum: + - "FullRedundancy" + - "MultipleRedundancy" + - "SingleRedundancy" + - "ZeroRedundancy" + nodes: + description: Specification of the different Elasticsearch nodes + type: array + items: + type: object + properties: + roles: + description: The specific Elasticsearch cluster roles the node should perform + type: array + items: + type: string + nodeCount: + description: Number of nodes to deploy + format: int32 + type: integer + nodeSpec: + description: Specification of a specific Elasticsearch node + properties: + image: + description: The image to use for the Elasticsearch node + type: string + resources: + description: The resource requirements for the Elasticsearch node + properties: + limits: + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + nodeSelector: + description: Define which Nodes the Pods are scheduled on. + type: object + storage: + description: The type of backing storage that should be used for the node + properties: + storageClassName: + description: The name of the storage class to use with creating the node's PVC + type: string + size: + description: The max storage capacity for the node + type: string + nodeSpec: + description: Default specification applied to all Elasticsearch nodes + properties: + image: + description: The image to use for the Elasticsearch nodes + type: string + resources: + description: The resource requirements for the Elasticsearch nodes + properties: + limits: + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + requests: + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/image-references b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/image-references new file mode 100644 index 0000000000..caa47ea916 --- /dev/null +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/4.2/image-references @@ -0,0 +1,17 @@ +--- +kind: ImageStream +apiVersion: image.openshift.io/v1 +spec: + tags: + - name: elasticsearch-operator + from: + kind: DockerImage + name: quay.io/openshift/origin-elasticsearch-operator:latest + - name: logging-elasticsearch5 + from: + kind: DockerImage + name: quay.io/openshift/origin-logging-elasticsearch5:latest + - name: oauth-proxy + from: + kind: DockerImage + name: quay.io/openshift/origin-oauth-proxy:latest diff --git a/vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearch-operator.package.yaml b/vendor/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml similarity index 80% rename from vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearch-operator.package.yaml rename to vendor/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml index 3b77468e6a..a718a56826 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/controller-manifests/elasticsearch-operator.package.yaml +++ b/vendor/github.com/openshift/elasticsearch-operator/manifests/elasticsearch-operator.package.yaml @@ -2,4 +2,4 @@ packageName: elasticsearch-operator channels: - name: preview - currentCSV: elasticsearch-operator.v4.1.0 + currentCSV: elasticsearch-operator.v4.2.0 diff --git a/vendor/github.com/openshift/elasticsearch-operator/manifests/latest b/vendor/github.com/openshift/elasticsearch-operator/manifests/latest new file mode 120000 index 0000000000..7db6ab3327 --- /dev/null +++ 
b/vendor/github.com/openshift/elasticsearch-operator/manifests/latest @@ -0,0 +1 @@ +4.2/elasticsearch-operator.v4.2.0.clusterserviceversion.yaml \ No newline at end of file diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go index f311909f33..f27d99da66 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/elasticsearch_types.go @@ -59,11 +59,24 @@ type ElasticsearchStatus struct { Nodes []ElasticsearchNodeStatus `json:"nodes"` ClusterHealth string `json:"clusterHealth"` + Cluster ClusterHealth `json:"cluster"` ShardAllocationEnabled ShardAllocationState `json:"shardAllocationEnabled"` Pods map[ElasticsearchNodeRole]PodStateMap `json:"pods"` Conditions []ClusterCondition `json:"conditions"` } +type ClusterHealth struct { + Status string `json:"status"` + NumNodes int32 `json:"numNodes"` + NumDataNodes int32 `json:"numDataNodes"` + ActivePrimaryShards int32 `json:"activePrimaryShards"` + ActiveShards int32 `json:"activeShards"` + RelocatingShards int32 `json:"relocatingShards"` + InitializingShards int32 `json:"initializingShards"` + UnassignedShards int32 `json:"unassignedShards"` + PendingTasks int32 `json:"pendingTasks"` +} + // ElasticsearchNode struct represents individual node in Elasticsearch cluster // GenUUID will be populated by the operator if not provided type ElasticsearchNode struct { diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go index 6c738b359f..4a46be7329 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go +++ 
b/vendor/github.com/openshift/elasticsearch-operator/pkg/apis/logging/v1/zz_generated.deepcopy.go @@ -25,6 +25,22 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterHealth) DeepCopyInto(out *ClusterHealth) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterHealth. +func (in *ClusterHealth) DeepCopy() *ClusterHealth { + if in == nil { + return nil + } + out := new(ClusterHealth) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Elasticsearch) DeepCopyInto(out *Elasticsearch) { *out = *in @@ -224,6 +240,7 @@ func (in *ElasticsearchStatus) DeepCopyInto(out *ElasticsearchStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + out.Cluster = in.Cluster if in.Pods != nil { in, out := &in.Pods, &out.Pods *out = make(map[ElasticsearchNodeRole]PodStateMap, len(*in)) diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go index 218afc4462..c5d4168d9a 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/common.go @@ -26,7 +26,7 @@ func addOwnerRefToObject(o metav1.Object, r metav1.OwnerReference) { func getImage(commonImage string) string { image := commonImage if image == "" { - image = elasticsearchDefaultImage + image = utils.LookupEnvWithDefault("ELASTICSEARCH_IMAGE", elasticsearchDefaultImage) } return image } @@ -214,6 +214,17 @@ func newProxyContainer(imageName, clusterName string) (v1.Container, error) { if err != nil { return v1.Container{}, err } + + cpuLimit, err := resource.ParseQuantity("100m") + if err != nil { + 
return v1.Container{}, err + } + + memoryLimit, err := resource.ParseQuantity("64Mi") + if err != nil { + return v1.Container{}, err + } + container := v1.Container{ Name: "proxy", Image: imageName, @@ -248,6 +259,15 @@ func newProxyContainer(imageName, clusterName string) (v1.Container, error) { "--pass-user-bearer-token", fmt.Sprintf("--cookie-secret=%s", proxyCookieSecret), }, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "memory": memoryLimit, + }, + Requests: v1.ResourceList{ + "cpu": cpuLimit, + "memory": memoryLimit, + }, + }, } return container, nil } @@ -358,6 +378,13 @@ func newPodTemplateSpec(nodeName, clusterName, namespace string, node api.Elasti NodeSelector: mergeSelectors(node.NodeSelector, commonSpec.NodeSelector), ServiceAccountName: clusterName, Volumes: newVolumes(clusterName, nodeName, namespace, node, client), + Tolerations: []v1.Toleration{ + v1.Toleration{ + Key: "node.kubernetes.io/disk-pressure", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, }, } } diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/deployment.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/deployment.go index aa66ecca46..dfc1938e6d 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/deployment.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/deployment.go @@ -292,7 +292,7 @@ func (node *deploymentNode) waitForNodeLeaveCluster() (error, bool) { func (node *deploymentNode) restart(upgradeStatus *api.ElasticsearchNodeStatus) { if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue { - if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace, node.client); status != "green" { + if status, _ := GetClusterHealthStatus(node.clusterName, node.self.Namespace, node.client); status != "green" { logrus.Infof("Waiting for cluster to be fully recovered before restarting %v: %v / green", node.name(), status) 
return } @@ -368,7 +368,7 @@ func (node *deploymentNode) restart(upgradeStatus *api.ElasticsearchNodeStatus) if upgradeStatus.UpgradeStatus.UpgradePhase == api.RecoveringData { - if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace, node.client); status != "green" { + if status, _ := GetClusterHealthStatus(node.clusterName, node.self.Namespace, node.client); status != "green" { logrus.Infof("Waiting for cluster to complete recovery: %v / green", status) return } @@ -382,7 +382,7 @@ func (node *deploymentNode) update(upgradeStatus *api.ElasticsearchNodeStatus) e // set our state to being under upgrade if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue { - if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace, node.client); status != "green" { + if status, _ := GetClusterHealthStatus(node.clusterName, node.self.Namespace, node.client); status != "green" { logrus.Infof("Waiting for cluster to be fully recovered before upgrading %v: %v / green", node.name(), status) return fmt.Errorf("Cluster not in green state before beginning upgrade: %v", status) } @@ -451,7 +451,7 @@ func (node *deploymentNode) update(upgradeStatus *api.ElasticsearchNodeStatus) e if upgradeStatus.UpgradeStatus.UpgradePhase == api.RecoveringData { - if status, err := GetClusterHealth(node.clusterName, node.self.Namespace, node.client); status != "green" { + if status, err := GetClusterHealthStatus(node.clusterName, node.self.Namespace, node.client); status != "green" { logrus.Infof("Waiting for cluster to complete recovery: %v / green", status) return err } diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go index 1a86b92563..d432d34d30 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/elasticsearch.go @@ 
-67,12 +67,7 @@ func GetShardAllocation(clusterName, namespace string, client client.Client) (st curlESService(clusterName, namespace, payload, client) - allocation := "" - value := walkInterfaceMap("transient.cluster.routing.allocation.enable", payload.ResponseBody) - - if allocationString, ok := value.(string); ok { - allocation = allocationString - } + allocation := parseString("transient.cluster.routing.allocation.enable", payload.ResponseBody) return allocation, payload.Error } @@ -224,6 +219,40 @@ func GetDiskWatermarks(clusterName, namespace string, client client.Client) (int return low, high, payload.Error } +func parseBool(path string, interfaceMap map[string]interface{}) bool { + value := walkInterfaceMap(path, interfaceMap) + + if parsedBool, ok := value.(bool); ok { + return parsedBool + } else { + return false + } +} + +func parseString(path string, interfaceMap map[string]interface{}) string { + value := walkInterfaceMap(path, interfaceMap) + + if parsedString, ok := value.(string); ok { + return parsedString + } else { + return "" + } +} + +func parseInt32(path string, interfaceMap map[string]interface{}) int32 { + return int32(parseFloat64(path, interfaceMap)) +} + +func parseFloat64(path string, interfaceMap map[string]interface{}) float64 { + value := walkInterfaceMap(path, interfaceMap) + + if parsedFloat, ok := value.(float64); ok { + return parsedFloat + } else { + return float64(-1) + } +} + func walkInterfaceMap(path string, interfaceMap map[string]interface{}) interface{} { current := interfaceMap @@ -322,7 +351,35 @@ func GetMinMasterNodes(clusterName, namespace string, client client.Client) (int return masterCount, payload.Error } -func GetClusterHealth(clusterName, namespace string, client client.Client) (string, error) { +func GetClusterHealth(clusterName, namespace string, client client.Client) (api.ClusterHealth, error) { + + clusterHealth := api.ClusterHealth{} + + payload := &esCurlStruct{ + Method: http.MethodGet, + URI: 
"_cluster/health", + } + + curlESService(clusterName, namespace, payload, client) + + if payload.Error != nil { + return clusterHealth, payload.Error + } + + clusterHealth.Status = parseString("status", payload.ResponseBody) + clusterHealth.NumNodes = parseInt32("number_of_nodes", payload.ResponseBody) + clusterHealth.NumDataNodes = parseInt32("number_of_data_nodes", payload.ResponseBody) + clusterHealth.ActivePrimaryShards = parseInt32("active_primary_shards", payload.ResponseBody) + clusterHealth.ActiveShards = parseInt32("active_shards", payload.ResponseBody) + clusterHealth.RelocatingShards = parseInt32("relocating_shards", payload.ResponseBody) + clusterHealth.InitializingShards = parseInt32("initializing_shards", payload.ResponseBody) + clusterHealth.UnassignedShards = parseInt32("unassigned_shards", payload.ResponseBody) + clusterHealth.PendingTasks = parseInt32("number_of_pending_tasks", payload.ResponseBody) + + return clusterHealth, nil +} + +func GetClusterHealthStatus(clusterName, namespace string, client client.Client) (string, error) { payload := &esCurlStruct{ Method: http.MethodGet, diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/statefulset.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/statefulset.go index 058b669cd2..ed4b31e831 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/statefulset.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/statefulset.go @@ -222,7 +222,7 @@ func (node *statefulSetNode) replicaCount() (int32, error) { func (node *statefulSetNode) restart(upgradeStatus *api.ElasticsearchNodeStatus) { if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue { - if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace, node.client); status != "green" { + if status, _ := GetClusterHealthStatus(node.clusterName, node.self.Namespace, node.client); status != "green" { logrus.Infof("Waiting for cluster to be 
fully recovered before restarting %v: %v / green", node.name(), status) return } @@ -343,7 +343,7 @@ func (node *statefulSetNode) executeUpdate() error { func (node *statefulSetNode) update(upgradeStatus *api.ElasticsearchNodeStatus) error { if upgradeStatus.UpgradeStatus.UnderUpgrade != v1.ConditionTrue { - if status, _ := GetClusterHealth(node.clusterName, node.self.Namespace, node.client); status != "green" { + if status, _ := GetClusterHealthStatus(node.clusterName, node.self.Namespace, node.client); status != "green" { logrus.Infof("Waiting for cluster to be fully recovered before restarting %v: %v / green", node.name(), status) return fmt.Errorf("Waiting for cluster to be fully recovered before restarting %v: %v / green", node.name(), status) } diff --git a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/status.go b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/status.go index 15aeb0aec6..4fbf5a2aa4 100644 --- a/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/status.go +++ b/vendor/github.com/openshift/elasticsearch-operator/pkg/k8shandler/status.go @@ -33,9 +33,9 @@ func (elasticsearchRequest *ElasticsearchRequest) UpdateClusterStatus() error { health, err := GetClusterHealth(cluster.Name, cluster.Namespace, elasticsearchRequest.client) if err != nil { - health = healthUnknown + health.Status = healthUnknown } - clusterStatus.ClusterHealth = health + clusterStatus.Cluster = health allocation, err := GetShardAllocation(cluster.Name, cluster.Namespace, elasticsearchRequest.client) switch { @@ -60,7 +60,7 @@ func (elasticsearchRequest *ElasticsearchRequest) UpdateClusterStatus() error { return getErr } - cluster.Status.ClusterHealth = clusterStatus.ClusterHealth + cluster.Status.Cluster = clusterStatus.Cluster cluster.Status.Conditions = clusterStatus.Conditions cluster.Status.Pods = clusterStatus.Pods cluster.Status.ShardAllocationEnabled = clusterStatus.ShardAllocationEnabled