Merge pull request #34969 from Crassirostris/es-kibana-usage-update
Automatic merge from submit-queue

Update elasticsearch and kibana usage

```release-note
Updated the default Elasticsearch and Kibana versions used for the Elasticsearch logging destination to 2.4.1 and 4.6.1 respectively.
```

Updated the Elasticsearch and Kibana controllers to use the newer image versions, and fixed the e2e test to accommodate backward-incompatible changes in the Elasticsearch API.

Also fixed the out-of-sync Elasticsearch controller for CoreOS.
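
For context on the test fix: the Elasticsearch 2.x root endpoint no longer includes the `status` field that 1.x reported in its JSON body, so the updated check reads the HTTP status code of the proxied request instead of unmarshalling the response. A minimal sketch of that pattern, assuming the `framework.GetServicesProxyRequest` helper and the 1.4-era import paths used by the surrounding (collapsed) test code:

```go
package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForElasticsearch polls the root URL of the elasticsearch-logging
// service through the API server proxy until it answers 200 or graceTime
// elapses. Hypothetical helper for illustration; the real test inlines this.
func waitForElasticsearch(f *framework.Framework, graceTime time.Duration) error {
	var statusCode int
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
		proxyRequest, errProxy := framework.GetServicesProxyRequest(f.Client, f.Client.Get())
		if errProxy != nil {
			continue // could not build the proxy request; retry
		}
		response := proxyRequest.Namespace(api.NamespaceSystem).
			Name("elasticsearch-logging").
			Do()
		if err := response.Error(); err != nil {
			continue // proxy call failed; retry
		}
		// ES 2.x dropped "status" from the root JSON, so the HTTP status
		// code is now the readiness signal.
		response.StatusCode(&statusCode)
		if statusCode == 200 {
			return nil
		}
	}
	return fmt.Errorf("elasticsearch-logging not ready after %v (last status: %d)", graceTime, statusCode)
}
```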

@piosz
Kubernetes Submit Queue committed Oct 20, 2016
2 parents d249236 + 9832ae1 commit 35943d6
Showing 5 changed files with 17 additions and 47 deletions.
4 changes: 2 additions & 2 deletions cluster/addons/fluentd-elasticsearch/es-controller.yaml

```diff
@@ -20,8 +20,8 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.9
-        name: elasticsearch-logging
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1
+        name: elasticsearch-logging
         resources:
           # need more cpu upon initialization, therefore burstable class
           limits:
```
```diff
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kibana-logging
-        image: gcr.io/google_containers/kibana:1.3
+        image: gcr.io/google_containers/kibana:v4.6.1
         resources:
           # keep request = limit to keep this container in guaranteed class
           limits:
```
```diff
@@ -20,12 +20,12 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.9
-        name: elasticsearch-logging
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1
+        name: elasticsearch-logging
         resources:
-          # keep request = limit to keep this container in guaranteed class
+          # need more cpu upon initialization, therefore burstable class
           limits:
-            cpu: 100m
+            cpu: 1000m
           requests:
             cpu: 100m
         ports:
```
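
The hunk above also flips the CoreOS manifest from the Guaranteed pattern (requests equal to limits) to the Burstable one (a 1000m CPU limit over a 100m request), matching the main addon and giving Elasticsearch headroom for its CPU-hungry initialization. A toy sketch of the classification rule the comments refer to; it mirrors the Kubernetes QoS logic in spirit only, ignoring memory and the BestEffort case:

```go
package main

import "fmt"

// qosClass reduces the QoS rule to the single-resource case in the manifests
// above: requests == limits -> Guaranteed, requests < limits -> Burstable.
// Simplified illustration; real classification also considers memory and
// containers with no requests or limits at all (BestEffort).
func qosClass(requestMilliCPU, limitMilliCPU int64) string {
	switch {
	case requestMilliCPU == limitMilliCPU:
		return "Guaranteed"
	case requestMilliCPU < limitMilliCPU:
		return "Burstable"
	default:
		return "invalid: request exceeds limit"
	}
}

func main() {
	fmt.Println(qosClass(100, 100))  // old manifest: Guaranteed
	fmt.Println(qosClass(100, 1000)) // updated manifest: Burstable
}
```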
```diff
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kibana-logging
-        image: gcr.io/google_containers/kibana:1.3
+        image: gcr.io/google_containers/kibana:v4.6.1
         resources:
           # keep request = limit to keep this container in guaranteed class
           limits:
```
48 changes: 9 additions & 39 deletions test/e2e/cluster_logging_es.go
```diff
@@ -111,8 +111,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 
 	By("Checking to make sure we are talking to an Elasticsearch service.")
 	// Perform a few checks to make sure this looks like an Elasticsearch cluster.
-	var statusCode float64
-	var esResponse map[string]interface{}
+	var statusCode int
 	err = nil
 	var body []byte
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
@@ -122,52 +121,25 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 			continue
 		}
 		// Query against the root URL for Elasticsearch.
-		body, err = proxyRequest.Namespace(api.NamespaceSystem).
+		response := proxyRequest.Namespace(api.NamespaceSystem).
 			Name("elasticsearch-logging").
-			DoRaw()
+			Do()
+		err = response.Error()
+		response.StatusCode(&statusCode)
+
 		if err != nil {
 			framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
 			continue
 		}
-		err = json.Unmarshal(body, &esResponse)
-		if err != nil {
-			framework.Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err)
-			continue
-		}
-		statusIntf, ok := esResponse["status"]
-		if !ok {
-			framework.Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse)
-			continue
-		}
-		statusCode, ok = statusIntf.(float64)
-		if !ok {
-			// Assume this is a string returning Failure. Retry.
-			framework.Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf)
-			continue
-		}
 		if int(statusCode) != 200 {
 			framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
 			continue
 		}
 		break
 	}
 
-	if err != nil {
-		return err
-	}
-
+	Expect(err).NotTo(HaveOccurred())
 	if int(statusCode) != 200 {
-		return fmt.Errorf("Elasticsearch cluster has a bad status: %v", statusCode)
-	}
-
-	// Check to see if have a cluster_name field.
-	clusterName, ok := esResponse["cluster_name"]
-	if !ok {
-		return fmt.Errorf("No cluster_name field in Elasticsearch response: %v", esResponse)
-	}
-
-	if clusterName != "kubernetes-logging" {
-		return fmt.Errorf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName)
+		framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
 	}
 
 	// Now assume we really are talking to an Elasticsearch instance.
@@ -188,8 +160,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 		if err != nil {
 			continue
 		}
-
-		var health map[string]interface{}
+		health := make(map[string]interface{})
 		err := json.Unmarshal(body, &health)
 		if err != nil {
 			framework.Logf("Bad json response from elasticsearch: %v", err)
@@ -210,7 +181,6 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 			break
 		}
 	}
-
 	if !healthy {
 		return fmt.Errorf("After %v elasticsearch cluster is not healthy", graceTime)
 	}
```
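
The health-polling half of the test (largely collapsed above) decodes the `_cluster/health` body into the `health` map and keeps retrying until the cluster reports a usable status. A standalone sketch of that parsing step; treating both "green" and "yellow" as healthy is an assumption, since the lines that inspect the map are collapsed in the diff:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// isHealthy mimics the unmarshal-and-inspect step from the test: decode the
// _cluster/health JSON and check its "status" field.
func isHealthy(body []byte) bool {
	health := make(map[string]interface{})
	if err := json.Unmarshal(body, &health); err != nil {
		return false
	}
	status, ok := health["status"].(string)
	return ok && (status == "green" || status == "yellow")
}

func main() {
	fmt.Println(isHealthy([]byte(`{"cluster_name":"kubernetes-logging","status":"green"}`))) // true
	fmt.Println(isHealthy([]byte(`not json`)))                                               // false
}
```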
