/
report.go
95 lines (81 loc) · 3.44 KB
/
report.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
package clustercompression
import (
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/k-cloud-labs/kluster-capacity/pkg"
"github.com/k-cloud-labs/kluster-capacity/pkg/utils"
)
// ClusterCompressionReview is the top-level report produced by a cluster
// compression run. It embeds metav1.TypeMeta so the report serializes in the
// style of a Kubernetes API object.
type ClusterCompressionReview struct {
	metav1.TypeMeta
	// Status holds the outcome of the compression simulation.
	Status ClusterCompressionReviewReviewStatus `json:"status"`
}
// ClusterCompressionReviewReviewStatus is the result payload of a cluster
// compression review: which nodes can be scaled down, why the simulation
// stopped, and how many selection/scheduling attempts were made.
type ClusterCompressionReviewReviewStatus struct {
	// CreationTimestamp records when this report was generated.
	CreationTimestamp time.Time `json:"creationTimestamp"`
	// StopReason describes why the simulation terminated; may be nil.
	StopReason *ClusterCompressionReviewScheduleStopReason `json:"stopReason"`
	// ScaleDownNodeNames lists the nodes determined to be removable.
	ScaleDownNodeNames []string `json:"scaleDownNodeNames"`
	// SelectNodeCount is the number of node-selection attempts.
	// NOTE(review): the JSON tag is uppercase "SelectNodeCount", unlike the
	// camelCase tags on sibling fields — likely unintentional, but changing it
	// would break existing consumers of the JSON output; confirm before fixing.
	SelectNodeCount int `json:"SelectNodeCount"`
	// SchedulerCount is the number of successful pod-scheduling attempts.
	SchedulerCount int `json:"schedulerCount"`
	// FailedSchedulerCount is the number of failed pod-scheduling attempts.
	FailedSchedulerCount int `json:"failedSchedulerCount"`
}
// ClusterCompressionReviewScheduleStopReason is a parsed "type: message"
// termination reason extracted from the scheduler's stop string.
type ClusterCompressionReviewScheduleStopReason struct {
	// StopType is the portion of the stop reason before the first colon.
	StopType string `json:"stopType"`
	// StopMessage is the portion after the first colon, with surrounding
	// spaces trimmed.
	StopMessage string `json:"stopMessage"`
}
// generateReport wraps the final simulation status into a
// ClusterCompressionReview ready for printing or serialization.
func generateReport(status *pkg.Status) *ClusterCompressionReview {
	review := &ClusterCompressionReview{}
	review.Status = getReviewStatus(status)
	return review
}
// getReviewStatus translates the scheduler's final Status into the review
// status payload, stamping it with the current wall-clock time.
func getReviewStatus(status *pkg.Status) ClusterCompressionReviewReviewStatus {
	reviewStatus := ClusterCompressionReviewReviewStatus{
		CreationTimestamp:    time.Now(),
		ScaleDownNodeNames:   status.NodesToScaleDown,
		SelectNodeCount:      status.SelectNodeCount,
		SchedulerCount:       status.SchedulerCount,
		FailedSchedulerCount: status.FailedSchedulerCount,
	}
	reviewStatus.StopReason = getMainStopReason(status.StopReason)
	return reviewStatus
}
// getMainStopReason parses the first line of the scheduler's stop message,
// which is expected to look like "type: message", into a structured reason.
// If the first line contains no colon, the whole line becomes StopType and
// StopMessage stays empty. (The previous implementation sliced with the raw
// result of strings.Index and panicked on input without a colon.)
func getMainStopReason(message string) *ClusterCompressionReviewScheduleStopReason {
	// Only the first line of a possibly multi-line message is reported.
	firstLine := message
	if nl := strings.Index(firstLine, "\n"); nl >= 0 {
		firstLine = firstLine[:nl]
	}
	colon := strings.Index(firstLine, ":")
	if colon < 0 {
		// No "type: message" separator present; report the line as the type.
		return &ClusterCompressionReviewScheduleStopReason{StopType: firstLine}
	}
	return &ClusterCompressionReviewScheduleStopReason{
		StopType:    firstLine[:colon],
		StopMessage: strings.Trim(firstLine[colon+1:], " "),
	}
}
// Print renders the review either as JSON (format == "json") or in the
// default human-readable layout; verbose only affects the default layout.
func (r *ClusterCompressionReview) Print(verbose bool, format string) error {
	if format == "json" {
		return utils.PrintJson(r)
	}
	return clusterCapacityReviewDefaultPrint(r, verbose)
}
// clusterCapacityReviewDefaultPrint writes the human-readable form of the
// review to stdout. With verbose it prints selection/scheduling counters and
// the termination reason alongside the node list; without it, just the node
// names (one per line). When no node can be scaled down it always prints the
// full summary. Always returns nil.
func clusterCapacityReviewDefaultPrint(r *ClusterCompressionReview, verbose bool) error {
	// The previous implementation fell through to the summary branch when
	// r == nil and then dereferenced r — a guaranteed nil-pointer panic.
	if r == nil {
		fmt.Println("No nodes in the cluster can be scaled down.")
		return nil
	}
	if len(r.Status.ScaleDownNodeNames) > 0 {
		if verbose {
			printScheduleSummary(&r.Status)
			fmt.Printf("%d node(s) in the cluster can be scaled down.\n", len(r.Status.ScaleDownNodeNames))
			printStopReason(r.Status.StopReason)
			fmt.Printf("\nnodes selected to be scaled down:\n")
			for _, name := range r.Status.ScaleDownNodeNames {
				fmt.Printf("\t- %s\n", name)
			}
		} else {
			for _, name := range r.Status.ScaleDownNodeNames {
				fmt.Println(name)
			}
		}
		return nil
	}
	printScheduleSummary(&r.Status)
	fmt.Println("No nodes in the cluster can be scaled down.")
	printStopReason(r.Status.StopReason)
	return nil
}

// printScheduleSummary prints the selection/scheduling counters shared by the
// verbose and the no-result output paths.
func printScheduleSummary(s *ClusterCompressionReviewReviewStatus) {
	fmt.Printf("Select node %d times.\n", s.SelectNodeCount)
	fmt.Printf("Scheduled pod %d times, with %d scheduling failure.\n", s.SchedulerCount+s.FailedSchedulerCount, s.FailedSchedulerCount)
}

// printStopReason prints the termination reason; StopReason is a pointer
// field, so tolerate nil (previously an unchecked dereference).
func printStopReason(reason *ClusterCompressionReviewScheduleStopReason) {
	if reason == nil {
		return
	}
	fmt.Printf("\nTermination reason: %v: %v\n", reason.StopType, reason.StopMessage)
}