Resource util kinda sorta working
robscott committed Feb 3, 2019
1 parent 2ee6276 commit f88864e
Showing 2 changed files with 116 additions and 171 deletions.
149 changes: 55 additions & 94 deletions pkg/capacity/list.go
@@ -15,17 +15,13 @@
package capacity

import (
"encoding/json"
"fmt"
"os"
"sort"
"text/tabwriter"
"time"

"github.com/robscott/kube-capacity/pkg/kube"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)

func List(args []string, outputFormat string) {
@@ -53,35 +49,31 @@ func List(args []string, outputFormat string) {
panic(err.Error())
}

cr := clusterResource{
cpuAllocatable: resource.Quantity{},
cpuRequest: resource.Quantity{},
cpuLimit: resource.Quantity{},
memAllocatable: resource.Quantity{},
memRequest: resource.Quantity{},
memLimit: resource.Quantity{},
capacityByNode: map[string]*nodeResource{},
cm := clusterMetric{
cpu: &resourceMetric{},
memory: &resourceMetric{},
nodeMetrics: map[string]*nodeMetric{},
podMetrics: map[string]*podMetric{},
}

for _, node := range nodeList.Items {
cr.capacityByNode[node.Name] = &nodeResource{
cpuAllocatable: node.Status.Allocatable["cpu"],
cpuRequest: resource.Quantity{},
cpuLimit: resource.Quantity{},
memAllocatable: node.Status.Allocatable["memory"],
memRequest: resource.Quantity{},
memLimit: resource.Quantity{},
podResources: []podResource{},
cm.nodeMetrics[node.Name] = &nodeMetric{
cpu: &resourceMetric{
allocatable: node.Status.Allocatable["cpu"],
},
memory: &resourceMetric{
allocatable: node.Status.Allocatable["memory"],
},
podMetrics: map[string]*podMetric{},
}
}

for _, pod := range podList.Items {
n, ok := cr.capacityByNode[pod.Spec.NodeName]
if ok {
n.addPodResources(&pod)
}
}
for _, pod := range podList.Items {
cm.addPodMetric(&pod)
}

cr.addNodeCapacity(cr.capacityByNode[node.Name])
for _, node := range nodeList.Items {
cm.addNodeMetric(cm.nodeMetrics[node.Name])
}

nmList, err := mClientset.MetricsV1beta1().NodeMetricses().List(metav1.ListOptions{})
@@ -90,10 +82,12 @@ func List(args []string, outputFormat string) {
panic(err.Error())
}

fmt.Printf("========> %#v\n", nmList)

for _, nm := range nmList.Items {
fmt.Printf("nm =============> %#v\n", nm)
for _, node := range nmList.Items {
nm := cm.nodeMetrics[node.GetName()]
cm.cpu.utilization.Add(node.Usage["cpu"])
cm.memory.utilization.Add(node.Usage["memory"])
nm.cpu.utilization = node.Usage["cpu"]
nm.memory.utilization = node.Usage["memory"]
}

pmList, err := mClientset.MetricsV1beta1().PodMetricses("").List(metav1.ListOptions{})
@@ -102,20 +96,22 @@ func List(args []string, outputFormat string) {
panic(err.Error())
}

fmt.Printf("pmList ========> %#v\n", pmList)

for _, pm := range pmList.Items {
fmt.Printf("nm =============> %#v\n", pm)
for _, pod := range pmList.Items {
pm := cm.podMetrics[fmt.Sprintf("%s-%s", pod.GetNamespace(), pod.GetName())]
for _, container := range pod.Containers {
pm.cpu.utilization.Add(container.Usage["cpu"])
pm.memory.utilization.Add(container.Usage["memory"])
}
}

printList(cr, outputFormat)
printList(cm, outputFormat)
}

func printList(cr clusterResource, outputFormat string) {
names := make([]string, len(cr.capacityByNode))
func printList(cm clusterMetric, outputFormat string) {
names := make([]string, len(cm.nodeMetrics))

i := 0
for name := range cr.capacityByNode {
for name := range cm.nodeMetrics {
names[i] = name
i++
}
@@ -125,80 +121,45 @@ func printList(cr clusterResource, outputFormat string) {
w.Init(os.Stdout, 0, 8, 2, ' ', 0)

if outputFormat == "wide" {
fmt.Fprintln(w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS")
fmt.Fprintln(w, "NODE\t NAMESPACE\t POD\t CPU REQUESTS \t CPU LIMITS \t CPU UTIL \t MEMORY REQUESTS \t MEMORY LIMITS \t MEMORY UTIL")

if len(names) > 1 {
fmt.Fprintf(w, "* \t *\t *\t %s \t %s \t %s \t %s \n",
cr.cpuRequestString(), cr.cpuLimitString(),
cr.memRequestString(), cr.memLimitString())
fmt.Fprintf(w, "* \t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n",
cm.cpu.requestString(), cm.cpu.limitString(), cm.cpu.utilString(),
cm.memory.requestString(), cm.memory.limitString(), cm.memory.utilString())
fmt.Fprintln(w, "\t\t\t\t\t\t\t\t")
}
} else {
fmt.Fprintln(w, "NODE\t CPU REQUESTS \t CPU LIMITS \t MEMORY REQUESTS \t MEMORY LIMITS")

if len(names) > 1 {
fmt.Fprintf(w, "* \t %s \t %s \t %s \t %s \n",
cr.cpuRequestString(), cr.cpuLimitString(),
cr.memRequestString(), cr.memLimitString())
cm.cpu.requestString(), cm.cpu.limitString(),
cm.memory.requestString(), cm.memory.limitString())
}
}

for _, name := range names {
cap := cr.capacityByNode[name]
nm := cm.nodeMetrics[name]

if outputFormat == "wide" {
fmt.Fprintf(w, "%s \t *\t *\t %s \t %s \t %s \t %s \n", name,
cap.cpuRequestString(), cap.cpuLimitString(),
cap.memRequestString(), cap.memLimitString())

for _, pod := range cap.podResources {
fmt.Fprintf(w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \n", name,
pod.namespace, pod.name,
pod.cpuRequestString(cap), pod.cpuLimitString(cap),
pod.memRequestString(cap), pod.memLimitString(cap))
fmt.Fprintf(w, "%s \t *\t *\t %s \t %s \t %s \t %s \t %s \t %s \n", name,
nm.cpu.requestString(), nm.cpu.limitString(), nm.cpu.utilString(),
nm.memory.requestString(), nm.memory.limitString(), nm.memory.utilString())

for _, pm := range nm.podMetrics {
fmt.Fprintf(w, "%s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \t %s \n", name,
pm.namespace, pm.name,
pm.cpu.requestStringPar(nm.cpu), pm.cpu.limitStringPar(nm.cpu), pm.cpu.utilStringPar(nm.cpu),
pm.memory.requestStringPar(nm.memory), pm.memory.limitStringPar(nm.memory), pm.memory.utilStringPar(nm.memory))
}
fmt.Fprintln(w)
fmt.Fprintln(w, "\t\t\t\t\t\t\t\t")
} else {
fmt.Fprintf(w, "%s \t %s \t %s \t %s \t %s \n", name,
cap.cpuRequestString(), cap.cpuLimitString(),
cap.memRequestString(), cap.memLimitString())
nm.cpu.requestString(), nm.cpu.limitString(),
nm.memory.requestString(), nm.memory.limitString())
}
}

w.Flush()
}

// PodMetricsList : PodMetricsList
type PodMetricsList struct {
Kind string `json:"kind"`
APIVersion string `json:"apiVersion"`
Metadata struct {
SelfLink string `json:"selfLink"`
} `json:"metadata"`
Items []struct {
Metadata struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
SelfLink string `json:"selfLink"`
CreationTimestamp time.Time `json:"creationTimestamp"`
} `json:"metadata"`
Timestamp time.Time `json:"timestamp"`
Window string `json:"window"`
Containers []struct {
Name string `json:"name"`
Usage struct {
CPU string `json:"cpu"`
Memory string `json:"memory"`
} `json:"usage"`
} `json:"containers"`
} `json:"items"`
}

func getMetrics(clientset *kubernetes.Clientset) (*PodMetricsList, error) {
data, err := clientset.RESTClient().Get().AbsPath("apis/metrics.k8s.io/v1beta1/pods").DoRaw()
if err != nil {
return nil, err
}
pods := &PodMetricsList{}
err = json.Unmarshal(data, pods)
return pods, err
}
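
Note on the output code above: the wide format relies on Go's standard text/tabwriter to align the extra utilization columns. Below is a minimal, self-contained sketch of that pattern; the node name and percentage strings are made-up placeholders, not values from this commit.

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Columns are delimited by \t and padded into alignment when Flush runs,
	// mirroring the Init call used in printList above.
	w := new(tabwriter.Writer)
	w.Init(os.Stdout, 0, 8, 2, ' ', 0)
	fmt.Fprintln(w, "NODE\t CPU REQUESTS \t CPU LIMITS \t CPU UTIL")
	fmt.Fprintf(w, "%s \t %s \t %s \t %s \n", "example-node", "560m (28%)", "1010m (50%)", "151m (7%)")
	w.Flush()
}
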
138 changes: 61 additions & 77 deletions pkg/capacity/resources.go
@@ -22,108 +22,92 @@ import (
resourcehelper "k8s.io/kubernetes/pkg/kubectl/util/resource"
)

type clusterResource struct {
cpuAllocatable resource.Quantity
cpuRequest resource.Quantity
cpuLimit resource.Quantity
memAllocatable resource.Quantity
memRequest resource.Quantity
memLimit resource.Quantity
capacityByNode map[string]*nodeResource
type resourceMetric struct {
allocatable resource.Quantity
utilization resource.Quantity
request resource.Quantity
limit resource.Quantity
}

type nodeResource struct {
cpuAllocatable resource.Quantity
cpuRequest resource.Quantity
cpuLimit resource.Quantity
memAllocatable resource.Quantity
memRequest resource.Quantity
memLimit resource.Quantity
podResources []podResource
type clusterMetric struct {
cpu *resourceMetric
memory *resourceMetric
nodeMetrics map[string]*nodeMetric
podMetrics map[string]*podMetric
}

type podResource struct {
name string
namespace string
cpuRequest resource.Quantity
cpuLimit resource.Quantity
memRequest resource.Quantity
memLimit resource.Quantity
type nodeMetric struct {
cpu *resourceMetric
memory *resourceMetric
podMetrics map[string]*podMetric
}

func (nr *nodeResource) addPodResources(pod *corev1.Pod) {
req, limit := resourcehelper.PodRequestsAndLimits(pod)

nr.podResources = append(nr.podResources, podResource{
name: pod.Name,
namespace: pod.Namespace,
cpuRequest: req["cpu"],
cpuLimit: limit["cpu"],
memRequest: req["memory"],
memLimit: limit["memory"],
})

nr.cpuRequest.Add(req["cpu"])
nr.cpuLimit.Add(limit["cpu"])
nr.memRequest.Add(req["memory"])
nr.memLimit.Add(limit["memory"])
}

func (cr *clusterResource) addNodeCapacity(nr *nodeResource) {
cr.cpuAllocatable.Add(nr.cpuAllocatable)
cr.cpuRequest.Add(nr.cpuRequest)
cr.cpuLimit.Add(nr.cpuLimit)
cr.memAllocatable.Add(nr.memAllocatable)
cr.memRequest.Add(nr.memRequest)
cr.memLimit.Add(nr.memLimit)
}

func (cr *clusterResource) cpuRequestString() string {
return resourceString(cr.cpuRequest, cr.cpuAllocatable)
type podMetric struct {
name string
namespace string
cpu *resourceMetric
memory *resourceMetric
}

func (cr *clusterResource) cpuLimitString() string {
return resourceString(cr.cpuLimit, cr.cpuAllocatable)
func (rm *resourceMetric) addMetric(m *resourceMetric) {
rm.allocatable.Add(m.allocatable)
rm.utilization.Add(m.utilization)
rm.request.Add(m.request)
rm.limit.Add(m.limit)
}

func (cr *clusterResource) memRequestString() string {
return resourceString(cr.memRequest, cr.memAllocatable)
}

func (cr *clusterResource) memLimitString() string {
return resourceString(cr.memLimit, cr.memAllocatable)
}
func (cm *clusterMetric) addPodMetric(pod *corev1.Pod) {
req, limit := resourcehelper.PodRequestsAndLimits(pod)
key := fmt.Sprintf("%s-%s", pod.Namespace, pod.Name)

cm.podMetrics[key] = &podMetric{
name: pod.Name,
namespace: pod.Namespace,
cpu: &resourceMetric{
request: req["cpu"],
limit: limit["cpu"],
},
memory: &resourceMetric{
request: req["memory"],
limit: limit["memory"],
},
}

func (nr *nodeResource) cpuRequestString() string {
return resourceString(nr.cpuRequest, nr.cpuAllocatable)
nm := cm.nodeMetrics[pod.Spec.NodeName]
nm.podMetrics[key] = cm.podMetrics[key]
nm.cpu.request.Add(req["cpu"])
nm.cpu.limit.Add(limit["cpu"])
nm.memory.request.Add(req["memory"])
nm.memory.limit.Add(limit["memory"])
}

func (nr *nodeResource) cpuLimitString() string {
return resourceString(nr.cpuLimit, nr.cpuAllocatable)
func (cm *clusterMetric) addNodeMetric(nm *nodeMetric) {
cm.cpu.addMetric(nm.cpu)
cm.memory.addMetric(nm.memory)
}

func (nr *nodeResource) memRequestString() string {
return resourceString(nr.memRequest, nr.memAllocatable)
func (rm *resourceMetric) requestString() string {
return resourceString(rm.request, rm.allocatable)
}

func (nr *nodeResource) memLimitString() string {
return resourceString(nr.memLimit, nr.memAllocatable)
func (rm *resourceMetric) limitString() string {
return resourceString(rm.limit, rm.allocatable)
}

func (pr *podResource) cpuRequestString(nr *nodeResource) string {
return resourceString(pr.cpuRequest, nr.cpuAllocatable)
func (rm *resourceMetric) utilString() string {
return resourceString(rm.utilization, rm.allocatable)
}

func (pr *podResource) cpuLimitString(nr *nodeResource) string {
return resourceString(pr.cpuLimit, nr.cpuAllocatable)
func (rm *resourceMetric) requestStringPar(pm *resourceMetric) string {
return resourceString(rm.request, pm.allocatable)
}

func (pr *podResource) memRequestString(nr *nodeResource) string {
return resourceString(pr.memRequest, nr.memAllocatable)
func (rm *resourceMetric) limitStringPar(pm *resourceMetric) string {
return resourceString(rm.limit, pm.allocatable)
}

func (pr *podResource) memLimitString(nr *nodeResource) string {
return resourceString(pr.memLimit, nr.memAllocatable)
func (rm *resourceMetric) utilStringPar(pm *resourceMetric) string {
return resourceString(rm.utilization, pm.allocatable)
}

func resourceString(actual, allocatable resource.Quantity) string {
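
The resourceMetric fields above are all resource.Quantity values, so the request/limit/utilization roll-ups in addMetric and addPodMetric are plain Quantity arithmetic. A minimal sketch of that API follows; the quantities are illustrative, not taken from the commit.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Accumulate per-pod CPU requests into a node-level total, the same way
	// addPodMetric and addMetric sum Quantity values.
	total := resource.Quantity{}
	total.Add(resource.MustParse("250m")) // pod A CPU request
	total.Add(resource.MustParse("500m")) // pod B CPU request

	allocatable := resource.MustParse("2")
	fmt.Printf("%s of %s (%.0f%%)\n",
		total.String(), allocatable.String(),
		float64(total.MilliValue())/float64(allocatable.MilliValue())*100)
	// Prints: 750m of 2 (38%)
}
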
