Add CPU usage nano cores for windows nodes #80176

Merged: 4 commits, Jul 22, 2019
32 changes: 28 additions & 4 deletions pkg/kubelet/winstats/perfcounter_nodestats.go
@@ -53,14 +53,20 @@ var (

// NewPerfCounterClient creates a client using perf counters
func NewPerfCounterClient() (Client, error) {
return newClient(&perfCounterNodeStatsClient{})
// Initialize the cache
initCache := cpuUsageCoreNanoSecondsCache{0, 0}
return newClient(&perfCounterNodeStatsClient{
cpuUsageCoreNanoSecondsCache: initCache,
})
}

// perfCounterNodeStatsClient is a client that provides Windows Stats via PerfCounters
type perfCounterNodeStatsClient struct {
nodeMetrics
mu sync.RWMutex // mu protects nodeMetrics
nodeInfo
// cpuUsageCoreNanoSecondsCache caches the cpu usage for nodes.
cpuUsageCoreNanoSecondsCache
}

func (p *perfCounterNodeStatsClient) startMonitoring() error {
@@ -110,6 +116,17 @@ func (p *perfCounterNodeStatsClient) startMonitoring() error {
p.collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter, networkAdapterCounter)
}, perfCounterUpdatePeriod)

// Cache the CPU usage every defaultCachePeriod
go wait.Forever(func() {
Contributor

Can you add a comment for why we aren't just adding this to p.collectMetricsData? It seems like it's because we want it to have a different update frequency?

On that note, could having a different frequency have any side effects or result in any inconsistency in a given metric?

Contributor Author

Yeah, that's right, the update frequency is different here.

And cpuUsageNanoCores can lag the real value by at most 14 seconds, since the goroutine runs every 15 seconds: if we query at the 29th second, it will still return the value from the 15th second.

newValue := p.nodeMetrics.cpuUsageCoreNanoSeconds
p.mu.Lock()
defer p.mu.Unlock()
p.cpuUsageCoreNanoSecondsCache = cpuUsageCoreNanoSecondsCache{
previousValue: p.cpuUsageCoreNanoSecondsCache.latestValue,
latestValue: newValue,
}
}, defaultCachePeriod)

return nil
}

@@ -145,6 +162,7 @@ func (p *perfCounterNodeStatsClient) getNodeInfo() nodeInfo {

func (p *perfCounterNodeStatsClient) collectMetricsData(cpuCounter, memWorkingSetCounter, memCommittedBytesCounter *perfCounter, networkAdapterCounter *networkCounter) {
cpuValue, err := cpuCounter.getData()
cpuCores := runtime.NumCPU()
if err != nil {
klog.Errorf("Unable to get cpu perf counter data; err: %v", err)
return
@@ -171,23 +189,29 @@ func (p *perfCounterNodeStatsClient) collectMetricsData(cpuCounter, memWorkingSe
p.mu.Lock()
defer p.mu.Unlock()
p.nodeMetrics = nodeMetrics{
cpuUsageCoreNanoSeconds: p.convertCPUValue(cpuValue),
cpuUsageCoreNanoSeconds: p.convertCPUValue(cpuCores, cpuValue),
cpuUsageNanoCores: p.getCPUUsageNanoCores(),
memoryPrivWorkingSetBytes: memWorkingSetValue,
memoryCommittedBytes: memCommittedBytesValue,
interfaceStats: networkAdapterStats,
timeStamp: time.Now(),
}
}

func (p *perfCounterNodeStatsClient) convertCPUValue(cpuValue uint64) uint64 {
cpuCores := runtime.NumCPU()
func (p *perfCounterNodeStatsClient) convertCPUValue(cpuCores int, cpuValue uint64) uint64 {
// This converts perf counter data which is cpu percentage for all cores into nanoseconds.
// The formula is (cpuPercentage / 100.0) * #cores * 1e+9 (nano seconds). More info here:
// https://github.com/kubernetes/heapster/issues/650
newValue := p.nodeMetrics.cpuUsageCoreNanoSeconds + uint64((float64(cpuValue)/100.0)*float64(cpuCores)*1e9)
return newValue
}

func (p *perfCounterNodeStatsClient) getCPUUsageNanoCores() uint64 {
cachePeriodSeconds := uint64(defaultCachePeriod / time.Second)
cpuUsageNanoCores := (p.cpuUsageCoreNanoSecondsCache.latestValue - p.cpuUsageCoreNanoSecondsCache.previousValue) / cachePeriodSeconds
return cpuUsageNanoCores
}

func getPhysicallyInstalledSystemMemoryBytes() (uint64, error) {
// We use GlobalMemoryStatusEx instead of GetPhysicallyInstalledSystemMemory
// on Windows node for the following reasons:
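To make the mechanism concrete, here is a minimal standalone sketch (illustration only, not part of the change) with simplified, lock-free stand-ins for the types above: the cumulative counter advances once per 1-second collection using the same percentage-to-nanoseconds formula, the two-value cache rotates every defaultCachePeriod, and the instantaneous usage is the delta divided by the period. The 50% busy / 4-core readings are assumed values.

package main

import (
	"fmt"
	"time"
)

// Simplified stand-in for the PR's cache type.
type cpuUsageCoreNanoSecondsCache struct {
	latestValue   uint64
	previousValue uint64
}

const defaultCachePeriod = 10 * time.Second

// convert mirrors convertCPUValue: one perf-counter reading (CPU percentage
// across all cores) becomes nanoseconds of CPU time for that 1-second sample.
func convert(cumulative uint64, cpuCores int, cpuPercent uint64) uint64 {
	return cumulative + uint64((float64(cpuPercent)/100.0)*float64(cpuCores)*1e9)
}

// rotate mirrors the new goroutine: the current cumulative counter becomes
// latestValue and the old latestValue becomes previousValue.
func rotate(cache cpuUsageCoreNanoSecondsCache, cumulative uint64) cpuUsageCoreNanoSecondsCache {
	return cpuUsageCoreNanoSecondsCache{
		previousValue: cache.latestValue,
		latestValue:   cumulative,
	}
}

// nanoCores mirrors getCPUUsageNanoCores: average usage over the cache period.
func nanoCores(cache cpuUsageCoreNanoSecondsCache) uint64 {
	cachePeriodSeconds := uint64(defaultCachePeriod / time.Second)
	return (cache.latestValue - cache.previousValue) / cachePeriodSeconds
}

func main() {
	var cache cpuUsageCoreNanoSecondsCache
	var cumulative uint64

	// Assume the node stays 50% busy on 4 cores, so every 1-second collection
	// adds (50/100) * 4 * 1e9 = 2e9 ns of CPU time to the cumulative counter.
	for period := 1; period <= 2; period++ {
		for s := 0; s < int(defaultCachePeriod/time.Second); s++ {
			cumulative = convert(cumulative, 4, 50)
		}
		cache = rotate(cache, cumulative)
		// 50% of 4 cores is 2 full cores, i.e. 2000000000 nano cores.
		fmt.Printf("period %d: cpuUsageNanoCores = %d\n", period, nanoCores(cache))
	}
}

Because the cache only rotates once per defaultCachePeriod, a read between rotations returns an average that can lag real-time usage by up to one period, which is the trade-off discussed in the review thread above.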
4 changes: 4 additions & 0 deletions pkg/kubelet/winstats/perfcounters.go
@@ -34,6 +34,10 @@ const (
// Perf counters are updated every second. This is the same as the default cadvisor collection period
// see https://github.com/google/cadvisor/blob/master/docs/runtime_options.md#housekeeping
perfCounterUpdatePeriod = 1 * time.Second
// defaultCachePeriod is the default cache period for each cpuUsage.
// This matches with the cadvisor setting and the time interval we use for containers.
// see https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/cadvisor/cadvisor_linux.go#L63
defaultCachePeriod = 10 * time.Second
)

type perfCounter struct {
11 changes: 11 additions & 0 deletions pkg/kubelet/winstats/winstats.go
@@ -55,6 +55,7 @@ type winNodeStatsClient interface {

type nodeMetrics struct {
cpuUsageCoreNanoSeconds uint64
cpuUsageNanoCores uint64
memoryPrivWorkingSetBytes uint64
memoryCommittedBytes uint64
timeStamp time.Time
@@ -69,6 +70,11 @@ type nodeInfo struct {
startTime time.Time
}

type cpuUsageCoreNanoSecondsCache struct {
latestValue uint64
previousValue uint64
}

// newClient constructs a Client.
func newClient(statsNodeClient winNodeStatsClient) (Client, error) {
statsClient := new(StatsClient)
@@ -122,6 +128,11 @@ func (c *StatsClient) createRootContainerInfo() (*cadvisorapiv2.ContainerInfo, e
Total: nodeMetrics.cpuUsageCoreNanoSeconds,
},
},
CpuInst: &cadvisorapiv2.CpuInstStats{
Usage: cadvisorapiv2.CpuInstUsage{
Total: nodeMetrics.cpuUsageNanoCores,
},
},
Memory: &cadvisorapi.MemoryStats{
WorkingSet: nodeMetrics.memoryPrivWorkingSetBytes,
Usage: nodeMetrics.memoryCommittedBytes,
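For context, a minimal sketch (illustration only, not part of the change) of where the new value lands in the cadvisor v2 structs populated by createRootContainerInfo: the cumulative counter stays in Cpu.Usage.Total, while the instantaneous value goes into the new CpuInst field. The numbers are assumed; consumers such as the kubelet summary API would typically read the nano cores value from CpuInst.Usage.Total.

package main

import (
	"fmt"
	"time"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
)

func main() {
	// Assumed node metrics standing in for what the Windows stats client reports.
	stats := cadvisorapiv2.ContainerStats{
		Timestamp: time.Now(),
		Cpu: &cadvisorapi.CpuStats{
			Usage: cadvisorapi.CpuUsage{
				Total: 5000000000, // cumulative CPU usage in nanoseconds
			},
		},
		CpuInst: &cadvisorapiv2.CpuInstStats{
			Usage: cadvisorapiv2.CpuInstUsage{
				Total: 300000000, // instantaneous CPU usage in nano cores
			},
		},
	}

	fmt.Println("cumulative ns:", stats.Cpu.Usage.Total)
	fmt.Println("nano cores:", stats.CpuInst.Usage.Total)
}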
47 changes: 47 additions & 0 deletions pkg/kubelet/winstats/winstats_test.go
@@ -38,6 +38,7 @@ func (f fakeWinNodeStatsClient) startMonitoring() error {
func (f fakeWinNodeStatsClient) getNodeMetrics() (nodeMetrics, error) {
return nodeMetrics{
cpuUsageCoreNanoSeconds: 123,
cpuUsageNanoCores: 23,
memoryPrivWorkingSetBytes: 1234,
memoryCommittedBytes: 12345,
timeStamp: timeStamp,
@@ -78,6 +79,11 @@ func TestWinContainerInfos(t *testing.T) {
Total: 123,
},
},
CpuInst: &cadvisorapiv2.CpuInstStats{
Usage: cadvisorapiv2.CpuInstUsage{
Total: 23,
},
},
Memory: &cadvisorapi.MemoryStats{
WorkingSet: 1234,
Usage: 12345,
@@ -100,6 +106,7 @@
assert.Equal(t, actualRootInfos["/"].Spec, infos["/"].Spec)
assert.Equal(t, len(actualRootInfos["/"].Stats), len(infos["/"].Stats))
assert.Equal(t, actualRootInfos["/"].Stats[0].Cpu, infos["/"].Stats[0].Cpu)
assert.Equal(t, actualRootInfos["/"].Stats[0].CpuInst, infos["/"].Stats[0].CpuInst)
assert.Equal(t, actualRootInfos["/"].Stats[0].Memory, infos["/"].Stats[0].Memory)
}

@@ -123,6 +130,46 @@ func TestWinVersionInfo(t *testing.T) {
KernelVersion: "v42"})
}

func TestConvertCPUValue(t *testing.T) {
testCases := []struct {
cpuValue uint64
expected uint64
}{
{cpuValue: uint64(50), expected: uint64(2000000000)},
{cpuValue: uint64(0), expected: uint64(0)},
{cpuValue: uint64(100), expected: uint64(4000000000)},
}
var cpuCores = 4

for _, tc := range testCases {
p := perfCounterNodeStatsClient{}
newValue := p.convertCPUValue(cpuCores, tc.cpuValue)
assert.Equal(t, newValue, tc.expected)
}
}

func TestGetCPUUsageNanoCores(t *testing.T) {
testCases := []struct {
latestValue uint64
previousValue uint64
expected uint64
}{
{latestValue: uint64(0), previousValue: uint64(0), expected: uint64(0)},
{latestValue: uint64(2000000000), previousValue: uint64(0), expected: uint64(200000000)},
{latestValue: uint64(5000000000), previousValue: uint64(2000000000), expected: uint64(300000000)},
}

for _, tc := range testCases {
p := perfCounterNodeStatsClient{}
p.cpuUsageCoreNanoSecondsCache = cpuUsageCoreNanoSecondsCache{
latestValue: tc.latestValue,
previousValue: tc.previousValue,
}
cpuUsageNanoCores := p.getCPUUsageNanoCores()
assert.Equal(t, cpuUsageNanoCores, tc.expected)
}
}

func getClient(t *testing.T) Client {
f := fakeWinNodeStatsClient{}
c, err := newClient(f)