Skip to content

Commit

Permalink
Merge pull request #72420 from Pingan2017/cleanup-outofdisk
Browse files Browse the repository at this point in the history
clean up redundant condition type ‘OutOfDisk’
  • Loading branch information
k8s-ci-robot committed Jul 3, 2019
2 parents f794c82 + e94d7b3 commit 2a82853
Show file tree
Hide file tree
Showing 13 changed files with 12 additions and 82 deletions.
3 changes: 0 additions & 3 deletions pkg/apis/core/types.go
Expand Up @@ -3737,9 +3737,6 @@ type NodeConditionType string
const (
// NodeReady means kubelet is healthy and ready to accept pods.
NodeReady NodeConditionType = "Ready"
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
Expand Down
2 changes: 1 addition & 1 deletion pkg/kubelet/eviction/eviction_manager.go
Expand Up @@ -146,7 +146,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
}

// When node has memory pressure and TaintNodesByCondition is enabled, check BestEffort Pod's toleration:
// admit it if tolerates memory pressure taint, fail for other tolerations, e.g. OutOfDisk.
// admit it if tolerates memory pressure taint, fail for other tolerations, e.g. DiskPressure.
if utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition) &&
v1helper.TolerationsTolerateTaint(attrs.Pod.Spec.Tolerations, &v1.Taint{
Key: schedulerapi.TaintNodeMemoryPressure,
Expand Down
1 change: 0 additions & 1 deletion pkg/kubelet/kubelet_node_status.go
Expand Up @@ -550,7 +550,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
nodestatus.PIDPressureCondition(kl.clock.Now, kl.evictionManager.IsUnderPIDPressure, kl.recordNodeStatusEvent),
nodestatus.ReadyCondition(kl.clock.Now, kl.runtimeState.runtimeErrors, kl.runtimeState.networkErrors, kl.runtimeState.storageErrors, validateHostFunc, kl.containerManager.Status, kl.recordNodeStatusEvent),
nodestatus.VolumesInUse(kl.volumeManager.ReconcilerStatesHasBeenSynced, kl.volumeManager.GetVolumesInUse),
nodestatus.RemoveOutOfDiskCondition(),
// TODO(mtaufen): I decided not to move this setter for now, since all it does is send an event
// and record state back to the Kubelet runtime object. In the future, I'd like to isolate
// these side-effects by decoupling the decisions to send events and partial status recording
Expand Down
15 changes: 0 additions & 15 deletions pkg/kubelet/nodestatus/setters.go
Expand Up @@ -747,18 +747,3 @@ func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLim
return nil
}
}

// RemoveOutOfDiskCondition returns a Setter that strips the obsolete
// OutOfDisk condition from a node's status. The kubelet stopped
// reporting OutOfDisk in 1.12, so any instance still present is stale.
func RemoveOutOfDiskCondition() Setter {
	return func(node *v1.Node) error {
		// Rebuild the condition list without OutOfDisk. Appending to a
		// nil slice keeps the result nil when nothing survives, matching
		// the zero value callers may compare against.
		var kept []v1.NodeCondition
		for _, cond := range node.Status.Conditions {
			if cond.Type == v1.NodeOutOfDisk {
				continue
			}
			kept = append(kept, cond)
		}
		node.Status.Conditions = kept
		return nil
	}
}
48 changes: 0 additions & 48 deletions pkg/kubelet/nodestatus/setters_test.go
Expand Up @@ -1518,54 +1518,6 @@ func TestVolumeLimits(t *testing.T) {
}
}

// TestRemoveOutOfDiskCondition verifies that the setter drops a stale
// OutOfDisk condition while leaving all other conditions untouched.
func TestRemoveOutOfDiskCondition(t *testing.T) {
	now := time.Now()

	testCases := []struct {
		name string
		in   *v1.Node
		want *v1.Node
	}{
		{
			name: "should remove stale OutOfDiskCondition from node status",
			in: &v1.Node{
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						*makeMemoryPressureCondition(false, now, now),
						{
							Type:   v1.NodeOutOfDisk,
							Status: v1.ConditionFalse,
						},
						*makeDiskPressureCondition(false, now, now),
					},
				},
			},
			want: &v1.Node{
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						*makeMemoryPressureCondition(false, now, now),
						*makeDiskPressureCondition(false, now, now),
					},
				},
			},
		},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			setter := RemoveOutOfDiskCondition()
			if err := setter(tc.in); err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			assert.True(t, apiequality.Semantic.DeepEqual(tc.want, tc.in),
				"Diff: %s", diff.ObjectDiff(tc.want, tc.in))
		})
	}
}

// Test Helpers:

// sortableNodeAddress is a type for sorting []v1.NodeAddress
Expand Down
10 changes: 5 additions & 5 deletions pkg/scheduler/eventhandlers_test.go
Expand Up @@ -227,14 +227,14 @@ func TestNodeConditionsChanged(t *testing.T) {
{
Name: "no condition changed",
Changed: false,
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
},
{
Name: "only LastHeartbeatTime changed",
Changed: false,
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}},
OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}},
NewConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}},
},
{
Name: "new node has more healthy conditions",
Expand All @@ -245,7 +245,7 @@ func TestNodeConditionsChanged(t *testing.T) {
{
Name: "new node has less unhealthy conditions",
Changed: true,
OldConditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
NewConditions: []v1.NodeCondition{},
},
{
Expand Down
3 changes: 0 additions & 3 deletions staging/src/k8s.io/api/core/v1/types.go
Expand Up @@ -4208,9 +4208,6 @@ type NodeConditionType string
const (
// NodeReady means kubelet is healthy and ready to accept pods.
NodeReady NodeConditionType = "Ready"
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
// NodeDiskPressure means the kubelet is under pressure due to insufficient available disk.
Expand Down
Expand Up @@ -55,7 +55,7 @@ type CarpStatus struct {
// A human readable message indicating details about why the carp is in this state.
// +optional
Message string
// A brief CamelCase message indicating details about why the carp is in this state. e.g. 'OutOfDisk'
// A brief CamelCase message indicating details about why the carp is in this state. e.g. 'DiskPressure'
// +optional
Reason string

Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Expand Up @@ -66,7 +66,7 @@ type CarpStatus struct {
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// A brief CamelCase message indicating details about why the carp is in this state.
// e.g. 'OutOfDisk'
// e.g. 'DiskPressure'
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`

Expand Down
2 changes: 1 addition & 1 deletion staging/src/k8s.io/apiserver/pkg/apis/example/types.go
Expand Up @@ -55,7 +55,7 @@ type PodStatus struct {
// A human readable message indicating details about why the pod is in this state.
// +optional
Message string
// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'OutOfDisk'
// A brief CamelCase message indicating details about why the pod is in this state. e.g. 'DiskPressure'
// +optional
Reason string

Expand Down

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion staging/src/k8s.io/apiserver/pkg/apis/example/v1/types.go
Expand Up @@ -66,7 +66,7 @@ type PodStatus struct {
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"`
// A brief CamelCase message indicating details about why the pod is in this state.
// e.g. 'OutOfDisk'
// e.g. 'DiskPressure'
// +optional
Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`

Expand Down

0 comments on commit 2a82853

Please sign in to comment.