status_manager.go
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"fmt"
	"reflect"
	"sync"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

	"github.com/golang/glog"
)

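// podStatusSyncRequest pairs a pod with the status that should be written to
// the apiserver for that pod.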
type podStatusSyncRequest struct {
	pod    *api.Pod
	status api.PodStatus
}

// Updates pod statuses in apiserver. Writes only when new status has changed.
// All methods are thread-safe.
type statusManager struct {
	kubeClient client.Interface
	// Map from pod full name to sync status of the corresponding pod.
	podStatusesLock  sync.RWMutex
	podStatuses      map[string]api.PodStatus
	podStatusChannel chan podStatusSyncRequest
}

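// newStatusManager returns a statusManager that writes pod status updates
// through the given kubeClient. Illustrative usage only (kubeClient
// construction elided; the actual wiring inside the kubelet may differ):
//
//	sm := newStatusManager(kubeClient)
//	sm.Start()
//	sm.SetPodStatus(pod, status)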
func newStatusManager(kubeClient client.Interface) *statusManager {
	return &statusManager{
		kubeClient:       kubeClient,
		podStatuses:      make(map[string]api.PodStatus),
		podStatusChannel: make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses
	}
}

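// Start spawns a goroutine that drains the status channel and pushes each
// queued status to the apiserver.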
func (s *statusManager) Start() {
	// syncBatch blocks when no updates are available, so we can run it in a tight loop.
	glog.Info("Starting to sync pod status with apiserver")
	go util.Forever(func() {
		err := s.syncBatch()
		if err != nil {
			glog.Warningf("Failed to update pod status: %v", err)
		}
	}, 0)
}

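// GetPodStatus returns the cached status for the pod with the given full name,
// and whether a cached entry exists for it.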
func (s *statusManager) GetPodStatus(podFullName string) (api.PodStatus, bool) {
	s.podStatusesLock.RLock()
	defer s.podStatusesLock.RUnlock()
	status, ok := s.podStatuses[podFullName]
	return status, ok
}

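// SetPodStatus caches the status for the given pod and, if it differs from the
// previously cached value, queues it for delivery to the apiserver.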
func (s *statusManager) SetPodStatus(pod *api.Pod, status api.PodStatus) {
	podFullName := kubecontainer.GetPodFullName(pod)
	s.podStatusesLock.Lock()
	defer s.podStatusesLock.Unlock()
	oldStatus, found := s.podStatuses[podFullName]

	// ensure that the start time does not change across updates.
	if found && oldStatus.StartTime != nil {
		status.StartTime = oldStatus.StartTime
	}

	// if the status has no start time, we need to set an initial time
	// TODO(yujuhong): Consider setting StartTime when generating the pod
	// status instead, which would allow statusManager to become a simple cache
	// again.
	if status.StartTime.IsZero() {
		if pod.Status.StartTime.IsZero() {
			// the pod did not have a previously recorded value so set to now
			now := util.Now()
			status.StartTime = &now
		} else {
			// the pod had a recorded value, but the kubelet restarted so we need to rebuild cache
			// based on last observed value
			status.StartTime = pod.Status.StartTime
		}
	}

	if !found || !reflect.DeepEqual(oldStatus, status) {
		s.podStatuses[podFullName] = status
		s.podStatusChannel <- podStatusSyncRequest{pod, status}
	} else {
		glog.V(3).Infof("Ignoring same pod status for %s - old: %s new: %s", podFullName, oldStatus, status)
	}
}

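// DeletePodStatus removes the cached status entry for the given pod full name.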
func (s *statusManager) DeletePodStatus(podFullName string) {
	s.podStatusesLock.Lock()
	defer s.podStatusesLock.Unlock()
	delete(s.podStatuses, podFullName)
}

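// RemoveOrphanedStatuses drops cached statuses for pods that are no longer in
// the given set of pod full names.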
// TODO(filipg): It'd be cleaner if we can do this without signal from user.
func (s *statusManager) RemoveOrphanedStatuses(podFullNames map[string]bool) {
	s.podStatusesLock.Lock()
	defer s.podStatusesLock.Unlock()
	for key := range s.podStatuses {
		if _, ok := podFullNames[key]; !ok {
			glog.V(5).Infof("Removing %q from status map.", key)
			delete(s.podStatuses, key)
		}
	}
}

// syncBatch syncs pod statuses with the apiserver.
func (s *statusManager) syncBatch() error {
	syncRequest := <-s.podStatusChannel
	pod := syncRequest.pod
	podFullName := kubecontainer.GetPodFullName(pod)
	status := syncRequest.status

	var err error
	statusPod := &api.Pod{
		ObjectMeta: pod.ObjectMeta,
	}
	// TODO: make me easier to express from client code
	statusPod, err = s.kubeClient.Pods(statusPod.Namespace).Get(statusPod.Name)
	if err == nil {
		statusPod.Status = status
		_, err = s.kubeClient.Pods(pod.Namespace).UpdateStatus(statusPod)
		// TODO: handle conflict as a retry, make that easier too.
		if err == nil {
			glog.V(3).Infof("Status for pod %q updated successfully", pod.Name)
			return nil
		}
	}
	// We failed to update the status. To make sure we retry next time, we
	// delete the cached value. This may result in an additional update, but
	// that is ok.
	s.DeletePodStatus(podFullName)
	return fmt.Errorf("error updating status for pod %q: %v", pod.Name, err)
}