/
runtime_service.go
218 lines (194 loc) · 6.2 KB
/
runtime_service.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
package checkpoint
import (
"context"
"time"
"github.com/golang/glog"
"google.golang.org/grpc"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/kubernetes-incubator/bootkube/pkg/checkpoint/cri/v1alpha1"
"github.com/kubernetes-incubator/bootkube/pkg/checkpoint/cri/v1alpha2"
"github.com/kubernetes-incubator/bootkube/pkg/checkpoint/internal"
)
// Copied from "k8s.io/kubernetes/pkg/kubelet/types"
//
// These are the well-known label keys the kubelet attaches to every CRI
// container/sandbox; they are read back in localRunningPods to reconstruct
// which pod a running container belongs to.
const (
	kubernetesPodNameLabel       = "io.kubernetes.pod.name"
	kubernetesPodNamespaceLabel  = "io.kubernetes.pod.namespace"
	kubernetesPodUIDLabel        = "io.kubernetes.pod.uid"
	kubernetesContainerNameLabel = "io.kubernetes.container.name"
	kubernetesContainerTypeLabel = "io.kubernetes.container.type"
)
// remoteRuntimeService talks to the container runtime's CRI shim over gRPC.
// It holds clients for both supported CRI API versions; callers probe
// v1alpha1 first (via Version) and fall back to v1alpha2.
type remoteRuntimeService struct {
	// timeout bounds each gRPC request made to the runtime (used as a
	// per-call context deadline) and is the connection timeout at dial time.
	timeout        time.Duration
	v1alpha1Client v1alpha1.RuntimeServiceClient
	v1alpha2Client v1alpha2.RuntimeServiceClient
}
// newRemoteRuntimeService dials the CRI shim at endpoint and returns a
// client wrapper holding both v1alpha1 and v1alpha2 RuntimeService clients
// over the same connection. connectionTimeout bounds the initial dial and is
// reused as the per-request timeout for subsequent calls.
func newRemoteRuntimeService(endpoint string, connectionTimeout time.Duration) (*remoteRuntimeService, error) {
	glog.Infof("Connecting to runtime service %s", endpoint)
	addr, dialer, err := internal.GetAddressAndDialer(endpoint)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the previous grpc.WithTimeout(connectionTimeout) option is a
	// no-op unless the dial is blocking, so the timeout was silently ignored.
	// Use the documented replacement: a blocking DialContext bounded by a
	// context deadline.
	ctx, cancel := context.WithTimeout(context.Background(), connectionTimeout)
	defer cancel()
	conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithBlock(), grpc.WithDialer(dialer))
	if err != nil {
		glog.Errorf("Connect remote runtime %s failed: %v", addr, err)
		return nil, err
	}
	return &remoteRuntimeService{
		timeout:        connectionTimeout,
		v1alpha1Client: v1alpha1.NewRuntimeServiceClient(conn),
		v1alpha2Client: v1alpha2.NewRuntimeServiceClient(conn),
	}, nil
}
// localRunningPods uses the CRI shim to retrieve the local container runtime
// pod state. It returns a minimal v1.Pod (UID, Name, Namespace only) for each
// distinct running pod, keyed by "namespace/name", or nil if the runtime
// could not be queried.
func (r *remoteRuntimeService) localRunningPods() map[string]*v1.Pod {
	running := make(map[string]*v1.Pod)

	// Retrieving sandboxes is likely redundant but is done to maintain sameness with what the kubelet does
	sandboxes, err := r.getRunningKubeletSandboxes()
	if err != nil {
		glog.Errorf("failed to list running sandboxes: %v", err)
		return nil
	}
	// Record one pod per sandbox, skipping duplicates.
	for _, sb := range sandboxes {
		key := sb.Namespace + "/" + sb.Name
		if _, seen := running[key]; seen {
			continue
		}
		pod := &v1.Pod{}
		pod.UID = types.UID(sb.Uid)
		pod.Name = sb.Name
		pod.Namespace = sb.Namespace
		running[key] = pod
	}

	containers, err := r.getRunningKubeletContainers()
	if err != nil {
		glog.Errorf("failed to list running containers: %v", err)
		return nil
	}
	// Also record pods discovered only via their containers' kubelet labels.
	for _, ctr := range containers {
		name := ctr.Labels[kubernetesPodNameLabel]
		namespace := ctr.Labels[kubernetesPodNamespaceLabel]
		key := namespace + "/" + name
		if _, seen := running[key]; seen {
			continue
		}
		pod := &v1.Pod{}
		pod.UID = types.UID(ctr.Labels[kubernetesPodUIDLabel])
		pod.Name = name
		pod.Namespace = namespace
		running[key] = pod
	}
	return running
}
// criContainer is a version-neutral view of a CRI container: only the kubelet
// labels are retained, since that is all localRunningPods needs to map a
// container back to its pod.
type criContainer struct {
	Labels map[string]string
}
// criSandbox is a version-neutral view of a CRI pod sandbox, carrying the
// pod identity from the sandbox metadata plus its labels.
type criSandbox struct {
	Uid       string
	Name      string
	Namespace string
	Labels    map[string]string
}
// getRunningKubeletContainers lists the containers in RUNNING state known to
// the CRI shim. The v1alpha1 API is probed first (via Version); if the
// runtime does not speak it, the v1alpha2 API is used instead. Containers
// without metadata are skipped.
func (r *remoteRuntimeService) getRunningKubeletContainers() ([]criContainer, error) {
	ctx, cancel := context.WithTimeout(context.Background(), r.timeout)
	defer cancel()

	// Probe for the v1alpha1 API; if the runtime answers, use it.
	if _, verr := r.v1alpha1Client.Version(ctx, &v1alpha1.VersionRequest{}); verr == nil {
		req := &v1alpha1.ListContainersRequest{
			Filter: &v1alpha1.ContainerFilter{
				// Filter out non-running containers
				State: &v1alpha1.ContainerStateValue{
					State: v1alpha1.ContainerState_CONTAINER_RUNNING,
				},
			},
		}
		resp, err := r.v1alpha1Client.ListContainers(ctx, req)
		if err != nil {
			glog.Errorf("ListContainers with filter from runtime service failed: %v", err)
			return nil, err
		}
		var result []criContainer
		for _, ctr := range resp.Containers {
			if ctr.Metadata == nil {
				glog.V(4).Infof("Container does not have metadata: %+v", ctr)
				continue
			}
			result = append(result, criContainer{Labels: ctr.Labels})
		}
		return result, nil
	}

	// v1alpha1 is unavailable; fall back to the v1alpha2 API.
	req := &v1alpha2.ListContainersRequest{
		Filter: &v1alpha2.ContainerFilter{
			// Filter out non-running containers
			State: &v1alpha2.ContainerStateValue{
				State: v1alpha2.ContainerState_CONTAINER_RUNNING,
			},
		},
	}
	resp, err := r.v1alpha2Client.ListContainers(ctx, req)
	if err != nil {
		glog.Errorf("ListContainers with filter from runtime service failed: %v", err)
		return nil, err
	}
	var result []criContainer
	for _, ctr := range resp.Containers {
		if ctr.Metadata == nil {
			glog.V(4).Infof("Container does not have metadata: %+v", ctr)
			continue
		}
		result = append(result, criContainer{Labels: ctr.Labels})
	}
	return result, nil
}
// getRunningKubeletSandboxes lists the pod sandboxes in SANDBOX_READY state
// known to the CRI shim. The v1alpha1 API is probed first (via Version) and
// the v1alpha2 API is used as a fallback. Sandboxes without metadata are
// skipped.
func (r *remoteRuntimeService) getRunningKubeletSandboxes() ([]criSandbox, error) {
	ctx, cancel := context.WithTimeout(context.Background(), r.timeout)
	defer cancel()
	var sandboxes []criSandbox
	if _, err := r.v1alpha1Client.Version(ctx, &v1alpha1.VersionRequest{}); err == nil {
		resp, err := r.v1alpha1Client.ListPodSandbox(ctx, &v1alpha1.ListPodSandboxRequest{
			Filter: &v1alpha1.PodSandboxFilter{
			// Filter out non-running sandboxes
				State: &v1alpha1.PodSandboxStateValue{
					State: v1alpha1.PodSandboxState_SANDBOX_READY,
				},
			},
		})
		if err != nil {
			// Fixed typo in the log message ("sevice" -> "service").
			glog.Errorf("ListPodSandbox with filter from runtime service failed: %v", err)
			return nil, err
		}
		for _, c := range resp.Items {
			if c.Metadata == nil {
				glog.V(4).Infof("Sandbox does not have metadata: %+v", c)
				continue
			}
			sandboxes = append(sandboxes, criSandbox{
				Uid:       c.Metadata.Uid,
				Name:      c.Metadata.Name,
				Namespace: c.Metadata.Namespace,
				Labels:    c.Labels,
			})
		}
		return sandboxes, nil
	}
	// v1alpha1 is unavailable; try v1alpha2.
	resp, err := r.v1alpha2Client.ListPodSandbox(ctx, &v1alpha2.ListPodSandboxRequest{
		Filter: &v1alpha2.PodSandboxFilter{
			// Filter out non-running sandboxes
			State: &v1alpha2.PodSandboxStateValue{
				State: v1alpha2.PodSandboxState_SANDBOX_READY,
			},
		},
	})
	if err != nil {
		// Fixed typo in the log message ("sevice" -> "service").
		glog.Errorf("ListPodSandbox with filter from runtime service failed: %v", err)
		return nil, err
	}
	for _, c := range resp.Items {
		if c.Metadata == nil {
			glog.V(4).Infof("Sandbox does not have metadata: %+v", c)
			continue
		}
		sandboxes = append(sandboxes, criSandbox{
			Uid:       c.Metadata.Uid,
			Name:      c.Metadata.Name,
			Namespace: c.Metadata.Namespace,
			Labels:    c.Labels,
		})
	}
	return sandboxes, nil
}