Commit fdb110c

Fix the rest of the code
Parent: 8d0187a

129 files changed: 625 additions & 663 deletions. Only a subset of the changed files is shown below.

cluster/addons/dns/kube2sky/kube2sky.go

Lines changed: 2 additions & 2 deletions
@@ -192,7 +192,7 @@ func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi.
 		endpointPort := &e.Subsets[idx].Ports[portIdx]
 		portSegment := buildPortSegmentString(endpointPort.Name, endpointPort.Protocol)
 		if portSegment != "" {
-			err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, endpointPort.Port)
+			err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, int(endpointPort.Port))
 			if err != nil {
 				return err
 			}
@@ -343,7 +343,7 @@ func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *k
 		port := &service.Spec.Ports[i]
 		portSegment := buildPortSegmentString(port.Name, port.Protocol)
 		if portSegment != "" {
-			err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, port.Port)
+			err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, int(port.Port))
 			if err != nil {
 				return err
 			}
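
Note on the kube2sky changes: the API port fields (kapi.ServicePort.Port, kapi.EndpointPort.Port) are now int32, while generateSRVRecord still takes a plain int, and Go never converts between the two implicitly. A minimal, self-contained sketch of the rule the compiler enforces, using illustrative stand-in types rather than the real kapi definitions:

package main

import "fmt"

// servicePort is an illustrative stand-in for the updated API type,
// whose Port field is now int32 (not the real kapi.ServicePort).
type servicePort struct {
	Name string
	Port int32
}

// generateSRVRecord stands in for a helper that still takes a plain int.
func generateSRVRecord(label string, port int) string {
	return fmt.Sprintf("SRV %s -> :%d", label, port)
}

func main() {
	p := servicePort{Name: "dns", Port: 53}

	// generateSRVRecord(p.Name, p.Port) would fail to compile:
	// int32 is not assignable to int, so the cast must be explicit.
	fmt.Println(generateSRVRecord(p.Name, int(p.Port)))
}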

cluster/addons/dns/kube2sky/kube2sky_test.go

Lines changed: 4 additions & 4 deletions
@@ -111,7 +111,7 @@ type hostPort struct {
 func getHostPort(service *kapi.Service) *hostPort {
 	return &hostPort{
 		Host: service.Spec.ClusterIP,
-		Port: service.Spec.Ports[0].Port,
+		Port: int(service.Spec.Ports[0].Port),
 	}
 }

@@ -181,7 +181,7 @@ func newService(namespace, serviceName, clusterIP, portName string, portNumber i
 		Spec: kapi.ServiceSpec{
 			ClusterIP: clusterIP,
 			Ports: []kapi.ServicePort{
-				{Port: portNumber, Name: portName, Protocol: "TCP"},
+				{Port: int32(portNumber), Name: portName, Protocol: "TCP"},
 			},
 		},
 	}
@@ -212,7 +212,7 @@ func newSubset() kapi.EndpointSubset {

 func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.EndpointSubset {
 	subset := newSubset()
-	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: port, Name: portName, Protocol: "TCP"})
+	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(port), Name: portName, Protocol: "TCP"})
 	for _, ip := range ips {
 		subset.Addresses = append(subset.Addresses, kapi.EndpointAddress{IP: ip})
 	}
@@ -221,7 +221,7 @@ func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.Endpoin

 func newSubsetWithTwoPorts(portName1 string, portNumber1 int, portName2 string, portNumber2 int, ips ...string) kapi.EndpointSubset {
 	subset := newSubsetWithOnePort(portName1, portNumber1, ips...)
-	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: portNumber2, Name: portName2, Protocol: "TCP"})
+	subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(portNumber2), Name: portName2, Protocol: "TCP"})
 	return subset
 }
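
The test helpers keep int parameters for convenience and convert at the point where the API structs are filled in, so the cast runs in the opposite direction (int to int32). A short sketch of that pattern, again with a stand-in type rather than the real kapi one:

package main

import "fmt"

// endpointPort is an illustrative stand-in; the real kapi.EndpointPort
// now declares Port as int32.
type endpointPort struct {
	Name string
	Port int32
}

// newTestPort mirrors helpers like newSubsetWithOnePort above: it accepts
// an int and converts explicitly when constructing the API struct.
func newTestPort(name string, port int) endpointPort {
	return endpointPort{Name: name, Port: int32(port)}
}

func main() {
	fmt.Printf("%+v\n", newTestPort("dns", 53))
}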

cmd/kube-controller-manager/app/controllermanager.go

Lines changed: 17 additions & 17 deletions
@@ -126,7 +126,7 @@ func Run(s *options.CMServer) error {
 	kubeconfig.ContentConfig.ContentType = s.ContentType
 	// Override kubeconfig qps/burst settings from flags
 	kubeconfig.QPS = s.KubeAPIQPS
-	kubeconfig.Burst = s.KubeAPIBurst
+	kubeconfig.Burst = int(s.KubeAPIBurst)

 	kubeClient, err := client.New(kubeconfig)
 	if err != nil {
@@ -144,7 +144,7 @@ func Run(s *options.CMServer) error {
 	mux.Handle("/metrics", prometheus.Handler())

 	server := &http.Server{
-		Addr:    net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
+		Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
 		Handler: mux,
 	}
 	glog.Fatal(server.ListenAndServe())
@@ -198,20 +198,20 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	informers[reflect.TypeOf(&api.Pod{})] = podInformer

 	go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))).
-		Run(s.ConcurrentEndpointSyncs, wait.NeverStop)
+		Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	go replicationcontroller.NewReplicationManager(
 		podInformer,
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")),
 		ResyncPeriod(s),
 		replicationcontroller.BurstReplicas,
-		s.LookupCacheSizeForRC,
-	).Run(s.ConcurrentRCSyncs, wait.NeverStop)
+		int(s.LookupCacheSizeForRC),
+	).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	if s.TerminatedPodGCThreshold > 0 {
-		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
+		go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), int(s.TerminatedPodGCThreshold)).
 			Run(wait.NeverStop)
 		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 	}
@@ -224,8 +224,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		// this cidr has been validated already
 		_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
 		nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
-			s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
-			flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+			s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
+			flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
 			s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
 		nodeController.Run(s.NodeSyncPeriod.Duration)
 		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
@@ -268,7 +268,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		ReplenishmentResyncPeriod: ResyncPeriod(s),
 		GroupKindsToReplenish:     groupKindsToReplenish,
 	}
-	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
+	go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
@@ -299,7 +299,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 		glog.Fatalf("Failed to get supported resources from server: %v", err)
 	}
 	namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
-	go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)
+	go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
 	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

 	groupVersion := "extensions/v1beta1"
@@ -324,29 +324,29 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig

 		if containsResource(resources, "daemonsets") {
 			glog.Infof("Starting daemon set controller")
-			go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), s.LookupCacheSizeForDaemonSet).
-				Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
+			go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), int(s.LookupCacheSizeForDaemonSet)).
+				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}

 		if containsResource(resources, "jobs") {
 			glog.Infof("Starting job controller")
 			go job.NewJobController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller"))).
-				Run(s.ConcurrentJobSyncs, wait.NeverStop)
+				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}

 		if containsResource(resources, "deployments") {
 			glog.Infof("Starting deployment controller")
 			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
-				Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
+				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}

 		if containsResource(resources, "replicasets") {
 			glog.Infof("Starting ReplicaSet controller")
-			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS).
-				Run(s.ConcurrentRSSyncs, wait.NeverStop)
+			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
+				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
 			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
 		}
 	}
@@ -364,7 +364,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
 	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
 		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
 		s.PVClaimBinderSyncPeriod.Duration,
-		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
+		int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
 		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
 		cloud,
 	)
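
The controller-manager edits follow the same pattern: the flag-backed fields on s (Port, KubeAPIBurst, the Concurrent*Syncs and LookupCacheSize* knobs, TerminatedPodGCThreshold, MaximumRetry) are no longer plain int, while the controller constructors, Run methods, and strconv.Itoa still expect int, so each call site converts once at the boundary. A cut-down sketch of that boundary; the struct below is a hypothetical stand-in, not the real options.CMServer:

package main

import (
	"fmt"
	"net"
	"strconv"
)

// cmServer is a hypothetical, cut-down stand-in for the options struct;
// the int32 fields mirror the sized component-config types used above.
type cmServer struct {
	Address                 string
	Port                    int32
	ConcurrentEndpointSyncs int32
}

// runEndpointController stands in for a controller Run method that still
// takes a plain int worker count.
func runEndpointController(workers int) {
	fmt.Printf("starting %d endpoint sync workers\n", workers)
}

func main() {
	s := cmServer{Address: "127.0.0.1", Port: 10252, ConcurrentEndpointSyncs: 5}

	// strconv.Itoa takes an int, so the int32 flag value is converted
	// once, right at the call site, as in the diff above.
	addr := net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port)))
	fmt.Println("serving metrics on", addr)

	runEndpointController(int(s.ConcurrentEndpointSyncs))
}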
