Merge pull request #32443 from foxish/automated-cherry-pick-of-#31947-#32072-#32105-#32197-#32181-upstream-release-1.4

Automated cherry pick of #31947 #32072 #32105 #32197 #32181
Phillip Wittrock committed Sep 10, 2016
2 parents f7b03a3 + 4f75947 commit 532f507
Showing 8 changed files with 43 additions and 33 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -199,7 +199,7 @@ binary | sha256 hash
### Other notable changes

* rkt: Improve support for privileged pod (pod whose all containers are privileged) ([#31286](https://github.com/kubernetes/kubernetes/pull/31286), [@yifan-gu](https://github.com/yifan-gu))
* The pod annotation `security.alpha.kubernetes.io/sysctls` now allows customization of namespaced and well isolated kernel parameters (sysctls), starting with `kernel.shm_rmid_forced`, `net.ipv4.ip_local_port_range`, `net.ipv4.tcp_max_syn_backlog` and `net.ipv4.tcp_syncookies` for Kubernetes 1.4. ([#27180](https://github.com/kubernetes/kubernetes/pull/27180), [@sttts](https://github.com/sttts))
* The pod annotation `security.alpha.kubernetes.io/sysctls` now allows customization of namespaced and well isolated kernel parameters (sysctls), starting with `kernel.shm_rmid_forced`, `net.ipv4.ip_local_port_range` and `net.ipv4.tcp_syncookies` for Kubernetes 1.4. ([#27180](https://github.com/kubernetes/kubernetes/pull/27180), [@sttts](https://github.com/sttts))
* The pod annotation `security.alpha.kubernetes.io/unsafe-sysctls` allows customization of namespaced sysctls where isolation is unclear. Unsafe sysctls must be enabled at-your-own-risk on the kubelet with the `--experimental-allowed-unsafe-sysctls` flag. Future versions will improve on resource isolation and more sysctls will be considered safe.
* Increase request timeout based on termination grace period ([#31275](https://github.com/kubernetes/kubernetes/pull/31275), [@dims](https://github.com/dims))
* Fixed two issues of kubectl bash completion. ([#31135](https://github.com/kubernetes/kubernetes/pull/31135), [@xingzhou](https://github.com/xingzhou))
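For context on the changelog entries above, here is a minimal, hypothetical sketch of how the `security.alpha.kubernetes.io/sysctls` annotation is used. The pod name and image are illustrative; the annotation key, the whitelisted sysctls, and the `--experimental-allowed-unsafe-sysctls` kubelet flag all come from the text above.

# Minimal sketch (pod name/image are illustrative): request a safe sysctl via
# the annotation described in the changelog entry above.
cat <<'EOF' | kubectl create -f -
apiVersion: v1
kind: Pod
metadata:
  name: sysctl-demo
  annotations:
    security.alpha.kubernetes.io/sysctls: "net.ipv4.tcp_syncookies=1"
spec:
  containers:
  - name: busybox
    image: busybox
    command: ["sleep", "3600"]
EOF

# net.ipv4.tcp_max_syn_backlog is dropped from the safe whitelist in this
# commit (see pkg/kubelet/sysctl/whitelist.go below); after this change it
# would have to be requested through the unsafe-sysctls annotation on a
# kubelet started with:
#   --experimental-allowed-unsafe-sysctls=net.ipv4.tcp_max_syn_backlog
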
1 change: 1 addition & 0 deletions build/common.sh
@@ -943,6 +943,7 @@ function kube::release::package_kube_manifests_tarball() {
cp "${salt_dir}/kube-addons/kube-addon-manager.yaml" "${dst_dir}"
cp "${salt_dir}/l7-gcp/glbc.manifest" "${dst_dir}"
cp "${salt_dir}/rescheduler/rescheduler.manifest" "${dst_dir}/"
cp "${salt_dir}/e2e-image-puller/e2e-image-puller.manifest" "${dst_dir}/"
cp "${KUBE_ROOT}/cluster/gce/trusty/configure-helper.sh" "${dst_dir}/trusty-configure-helper.sh"
cp "${KUBE_ROOT}/cluster/gce/gci/configure-helper.sh" "${dst_dir}/gci-configure-helper.sh"
cp "${KUBE_ROOT}/cluster/gce/gci/health-monitor.sh" "${dst_dir}/health-monitor.sh"
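A quick way to confirm the new manifest actually lands in the release artifacts. The `make quick-release` target is standard, but the exact tarball name and the `_output/release-tars` path are assumptions about the usual release layout rather than something this diff shows.

make quick-release
tar tzf _output/release-tars/kubernetes-manifests.tar.gz | grep e2e-image-puller.manifest
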
3 changes: 0 additions & 3 deletions cluster/gce/config-default.sh
@@ -37,9 +37,6 @@ PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}

MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
# By default a cluster will be started with the master on GCI and nodes on
# containervm. If you are updating the containervm version, update this
# variable.
CVM_VERSION=container-v1-3-v20160604
GCI_VERSION="gci-dev-54-8743-3-0"
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
3 changes: 0 additions & 3 deletions cluster/gce/config-test.sh
@@ -38,9 +38,6 @@ PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}

MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
# By default a cluster will be started with the master on GCI and nodes on
# containervm. If you are updating the containervm version, update this
# variable.
CVM_VERSION=container-v1-3-v20160604
GCI_VERSION="gci-dev-54-8743-3-0"
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
24 changes: 20 additions & 4 deletions cluster/gce/gci/configure-helper.sh
@@ -1080,10 +1080,23 @@ function start-fluentd {
fi
}

# Starts an image-puller - used in test clusters.
function start-image-puller {
echo "Start image-puller"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/e2e-image-puller.manifest" \
/etc/kubernetes/manifests/
}

# Starts kube-registry proxy
function start-kube-registry-proxy {
echo "Start kube-registry-proxy"
cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
}

# Starts a l7 loadbalancing controller for ingress.
function start-lb-controller {
if [[ "${ENABLE_L7_LOADBALANCING:-}" == "glbc" ]]; then
echo "Starting GCE L7 pod"
echo "Start GCE L7 pod"
prepare-log-file /var/log/glbc.log
setup-addon-manifests "addons" "cluster-loadbalancing/glbc"
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/glbc.manifest" \
@@ -1094,7 +1107,7 @@ function start-lb-controller {
# Starts rescheduler.
function start-rescheduler {
if [[ "${ENABLE_RESCHEDULER:-}" == "true" ]]; then
echo "Starting Rescheduler"
echo "Start Rescheduler"
prepare-log-file /var/log/rescheduler.log
cp "${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/rescheduler.manifest" \
/etc/kubernetes/manifests/
@@ -1188,8 +1201,11 @@ else
start-kube-proxy
# Kube-registry-proxy.
if [[ "${ENABLE_CLUSTER_REGISTRY:-}" == "true" ]]; then
cp "${KUBE_HOME}/kube-manifests/kubernetes/kube-registry-proxy.yaml" /etc/kubernetes/manifests
fi
start-kube-registry-proxy
fi
if [[ "${PREPULL_E2E_IMAGES:-}" == "true" ]]; then
start-image-puller
fi
fi
start-fluentd
reset-motd
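A rough, hypothetical check of the new image-puller path on a GCI test node. The manifest destination comes from the start-image-puller function above; the kube-env location and the assumption that PREPULL_E2E_IMAGES is propagated there by the cluster turn-up scripts are not shown in this diff.

# On a GCI node of a test cluster (paths partly assumed):
grep PREPULL_E2E_IMAGES /home/kubernetes/kube-env        # assumed kube-env location
ls /etc/kubernetes/manifests/e2e-image-puller.manifest   # copied by start-image-puller
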
1 change: 0 additions & 1 deletion pkg/kubelet/sysctl/whitelist.go
@@ -40,7 +40,6 @@ func SafeSysctlWhitelist() []string {
return []string{
"kernel.shm_rmid_forced",
"net.ipv4.ip_local_port_range",
"net.ipv4.tcp_max_syn_backlog",
"net.ipv4.tcp_syncookies",
}
}
1 change: 1 addition & 0 deletions pkg/util/config/feature_gate.go
@@ -216,6 +216,7 @@ func (f *featureGate) AddFlag(fs *pflag.FlagSet) {
}
known = append(known, fmt.Sprintf("%s=true|false (%sdefault=%t)", k, pre, v.enabled))
}
sort.Strings(known)
fs.Var(f, flagName, ""+
"A set of key=value pairs that describe feature gates for alpha/experimental features. "+
"Options are:\n"+strings.Join(known, "\n"))
41 changes: 20 additions & 21 deletions test/e2e/federated-ingress.go
@@ -53,7 +53,6 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
// Validate federation apiserver, does not rely on underlying clusters or federation ingress controller.
Describe("Ingress objects", func() {
AfterEach(func() {

nsName := f.FederationNamespace.Name
// Delete registered ingresses.
ingressList, err := f.FederationClientset_1_4.Extensions().Ingresses(nsName).List(api.ListOptions{})
@@ -94,24 +93,24 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
jig = newFederationTestJig(f.FederationClientset_1_4)
clusters = map[string]*cluster{}
primaryClusterName = registerClusters(clusters, UserAgentName, federationName, f)
ns = f.Namespace.Name
ns = f.FederationNamespace.Name
})

AfterEach(func() {
unregisterClusters(clusters, f)
})

It("should create and update matching ingresses in underlying clusters", func() {
ingress := createIngressOrFail(f.FederationClientset_1_4, f.Namespace.Name)
ingress := createIngressOrFail(f.FederationClientset_1_4, ns)
defer func() { // Cleanup
By(fmt.Sprintf("Deleting ingress %q in namespace %q", ingress.Name, f.Namespace.Name))
err := f.FederationClientset_1_4.Ingresses(f.Namespace.Name).Delete(ingress.Name, &api.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting ingress %q in namespace %q", ingress.Name, f.Namespace.Name)
By(fmt.Sprintf("Deleting ingress %q in namespace %q", ingress.Name, ns))
err := f.FederationClientset_1_4.Ingresses(ns).Delete(ingress.Name, &api.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting ingress %q in namespace %q", ingress.Name, ns)
}()
// wait for ingress shards being created
waitForIngressShardsOrFail(f.Namespace.Name, ingress, clusters)
ingress = updateIngressOrFail(f.FederationClientset_1_4, f.Namespace.Name)
waitForIngressShardsUpdatedOrFail(f.Namespace.Name, ingress, clusters)
waitForIngressShardsOrFail(ns, ingress, clusters)
ingress = updateIngressOrFail(f.FederationClientset_1_4, ns)
waitForIngressShardsUpdatedOrFail(ns, ingress, clusters)
})

var _ = Describe("Ingress connectivity and DNS", func() {
@@ -123,27 +122,27 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
BeforeEach(func() {
framework.SkipUnlessFederated(f.Client)
// create backend pod
createBackendPodsOrFail(clusters, f.Namespace.Name, FederatedIngressServicePodName)
createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName)
// create backend service
service = createServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name, FederatedIngressServiceName)
service = createServiceOrFail(f.FederationClientset_1_4, ns, FederatedIngressServiceName)
// create ingress object
jig.ing = createIngressOrFail(f.FederationClientset_1_4, f.Namespace.Name)
jig.ing = createIngressOrFail(f.FederationClientset_1_4, ns)
// wait for services objects sync
waitForServiceShardsOrFail(f.Namespace.Name, service, clusters)
waitForServiceShardsOrFail(ns, service, clusters)
// wait for ingress objects sync
waitForIngressShardsOrFail(f.Namespace.Name, jig.ing, clusters)
waitForIngressShardsOrFail(ns, jig.ing, clusters)
})

AfterEach(func() {
deleteBackendPodsOrFail(clusters, f.Namespace.Name)
deleteBackendPodsOrFail(clusters, ns)
if service != nil {
deleteServiceOrFail(f.FederationClientset_1_4, f.Namespace.Name, service.Name)
deleteServiceOrFail(f.FederationClientset_1_4, ns, service.Name)
service = nil
} else {
By("No service to delete. Service is nil")
}
if jig.ing != nil {
deleteIngressOrFail(f.FederationClientset_1_4, f.Namespace.Name, jig.ing.Name)
deleteIngressOrFail(f.FederationClientset_1_4, ns, jig.ing.Name)
jig.ing = nil
} else {
By("No ingress to delete. Ingress is nil")
@@ -153,11 +152,11 @@ var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func(
PIt("should be able to discover a federated ingress service", func() {
// we are about the ingress name
svcDNSNames := []string{
fmt.Sprintf("%s.%s", FederatedIngressServiceName, f.Namespace.Name),
fmt.Sprintf("%s.%s.svc.cluster.local.", FederatedIngressServiceName, f.Namespace.Name),
fmt.Sprintf("%s.%s", FederatedIngressServiceName, ns),
fmt.Sprintf("%s.%s.svc.cluster.local.", FederatedIngressServiceName, ns),
// TODO these two entries are not set yet
//fmt.Sprintf("%s.%s.%s", FederatedIngressServiceName, f.Namespace.Name, federationName),
//fmt.Sprintf("%s.%s.%s.svc.cluster.local.", FederatedIngressServiceName, f.Namespace.Name, federationName),
//fmt.Sprintf("%s.%s.%s", FederatedIngressServiceName, ns, federationName),
//fmt.Sprintf("%s.%s.%s.svc.cluster.local.", FederatedIngressServiceName, ns, federationName),
}
// check dns records in underlying cluster
for i, DNSName := range svcDNSNames {
