1.4 Cherrypick Batch Update #32602

Merged (3 commits) on Sep 13, 2016

cluster/log-dump.sh (8 changes: 5 additions & 3 deletions)
@@ -58,18 +58,20 @@ function copy-logs-from-node() {
function save-logs() {
local -r node_name="${1}"
local -r dir="${2}"
local files="${3} ${common_logfiles}"
local files="${3}"
if [[ "${KUBERNETES_PROVIDER}" == "gce" || "${KUBERNETES_PROVIDER}" == "gke" ]]; then
files="${files} ${gce_logfiles}"
fi
if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
files="${files} ${aws_logfiles}"
fi

if ssh-to-node "${node_name}" "sudo systemctl status kubelet.service" &> /dev/null; then
ssh-to-node "${node_name}" "sudo journalctl --output=cat -u kubelet.service" > "${dir}/kubelet.log" || true
ssh-to-node "${node_name}" "sudo journalctl --output=cat -u docker.service" > "${dir}/docker.log" || true
ssh-to-node "${node_name}" "sudo journalctl --output=cat -k" > "${dir}/kern.log" || true
else
files="${files} ${initd_logfiles} ${supervisord_logfiles}"
files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
fi
copy-logs-from-node "${node_name}" "${dir}" "${files}"
}
@@ -81,7 +83,7 @@ readonly master_logfiles="kube-apiserver kube-scheduler kube-controller-manager
readonly node_logfiles="kube-proxy"
readonly aws_logfiles="cloud-init-output"
readonly gce_logfiles="startupscript"
- readonly common_logfiles="kern"
+ readonly kern_logfile="kern"
readonly initd_logfiles="docker"
readonly supervisord_logfiles="kubelet supervisor/supervisord supervisor/kubelet-stdout supervisor/kubelet-stderr supervisor/docker-stdout supervisor/docker-stderr"

pkg/controller/namespace/namespace_controller_utils.go (17 changes: 16 additions & 1 deletion)
@@ -48,6 +48,8 @@ type operation string
const (
operationDeleteCollection operation = "deleteCollection"
operationList operation = "list"
+ // assume a default estimate for finalizers to complete when found on items pending deletion.
+ finalizerEstimateSeconds int64 = int64(15)
)

// operationKey is an entry in a cache.
@@ -154,7 +156,12 @@ func deleteCollection(
}

apiResource := unversioned.APIResource{Name: gvr.Resource, Namespaced: true}
- err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(nil, &v1.ListOptions{})
+
+ // namespace controller does not want the garbage collector to insert the orphan finalizer since it calls
+ // resource deletions generically. it will ensure all resources in the namespace are purged prior to releasing
+ // namespace itself.
+ orphanDependents := false
+ err := dynamicClient.Resource(&apiResource, namespace).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, &v1.ListOptions{})

if err == nil {
return true, nil
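
A note on the `orphanDependents := false` pattern introduced above: Go does not allow taking the address of a bool literal, so an optional `*bool` field such as `OrphanDependents` has to be set through a named variable. A minimal, self-contained sketch of the pattern; the `DeleteOptions` struct below is only a stand-in mirroring the one relevant field, not the real API type:

```go
package main

import "fmt"

// DeleteOptions is a stand-in that mirrors only the OrphanDependents field
// referenced in the diff above; it is not the real Kubernetes API type.
type DeleteOptions struct {
	OrphanDependents *bool
}

func main() {
	// &false is not valid Go, so declare a variable and take its address.
	orphanDependents := false
	opts := &DeleteOptions{OrphanDependents: &orphanDependents}

	// false: the garbage collector should not add the orphan finalizer,
	// so dependent objects are deleted rather than orphaned.
	fmt.Println(*opts.OrphanDependents)
}
```
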
@@ -300,6 +307,14 @@ func deleteAllContentForGroupVersionResource(
}
glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining - namespace: %s, gvr: %v, items: %v", namespace, gvr, len(unstructuredList.Items))
if len(unstructuredList.Items) != 0 && estimate == int64(0) {
+ // if any item has a finalizer, we treat that as a normal condition, and use a default estimation to allow for GC to complete.
+ for _, item := range unstructuredList.Items {
+ if len(item.GetFinalizers()) > 0 {
+ glog.V(5).Infof("namespace controller - deleteAllContentForGroupVersionResource - items remaining with finalizers - namespace: %s, gvr: %v, finalizers: %v", namespace, gvr, item.GetFinalizers())
+ return finalizerEstimateSeconds, nil
+ }
+ }
+ // nothing reported a finalizer, so something was unexpected as it should have been deleted.
return estimate, fmt.Errorf("unexpected items still remain in namespace: %s for gvr: %v", namespace, gvr)
}
return estimate, nil
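
For context on the finalizer check above: `GetFinalizers()` on an unstructured item reads the object's `metadata.finalizers` list. A rough sketch of the shape being inspected, using a plain map in place of the unstructured type (the object and finalizer names are illustrative only):

```go
package main

import "fmt"

// finalizersOf mimics, in spirit, what GetFinalizers returns: the
// metadata.finalizers string list of a generic object representation.
func finalizersOf(obj map[string]interface{}) []string {
	meta, ok := obj["metadata"].(map[string]interface{})
	if !ok {
		return nil
	}
	raw, ok := meta["finalizers"].([]interface{})
	if !ok {
		return nil
	}
	out := make([]string, 0, len(raw))
	for _, f := range raw {
		if s, ok := f.(string); ok {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	// An item still pending deletion because some controller has not yet
	// removed its finalizer.
	item := map[string]interface{}{
		"metadata": map[string]interface{}{
			"name":       "example",
			"finalizers": []interface{}{"example.com/cleanup"},
		},
	}
	if len(finalizersOf(item)) > 0 {
		// Mirrors the controller's choice: treat this as normal and retry
		// after finalizerEstimateSeconds instead of returning an error.
		fmt.Println("items remaining with finalizers; use the default estimate")
	}
}
```
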
pkg/util/cache/lruexpirecache.go (6 changes: 3 additions & 3 deletions)
@@ -25,7 +25,7 @@ import (

type LRUExpireCache struct {
cache *lru.Cache
- lock sync.RWMutex
+ lock sync.Mutex
}

func NewLRUExpireCache(maxSize int) *LRUExpireCache {
@@ -46,8 +46,8 @@ func (c *LRUExpireCache) Add(key lru.Key, value interface{}, ttl time.Duration)
}

func (c *LRUExpireCache) Get(key lru.Key) (interface{}, bool) {
- c.lock.RLock()
- defer c.lock.RUnlock()
+ c.lock.Lock()
+ defer c.lock.Unlock()
e, ok := c.cache.Get(key)
if !ok {
return nil, false
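
The switch from `sync.RWMutex` to `sync.Mutex` is not cosmetic: the underlying `github.com/golang/groupcache/lru` cache mutates state on reads (its `Get` moves the entry to the front of the recency list), so goroutines calling `Get` under a shared read lock can race, which is presumably what this change addresses. A minimal sketch of the safe pattern, assuming only the groupcache `lru` package:

```go
package main

import (
	"sync"

	"github.com/golang/groupcache/lru"
)

// miniCache guards an lru.Cache with a full mutex. Even "read" operations
// take the write lock, because lru.Cache.Get reorders its internal list.
type miniCache struct {
	mu sync.Mutex
	c  *lru.Cache
}

func newMiniCache(maxSize int) *miniCache {
	return &miniCache{c: lru.New(maxSize)}
}

func (m *miniCache) Add(key lru.Key, value interface{}) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.c.Add(key, value)
}

func (m *miniCache) Get(key lru.Key) (interface{}, bool) {
	m.mu.Lock() // an RLock here would allow concurrent mutation of the LRU list
	defer m.mu.Unlock()
	return m.c.Get(key)
}

func main() {
	cache := newMiniCache(16)
	cache.Add("token", "abc123")

	// Concurrent lookups are safe because Get serializes access.
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			cache.Get("token")
		}()
	}
	wg.Wait()
}
```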