Skip to content

Commit

Permalink
Add basic scrub pod and watch (needs debugging)
Browse files Browse the repository at this point in the history
  • Loading branch information
markturansky committed May 26, 2015
1 parent 970c79b commit 41ad32b
Show file tree
Hide file tree
Showing 2 changed files with 83 additions and 18 deletions.
61 changes: 45 additions & 16 deletions pkg/volume/host_path/host_path.go
Expand Up @@ -22,6 +22,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)

Expand Down Expand Up @@ -82,9 +83,9 @@ func (plugin *hostPathPlugin) NewRecycler(spec *volume.Spec) (volume.Recycler, e

// newRecycler builds a hostPathRecycler for the HostPath carried by spec.
// The path may come from either the inline VolumeSource or the
// PersistentVolumeSource; both produce an equivalent recycler.
func newRecycler(spec *volume.Spec, host volume.VolumeHost) (volume.Recycler, error) {
	if spec.VolumeSource.HostPath != nil {
		return &hostPathRecycler{name: spec.Name, path: spec.VolumeSource.HostPath.Path, host: host}, nil
	}
	return &hostPathRecycler{name: spec.Name, path: spec.PersistentVolumeSource.HostPath.Path, host: host}, nil
}

Expand Down Expand Up @@ -121,25 +122,53 @@ func (hp *hostPath) TearDownAt(dir string) error {
// hostPathRecycler scrubs a hostPath volume by running "rm -rf" on the volume in a pod
// This recycler only works on a single host cluster and is for testing purposes only.
type hostPathRecycler struct {
	name string            // volume name; used to derive the scrubber pod's name
	path string            // host filesystem path to be scrubbed
	host volume.VolumeHost // supplies the kube client used to create the scrubber pod
}

func (hp *hostPathRecycler) GetPath() string {
return hp.path
func (r *hostPathRecycler) GetPath() string {
return r.path
}

// Recycler provides methods to reclaim the volume resource.
func (hp *hostPathRecycler) Recycle() error {

// TODO implement "basic scrub" recycler -- busybox w/ "rm -rf" for volume

/*
1. make a pod that uses a volume like me
2. use host.client to save to API and watch it
3. if pod errors or times out, return error
if pod exits, return nil for success
*/

return nil
func (r *hostPathRecycler) Recycle() error {
uuid := string(util.NewUUID())
timeout := int64(30)

pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "scrubber-" + r.name,
Namespace: api.NamespaceDefault,
Labels: map[string]string{
"scrubber": uuid,
},
},
Spec: api.PodSpec{
Volumes: []api.Volume{
{
Name: uuid,
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{ r.path },
},
},
},
Containers: []api.Container{
{
Name: "scrubber-" + uuid,
Image: "busybox",
Command: []string{"ls -la"},
WorkingDir: "/scrub",
VolumeMounts: []api.VolumeMount{
{
Name: uuid,
MountPath: "/scrub",
},
},
},
},
ActiveDeadlineSeconds: &timeout,
},
}
return volume.ScrubPodVolumeAndWatchUntilCompletion(pod, r.host.GetKubeClient())
}
40 changes: 38 additions & 2 deletions pkg/volume/util.go
Expand Up @@ -23,6 +23,8 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"

"github.com/golang/glog"
)

func GetAccessModesAsString(modes []api.PersistentVolumeAccessMode) string {
Expand Down Expand Up @@ -57,8 +59,42 @@ func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeA
return false
}

// basicVolumeScrubber is an implementation of volume.Recycler
type basicVolumeScrubber struct {
func ScrubPodVolumeAndWatchUntilCompletion(pod *api.Pod, client client.Interface) error {

glog.V(5).Infof("Creating scrubber pod for volume %s\n", pod.Name)
pod, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
return fmt.Errorf("Unexpected error creating a pod to scrub volume %s: %+v\n", pod.Name, err)
}

// the binder will eventually catch up and set status on Claims
watch := newPodWatch(client, pod.Namespace, pod.Name, 5)
defer watch.Stop()

success := false
for {
event := <-watch.ResultChan()
pod := event.Object.(*api.Pod)

glog.V(5).Infof("Handling %s event for pod %+v\n", event.Type, pod)

if pod.Status.Phase == api.PodSucceeded {
success = true
break
} else {

// TODO how to handle pods that were killed by ActiveDeadlineSeconds

glog.V(5).Infof("Pod event %+v\n", pod)
}
}

if success {
glog.V(5).Infof("Successfully scrubbed volume with pod %s\n", pod.Name)
return nil
} else {
return fmt.Errorf("Volume was not recycled: %+v", pod.Name)
}
}

// podWatch provides watch semantics for a pod backed by a poller, since
Expand Down

0 comments on commit 41ad32b

Please sign in to comment.