Cherry pick #14044 to release-1.1 #17334

Merged
11 changes: 9 additions & 2 deletions examples/rbd/README.md
@@ -7,7 +7,7 @@

Install Ceph on the Kubernetes host. For example, on Fedora 21

-# yum -y install ceph
+# yum -y install ceph-common

If you don't have a Ceph cluster, you can set up a [containerized Ceph cluster](https://github.com/rootfs/docker-ceph)

Expand All @@ -26,7 +26,14 @@ Once you have installed Ceph and new Kubernetes, you can create a pod based on m

# Use Ceph Authentication Secret

-If Ceph authentication secret is provided, the secret should be first be base64 encoded, then encoded string is placed in a secret yaml. An example yaml is provided [here](secret/ceph-secret.yaml). Then post the secret through ```kubectl``` in the following command.
+If a Ceph authentication secret is provided, it should first be *base64 encoded*, and the encoded string placed in a secret yaml. For example, the base64-encoded secret of Ceph user `kube` can be obtained with the following command:

```console
# grep key /etc/ceph/ceph.client.kube.keyring |awk '{printf "%s", $NF}'|base64
QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ==
```
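
(For illustration only, not part of the PR: the same encoding can be done with Go's standard library. The key string below is the sample output above decoded; in practice it comes from the `key` field of the keyring.)

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// key field as it would appear in ceph.client.kube.keyring
	// (decoding the sample output above yields this value)
	key := "AQAMgXhVwBCeDhAA9nlPaFyfUSatGD4drFWDvQ=="
	// base64-encode it for the secret yaml, mirroring the grep|awk|base64 pipeline
	fmt.Println(base64.StdEncoding.EncodeToString([]byte(key)))
	// prints: QVFBTWdYaFZ3QkNlRGhBQTlubFBhRnlmVVNhdEdENGRyRldEdlE9PQ==
}
```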

An example yaml is provided [here](secret/ceph-secret.yaml). Then post the secret through `kubectl` with the following command:

```console
# kubectl create -f examples/rbd/secret/ceph-secret.yaml
```
2 changes: 1 addition & 1 deletion pkg/volume/rbd/rbd.go
@@ -131,7 +131,7 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
			Pool:     pool,
			ReadOnly: readOnly,
			manager:  manager,
-			mounter:  mounter,
+			mounter:  &mount.SafeFormatAndMount{mounter, exec.New()},
			plugin:   plugin,
		},
		Mon: source.CephMonitors,
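A note on the one-line change above: wrapping the plain mounter in `mount.SafeFormatAndMount` lets the rbd plugin put a filesystem on an empty device before mounting it. A minimal sketch of the wrapping, assuming the field order implied by the composite literal in the diff (`newRBDMounter` is a hypothetical helper, not in the PR):

```go
import (
	"k8s.io/kubernetes/pkg/util/exec"
	"k8s.io/kubernetes/pkg/util/mount"
)

// newRBDMounter wraps a plain mount.Interface so that an unformatted
// rbd device gets a filesystem before it is mounted.
func newRBDMounter(m mount.Interface) *mount.SafeFormatAndMount {
	return &mount.SafeFormatAndMount{m, exec.New()}
}
```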
60 changes: 47 additions & 13 deletions pkg/volume/rbd/rbd_util.go
@@ -25,31 +25,63 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path"
"regexp"
"strings"
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/volume"
)

// search /sys/bus for rbd device that matches given pool and image
func getDevFromImageAndPool(pool, image string) (string, bool) {
	// /sys/bus/rbd/devices/X/name and /sys/bus/rbd/devices/X/pool
	sys_path := "/sys/bus/rbd/devices"
	if dirs, err := ioutil.ReadDir(sys_path); err == nil {
		for _, f := range dirs {
			// pool and name format:
			// see rbd_pool_show() and rbd_name_show() at
			// https://github.com/torvalds/linux/blob/master/drivers/block/rbd.c
			name := f.Name()
			// first match pool, then match name
			po := path.Join(sys_path, name, "pool")
			img := path.Join(sys_path, name, "name")
			exe := exec.New()
			out, err := exe.Command("cat", po, img).CombinedOutput()
			if err != nil {
				continue
			}
			matched, err := regexp.MatchString("^"+pool+"\n"+image+"\n$", string(out))
			if err != nil || !matched {
				continue
			}
			// found a match, check if device exists
			devicePath := "/dev/rbd" + name
			if _, err := os.Lstat(devicePath); err == nil {
				return devicePath, true
			}
		}
	}
	return "", false
}
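As a design note, the function above shells out to `cat` to read the two sysfs attributes. A sketch of the same lookup done in-process with `ioutil.ReadFile` (hypothetical, not what the PR does):

```go
// getDevFromImageAndPoolDirect is a hypothetical variant that avoids the
// exec round-trip by reading the sysfs entries directly.
func getDevFromImageAndPoolDirect(pool, image string) (string, bool) {
	sysPath := "/sys/bus/rbd/devices"
	dirs, err := ioutil.ReadDir(sysPath)
	if err != nil {
		return "", false
	}
	for _, f := range dirs {
		name := f.Name()
		p, errP := ioutil.ReadFile(path.Join(sysPath, name, "pool"))
		img, errI := ioutil.ReadFile(path.Join(sysPath, name, "name"))
		if errP != nil || errI != nil {
			continue
		}
		// sysfs attributes carry a trailing newline
		if strings.TrimSpace(string(p)) != pool || strings.TrimSpace(string(img)) != image {
			continue
		}
		// found a match, check if device exists
		devicePath := "/dev/rbd" + name
		if _, err := os.Lstat(devicePath); err == nil {
			return devicePath, true
		}
	}
	return "", false
}
```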

// stat a path, if not exists, retry maxRetries times
-func waitForPathToExist(devicePath string, maxRetries int) bool {
+func waitForPath(pool, image string, maxRetries int) (string, bool) {
	for i := 0; i < maxRetries; i++ {
-		_, err := os.Stat(devicePath)
-		if err == nil {
-			return true
-		}
-		if err != nil && !os.IsNotExist(err) {
-			return false
+		devicePath, found := getDevFromImageAndPool(pool, image)
+		if found {
+			return devicePath, true
		}
		time.Sleep(time.Second)
	}
-	return false
+	return "", false
}
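The new `waitForPath` is a poll-until-found loop; the pattern generalizes to a small helper (hypothetical, shown only to make the retry semantics explicit — note it sleeps after every failed probe, including the last):

```go
// pollFor retries probe up to maxRetries times, sleeping one second
// after each failed attempt, mirroring waitForPath's loop.
func pollFor(maxRetries int, probe func() (string, bool)) (string, bool) {
	for i := 0; i < maxRetries; i++ {
		if v, ok := probe(); ok {
			return v, true
		}
		time.Sleep(time.Second)
	}
	return "", false
}

// usage, equivalent to waitForPath(pool, image, 10):
// devicePath, found := pollFor(10, func() (string, bool) {
// 	return getDevFromImageAndPool(pool, image)
// })
```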

// make a directory like /var/lib/kubelet/plugins/kubernetes.io/pod/rbd/pool-image-image
@@ -178,9 +210,9 @@ func (util *RBDUtil) defencing(c rbdCleaner) error {

func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
	var err error
-	devicePath := strings.Join([]string{"/dev/rbd", b.Pool, b.Image}, "/")
-	exist := waitForPathToExist(devicePath, 1)
-	if !exist {
+
+	devicePath, found := waitForPath(b.Pool, b.Image, 1)
+	if !found {
		// modprobe
		_, err = b.plugin.execCommand("modprobe", []string{"rbd"})
		if err != nil {
@@ -209,8 +241,8 @@ func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
	if err != nil {
		return err
	}
-	exist = waitForPathToExist(devicePath, 10)
-	if !exist {
+	devicePath, found = waitForPath(b.Pool, b.Image, 10)
+	if !found {
		return errors.New("Could not map image: Timeout after 10s")
	}
	// mount it
// mount it
@@ -230,6 +262,8 @@ func (util *RBDUtil) AttachDisk(b rbdBuilder) error {

// fence off other mappers
if err := util.fencing(b); err != nil {
// rbd unmap before exit
b.plugin.execCommand("rbd", []string{"unmap", devicePath})
return fmt.Errorf("rbd: image %s is locked by other nodes", b.Image)
}
// rbd lock remove needs ceph and image config