Use local PX endpoint for mount, unmount, detach and attach calls #48898

Merged (3 commits, Jul 17, 2017)
8 changes: 4 additions & 4 deletions pkg/volume/portworx/portworx.go
@@ -266,7 +266,7 @@ func (b *portworxVolumeMounter) SetUp(fsGroup *int64) error {
// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("Portworx Volume set up: %s %v %v", dir, !notMnt, err)
glog.Infof("Portworx Volume set up. Dir: %s %v %v", dir, !notMnt, err)
Member


Do we really want to always log these messages in the log files? It looks to me that the older logging priority was more appropriate.

Contributor Author


We have found tracking volume setup and teardown events very important when debugging field issues, hence the change in logging level. SetUpAt and TearDownAt are invoked only when a pod gets scheduled onto a new node (a new pod, or an existing pod being rescheduled), so these log entries won't be very chatty.
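For readers unfamiliar with glog's verbosity gating, the standalone sketch below (not part of this change) shows the difference the discussion is about: a leveled glog.V(4).Infof call is written only when the process runs with -v=4 or higher, while an unleveled glog.Infof call is always written.

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers the -v flag; parse flags so -v takes effect.
	flag.Parse()

	// Leveled: emitted only when the process runs with -v=4 or higher.
	glog.V(4).Infof("Portworx Volume set up: %s", "/var/lib/kubelet/pods/<pod_id>")

	// Unleveled: always emitted to the INFO log, regardless of -v.
	glog.Infof("Portworx Volume set up. Dir: %s", "/var/lib/kubelet/pods/<pod_id>")

	// Make sure buffered log lines are written before exit.
	glog.Flush()
}
```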

if err != nil && !os.IsNotExist(err) {
glog.Errorf("Cannot validate mountpoint: %s", dir)
return err
@@ -291,7 +291,7 @@ func (b *portworxVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(4).Infof("Portworx Volume %s mounted to %s", b.volumeID, dir)
glog.Infof("Portworx Volume %s setup at %s", b.volumeID, dir)
return nil
}

@@ -314,8 +314,8 @@ func (c *portworxVolumeUnmounter) TearDown() error {
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *portworxVolumeUnmounter) TearDownAt(dir string) error {
glog.V(4).Infof("Portworx Volume TearDown of %s", dir)
// Call Portworx Unmount for Portworx's book-keeping.
glog.Infof("Portworx Volume TearDown of %s", dir)

if err := c.manager.UnmountVolume(c, dir); err != nil {
return err
}
56 changes: 41 additions & 15 deletions pkg/volume/portworx/portworx_util.go
@@ -43,19 +43,22 @@ type PortworxVolumeUtil struct {

// CreateVolume creates a Portworx volume.
func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int, map[string]string, error) {
- driver, err := util.getPortworxDriver(p.plugin.host)
+ driver, err := util.getPortworxDriver(p.plugin.host, false /*localOnly*/)
if err != nil || driver == nil {
glog.Errorf("Failed to get portworx driver. Err: %v", err)
return "", 0, nil, err
}

glog.Infof("Creating Portworx volume for PVC: %v", p.options.PVC.Name)

capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
// Portworx Volumes are specified in GB
requestGB := int(volume.RoundUpSize(capacity.Value(), 1024*1024*1024))

specHandler := osdspec.NewSpecHandler()
spec, err := specHandler.SpecFromOpts(p.options.Parameters)
if err != nil {
glog.Errorf("Error parsing parameters for PVC: %v. Err: %v", p.options.PVC.Name, err)
return "", 0, nil, err
}
spec.Size = uint64(requestGB * 1024 * 1024 * 1024)
@@ -68,86 +71,88 @@ func (util *PortworxVolumeUtil) CreateVolume(p *portworxVolumeProvisioner) (string, int, map[string]string, error) {
locator.VolumeLabels[pvcClaimLabel] = p.options.PVC.Name
volumeID, err := driver.Create(&locator, &source, spec)
if err != nil {
glog.V(2).Infof("Error creating Portworx Volume : %v", err)
glog.Errorf("Error creating Portworx Volume : %v", err)
}

glog.Infof("Successfully created Portworx volume for PVC: %v", p.options.PVC.Name)
return volumeID, requestGB, nil, err
}
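A side note on the sizing line in CreateVolume above: volume.RoundUpSize is assumed here to round the requested byte count up to the next whole allocation unit, so a claim that is not an exact multiple of 1024^3 bytes still becomes a whole number of units. The sketch below is a local stand-in written for illustration, not the Kubernetes helper itself.

```go
package main

import "fmt"

// roundUpSize is a stand-in for the assumed behavior of volume.RoundUpSize:
// round volumeSizeBytes up to the next whole allocationUnitBytes.
func roundUpSize(volumeSizeBytes, allocationUnitBytes int64) int64 {
	return (volumeSizeBytes + allocationUnitBytes - 1) / allocationUnitBytes
}

func main() {
	const unit = int64(1024 * 1024 * 1024)
	// A request of 10.5 units in bytes rounds up to 11 whole units.
	fmt.Println(roundUpSize(10*unit+unit/2, unit)) // prints 11
}
```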

// DeleteVolume deletes a Portworx volume
func (util *PortworxVolumeUtil) DeleteVolume(d *portworxVolumeDeleter) error {
- driver, err := util.getPortworxDriver(d.plugin.host)
+ driver, err := util.getPortworxDriver(d.plugin.host, false /*localOnly*/)
if err != nil || driver == nil {
glog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}

err = driver.Delete(d.volumeID)
if err != nil {
glog.V(2).Infof("Error deleting Portworx Volume (%v): %v", d.volName, err)
glog.Errorf("Error deleting Portworx Volume (%v): %v", d.volName, err)
return err
}
return nil
}

// AttachVolume attaches a Portworx Volume
func (util *PortworxVolumeUtil) AttachVolume(m *portworxVolumeMounter) (string, error) {
- driver, err := util.getPortworxDriver(m.plugin.host)
+ driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/)
if err != nil || driver == nil {
glog.Errorf("Failed to get portworx driver. Err: %v", err)
return "", err
}

devicePath, err := driver.Attach(m.volName)
if err != nil {
glog.V(2).Infof("Error attaching Portworx Volume (%v): %v", m.volName, err)
glog.Errorf("Error attaching Portworx Volume (%v): %v", m.volName, err)
return "", err
}
return devicePath, nil
}

// DetachVolume detaches a Portworx Volume
func (util *PortworxVolumeUtil) DetachVolume(u *portworxVolumeUnmounter) error {
- driver, err := util.getPortworxDriver(u.plugin.host)
+ driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/)
if err != nil || driver == nil {
glog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}

err = driver.Detach(u.volName)
if err != nil {
glog.V(2).Infof("Error detaching Portworx Volume (%v): %v", u.volName, err)
glog.Errorf("Error detaching Portworx Volume (%v): %v", u.volName, err)
return err
}
return nil
}

// MountVolume mounts a Portworx Volume on the specified mountPath
func (util *PortworxVolumeUtil) MountVolume(m *portworxVolumeMounter, mountPath string) error {
- driver, err := util.getPortworxDriver(m.plugin.host)
+ driver, err := util.getPortworxDriver(m.plugin.host, true /*localOnly*/)
if err != nil || driver == nil {
glog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}

err = driver.Mount(m.volName, mountPath)
if err != nil {
glog.V(2).Infof("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err)
glog.Errorf("Error mounting Portworx Volume (%v) on Path (%v): %v", m.volName, mountPath, err)
return err
}
return nil
}

// UnmountVolume unmounts a Portworx Volume
func (util *PortworxVolumeUtil) UnmountVolume(u *portworxVolumeUnmounter, mountPath string) error {
- driver, err := util.getPortworxDriver(u.plugin.host)
+ driver, err := util.getPortworxDriver(u.plugin.host, true /*localOnly*/)
if err != nil || driver == nil {
glog.Errorf("Failed to get portworx driver. Err: %v", err)
return err
}

err = driver.Unmount(u.volName, mountPath)
if err != nil {
glog.V(2).Infof("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err)
glog.Errorf("Error unmounting Portworx Volume (%v) on Path (%v): %v", u.volName, mountPath, err)
return err
}
return nil
@@ -181,13 +186,34 @@ func createDriverClient(hostname string) (*osdclient.Client, error) {
}
}

- func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
+ // getPortworxDriver() returns a Portworx volume driver which can be used for volume operations
+ // localOnly: If true, the returned driver will be connected to Portworx API server on volume host.
+ // If false, driver will be connected to API server on volume host or Portworx k8s service cluster IP
+ // This flag is required to explicitly force certain operations (mount, unmount, detach, attach) to
+ // go to the volume host instead of the k8s service which might route it to any host. This pertains to how
+ // Portworx mounts and attaches a volume to the running container. The node getting these requests needs to
+ // see the pod container mounts (specifically /var/lib/kubelet/pods/<pod_id>)
+ // Operations like create and delete volume don't need to be restricted to local volume host since
+ // any node in the Portworx cluster can co-ordinate the create/delete request and forward the operations to
+ // the Portworx node that will own/owns the data.
+ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost, localOnly bool) (volumeapi.VolumeDriver, error) {
+ 	var err error
+ 	if localOnly {
+ 		util.portworxClient, err = createDriverClient(volumeHost.GetHostName())
+ 		if err != nil {
+ 			return nil, err
+ 		} else {
+ 			glog.V(4).Infof("Using portworx local service at: %v as api endpoint", volumeHost.GetHostName())
+ 			return volumeclient.VolumeDriver(util.portworxClient), nil
+ 		}
+ 	}

// check if existing saved client is valid
if isValid, _ := isClientValid(util.portworxClient); isValid {
return volumeclient.VolumeDriver(util.portworxClient), nil
}

// create new client
- var err error
util.portworxClient, err = createDriverClient(volumeHost.GetHostName()) // for backward compatibility
if err != nil || util.portworxClient == nil {
// Create client from portworx service
@@ -215,7 +241,7 @@ func (util *PortworxVolumeUtil) getPortworxDriver(volumeHost volume.VolumeHost) (volumeapi.VolumeDriver, error) {
return nil, err
}

glog.Infof("Using portworx service at: %v as api endpoint", svc.Spec.ClusterIP)
glog.Infof("Using portworx cluster service at: %v as api endpoint", svc.Spec.ClusterIP)
} else {
glog.Infof("Using portworx service at: %v as api endpoint", volumeHost.GetHostName())
}
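To summarize the routing behaviour the new localOnly flag (documented in the getPortworxDriver comment above) is meant to enforce, here is a small self-contained sketch. It is illustrative only: pickEndpoint and the port constant are assumptions for this note, not part of the PR or the Portworx client API.

```go
package main

import "fmt"

// Assumed Portworx API port for illustration; the real client code resolves this itself.
const osdAPIPort = 9001

// pickEndpoint mirrors the idea behind the localOnly flag: mount, unmount,
// attach and detach must talk to the Portworx daemon on the kubelet's own
// host (so it can see the pod mounts under /var/lib/kubelet/pods/<pod_id>),
// while create and delete may go through the Portworx Kubernetes service,
// whose cluster IP can route the request to any node.
func pickEndpoint(localOnly bool, hostName, serviceClusterIP string) string {
	if localOnly {
		return fmt.Sprintf("http://%s:%d", hostName, osdAPIPort)
	}
	return fmt.Sprintf("http://%s:%d", serviceClusterIP, osdAPIPort)
}

func main() {
	// Node-local operation (e.g. mount): must stay on this host.
	fmt.Println(pickEndpoint(true, "node-1", "10.0.0.42"))

	// Cluster-wide operation (e.g. create): the service endpoint is fine.
	fmt.Println(pickEndpoint(false, "node-1", "10.0.0.42"))
}
```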