Make interface references consistent across the gluster plugin code. #45895

Merged: 2 commits, May 17, 2017

1 change: 1 addition & 0 deletions hack/.linted_packages
@@ -264,6 +264,7 @@ pkg/util/yaml
pkg/version/prometheus
pkg/volume
pkg/volume/downwardapi
pkg/volume/glusterfs
pkg/volume/projected
pkg/volume/quobyte
pkg/volume/util/nestedpendingoperations
90 changes: 44 additions & 46 deletions pkg/volume/glusterfs/glusterfs.go
@@ -45,7 +45,7 @@ import (
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// This is the primary entrypoint for volume plugins.
// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&glusterfsPlugin{host: nil, exe: exec.New(), gidTable: make(map[string]*MinMaxAllocator)}}
}
@@ -203,9 +203,8 @@ func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.Gl
// Glusterfs volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly
} else {
return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly
}
return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly
}

func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface, exe exec.Interface) (volume.Mounter, error) {
@@ -366,29 +365,28 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {
var addrlist []string
if b.hosts == nil {
return fmt.Errorf("glusterfs: endpoint is nil")
} else {
addr := make(map[string]struct{})
if b.hosts.Subsets != nil {
for _, s := range b.hosts.Subsets {
for _, a := range s.Addresses {
addr[a.IP] = struct{}{}
addrlist = append(addrlist, a.IP)
}
}
addr := make(map[string]struct{})
if b.hosts.Subsets != nil {
for _, s := range b.hosts.Subsets {
for _, a := range s.Addresses {
addr[a.IP] = struct{}{}
addrlist = append(addrlist, a.IP)
}

}

options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":"))
}

// Avoid mount storm, pick a host randomly.
// Iterate all hosts until mount succeeds.
for _, ip := range addrlist {
mountOptions := volume.JoinMountOptions(b.mountOptions, options)
errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions)
if errs == nil {
glog.Infof("glusterfs: successfully mounted %s", dir)
return nil
}
options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":"))

// Avoid mount storm, pick a host randomly.
// Iterate all hosts until mount succeeds.
for _, ip := range addrlist {
mountOptions := volume.JoinMountOptions(b.mountOptions, options)
errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions)
if errs == nil {
glog.Infof("glusterfs: successfully mounted %s", dir)
return nil
}
}

@@ -513,8 +511,8 @@ func (d *glusterfsVolumeDeleter) GetPath() string {
// Traverse the PVs, fetching all the GIDs from those
// in a given storage class, and mark them in the table.
//
func (p *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocator) error {
kubeClient := p.host.GetKubeClient()
func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocator) error {
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return fmt.Errorf("glusterfs: failed to get kube client when collecting gids")
}
@@ -562,11 +560,11 @@ func (p *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAllocato
// used in PVs of this storage class by traversing the PVs.
// - Adapt the range of the table to the current range of the SC.
//
func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinMaxAllocator, error) {
func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinMaxAllocator, error) {
var err error
p.gidTableLock.Lock()
gidTable, ok := p.gidTable[className]
p.gidTableLock.Unlock()
plugin.gidTableLock.Lock()
gidTable, ok := plugin.gidTable[className]
plugin.gidTableLock.Unlock()

if ok {
err = gidTable.SetRange(min, max)
@@ -584,7 +582,7 @@ func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinM
}

// collect gids with the full range
err = p.collectGids(className, newGidTable)
err = plugin.collectGids(className, newGidTable)
if err != nil {
return nil, err
}
@@ -597,10 +595,10 @@ func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinM

// if in the meantime a table appeared, use it

p.gidTableLock.Lock()
defer p.gidTableLock.Unlock()
plugin.gidTableLock.Lock()
defer plugin.gidTableLock.Unlock()

gidTable, ok = p.gidTable[className]
gidTable, ok = plugin.gidTable[className]
if ok {
err = gidTable.SetRange(min, max)
if err != nil {
@@ -610,7 +608,7 @@ func (p *glusterfsPlugin) getGidTable(className string, min int, max int) (*MinM
return gidTable, nil
}

p.gidTable[className] = newGidTable
plugin.gidTable[className] = newGidTable

return newGidTable, nil
}
@@ -768,23 +766,23 @@ func (d *glusterfsVolumeDeleter) Delete() error {
return nil
}

func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
var err error
if r.options.PVC.Spec.Selector != nil {
if p.options.PVC.Spec.Selector != nil {
glog.V(4).Infof("glusterfs: not able to parse your claim Selector")
return nil, fmt.Errorf("glusterfs: not able to parse your claim Selector")
}
glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", r.options)
scName := v1helper.GetPersistentVolumeClaimClass(r.options.PVC)
cfg, err := parseClassParameters(r.options.Parameters, r.plugin.host.GetKubeClient())
glog.V(4).Infof("glusterfs: Provison VolumeOptions %v", p.options)
scName := v1helper.GetPersistentVolumeClaimClass(p.options.PVC)
cfg, err := parseClassParameters(p.options.Parameters, p.plugin.host.GetKubeClient())
if err != nil {
return nil, err
}
r.provisionerConfig = *cfg
p.provisionerConfig = *cfg

glog.V(4).Infof("glusterfs: creating volume with configuration %+v", r.provisionerConfig)
glog.V(4).Infof("glusterfs: creating volume with configuration %+v", p.provisionerConfig)

gidTable, err := r.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
gidTable, err := p.plugin.getGidTable(scName, cfg.gidMin, cfg.gidMax)
if err != nil {
return nil, fmt.Errorf("glusterfs: failed to get gidTable: %v", err)
}
@@ -795,9 +793,9 @@ func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
return nil, fmt.Errorf("glusterfs: failed to reserve gid from table: %v", err)
}

glog.V(2).Infof("glusterfs: got gid [%d] for PVC %s", gid, r.options.PVC.Name)
glog.V(2).Infof("glusterfs: got gid [%d] for PVC %s", gid, p.options.PVC.Name)

glusterfs, sizeGB, err := r.CreateVolume(gid)
glusterfs, sizeGB, err := p.CreateVolume(gid)
if err != nil {
if releaseErr := gidTable.Release(gid); releaseErr != nil {
glog.Errorf("glusterfs: error when releasing gid in storageclass: %s", scName)
@@ -808,10 +806,10 @@ func (r *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
}
pv := new(v1.PersistentVolume)
pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
pv.Spec.PersistentVolumeReclaimPolicy = r.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = r.options.PVC.Spec.AccessModes
pv.Spec.PersistentVolumeReclaimPolicy = p.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = p.options.PVC.Spec.AccessModes
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = r.plugin.GetAccessModes()
pv.Spec.AccessModes = p.plugin.GetAccessModes()
}

gidStr := strconv.FormatInt(int64(gid), 10)
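
The glusterfs.go hunks above are mechanical golint cleanups: each receiver type gets one consistent receiver name, and else blocks that immediately follow a return are dropped. A minimal sketch of that early-return rewrite, using illustrative names that are not taken from the plugin:

package example

// Before: the if branch returns, so the else only adds nesting;
// golint asks for the else to be dropped and its block outdented.
func volumeSource(inline, fromPV string) string {
    if inline != "" {
        return inline
    } else {
        return fromPV
    }
}

// After: return early and keep the fallback at the outer indentation level,
// mirroring the getGlusterVolumeSource and setUpAtInternal hunks above.
func volumeSourceFlattened(inline, fromPV string) string {
    if inline != "" {
        return inline
    }
    return fromPV
}
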
32 changes: 24 additions & 8 deletions pkg/volume/glusterfs/glusterfs_minmax.go
@@ -28,14 +28,23 @@ import (
)

var (
ErrNotFound = errors.New("number not allocated")
ErrConflict = errors.New("number already allocated")
//ErrConflict returned when value is already in use.
ErrConflict = errors.New("number already allocated")

//ErrInvalidRange returned invalid range, for eg# min > max
ErrInvalidRange = errors.New("invalid range")
ErrOutOfRange = errors.New("out of range")
ErrRangeFull = errors.New("range full")
ErrInternal = errors.New("internal error")

//ErrOutOfRange returned when value is not in pool range.
ErrOutOfRange = errors.New("out of range")

//ErrRangeFull returned when no more free values in the pool.
ErrRangeFull = errors.New("range full")

//ErrInternal returned when no free item found, but a.free != 0.
ErrInternal = errors.New("internal error")
)

//MinMaxAllocator defines allocator struct.
type MinMaxAllocator struct {
lock sync.Mutex
min int
@@ -57,6 +66,7 @@ type Rangeable interface {
SetRange(min, max int) error
}

// NewMinMaxAllocator return a new allocator or error based on provided min/max value.
func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
if min > max {
return nil, ErrInvalidRange
@@ -69,6 +79,7 @@ func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
}, nil
}

//SetRange defines the range/pool with provided min and max values.
func (a *MinMaxAllocator) SetRange(min, max int) error {
if min > max {
return ErrInvalidRange
@@ -86,17 +97,18 @@ func (a *MinMaxAllocator) SetRange(min, max int) error {
a.max = max

// Recompute how many free we have in the range
num_used := 0
numUsed := 0
for i := range a.used {
if a.inRange(i) {
num_used++
numUsed++
}
}
a.free = 1 + max - min - num_used
a.free = 1 + max - min - numUsed

return nil
}

//Allocate allocates provided value in the allocator and mark it as used.
func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
@@ -115,6 +127,7 @@ func (a *MinMaxAllocator) Allocate(i int) (bool, error) {
return true, nil
}

//AllocateNext allocates next value from the allocator.
func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
a.lock.Lock()
defer a.lock.Unlock()
@@ -137,6 +150,7 @@ func (a *MinMaxAllocator) AllocateNext() (int, bool, error) {
return 0, false, ErrInternal
}

//Release free/delete provided value from the allocator.
func (a *MinMaxAllocator) Release(i int) error {
a.lock.Lock()
defer a.lock.Unlock()
@@ -159,13 +173,15 @@ func (a *MinMaxAllocator) has(i int) bool {
return ok
}

//Has check whether the provided value is used in the allocator
func (a *MinMaxAllocator) Has(i int) bool {
a.lock.Lock()
defer a.lock.Unlock()

return a.has(i)
}

//Free returns the number of free values in the allocator.
func (a *MinMaxAllocator) Free() int {
a.lock.Lock()
defer a.lock.Unlock()
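
For orientation, the MinMaxAllocator API documented above can be exercised roughly as follows. This is an illustrative sketch, assuming code placed inside the glusterfs package and an arbitrary GID range; the helper name exampleGidAllocation is hypothetical:

package glusterfs

import "fmt"

func exampleGidAllocation() error {
    // Build an allocator over an arbitrary, illustrative GID range.
    gids, err := NewMinMaxAllocator(2000, 2010)
    if err != nil {
        return err // ErrInvalidRange when min > max
    }

    // Reserve a specific value, e.g. a gid already recorded on an existing PV.
    if _, err := gids.Allocate(2003); err != nil {
        return err // ErrConflict if already used, ErrOutOfRange if outside the pool
    }

    // Take the next free value for a new volume.
    gid, found, err := gids.AllocateNext()
    if err != nil {
        return err // ErrRangeFull when the pool is exhausted
    }
    if !found {
        return fmt.Errorf("no free gid in range")
    }
    fmt.Printf("using gid %d; %d values still free\n", gid, gids.Free())

    // Return the value to the pool once the volume is deleted.
    return gids.Release(gid)
}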