Skip to content

Commit

Permalink
Remove upload healing related dead code (#5404)
Browse files Browse the repository at this point in the history
  • Loading branch information
donatello authored and kannappanr committed Jan 16, 2018
1 parent 78a641f commit aa7e5c7
Show file tree
Hide file tree
Showing 8 changed files with 3 additions and 448 deletions.
5 changes: 0 additions & 5 deletions cmd/fs-v1.go
Original file line number Diff line number Diff line change
Expand Up @@ -1057,8 +1057,3 @@ func (fs fsObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, ma
// ListBucketsHeal - Not implemented stub for the FS backend; bucket
// healing is an erasure-coded (XL) feature. Returns an empty bucket
// list together with a traced NotImplemented error.
func (fs fsObjects) ListBucketsHeal() ([]BucketInfo, error) {
return []BucketInfo{}, errors.Trace(NotImplemented{})
}

// ListUploadsHeal - Not implemented stub for the FS backend; healing of
// multipart uploads applies only to erasure-coded (XL) setups. Always
// returns the zero ListMultipartsInfo and a traced NotImplemented error.
func (fs fsObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, errors.Trace(NotImplemented{})
}
6 changes: 0 additions & 6 deletions cmd/gateway-unsupported.go
Original file line number Diff line number Diff line change
Expand Up @@ -103,12 +103,6 @@ func (a GatewayUnsupported) ListObjectsHeal(bucket, prefix, marker, delimiter st
return loi, errors.Trace(NotImplemented{})
}

// ListUploadsHeal - Not implemented stub. Gateway backends do not
// support healing of multipart uploads; the zero ListMultipartsInfo is
// returned along with a traced NotImplemented error.
func (a GatewayUnsupported) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {
return lmi, errors.Trace(NotImplemented{})
}

// AnonListObjects - List objects anonymously
func (a GatewayUnsupported) AnonListObjects(bucket string, prefix string, marker string, delimiter string,
maxKeys int) (loi ListObjectsInfo, err error) {
Expand Down
2 changes: 0 additions & 2 deletions cmd/object-api-interface.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,6 @@ type ObjectLayer interface {
ListBucketsHeal() (buckets []BucketInfo, err error)
HealObject(bucket, object string) (int, int, error)
ListObjectsHeal(bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error)
ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (ListMultipartsInfo, error)

// Locking operations
ListLocks(bucket, prefix string, duration time.Duration) ([]VolumeLockInfo, error)
Expand Down
222 changes: 0 additions & 222 deletions cmd/xl-v1-list-objects-heal.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@
package cmd

import (
"path/filepath"
"sort"
"strings"

Expand Down Expand Up @@ -197,224 +196,3 @@ func (xl xlObjects) ListObjectsHeal(bucket, prefix, marker, delimiter string, ma
// Return error at the end.
return loi, toObjectErr(err, bucket, prefix)
}

// ListUploadsHeal - lists ongoing multipart uploads that require
// healing in one or more disks.
//
// bucket/prefix scope the listing; marker and uploadIDMarker resume a
// previous truncated listing; delimiter groups common prefixes;
// maxUploads caps the number of entries returned. Errors from the
// underlying listing are translated to object-layer errors via
// toObjectErr.
func (xl xlObjects) ListUploadsHeal(bucket, prefix, marker, uploadIDMarker,
delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {

// For delimiter and prefix as '/' we do not list anything at all
// since according to s3 spec we stop at the 'delimiter' along
// with the prefix. On a flat namespace with 'prefix' as '/'
// we don't have any entries, since all the keys are of form 'keyName/...'
if delimiter == slashSeparator && prefix == slashSeparator {
return lmi, nil
}

// Initiate a list operation.
listMultipartInfo, err := xl.listMultipartUploadsHeal(bucket, prefix,
marker, uploadIDMarker, delimiter, maxUploads)
if err != nil {
return lmi, toObjectErr(err, bucket, prefix)
}

// We got the entries successfully return.
return listMultipartInfo, nil
}

// fetchMultipartUploadIDs fetches the list of multipart uploadIDs for
// the given bucket starting from keyMarker/uploadIDMarker, reading from
// the first responsive disk.
//
// A read lock is held on the keyMarker path for the duration of the
// listing. Disks are tried in order: nil disks are skipped, and the
// scan stops at the first success or at the first error that is not in
// objMetadataOpIgnoredErrs. Returns the uploads found, whether the
// listing reached the end, and any terminal error.
func (xl xlObjects) fetchMultipartUploadIDs(bucket, keyMarker, uploadIDMarker string,
	maxUploads int, disks []StorageAPI) (uploads []MultipartInfo, end bool,
	err error) {

	// Hold a read lock on keyMarker path.
	keyMarkerLock := xl.nsMutex.NewNSLock(minioMetaMultipartBucket,
		pathJoin(bucket, keyMarker))
	if err = keyMarkerLock.GetRLock(globalHealingTimeout); err != nil {
		return uploads, end, err
	}
	// Defer the unlock so the read lock is released on every return
	// path, including a panic inside listMultipartUploadIDs (the
	// original manual RUnlock at the end would leak the lock then).
	defer keyMarkerLock.RUnlock()

	for _, disk := range disks {
		if disk == nil {
			continue
		}
		uploads, end, err = xl.listMultipartUploadIDs(bucket, keyMarker,
			uploadIDMarker, maxUploads, disk)
		// Stop on success, or on any error we cannot ignore.
		if err == nil ||
			!errors.IsErrIgnored(err, objMetadataOpIgnoredErrs...) {
			break
		}
	}
	return uploads, end, err
}

// listMultipartUploadsHeal - Returns a list of incomplete multipart
// uploads that need to be healed.
//
// The listing starts from keyMarker/uploadIDMarker and returns at most
// maxUploads entries. Directory entries become common prefixes; object
// entries are expanded into their pending uploadIDs, and only uploads
// whose xl metadata indicates heal is required (xlShouldHeal) are
// included in result.Uploads. result.IsTruncated signals whether more
// entries remain, in which case NextKeyMarker/NextUploadIDMarker can be
// passed back in to resume.
func (xl xlObjects) listMultipartUploadsHeal(bucket, prefix, keyMarker,
uploadIDMarker, delimiter string, maxUploads int) (lmi ListMultipartsInfo, e error) {

result := ListMultipartsInfo{
IsTruncated: true,
MaxUploads: maxUploads,
KeyMarker: keyMarker,
Prefix: prefix,
Delimiter: delimiter,
}

// Walk recursively unless the caller asked for '/'-delimited output.
recursive := delimiter != slashSeparator

var uploads []MultipartInfo
var err error
// List all upload ids for the given keyMarker, starting from
// uploadIDMarker.
if uploadIDMarker != "" {
uploads, _, err = xl.fetchMultipartUploadIDs(bucket, keyMarker,
uploadIDMarker, maxUploads, xl.getLoadBalancedDisks())
if err != nil {
return lmi, err
}
// Budget already spent on the resumed key's uploadIDs.
maxUploads = maxUploads - len(uploads)
}

// We can't use path.Join() as it strips off the trailing '/'.
multipartPrefixPath := pathJoin(bucket, prefix)
// multipartPrefixPath should have a trailing '/' when prefix = "".
if prefix == "" {
multipartPrefixPath += slashSeparator
}

multipartMarkerPath := ""
if keyMarker != "" {
multipartMarkerPath = pathJoin(bucket, keyMarker)
}

// `heal bool` is used to differentiate listing of incomplete
// uploads (and parts) from a regular listing of incomplete
// parts by client SDKs or mc-like commands, within a treewalk
// pool.
heal := true
// The listing is truncated if we have maxUploads entries and
// there are more entries to be listed.
truncated := true
var walkerCh chan treeWalkResult
var walkerDoneCh chan struct{}
// Check if we have room left to send more uploads.
if maxUploads > 0 {
uploadsLeft := maxUploads

// Try to reuse an existing treewalk goroutine from the pool;
// fall back to starting a fresh walk if none matches.
walkerCh, walkerDoneCh = xl.listPool.Release(listParams{
bucket: minioMetaMultipartBucket,
recursive: recursive,
marker: multipartMarkerPath,
prefix: multipartPrefixPath,
heal: heal,
})
if walkerCh == nil {
walkerDoneCh = make(chan struct{})
isLeaf := xl.isMultipartUpload
listDir := listDirFactory(isLeaf, xlTreeWalkIgnoredErrs,
xl.getLoadBalancedDisks()...)
walkerCh = startTreeWalk(minioMetaMultipartBucket,
multipartPrefixPath, multipartMarkerPath,
recursive, listDir, isLeaf, walkerDoneCh)
}
// Collect uploads until leftUploads limit is reached.
for {
walkResult, ok := <-walkerCh
if !ok {
// Walker channel closed: the walk is exhausted.
truncated = false
break
}
// For any error during tree walk, we should return right away.
if walkResult.err != nil {
return lmi, walkResult.err
}

entry := strings.TrimPrefix(walkResult.entry,
retainSlash(bucket))
// Skip entries that are not object directory.
if hasSuffix(walkResult.entry, slashSeparator) {
uploads = append(uploads, MultipartInfo{
Object: entry,
})
uploadsLeft--
if uploadsLeft == 0 {
break
}
continue
}

// For an object entry we get all its pending
// uploadIDs.
var newUploads []MultipartInfo
var end bool
uploadIDMarker = ""
newUploads, end, err = xl.fetchMultipartUploadIDs(bucket, entry, uploadIDMarker,
uploadsLeft, xl.getLoadBalancedDisks())
if err != nil {
return lmi, err
}
uploads = append(uploads, newUploads...)
uploadsLeft -= len(newUploads)
if end && walkResult.end {
truncated = false
break
}
if uploadsLeft == 0 {
break
}
}

}

// For all received uploads fill in the multiparts result.
for _, upload := range uploads {
var objectName string
var uploadID string
if hasSuffix(upload.Object, slashSeparator) {
// All directory entries are common
// prefixes. For common prefixes, upload ids
// are empty.
uploadID = ""
objectName = upload.Object
result.CommonPrefixes = append(result.CommonPrefixes, objectName)
} else {
// Check if upload needs healing.
uploadIDPath := filepath.Join(bucket, upload.Object, upload.UploadID)
partsMetadata, errs := readAllXLMetadata(xl.storageDisks,
minioMetaMultipartBucket, uploadIDPath)
if xlShouldHeal(xl.storageDisks, partsMetadata, errs,
minioMetaMultipartBucket, uploadIDPath) {

healUploadInfo := xlHealStat(xl, partsMetadata, errs)
upload.HealUploadInfo = &healUploadInfo
result.Uploads = append(result.Uploads, upload)
}
uploadID = upload.UploadID
objectName = upload.Object
}

// Markers track the last entry examined, healed or not.
result.NextKeyMarker = objectName
result.NextUploadIDMarker = uploadID
}

if truncated {
// Put back the tree walk go-routine into the pool for
// subsequent use.
xl.listPool.Set(listParams{
bucket: bucket,
recursive: recursive,
marker: result.NextKeyMarker,
prefix: prefix,
heal: heal,
}, walkerCh, walkerDoneCh)
}

result.IsTruncated = truncated
// Result is not truncated, reset the markers.
if !result.IsTruncated {
result.NextKeyMarker = ""
result.NextUploadIDMarker = ""
}
return result, nil
}
77 changes: 0 additions & 77 deletions cmd/xl-v1-list-objects-heal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,6 @@ package cmd
import (
"bytes"
"os"
"path"
"path/filepath"
"strconv"
"testing"
)
Expand Down Expand Up @@ -142,78 +140,3 @@ func TestListObjectsHeal(t *testing.T) {
}

}

// Test for ListUploadsHeal API for XL.
//
// Scenario: create a bucket and one in-progress multipart upload with a
// single part on a 16-disk XL backend, verify the heal listing is empty
// while all disks are intact, then delete the upload's xl.json from one
// disk and verify exactly that upload is reported as needing heal.
func TestListUploadsHeal(t *testing.T) {
initNSLock(false)

rootPath, err := newTestConfig(globalMinioDefaultRegion)
if err != nil {
t.Fatalf("Init Test config failed")
}
// Remove config directory after the test ends.
defer os.RemoveAll(rootPath)

// Create an instance of XL backend.
xl, fsDirs, err := prepareXL16()
if err != nil {
t.Fatal(err)
}
// Cleanup backend directories on function return.
defer removeRoots(fsDirs)

bucketName := "bucket"
prefix := "prefix"
objName := path.Join(prefix, "obj")

// Create test bucket.
err = xl.MakeBucketWithLocation(bucketName, "")
if err != nil {
t.Fatal(err)
}

// Create a new multipart upload.
uploadID, err := xl.NewMultipartUpload(bucketName, objName, nil)
if err != nil {
t.Fatal(err)
}

// Upload a part.
data := bytes.Repeat([]byte("a"), 1024)
_, err = xl.PutObjectPart(bucketName, objName, uploadID, 1,
mustGetHashReader(t, bytes.NewReader(data), int64(len(data)), "", ""))
if err != nil {
t.Fatal(err)
}

// Check if list uploads heal returns any uploads to be healed
// incorrectly.
listUploadsInfo, err := xl.ListUploadsHeal(bucketName, prefix, "", "", "", 1000)
if err != nil {
t.Fatal(err)
}

// All uploads intact nothing to heal.
if len(listUploadsInfo.Uploads) != 0 {
t.Errorf("Expected no uploads but received %d", len(listUploadsInfo.Uploads))
}

// Delete the part from the first disk to make the upload (and
// its part) to appear in upload heal listing.
firstDisk := xl.(*xlObjects).storageDisks[0]
err = firstDisk.DeleteFile(minioMetaMultipartBucket,
filepath.Join(bucketName, objName, uploadID, xlMetaJSONFile))
if err != nil {
t.Fatal(err)
}

listUploadsInfo, err = xl.ListUploadsHeal(bucketName, prefix, "", "", "", 1000)
if err != nil {
t.Fatal(err)
}

// One upload with missing xl.json on first disk.
if len(listUploadsInfo.Uploads) != 1 {
t.Errorf("Expected 1 upload but received %d", len(listUploadsInfo.Uploads))
}
}
6 changes: 3 additions & 3 deletions pkg/madmin/API.md
Original file line number Diff line number Diff line change
Expand Up @@ -273,9 +273,9 @@ If object is successfully healed returns nil, otherwise returns error indicating

| Value | Description |
|---|---|
|`HealNone` | Object/Upload wasn't healed on any of the disks |
|`HealPartial` | Object/Upload was healed on some of the disks needing heal |
| `HealOK` | Object/Upload was healed on all the disks needing heal |
|`HealNone` | Object wasn't healed on any of the disks |
|`HealPartial` | Object was healed on some of the disks needing heal |
| `HealOK` | Object was healed on all the disks needing heal |


__Example__
Expand Down

0 comments on commit aa7e5c7

Please sign in to comment.