Skip to content

Commit

Permalink
fix: legacy objects with 10MiB blockSize should use right buffers
Browse files Browse the repository at this point in the history
The healing code was using incorrect buffers to heal older
objects written with a 10MiB erasure blockSize. The resulting
miscalculated buffer size could lead to premature closure of
the io.Pipe() during healing.

fixes #12410
  • Loading branch information
harshavardhana committed Jun 7, 2021
1 parent 8a9ff2b commit 7904c70
Show file tree
Hide file tree
Showing 3 changed files with 20 additions and 2 deletions.
9 changes: 7 additions & 2 deletions cmd/erasure-healing.go
Original file line number Diff line number Diff line change
Expand Up @@ -457,7 +457,8 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
}
checksumInfo := copyPartsMetadata[i].Erasure.GetChecksumInfo(partNumber)
partPath := pathJoin(object, srcDataDir, fmt.Sprintf("part.%d", partNumber))
readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize())
readers[i] = newBitrotReader(disk, partsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo,
checksumInfo.Hash, erasure.ShardSize())
}
writers := make([]io.Writer, len(outDatedDisks))
for i, disk := range outDatedDisks {
Expand All @@ -473,7 +474,11 @@ func (er erasureObjects) healObject(ctx context.Context, bucket string, object s
tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
}
}
err = erasure.Heal(ctx, readers, writers, partSize, er.bp)
bp := er.bp
if erasure.blockSize == blockSizeV1 {
bp = er.bpOld
}
err = erasure.Heal(ctx, readers, writers, partSize, bp)
closeBitrotReaders(readers)
closeBitrotWriters(writers)
if err != nil {
Expand Down
9 changes: 9 additions & 0 deletions cmd/erasure-sets.go
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,14 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
// setCount * setDriveCount with each memory upto blockSizeV2.
bp := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2)

// Initialize byte pool for all sets, bpool size is set to
// setCount * setDriveCount with each memory upto blockSizeV1
//
// Number of buffers, max 10GiB
m := (10 * humanize.GiByte) / (blockSizeV1 * 2)

bpOld := bpool.NewBytePoolCap(m, blockSizeV1, blockSizeV1*2)

for i := 0; i < setCount; i++ {
s.erasureDisks[i] = make([]StorageAPI, setDriveCount)
}
Expand Down Expand Up @@ -440,6 +448,7 @@ func newErasureSets(ctx context.Context, endpoints Endpoints, storageDisks []Sto
deletedCleanupSleeper: newDynamicSleeper(10, 2*time.Second),
nsMutex: mutex,
bp: bp,
bpOld: bpOld,
mrfOpCh: make(chan partialOperation, 10000),
}
}
Expand Down
4 changes: 4 additions & 0 deletions cmd/erasure.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,10 @@ type erasureObjects struct {
// Byte pools used for temporary i/o buffers.
bp *bpool.BytePoolCap

// Byte pools used for temporary i/o buffers,
// legacy objects.
bpOld *bpool.BytePoolCap

mrfOpCh chan partialOperation

deletedCleanupSleeper *dynamicSleeper
Expand Down

0 comments on commit 7904c70

Please sign in to comment.