satellite/gc: improve test for copies
Initial space used for pieces is calculated rather than retrieved
from storage nodes, and at the end of the test we also delete
the copies that became ancestors, to verify that all data
was removed from the storage nodes.

Change-Id: I9804adb9fa488dc0094a67a6e258c144977e7f5d
mniewrzal authored and profclems committed Apr 11, 2022
1 parent a3d9016 commit 99ec4c8
Showing 2 changed files with 54 additions and 22 deletions.
63 changes: 41 additions & 22 deletions satellite/gc/gc_test.go
@@ -26,6 +26,7 @@ import (
 	"storj.io/storj/satellite/metabase"
 	"storj.io/storj/storage"
 	"storj.io/storj/storagenode"
+	"storj.io/uplink/private/eestream"
 	"storj.io/uplink/private/testuplink"
 )

@@ -151,25 +152,31 @@ func TestGarbageCollectionWithCopies(t *testing.T) {

 	allSpaceUsedForPieces := func() (all int64) {
 		for _, node := range planet.StorageNodes {
-			total, _, _, err := node.Storage2.Store.SpaceUsedTotalAndBySatellite(ctx)
+			_, piecesContent, _, err := node.Storage2.Store.SpaceUsedTotalAndBySatellite(ctx)
 			require.NoError(t, err)
-			all += total
+			all += piecesContent
 		}
 		return all
 	}

 	expectedRemoteData := testrand.Bytes(8 * memory.KiB)
 	expectedInlineData := testrand.Bytes(1 * memory.KiB)

-	require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "remote", expectedRemoteData))
-	require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "inline", expectedInlineData))
+	encryptedSize, err := encryption.CalcEncryptedSize(int64(len(expectedRemoteData)), storj.EncryptionParameters{
+		CipherSuite: storj.EncAESGCM,
+		BlockSize:   29 * 256 * memory.B.Int32(), // hardcoded value from uplink
+	})
+	require.NoError(t, err)

-	require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))
+	redundancyStrategy, err := planet.Satellites[0].Config.Metainfo.RS.RedundancyStrategy()
+	require.NoError(t, err)

-	// how much used space we should have after deleting objects
-	expectedUsedAfterDeleteAndGC := allSpaceUsedForPieces()
-	require.NotZero(t, expectedUsedAfterDeleteAndGC)
+	pieceSize := eestream.CalcPieceSize(encryptedSize, redundancyStrategy.ErasureScheme)
+	singleRemoteUsed := pieceSize * int64(len(planet.StorageNodes))
+	totalUsedByNodes := 2 * singleRemoteUsed // two remote objects

+	require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "remote", expectedRemoteData))
+	require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "inline", expectedInlineData))
 	require.NoError(t, planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "remote-no-copy", expectedRemoteData))

 	_, err = project.CopyObject(ctx, "testbucket", "remote", "testbucket", "remote-copy", nil)

@@ -179,7 +186,8 @@ func TestGarbageCollectionWithCopies(t *testing.T) {

 	require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))

-	totalUsedByNodes := allSpaceUsedForPieces()
+	afterTotalUsedByNodes := allSpaceUsedForPieces()
+	require.Equal(t, totalUsedByNodes, afterTotalUsedByNodes)

 	// run GC
 	gcService.Loop.TriggerWait()

@@ -188,19 +196,19 @@ func TestGarbageCollectionWithCopies(t *testing.T) {
 	}

 	// we should see all space used by all objects
-	afterTotalUsedByNodes := allSpaceUsedForPieces()
+	afterTotalUsedByNodes = allSpaceUsedForPieces()
 	require.Equal(t, totalUsedByNodes, afterTotalUsedByNodes)

-	// delete ancestors, no change in used space
-	_, err = project.DeleteObject(ctx, "testbucket", "remote")
-	require.NoError(t, err)
-
-	_, err = project.DeleteObject(ctx, "testbucket", "inline")
-	require.NoError(t, err)
-
-	// delete object without copy, used space should be decreased
-	_, err = project.DeleteObject(ctx, "testbucket", "remote-no-copy")
-	require.NoError(t, err)
+	for _, toDelete := range []string{
+		// delete ancestors, no change in used space
+		"remote",
+		"inline",
+		// delete object without copy, used space should be decreased
+		"remote-no-copy",
+	} {
+		_, err = project.DeleteObject(ctx, "testbucket", toDelete)
+		require.NoError(t, err)
+	}

 	planet.WaitForStorageNodeDeleters(ctx)

@@ -212,7 +220,18 @@ func TestGarbageCollectionWithCopies(t *testing.T) {

 	// verify that we deleted only pieces for "remote-no-copy" object
 	afterTotalUsedByNodes = allSpaceUsedForPieces()
-	require.Equal(t, expectedUsedAfterDeleteAndGC, afterTotalUsedByNodes)
+	require.Equal(t, singleRemoteUsed, afterTotalUsedByNodes)

+	// delete rest of objects to verify that everything will be removed also from SNs
+	for _, toDelete := range []string{
+		"remote-copy",
+		"inline-copy",
+	} {
+		_, err = project.DeleteObject(ctx, "testbucket", toDelete)
+		require.NoError(t, err)
+	}
+
+	planet.WaitForStorageNodeDeleters(ctx)
+
 	// run GC
 	gcService.Loop.TriggerWait()

@@ -222,7 +241,7 @@ func TestGarbageCollectionWithCopies(t *testing.T) {

 	// verify that nothing more was deleted from storage nodes after GC
 	afterTotalUsedByNodes = allSpaceUsedForPieces()
-	require.Equal(t, expectedUsedAfterDeleteAndGC, afterTotalUsedByNodes)
+	require.EqualValues(t, 0, afterTotalUsedByNodes)
 	})
 }

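The space accounting above is derived rather than measured: the test computes the encrypted object size, turns it into a per-node piece size, and multiplies by the node count. Below is a minimal standalone sketch of the first step, using the same constants as the diff; the package wrapper and printed output are illustrative, not part of the commit.

package main

import (
	"fmt"

	"storj.io/common/encryption"
	"storj.io/common/memory"
	"storj.io/common/storj"
)

func main() {
	// Encrypted size of an 8 KiB object under AES-GCM, with the block
	// size the test hardcodes (29 shares of 256 bytes, from uplink).
	encryptedSize, err := encryption.CalcEncryptedSize(8*memory.KiB.Int64(), storj.EncryptionParameters{
		CipherSuite: storj.EncAESGCM,
		BlockSize:   29 * 256 * memory.B.Int32(),
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("encrypted size:", encryptedSize)
}

From encryptedSize the test takes eestream.CalcPieceSize(encryptedSize, scheme) as the per-node piece size, multiplies by len(planet.StorageNodes) for one remote object, and doubles that for the two remote objects; inline objects drop out of the node-side total, consistent with inline segments being stored on the satellite rather than on storage nodes.
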
13 changes: 13 additions & 0 deletions satellite/metainfo/config.go
@@ -9,9 +9,12 @@ import (
 	"strings"
 	"time"

+	"github.com/vivint/infectious"
+
 	"storj.io/common/memory"
 	"storj.io/storj/satellite/metabase/segmentloop"
 	"storj.io/storj/satellite/metainfo/piecedeletion"
+	"storj.io/uplink/private/eestream"
 )

 const (

@@ -90,6 +93,16 @@ func (rs *RSConfig) Set(s string) error {
 	return nil
 }

+// RedundancyStrategy creates eestream.RedundancyStrategy from config values.
+func (rs *RSConfig) RedundancyStrategy() (eestream.RedundancyStrategy, error) {
+	fec, err := infectious.NewFEC(rs.Min, rs.Total)
+	if err != nil {
+		return eestream.RedundancyStrategy{}, err
+	}
+	erasureScheme := eestream.NewRSScheme(fec, rs.ErasureShareSize.Int())
+	return eestream.NewRedundancyStrategy(erasureScheme, rs.Repair, rs.Success)
+}
+
 // RateLimiterConfig is a configuration struct for endpoint rate limiting.
 type RateLimiterConfig struct {
 	Enabled bool `help:"whether rate limiting is enabled." releaseDefault:"true" devDefault:"true"`

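The new helper makes the satellite's RS configuration consumable by eestream, which is what lets the test predict piece sizes. A hedged usage sketch follows; the RSConfig field values are placeholders chosen for illustration, not asserted defaults.

package main

import (
	"fmt"

	"storj.io/common/memory"
	"storj.io/storj/satellite/metainfo"
	"storj.io/uplink/private/eestream"
)

func main() {
	// Placeholder parameters; real callers read them from satellite
	// config, as the test does via planet.Satellites[0].Config.Metainfo.RS.
	rs := metainfo.RSConfig{
		ErasureShareSize: 256 * memory.B,
		Min:              4,
		Repair:           6,
		Success:          8,
		Total:            10,
	}
	strategy, err := rs.RedundancyStrategy()
	if err != nil {
		panic(err)
	}
	// RedundancyStrategy embeds the erasure scheme, so it can be passed
	// straight to eestream.CalcPieceSize, as the updated test does.
	pieceSize := eestream.CalcPieceSize(9*memory.KiB.Int64(), strategy.ErasureScheme)
	fmt.Println("piece size per node:", pieceSize)
}
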
