Skip to content

Commit

Permalink
satellite/metainfo: stop sending delete requests to SN
Browse files Browse the repository at this point in the history
We decided that we will stop sending explicit delete requests to storage nodes
and will instead clean up deleted pieces via garbage collection (GC).

#5888

Change-Id: I65a308cca6fb17e97e3ba85eb3212584c96a32cd
  • Loading branch information
mniewrzal authored and Storj Robot committed Jun 14, 2023
1 parent 4a49bc4 commit 6c08d50
Show file tree
Hide file tree
Showing 8 changed files with 21 additions and 221 deletions.
1 change: 0 additions & 1 deletion satellite/api.go
Expand Up @@ -445,7 +445,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Log.Named("metainfo:endpoint"),
peer.Buckets.Service,
peer.Metainfo.Metabase,
peer.Metainfo.PieceDeletion,
peer.Orders.Service,
peer.Overlay.Service,
peer.DB.Attribution(),
Expand Down
4 changes: 2 additions & 2 deletions satellite/gc/gc_test.go
Expand Up @@ -268,7 +268,7 @@ func TestGarbageCollectionWithCopies(t *testing.T) {

// verify that we deleted only pieces for "remote-no-copy" object
afterTotalUsedByNodes = allSpaceUsedForPieces()
require.Equal(t, singleRemoteUsed, afterTotalUsedByNodes)
require.Equal(t, totalUsedByNodes, afterTotalUsedByNodes)

// delete rest of objects to verify that everything will be removed also from SNs
for _, toDelete := range []string{
Expand All @@ -295,7 +295,7 @@ func TestGarbageCollectionWithCopies(t *testing.T) {

// verify that nothing more was deleted from storage nodes after GC
afterTotalUsedByNodes = allSpaceUsedForPieces()
require.EqualValues(t, 0, afterTotalUsedByNodes)
require.EqualValues(t, totalUsedByNodes, afterTotalUsedByNodes)
})
}

Expand Down
39 changes: 1 addition & 38 deletions satellite/metabase/delete_bucket.go
Expand Up @@ -26,10 +26,6 @@ const (
type DeleteBucketObjects struct {
Bucket BucketLocation
BatchSize int

// DeletePieces is called for every batch of objects.
// Slice `segments` will be reused between calls.
DeletePieces func(ctx context.Context, segments []DeletedSegmentInfo) error
}

var deleteObjectsCockroachSubSQL = `
Expand Down Expand Up @@ -134,33 +130,7 @@ func (db *DB) deleteBucketObjectBatchWithCopyFeatureEnabled(ctx context.Context,
return 0, err
}

deletedObjectCount = int64(len(objects))

if opts.DeletePieces == nil {
// no callback, this should only be in test path
return deletedObjectCount, err
}

for _, object := range objects {
if object.PromotedAncestor != nil {
// don't remove pieces, they are now linked to the new ancestor
continue
}
for _, segment := range object.Segments {
// Is there an advantage to batching this?
err := opts.DeletePieces(ctx, []DeletedSegmentInfo{
{
RootPieceID: segment.RootPieceID,
Pieces: segment.Pieces,
},
})
if err != nil {
return deletedObjectCount, err
}
}
}

return deletedObjectCount, err
return int64(len(objects)), err
}

func (db *DB) scanBucketObjectsDeletionServerSideCopy(ctx context.Context, location BucketLocation, rows tagsql.Rows) (result []deletedObjectInfo, err error) {
Expand Down Expand Up @@ -292,12 +262,5 @@ func (db *DB) deleteBucketObjectsWithCopyFeatureDisabled(ctx context.Context, op
if len(deletedSegments) == 0 {
return deletedObjectCount, nil
}

if opts.DeletePieces != nil {
err = opts.DeletePieces(ctx, deletedSegments)
if err != nil {
return deletedObjectCount, Error.Wrap(err)
}
}
}
}
62 changes: 0 additions & 62 deletions satellite/metabase/delete_bucket_test.go
Expand Up @@ -5,7 +5,6 @@ package metabase_test

import (
"context"
"errors"
"fmt"
"testing"
"time"
Expand Down Expand Up @@ -68,9 +67,6 @@ func TestDeleteBucketObjects(t *testing.T) {
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: obj1.Location().Bucket(),
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
return errors.New("shouldn't be called")
},
},
Deleted: 0,
}.Check(ctx, t, db)
Expand All @@ -83,26 +79,13 @@ func TestDeleteBucketObjects(t *testing.T) {

metabasetest.CreateObject(ctx, t, db, obj1, 2)

nSegments := 0
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: obj1.Location().Bucket(),
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
nSegments += len(segments)

for _, s := range segments {
if len(s.Pieces) != 1 {
return errors.New("expected 1 piece per segment")
}
}
return nil
},
},
Deleted: 1,
}.Check(ctx, t, db)

require.Equal(t, 2, nSegments)

metabasetest.Verify{}.Check(ctx, t, db)
})

Expand All @@ -114,9 +97,6 @@ func TestDeleteBucketObjects(t *testing.T) {
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: obj1.Location().Bucket(),
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
return errors.New("expected no segments")
},
},
Deleted: 1,
}.Check(ctx, t, db)
Expand All @@ -131,27 +111,14 @@ func TestDeleteBucketObjects(t *testing.T) {
metabasetest.CreateObject(ctx, t, db, obj2, 2)
metabasetest.CreateObject(ctx, t, db, obj3, 2)

nSegments := 0
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: obj1.Location().Bucket(),
BatchSize: 2,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
nSegments += len(segments)

for _, s := range segments {
if len(s.Pieces) != 1 {
return errors.New("expected 1 piece per segment")
}
}
return nil
},
},
Deleted: 3,
}.Check(ctx, t, db)

require.Equal(t, 6, nSegments)

metabasetest.Verify{}.Check(ctx, t, db)
})

Expand Down Expand Up @@ -239,22 +206,14 @@ func TestDeleteBucketObjects(t *testing.T) {

metabasetest.CreateObject(ctx, t, db, obj1, 37)

nSegments := 0
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: obj1.Location().Bucket(),
BatchSize: 2,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
nSegments += len(segments)

return nil
},
},
Deleted: 1,
}.Check(ctx, t, db)

require.Equal(t, 37, nSegments)

metabasetest.Verify{}.Check(ctx, t, db)
})

Expand All @@ -269,20 +228,14 @@ func TestDeleteBucketObjects(t *testing.T) {
metabasetest.CreateObject(ctx, t, db, obj, 5)
}

segmentsDeleted := 0
metabasetest.DeleteBucketObjects{
Opts: metabase.DeleteBucketObjects{
Bucket: root.Location().Bucket(),
BatchSize: 1,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
segmentsDeleted += len(segments)
return nil
},
},
Deleted: 5,
}.Check(ctx, t, db)

require.Equal(t, 25, segmentsDeleted)
metabasetest.Verify{}.Check(ctx, t, db)
})
})
Expand Down Expand Up @@ -310,9 +263,6 @@ func TestDeleteBucketObjectsParallel(t *testing.T) {
_, err := db.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
Bucket: root.Location().Bucket(),
BatchSize: 2,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
return nil
},
})
return err
})
Expand All @@ -334,9 +284,6 @@ func TestDeleteBucketObjectsCancel(t *testing.T) {
_, err := db.DeleteBucketObjects(testCtx, metabase.DeleteBucketObjects{
Bucket: object.Location().Bucket(),
BatchSize: 2,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
return nil
},
})
require.Error(t, err)

Expand Down Expand Up @@ -382,9 +329,6 @@ func TestDeleteBucketWithCopies(t *testing.T) {
BucketName: "copy-bucket",
},
BatchSize: 2,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
return nil
},
})
require.NoError(t, err)

Expand Down Expand Up @@ -426,9 +370,6 @@ func TestDeleteBucketWithCopies(t *testing.T) {
BucketName: "original-bucket",
},
BatchSize: 2,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
return nil
},
})
require.NoError(t, err)

Expand Down Expand Up @@ -493,9 +434,6 @@ func TestDeleteBucketWithCopies(t *testing.T) {
BucketName: "bucket2",
},
BatchSize: 2,
DeletePieces: func(ctx context.Context, segments []metabase.DeletedSegmentInfo) error {
return nil
},
})
require.NoError(t, err)

Expand Down
6 changes: 1 addition & 5 deletions satellite/metainfo/endpoint.go
Expand Up @@ -28,7 +28,6 @@ import (
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metabase"
"storj.io/storj/satellite/metainfo/piecedeletion"
"storj.io/storj/satellite/metainfo/pointerverification"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
Expand Down Expand Up @@ -67,7 +66,6 @@ type Endpoint struct {
log *zap.Logger
buckets *buckets.Service
metabase *metabase.DB
deletePieces *piecedeletion.Service
orders *orders.Service
overlay *overlay.Service
attributions attribution.DB
Expand All @@ -88,8 +86,7 @@ type Endpoint struct {

// NewEndpoint creates new metainfo endpoint instance.
func NewEndpoint(log *zap.Logger, buckets *buckets.Service, metabaseDB *metabase.DB,
deletePieces *piecedeletion.Service, orders *orders.Service, cache *overlay.Service,
attributions attribution.DB, peerIdentities overlay.PeerIdentities,
orders *orders.Service, cache *overlay.Service, attributions attribution.DB, peerIdentities overlay.PeerIdentities,
apiKeys APIKeys, projectUsage *accounting.Service, projectLimits *accounting.ProjectLimitCache, projects console.Projects,
satellite signing.Signer, revocations revocation.DB, config Config) (*Endpoint, error) {
// TODO do something with too many params
Expand All @@ -115,7 +112,6 @@ func NewEndpoint(log *zap.Logger, buckets *buckets.Service, metabaseDB *metabase
log: log,
buckets: buckets,
metabase: metabaseDB,
deletePieces: deletePieces,
orders: orders,
overlay: cache,
attributions: attributions,
Expand Down
4 changes: 0 additions & 4 deletions satellite/metainfo/endpoint_bucket.go
Expand Up @@ -289,10 +289,6 @@ func (endpoint *Endpoint) deleteBucketObjects(ctx context.Context, projectID uui
bucketLocation := metabase.BucketLocation{ProjectID: projectID, BucketName: string(bucketName)}
deletedObjects, err := endpoint.metabase.DeleteBucketObjects(ctx, metabase.DeleteBucketObjects{
Bucket: bucketLocation,
DeletePieces: func(ctx context.Context, deleted []metabase.DeletedSegmentInfo) error {
endpoint.deleteSegmentPieces(ctx, deleted)
return nil
},
})

return deletedObjects, Error.Wrap(err)
Expand Down

0 comments on commit 6c08d50

Please sign in to comment.