satellite/metainfo: remove ServerSideCopyDuplicateMetadata
#5891

Change-Id: Ib5440169107acca6e832c2280e1ad12dfd380f28
mniewrzal authored and Storj Robot committed Aug 8, 2023
1 parent 9550b5f commit 7be8443
Showing 22 changed files with 79 additions and 1,479 deletions.
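In effect, server-side copy now always duplicates segment metadata (including the remote pieces list) into the new object, so the ServerSideCopyDuplicateMetadata config flag, the DuplicateMetadata option, and the segment_copies ancestor bookkeeping all go away. A toy model of the surviving strategy, with hypothetical names rather than satellite code, may help while reading the diffs below:

package example

// segment is a toy stand-in for segment metadata: which pieces hold it.
type segment struct {
	rootPieceID string
	aliasPieces []int32
}

// object is a toy stand-in for object metadata.
type object struct {
	streamID string
	segments []segment
}

// copyDuplicatingMetadata models the strategy that remains after this
// commit: the copy receives its own full piece lists, so nothing ever
// needs to look up an ancestor through a segment_copies-style table.
func copyDuplicatingMetadata(src object, newStreamID string) object {
	dst := object{streamID: newStreamID, segments: make([]segment, len(src.segments))}
	copy(dst.segments, src.segments)
	return dst
}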
2 changes: 0 additions & 2 deletions satellite/accounting/projectusage_test.go
@@ -1161,8 +1161,6 @@ func TestProjectBandwidthUsageWithCopies(t *testing.T) {
 				// this effectively disable live accounting cache
 				config.LiveAccounting.BandwidthCacheTTL = -1
 				config.LiveAccounting.AsOfSystemInterval = 0
-
-				config.Metainfo.ServerSideCopyDuplicateMetadata = true
 			},
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
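The test-file changes below are all the same mechanical cleanup: Reconfigure hooks that existed only to set the flag are deleted, and hooks that combined it with other overrides collapse to the remaining override. A self-contained sketch of the post-commit pattern, using the testplanet API exactly as it appears in this diff:

package example_test

import (
	"testing"

	"storj.io/common/testcontext"
	"storj.io/storj/private/testplanet"
)

// After this commit a copy test needs no satellite override at all for
// duplicate metadata; only genuinely test-specific overrides remain,
// e.g. the redundancy scheme:
func TestCopyWithoutFlag(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// exercise server-side copy here; metadata duplication is implicit
	})
}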
7 changes: 0 additions & 7 deletions satellite/audit/audit_test.go
@@ -10,15 +10,13 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
 
 	"storj.io/common/memory"
 	"storj.io/common/pb"
 	"storj.io/common/storj"
 	"storj.io/common/testcontext"
 	"storj.io/common/testrand"
 	"storj.io/storj/private/testplanet"
-	"storj.io/storj/satellite"
 	"storj.io/storj/satellite/accounting"
 	"storj.io/storj/satellite/audit"
 )
@@ -84,11 +82,6 @@ func TestAuditOrderLimit(t *testing.T) {
 func TestAuditSkipsRemoteCopies(t *testing.T) {
 	testWithRangedLoop(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
-		Reconfigure: testplanet.Reconfigure{
-			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-				config.Metainfo.ServerSideCopyDuplicateMetadata = true
-			},
-		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet, pauseQueueing pauseQueueingFunc, runQueueingOnce runQueueingOnceFunc) {
 		satellite := planet.Satellites[0]
 		audits := satellite.Audit
8 changes: 1 addition & 7 deletions satellite/gc/gc_test.go
@@ -22,7 +22,6 @@ import (
 	"storj.io/common/testcontext"
 	"storj.io/common/testrand"
 	"storj.io/storj/private/testplanet"
-	"storj.io/storj/satellite"
 	"storj.io/storj/satellite/gc/bloomfilter"
 	"storj.io/storj/satellite/metabase"
 	"storj.io/storj/satellite/metabase/rangedloop"
@@ -306,12 +305,7 @@ func TestGarbageCollectionWithCopiesWithDuplicateMetadata(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
-			Satellite: testplanet.Combine(
-				testplanet.ReconfigureRS(2, 3, 4, 4),
-				func(log *zap.Logger, index int, config *satellite.Config) {
-					config.Metainfo.ServerSideCopyDuplicateMetadata = true
-				},
-			),
+			Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		satellite := planet.Satellites[0]
8 changes: 1 addition & 7 deletions satellite/gracefulexit/gracefulexit_test.go
@@ -11,7 +11,6 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
 	"golang.org/x/exp/slices"
 
 	"storj.io/common/memory"
@@ -235,12 +234,7 @@ func TestGracefulExit_CopiedObjects(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
 		Reconfigure: testplanet.Reconfigure{
-			Satellite: testplanet.Combine(
-				testplanet.ReconfigureRS(2, 3, 4, 4),
-				func(log *zap.Logger, index int, config *satellite.Config) {
-					config.Metainfo.ServerSideCopyDuplicateMetadata = true
-				},
-			),
+			Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
 		},
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
125 changes: 24 additions & 101 deletions satellite/metabase/copy_object.go
@@ -52,10 +52,6 @@ type FinishCopyObject struct {
 
 	NewSegmentKeys []EncryptedKeyAndNonce
 
-	// If set, copy the object by duplicating the metadata and
-	// remote_alias_pieces list, rather than using segment_copies.
-	DuplicateMetadata bool
-
 	// VerifyLimits holds a callback by which the caller can interrupt the copy
 	// if it turns out completing the copy would exceed a limit.
 	// It will be called only once.
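After the deletion the options struct carries no mode switch. A trimmed sketch of the remaining shape, limited to the members visible in this diff (the VerifyLimits signature and everything omitted are assumptions, not the full satellite type):

// Trimmed sketch of FinishCopyObject after this commit.
type FinishCopyObject struct {
	NewSegmentKeys []EncryptedKeyAndNonce

	// VerifyLimits holds a callback by which the caller can interrupt the copy
	// if it turns out completing the copy would exceed a limit.
	// It will be called only once. (Signature here is an assumption.)
	VerifyLimits func(encryptedObjectSize int64, nSegments int64) error
}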
@@ -156,8 +152,7 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
 
 	redundancySchemes := make([]int64, sourceObject.SegmentCount)
 
-	if opts.DuplicateMetadata {
-		err = withRows(db.db.QueryContext(ctx, `
+	err = withRows(db.db.QueryContext(ctx, `
 			SELECT
 				position,
 				expires_at,
@@ -172,75 +167,35 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
 			ORDER BY position ASC
 			LIMIT $2
 		`, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
-			index := 0
-			for rows.Next() {
-				err := rows.Scan(
-					&positions[index],
-					&expiresAts[index],
-					&rootPieceIDs[index],
-					&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
-					&redundancySchemes[index],
-					&remoteAliasPiecesLists[index],
-					&placementConstraints[index],
-					&inlineDatas[index],
-				)
-				if err != nil {
-					return err
-				}
-				index++
-			}
-
-			if err := rows.Err(); err != nil {
-				return err
-			}
-
-			if index != int(sourceObject.SegmentCount) {
-				return Error.New("could not load all of the segment information")
-			}
-
-			return nil
-		})
-	} else {
-		err = withRows(db.db.QueryContext(ctx, `
-			SELECT
-				position,
-				expires_at,
-				root_piece_id,
-				encrypted_size, plain_offset, plain_size,
-				redundancy,
-				inline_data
-			FROM segments
-			WHERE stream_id = $1
-			ORDER BY position ASC
-			LIMIT $2
-		`, sourceObject.StreamID, sourceObject.SegmentCount))(func(rows tagsql.Rows) error {
-			index := 0
-			for rows.Next() {
-				err := rows.Scan(
-					&positions[index],
-					&expiresAts[index],
-					&rootPieceIDs[index],
-					&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
-					&redundancySchemes[index],
-					&inlineDatas[index],
-				)
-				if err != nil {
-					return err
-				}
-				index++
-			}
-
-			if err := rows.Err(); err != nil {
-				return err
-			}
-
-			if index != int(sourceObject.SegmentCount) {
-				return Error.New("could not load all of the segment information")
-			}
-
-			return nil
-		})
-	}
+		index := 0
+		for rows.Next() {
+			err := rows.Scan(
+				&positions[index],
+				&expiresAts[index],
+				&rootPieceIDs[index],
+				&encryptedSizes[index], &plainOffsets[index], &plainSizes[index],
+				&redundancySchemes[index],
+				&remoteAliasPiecesLists[index],
+				&placementConstraints[index],
+				&inlineDatas[index],
+			)
+			if err != nil {
+				return err
+			}
+			index++
+		}
+		if err := rows.Err(); err != nil {
+			return err
+		}
+
+		if index != int(sourceObject.SegmentCount) {
+			return Error.New("could not load all of the segment information")
+		}
+
+		return nil
+	})
 	if err != nil {
 		return Error.New("unable to copy object: %w", err)
 	}
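With the flag gone there is a single query and a single scan loop. The loop is a common bulk-load pattern: scan each row into preallocated slices, check rows.Err, then verify the row count. A self-contained illustration using plain database/sql rather than the satellite's tagsql wrapper (table and column names here exist only for the example):

package example

import (
	"context"
	"database/sql"
	"fmt"
)

// loadSegmentSizes is a stand-alone illustration of the pattern used in
// FinishCopyObject: query up to count rows, scan each into a
// preallocated slice, then verify that exactly count rows arrived.
func loadSegmentSizes(ctx context.Context, db *sql.DB, streamID []byte, count int) ([]int64, error) {
	sizes := make([]int64, count)

	rows, err := db.QueryContext(ctx, `
		SELECT encrypted_size
		FROM segments
		WHERE stream_id = $1
		ORDER BY position ASC
		LIMIT $2
	`, streamID, count)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	index := 0
	for rows.Next() {
		if err := rows.Scan(&sizes[index]); err != nil {
			return nil, err
		}
		index++
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// Fewer rows than expected means the object changed underneath us.
	if index != count {
		return nil, fmt.Errorf("could not load all of the segment information")
	}
	return sizes, nil
}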
@@ -351,19 +306,6 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
 			return nil
 		}
 
-		if !opts.DuplicateMetadata {
-			_, err = tx.ExecContext(ctx, `
-				INSERT INTO segment_copies (
-					stream_id, ancestor_stream_id
-				) VALUES (
-					$1, $2
-				)
-			`, opts.NewStreamID, ancestorStreamID)
-			if err != nil {
-				return Error.New("unable to copy object: %w", err)
-			}
-		}
-
 		return nil
 	})

@@ -387,15 +329,13 @@ func (db *DB) FinishCopyObject(ctx context.Context, opts FinishCopyObject) (obje
 
 // Fetch the following in a single query:
 // - object at copy source location (error if it's not there)
-// - source ancestor stream id (if any)
 // - next version available
 // - object at copy destination location (if any).
 func getObjectAtCopySourceAndDestination(
 	ctx context.Context, tx tagsql.Tx, opts FinishCopyObject,
 ) (sourceObject Object, ancestorStreamID uuid.UUID, destinationObject *Object, nextAvailableVersion Version, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	var ancestorStreamIDBytes []byte
 	var highestVersion Version
 
 	sourceObject.ProjectID = opts.ProjectID
@@ -422,11 +362,9 @@ func getObjectAtCopySourceAndDestination(
 			encrypted_metadata,
 			total_plain_size, total_encrypted_size, fixed_segment_size,
 			encryption,
-			segment_copies.ancestor_stream_id,
 			0,
 			coalesce((SELECT max(version) FROM destination_current_versions),0) AS highest_version
 		FROM objects
-		LEFT JOIN segment_copies ON objects.stream_id = segment_copies.stream_id
 		WHERE
 			project_id = $1 AND
 			bucket_name = $3 AND
@@ -441,7 +379,6 @@ func getObjectAtCopySourceAndDestination(
 			NULL,
 			total_plain_size, total_encrypted_size, fixed_segment_size,
 			encryption,
-			NULL,
 			version,
 			(SELECT max(version) FROM destination_current_versions) AS highest_version
 		FROM objects
@@ -472,7 +409,6 @@ func getObjectAtCopySourceAndDestination(
 		&sourceObject.EncryptedMetadata,
 		&sourceObject.TotalPlainSize, &sourceObject.TotalEncryptedSize, &sourceObject.FixedSegmentSize,
 		encryptionParameters{&sourceObject.Encryption},
-		&ancestorStreamIDBytes,
 		&highestVersion,
 		&highestVersion,
 	)
@@ -483,19 +419,7 @@ func getObjectAtCopySourceAndDestination(
 		return Object{}, uuid.UUID{}, nil, 0, ErrObjectNotFound.New("object was changed during copy")
 	}
 
-	if len(ancestorStreamIDBytes) != 0 {
-		// Source object already was a copy, the new copy becomes yet another copy of the existing ancestor
-		ancestorStreamID, err = uuid.FromBytes(ancestorStreamIDBytes)
-		if err != nil {
-			return Object{}, uuid.UUID{}, nil, 0, err
-		}
-	} else {
-		// Source object was not a copy, it will now become an ancestor (unless it has only inline segments)
-		ancestorStreamID = sourceObject.StreamID
-	}
-
 	if rows.Next() {
-		var _bogusBytes []byte
 		destinationObject = &Object{}
 		destinationObject.ProjectID = opts.ProjectID
 		destinationObject.BucketName = opts.NewBucket
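The deleted branch above encoded the old ancestor-flattening rule: a copy of a copy pointed at the original ancestor, never at an intermediate copy. For reference, that now-removed rule in isolation, with byte slices standing in for the real uuid types:

// resolveAncestor reproduces the deleted logic as a pure function: if the
// source already had an ancestor it is reused, otherwise the source
// itself becomes the ancestor of the new copy.
func resolveAncestor(sourceStreamID, sourceAncestorStreamID []byte) []byte {
	if len(sourceAncestorStreamID) != 0 {
		return sourceAncestorStreamID // copy of a copy: keep the original ancestor
	}
	return sourceStreamID // source becomes the ancestor
}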
@@ -509,7 +433,6 @@ func getObjectAtCopySourceAndDestination(
 		&destinationObject.EncryptedMetadata,
 		&destinationObject.TotalPlainSize, &destinationObject.TotalEncryptedSize, &destinationObject.FixedSegmentSize,
 		encryptionParameters{&destinationObject.Encryption},
-		&_bogusBytes,
 		&destinationObject.Version,
 		&highestVersion,
 	)
