Skip to content

Commit

Permalink
satellite: prevents uplink from creating a bucket once it exceeds the…
Browse files Browse the repository at this point in the history
… max bucket allocation.

Change-Id: I4b3822ed723c03dbbc0df136b2201027e19ba0cd
  • Loading branch information
jenlij authored and stefanbenten committed Jul 15, 2020
1 parent 62fec25 commit 784a156
Show file tree
Hide file tree
Showing 26 changed files with 1,039 additions and 107 deletions.
23 changes: 23 additions & 0 deletions private/dbutil/pgutil/query.go
Expand Up @@ -168,6 +168,9 @@ func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, er
if err != nil {
return errs.Wrap(err)
}
if isAutogeneratedCockroachIndex(index) {
continue
}
schema.Indexes = append(schema.Indexes, index)
}

Expand Down Expand Up @@ -272,3 +275,23 @@ func parseIndexDefinition(indexdef string) (*dbschema.Index, error) {
Columns: strings.Split(indexDirRemove.Replace(matches[4]), ", "),
}, nil
}

// isAutogeneratedCockroachIndex reports whether index is one that
// CockroachDB created on its own to back a foreign key constraint.
//
// Cockroach adds such an index when it decides no existing index
// already satisfies the relationship. If the suitable indexes are
// added only after the table exists, the auto-generated one does not
// go away — so building a table and its constraints step by step
// yields a different index set than creating everything at once, even
// though the constraints are identical. Our schema comparison wants
// those two paths to produce exactly the same result, so we simply
// disregard these autogenerated indexes.
//
// See above for an important lesson about going in against a database
// when death is on the line.
func isAutogeneratedCockroachIndex(index *dbschema.Index) bool {
	const autoFKMarker = "_auto_index_fk_"
	return strings.Contains(index.Name, autoFKMarker)
}
9 changes: 0 additions & 9 deletions private/dbutil/pgutil/query_test.go
Expand Up @@ -11,7 +11,6 @@ import (
"github.com/stretchr/testify/require"

"storj.io/common/testcontext"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/dbschema"
"storj.io/storj/private/dbutil/pgtest"
"storj.io/storj/private/dbutil/pgutil"
Expand Down Expand Up @@ -94,14 +93,6 @@ func TestQuery(t *testing.T) {
},
}

if db.Implementation == dbutil.Cockroach {
expected.Indexes = append(expected.Indexes, &dbschema.Index{
Name: "names_auto_index_fk_users_a_ref_users",
Table: "names",
Columns: []string{"users_a"},
})
}

expected.Sort()
schema.Sort()
assert.Equal(t, expected, schema)
Expand Down
7 changes: 5 additions & 2 deletions private/testplanet/satellite.go
Expand Up @@ -443,6 +443,11 @@ func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtes
CacheCapacity: 100,
CacheExpiration: 10 * time.Second,
},
ProjectLimits: metainfo.ProjectLimitConfig{
MaxBuckets: 1000,
DefaultMaxUsage: 25 * memory.GB,
DefaultMaxBandwidth: 25 * memory.GB,
},
PieceDeletion: piecedeletion.Config{
MaxConcurrency: 100,

Expand Down Expand Up @@ -519,8 +524,6 @@ func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtes
},
Rollup: rollup.Config{
Interval: defaultInterval,
DefaultMaxUsage: 25 * memory.GB,
DefaultMaxBandwidth: 25 * memory.GB,
DeleteTallies: false,
},
ReportedRollup: reportedrollup.Config{
Expand Down
4 changes: 2 additions & 2 deletions satellite/accounting/projectusage_test.go
Expand Up @@ -40,8 +40,8 @@ func TestProjectUsageStorage(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Rollup.DefaultMaxUsage = 1 * memory.MB
config.Rollup.DefaultMaxBandwidth = 1 * memory.MB
config.Metainfo.ProjectLimits.DefaultMaxUsage = 1 * memory.MB
config.Metainfo.ProjectLimits.DefaultMaxBandwidth = 1 * memory.MB
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
Expand Down
3 changes: 0 additions & 3 deletions satellite/accounting/rollup/rollup.go
Expand Up @@ -9,7 +9,6 @@ import (

"go.uber.org/zap"

"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/sync2"
Expand All @@ -19,8 +18,6 @@ import (
// Config contains configurable values for rollup
type Config struct {
	// Interval controls how often the rollup loop runs.
	Interval time.Duration `help:"how frequently rollup should run" releaseDefault:"24h" devDefault:"120s"`
	// NOTE(review): this commit appears to move the two Default* fields
	// below into metainfo.ProjectLimitConfig (see satellite/metainfo/config.go
	// in the same change) — confirm they are the deleted side of this hunk
	// rather than live fields.
	DefaultMaxUsage memory.Size `help:"the default storage usage limit" releaseDefault:"50GB" devDefault:"200GB"`
	DefaultMaxBandwidth memory.Size `help:"the default bandwidth usage limit" releaseDefault:"50GB" devDefault:"200GB"`
	// DeleteTallies, when true, removes tallies after they are rolled up.
	DeleteTallies bool `help:"option for deleting tallies after they are rolled up" default:"true"`
}

Expand Down
4 changes: 2 additions & 2 deletions satellite/api.go
Expand Up @@ -297,8 +297,8 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Accounting.ProjectUsage = accounting.NewService(
peer.DB.ProjectAccounting(),
peer.LiveAccounting.Cache,
config.Rollup.DefaultMaxUsage,
config.Rollup.DefaultMaxBandwidth,
config.Metainfo.ProjectLimits.DefaultMaxUsage,
config.Metainfo.ProjectLimits.DefaultMaxBandwidth,
config.LiveAccounting.BandwidthCacheTTL,
)
}
Expand Down
7 changes: 5 additions & 2 deletions satellite/console/projects.go
Expand Up @@ -35,6 +35,9 @@ type Projects interface {

// UpdateRateLimit is a method for updating projects rate limit.
UpdateRateLimit(ctx context.Context, id uuid.UUID, newLimit int) error

// GetMaxBuckets is a method to get the maximum number of buckets allowed for the project
GetMaxBuckets(ctx context.Context, id uuid.UUID) (int, error)
}

// Project is a database object that describes Project entity
Expand All @@ -46,8 +49,8 @@ type Project struct {
PartnerID uuid.UUID `json:"partnerId"`
OwnerID uuid.UUID `json:"ownerId"`
RateLimit *int `json:"rateLimit"`

CreatedAt time.Time `json:"createdAt"`
MaxBuckets int `json:"maxBuckets"`
CreatedAt time.Time `json:"createdAt"`
}

// ProjectInfo holds data needed to create/update Project
Expand Down
13 changes: 13 additions & 0 deletions satellite/console/projects_test.go
Expand Up @@ -234,3 +234,16 @@ func TestProjectsList(t *testing.T) {
})))
})
}

// TestGetMaxBuckets verifies that Projects.GetMaxBuckets returns the
// bucket limit that was stored when the project was inserted.
func TestGetMaxBuckets(t *testing.T) {
	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
		expectedLimit := 100
		projects := db.Console().Projects()
		project, err := projects.Insert(ctx, &console.Project{Name: "testproject1", MaxBuckets: expectedLimit})
		require.NoError(t, err)
		limit, err := projects.GetMaxBuckets(ctx, project.ID)
		require.NoError(t, err)
		require.Equal(t, expectedLimit, limit)
	})
}
4 changes: 2 additions & 2 deletions satellite/core.go
Expand Up @@ -247,8 +247,8 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Accounting.ProjectUsage = accounting.NewService(
peer.DB.ProjectAccounting(),
peer.LiveAccounting.Cache,
config.Rollup.DefaultMaxUsage,
config.Rollup.DefaultMaxBandwidth,
config.Metainfo.ProjectLimits.DefaultMaxUsage,
config.Metainfo.ProjectLimits.DefaultMaxBandwidth,
config.LiveAccounting.BandwidthCacheTTL,
)
}
Expand Down
8 changes: 8 additions & 0 deletions satellite/metainfo/config.go
Expand Up @@ -46,6 +46,13 @@ type RateLimiterConfig struct {
CacheExpiration time.Duration `help:"how long to cache the projects limiter." releaseDefault:"10m" devDefault:"10s"`
}

// ProjectLimitConfig is a configuration struct for default project limits
type ProjectLimitConfig struct {
	// MaxBuckets caps how many buckets a single project may create.
	MaxBuckets int `help:"max bucket count for a project." default:"100"`
	// DefaultMaxUsage is passed to accounting.NewService as the default
	// storage usage limit (see satellite/api.go and satellite/core.go).
	DefaultMaxUsage memory.Size `help:"the default storage usage limit" releaseDefault:"50GB" devDefault:"200GB"`
	// DefaultMaxBandwidth is passed to accounting.NewService as the default
	// bandwidth usage limit (see satellite/api.go and satellite/core.go).
	DefaultMaxBandwidth memory.Size `help:"the default bandwidth usage limit" releaseDefault:"50GB" devDefault:"200GB"`
}

// Config is a configuration struct that is everything you need to start a metainfo
type Config struct {
DatabaseURL string `help:"the database connection string to use" default:"postgres://"`
Expand All @@ -58,6 +65,7 @@ type Config struct {
RS RSConfig `help:"redundancy scheme configuration"`
Loop LoopConfig `help:"loop configuration"`
RateLimiter RateLimiterConfig `help:"rate limiter configuration"`
ProjectLimits ProjectLimitConfig `help:"project limit configuration"`
PieceDeletion piecedeletion.Config `help:"piece deletion configuration"`
}

Expand Down
2 changes: 2 additions & 0 deletions satellite/metainfo/db.go
Expand Up @@ -25,4 +25,6 @@ type BucketsDB interface {
DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error)
// List returns all buckets for a project
ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error)
// CountBuckets returns the number of buckets a project currently has
CountBuckets(ctx context.Context, projectID uuid.UUID) (int, error)
}
14 changes: 14 additions & 0 deletions satellite/metainfo/db_test.go
Expand Up @@ -49,6 +49,10 @@ func TestBasicBucketOperations(t *testing.T) {
bucketsDB := db.Buckets()
expectedBucket := newTestBucket("testbucket", project.ID)

count, err := bucketsDB.CountBuckets(ctx, project.ID)
require.NoError(t, err)
require.Equal(t, 0, count)

// CreateBucket
_, err = bucketsDB.CreateBucket(ctx, expectedBucket)
require.NoError(t, err)
Expand All @@ -64,6 +68,16 @@ func TestBasicBucketOperations(t *testing.T) {
require.Equal(t, expectedBucket.DefaultRedundancyScheme, bucket.DefaultRedundancyScheme)
require.Equal(t, expectedBucket.DefaultEncryptionParameters, bucket.DefaultEncryptionParameters)

// CountBuckets
count, err = bucketsDB.CountBuckets(ctx, project.ID)
require.NoError(t, err)
require.Equal(t, 1, count)
_, err = bucketsDB.CreateBucket(ctx, newTestBucket("testbucket2", project.ID))
require.NoError(t, err)
count, err = bucketsDB.CountBuckets(ctx, project.ID)
require.NoError(t, err)
require.Equal(t, 2, count)

// DeleteBucket
err = bucketsDB.DeleteBucket(ctx, []byte("testbucket"), project.ID)
require.NoError(t, err)
Expand Down

0 comments on commit 784a156

Please sign in to comment.