diff --git a/satellite/satellitedb/dbx/project.dbx b/satellite/satellitedb/dbx/project.dbx index 802ad7f5f2e2..fcee37a565f3 100644 --- a/satellite/satellitedb/dbx/project.dbx +++ b/satellite/satellitedb/dbx/project.dbx @@ -225,13 +225,15 @@ model api_key ( // head is restrictions for the api key. field head blob // name helps users to identify the purpose of the api key. - field name text (updatable) + field name text (updatable) // secret is the macaroon secret. field secret blob // user_agent is the referred partner who created the project. - field user_agent blob (nullable) + field user_agent blob (nullable) // created_at indicates when the api key was added. - field created_at timestamp (autoinsert) + field created_at timestamp (autoinsert) + // created_by is the UUID of the user who created this key. + field created_by user.id restrict (nullable) ) create api_key ( ) diff --git a/satellite/satellitedb/dbx/project_bucket.dbx b/satellite/satellitedb/dbx/project_bucket.dbx index a362bbd58f63..14c1faabd62f 100644 --- a/satellite/satellitedb/dbx/project_bucket.dbx +++ b/satellite/satellitedb/dbx/project_bucket.dbx @@ -69,6 +69,9 @@ model bucket_metainfo ( // 5 - Invalid, when there's no information about the placement. // 6 - NR (no Russia, Belarus or other sanctioned country) field placement int (nullable, updatable) + + // created_by is the UUID of the user who created this bucket. + field created_by user.id restrict (nullable) ) create bucket_metainfo () diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.go b/satellite/satellitedb/dbx/satellitedb.dbx.go index ca92db8e3b27..033bfad8f0ad 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.go +++ b/satellite/satellitedb/dbx/satellitedb.dbx.go @@ -836,6 +836,7 @@ CREATE TABLE api_keys ( secret bytea NOT NULL, user_agent bytea, created_at timestamp with time zone NOT NULL, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( id ), UNIQUE ( head ), UNIQUE ( name, project_id ) @@ -858,6 +859,7 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, placement integer, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( project_id, name ) ); CREATE TABLE project_invitations ( @@ -1539,6 +1541,7 @@ CREATE TABLE api_keys ( secret bytea NOT NULL, user_agent bytea, created_at timestamp with time zone NOT NULL, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( id ), UNIQUE ( head ), UNIQUE ( name, project_id ) @@ -1561,6 +1564,7 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, placement integer, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( project_id, name ) ); CREATE TABLE project_invitations ( @@ -11165,12 +11169,14 @@ type ApiKey struct { Secret []byte UserAgent []byte CreatedAt time.Time + CreatedBy []byte } func (ApiKey) _Table() string { return "api_keys" } type ApiKey_Create_Fields struct { UserAgent ApiKey_UserAgent_Field + CreatedBy ApiKey_CreatedBy_Field } type ApiKey_Update_Fields struct { @@ -11323,6 +11329,38 @@ func (f ApiKey_CreatedAt_Field) value() interface{} { func (ApiKey_CreatedAt_Field) _Column() string { return "created_at" } +type ApiKey_CreatedBy_Field struct { + _set bool + _null bool + _value []byte +} + +func ApiKey_CreatedBy(v []byte) ApiKey_CreatedBy_Field { + return ApiKey_CreatedBy_Field{_set: true, _value: v} +} + +func ApiKey_CreatedBy_Raw(v []byte) ApiKey_CreatedBy_Field { + if v == nil { + return ApiKey_CreatedBy_Null() +
} + return ApiKey_CreatedBy(v) +} + +func ApiKey_CreatedBy_Null() ApiKey_CreatedBy_Field { + return ApiKey_CreatedBy_Field{_set: true, _null: true} +} + +func (f ApiKey_CreatedBy_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f ApiKey_CreatedBy_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (ApiKey_CreatedBy_Field) _Column() string { return "created_by" } + type BucketMetainfo struct { Id []byte ProjectId []byte @@ -11341,6 +11379,7 @@ type BucketMetainfo struct { DefaultRedundancyOptimalShares int DefaultRedundancyTotalShares int Placement *int + CreatedBy []byte } func (BucketMetainfo) _Table() string { return "bucket_metainfos" } @@ -11349,6 +11388,7 @@ type BucketMetainfo_Create_Fields struct { UserAgent BucketMetainfo_UserAgent_Field Versioning BucketMetainfo_Versioning_Field Placement BucketMetainfo_Placement_Field + CreatedBy BucketMetainfo_CreatedBy_Field } type BucketMetainfo_Update_Fields struct { @@ -11731,6 +11771,38 @@ func (f BucketMetainfo_Placement_Field) value() interface{} { func (BucketMetainfo_Placement_Field) _Column() string { return "placement" } +type BucketMetainfo_CreatedBy_Field struct { + _set bool + _null bool + _value []byte +} + +func BucketMetainfo_CreatedBy(v []byte) BucketMetainfo_CreatedBy_Field { + return BucketMetainfo_CreatedBy_Field{_set: true, _value: v} +} + +func BucketMetainfo_CreatedBy_Raw(v []byte) BucketMetainfo_CreatedBy_Field { + if v == nil { + return BucketMetainfo_CreatedBy_Null() + } + return BucketMetainfo_CreatedBy(v) +} + +func BucketMetainfo_CreatedBy_Null() BucketMetainfo_CreatedBy_Field { + return BucketMetainfo_CreatedBy_Field{_set: true, _null: true} +} + +func (f BucketMetainfo_CreatedBy_Field) isnull() bool { return !f._set || f._null || f._value == nil } + +func (f BucketMetainfo_CreatedBy_Field) value() interface{} { + if !f._set || f._null { + return nil + } + return f._value +} + +func (BucketMetainfo_CreatedBy_Field) _Column() string { return "created_by" } + type ProjectInvitation struct { ProjectId []byte Email string @@ -13508,17 +13580,18 @@ func (obj *pgxImpl) Create_ApiKey(ctx context.Context, __secret_val := api_key_secret.value() __user_agent_val := optional.UserAgent.value() __created_at_val := __now + __created_by_val := optional.CreatedBy.value() - var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, user_agent, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, user_agent, created_at, created_by ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by") var __values []interface{} - __values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __user_agent_val, __created_at_val) + __values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __user_agent_val, __created_at_val, __created_by_val) var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, __values...) 
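Reviewer note: a minimal sketch of how a caller would thread the new column through the generated create call. The exact Create_ApiKey parameter list is truncated in the hunk above, so the required arguments below are reconstructed from the __id_val..__secret_val assignments and are an assumption; only ApiKey_Create_Fields.CreatedBy is confirmed by this diff. Assumes the generated package is imported as dbx and UUIDs come from storj.io/common/uuid.

```go
// Sketch only: populate created_by at creation time. The optional
// struct is the confirmed part of this call; the required field
// wrappers mirror the value assignments visible in the hunk above.
func createAPIKeyWithCreator(ctx context.Context, db *dbx.DB,
	keyID, projectID, creator uuid.UUID,
	head, secret []byte, name string) (*dbx.ApiKey, error) {
	return db.Create_ApiKey(ctx,
		dbx.ApiKey_Id(keyID[:]),
		dbx.ApiKey_ProjectId(projectID[:]),
		dbx.ApiKey_Head(head),
		dbx.ApiKey_Name(name),
		dbx.ApiKey_Secret(secret),
		dbx.ApiKey_Create_Fields{
			// stored as bytea; the column stays NULL when unset
			CreatedBy: dbx.ApiKey_CreatedBy(creator[:]),
		},
	)
}
```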
api_key = &ApiKey{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.UserAgent, &api_key.CreatedAt) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.UserAgent, &api_key.CreatedAt, &api_key.CreatedBy) if err != nil { return nil, obj.makeErr(err) } @@ -13561,15 +13634,16 @@ func (obj *pgxImpl) Create_BucketMetainfo(ctx context.Context, __default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value() __default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value() __placement_val := optional.Placement.value() + __created_by_val := optional.CreatedBy.value() - var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, project_id, name, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement")} - var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} + var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, project_id, name, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement, created_by")} + var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO bucket_metainfos "), __clause, __sqlbundle_Literal(" RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO bucket_metainfos "), __clause, __sqlbundle_Literal(" RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, 
bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by")}} var __values []interface{} - __values = append(__values, __id_val, __project_id_val, __name_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val) + __values = append(__values, __id_val, __project_id_val, __name_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val, __created_by_val) __optional_columns := __sqlbundle_Literals{Join: ", "} __optional_placeholders := __sqlbundle_Literals{Join: ", "} @@ -13592,7 +13666,7 @@ func (obj *pgxImpl) Create_BucketMetainfo(ctx context.Context, obj.logStmt(__stmt, __values...) bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return nil, obj.makeErr(err) } @@ -16736,7 +16810,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx context.Context row *ApiKey_Project_PublicId_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, 
api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?") var __values []interface{} __values = append(__values, api_key_id.value()) @@ -16745,7 +16819,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx context.Context obj.logStmt(__stmt, __values...) row = &ApiKey_Project_PublicId_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.ApiKey.CreatedBy, &row.Project_PublicId) if err != nil { return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err) } @@ -16758,7 +16832,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_Project_RateLimit_Project_BurstL row *ApiKey_Project_PublicId_Project_RateLimit_Project_BurstLimit_Project_SegmentLimit_Project_UsageLimit_Project_BandwidthLimit_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id, projects.rate_limit, projects.burst_limit, projects.segment_limit, projects.usage_limit, projects.bandwidth_limit FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by, projects.public_id, projects.rate_limit, projects.burst_limit, projects.segment_limit, projects.usage_limit, projects.bandwidth_limit FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?") var __values []interface{} __values = append(__values, api_key_head.value()) @@ -16767,7 +16841,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_Project_RateLimit_Project_BurstL obj.logStmt(__stmt, __values...) 
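Aside on the wrappers generated above: a nullable dbx field has three observable states, and ApiKey_CreatedBy_Raw collapses nil into the NULL state. A small illustration, assuming the same dbx and uuid imports as before:

```go
// All but the last entry bind NULL in the INSERT, because value()
// returns nil unless the field is both set and non-null.
func createdByStates(creator uuid.UUID) []dbx.ApiKey_CreatedBy_Field {
	return []dbx.ApiKey_CreatedBy_Field{
		{},                               // zero value: unset, binds NULL
		dbx.ApiKey_CreatedBy_Null(),      // explicitly NULL
		dbx.ApiKey_CreatedBy_Raw(nil),    // nil is routed to _Null()
		dbx.ApiKey_CreatedBy(creator[:]), // binds the 16-byte UUID
	}
}
```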
row = &ApiKey_Project_PublicId_Project_RateLimit_Project_BurstLimit_Project_SegmentLimit_Project_UsageLimit_Project_BandwidthLimit_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId, &row.Project_RateLimit, &row.Project_BurstLimit, &row.Project_SegmentLimit, &row.Project_UsageLimit, &row.Project_BandwidthLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.ApiKey.CreatedBy, &row.Project_PublicId, &row.Project_RateLimit, &row.Project_BurstLimit, &row.Project_SegmentLimit, &row.Project_UsageLimit, &row.Project_BandwidthLimit) if err != nil { return (*ApiKey_Project_PublicId_Project_RateLimit_Project_BurstLimit_Project_SegmentLimit_Project_UsageLimit_Project_BandwidthLimit_Row)(nil), obj.makeErr(err) } @@ -16781,7 +16855,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiKey_Projec row *ApiKey_Project_PublicId_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?") var __values []interface{} __values = append(__values, api_key_name.value(), api_key_project_id.value()) @@ -16790,7 +16864,7 @@ func (obj *pgxImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiKey_Projec obj.logStmt(__stmt, __values...) 
row = &ApiKey_Project_PublicId_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.ApiKey.CreatedBy, &row.Project_PublicId) if err != nil { return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err) } @@ -16804,7 +16878,7 @@ func (obj *pgxImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context bucket_metainfo *BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) @@ -16813,7 +16887,7 @@ func (obj *pgxImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx context.Context obj.logStmt(__stmt, __values...) 
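On the read side, created_by is nullable, so the generated ApiKey.CreatedBy and BucketMetainfo.CreatedBy are []byte values that remain nil for rows created before this column existed. A hypothetical conversion helper, not part of this patch, assuming storj.io/common/uuid:

```go
// createdByUUID converts the scanned bytea into a *uuid.UUID,
// preserving the distinction between "no creator recorded" (nil)
// and a malformed stored value (error).
func createdByUUID(raw []byte) (*uuid.UUID, error) {
	if raw == nil {
		return nil, nil // legacy row: creator unknown
	}
	id, err := uuid.FromBytes(raw)
	if err != nil {
		return nil, err
	}
	return &id, nil
}
```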
bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return (*BucketMetainfo)(nil), obj.makeErr(err) } @@ -16942,7 +17016,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_ rows []*BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value()) @@ -16962,7 +17036,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_GreaterOrEqual_ for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return nil, err } @@ -16992,7 +17066,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy rows []*BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value()) @@ -17012,7 +17086,7 @@ func (obj *pgxImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greater_OrderBy for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return nil, err } @@ -19438,7 +19512,7 @@ func (obj *pgxImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Cont defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? 
RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -19517,7 +19591,7 @@ func (obj *pgxImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx context.Cont obj.logStmt(__stmt, __values...) bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err == sql.ErrNoRows { return nil, nil } @@ -19536,7 +19610,7 @@ func (obj *pgxImpl) Update_BucketMetainfo_By_ProjectId_And_Name_And_Versioning_G defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? AND bucket_metainfos.versioning >= ? 
RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? AND bucket_metainfos.versioning >= ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -19615,7 +19689,7 @@ func (obj *pgxImpl) Update_BucketMetainfo_By_ProjectId_And_Name_And_Versioning_G obj.logStmt(__stmt, __values...) 
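Worth calling out amid the generated churn: created_by is not declared updatable, so the Update_* methods here change only through their widened RETURNING lists; the ApiKey_Update_Fields and BucketMetainfo_Update_Fields structs are untouched. The column is effectively write-once, populated only via the create path, e.g.:

```go
// The only way to set created_by is through the *_Create_Fields
// structs; there is no CreatedBy member in *_Update_Fields.
func bucketCreateFields(creator uuid.UUID) dbx.BucketMetainfo_Create_Fields {
	return dbx.BucketMetainfo_Create_Fields{
		CreatedBy: dbx.BucketMetainfo_CreatedBy(creator[:]),
	}
}
```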
bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err == sql.ErrNoRows { return nil, nil } @@ -22136,17 +22210,18 @@ func (obj *pgxcockroachImpl) Create_ApiKey(ctx context.Context, __secret_val := api_key_secret.value() __user_agent_val := optional.UserAgent.value() __created_at_val := __now + __created_by_val := optional.CreatedBy.value() - var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, user_agent, created_at ) VALUES ( ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at") + var __embed_stmt = __sqlbundle_Literal("INSERT INTO api_keys ( id, project_id, head, name, secret, user_agent, created_at, created_by ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) RETURNING api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by") var __values []interface{} - __values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __user_agent_val, __created_at_val) + __values = append(__values, __id_val, __project_id_val, __head_val, __name_val, __secret_val, __user_agent_val, __created_at_val, __created_by_val) var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) obj.logStmt(__stmt, __values...) 
api_key = &ApiKey{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.UserAgent, &api_key.CreatedAt) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&api_key.Id, &api_key.ProjectId, &api_key.Head, &api_key.Name, &api_key.Secret, &api_key.UserAgent, &api_key.CreatedAt, &api_key.CreatedBy) if err != nil { return nil, obj.makeErr(err) } @@ -22189,15 +22264,16 @@ func (obj *pgxcockroachImpl) Create_BucketMetainfo(ctx context.Context, __default_redundancy_optimal_shares_val := bucket_metainfo_default_redundancy_optimal_shares.value() __default_redundancy_total_shares_val := bucket_metainfo_default_redundancy_total_shares.value() __placement_val := optional.Placement.value() + __created_by_val := optional.CreatedBy.value() - var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, project_id, name, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement")} - var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} + var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, project_id, name, user_agent, path_cipher, created_at, default_segment_size, default_encryption_cipher_suite, default_encryption_block_size, default_redundancy_algorithm, default_redundancy_share_size, default_redundancy_required_shares, default_redundancy_repair_shares, default_redundancy_optimal_shares, default_redundancy_total_shares, placement, created_by")} + var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO bucket_metainfos "), __clause, __sqlbundle_Literal(" RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO bucket_metainfos "), __clause, __sqlbundle_Literal(" RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, 
bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by")}} var __values []interface{} - __values = append(__values, __id_val, __project_id_val, __name_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val) + __values = append(__values, __id_val, __project_id_val, __name_val, __user_agent_val, __path_cipher_val, __created_at_val, __default_segment_size_val, __default_encryption_cipher_suite_val, __default_encryption_block_size_val, __default_redundancy_algorithm_val, __default_redundancy_share_size_val, __default_redundancy_required_shares_val, __default_redundancy_repair_shares_val, __default_redundancy_optimal_shares_val, __default_redundancy_total_shares_val, __placement_val, __created_by_val) __optional_columns := __sqlbundle_Literals{Join: ", "} __optional_placeholders := __sqlbundle_Literals{Join: ", "} @@ -22220,7 +22296,7 @@ func (obj *pgxcockroachImpl) Create_BucketMetainfo(ctx context.Context, obj.logStmt(__stmt, __values...) bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return nil, obj.makeErr(err) } @@ -25364,7 +25440,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx contex row *ApiKey_Project_PublicId_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?") + var __embed_stmt = 
__sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.id = ?") var __values []interface{} __values = append(__values, api_key_id.value()) @@ -25373,7 +25449,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Id(ctx contex obj.logStmt(__stmt, __values...) row = &ApiKey_Project_PublicId_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.ApiKey.CreatedBy, &row.Project_PublicId) if err != nil { return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err) } @@ -25386,7 +25462,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_Project_RateLimit_Proje row *ApiKey_Project_PublicId_Project_RateLimit_Project_BurstLimit_Project_SegmentLimit_Project_UsageLimit_Project_BandwidthLimit_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id, projects.rate_limit, projects.burst_limit, projects.segment_limit, projects.usage_limit, projects.bandwidth_limit FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by, projects.public_id, projects.rate_limit, projects.burst_limit, projects.segment_limit, projects.usage_limit, projects.bandwidth_limit FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.head = ?") var __values []interface{} __values = append(__values, api_key_head.value()) @@ -25395,7 +25471,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_Project_RateLimit_Proje obj.logStmt(__stmt, __values...) 
row = &ApiKey_Project_PublicId_Project_RateLimit_Project_BurstLimit_Project_SegmentLimit_Project_UsageLimit_Project_BandwidthLimit_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId, &row.Project_RateLimit, &row.Project_BurstLimit, &row.Project_SegmentLimit, &row.Project_UsageLimit, &row.Project_BandwidthLimit) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.ApiKey.CreatedBy, &row.Project_PublicId, &row.Project_RateLimit, &row.Project_BurstLimit, &row.Project_SegmentLimit, &row.Project_UsageLimit, &row.Project_BandwidthLimit) if err != nil { return (*ApiKey_Project_PublicId_Project_RateLimit_Project_BurstLimit_Project_SegmentLimit_Project_UsageLimit_Project_BandwidthLimit_Row)(nil), obj.makeErr(err) } @@ -25409,7 +25485,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiK row *ApiKey_Project_PublicId_Row, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT api_keys.id, api_keys.project_id, api_keys.head, api_keys.name, api_keys.secret, api_keys.user_agent, api_keys.created_at, api_keys.created_by, projects.public_id FROM projects JOIN api_keys ON projects.id = api_keys.project_id WHERE api_keys.name = ? AND api_keys.project_id = ?") var __values []interface{} __values = append(__values, api_key_name.value(), api_key_project_id.value()) @@ -25418,7 +25494,7 @@ func (obj *pgxcockroachImpl) Get_ApiKey_Project_PublicId_By_ApiKey_Name_And_ApiK obj.logStmt(__stmt, __values...) 
row = &ApiKey_Project_PublicId_Row{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.Project_PublicId) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.ApiKey.Id, &row.ApiKey.ProjectId, &row.ApiKey.Head, &row.ApiKey.Name, &row.ApiKey.Secret, &row.ApiKey.UserAgent, &row.ApiKey.CreatedAt, &row.ApiKey.CreatedBy, &row.Project_PublicId) if err != nil { return (*ApiKey_Project_PublicId_Row)(nil), obj.makeErr(err) } @@ -25432,7 +25508,7 @@ func (obj *pgxcockroachImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx contex bucket_metainfo *BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name.value()) @@ -25441,7 +25517,7 @@ func (obj *pgxcockroachImpl) Get_BucketMetainfo_By_ProjectId_And_Name(ctx contex obj.logStmt(__stmt, __values...) 
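Since the bucket listing queries below now also return created_by, callers iterating these rows must tolerate nil. A hedged consumption sketch; the helper name and the aggregation are illustrative only:

```go
// bucketCreators counts buckets per creator; buckets that predate the
// new column have CreatedBy == nil and are skipped.
func bucketCreators(rows []*dbx.BucketMetainfo) map[uuid.UUID]int {
	counts := make(map[uuid.UUID]int)
	for _, b := range rows {
		if b.CreatedBy == nil {
			continue // legacy bucket: creator unknown
		}
		id, err := uuid.FromBytes(b.CreatedBy)
		if err != nil {
			continue // defensive: unexpected value length
		}
		counts[id]++
	}
	return counts
}
```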
bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return (*BucketMetainfo)(nil), obj.makeErr(err) } @@ -25570,7 +25646,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate rows []*BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name >= ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater_or_equal.value()) @@ -25590,7 +25666,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return nil, err } @@ -25620,7 +25696,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate rows []*BucketMetainfo, err error) { defer mon.Task()(&ctx)(&err) - var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? OFFSET ?") + var __embed_stmt = __sqlbundle_Literal("SELECT bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by FROM bucket_metainfos WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name > ? ORDER BY bucket_metainfos.name LIMIT ? 
OFFSET ?") var __values []interface{} __values = append(__values, bucket_metainfo_project_id.value(), bucket_metainfo_name_greater.value()) @@ -25640,7 +25716,7 @@ func (obj *pgxcockroachImpl) Limited_BucketMetainfo_By_ProjectId_And_Name_Greate for __rows.Next() { bucket_metainfo := &BucketMetainfo{} - err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = __rows.Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err != nil { return nil, err } @@ -28066,7 +28142,7 @@ func (obj *pgxcockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx con defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? 
RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -28145,7 +28221,7 @@ func (obj *pgxcockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name(ctx con obj.logStmt(__stmt, __values...) bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err == sql.ErrNoRows { return nil, nil } @@ -28164,7 +28240,7 @@ func (obj *pgxcockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name_And_Ver defer mon.Task()(&ctx)(&err) var __sets = &__sqlbundle_Hole{} - var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? AND bucket_metainfos.versioning >= ? 
RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement")}} + var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE bucket_metainfos SET "), __sets, __sqlbundle_Literal(" WHERE bucket_metainfos.project_id = ? AND bucket_metainfos.name = ? AND bucket_metainfos.versioning >= ? RETURNING bucket_metainfos.id, bucket_metainfos.project_id, bucket_metainfos.name, bucket_metainfos.user_agent, bucket_metainfos.versioning, bucket_metainfos.path_cipher, bucket_metainfos.created_at, bucket_metainfos.default_segment_size, bucket_metainfos.default_encryption_cipher_suite, bucket_metainfos.default_encryption_block_size, bucket_metainfos.default_redundancy_algorithm, bucket_metainfos.default_redundancy_share_size, bucket_metainfos.default_redundancy_required_shares, bucket_metainfos.default_redundancy_repair_shares, bucket_metainfos.default_redundancy_optimal_shares, bucket_metainfos.default_redundancy_total_shares, bucket_metainfos.placement, bucket_metainfos.created_by")}} __sets_sql := __sqlbundle_Literals{Join: ", "} var __values []interface{} @@ -28243,7 +28319,7 @@ func (obj *pgxcockroachImpl) Update_BucketMetainfo_By_ProjectId_And_Name_And_Ver obj.logStmt(__stmt, __values...) 
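// The RETURNING clause above now ends with bucket_metainfos.created_by, so the
// generated Scan below must list &bucket_metainfo.CreatedBy last to keep the
// column order and the destination order aligned; every SELECT/RETURNING and
// Scan pair touched by this diff follows the same rule. CreatedBy is a plain
// []byte for the nullable blob field, and database/sql maps a SQL NULL into a
// nil []byte, so buckets and api keys created before migration 264 scan
// cleanly as CreatedBy == nil rather than erroring. A caller-side sketch,
// assuming storj.io/common/uuid (illustrative only, not part of this diff):
//
//	if bucket_metainfo.CreatedBy != nil {
//		createdBy, err := uuid.FromBytes(bucket_metainfo.CreatedBy)
//		if err == nil {
//			// attribute the bucket to the user with id createdBy
//			_ = createdBy
//		}
//	}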
bucket_metainfo = &BucketMetainfo{} - err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement) + err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&bucket_metainfo.Id, &bucket_metainfo.ProjectId, &bucket_metainfo.Name, &bucket_metainfo.UserAgent, &bucket_metainfo.Versioning, &bucket_metainfo.PathCipher, &bucket_metainfo.CreatedAt, &bucket_metainfo.DefaultSegmentSize, &bucket_metainfo.DefaultEncryptionCipherSuite, &bucket_metainfo.DefaultEncryptionBlockSize, &bucket_metainfo.DefaultRedundancyAlgorithm, &bucket_metainfo.DefaultRedundancyShareSize, &bucket_metainfo.DefaultRedundancyRequiredShares, &bucket_metainfo.DefaultRedundancyRepairShares, &bucket_metainfo.DefaultRedundancyOptimalShares, &bucket_metainfo.DefaultRedundancyTotalShares, &bucket_metainfo.Placement, &bucket_metainfo.CreatedBy) if err == sql.ErrNoRows { return nil, nil } diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql index d3889cf9a0ae..7fd29dc1004d 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgx.sql @@ -523,6 +523,7 @@ CREATE TABLE api_keys ( secret bytea NOT NULL, user_agent bytea, created_at timestamp with time zone NOT NULL, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( id ), UNIQUE ( head ), UNIQUE ( name, project_id ) @@ -545,6 +546,7 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, placement integer, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( project_id, name ) ); CREATE TABLE project_invitations ( diff --git a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql index d3889cf9a0ae..7fd29dc1004d 100644 --- a/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql +++ b/satellite/satellitedb/dbx/satellitedb.dbx.pgxcockroach.sql @@ -523,6 +523,7 @@ CREATE TABLE api_keys ( secret bytea NOT NULL, user_agent bytea, created_at timestamp with time zone NOT NULL, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( id ), UNIQUE ( head ), UNIQUE ( name, project_id ) @@ -545,6 +546,7 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, placement integer, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( project_id, name ) ); CREATE TABLE project_invitations ( diff --git a/satellite/satellitedb/migrate.go b/satellite/satellitedb/migrate.go index 037d472c3417..ecb6acaa456a 100644 --- a/satellite/satellitedb/migrate.go +++ b/satellite/satellitedb/migrate.go @@ -2669,6 +2669,15 @@ func (db *satelliteDB) ProductionMigration() *migrate.Migration { `ALTER TABLE stripe_customers ADD COLUMN billing_customer_id TEXT;`, }, }, + { + DB: &db.migrationDB, + Description: "add created_by column to api_keys and 
bucket_metainfos to be populated with a user id value", + Version: 264, + Action: migrate.SQL{ + `ALTER TABLE api_keys ADD COLUMN created_by bytea REFERENCES users( id ) DEFAULT NULL;`, + `ALTER TABLE bucket_metainfos ADD COLUMN created_by bytea REFERENCES users( id ) DEFAULT NULL;`, + }, + }, // NB: after updating testdata in `testdata`, run // `go generate` to update `migratez.go`. }, diff --git a/satellite/satellitedb/migratez.go b/satellite/satellitedb/migratez.go index e96006ec450c..bf7b2b6d3e8f 100755 --- a/satellite/satellitedb/migratez.go +++ b/satellite/satellitedb/migratez.go @@ -13,7 +13,7 @@ func (db *satelliteDB) testMigration() *migrate.Migration { { DB: &db.migrationDB, Description: "Testing setup", - Version: 263, + Version: 264, Action: migrate.SQL{`-- AUTOGENERATED BY storj.io/dbx -- DO NOT EDIT CREATE TABLE account_freeze_events ( @@ -539,6 +539,7 @@ CREATE TABLE api_keys ( secret bytea NOT NULL, user_agent bytea, created_at timestamp with time zone NOT NULL, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( id ), UNIQUE ( head ), UNIQUE ( name, project_id ) @@ -561,6 +562,7 @@ CREATE TABLE bucket_metainfos ( default_redundancy_optimal_shares integer NOT NULL, default_redundancy_total_shares integer NOT NULL, placement integer, + created_by bytea REFERENCES users( id ), PRIMARY KEY ( project_id, name ) ); CREATE TABLE project_invitations ( diff --git a/satellite/satellitedb/testdata/postgres.v264.sql b/satellite/satellitedb/testdata/postgres.v264.sql new file mode 100644 index 000000000000..4ed1fb4accbd --- /dev/null +++ b/satellite/satellitedb/testdata/postgres.v264.sql @@ -0,0 +1,809 @@ +-- AUTOGENERATED BY storj.io/dbx +-- DO NOT EDIT +CREATE TABLE account_freeze_events ( + user_id bytea NOT NULL, + event integer NOT NULL, + limits jsonb, + days_till_escalation integer, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + PRIMARY KEY ( user_id, event ) +); +CREATE TABLE accounting_rollups ( + node_id bytea NOT NULL, + start_time timestamp with time zone NOT NULL, + put_total bigint NOT NULL, + get_total bigint NOT NULL, + get_audit_total bigint NOT NULL, + get_repair_total bigint NOT NULL, + put_repair_total bigint NOT NULL, + at_rest_total double precision NOT NULL, + interval_end_time timestamp with time zone, + PRIMARY KEY ( node_id, start_time ) +); +CREATE TABLE accounting_timestamps ( + name text NOT NULL, + value timestamp with time zone NOT NULL, + PRIMARY KEY ( name ) +); +CREATE TABLE billing_balances ( + user_id bytea NOT NULL, + balance bigint NOT NULL, + last_updated timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ) +); +CREATE TABLE billing_transactions ( + id bigserial NOT NULL, + user_id bytea NOT NULL, + amount bigint NOT NULL, + currency text NOT NULL, + description text NOT NULL, + source text NOT NULL, + status text NOT NULL, + type text NOT NULL, + metadata jsonb NOT NULL, + timestamp timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE bucket_bandwidth_rollups ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( project_id, bucket_name, interval_start, action ) +); +CREATE TABLE bucket_bandwidth_rollup_archives ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT 
NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + inline bigint NOT NULL, + allocated bigint NOT NULL, + settled bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start, action ) +); +CREATE TABLE bucket_storage_tallies ( + bucket_name bytea NOT NULL, + project_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + total_bytes bigint NOT NULL DEFAULT 0, + inline bigint NOT NULL, + remote bigint NOT NULL, + total_segments_count integer NOT NULL DEFAULT 0, + remote_segments_count integer NOT NULL, + inline_segments_count integer NOT NULL, + object_count integer NOT NULL, + metadata_size bigint NOT NULL, + PRIMARY KEY ( bucket_name, project_id, interval_start ) +); +CREATE TABLE coinpayments_transactions ( + id text NOT NULL, + user_id bytea NOT NULL, + address text NOT NULL, + amount_numeric bigint NOT NULL, + received_numeric bigint NOT NULL, + status integer NOT NULL, + key text NOT NULL, + timeout integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE graceful_exit_progress ( + node_id bytea NOT NULL, + bytes_transferred bigint NOT NULL, + pieces_transferred bigint NOT NULL DEFAULT 0, + pieces_failed bigint NOT NULL DEFAULT 0, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE graceful_exit_segment_transfer_queue ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_num integer NOT NULL, + root_piece_id bytea, + durability_ratio double precision NOT NULL, + queued_at timestamp with time zone NOT NULL, + requested_at timestamp with time zone, + last_failed_at timestamp with time zone, + last_failed_code integer, + failed_count integer, + finished_at timestamp with time zone, + order_limit_send_count integer NOT NULL DEFAULT 0, + PRIMARY KEY ( node_id, stream_id, position, piece_num ) +); +CREATE TABLE nodes ( + id bytea NOT NULL, + address text NOT NULL DEFAULT '', + last_net text NOT NULL, + last_ip_port text, + country_code text, + protocol integer NOT NULL DEFAULT 0, + email text NOT NULL, + wallet text NOT NULL, + wallet_features text NOT NULL DEFAULT '', + free_disk bigint NOT NULL DEFAULT -1, + piece_count bigint NOT NULL DEFAULT 0, + major bigint NOT NULL DEFAULT 0, + minor bigint NOT NULL DEFAULT 0, + patch bigint NOT NULL DEFAULT 0, + hash text NOT NULL DEFAULT '', + timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00', + release boolean NOT NULL DEFAULT false, + latency_90 bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch', + last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch', + disqualified timestamp with time zone, + disqualification_reason integer, + unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + exit_initiated_at timestamp with time zone, + exit_loop_completed_at timestamp with time zone, + exit_finished_at timestamp with time zone, + exit_success boolean NOT NULL DEFAULT false, + contained timestamp with time zone, + last_offline_email timestamp with time zone, + last_software_update_email timestamp with time zone, + noise_proto integer, + noise_public_key bytea, + debounce_limit integer NOT NULL DEFAULT 0, + features integer 
NOT NULL DEFAULT 0, + PRIMARY KEY ( id ) +); +CREATE TABLE node_api_versions ( + id bytea NOT NULL, + api_version integer NOT NULL, + created_at timestamp with time zone NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE node_events ( + id bytea NOT NULL, + email text NOT NULL, + last_ip_port text, + node_id bytea NOT NULL, + event integer NOT NULL, + created_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_attempted timestamp with time zone, + email_sent timestamp with time zone, + PRIMARY KEY ( id ) +); +CREATE TABLE node_tags ( + node_id bytea NOT NULL, + name text NOT NULL, + value bytea NOT NULL, + signed_at timestamp with time zone NOT NULL, + signer bytea NOT NULL, + PRIMARY KEY ( node_id, name, signer ) +); +CREATE TABLE oauth_clients ( + id bytea NOT NULL, + encrypted_secret bytea NOT NULL, + redirect_url text NOT NULL, + user_id bytea NOT NULL, + app_name text NOT NULL, + app_logo_url text NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE oauth_codes ( + client_id bytea NOT NULL, + user_id bytea NOT NULL, + scope text NOT NULL, + redirect_url text NOT NULL, + challenge text NOT NULL, + challenge_method text NOT NULL, + code text NOT NULL, + created_at timestamp with time zone NOT NULL, + expires_at timestamp with time zone NOT NULL, + claimed_at timestamp with time zone, + PRIMARY KEY ( code ) +); +CREATE TABLE oauth_tokens ( + client_id bytea NOT NULL, + user_id bytea NOT NULL, + scope text NOT NULL, + kind integer NOT NULL, + token bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( token ) +); +CREATE TABLE peer_identities ( + node_id bytea NOT NULL, + leaf_serial_number bytea NOT NULL, + chain bytea NOT NULL, + updated_at timestamp with time zone NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE projects ( + id bytea NOT NULL, + public_id bytea, + name text NOT NULL, + description text NOT NULL, + usage_limit bigint, + bandwidth_limit bigint, + user_specified_usage_limit bigint, + user_specified_bandwidth_limit bigint, + segment_limit bigint DEFAULT 1000000, + rate_limit integer, + burst_limit integer, + max_buckets integer, + user_agent bytea, + owner_id bytea NOT NULL, + salt bytea, + created_at timestamp with time zone NOT NULL, + default_placement integer, + default_versioning integer NOT NULL DEFAULT 1, + PRIMARY KEY ( id ) +); +CREATE TABLE project_bandwidth_daily_rollups ( + project_id bytea NOT NULL, + interval_day date NOT NULL, + egress_allocated bigint NOT NULL, + egress_settled bigint NOT NULL, + egress_dead bigint NOT NULL DEFAULT 0, + PRIMARY KEY ( project_id, interval_day ) +); +CREATE TABLE registration_tokens ( + secret bytea NOT NULL, + owner_id bytea, + project_limit integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE repair_queue ( + stream_id bytea NOT NULL, + position bigint NOT NULL, + attempted_at timestamp with time zone, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + inserted_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + segment_health double precision NOT NULL DEFAULT 1, + placement integer, + PRIMARY KEY ( stream_id, position ) +); +CREATE TABLE reputations ( + id bytea NOT NULL, + audit_success_count bigint NOT NULL DEFAULT 0, + total_audit_count bigint NOT NULL DEFAULT 0, + vetted_at timestamp with time zone, + created_at timestamp with time zone NOT NULL DEFAULT 
current_timestamp, + updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + disqualified timestamp with time zone, + disqualification_reason integer, + unknown_audit_suspended timestamp with time zone, + offline_suspended timestamp with time zone, + under_review timestamp with time zone, + online_score double precision NOT NULL DEFAULT 1, + audit_history bytea NOT NULL, + audit_reputation_alpha double precision NOT NULL DEFAULT 1, + audit_reputation_beta double precision NOT NULL DEFAULT 0, + unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1, + unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0, + PRIMARY KEY ( id ) +); +CREATE TABLE reset_password_tokens ( + secret bytea NOT NULL, + owner_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( secret ), + UNIQUE ( owner_id ) +); +CREATE TABLE reverification_audits ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_num integer NOT NULL, + inserted_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + last_attempt timestamp with time zone, + reverify_count bigint NOT NULL DEFAULT 0, + PRIMARY KEY ( node_id, stream_id, position ) +); +CREATE TABLE revocations ( + revoked bytea NOT NULL, + api_key_id bytea NOT NULL, + PRIMARY KEY ( revoked ) +); +CREATE TABLE segment_pending_audits ( + node_id bytea NOT NULL, + stream_id bytea NOT NULL, + position bigint NOT NULL, + piece_id bytea NOT NULL, + stripe_index bigint NOT NULL, + share_size bigint NOT NULL, + expected_share_hash bytea NOT NULL, + reverify_count bigint NOT NULL, + PRIMARY KEY ( node_id ) +); +CREATE TABLE storagenode_bandwidth_rollups ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollup_archives ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_bandwidth_rollups_phase2 ( + storagenode_id bytea NOT NULL, + interval_start timestamp with time zone NOT NULL, + interval_seconds integer NOT NULL, + action integer NOT NULL, + allocated bigint DEFAULT 0, + settled bigint NOT NULL, + PRIMARY KEY ( storagenode_id, interval_start, action ) +); +CREATE TABLE storagenode_payments ( + id bigserial NOT NULL, + created_at timestamp with time zone NOT NULL, + node_id bytea NOT NULL, + period text NOT NULL, + amount bigint NOT NULL, + receipt text, + notes text, + PRIMARY KEY ( id ) +); +CREATE TABLE storagenode_paystubs ( + period text NOT NULL, + node_id bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + codes text NOT NULL, + usage_at_rest double precision NOT NULL, + usage_get bigint NOT NULL, + usage_put bigint NOT NULL, + usage_get_repair bigint NOT NULL, + usage_put_repair bigint NOT NULL, + usage_get_audit bigint NOT NULL, + comp_at_rest bigint NOT NULL, + comp_get bigint NOT NULL, + comp_put bigint NOT NULL, + comp_get_repair bigint NOT NULL, + comp_put_repair bigint NOT NULL, + comp_get_audit bigint NOT NULL, + surge_percent bigint NOT NULL, + held bigint NOT NULL, + owed bigint NOT NULL, + disposed bigint NOT NULL, + paid bigint NOT NULL, + distributed 
bigint NOT NULL, + PRIMARY KEY ( period, node_id ) +); +CREATE TABLE storagenode_storage_tallies ( + node_id bytea NOT NULL, + interval_end_time timestamp with time zone NOT NULL, + data_total double precision NOT NULL, + PRIMARY KEY ( interval_end_time, node_id ) +); +CREATE TABLE storjscan_payments ( + chain_id bigint NOT NULL DEFAULT 0, + block_hash bytea NOT NULL, + block_number bigint NOT NULL, + transaction bytea NOT NULL, + log_index integer NOT NULL, + from_address bytea NOT NULL, + to_address bytea NOT NULL, + token_value bigint NOT NULL, + usd_value bigint NOT NULL, + status text NOT NULL, + timestamp timestamp with time zone NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( block_hash, log_index ) +); +CREATE TABLE storjscan_wallets ( + user_id bytea NOT NULL, + wallet_address bytea NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id, wallet_address ) +); +CREATE TABLE stripe_customers ( + user_id bytea NOT NULL, + customer_id text NOT NULL, + billing_customer_id text, + package_plan text, + purchased_package_at timestamp with time zone, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( user_id ), + UNIQUE ( customer_id ) +); +CREATE TABLE stripecoinpayments_invoice_project_records ( + id bytea NOT NULL, + project_id bytea NOT NULL, + storage double precision NOT NULL, + egress bigint NOT NULL, + objects bigint, + segments bigint, + period_start timestamp with time zone NOT NULL, + period_end timestamp with time zone NOT NULL, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ), + UNIQUE ( project_id, period_start, period_end ) +); +CREATE TABLE stripecoinpayments_tx_conversion_rates ( + tx_id text NOT NULL, + rate_numeric double precision NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE TABLE users ( + id bytea NOT NULL, + email text NOT NULL, + normalized_email text NOT NULL, + full_name text NOT NULL, + short_name text, + password_hash bytea NOT NULL, + status integer NOT NULL, + user_agent bytea, + created_at timestamp with time zone NOT NULL, + project_limit integer NOT NULL DEFAULT 0, + project_bandwidth_limit bigint NOT NULL DEFAULT 0, + project_storage_limit bigint NOT NULL DEFAULT 0, + project_segment_limit bigint NOT NULL DEFAULT 0, + paid_tier boolean NOT NULL DEFAULT false, + position text, + company_name text, + company_size integer, + working_on text, + is_professional boolean NOT NULL DEFAULT false, + employee_count text, + have_sales_contact boolean NOT NULL DEFAULT false, + mfa_enabled boolean NOT NULL DEFAULT false, + mfa_secret_key text, + mfa_recovery_codes text, + signup_promo_code text, + verification_reminders integer NOT NULL DEFAULT 0, + trial_notifications integer NOT NULL DEFAULT 0, + failed_login_count integer, + login_lockout_expiration timestamp with time zone, + signup_captcha double precision, + default_placement integer, + activation_code text, + signup_id text, + trial_expiration timestamp with time zone, + upgrade_time timestamp with time zone, + PRIMARY KEY ( id ) +); +CREATE TABLE user_settings ( + user_id bytea NOT NULL, + session_minutes integer, + passphrase_prompt boolean, + onboarding_start boolean NOT NULL DEFAULT true, + onboarding_end boolean NOT NULL DEFAULT true, + onboarding_step text, + notice_dismissal jsonb NOT NULL DEFAULT '{}', + PRIMARY KEY ( user_id ) +); +CREATE TABLE value_attributions ( + project_id bytea NOT NULL, + bucket_name bytea NOT NULL, + user_agent bytea, + 
last_updated timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, bucket_name ) +); +CREATE TABLE verification_audits ( + inserted_at timestamp with time zone NOT NULL DEFAULT current_timestamp, + stream_id bytea NOT NULL, + position bigint NOT NULL, + expires_at timestamp with time zone, + encrypted_size integer NOT NULL, + PRIMARY KEY ( inserted_at, stream_id, position ) +); +CREATE TABLE webapp_sessions ( + id bytea NOT NULL, + user_id bytea NOT NULL, + ip_address text NOT NULL, + user_agent text NOT NULL, + status integer NOT NULL, + expires_at timestamp with time zone NOT NULL, + PRIMARY KEY ( id ) +); +CREATE TABLE api_keys ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + head bytea NOT NULL, + name text NOT NULL, + secret bytea NOT NULL, + user_agent bytea, + created_at timestamp with time zone NOT NULL, + created_by bytea REFERENCES users( id ), + PRIMARY KEY ( id ), + UNIQUE ( head ), + UNIQUE ( name, project_id ) +); +CREATE TABLE bucket_metainfos ( + id bytea NOT NULL, + project_id bytea NOT NULL REFERENCES projects( id ), + name bytea NOT NULL, + user_agent bytea, + versioning integer NOT NULL DEFAULT 0, + path_cipher integer NOT NULL, + created_at timestamp with time zone NOT NULL, + default_segment_size integer NOT NULL, + default_encryption_cipher_suite integer NOT NULL, + default_encryption_block_size integer NOT NULL, + default_redundancy_algorithm integer NOT NULL, + default_redundancy_share_size integer NOT NULL, + default_redundancy_required_shares integer NOT NULL, + default_redundancy_repair_shares integer NOT NULL, + default_redundancy_optimal_shares integer NOT NULL, + default_redundancy_total_shares integer NOT NULL, + placement integer, + created_by bytea REFERENCES users( id ), + PRIMARY KEY ( project_id, name ) +); +CREATE TABLE project_invitations ( + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + email text NOT NULL, + inviter_id bytea REFERENCES users( id ) ON DELETE SET NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( project_id, email ) +); +CREATE TABLE project_members ( + member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE, + project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( member_id, project_id ) +); +CREATE TABLE stripecoinpayments_apply_balance_intents ( + tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE, + state integer NOT NULL, + created_at timestamp with time zone NOT NULL, + PRIMARY KEY ( tx_id ) +); +CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time ) ; +CREATE INDEX billing_transactions_timestamp_index ON billing_transactions ( timestamp ) ; +CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start ) ; +CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id ) ; +CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start ) ; +CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id ) ; +CREATE INDEX bucket_storage_tallies_project_id_interval_start_index ON bucket_storage_tallies ( project_id, interval_start ) ; +CREATE INDEX 
bucket_storage_tallies_interval_start_index ON bucket_storage_tallies ( interval_start ) ; +CREATE INDEX graceful_exit_segment_transfer_nid_dr_qa_fa_lfa_index ON graceful_exit_segment_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at ) ; +CREATE INDEX node_last_ip ON nodes ( last_net ) ; +CREATE INDEX nodes_dis_unk_off_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, offline_suspended, exit_finished_at, last_contact_success ) ; +CREATE INDEX nodes_last_cont_success_free_disk_ma_mi_patch_vetted_partial_index ON nodes ( last_contact_success, free_disk, major, minor, patch, vetted_at ) WHERE nodes.disqualified is NULL AND nodes.unknown_audit_suspended is NULL AND nodes.exit_initiated_at is NULL AND nodes.release = true AND nodes.last_net != '' ; +CREATE INDEX nodes_dis_unk_aud_exit_init_rel_last_cont_success_stored_index ON nodes ( disqualified, unknown_audit_suspended, exit_initiated_at, release, last_contact_success ) WHERE nodes.disqualified is NULL AND nodes.unknown_audit_suspended is NULL AND nodes.exit_initiated_at is NULL AND nodes.release = true ; +CREATE INDEX node_events_email_event_created_at_index ON node_events ( email, event, created_at ) WHERE node_events.email_sent is NULL ; +CREATE INDEX oauth_clients_user_id_index ON oauth_clients ( user_id ) ; +CREATE INDEX oauth_codes_user_id_index ON oauth_codes ( user_id ) ; +CREATE INDEX oauth_codes_client_id_index ON oauth_codes ( client_id ) ; +CREATE INDEX oauth_tokens_user_id_index ON oauth_tokens ( user_id ) ; +CREATE INDEX oauth_tokens_client_id_index ON oauth_tokens ( client_id ) ; +CREATE INDEX projects_public_id_index ON projects ( public_id ) ; +CREATE INDEX projects_owner_id_index ON projects ( owner_id ) ; +CREATE INDEX project_bandwidth_daily_rollup_interval_day_index ON project_bandwidth_daily_rollups ( interval_day ) ; +CREATE INDEX repair_queue_updated_at_index ON repair_queue ( updated_at ) ; +CREATE INDEX repair_queue_num_healthy_pieces_attempted_at_index ON repair_queue ( segment_health, attempted_at ) ; +CREATE INDEX repair_queue_placement_index ON repair_queue ( placement ) ; +CREATE INDEX reverification_audits_inserted_at_index ON reverification_audits ( inserted_at ) ; +CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start ) ; +CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start ) ; +CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period ) ; +CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id ) ; +CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id ) ; +CREATE INDEX storjscan_payments_chain_id_block_number_log_index_index ON storjscan_payments ( chain_id, block_number, log_index ) ; +CREATE INDEX storjscan_wallets_wallet_address_index ON storjscan_wallets ( wallet_address ) ; +CREATE INDEX users_email_status_index ON users ( normalized_email, status ) ; +CREATE INDEX webapp_sessions_user_id_index ON webapp_sessions ( user_id ) ; +CREATE INDEX project_invitations_project_id_index ON project_invitations ( project_id ) ; +CREATE INDEX project_invitations_email_index ON project_invitations ( email ) ; +CREATE INDEX project_members_project_id_index ON project_members ( project_id ) ; + +-- MAIN DATA -- + +INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", 
"get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000); + +INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00'); +INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00'); + +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "vetted_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, '2020-03-18 12:00:00.000000+00'); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", 
"piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NUll, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); +INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false); + +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00', false, 10, 50000000000, 50000000000, false, 150000); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "employee_count", "project_limit", "project_bandwidth_limit", "project_storage_limit", "have_sales_contact", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\304\\313\\206\\311",'::bytea, 'Ian', 'Pires', '3email3@mail.test', '3EMAIL3@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-03-18 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 51, true, '1-50', 10, 50000000000, 50000000000, true, 150000); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", 
"is_professional", "employee_count", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\312",'::bytea, 'Campbell', 'Wright', '4email4@mail.test', '4EMAIL4@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-07-17 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 82, true, '1-50', 10, 50000000000, 50000000000, 150000); +INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true, 10, 50000000000, 50000000000, false, false, NULL, NULL, 150000); + +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "owner_id", "created_at", "segment_limit") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 5e11, 5e11, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00', 150000); +INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "owner_id", "created_at", "segment_limit") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 5e11, 5e11, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00', 150000); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00'); +INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00'); + +INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00'); + +INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024); +INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000); + +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 
08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); +INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024); +INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0); + +INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00'); + +INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, '2019-02-14 08:28:24.267934+00'); + +INSERT INTO "value_attributions" ("project_id", "bucket_name", "user_agent", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, NULL, '2019-02-14 08:07:31.028103+00'); + +INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "versioning", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, 0, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10); + +INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00'); + +INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES 
(E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
+
+INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate_numeric", "created_at") VALUES ('tx_id', '1.929883831', '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount_numeric", "received_numeric", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', 1411112222, 1311112222, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
+
+INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "owner_id", "created_at", "segment_limit") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', 5e11, 5e11, NULL, 2000000, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00', 150000);
+
+INSERT INTO "project_bandwidth_daily_rollups"("project_id", "interval_day", egress_allocated, egress_settled, egress_dead) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2021-04-22', 10000, 5000, 0);
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "owner_id", "created_at", "segment_limit") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', 5e11, 5e11, NULL, 2000000, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00', 150000);
+
+INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472, 0);
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
+
+INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
+INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
+INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', 5e11, 5e11, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000, 150000);
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', 5e11, 5e11, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000, 150000);
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101, 150000);
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000);
+
+INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
+
+INSERT INTO "storagenode_bandwidth_rollup_archives" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
+INSERT INTO "bucket_bandwidth_rollup_archives" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
+
+INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', '2020-04-07T20:14:21.479141Z', '', 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 117);
+INSERT INTO "storagenode_payments"("id", "created_at", "period", "node_id", "amount") VALUES (1, '2020-04-07T20:14:21.479141Z', '2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', 117);
+
+INSERT INTO "reputations"("id", "audit_success_count", "total_audit_count", "created_at", "updated_at", "disqualified", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "online_score", "audit_history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', NULL, 1000, 0, 1, 0, 1, '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
+
+INSERT INTO "graceful_exit_segment_transfer_queue" ("node_id", "stream_id", "position", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 10 , 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
+
+INSERT INTO "segment_pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "stream_id", position) VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, '\x010101', 1);
+
+INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\342U\\303\\312\\204",'::bytea, 'Noahson', 'William', '100email1@mail.test', '100EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00', false, 10, 100000000000000, 25000000000000, true, 100000000);
+
+INSERT INTO "repair_queue" ("stream_id", "position", "attempted_at", "segment_health", "updated_at", "inserted_at") VALUES ('\x01', 1, null, 1, '2020-09-01 00:00:00.000000+00', '2021-09-01 00:00:00.000000+00');
+
+INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\303\\312\\204",'::bytea, 'Noahson William', '101email1@mail.test', '101EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2019-02-14 08:28:24.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6g7h8"]', 3, 50000000000, 50000000000, 150000);
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "burst_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\251\\247'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, 4000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000);
+
+INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\303\\312\\205",'::bytea, 'Felicia Smith', '99email1@mail.test', '99EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-08-14 09:13:44.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6d7h8"]', 'promo123', 3, 50000000000, 50000000000, 150000);
+
+INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "segments", "period_start", "period_end", "state", "created_at") VALUES (E'\\300\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\300\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55517', '', 0, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2021-02-14 08:07:31.028103+00', '2021-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, 'DE');
+INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "versioning", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares", "placement") VALUES (E'\\144/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketotheruniquename'::bytea, 0, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10, 1);
+
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\017', '127.0.0.1:55517', '', 0, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2020-02-14 08:07:31.028103+00', '2021-10-13 08:07:31.108963+00', 'epoch', 'epoch', '2021-10-13 08:07:31.108963+00', 0, false, NULL);
+
+INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\267\\342U\\303\\312\\203",'::bytea, 'Jessica Thompson', '143email1@mail.test', '143EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-11-04 08:27:56.614594+00', true, 'mfa secret key', '["2b3c4d5e","f6a7e8e9"]', 'promo123', 3, '150000000000', '150000000000', 150000);
+
+INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'Heather Jackson', '762email@mail.test', '762EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-11-05 03:22:39.614594+00', true, 'mfa secret key', '["5e4d3c2b","e9e8a7f6"]', 'promo123', 3, '100000000000000', '25000000000000', 150000);
+
+INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit") VALUES (E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'Michael Mint', '333email2@mail.test', '333EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-10-05 03:22:39.614594+00', true, 'mfa secret key', '["5e4d3c2c","e9e8a7f7"]', 'promo123', 3, '100000000000000', '25000000000000', 150000);
+
+INSERT INTO "oauth_clients"("id", "encrypted_secret", "redirect_url", "user_id", "app_name", "app_logo_url") VALUES (E'FD6209C0-7A17-4FC3-895C-E57A6C7CBBE1'::bytea, E'610B723B-E1FF-4B1D-B372-521250690C6E'::bytea, 'https://example.test/callback/storj', E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'Example App', 'https://example.test/logo.png');
+
+INSERT INTO "oauth_codes"("client_id", "user_id", "scope", "redirect_url", "challenge", "challenge_method", "code", "created_at", "expires_at", "claimed_at") VALUES (E'FD6209C0-7A17-4FC3-895C-E57A6C7CBBE1'::bytea, E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'scope', 'http://localhost:12345/callback', 'challenge', 'challenge method', 'plaintext code', '2021-12-05 03:22:39.614594+00', '2021-12-05 03:22:39.614594+00', '2021-12-05 03:22:39.614594+00');
+
+INSERT INTO "oauth_tokens"("client_id", "user_id", "scope", "kind", "token", "created_at", "expires_at") VALUES (E'FD6209C0-7A17-4FC3-895C-E57A6C7CBBE1'::bytea, E'\\364\\312\\033w\\222\\303Ci\\265\\342U\\303\\312\\202",'::bytea, 'scope', 1, E'B9C93D5F-CBD7-4615-9184-E714CFE14365'::bytea, '2021-12-05 03:22:39.614594+00', '2021-12-05 03:22:39.614594+00');
+
+INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount_numeric", "received_numeric", "status", "key", "timeout", "created_at") VALUES ('different_tx_id_from_before', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', 125419938429, 1, 1, 'key', 60, '2021-07-28 20:24:11.932313-05');
+INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate_numeric", "created_at") VALUES ('different_tx_id_from_before', 3.14159265359, '2021-07-28 20:24:11.932313-05');
+
+INSERT INTO "webapp_sessions"("id", "user_id", "ip_address", "user_agent", "status", "expires_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '127.0.0.1', 'Firefox', 0, '2019-02-14 08:28:24.614594+00');
+
+INSERT INTO "users"("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit", "verification_reminders") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\304\\312\\205",'::bytea, 'Felicia Smith', '1testemail1@mail.test', '1TESTEMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-08-14 09:13:44.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6d7h8"]', 'promo123', 3, 50000000000, 50000000000, 150000, 1);
+
+INSERT INTO "reputations"("id", "audit_success_count", "total_audit_count", "created_at", "updated_at", "disqualified", "disqualification_reason", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "online_score", "audit_history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\002', 2, 5, '2022-04-20 04:20:59.028103+00', '2022-04-20 04:21:09.028103+00', '2022-04-20 04:22:09.028103+00', 3, 50, 0, 1, 0, 1, '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
+
+INSERT INTO "storjscan_wallets" ("user_id", "wallet_address", "created_at") VALUES (E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, E'\\343\\301\\042w\\222\\263Ci\\245\\312U\\304\\312\\202",'::bytea, '2021-07-28 20:04:11.932313+00');
+
+INSERT INTO "storjscan_payments" ("chain_id", "block_hash", "block_number", "transaction", "log_index", "from_address", "to_address", "token_value", "usd_value", "status", "timestamp", "created_at") VALUES (1, E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, 0, E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, 0, E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, E'\\363\\301\\032w\\222\\203Ci\\245\\342U\\304\\332\\202",'::bytea, 1, 1, 'example', '2022-04-20 04:22:09.028103+00', '2022-04-20 04:22:09.028103+00');
+
+INSERT INTO "projects"("id", "public_id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "burst_limit", "owner_id", "created_at", "max_buckets", "segment_limit") VALUES (E'300\\273|\\342N\\347\\347\\347\\342\\363\\371>+F\\251\\247'::bytea, E'300\\273|\\342N\\347\\347\\363\\347\\363\\371>+F\\241\\247'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, 2000000, 4000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000);
+
+INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total", "interval_end_time") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-10 00:00:00+00', 2875, 5750, 8635, 11500, 0, 14375, '2019-02-10 23:00:00+00');
+
+INSERT INTO "billing_transactions" ("id", "user_id", "amount", "currency", "description", "source", "status", "type", "metadata", "timestamp", "created_at") VALUES (1, E'\\363\\331\\032w\\212\\213Ci\\245\\322U\\314\\302\\202",'::bytea, 113219736213, 'usd', 'some_description', 'some_source', 'some_status', 'some_type', '{ "Wallet": "0x1234", "ReferenceID": "0987654321"}'::jsonb, '2021-07-28 19:14:11.932313+00', '2021-07-28 19:34:11.932323+00');
+
+INSERT INTO "billing_balances" ("user_id", "balance", "last_updated") VALUES (E'\\363\\331\\032w\\222\\203Ci\\245\\312U\\304\\322\\212",'::bytea, 113219736213, '2021-07-28 19:34:11.932323+00');
+
+INSERT INTO "projects"("id", "public_id", "name", "description", "usage_limit", "bandwidth_limit", "user_specified_usage_limit", "user_specified_bandwidth_limit", "rate_limit", "burst_limit", "owner_id", "created_at", "max_buckets", "segment_limit", "salt") VALUES (E'300\\273|\\342N\\347\\347\\347\\342\\363\\371>+F\\252\\247'::bytea, E'300\\273|\\342N\\347\\347\\363\\347\\363\\371>+F\\241\\247'::bytea, 'Limit Test 2', 'This project is below the default', 5e11, 5e11, NULL, NULL, 2000000, 4000000, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL, 150000, E'300\\273|\\342N\\347\\347\\347\\342\\363\\371>+F\\252\\247'::bytea);
+
+INSERT INTO "users" ("id", "full_name", "email", "normalized_email", "password_hash", "status", "created_at", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "signup_promo_code", "project_limit", "project_bandwidth_limit", "project_storage_limit", "project_segment_limit", "verification_reminders", "signup_captcha") VALUES (E'\\363\\311\\033w\\222\\303Ci\\266\\344U\\304\\312\\206",'::bytea, 'Harold Smith', '1testemail206@mail.test', '1TESTEMAIL206@MAIL.TEST', E'some_readable_hash'::bytea, 1, '2021-08-14 09:13:44.614594+00', true, 'mfa secret key', '["1a2b3c4d","e5f6d7h8"]', 'promo123', 3, 50000000000, 50000000000, 150000, 1, 1);
+
+INSERT INTO "reverification_audits" ("node_id", "stream_id", "position", "piece_num", "inserted_at", "last_attempt", "reverify_count") VALUES (E'\\xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855', E'\\x01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b', 1152921504606846976, 4, '2008-06-06 14:13:08.845574-07', '2009-08-23 02:19:52.922832-07', 5);
+
+INSERT INTO "node_events" ("id", "email", "node_id", "event", "created_at", "email_sent") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\017', 'test@storj.test', E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:28:24.614594+00', '2019-02-14 08:28:24.614594+00');
+
+INSERT INTO "verification_audits" ("inserted_at", "stream_id", "position", "expires_at", "encrypted_size") VALUES ('2022-10-31 00:00:00.000000+00', E'\\xb5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c', 42949672970, NULL, 2147483647);
+INSERT INTO "verification_audits" ("inserted_at", "stream_id", "position", "expires_at", "encrypted_size") VALUES ('2022-10-31 00:01:00.000000+00', E'\\x6e96e45029870a9b08cff2ed6ac840ccde3edce244327cc1bddefa1e555bc81f', 450971566185, '2023-01-01 23:59:59.999999+13', 12);
+
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "contained") VALUES (E'\\342\\341\\363\\342>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', NULL, NULL, false, '2022-06-14 05:07:31.108963+00');
+
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code", "last_offline_email") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\345\\017', '127.0.0.1:55517', '', 0, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2020-02-14 08:07:31.028103+00', '2021-10-13 08:07:31.108963+00', 'epoch', 'epoch', '2021-10-13 08:07:31.108963+00', 0, false, NULL, '2021-10-13 08:07:31.108963+00');
+
+INSERT INTO "nodes"("id", "address", "last_net", "protocol", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90","created_at", "updated_at", "last_contact_success", "last_contact_failure", "disqualified", "disqualification_reason", "exit_success", "country_code", "last_software_update_email") VALUES (E'\\362\\341\\363\\371>+F\\256\\262\\300\\273|\\342N\\347\\017', '127.0.0.1:55517', '', 0, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, '2020-02-14 08:07:31.028103+00', '2021-10-13 08:07:31.108963+00', 'epoch', 'epoch', '2021-10-13 08:07:31.108963+00', 0, false, NULL, '2021-10-13 08:07:31.108963+00');
+
+INSERT INTO "node_events"("id", "email", "node_id", "event", "created_at", "last_attempted", "email_sent") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 'test@storj.test', E'\\153\\313\\234\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:28:24.614594+00', '2020-02-14 08:28:24.614594+00', '2019-02-14 08:28:24.614594+00');
+
+INSERT INTO "account_freeze_events"("user_id", "event", "limits", "days_till_escalation", "created_at") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 0, '{"userLimits": {"storage": 100, "egress": 100}, "projectLimits": {"projectID0": {"storage": 100, "egress": 100}}}'::jsonb, 60, '2019-02-14 08:28:24.614594+00');
+
+INSERT INTO "user_settings"("user_id", "session_minutes", "passphrase_prompt", "onboarding_start", "onboarding_end", "onboarding_step") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 15, NULL, true, true, NULL);
+
+INSERT INTO "stripe_customers"("user_id", "customer_id", "package_plan", "purchased_package_at", "created_at") VALUES (E'\\363\\312\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id0', 'package-name', '2023-03-22 15:34:07.123456+00','2019-06-01 08:28:24.267934+00');
+
+INSERT INTO "project_invitations"("project_id", "email", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300', '3EMAIL3@MAIL.TEST', '2023-04-24 00:00:00+00');
+INSERT INTO "project_invitations"("project_id", "email", "inviter_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '3EMAIL3@MAIL.TEST', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",', '2023-05-09 00:00:00+00');
+
+INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_segment_limit", "default_placement") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\225\\211",'::bytea, 'Angela', 'Berg', 'eu@mail.test', 'eu@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true, 10, 50000000000, 50000000000, false, false, NULL, NULL, 150000, 1);
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "owner_id", "created_at", "segment_limit", "default_placement", "default_versioning") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\072'::bytea, 'projName1', 'Test project 1', 5e11, 5e11, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00', 150000, 1, 1);
+
+INSERT INTO "node_tags"("node_id", "name", "value", "signed_at", "signer")VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 'foo', E'\\xCAFEBABE','2023-04-24 00:00:00+00',E'\\x010203');
+
+INSERT INTO "repair_queue" ("stream_id", "position", "attempted_at", "segment_health", "updated_at", "inserted_at", "placement") VALUES ('\x02', 1, null, 1, '2020-09-01 00:00:00.000000+00', '2021-09-01 00:00:00.000000+00', 10);
+
+INSERT INTO "account_freeze_events"("user_id", "event", "limits", "days_till_escalation", "created_at") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 1, '{"userLimits": {"storage": 100, "egress": 100}, "projectLimits": {"projectID0": {"storage": 100, "egress": 100}}}'::jsonb, 15, '2019-02-14 08:28:24.614594+00');
+
+INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_segment_limit", "default_placement", "activation_code", "signup_id") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\313\\225\\211",'::bytea, 'Angela', 'Berg', 'eu@mail.test', 'eu@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true, 10, 50000000000, 50000000000, false, false, NULL, NULL, 150000, 1, '223432', 'H2Oqwerty');
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "owner_id", "created_at", "segment_limit", "default_placement", "default_versioning") VALUES (E'\\233\\342\\363\\371>+F\\236\\263\\321\\273|\\312N\\147\\272'::bytea, 'projName2', 'Test project 2', 5e11, 5e11, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.656949+00', 150000, 1, 1);
+
+INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "owner_id", "created_at", "segment_limit", "default_placement", "default_versioning") VALUES (E'\\213\\342\\364\\371>+F\\236\\263\\311\\253|\\312N\\147\\272'::bytea, 'projName3', 'Test project 3', 5e11, 5e11, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.676949+00', 150000, 1, 2);
+
+INSERT INTO "node_events"("id", "email", "last_ip_port", "node_id", "event", "created_at", "last_attempted", "email_sent") VALUES(E'\\361\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\017', 'test@storj.test', '127.0.0.1:1234', E'\\153\\313\\234\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:28:24.614594+00', '2020-02-14 08:28:24.614594+00', '2019-02-14 08:28:24.614594+00');
+
+INSERT INTO "user_settings"("user_id", "session_minutes", "passphrase_prompt", "onboarding_start", "onboarding_end", "onboarding_step", "notice_dismissal") VALUES(E'\\362\\341\\363\\371>+F\\256\\263\\300\\274|\\342N\\347\\022', 15, NULL, true, true, NULL, '{"someNotice": true}'::jsonb);
+
+INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "created_at", "position", "company_name", "working_on", "company_size", "is_professional", "project_limit", "project_bandwidth_limit", "project_storage_limit", "paid_tier", "mfa_enabled", "mfa_secret_key", "mfa_recovery_codes", "project_segment_limit", "default_placement", "activation_code", "signup_id", "trial_notifications", "trial_expiration", "upgrade_time") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\314\\225\\211",'::bytea, 'Angela', 'Berg', 'eu@mail.test', 'eu@MAIL.TEST', E'some_readable_hash'::bytea, 2, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true, 10, 50000000000, 50000000000, false, false, NULL, NULL, 150000, 1, '223432', 'H2Oqwerty', 0, NULL, NULL);
+
+INSERT INTO "stripe_customers"("user_id", "customer_id", "billing_customer_id", "package_plan", "purchased_package_at", "created_at") VALUES (E'\\361\\322\\033w\\232\\303Ci\\255\\343U\\303\\313\\205",'::bytea, 'stripe_id1', 'stripe_id0', 'package-name', '2024-03-05 15:34:07.123456+00','2020-06-01 08:28:24.267934+00');
+
+-- NEW DATA: rows exercising the new created_by column on api_keys and bucket_metainfos --
+INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "created_at", "created_by") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\034'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\137'::bytea, 'key 3', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, '2019-02-14 08:28:24.267934+00', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\314\\225\\211",'::bytea);
+
+INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "versioning", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares", "placement", "created_by") VALUES (E'\\144/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\034'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketotheruniquename 1'::bytea, 0, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\314\\225\\211",'::bytea);
\ No newline at end of file