Add 'storedData' field to api.Host type (#1172)
Closes #1113

This makes it easier to figure out how much data we store with a host
and cleans up the `contractor` a bit.
ChrisSchinnerl committed Apr 17, 2024 · 2 parents 24d407d + 1bea53e · commit 743b2a8
Showing 8 changed files with 60 additions and 33 deletions.
1 change: 1 addition & 0 deletions api/host.go
@@ -158,6 +158,7 @@ type (
 		Scanned    bool                 `json:"scanned"`
 		Blocked    bool                 `json:"blocked"`
 		Checks     map[string]HostCheck `json:"checks"`
+		StoredData uint64               `json:"storedData"`
 	}

 	HostAddress struct {
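Because the field is part of api.Host's JSON encoding, any consumer of the hosts API picks it up automatically. A minimal client-side decoding sketch (the trimmed host type and the response body are hypothetical; the real api.Host carries many more fields):

package main

import (
	"encoding/json"
	"fmt"
)

// host mirrors just the two fields this change is about.
type host struct {
	PublicKey  string `json:"publicKey"`
	StoredData uint64 `json:"storedData"`
}

func main() {
	// Hypothetical response body from a hosts endpoint after this change.
	body := []byte(`[{"publicKey":"ed25519:aaa","storedData":4194304}]`)

	var hosts []host
	if err := json.Unmarshal(body, &hosts); err != nil {
		panic(err)
	}
	for _, h := range hosts {
		fmt.Printf("%s stores %d bytes with us\n", h.PublicKey, h.StoredData)
	}
}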
16 changes: 7 additions & 9 deletions autopilot/contractor/contractor.go
@@ -283,12 +283,10 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, error) {
 		usedHosts[contract.HostKey] = struct{}{}
 	}

-	// compile map of stored data per host
+	// compile map of stored data per contract
 	contractData := make(map[types.FileContractID]uint64)
-	hostData := make(map[types.PublicKey]uint64)
 	for _, c := range contracts {
 		contractData[c.ID] = c.FileSize()
-		hostData[c.HostKey] += c.FileSize()
 	}

 	// fetch all hosts
@@ -311,7 +309,7 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, error) {
 	}

 	// fetch candidate hosts
-	candidates, unusableHosts, err := c.candidateHosts(mCtx, hosts, usedHosts, hostData, minValidScore) // avoid 0 score hosts
+	candidates, unusableHosts, err := c.candidateHosts(mCtx, hosts, usedHosts, minValidScore) // avoid 0 score hosts
 	if err != nil {
 		return false, err
 	}
@@ -325,7 +323,7 @@ func (c *Contractor) performContractMaintenance(ctx *mCtx, w Worker) (bool, error) {
 	}

 	// run host checks
-	checks, err := c.runHostChecks(mCtx, hosts, hostData, minScore)
+	checks, err := c.runHostChecks(mCtx, hosts, minScore)
 	if err != nil {
 		return false, fmt.Errorf("failed to run host checks, err: %v", err)
 	}
@@ -743,7 +741,7 @@ LOOP:
 	return toKeep, toArchive, toStopUsing, toRefresh, toRenew
 }

-func (c *Contractor) runHostChecks(ctx *mCtx, hosts []api.Host, hostData map[types.PublicKey]uint64, minScore float64) (map[types.PublicKey]*api.HostCheck, error) {
+func (c *Contractor) runHostChecks(ctx *mCtx, hosts []api.Host, minScore float64) (map[types.PublicKey]*api.HostCheck, error) {
 	// fetch consensus state
 	cs, err := c.bus.ConsensusState(ctx)
 	if err != nil {
@@ -757,7 +755,7 @@ func (c *Contractor) runHostChecks(ctx *mCtx, hosts []api.Host, hostData map[types.PublicKey]uint64, minScore float64) (map[types.PublicKey]*api.HostCheck, error) {
 	checks := make(map[types.PublicKey]*api.HostCheck)
 	for _, h := range hosts {
 		h.PriceTable.HostBlockHeight = cs.BlockHeight // ignore HostBlockHeight
-		checks[h.PublicKey] = checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore, hostData[h.PublicKey])
+		checks[h.PublicKey] = checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore)
 	}
 	return checks, nil
 }
@@ -1230,7 +1228,7 @@ func (c *Contractor) calculateMinScore(candidates []scoredHost, numContracts uin
 	return minScore
 }

-func (c *Contractor) candidateHosts(ctx *mCtx, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) {
+func (c *Contractor) candidateHosts(ctx *mCtx, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) {
 	start := time.Now()

 	// fetch consensus state
@@ -1283,7 +1281,7 @@ func (c *Contractor) candidateHosts(ctx *mCtx, hosts []api.Host, usedHosts map[types.PublicKey]struct{}, storedData map[types.PublicKey]uint64, minScore float64) ([]scoredHost, unusableHostsBreakdown, error) {
 		// NOTE: ignore the pricetable's HostBlockHeight by setting it to our
 		// own blockheight
 		h.PriceTable.HostBlockHeight = cs.BlockHeight
-		hc := checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore, storedData[h.PublicKey])
+		hc := checkHost(ctx.AutopilotConfig(), ctx.state.RS, gc, h, minScore)
 		if hc.Usability.IsUsable() {
 			candidates = append(candidates, scoredHost{h, hc.Score.Score()})
 			continue
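The deleted hostData map is the cleanup the commit message refers to: previously the contractor derived per-host stored data from the contract set and threaded the map through candidateHosts, runHostChecks, and checkHost. A toy before/after sketch of that change (simplified types and hypothetical sizes, not the real signatures):

package main

import "fmt"

type publicKey string

type contract struct {
	hostKey publicKey
	size    uint64
}

type host struct {
	key        publicKey
	storedData uint64 // populated by the store since this change
}

func main() {
	contracts := []contract{{"h1", 1 << 22}, {"h1", 1 << 22}, {"h2", 1 << 22}}

	// Before: aggregate stored data per host from the contracts and pass
	// the map down to every scoring helper.
	hostData := make(map[publicKey]uint64)
	for _, c := range contracts {
		hostData[c.hostKey] += c.size
	}

	// After: the same value arrives on the host record itself, so the map
	// and the extra parameter disappear from every signature above.
	hosts := []host{{"h1", 2 << 22}, {"h2", 1 << 22}}
	for _, h := range hosts {
		fmt.Println(h.key, hostData[h.key] == h.storedData) // h1 true, h2 true
	}
}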
4 changes: 2 additions & 2 deletions autopilot/contractor/evaluate.go
@@ -9,7 +9,7 @@ import (
 func countUsableHosts(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Currency, currentPeriod uint64, rs api.RedundancySettings, gs api.GougingSettings, hosts []api.Host) (usables uint64) {
 	gc := worker.NewGougingChecker(gs, cs, fee, currentPeriod, cfg.Contracts.RenewWindow)
 	for _, host := range hosts {
-		hc := checkHost(cfg, rs, gc, host, minValidScore, 0)
+		hc := checkHost(cfg, rs, gc, host, minValidScore)
 		if hc.Usability.IsUsable() {
 			usables++
 		}
@@ -25,7 +25,7 @@ func EvaluateConfig(cfg api.AutopilotConfig, cs api.ConsensusState, fee types.Cu

 	resp.Hosts = uint64(len(hosts))
 	for _, host := range hosts {
-		hc := checkHost(cfg, rs, gc, host, 0, 0)
+		hc := checkHost(cfg, rs, gc, host, 0)
 		if hc.Usability.IsUsable() {
 			resp.Usable++
 			continue
4 changes: 2 additions & 2 deletions autopilot/contractor/hostfilter.go
@@ -236,7 +236,7 @@ func isUpForRenewal(cfg api.AutopilotConfig, r types.FileContractRevision, block
 }

 // checkHost performs a series of checks on the host.
-func checkHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h api.Host, minScore float64, storedData uint64) *api.HostCheck {
+func checkHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h api.Host, minScore float64) *api.HostCheck {
 	if rs.Validate() != nil {
 		panic("invalid redundancy settings were supplied - developer error")
 	}
@@ -278,7 +278,7 @@ func checkHost(cfg api.AutopilotConfig, rs api.RedundancySettings, gc worker.GougingChecker, h api.Host, minScore float64, storedData uint64) *api.HostCheck {
 		// not gouging, this because the core package does not have overflow
 		// checks in its cost calculations needed to calculate the period
 		// cost
-		sb = hostScore(cfg, h, storedData, rs.Redundancy())
+		sb = hostScore(cfg, h, rs.Redundancy())
 		if sb.Score() < minScore {
 			ub.LowScore = true
 		}
4 changes: 2 additions & 2 deletions autopilot/contractor/hostscore.go
@@ -22,7 +22,7 @@ const (
 	minValidScore = math.SmallestNonzeroFloat64
 )

-func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown {
+func hostScore(cfg api.AutopilotConfig, h api.Host, expectedRedundancy float64) api.HostScoreBreakdown {
 	cCfg := cfg.Contracts
 	// idealDataPerHost is the amount of data that we would have to put on each
 	// host assuming that our storage requirements were spread evenly across
@@ -44,7 +44,7 @@ func hostScore(cfg api.AutopilotConfig, h api.Host, storedData uint64, expectedRedundancy float64) api.HostScoreBreakdown {
 		Collateral:       collateralScore(cCfg, h.PriceTable.HostPriceTable, uint64(allocationPerHost)),
 		Interactions:     interactionScore(h),
 		Prices:           priceAdjustmentScore(hostPeriodCost, cCfg),
-		StorageRemaining: storageRemainingScore(h.Settings, storedData, allocationPerHost),
+		StorageRemaining: storageRemainingScore(h.Settings, h.StoredData, allocationPerHost),
 		Uptime:           uptimeScore(h),
 		Version:          versionScore(h.Settings, cfg.Hosts.MinProtocolVersion),
 	}
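For context on why hostScore wants the stored data at all: data we already keep on a host effectively counts toward the allocation we expect that host to serve, so the host should not be penalized for the space our own data occupies. A purely illustrative sketch of that idea, not renterd's actual storageRemainingScore formula:

package main

import "fmt"

// storageScoreSketch is illustrative only, not renterd's real
// storageRemainingScore: the point is that data we already store on the
// host counts toward the allocation we expect it to serve.
func storageScoreSketch(remainingStorage, storedData, allocationPerHost uint64) float64 {
	usable := remainingStorage + storedData
	if usable >= allocationPerHost {
		return 1.0
	}
	return float64(usable) / float64(allocationPerHost)
}

func main() {
	const gib = 1 << 30 // hypothetical values below
	// A nearly-full host that already holds 4 GiB of our data scores much
	// better than an otherwise identical host that holds none of it.
	fmt.Println(storageScoreSketch(1*gib, 4*gib, 5*gib)) // 1
	fmt.Println(storageScoreSketch(1*gib, 0, 5*gib))     // 0.2
}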
18 changes: 9 additions & 9 deletions autopilot/contractor/hostscore_test.go
@@ -42,13 +42,13 @@ func TestHostScore(t *testing.T) {

 	// assert both hosts score equal
 	redundancy := 3.0
-	if hostScore(cfg, h1, 0, redundancy) != hostScore(cfg, h2, 0, redundancy) {
+	if hostScore(cfg, h1, redundancy) != hostScore(cfg, h2, redundancy) {
 		t.Fatal("unexpected")
 	}

 	// assert age affects the score
 	h1.KnownSince = time.Now().Add(-1 * day)
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() {
 		t.Fatal("unexpected")
 	}

@@ -57,50 +57,50 @@ func TestHostScore(t *testing.T) {
 	settings.Collateral = settings.Collateral.Div64(2)
 	settings.MaxCollateral = settings.MaxCollateral.Div64(2)
 	h1 = newHost(settings) // reset
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() {
 		t.Fatal("unexpected")
 	}

 	// assert interactions affect the score
 	h1 = newHost(test.NewHostSettings()) // reset
 	h1.Interactions.SuccessfulInteractions++
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() {
 		t.Fatal("unexpected")
 	}

 	// assert uptime affects the score
 	h2 = newHost(test.NewHostSettings()) // reset
 	h2.Interactions.SecondToLastScanSuccess = false
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() || ageScore(h1) != ageScore(h2) {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() || ageScore(h1) != ageScore(h2) {
 		t.Fatal("unexpected")
 	}

 	// assert version affects the score
 	h2Settings := test.NewHostSettings()
 	h2Settings.Version = "1.5.6" // lower
 	h2 = newHost(h2Settings)     // reset
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() {
 		t.Fatal("unexpected")
 	}

 	// assert remaining storage affects the score.
 	h1 = newHost(test.NewHostSettings()) // reset
 	h2.Settings.RemainingStorage = 100
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() {
 		t.Fatal("unexpected")
 	}

 	// assert MaxCollateral affects the score.
 	h2 = newHost(test.NewHostSettings()) // reset
 	h2.PriceTable.MaxCollateral = types.ZeroCurrency
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() {
 		t.Fatal("unexpected")
 	}

 	// assert price affects the score.
 	h2 = newHost(test.NewHostSettings()) // reset
 	h2.PriceTable.WriteBaseCost = types.Siacoins(1)
-	if hostScore(cfg, h1, 0, redundancy).Score() <= hostScore(cfg, h2, 0, redundancy).Score() {
+	if hostScore(cfg, h1, redundancy).Score() <= hostScore(cfg, h2, redundancy).Score() {
 		t.Fatal("unexpected")
 	}
 }
13 changes: 12 additions & 1 deletion internal/test/e2e/cluster_test.go
@@ -612,7 +612,7 @@ func TestUploadDownloadBasic(t *testing.T) {
 	// mine a block to get the revisions mined.
 	cluster.MineBlocks(1)

-	// check the revision height was updated.
+	// check the revision height and size were updated.
 	tt.Retry(100, 100*time.Millisecond, func() error {
 		// fetch the contracts.
 		contracts, err := cluster.Bus.Contracts(context.Background(), api.ContractsOpts{})
@@ -623,10 +623,21 @@ func TestUploadDownloadBasic(t *testing.T) {
 		for _, c := range contracts {
 			if c.RevisionHeight == 0 {
 				return errors.New("revision height should be > 0")
+			} else if c.Size != rhpv2.SectorSize {
+				return fmt.Errorf("size should be %v, got %v", rhpv2.SectorSize, c.Size)
 			}
 		}
 		return nil
 	})
+
+	// Check that stored data on hosts was updated
+	hosts, err := cluster.Bus.Hosts(context.Background(), api.GetHostsOptions{})
+	tt.OK(err)
+	for _, host := range hosts {
+		if host.StoredData != rhpv2.SectorSize {
+			t.Fatalf("stored data should be %v, got %v", rhpv2.SectorSize, host.StoredData)
+		}
+	}
 }

 // TestUploadDownloadExtended is an integration test that verifies objects can
33 changes: 25 additions & 8 deletions stores/hostdb.go
@@ -255,7 +255,7 @@ func (dbAllowlistEntry) TableName() string { return "host_allowlist_entries" }
 func (dbBlocklistEntry) TableName() string { return "host_blocklist_entries" }

 // convert converts a host into a api.HostInfo
-func (h dbHost) convert(blocked bool) api.Host {
+func (h dbHost) convert(blocked bool, storedData uint64) api.Host {
 	var lastScan time.Time
 	if h.LastScan > 0 {
 		lastScan = time.Unix(0, h.LastScan)
@@ -283,11 +283,12 @@ func (h dbHost) convert(blocked bool) api.Host {
 			HostPriceTable: h.PriceTable.convert(),
 			Expiry:         h.PriceTableExpiry.Time,
 		},
-		PublicKey: types.PublicKey(h.PublicKey),
-		Scanned:   h.Scanned,
-		Settings:  rhpv2.HostSettings(h.Settings),
-		Blocked:   blocked,
-		Checks:    checks,
+		PublicKey:  types.PublicKey(h.PublicKey),
+		Scanned:    h.Scanned,
+		Settings:   rhpv2.HostSettings(h.Settings),
+		Blocked:    blocked,
+		Checks:     checks,
+		StoredData: storedData,
 	}
 }

@@ -571,9 +572,25 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us
 			Preload("Blocklist")
 	}

+	// fetch stored data for each host
+	var storedData []struct {
+		HostID     uint
+		StoredData uint64
+	}
+	err := ss.db.Raw("SELECT host_id, SUM(size) as StoredData FROM contracts GROUP BY host_id").
+		Scan(&storedData).
+		Error
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch stored data: %w", err)
+	}
+	storedDataMap := make(map[uint]uint64)
+	for _, host := range storedData {
+		storedDataMap[host.HostID] = host.StoredData
+	}
+
 	var hosts []api.Host
 	var fullHosts []dbHost
-	err := query.
+	err = query.
 		Offset(offset).
 		Limit(limit).
 		FindInBatches(&fullHosts, hostRetrievalBatchSize, func(tx *gorm.DB, batch int) error {
@@ -584,7 +601,7 @@ func (ss *SQLStore) SearchHosts(ctx context.Context, autopilotID, filterMode, us
 			} else {
 				blocked = filterMode == api.HostFilterModeBlocked
 			}
-			hosts = append(hosts, fh.convert(blocked))
+			hosts = append(hosts, fh.convert(blocked, storedDataMap[fh.ID]))
 		}
 		return nil
 	}).
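On the store side, one grouped query computes every host's total up front and the per-host value is joined in application code during the batched conversion, so no per-host query is needed. An in-memory miniature of that pattern (hypothetical types and values):

package main

import "fmt"

// sumRow mirrors what the Raw query scans into, one aggregate per host:
// SELECT host_id, SUM(size) as StoredData FROM contracts GROUP BY host_id
type sumRow struct {
	HostID     uint
	StoredData uint64
}

func main() {
	// Hypothetical query result.
	sums := []sumRow{{HostID: 1, StoredData: 8 << 20}, {HostID: 2, StoredData: 4 << 20}}

	// Build the lookup once...
	storedDataMap := make(map[uint]uint64, len(sums))
	for _, s := range sums {
		storedDataMap[s.HostID] = s.StoredData
	}

	// ...then each host conversion is an O(1) lookup. A host without any
	// contracts simply gets the zero value.
	type hostRow struct {
		ID        uint
		PublicKey string
	}
	hosts := []hostRow{{1, "ed25519:aaa"}, {2, "ed25519:bbb"}, {3, "ed25519:ccc"}}
	for _, h := range hosts {
		fmt.Println(h.PublicKey, storedDataMap[h.ID])
	}
}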
