From 603399ea937dcacd1429ad67e091c6b73e7af872 Mon Sep 17 00:00:00 2001 From: Thomas Bennett Date: Mon, 12 Mar 2018 16:35:53 -0700 Subject: [PATCH 001/212] move TestRenterSiapathValidate to more appropriate location --- modules/renter/renter_test.go | 29 +++++++++++++++++++++++++++++ modules/renter/upload_test.go | 29 ----------------------------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/modules/renter/renter_test.go b/modules/renter/renter_test.go index fd8c3fefc7..f7f3a6723e 100644 --- a/modules/renter/renter_test.go +++ b/modules/renter/renter_test.go @@ -249,3 +249,32 @@ func TestRenterPricesVolatility(t *testing.T) { t.Fatal("expected renter price estimation to change after mining a block") } } + +// TestRenterSiapathValidate verifies that the validateSiapath function correctly validates SiaPaths. +func TestRenterSiapathValidate(t *testing.T) { + var pathtests = []struct { + in string + valid bool + }{ + {"valid/siapath", true}, + {"../../../directory/traversal", false}, + {"testpath", true}, + {"valid/siapath/../with/directory/traversal", false}, + {"validpath/test", true}, + {"..validpath/..test", true}, + {"./invalid/path", false}, + {"test/path", true}, + {"/leading/slash", false}, + {"foo/./bar", false}, + {"", false}, + } + for _, pathtest := range pathtests { + err := validateSiapath(pathtest.in) + if err != nil && pathtest.valid { + t.Fatal("validateSiapath failed on valid path: ", pathtest.in) + } + if err == nil && !pathtest.valid { + t.Fatal("validateSiapath succeeded on invalid path: ", pathtest.in) + } + } +} diff --git a/modules/renter/upload_test.go b/modules/renter/upload_test.go index 0ce51ff4ad..7913ede80d 100644 --- a/modules/renter/upload_test.go +++ b/modules/renter/upload_test.go @@ -8,35 +8,6 @@ import ( "github.com/NebulousLabs/Sia/modules" ) -// TestRenterSiapathValidate verifies that the validateSiapath function correctly validates SiaPaths. -func TestRenterSiapathValidate(t *testing.T) { - var pathtests = []struct { - in string - valid bool - }{ - {"valid/siapath", true}, - {"../../../directory/traversal", false}, - {"testpath", true}, - {"valid/siapath/../with/directory/traversal", false}, - {"validpath/test", true}, - {"..validpath/..test", true}, - {"./invalid/path", false}, - {"test/path", true}, - {"/leading/slash", false}, - {"foo/./bar", false}, - {"", false}, - } - for _, pathtest := range pathtests { - err := validateSiapath(pathtest.in) - if err != nil && pathtest.valid { - t.Fatal("validateSiapath failed on valid path: ", pathtest.in) - } - if err == nil && !pathtest.valid { - t.Fatal("validateSiapath succeeded on invalid path: ", pathtest.in) - } - } -} - // TestRenterUploadDirectory verifies that the renter returns an error if a // directory is provided as the source of an upload. func TestRenterUploadInode(t *testing.T) { From 218be5f0728b9488cb870adfd7482e7983f00bdf Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 16 Mar 2018 10:01:17 -0400 Subject: [PATCH 002/212] Fix deleted renter file showing up after restart --- modules/renter/files.go | 4 ++++ modules/renter/persist.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/modules/renter/files.go b/modules/renter/files.go index e96bcc55bb..c2d53bdd23 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -36,6 +36,7 @@ type file struct { erasureCode modules.ErasureCoder // Static - can be accessed without lock. pieceSize uint64 // Static - can be accessed without lock. 
mode uint32 // actually an os.FileMode + deleted bool // indicates if the file has been deleted. staticUID string // A UID assigned to the file when it gets created. @@ -246,6 +247,9 @@ func (r *Renter) DeleteFile(nickname string) error { f.mu.Lock() defer f.mu.Unlock() + // mark the file as deleted + f.deleted = true + // TODO: delete the sectors of the file as well. return nil diff --git a/modules/renter/persist.go b/modules/renter/persist.go index 02991e3c3b..b9129478b0 100644 --- a/modules/renter/persist.go +++ b/modules/renter/persist.go @@ -161,6 +161,9 @@ func (f *file) UnmarshalSia(r io.Reader) error { // saveFile saves a file to the renter directory. func (r *Renter) saveFile(f *file) error { + if f.deleted { + return errors.New("can't save deleted file") + } // Create directory structure specified in nickname. fullPath := filepath.Join(r.persistDir, f.name+ShareExtension) err := os.MkdirAll(filepath.Dir(fullPath), 0700) From d1f19d07a89fb8354ebd392a58095bea8410ae28 Mon Sep 17 00:00:00 2001 From: Thomas Bennett Date: Fri, 16 Mar 2018 11:51:47 -0700 Subject: [PATCH 003/212] added additional tests --- modules/renter/renter_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/renter/renter_test.go b/modules/renter/renter_test.go index f7f3a6723e..ec23f8b77c 100644 --- a/modules/renter/renter_test.go +++ b/modules/renter/renter_test.go @@ -263,6 +263,11 @@ func TestRenterSiapathValidate(t *testing.T) { {"validpath/test", true}, {"..validpath/..test", true}, {"./invalid/path", false}, + {".../path", true}, + {"valid./path", true}, + {"valid../path", true}, + {"valid/path./test", true}, + {"valid/path../test", true}, {"test/path", true}, {"/leading/slash", false}, {"foo/./bar", false}, From 9ee04190d7c969199d2bd790a8085e129ecc2f7a Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 8 Mar 2018 15:47:48 -0500 Subject: [PATCH 004/212] peer-to-peer ip discovery --- modules/gateway/consts.go | 20 +++++++++ modules/gateway/gateway.go | 2 + modules/gateway/ip.go | 87 ++++++++++++++++++++++++++++++++++++++ modules/gateway/ip_test.go | 45 ++++++++++++++++++++ modules/gateway/upnp.go | 3 ++ 5 files changed, 157 insertions(+) create mode 100644 modules/gateway/ip.go create mode 100644 modules/gateway/ip_test.go diff --git a/modules/gateway/consts.go b/modules/gateway/consts.go index 621474ffa5..35689282c4 100644 --- a/modules/gateway/consts.go +++ b/modules/gateway/consts.go @@ -212,3 +212,23 @@ var ( Testing: 5 * time.Second, }).(time.Duration) ) + +var ( + // minPeersForIPDiscovery is the minimum number of peer connections we wait + // for before we try to discover our public ip from them. It is also the + // minimum number of successful replies we expect from our peers before we + // accept a result. + minPeersForIPDiscovery = build.Select(build.Var{ + Standard: 5, + Dev: 1, + Testing: 1, + }).(int) + + // peerDiscoveryRetryInterval is the time we wait when there were not + // enough peers to determine our public ip address before trying again. + peerDiscoveryRetryInterval = build.Select(build.Var{ + Standard: 10 * time.Second, + Dev: 1 * time.Second, + Testing: 100 * time.Millisecond, + }).(time.Duration) +) diff --git a/modules/gateway/gateway.go b/modules/gateway/gateway.go index feae376378..3c056e525f 100644 --- a/modules/gateway/gateway.go +++ b/modules/gateway/gateway.go @@ -233,10 +233,12 @@ func New(addr string, bootstrap bool, persistDir string) (*Gateway, error) { // Register RPCs. 
g.RegisterRPC("ShareNodes", g.shareNodes) + g.RegisterRPC("DiscoverIP", g.discoverPeerIP) g.RegisterConnectCall("ShareNodes", g.requestNodes) // Establish the de-registration of the RPCs. g.threads.OnStop(func() { g.UnregisterRPC("ShareNodes") + g.UnregisterRPC("DiscoverIP") g.UnregisterConnectCall("ShareNodes") }) diff --git a/modules/gateway/ip.go b/modules/gateway/ip.go new file mode 100644 index 0000000000..39fa9a79bf --- /dev/null +++ b/modules/gateway/ip.go @@ -0,0 +1,87 @@ +package gateway + +import ( + "net" + "time" + + "github.com/NebulousLabs/Sia/encoding" + "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/errors" +) + +// discoverPeerIP is the handler for the discoverPeer RPC. It returns the +// public ip of the caller back to the caller. This allows for peer-to-peer ip +// discovery without centralized services. +func (g *Gateway) discoverPeerIP(conn modules.PeerConn) error { + conn.SetDeadline(time.Now().Add(connStdDeadline)) + host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) + if err != nil { + return errors.AddContext(err, "failed to split host from port") + } + return encoding.WriteObject(conn, host) +} + +// managedIPFromPeers asks the peers the node is connected to for the node's public ip address. If not enough peers are available +func (g *Gateway) managedIPFromPeers() (string, error) { + for { + // Check for shutdown signal. + select { + case <-g.peerTG.StopChan(): + return "", errors.New("interrupted by shutdown") + default: + } + + // Get peers + g.mu.RLock() + peers := g.Peers() + g.mu.RUnlock() + + // Check if there are enough peers. Otherwise wait. + if len(peers) < minPeersForIPDiscovery { + select { + case <-time.After(peerDiscoveryRetryInterval): + case <-g.peerTG.StopChan(): + } + continue + } + + // Ask all the peers about our ip in parallel + returnChan := make(chan modules.NetAddress) + for _, peer := range peers { + go g.RPC(peer.NetAddress, "DiscoverIP", func(conn modules.PeerConn) error { + var address string + err := encoding.ReadObject(conn, &address, 100) + returnChan <- modules.NetAddress(address) + return err + }) + } + // Wait for their responses + addresses := make(map[string]int) + for i := 0; i < len(peers); i++ { + addr := <-returnChan + if addr.IsValid() == nil { + addresses[addr.Host()]++ + } + } + // If there haven't been enough successful responses we wait some time. + if len(addresses) < minPeersForIPDiscovery { + select { + case <-time.After(peerDiscoveryRetryInterval): + case <-g.peerTG.StopChan(): + } + continue + } + // If an address was returned by more than half the peers we consider + // it valid. + for addr, count := range addresses { + if count > len(addresses)/2 { + return addr, nil + } + } + // Otherwise we wait before trying again. + select { + case <-time.After(peerDiscoveryRetryInterval): + case <-g.peerTG.StopChan(): + } + } +} diff --git a/modules/gateway/ip_test.go b/modules/gateway/ip_test.go new file mode 100644 index 0000000000..8fbd7e7f74 --- /dev/null +++ b/modules/gateway/ip_test.go @@ -0,0 +1,45 @@ +package gateway + +import ( + "fmt" + "testing" + + "github.com/NebulousLabs/Sia/encoding" + "github.com/NebulousLabs/Sia/modules" +) + +// TestIpRPC tests the ip discovery RPC. +func TestIpRPC(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + t.Parallel() + + // Create gateways for testing. + g1 := newNamedTestingGateway(t, "1") + defer g1.Close() + g2 := newNamedTestingGateway(t, "2") + defer g2.Close() + + // Connect gateways. 
+ err := g1.Connect(g2.Address()) + if err != nil { + t.Fatal(err) + } + + // Call RPC + err = g1.RPC(g2.Address(), "DiscoverIP", func(conn modules.PeerConn) error { + var address string + err := encoding.ReadObject(conn, &address, 100) + if err != nil { + t.Error("failed to read object from response", err) + } + if address != g1.Address().Host() { + return fmt.Errorf("ip addresses don't match %v != %v", g1.Address().Host(), address) + } + return nil + }) + if err != nil { + t.Fatal("RPC failed", err) + } +} diff --git a/modules/gateway/upnp.go b/modules/gateway/upnp.go index eff290086a..dded58539d 100644 --- a/modules/gateway/upnp.go +++ b/modules/gateway/upnp.go @@ -63,6 +63,9 @@ func (g *Gateway) threadedLearnHostname() { if err != nil { host, err = myExternalIP() } + if err != nil { + host, err = g.managedIPFromPeers() + } if err != nil { g.log.Println("WARN: failed to discover external IP:", err) return From 34f7673360478590c78f7737956f3549751a579b Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 8 Mar 2018 17:50:53 -0500 Subject: [PATCH 005/212] added test for peer discovery --- modules/gateway/consts.go | 4 ++-- modules/gateway/ip.go | 28 +++++++++++++++++++--------- modules/gateway/ip_test.go | 35 +++++++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 11 deletions(-) diff --git a/modules/gateway/consts.go b/modules/gateway/consts.go index 35689282c4..8aecb1d141 100644 --- a/modules/gateway/consts.go +++ b/modules/gateway/consts.go @@ -220,8 +220,8 @@ var ( // accept a result. minPeersForIPDiscovery = build.Select(build.Var{ Standard: 5, - Dev: 1, - Testing: 1, + Dev: 3, + Testing: 2, }).(int) // peerDiscoveryRetryInterval is the time we wait when there were not diff --git a/modules/gateway/ip.go b/modules/gateway/ip.go index 39fa9a79bf..49ea36ae92 100644 --- a/modules/gateway/ip.go +++ b/modules/gateway/ip.go @@ -30,12 +30,10 @@ func (g *Gateway) managedIPFromPeers() (string, error) { return "", errors.New("interrupted by shutdown") default: } - // Get peers g.mu.RLock() peers := g.Peers() g.mu.RUnlock() - // Check if there are enough peers. Otherwise wait. if len(peers) < minPeersForIPDiscovery { select { @@ -44,27 +42,39 @@ func (g *Gateway) managedIPFromPeers() (string, error) { } continue } - // Ask all the peers about our ip in parallel - returnChan := make(chan modules.NetAddress) + returnChan := make(chan string) for _, peer := range peers { go g.RPC(peer.NetAddress, "DiscoverIP", func(conn modules.PeerConn) error { var address string err := encoding.ReadObject(conn, &address, 100) - returnChan <- modules.NetAddress(address) + if err != nil { + returnChan <- "" + g.log.Debugf("DEBUG: failed to receive ip address: %v", err) + return err + } + addr := net.ParseIP(address) + if addr == nil { + returnChan <- "" + g.log.Debug("DEBUG: failed to parse ip address") + return errors.New("failed to parse ip address") + } + returnChan <- addr.String() return err }) } // Wait for their responses addresses := make(map[string]int) + successfulResponses := 0 for i := 0; i < len(peers); i++ { addr := <-returnChan - if addr.IsValid() == nil { - addresses[addr.Host()]++ + if addr != "" { + addresses[addr]++ + successfulResponses++ } } // If there haven't been enough successful responses we wait some time. 
- if len(addresses) < minPeersForIPDiscovery { + if successfulResponses < minPeersForIPDiscovery { select { case <-time.After(peerDiscoveryRetryInterval): case <-g.peerTG.StopChan(): @@ -74,7 +84,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { // If an address was returned by more than half the peers we consider // it valid. for addr, count := range addresses { - if count > len(addresses)/2 { + if count > successfulResponses/2 { return addr, nil } } diff --git a/modules/gateway/ip_test.go b/modules/gateway/ip_test.go index 8fbd7e7f74..4278d1b3b4 100644 --- a/modules/gateway/ip_test.go +++ b/modules/gateway/ip_test.go @@ -43,3 +43,38 @@ func TestIpRPC(t *testing.T) { t.Fatal("RPC failed", err) } } + +// TestIpFromPeers test the functionality of managedIPFromPeers. +func TestIPFromPeers(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + t.Parallel() + + // Create gateways for testing. + g1 := newNamedTestingGateway(t, "1") + defer g1.Close() + g2 := newNamedTestingGateway(t, "2") + defer g2.Close() + g3 := newNamedTestingGateway(t, "3") + defer g2.Close() + + // Connect gateways. + err := g1.Connect(g2.Address()) + if err != nil { + t.Fatal(err) + } + err = g1.Connect(g3.Address()) + if err != nil { + t.Fatal(err) + } + + // Discover ip using the peers + host, err := g1.managedIPFromPeers() + if err != nil { + t.Fatal("failed to get ip", err) + } + if host != g1.Address().Host() { + t.Fatalf("ip should be %v but was %v", g1.Address().Host(), host) + } +} From 7840e9bf1debbd93d713f4fbcec22f5903415886 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 9 Mar 2018 09:29:03 -0500 Subject: [PATCH 006/212] put select statements in separate method --- modules/gateway/ip.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/modules/gateway/ip.go b/modules/gateway/ip.go index 49ea36ae92..7e5618c1ea 100644 --- a/modules/gateway/ip.go +++ b/modules/gateway/ip.go @@ -36,10 +36,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { g.mu.RUnlock() // Check if there are enough peers. Otherwise wait. if len(peers) < minPeersForIPDiscovery { - select { - case <-time.After(peerDiscoveryRetryInterval): - case <-g.peerTG.StopChan(): - } + g.waitForPeerDiscoverySignal() continue } // Ask all the peers about our ip in parallel @@ -75,10 +72,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { } // If there haven't been enough successful responses we wait some time. if successfulResponses < minPeersForIPDiscovery { - select { - case <-time.After(peerDiscoveryRetryInterval): - case <-g.peerTG.StopChan(): - } + g.waitForPeerDiscoverySignal() continue } // If an address was returned by more than half the peers we consider @@ -89,9 +83,15 @@ func (g *Gateway) managedIPFromPeers() (string, error) { } } // Otherwise we wait before trying again. - select { - case <-time.After(peerDiscoveryRetryInterval): - case <-g.peerTG.StopChan(): - } + g.waitForPeerDiscoverySignal() + } +} + +// waitForPeerDiscoverySignal blocks for the time specified in +// peerDiscoveryRetryInterval. 
+func (g *Gateway) waitForPeerDiscoverySignal() { + select { + case <-time.After(peerDiscoveryRetryInterval): + case <-g.peerTG.StopChan(): } } From d6fb709d441ceb4ace551c6b9ba4e04b80a27775 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 19 Mar 2018 12:01:05 -0400 Subject: [PATCH 007/212] add logging and wrap comment --- modules/gateway/ip.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/gateway/ip.go b/modules/gateway/ip.go index 7e5618c1ea..034e526e7e 100644 --- a/modules/gateway/ip.go +++ b/modules/gateway/ip.go @@ -21,7 +21,8 @@ func (g *Gateway) discoverPeerIP(conn modules.PeerConn) error { return encoding.WriteObject(conn, host) } -// managedIPFromPeers asks the peers the node is connected to for the node's public ip address. If not enough peers are available +// managedIPFromPeers asks the peers the node is connected to for the node's +// public ip address. If not enough peers are available func (g *Gateway) managedIPFromPeers() (string, error) { for { // Check for shutdown signal. @@ -79,6 +80,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { // it valid. for addr, count := range addresses { if count > successfulResponses/2 { + g.log.Println("ip successfully discovered using peers:", addr) return addr, nil } } From 305d962304f44274690123d9770a2378f7da1899 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 26 Mar 2018 10:22:07 -0400 Subject: [PATCH 008/212] rediscover ip after some time and make peer-to-peer discovery time out --- modules/gateway/consts.go | 24 ++++++++++++ modules/gateway/ip.go | 18 ++++++--- modules/gateway/upnp.go | 80 +++++++++++++++++++++++---------------- 3 files changed, 84 insertions(+), 38 deletions(-) diff --git a/modules/gateway/consts.go b/modules/gateway/consts.go index 8aecb1d141..b788d42c32 100644 --- a/modules/gateway/consts.go +++ b/modules/gateway/consts.go @@ -224,6 +224,30 @@ var ( Testing: 2, }).(int) + // timeoutIPDiscovery is the time after which managedIPFromPeers will fail + // if the ip couldn't be discovered successfully. + timeoutIPDiscovery = build.Select(build.Var{ + Standard: 5 * time.Minute, + Dev: 5 * time.Minute, + Testing: time.Minute, + }).(time.Duration) + + // rediscoverIPIntervalSuccess is the time that has to pass after a + // successful IP discovery before we rediscover the IP. + rediscoverIPIntervalSuccess = build.Select(build.Var{ + Standard: 6 * time.Hour, + Dev: 10 * time.Minute, + Testing: 30 * time.Second, + }).(time.Duration) + + // rediscoverIPIntervalFailure is the time that has to pass after a failed + // IP discovery before we try again. + rediscoverIPIntervalFailure = build.Select(build.Var{ + Standard: 30 * time.Minute, + Dev: 5 * time.Minute, + Testing: 10 * time.Second, + }).(time.Duration) + // peerDiscoveryRetryInterval is the time we wait when there were not // enough peers to determine our public ip address before trying again. peerDiscoveryRetryInterval = build.Select(build.Var{ diff --git a/modules/gateway/ip.go b/modules/gateway/ip.go index 034e526e7e..47cf4afe3c 100644 --- a/modules/gateway/ip.go +++ b/modules/gateway/ip.go @@ -22,13 +22,18 @@ func (g *Gateway) discoverPeerIP(conn modules.PeerConn) error { } // managedIPFromPeers asks the peers the node is connected to for the node's -// public ip address. If not enough peers are available +// public ip address. If not enough peers are available we wait a bit and try +// again. In the worst case managedIPFromPeers will fail after a few minutes. 
func (g *Gateway) managedIPFromPeers() (string, error) { + // Stop after timeoutIPDiscovery time. + timeout := time.After(timeoutIPDiscovery) for { - // Check for shutdown signal. + // Check for shutdown signal or timeout. select { case <-g.peerTG.StopChan(): return "", errors.New("interrupted by shutdown") + case <-timeout: + return "", errors.New("failed to discover ip in time") default: } // Get peers @@ -37,7 +42,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { g.mu.RUnlock() // Check if there are enough peers. Otherwise wait. if len(peers) < minPeersForIPDiscovery { - g.waitForPeerDiscoverySignal() + g.waitForPeerDiscoverySignal(timeout) continue } // Ask all the peers about our ip in parallel @@ -73,7 +78,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { } // If there haven't been enough successful responses we wait some time. if successfulResponses < minPeersForIPDiscovery { - g.waitForPeerDiscoverySignal() + g.waitForPeerDiscoverySignal(timeout) continue } // If an address was returned by more than half the peers we consider @@ -85,15 +90,16 @@ func (g *Gateway) managedIPFromPeers() (string, error) { } } // Otherwise we wait before trying again. - g.waitForPeerDiscoverySignal() + g.waitForPeerDiscoverySignal(timeout) } } // waitForPeerDiscoverySignal blocks for the time specified in // peerDiscoveryRetryInterval. -func (g *Gateway) waitForPeerDiscoverySignal() { +func (g *Gateway) waitForPeerDiscoverySignal(timeout <-chan time.Time) { select { case <-time.After(peerDiscoveryRetryInterval): case <-g.peerTG.StopChan(): + case <-timeout: } } diff --git a/modules/gateway/upnp.go b/modules/gateway/upnp.go index dded58539d..0620d8ec87 100644 --- a/modules/gateway/upnp.go +++ b/modules/gateway/upnp.go @@ -41,9 +41,7 @@ func myExternalIP() (string, error) { return strings.TrimSpace(string(buf)), nil } -// threadedLearnHostname discovers the external IP of the Gateway. Once the IP -// has been discovered, it registers the ShareNodes RPC to be called on new -// connections, advertising the IP to other nodes. +// threadedLearnHostname discovers the external IP of the Gateway regularly. func (g *Gateway) threadedLearnHostname() { if err := g.threads.Add(); err != nil { return @@ -54,36 +52,54 @@ func (g *Gateway) threadedLearnHostname() { return } - // try UPnP first, then fallback to myexternalip.com - var host string - d, err := upnp.Discover() - if err == nil { - host, err = d.ExternalIP() - } - if err != nil { - host, err = myExternalIP() + for { + // try UPnP first, then fallback to myexternalip.com and peer-to-peer + // discovery. + var host string + d, err := upnp.Discover() + if err == nil { + host, err = d.ExternalIP() + } + if err != nil { + host, err = myExternalIP() + } + if err != nil { + host, err = g.managedIPFromPeers() + } + if err != nil { + g.log.Println("WARN: failed to discover external IP:", err) + } + // If we were unable to discover our IP we try again later. + if err != nil { + select { + case <-g.threads.StopChan(): + return + case <-time.After(rediscoverIPIntervalFailure): + continue + } + } + + g.mu.RLock() + addr := modules.NetAddress(net.JoinHostPort(host, g.port)) + g.mu.RUnlock() + if err := addr.IsValid(); err != nil { + g.log.Printf("WARN: discovered hostname %q is invalid: %v", addr, err) + return + } + + g.mu.Lock() + g.myAddr = addr + g.mu.Unlock() + + g.log.Println("INFO: our address is", addr) + + // Rediscover the IP later in case it changed. 
+ select { + case <-g.threads.StopChan(): + return + case <-time.After(rediscoverIPIntervalSuccess): + } } - if err != nil { - host, err = g.managedIPFromPeers() - } - if err != nil { - g.log.Println("WARN: failed to discover external IP:", err) - return - } - - g.mu.RLock() - addr := modules.NetAddress(net.JoinHostPort(host, g.port)) - g.mu.RUnlock() - if err := addr.IsValid(); err != nil { - g.log.Printf("WARN: discovered hostname %q is invalid: %v", addr, err) - return - } - - g.mu.Lock() - g.myAddr = addr - g.mu.Unlock() - - g.log.Println("INFO: our address is", addr) } // threadedForwardPort adds a port mapping to the router. From 608440db16c63b8f4f1caf71d427b6d59d01f9d4 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 27 Mar 2018 12:22:11 -0400 Subject: [PATCH 009/212] Add dependency injection to siatest package --- modules/renter/contractor/contractor.go | 6 +-- modules/renter/contractor/contractor_test.go | 4 +- modules/renter/contractor/dependencies.go | 10 ++-- modules/renter/contractor/persist_test.go | 4 +- modules/renter/hostdb/hostdb.go | 6 +-- modules/renter/hostdb/hostdb_test.go | 2 +- modules/renter/hostdb/persist_test.go | 2 +- modules/renter/renter.go | 6 +-- modules/wallet/wallet.go | 5 +- modules/wallet/wallet_test.go | 2 +- node/node.go | 56 +++++++++++++++++++- 11 files changed, 78 insertions(+), 25 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 372e096fbe..44b0d69a01 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -204,11 +204,11 @@ func New(cs consensusSet, wallet walletShim, tpool transactionPool, hdb hostDB, } // Create Contractor using production dependencies. - return newContractor(cs, &walletBridge{w: wallet}, tpool, hdb, contractSet, newPersist(persistDir), logger, modules.ProdDependencies) + return NewCustomContractor(cs, &WalletBridge{W: wallet}, tpool, hdb, contractSet, NewPersist(persistDir), logger, modules.ProdDependencies) } -// newContractor creates a Contractor using the provided dependencies. -func newContractor(cs consensusSet, w wallet, tp transactionPool, hdb hostDB, contractSet *proto.ContractSet, p persister, l *persist.Logger, deps modules.Dependencies) (*Contractor, error) { +// NewCustomContractor creates a Contractor using the provided dependencies. +func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb hostDB, contractSet *proto.ContractSet, p persister, l *persist.Logger, deps modules.Dependencies) (*Contractor, error) { // Create the Contractor object. c := &Contractor{ cs: cs, diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go index 9afde79ec6..6a843a36ef 100644 --- a/modules/renter/contractor/contractor_test.go +++ b/modules/renter/contractor/contractor_test.go @@ -294,7 +294,7 @@ func TestAllowanceSpending(t *testing.T) { } var minerRewards types.Currency - w := c.wallet.(*walletBridge).w.(modules.Wallet) + w := c.wallet.(*WalletBridge).W.(modules.Wallet) txns, err := w.Transactions(0, 1000) if err != nil { t.Fatal(err) @@ -522,7 +522,7 @@ func (ws *testWalletShim) StartTransaction() modules.TransactionBuilder { // TestWalletBridge tests the walletBridge type. 
func TestWalletBridge(t *testing.T) { shim := new(testWalletShim) - bridge := walletBridge{shim} + bridge := WalletBridge{shim} bridge.NextAddress() if !shim.nextAddressCalled { t.Error("NextAddress was not called on the shim") diff --git a/modules/renter/contractor/dependencies.go b/modules/renter/contractor/dependencies.go index 5131a16fa6..69cd5a8b9d 100644 --- a/modules/renter/contractor/dependencies.go +++ b/modules/renter/contractor/dependencies.go @@ -64,12 +64,12 @@ type ( // Because wallet is not directly compatible with modules.Wallet (wrong // type signature for StartTransaction), we must provide a bridge type. -type walletBridge struct { - w walletShim +type WalletBridge struct { + W walletShim } -func (ws *walletBridge) NextAddress() (types.UnlockConditions, error) { return ws.w.NextAddress() } -func (ws *walletBridge) StartTransaction() transactionBuilder { return ws.w.StartTransaction() } +func (ws *WalletBridge) NextAddress() (types.UnlockConditions, error) { return ws.W.NextAddress() } +func (ws *WalletBridge) StartTransaction() transactionBuilder { return ws.W.StartTransaction() } // stdPersist implements the persister interface. The filename required by // these functions is internal to stdPersist. @@ -90,7 +90,7 @@ func (p *stdPersist) load(data *contractorPersist) error { return persist.LoadJSON(persistMeta, &data, p.filename) } -func newPersist(dir string) *stdPersist { +func NewPersist(dir string) *stdPersist { return &stdPersist{ filename: filepath.Join(dir, "contractor.json"), } diff --git a/modules/renter/contractor/persist_test.go b/modules/renter/contractor/persist_test.go index 26b7d757e4..695a4762d1 100644 --- a/modules/renter/contractor/persist_test.go +++ b/modules/renter/contractor/persist_test.go @@ -63,7 +63,7 @@ func TestSaveLoad(t *testing.T) { } // use stdPersist instead of mock - c.persist = newPersist(build.TempDir("contractor", t.Name())) + c.persist = NewPersist(build.TempDir("contractor", t.Name())) os.MkdirAll(build.TempDir("contractor", t.Name()), 0700) // save, clear, and reload @@ -115,7 +115,7 @@ func TestConvertPersist(t *testing.T) { // load the persist var p contractorPersist - err = newPersist(dir).load(&p) + err = NewPersist(dir).load(&p) if err != nil { t.Fatal(err) } diff --git a/modules/renter/hostdb/hostdb.go b/modules/renter/hostdb/hostdb.go index c5963acb6e..7d95a5f60d 100644 --- a/modules/renter/hostdb/hostdb.go +++ b/modules/renter/hostdb/hostdb.go @@ -64,13 +64,13 @@ func New(g modules.Gateway, cs modules.ConsensusSet, persistDir string) (*HostDB return nil, errNilCS } // Create HostDB using production dependencies. - return newHostDB(g, cs, persistDir, modules.ProdDependencies) + return NewCustomHostDB(g, cs, persistDir, modules.ProdDependencies) } -// newHostDB creates a HostDB using the provided dependencies. It loads the old +// NewCustomHostDB creates a HostDB using the provided dependencies. It loads the old // persistence data, spawns the HostDB's scanning threads, and subscribes it to // the consensusSet. -func newHostDB(g modules.Gateway, cs modules.ConsensusSet, persistDir string, deps modules.Dependencies) (*HostDB, error) { +func NewCustomHostDB(g modules.Gateway, cs modules.ConsensusSet, persistDir string, deps modules.Dependencies) (*HostDB, error) { // Create the HostDB object. 
hdb := &HostDB{ cs: cs, diff --git a/modules/renter/hostdb/hostdb_test.go b/modules/renter/hostdb/hostdb_test.go index d080674733..c191ca9b60 100644 --- a/modules/renter/hostdb/hostdb_test.go +++ b/modules/renter/hostdb/hostdb_test.go @@ -93,7 +93,7 @@ func newHDBTesterDeps(name string, deps modules.Dependencies) (*hdbTester, error if err != nil { return nil, err } - hdb, err := newHostDB(g, cs, filepath.Join(testDir, modules.RenterDir), deps) + hdb, err := NewCustomHostDB(g, cs, filepath.Join(testDir, modules.RenterDir), deps) if err != nil { return nil, err } diff --git a/modules/renter/hostdb/persist_test.go b/modules/renter/hostdb/persist_test.go index 28b0fab807..51f9d8edf2 100644 --- a/modules/renter/hostdb/persist_test.go +++ b/modules/renter/hostdb/persist_test.go @@ -71,7 +71,7 @@ func TestSaveLoad(t *testing.T) { if err != nil { t.Fatal(err) } - hdbt.hdb, err = newHostDB(hdbt.gateway, hdbt.cs, filepath.Join(hdbt.persistDir, modules.RenterDir), &quitAfterLoadDeps{}) + hdbt.hdb, err = NewCustomHostDB(hdbt.gateway, hdbt.cs, filepath.Join(hdbt.persistDir, modules.RenterDir), &quitAfterLoadDeps{}) if err != nil { t.Fatal(err) } diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 59459628fb..08629870ff 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -384,8 +384,8 @@ func validateSiapath(siapath string) error { // Enforce that Renter satisfies the modules.Renter interface. var _ modules.Renter = (*Renter)(nil) -// newRenter initializes a renter and returns it. -func newRenter(g modules.Gateway, cs modules.ConsensusSet, tpool modules.TransactionPool, hdb hostDB, hc hostContractor, persistDir string, deps modules.Dependencies) (*Renter, error) { +// NewCustomRenter initializes a renter and returns it. +func NewCustomRenter(g modules.Gateway, cs modules.ConsensusSet, tpool modules.TransactionPool, hdb hostDB, hc hostContractor, persistDir string, deps modules.Dependencies) (*Renter, error) { if g == nil { return nil, errNilGateway } @@ -472,5 +472,5 @@ func New(g modules.Gateway, cs modules.ConsensusSet, wallet modules.Wallet, tpoo return nil, err } - return newRenter(g, cs, tpool, hdb, hc, persistDir, modules.ProdDependencies) + return NewCustomRenter(g, cs, tpool, hdb, hc, persistDir, modules.ProdDependencies) } diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index d62c63f3a1..21310f6cc1 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -127,10 +127,11 @@ func (w *Wallet) Height() types.BlockHeight { // not loaded into the wallet during the call to 'new', but rather during the // call to 'Unlock'. func New(cs modules.ConsensusSet, tpool modules.TransactionPool, persistDir string) (*Wallet, error) { - return newWallet(cs, tpool, persistDir, modules.ProdDependencies) + return NewCustomWallet(cs, tpool, persistDir, modules.ProdDependencies) } -func newWallet(cs modules.ConsensusSet, tpool modules.TransactionPool, persistDir string, deps modules.Dependencies) (*Wallet, error) { +// NewCustomWallet creates a new wallet using custom dependencies. +func NewCustomWallet(cs modules.ConsensusSet, tpool modules.TransactionPool, persistDir string, deps modules.Dependencies) (*Wallet, error) { // Check for nil dependencies. 
if cs == nil { return nil, errNilConsensusSet diff --git a/modules/wallet/wallet_test.go b/modules/wallet/wallet_test.go index b9184d3e1a..31d3d8f84b 100644 --- a/modules/wallet/wallet_test.go +++ b/modules/wallet/wallet_test.go @@ -46,7 +46,7 @@ func createWalletTester(name string, deps modules.Dependencies) (*walletTester, if err != nil { return nil, err } - w, err := newWallet(cs, tp, filepath.Join(testdir, modules.WalletDir), deps) + w, err := NewCustomWallet(cs, tp, filepath.Join(testdir, modules.WalletDir), deps) if err != nil { return nil, err } diff --git a/node/node.go b/node/node.go index 1912c0e581..e166bb0745 100644 --- a/node/node.go +++ b/node/node.go @@ -18,8 +18,12 @@ import ( "github.com/NebulousLabs/Sia/modules/host" "github.com/NebulousLabs/Sia/modules/miner" "github.com/NebulousLabs/Sia/modules/renter" + "github.com/NebulousLabs/Sia/modules/renter/contractor" + "github.com/NebulousLabs/Sia/modules/renter/hostdb" + "github.com/NebulousLabs/Sia/modules/renter/proto" "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" + "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/errors" ) @@ -71,6 +75,13 @@ type NodeParams struct { TransactionPool modules.TransactionPool Wallet modules.Wallet + // Dependencies for each module supporting dependency injection. + ContractorDeps modules.Dependencies + ContractSetDeps modules.Dependencies + HostDBDeps modules.Dependencies + RenterDeps modules.Dependencies + WalletDeps modules.Dependencies + // The high level directory where all the persistence gets stored for the // moudles. Dir string @@ -205,7 +216,11 @@ func New(params NodeParams) (*Node, error) { if !params.CreateWallet { return nil, nil } - return wallet.New(cs, tp, filepath.Join(dir, modules.WalletDir)) + walletDeps := params.WalletDeps + if walletDeps == nil { + walletDeps = modules.ProdDependencies + } + return wallet.NewCustomWallet(cs, tp, filepath.Join(dir, modules.WalletDir), walletDeps) }() if err != nil { return nil, errors.Extend(err, errors.New("unable to create wallet")) @@ -239,7 +254,44 @@ func New(params NodeParams) (*Node, error) { if !params.CreateRenter { return nil, nil } - return renter.New(g, cs, w, tp, filepath.Join(dir, modules.RenterDir)) + contractorDeps := params.ContractorDeps + if contractorDeps == nil { + contractorDeps = modules.ProdDependencies + } + contractSetDeps := params.ContractSetDeps + if contractSetDeps == nil { + contractSetDeps = modules.ProdDependencies + } + hostDBDeps := params.HostDBDeps + if hostDBDeps == nil { + hostDBDeps = modules.ProdDependencies + } + renterDeps := params.RenterDeps + if renterDeps == nil { + renterDeps = modules.ProdDependencies + } + persistDir := filepath.Join(dir, modules.RenterDir) + + // HostDB + hdb, err := hostdb.NewCustomHostDB(g, cs, persistDir, hostDBDeps) + if err != nil { + return nil, err + } + // ContractSet + contractSet, err := proto.NewContractSet(filepath.Join(persistDir, "contracts"), contractSetDeps) + if err != nil { + return nil, err + } + // Contractor + logger, err := persist.NewFileLogger(filepath.Join(persistDir, "contractor.log")) + if err != nil { + return nil, err + } + hc, err := contractor.NewCustomContractor(cs, &contractor.WalletBridge{W: w}, tp, hdb, contractSet, contractor.NewPersist(persistDir), logger, contractorDeps) + if err != nil { + return nil, err + } + return renter.NewCustomRenter(g, cs, tp, hdb, hc, persistDir, renterDeps) }() if err != nil { return nil, errors.Extend(err, errors.New("unable to create 
renter")) From e4a94033eb122eeebb87841ae3c52c15a7075515 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 27 Mar 2018 12:34:25 -0400 Subject: [PATCH 010/212] fix build --- modules/renter/contractor/dependencies.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/modules/renter/contractor/dependencies.go b/modules/renter/contractor/dependencies.go index 69cd5a8b9d..840e3af28b 100644 --- a/modules/renter/contractor/dependencies.go +++ b/modules/renter/contractor/dependencies.go @@ -62,14 +62,19 @@ type ( } ) -// Because wallet is not directly compatible with modules.Wallet (wrong -// type signature for StartTransaction), we must provide a bridge type. +// WalletBridge is a bridge for the wallet because wallet is not directly +// compatible with modules.Wallet (wrong type signature for StartTransaction), +// we must provide a bridge type. type WalletBridge struct { W walletShim } +// NextAddress computes and returns the next address of the wallet. func (ws *WalletBridge) NextAddress() (types.UnlockConditions, error) { return ws.W.NextAddress() } -func (ws *WalletBridge) StartTransaction() transactionBuilder { return ws.W.StartTransaction() } + +// StartTransaction creates a new transactionBuilder that can be used to create +// and sign a transaction. +func (ws *WalletBridge) StartTransaction() transactionBuilder { return ws.W.StartTransaction() } // stdPersist implements the persister interface. The filename required by // these functions is internal to stdPersist. @@ -90,6 +95,7 @@ func (p *stdPersist) load(data *contractorPersist) error { return persist.LoadJSON(persistMeta, &data, p.filename) } +// NewPersist create a new stdPersist. func NewPersist(dir string) *stdPersist { return &stdPersist{ filename: filepath.Join(dir, "contractor.json"), From 5f745cf8e34880ceda6f86a10a4377cd50907232 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 28 Mar 2018 12:16:31 -0400 Subject: [PATCH 011/212] implement requested changes and cut the ip rediscovery timeouts in half --- modules/gateway/consts.go | 6 +++--- modules/gateway/ip.go | 16 +++------------- modules/gateway/upnp.go | 25 ++++++++++++------------- 3 files changed, 18 insertions(+), 29 deletions(-) diff --git a/modules/gateway/consts.go b/modules/gateway/consts.go index b788d42c32..a506c7da77 100644 --- a/modules/gateway/consts.go +++ b/modules/gateway/consts.go @@ -235,7 +235,7 @@ var ( // rediscoverIPIntervalSuccess is the time that has to pass after a // successful IP discovery before we rediscover the IP. rediscoverIPIntervalSuccess = build.Select(build.Var{ - Standard: 6 * time.Hour, + Standard: 3 * time.Hour, Dev: 10 * time.Minute, Testing: 30 * time.Second, }).(time.Duration) @@ -243,8 +243,8 @@ var ( // rediscoverIPIntervalFailure is the time that has to pass after a failed // IP discovery before we try again. rediscoverIPIntervalFailure = build.Select(build.Var{ - Standard: 30 * time.Minute, - Dev: 5 * time.Minute, + Standard: 15 * time.Minute, + Dev: 1 * time.Minute, Testing: 10 * time.Second, }).(time.Duration) diff --git a/modules/gateway/ip.go b/modules/gateway/ip.go index 47cf4afe3c..96a5a6def2 100644 --- a/modules/gateway/ip.go +++ b/modules/gateway/ip.go @@ -42,7 +42,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { g.mu.RUnlock() // Check if there are enough peers. Otherwise wait. 
if len(peers) < minPeersForIPDiscovery { - g.waitForPeerDiscoverySignal(timeout) + g.managedSleep(peerDiscoveryRetryInterval) continue } // Ask all the peers about our ip in parallel @@ -78,7 +78,7 @@ func (g *Gateway) managedIPFromPeers() (string, error) { } // If there haven't been enough successful responses we wait some time. if successfulResponses < minPeersForIPDiscovery { - g.waitForPeerDiscoverySignal(timeout) + g.managedSleep(peerDiscoveryRetryInterval) continue } // If an address was returned by more than half the peers we consider @@ -90,16 +90,6 @@ func (g *Gateway) managedIPFromPeers() (string, error) { } } // Otherwise we wait before trying again. - g.waitForPeerDiscoverySignal(timeout) - } -} - -// waitForPeerDiscoverySignal blocks for the time specified in -// peerDiscoveryRetryInterval. -func (g *Gateway) waitForPeerDiscoverySignal(timeout <-chan time.Time) { - select { - case <-time.After(peerDiscoveryRetryInterval): - case <-g.peerTG.StopChan(): - case <-timeout: + g.managedSleep(peerDiscoveryRetryInterval) } } diff --git a/modules/gateway/upnp.go b/modules/gateway/upnp.go index 0620d8ec87..a2719928bf 100644 --- a/modules/gateway/upnp.go +++ b/modules/gateway/upnp.go @@ -60,7 +60,7 @@ func (g *Gateway) threadedLearnHostname() { if err == nil { host, err = d.ExternalIP() } - if err != nil { + if !build.DEBUG && err != nil { host, err = myExternalIP() } if err != nil { @@ -70,13 +70,10 @@ func (g *Gateway) threadedLearnHostname() { g.log.Println("WARN: failed to discover external IP:", err) } // If we were unable to discover our IP we try again later. - if err != nil { - select { - case <-g.threads.StopChan(): - return - case <-time.After(rediscoverIPIntervalFailure): - continue - } + if err != nil && g.managedSleep(rediscoverIPIntervalFailure) { + continue + } else if err != nil { + return // shutdown interrupted sleep } g.mu.RLock() @@ -84,7 +81,11 @@ func (g *Gateway) threadedLearnHostname() { g.mu.RUnlock() if err := addr.IsValid(); err != nil { g.log.Printf("WARN: discovered hostname %q is invalid: %v", addr, err) - return + if err != nil && g.managedSleep(rediscoverIPIntervalFailure) { + continue + } else if err != nil { + return // shutdown interrupted sleep + } } g.mu.Lock() @@ -94,10 +95,8 @@ func (g *Gateway) threadedLearnHostname() { g.log.Println("INFO: our address is", addr) // Rediscover the IP later in case it changed. 
- select { - case <-g.threads.StopChan(): - return - case <-time.After(rediscoverIPIntervalSuccess): + if !g.managedSleep(rediscoverIPIntervalSuccess) { + return // shutdown interrupted sleep } } } From 6774cbf4394083909590e972c50b5f075d42d698 Mon Sep 17 00:00:00 2001 From: Marc Lester Tan Date: Wed, 28 Mar 2018 18:54:43 -0700 Subject: [PATCH 012/212] Renaming MaxAdjustmentDown and MaxAdjustmentUp to MaxTargetAdjustmentDown and MaxTargetAdjustmentUp respectively as suggested by DavidVorick here: https://github.com/NebulousLabs/Sia/pull/2786 --- cmd/siad/server.go | 8 ++++---- doc/API.md | 4 ++-- doc/api/Daemon.md | 4 ++-- modules/consensus/processedblock.go | 8 ++++---- types/constants.go | 26 +++++++++++++------------- 5 files changed, 25 insertions(+), 25 deletions(-) diff --git a/cmd/siad/server.go b/cmd/siad/server.go index 077ef6a853..de7221b5f4 100644 --- a/cmd/siad/server.go +++ b/cmd/siad/server.go @@ -79,8 +79,8 @@ type ( RootTarget types.Target `json:"roottarget"` RootDepth types.Target `json:"rootdepth"` - MaxAdjustmentUp *big.Rat `json:"maxadjustmentup"` - MaxAdjustmentDown *big.Rat `json:"maxadjustmentdown"` + MaxTargetAdjustmentUp *big.Rat `json:"maxtargetadjustmentup"` + MaxTargetAdjustmentDown *big.Rat `json:"maxtargetadjustmentdown"` SiacoinPrecision types.Currency `json:"siacoinprecision"` } @@ -349,8 +349,8 @@ func (srv *Server) daemonConstantsHandler(w http.ResponseWriter, _ *http.Request RootTarget: types.RootTarget, RootDepth: types.RootDepth, - MaxAdjustmentUp: types.MaxAdjustmentUp, - MaxAdjustmentDown: types.MaxAdjustmentDown, + MaxTargetAdjustmentUp: types.MaxTargetAdjustmentUp, + MaxTargetAdjustmentDown: types.MaxTargetAdjustmentDown, SiacoinPrecision: types.SiacoinPrecision, } diff --git a/doc/API.md b/doc/API.md index ca25716b6a..049738de66 100644 --- a/doc/API.md +++ b/doc/API.md @@ -128,8 +128,8 @@ returns the set of constants in use. "roottarget": [0,0,0,0,32,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], "rootdepth": [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255], - "maxadjustmentup": "5/2", - "maxadjustmentdown": "2/5", + "maxtargetadjustmentup": "5/2", + "maxtargetadjustmentdown": "2/5", "siacoinprecision": "1000000000000000000000000" // hastings per siacoin } diff --git a/doc/api/Daemon.md b/doc/api/Daemon.md index 32bccdc4db..fc5227105b 100644 --- a/doc/api/Daemon.md +++ b/doc/api/Daemon.md @@ -83,10 +83,10 @@ returns the set of constants in use. "rootdepth": [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255], // Largest allowed ratio between the old difficulty and the new difficulty. - "maxadjustmentup": "5/2", + "maxtargetadjustmentup": "5/2", // Smallest allowed ratio between the old difficulty and the new difficulty. - "maxadjustmentdown": "2/5", + "maxtargetadjustmentdown": "2/5", // Number of Hastings in one siacoin. "siacoinprecision": "1000000000000000000000000" // hastings per siacoin diff --git a/modules/consensus/processedblock.go b/modules/consensus/processedblock.go index fdbd2eeab1..710ca94e9c 100644 --- a/modules/consensus/processedblock.go +++ b/modules/consensus/processedblock.go @@ -91,10 +91,10 @@ func (cs *ConsensusSet) targetAdjustmentBase(blockMap *bolt.Bucket, pb *processe // of total work, which prevents certain classes of difficulty adjusting // attacks. 
func clampTargetAdjustment(base *big.Rat) *big.Rat { - if base.Cmp(types.MaxAdjustmentUp) > 0 { - return types.MaxAdjustmentUp - } else if base.Cmp(types.MaxAdjustmentDown) < 0 { - return types.MaxAdjustmentDown + if base.Cmp(types.MaxTargetAdjustmentUp) > 0 { + return types.MaxTargetAdjustmentUp + } else if base.Cmp(types.MaxTargetAdjustmentDown) < 0 { + return types.MaxTargetAdjustmentDown } return base } diff --git a/types/constants.go b/types/constants.go index 73fd1caf7c..585a6db9af 100644 --- a/types/constants.go +++ b/types/constants.go @@ -55,14 +55,14 @@ var ( // any transactions spending the payout. File contract payouts also are subject to // a maturity delay. MaturityDelay BlockHeight - // MaxAdjustmentDown restrict how much the block difficulty is allowed to + // MaxTargetAdjustmentDown restrict how much the block difficulty is allowed to // change in a single step, which is important to limit the effect of difficulty // raising and lowering attacks. - MaxAdjustmentDown *big.Rat - // MaxAdjustmentUp restrict how much the block difficulty is allowed to + MaxTargetAdjustmentDown *big.Rat + // MaxTargetAdjustmentUp restrict how much the block difficulty is allowed to // change in a single step, which is important to limit the effect of difficulty // raising and lowering attacks. - MaxAdjustmentUp *big.Rat + MaxTargetAdjustmentUp *big.Rat // MedianTimestampWindow tells us how many blocks to look back when calculating // the median timestamp over the previous n blocks. The timestamp of a block is // not allowed to be less than or equal to the median timestamp of the previous n @@ -138,11 +138,11 @@ func init() { GenesisTimestamp = Timestamp(1424139000) // Change as necessary. RootTarget = Target{0, 0, 2} // Standard developer CPUs will be able to mine blocks with the race library activated. - TargetWindow = 20 // Difficulty is adjusted based on prior 20 blocks. - MaxAdjustmentUp = big.NewRat(120, 100) // Difficulty adjusts quickly. - MaxAdjustmentDown = big.NewRat(100, 120) // Difficulty adjusts quickly. - FutureThreshold = 2 * 60 // 2 minutes. - ExtremeFutureThreshold = 4 * 60 // 4 minutes. + TargetWindow = 20 // Difficulty is adjusted based on prior 20 blocks. + MaxTargetAdjustmentUp = big.NewRat(120, 100) // Difficulty adjusts quickly. + MaxTargetAdjustmentDown = big.NewRat(100, 120) // Difficulty adjusts quickly. + FutureThreshold = 2 * 60 // 2 minutes. + ExtremeFutureThreshold = 4 * 60 // 4 minutes. MinimumCoinbase = 30e3 @@ -181,8 +181,8 @@ func init() { // only 1 second and testing mining should be happening substantially // faster than that. TargetWindow = 200 - MaxAdjustmentUp = big.NewRat(10001, 10000) - MaxAdjustmentDown = big.NewRat(9999, 10000) + MaxTargetAdjustmentUp = big.NewRat(10001, 10000) + MaxTargetAdjustmentDown = big.NewRat(9999, 10000) FutureThreshold = 3 // 3 seconds ExtremeFutureThreshold = 6 // 6 seconds @@ -256,8 +256,8 @@ func init() { // difficulty is adjusted four times as often. This does result in // greater difficulty oscillation, a tradeoff that was chosen to be // acceptable due to Sia's more vulnerable position as an altcoin. 
- MaxAdjustmentUp = big.NewRat(25, 10) - MaxAdjustmentDown = big.NewRat(10, 25) + MaxTargetAdjustmentUp = big.NewRat(25, 10) + MaxTargetAdjustmentDown = big.NewRat(10, 25) // Blocks will not be accepted if their timestamp is more than 3 hours // into the future, but will be accepted as soon as they are no longer From 97c613092cb7f040ab05d1db570141ac44c0c303 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 29 Mar 2018 17:14:03 -0400 Subject: [PATCH 013/212] reformat if clauses --- modules/gateway/upnp.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/modules/gateway/upnp.go b/modules/gateway/upnp.go index a2719928bf..54946e8d65 100644 --- a/modules/gateway/upnp.go +++ b/modules/gateway/upnp.go @@ -70,10 +70,11 @@ func (g *Gateway) threadedLearnHostname() { g.log.Println("WARN: failed to discover external IP:", err) } // If we were unable to discover our IP we try again later. - if err != nil && g.managedSleep(rediscoverIPIntervalFailure) { + if err != nil { + if !g.managedSleep(rediscoverIPIntervalFailure) { + return // shutdown interrupted sleep + } continue - } else if err != nil { - return // shutdown interrupted sleep } g.mu.RLock() @@ -81,10 +82,11 @@ func (g *Gateway) threadedLearnHostname() { g.mu.RUnlock() if err := addr.IsValid(); err != nil { g.log.Printf("WARN: discovered hostname %q is invalid: %v", addr, err) - if err != nil && g.managedSleep(rediscoverIPIntervalFailure) { + if err != nil { + if !g.managedSleep(rediscoverIPIntervalFailure) { + return // shutdown interrupted sleep + } continue - } else if err != nil { - return // shutdown interrupted sleep } } From 4c5d447898909b455da0167a04d6210d27784070 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 30 Mar 2018 15:08:05 -0400 Subject: [PATCH 014/212] add additional logging to applyHistory in wallet --- modules/wallet/database.go | 17 +++++++++++------ modules/wallet/update.go | 8 ++++---- types/encoding.go | 6 ++++-- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index 50606130bd..88826d0f0e 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -2,13 +2,14 @@ package wallet import ( "encoding/binary" - "errors" + "fmt" "reflect" "time" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/errors" "github.com/NebulousLabs/fastrand" "github.com/coreos/bbolt" @@ -249,7 +250,8 @@ func dbAddProcessedTransactionAddrs(tx *bolt.Tx, pt modules.ProcessedTransaction } for addr := range addrs { if err := dbAddAddrTransaction(tx, addr, txn); err != nil { - return err + return errors.AddContext(err, fmt.Sprintf("failed to add txn %v to address %v", + pt.TransactionID, addr)) } } return nil @@ -299,22 +301,25 @@ func dbAppendProcessedTransaction(tx *bolt.Tx, pt modules.ProcessedTransaction) b := tx.Bucket(bucketProcessedTransactions) key, err := b.NextSequence() if err != nil { - return err + return errors.AddContext(err, "failed to get next sequence from bucket") } // big-endian is used so that the keys are properly sorted keyBytes := make([]byte, 8) binary.BigEndian.PutUint64(keyBytes, key) if err = b.Put(keyBytes, encoding.Marshal(pt)); err != nil { - return err + return errors.AddContext(err, "failed to store processed txn in database") } // add used index to bucketProcessedTxnIndex if err = dbPutTransactionIndex(tx, pt.TransactionID, keyBytes); err != nil { - return err + return 
errors.AddContext(err, "failed to store txn index in database") } // also add this txid to the bucketAddrTransactions - return dbAddProcessedTransactionAddrs(tx, pt, key) + if err = dbAddProcessedTransactionAddrs(tx, pt, key); err != nil { + return errors.AddContext(err, "failed to add processed transaction to addresses in database") + } + return nil } func dbGetLastProcessedTransaction(tx *bolt.Tx) (pt modules.ProcessedTransaction, err error) { diff --git a/modules/wallet/update.go b/modules/wallet/update.go index 327309eeab..e0632849da 100644 --- a/modules/wallet/update.go +++ b/modules/wallet/update.go @@ -1,11 +1,11 @@ package wallet import ( - "fmt" "math" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/errors" "github.com/coreos/bbolt" ) @@ -396,14 +396,14 @@ func (w *Wallet) applyHistory(tx *bolt.Tx, cc modules.ConsensusChange) error { for _, block := range cc.AppliedBlocks { consensusHeight, err := dbGetConsensusHeight(tx) if err != nil { - return err + return errors.AddContext(err, "failed to consensus height") } // Increment the consensus height. if block.ID() != types.GenesisID { consensusHeight++ err = dbPutConsensusHeight(tx, consensusHeight) if err != nil { - return err + return errors.AddContext(err, "failed to store consensus height in database") } } @@ -411,7 +411,7 @@ func (w *Wallet) applyHistory(tx *bolt.Tx, cc modules.ConsensusChange) error { for _, pt := range pts { err := dbAppendProcessedTransaction(tx, pt) if err != nil { - return fmt.Errorf("could not put processed transaction: %v", err) + return errors.AddContext(err, "could not put processed transaction") } } } diff --git a/types/encoding.go b/types/encoding.go index f0b1683566..b27faad6b4 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -14,6 +14,7 @@ import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" + "github.com/NebulousLabs/errors" ) // sanityCheckWriter checks that the bytes written to w exactly match the @@ -176,7 +177,8 @@ func (d *decHelper) NextPrefix(elemSize uintptr) uint64 { return 0 } if n > 1<<31-1 || n*uint64(elemSize) > encoding.MaxSliceSize { - d.err = encoding.ErrSliceTooLarge + d.err = errors.AddContext(encoding.ErrSliceTooLarge, + fmt.Sprintf("n: %d, elemSize: %d", n, elemSize)) return 0 } return n @@ -323,7 +325,7 @@ func (cf CoveredFields) MarshalSia(w io.Writer) error { // MarshalSiaSize returns the encoded size of cf. func (cf CoveredFields) MarshalSiaSize() (size int) { - size += 1 // WholeTransaction + size++ // WholeTransaction size += 8 + len(cf.SiacoinInputs)*8 size += 8 + len(cf.SiacoinOutputs)*8 size += 8 + len(cf.FileContracts)*8 From 384e473af82526073a9e6b7c783c0cc672ce6703 Mon Sep 17 00:00:00 2001 From: David Vorick Date: Mon, 2 Apr 2018 14:17:19 -0400 Subject: [PATCH 015/212] add rollback feature to wallet db --- modules/wallet/database.go | 24 ++++++++++++++++++++++-- modules/wallet/update.go | 20 +++++++++++++++----- modules/wallet/wallet.go | 9 +++++++-- 3 files changed, 44 insertions(+), 9 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index 88826d0f0e..365890a192 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -82,25 +82,45 @@ func (w *Wallet) threadedDBUpdate() { return } w.mu.Lock() - w.syncDB() + err := w.syncDB() w.mu.Unlock() + if err != nil { + // If the database is having problems, we need to close it to + // protect it. 
This will likely cause a panic somewhere when another + // caller tries to access dbTx but it is nil. + w.log.Severe("ERROR: syncDB encountered an error. Closing the wallet db to protect the database, wallet may crash.", err) + w.db.Close() + return + } } } // syncDB commits the current global transaction and immediately begins a // new one. It must be called with a write-lock. -func (w *Wallet) syncDB() { +func (w *Wallet) syncDB() error { + // If the rollback flag is set, it means that somewhere in the middle of an + // atomic update there was a failure, and that failure needs to be rolled + // back. An error will be returned. + if w.dbRollback { + w.log.Severe("ERROR: database rollback requested. Performing rollback to protect the database and refusing to open a new dbTX, wallet may crash.") + w.dbTx.Rollback() + return errors.New("database unable to sync - rollback requested") + } + // commit the current tx err := w.dbTx.Commit() if err != nil { w.log.Severe("ERROR: failed to apply database update:", err) w.dbTx.Rollback() + return errors.AddContext(err, "unable to commit dbTx in syncDB") } // begin a new tx w.dbTx, err = w.db.Begin(true) if err != nil { w.log.Severe("ERROR: failed to start database update:", err) + return errors.AddContext(err, "unable to begin new dbTx in syncDB") } + return nil } // dbReset wipes and reinitializes a wallet database. diff --git a/modules/wallet/update.go b/modules/wallet/update.go index e0632849da..3e370ee489 100644 --- a/modules/wallet/update.go +++ b/modules/wallet/update.go @@ -121,6 +121,7 @@ func (w *Wallet) updateConfirmedSet(tx *bolt.Tx, cc modules.ConsensusChange) err } if err != nil { w.log.Severe("Could not update siacoin output:", err) + return err } } for _, diff := range cc.SiafundOutputDiffs { @@ -139,6 +140,7 @@ func (w *Wallet) updateConfirmedSet(tx *bolt.Tx, cc modules.ConsensusChange) err } if err != nil { w.log.Severe("Could not update siafund output:", err) + return err } } for _, diff := range cc.SiafundPoolDiffs { @@ -150,6 +152,7 @@ func (w *Wallet) updateConfirmedSet(tx *bolt.Tx, cc modules.ConsensusChange) err } if err != nil { w.log.Severe("Could not update siafund pool:", err) + return err } } return nil @@ -172,6 +175,7 @@ func (w *Wallet) revertHistory(tx *bolt.Tx, reverted []types.Block) error { w.log.Println("A wallet transaction has been reverted due to a reorg:", txid) if err := dbDeleteLastProcessedTransaction(tx); err != nil { w.log.Severe("Could not revert transaction:", err) + return err } } } @@ -182,6 +186,7 @@ func (w *Wallet) revertHistory(tx *bolt.Tx, reverted []types.Block) error { w.log.Println("Miner payout has been reverted due to a reorg:", block.MinerPayoutID(uint64(i)), "::", mp.Value.HumanString()) if err := dbDeleteLastProcessedTransaction(tx); err != nil { w.log.Severe("Could not revert transaction:", err) + return err } break // there will only ever be one miner transaction } @@ -431,21 +436,26 @@ func (w *Wallet) ProcessConsensusChange(cc modules.ConsensusChange) { defer w.mu.Unlock() if needRescan, err := w.updateLookahead(w.dbTx, cc); err != nil { - w.log.Println("ERROR: failed to update lookahead:", err) + w.log.Severe("ERROR: failed to update lookahead:", err) + w.dbRollback = true } else if needRescan { go w.threadedResetSubscriptions() } if err := w.updateConfirmedSet(w.dbTx, cc); err != nil { - w.log.Println("ERROR: failed to update confirmed set:", err) + w.log.Severe("ERROR: failed to update confirmed set:", err) + w.dbRollback = true } if err := w.revertHistory(w.dbTx, 
cc.RevertedBlocks); err != nil { - w.log.Println("ERROR: failed to revert consensus change:", err) + w.log.Severe("ERROR: failed to revert consensus change:", err) + w.dbRollback = true } if err := w.applyHistory(w.dbTx, cc); err != nil { - w.log.Println("ERROR: failed to apply consensus change:", err) + w.log.Severe("ERROR: failed to apply consensus change:", err) + w.dbRollback = true } if err := dbPutConsensusChangeID(w.dbTx, cc.ID); err != nil { - w.log.Println("ERROR: failed to update consensus change ID:", err) + w.log.Severe("ERROR: failed to update consensus change ID:", err) + w.dbRollback = true } if cc.Synced { diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index d62c63f3a1..d808b1fc9c 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -87,8 +87,13 @@ type Wallet struct { // transactions. A global db transaction is maintained in memory to avoid // excessive disk writes. Any operations involving dbTx must hold an // exclusive lock. - db *persist.BoltDatabase - dbTx *bolt.Tx + // + // If dbRollback is set, then when the database syncs it will perform a + // rollback instead of a commit. For safety reasons, the db will close and + // the wallet will close if a rollback is performed. + db *persist.BoltDatabase + dbRollback bool + dbTx *bolt.Tx persistDir string log *persist.Logger From 7141749000d4155b47c86d5de1754e6552f0d3c1 Mon Sep 17 00:00:00 2001 From: David Vorick Date: Mon, 2 Apr 2018 19:10:35 -0400 Subject: [PATCH 016/212] wrap the wallet in a lock when setting the settings --- modules/wallet/wallet.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index d808b1fc9c..eb76780666 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -254,5 +254,7 @@ func (w *Wallet) Settings() modules.WalletSettings { // SetSettings will update the settings for the wallet. func (w *Wallet) SetSettings(s modules.WalletSettings) { + w.mu.Lock() w.defragDisabled = s.NoDefrag + w.mu.Unlock() } From 592ebe53d80ca8e40ebd64a5e35c65e498bc2c8c Mon Sep 17 00:00:00 2001 From: David Vorick Date: Tue, 3 Apr 2018 10:27:01 -0400 Subject: [PATCH 017/212] remove redundant severe message --- modules/wallet/database.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index 365890a192..f4cc688ced 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -88,7 +88,7 @@ func (w *Wallet) threadedDBUpdate() { // If the database is having problems, we need to close it to // protect it. This will likely cause a panic somewhere when another // caller tries to access dbTx but it is nil. - w.log.Severe("ERROR: syncDB encountered an error. Closing the wallet db to protect the database, wallet may crash.", err) + w.log.Severe("ERROR: syncDB encountered an error. Closing database to protect wallet. wallet may crash:", err) w.db.Close() return } @@ -102,7 +102,6 @@ func (w *Wallet) syncDB() error { // atomic update there was a failure, and that failure needs to be rolled // back. An error will be returned. if w.dbRollback { - w.log.Severe("ERROR: database rollback requested. 
Performing rollback to protect the database and refusing to open a new dbTX, wallet may crash.") w.dbTx.Rollback() return errors.New("database unable to sync - rollback requested") } From 910979af13bdf8c4c87a7f0d13496bc56ef25161 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sat, 31 Mar 2018 21:25:15 -0400 Subject: [PATCH 018/212] include more info in 'too-large' decoding errors --- encoding/marshal.go | 29 ++++++++++++++++++++++------- encoding/marshal_test.go | 7 +++---- types/encoding.go | 6 ++---- 3 files changed, 27 insertions(+), 15 deletions(-) diff --git a/encoding/marshal.go b/encoding/marshal.go index c8df591d6f..e6d172264d 100644 --- a/encoding/marshal.go +++ b/encoding/marshal.go @@ -24,12 +24,27 @@ const ( var ( errBadPointer = errors.New("cannot decode into invalid pointer") - // ErrObjectTooLarge is an error when encoded object exceeds size limit. - ErrObjectTooLarge = errors.New("encoded object exceeds size limit") - // ErrSliceTooLarge is an error when encoded slice is too large. - ErrSliceTooLarge = errors.New("encoded slice is too large") ) +// ErrObjectTooLarge is an error when encoded object exceeds size limit. +type ErrObjectTooLarge uint64 + +// Error implements the error interface. +func (e ErrObjectTooLarge) Error() string { + return fmt.Sprintf("encoded object (>= %v bytes) exceeds size limit (%v bytes)", uint64(e), uint64(MaxObjectSize)) +} + +// ErrSliceTooLarge is an error when encoded slice is too large. +type ErrSliceTooLarge struct { + Len uint64 + ElemSize uint64 +} + +// Error implements the error interface. +func (e ErrSliceTooLarge) Error() string { + return fmt.Sprintf("encoded slice (%v*%v bytes) exceeds size limit (%v bytes)", e.Len, e.ElemSize, uint64(MaxSliceSize)) +} + type ( // A SiaMarshaler can encode and write itself to a stream. 
SiaMarshaler interface { @@ -195,7 +210,7 @@ func (d *Decoder) Read(p []byte) (int, error) { n, err := d.r.Read(p) // enforce an absolute maximum size limit if d.n += n; d.n > MaxObjectSize { - panic(ErrObjectTooLarge) + panic(ErrObjectTooLarge(d.n)) } return n, err } @@ -243,7 +258,7 @@ func (d *Decoder) readN(n int) []byte { panic(io.ErrUnexpectedEOF) } if d.n += n; d.n > MaxObjectSize { - panic(ErrObjectTooLarge) + panic(ErrObjectTooLarge(d.n)) } return b } @@ -306,7 +321,7 @@ func (d *Decoder) decode(val reflect.Value) { // sanity-check the sliceLen, otherwise you can crash a peer by making // them allocate a massive slice if sliceLen > 1<<31-1 || sliceLen*uint64(val.Type().Elem().Size()) > MaxSliceSize { - panic(ErrSliceTooLarge) + panic(ErrSliceTooLarge{Len: sliceLen, ElemSize: uint64(val.Type().Elem().Size())}) } else if sliceLen == 0 { return } diff --git a/encoding/marshal_test.go b/encoding/marshal_test.go index 594c5ecd3d..d8603a0852 100644 --- a/encoding/marshal_test.go +++ b/encoding/marshal_test.go @@ -148,20 +148,20 @@ func TestDecode(t *testing.T) { // big slice (larger than MaxSliceSize) err = Unmarshal(EncUint64(MaxSliceSize+1), new([]byte)) - if err == nil || err.Error() != "could not decode type []uint8: encoded slice is too large" { + if err == nil || !strings.Contains(err.Error(), "exceeds size limit") { t.Error("expected large slice error, got", err) } // massive slice (larger than MaxInt32) err = Unmarshal(EncUint64(1<<32), new([]byte)) - if err == nil || err.Error() != "could not decode type []uint8: encoded slice is too large" { + if err == nil || !strings.Contains(err.Error(), "exceeds size limit") { t.Error("expected large slice error, got", err) } // many small slices (total larger than maxDecodeLen) bigSlice := strings.Split(strings.Repeat("0123456789abcdefghijklmnopqrstuvwxyz", (MaxSliceSize/16)-1), "0") err = Unmarshal(Marshal(bigSlice), new([]string)) - if err == nil || err.Error() != "could not decode type []string: encoded object exceeds size limit" { + if err == nil || !strings.Contains(err.Error(), "exceeds size limit") { t.Error("expected size limit error, got", err) } @@ -178,7 +178,6 @@ func TestDecode(t *testing.T) { if err == nil || err.Error() != "could not decode type [3]uint8: EOF" { t.Error("expected EOF error, got", err) } - } // TestMarshalUnmarshal tests the Marshal and Unmarshal functions, which are diff --git a/types/encoding.go b/types/encoding.go index b27faad6b4..045b4063e6 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -14,7 +14,6 @@ import ( "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" - "github.com/NebulousLabs/errors" ) // sanityCheckWriter checks that the bytes written to w exactly match the @@ -133,7 +132,7 @@ func (d *decHelper) Read(p []byte) (int, error) { } d.n += n if d.n > encoding.MaxObjectSize { - d.err = encoding.ErrObjectTooLarge + d.err = encoding.ErrObjectTooLarge(d.n) } return n, d.err } @@ -177,8 +176,7 @@ func (d *decHelper) NextPrefix(elemSize uintptr) uint64 { return 0 } if n > 1<<31-1 || n*uint64(elemSize) > encoding.MaxSliceSize { - d.err = errors.AddContext(encoding.ErrSliceTooLarge, - fmt.Sprintf("n: %d, elemSize: %d", n, elemSize)) + d.err = encoding.ErrSliceTooLarge{Len: n, ElemSize: uint64(elemSize)} return 0 } return n From 142fdea9d92f90fe0a8a25fb7eb3cf33412a13a1 Mon Sep 17 00:00:00 2001 From: Thomas Bennett Date: Thu, 5 Apr 2018 10:55:44 -0700 Subject: [PATCH 019/212] reduce synchronize timeouts and increase block batch size 
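Abandon stalled peers sooner while letting each iteration of the initial
blockchain download carry more blocks: the RelayHeader, SendBlk, and SendBlocks
timeouts shrink from minutes to seconds, and MaxCatchUpBlocks grows from 10 to
25 blocks per SendBlocks exchange.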
--- modules/consensus/synchronize.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/consensus/synchronize.go b/modules/consensus/synchronize.go index 049e34fd15..43e60c3471 100644 --- a/modules/consensus/synchronize.go +++ b/modules/consensus/synchronize.go @@ -39,7 +39,7 @@ var ( // the consensus set in a single iteration during the initial blockchain // download. MaxCatchUpBlocks = build.Select(build.Var{ - Standard: types.BlockHeight(10), + Standard: types.BlockHeight(25), Dev: types.BlockHeight(50), Testing: types.BlockHeight(3), }).(types.BlockHeight) @@ -57,21 +57,21 @@ var ( // relayHeaderTimeout is the timeout for the RelayHeader RPC. relayHeaderTimeout = build.Select(build.Var{ - Standard: 3 * time.Minute, + Standard: 60 * time.Second, Dev: 20 * time.Second, Testing: 3 * time.Second, }).(time.Duration) // sendBlkTimeout is the timeout for the SendBlk RPC. sendBlkTimeout = build.Select(build.Var{ - Standard: 4 * time.Minute, + Standard: 90 * time.Second, Dev: 30 * time.Second, Testing: 4 * time.Second, }).(time.Duration) // sendBlocksTimeout is the timeout for the SendBlocks RPC. sendBlocksTimeout = build.Select(build.Var{ - Standard: 5 * time.Minute, + Standard: 120 * time.Second, Dev: 40 * time.Second, Testing: 5 * time.Second, }).(time.Duration) From 49704703740e12aa59abda5606346faac411f0b4 Mon Sep 17 00:00:00 2001 From: Thomas Bennett Date: Fri, 6 Apr 2018 10:03:21 -0700 Subject: [PATCH 020/212] Making it more conservative --- modules/consensus/synchronize.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/consensus/synchronize.go b/modules/consensus/synchronize.go index 43e60c3471..e6f6eb9da7 100644 --- a/modules/consensus/synchronize.go +++ b/modules/consensus/synchronize.go @@ -39,7 +39,7 @@ var ( // the consensus set in a single iteration during the initial blockchain // download. MaxCatchUpBlocks = build.Select(build.Var{ - Standard: types.BlockHeight(25), + Standard: types.BlockHeight(10), Dev: types.BlockHeight(50), Testing: types.BlockHeight(3), }).(types.BlockHeight) @@ -71,7 +71,7 @@ var ( // sendBlocksTimeout is the timeout for the SendBlocks RPC. sendBlocksTimeout = build.Select(build.Var{ - Standard: 120 * time.Second, + Standard: 180 * time.Second, Dev: 40 * time.Second, Testing: 5 * time.Second, }).(time.Duration) From 111b744e7651ae2d86a595600f373615aa22e39c Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Fri, 6 Apr 2018 20:00:19 +0200 Subject: [PATCH 021/212] Extend StorageObligation struct. --- modules/host.go | 21 ++++++++++++++++++++- modules/host/storageobligations.go | 13 +++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/modules/host.go b/modules/host.go index ad33ec44ed..0257e68863 100644 --- a/modules/host.go +++ b/modules/host.go @@ -111,10 +111,29 @@ type ( // StorageObligation contains information about a storage obligation that // the host has accepted. 
StorageObligation struct { + ObligationId types.FileContractID `json:"obligationid"` + FileSize uint64 `json:"filesize"` + SectorRootsCount uint64 `json:"sectorrootscount"` + ContractCost types.Currency `json:"contractcost"` + LockedCollateral types.Currency `json:"lockedcollateral"` + PotentialDownloadRevenue types.Currency `json:"potentialdownloadrevenue"` + PotentialStorageRevenue types.Currency `json:"potentialstoragerevenue"` + PotentialUploadRevenue types.Currency `json:"potentialuploadrevenue"` + RiskedCollateral types.Currency `json:"riskedcollateral"` + TransactionFeesAdded types.Currency `json:"transactionfeesadded"` + + // The negotiation height specifies the block height at which the file + // contract was negotiated. The expiration height and the proof deadline + // are equal to the window start and window end. Between the expiration height + // and the proof deadline, the host must submit the storage proof. NegotiationHeight types.BlockHeight `json:"negotiationheight"` + ExpirationHeight types.BlockHeight `json:"expirationheight"` + ProofDeadLine types.BlockHeight `json:"proofdeadline"` + // Variables indicating whether the critical transactions in a storage + // obligation have been confirmed on the blockchain. OriginConfirmed bool `json:"originconfirmed"` - RevisionConstructed bool `json:"revisionconstructed"` + RevisionConstructed bool `json:"revisioncontructed"` RevisionConfirmed bool `json:"revisionconfirmed"` ProofConstructed bool `json:"proofconstructed"` ProofConfirmed bool `json:"proofconfirmed"` diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 8eb3cb9805..b52ab8e12f 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -870,7 +870,20 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { return build.ExtendErr("unable to unmarshal storage obligation:", err) } mso := modules.StorageObligation{ + ObligationId: so.id(), + FileSize: so.fileSize(), + SectorRootsCount: uint64(len(so.SectorRoots)), + ContractCost: so.ContractCost, + LockedCollateral: so.LockedCollateral, + PotentialDownloadRevenue: so.PotentialDownloadRevenue, + PotentialStorageRevenue: so.PotentialStorageRevenue, + PotentialUploadRevenue: so.PotentialUploadRevenue, + RiskedCollateral: so.RiskedCollateral, + TransactionFeesAdded: so.TransactionFeesAdded, + NegotiationHeight: so.NegotiationHeight, + ExpirationHeight: so.expiration(), + ProofDeadLine: so.proofDeadline(), OriginConfirmed: so.OriginConfirmed, RevisionConstructed: so.RevisionConstructed, From f68aba0544e88cb41f99e2e1e79b40b711d79707 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Fri, 6 Apr 2018 20:01:42 +0200 Subject: [PATCH 022/212] Add type to return data. --- node/api/host.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/node/api/host.go b/node/api/host.go index 9f139b7c54..c886db7ddd 100644 --- a/node/api/host.go +++ b/node/api/host.go @@ -24,6 +24,12 @@ var ( ) type ( + // ContractInfoGET contains the information that is returned after a GET request + // to /host/contracts - information for the host about stored obligations. + ContractInfoGET struct { + Contracts []modules.StorageObligation `json:"obligations"` + } + // HostGET contains the information that is returned after a GET request to // /host - a bunch of information about the status of the host. 
HostGET struct { From cdefdf9aba80374a13fe04154a758af9c1e9f0f3 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Fri, 6 Apr 2018 20:02:48 +0200 Subject: [PATCH 023/212] Add new handler for /host/contracts --- node/api/host.go | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/node/api/host.go b/node/api/host.go index c886db7ddd..cd274d8400 100644 --- a/node/api/host.go +++ b/node/api/host.go @@ -67,6 +67,44 @@ func folderIndex(folderPath string, storageFolders []modules.StorageFolderMetada return -1, errStorageFolderNotFound } +// hostContractInfoHandler handles the API call to get the contract information of the host. +// Information is retrieved via the storage obligations from the host database. +// TODO: filters are hard coded. Adding or removing storage obligation statuses will +// currently break the API. +func (api *API) hostContractInfoHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { + statusStr := req.FormValue("status") + if statusStr == "" { + WriteError(w, Error{"Status must be provided to a /host/contracts call."}, http.StatusBadRequest) + return + } + var filter uint64 + switch statusStr { + case "unresolved": + filter = 0 + case "rejected": + filter = 1 + case "succeeded": + filter = 2 + case "failed": + filter = 3 + case "all": + filter = 4 + default: + WriteError(w, Error{"Unable to parse contract status."}, http.StatusBadRequest) + return + } + sos := []modules.StorageObligation{} + for _, so := range api.host.StorageObligations() { + if so.ObligationStatus == filter || filter == 4 { + sos = append(sos, so) + } + } + cg := ContractInfoGET{ + Contracts: sos, + } + WriteJSON(w, cg) +} + // hostHandlerGET handles GET requests to the /host API endpoint, returning key // information about the host. func (api *API) hostHandlerGET(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { From c5376bc8f9a2296ff6f405c6c8f2d384cc990363 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Fri, 6 Apr 2018 20:04:08 +0200 Subject: [PATCH 024/212] Add route for /host/contracts --- node/api/routes.go | 1 + 1 file changed, 1 insertion(+) diff --git a/node/api/routes.go b/node/api/routes.go index f7cc5419b0..5e41ac9467 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -45,6 +45,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.GET("/host", api.hostHandlerGET) // Get the host status. router.POST("/host", RequirePassword(api.hostHandlerPOST, requiredPassword)) // Change the settings of the host. router.POST("/host/announce", RequirePassword(api.hostAnnounceHandler, requiredPassword)) // Announce the host to the network. + router.GET("/host/contracts", api.hostContractInfoHandler) // Get info about contracts. router.GET("/host/estimatescore", api.hostEstimateScoreGET) // Calls pertaining to the storage manager that the host uses. From d8240fcae00cd5ac9b480ea9186baa7ea8c5697e Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Fri, 6 Apr 2018 21:05:05 +0200 Subject: [PATCH 025/212] Fix typo --- modules/host.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/host.go b/modules/host.go index 0257e68863..4e89a98d34 100644 --- a/modules/host.go +++ b/modules/host.go @@ -133,7 +133,7 @@ type ( // Variables indicating whether the critical transactions in a storage // obligation have been confirmed on the blockchain. 
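 		// Origin refers to the initial file contract transaction, Revision to the
 		// most recent file contract revision, and Proof to the storage proof.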
OriginConfirmed bool `json:"originconfirmed"` - RevisionConstructed bool `json:"revisioncontructed"` + RevisionConstructed bool `json:"revisionconstructed"` RevisionConfirmed bool `json:"revisionconfirmed"` ProofConstructed bool `json:"proofconstructed"` ProofConfirmed bool `json:"proofconfirmed"` From f482acb90d64df3f32f93afd5e9f900596ecbb31 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sat, 7 Apr 2018 18:09:04 -0400 Subject: [PATCH 026/212] only add addrs relevant to the wallet --- modules/wallet/database.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index f4cc688ced..268d66222a 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -262,10 +262,14 @@ func dbAddAddrTransaction(tx *bolt.Tx, addr types.UnlockHash, txn uint64) error func dbAddProcessedTransactionAddrs(tx *bolt.Tx, pt modules.ProcessedTransaction, txn uint64) error { addrs := make(map[types.UnlockHash]struct{}) for _, input := range pt.Inputs { - addrs[input.RelatedAddress] = struct{}{} + if input.WalletAddress { + addrs[input.RelatedAddress] = struct{}{} + } } for _, output := range pt.Outputs { - addrs[output.RelatedAddress] = struct{}{} + if output.WalletAddress { + addrs[output.RelatedAddress] = struct{}{} + } } for addr := range addrs { if err := dbAddAddrTransaction(tx, addr, txn); err != nil { From 07d923773ed49ac49e14d66c3d880805f598c110 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sat, 7 Apr 2018 19:26:24 -0400 Subject: [PATCH 027/212] only ignore the void address --- modules/wallet/database.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index 268d66222a..757a14d39d 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -262,16 +262,17 @@ func dbAddAddrTransaction(tx *bolt.Tx, addr types.UnlockHash, txn uint64) error func dbAddProcessedTransactionAddrs(tx *bolt.Tx, pt modules.ProcessedTransaction, txn uint64) error { addrs := make(map[types.UnlockHash]struct{}) for _, input := range pt.Inputs { - if input.WalletAddress { - addrs[input.RelatedAddress] = struct{}{} - } + addrs[input.RelatedAddress] = struct{}{} } for _, output := range pt.Outputs { - if output.WalletAddress { - addrs[output.RelatedAddress] = struct{}{} - } + addrs[output.RelatedAddress] = struct{}{} } for addr := range addrs { + if addr == (types.UnlockHash{}) { + // skip the void address; it's associated with too many + // transactions, which is problematic for large wallets + continue + } if err := dbAddAddrTransaction(tx, addr, txn); err != nil { return errors.AddContext(err, fmt.Sprintf("failed to add txn %v to address %v", pt.TransactionID, addr)) From 51c6b0e5d97b0d28fb914b839d3d9c0ad05c5780 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sat, 7 Apr 2018 19:41:27 -0400 Subject: [PATCH 028/212] skip miner fees, not void address --- modules/wallet/database.go | 9 ++++----- modules/wallet/update.go | 5 +++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index 757a14d39d..1eba37d439 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -265,14 +265,13 @@ func dbAddProcessedTransactionAddrs(tx *bolt.Tx, pt modules.ProcessedTransaction addrs[input.RelatedAddress] = struct{}{} } for _, output := range pt.Outputs { + // miner fees don't have an address, so skip them + if output.FundType == types.SpecifierMinerFee { + continue + } 
addrs[output.RelatedAddress] = struct{}{} } for addr := range addrs { - if addr == (types.UnlockHash{}) { - // skip the void address; it's associated with too many - // transactions, which is problematic for large wallets - continue - } if err := dbAddAddrTransaction(tx, addr, txn); err != nil { return errors.AddContext(err, fmt.Sprintf("failed to add txn %v to address %v", pt.TransactionID, addr)) diff --git a/modules/wallet/update.go b/modules/wallet/update.go index 3e370ee489..9b7b6e93d6 100644 --- a/modules/wallet/update.go +++ b/modules/wallet/update.go @@ -383,8 +383,9 @@ func (w *Wallet) computeProcessedTransactionsFromBlock(tx *bolt.Tx, block types. for _, fee := range txn.MinerFees { pt.Outputs = append(pt.Outputs, modules.ProcessedOutput{ - FundType: types.SpecifierMinerFee, - Value: fee, + FundType: types.SpecifierMinerFee, + MaturityHeight: consensusHeight + types.MaturityDelay, + Value: fee, }) } pts = append(pts, pt) From a346582f30165e17abc38da6b479c20244fb1ea0 Mon Sep 17 00:00:00 2001 From: David Vorick Date: Sat, 7 Apr 2018 20:40:33 -0400 Subject: [PATCH 029/212] update 'make cover' --- Makefile | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 75d865cbb3..34dd6b8701 100644 --- a/Makefile +++ b/Makefile @@ -86,7 +86,7 @@ release-race: # clean removes all directories that get automatically created during # development. clean: - rm -rf release doc/whitepaper.aux doc/whitepaper.log doc/whitepaper.pdf + rm -rf cover doc/whitepaper.aux doc/whitepaper.log doc/whitepaper.pdf release test: go test -short -tags='debug testing netgo' -timeout=5s $(pkgs) -run=$(run) @@ -103,13 +103,18 @@ test-mem: bench: clean fmt go test -tags='debug testing netgo' -timeout=500s -run=XXX -bench=$(run) $(pkgs) cover: clean + @mkdir -p cover/cmd @mkdir -p cover/modules @mkdir -p cover/modules/renter + @mkdir -p cover/modules/renter/hostdb @mkdir -p cover/modules/host - @for package in $(pkgs); do \ - go test -tags='testing debug' -timeout=500s -covermode=atomic -coverprofile=cover/$$package.out ./$$package -run=$(run) \ - && go tool cover -html=cover/$$package.out -o=cover/$$package.html \ - && rm cover/$$package.out ; \ + @mkdir -p cover/node + @mkdir -p cover/node/api + @mkdir -p cover/siatest + @for package in $(pkgs); do \ + go test -tags='testing debug netgo' -timeout=500s -covermode=atomic -coverprofile=cover/$$package.out ./$$package -run=$(run) \ + && go tool cover -html=cover/$$package.out -o=cover/$$package.html \ + && rm cover/$$package.out ; \ done # whitepaper builds the whitepaper from whitepaper.tex. 
pdflatex has to be From 0302669e18e1068f534fa2eddbac9fc2ce271235 Mon Sep 17 00:00:00 2001 From: David Vorick Date: Mon, 9 Apr 2018 01:58:05 -0400 Subject: [PATCH 030/212] make directory in loop instead of all beforehand --- Makefile | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/Makefile b/Makefile index 34dd6b8701..002bf1b673 100644 --- a/Makefile +++ b/Makefile @@ -103,18 +103,12 @@ test-mem: bench: clean fmt go test -tags='debug testing netgo' -timeout=500s -run=XXX -bench=$(run) $(pkgs) cover: clean - @mkdir -p cover/cmd - @mkdir -p cover/modules - @mkdir -p cover/modules/renter - @mkdir -p cover/modules/renter/hostdb - @mkdir -p cover/modules/host - @mkdir -p cover/node - @mkdir -p cover/node/api - @mkdir -p cover/siatest - @for package in $(pkgs); do \ - go test -tags='testing debug netgo' -timeout=500s -covermode=atomic -coverprofile=cover/$$package.out ./$$package -run=$(run) \ - && go tool cover -html=cover/$$package.out -o=cover/$$package.html \ - && rm cover/$$package.out ; \ + @mkdir -p cover + @for package in $(pkgs); do \ + mkdir -p `dirname cover/$$package` \ + && go test -tags='testing debug netgo' -timeout=500s -covermode=atomic -coverprofile=cover/$$package.out ./$$package -run=$(run) \ + && go tool cover -html=cover/$$package.out -o=cover/$$package.html \ + && rm cover/$$package.out ; \ done # whitepaper builds the whitepaper from whitepaper.tex. pdflatex has to be From 6f5eccd49aff7fad048680f7b4b0d2e4b07425dc Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 15 Mar 2018 16:32:51 -0400 Subject: [PATCH 031/212] Implement io.ReadSeeker interface for downloader --- cmd/siad/main.go | 2 +- modules/renter.go | 4 + modules/renter/downloadchunk.go | 16 +++- modules/renter/downloadheap.go | 31 +++++++ modules/renter/renter.go | 5 ++ modules/renter/streamer.go | 146 ++++++++++++++++++++++++++++++++ node/api/renter.go | 14 +++ node/api/routes.go | 1 + siatest/renter/renter_test.go | 1 + 9 files changed, 218 insertions(+), 2 deletions(-) create mode 100644 modules/renter/streamer.go diff --git a/cmd/siad/main.go b/cmd/siad/main.go index 51fc037fda..cdba4518c4 100644 --- a/cmd/siad/main.go +++ b/cmd/siad/main.go @@ -157,7 +157,7 @@ func main() { }) // Set default values, which have the lowest priority. - root.Flags().StringVarP(&globalConfig.Siad.RequiredUserAgent, "agent", "", "Sia-Agent", "required substring for the user agent") + // root.Flags().StringVarP(&globalConfig.Siad.RequiredUserAgent, "agent", "", "Sia-Agent", "required substring for the user agent") root.Flags().StringVarP(&globalConfig.Siad.HostAddr, "host-addr", "", ":9982", "which port the host listens on") root.Flags().StringVarP(&globalConfig.Siad.ProfileDir, "profile-directory", "", "profiles", "location of the profiling directory") root.Flags().StringVarP(&globalConfig.Siad.APIaddr, "api-addr", "", "localhost:9980", "which host:port the API server listens on") diff --git a/modules/renter.go b/modules/renter.go index 87cbc2f07d..1506580677 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -343,6 +343,10 @@ type Renter interface { // ShareFilesAscii creates an ASCII-encoded '.sia' file. ShareFilesASCII(paths []string) (asciiSia string, err error) + // Streamer creates a io.ReadSeeker that can be used to stream downloads + // from the Sia network. + Streamer(siaPath string) (string, io.ReadSeeker, error) + // Upload uploads a file using the input parameters. 
Upload(FileUploadParams) error } diff --git a/modules/renter/downloadchunk.go b/modules/renter/downloadchunk.go index 4d6213cde4..c3f48fc9ad 100644 --- a/modules/renter/downloadchunk.go +++ b/modules/renter/downloadchunk.go @@ -211,10 +211,24 @@ func (udc *unfinishedDownloadChunk) threadedRecoverLogicalData() error { udc.physicalChunkData[i] = nil } + // Add the chunk to the cache. + // TODO this should only happen for the streaming endpoint. + recoveredData := recoverWriter.Bytes() + cmu.Lock() + cacheID := fmt.Sprintf("%v:%v", udc.download.staticSiaPath, udc.staticChunkIndex) + for key := range cache { + if len(cache) < 5 { + break + } + delete(cache, key) + } + cache[cacheID] = recoveredData + cmu.Unlock() + // Write the bytes to the requested output. start := udc.staticFetchOffset end := udc.staticFetchOffset + udc.staticFetchLength - _, err = udc.destination.WriteAt(recoverWriter.Bytes()[start:end], udc.staticWriteOffset) + _, err = udc.destination.WriteAt(recoveredData[start:end], udc.staticWriteOffset) if err != nil { udc.mu.Lock() udc.fail(err) diff --git a/modules/renter/downloadheap.go b/modules/renter/downloadheap.go index d23fefab77..d7f9846236 100644 --- a/modules/renter/downloadheap.go +++ b/modules/renter/downloadheap.go @@ -10,6 +10,8 @@ package renter import ( "container/heap" "errors" + "fmt" + "sync" "time" ) @@ -153,6 +155,9 @@ func (r *Renter) managedNextDownloadChunk() *unfinishedDownloadChunk { } } +var cache map[string][]byte +var cmu *sync.Mutex + // threadedDownloadLoop utilizes the worker pool to make progress on any queued // downloads. func (r *Renter) threadedDownloadLoop() { @@ -194,6 +199,32 @@ LOOP: // Break out of the inner loop and wait for more work. break } + + // Check if we got the chunk cached already. + // TODO this is not save. We need to figure out a way to do cache invalidation. + cacheID := fmt.Sprintf("%v:%v", nextChunk.download.staticSiaPath, nextChunk.staticChunkIndex) + cmu.Lock() + data, cached := cache[cacheID] + cmu.Unlock() + if cached { + start := nextChunk.staticFetchOffset + end := start + nextChunk.staticFetchLength + _, err := nextChunk.destination.WriteAt(data[start:end], nextChunk.staticWriteOffset) + if err != nil { + r.log.Println("WARN: failed to write cached chunk to destination") + } + + // Check if the download is complete now. + nextChunk.download.mu.Lock() + nextChunk.download.chunksRemaining-- + if nextChunk.download.chunksRemaining == 0 { + nextChunk.download.endTime = time.Now() + close(nextChunk.download.completeChan) + } + nextChunk.download.mu.Unlock() + continue + } + // Get the required memory to download this chunk. if !r.managedAcquireMemoryForDownloadChunk(nextChunk) { // The renter shut down before memory could be acquired. diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 08629870ff..2451661d72 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -432,6 +432,11 @@ func NewCustomRenter(g modules.Gateway, cs modules.ConsensusSet, tpool modules.T } r.memoryManager = newMemoryManager(defaultMemory, r.tg.StopChan()) + // TODO those should not be global. Need a better way to cache streaming + // chunks. + cache = make(map[string][]byte) + cmu = new(sync.Mutex) + // Load all saved data. 
if err := r.initPersist(); err != nil { return nil, err diff --git a/modules/renter/streamer.go b/modules/renter/streamer.go new file mode 100644 index 0000000000..42a02960c2 --- /dev/null +++ b/modules/renter/streamer.go @@ -0,0 +1,146 @@ +package renter + +import ( + "bytes" + "fmt" + "io" + "math" + "time" + + "github.com/NebulousLabs/errors" +) + +type ( + // streamer is a io.ReadSeeker that can be used to stream downloads from + // the sia network. + streamer struct { + file *file + offset int64 + r *Renter + } +) + +// min is a helper function to find the minimum of multiple values. +func min(values ...uint64) uint64 { + min := uint64(math.MaxUint64) + for _, v := range values { + if v < min { + min = v + } + } + return min +} + +// Streamer create a io.ReadSeeker that can be used to stream downloads from +// the sia network. +func (r *Renter) Streamer(siaPath string) (string, io.ReadSeeker, error) { + // Lookup the file associated with the nickname. + lockID := r.mu.RLock() + file, exists := r.files[siaPath] + r.mu.RUnlock(lockID) + if !exists { + return "", nil, fmt.Errorf("no file with that path: %s", siaPath) + } + // Create the streamer + s := &streamer{ + file: file, + r: r, + } + return file.name, s, nil +} + +// Read implements the standard Read interface. It will download the requested +// data from the sia network and block until the download is complete. To +// prevent http.ServeContent from requesting too much data at once, Read can +// only request a single chunk at once. +func (s *streamer) Read(p []byte) (n int, err error) { + // Get the file's size + s.file.mu.RLock() + fileSize := int64(s.file.size) + s.file.mu.RUnlock() + + // Make sure we haven't reached the EOF yet. + if s.offset >= fileSize { + return 0, io.EOF + } + + // Calculate how much we can download. We never download more than a single chunk. + chunkSize := s.file.staticChunkSize() + remainingData := uint64(fileSize - s.offset) + requestedData := uint64(len(p)) + remainingChunk := chunkSize - uint64(s.offset)%chunkSize + length := min(chunkSize, remainingData, requestedData, remainingChunk) + + // Download data + buffer := bytes.NewBuffer([]byte{}) + d, err := s.r.newDownload(downloadParams{ + destination: newDownloadDestinationWriteCloserFromWriter(buffer), + destinationType: "http stream", + destinationString: "httpresponse", + file: s.file, + + latencyTarget: 25e3 * time.Millisecond, // TODO high default until full latency suport is added. + length: length, + needsMemory: true, + offset: uint64(s.offset), + overdrive: 3, // TODO: moderate default until full overdrive support is added. + priority: 10, // TODO: high default until full priority support is added. + }) + if err != nil { + return 0, errors.AddContext(err, "failed to create new download") + } + + // Add the download object to the download queue. + // TODO: Maybe this is not necessary for streams? + s.r.downloadHistoryMu.Lock() + s.r.downloadHistory = append(s.r.downloadHistory, d) + s.r.downloadHistoryMu.Unlock() + + // Block until the download has completed. + select { + case <-d.completeChan: + if d.Err() != nil { + return 0, errors.AddContext(d.Err(), "download failed") + } + case <-s.r.tg.StopChan(): + return 0, errors.New("download interrupted by shutdown") + } + + // Copy downloaded data into buffer. 
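+	// length was capped at len(p) above, so the buffer holds at most len(p)
+	// bytes and the copy below transfers all of the downloaded data into p.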
+ copy(p, buffer.Bytes()) + + // Adjust offset + s.offset += int64(length) + return int(length), nil +} + +// Seek sets the offset for the next Read to offset, interpreted +// according to whence: SeekStart means relative to the start of the file, +// SeekCurrent means relative to the current offset, and SeekEnd means relative +// to the end. Seek returns the new offset relative to the start of the file +// and an error, if any. +func (s *streamer) Seek(offset int64, whence int) (int64, error) { + println("seek", offset, whence) + var newOffset int64 + switch whence { + case io.SeekStart: + println("start") + newOffset = 0 + case io.SeekCurrent: + println("current") + newOffset = s.offset + case io.SeekEnd: + println("end") + s.file.mu.RLock() + newOffset = int64(s.file.size) + s.file.mu.RUnlock() + } + newOffset += offset + + if newOffset < 0 { + return s.offset, errors.New("cannot seek to negative offset") + } + s.offset = newOffset + println("new offset", s.offset) + return s.offset, nil +} diff --git a/node/api/renter.go b/node/api/renter.go index 1da3ab1d43..719926ee66 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "log" "net/http" "path/filepath" "sort" @@ -539,6 +540,19 @@ func (api *API) renterShareASCIIHandler(w http.ResponseWriter, req *http.Request }) } +// renterStreamHandler handles downloads from the /renter/stream endpoint +func (api *API) renterStreamHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + siaPath := req.FormValue("siapath") + fileName, streamer, err := api.renter.Streamer(siaPath) + if err != nil { + WriteError(w, Error{fmt.Sprintf("failed to create download streamer: %v", err)}, + http.StatusInternalServerError) + return + } + log.Printf("Requesting %v", fileName) + http.ServeContent(w, req, fileName, time.Time{}, streamer) +} + // renterUploadHandler handles the API call to upload a file. func (api *API) renterUploadHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { source := req.FormValue("source") diff --git a/node/api/routes.go b/node/api/routes.go index f7cc5419b0..7c4fdea830 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -70,6 +70,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.POST("/renter", RequirePassword(api.renterHandlerPOST, requiredPassword)) router.GET("/renter/contracts", api.renterContractsHandler) router.GET("/renter/downloads", api.renterDownloadsHandler) + router.GET("/renter/stream", api.renterStreamHandler) router.GET("/renter/files", api.renterFilesHandler) router.GET("/renter/prices", api.renterPricesHandler) diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 8b068894e5..78ea529b6e 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -91,6 +91,7 @@ func testDownloadMultipleLargeSectors(t *testing.T, tg *siatest.TestGroup) { parallelDownloads := 10 // fileSize is the size of the downloaded file. fileSize := int(10*modules.SectorSize) + siatest.Fuzz() + // set download limits and reset them after test. // uniqueRemoteFiles is the number of files that will be uploaded to the // network. Downloads will choose the remote file to download randomly. 
uniqueRemoteFiles := 5 From 1e6b11697b2652e32743952ead75615ca7ef72bb Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 16 Mar 2018 14:38:48 -0400 Subject: [PATCH 032/212] code cleanup --- modules/renter/consts.go | 8 +++++ modules/renter/download.go | 5 ++- modules/renter/downloadchunk.go | 39 +++++++++++++++------- modules/renter/downloadheap.go | 58 +++++++++++++++++++-------------- modules/renter/renter.go | 11 +++---- modules/renter/streamer.go | 2 +- 6 files changed, 78 insertions(+), 45 deletions(-) diff --git a/modules/renter/consts.go b/modules/renter/consts.go index 076307c600..87b7ca92f1 100644 --- a/modules/renter/consts.go +++ b/modules/renter/consts.go @@ -66,6 +66,14 @@ const ( // memoryPriorityHigh is used to request high priority memory memoryPriorityHigh = true + + // destinationTypeSeekStream is the destination type used for downloads + // from the /renter/stream endpoint. + destinationTypeSeekStream = "httpseekstream" + + // downloadCacheSize is the cache size of the /renter/stream cache in + // chunks. + downloadCacheSize = 2 ) var ( diff --git a/modules/renter/download.go b/modules/renter/download.go index ac04bc0b49..c8332d9337 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -309,6 +309,7 @@ func (r *Renter) newDownload(params downloadParams) (*download, error) { masterKey: params.file.masterKey, staticChunkIndex: i, + staticCacheID: fmt.Sprintf("%v:%v", d.staticSiaPath, i), staticChunkMap: chunkMaps[i-minChunk], staticChunkSize: params.file.staticChunkSize(), staticPieceSize: params.file.pieceSize, @@ -329,7 +330,9 @@ func (r *Renter) newDownload(params downloadParams) (*download, error) { physicalChunkData: make([][]byte, params.file.erasureCode.NumPieces()), pieceUsage: make([]bool, params.file.erasureCode.NumPieces()), - download: d, + download: d, + chunkCache: r.chunkCache, + cmu: r.cmu, } // Set the fetchOffset - the offset within the chunk that we start diff --git a/modules/renter/downloadchunk.go b/modules/renter/downloadchunk.go index c3f48fc9ad..bd58386584 100644 --- a/modules/renter/downloadchunk.go +++ b/modules/renter/downloadchunk.go @@ -43,6 +43,7 @@ type unfinishedDownloadChunk struct { // Fetch + Write instructions - read only or otherwise thread safe. staticChunkIndex uint64 // Required for deriving the encryption keys for each piece. + staticCacheID string // Used to uniquely identify a chunk in the chunk cache. staticChunkMap map[types.FileContractID]downloadPieceInfo // Maps from file contract ids to the info for the piece associated with that contract staticChunkSize uint64 staticFetchLength uint64 // Length within the logical chunk to fetch. @@ -72,6 +73,10 @@ type unfinishedDownloadChunk struct { // The download object, mostly to update download progress. download *download mu sync.Mutex + + // Caching related fields + chunkCache map[string][]byte + cmu *sync.Mutex } // fail will set the chunk status to failed. The physical chunk memory will be @@ -166,6 +171,24 @@ func (udc *unfinishedDownloadChunk) returnMemory() { } } +// addChunkToCache adds the chunk to the cache if the download is a streaming +// endpoint download. +func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { + // TODO is it safe to read the type without a lock? + if udc.download.destinationType == destinationTypeSeekStream { + udc.cmu.Lock() + // Prune cache if necessary. 
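+		// Go's randomized map iteration order makes this evict arbitrary entries
+		// until fewer than downloadCacheSize chunks remain.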
+ for key := range udc.chunkCache { + if len(udc.chunkCache) < downloadCacheSize { + break + } + delete(udc.chunkCache, key) + } + udc.chunkCache[udc.staticCacheID] = data + udc.cmu.Unlock() + } +} + // threadedRecoverLogicalData will take all of the pieces that have been // downloaded and encode them into the logical data which is then written to the // underlying writer for the download. @@ -211,19 +234,11 @@ func (udc *unfinishedDownloadChunk) threadedRecoverLogicalData() error { udc.physicalChunkData[i] = nil } - // Add the chunk to the cache. - // TODO this should only happen for the streaming endpoint. + // Get recovered data recoveredData := recoverWriter.Bytes() - cmu.Lock() - cacheID := fmt.Sprintf("%v:%v", udc.download.staticSiaPath, udc.staticChunkIndex) - for key := range cache { - if len(cache) < 5 { - break - } - delete(cache, key) - } - cache[cacheID] = recoveredData - cmu.Unlock() + + // Add the chunk to the cache. + udc.addChunkToCache(recoveredData) // Write the bytes to the requested output. start := udc.staticFetchOffset diff --git a/modules/renter/downloadheap.go b/modules/renter/downloadheap.go index d7f9846236..88e6be18a9 100644 --- a/modules/renter/downloadheap.go +++ b/modules/renter/downloadheap.go @@ -10,8 +10,6 @@ package renter import ( "container/heap" "errors" - "fmt" - "sync" "time" ) @@ -155,8 +153,38 @@ func (r *Renter) managedNextDownloadChunk() *unfinishedDownloadChunk { } } -var cache map[string][]byte -var cmu *sync.Mutex +// managedTryCache tries to retrieve the chunk from the renter's cache. If +// successful it will write the data to the destination and stop the download +// if it was the last missing chunk. The function returns true if the chunk was +// in the cache. +// TODO in the future we might need cache invalidation. At the +// moment this doesn't worry us since our files are static. +func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { + udc.mu.Lock() + defer udc.mu.Unlock() + r.cmu.Lock() + data, cached := r.chunkCache[udc.staticCacheID] + r.cmu.Unlock() + if !cached { + return false + } + start := udc.staticFetchOffset + end := start + udc.staticFetchLength + _, err := udc.destination.WriteAt(data[start:end], udc.staticWriteOffset) + if err != nil { + r.log.Println("WARN: failed to write cached chunk to destination") + } + + // Check if the download is complete now. + udc.download.mu.Lock() + udc.download.chunksRemaining-- + if udc.download.chunksRemaining == 0 { + udc.download.endTime = time.Now() + close(udc.download.completeChan) + } + udc.download.mu.Unlock() + return true +} // threadedDownloadLoop utilizes the worker pool to make progress on any queued // downloads. @@ -201,27 +229,7 @@ LOOP: } // Check if we got the chunk cached already. - // TODO this is not save. We need to figure out a way to do cache invalidation. - cacheID := fmt.Sprintf("%v:%v", nextChunk.download.staticSiaPath, nextChunk.staticChunkIndex) - cmu.Lock() - data, cached := cache[cacheID] - cmu.Unlock() - if cached { - start := nextChunk.staticFetchOffset - end := start + nextChunk.staticFetchLength - _, err := nextChunk.destination.WriteAt(data[start:end], nextChunk.staticWriteOffset) - if err != nil { - r.log.Println("WARN: failed to write cached chunk to destination") - } - - // Check if the download is complete now. 
- nextChunk.download.mu.Lock() - nextChunk.download.chunksRemaining-- - if nextChunk.download.chunksRemaining == 0 { - nextChunk.download.endTime = time.Now() - close(nextChunk.download.completeChan) - } - nextChunk.download.mu.Unlock() + if r.managedTryCache(nextChunk) { continue } diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 2451661d72..9e18e9a3de 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -189,6 +189,8 @@ type Renter struct { lastEstimation modules.RenterPriceEstimation // Utilities. + chunkCache map[string][]byte + cmu *sync.Mutex cs modules.ConsensusSet g modules.Gateway hostContractor hostContractor @@ -421,22 +423,19 @@ func NewCustomRenter(g modules.Gateway, cs modules.ConsensusSet, tpool modules.T workerPool: make(map[types.FileContractID]*worker), + chunkCache: make(map[string][]byte), + cmu: new(sync.Mutex), cs: cs, + deps: deps, g: g, hostDB: hdb, hostContractor: hc, persistDir: persistDir, mu: siasync.New(modules.SafeMutexDelay, 1), tpool: tpool, - deps: deps, } r.memoryManager = newMemoryManager(defaultMemory, r.tg.StopChan()) - // TODO those should not be global. Need a better way to cache streaming - // chunks. - cache = make(map[string][]byte) - cmu = new(sync.Mutex) - // Load all saved data. if err := r.initPersist(); err != nil { return nil, err diff --git a/modules/renter/streamer.go b/modules/renter/streamer.go index 42a02960c2..d9345072e8 100644 --- a/modules/renter/streamer.go +++ b/modules/renter/streamer.go @@ -75,7 +75,7 @@ func (s *streamer) Read(p []byte) (n int, err error) { buffer := bytes.NewBuffer([]byte{}) d, err := s.r.newDownload(downloadParams{ destination: newDownloadDestinationWriteCloserFromWriter(buffer), - destinationType: "http stream", + destinationType: destinationTypeSeekStream, destinationString: "httpresponse", file: s.file, From 24735bbd903f8a176700b9556b1b7e9667e1109c Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 16 Mar 2018 15:24:12 -0400 Subject: [PATCH 033/212] set destination type in newDownload and remove debug outputs --- modules/renter/download.go | 1 + modules/renter/streamer.go | 5 ----- node/api/renter.go | 2 -- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/modules/renter/download.go b/modules/renter/download.go index c8332d9337..402a248eb4 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -258,6 +258,7 @@ func (r *Renter) newDownload(params downloadParams) (*download, error) { destination: params.destination, destinationString: params.destinationString, + destinationType: params.destinationType, staticLatencyTarget: params.latencyTarget, staticLength: params.length, staticOffset: params.offset, diff --git a/modules/renter/streamer.go b/modules/renter/streamer.go index d9345072e8..09974ae786 100644 --- a/modules/renter/streamer.go +++ b/modules/renter/streamer.go @@ -120,17 +120,13 @@ func (s *streamer) Read(p []byte) (n int, err error) { // to the end. Seek returns the new offset relative to the start of the file // and an error, if any. 
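 // Seeking performs no network activity on its own; the stored offset simply
 // determines where the next call to Read begins downloading.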
func (s *streamer) Seek(offset int64, whence int) (int64, error) { - println("seek", offset, whence) var newOffset int64 switch whence { case io.SeekStart: - println("start") newOffset = 0 case io.SeekCurrent: - println("current") newOffset = s.offset case io.SeekEnd: - println("end") s.file.mu.RLock() newOffset = int64(s.file.size) s.file.mu.RUnlock() @@ -141,6 +137,5 @@ func (s *streamer) Seek(offset int64, whence int) (int64, error) { return s.offset, errors.New("cannot seek to negative offset") } s.offset = newOffset - println("new offset", s.offset) return s.offset, nil } diff --git a/node/api/renter.go b/node/api/renter.go index 719926ee66..acbf44bf97 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -2,7 +2,6 @@ package api import ( "fmt" - "log" "net/http" "path/filepath" "sort" @@ -549,7 +548,6 @@ func (api *API) renterStreamHandler(w http.ResponseWriter, req *http.Request, ps http.StatusInternalServerError) return } - log.Printf("Requesting %v", fileName) http.ServeContent(w, req, fileName, time.Time{}, streamer) } From 3cb2218aa3f899d7a494454ec5da66486bbe4aee Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 16 Mar 2018 16:45:15 -0400 Subject: [PATCH 034/212] add to test --- node/api/client/renter.go | 7 +++++++ node/api/renter.go | 2 +- node/api/routes.go | 2 +- siatest/renter.go | 9 +++++++++ siatest/renter/renter_test.go | 5 +++++ 5 files changed, 23 insertions(+), 2 deletions(-) diff --git a/node/api/client/renter.go b/node/api/client/renter.go index 276a5ce735..a9c4edba1e 100644 --- a/node/api/client/renter.go +++ b/node/api/client/renter.go @@ -71,6 +71,13 @@ func (c *Client) RenterPostRateLimit(readBPS, writeBPS int64) (err error) { return } +// RenterStreamGet uses the /renter/stream endpoint to download data as a +// stream. 
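+// The returned bytes are the complete contents of the file at siaPath, so this
+// helper is only suitable for files that fit in memory.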
+func (c *Client) RenterStreamGet(siaPath string) (resp []byte, err error) { + resp, err = c.getRawResponse("/renter/stream/" + siaPath) + return +} + // RenterUploadPost uses the /renter/upload endpoin to upload a file func (c *Client) RenterUploadPost(path, siaPath string, dataPieces, parityPieces uint64) (err error) { values := url.Values{} diff --git a/node/api/renter.go b/node/api/renter.go index acbf44bf97..c3b60715ca 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -541,7 +541,7 @@ func (api *API) renterShareASCIIHandler(w http.ResponseWriter, req *http.Request // renterStreamHandler handles downloads from the /renter/stream endpoint func (api *API) renterStreamHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - siaPath := req.FormValue("siapath") + siaPath := strings.TrimPrefix(ps.ByName("siapath"), "/") fileName, streamer, err := api.renter.Streamer(siaPath) if err != nil { WriteError(w, Error{fmt.Sprintf("failed to create download streamer: %v", err)}, diff --git a/node/api/routes.go b/node/api/routes.go index 7c4fdea830..ec703684e7 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -70,7 +70,6 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.POST("/renter", RequirePassword(api.renterHandlerPOST, requiredPassword)) router.GET("/renter/contracts", api.renterContractsHandler) router.GET("/renter/downloads", api.renterDownloadsHandler) - router.GET("/renter/stream", api.renterStreamHandler) router.GET("/renter/files", api.renterFilesHandler) router.GET("/renter/prices", api.renterPricesHandler) @@ -85,6 +84,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.GET("/renter/download/*siapath", RequirePassword(api.renterDownloadHandler, requiredPassword)) router.GET("/renter/downloadasync/*siapath", RequirePassword(api.renterDownloadAsyncHandler, requiredPassword)) router.POST("/renter/rename/*siapath", RequirePassword(api.renterRenameHandler, requiredPassword)) + router.GET("/renter/stream/*siapath", api.renterStreamHandler) router.POST("/renter/upload/*siapath", RequirePassword(api.renterUploadHandler, requiredPassword)) // HostDB endpoints. diff --git a/siatest/renter.go b/siatest/renter.go index a365fd4145..2c15dfef50 100644 --- a/siatest/renter.go +++ b/siatest/renter.go @@ -58,6 +58,15 @@ func (tn *TestNode) DownloadByStream(rf *RemoteFile) (data []byte, err error) { return } +// Stream uses the streaming endpoint to download a file. +func (tn *TestNode) Stream(rf *RemoteFile) (data []byte, err error) { + data, err = tn.RenterStreamGet(rf.siaPath) + if err == nil && rf.checksum != crypto.HashAll(data) { + err = errors.New("downloaded bytes don't match requested data") + } + return +} + // DownloadInfo returns the DownloadInfo struct of a file. If it returns nil, // the download has either finished, or was never started in the first place. // If the corresponding download info was found, DownloadInfo also performs a diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 78ea529b6e..e64dcc1ac5 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -82,6 +82,11 @@ func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { if err := renter.WaitForDownload(localFile, remoteFile); err != nil { t.Error(err) } + // Stream the file. 
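+	// A nil error implies the streamed bytes matched the uploaded file's
+	// checksum (see the Stream helper added to siatest/renter.go above).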
+ _, err = renter.Stream(remoteFile) + if err != nil { + t.Fatal(err) + } } // testDownloadMultipleLargeSectors downloads multiple large files (>5 Sectors) From c2b227b4a353c00121c0981cce85d3a7aa72b8d8 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 16 Mar 2018 17:21:05 -0400 Subject: [PATCH 035/212] whitelist streaming endpoint --- cmd/siad/main.go | 2 +- node/api/routes.go | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/cmd/siad/main.go b/cmd/siad/main.go index cdba4518c4..51fc037fda 100644 --- a/cmd/siad/main.go +++ b/cmd/siad/main.go @@ -157,7 +157,7 @@ func main() { }) // Set default values, which have the lowest priority. - // root.Flags().StringVarP(&globalConfig.Siad.RequiredUserAgent, "agent", "", "Sia-Agent", "required substring for the user agent") + root.Flags().StringVarP(&globalConfig.Siad.RequiredUserAgent, "agent", "", "Sia-Agent", "required substring for the user agent") root.Flags().StringVarP(&globalConfig.Siad.HostAddr, "host-addr", "", ":9982", "which port the host listens on") root.Flags().StringVarP(&globalConfig.Siad.ProfileDir, "profile-directory", "", "profiles", "location of the profiling directory") root.Flags().StringVarP(&globalConfig.Siad.APIaddr, "api-addr", "", "localhost:9980", "which host:port the API server listens on") diff --git a/node/api/routes.go b/node/api/routes.go index ec703684e7..41bb3549f7 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -168,7 +168,7 @@ func cleanCloseHandler(next http.Handler) http.Handler { // UserAgent that contains the specified string. func RequireUserAgent(h http.Handler, ua string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if !strings.Contains(req.UserAgent(), ua) { + if !strings.Contains(req.UserAgent(), ua) && !whitelisted(req) { WriteError(w, Error{"Browser access disabled due to security vulnerability. Use Sia-UI or siac."}, http.StatusBadRequest) return } @@ -194,3 +194,12 @@ func RequirePassword(h httprouter.Handle, password string) httprouter.Handle { h(w, req, ps) } } + +// whitelisted allows certain requests to be whitelisted even if they don't +// have a matching User-Agent. +func whitelisted(req *http.Request) bool { + if strings.HasPrefix(req.URL.Path, "/renter/stream") { + return true + } + return false +} From 3d3a25f88c1f31dcb2d8a16803d997af0bd4b865 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 16 Mar 2018 17:38:10 -0400 Subject: [PATCH 036/212] add documentation --- doc/API.md | 15 +++++++++++++++ doc/api/Renter.md | 15 +++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/doc/API.md b/doc/API.md index ca25716b6a..cf028ba779 100644 --- a/doc/API.md +++ b/doc/API.md @@ -724,6 +724,7 @@ Renter | [/renter/download/*___siapath___](#renterdownloadsiapath-get) | GET | | [/renter/downloadasync/*___siapath___](#renterdownloadasyncsiapath-get) | GET | | [/renter/rename/*___siapath___](#renterrenamesiapath-post) | POST | +| [/renter/stream/*___siapath___](#renterstreamsiapath-get) | GET | | [/renter/upload/*___siapath___](#renteruploadsiapath-post) | POST | For examples and detailed descriptions of request and response parameters, @@ -943,6 +944,20 @@ newsiapath standard success or error response. See [#standard-responses](#standard-responses). +#### /renter/stream/*___siapath___ [GET] + +downloads a file using http streaming. This call blocks until the data is +received. 
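+
+For example, with siad's API listening on the default localhost:9980, a file
+stored at a hypothetical siapath `movies/vacation.mp4` can be fetched with any
+plain HTTP client; the endpoint is exempt from the Sia-Agent user agent
+requirement:
+
+```
+curl "localhost:9980/renter/stream/movies/vacation.mp4" > vacation.mp4
+```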
+ +###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-1) +``` +*siapath +``` + +###### Response +standard success with the requested data in the body or error response. See +[#standard-responses](#standard-responses). + #### /renter/upload/*___siapath___ [POST] uploads a file to the network from the local filesystem. diff --git a/doc/api/Renter.md b/doc/api/Renter.md index 351666198c..662b4a04c5 100644 --- a/doc/api/Renter.md +++ b/doc/api/Renter.md @@ -31,6 +31,7 @@ Index | [/renter/download/___*siapath___](#renterdownload__siapath___-get) | GET | | [/renter/downloadasync/___*siapath___](#renterdownloadasync__siapath___-get) | GET | | [/renter/rename/___*siapath___](#renterrename___siapath___-post) | POST | +| [/renter/stream/___*siapath___](#renterstreamsiapath-get) | GET | | [/renter/upload/___*siapath___](#renterupload___siapath___-post) | POST | #### /renter [GET] @@ -411,6 +412,20 @@ newsiapath standard success or error response. See [API.md#standard-responses](/doc/API.md#standard-responses). +#### /renter/stream/*___siapath___ [GET] + +downloads a file using http streaming. This call blocks until the data is +received. + +###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-1) +``` +*siapath +``` + +###### Response +standard success with the requested data in the body or error response. See +[#standard-responses](#standard-responses). + #### /renter/upload/___*siapath___ [POST] starts a file upload to the Sia network from the local filesystem. From 1a2239cd00b0be4c01afa5a6d20341b75df5f8c2 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 30 Mar 2018 13:31:13 -0400 Subject: [PATCH 037/212] rename fields and move cache --- modules/renter/download.go | 34 ++++++++++----------- modules/renter/downloadcache.go | 53 +++++++++++++++++++++++++++++++++ modules/renter/downloadchunk.go | 20 +------------ modules/renter/downloadheap.go | 33 -------------------- 4 files changed, 71 insertions(+), 69 deletions(-) create mode 100644 modules/renter/downloadcache.go diff --git a/modules/renter/download.go b/modules/renter/download.go index 402a248eb4..922a02aefd 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -155,12 +155,12 @@ type ( staticStartTime time.Time // Set immediately when the download object is created. // Basic information about the file. - destination downloadDestination - destinationString string // The string reported to the user to indicate the download's destination. - destinationType string // "memory buffer", "http stream", "file", etc. - staticLength uint64 // Length to download starting from the offset. - staticOffset uint64 // Offset within the file to start the download. - staticSiaPath string // The path of the siafile at the time the download started. + destination downloadDestination + destinationString string // The string reported to the user to indicate the download's destination. + staticDestinationType string // "memory buffer", "http stream", "file", etc. + staticLength uint64 // Length to download starting from the offset. + staticOffset uint64 // Offset within the file to start the download. + staticSiaPath string // The path of the siafile at the time the download started. // Retrieval settings for the file. staticLatencyTarget time.Duration // In milliseconds. Lower latency results in lower total system throughput. 
@@ -256,15 +256,15 @@ func (r *Renter) newDownload(params downloadParams) (*download, error) { staticStartTime: time.Now(), - destination: params.destination, - destinationString: params.destinationString, - destinationType: params.destinationType, - staticLatencyTarget: params.latencyTarget, - staticLength: params.length, - staticOffset: params.offset, - staticOverdrive: params.overdrive, - staticSiaPath: params.file.name, - staticPriority: params.priority, + destination: params.destination, + destinationString: params.destinationString, + staticDestinationType: params.destinationType, + staticLatencyTarget: params.latencyTarget, + staticLength: params.length, + staticOffset: params.offset, + staticOverdrive: params.overdrive, + staticSiaPath: params.file.name, + staticPriority: params.priority, log: r.log, memoryManager: r.memoryManager, @@ -333,7 +333,7 @@ func (r *Renter) newDownload(params downloadParams) (*download, error) { download: d, chunkCache: r.chunkCache, - cmu: r.cmu, + cacheMu: r.cmu, } // Set the fetchOffset - the offset within the chunk that we start @@ -475,7 +475,7 @@ func (r *Renter) DownloadHistory() []modules.DownloadInfo { d.mu.Lock() // Lock required for d.endTime only. downloads[i] = modules.DownloadInfo{ Destination: d.destinationString, - DestinationType: d.destinationType, + DestinationType: d.staticDestinationType, Length: d.staticLength, Offset: d.staticOffset, SiaPath: d.staticSiaPath, diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go new file mode 100644 index 0000000000..3a2b108cc3 --- /dev/null +++ b/modules/renter/downloadcache.go @@ -0,0 +1,53 @@ +package renter + +import "time" + +// addChunkToCache adds the chunk to the cache if the download is a streaming +// endpoint download. +func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { + if udc.download.staticDestinationType == destinationTypeSeekStream { + udc.cacheMu.Lock() + // Prune cache if necessary. + for key := range udc.chunkCache { + if len(udc.chunkCache) < downloadCacheSize { + break + } + delete(udc.chunkCache, key) + } + udc.chunkCache[udc.staticCacheID] = data + udc.cacheMu.Unlock() + } +} + +// managedTryCache tries to retrieve the chunk from the renter's cache. If +// successful it will write the data to the destination and stop the download +// if it was the last missing chunk. The function returns true if the chunk was +// in the cache. +// TODO in the future we might need cache invalidation. At the +// moment this doesn't worry us since our files are static. +func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { + udc.mu.Lock() + defer udc.mu.Unlock() + r.cmu.Lock() + data, cached := r.chunkCache[udc.staticCacheID] + r.cmu.Unlock() + if !cached { + return false + } + start := udc.staticFetchOffset + end := start + udc.staticFetchLength + _, err := udc.destination.WriteAt(data[start:end], udc.staticWriteOffset) + if err != nil { + r.log.Println("WARN: failed to write cached chunk to destination") + } + + // Check if the download is complete now. 
+ udc.download.mu.Lock() + udc.download.chunksRemaining-- + if udc.download.chunksRemaining == 0 { + udc.download.endTime = time.Now() + close(udc.download.completeChan) + } + udc.download.mu.Unlock() + return true +} diff --git a/modules/renter/downloadchunk.go b/modules/renter/downloadchunk.go index bd58386584..69962b3215 100644 --- a/modules/renter/downloadchunk.go +++ b/modules/renter/downloadchunk.go @@ -76,7 +76,7 @@ type unfinishedDownloadChunk struct { // Caching related fields chunkCache map[string][]byte - cmu *sync.Mutex + cacheMu *sync.Mutex } // fail will set the chunk status to failed. The physical chunk memory will be @@ -171,24 +171,6 @@ func (udc *unfinishedDownloadChunk) returnMemory() { } } -// addChunkToCache adds the chunk to the cache if the download is a streaming -// endpoint download. -func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { - // TODO is it safe to read the type without a lock? - if udc.download.destinationType == destinationTypeSeekStream { - udc.cmu.Lock() - // Prune cache if necessary. - for key := range udc.chunkCache { - if len(udc.chunkCache) < downloadCacheSize { - break - } - delete(udc.chunkCache, key) - } - udc.chunkCache[udc.staticCacheID] = data - udc.cmu.Unlock() - } -} - // threadedRecoverLogicalData will take all of the pieces that have been // downloaded and encode them into the logical data which is then written to the // underlying writer for the download. diff --git a/modules/renter/downloadheap.go b/modules/renter/downloadheap.go index 88e6be18a9..6777b4d778 100644 --- a/modules/renter/downloadheap.go +++ b/modules/renter/downloadheap.go @@ -153,39 +153,6 @@ func (r *Renter) managedNextDownloadChunk() *unfinishedDownloadChunk { } } -// managedTryCache tries to retrieve the chunk from the renter's cache. If -// successful it will write the data to the destination and stop the download -// if it was the last missing chunk. The function returns true if the chunk was -// in the cache. -// TODO in the future we might need cache invalidation. At the -// moment this doesn't worry us since our files are static. -func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { - udc.mu.Lock() - defer udc.mu.Unlock() - r.cmu.Lock() - data, cached := r.chunkCache[udc.staticCacheID] - r.cmu.Unlock() - if !cached { - return false - } - start := udc.staticFetchOffset - end := start + udc.staticFetchLength - _, err := udc.destination.WriteAt(data[start:end], udc.staticWriteOffset) - if err != nil { - r.log.Println("WARN: failed to write cached chunk to destination") - } - - // Check if the download is complete now. - udc.download.mu.Lock() - udc.download.chunksRemaining-- - if udc.download.chunksRemaining == 0 { - udc.download.endTime = time.Now() - close(udc.download.completeChan) - } - udc.download.mu.Unlock() - return true -} - // threadedDownloadLoop utilizes the worker pool to make progress on any queued // downloads. 
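To make the cache-hit bookkeeping in managedTryCache above concrete: the cached value holds a whole decoded chunk, the request covers only data[start:end] of it, and that slice lands at the chunk's write offset in the destination. A tiny standalone sketch with made-up numbers (none of these values come from the patch):

```go
package main

import "fmt"

func main() {
	// A cached "chunk" of 256 bytes (hypothetical size; real chunks are
	// erasure-code dependent and far larger).
	chunk := make([]byte, 256)
	for i := range chunk {
		chunk[i] = byte(i)
	}

	// The request wants 16 bytes starting 100 bytes into this chunk, and
	// those bytes belong at offset 4096 of the destination.
	fetchOffset := uint64(100) // offset within the cached chunk
	fetchLength := uint64(16)  // number of bytes requested
	writeOffset := int64(4096) // offset within the destination

	start := fetchOffset
	end := start + fetchLength
	piece := chunk[start:end]

	fmt.Printf("copy chunk[%d:%d] (%d bytes) to destination offset %d\n",
		start, end, len(piece), writeOffset)
	fmt.Printf("first byte %d, last byte %d\n", piece[0], piece[len(piece)-1])
}
```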
func (r *Renter) threadedDownloadLoop() { From 647b84fd329363a92a573ee0b3170d9fc393df35 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 9 Apr 2018 14:19:03 -0400 Subject: [PATCH 038/212] implement review comments --- doc/API.md | 6 ++++ doc/api/Renter.md | 6 ++++ modules/renter.go | 3 +- modules/renter/downloadcache.go | 36 +++++++++++++------ .../{streamer.go => downloadstreamer.go} | 18 ++++------ modules/renter/renter.go | 2 +- node/api/client/client.go | 33 +++++++++++++++++ node/api/client/renter.go | 7 ++++ node/api/routes.go | 28 ++++++++++----- siatest/localfile.go | 13 +++++-- siatest/renter.go | 27 ++++++++++++-- siatest/renter/renter_test.go | 10 ++++-- 12 files changed, 149 insertions(+), 40 deletions(-) rename modules/renter/{streamer.go => downloadstreamer.go} (83%) diff --git a/doc/API.md b/doc/API.md index cf028ba779..547b0ecf5d 100644 --- a/doc/API.md +++ b/doc/API.md @@ -948,6 +948,12 @@ standard success or error response. See downloads a file using http streaming. This call blocks until the data is received. +The streaming endpoint also uses caching internally to prevent siad from +redownloading the same chunk multiple times when only parts of a file are +requested at once. This might lead to a substantial increase in ram usage and +therefore it is not recommended to stream multiple files in parallel at the +moment. This restriction will be removed together with the caching once partial +downloads are supported in the future. ###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-1) ``` diff --git a/doc/api/Renter.md b/doc/api/Renter.md index 662b4a04c5..d30a64b7ff 100644 --- a/doc/api/Renter.md +++ b/doc/api/Renter.md @@ -416,6 +416,12 @@ standard success or error response. See downloads a file using http streaming. This call blocks until the data is received. +The streaming endpoint also uses caching internally to prevent siad from +redownloading the same chunk multiple times when only parts of a file are +requested at once. This might lead to a substantial increase in ram usage and +therefore it is not recommended to stream multiple files in parallel at the +moment. This restriction will be removed together with the caching once partial +downloads are supported in the future. ###### Path Parameters [(with comments)](/doc/api/Renter.md#path-parameters-1) ``` diff --git a/modules/renter.go b/modules/renter.go index 1506580677..9e7934c75b 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -344,7 +344,8 @@ type Renter interface { ShareFilesASCII(paths []string) (asciiSia string, err error) // Streamer creates a io.ReadSeeker that can be used to stream downloads - // from the Sia network. + // from the Sia network and also returns the fileName of the streamed + // resource. Streamer(siaPath string) (string, io.ReadSeeker, error) // Upload uploads a file using the input parameters. diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 3a2b108cc3..4b2bdc5bc7 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -1,22 +1,34 @@ package renter -import "time" +// TODO expose the downloadCacheSize as a variable and allow users to set it +// via the API. + +import ( + "time" + + "github.com/NebulousLabs/errors" +) // addChunkToCache adds the chunk to the cache if the download is a streaming // endpoint download. +// TODO this won't be necessary anymore once we have partial downloads. 
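The addChunkToCache function below prunes by deleting an arbitrary map key once the cache holds downloadCacheSize entries, and its TODO suggests evicting the least-recently-used entry instead. A minimal sketch of such a policy, not part of this patch and using hypothetical names:

```go
package main

import (
	"container/list"
	"fmt"
)

// lruCache is a sketch of a least-recently-used chunk cache. The type and
// field names are hypothetical; the patch itself uses a plain map and evicts
// an arbitrary key.
type lruCache struct {
	capacity int
	order    *list.List               // front = most recently used
	entries  map[string]*list.Element // cache ID -> element in order
}

type lruEntry struct {
	id   string
	data []byte
}

func newLRUCache(capacity int) *lruCache {
	return &lruCache{
		capacity: capacity,
		order:    list.New(),
		entries:  make(map[string]*list.Element),
	}
}

// put inserts or refreshes a chunk and evicts the least-recently-used entry
// once the capacity is exceeded.
func (c *lruCache) put(id string, data []byte) {
	if elem, ok := c.entries[id]; ok {
		c.order.MoveToFront(elem)
		elem.Value.(*lruEntry).data = data
		return
	}
	c.entries[id] = c.order.PushFront(&lruEntry{id: id, data: data})
	if c.order.Len() > c.capacity {
		oldest := c.order.Back()
		c.order.Remove(oldest)
		delete(c.entries, oldest.Value.(*lruEntry).id)
	}
}

// get returns a cached chunk and marks it as recently used.
func (c *lruCache) get(id string) ([]byte, bool) {
	elem, ok := c.entries[id]
	if !ok {
		return nil, false
	}
	c.order.MoveToFront(elem)
	return elem.Value.(*lruEntry).data, true
}

func main() {
	c := newLRUCache(2)
	c.put("chunk-0", []byte("a"))
	c.put("chunk-1", []byte("b"))
	c.get("chunk-0")              // chunk-0 becomes the most recently used
	c.put("chunk-2", []byte("c")) // evicts chunk-1, the least recently used
	_, ok := c.get("chunk-1")
	fmt.Println("chunk-1 still cached:", ok) // false
}
```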
func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { if udc.download.staticDestinationType == destinationTypeSeekStream { - udc.cacheMu.Lock() - // Prune cache if necessary. - for key := range udc.chunkCache { - if len(udc.chunkCache) < downloadCacheSize { - break - } - delete(udc.chunkCache, key) + // We only cache streaming chunks since browsers and media players tend to only request a few kib at once when streaming data. That way we can prevent scheduling the same chunk for download over and over. + return + } + udc.cacheMu.Lock() + // Prune cache if necessary. + // TODO insteado of deleting a 'random' key, delete the + // least-recently-accessed element of the cache. + for key := range udc.chunkCache { + if len(udc.chunkCache) < downloadCacheSize { + break } - udc.chunkCache[udc.staticCacheID] = data - udc.cacheMu.Unlock() + delete(udc.chunkCache, key) } + udc.chunkCache[udc.staticCacheID] = data + udc.cacheMu.Unlock() } // managedTryCache tries to retrieve the chunk from the renter's cache. If @@ -38,7 +50,9 @@ func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { end := start + udc.staticFetchLength _, err := udc.destination.WriteAt(data[start:end], udc.staticWriteOffset) if err != nil { - r.log.Println("WARN: failed to write cached chunk to destination") + r.log.Println("WARN: failed to write cached chunk to destination:", err) + udc.fail(errors.AddContext(err, "failed to write cached chunk to destination")) + return true } // Check if the download is complete now. diff --git a/modules/renter/streamer.go b/modules/renter/downloadstreamer.go similarity index 83% rename from modules/renter/streamer.go rename to modules/renter/downloadstreamer.go index 09974ae786..9789ea5932 100644 --- a/modules/renter/streamer.go +++ b/modules/renter/downloadstreamer.go @@ -31,14 +31,14 @@ func min(values ...uint64) uint64 { return min } -// Streamer create a io.ReadSeeker that can be used to stream downloads from +// Streamer creates an io.ReadSeeker that can be used to stream downloads from // the sia network. func (r *Renter) Streamer(siaPath string) (string, io.ReadSeeker, error) { // Lookup the file associated with the nickname. lockID := r.mu.RLock() file, exists := r.files[siaPath] r.mu.RUnlock(lockID) - if !exists { + if !exists || file.deleted { return "", nil, fmt.Errorf("no file with that path: %s", siaPath) } // Create the streamer @@ -69,7 +69,7 @@ func (s *streamer) Read(p []byte) (n int, err error) { remainingData := uint64(fileSize - s.offset) requestedData := uint64(len(p)) remainingChunk := chunkSize - uint64(s.offset)%chunkSize - length := min(chunkSize, remainingData, requestedData, remainingChunk) + length := min(remainingData, requestedData, remainingChunk) // Download data buffer := bytes.NewBuffer([]byte{}) @@ -79,23 +79,17 @@ func (s *streamer) Read(p []byte) (n int, err error) { destinationString: "httpresponse", file: s.file, - latencyTarget: 25e3 * time.Millisecond, // TODO high default until full latency suport is added. + latencyTarget: 50 * time.Millisecond, // TODO low default until full latency suport is added. length: length, needsMemory: true, offset: uint64(s.offset), - overdrive: 3, // TODO: moderate default until full overdrive support is added. - priority: 10, // TODO: high default until full priority support is added. + overdrive: 5, // TODO: high default until full overdrive support is added. + priority: 1000, // TODO: high default until full priority support is added. 
}) if err != nil { return 0, errors.AddContext(err, "failed to create new download") } - // Add the download object to the download queue. - // TODO: Maybe this is not necessary for streams? - s.r.downloadHistoryMu.Lock() - s.r.downloadHistory = append(s.r.downloadHistory, d) - s.r.downloadHistoryMu.Unlock() - // Block until the download has completed. select { case <-d.completeChan: diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 9e18e9a3de..70fc9f087b 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -192,6 +192,7 @@ type Renter struct { chunkCache map[string][]byte cmu *sync.Mutex cs modules.ConsensusSet + deps modules.Dependencies g modules.Gateway hostContractor hostContractor hostDB hostDB @@ -200,7 +201,6 @@ type Renter struct { mu *siasync.RWMutex tg threadgroup.ThreadGroup tpool modules.TransactionPool - deps modules.Dependencies } // Close closes the Renter and its dependencies diff --git a/node/api/client/client.go b/node/api/client/client.go index 98e7dbf809..d3da6b05d6 100644 --- a/node/api/client/client.go +++ b/node/api/client/client.go @@ -3,6 +3,7 @@ package client import ( "bytes" "encoding/json" + "fmt" "io" "io/ioutil" "net/http" @@ -98,6 +99,38 @@ func (c *Client) getRawResponse(resource string) ([]byte, error) { return ioutil.ReadAll(res.Body) } +// getRawResponse requests part of the specified resource. The response, if +// provided, will be returned in a byte slice +func (c *Client) getRawPartialResponse(resource string, from, to uint64) ([]byte, error) { + req, err := c.NewRequest("GET", resource, nil) + if err != nil { + return nil, err + } + req.Header.Add("Range", fmt.Sprintf("bytes=%d-%d", from, to)) + + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, errors.AddContext(err, "request failed") + } + defer drainAndClose(res.Body) + + if res.StatusCode == http.StatusNotFound { + return nil, errors.New("API call not recognized: " + resource) + } + + // If the status code is not 2xx, decode and return the accompanying + // api.Error. + if res.StatusCode < 200 || res.StatusCode > 299 { + return nil, readAPIError(res.Body) + } + + if res.StatusCode == http.StatusNoContent { + // no reason to read the response + return []byte{}, nil + } + return ioutil.ReadAll(res.Body) +} + // get requests the specified resource. The response, if provided, will be // decoded into obj. The resource path must begin with /. func (c *Client) get(resource string, obj interface{}) error { diff --git a/node/api/client/renter.go b/node/api/client/renter.go index a9c4edba1e..84e573688a 100644 --- a/node/api/client/renter.go +++ b/node/api/client/renter.go @@ -78,6 +78,13 @@ func (c *Client) RenterStreamGet(siaPath string) (resp []byte, err error) { return } +// RenterStreamPartialGet uses the /renter/stream endpoint to download a part +// of data as a stream. 
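As getRawPartialResponse above shows, a partial stream is just an HTTP range request against the same /renter/stream URL; the helper that follows wraps it for the test client. Any HTTP client can do the equivalent. A short sketch, again assuming the default API address and a hypothetical siapath (a server that honors the range typically answers with 206 Partial Content):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://localhost:9980/renter/stream/backups/photo.jpg", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Request bytes 0 through 1023 inclusive, mirroring the
	// "bytes=%d-%d" header built by getRawPartialResponse.
	req.Header.Add("Range", "bytes=0-1023")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	data, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("status %s, received %d bytes\n", resp.Status, len(data))
}
```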
+func (c *Client) RenterStreamPartialGet(siaPath string, start, end uint64) (resp []byte, err error) { + resp, err = c.getRawPartialResponse("/renter/stream/"+siaPath, start, end) + return +} + // RenterUploadPost uses the /renter/upload endpoin to upload a file func (c *Client) RenterUploadPost(path, siaPath string, dataPieces, parityPieces uint64) (err error) { values := url.Values{} diff --git a/node/api/routes.go b/node/api/routes.go index 41bb3549f7..ab2d5685dc 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -1,6 +1,7 @@ package api import ( + "context" "net/http" "strings" "time" @@ -9,6 +10,10 @@ import ( "github.com/julienschmidt/httprouter" ) +// unrestrictedContextKey is a context key that is set to allow a route to be +// called without the Sia-Agent being set. +type unrestrictedContextKey struct{} + // buildHttpRoutes sets up and returns an * httprouter.Router. // it connected the Router to the given api using the required // parameters: requiredUserAgent and requiredPassword @@ -84,7 +89,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.GET("/renter/download/*siapath", RequirePassword(api.renterDownloadHandler, requiredPassword)) router.GET("/renter/downloadasync/*siapath", RequirePassword(api.renterDownloadAsyncHandler, requiredPassword)) router.POST("/renter/rename/*siapath", RequirePassword(api.renterRenameHandler, requiredPassword)) - router.GET("/renter/stream/*siapath", api.renterStreamHandler) + router.GET("/renter/stream/*siapath", Unrestricted(api.renterStreamHandler)) router.POST("/renter/upload/*siapath", RequirePassword(api.renterUploadHandler, requiredPassword)) // HostDB endpoints. @@ -168,7 +173,7 @@ func cleanCloseHandler(next http.Handler) http.Handler { // UserAgent that contains the specified string. func RequireUserAgent(h http.Handler, ua string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - if !strings.Contains(req.UserAgent(), ua) && !whitelisted(req) { + if !strings.Contains(req.UserAgent(), ua) && !isUnrestricted(req) { WriteError(w, Error{"Browser access disabled due to security vulnerability. Use Sia-UI or siac."}, http.StatusBadRequest) return } @@ -195,11 +200,16 @@ func RequirePassword(h httprouter.Handle, password string) httprouter.Handle { } } -// whitelisted allows certain requests to be whitelisted even if they don't -// have a matching User-Agent. -func whitelisted(req *http.Request) bool { - if strings.HasPrefix(req.URL.Path, "/renter/stream") { - return true - } - return false +// Unrestricted can be used to whitelist api routes from requiring the +// Sia-Agent to be set. +func Unrestricted(h httprouter.Handle) httprouter.Handle { + return httprouter.Handle(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + req = req.WithContext(context.WithValue(req.Context(), unrestrictedContextKey{}, 0)) + h(w, req, ps) + }) +} + +// isUnrestricted checks if a context has the unrestrictedContextKey set. 
+func isUnrestricted(req *http.Request) bool { + return req.Context().Value(unrestrictedContextKey{}) != nil } diff --git a/siatest/localfile.go b/siatest/localfile.go index 7036db673e..ab81544928 100644 --- a/siatest/localfile.go +++ b/siatest/localfile.go @@ -30,7 +30,7 @@ func NewFile(size int) (*LocalFile, error) { err := ioutil.WriteFile(path, bytes, 0600) return &LocalFile{ path: path, - checksum: crypto.HashObject(bytes), + checksum: crypto.HashBytes(bytes), }, err } @@ -46,7 +46,7 @@ func (lf *LocalFile) checkIntegrity() error { if err != nil { return errors.AddContext(err, "failed to read file from disk") } - if crypto.HashAll(data) != lf.checksum { + if crypto.HashBytes(data) != lf.checksum { return errors.New("checksums don't match") } return nil @@ -56,3 +56,12 @@ func (lf *LocalFile) checkIntegrity() error { func (lf *LocalFile) fileName() string { return filepath.Base(lf.path) } + +// partialChecksum returns the checksum of a part of the file. +func (lf *LocalFile) partialChecksum(from, to uint64) (crypto.Hash, error) { + data, err := ioutil.ReadFile(lf.path) + if err != nil { + return crypto.Hash{}, errors.AddContext(err, "failed to read file from disk") + } + return crypto.HashBytes(data[from:to]), nil +} diff --git a/siatest/renter.go b/siatest/renter.go index 2c15dfef50..4388a1e1b5 100644 --- a/siatest/renter.go +++ b/siatest/renter.go @@ -52,7 +52,7 @@ func (tn *TestNode) DownloadByStream(rf *RemoteFile) (data []byte, err error) { return nil, errors.AddContext(err, "failed to retrieve FileInfo") } data, err = tn.RenterDownloadHTTPResponseGet(rf.siaPath, 0, fi.Filesize) - if err == nil && rf.checksum != crypto.HashAll(data) { + if err == nil && rf.checksum != crypto.HashBytes(data) { err = errors.New("downloaded bytes don't match requested data") } return @@ -61,12 +61,35 @@ func (tn *TestNode) DownloadByStream(rf *RemoteFile) (data []byte, err error) { // Stream uses the streaming endpoint to download a file. func (tn *TestNode) Stream(rf *RemoteFile) (data []byte, err error) { data, err = tn.RenterStreamGet(rf.siaPath) - if err == nil && rf.checksum != crypto.HashAll(data) { + if err == nil && rf.checksum != crypto.HashBytes(data) { err = errors.New("downloaded bytes don't match requested data") } return } +// StreamPartial uses the streaming endpoint to download a partial file. A +// local file can be provided optionally to implicitly check the checksum of +// the downloaded data. +func (tn *TestNode) StreamPartial(rf *RemoteFile, lf *LocalFile, from, to uint64) (data []byte, err error) { + data, err = tn.RenterStreamPartialGet(rf.siaPath, from, to) + if err != nil { + return + } + if lf != nil { + var checksum crypto.Hash + checksum, err = lf.partialChecksum(from, to+1) + if err != nil { + err = errors.AddContext(err, "failed to get partial checksum") + return + } + if checksum != crypto.HashBytes(data) { + err = errors.New("downloaded bytes don't match requested data") + return + } + } + return +} + // DownloadInfo returns the DownloadInfo struct of a file. If it returns nil, // the download has either finished, or was never started in the first place. 
// If the corresponding download info was found, DownloadInfo also performs a diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index e64dcc1ac5..9b1e1e3b10 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -60,7 +60,8 @@ func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { // Upload file, creating a piece for each host in the group dataPieces := uint64(1) parityPieces := uint64(len(tg.Hosts())) - dataPieces - _, remoteFile, err := renter.UploadNewFileBlocking(100+siatest.Fuzz(), dataPieces, parityPieces) + fileSize := 100 + siatest.Fuzz() + localFile, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) if err != nil { t.Fatal("Failed to upload a file for testing: ", err) } @@ -75,7 +76,7 @@ func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { t.Fatal(err) } // Download the file asynchronously and wait for the download to finish. - localFile, err := renter.DownloadToDisk(remoteFile, true) + localFile, err = renter.DownloadToDisk(remoteFile, true) if err != nil { t.Error(err) } @@ -87,6 +88,11 @@ func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { if err != nil { t.Fatal(err) } + // Stream the file partially. + _, err = renter.StreamPartial(remoteFile, localFile, 0, uint64(fileSize/2)) + if err != nil { + t.Fatal(err) + } } // testDownloadMultipleLargeSectors downloads multiple large files (>5 Sectors) From 17f74613104a932fb6122dcfc105958f7ec97f2d Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 9 Apr 2018 16:27:11 -0400 Subject: [PATCH 039/212] Add UpdateUtility method to SafeContract and add Utility field to contract header --- modules/renter/proto/contract.go | 45 +++++++++++++++++++++++++++++--- 1 file changed, 41 insertions(+), 4 deletions(-) diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index 16d82aee08..cf90db94a0 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -45,14 +45,15 @@ type contractHeader struct { SecretKey crypto.SecretKey // Same as modules.RenterContract. - StartHeight types.BlockHeight + ContractFee types.Currency DownloadSpending types.Currency + SiafundFee types.Currency + StartHeight types.BlockHeight StorageSpending types.Currency - UploadSpending types.Currency TotalCost types.Currency - ContractFee types.Currency TxnFee types.Currency - SiafundFee types.Currency + UploadSpending types.Currency + Utility modules.ContractUtility } // validate returns an error if the contractHeader is invalid. @@ -131,6 +132,42 @@ func (c *SafeContract) Metadata() modules.RenterContract { } } +// UpdateUtility updates the utility field of a contract. +func (c *SafeContract) UpdateUtility(utility modules.ContractUtility) error { + // Get current header + c.headerMu.Lock() + newHeader := c.header + c.headerMu.Unlock() + + // Construct new header + newHeader.Utility = utility + + // Record the intent to change the header in the wal. + t, err := c.wal.NewTransaction([]writeaheadlog.Update{ + c.makeUpdateSetHeader(newHeader), + }) + if err != nil { + return err + } + // Signal that the setup is completed. + if err := <-t.SignalSetupComplete(); err != nil { + return err + } + // Apply the change. + if err := c.applySetHeader(newHeader); err != nil { + return err + } + // Sync the change to disk. + if err := c.f.Sync(); err != nil { + return err + } + // Signal that the update has been applied. 
+ if err := t.SignalUpdatesApplied(); err != nil { + return err + } + return nil +} + func (c *SafeContract) makeUpdateSetHeader(h contractHeader) writeaheadlog.Update { c.headerMu.Lock() id := c.header.ID() From 7bd31683b3994ac030e5bc476a4c9ae7dd81d52b Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 9 Apr 2018 17:41:43 -0400 Subject: [PATCH 040/212] remove contractUtilies map from contractor --- modules/renter/contractor/contractor.go | 40 ++++++------ modules/renter/contractor/contracts.go | 64 ++++++++++++++----- .../contractor/host_integration_test.go | 10 ++- modules/renter/proto/contract.go | 7 ++ 4 files changed, 82 insertions(+), 39 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 44b0d69a01..cf4c1e3953 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -60,14 +60,9 @@ type Contractor struct { renewing map[types.FileContractID]bool // prevent revising during renewal revising map[types.FileContractID]bool // prevent overlapping revisions - // The contract utility values are not persisted in any way, instead get - // set based on the values in the hostdb at startup. During startup, the - // 'managedMarkContractsUtility' needs to be called so that the utility is - // set correctly. - contracts *proto.ContractSet - contractUtilities map[types.FileContractID]modules.ContractUtility - oldContracts map[types.FileContractID]modules.RenterContract - renewedIDs map[types.FileContractID]types.FileContractID + contracts *proto.ContractSet + oldContracts map[types.FileContractID]modules.RenterContract + renewedIDs map[types.FileContractID]types.FileContractID } // resolveID returns the ID of the most recent renewal of id. @@ -135,10 +130,12 @@ func (c *Contractor) Contracts() []modules.RenterContract { // ContractUtility returns the utility fields for the given contract. func (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { - c.mu.RLock() - utility, exists := c.contractUtilities[c.resolveID(id)] - c.mu.RUnlock() - return utility, exists + sc, exists := c.contracts.Acquire(id) + if !exists { + return modules.ContractUtility{}, false + } + defer c.contracts.Return(sc) + return sc.Utility(), true } // CurrentPeriod returns the height at which the current allowance period @@ -221,14 +218,13 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host interruptMaintenance: make(chan struct{}), - contracts: contractSet, - downloaders: make(map[types.FileContractID]*hostDownloader), - editors: make(map[types.FileContractID]*hostEditor), - contractUtilities: make(map[types.FileContractID]modules.ContractUtility), - oldContracts: make(map[types.FileContractID]modules.RenterContract), - renewedIDs: make(map[types.FileContractID]types.FileContractID), - renewing: make(map[types.FileContractID]bool), - revising: make(map[types.FileContractID]bool), + contracts: contractSet, + downloaders: make(map[types.FileContractID]*hostDownloader), + editors: make(map[types.FileContractID]*hostEditor), + oldContracts: make(map[types.FileContractID]modules.RenterContract), + renewedIDs: make(map[types.FileContractID]types.FileContractID), + renewing: make(map[types.FileContractID]bool), + revising: make(map[types.FileContractID]bool), } // Close the contract set and logger upon shutdown. @@ -248,7 +244,9 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host } // Mark contract utility. 
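UpdateUtility above persists the new header with the usual write-ahead-log discipline: record the intent, apply the change, sync it to disk, and only then mark the updates as applied. Condensed into a hypothetical helper (the writeaheadlog import path and the *writeaheadlog.WAL type are assumptions based on how the contract code uses them; apply and sync stand in for applySetHeader and f.Sync):

```go
package proto

import "github.com/NebulousLabs/writeaheadlog"

// persistHeaderUpdate is a hypothetical helper that condenses the ordering
// used by UpdateUtility above.
func persistHeaderUpdate(wal *writeaheadlog.WAL, update writeaheadlog.Update, apply, sync func() error) error {
	// 1. Record the intent to change the header in the WAL.
	t, err := wal.NewTransaction([]writeaheadlog.Update{update})
	if err != nil {
		return err
	}
	// 2. Wait for the transaction setup to be committed.
	if err := <-t.SignalSetupComplete(); err != nil {
		return err
	}
	// 3. Apply the change to the contract file.
	if err := apply(); err != nil {
		return err
	}
	// 4. Sync so the change survives a crash.
	if err := sync(); err != nil {
		return err
	}
	// 5. Only now mark the updates as applied.
	return t.SignalUpdatesApplied()
}
```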
- c.managedMarkContractsUtility() + if err := c.managedMarkContractsUtility(); err != nil { + return nil, err + } // Subscribe to the consensus set. err = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan()) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index bc50122e6c..0dd7c5bb82 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -28,6 +28,16 @@ func (c *Contractor) contractEndHeight() types.BlockHeight { return c.currentPeriod + c.allowance.Period } +// ContractUtility returns the ContractUtility for a contract with a given id. +func (c *Contractor) contractUtility(id types.FileContractID) modules.ContractUtility { + sc, exists := c.contracts.Acquire(c.resolveID(id)) + if !exists { + return modules.ContractUtility{} + } + defer c.contracts.Return(sc) + return sc.Utility() +} + // managedInterruptContractMaintenance will issue an interrupt signal to any // running maintenance, stopping that maintenance. If there are multiple threads // running maintenance, they will all be stopped. @@ -56,7 +66,7 @@ func (c *Contractor) managedInterruptContractMaintenance() { // managedMarkContractsUtility checks every active contract in the contractor and // figures out whether the contract is useful for uploading, and whehter the // contract should be renewed. -func (c *Contractor) managedMarkContractsUtility() { +func (c *Contractor) managedMarkContractsUtility() error { // Pull a new set of hosts from the hostdb that could be used as a new set // to match the allowance. The lowest scoring host of these new hosts will // be used as a baseline for determining whether our existing contracts are @@ -130,10 +140,11 @@ func (c *Contractor) managedMarkContractsUtility() { }() // Apply changes. - c.mu.Lock() - c.contractUtilities[contract.ID] = utility - c.mu.Unlock() + if err := c.updateContractUtility(contract.ID, utility); err != nil { + return err + } } + return nil } // managedNewContract negotiates an initial file contract with the specified @@ -186,9 +197,7 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. // For convenience contract := sc.Metadata() // Sanity check - should not be renewing a bad contract. - c.mu.RLock() - utility := c.contractUtilities[contract.ID] - c.mu.RUnlock() + utility := c.contractUtility(contract.ID) if !utility.GoodForRenew { c.log.Critical("Renewing a contract that has been marked as !GoodForRenew") } @@ -269,7 +278,10 @@ func (c *Contractor) threadedContractMaintenance() { // Update the utility fields for this contract based on the most recent // hostdb. - c.managedMarkContractsUtility() + if err := c.managedMarkContractsUtility(); err != nil { + c.log.Println("Failed to update contracUtilities", err) + return + } // Figure out which contracts need to be renewed, and while we have the // lock, figure out the end height for the new contracts and also the amount @@ -344,7 +356,8 @@ func (c *Contractor) threadedContractMaintenance() { // Iterate through the contracts again, figuring out which contracts to // renew and how much extra funds to renew them with. for _, contract := range c.contracts.ViewAll() { - if !c.contractUtilities[contract.ID].GoodForRenew { + utility := c.contractUtility(contract.ID) + if !utility.GoodForRenew { continue } if c.blockHeight+c.allowance.RenewWindow >= contract.EndHeight { @@ -464,9 +477,7 @@ func (c *Contractor) threadedContractMaintenance() { return } // Return the contract if it's not useful for renewing. 
- c.mu.RLock() - oldUtility := c.contractUtilities[id] - c.mu.RUnlock() + oldUtility := c.contractUtility(id) if !oldUtility.GoodForRenew { c.log.Printf("Contract %v slated for renew is marked not good for renew", id) c.contracts.Return(oldContract) @@ -489,10 +500,16 @@ func (c *Contractor) threadedContractMaintenance() { GoodForUpload: true, GoodForRenew: true, } - c.contractUtilities[newContract.ID] = newUtility + if err := c.updateContractUtility(newContract.ID, newUtility); err != nil { + c.log.Println("Failed to update the contract utilities", err) + return + } oldUtility.GoodForRenew = false oldUtility.GoodForUpload = false - c.contractUtilities[id] = oldUtility + if err := c.updateContractUtility(id, oldUtility); err != nil { + c.log.Println("Failed to update the contract utilities", err) + return + } c.mu.Unlock() // If the contract is a mid-cycle renew, add the contract line to // the new contract. The contract line is not included/extended if @@ -543,7 +560,7 @@ func (c *Contractor) threadedContractMaintenance() { c.mu.RLock() uploadContracts := 0 for _, id := range c.contracts.IDs() { - if c.contractUtilities[id].GoodForUpload { + if c.contractUtility(id).GoodForUpload { uploadContracts++ } } @@ -583,9 +600,13 @@ func (c *Contractor) threadedContractMaintenance() { // Add this contract to the contractor and save. c.mu.Lock() - c.contractUtilities[newContract.ID] = modules.ContractUtility{ + err = c.updateContractUtility(newContract.ID, modules.ContractUtility{ GoodForUpload: true, GoodForRenew: true, + }) + if err != nil { + c.log.Println("Failed to update the contract utilities", err) + return } err = c.saveSync() c.mu.Unlock() @@ -609,3 +630,14 @@ func (c *Contractor) threadedContractMaintenance() { } } } + +// updateContractUtility updates the ContractUtility for a contract with a +// given id. 
+func (c *Contractor) updateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { + sc, exists := c.contracts.Acquire(c.resolveID(id)) + if !exists { + return errors.New("can't update non-existing contract") + } + defer c.contracts.Return(sc) + return sc.UpdateUtility(utility) +} diff --git a/modules/renter/contractor/host_integration_test.go b/modules/renter/contractor/host_integration_test.go index ea4d37aadd..23836aca96 100644 --- a/modules/renter/contractor/host_integration_test.go +++ b/modules/renter/contractor/host_integration_test.go @@ -350,7 +350,10 @@ func TestIntegrationRenew(t *testing.T) { // renew the contract oldContract, _ := c.contracts.Acquire(contract.ID) c.mu.Lock() - c.contractUtilities[contract.ID] = modules.ContractUtility{GoodForRenew: true} + err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + if err != nil { + t.Fatal(err) + } c.mu.Unlock() contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+200) if err != nil { @@ -383,7 +386,10 @@ func TestIntegrationRenew(t *testing.T) { // renew to a lower height oldContract, _ = c.contracts.Acquire(contract.ID) c.mu.Lock() - c.contractUtilities[contract.ID] = modules.ContractUtility{GoodForRenew: true} + err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + if err != nil { + t.Fatal(err) + } c.mu.Unlock() contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+100) if err != nil { diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index cf90db94a0..3d227a0729 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -168,6 +168,13 @@ func (c *SafeContract) UpdateUtility(utility modules.ContractUtility) error { return nil } +// Utility returns the contract utility for the contract. +func (c *SafeContract) Utility() modules.ContractUtility { + c.headerMu.Lock() + defer c.headerMu.Unlock() + return c.header.Utility +} + func (c *SafeContract) makeUpdateSetHeader(h contractHeader) writeaheadlog.Update { c.headerMu.Lock() id := c.header.ID() From e03ee335e73d935b7050ca3a906391926831dbe8 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 10:41:02 -0400 Subject: [PATCH 041/212] use View instead of Acquire to get contract utilities --- modules/renter.go | 3 +++ modules/renter/contractor/contracts.go | 5 ++--- modules/renter/proto/contract.go | 1 + 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/renter.go b/modules/renter.go index 87cbc2f07d..cd9cc759ec 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -233,6 +233,9 @@ type RenterContract struct { StorageSpending types.Currency UploadSpending types.Currency + // Utility contains utility information about the renter. + Utility ContractUtility + // TotalCost indicates the amount of money that the renter spent and/or // locked up while forming a contract. This includes fees, and includes // funds which were allocated (but not necessarily committed) to spend on diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 0dd7c5bb82..a6d2e24d0d 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -30,12 +30,11 @@ func (c *Contractor) contractEndHeight() types.BlockHeight { // ContractUtility returns the ContractUtility for a contract with a given id. 
func (c *Contractor) contractUtility(id types.FileContractID) modules.ContractUtility { - sc, exists := c.contracts.Acquire(c.resolveID(id)) + rc, exists := c.contracts.View(c.resolveID(id)) if !exists { return modules.ContractUtility{} } - defer c.contracts.Return(sc) - return sc.Utility() + return rc.Utility } // managedInterruptContractMaintenance will issue an interrupt signal to any diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index 3d227a0729..fe8ff419a9 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -129,6 +129,7 @@ func (c *SafeContract) Metadata() modules.RenterContract { ContractFee: h.ContractFee, TxnFee: h.TxnFee, SiafundFee: h.SiafundFee, + Utility: h.Utility, } } From c2f203e5a8488e023a9511ba728f9ab22f572bd7 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 11:55:35 -0400 Subject: [PATCH 042/212] call StreamPartial on random ranges --- siatest/renter.go | 13 +++++++++---- siatest/renter/renter_test.go | 12 ++++++++---- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/siatest/renter.go b/siatest/renter.go index 4388a1e1b5..c33c140e1c 100644 --- a/siatest/renter.go +++ b/siatest/renter.go @@ -67,14 +67,19 @@ func (tn *TestNode) Stream(rf *RemoteFile) (data []byte, err error) { return } -// StreamPartial uses the streaming endpoint to download a partial file. A -// local file can be provided optionally to implicitly check the checksum of -// the downloaded data. +// StreamPartial uses the streaming endpoint to download a partial file in +// range [from;to]. A local file can be provided optionally to implicitly check +// the checksum of the downloaded data. func (tn *TestNode) StreamPartial(rf *RemoteFile, lf *LocalFile, from, to uint64) (data []byte, err error) { data, err = tn.RenterStreamPartialGet(rf.siaPath, from, to) if err != nil { return } + if uint64(len(data)) != to-from+1 { + err = fmt.Errorf("length of downloaded data should be %v but was %v", + to-from+1, len(data)) + return + } if lf != nil { var checksum crypto.Hash checksum, err = lf.partialChecksum(from, to+1) @@ -83,7 +88,7 @@ func (tn *TestNode) StreamPartial(rf *RemoteFile, lf *LocalFile, from, to uint64 return } if checksum != crypto.HashBytes(data) { - err = errors.New("downloaded bytes don't match requested data") + err = fmt.Errorf("downloaded bytes don't match requested data %v-%v", from, to) return } } diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 9b1e1e3b10..972f18e75c 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -88,10 +88,14 @@ func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { if err != nil { t.Fatal(err) } - // Stream the file partially. - _, err = renter.StreamPartial(remoteFile, localFile, 0, uint64(fileSize/2)) - if err != nil { - t.Fatal(err) + // Stream the file partially a few times. At least 1 byte is streamed. 
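StreamPartial treats the range as inclusive on both ends, so a request for [from, to] must come back with exactly to-from+1 bytes, and the loop that follows picks random ranges accordingly. The bounds arithmetic can be checked in isolation (math/rand is used here only to keep the sketch standalone; the test itself uses fastrand):

```go
package main

import (
	"fmt"
	"math/rand"
)

func main() {
	fileSize := 100 // hypothetical; the test fuzzes this value

	for i := 0; i < 5; i++ {
		from := rand.Intn(fileSize - 1)             // [0, fileSize-2]
		to := from + 1 + rand.Intn(fileSize-from-1) // [from+1, fileSize-1]
		length := to - from + 1                     // inclusive on both ends

		if from < 0 || to > fileSize-1 || length < 2 {
			panic("range out of bounds")
		}
		fmt.Printf("request bytes [%d, %d] -> expect %d bytes\n", from, to, length)
	}
}
```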
+ for i := 0; i < 5; i++ { + from := fastrand.Intn(fileSize - 1) // [0..fileSize-2] + to := from + 1 + fastrand.Intn(fileSize-from-1) // [from+1..fileSize-1] + _, err = renter.StreamPartial(remoteFile, localFile, uint64(from), uint64(to)) + if err != nil { + t.Fatal(err) + } } } From 364138998d2ba9861e043e3f9d3a246d434abc78 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 12:19:04 -0400 Subject: [PATCH 043/212] fix TestIntegrationRenew --- modules/renter/contractor/host_integration_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/renter/contractor/host_integration_test.go b/modules/renter/contractor/host_integration_test.go index 23836aca96..db6928146f 100644 --- a/modules/renter/contractor/host_integration_test.go +++ b/modules/renter/contractor/host_integration_test.go @@ -348,13 +348,13 @@ func TestIntegrationRenew(t *testing.T) { } // renew the contract - oldContract, _ := c.contracts.Acquire(contract.ID) c.mu.Lock() err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } c.mu.Unlock() + oldContract, _ := c.contracts.Acquire(contract.ID) contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+200) if err != nil { t.Fatal(err) @@ -384,13 +384,13 @@ func TestIntegrationRenew(t *testing.T) { } // renew to a lower height - oldContract, _ = c.contracts.Acquire(contract.ID) c.mu.Lock() err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } c.mu.Unlock() + oldContract, _ = c.contracts.Acquire(contract.ID) contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+100) if err != nil { t.Fatal(err) From b4206dfdbadfd20740b42566ab4b072180f46a32 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 12:34:47 -0400 Subject: [PATCH 044/212] move UpdateContractUtility from contractor to contractset --- modules/renter/contractor/contracts.go | 19 ++++--------------- .../contractor/host_integration_test.go | 4 ++-- modules/renter/proto/contract.go | 4 ++-- modules/renter/proto/contractset.go | 14 ++++++++++++++ 4 files changed, 22 insertions(+), 19 deletions(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index a6d2e24d0d..3452c81918 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -139,7 +139,7 @@ func (c *Contractor) managedMarkContractsUtility() error { }() // Apply changes. - if err := c.updateContractUtility(contract.ID, utility); err != nil { + if err := c.contracts.UpdateContractUtility(contract.ID, utility); err != nil { return err } } @@ -499,13 +499,13 @@ func (c *Contractor) threadedContractMaintenance() { GoodForUpload: true, GoodForRenew: true, } - if err := c.updateContractUtility(newContract.ID, newUtility); err != nil { + if err := c.contracts.UpdateContractUtility(newContract.ID, newUtility); err != nil { c.log.Println("Failed to update the contract utilities", err) return } oldUtility.GoodForRenew = false oldUtility.GoodForUpload = false - if err := c.updateContractUtility(id, oldUtility); err != nil { + if err := c.contracts.UpdateContractUtility(id, oldUtility); err != nil { c.log.Println("Failed to update the contract utilities", err) return } @@ -599,7 +599,7 @@ func (c *Contractor) threadedContractMaintenance() { // Add this contract to the contractor and save. 
c.mu.Lock() - err = c.updateContractUtility(newContract.ID, modules.ContractUtility{ + err = c.contracts.UpdateContractUtility(newContract.ID, modules.ContractUtility{ GoodForUpload: true, GoodForRenew: true, }) @@ -629,14 +629,3 @@ func (c *Contractor) threadedContractMaintenance() { } } } - -// updateContractUtility updates the ContractUtility for a contract with a -// given id. -func (c *Contractor) updateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { - sc, exists := c.contracts.Acquire(c.resolveID(id)) - if !exists { - return errors.New("can't update non-existing contract") - } - defer c.contracts.Return(sc) - return sc.UpdateUtility(utility) -} diff --git a/modules/renter/contractor/host_integration_test.go b/modules/renter/contractor/host_integration_test.go index db6928146f..a19a0ed9b8 100644 --- a/modules/renter/contractor/host_integration_test.go +++ b/modules/renter/contractor/host_integration_test.go @@ -349,7 +349,7 @@ func TestIntegrationRenew(t *testing.T) { // renew the contract c.mu.Lock() - err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + err = c.contracts.UpdateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } @@ -385,7 +385,7 @@ func TestIntegrationRenew(t *testing.T) { // renew to a lower height c.mu.Lock() - err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + err = c.contracts.UpdateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index fe8ff419a9..f96f5de41d 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -133,8 +133,8 @@ func (c *SafeContract) Metadata() modules.RenterContract { } } -// UpdateUtility updates the utility field of a contract. -func (c *SafeContract) UpdateUtility(utility modules.ContractUtility) error { +// updateUtility updates the utility field of a contract. +func (c *SafeContract) updateUtility(utility modules.ContractUtility) error { // Get current header c.headerMu.Lock() newHeader := c.header diff --git a/modules/renter/proto/contractset.go b/modules/renter/proto/contractset.go index 883ec77e58..31b53dbfb2 100644 --- a/modules/renter/proto/contractset.go +++ b/modules/renter/proto/contractset.go @@ -49,6 +49,20 @@ func (cs *ContractSet) Acquire(id types.FileContractID) (*SafeContract, bool) { return safeContract, true } +// UpdateContractUtility updates the ContractUtility for a contract with a +// given id. +func (cs *ContractSet) UpdateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { + cs.mu.Lock() + safeContract, ok := cs.contracts[id] + cs.mu.Unlock() + if !ok { + return errors.New("can't update non-existing contract") + } + safeContract.mu.Lock() + defer safeContract.mu.Unlock() + return safeContract.updateUtility(utility) +} + // Delete removes a contract from the set. The contract must have been // previously acquired by Acquire. If the contract is not present in the set, // Delete is a no-op. 
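With the move above, callers update a contract's utility through the contract set rather than touching the SafeContract directly, and they read the persisted value back through View. A compile-only sketch of that round trip, assuming an already-loaded ContractSet and a known contract ID (the next patch in the series relocates the update method back onto SafeContract, but reading utilities via View stays the same):

```go
package example

import (
	"errors"

	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/modules/renter/proto"
	"github.com/NebulousLabs/Sia/types"
)

// markGoodForRenew is a hypothetical helper showing the round trip: persist a
// new utility through the contract set, then read it back via View.
func markGoodForRenew(cs *proto.ContractSet, id types.FileContractID) error {
	utility := modules.ContractUtility{
		GoodForUpload: true,
		GoodForRenew:  true,
	}
	if err := cs.UpdateContractUtility(id, utility); err != nil {
		return err
	}
	rc, ok := cs.View(id)
	if !ok {
		return errors.New("contract not found after update")
	}
	if !rc.Utility.GoodForRenew {
		return errors.New("utility was not persisted")
	}
	return nil
}
```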
From 1b05ece8f64bc35ec83f38026bfc342c9f0b7d55 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 13:35:14 -0400 Subject: [PATCH 045/212] make UpdateUtility a method on a SafeContractand change contractUtility Signature --- modules/renter/contractor/contractor.go | 7 +-- modules/renter/contractor/contracts.go | 49 +++++++++++++------ .../contractor/host_integration_test.go | 4 +- modules/renter/proto/contract.go | 2 +- modules/renter/proto/contractset.go | 14 ------ 5 files changed, 37 insertions(+), 39 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index cf4c1e3953..1483fff5ba 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -130,12 +130,7 @@ func (c *Contractor) Contracts() []modules.RenterContract { // ContractUtility returns the utility fields for the given contract. func (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { - sc, exists := c.contracts.Acquire(id) - if !exists { - return modules.ContractUtility{}, false - } - defer c.contracts.Return(sc) - return sc.Utility(), true + return c.contractUtility(id) } // CurrentPeriod returns the height at which the current allowance period diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 3452c81918..61b3e0af8f 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -6,6 +6,7 @@ package contractor import ( "errors" + "fmt" "math/big" "time" @@ -29,12 +30,12 @@ func (c *Contractor) contractEndHeight() types.BlockHeight { } // ContractUtility returns the ContractUtility for a contract with a given id. -func (c *Contractor) contractUtility(id types.FileContractID) modules.ContractUtility { +func (c *Contractor) contractUtility(id types.FileContractID) (modules.ContractUtility, bool) { rc, exists := c.contracts.View(c.resolveID(id)) if !exists { - return modules.ContractUtility{} + return modules.ContractUtility{}, false } - return rc.Utility + return rc.Utility, true } // managedInterruptContractMaintenance will issue an interrupt signal to any @@ -139,7 +140,10 @@ func (c *Contractor) managedMarkContractsUtility() error { }() // Apply changes. - if err := c.contracts.UpdateContractUtility(contract.ID, utility); err != nil { + c.mu.Lock() + err := c.updateContractUtility(contract.ID, utility) + c.mu.Unlock() + if err != nil { return err } } @@ -196,9 +200,10 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. // For convenience contract := sc.Metadata() // Sanity check - should not be renewing a bad contract. - utility := c.contractUtility(contract.ID) - if !utility.GoodForRenew { - c.log.Critical("Renewing a contract that has been marked as !GoodForRenew") + utility, ok := c.contractUtility(contract.ID) + if !ok || !utility.GoodForRenew { + c.log.Critical(fmt.Sprintf("Renewing a contract that has been marked as !GoodForRenew %v/%v", + ok, utility.GoodForRenew)) } // Fetch the host associated with this contract. @@ -355,8 +360,8 @@ func (c *Contractor) threadedContractMaintenance() { // Iterate through the contracts again, figuring out which contracts to // renew and how much extra funds to renew them with. 
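The renewal pass below now treats a missing utility entry the same as a contract that is not good for renew, checking the extra ok return before the flags. Code outside the contractor would typically make the same check through the exported ContractUtility method; a minimal sketch with a hypothetical helper name:

```go
package example

import (
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/modules/renter/contractor"
)

// goodForUploadContracts is a hypothetical helper that narrows the active
// contracts down to the ones the contractor considers usable for uploads.
func goodForUploadContracts(c *contractor.Contractor) []modules.RenterContract {
	var usable []modules.RenterContract
	for _, contract := range c.Contracts() {
		utility, ok := c.ContractUtility(contract.ID)
		if !ok || !utility.GoodForUpload {
			// Unknown contracts and contracts marked not good for upload
			// are skipped, mirroring the checks in the maintenance loop.
			continue
		}
		usable = append(usable, contract)
	}
	return usable
}
```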
for _, contract := range c.contracts.ViewAll() { - utility := c.contractUtility(contract.ID) - if !utility.GoodForRenew { + utility, ok := c.contractUtility(contract.ID) + if !ok || !utility.GoodForRenew { continue } if c.blockHeight+c.allowance.RenewWindow >= contract.EndHeight { @@ -476,9 +481,10 @@ func (c *Contractor) threadedContractMaintenance() { return } // Return the contract if it's not useful for renewing. - oldUtility := c.contractUtility(id) - if !oldUtility.GoodForRenew { - c.log.Printf("Contract %v slated for renew is marked not good for renew", id) + oldUtility, ok := c.contractUtility(id) + if !ok || !oldUtility.GoodForRenew { + c.log.Printf("Contract %v slated for renew is marked not good for renew %v/%v", + id, ok, oldUtility.GoodForRenew) c.contracts.Return(oldContract) return } @@ -499,13 +505,13 @@ func (c *Contractor) threadedContractMaintenance() { GoodForUpload: true, GoodForRenew: true, } - if err := c.contracts.UpdateContractUtility(newContract.ID, newUtility); err != nil { + if err := c.updateContractUtility(newContract.ID, newUtility); err != nil { c.log.Println("Failed to update the contract utilities", err) return } oldUtility.GoodForRenew = false oldUtility.GoodForUpload = false - if err := c.contracts.UpdateContractUtility(id, oldUtility); err != nil { + if err := c.updateContractUtility(id, oldUtility); err != nil { c.log.Println("Failed to update the contract utilities", err) return } @@ -559,7 +565,7 @@ func (c *Contractor) threadedContractMaintenance() { c.mu.RLock() uploadContracts := 0 for _, id := range c.contracts.IDs() { - if c.contractUtility(id).GoodForUpload { + if cu, ok := c.contractUtility(id); ok && cu.GoodForUpload { uploadContracts++ } } @@ -599,7 +605,7 @@ func (c *Contractor) threadedContractMaintenance() { // Add this contract to the contractor and save. c.mu.Lock() - err = c.contracts.UpdateContractUtility(newContract.ID, modules.ContractUtility{ + err = c.updateContractUtility(newContract.ID, modules.ContractUtility{ GoodForUpload: true, GoodForRenew: true, }) @@ -629,3 +635,14 @@ func (c *Contractor) threadedContractMaintenance() { } } } + +// updateContractUtility is a helper function that acquires a contract, updates +// its ContractUtility and returns the contract again. 
+func (c *Contractor) updateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { + sc, ok := c.contracts.Acquire(id) + if !ok { + return errors.New("failed to acquire contract for update") + } + defer c.contracts.Return(sc) + return sc.UpdateUtility(utility) +} diff --git a/modules/renter/contractor/host_integration_test.go b/modules/renter/contractor/host_integration_test.go index a19a0ed9b8..db6928146f 100644 --- a/modules/renter/contractor/host_integration_test.go +++ b/modules/renter/contractor/host_integration_test.go @@ -349,7 +349,7 @@ func TestIntegrationRenew(t *testing.T) { // renew the contract c.mu.Lock() - err = c.contracts.UpdateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } @@ -385,7 +385,7 @@ func TestIntegrationRenew(t *testing.T) { // renew to a lower height c.mu.Lock() - err = c.contracts.UpdateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index f96f5de41d..7f58d4339c 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -134,7 +134,7 @@ func (c *SafeContract) Metadata() modules.RenterContract { } // updateUtility updates the utility field of a contract. -func (c *SafeContract) updateUtility(utility modules.ContractUtility) error { +func (c *SafeContract) UpdateUtility(utility modules.ContractUtility) error { // Get current header c.headerMu.Lock() newHeader := c.header diff --git a/modules/renter/proto/contractset.go b/modules/renter/proto/contractset.go index 31b53dbfb2..883ec77e58 100644 --- a/modules/renter/proto/contractset.go +++ b/modules/renter/proto/contractset.go @@ -49,20 +49,6 @@ func (cs *ContractSet) Acquire(id types.FileContractID) (*SafeContract, bool) { return safeContract, true } -// UpdateContractUtility updates the ContractUtility for a contract with a -// given id. -func (cs *ContractSet) UpdateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { - cs.mu.Lock() - safeContract, ok := cs.contracts[id] - cs.mu.Unlock() - if !ok { - return errors.New("can't update non-existing contract") - } - safeContract.mu.Lock() - defer safeContract.mu.Unlock() - return safeContract.updateUtility(utility) -} - // Delete removes a contract from the set. The contract must have been // previously acquired by Acquire. If the contract is not present in the set, // Delete is a no-op. From b746122bcd943393315308c2c09578887f516a06 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Tue, 10 Apr 2018 19:42:28 +0200 Subject: [PATCH 046/212] Rename JSON field to match Go struct --- node/api/host.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/api/host.go b/node/api/host.go index cd274d8400..210543ebf4 100644 --- a/node/api/host.go +++ b/node/api/host.go @@ -27,7 +27,7 @@ type ( // ContractInfoGET contains the information that is returned after a GET request // to /host/contracts - information for the host about stored obligations. 
ContractInfoGET struct { - Contracts []modules.StorageObligation `json:"obligations"` + Contracts []modules.StorageObligation `json:"contracts"` } // HostGET contains the information that is returned after a GET request to From 1b5498785719d2a9a420853fc9a3d2d4b60c770a Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 14:51:26 -0400 Subject: [PATCH 047/212] fix lint error --- modules/renter/proto/contract.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index 7f58d4339c..fe8ff419a9 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -133,7 +133,7 @@ func (c *SafeContract) Metadata() modules.RenterContract { } } -// updateUtility updates the utility field of a contract. +// UpdateUtility updates the utility field of a contract. func (c *SafeContract) UpdateUtility(utility modules.ContractUtility) error { // Get current header c.headerMu.Lock() From 21c6f57609dce99fdbf357a4329a912b6ad515a9 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 15:19:42 -0400 Subject: [PATCH 048/212] fix deadlock --- modules/renter/contractor/contracts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 61b3e0af8f..2f44ab9f11 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -511,7 +511,7 @@ func (c *Contractor) threadedContractMaintenance() { } oldUtility.GoodForRenew = false oldUtility.GoodForUpload = false - if err := c.updateContractUtility(id, oldUtility); err != nil { + if err := oldContract.UpdateUtility(oldUtility); err != nil { c.log.Println("Failed to update the contract utilities", err) return } From 41c72b38283ad46b26c76814383a96a5900a7dad Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 00:00:42 +0200 Subject: [PATCH 049/212] Add status constants in modules/host.go. Change ObligationStatus to type string. Remove filter in node/api/host.go. A call to /host/contracts will give the caller all storage obligations. --- modules/host.go | 10 +++++++++- modules/host/storageobligations.go | 12 +++++++++++- node/api/host.go | 31 +----------------------------- 3 files changed, 21 insertions(+), 32 deletions(-) diff --git a/modules/host.go b/modules/host.go index 4e89a98d34..26693c13dd 100644 --- a/modules/host.go +++ b/modules/host.go @@ -7,6 +7,14 @@ import ( const ( // HostDir names the directory that contains the host persistence. HostDir = "host" + // ObligationStatusFailed is the status for failed storage obligations + ObligationStatusFailed = "failed" + // ObligationStatusRejected is the status for rejected storage obligations + ObligationStatusRejected = "rejected" + // ObligationStatusSucceeded is the status for succeeded storage obligations + ObligationStatusSucceeded = "succeeded" + // ObligationStatusUnresolved is the status for unresolved storage obligations + ObligationStatusUnresolved = "unresolved" ) var ( @@ -137,7 +145,7 @@ type ( RevisionConfirmed bool `json:"revisionconfirmed"` ProofConstructed bool `json:"proofconstructed"` ProofConfirmed bool `json:"proofconfirmed"` - ObligationStatus uint64 `json:"obligationstatus"` + ObligationStatus string `json:"obligationstatus"` } // HostWorkingStatus reports the working state of a host. 
Can be one of diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index b52ab8e12f..c686bb6547 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -869,6 +869,16 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { if err != nil { return build.ExtendErr("unable to unmarshal storage obligation:", err) } + statusString := map[storageObligationStatus]string{ + obligationFailed: modules.ObligationStatusFailed, + obligationRejected: modules.ObligationStatusRejected, + obligationSucceeded: modules.ObligationStatusSucceeded, + obligationUnresolved: modules.ObligationStatusUnresolved, + }[so.ObligationStatus] + if statusString == "" { + h.log.Severe("WARN: invalid obligation status encountered when reading database.") + return build.ExtendErr("unable to parse obligation status.", err) + } mso := modules.StorageObligation{ ObligationId: so.id(), FileSize: so.fileSize(), @@ -890,7 +900,7 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { RevisionConfirmed: so.RevisionConfirmed, ProofConstructed: so.ProofConstructed, ProofConfirmed: so.ProofConfirmed, - ObligationStatus: uint64(so.ObligationStatus), + ObligationStatus: statusString, } sos = append(sos, mso) return nil diff --git a/node/api/host.go b/node/api/host.go index 210543ebf4..12c53b5cc0 100644 --- a/node/api/host.go +++ b/node/api/host.go @@ -69,38 +69,9 @@ func folderIndex(folderPath string, storageFolders []modules.StorageFolderMetada // hostContractInfoHandler handles the API call to get the contract information of the host. // Information is retrieved via the storage obligations from the host database. -// TODO: filters are hard coded. Adding or removing storage obligation statuses will -// currently break the API. 
func (api *API) hostContractInfoHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - statusStr := req.FormValue("status") - if statusStr == "" { - WriteError(w, Error{"Status must be provided to a /host/contracts call."}, http.StatusBadRequest) - return - } - var filter uint64 - switch statusStr { - case "unresolved": - filter = 0 - case "rejected": - filter = 1 - case "succeeded": - filter = 2 - case "failed": - filter = 3 - case "all": - filter = 4 - default: - WriteError(w, Error{"Unable to parse contract status."}, http.StatusBadRequest) - return - } - sos := []modules.StorageObligation{} - for _, so := range api.host.StorageObligations() { - if so.ObligationStatus == filter || filter == 4 { - sos = append(sos, so) - } - } cg := ContractInfoGET{ - Contracts: sos, + Contracts: api.host.StorageObligations(), } WriteJSON(w, cg) } From 3972a1348b172fbd1db2e576c308b64e6a644f78 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 00:34:13 +0200 Subject: [PATCH 050/212] Change return value to nil --- modules/host/storageobligations.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index c686bb6547..fdd859ebd5 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -877,7 +877,7 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { }[so.ObligationStatus] if statusString == "" { h.log.Severe("WARN: invalid obligation status encountered when reading database.") - return build.ExtendErr("unable to parse obligation status.", err) + return nil } mso := modules.StorageObligation{ ObligationId: so.id(), From 9c45c6e3a7113897920ef040956c1023b47f9fc9 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 08:46:02 +0200 Subject: [PATCH 051/212] Use stringer to get status string from storageObligationStatus type --- modules/host/storageobligations.go | 12 +----------- modules/host/storageobligationstatus_string.go | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 11 deletions(-) create mode 100644 modules/host/storageobligationstatus_string.go diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index fdd859ebd5..3494273829 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -869,16 +869,6 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { if err != nil { return build.ExtendErr("unable to unmarshal storage obligation:", err) } - statusString := map[storageObligationStatus]string{ - obligationFailed: modules.ObligationStatusFailed, - obligationRejected: modules.ObligationStatusRejected, - obligationSucceeded: modules.ObligationStatusSucceeded, - obligationUnresolved: modules.ObligationStatusUnresolved, - }[so.ObligationStatus] - if statusString == "" { - h.log.Severe("WARN: invalid obligation status encountered when reading database.") - return nil - } mso := modules.StorageObligation{ ObligationId: so.id(), FileSize: so.fileSize(), @@ -900,7 +890,7 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { RevisionConfirmed: so.RevisionConfirmed, ProofConstructed: so.ProofConstructed, ProofConfirmed: so.ProofConfirmed, - ObligationStatus: statusString, + ObligationStatus: so.ObligationStatus.String(), } sos = append(sos, mso) return nil diff --git a/modules/host/storageobligationstatus_string.go b/modules/host/storageobligationstatus_string.go new file mode 100644 index 0000000000..f237e503f1 
--- /dev/null +++ b/modules/host/storageobligationstatus_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=storageObligationStatus"; DO NOT EDIT. + +package host + +import "strconv" + +const _storageObligationStatus_name = "obligationUnresolvedobligationRejectedobligationSucceededobligationFailed" + +var _storageObligationStatus_index = [...]uint8{0, 20, 38, 57, 73} + +func (i storageObligationStatus) String() string { + if i >= storageObligationStatus(len(_storageObligationStatus_index)-1) { + return "storageObligationStatus(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _storageObligationStatus_name[_storageObligationStatus_index[i]:_storageObligationStatus_index[i+1]] +} From 73395f0de3304ef7314c27e0485eff8427875fa4 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 08:48:20 +0200 Subject: [PATCH 052/212] Remove obligation status constants --- modules/host.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/modules/host.go b/modules/host.go index 26693c13dd..d9b9037973 100644 --- a/modules/host.go +++ b/modules/host.go @@ -7,14 +7,6 @@ import ( const ( // HostDir names the directory that contains the host persistence. HostDir = "host" - // ObligationStatusFailed is the status for failed storage obligations - ObligationStatusFailed = "failed" - // ObligationStatusRejected is the status for rejected storage obligations - ObligationStatusRejected = "rejected" - // ObligationStatusSucceeded is the status for succeeded storage obligations - ObligationStatusSucceeded = "succeeded" - // ObligationStatusUnresolved is the status for unresolved storage obligations - ObligationStatusUnresolved = "unresolved" ) var ( From cf4a00deebe82c0f71542e6d8d429d37d4cd7067 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 08:57:11 +0200 Subject: [PATCH 053/212] Sort StorageObligation fields. Sort storageObligation fields. Rename FileSize to DataSize as it represents the data size. --- modules/host.go | 14 +++++++------- modules/host/storageobligations.go | 22 +++++++++++----------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/modules/host.go b/modules/host.go index d9b9037973..bbbfe926c8 100644 --- a/modules/host.go +++ b/modules/host.go @@ -111,11 +111,11 @@ type ( // StorageObligation contains information about a storage obligation that // the host has accepted. StorageObligation struct { - ObligationId types.FileContractID `json:"obligationid"` - FileSize uint64 `json:"filesize"` - SectorRootsCount uint64 `json:"sectorrootscount"` ContractCost types.Currency `json:"contractcost"` + DataSize uint64 `json:"datasize"` LockedCollateral types.Currency `json:"lockedcollateral"` + ObligationId types.FileContractID `json:"obligationid"` + SectorRootsCount uint64 `json:"sectorrootscount"` PotentialDownloadRevenue types.Currency `json:"potentialdownloadrevenue"` PotentialStorageRevenue types.Currency `json:"potentialstoragerevenue"` PotentialUploadRevenue types.Currency `json:"potentialuploadrevenue"` @@ -126,18 +126,18 @@ type ( // contract was negotiated. The expiration height and the proof deadline // are equal to the window start and window end. Between the expiration height // and the proof deadline, the host must submit the storage proof. 
- NegotiationHeight types.BlockHeight `json:"negotiationheight"` ExpirationHeight types.BlockHeight `json:"expirationheight"` + NegotiationHeight types.BlockHeight `json:"negotiationheight"` ProofDeadLine types.BlockHeight `json:"proofdeadline"` // Variables indicating whether the critical transactions in a storage // obligation have been confirmed on the blockchain. + ObligationStatus string `json:"obligationstatus"` OriginConfirmed bool `json:"originconfirmed"` - RevisionConstructed bool `json:"revisionconstructed"` RevisionConfirmed bool `json:"revisionconfirmed"` - ProofConstructed bool `json:"proofconstructed"` + RevisionConstructed bool `json:"revisionconstructed"` ProofConfirmed bool `json:"proofconfirmed"` - ObligationStatus string `json:"obligationstatus"` + ProofConstructed bool `json:"proofconstructed"` } // HostWorkingStatus reports the working state of a host. Can be one of diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 3494273829..699c42ef15 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -139,12 +139,12 @@ type storageObligation struct { // Variables indicating whether the critical transactions in a storage // obligation have been confirmed on the blockchain. + ObligationStatus storageObligationStatus OriginConfirmed bool - RevisionConstructed bool - RevisionConfirmed bool - ProofConstructed bool ProofConfirmed bool - ObligationStatus storageObligationStatus + ProofConstructed bool + RevisionConfirmed bool + RevisionConstructed bool } // getStorageObligation fetches a storage obligation from the database tx. @@ -870,27 +870,27 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { return build.ExtendErr("unable to unmarshal storage obligation:", err) } mso := modules.StorageObligation{ - ObligationId: so.id(), - FileSize: so.fileSize(), - SectorRootsCount: uint64(len(so.SectorRoots)), ContractCost: so.ContractCost, + DataSize: so.fileSize(), LockedCollateral: so.LockedCollateral, + ObligationId: so.id(), PotentialDownloadRevenue: so.PotentialDownloadRevenue, PotentialStorageRevenue: so.PotentialStorageRevenue, PotentialUploadRevenue: so.PotentialUploadRevenue, RiskedCollateral: so.RiskedCollateral, + SectorRootsCount: uint64(len(so.SectorRoots)), TransactionFeesAdded: so.TransactionFeesAdded, - NegotiationHeight: so.NegotiationHeight, ExpirationHeight: so.expiration(), + NegotiationHeight: so.NegotiationHeight, ProofDeadLine: so.proofDeadline(), + ObligationStatus: so.ObligationStatus.String(), OriginConfirmed: so.OriginConfirmed, - RevisionConstructed: so.RevisionConstructed, RevisionConfirmed: so.RevisionConfirmed, - ProofConstructed: so.ProofConstructed, + RevisionConstructed: so.RevisionConstructed, ProofConfirmed: so.ProofConfirmed, - ObligationStatus: so.ObligationStatus.String(), + ProofConstructed: so.ProofConstructed, } sos = append(sos, mso) return nil From c4963c0e7cdba5d96af618d8c9813bc0a35a4d33 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 11:28:48 +0200 Subject: [PATCH 054/212] Add documentation for /host/contracts API end point --- doc/API.md | 39 +++++++++++++++++++++++++-- doc/api/Host.md | 70 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 107 insertions(+), 2 deletions(-) diff --git a/doc/API.md b/doc/API.md index ca25716b6a..4ed230e294 100644 --- a/doc/API.md +++ b/doc/API.md @@ -257,6 +257,7 @@ Host | [/host](#host-get) | GET | | [/host](#host-post) | POST | | [/host/announce](#hostannounce-post) | POST | +| 
[/host/contracts](#hostcontracts-get) | GET | | [/host/estimatescore](#hostestimatescore-get) | GET | | [/host/storage](#hoststorage-get) | GET | | [/host/storage/folders/add](#hoststoragefoldersadd-post) | POST | @@ -392,11 +393,45 @@ netaddress string // Optional standard success or error response. See [#standard-responses](#standard-responses). +#### /host/contracts [GET] + +gets a list of all contracts from the host database + +###### JSON Response [(with comments)](/doc/api/Host.md#json-response-1) +```javascript +{ + "contracts": [ + { + "contractcost": "1234", // hastings + "datasize": 500000, // bytes + "lockedcollateral": "1234", // hastings + "obligationid": "fff48010dcbbd6ba7ffd41bc4b25a3634ee58bbf688d2f06b7d5a0c837304e13", + "sectorrootscount": 2, + "potentialdownloadrevenue": "1234", // hastings + "potentialstoragerevenue": "1234", // hastings + "potentialuploadrevenue": "1234", // hastings + "riskedcollateral": "1234", // hastings + "transactionfeesadded": "1234", // hastings + + "expirationheight": 123456, // blocks + "negotionheight": 123456, // blocks + "proofdeadline": 123456, // blocks + + "obligationstatus": "obligationFailed", + "originconfirmed": true, + "revisionconfirmed": false, + "proofconfirmed": true, + "proofconstructed": true + } + ] +} +``` + #### /host/storage [GET] gets a list of folders tracked by the host's storage manager. -###### JSON Response [(with comments)](/doc/api/Host.md#json-response-1) +###### JSON Response [(with comments)](/doc/api/Host.md#json-response-2) ```javascript { "folders": [ @@ -487,7 +522,7 @@ standard success or error response. See returns the estimated HostDB score of the host using its current settings, combined with the provided settings. -###### JSON Response [(with comments)](/doc/api/Host.md#json-response-2) +###### JSON Response [(with comments)](/doc/api/Host.md#json-response-3) ```javascript { "estimatedscore": "123456786786786786786786786742133", diff --git a/doc/api/Host.md b/doc/api/Host.md index 559a342bb5..26c4f2b437 100644 --- a/doc/api/Host.md +++ b/doc/api/Host.md @@ -25,6 +25,7 @@ Index | [/host](#host-get) | GET | | [/host](#host-post) | POST | | [/host/announce](#hostannounce-post) | POST | +| [/host/contracts](#hostcontracts-get) | GET | | [/host/estimatescore](#hostestimatescore-get) | GET | | [/host/storage](#hoststorage-get) | GET | | [/host/storage/folders/add](#hoststoragefoldersadd-post) | POST | @@ -432,6 +433,75 @@ netaddress string // Optional standard success or error response. See [#standard-responses](#standard-responses). +#### /host/contracts [GET] + +Get contract information from the host database. This call will return all storage obligations on the host. Its up to the caller to filter the contracts based on his needs. + +###### JSON Response +```javascript +{ + "contracts": [ + // Amount in hastings to cover the transaction fees for this storage obligation. + "contractcost": "1234", // hastings + + // Size of the data that is protected by the contract. + "datasize": 50000, // bytes + + // Amount that is locked as collateral for this storage obligation. + "lockedcollateral": "1234", // hastings + + // Id of the storageobligation, which is defined by the file contract id of the file contract that governs the storage obligation. + "obligationid": "fff48010dcbbd6ba7ffd41bc4b25a3634ee58bbf688d2f06b7d5a0c837304e13", + + // Number of sector roots. + "sectorrootscount": 2, + + // Potential revenue for downloaded data that the host will reveive upon successful completion of the obligation. 
+ "potentialdownloadrevenue": "1234", // hastings + + // Potential revenue for storage of data that the host will reveive upon successful completion of the obligation. + "potentialstoragerevenue": "1234", // hastings + + // Potential revenue for uploaded data that the host will reveive upon successful completion of the obligation. + "potentialuploadrevenue": "1234", // hastings + + // Amount that the host might lose if the submission of the storage proof is not successful. + "riskedcollateral": "1234", // hastings + + // Amount for transaction fees that the host added to the storage obligation. + "transactionfeesadded": "1234", // hastings + + // Experation height is the height at which the storage obligation expires. + "experationheight": 123456, // blocks + + // Negotion height is the height at which the storage obligation was negotiated. + "negotionheight": 0, // blocks + + // The proof deadline is the height by which the storage proof must be submitted. + "proofdeadline": 123456, // blocks + + // Status of the storage obligation. There are 4 different statuses: + // obligationFailed: the storage obligation failed, potential revenues and risked collateral are lost + // obligationRejected: the storage obligation was never started, no revenues gained or lost + // obligationSucceeded: the storage obligation was completed, revenues were gained + // obligationUnresolved: the storage obligation has an uninitialized value. When the "proofdeadline" is in the past this might be a stale obligation. + "obligationstatus": obligationFailed, + + // Origin confirmed indicates whether the file contract was seen on the blockchain for this storage obligation. + "originconfirmed": true, + + // Revision confirmed indicates whether there was a file contract revision seen on the blockchain for this storage obligation. + "revisionconfirmed": true, + + // Proof confirmed indicates whether there was a storage proof seen on the blockchain for this storage obligation. + "proofconfirmed": true, + + // The host has constructed a storage proof + "proofconstructed": false + ] +} +``` + #### /host/storage [GET] gets a list of folders tracked by the host's storage manager. From 3838ecf225ad500ed5f533e5cd8c0f5356e5ed98 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 11:33:21 +0200 Subject: [PATCH 055/212] Add TODO for fields that are not set or used in storageObligation struct --- modules/host/storageobligations.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 699c42ef15..45a8d314ab 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -27,6 +27,9 @@ package host // TODO: Make sure that not too many action items are being created. +// TODO: The ProofConstructed and NegotiationHeight fields of storageObligation +// are not set or used. 
+ import ( "encoding/binary" "encoding/json" From 86317f6c07c37e3ac67bb06718b9c6364c3edde4 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 11 Apr 2018 11:20:21 -0400 Subject: [PATCH 056/212] implement review changes --- modules/renter/contractor/contractor.go | 6 ++---- modules/renter/contractor/contracts.go | 18 +++++++++--------- 2 files changed, 11 insertions(+), 13 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 1483fff5ba..757b1f4caa 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -130,7 +130,7 @@ func (c *Contractor) Contracts() []modules.RenterContract { // ContractUtility returns the utility fields for the given contract. func (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { - return c.contractUtility(id) + return c.staticContractUtility(id) } // CurrentPeriod returns the height at which the current allowance period @@ -239,9 +239,7 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host } // Mark contract utility. - if err := c.managedMarkContractsUtility(); err != nil { - return nil, err - } + cm.managedMarkContractsUtility() // Subscribe to the consensus set. err = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan()) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 2f44ab9f11..10b6ce9498 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -29,8 +29,8 @@ func (c *Contractor) contractEndHeight() types.BlockHeight { return c.currentPeriod + c.allowance.Period } -// ContractUtility returns the ContractUtility for a contract with a given id. -func (c *Contractor) contractUtility(id types.FileContractID) (modules.ContractUtility, bool) { +// staticContractUtility returns the ContractUtility for a contract with a given id. +func (c *Contractor) staticContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { rc, exists := c.contracts.View(c.resolveID(id)) if !exists { return modules.ContractUtility{}, false @@ -200,7 +200,7 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. // For convenience contract := sc.Metadata() // Sanity check - should not be renewing a bad contract. - utility, ok := c.contractUtility(contract.ID) + utility, ok := c.staticContractUtility(contract.ID) if !ok || !utility.GoodForRenew { c.log.Critical(fmt.Sprintf("Renewing a contract that has been marked as !GoodForRenew %v/%v", ok, utility.GoodForRenew)) @@ -360,7 +360,7 @@ func (c *Contractor) threadedContractMaintenance() { // Iterate through the contracts again, figuring out which contracts to // renew and how much extra funds to renew them with. for _, contract := range c.contracts.ViewAll() { - utility, ok := c.contractUtility(contract.ID) + utility, ok := c.staticContractUtility(contract.ID) if !ok || !utility.GoodForRenew { continue } @@ -481,7 +481,7 @@ func (c *Contractor) threadedContractMaintenance() { return } // Return the contract if it's not useful for renewing. 
- oldUtility, ok := c.contractUtility(id) + oldUtility, ok := c.staticContractUtility(id) if !ok || !oldUtility.GoodForRenew { c.log.Printf("Contract %v slated for renew is marked not good for renew %v/%v", id, ok, oldUtility.GoodForRenew) @@ -565,7 +565,7 @@ func (c *Contractor) threadedContractMaintenance() { c.mu.RLock() uploadContracts := 0 for _, id := range c.contracts.IDs() { - if cu, ok := c.contractUtility(id); ok && cu.GoodForUpload { + if cu, ok := c.staticContractUtility(id); ok && cu.GoodForUpload { uploadContracts++ } } @@ -639,10 +639,10 @@ func (c *Contractor) threadedContractMaintenance() { // updateContractUtility is a helper function that acquires a contract, updates // its ContractUtility and returns the contract again. func (c *Contractor) updateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { - sc, ok := c.contracts.Acquire(id) + safeContract, ok := c.contracts.Acquire(id) if !ok { return errors.New("failed to acquire contract for update") } - defer c.contracts.Return(sc) - return sc.UpdateUtility(utility) + defer c.contracts.Return(safeContract) + return safeContract.UpdateUtility(utility) } From d4e6a24a60cebfe1f9b3d3776695ae2f657a8acd Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 11 Apr 2018 10:44:09 -0400 Subject: [PATCH 057/212] Fix NDF in siatest/renter package --- siatest/testgroup.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/siatest/testgroup.go b/siatest/testgroup.go index 7ca26daf26..5fa50bb688 100644 --- a/siatest/testgroup.go +++ b/siatest/testgroup.go @@ -430,8 +430,12 @@ func (tg *TestGroup) AddNodes(nps ...node.NodeParams) error { if err := fullyConnectNodes(nodes); err != nil { return build.ExtendErr("failed to fully connect nodes", err) } - // Fund nodes. + // Make sure the new nodes are synced. miner := mapToSlice(tg.miners)[0] + if err := synchronizationCheck(miner, tg.nodes); err != nil { + return build.ExtendErr("synchronization check failed", err) + } + // Fund nodes. if err := fundNodes(miner, newNodes); err != nil { return build.ExtendErr("failed to fund new hosts", err) } From ada4f3d451da0497c25d6a4f0da27b58009a489e Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 19:38:23 +0200 Subject: [PATCH 058/212] Use /host/contract API calls with tests --- node/api/renterhost_test.go | 54 ++++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/node/api/renterhost_test.go b/node/api/renterhost_test.go index 26b405d923..27295c690e 100644 --- a/node/api/renterhost_test.go +++ b/node/api/renterhost_test.go @@ -101,6 +101,23 @@ func TestHostObligationAcceptingContracts(t *testing.T) { t.Fatal(err) } + // Get contracts via API call + var cts ContractInfoGET + err = st.getAPI("/host/contracts", &cts) + if err != nil { + t.Fatal(err) + } + + // There should be some contracts returned + if len(cts.Contracts) == 0 { + t.Fatal("No contracts returned from /host/contracts API call.") + } + + // Check if the number of contracts are equal to the number of storage obligations + if len(cts.Contracts) != len(st.host.StorageObligations()) { + t.Fatal("Number of contracts returned by API call and host method don't match.") + } + // set acceptingcontracts = false, mine some blocks, verify we can download settings := st.host.InternalSettings() settings.AcceptingContracts = false @@ -199,11 +216,14 @@ func TestHostAndRentVanilla(t *testing.T) { } // Check the host, who should now be reporting file contracts. 
- // - // TODO: Switch to using an API call. - obligations := st.host.StorageObligations() - if len(obligations) != 1 { - t.Error("Host has wrong number of obligations:", len(obligations)) + var cts ContractInfoGET + err = st.getAPI("/host/contracts", &cts) + if err != nil { + t.Fatal(err) + } + + if len(cts.Contracts) != 1 { + t.Error("Host has wrong number of obligations:", len(cts.Contracts)) } // Create a file. @@ -319,11 +339,16 @@ func TestHostAndRentVanilla(t *testing.T) { // Check that the host was able to get the file contract confirmed on the // blockchain. - obligations = st.host.StorageObligations() - if len(obligations) != 1 { - t.Error("Host has wrong number of obligations:", len(obligations)) + cts = ContractInfoGET{} + err = st.getAPI("/host/contracts", &cts) + if err != nil { + t.Fatal(err) + } + + if len(cts.Contracts) != 1 { + t.Error("Host has wrong number of obligations:", len(cts.Contracts)) } - if !obligations[0].OriginConfirmed { + if !cts.Contracts[0].OriginConfirmed { t.Error("host has not seen the file contract on the blockchain") } @@ -336,10 +361,15 @@ func TestHostAndRentVanilla(t *testing.T) { time.Sleep(time.Millisecond * 200) } + cts = ContractInfoGET{} + err = st.getAPI("/host/contracts", &cts) + if err != nil { + t.Fatal(err) + } + success := false - obligations = st.host.StorageObligations() - for _, obligation := range obligations { - if obligation.ProofConfirmed { + for _, contract := range cts.Contracts { + if contract.ProofConfirmed { success = true break } From de7a2f4ac1fe3ac3c687f1c1c5f09cfa2bd91157 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Wed, 11 Apr 2018 19:59:19 +0200 Subject: [PATCH 059/212] Change field to string type --- doc/api/Host.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/api/Host.md b/doc/api/Host.md index 26c4f2b437..ab52b2a139 100644 --- a/doc/api/Host.md +++ b/doc/api/Host.md @@ -485,7 +485,7 @@ Get contract information from the host database. This call will return all stora // obligationRejected: the storage obligation was never started, no revenues gained or lost // obligationSucceeded: the storage obligation was completed, revenues were gained // obligationUnresolved: the storage obligation has an uninitialized value. When the "proofdeadline" is in the past this might be a stale obligation. - "obligationstatus": obligationFailed, + "obligationstatus": "obligationFailed", // Origin confirmed indicates whether the file contract was seen on the blockchain for this storage obligation. "originconfirmed": true, From e99cebf5cf819769d4016ca60af47e44718b6e93 Mon Sep 17 00:00:00 2001 From: Michael Lynch Date: Wed, 11 Apr 2018 20:25:05 -0400 Subject: [PATCH 060/212] log contract ID on contract formation On renewal attempts, Sia logs the ID of the contract associated with the log message, but it does not log it when the contract is formed. Logging the contract helps debug issues related to contracts, so adding the contract ID to the log output. 
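
Concretely, the formation message gains the contract ID as its first value; as the diff below shows, the log call changes from

    c.log.Printf("Formed contract with %v for %v", host.NetAddress, contractValue.HumanString())

to

    c.log.Printf("Formed contract %v with %v for %v", contract.ID, host.NetAddress, contractValue.HumanString())

so a contract's formation can later be correlated with its renewal messages by ID.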
--- modules/renter/contractor/contracts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index bc50122e6c..66e61f524e 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -175,7 +175,7 @@ func (c *Contractor) managedNewContract(host modules.HostDBEntry, contractFundin } contractValue := contract.RenterFunds - c.log.Printf("Formed contract with %v for %v", host.NetAddress, contractValue.HumanString()) + c.log.Printf("Formed contract %v with %v for %v", contract.ID, host.NetAddress, contractValue.HumanString()) return contract, nil } From 663c22fd49b511df795d18ffb7b16f5ab46882f9 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 13 Apr 2018 10:38:38 -0400 Subject: [PATCH 061/212] only repair if enough workers are available --- modules/renter/uploadheap.go | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 0df2ecd1e1..28cc27aa33 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -320,16 +320,28 @@ func (r *Renter) threadedUploadLoop() { break } - // If there is work to do, perform the work. managedPrepareNextChunk - // will block until enough memory is available to perform the work, - // slowing this thread down to using only the resources that are - // available. + // Check if there is work by trying to pop of the next chunk from + // the heap. nextChunk := r.uploadHeap.managedPop() - if nextChunk != nil { - r.managedPrepareNextChunk(nextChunk, hosts) + if nextChunk == nil { + break + } + + // Make sure we have enough workers for this chunk to reach minimum + // redundancy. Otherwise we ignore this chunk for now and try again + // the next time we rebuild the heap and refresh the workers. + r.mu.RLock() + availableWorkers := len(r.workerPool) + rm.mu.RUnlock() + if availableWorkers < nextChunk.minimumPieces { continue } - break + + // Perform the work. managedPrepareNextChunk will block until + // enough memory is available to perform the work, slowing this + // thread down to using only the resources that are available. + r.managedPrepareNextChunk(nextChunk, hosts) + continue } // Block until new work is required. From fca35c16b210f37e236fd410db6d6627b8cd8021 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 13 Apr 2018 11:20:16 -0400 Subject: [PATCH 062/212] revert contractHeader field ordering --- modules/renter/proto/contract.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index fe8ff419a9..d7900e1ddb 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -45,14 +45,14 @@ type contractHeader struct { SecretKey crypto.SecretKey // Same as modules.RenterContract. 
- ContractFee types.Currency - DownloadSpending types.Currency - SiafundFee types.Currency StartHeight types.BlockHeight + DownloadSpending types.Currency StorageSpending types.Currency + UploadSpending types.Currency TotalCost types.Currency + ContractFee types.Currency TxnFee types.Currency - UploadSpending types.Currency + SiafundFee types.Currency Utility modules.ContractUtility } @@ -410,6 +410,8 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha return sc.Metadata(), nil } +// loadSafeContract loads a contract from disk and adds it to the contractset +// if it is valid. func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlog.Transaction) error { f, err := os.OpenFile(filename, os.O_RDWR, 0600) if err != nil { From a64ecd5a5605eb2f8d0251073e045408aa6cc428 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 13 Apr 2018 13:18:31 -0400 Subject: [PATCH 063/212] don't build unfinished chunks if we don't have enough workers --- modules/renter/uploadheap.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 28cc27aa33..21b8fb4fed 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -110,6 +110,11 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un return nil } + // If we don't have enough workers for the file, don't repair it right now. + if len(r.workerPool) < f.erasureCode.MinPieces() { + return nil + } + // Assemble the set of chunks. // // TODO / NOTE: Future files may have a different method for determining the From 556eabb8d052801c76febc50589f78a65dd99e3c Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 13 Apr 2018 13:35:04 -0400 Subject: [PATCH 064/212] fix build --- modules/renter/contractor/contractor.go | 2 +- modules/renter/uploadheap.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 757b1f4caa..858b6c8840 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -239,7 +239,7 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host } // Mark contract utility. - cm.managedMarkContractsUtility() + c.managedMarkContractsUtility() // Subscribe to the consensus set. err = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan()) diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 21b8fb4fed..30ac665d02 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -335,9 +335,9 @@ func (r *Renter) threadedUploadLoop() { // Make sure we have enough workers for this chunk to reach minimum // redundancy. Otherwise we ignore this chunk for now and try again // the next time we rebuild the heap and refresh the workers. 
- r.mu.RLock() + id := r.mu.RLock() availableWorkers := len(r.workerPool) - rm.mu.RUnlock() + r.mu.RUnlock(id) if availableWorkers < nextChunk.minimumPieces { continue } From 4797340e4b910fdf5cef1148f68bc505fbb29b38 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 13 Apr 2018 15:30:31 -0400 Subject: [PATCH 065/212] update /consensus/blocks endpoint docs --- doc/API.md | 16 ++++++++++++++++ doc/api/Consensus.md | 19 ++++++++++++------- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/doc/API.md b/doc/API.md index 547b0ecf5d..d1247f7d2c 100644 --- a/doc/API.md +++ b/doc/API.md @@ -160,6 +160,7 @@ Consensus | Route | HTTP verb | | --------------------------------------------------------------------------- | --------- | | [/consensus](#consensus-get) | GET | +| [/consensus/blocks](#consensusblocks-get) | GET | | [/consensus/validate/transactionset](#consensusvalidatetransactionset-post) | POST | For examples and detailed descriptions of request and response parameters, @@ -180,6 +181,21 @@ returns information about the consensus set, such as the current block height. } ``` +#### /consensus/blocks [GET] + +Returns the block for a given id or height. + +###### Query String Parameters +One of the following parameters can be specified. +``` +// BlockID of the requested block. +id + +// BlockHeight of the requested block. +height + +``` + #### /consensus/validate/transactionset [POST] validates a set of transactions using the current utxo set. diff --git a/doc/api/Consensus.md b/doc/api/Consensus.md index 70be2fcc3e..d23f658b01 100644 --- a/doc/api/Consensus.md +++ b/doc/api/Consensus.md @@ -23,8 +23,7 @@ Index | Route | HTTP verb | | --------------------------------------------------------------------------- | --------- | | [/consensus](#consensus-get) | GET | -| [/consensus/blocks/:id](#consensus-blocks-id-get) | GET | -| [/consensus/headers/:height](#consensus-headers-height-get) | GET | +| [/consensus/blocks](#consensusblocks-get) | GET | | [/consensus/validate/transactionset](#consensusvalidatetransactionset-post) | POST | #### /consensus [GET] @@ -52,14 +51,20 @@ returns information about the consensus set, such as the current block height. } ``` -#### /consensus/blocks/:id [GET] +#### /consensus/blocks [GET] -Returns the block for a given id. +Returns the block for a given id or height. -#### /consensus/headers/:height [GET] +###### Query String Parameters +One of the following parameters can be specified. +``` +// BlockID of the requested block. +id + +// BlockHeight of the requested block. +height -Returns header information of a block at a given height. At the moment only the -BlockID is included, but this can be extended as needed. 
+``` #### /consensus/validate/transactionset [POST] From 5d92812336d293f8ab7c27375ac52c05a9c34478 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 16 Apr 2018 13:03:46 -0400 Subject: [PATCH 066/212] Improve contractor fee reporting --- cmd/siac/rentercmd.go | 5 ++-- modules/renter.go | 25 +++++++++++++++----- modules/renter/contractor/contractor.go | 11 +++++++-- modules/renter/contractor/contractor_test.go | 19 +++++++++++++-- node/api/renter_test.go | 2 +- 5 files changed, 49 insertions(+), 13 deletions(-) diff --git a/cmd/siac/rentercmd.go b/cmd/siac/rentercmd.go index 4e5fe51f86..99fafa70cc 100644 --- a/cmd/siac/rentercmd.go +++ b/cmd/siac/rentercmd.go @@ -160,12 +160,13 @@ func rentercmd() { Storage Spending: %v Upload Spending: %v Download Spending: %v + Fees Spending: %v Unspent Funds: %v Total Allocated: %v `, currencyUnits(fm.StorageSpending), currencyUnits(fm.UploadSpending), - currencyUnits(fm.DownloadSpending), currencyUnits(fm.Unspent), - currencyUnits(fm.ContractSpending)) + currencyUnits(fm.DownloadSpending), currencyUnits(fm.ContractFees), + currencyUnits(fm.Unspent), currencyUnits(fm.TotalAllocated)) // also list files renterfileslistcmd() diff --git a/modules/renter.go b/modules/renter.go index 9e7934c75b..def2287cbf 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -237,7 +237,8 @@ type RenterContract struct { // locked up while forming a contract. This includes fees, and includes // funds which were allocated (but not necessarily committed) to spend on // uploads/downloads/storage. - // + TotalCost types.Currency + // ContractFee is the amount of money paid to the host to cover potential // future transaction fees that the host may incur, and to cover any other // overheads the host may have. @@ -249,7 +250,6 @@ type RenterContract struct { // contract. The siafund fee that the renter pays covers both the renter and // the host portions of the contract, and therefore can be unexpectedly high // if the the host collateral is high. - TotalCost types.Currency ContractFee types.Currency TxnFee types.Currency SiafundFee types.Currency @@ -258,11 +258,24 @@ type RenterContract struct { // ContractorSpending contains the metrics about how much the Contractor has // spent during the current billing period. type ContractorSpending struct { - ContractSpending types.Currency `json:"contractspending"` + // ContractFees are the sum of all fees in the contract. This means it + // includes the ContractFee, TxnFee and SiafundFee + ContractFees types.Currency `json:"contractfees"` + // DownloadSpending is the money currently spent on downloads. DownloadSpending types.Currency `json:"downloadspending"` - StorageSpending types.Currency `json:"storagespending"` - UploadSpending types.Currency `json:"uploadspending"` - Unspent types.Currency `json:"unspent"` + // StorageSpending is the money currently spent on storage. + StorageSpending types.Currency `json:"storagespending"` + // ContractSpending is the total amount of money that the renter has put + // into the contract, whether it's locked and the renter gets that money + // back or whether it's spent and the renter won't get the money back. + TotalAllocated types.Currency `json:"totalallocated"` + // UploadSpending is the money currently spent on uploads. + UploadSpending types.Currency `json:"uploadspending"` + // Unspent is locked-away, unspent money. + Unspent types.Currency `json:"unspent"` + // V132contractSpending was renamed to TotalAllocated and always has the + // same value as TotalAllocated. 
+ V132contractSpending types.Currency `json:"contractspending"` } // A Renter uploads, tracks, repairs, and downloads a set of files for the diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 44b0d69a01..1d3242e9c5 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -95,7 +95,14 @@ func (c *Contractor) PeriodSpending() modules.ContractorSpending { var spending modules.ContractorSpending for _, contract := range c.contracts.ViewAll() { - spending.ContractSpending = spending.ContractSpending.Add(contract.TotalCost) + // Calculate ContractFees + spending.ContractFees = spending.ContractFees.Add(contract.ContractFee) + spending.ContractFees = spending.ContractFees.Add(contract.TxnFee) + spending.ContractFees = spending.ContractFees.Add(contract.SiafundFee) + // Calculate TotalAllocated + spending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost) + spending.V132contractSpending = spending.TotalAllocated + // Calculate Spending spending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending) spending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending) spending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending) @@ -107,7 +114,7 @@ func (c *Contractor) PeriodSpending() modules.ContractorSpending { // spending.StorageSpending = spending.StorageSpending.Add(pre.StorageSpending) // } } - allSpending := spending.ContractSpending.Add(spending.DownloadSpending).Add(spending.UploadSpending).Add(spending.StorageSpending) + allSpending := spending.TotalAllocated.Add(spending.DownloadSpending).Add(spending.UploadSpending).Add(spending.StorageSpending) // If the allowance is smaller than the spending, the unspent funds are 0 if !(c.allowance.Funds.Cmp(allSpending) < 0) { diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go index 6a843a36ef..b4f6674af4 100644 --- a/modules/renter/contractor/contractor_test.go +++ b/modules/renter/contractor/contractor_test.go @@ -321,8 +321,23 @@ func TestAllowanceSpending(t *testing.T) { // PeriodSpending should reflect the amount of spending accurately reportedSpending := c.PeriodSpending() - if reportedSpending.ContractSpending.Cmp(spent) != 0 { - t.Fatal("reported incorrect spending for this billing cycle: got", reportedSpending.ContractSpending.HumanString(), "wanted", spent.HumanString()) + if reportedSpending.TotalAllocated.Cmp(spent) != 0 { + t.Fatal("reported incorrect spending for this billing cycle: got", reportedSpending.TotalAllocated.HumanString(), "wanted", spent.HumanString()) + } + // COMPATv132 totalallocated should equal contractspending field. + if reportedSpending.V132contractSpending.Cmp(reportedSpending.TotalAllocated) != 0 { + t.Fatal("TotalAllocated should be equal to ContractSpending for compatibility") + } + + var expectedFees types.Currency + for _, contract := range c.Contracts() { + expectedFees = expectedFees.Add(contract.TxnFee) + expectedFees = expectedFees.Add(contract.SiafundFee) + expectedFees = expectedFees.Add(contract.ContractFee) + } + if expectedFees.Cmp(reportedSpending.ContractFees) != 0 { + t.Fatalf("expected %v reported fees but was %v", + expectedFees.HumanString(), reportedSpending.ContractFees.HumanString()) } // enter a new period. PeriodSpending should reset. 
diff --git a/node/api/renter_test.go b/node/api/renter_test.go index 32ce05dbb5..cb28858045 100644 --- a/node/api/renter_test.go +++ b/node/api/renter_test.go @@ -663,7 +663,7 @@ func TestRenterHandlerContracts(t *testing.T) { for _, contract := range contracts.Contracts { expectedContractSpending = expectedContractSpending.Add(contract.TotalCost) } - if got := get.FinancialMetrics.ContractSpending; got.Cmp(expectedContractSpending) != 0 { + if got := get.FinancialMetrics.TotalAllocated; got.Cmp(expectedContractSpending) != 0 { t.Fatalf("expected contract spending to be %v; got %v", expectedContractSpending, got) } } From cc6243fffc18e17ead8bb48098441637a18e9cc1 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 16 Apr 2018 13:20:31 -0400 Subject: [PATCH 067/212] update docs --- doc/API.md | 4 +++- doc/api/Renter.md | 9 ++++++++- modules/renter.go | 2 +- siatest/testgroup.go | 2 +- 4 files changed, 13 insertions(+), 4 deletions(-) diff --git a/doc/API.md b/doc/API.md index d1247f7d2c..3d7140c1c8 100644 --- a/doc/API.md +++ b/doc/API.md @@ -762,9 +762,11 @@ returns the current settings along with metrics on the renter's spending. } }, "financialmetrics": { - "contractspending": "1234", // hastings + "contractfees": "1234", // hastings + "contractspending": "1234", // hastings (deprecated, now totalallocated) "downloadspending": "5678", // hastings "storagespending": "1234", // hastings + "totalallocated": "1234", // hastings "uploadspending": "5678", // hastings "unspent": "1234" // hastings }, diff --git a/doc/api/Renter.md b/doc/api/Renter.md index d30a64b7ff..525b72216a 100644 --- a/doc/api/Renter.md +++ b/doc/api/Renter.md @@ -66,9 +66,12 @@ returns the current settings along with metrics on the renter's spending. // Metrics about how much the Renter has spent on storage, uploads, and // downloads. "financialmetrics": { + // Amount of money spent on contract fees, transaction fees and siafund fees. + "contractfees": "1234", // hastings + // How much money, in hastings, the Renter has spent on file contracts, // including fees. - "contractspending": "1234", // hastings + "contractspending": "1234", // hastings, (deprecated, now totalallocated) // Amount of money spent on downloads. "downloadspending": "5678", // hastings @@ -76,6 +79,10 @@ returns the current settings along with metrics on the renter's spending. // Amount of money spend on storage. "storagespending": "1234", // hastings + // Total amount of money that the renter has put into contracts. Includes + // spent money and also money that will be returned to the renter. + "totalallocated": "1234", // hastings + // Amount of money spent on uploads. "uploadspending": "5678", // hastings diff --git a/modules/renter.go b/modules/renter.go index def2287cbf..d7102e6b8c 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -266,7 +266,7 @@ type ContractorSpending struct { // StorageSpending is the money currently spent on storage. StorageSpending types.Currency `json:"storagespending"` // ContractSpending is the total amount of money that the renter has put - // into the contract, whether it's locked and the renter gets that money + // into contracts, whether it's locked and the renter gets that money // back or whether it's spent and the renter won't get the money back. TotalAllocated types.Currency `json:"totalallocated"` // UploadSpending is the money currently spent on uploads. 
diff --git a/siatest/testgroup.go b/siatest/testgroup.go index 5fa50bb688..a832fde550 100644 --- a/siatest/testgroup.go +++ b/siatest/testgroup.go @@ -316,7 +316,7 @@ func synchronizationCheck(miner *TestNode, nodes map[*TestNode]struct{}) error { return err } for node := range nodes { - err := Retry(100, 100*time.Millisecond, func() error { + err := Retry(600, 100*time.Millisecond, func() error { ncg, err := node.ConsensusGet() if err != nil { return err From 3ea0b506791873067efc0db8ed8e4e963ded4307 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Mon, 16 Apr 2018 20:29:18 +0200 Subject: [PATCH 068/212] Remove auto generated stringer file --- modules/host/storageobligations.go | 17 +++++++++++++++++ modules/host/storageobligationstatus_string.go | 16 ---------------- 2 files changed, 17 insertions(+), 16 deletions(-) delete mode 100644 modules/host/storageobligationstatus_string.go diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 45a8d314ab..7576abcfa3 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -34,6 +34,7 @@ import ( "encoding/binary" "encoding/json" "errors" + "strconv" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" @@ -150,6 +151,22 @@ type storageObligation struct { RevisionConstructed bool } +func (i storageObligationStatus) String() string { + if i == 0 { + return "obligationUnresolved" + } + if i == 1 { + return "obligationRejected" + } + if i == 2 { + return "obligationSucceeded" + } + if i == 3 { + return "obligationFailed" + } + return "storageObligationStatus(" + strconv.FormatInt(int64(i), 10) + ")" +} + // getStorageObligation fetches a storage obligation from the database tx. func getStorageObligation(tx *bolt.Tx, soid types.FileContractID) (so storageObligation, err error) { soBytes := tx.Bucket(bucketStorageObligations).Get(soid[:]) diff --git a/modules/host/storageobligationstatus_string.go b/modules/host/storageobligationstatus_string.go deleted file mode 100644 index f237e503f1..0000000000 --- a/modules/host/storageobligationstatus_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type=storageObligationStatus"; DO NOT EDIT. 
- -package host - -import "strconv" - -const _storageObligationStatus_name = "obligationUnresolvedobligationRejectedobligationSucceededobligationFailed" - -var _storageObligationStatus_index = [...]uint8{0, 20, 38, 57, 73} - -func (i storageObligationStatus) String() string { - if i >= storageObligationStatus(len(_storageObligationStatus_index)-1) { - return "storageObligationStatus(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _storageObligationStatus_name[_storageObligationStatus_index[i]:_storageObligationStatus_index[i+1]] -} From 6a5e9e96cb36343c77fbfd3f8bb18a0e80c17454 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 16 Apr 2018 15:34:47 -0400 Subject: [PATCH 069/212] fix unspent coins reporting --- modules/renter/contractor/contractor.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 1d3242e9c5..6bbd415fa4 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -114,12 +114,15 @@ func (c *Contractor) PeriodSpending() modules.ContractorSpending { // spending.StorageSpending = spending.StorageSpending.Add(pre.StorageSpending) // } } - allSpending := spending.TotalAllocated.Add(spending.DownloadSpending).Add(spending.UploadSpending).Add(spending.StorageSpending) - - // If the allowance is smaller than the spending, the unspent funds are 0 - if !(c.allowance.Funds.Cmp(allSpending) < 0) { + // Calculate amount of spent money to get unspent money. + allSpending := spending.ContractFees + allSpending = allSpending.Add(spending.DownloadSpending) + allSpending = allSpending.Add(spending.UploadSpending) + allSpending = allSpending.Add(spending.StorageSpending) + if c.allowance.Funds.Cmp(allSpending) >= 0 { spending.Unspent = c.allowance.Funds.Sub(allSpending) } + return spending } From 83b3c8a6527c9bd059d7df4fe1fd38842d244da0 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Mon, 16 Apr 2018 21:53:00 +0200 Subject: [PATCH 070/212] Add more tests --- node/api/renterhost_test.go | 38 ++++++++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/node/api/renterhost_test.go b/node/api/renterhost_test.go index 27295c690e..39b90c8c3f 100644 --- a/node/api/renterhost_test.go +++ b/node/api/renterhost_test.go @@ -225,6 +225,26 @@ func TestHostAndRentVanilla(t *testing.T) { if len(cts.Contracts) != 1 { t.Error("Host has wrong number of obligations:", len(cts.Contracts)) } + // Check if the obligation status is unresolved + if cts.Contracts[0].ObligationStatus != "obligationUnresolved" { + t.Error("Wrong obligation status for new contract:", cts.Contracts[0].ObligationStatus) + } + // Check if there are no sector roots on a new contract + if cts.Contracts[0].SectorRootsCount != 0 { + t.Error("Wrong number of sector roots for new contract:", cts.Contracts[0].SectorRootsCount) + } + // Check if there is locked collateral + if cts.Contracts[0].LockedCollateral.IsZero() { + t.Error("No locked collateral in contract.") + } + // Check if risked collateral is not equal to zero + if !cts.Contracts[0].RiskedCollateral.IsZero() { + t.Error("Risked collateral not zero in new contract.") + } + // Check if all potential revenues are zero + if !(cts.Contracts[0].PotentialDownloadRevenue.IsZero() && cts.Contracts[0].PotentialUploadRevenue.IsZero() && cts.Contracts[0].PotentialStorageRevenue.IsZero()) { + t.Error("Potential values not zero in new contract.") + } // Create a file. 
path := filepath.Join(st.dir, "test.dat") @@ -349,7 +369,19 @@ func TestHostAndRentVanilla(t *testing.T) { t.Error("Host has wrong number of obligations:", len(cts.Contracts)) } if !cts.Contracts[0].OriginConfirmed { - t.Error("host has not seen the file contract on the blockchain") + t.Error("Host has not seen the file contract on the blockchain.") + } + // Check if there are sector roots + if cts.Contracts[0].SectorRootsCount == 0 { + t.Error("Sector roots count is zero for used obligation.") + } + // Check if risked collateral is not equal to zero + if cts.Contracts[0].RiskedCollateral.IsZero() { + t.Error("Risked collateral is zero for used obligation.") + } + // There should be some potential revenues in this contract + if cts.Contracts[0].PotentialDownloadRevenue.IsZero() || cts.Contracts[0].PotentialUploadRevenue.IsZero() || cts.Contracts[0].PotentialStorageRevenue.IsZero() { + t.Error("Potential revenue value is zero for used obligation.") } // Mine blocks until the host should have submitted a storage proof. @@ -370,6 +402,10 @@ func TestHostAndRentVanilla(t *testing.T) { success := false for _, contract := range cts.Contracts { if contract.ProofConfirmed { + // Sector roots should be removed from storage obligation + if contract.SectorRootsCount > 0 { + t.Error("There are sector roots on completed storage obligation.") + } success = true break } From 1440dcf29826f683e99c40b3843320985a90edad Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 16 Apr 2018 16:33:29 -0400 Subject: [PATCH 071/212] improve siac renter formatting --- cmd/siac/rentercmd.go | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/cmd/siac/rentercmd.go b/cmd/siac/rentercmd.go index 99fafa70cc..ce063c7c2e 100644 --- a/cmd/siac/rentercmd.go +++ b/cmd/siac/rentercmd.go @@ -156,17 +156,27 @@ func rentercmd() { die("Could not get renter info:", err) } fm := rg.FinancialMetrics + totalSpent := fm.ContractFees.Add(fm.UploadSpending). 
+ Add(fm.DownloadSpending).Add(fm.StorageSpending) + unspentAllocated := fm.TotalAllocated.Sub(totalSpent) + unspentUnallocated := fm.Unspent.Sub(unspentAllocated) + fmt.Printf(`Renter info: - Storage Spending: %v - Upload Spending: %v - Download Spending: %v - Fees Spending: %v - Unspent Funds: %v - Total Allocated: %v - -`, currencyUnits(fm.StorageSpending), currencyUnits(fm.UploadSpending), + Allowance: %v + Spent Funds: %v + Storage: %v + Upload: %v + Download: %v + Fees: %v + Unspent Funds: %v + Allocated: %v + Unallocated: %v + +`, currencyUnits(rg.Settings.Allowance.Funds), currencyUnits(totalSpent), + currencyUnits(fm.StorageSpending), currencyUnits(fm.UploadSpending), currencyUnits(fm.DownloadSpending), currencyUnits(fm.ContractFees), - currencyUnits(fm.Unspent), currencyUnits(fm.TotalAllocated)) + currencyUnits(fm.Unspent), currencyUnits(unspentAllocated), + currencyUnits(unspentUnallocated)) // also list files renterfileslistcmd() From 5ed8617c2352b28376126b6c112b46ed09c3eba7 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 16 Apr 2018 16:36:52 -0400 Subject: [PATCH 072/212] Rename V132contractSpending to ContractSpendingDeprecated --- modules/renter.go | 4 ++-- modules/renter/contractor/contractor.go | 2 +- modules/renter/contractor/contractor_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/renter.go b/modules/renter.go index d7102e6b8c..e433a9ff43 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -273,9 +273,9 @@ type ContractorSpending struct { UploadSpending types.Currency `json:"uploadspending"` // Unspent is locked-away, unspent money. Unspent types.Currency `json:"unspent"` - // V132contractSpending was renamed to TotalAllocated and always has the + // ContractSpendingDeprecated was renamed to TotalAllocated and always has the // same value as TotalAllocated. - V132contractSpending types.Currency `json:"contractspending"` + ContractSpendingDeprecated types.Currency `json:"contractspending"` } // A Renter uploads, tracks, repairs, and downloads a set of files for the diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 6bbd415fa4..51e23b3aa8 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -101,7 +101,7 @@ func (c *Contractor) PeriodSpending() modules.ContractorSpending { spending.ContractFees = spending.ContractFees.Add(contract.SiafundFee) // Calculate TotalAllocated spending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost) - spending.V132contractSpending = spending.TotalAllocated + spending.ContractSpendingDeprecated = spending.TotalAllocated // Calculate Spending spending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending) spending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending) diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go index b4f6674af4..7577966571 100644 --- a/modules/renter/contractor/contractor_test.go +++ b/modules/renter/contractor/contractor_test.go @@ -325,7 +325,7 @@ func TestAllowanceSpending(t *testing.T) { t.Fatal("reported incorrect spending for this billing cycle: got", reportedSpending.TotalAllocated.HumanString(), "wanted", spent.HumanString()) } // COMPATv132 totalallocated should equal contractspending field. 
- if reportedSpending.V132contractSpending.Cmp(reportedSpending.TotalAllocated) != 0 { + if reportedSpending.ContractSpendingDeprecated.Cmp(reportedSpending.TotalAllocated) != 0 { t.Fatal("TotalAllocated should be equal to ContractSpending for compatibility") } From 664ef9df2768e2d98f4120976d91c636568a4701 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Mon, 16 Apr 2018 21:55:27 -0400 Subject: [PATCH 073/212] preserve compatibility --- cmd/siad/server.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cmd/siad/server.go b/cmd/siad/server.go index de7221b5f4..829faf2f57 100644 --- a/cmd/siad/server.go +++ b/cmd/siad/server.go @@ -79,6 +79,11 @@ type ( RootTarget types.Target `json:"roottarget"` RootDepth types.Target `json:"rootdepth"` + // DEPRECATED: same values as MaxTargetAdjustmentUp and + // MaxTargetAdjustmentDown. + MaxAdjustmentUp *big.Rat `json:"maxadjustmentup"` + MaxAdjustmentDown *big.Rat `json:"maxadjustmentdown"` + MaxTargetAdjustmentUp *big.Rat `json:"maxtargetadjustmentup"` MaxTargetAdjustmentDown *big.Rat `json:"maxtargetadjustmentdown"` @@ -349,6 +354,11 @@ func (srv *Server) daemonConstantsHandler(w http.ResponseWriter, _ *http.Request RootTarget: types.RootTarget, RootDepth: types.RootDepth, + // DEPRECATED: same values as MaxTargetAdjustmentUp and + // MaxTargetAdjustmentDown. + MaxAdjustmentUp: types.MaxTargetAdjustmentUp, + MaxAdjustmentDown: types.MaxTargetAdjustmentDown, + MaxTargetAdjustmentUp: types.MaxTargetAdjustmentUp, MaxTargetAdjustmentDown: types.MaxTargetAdjustmentDown, From a19854669c567c64959b22a923f8b2e0c37d64a1 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Tue, 17 Apr 2018 20:23:21 +0200 Subject: [PATCH 074/212] Fix typos. Fix ordering. Add "revisionconstructed" field in docs. --- doc/API.md | 7 ++++--- doc/api/Host.md | 21 ++++++++++++--------- modules/host.go | 6 +++--- modules/host/storageobligations.go | 4 ++-- 4 files changed, 21 insertions(+), 17 deletions(-) diff --git a/doc/API.md b/doc/API.md index 4ed230e294..3dc92a9561 100644 --- a/doc/API.md +++ b/doc/API.md @@ -406,22 +406,23 @@ gets a list of all contracts from the host database "datasize": 500000, // bytes "lockedcollateral": "1234", // hastings "obligationid": "fff48010dcbbd6ba7ffd41bc4b25a3634ee58bbf688d2f06b7d5a0c837304e13", - "sectorrootscount": 2, "potentialdownloadrevenue": "1234", // hastings "potentialstoragerevenue": "1234", // hastings "potentialuploadrevenue": "1234", // hastings "riskedcollateral": "1234", // hastings + "sectorrootscount": 2, "transactionfeesadded": "1234", // hastings "expirationheight": 123456, // blocks - "negotionheight": 123456, // blocks + "negotiationheight": 123456, // blocks "proofdeadline": 123456, // blocks "obligationstatus": "obligationFailed", "originconfirmed": true, - "revisionconfirmed": false, "proofconfirmed": true, "proofconstructed": true + "revisionconfirmed": false, + "revisionconstructed": false, } ] } diff --git a/doc/api/Host.md b/doc/api/Host.md index ab52b2a139..bc5964dfb2 100644 --- a/doc/api/Host.md +++ b/doc/api/Host.md @@ -453,9 +453,6 @@ Get contract information from the host database. This call will return all stora // Id of the storageobligation, which is defined by the file contract id of the file contract that governs the storage obligation. "obligationid": "fff48010dcbbd6ba7ffd41bc4b25a3634ee58bbf688d2f06b7d5a0c837304e13", - // Number of sector roots. - "sectorrootscount": 2, - // Potential revenue for downloaded data that the host will reveive upon successful completion of the obligation. 
"potentialdownloadrevenue": "1234", // hastings @@ -468,14 +465,17 @@ Get contract information from the host database. This call will return all stora // Amount that the host might lose if the submission of the storage proof is not successful. "riskedcollateral": "1234", // hastings + // Number of sector roots. + "sectorrootscount": 2, + // Amount for transaction fees that the host added to the storage obligation. "transactionfeesadded": "1234", // hastings // Experation height is the height at which the storage obligation expires. - "experationheight": 123456, // blocks + "expirationheight": 123456, // blocks // Negotion height is the height at which the storage obligation was negotiated. - "negotionheight": 0, // blocks + "negotiationheight": 0, // blocks // The proof deadline is the height by which the storage proof must be submitted. "proofdeadline": 123456, // blocks @@ -490,15 +490,18 @@ Get contract information from the host database. This call will return all stora // Origin confirmed indicates whether the file contract was seen on the blockchain for this storage obligation. "originconfirmed": true, - // Revision confirmed indicates whether there was a file contract revision seen on the blockchain for this storage obligation. - "revisionconfirmed": true, - // Proof confirmed indicates whether there was a storage proof seen on the blockchain for this storage obligation. "proofconfirmed": true, // The host has constructed a storage proof "proofconstructed": false - ] + + // Revision confirmed indicates whether there was a file contract revision seen on the blockchain for this storage obligation. + "revisionconfirmed": true, + + // Revision constructed indicates whether there was a file contract revision constructed for this storage obligation. + "revisionconstructed": true, + ] } ``` diff --git a/modules/host.go b/modules/host.go index bbbfe926c8..666242db1a 100644 --- a/modules/host.go +++ b/modules/host.go @@ -115,11 +115,11 @@ type ( DataSize uint64 `json:"datasize"` LockedCollateral types.Currency `json:"lockedcollateral"` ObligationId types.FileContractID `json:"obligationid"` - SectorRootsCount uint64 `json:"sectorrootscount"` PotentialDownloadRevenue types.Currency `json:"potentialdownloadrevenue"` PotentialStorageRevenue types.Currency `json:"potentialstoragerevenue"` PotentialUploadRevenue types.Currency `json:"potentialuploadrevenue"` RiskedCollateral types.Currency `json:"riskedcollateral"` + SectorRootsCount uint64 `json:"sectorrootscount"` TransactionFeesAdded types.Currency `json:"transactionfeesadded"` // The negotiation height specifies the block height at which the file @@ -134,10 +134,10 @@ type ( // obligation have been confirmed on the blockchain. ObligationStatus string `json:"obligationstatus"` OriginConfirmed bool `json:"originconfirmed"` - RevisionConfirmed bool `json:"revisionconfirmed"` - RevisionConstructed bool `json:"revisionconstructed"` ProofConfirmed bool `json:"proofconfirmed"` ProofConstructed bool `json:"proofconstructed"` + RevisionConfirmed bool `json:"revisionconfirmed"` + RevisionConstructed bool `json:"revisionconstructed"` } // HostWorkingStatus reports the working state of a host. 
Can be one of diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 7576abcfa3..828a431a8d 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -907,10 +907,10 @@ func (h *Host) StorageObligations() (sos []modules.StorageObligation) { ObligationStatus: so.ObligationStatus.String(), OriginConfirmed: so.OriginConfirmed, - RevisionConfirmed: so.RevisionConfirmed, - RevisionConstructed: so.RevisionConstructed, ProofConfirmed: so.ProofConfirmed, ProofConstructed: so.ProofConstructed, + RevisionConfirmed: so.RevisionConfirmed, + RevisionConstructed: so.RevisionConstructed, } sos = append(sos, mso) return nil From 077043e7bef86bfa2d07e40c01cd6aa168739aad Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 17 Apr 2018 17:14:38 -0400 Subject: [PATCH 075/212] Fix streaming endpoint --- modules/renter/downloadcache.go | 2 +- node/api/routes.go | 19 ++++++------------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 4b2bdc5bc7..7cda44b761 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -13,7 +13,7 @@ import ( // endpoint download. // TODO this won't be necessary anymore once we have partial downloads. func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { - if udc.download.staticDestinationType == destinationTypeSeekStream { + if udc.download.staticDestinationType != destinationTypeSeekStream { // We only cache streaming chunks since browsers and media players tend to only request a few kib at once when streaming data. That way we can prevent scheduling the same chunk for download over and over. return } diff --git a/node/api/routes.go b/node/api/routes.go index ab2d5685dc..d1f2a15d0c 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -1,7 +1,6 @@ package api import ( - "context" "net/http" "strings" "time" @@ -89,7 +88,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.GET("/renter/download/*siapath", RequirePassword(api.renterDownloadHandler, requiredPassword)) router.GET("/renter/downloadasync/*siapath", RequirePassword(api.renterDownloadAsyncHandler, requiredPassword)) router.POST("/renter/rename/*siapath", RequirePassword(api.renterRenameHandler, requiredPassword)) - router.GET("/renter/stream/*siapath", Unrestricted(api.renterStreamHandler)) + router.GET("/renter/stream/*siapath", api.renterStreamHandler) router.POST("/renter/upload/*siapath", RequirePassword(api.renterUploadHandler, requiredPassword)) // HostDB endpoints. @@ -200,16 +199,10 @@ func RequirePassword(h httprouter.Handle, password string) httprouter.Handle { } } -// Unrestricted can be used to whitelist api routes from requiring the -// Sia-Agent to be set. -func Unrestricted(h httprouter.Handle) httprouter.Handle { - return httprouter.Handle(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - req = req.WithContext(context.WithValue(req.Context(), unrestrictedContextKey{}, 0)) - h(w, req, ps) - }) -} - -// isUnrestricted checks if a context has the unrestrictedContextKey set. +// isUnrestricted checks if a request is allowed to bypass the Sia-Agent check. 
func isUnrestricted(req *http.Request) bool { - return req.Context().Value(unrestrictedContextKey{}) != nil + if strings.HasPrefix(req.URL.Path, "/renter/stream") { + return true + } + return false } From e6064bf36464504d3b4a96aaa5fa3bcc6fabedd5 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 14 Mar 2018 21:31:42 -0400 Subject: [PATCH 076/212] speed up contract formation and only form contracts if there are no pending scans --- modules/renter/contractor/consts.go | 10 ----- modules/renter/contractor/contractor_test.go | 40 ++++++++++------- modules/renter/contractor/contracts.go | 16 ++++--- modules/renter/contractor/dependencies.go | 3 +- modules/renter/hostdb/hostdb.go | 10 ++++- modules/renter/hostdb/hostdb_test.go | 45 +++++++++++++++---- modules/renter/proto/formcontract.go | 3 +- modules/renter/proto/proto.go | 1 + modules/renter/proto/renew.go | 5 ++- modules/renter/renter.go | 7 ++- modules/renter/renter_test.go | 8 ++-- modules/transactionpool.go | 4 ++ modules/transactionpool/transactionpool.go | 20 +++++++++ modules/wallet.go | 4 ++ modules/wallet/transactionbuilder.go | 20 +++++++++ modules/wallet/transactionbuilder_test.go | 46 ++++++++++++++++++++ 16 files changed, 190 insertions(+), 52 deletions(-) diff --git a/modules/renter/contractor/consts.go b/modules/renter/contractor/consts.go index 2d5ba6368b..57061f03c2 100644 --- a/modules/renter/contractor/consts.go +++ b/modules/renter/contractor/consts.go @@ -1,8 +1,6 @@ package contractor import ( - "time" - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" @@ -10,14 +8,6 @@ import ( // Constants related to contract formation parameters. var ( - // To alleviate potential block propagation issues, the contractor sleeps - // between each contract formation. - contractFormationInterval = build.Select(build.Var{ - Dev: 10 * time.Second, - Standard: 60 * time.Second, - Testing: 10 * time.Millisecond, - }).(time.Duration) - // minContractFundRenewalThreshold defines the ratio of remaining funds to // total contract cost below which the contractor will prematurely renew a // contract. 
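A minimal sketch of the calling pattern this patch moves to: RandomHosts now returns an error while the hostdb still has pending scans, so callers retry until hosts are available instead of sleeping between contract formations. The package name, helper name, host count and retry parameters below are illustrative and not part of the patch.

package example

import (
	"errors"
	"time"

	"github.com/NebulousLabs/Sia/build"
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"
)

// hostDB is the minimal subset of the hostdb interface used in this sketch.
type hostDB interface {
	RandomHosts(n int, exclude []types.SiaPublicKey) ([]modules.HostDBEntry, error)
}

// waitForHosts polls the hostdb until at least n hosts are returned, mirroring
// the build.Retry pattern used by the updated contractor tests. Illustrative
// helper, not code from the patch.
func waitForHosts(hdb hostDB, n int) ([]modules.HostDBEntry, error) {
	var hosts []modules.HostDBEntry
	err := build.Retry(50, 100*time.Millisecond, func() error {
		var rhErr error
		hosts, rhErr = hdb.RandomHosts(n, nil)
		if rhErr != nil {
			// The hostdb is likely still busy scanning hosts.
			return rhErr
		}
		if len(hosts) < n {
			return errors.New("not enough hosts have been scanned yet")
		}
		return nil
	})
	return hosts, err
}

Callers such as the allowance tests simply retry until the error clears; contract formation itself no longer sleeps for contractFormationInterval between contracts.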
diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go index 7577966571..a0b10b2ae8 100644 --- a/modules/renter/contractor/contractor_test.go +++ b/modules/renter/contractor/contractor_test.go @@ -32,12 +32,12 @@ func (newStub) AcceptTransactionSet([]types.Transaction) error { return nil func (newStub) FeeEstimation() (a types.Currency, b types.Currency) { return } // hdb stubs -func (newStub) AllHosts() []modules.HostDBEntry { return nil } -func (newStub) ActiveHosts() []modules.HostDBEntry { return nil } -func (newStub) Host(types.SiaPublicKey) (settings modules.HostDBEntry, ok bool) { return } -func (newStub) IncrementSuccessfulInteractions(key types.SiaPublicKey) { return } -func (newStub) IncrementFailedInteractions(key types.SiaPublicKey) { return } -func (newStub) RandomHosts(int, []types.SiaPublicKey) []modules.HostDBEntry { return nil } +func (newStub) AllHosts() []modules.HostDBEntry { return nil } +func (newStub) ActiveHosts() []modules.HostDBEntry { return nil } +func (newStub) Host(types.SiaPublicKey) (settings modules.HostDBEntry, ok bool) { return } +func (newStub) IncrementSuccessfulInteractions(key types.SiaPublicKey) { return } +func (newStub) IncrementFailedInteractions(key types.SiaPublicKey) { return } +func (newStub) RandomHosts(int, []types.SiaPublicKey) ([]modules.HostDBEntry, error) { return nil, nil } func (newStub) ScoreBreakdown(modules.HostDBEntry) modules.HostScoreBreakdown { return modules.HostScoreBreakdown{} } @@ -132,13 +132,13 @@ func TestAllowance(t *testing.T) { // its methods. type stubHostDB struct{} -func (stubHostDB) AllHosts() (hs []modules.HostDBEntry) { return } -func (stubHostDB) ActiveHosts() (hs []modules.HostDBEntry) { return } -func (stubHostDB) Host(types.SiaPublicKey) (h modules.HostDBEntry, ok bool) { return } -func (stubHostDB) IncrementSuccessfulInteractions(key types.SiaPublicKey) { return } -func (stubHostDB) IncrementFailedInteractions(key types.SiaPublicKey) { return } -func (stubHostDB) PublicKey() (spk types.SiaPublicKey) { return } -func (stubHostDB) RandomHosts(int, []types.SiaPublicKey) (hs []modules.HostDBEntry) { return } +func (stubHostDB) AllHosts() (hs []modules.HostDBEntry) { return } +func (stubHostDB) ActiveHosts() (hs []modules.HostDBEntry) { return } +func (stubHostDB) Host(types.SiaPublicKey) (h modules.HostDBEntry, ok bool) { return } +func (stubHostDB) IncrementSuccessfulInteractions(key types.SiaPublicKey) { return } +func (stubHostDB) IncrementFailedInteractions(key types.SiaPublicKey) { return } +func (stubHostDB) PublicKey() (spk types.SiaPublicKey) { return } +func (stubHostDB) RandomHosts(int, []types.SiaPublicKey) (hs []modules.HostDBEntry, _ error) { return } func (stubHostDB) ScoreBreakdown(modules.HostDBEntry) modules.HostScoreBreakdown { return modules.HostScoreBreakdown{} } @@ -238,7 +238,11 @@ func TestAllowanceSpending(t *testing.T) { t.Fatal(err) } err = build.Retry(50, 100*time.Millisecond, func() error { - if len(c.hdb.RandomHosts(1, nil)) == 0 { + hosts, err := c.hdb.RandomHosts(1, nil) + if err != nil { + return err + } + if len(hosts) == 0 { return errors.New("host has not been scanned yet") } return nil @@ -399,8 +403,12 @@ func TestIntegrationSetAllowance(t *testing.T) { t.Fatal(err) } - // wait for hostdb to scan host - for i := 0; i < 100 && len(c.hdb.RandomHosts(1, nil)) == 0; i++ { + // wait for hostdb to scan + hosts, err := c.hdb.RandomHosts(1, nil) + if err != nil { + t.Fatal("failed to get hosts", err) + } + for i := 0; i < 100 && 
len(hosts) == 0; i++ { time.Sleep(time.Millisecond * 50) } diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 1581c9be94..f225a444ac 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "math/big" - "time" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" @@ -74,7 +73,10 @@ func (c *Contractor) managedMarkContractsUtility() error { c.mu.RLock() hostCount := int(c.allowance.Hosts) c.mu.RUnlock() - hosts := c.hdb.RandomHosts(hostCount+minScoreHostBuffer, nil) + hosts, err := c.hdb.RandomHosts(hostCount+minScoreHostBuffer, nil) + if err != nil { + return + } // Find the minimum score that a host is allowed to have to be considered // good for upload. @@ -547,7 +549,7 @@ func (c *Contractor) threadedContractMaintenance() { return case <-c.interruptMaintenance: return - case <-time.After(contractFormationInterval): + default: } } @@ -585,7 +587,11 @@ func (c *Contractor) threadedContractMaintenance() { } initialContractFunds := c.allowance.Funds.Div64(c.allowance.Hosts).Div64(3) c.mu.RUnlock() - hosts := c.hdb.RandomHosts(neededContracts*2+10, exclude) + hosts, err := c.hdb.RandomHosts(neededContracts*2+10, exclude) + if err != nil { + c.log.Println("WARN: not forming new contracts:", err) + return + } // Form contracts with the hosts one at a time, until we have enough // contracts. @@ -631,7 +637,7 @@ func (c *Contractor) threadedContractMaintenance() { return case <-c.interruptMaintenance: return - case <-time.After(contractFormationInterval): + default: } } } diff --git a/modules/renter/contractor/dependencies.go b/modules/renter/contractor/dependencies.go index 840e3af28b..e61d9bae76 100644 --- a/modules/renter/contractor/dependencies.go +++ b/modules/renter/contractor/dependencies.go @@ -38,6 +38,7 @@ type ( Drop() FundSiacoins(types.Currency) error Sign(bool) ([]types.Transaction, error) + UnconfirmedParents() []types.Transaction View() (types.Transaction, []types.Transaction) ViewAdded() (parents, coins, funds, signatures []int) } @@ -52,7 +53,7 @@ type ( Host(types.SiaPublicKey) (modules.HostDBEntry, bool) IncrementSuccessfulInteractions(key types.SiaPublicKey) IncrementFailedInteractions(key types.SiaPublicKey) - RandomHosts(n int, exclude []types.SiaPublicKey) []modules.HostDBEntry + RandomHosts(n int, exclude []types.SiaPublicKey) ([]modules.HostDBEntry, error) ScoreBreakdown(modules.HostDBEntry) modules.HostScoreBreakdown } diff --git a/modules/renter/hostdb/hostdb.go b/modules/renter/hostdb/hostdb.go index 7d95a5f60d..234402048e 100644 --- a/modules/renter/hostdb/hostdb.go +++ b/modules/renter/hostdb/hostdb.go @@ -225,6 +225,12 @@ func (hdb *HostDB) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { // RandomHosts implements the HostDB interface's RandomHosts() method. It takes // a number of hosts to return, and a slice of netaddresses to ignore, and // returns a slice of entries. 
-func (hdb *HostDB) RandomHosts(n int, excludeKeys []types.SiaPublicKey) []modules.HostDBEntry { - return hdb.hostTree.SelectRandom(n, excludeKeys) +func (hdb *HostDB) RandomHosts(n int, excludeKeys []types.SiaPublicKey) ([]modules.HostDBEntry, error) { + hdb.mu.RLock() + pendingScans := len(hdb.scanList) + hdb.mu.RUnlock() + if pendingScans > 0 { + return []modules.HostDBEntry{}, errors.New("Can't get hosts while hostdb is scanning") + } + return hdb.hostTree.SelectRandom(n, excludeKeys), nil } diff --git a/modules/renter/hostdb/hostdb_test.go b/modules/renter/hostdb/hostdb_test.go index c191ca9b60..5e37449790 100644 --- a/modules/renter/hostdb/hostdb_test.go +++ b/modules/renter/hostdb/hostdb_test.go @@ -234,7 +234,10 @@ func TestRandomHosts(t *testing.T) { // Check that all hosts can be queried. for i := 0; i < 25; i++ { - hosts := hdbt.hdb.RandomHosts(nEntries, nil) + hosts, err := hdbt.hdb.RandomHosts(nEntries, nil) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(hosts) != nEntries { t.Errorf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries) } @@ -254,7 +257,10 @@ func TestRandomHosts(t *testing.T) { // Base case, fill out a map exposing hosts from a single RH query. dupCheck1 := make(map[string]modules.HostDBEntry) - hosts := hdbt.hdb.RandomHosts(nEntries/2, nil) + hosts, err := hdbt.hdb.RandomHosts(nEntries/2, nil) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(hosts) != nEntries/2 { t.Fatalf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries/2) } @@ -275,7 +281,10 @@ func TestRandomHosts(t *testing.T) { for i := 0; i < 10; i++ { dupCheck2 := make(map[string]modules.HostDBEntry) var overlap, disjoint bool - hosts = hdbt.hdb.RandomHosts(nEntries/2, nil) + hosts, err = hdbt.hdb.RandomHosts(nEntries/2, nil) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(hosts) != nEntries/2 { t.Fatalf("RandomHosts returned few entries. got %v wanted %v\n", len(hosts), nEntries/2) } @@ -306,12 +315,18 @@ func TestRandomHosts(t *testing.T) { // Try exclude list by excluding every host except for the last one, and // doing a random select. for i := 0; i < 25; i++ { - hosts := hdbt.hdb.RandomHosts(nEntries, nil) + hosts, err := hdbt.hdb.RandomHosts(nEntries, nil) + if err != nil { + t.Fatal("Failed to get hosts", err) + } var exclude []types.SiaPublicKey for j := 1; j < len(hosts); j++ { exclude = append(exclude, hosts[j].PublicKey) } - rand := hdbt.hdb.RandomHosts(1, exclude) + rand, err := hdbt.hdb.RandomHosts(1, exclude) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(rand) != 1 { t.Fatal("wrong number of hosts returned") } @@ -320,7 +335,10 @@ func TestRandomHosts(t *testing.T) { } // Try again but request more hosts than are available. - rand = hdbt.hdb.RandomHosts(5, exclude) + rand, err = hdbt.hdb.RandomHosts(5, exclude) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(rand) != 1 { t.Fatal("wrong number of hosts returned") } @@ -339,7 +357,10 @@ func TestRandomHosts(t *testing.T) { // Select only 20 hosts. dupCheck := make(map[string]struct{}) - rand = hdbt.hdb.RandomHosts(20, exclude) + rand, err = hdbt.hdb.RandomHosts(20, exclude) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(rand) != 20 { t.Error("random hosts is returning the wrong number of hosts") } @@ -357,7 +378,10 @@ func TestRandomHosts(t *testing.T) { // Select exactly 50 hosts. 
dupCheck = make(map[string]struct{}) - rand = hdbt.hdb.RandomHosts(50, exclude) + rand, err = hdbt.hdb.RandomHosts(50, exclude) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(rand) != 50 { t.Error("random hosts is returning the wrong number of hosts") } @@ -375,7 +399,10 @@ func TestRandomHosts(t *testing.T) { // Select 100 hosts. dupCheck = make(map[string]struct{}) - rand = hdbt.hdb.RandomHosts(100, exclude) + rand, err = hdbt.hdb.RandomHosts(100, exclude) + if err != nil { + t.Fatal("Failed to get hosts", err) + } if len(rand) != 50 { t.Error("random hosts is returning the wrong number of hosts") } diff --git a/modules/renter/proto/formcontract.go b/modules/renter/proto/formcontract.go index 4a2cf4c3a1..fb5903b72c 100644 --- a/modules/renter/proto/formcontract.go +++ b/modules/renter/proto/formcontract.go @@ -98,7 +98,8 @@ func (cs *ContractSet) FormContract(params ContractParams, txnBuilder transactio // Create initial transaction set. txn, parentTxns := txnBuilder.View() - txnSet := append(parentTxns, txn) + unconfirmedParents := txnBuilder.UnconfirmedParents() + txnSet := append(unconfirmedParents, append(parentTxns, txn)...) // Increase Successful/Failed interactions accordingly defer func() { diff --git a/modules/renter/proto/proto.go b/modules/renter/proto/proto.go index e6787e6753..670bbf275c 100644 --- a/modules/renter/proto/proto.go +++ b/modules/renter/proto/proto.go @@ -19,6 +19,7 @@ type ( AddTransactionSignature(types.TransactionSignature) uint64 FundSiacoins(types.Currency) error Sign(bool) ([]types.Transaction, error) + UnconfirmedParents() []types.Transaction View() (types.Transaction, []types.Transaction) ViewAdded() (parents, coins, funds, signatures []int) } diff --git a/modules/renter/proto/renew.go b/modules/renter/proto/renew.go index fdf89c588c..564586bf13 100644 --- a/modules/renter/proto/renew.go +++ b/modules/renter/proto/renew.go @@ -97,9 +97,10 @@ func (cs *ContractSet) Renew(oldContract *SafeContract, params ContractParams, t // add miner fee txnBuilder.AddMinerFee(txnFee) - // create initial transaction set + // Create initial transaction set. txn, parentTxns := txnBuilder.View() - txnSet := append(parentTxns, txn) + unconfirmedParents := txnBuilder.UnconfirmedParents() + txnSet := append(unconfirmedParents, append(parentTxns, txn)...) // Increase Successful/Failed interactions accordingly defer func() { diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 70fc9f087b..250b3e1d6c 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -75,7 +75,7 @@ type hostDB interface { // RandomHosts returns a set of random hosts, weighted by their estimated // usefulness / attractiveness to the renter. RandomHosts will not return // any offline or inactive hosts. - RandomHosts(int, []types.SiaPublicKey) []modules.HostDBEntry + RandomHosts(int, []types.SiaPublicKey) ([]modules.HostDBEntry, error) // ScoreBreakdown returns a detailed explanation of the various properties // of the host. @@ -224,7 +224,10 @@ func (r *Renter) PriceEstimation() modules.RenterPriceEstimation { } // Grab hosts to perform the estimation. - hosts := r.hostDB.RandomHosts(priceEstimationScope, nil) + hosts, err := r.hostDB.RandomHosts(priceEstimationScope, nil) + if err != nil { + return modules.RenterPriceEstimation{} + } // Check if there are zero hosts, which means no estimation can be made. 
if len(hosts) == 0 { diff --git a/modules/renter/renter_test.go b/modules/renter/renter_test.go index 2a49098310..9a217396c3 100644 --- a/modules/renter/renter_test.go +++ b/modules/renter/renter_test.go @@ -107,8 +107,8 @@ func (stubHostDB) AllHosts() []modules.HostDBEntry { return nil } func (stubHostDB) AverageContractPrice() types.Currency { return types.Currency{} } func (stubHostDB) Close() error { return nil } func (stubHostDB) IsOffline(modules.NetAddress) bool { return true } -func (stubHostDB) RandomHosts(int, []types.SiaPublicKey) []modules.HostDBEntry { - return []modules.HostDBEntry{} +func (stubHostDB) RandomHosts(int, []types.SiaPublicKey) ([]modules.HostDBEntry, error) { + return []modules.HostDBEntry{}, nil } func (stubHostDB) EstimateHostScore(modules.HostDBEntry) modules.HostScoreBreakdown { return modules.HostScoreBreakdown{} @@ -143,8 +143,8 @@ type pricesStub struct { dbEntries []modules.HostDBEntry } -func (ps pricesStub) RandomHosts(n int, exclude []types.SiaPublicKey) []modules.HostDBEntry { - return ps.dbEntries +func (ps pricesStub) RandomHosts(n int, exclude []types.SiaPublicKey) ([]modules.HostDBEntry, error) { + return ps.dbEntries, nil } // TestRenterPricesVolatility verifies that the renter caches its price diff --git a/modules/transactionpool.go b/modules/transactionpool.go index 9e8924a5cd..df2afd35e2 100644 --- a/modules/transactionpool.go +++ b/modules/transactionpool.go @@ -143,6 +143,10 @@ type ( // transaction pool changes, and should not subscribe to both. TransactionPoolSubscribe(TransactionPoolSubscriber) + // TransactionSet returns the transaction set the provided object + // appears in. + TransactionSet(crypto.Hash) []types.Transaction + // Unsubscribe removes a subscriber from the transaction pool. // This is necessary for clean shutdown of the miner. Unsubscribe(TransactionPoolSubscriber) diff --git a/modules/transactionpool/transactionpool.go b/modules/transactionpool/transactionpool.go index cebeb22721..a7abba875a 100644 --- a/modules/transactionpool/transactionpool.go +++ b/modules/transactionpool/transactionpool.go @@ -256,6 +256,26 @@ func (tp *TransactionPool) Transaction(id types.TransactionID) (types.Transactio return txn, necessaryParents, exists } +// TransactionSet returns the transaction set the provided object +// appears in. +func (tp *TransactionPool) TransactionSet(oid crypto.Hash) []types.Transaction { + tp.mu.RLock() + defer tp.mu.RUnlock() + var parents []types.Transaction + tSetID, exists := tp.knownObjects[ObjectID(oid)] + if !exists { + return parents + } + tSet, exists := tp.transactionSets[tSetID] + if !exists { + return parents + } + for _, txn := range tSet { + parents = append(parents, txn) + } + return parents +} + // Broadcast broadcasts a transaction set to all of the transaction pool's // peers. func (tp *TransactionPool) Broadcast(ts []types.Transaction) { diff --git a/modules/wallet.go b/modules/wallet.go index 3994a9f167..35e376fe7b 100644 --- a/modules/wallet.go +++ b/modules/wallet.go @@ -198,6 +198,10 @@ type ( // transaction should be dropped. Sign(wholeTransaction bool) ([]types.Transaction, error) + // UnconfirmedParents returns any unconfirmed parents the transaction set that + // is being built by the transaction builder could have. + UnconfirmedParents() []types.Transaction + // View returns the incomplete transaction along with all of its // parents. 
View() (txn types.Transaction, parents []types.Transaction) diff --git a/modules/wallet/transactionbuilder.go b/modules/wallet/transactionbuilder.go index 2b7af3855e..20549c336f 100644 --- a/modules/wallet/transactionbuilder.go +++ b/modules/wallet/transactionbuilder.go @@ -382,6 +382,26 @@ func (tb *transactionBuilder) FundSiafunds(amount types.Currency) error { return nil } +// UnconfirmedParents returns any unconfirmed parents the transaction set that +// is being built by the transaction builder could have. +func (tb *transactionBuilder) UnconfirmedParents() (parents []types.Transaction) { + addedParents := make(map[types.TransactionID]struct{}) + for _, p := range tb.parents { + for _, sci := range p.SiacoinInputs { + tSet := tb.wallet.tpool.TransactionSet(crypto.Hash(sci.ParentID)) + for _, txn := range tSet { + txnID := txn.ID() + if _, exists := addedParents[txnID]; exists { + continue + } + addedParents[txnID] = struct{}{} + parents = append(parents, txn) + } + } + } + return +} + // AddParents adds a set of parents to the transaction. func (tb *transactionBuilder) AddParents(newParents []types.Transaction) { tb.parents = append(tb.parents, newParents...) diff --git a/modules/wallet/transactionbuilder_test.go b/modules/wallet/transactionbuilder_test.go index 7447b5fb8a..7529288907 100644 --- a/modules/wallet/transactionbuilder_test.go +++ b/modules/wallet/transactionbuilder_test.go @@ -450,3 +450,49 @@ func TestParallelBuilders(t *testing.T) { t.Fatal("did not get the expected ending balance", expected, endingSCConfirmed, startingSCConfirmed) } } + +// TestUnconfirmedParents tests the functionality of the transaction builder's +// UnconfirmedParents method. +func TestUnconfirmedParents(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + wt, err := createWalletTester(t.Name(), &modules.ProductionDependencies{}) + if err != nil { + t.Fatal(err) + } + defer wt.closeWt() + + // Send all of the wallet's available balance to itself. + uc, err := wt.wallet.NextAddress() + if err != nil { + t.Fatal("Failed to get address", err) + } + siacoins, _, _ := wt.wallet.ConfirmedBalance() + tSet, err := wt.wallet.SendSiacoins(siacoins.Sub(types.SiacoinPrecision), uc.UnlockHash()) + if err != nil { + t.Fatal("Failed to send coins", err) + } + + // Create a transaction. That transaction should use siacoin outputs from + // the unconfirmed transactions in tSet as inputs and is therefore a child + // of tSet. + b := wt.wallet.StartTransaction() + txnFund := types.NewCurrency64(1e3) + err = b.FundSiacoins(txnFund) + if err != nil { + t.Fatal(err) + } + + // UnconfirmedParents should return the transactions of the transaction set + // we used to send money to ourselves. 
+ parents := b.UnconfirmedParents() + if len(tSet) != len(parents) { + t.Fatal("parents should have same length as unconfirmed transaction set") + } + for i := 0; i < len(tSet); i++ { + if tSet[i].ID() != parents[i].ID() { + t.Error("returned parent doesn't match transaction of transaction set") + } + } +} From 6c2d697fec38b59be710afc70763c6d3c2a10d6a Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 14 Mar 2018 22:00:48 -0400 Subject: [PATCH 077/212] add todo --- modules/renter/contractor/contractor.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 65e5d12d8f..0b498034e5 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -249,6 +249,9 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host } // Mark contract utility. + // TODO we probably have to force this to succeed even if there is a scan + // going on. Otherwise all the contract utilities might end up + // uninitialized. c.managedMarkContractsUtility() // Subscribe to the consensus set. From 9de3a753d45d17d3e1fedbe3ae00f517dede5ab1 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 11 Apr 2018 14:38:32 -0400 Subject: [PATCH 078/212] add initialScanComplete flag and make RandomHosts return an error if it is not completed --- modules/renter/hostdb/hostdb.go | 20 +++++++++-------- modules/renter/hostdb/scan.go | 38 +++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 9 deletions(-) diff --git a/modules/renter/hostdb/hostdb.go b/modules/renter/hostdb/hostdb.go index 234402048e..2afae8a5a4 100644 --- a/modules/renter/hostdb/hostdb.go +++ b/modules/renter/hostdb/hostdb.go @@ -19,8 +19,9 @@ import ( ) var ( - errNilCS = errors.New("cannot create hostdb with nil consensus set") - errNilGateway = errors.New("cannot create hostdb with nil gateway") + errInitialScanIncomplete = errors.New("initial hostdb scan is not yet completed") + errNilCS = errors.New("cannot create hostdb with nil consensus set") + errNilGateway = errors.New("cannot create hostdb with nil gateway") ) // The HostDB is a database of potential hosts. It assigns a weight to each @@ -45,10 +46,11 @@ type HostDB struct { // handful of goroutines constantly waiting on the channel for hosts to // scan. The scan map is used to prevent duplicates from entering the scan // pool. - scanList []modules.HostDBEntry - scanMap map[string]struct{} - scanWait bool - scanningThreads int + initialScanComplete bool + scanList []modules.HostDBEntry + scanMap map[string]struct{} + scanWait bool + scanningThreads int blockHeight types.BlockHeight lastChange modules.ConsensusChangeID @@ -227,10 +229,10 @@ func (hdb *HostDB) Host(spk types.SiaPublicKey) (modules.HostDBEntry, bool) { // returns a slice of entries. 
func (hdb *HostDB) RandomHosts(n int, excludeKeys []types.SiaPublicKey) ([]modules.HostDBEntry, error) { hdb.mu.RLock() - pendingScans := len(hdb.scanList) + initialScanComplete := hdb.initialScanComplete hdb.mu.RUnlock() - if pendingScans > 0 { - return []modules.HostDBEntry{}, errors.New("Can't get hosts while hostdb is scanning") + if !initialScanComplete { + return []modules.HostDBEntry{}, errInitialScanIncomplete } return hdb.hostTree.SelectRandom(n, excludeKeys), nil } diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index 7cdd4e6be7..42f9eaacde 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -290,6 +290,23 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { hdb.mu.Unlock() } +// waitForScans is a helper function that blocks until the hostDB's scanList is +// empty. +func (hdb *HostDB) managedWaitForScans() { + for { + hdb.mu.Lock() + length := len(hdb.scanList) + hdb.mu.Unlock() + if length == 0 { + break + } + select { + case <-hdb.tg.StopChan(): + case <-time.After(time.Second): + } + } +} + // threadedProbeHosts pulls hosts from the thread pool and runs a scan on them. func (hdb *HostDB) threadedProbeHosts(scanPool <-chan modules.HostDBEntry) { err := hdb.tg.Add() @@ -329,6 +346,27 @@ func (hdb *HostDB) threadedScan() { } defer hdb.tg.Done() + // Wait for the potential initial scan to finish + hdb.managedWaitForScans() + + // The initial scan might have been interrupted. Queue one scan for every + // announced host that was missed by the initial scan and wait for the + // scans to finish before starting the scan loop. + allHosts := hdb.hostTree.All() + hdb.mu.Lock() + for _, host := range allHosts { + if len(host.ScanHistory) == 0 && host.HistoricUptime == 0 && host.HistoricDowntime == 0 { + hdb.queueScan(host) + } + } + hdb.mu.Unlock() + hdb.managedWaitForScans() + + // Set the flag to indicate that the initial scan is complete. + hdb.mu.Lock() + hdb.initialScanComplete = true + hdb.mu.Unlock() + for { // Set up a scan for the hostCheckupQuanity most valuable hosts in the // hostdb. Hosts that fail their scans will be docked significantly, From 79adbcdfaeafaa15ba949200a3a8af6dc0f40e7b Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 11 Apr 2018 15:08:55 -0400 Subject: [PATCH 079/212] set intitial scan to completed if threadedScan is not executed --- modules/renter/contractor/contractor.go | 8 +------- modules/renter/contractor/contracts.go | 2 +- modules/renter/hostdb/hostdb.go | 6 ++++-- modules/renter/hostdb/scan.go | 14 ++++++++++++-- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 0b498034e5..b1367ae191 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -8,7 +8,6 @@ package contractor // renter lock. import ( - "errors" "fmt" "os" "path/filepath" @@ -19,6 +18,7 @@ import ( "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/errors" ) var ( @@ -248,12 +248,6 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host return nil, err } - // Mark contract utility. - // TODO we probably have to force this to succeed even if there is a scan - // going on. Otherwise all the contract utilities might end up - // uninitialized. - c.managedMarkContractsUtility() - // Subscribe to the consensus set. 
err = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan()) if err == modules.ErrInvalidConsensusChangeID { diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index f225a444ac..68f30d8d50 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -75,7 +75,7 @@ func (c *Contractor) managedMarkContractsUtility() error { c.mu.RUnlock() hosts, err := c.hdb.RandomHosts(hostCount+minScoreHostBuffer, nil) if err != nil { - return + return err } // Find the minimum score that a host is allowed to have to be considered diff --git a/modules/renter/hostdb/hostdb.go b/modules/renter/hostdb/hostdb.go index 2afae8a5a4..856fc45dde 100644 --- a/modules/renter/hostdb/hostdb.go +++ b/modules/renter/hostdb/hostdb.go @@ -19,7 +19,7 @@ import ( ) var ( - errInitialScanIncomplete = errors.New("initial hostdb scan is not yet completed") + ErrInitialScanIncomplete = errors.New("initial hostdb scan is not yet completed") errNilCS = errors.New("cannot create hostdb with nil consensus set") errNilGateway = errors.New("cannot create hostdb with nil gateway") ) @@ -163,6 +163,8 @@ func NewCustomHostDB(g modules.Gateway, cs modules.ConsensusSet, persistDir stri // fake hosts and not have them marked as offline as the scanloop operates. if !hdb.deps.Disrupt("disableScanLoop") { go hdb.threadedScan() + } else { + hdb.initialScanComplete = true } return hdb, nil @@ -232,7 +234,7 @@ func (hdb *HostDB) RandomHosts(n int, excludeKeys []types.SiaPublicKey) ([]modul initialScanComplete := hdb.initialScanComplete hdb.mu.RUnlock() if !initialScanComplete { - return []modules.HostDBEntry{}, errInitialScanIncomplete + return []modules.HostDBEntry{}, ErrInitialScanIncomplete } return hdb.hostTree.SelectRandom(n, excludeKeys), nil } diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index 42f9eaacde..4a3a6c6efc 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -346,8 +346,18 @@ func (hdb *HostDB) threadedScan() { } defer hdb.tg.Done() - // Wait for the potential initial scan to finish - hdb.managedWaitForScans() + // Wait until the consensus set is synced. Only then we can be sure that + // the initial scan covers the whole network. + for { + if hdb.cs.Synced() { + break + } + select { + case <-hdb.tg.StopChan(): + return + case <-time.After(time.Second): + } + } // The initial scan might have been interrupted. Queue one scan for every // announced host that was missed by the initial scan and wait for the From 6c9f2e9769d0ecc10bedba1fc8a66b3ed48ec115 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 12 Apr 2018 13:35:44 -0400 Subject: [PATCH 080/212] abort contract maintenance if marking contracts wasn't possible --- modules/renter/contractor/contracts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 68f30d8d50..95358fdcc7 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -285,7 +285,7 @@ func (c *Contractor) threadedContractMaintenance() { // Update the utility fields for this contract based on the most recent // hostdb. 
if err := c.managedMarkContractsUtility(); err != nil { - c.log.Println("Failed to update contracUtilities", err) + c.log.Println("WARNING: wasn't able to mark contracts", err) return } From 82fa96fcb74c0fd71357727e2101398ec79b02d3 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 12 Apr 2018 15:49:50 -0400 Subject: [PATCH 081/212] add ErrInitialScanIncomplete comment --- modules/renter/hostdb/hostdb.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/renter/hostdb/hostdb.go b/modules/renter/hostdb/hostdb.go index 856fc45dde..62c56cc717 100644 --- a/modules/renter/hostdb/hostdb.go +++ b/modules/renter/hostdb/hostdb.go @@ -19,6 +19,8 @@ import ( ) var ( + // ErrInitialScanIncomplete is returned whenever an operation is not + // allowed to be executed before the initial host scan has finished. ErrInitialScanIncomplete = errors.New("initial hostdb scan is not yet completed") errNilCS = errors.New("cannot create hostdb with nil consensus set") errNilGateway = errors.New("cannot create hostdb with nil gateway") From 2cb6c8b4d4f2962361efe79ffa850b2336b5ee9c Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 17 Apr 2018 11:46:07 -0400 Subject: [PATCH 082/212] don't mark contract utilities on contractor startup --- modules/renter/contractor/contractor.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index b1367ae191..9892cf0d32 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -8,6 +8,7 @@ package contractor // renter lock. import ( + "errors" "fmt" "os" "path/filepath" @@ -18,7 +19,6 @@ import ( "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" - "github.com/NebulousLabs/errors" ) var ( From ae924fb3fe68b3a06903d05beaae21324452bf1a Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 17 Apr 2018 12:05:53 -0400 Subject: [PATCH 083/212] make sure siatest testnodes always start at a height > TaxHardforkHeight --- modules/consensus/accept_test.go | 1 + modules/renter/contractor/contractor.go | 4 +- modules/renter/contractor/contracts.go | 16 ++++--- modules/renter/proto/contract.go | 59 +++++++++++++++++++++++-- modules/renter/uploadheap.go | 5 +++ siatest/renter/renter_test.go | 2 +- siatest/testgroup.go | 26 +++++++++-- siatest/testnode.go | 2 +- types/constants.go | 9 ++++ types/filecontracts.go | 3 +- 10 files changed, 109 insertions(+), 18 deletions(-) diff --git a/modules/consensus/accept_test.go b/modules/consensus/accept_test.go index 2b215c449e..8b40cd11ef 100644 --- a/modules/consensus/accept_test.go +++ b/modules/consensus/accept_test.go @@ -866,6 +866,7 @@ func TestInconsistentCheck(t *testing.T) { // This test checks that the hardfork scheduled for block 21,000 rolls through // smoothly. func TestTaxHardfork(t *testing.T) { + t.Skip("Removed tax compat code from testing due to NDFs") if testing.Short() { t.SkipNow() } diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 9892cf0d32..b2071a9621 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -140,7 +140,9 @@ func (c *Contractor) Contracts() []modules.RenterContract { // ContractUtility returns the utility fields for the given contract. 
func (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { - return c.staticContractUtility(id) + c.mu.RLock() + defer c.mu.RUnlock() + return c.contractUtility(id) } // CurrentPeriod returns the height at which the current allowance period diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 95358fdcc7..ec91472459 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -28,8 +28,8 @@ func (c *Contractor) contractEndHeight() types.BlockHeight { return c.currentPeriod + c.allowance.Period } -// staticContractUtility returns the ContractUtility for a contract with a given id. -func (c *Contractor) staticContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { +// contractUtility returns the ContractUtility for a contract with a given id. +func (c *Contractor) contractUtility(id types.FileContractID) (modules.ContractUtility, bool) { rc, exists := c.contracts.View(c.resolveID(id)) if !exists { return modules.ContractUtility{}, false @@ -202,7 +202,9 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. // For convenience contract := sc.Metadata() // Sanity check - should not be renewing a bad contract. - utility, ok := c.staticContractUtility(contract.ID) + c.mu.RLock() + utility, ok := c.contractUtility(contract.ID) + c.mu.RUnlock() if !ok || !utility.GoodForRenew { c.log.Critical(fmt.Sprintf("Renewing a contract that has been marked as !GoodForRenew %v/%v", ok, utility.GoodForRenew)) @@ -362,7 +364,7 @@ func (c *Contractor) threadedContractMaintenance() { // Iterate through the contracts again, figuring out which contracts to // renew and how much extra funds to renew them with. for _, contract := range c.contracts.ViewAll() { - utility, ok := c.staticContractUtility(contract.ID) + utility, ok := c.contractUtility(contract.ID) if !ok || !utility.GoodForRenew { continue } @@ -483,7 +485,9 @@ func (c *Contractor) threadedContractMaintenance() { return } // Return the contract if it's not useful for renewing. - oldUtility, ok := c.staticContractUtility(id) + c.mu.RLock() + oldUtility, ok := c.contractUtility(id) + c.mu.RUnlock() if !ok || !oldUtility.GoodForRenew { c.log.Printf("Contract %v slated for renew is marked not good for renew %v/%v", id, ok, oldUtility.GoodForRenew) @@ -567,7 +571,7 @@ func (c *Contractor) threadedContractMaintenance() { c.mu.RLock() uploadContracts := 0 for _, id := range c.contracts.IDs() { - if cu, ok := c.staticContractUtility(id); ok && cu.GoodForUpload { + if cu, ok := c.contractUtility(id); ok && cu.GoodForUpload { uploadContracts++ } } diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index d7900e1ddb..f9bffc7cdc 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -29,6 +29,11 @@ type updateSetHeader struct { Header contractHeader } +type v132UpdateSetHeader struct { + ID types.FileContractID + Header v132ContractHeader +} + type updateSetRoot struct { ID types.FileContractID Root crypto.Hash @@ -56,6 +61,26 @@ type contractHeader struct { Utility modules.ContractUtility } +type v132ContractHeader struct { + // transaction is the signed transaction containing the most recent + // revision of the file contract. + Transaction types.Transaction + + // secretKey is the key used by the renter to sign the file contract + // transaction. + SecretKey crypto.SecretKey + + // Same as modules.RenterContract. 
+ StartHeight types.BlockHeight + DownloadSpending types.Currency + StorageSpending types.Currency + UploadSpending types.Currency + TotalCost types.Currency + ContractFee types.Currency + TxnFee types.Currency + SiafundFee types.Currency +} + // validate returns an error if the contractHeader is invalid. func (h *contractHeader) validate() error { if len(h.Transaction.FileContractRevisions) > 0 && @@ -329,7 +354,7 @@ func (c *SafeContract) commitTxns() error { switch update.Name { case updateNameSetHeader: var u updateSetHeader - if err := encoding.Unmarshal(update.Instructions, &u); err != nil { + if err := unmarshalHeader(update.Instructions, &u); err != nil { return err } if err := c.applySetHeader(u.Header); err != nil { @@ -363,7 +388,7 @@ func (c *SafeContract) unappliedHeader() (h contractHeader) { for _, update := range t.Updates { if update.Name == updateNameSetHeader { var u updateSetHeader - if err := encoding.Unmarshal(update.Instructions, &u); err != nil { + if err := unmarshalHeader(update.Instructions, &u); err != nil { continue } h = u.Header @@ -450,7 +475,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo switch update := t.Updates[0]; update.Name { case updateNameSetHeader: var u updateSetHeader - if err := encoding.Unmarshal(update.Instructions, &u); err != nil { + if err := unmarshalHeader(update.Instructions, &u); err != nil { return err } id = u.ID @@ -590,3 +615,31 @@ func (mrs *MerkleRootSet) UnmarshalJSON(b []byte) error { *mrs = umrs return nil } + +func unmarshalHeader(b []byte, u *updateSetHeader) error { + // Try unmarshaling the header. + if err := encoding.Unmarshal(b, u); err != nil { + // COMPATv132 try unmarshaling the header the old way. + var oldHeader v132UpdateSetHeader + if err2 := encoding.Unmarshal(b, &oldHeader); err2 != nil { + // If unmarshaling the header the old way also doesn't work we + // return the original error. + return err + } + // If unmarshaling it the old way was successful we convert it to a new + // header. + u.Header = contractHeader{ + Transaction: oldHeader.Header.Transaction, + SecretKey: oldHeader.Header.SecretKey, + StartHeight: oldHeader.Header.StartHeight, + DownloadSpending: oldHeader.Header.DownloadSpending, + StorageSpending: oldHeader.Header.StorageSpending, + UploadSpending: oldHeader.Header.UploadSpending, + TotalCost: oldHeader.Header.TotalCost, + ContractFee: oldHeader.Header.ContractFee, + TxnFee: oldHeader.Header.TxnFee, + SiafundFee: oldHeader.Header.SiafundFee, + } + } + return nil +} diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 30ac665d02..8a79fdcf47 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -21,6 +21,7 @@ import ( "sync" "time" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" ) @@ -167,6 +168,10 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un for fcid, fileContract := range f.contracts { recentContract, exists := r.hostContractor.ContractByID(fcid) contractUtility, exists2 := r.hostContractor.ContractUtility(fcid) + if (exists && !exists2) || (!exists && exists) { + build.Critical("got a contract without utility or vice versa which shouldn't happen", + exists, exists2) + } if !exists || !exists2 { // File contract does not seem to be part of the host anymore. // Delete this contract and mark the file to be saved. 
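The uploadheap change above asserts that ContractByID and ContractUtility agree on whether a contract is known. A standalone sketch of that invariant, with an illustrative package and helper name rather than code from the patch, reduces the check to a single mismatch comparison:

package example

import "github.com/NebulousLabs/Sia/build"

// assertContractConsistency flags the case where exactly one of the two
// contractor lookups knows about a contract. Sketch only; the real check runs
// inline in buildUnfinishedChunks.
func assertContractConsistency(hasContract, hasUtility bool) {
	if hasContract != hasUtility {
		build.Critical("got a contract without utility or vice versa which shouldn't happen",
			hasContract, hasUtility)
	}
}

The != comparison covers both mismatch directions, a contract without utility as well as a utility without a contract, in one condition.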
diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 972f18e75c..3e9c7981a4 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -177,7 +177,7 @@ func testRenterLocalRepair(t *testing.T, tg *siatest.TestGroup) { t.Fatal(err) } // Get the file info of the fully uploaded file. Tha way we can compare the - // redundancieslater. + // redundancies later. fi, err := renter.FileInfo(remoteFile) if err != nil { t.Fatal("failed to get file info", err) diff --git a/siatest/testgroup.go b/siatest/testgroup.go index a832fde550..d6d77a1626 100644 --- a/siatest/testgroup.go +++ b/siatest/testgroup.go @@ -1,6 +1,7 @@ package siatest import ( + "fmt" "math" "strconv" "sync" @@ -89,7 +90,7 @@ func NewGroup(nodeParams ...node.NodeParams) (*TestGroup, error) { return nil, errors.New("cannot fund group without miners") } miner := tg.Miners()[0] - for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { + for i := types.BlockHeight(0); i <= types.MaturityDelay+types.TaxHardforkHeight; i++ { if err := miner.MineBlock(); err != nil { return nil, errors.AddContext(err, "failed to mine block for funding") } @@ -315,16 +316,33 @@ func synchronizationCheck(miner *TestNode, nodes map[*TestNode]struct{}) error { if err != nil { return err } + // Loop until all the blocks have the same CurrentBlock. If we need to mine + // a new block in between we need to repeat the check until no block was + // mined. for node := range nodes { err := Retry(600, 100*time.Millisecond, func() error { ncg, err := node.ConsensusGet() if err != nil { return err } - if mcg.CurrentBlock != ncg.CurrentBlock { - return errors.New("the node's current block doesn't equal the miner's") + // If the CurrentBlock's match we are done. + if mcg.CurrentBlock == ncg.CurrentBlock { + return nil } - return nil + // If the miner's height is greater than the node's we need to + // wait a bit longer for them to sync. + if mcg.Height > ncg.Height { + return fmt.Errorf("the node didn't catch up to the miner's height %v %v", + mcg.Height, ncg.Height) + } + // If the miner's height is smaller than the node's we need a + // bit longer for them to sync. + if mcg.Height < ncg.Height { + return errors.New("the miner didn't catch up to the node's height") + } + // If the miner's height is equal to the node's but still + // doesn't match, it needs to mine a block. + return errors.New("the node's current block's id does not equal the miner's") }) if err != nil { return err diff --git a/siatest/testnode.go b/siatest/testnode.go index 646608a4f1..b8afc30abf 100644 --- a/siatest/testnode.go +++ b/siatest/testnode.go @@ -29,7 +29,7 @@ func NewNode(nodeParams node.NodeParams) (*TestNode, error) { return nil, err } // Fund the node - for i := types.BlockHeight(0); i <= types.MaturityDelay; i++ { + for i := types.BlockHeight(0); i <= types.MaturityDelay+types.TaxHardforkHeight; i++ { if err := tn.MineBlock(); err != nil { return nil, err } diff --git a/types/constants.go b/types/constants.go index 585a6db9af..daa3bea15d 100644 --- a/types/constants.go +++ b/types/constants.go @@ -124,6 +124,15 @@ var ( TargetWindow BlockHeight ) +var ( + // TaxHardforkHeight is the height at which the tax hardfork occured. + TaxHardforkHeight = build.Select(build.Var{ + Dev: BlockHeight(10), + Standard: BlockHeight(21e3), + Testing: BlockHeight(10), + }).(BlockHeight) +) + // init checks which build constant is in place and initializes the variables // accordingly. 
func init() { diff --git a/types/filecontracts.go b/types/filecontracts.go index c731950e2c..5266d9e1a2 100644 --- a/types/filecontracts.go +++ b/types/filecontracts.go @@ -4,7 +4,6 @@ package types // contracts. import ( - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" ) @@ -123,7 +122,7 @@ func PostTax(height BlockHeight, payout Currency) Currency { func Tax(height BlockHeight, payout Currency) Currency { // COMPATv0.4.0 - until the first 20,000 blocks have been archived, they // will need to be handled in a special way. - if (height < 21e3 && build.Release == "standard") || (height < 10 && build.Release == "testing") { + if height < TaxHardforkHeight { return payout.MulFloat(0.039).RoundDown(SiafundCount) } return payout.MulTax().RoundDown(SiafundCount) From 0004e572a9fe630ce5c6f950c8b547b0869edb9d Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 19 Apr 2018 13:32:40 -0400 Subject: [PATCH 084/212] Revert "Fix streaming endpoint" This reverts commit 077043e7bef86bfa2d07e40c01cd6aa168739aad. --- modules/renter/downloadcache.go | 2 +- node/api/routes.go | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 7cda44b761..4b2bdc5bc7 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -13,7 +13,7 @@ import ( // endpoint download. // TODO this won't be necessary anymore once we have partial downloads. func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { - if udc.download.staticDestinationType != destinationTypeSeekStream { + if udc.download.staticDestinationType == destinationTypeSeekStream { // We only cache streaming chunks since browsers and media players tend to only request a few kib at once when streaming data. That way we can prevent scheduling the same chunk for download over and over. return } diff --git a/node/api/routes.go b/node/api/routes.go index d1f2a15d0c..ab2d5685dc 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -1,6 +1,7 @@ package api import ( + "context" "net/http" "strings" "time" @@ -88,7 +89,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.GET("/renter/download/*siapath", RequirePassword(api.renterDownloadHandler, requiredPassword)) router.GET("/renter/downloadasync/*siapath", RequirePassword(api.renterDownloadAsyncHandler, requiredPassword)) router.POST("/renter/rename/*siapath", RequirePassword(api.renterRenameHandler, requiredPassword)) - router.GET("/renter/stream/*siapath", api.renterStreamHandler) + router.GET("/renter/stream/*siapath", Unrestricted(api.renterStreamHandler)) router.POST("/renter/upload/*siapath", RequirePassword(api.renterUploadHandler, requiredPassword)) // HostDB endpoints. @@ -199,10 +200,16 @@ func RequirePassword(h httprouter.Handle, password string) httprouter.Handle { } } -// isUnrestricted checks if a request is allowed to bypass the Sia-Agent check. +// Unrestricted can be used to whitelist api routes from requiring the +// Sia-Agent to be set. +func Unrestricted(h httprouter.Handle) httprouter.Handle { + return httprouter.Handle(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + req = req.WithContext(context.WithValue(req.Context(), unrestrictedContextKey{}, 0)) + h(w, req, ps) + }) +} + +// isUnrestricted checks if a context has the unrestrictedContextKey set. 
func isUnrestricted(req *http.Request) bool { - if strings.HasPrefix(req.URL.Path, "/renter/stream") { - return true - } - return false + return req.Context().Value(unrestrictedContextKey{}) != nil } From 057f5ec270888641945ba1f0eceab914f286c34f Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 19 Apr 2018 19:29:36 -0400 Subject: [PATCH 085/212] Update siac to use node.api.Client package and add flag to change UserAgent --- cmd/siac/consensuscmd.go | 4 +- cmd/siac/daemoncmd.go | 19 +-- cmd/siac/export.go | 4 +- cmd/siac/gatewaycmd.go | 16 +-- cmd/siac/hostcmd.go | 40 +++--- cmd/siac/hostdbcmd.go | 20 ++- cmd/siac/main.go | 202 +++-------------------------- cmd/siac/minercmd.go | 9 +- cmd/siac/rentercmd.go | 65 +++++----- cmd/siac/walletcmd.go | 85 ++++++------ node/api/client/daemon.go | 12 ++ node/api/client/gateway.go | 7 + node/api/client/host.go | 102 +++++++++++++-- node/api/client/hostdb.go | 19 ++- node/api/client/miner.go | 7 + node/api/client/renter.go | 46 ++++++- node/api/client/transactionpool.go | 9 ++ node/api/client/wallet.go | 99 +++++++++++++- node/api/daemon.go | 7 + node/api/wallet.go | 4 +- siatest/testgroup.go | 2 +- siatest/testnode.go | 4 +- 22 files changed, 435 insertions(+), 347 deletions(-) create mode 100644 node/api/client/transactionpool.go diff --git a/cmd/siac/consensuscmd.go b/cmd/siac/consensuscmd.go index 2c2bf40490..d7b1cae936 100644 --- a/cmd/siac/consensuscmd.go +++ b/cmd/siac/consensuscmd.go @@ -6,7 +6,6 @@ import ( "github.com/spf13/cobra" - "github.com/NebulousLabs/Sia/node/api" "github.com/NebulousLabs/Sia/types" ) @@ -22,8 +21,7 @@ var ( // consensuscmd is the handler for the command `siac consensus`. // Prints the current state of consensus. func consensuscmd() { - var cg api.ConsensusGET - err := getAPI("/consensus", &cg) + cg, err := httpClient.ConsensusGet() if err != nil { die("Could not get current consensus state:", err) } diff --git a/cmd/siac/daemoncmd.go b/cmd/siac/daemoncmd.go index bf2450f6ce..8cae4e4e9d 100644 --- a/cmd/siac/daemoncmd.go +++ b/cmd/siac/daemoncmd.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/NebulousLabs/Sia/build" - "github.com/NebulousLabs/Sia/node/api" "github.com/spf13/cobra" ) @@ -38,11 +37,6 @@ var ( } ) -type updateInfo struct { - Available bool `json:"available"` - Version string `json:"version"` -} - // version prints the version of siac and siad. func versioncmd() { fmt.Println("Sia Client") @@ -51,8 +45,7 @@ func versioncmd() { fmt.Println("\tGit Revision " + build.GitRevision) fmt.Println("\tBuild Time " + build.BuildTime) } - var dvg api.DaemonVersionGet - err := getAPI("/daemon/version", &dvg) + dvg, err := httpClient.DaemonVersionGet() if err != nil { fmt.Println("Could not get daemon version:", err) return @@ -68,7 +61,7 @@ func versioncmd() { // stopcmd is the handler for the command `siac stop`. // Stops the daemon. 
func stopcmd() { - err := get("/daemon/stop") + err := httpClient.DaemonStopGet() if err != nil { die("Could not stop daemon:", err) } @@ -76,8 +69,7 @@ func stopcmd() { } func updatecmd() { - var update updateInfo - err := getAPI("/daemon/update", &update) + update, err := httpClient.DaemonUpdateGet() if err != nil { fmt.Println("Could not check for update:", err) return @@ -87,7 +79,7 @@ func updatecmd() { return } - err = post("/daemon/update", "") + err = httpClient.DaemonUpdatePost() if err != nil { fmt.Println("Could not apply update:", err) return @@ -96,8 +88,7 @@ func updatecmd() { } func updatecheckcmd() { - var update updateInfo - err := getAPI("/daemon/update", &update) + update, err := httpClient.DaemonUpdateGet() if err != nil { fmt.Println("Could not check for update:", err) return diff --git a/cmd/siac/export.go b/cmd/siac/export.go index 79a9d8d40e..005c217761 100644 --- a/cmd/siac/export.go +++ b/cmd/siac/export.go @@ -5,7 +5,6 @@ import ( "fmt" "os" - "github.com/NebulousLabs/Sia/node/api" "github.com/NebulousLabs/Sia/types" "github.com/spf13/cobra" @@ -31,8 +30,7 @@ var ( // renterexportcontracttxnscmd is the handler for the command `siac renter export contract-txns`. // Exports the current contract set to JSON. func renterexportcontracttxnscmd(destination string) { - var cs api.RenterContracts - err := getAPI("/renter/contracts", &cs) + cs, err := httpClient.RenterContractsGet() if err != nil { die("Could not retrieve contracts:", err) } diff --git a/cmd/siac/gatewaycmd.go b/cmd/siac/gatewaycmd.go index 61d0c6ee92..ba5737094e 100644 --- a/cmd/siac/gatewaycmd.go +++ b/cmd/siac/gatewaycmd.go @@ -5,9 +5,8 @@ import ( "os" "text/tabwriter" + "github.com/NebulousLabs/Sia/modules" "github.com/spf13/cobra" - - "github.com/NebulousLabs/Sia/node/api" ) var ( @@ -50,7 +49,7 @@ var ( // gatewayconnectcmd is the handler for the command `siac gateway add [address]`. // Adds a new peer to the peer list. func gatewayconnectcmd(addr string) { - err := post("/gateway/connect/"+addr, "") + err := httpClient.GatewayConnectPost(modules.NetAddress(addr)) if err != nil { die("Could not add peer:", err) } @@ -60,7 +59,7 @@ func gatewayconnectcmd(addr string) { // gatewaydisconnectcmd is the handler for the command `siac gateway remove [address]`. // Removes a peer from the peer list. func gatewaydisconnectcmd(addr string) { - err := post("/gateway/disconnect/"+addr, "") + err := httpClient.GatewayDisconnectPost(modules.NetAddress(addr)) if err != nil { die("Could not remove peer:", err) } @@ -70,8 +69,7 @@ func gatewaydisconnectcmd(addr string) { // gatewayaddresscmd is the handler for the command `siac gateway address`. // Prints the gateway's network address. func gatewayaddresscmd() { - var info api.GatewayGET - err := getAPI("/gateway", &info) + info, err := httpClient.GatewayGet() if err != nil { die("Could not get gateway address:", err) } @@ -81,8 +79,7 @@ func gatewayaddresscmd() { // gatewaycmd is the handler for the command `siac gateway`. // Prints the gateway's network address and number of peers. func gatewaycmd() { - var info api.GatewayGET - err := getAPI("/gateway", &info) + info, err := httpClient.GatewayGet() if err != nil { die("Could not get gateway address:", err) } @@ -93,8 +90,7 @@ func gatewaycmd() { // gatewaylistcmd is the handler for the command `siac gateway list`. // Prints a list of all peers. 
func gatewaylistcmd() { - var info api.GatewayGET - err := getAPI("/gateway", &info) + info, err := httpClient.GatewayGet() if err != nil { die("Could not get peer list:", err) } diff --git a/cmd/siac/hostcmd.go b/cmd/siac/hostcmd.go index 1018b7f59e..514887de1a 100644 --- a/cmd/siac/hostcmd.go +++ b/cmd/siac/hostcmd.go @@ -8,8 +8,9 @@ import ( "strings" "text/tabwriter" + "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" - "github.com/NebulousLabs/Sia/node/api" + "github.com/NebulousLabs/Sia/node/api/client" "github.com/NebulousLabs/Sia/types" "github.com/spf13/cobra" @@ -122,13 +123,11 @@ sector may impact host revenue.`, // hostcmd is the handler for the command `siac host`. // Prints info about the host and its storage folders. func hostcmd() { - hg := new(api.HostGET) - err := getAPI("/host", hg) + hg, err := httpClient.HostGet() if err != nil { die("Could not fetch host settings:", err) } - sg := new(api.StorageGET) - err = getAPI("/host/storage", sg) + sg, err := httpClient.HostStorageGet() if err != nil { die("Could not fetch storage info:", err) } @@ -284,8 +283,7 @@ RPC Stats: } // if wallet is locked print warning - walletstatus := new(api.WalletGET) - walleterr := getAPI("/wallet", walletstatus) + walletstatus, walleterr := httpClient.WalletGet() if walleterr != nil { fmt.Print("\nWarning:\n Could not get wallet status. A working wallet is needed in order to operate your host. Error: ") fmt.Println(walleterr) @@ -368,15 +366,14 @@ func hostconfigcmd(param, value string) { default: die("\"" + param + "\" is not a host setting") } - err = post("/host", param+"="+value) + err = httpClient.HostModifySettingPost(client.HostParam(param), value) if err != nil { - die("Could not update host settings:", err) + die("Failed to update host settings:", err) } fmt.Println("Host settings updated.") // get the estimated conversion rate. - var eg api.HostEstimateScoreGET - err = getAPI(fmt.Sprintf("/host/estimatescore?%v=%v", param, value), &eg) + eg, err := httpClient.HostEstimateScoreGet(param, value) if err != nil { if err.Error() == "cannot call /host/estimatescore without the renter module" { // score estimate requires the renter module @@ -394,9 +391,9 @@ func hostannouncecmd(cmd *cobra.Command, args []string) { var err error switch len(args) { case 0: - err = post("/host/announce", "") + err = httpClient.HostAnnouncePost() case 1: - err = post("/host/announce", "netaddress="+args[0]) + err = httpClient.HostAnnounceAddrPost(modules.NetAddress(args[0])) default: cmd.UsageFunc()(cmd) os.Exit(exitCodeUsage) @@ -407,7 +404,7 @@ func hostannouncecmd(cmd *cobra.Command, args []string) { fmt.Println("Host announcement submitted to network.") // start accepting contracts - err = post("/host", "acceptingcontracts=true") + err = httpClient.HostModifySettingPost(client.HostParamAcceptingContracts, true) if err != nil { die("Could not configure host to accept contracts:", err) } @@ -427,9 +424,8 @@ func hostfolderaddcmd(path, size string) { fmt.Sscan(size, &sizeUint64) sizeUint64 /= 64 * modules.SectorSize sizeUint64 *= 64 * modules.SectorSize - size = fmt.Sprint(sizeUint64) - err = post("/host/storage/folders/add", fmt.Sprintf("path=%s&size=%s", abs(path), size)) + err = httpClient.HostStorageFoldersAddPost(abs(path), sizeUint64) if err != nil { die("Could not add folder:", err) } @@ -438,7 +434,7 @@ func hostfolderaddcmd(path, size string) { // hostfolderremovecmd removes a folder from the host. 
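// Note on the rounding in hostfolderaddcmd above (and hostfolderresizecmd
// below): integer-dividing and then re-multiplying by 64*modules.SectorSize
// rounds the requested size down to a whole multiple of 64 sectors, e.g.
//
//	1000 MiB / 256 MiB = 3 (integer division, standard 4 MiB sectors)
//	3 * 256 MiB = 768 MiB actually allocated
//
// The 4 MiB sector size applies to standard builds; test builds use a smaller
// sector size, so the granularity there is correspondingly smaller.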
func hostfolderremovecmd(path string) { - err := post("/host/storage/folders/remove", "path="+abs(path)) + err := httpClient.HostStorageFoldersRemovePost(abs(path)) if err != nil { die("Could not remove folder:", err) } @@ -456,9 +452,8 @@ func hostfolderresizecmd(path, newsize string) { fmt.Sscan(newsize, &sizeUint64) sizeUint64 /= 64 * modules.SectorSize sizeUint64 *= 64 * modules.SectorSize - newsize = fmt.Sprint(sizeUint64) - err = post("/host/storage/folders/resize", fmt.Sprintf("path=%s&newsize=%s", abs(path), newsize)) + err = httpClient.HostStorageFoldersResizePost(abs(path), sizeUint64) if err != nil { die("Could not resize folder:", err) } @@ -467,7 +462,12 @@ func hostfolderresizecmd(path, newsize string) { // hostsectordeletecmd deletes a sector from the host. func hostsectordeletecmd(root string) { - err := post("/host/storage/sectors/delete/"+root, "") + var hash crypto.Hash + err := hash.LoadString(root) + if err != nil { + die("Could not parse root:", err) + } + err = httpClient.HostStorageSectorsDeletePost(hash) if err != nil { die("Could not delete sector:", err) } diff --git a/cmd/siac/hostdbcmd.go b/cmd/siac/hostdbcmd.go index 64d2e31059..cde2a2b493 100644 --- a/cmd/siac/hostdbcmd.go +++ b/cmd/siac/hostdbcmd.go @@ -10,6 +10,7 @@ import ( "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/node/api" + "github.com/NebulousLabs/Sia/types" ) const scanHistoryLen = 30 @@ -52,8 +53,7 @@ func printScoreBreakdown(info *api.HostdbHostsGET) { func hostdbcmd() { if !hostdbVerbose { - info := new(api.HostdbActiveGET) - err := getAPI("/hostdb/active", info) + info, err := httpClient.HostDbActiveGet() if err != nil { die("Could not fetch host list:", err) } @@ -76,8 +76,7 @@ func hostdbcmd() { } w.Flush() } else { - info := new(api.HostdbAllGET) - err := getAPI("/hostdb/all", info) + info, err := httpClient.HostDbAllGet() if err != nil { die("Could not fetch host list:", err) } @@ -210,8 +209,7 @@ func hostdbcmd() { referenceScore := big.NewRat(1, 1) if len(activeHosts) > 0 { referenceIndex := len(activeHosts) / 5 - hostInfo := new(api.HostdbHostsGET) - err := getAPI("/hostdb/hosts/"+activeHosts[referenceIndex].PublicKeyString, hostInfo) + hostInfo, err := httpClient.HostDbHostsGet(activeHosts[referenceIndex].PublicKey) if err != nil { die("Could not fetch provided host:", err) } @@ -261,8 +259,7 @@ func hostdbcmd() { } // Grab the score information for the active hosts. - hostInfo := new(api.HostdbHostsGET) - err := getAPI("/hostdb/hosts/"+host.PublicKeyString, hostInfo) + hostInfo, err := httpClient.HostDbHostsGet(host.PublicKey) if err != nil { die("Could not fetch provided host:", err) } @@ -277,8 +274,9 @@ func hostdbcmd() { } func hostdbviewcmd(pubkey string) { - info := new(api.HostdbHostsGET) - err := getAPI("/hostdb/hosts/"+pubkey, info) + var publicKey types.SiaPublicKey + publicKey.LoadString(pubkey) + info, err := httpClient.HostDbHostsGet(publicKey) if err != nil { die("Could not fetch provided host:", err) } @@ -301,7 +299,7 @@ func hostdbviewcmd(pubkey string) { fmt.Fprintln(w, "\t\tVersion:\t", info.Entry.Version) w.Flush() - printScoreBreakdown(info) + printScoreBreakdown(&info) // Compute the total measured uptime and total measured downtime for this // host. 
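// A minimal, self-contained sketch of the typed client wiring that this patch
// introduces and that replaces the string-based getAPI/post helpers removed
// from cmd/siac/main.go below. It assumes a reachable siad; the struct fields
// and the ConsensusGet method are the ones shown elsewhere in this diff.
package main

import (
	"fmt"
	"log"

	"github.com/NebulousLabs/Sia/node/api/client"
)

func main() {
	c := client.Client{
		Address:   "localhost:9980",
		Password:  "", // siac fills this from SIA_API_PASSWORD or an interactive prompt
		UserAgent: "Sia-Agent",
	}
	cg, err := c.ConsensusGet()
	if err != nil {
		log.Fatal("could not get consensus state: ", err)
	}
	fmt.Println("current height:", cg.Height)
}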
diff --git a/cmd/siac/main.go b/cmd/siac/main.go index 54bd3448cf..65e1ffc693 100644 --- a/cmd/siac/main.go +++ b/cmd/siac/main.go @@ -1,39 +1,29 @@ package main import ( - "encoding/json" - "errors" "fmt" - "net" - "net/http" "os" "reflect" "github.com/spf13/cobra" "github.com/NebulousLabs/Sia/build" - "github.com/NebulousLabs/Sia/node/api" + "github.com/NebulousLabs/Sia/node/api/client" ) var ( // Flags. - addr string // override default API address - hostVerbose bool // display additional host info - initForce bool // destroy and reencrypt the wallet on init if it already exists - initPassword bool // supply a custom password when creating a wallet - renterListVerbose bool // Show additional info about uploaded files. - renterShowHistory bool // Show download history in addition to download queue. + hostVerbose bool // display additional host info + initForce bool // destroy and reencrypt the wallet on init if it already exists + initPassword bool // supply a custom password when creating a wallet + renterListVerbose bool // Show additional info about uploaded files. + renterShowHistory bool // Show download history in addition to download queue. ) var ( // Globals. - rootCmd *cobra.Command // Root command cobra object, used by bash completion cmd. -) - -var ( - // User-supplied password, cached so that we don't need to prompt multiple - // times. - apiPassword string + rootCmd *cobra.Command // Root command cobra object, used by bash completion cmd. + httpClient client.Client ) // Exit codes. @@ -43,173 +33,8 @@ const ( exitCodeUsage = 64 // EX_USAGE in sysexits.h ) -// non2xx returns true for non-success HTTP status codes. -func non2xx(code int) bool { - return code < 200 || code > 299 -} - -// decodeError returns the api.Error from a API response. This method should -// only be called if the response's status code is non-2xx. The error returned -// may not be of type api.Error in the event of an error unmarshalling the -// JSON. -func decodeError(resp *http.Response) error { - var apiErr api.Error - err := json.NewDecoder(resp.Body).Decode(&apiErr) - if err != nil { - return err - } - return apiErr -} - -// apiGet wraps a GET request with a status code check, such that if the GET does -// not return 2xx, the error will be read and returned. The response body is -// not closed. -func apiGet(call string) (*http.Response, error) { - if host, port, _ := net.SplitHostPort(addr); host == "" { - addr = net.JoinHostPort("localhost", port) - } - resp, err := api.HttpGET("http://" + addr + call) - if err != nil { - return nil, errors.New("no response from daemon") - } - // check error code - if resp.StatusCode == http.StatusUnauthorized { - // retry request with authentication. 
- resp.Body.Close() - if apiPassword == "" { - apiPassword = os.Getenv("SIA_API_PASSWORD") - if apiPassword != "" { - fmt.Println("Using SIA_API_PASSWORD environment variable") - } else { - // prompt for password and store it in a global var for subsequent - // calls - apiPassword, err = passwordPrompt("API password: ") - if err != nil { - return nil, err - } - } - } - resp, err = api.HttpGETAuthenticated("http://"+addr+call, apiPassword) - if err != nil { - return nil, errors.New("no response from daemon - authentication failed") - } - } - if resp.StatusCode == http.StatusNotFound { - resp.Body.Close() - return nil, errors.New("API call not recognized: " + call) - } - if non2xx(resp.StatusCode) { - err := decodeError(resp) - resp.Body.Close() - return nil, err - } - return resp, nil -} - -// getAPI makes a GET API call and decodes the response. An error is returned -// if the response status is not 2xx. -func getAPI(call string, obj interface{}) error { - resp, err := apiGet(call) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNoContent { - return errors.New("expecting a response, but API returned status code 204 No Content") - } - - err = json.NewDecoder(resp.Body).Decode(obj) - if err != nil { - return err - } - return nil -} - -// get makes an API call and discards the response. An error is returned if the -// response status is not 2xx. -func get(call string) error { - resp, err := apiGet(call) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// apiPost wraps a POST request with a status code check, such that if the POST -// does not return 2xx, the error will be read and returned. The response body -// is not closed. -func apiPost(call, vals string) (*http.Response, error) { - if host, port, _ := net.SplitHostPort(addr); host == "" { - addr = net.JoinHostPort("localhost", port) - } - - resp, err := api.HttpPOST("http://"+addr+call, vals) - if err != nil { - return nil, errors.New("no response from daemon") - } - // check error code - if resp.StatusCode == http.StatusUnauthorized { - resp.Body.Close() - apiPassword = os.Getenv("SIA_API_PASSWORD") - if apiPassword != "" { - fmt.Println("Using SIA_API_PASSWORD environment variable") - } else { - // Prompt for password and retry request with authentication. - apiPassword, err = passwordPrompt("API password: ") - if err != nil { - return nil, err - } - } - resp, err = api.HttpPOSTAuthenticated("http://"+addr+call, vals, apiPassword) - if err != nil { - return nil, errors.New("no response from daemon - authentication failed") - } - } - if resp.StatusCode == http.StatusNotFound { - resp.Body.Close() - return nil, errors.New("API call not recognized: " + call) - } - if non2xx(resp.StatusCode) { - err := decodeError(resp) - resp.Body.Close() - return nil, err - } - return resp, nil -} - -// postResp makes a POST API call and decodes the response. An error is -// returned if the response status is not 2xx. -func postResp(call, vals string, obj interface{}) error { - resp, err := apiPost(call, vals) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNoContent { - return errors.New("expecting a response, but API returned status code 204 No Content") - } - - err = json.NewDecoder(resp.Body).Decode(obj) - if err != nil { - return err - } - return nil -} - // post makes an API call and discards the response. An error is returned if // the response status is not 2xx. 
-func post(call, vals string) error { - resp, err := apiPost(call, vals) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - // wrap wraps a generic command with a check that the command has been // passed the correct number of arguments. The command must take only strings // as arguments. @@ -309,8 +134,15 @@ func main() { root.AddCommand(bashcomplCmd) root.AddCommand(mangenCmd) - // parse flags - root.PersistentFlags().StringVarP(&addr, "addr", "a", "localhost:9980", "which host/port to communicate with (i.e. the host/port siad is listening on)") + // Check if the api password environment variable is set. + apiPassword := os.Getenv("SIA_API_PASSWORD") + if apiPassword != "" { + fmt.Println("Using SIA_API_PASSWORD environment variable") + } + // initialize client + root.PersistentFlags().StringVarP(&httpClient.Address, "addr", "a", "localhost:9980", "which host/port to communicate with (i.e. the host/port siad is listening on)") + root.PersistentFlags().StringVarP(&httpClient.Password, "apipassword", "x", apiPassword, "the password for the API's http authentication") + root.PersistentFlags().StringVarP(&httpClient.UserAgent, "useragent", "u", "Sia-Agent", "the useragent used by siac to connect to the daemon's API") // run if err := root.Execute(); err != nil { diff --git a/cmd/siac/minercmd.go b/cmd/siac/minercmd.go index d991cae96d..cbdd1398ac 100644 --- a/cmd/siac/minercmd.go +++ b/cmd/siac/minercmd.go @@ -3,8 +3,6 @@ package main import ( "fmt" - "github.com/NebulousLabs/Sia/node/api" - "github.com/spf13/cobra" ) @@ -34,7 +32,7 @@ var ( // minerstartcmd is the handler for the command `siac miner start`. // Starts the CPU miner. func minerstartcmd() { - err := get("/miner/start") + err := httpClient.MinerStartGet() if err != nil { die("Could not start miner:", err) } @@ -44,8 +42,7 @@ func minerstartcmd() { // minercmd is the handler for the command `siac miner`. // Prints the status of the miner. func minercmd() { - status := new(api.MinerGET) - err := getAPI("/miner", status) + status, err := httpClient.MinerGet() if err != nil { die("Could not get miner status:", err) } @@ -64,7 +61,7 @@ Blocks Mined: %d (%d stale) // minerstopcmd is the handler for the command `siac miner stop`. // Stops the CPU miner. func minerstopcmd() { - err := get("/miner/stop") + err := httpClient.MinerStopGet() if err != nil { die("Could not stop miner:", err) } diff --git a/cmd/siac/rentercmd.go b/cmd/siac/rentercmd.go index ce063c7c2e..6799663552 100644 --- a/cmd/siac/rentercmd.go +++ b/cmd/siac/rentercmd.go @@ -150,8 +150,7 @@ func abs(path string) string { // rentercmd displays the renter's financial metrics and lists the files it is // tracking. func rentercmd() { - var rg api.RenterGET - err := getAPI("/renter", &rg) + rg, err := httpClient.RenterGet() if err != nil { die("Could not get renter info:", err) } @@ -185,8 +184,7 @@ func rentercmd() { // renteruploadscmd is the handler for the command `siac renter uploads`. // Lists files currently uploading. func renteruploadscmd() { - var rf api.RenterFiles - err := getAPI("/renter/files", &rf) + rf, err := httpClient.RenterFilesGet() if err != nil { die("Could not get upload queue:", err) } @@ -217,8 +215,7 @@ func renteruploadscmd() { // Lists files currently downloading, and optionally previously downloaded // files if the -H or --history flag is specified. 
func renterdownloadscmd() { - var queue api.RenterDownloadQueue - err := getAPI("/renter/downloads", &queue) + queue, err := httpClient.RenterDownloadsGet() if err != nil { die("Could not get download queue:", err) } @@ -260,8 +257,7 @@ func renterdownloadscmd() { // renterallowancecmd displays the current allowance. func renterallowancecmd() { - var rg api.RenterGET - err := getAPI("/renter", &rg) + rg, err := httpClient.RenterGet() if err != nil { die("Could not get allowance:", err) } @@ -276,7 +272,7 @@ func renterallowancecmd() { // renterallowancecancelcmd cancels the current allowance. func renterallowancecancelcmd() { - err := post("/renter", "hosts=0&funds=0&period=0&renewwindow=0") + err := httpClient.RenterCancelAllowance() if err != nil { die("error canceling allowance:", err) } @@ -299,24 +295,36 @@ func rentersetallowancecmd(cmd *cobra.Command, args []string) { } blocks, err := parsePeriod(args[1]) if err != nil { - die("Could not parse period") + die("Could not parse period:", err) + } + allowance := modules.Allowance{} + _, err = fmt.Sscan(hastings, &allowance.Funds) + if err != nil { + die("Could not parse amount:", err) + } + + _, err = fmt.Sscan(blocks, &allowance.Period) + if err != nil { + die("Could not parse period:", err) } - queryString := fmt.Sprintf("funds=%s&period=%s", hastings, blocks) if len(args) > 2 { - _, err = strconv.Atoi(args[2]) + hosts, err := strconv.Atoi(args[2]) if err != nil { die("Could not parse host count") } - queryString += fmt.Sprintf("&hosts=%s", args[2]) + allowance.Hosts = uint64(hosts) } if len(args) > 3 { renewWindow, err := parsePeriod(args[3]) if err != nil { die("Could not parse renew window") } - queryString += fmt.Sprintf("&renewwindow=%s", renewWindow) + _, err = fmt.Sscan(renewWindow, &allowance.RenewWindow) + if err != nil { + die("Could not parse renew window:", err) + } } - err = post("/renter", queryString) + err = httpClient.RenterPostAllowance(allowance) if err != nil { die("Could not set allowance:", err) } @@ -340,8 +348,7 @@ func (s byValue) Less(i, j int) bool { // rentercontractscmd is the handler for the comand `siac renter contracts`. // It lists the Renter's contracts. func rentercontractscmd() { - var rc api.RenterContracts - err := getAPI("/renter/contracts", &rc) + rc, err := httpClient.RenterContractsGet() if err != nil { die("Could not get contracts:", err) } @@ -371,16 +378,14 @@ func rentercontractscmd() { // rentercontractsviewcmd is the handler for the command `siac renter contracts `. // It lists details of a specific contract. func rentercontractsviewcmd(cid string) { - var rc api.RenterContracts - err := getAPI("/renter/contracts", &rc) + rc, err := httpClient.RenterContractsGet() if err != nil { die("Could not get contract details: ", err) } for _, rc := range rc.Contracts { if rc.ID.String() == cid { - var hostInfo api.HostdbHostsGET - err = getAPI("/hostdb/hosts/"+rc.HostPublicKey.String(), &hostInfo) + hostInfo, err := httpClient.HostDbHostsGet(rc.HostPublicKey) if err != nil { die("Could not fetch details of host: ", err) } @@ -420,7 +425,7 @@ Contract %v // renterfilesdeletecmd is the handler for the command `siac renter delete [path]`. // Removes the specified path from the Sia network. 
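// For reference, the equivalent programmatic call built by rentersetallowancecmd
// above (a sketch; the concrete values are only examples):
//
//	allowance := modules.Allowance{
//		Funds:       types.SiacoinPrecision.Mul64(500), // 500 SC
//		Period:      types.BlockHeight(4320),           // roughly 30 days of blocks
//		Hosts:       50,
//		RenewWindow: types.BlockHeight(2160),
//	}
//	if err := httpClient.RenterPostAllowance(allowance); err != nil {
//		die("Could not set allowance:", err)
//	}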
func renterfilesdeletecmd(path string) { - err := post("/renter/delete/"+path, "") + err := httpClient.RenterDeletePost(path) if err != nil { die("Could not delete file:", err) } @@ -434,7 +439,7 @@ func renterfilesdownloadcmd(path, destination string) { done := make(chan struct{}) go downloadprogress(done, path) - err := get("/renter/download/" + path + "?destination=" + destination) + err := httpClient.RenterDownloadFullGet(path, destination, false) close(done) if err != nil { die("Could not download file:", err) @@ -451,8 +456,7 @@ func downloadprogress(done chan struct{}, siapath string) { case <-time.Tick(time.Second): // get download progress of file - var queue api.RenterDownloadQueue - err := getAPI("/renter/downloads", &queue) + queue, err := httpClient.RenterDownloadsGet() if err != nil { continue // benign } @@ -487,7 +491,7 @@ func (s bySiaPath) Less(i, j int) bool { return s[i].SiaPath < s[j].SiaPath } // Lists files known to the renter on the network. func renterfileslistcmd() { var rf api.RenterFiles - err := getAPI("/renter/files", &rf) + rf, err := httpClient.RenterFilesGet() if err != nil { die("Could not get file list:", err) } @@ -528,7 +532,7 @@ func renterfileslistcmd() { // renterfilesrenamecmd is the handler for the command `siac renter rename [path] [newpath]`. // Renames a file on the Sia network. func renterfilesrenamecmd(path, newpath string) { - err := post("/renter/rename/"+path, "newsiapath="+newpath) + err := httpClient.RenterRenamePost(path, newpath) if err != nil { die("Could not rename file:", err) } @@ -568,7 +572,7 @@ func renterfilesuploadcmd(source, path string) { fpath, _ := filepath.Rel(source, file) fpath = filepath.Join(path, fpath) fpath = filepath.ToSlash(fpath) - err = post("/renter/upload/"+fpath, "source="+abs(file)) + err = httpClient.RenterUploadDefaultPost(abs(file), fpath) if err != nil { die("Could not upload file:", err) } @@ -576,7 +580,7 @@ func renterfilesuploadcmd(source, path string) { fmt.Printf("Uploaded %d files into '%s'.\n", len(files), path) } else { // single file - err = post("/renter/upload/"+path, "source="+abs(source)) + err = httpClient.RenterUploadDefaultPost(abs(source), path) if err != nil { die("Could not upload file:", err) } @@ -587,8 +591,7 @@ func renterfilesuploadcmd(source, path string) { // renterpricescmd is the handler for the command `siac renter prices`, which // displays the prices of various storage operations. func renterpricescmd() { - var rpg api.RenterPricesGET - err := getAPI("/renter/prices", &rpg) + rpg, err := httpClient.RenterPricesGet() if err != nil { die("Could not read the renter prices:", err) } diff --git a/cmd/siac/walletcmd.go b/cmd/siac/walletcmd.go index d89fafa95e..6767a73b0b 100644 --- a/cmd/siac/walletcmd.go +++ b/cmd/siac/walletcmd.go @@ -3,6 +3,7 @@ package main import ( "errors" "fmt" + "math" "math/big" "os" "syscall" @@ -11,7 +12,6 @@ import ( "github.com/spf13/cobra" "golang.org/x/crypto/ssh/terminal" - "github.com/NebulousLabs/Sia/node/api" "github.com/NebulousLabs/Sia/types" ) @@ -206,8 +206,7 @@ func confirmPassword(prev string) error { // walletaddresscmd fetches a new address from the wallet that will be able to // receive coins. func walletaddresscmd() { - addr := new(api.WalletAddressGET) - err := getAPI("/wallet/address", addr) + addr, err := httpClient.WalletAddressGet() if err != nil { die("Could not generate new address:", err) } @@ -216,8 +215,7 @@ func walletaddresscmd() { // walletaddressescmd fetches the list of addresses that the wallet knows. 
func walletaddressescmd() { - addrs := new(api.WalletAddressesGET) - err := getAPI("/wallet/addresses", addrs) + addrs, err := httpClient.WalletAddressesGet() if err != nil { die("Failed to fetch addresses:", err) } @@ -238,8 +236,7 @@ func walletchangepasswordcmd() { } else if err = confirmPassword(newPassword); err != nil { die(err) } - qs := fmt.Sprintf("newpassword=%s&encryptionpassword=%s", newPassword, currentPassword) - err = post("/wallet/changepassword", qs) + err = httpClient.WalletChangePasswordPost(currentPassword, newPassword) if err != nil { die("Changing the password failed:", err) } @@ -248,21 +245,17 @@ func walletchangepasswordcmd() { // walletinitcmd encrypts the wallet with the given password func walletinitcmd() { - var er api.WalletInitPOST - qs := fmt.Sprintf("dictionary=%s", "english") + var password string + var err error if initPassword { - password, err := passwordPrompt("Wallet password: ") + password, err = passwordPrompt("Wallet password: ") if err != nil { die("Reading password failed:", err) } else if err = confirmPassword(password); err != nil { die(err) } - qs += fmt.Sprintf("&encryptionpassword=%s", password) } - if initForce { - qs += "&force=true" - } - err := postResp("/wallet/init", qs, &er) + er, err := httpClient.WalletInitPost(password, "english", initForce) if err != nil { die("Error when encrypting wallet:", err) } @@ -280,20 +273,16 @@ func walletinitseedcmd() { if err != nil { die("Reading seed failed:", err) } - qs := fmt.Sprintf("&seed=%s&dictionary=%s", seed, "english") + var password string if initPassword { - password, err := passwordPrompt("Wallet password: ") + password, err = passwordPrompt("Wallet password: ") if err != nil { die("Reading password failed:", err) } else if err = confirmPassword(password); err != nil { die(err) } - qs += fmt.Sprintf("&encryptionpassword=%s", password) - } - if initForce { - qs += "&force=true" } - err = post("/wallet/init/seed", qs) + err = httpClient.WalletInitSeedPost(seed, password, "english", initForce) if err != nil { die("Could not initialize wallet from seed:", err) } @@ -310,8 +299,7 @@ func walletload033xcmd(source string) { if err != nil { die("Reading password failed:", err) } - qs := fmt.Sprintf("source=%s&encryptionpassword=%s", abs(source), password) - err = post("/wallet/033x", qs) + err = httpClient.Wallet033xPost(abs(source), password) if err != nil { die("Loading wallet failed:", err) } @@ -328,8 +316,7 @@ func walletloadseedcmd() { if err != nil { die("Reading password failed:", err) } - qs := fmt.Sprintf("encryptionpassword=%s&seed=%s&dictionary=%s", password, seed, "english") - err = post("/wallet/seed", qs) + err = httpClient.WalletSeedPost(seed, password, "english") if err != nil { die("Could not add seed:", err) } @@ -342,8 +329,7 @@ func walletloadsiagcmd(keyfiles string) { if err != nil { die("Reading password failed:", err) } - qs := fmt.Sprintf("keyfiles=%s&encryptionpassword=%s", keyfiles, password) - err = post("/wallet/siagkey", qs) + err = httpClient.WalletSiagKeyPost(keyfiles, password) if err != nil { die("Loading siag key failed:", err) } @@ -352,7 +338,7 @@ func walletloadsiagcmd(keyfiles string) { // walletlockcmd locks the wallet func walletlockcmd() { - err := post("/wallet/lock", "") + err := httpClient.WalletLockPost() if err != nil { die("Could not lock wallet:", err) } @@ -360,8 +346,7 @@ func walletlockcmd() { // walletseedcmd returns the current seed { func walletseedscmd() { - var seedInfo api.WalletSeedsGET - err := getAPI("/wallet/seeds", &seedInfo) + 
seedInfo, err := httpClient.WalletSeedsGet() if err != nil { die("Error retrieving the current seed:", err) } @@ -388,7 +373,15 @@ func walletsendsiacoinscmd(amount, dest string) { if err != nil { die("Could not parse amount:", err) } - err = post("/wallet/siacoins", fmt.Sprintf("amount=%s&destination=%s", hastings, dest)) + var value types.Currency + if _, err := fmt.Sscan(hastings, &value); err != nil { + die("Failed to parse amount", err) + } + var hash types.UnlockHash + if _, err := fmt.Sscan(dest, &hash); err != nil { + die("Failed to parse destination address", err) + } + _, err = httpClient.WalletSiacoinsPost(value, hash) if err != nil { die("Could not send siacoins:", err) } @@ -397,7 +390,15 @@ func walletsendsiacoinscmd(amount, dest string) { // walletsendsiafundscmd sends siafunds to a destination address. func walletsendsiafundscmd(amount, dest string) { - err := post("/wallet/siafunds", fmt.Sprintf("amount=%s&destination=%s", amount, dest)) + var value types.Currency + if _, err := fmt.Sscan(amount, &value); err != nil { + die("Failed to parse amount", err) + } + var hash types.UnlockHash + if _, err := fmt.Sscan(dest, &hash); err != nil { + die("Failed to parse destination address", err) + } + _, err := httpClient.WalletSiafundsPost(value, hash) if err != nil { die("Could not send siafunds:", err) } @@ -406,13 +407,11 @@ func walletsendsiafundscmd(amount, dest string) { // walletbalancecmd retrieves and displays information about the wallet. func walletbalancecmd() { - status := new(api.WalletGET) - err := getAPI("/wallet", status) + status, err := httpClient.WalletGet() if err != nil { die("Could not get wallet status:", err) } - var fees api.TpoolFeeGET - err = getAPI("/tpool/fee", &fees) + fees, err := httpClient.TransactionPoolFeeGet() if err != nil { die("Could not get fee estimation:", err) } @@ -458,8 +457,7 @@ func walletsweepcmd() { die("Reading seed failed:", err) } - var swept api.WalletSweepPOST - err = postResp("/wallet/sweep/seed", fmt.Sprintf("seed=%s&dictionary=%s", seed, "english"), &swept) + swept, err := httpClient.WalletSweepPost(seed, "english") if err != nil { die("Could not sweep seed:", err) } @@ -469,8 +467,7 @@ func walletsweepcmd() { // wallettransactionscmd lists all of the transactions related to the wallet, // providing a net flow of siacoins and siafunds for each. 
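// A short sketch of a programmatic send using the same client methods as
// walletsendsiacoinscmd above (assumes an unlocked wallet; the Address field
// on WalletAddressGET is an assumption about that struct, used here only to
// obtain a valid destination):
//
//	addr, err := httpClient.WalletAddressGet()
//	if err != nil {
//		die("Could not get address:", err)
//	}
//	var amount types.Currency
//	fmt.Sscan("1000000000000000000000000", &amount) // 1 SC = 10^24 hastings
//	if _, err := httpClient.WalletSiacoinsPost(amount, addr.Address); err != nil {
//		die("Could not send siacoins:", err)
//	}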
func wallettransactionscmd() { - wtg := new(api.WalletTransactionsGET) - err := getAPI("/wallet/transactions?startheight=0&endheight=10000000", wtg) + wtg, err := httpClient.WalletTransactionsGet(0, math.MaxUint64) if err != nil { die("Could not fetch transaction history:", err) } @@ -536,8 +533,7 @@ func walletunlockcmd() { password := os.Getenv("SIA_WALLET_PASSWORD") if password != "" && !initPassword { fmt.Println("Using SIA_WALLET_PASSWORD environment variable") - qs := fmt.Sprintf("encryptionpassword=%s&dictonary=%s", password, "english") - err := post("/wallet/unlock", qs) + err := httpClient.WalletUnlockPost(password, "english") if err != nil { fmt.Println("Automatic unlock failed!") } else { @@ -549,8 +545,7 @@ func walletunlockcmd() { if err != nil { die("Reading password failed:", err) } - qs := fmt.Sprintf("encryptionpassword=%s&dictonary=%s", password, "english") - err = post("/wallet/unlock", qs) + err = httpClient.WalletUnlockPost(password, "english") if err != nil { die("Could not unlock wallet:", err) } diff --git a/node/api/client/daemon.go b/node/api/client/daemon.go index e8a68bbf97..e018dba2ef 100644 --- a/node/api/client/daemon.go +++ b/node/api/client/daemon.go @@ -13,3 +13,15 @@ func (c *Client) DaemonStopGet() (err error) { err = c.get("/daemon/stop", nil) return } + +// DaemonUpdateGet checks for an available daemon update. +func (c *Client) DaemonUpdateGet() (dig api.DaemonUpdateGet, err error) { + err = c.get("/daemon/update", nil) + return +} + +// DaemonUpdatePost updates the daemon. +func (c *Client) DaemonUpdatePost() (err error) { + err = c.post("/daemon/update", "", nil) + return +} diff --git a/node/api/client/gateway.go b/node/api/client/gateway.go index e529de453a..075ee58f0e 100644 --- a/node/api/client/gateway.go +++ b/node/api/client/gateway.go @@ -23,6 +23,13 @@ func (c *Client) GatewayConnectPost(address modules.NetAddress) (err error) { return } +// GatewayDisconnectPost uses the /gateway/disconnect/:address endpoint to +// disconnect the gateway from a peer. +func (c *Client) GatewayDisconnectPost(address modules.NetAddress) (err error) { + err = c.post("/gateway/disconnect/"+string(address), "", nil) + return +} + // GatewayGet requests the /gateway api resource func (c *Client) GatewayGet() (gwg api.GatewayGET, err error) { err = c.get("/gateway", &gwg) diff --git a/node/api/client/host.go b/node/api/client/host.go index be4ffa1ff2..87ee757d07 100644 --- a/node/api/client/host.go +++ b/node/api/client/host.go @@ -1,12 +1,54 @@ package client import ( + "fmt" "net/url" "strconv" + "github.com/NebulousLabs/Sia/crypto" + "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/node/api" ) +// HostParam is a parameter in the host's settings that can be changed via the +// API. It is primarily used as a helper struct to ensure type safety. +type HostParam string + +var ( + // HostParamCollateralBudget is the collateral budget of the host in + // hastings. + HostParamCollateralBudget = HostParam("collateralbudget") + // HostParamMaxCollateral is the max collateral of the host in hastings. + HostParamMaxCollateral = HostParam("maxcollateral") + // HostParamMinContractPrice is the min contract price in hastings. + HostParamMinContractPrice = HostParam("mincontractprice") + // HostParamMinDownloadBandwidthPrice is the min download bandwidth price + // in hastings/byte. + HostParamMinDownloadBandwidthPrice = HostParam("mindownloadbandwidthprice") + // HostParamMinUploadBandwidthPrice is the min upload bandwidth price in + // hastings/byte. 
+ HostParamMinUploadBandwidthPrice = HostParam("minuploadbandwidthprice") + // HostParamCollateral is the host's collateral in hastings/byte/block. + HostParamCollateral = HostParam("collateral") + // HostParamMinStoragePrice is the minimum storage price in + // hastings/byte/block. + HostParamMinStoragePrice = HostParam("minstorageprice") + // HostParamAcceptingContracts indicates if the host is accepting new + // contracts. + HostParamAcceptingContracts = HostParam("acceptingcontracts") + // HostParamMaxDuration is the max duration of a contract in blocks. + HostParamMaxDuration = HostParam("maxduration") + // HostParamWindowSize is the size of the proof window in blocks. + HostParamWindowSize = HostParam("windowsize") + // HostParamMaxDownloadBatchSize is the maximum size of the download batch + // size in bytes. + HostParamMaxDownloadBatchSize = HostParam("maxdownloadbatchsize") + // HostParamMaxReviseBatchSize is the maximum size of the revise batch size. + HostParamMaxReviseBatchSize = HostParam("maxrevisebatchsize") + // HostParamNetAddress is the announced netaddress of the host. + HostParamNetAddress = HostParam("netaddress") +) + // HostAnnouncePost uses the /host/announce endpoint to announce the host to // the network func (c *Client) HostAnnouncePost() (err error) { @@ -14,11 +56,29 @@ func (c *Client) HostAnnouncePost() (err error) { return } -// HostAcceptingContractsPost uses the /host endpoint to change the acceptingcontracts field of the host's settings -func (c *Client) HostAcceptingContractsPost(acceptingContracts bool) (err error) { - values := url.Values{} - values.Set("acceptingcontracts", strconv.FormatBool(acceptingContracts)) - err = c.post("/host", values.Encode(), nil) +// HostAnnounceAddrPost uses the /host/anounce endpoint to announce the host to +// the network using the provided address. +func (c *Client) HostAnnounceAddrPost(address modules.NetAddress) (err error) { + err = c.post("/host/announce", "netaddress="+string(address), nil) + return +} + +// HostEstimateScoreGet requests the /host/estimatescore endpoint. +func (c *Client) HostEstimateScoreGet(param, value string) (eg api.HostEstimateScoreGET, err error) { + err = c.get(fmt.Sprintf("/host/estimatescore?%v=%v", param, value), &eg) + return +} + +// HostGet requests the /host endpoint. +func (c *Client) HostGet() (hg api.HostGET, err error) { + err = c.get("/host", &hg) + return +} + +// HostModifySettingPost uses the /host endpoint to change a param of the host +// settings to a certain value. +func (c *Client) HostModifySettingPost(param HostParam, value interface{}) (err error) { + err = c.post("/host", string(param)+"="+fmt.Sprint(value), nil) return } @@ -32,8 +92,34 @@ func (c *Client) HostStorageFoldersAddPost(path string, size uint64) (err error) return } -// HostGet requests the /host endpoint. -func (c *Client) HostGet() (hg api.HostGET, err error) { - err = c.get("/host", &hg) +// HostStorageFoldersRemovePost uses the /host/storage/folders/remove api +// endpoint to remove a storage folder from a host. +func (c *Client) HostStorageFoldersRemovePost(path string) (err error) { + values := url.Values{} + values.Set("path", path) + err = c.post("/host/storage/folders/remove", values.Encode(), nil) + return +} + +// HostStorageFoldersResizePost uses the /host/storage/folders/resize api +// endpoint to resize an existing storage folder. 
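// Because HostParam is a defined string type rather than a plain string, a
// user-supplied setting name has to be converted explicitly before it can be
// passed to HostModifySettingPost, which is exactly what hostconfigcmd does:
//
//	err = httpClient.HostModifySettingPost(client.HostParam(param), value)
//
// while code that targets a known setting uses the named constants directly,
// e.g. HostParamAcceptingContracts in hostannouncecmd and the siatest helpers.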
+func (c *Client) HostStorageFoldersResizePost(path string, size uint64) (err error) { + values := url.Values{} + values.Set("path", path) + values.Set("newsize", strconv.FormatUint(size, 10)) + err = c.post("/host/storage/folders/resize", values.Encode(), nil) + return +} + +// HostStorageGet requests the /host/storage endpoint. +func (c *Client) HostStorageGet() (sg api.StorageGET, err error) { + err = c.get("/host/storage", &sg) + return +} + +// HostStorageSectorsDeletePost uses the /host/storage/sectors/delete endpoint +// to delete a sector from the host. +func (c *Client) HostStorageSectorsDeletePost(root crypto.Hash) (err error) { + err = c.post("/host/storage/sectors/delete/"+root.String(), "", nil) return } diff --git a/node/api/client/hostdb.go b/node/api/client/hostdb.go index b0b0d8c387..2f2967a338 100644 --- a/node/api/client/hostdb.go +++ b/node/api/client/hostdb.go @@ -1,9 +1,24 @@ package client -import "github.com/NebulousLabs/Sia/node/api" +import ( + "github.com/NebulousLabs/Sia/node/api" + "github.com/NebulousLabs/Sia/types" +) -// HostDbActiveGet requests the /hostdb/active endpoint's resources +// HostDbActiveGet requests the /hostdb/active endpoint's resources. func (c *Client) HostDbActiveGet() (hdag api.HostdbActiveGET, err error) { err = c.get("/hostdb/active", &hdag) return } + +// HostDbAllGet requests the /hostdb/all endpoint's resources. +func (c *Client) HostDbAllGet() (hdag api.HostdbAllGET, err error) { + err = c.get("/hostdb/all", &hdag) + return +} + +// HostDbHostsGet request the /hostdb/hosts/:pubkey endpoint's resources. +func (c *Client) HostDbHostsGet(pk types.SiaPublicKey) (hhg api.HostdbHostsGET, err error) { + err = c.get("/hostdb/hosts/"+pk.String(), &hhg) + return +} diff --git a/node/api/client/miner.go b/node/api/client/miner.go index 24f534814d..2e856a37a1 100644 --- a/node/api/client/miner.go +++ b/node/api/client/miner.go @@ -2,9 +2,16 @@ package client import ( "github.com/NebulousLabs/Sia/encoding" + "github.com/NebulousLabs/Sia/node/api" "github.com/NebulousLabs/Sia/types" ) +// MinerGet requests the /miner endpoint's resources. +func (c *Client) MinerGet() (mg api.MinerGET, err error) { + err = c.get("/miner", &mg) + return +} + // MinerHeaderGet uses the /miner/header endpoint to get a header for work. func (c *Client) MinerHeaderGet() (target types.Target, bh types.BlockHeader, err error) { targetAndHeader, err := c.getRawResponse("/miner/header") diff --git a/node/api/client/renter.go b/node/api/client/renter.go index 84e573688a..9d0c84b61a 100644 --- a/node/api/client/renter.go +++ b/node/api/client/renter.go @@ -30,6 +30,15 @@ func (c *Client) RenterDownloadGet(siaPath, destination string, offset, length u return } +// RenterDownloadFullGet uses the /renter/download endpoint to download a full +// file. +func (c *Client) RenterDownloadFullGet(siaPath, destination string, async bool) (err error) { + query := fmt.Sprintf("%s?destination=%s&httpresp=false&async=%v", + siaPath, destination, async) + err = c.get("/renter/download/"+query, nil) + return +} + // RenterDownloadsGet requests the /renter/downloads resource func (c *Client) RenterDownloadsGet() (rdq api.RenterDownloadQueue, err error) { err = c.get("/renter/downloads", &rdq) @@ -44,12 +53,18 @@ func (c *Client) RenterDownloadHTTPResponseGet(siaPath string, offset, length ui return } -// RenterFilesGet requests the /renter/files resource +// RenterFilesGet requests the /renter/files resource. 
func (c *Client) RenterFilesGet() (rf api.RenterFiles, err error) { err = c.get("/renter/files", &rf) return } +// RenterGet requests the /renter resource. +func (c *Client) RenterGet() (rg api.RenterGET, err error) { + err = c.get("/renter", &rg) + return +} + // RenterPostAllowance uses the /renter endpoint to change the renter's allowance func (c *Client) RenterPostAllowance(allowance modules.Allowance) (err error) { values := url.Values{} @@ -61,6 +76,18 @@ func (c *Client) RenterPostAllowance(allowance modules.Allowance) (err error) { return } +// RenterCancelAllowance uses the /renter endpoint to cancel the allowance. +func (c *Client) RenterCancelAllowance() (err error) { + err = c.RenterPostAllowance(modules.Allowance{}) + return +} + +// RenterPricesGet requests the /renter/prices endpoint's resources. +func (c *Client) RenterPricesGet() (rpg api.RenterPricesGET, err error) { + err = c.get("/renter/prices", &rpg) + return +} + // RenterPostRateLimit uses the /renter endpoint to change the renter's bandwidth rate // limit. func (c *Client) RenterPostRateLimit(readBPS, writeBPS int64) (err error) { @@ -71,6 +98,12 @@ func (c *Client) RenterPostRateLimit(readBPS, writeBPS int64) (err error) { return } +// RenterRenamePost uses the /renter/rename/:siapath endpoint to rename a file. +func (c *Client) RenterRenamePost(siaPathOld, siaPathNew string) (err error) { + err = c.post("/renter/rename/"+siaPathOld, "newsiapath="+siaPathNew, nil) + return +} + // RenterStreamGet uses the /renter/stream endpoint to download data as a // stream. func (c *Client) RenterStreamGet(siaPath string) (resp []byte, err error) { @@ -85,7 +118,7 @@ func (c *Client) RenterStreamPartialGet(siaPath string, start, end uint64) (resp return } -// RenterUploadPost uses the /renter/upload endpoin to upload a file +// RenterUploadPost uses the /renter/upload endpoint to upload a file func (c *Client) RenterUploadPost(path, siaPath string, dataPieces, parityPieces uint64) (err error) { values := url.Values{} values.Set("source", path) @@ -94,3 +127,12 @@ func (c *Client) RenterUploadPost(path, siaPath string, dataPieces, parityPieces err = c.post(fmt.Sprintf("/renter/upload%v", siaPath), values.Encode(), nil) return } + +// RenterUploadDefaultPost uses the /renter/upload endpoint with default +// redundancy settings to upload a file. +func (c *Client) RenterUploadDefaultPost(path, siaPath string) (err error) { + values := url.Values{} + values.Set("source", path) + err = c.post(fmt.Sprintf("/renter/upload%v", siaPath), values.Encode(), nil) + return +} diff --git a/node/api/client/transactionpool.go b/node/api/client/transactionpool.go new file mode 100644 index 0000000000..cc0089c254 --- /dev/null +++ b/node/api/client/transactionpool.go @@ -0,0 +1,9 @@ +package client + +import "github.com/NebulousLabs/Sia/node/api" + +// TransactionPoolFeeGet uses the /tpool/fee endpoint to get a fee estimation. +func (c *Client) TransactionPoolFeeGet() (tfg api.TpoolFeeGET, err error) { + err = c.get("/tpool/fee", &tfg) + return +} diff --git a/node/api/client/wallet.go b/node/api/client/wallet.go index dff2fdfbd2..dbb96f8c31 100644 --- a/node/api/client/wallet.go +++ b/node/api/client/wallet.go @@ -16,22 +16,76 @@ func (c *Client) WalletAddressGet() (wag api.WalletAddressGET, err error) { return } +// WalletAddressesGet requests the wallets known addresses from the +// /wallet/addresses endpoint. 
+func (c *Client) WalletAddressesGet() (wag api.WalletAddressesGET, err error) { + err = c.get("/wallet/addresses", &wag) + return +} + +// WalletChangePasswordPost uses the /wallet/changepassword endpoint to change +// the wallet's password. +func (c *Client) WalletChangePasswordPost(currentPassword, newPassword string) (err error) { + values := url.Values{} + values.Set("newpassword", newPassword) + values.Set("encryptionpassword", currentPassword) + err = c.post("/wallet/changepassword", values.Encode(), nil) + return +} + // WalletInitPost uses the /wallet/init endpoint to initialize and encrypt a // wallet -func (c *Client) WalletInitPost(password string, force bool) (wip api.WalletInitPOST, err error) { +func (c *Client) WalletInitPost(password, dictionary string, force bool) (wip api.WalletInitPOST, err error) { values := url.Values{} + values.Set("dictionary", dictionary) values.Set("encryptionpassword", password) values.Set("force", strconv.FormatBool(force)) err = c.post("/wallet/init", values.Encode(), &wip) return } +// WalletInitSeedPost uses the /wallet/init/seed endpoint to initialize and +// encrypt a wallet using a given seed. +func (c *Client) WalletInitSeedPost(seed, password, dictionary string, force bool) (err error) { + values := url.Values{} + values.Set("seed", seed) + values.Set("dictionary", dictionary) + values.Set("encryptionpassword", password) + values.Set("force", strconv.FormatBool(force)) + err = c.post("/wallet/init/seed", values.Encode(), nil) + return +} + // WalletGet requests the /wallet api resource func (c *Client) WalletGet() (wg api.WalletGET, err error) { err = c.get("/wallet", &wg) return } +// WalletLockPost uses the /wallet/lock endpoint to lock the wallet. +func (c *Client) WalletLockPost() (err error) { + err = c.post("/wallet/lock", "", nil) + return +} + +// WalletSeedPost uses the /wallet/seed endpoint to add a seed to the wallet's list +// of seeds. +func (c *Client) WalletSeedPost(seed, password, dictionary string) (err error) { + values := url.Values{} + values.Set("seed", seed) + values.Set("dictionary", dictionary) + values.Set("encryptionpassword", password) + err = c.post("/wallet/seed", values.Encode(), nil) + return +} + +// WalletSeedsGet uses the /wallet/seeds endpoint to return the wallet's +// current seeds. +func (c *Client) WalletSeedsGet() (wsg api.WalletSeedsGET, err error) { + err = c.get("/wallet/seeds", &wsg) + return +} + // WalletSiacoinsMultiPost uses the /wallet/siacoin api endpoint to send money // to multiple addresses at once func (c *Client) WalletSiacoinsMultiPost(outputs []types.SiacoinOutput) (wsp api.WalletSiacoinsPOST, err error) { @@ -55,6 +109,36 @@ func (c *Client) WalletSiacoinsPost(amount types.Currency, destination types.Unl return } +// WalletSiafundsPost uses the /wallet/siafunds api endpoint to send siafunds +// to a single address. +func (c *Client) WalletSiafundsPost(amount types.Currency, destination types.UnlockHash) (wsp api.WalletSiafundsPOST, err error) { + values := url.Values{} + values.Set("amount", amount.String()) + values.Set("destination", destination.String()) + err = c.post("/wallet/siafunds", values.Encode(), &wsp) + return +} + +// WalletSiagKeyPost uses the /wallet/siagkey endpoint to load a siag key into +// the wallet. 
+func (c *Client) WalletSiagKeyPost(keyfiles, password string) (err error) { + values := url.Values{} + values.Set("keyfiles", keyfiles) + values.Set("encryptionpassword", password) + err = c.post("/wallet/siagkey", values.Encode(), nil) + return +} + +// WalletSweepPost uses the /wallet/sweep/seed endpoint to sweep a seed into +// the current wallet. +func (c *Client) WalletSweepPost(seed, dictionary string) (wsp api.WalletSweepPOST, err error) { + values := url.Values{} + values.Set("seed", seed) + values.Set("dictionary", dictionary) + err = c.post("/wallet/sweep/seed", values.Encode(), &wsp) + return +} + // WalletTransactionsGet requests the/wallet/transactions api resource for a // certain startheight and endheight func (c *Client) WalletTransactionsGet(startHeight types.BlockHeight, endHeight types.BlockHeight) (wtg api.WalletTransactionsGET, err error) { @@ -65,9 +149,20 @@ func (c *Client) WalletTransactionsGet(startHeight types.BlockHeight, endHeight // WalletUnlockPost uses the /wallet/unlock endpoint to unlock the wallet with // a given encryption key. Per default this key is the seed. -func (c *Client) WalletUnlockPost(password string) (err error) { +func (c *Client) WalletUnlockPost(password, dictionary string) (err error) { values := url.Values{} values.Set("encryptionpassword", password) + values.Set("dictionary", dictionary) err = c.post("/wallet/unlock", values.Encode(), nil) return } + +// Wallet033xPost uses the /wallet/033x endpoint to load a v0.3.3.x wallet into +// the current wallet. +func (c *Client) Wallet033xPost(path, password string) (err error) { + values := url.Values{} + values.Set("source", path) + values.Set("encryptionpassword", password) + err = c.post("/wallet/033x", values.Encode(), nil) + return +} diff --git a/node/api/daemon.go b/node/api/daemon.go index 36a7ca9ea8..bdfbaac337 100644 --- a/node/api/daemon.go +++ b/node/api/daemon.go @@ -6,3 +6,10 @@ type DaemonVersionGet struct { GitRevision string BuildTime string } + +// DaemonUpdateGet contains information about a potential available update for +// the daemon. +type DaemonUpdateGet struct { + Available bool `json:"available"` + Version string `json:"version"` +} diff --git a/node/api/wallet.go b/node/api/wallet.go index 3edeb92103..c48ddcdbba 100644 --- a/node/api/wallet.go +++ b/node/api/wallet.go @@ -504,12 +504,12 @@ func (api *API) walletTransactionsHandler(w http.ResponseWriter, req *http.Reque return } // Get the start and end blocks. 
- start, err := strconv.Atoi(startheightStr) + start, err := strconv.ParseUint(startheightStr, 10, 64) if err != nil { WriteError(w, Error{"parsing integer value for parameter `startheight` failed: " + err.Error()}, http.StatusBadRequest) return } - end, err := strconv.Atoi(endheightStr) + end, err := strconv.ParseUint(endheightStr, 10, 64) if err != nil { WriteError(w, Error{"parsing integer value for parameter `endheight` failed: " + err.Error()}, http.StatusBadRequest) return diff --git a/siatest/testgroup.go b/siatest/testgroup.go index a832fde550..e5a259b7b1 100644 --- a/siatest/testgroup.go +++ b/siatest/testgroup.go @@ -170,7 +170,7 @@ func addStorageFolderToHosts(hosts map[*TestNode]struct{}) error { // announceHosts adds storage to each host and announces them to the group func announceHosts(hosts map[*TestNode]struct{}) error { for host := range hosts { - if err := host.HostAcceptingContractsPost(true); err != nil { + if err := host.HostModifySettingPost(client.HostParamAcceptingContracts, true); err != nil { return errors.AddContext(err, "failed to set host to accepting contracts") } if err := host.HostAnnouncePost(); err != nil { diff --git a/siatest/testnode.go b/siatest/testnode.go index 646608a4f1..effa8887d1 100644 --- a/siatest/testnode.go +++ b/siatest/testnode.go @@ -58,14 +58,14 @@ func NewCleanNode(nodeParams node.NodeParams) (*TestNode, error) { tn := &TestNode{*s, *c, ""} // Init wallet - wip, err := tn.WalletInitPost("", false) + wip, err := tn.WalletInitPost("", "english", false) if err != nil { return nil, err } tn.primarySeed = wip.PrimarySeed // Unlock wallet - if err := tn.WalletUnlockPost(tn.primarySeed); err != nil { + if err := tn.WalletUnlockPost(tn.primarySeed, "english"); err != nil { return nil, err } From 1859ba3c4673c9c72728a8ca24c464e85bc13b4e Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 20 Apr 2018 11:01:16 -0400 Subject: [PATCH 086/212] remove shorthands -u and -x, stop support for alternate dictionaries and change HostParams to consts --- cmd/siac/main.go | 4 ++-- cmd/siac/walletcmd.go | 12 ++++++------ node/api/client/host.go | 2 +- node/api/client/wallet.go | 15 +++++---------- siatest/testnode.go | 4 ++-- 5 files changed, 16 insertions(+), 21 deletions(-) diff --git a/cmd/siac/main.go b/cmd/siac/main.go index 65e1ffc693..99a722d4eb 100644 --- a/cmd/siac/main.go +++ b/cmd/siac/main.go @@ -141,8 +141,8 @@ func main() { } // initialize client root.PersistentFlags().StringVarP(&httpClient.Address, "addr", "a", "localhost:9980", "which host/port to communicate with (i.e. 
the host/port siad is listening on)") - root.PersistentFlags().StringVarP(&httpClient.Password, "apipassword", "x", apiPassword, "the password for the API's http authentication") - root.PersistentFlags().StringVarP(&httpClient.UserAgent, "useragent", "u", "Sia-Agent", "the useragent used by siac to connect to the daemon's API") + root.PersistentFlags().StringVarP(&httpClient.Password, "apipassword", "", apiPassword, "the password for the API's http authentication") + root.PersistentFlags().StringVarP(&httpClient.UserAgent, "useragent", "", "Sia-Agent", "the useragent used by siac to connect to the daemon's API") // run if err := root.Execute(); err != nil { diff --git a/cmd/siac/walletcmd.go b/cmd/siac/walletcmd.go index 6767a73b0b..3c127df400 100644 --- a/cmd/siac/walletcmd.go +++ b/cmd/siac/walletcmd.go @@ -255,7 +255,7 @@ func walletinitcmd() { die(err) } } - er, err := httpClient.WalletInitPost(password, "english", initForce) + er, err := httpClient.WalletInitPost(password, initForce) if err != nil { die("Error when encrypting wallet:", err) } @@ -282,7 +282,7 @@ func walletinitseedcmd() { die(err) } } - err = httpClient.WalletInitSeedPost(seed, password, "english", initForce) + err = httpClient.WalletInitSeedPost(seed, password, initForce) if err != nil { die("Could not initialize wallet from seed:", err) } @@ -316,7 +316,7 @@ func walletloadseedcmd() { if err != nil { die("Reading password failed:", err) } - err = httpClient.WalletSeedPost(seed, password, "english") + err = httpClient.WalletSeedPost(seed, password) if err != nil { die("Could not add seed:", err) } @@ -457,7 +457,7 @@ func walletsweepcmd() { die("Reading seed failed:", err) } - swept, err := httpClient.WalletSweepPost(seed, "english") + swept, err := httpClient.WalletSweepPost(seed) if err != nil { die("Could not sweep seed:", err) } @@ -533,7 +533,7 @@ func walletunlockcmd() { password := os.Getenv("SIA_WALLET_PASSWORD") if password != "" && !initPassword { fmt.Println("Using SIA_WALLET_PASSWORD environment variable") - err := httpClient.WalletUnlockPost(password, "english") + err := httpClient.WalletUnlockPost(password) if err != nil { fmt.Println("Automatic unlock failed!") } else { @@ -545,7 +545,7 @@ func walletunlockcmd() { if err != nil { die("Reading password failed:", err) } - err = httpClient.WalletUnlockPost(password, "english") + err = httpClient.WalletUnlockPost(password) if err != nil { die("Could not unlock wallet:", err) } diff --git a/node/api/client/host.go b/node/api/client/host.go index 87ee757d07..ddb9dd26c0 100644 --- a/node/api/client/host.go +++ b/node/api/client/host.go @@ -14,7 +14,7 @@ import ( // API. It is primarily used as a helper struct to ensure type safety. type HostParam string -var ( +const ( // HostParamCollateralBudget is the collateral budget of the host in // hastings. 
HostParamCollateralBudget = HostParam("collateralbudget") diff --git a/node/api/client/wallet.go b/node/api/client/wallet.go index dbb96f8c31..754e251070 100644 --- a/node/api/client/wallet.go +++ b/node/api/client/wallet.go @@ -35,9 +35,8 @@ func (c *Client) WalletChangePasswordPost(currentPassword, newPassword string) ( // WalletInitPost uses the /wallet/init endpoint to initialize and encrypt a // wallet -func (c *Client) WalletInitPost(password, dictionary string, force bool) (wip api.WalletInitPOST, err error) { +func (c *Client) WalletInitPost(password string, force bool) (wip api.WalletInitPOST, err error) { values := url.Values{} - values.Set("dictionary", dictionary) values.Set("encryptionpassword", password) values.Set("force", strconv.FormatBool(force)) err = c.post("/wallet/init", values.Encode(), &wip) @@ -46,10 +45,9 @@ func (c *Client) WalletInitPost(password, dictionary string, force bool) (wip ap // WalletInitSeedPost uses the /wallet/init/seed endpoint to initialize and // encrypt a wallet using a given seed. -func (c *Client) WalletInitSeedPost(seed, password, dictionary string, force bool) (err error) { +func (c *Client) WalletInitSeedPost(seed, password string, force bool) (err error) { values := url.Values{} values.Set("seed", seed) - values.Set("dictionary", dictionary) values.Set("encryptionpassword", password) values.Set("force", strconv.FormatBool(force)) err = c.post("/wallet/init/seed", values.Encode(), nil) @@ -70,10 +68,9 @@ func (c *Client) WalletLockPost() (err error) { // WalletSeedPost uses the /wallet/seed endpoint to add a seed to the wallet's list // of seeds. -func (c *Client) WalletSeedPost(seed, password, dictionary string) (err error) { +func (c *Client) WalletSeedPost(seed, password string) (err error) { values := url.Values{} values.Set("seed", seed) - values.Set("dictionary", dictionary) values.Set("encryptionpassword", password) err = c.post("/wallet/seed", values.Encode(), nil) return @@ -131,10 +128,9 @@ func (c *Client) WalletSiagKeyPost(keyfiles, password string) (err error) { // WalletSweepPost uses the /wallet/sweep/seed endpoint to sweep a seed into // the current wallet. -func (c *Client) WalletSweepPost(seed, dictionary string) (wsp api.WalletSweepPOST, err error) { +func (c *Client) WalletSweepPost(seed string) (wsp api.WalletSweepPOST, err error) { values := url.Values{} values.Set("seed", seed) - values.Set("dictionary", dictionary) err = c.post("/wallet/sweep/seed", values.Encode(), &wsp) return } @@ -149,10 +145,9 @@ func (c *Client) WalletTransactionsGet(startHeight types.BlockHeight, endHeight // WalletUnlockPost uses the /wallet/unlock endpoint to unlock the wallet with // a given encryption key. Per default this key is the seed. 
-func (c *Client) WalletUnlockPost(password, dictionary string) (err error) { +func (c *Client) WalletUnlockPost(password string) (err error) { values := url.Values{} values.Set("encryptionpassword", password) - values.Set("dictionary", dictionary) err = c.post("/wallet/unlock", values.Encode(), nil) return } diff --git a/siatest/testnode.go b/siatest/testnode.go index effa8887d1..646608a4f1 100644 --- a/siatest/testnode.go +++ b/siatest/testnode.go @@ -58,14 +58,14 @@ func NewCleanNode(nodeParams node.NodeParams) (*TestNode, error) { tn := &TestNode{*s, *c, ""} // Init wallet - wip, err := tn.WalletInitPost("", "english", false) + wip, err := tn.WalletInitPost("", false) if err != nil { return nil, err } tn.primarySeed = wip.PrimarySeed // Unlock wallet - if err := tn.WalletUnlockPost(tn.primarySeed, "english"); err != nil { + if err := tn.WalletUnlockPost(tn.primarySeed); err != nil { return nil, err } From 15b81b23e0dd7e75ba5b9aab3084662c8138fe08 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Fri, 20 Apr 2018 12:51:15 -0400 Subject: [PATCH 087/212] stop timer early if lock acquired --- sync/trymutex.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sync/trymutex.go b/sync/trymutex.go index 3b4684d05f..c4c7760f41 100644 --- a/sync/trymutex.go +++ b/sync/trymutex.go @@ -43,10 +43,12 @@ func (tm *TryMutex) TryLock() bool { func (tm *TryMutex) TryLockTimed(t time.Duration) bool { tm.once.Do(tm.init) + timer := time.NewTimer(t) select { case <-tm.lock: + timer.Stop() return true - case <-time.After(t): + case <-timer.C: return false } } From 597f7cf561a1b368b196371dcc905567d8a2ee18 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 20 Apr 2018 13:26:40 -0400 Subject: [PATCH 088/212] remove skip --- modules/consensus/accept_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/consensus/accept_test.go b/modules/consensus/accept_test.go index 8b40cd11ef..2b215c449e 100644 --- a/modules/consensus/accept_test.go +++ b/modules/consensus/accept_test.go @@ -866,7 +866,6 @@ func TestInconsistentCheck(t *testing.T) { // This test checks that the hardfork scheduled for block 21,000 rolls through // smoothly. func TestTaxHardfork(t *testing.T) { - t.Skip("Removed tax compat code from testing due to NDFs") if testing.Short() { t.SkipNow() } From 049c4692a0277d0b250e6f42a01dcfbaa09946e0 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 20 Apr 2018 16:15:08 -0400 Subject: [PATCH 089/212] Fix gateway NDF --- modules/gateway/conn.go | 4 ++-- modules/gateway/gateway.go | 4 ++-- modules/gateway/nodes.go | 17 ++++++++++------- modules/gateway/nodes_test.go | 17 ++++++----------- modules/gateway/peers.go | 8 ++++---- modules/gateway/peers_test.go | 2 +- 6 files changed, 25 insertions(+), 27 deletions(-) diff --git a/modules/gateway/conn.go b/modules/gateway/conn.go index 167a8232c8..c36f9fcd3f 100644 --- a/modules/gateway/conn.go +++ b/modules/gateway/conn.go @@ -19,10 +19,10 @@ func (pc peerConn) RPCAddr() modules.NetAddress { return pc.dialbackAddr } -// dial will dial the input address and return a connection. dial appropriately +// staticDial will staticDial the input address and return a connection. staticDial appropriately // handles things like clean shutdown, fast shutdown, and chooses the correct // communication protocol. 
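
For context on the trymutex change above: a timer created by time.After is only reclaimed once it fires, so acquiring the lock early previously left a timer pending for the full timeout. Below is a minimal usage sketch of TryLockTimed, assuming the TryMutex type also exposes Unlock as the rest of the sync package implies; it is illustrative only, not part of the patch.

```go
package main

import (
	"fmt"
	"time"

	"github.com/NebulousLabs/Sia/sync"
)

func main() {
	var tm sync.TryMutex
	// Wait at most 50ms for the lock; with the change above the internal
	// timer is stopped as soon as the lock is acquired instead of lingering
	// until the timeout elapses.
	if tm.TryLockTimed(50 * time.Millisecond) {
		defer tm.Unlock()
		fmt.Println("lock acquired within the timeout")
	} else {
		fmt.Println("timed out waiting for the lock")
	}
}
```
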
-func (g *Gateway) dial(addr modules.NetAddress) (net.Conn, error) { +func (g *Gateway) staticDial(addr modules.NetAddress) (net.Conn, error) { dialer := &net.Dialer{ Cancel: g.threads.StopChan(), Timeout: dialTimeout, diff --git a/modules/gateway/gateway.go b/modules/gateway/gateway.go index 3c056e525f..e6a1899f25 100644 --- a/modules/gateway/gateway.go +++ b/modules/gateway/gateway.go @@ -152,7 +152,7 @@ type Gateway struct { threads siasync.ThreadGroup // Unique ID - id gatewayID + staticId gatewayID } type gatewayID [8]byte @@ -205,7 +205,7 @@ func New(addr string, bootstrap bool, persistDir string) (*Gateway, error) { } // Set Unique GatewayID - fastrand.Read(g.id[:]) + fastrand.Read(g.staticId[:]) // Create the logger. g.log, err = persist.NewFileLogger(filepath.Join(g.persistDir, logFile)) diff --git a/modules/gateway/nodes.go b/modules/gateway/nodes.go index b1446445b9..c41c084d3a 100644 --- a/modules/gateway/nodes.go +++ b/modules/gateway/nodes.go @@ -44,12 +44,12 @@ func (g *Gateway) addNode(addr modules.NetAddress) error { return nil } -// pingNode verifies that there is a reachable node at the provided address +// staticPingNode verifies that there is a reachable node at the provided address // by performing the Sia gateway handshake protocol. -func (g *Gateway) pingNode(addr modules.NetAddress) error { +func (g *Gateway) staticPingNode(addr modules.NetAddress) error { // Ping the untrusted node to see whether or not there's actually a // reachable node at the provided address. - conn, err := g.dial(addr) + conn, err := g.staticDial(addr) if err != nil { return err } @@ -70,7 +70,7 @@ func (g *Gateway) pingNode(addr modules.NetAddress) error { // inaccurate NetAddress. ourHeader := sessionHeader{ GenesisID: types.GenesisID, - UniqueID: g.id, + UniqueID: g.staticId, NetAddress: modules.NetAddress(conn.LocalAddr().String()), } if err := exchangeOurHeader(conn, ourHeader); err != nil { @@ -267,11 +267,14 @@ func (g *Gateway) permanentNodePurger(closeChan chan struct{}) { // through, which would cause the node to be pruned even though it may // be a good node. Because nodes are plentiful, this is an acceptable // bug. - if err = g.pingNode(node); err != nil { + if err = g.staticPingNode(node); err != nil { g.mu.Lock() - g.removeNode(node) + if len(g.nodes) > pruneNodeListLen { + // Check if the number of nodes is still above the threshold. + g.removeNode(node) + g.log.Debugf("INFO: removing node %q because it could not be reached during a random scan: %v", node, err) + } g.mu.Unlock() - g.log.Debugf("INFO: removing node %q because it could not be reached during a random scan: %v", node, err) } } } diff --git a/modules/gateway/nodes_test.go b/modules/gateway/nodes_test.go index 9ce0973ac5..dbc0976b2e 100644 --- a/modules/gateway/nodes_test.go +++ b/modules/gateway/nodes_test.go @@ -403,23 +403,18 @@ func TestHealthyNodeListPruning(t *testing.T) { } // Spin until all gateways have a nearly full node list. 
- success := false - for i := 0; i < 80; i++ { - success = true + err := build.Retry(1000, 100*time.Millisecond, func() error { for _, g := range gs { g.mu.RLock() gNodeLen := len(g.nodes) g.mu.RUnlock() if gNodeLen < healthyNodeListLen { - success = false - break + return errors.New("node is not connected to a sufficient number of peers") } } - if !success { - time.Sleep(time.Second * 1) - } - } - if !success { + return nil + }) + if err != nil { t.Fatal("peers are not sharing nodes with eachother") } @@ -465,7 +460,7 @@ func TestHealthyNodeListPruning(t *testing.T) { } // Close the remaining gateways. - err := gs[0].Close() + err = gs[0].Close() if err != nil { t.Error(err) } diff --git a/modules/gateway/peers.go b/modules/gateway/peers.go index 08403b00fc..6b0427130c 100644 --- a/modules/gateway/peers.go +++ b/modules/gateway/peers.go @@ -174,7 +174,7 @@ func (g *Gateway) managedAcceptConnPeer(conn net.Conn, remoteVersion string) err g.mu.RLock() ourHeader := sessionHeader{ GenesisID: types.GenesisID, - UniqueID: g.id, + UniqueID: g.staticId, NetAddress: g.myAddr, } g.mu.RUnlock() @@ -217,7 +217,7 @@ func (g *Gateway) managedAcceptConnPeer(conn net.Conn, remoteVersion string) err // do this in a goroutine so that we can begin communicating with the peer // immediately. go func() { - err := g.pingNode(remoteAddr) + err := g.staticPingNode(remoteAddr) if err == nil { g.mu.Lock() g.addNode(remoteAddr) @@ -370,7 +370,7 @@ func (g *Gateway) managedConnectPeer(conn net.Conn, remoteVersion string, remote g.mu.RLock() ourHeader := sessionHeader{ GenesisID: types.GenesisID, - UniqueID: g.id, + UniqueID: g.staticId, NetAddress: g.myAddr, } g.mu.RUnlock() @@ -407,7 +407,7 @@ func (g *Gateway) managedConnect(addr modules.NetAddress) error { } // Dial the peer and perform peer initialization. 
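
The rewritten test above swaps a hand-rolled spin loop for build.Retry, which polls a closure until it returns nil or the attempts run out. A minimal sketch of that pattern with the same (1000, 100ms) parameters follows; the ready() check is a hypothetical stand-in for the real condition.

```go
package example

import (
	"errors"
	"testing"
	"time"

	"github.com/NebulousLabs/Sia/build"
)

// ready is a hypothetical stand-in for the real condition, e.g. "does every
// gateway know at least healthyNodeListLen nodes?".
func ready() bool { return true }

func TestRetryPattern(t *testing.T) {
	// Poll the condition instead of sleeping for a fixed amount of time.
	err := build.Retry(1000, 100*time.Millisecond, func() error {
		if !ready() {
			return errors.New("condition not met yet")
		}
		return nil
	})
	if err != nil {
		t.Fatal("condition never became true:", err)
	}
}
```
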
- conn, err := g.dial(addr) + conn, err := g.staticDial(addr) if err != nil { return err } diff --git a/modules/gateway/peers_test.go b/modules/gateway/peers_test.go index 3c5e333bbd..3fcdbb10b0 100644 --- a/modules/gateway/peers_test.go +++ b/modules/gateway/peers_test.go @@ -600,7 +600,7 @@ func TestConnectRejectsVersions(t *testing.T) { { version: minimumAcceptablePeerVersion, msg: "Connect should not succeed when peer is connecting to itself", - uniqueID: g.id, + uniqueID: g.staticId, genesisID: types.GenesisID, errWant: errOurAddress.Error(), localErrWant: errOurAddress.Error(), From 459c629c61ccc459cbf472b1920a43928e95dfb7 Mon Sep 17 00:00:00 2001 From: MSevey Date: Mon, 23 Apr 2018 13:56:25 -0400 Subject: [PATCH 090/212] added instruction to resource your profile after setting go env variables --- doc/Guide to Contributing to Sia.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/Guide to Contributing to Sia.md b/doc/Guide to Contributing to Sia.md index 52f05d3451..0af0a35811 100644 --- a/doc/Guide to Contributing to Sia.md +++ b/doc/Guide to Contributing to Sia.md @@ -40,6 +40,8 @@ $ mkdir $HOME/golang $ echo 'export GOPATH=$HOME/golang' >> $HOME/.profile # add bin subdirectory to PATH environmental variable $ echo 'export PATH=$PATH:$GOPATH/bin' >> $HOME/.profile +# resource your profile +$ source $HOME/.profile ``` From 72fb20e63661c102e557aaba43bd1198577ab074 Mon Sep 17 00:00:00 2001 From: MSevey Date: Mon, 23 Apr 2018 15:24:49 -0400 Subject: [PATCH 091/212] Updated instructions for adding git remote for SSH key --- doc/Guide to Contributing to Sia.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/Guide to Contributing to Sia.md b/doc/Guide to Contributing to Sia.md index 0af0a35811..da031e0a69 100644 --- a/doc/Guide to Contributing to Sia.md +++ b/doc/Guide to Contributing to Sia.md @@ -119,6 +119,8 @@ $ cd $GOPATH/src/github.com/NebulousLabs/Sia # Add your fork as a remote. Name it whatever is convenient, # e.g your GitHub username $ git remote add https://github.com//Sia.git +# Or if you use an SSH key, create the remote with the following +$ git remote add git@github.com:/Sia.git ``` From 050fcfa899859e9fc999a070f8bc39221dc5f919 Mon Sep 17 00:00:00 2001 From: MSevey Date: Mon, 23 Apr 2018 15:29:08 -0400 Subject: [PATCH 092/212] minor spelling change --- doc/Guide to Contributing to Sia.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/Guide to Contributing to Sia.md b/doc/Guide to Contributing to Sia.md index da031e0a69..83b9f7b04a 100644 --- a/doc/Guide to Contributing to Sia.md +++ b/doc/Guide to Contributing to Sia.md @@ -40,7 +40,7 @@ $ mkdir $HOME/golang $ echo 'export GOPATH=$HOME/golang' >> $HOME/.profile # add bin subdirectory to PATH environmental variable $ echo 'export PATH=$PATH:$GOPATH/bin' >> $HOME/.profile -# resource your profile +# source your profile $ source $HOME/.profile ``` From 17082842cbfce978584cad54bbd7d404dfccab6a Mon Sep 17 00:00:00 2001 From: MSevey Date: Mon, 23 Apr 2018 15:35:23 -0400 Subject: [PATCH 093/212] Updated make section based on Makefile --- doc/Guide to Contributing to Sia.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/doc/Guide to Contributing to Sia.md b/doc/Guide to Contributing to Sia.md index 83b9f7b04a..64a585a8fe 100644 --- a/doc/Guide to Contributing to Sia.md +++ b/doc/Guide to Contributing to Sia.md @@ -69,13 +69,19 @@ $ cd $GOPATH/src/github.com/NebulousLabs/Sia # You have three Sia builds to choose from. 
# To build the standard release binary: -$ make release-std +$ make release # Or to build the release binary with race detection and an array debugging # asserts: -$ make release +$ make release-race # Or to build the developer binary (with a different genesis block, faster # block times, and other changes): -$ make +$ make dev +# Or build the developer binary with race detection: +$ make dev-race +# Build the debugger binary: +$ make debug +# Or build debugger binary with race detection: +$ make debug-race ``` From 445c102a14e5cf287763cd197e330a8f6233b607 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Tue, 24 Apr 2018 07:52:24 +0200 Subject: [PATCH 094/212] Change status expiring empty contract from failed to succeeded --- modules/host/storageobligations.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 828a431a8d..6873a383ac 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -765,7 +765,7 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { // If the window has closed, the host has failed and the obligation can // be removed. - if so.proofDeadline() < blockHeight || len(so.SectorRoots) == 0 { + if so.proofDeadline() < blockHeight { h.log.Debugln("storage proof not confirmed by deadline, id", so.id()) h.mu.Lock() err := h.removeStorageObligation(so, obligationFailed) @@ -775,6 +775,19 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { } return } + // If the obligation has no sector roots, we can remove the obligation and not + // submit a storage proof. The host payout for a failed empty contract + // includes the contract cost and locked collateral. + if len(so.SectorRoots) == 0 { + h.logDebugln("storage proof not submitted for empty contract, id", so.id()) + h.mu.Lock() + err := h.removeStorageObligation(so, obligationSucceeded) + h.mu.Unlock() + if err != nil { + h.log.Println("Error removing storage obligation:", err) + } + return + } // Get the index of the segment, and the index of the sector containing // the segment. From a81a9f04680cdfaadc08d9f4c716d7cdbec6ca15 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Tue, 24 Apr 2018 08:05:06 +0200 Subject: [PATCH 095/212] Update logging. Fix typo --- modules/host/storageobligations.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 6873a383ac..9562133f16 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -574,8 +574,15 @@ func (h *Host) removeStorageObligation(so storageObligation, sos storageObligati } } if sos == obligationSucceeded { + // Empty obligations don't submit a storage proof. The revenue for an empty + // storage obligation should equal the contract cost of the obligation + if len(so.SectorRoots) == 0 { + h.log.Printf("Not submitted a storage proof for empty contract. Revenue is %v.\n", so.ContractCost.Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue)) + } else { + h.log.Printf("Successfully submitted a storage proof. Revenue is %v.\n", so.ContractCost.Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue)) + } + // Remove the obligation statistics as potential risk and income. - h.log.Printf("Successfully submitted a storage proof. 
Revenue is %v.\n", so.ContractCost.Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue)) h.financialMetrics.PotentialContractCompensation = h.financialMetrics.PotentialContractCompensation.Sub(so.ContractCost) h.financialMetrics.LockedStorageCollateral = h.financialMetrics.LockedStorageCollateral.Sub(so.LockedCollateral) h.financialMetrics.PotentialStorageRevenue = h.financialMetrics.PotentialStorageRevenue.Sub(so.PotentialStorageRevenue) @@ -779,7 +786,7 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { // submit a storage proof. The host payout for a failed empty contract // includes the contract cost and locked collateral. if len(so.SectorRoots) == 0 { - h.logDebugln("storage proof not submitted for empty contract, id", so.id()) + h.log.Debugln("storage proof not submitted for empty contract, id", so.id()) h.mu.Lock() err := h.removeStorageObligation(so, obligationSucceeded) h.mu.Unlock() From 0f754a3a6060fe3a0e80d08226a358e51d842bab Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Tue, 24 Apr 2018 18:04:16 +0200 Subject: [PATCH 096/212] Simplify print statement --- modules/host/storageobligations.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 9562133f16..964baf64b4 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -576,10 +576,11 @@ func (h *Host) removeStorageObligation(so storageObligation, sos storageObligati if sos == obligationSucceeded { // Empty obligations don't submit a storage proof. The revenue for an empty // storage obligation should equal the contract cost of the obligation + revenue := so.ContractCost.Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue) if len(so.SectorRoots) == 0 { - h.log.Printf("Not submitted a storage proof for empty contract. Revenue is %v.\n", so.ContractCost.Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue)) + h.log.Printf("Not submitted a storage proof for empty contract. Revenue is %v.\n", revenue) } else { - h.log.Printf("Successfully submitted a storage proof. Revenue is %v.\n", so.ContractCost.Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue)) + h.log.Printf("Successfully submitted a storage proof. Revenue is %v.\n", revenue) } // Remove the obligation statistics as potential risk and income. From 677c3a2a49eba4e9bfa0b65d2f03554fadeb4f2b Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Tue, 24 Apr 2018 18:08:08 +0200 Subject: [PATCH 097/212] Make comment more clear that no storage proof is needed for empty contract --- modules/host/storageobligations.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 964baf64b4..85f50419b9 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -578,7 +578,7 @@ func (h *Host) removeStorageObligation(so storageObligation, sos storageObligati // storage obligation should equal the contract cost of the obligation revenue := so.ContractCost.Add(so.PotentialStorageRevenue).Add(so.PotentialDownloadRevenue).Add(so.PotentialUploadRevenue) if len(so.SectorRoots) == 0 { - h.log.Printf("Not submitted a storage proof for empty contract. Revenue is %v.\n", revenue) + h.log.Printf("No need to submit a storage proof for empty contract. 
Revenue is %v.\n", revenue) } else { h.log.Printf("Successfully submitted a storage proof. Revenue is %v.\n", revenue) } From 634ee3bcced1c431485f079cecfb6b4340b117fe Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Tue, 24 Apr 2018 18:09:25 +0200 Subject: [PATCH 098/212] First check for empty contract, than for too late storage proof --- modules/host/storageobligations.go | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 85f50419b9..833a7d00bf 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -771,32 +771,31 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { if !so.ProofConfirmed && blockHeight >= so.expiration()+resubmissionTimeout { h.log.Debugln("Host is attempting a storage proof for", so.id()) - // If the window has closed, the host has failed and the obligation can - // be removed. - if so.proofDeadline() < blockHeight { - h.log.Debugln("storage proof not confirmed by deadline, id", so.id()) + // If the obligation has no sector roots, we can remove the obligation and not + // submit a storage proof. The host payout for a failed empty contract + // includes the contract cost and locked collateral. + if len(so.SectorRoots) == 0 { + h.log.Debugln("storage proof not submitted for empty contract, id", so.id()) h.mu.Lock() - err := h.removeStorageObligation(so, obligationFailed) + err := h.removeStorageObligation(so, obligationSucceeded) h.mu.Unlock() if err != nil { h.log.Println("Error removing storage obligation:", err) } return } - // If the obligation has no sector roots, we can remove the obligation and not - // submit a storage proof. The host payout for a failed empty contract - // includes the contract cost and locked collateral. - if len(so.SectorRoots) == 0 { - h.log.Debugln("storage proof not submitted for empty contract, id", so.id()) + // If the window has closed, the host has failed and the obligation can + // be removed. + if so.proofDeadline() < blockHeight { + h.log.Debugln("storage proof not confirmed by deadline, id", so.id()) h.mu.Lock() - err := h.removeStorageObligation(so, obligationSucceeded) + err := h.removeStorageObligation(so, obligationFailed) h.mu.Unlock() if err != nil { h.log.Println("Error removing storage obligation:", err) } return } - // Get the index of the segment, and the index of the sector containing // the segment. segmentIndex, err := h.cs.StorageProofSegment(so.id()) From 89f7ee54f821b532728dc441ed4082b64edf7dd7 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 13:17:41 -0400 Subject: [PATCH 099/212] Improve tpool panic output --- modules/transactionpool/update.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/transactionpool/update.go b/modules/transactionpool/update.go index 34d2921a05..d83167f88f 100644 --- a/modules/transactionpool/update.go +++ b/modules/transactionpool/update.go @@ -2,6 +2,7 @@ package transactionpool import ( "bytes" + "fmt" "sort" "github.com/NebulousLabs/Sia/crypto" @@ -193,7 +194,7 @@ func (tp *TransactionPool) ProcessConsensusChange(cc modules.ConsensusChange) { // Sanity check - the id of each reverted block should match the recent // parent id. 
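
Stepping back to the storage-obligation patches above (094–098): the branch order now checks for an empty contract before the missed proof deadline. A small self-contained sketch of that decision order, where the returned strings stand in for the host's real removeStorageObligation calls; illustrative only.

```go
// expiredObligationOutcome mirrors the branch order above: an empty contract
// is closed out as succeeded without a proof, otherwise missing the proof
// deadline marks the obligation failed, and otherwise a proof is submitted.
func expiredObligationOutcome(numSectorRoots int, proofDeadline, blockHeight uint64) string {
	if numSectorRoots == 0 {
		return "succeeded: empty contract, no storage proof needed"
	}
	if proofDeadline < blockHeight {
		return "failed: proof window closed without a confirmed proof"
	}
	return "build and submit a storage proof"
}
```
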
if block.ID() != recentID && !resetSanityCheck { - panic("Consensus change series appears to be inconsistent - we are reverting the wrong block.") + panic(fmt.Sprintf("Consensus change series appears to be inconsistent - we are reverting the wrong block. bid: %v recent: %v", block.ID(), recentID)) } recentID = block.ParentID @@ -220,7 +221,7 @@ func (tp *TransactionPool) ProcessConsensusChange(cc modules.ConsensusChange) { // Sanity check - the parent id of each block should match the current // block id. if block.ParentID != recentID && !resetSanityCheck { - panic("Consensus change series appears to be inconsistent - we are applying the wrong block.") + panic(fmt.Sprintf("Consensus change series appears to be inconsistent - we are applying the wrong block. pid: %v recent: %v", block.ParentID, recentID)) } recentID = block.ID() From 181f267e61d84784a74a3d74d3e2df6aab80703d Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 13:30:01 -0400 Subject: [PATCH 100/212] use build.Critical when we can't get recent block --- modules/transactionpool/update.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/transactionpool/update.go b/modules/transactionpool/update.go index d83167f88f..dcbc3d4b80 100644 --- a/modules/transactionpool/update.go +++ b/modules/transactionpool/update.go @@ -5,6 +5,7 @@ import ( "fmt" "sort" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" @@ -186,7 +187,8 @@ func (tp *TransactionPool) ProcessConsensusChange(cc modules.ConsensusChange) { tp.log.Println("NOTE: Upgrading tpool database to support consensus change verification.") resetSanityCheck = true } else if err != nil { - tp.log.Println("ERROR: Could not access recentID from tpool.") + tp.log.Println("ERROR: Could not access recentID from tpool:", err) + build.Critical("ERROR: Could not access recentID from tpool:", err) } // Update the database of confirmed transactions. From 5110c8d8c1f50d43d523b77df1f131ff34444e28 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 15:27:26 -0400 Subject: [PATCH 101/212] rename contractUtility to readlockContractUtility and rename minScoreHostBuffer constant --- modules/renter/contractor/consts.go | 4 ++-- modules/renter/contractor/contractor.go | 10 +++++----- modules/renter/contractor/contracts.go | 16 ++++++++-------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/modules/renter/contractor/consts.go b/modules/renter/contractor/consts.go index 57061f03c2..8e19d15408 100644 --- a/modules/renter/contractor/consts.go +++ b/modules/renter/contractor/consts.go @@ -13,9 +13,9 @@ var ( // contract. minContractFundRenewalThreshold = float64(0.03) // 3% - // minScoreHostBuffer defines how many extra hosts are queried when trying + // randomHostsBufferForScore defines how many extra hosts are queried when trying // to figure out an appropriate minimum score for the hosts that we have. - minScoreHostBuffer = build.Select(build.Var{ + randomHostsBufferForScore = build.Select(build.Var{ Dev: 2, Standard: 10, Testing: 1, diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index b2071a9621..743a3acd23 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -65,8 +65,8 @@ type Contractor struct { renewedIDs map[types.FileContractID]types.FileContractID } -// resolveID returns the ID of the most recent renewal of id. 
-func (c *Contractor) resolveID(id types.FileContractID) types.FileContractID { +// readlockResolveID returns the ID of the most recent renewal of id. +func (c *Contractor) readlockResolveID(id types.FileContractID) types.FileContractID { newID, exists := c.renewedIDs[id] for exists { id = newID @@ -126,7 +126,7 @@ func (c *Contractor) PeriodSpending() modules.ContractorSpending { func (c *Contractor) ContractByID(id types.FileContractID) (modules.RenterContract, bool) { c.mu.RLock() defer c.mu.RUnlock() - return c.contracts.View(c.resolveID(id)) + return c.contracts.View(c.readlockResolveID(id)) } // Contracts returns the contracts formed by the contractor in the current @@ -142,7 +142,7 @@ func (c *Contractor) Contracts() []modules.RenterContract { func (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { c.mu.RLock() defer c.mu.RUnlock() - return c.contractUtility(id) + return c.readlockContractUtility(id) } // CurrentPeriod returns the height at which the current allowance period @@ -156,7 +156,7 @@ func (c *Contractor) CurrentPeriod() types.BlockHeight { // ResolveID returns the ID of the most recent renewal of id. func (c *Contractor) ResolveID(id types.FileContractID) types.FileContractID { c.mu.RLock() - newID := c.resolveID(id) + newID := c.readlockResolveID(id) c.mu.RUnlock() return newID } diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index ec91472459..e92af7f5b8 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -28,9 +28,9 @@ func (c *Contractor) contractEndHeight() types.BlockHeight { return c.currentPeriod + c.allowance.Period } -// contractUtility returns the ContractUtility for a contract with a given id. -func (c *Contractor) contractUtility(id types.FileContractID) (modules.ContractUtility, bool) { - rc, exists := c.contracts.View(c.resolveID(id)) +// readlockContractUtility returns the ContractUtility for a contract with a given id. +func (c *Contractor) readlockContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { + rc, exists := c.contracts.View(c.readlockResolveID(id)) if !exists { return modules.ContractUtility{}, false } @@ -73,7 +73,7 @@ func (c *Contractor) managedMarkContractsUtility() error { c.mu.RLock() hostCount := int(c.allowance.Hosts) c.mu.RUnlock() - hosts, err := c.hdb.RandomHosts(hostCount+minScoreHostBuffer, nil) + hosts, err := c.hdb.RandomHosts(hostCount+randomHostsBufferForScore, nil) if err != nil { return err } @@ -203,7 +203,7 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. contract := sc.Metadata() // Sanity check - should not be renewing a bad contract. c.mu.RLock() - utility, ok := c.contractUtility(contract.ID) + utility, ok := c.readlockContractUtility(contract.ID) c.mu.RUnlock() if !ok || !utility.GoodForRenew { c.log.Critical(fmt.Sprintf("Renewing a contract that has been marked as !GoodForRenew %v/%v", @@ -364,7 +364,7 @@ func (c *Contractor) threadedContractMaintenance() { // Iterate through the contracts again, figuring out which contracts to // renew and how much extra funds to renew them with. for _, contract := range c.contracts.ViewAll() { - utility, ok := c.contractUtility(contract.ID) + utility, ok := c.readlockContractUtility(contract.ID) if !ok || !utility.GoodForRenew { continue } @@ -486,7 +486,7 @@ func (c *Contractor) threadedContractMaintenance() { } // Return the contract if it's not useful for renewing. 
c.mu.RLock() - oldUtility, ok := c.contractUtility(id) + oldUtility, ok := c.readlockContractUtility(id) c.mu.RUnlock() if !ok || !oldUtility.GoodForRenew { c.log.Printf("Contract %v slated for renew is marked not good for renew %v/%v", @@ -571,7 +571,7 @@ func (c *Contractor) threadedContractMaintenance() { c.mu.RLock() uploadContracts := 0 for _, id := range c.contracts.IDs() { - if cu, ok := c.contractUtility(id); ok && cu.GoodForUpload { + if cu, ok := c.readlockContractUtility(id); ok && cu.GoodForUpload { uploadContracts++ } } From db364893fb4c08ae79368bb2ef0c9f959ce7f58e Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 15:32:31 -0400 Subject: [PATCH 102/212] change hardcoded value to constant --- modules/renter/contractor/contracts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index e92af7f5b8..21376290d5 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -591,7 +591,7 @@ func (c *Contractor) threadedContractMaintenance() { } initialContractFunds := c.allowance.Funds.Div64(c.allowance.Hosts).Div64(3) c.mu.RUnlock() - hosts, err := c.hdb.RandomHosts(neededContracts*2+10, exclude) + hosts, err := c.hdb.RandomHosts(neededContracts*2+randomHostsBufferForScore, exclude) if err != nil { c.log.Println("WARN: not forming new contracts:", err) return From 72c3155967b1db55ad20bc4a9d4ec2b262ae5947 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 15:54:42 -0400 Subject: [PATCH 103/212] docstrings and constants --- modules/renter/hostdb/consts.go | 4 ++++ modules/renter/hostdb/scan.go | 4 ++-- modules/renter/proto/contract.go | 5 +++++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/modules/renter/hostdb/consts.go b/modules/renter/hostdb/consts.go index 7a19aeca9c..054bb93d03 100644 --- a/modules/renter/hostdb/consts.go +++ b/modules/renter/hostdb/consts.go @@ -46,6 +46,10 @@ const ( // saveFrequency defines how frequently the hostdb will save to disk. Hostdb // will also save immediately prior to shutdown. saveFrequency = 2 * time.Minute + + // scanCheckInterval is the interval used when waiting for the scanList to + // empty itself and for waiting on the consensus set to be synced. + scanCheckInterval = time.Second ) var ( diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index 4a3a6c6efc..2b23299a0a 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -302,7 +302,7 @@ func (hdb *HostDB) managedWaitForScans() { } select { case <-hdb.tg.StopChan(): - case <-time.After(time.Second): + case <-time.After(scanCheckInterval): } } } @@ -355,7 +355,7 @@ func (hdb *HostDB) threadedScan() { select { case <-hdb.tg.StopChan(): return - case <-time.After(time.Second): + case <-time.After(scanCheckInterval): } } diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index f9bffc7cdc..d7f8b7e650 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -29,6 +29,9 @@ type updateSetHeader struct { Header contractHeader } +// v132UpdateHeader was introduced due to backwards compatibility reasons after +// changing the format of the contractHeader. It contains the legacy +// v132ContractHeader. 
type v132UpdateSetHeader struct { ID types.FileContractID Header v132ContractHeader @@ -61,6 +64,8 @@ type contractHeader struct { Utility modules.ContractUtility } +// v132ContractHeader is a contractHeader without the Utility field. This field +// was added after v132 to be able to persist contract utilities. type v132ContractHeader struct { // transaction is the signed transaction containing the most recent // revision of the file contract. From 37f9db96e15f622ef3a58d3d834e0615d2dbcca6 Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 24 Apr 2018 16:03:54 -0400 Subject: [PATCH 104/212] Trimmed Install Go section to just the first two paragraphs per LC request --- doc/Guide to Contributing to Sia.md | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/doc/Guide to Contributing to Sia.md b/doc/Guide to Contributing to Sia.md index 64a585a8fe..5977750f94 100644 --- a/doc/Guide to Contributing to Sia.md +++ b/doc/Guide to Contributing to Sia.md @@ -29,21 +29,6 @@ You should install the latest [official Go binary][binary] for your system (if not available, [install from source][source]). If you plan to cross compile Sia, see [Cross Compilation with Go 1.5][cross] by Dave Cheney. -Now make a workspace directory in which you will store source code and -dependencies. You can choose any filepath except where you installed Go (don't -choose `/usr/local`). - -```bash -# make a working directory called golang in your home directory -$ mkdir $HOME/golang -# store base path in an environmental variable -$ echo 'export GOPATH=$HOME/golang' >> $HOME/.profile -# add bin subdirectory to PATH environmental variable -$ echo 'export PATH=$PATH:$GOPATH/bin' >> $HOME/.profile -# source your profile -$ source $HOME/.profile -``` - ### Learn Go From 60c24b2648eb8294d2a5d61010465c54a7d74b40 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 15:40:04 -0400 Subject: [PATCH 105/212] use tp.log.Critical --- modules/transactionpool/update.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/transactionpool/update.go b/modules/transactionpool/update.go index dcbc3d4b80..bfd2584b3d 100644 --- a/modules/transactionpool/update.go +++ b/modules/transactionpool/update.go @@ -5,7 +5,6 @@ import ( "fmt" "sort" - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" @@ -187,8 +186,7 @@ func (tp *TransactionPool) ProcessConsensusChange(cc modules.ConsensusChange) { tp.log.Println("NOTE: Upgrading tpool database to support consensus change verification.") resetSanityCheck = true } else if err != nil { - tp.log.Println("ERROR: Could not access recentID from tpool:", err) - build.Critical("ERROR: Could not access recentID from tpool:", err) + tp.log.Critical("ERROR: Could not access recentID from tpool:", err) } // Update the database of confirmed transactions. 
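
The scanCheckInterval waits introduced in the hostdb patch above follow a common shutdown-aware sleep pattern: select on the stop channel and a timer so the loop wakes up immediately on shutdown. A minimal sketch, assuming a plain stop channel in place of the thread group's StopChan.

```go
package example

import "time"

// sleepOrStop waits for d, but returns false immediately if stop is closed.
// This mirrors the select used in the hostdb scan loops above.
func sleepOrStop(stop <-chan struct{}, d time.Duration) bool {
	select {
	case <-stop:
		return false
	case <-time.After(d):
		return true
	}
}
```
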
From e18d55b94f1a88b917fef9161c115c2679de37bc Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 16:28:35 -0400 Subject: [PATCH 106/212] fix if clause --- modules/renter/uploadheap.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/uploadheap.go b/modules/renter/uploadheap.go index 8a79fdcf47..ddb287ae5f 100644 --- a/modules/renter/uploadheap.go +++ b/modules/renter/uploadheap.go @@ -168,7 +168,7 @@ func (r *Renter) buildUnfinishedChunks(f *file, hosts map[string]struct{}) []*un for fcid, fileContract := range f.contracts { recentContract, exists := r.hostContractor.ContractByID(fcid) contractUtility, exists2 := r.hostContractor.ContractUtility(fcid) - if (exists && !exists2) || (!exists && exists) { + if exists != exists2 { build.Critical("got a contract without utility or vice versa which shouldn't happen", exists, exists2) } From a85082a4e2b608611e9e359723a1eef8b5662c1a Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 16:31:20 -0400 Subject: [PATCH 107/212] refactor method --- modules/transactionpool/transactionpool.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/modules/transactionpool/transactionpool.go b/modules/transactionpool/transactionpool.go index a7abba875a..901f2b4f3c 100644 --- a/modules/transactionpool/transactionpool.go +++ b/modules/transactionpool/transactionpool.go @@ -264,15 +264,13 @@ func (tp *TransactionPool) TransactionSet(oid crypto.Hash) []types.Transaction { var parents []types.Transaction tSetID, exists := tp.knownObjects[ObjectID(oid)] if !exists { - return parents + return nil } tSet, exists := tp.transactionSets[tSetID] if !exists { - return parents - } - for _, txn := range tSet { - parents = append(parents, txn) + return nil } + parents = append(parents, tSet...) return parents } From fcd6f8f1645dac5c991a15fc36f9ebb4aa84ce46 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 16:39:34 -0400 Subject: [PATCH 108/212] fix grammar --- modules/wallet/transactionbuilder.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/wallet/transactionbuilder.go b/modules/wallet/transactionbuilder.go index 20549c336f..0d50e94b93 100644 --- a/modules/wallet/transactionbuilder.go +++ b/modules/wallet/transactionbuilder.go @@ -382,8 +382,8 @@ func (tb *transactionBuilder) FundSiafunds(amount types.Currency) error { return nil } -// UnconfirmedParents returns any unconfirmed parents the transaction set that -// is being built by the transaction builder could have. +// UnconfirmedParents returns the unconfirmed parents of the transaction set +// that is being constructed by the transaction builder. 
func (tb *transactionBuilder) UnconfirmedParents() (parents []types.Transaction) { addedParents := make(map[types.TransactionID]struct{}) for _, p := range tb.parents { From 4787febff213e256691426815a1f5474cec5a392 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 24 Apr 2018 17:20:00 -0400 Subject: [PATCH 109/212] unconfirmed parents ignore child transactions --- modules/wallet/transactionbuilder.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/wallet/transactionbuilder.go b/modules/wallet/transactionbuilder.go index 0d50e94b93..a6b01399a1 100644 --- a/modules/wallet/transactionbuilder.go +++ b/modules/wallet/transactionbuilder.go @@ -390,12 +390,21 @@ func (tb *transactionBuilder) UnconfirmedParents() (parents []types.Transaction) for _, sci := range p.SiacoinInputs { tSet := tb.wallet.tpool.TransactionSet(crypto.Hash(sci.ParentID)) for _, txn := range tSet { + // Add the transaction to the parents. txnID := txn.ID() if _, exists := addedParents[txnID]; exists { continue } addedParents[txnID] = struct{}{} parents = append(parents, txn) + + // When we found the transaction that contains the output that + // is spent by sci we stop to avoid adding child transactions. + for i := range txn.SiacoinOutputs { + if txn.SiacoinOutputID(uint64(i)) == sci.ParentID { + break + } + } } } } From 8bc2d14ba9d6ed0db82a88857cebfa2e621e3784 Mon Sep 17 00:00:00 2001 From: Thomas Bennett Date: Tue, 24 Apr 2018 15:02:43 -0700 Subject: [PATCH 110/212] Add new contributors, fix goreport misspell --- doc/Contributors.md | 4 +++- siatest/renter.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/Contributors.md b/doc/Contributors.md index e497ad2e25..f16c95dbf7 100644 --- a/doc/Contributors.md +++ b/doc/Contributors.md @@ -7,13 +7,14 @@ ### List of contributors * DavidVorick (Owner) * lukechampine (Lead Developer) +* ChrisSchinnerl (Nebulous Developer) +* Msevey (Nebulous Developer) * VoidingWarranties * avahowell * seveibar * triazo * mnsl * mtlynch -* ChrisSchinnerl * Mingling94 * marcinja * RNabel @@ -52,6 +53,7 @@ * huetsch * mjmay08 * jfcg +* pachisi456 * Google Inc. 
diff --git a/siatest/renter.go b/siatest/renter.go index c33c140e1c..76ed1e4230 100644 --- a/siatest/renter.go +++ b/siatest/renter.go @@ -119,7 +119,7 @@ func (tn *TestNode) DownloadInfo(lf *LocalFile, rf *RemoteFile) (*api.DownloadIn if di.Length != di.Filesize { err = errors.AddContext(err, "filesize != length") } - // Received data can't be larger than transfered data + // Received data can't be larger than transferred data if di.Received > di.TotalDataTransferred { err = errors.AddContext(err, "received > TotalDataTransfered") } From 57a25e9608fa93493386d0071bc7dfe8404f48fe Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 25 Apr 2018 10:43:41 -0400 Subject: [PATCH 111/212] make UnconfirmedParents fail if sign has been called on the transactionbuilder --- modules/renter/contractor/dependencies.go | 2 +- modules/renter/proto/formcontract.go | 5 ++++- modules/renter/proto/proto.go | 2 +- modules/renter/proto/renew.go | 5 ++++- modules/wallet.go | 2 +- modules/wallet/transactionbuilder.go | 10 +++++++++- modules/wallet/transactionbuilder_test.go | 5 ++++- 7 files changed, 24 insertions(+), 7 deletions(-) diff --git a/modules/renter/contractor/dependencies.go b/modules/renter/contractor/dependencies.go index e61d9bae76..d2261d82ca 100644 --- a/modules/renter/contractor/dependencies.go +++ b/modules/renter/contractor/dependencies.go @@ -38,7 +38,7 @@ type ( Drop() FundSiacoins(types.Currency) error Sign(bool) ([]types.Transaction, error) - UnconfirmedParents() []types.Transaction + UnconfirmedParents() ([]types.Transaction, error) View() (types.Transaction, []types.Transaction) ViewAdded() (parents, coins, funds, signatures []int) } diff --git a/modules/renter/proto/formcontract.go b/modules/renter/proto/formcontract.go index fb5903b72c..66744503aa 100644 --- a/modules/renter/proto/formcontract.go +++ b/modules/renter/proto/formcontract.go @@ -98,7 +98,10 @@ func (cs *ContractSet) FormContract(params ContractParams, txnBuilder transactio // Create initial transaction set. txn, parentTxns := txnBuilder.View() - unconfirmedParents := txnBuilder.UnconfirmedParents() + unconfirmedParents, err := txnBuilder.UnconfirmedParents() + if err != nil { + return modules.RenterContract{}, err + } txnSet := append(unconfirmedParents, append(parentTxns, txn)...) // Increase Successful/Failed interactions accordingly diff --git a/modules/renter/proto/proto.go b/modules/renter/proto/proto.go index 670bbf275c..c0a7589d65 100644 --- a/modules/renter/proto/proto.go +++ b/modules/renter/proto/proto.go @@ -19,7 +19,7 @@ type ( AddTransactionSignature(types.TransactionSignature) uint64 FundSiacoins(types.Currency) error Sign(bool) ([]types.Transaction, error) - UnconfirmedParents() []types.Transaction + UnconfirmedParents() ([]types.Transaction, error) View() (types.Transaction, []types.Transaction) ViewAdded() (parents, coins, funds, signatures []int) } diff --git a/modules/renter/proto/renew.go b/modules/renter/proto/renew.go index 564586bf13..89ca870f6d 100644 --- a/modules/renter/proto/renew.go +++ b/modules/renter/proto/renew.go @@ -99,7 +99,10 @@ func (cs *ContractSet) Renew(oldContract *SafeContract, params ContractParams, t // Create initial transaction set. txn, parentTxns := txnBuilder.View() - unconfirmedParents := txnBuilder.UnconfirmedParents() + unconfirmedParents, err := txnBuilder.UnconfirmedParents() + if err != nil { + return modules.RenterContract{}, err + } txnSet := append(unconfirmedParents, append(parentTxns, txn)...) 
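
The call order matters after the UnconfirmedParents change above: the builder returns errBuilderAlreadySigned once Sign has been called, so callers such as FormContract and Renew gather the parents while the builder is still unsigned and only sign afterwards. A minimal sketch of that ordering, assuming tb is a funded modules.TransactionBuilder; illustrative only.

```go
package example

import (
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"
)

// buildTxnSet mirrors the ordering used above: collect the unconfirmed
// parents before signing, since UnconfirmedParents errors once Sign has run.
func buildTxnSet(tb modules.TransactionBuilder) ([]types.Transaction, error) {
	unconfirmedParents, err := tb.UnconfirmedParents()
	if err != nil {
		return nil, err // e.g. the builder was already signed
	}
	txn, parentTxns := tb.View()
	// Unconfirmed parents first, then confirmed parents, then the new txn.
	return append(unconfirmedParents, append(parentTxns, txn)...), nil
}
```
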
// Increase Successful/Failed interactions accordingly diff --git a/modules/wallet.go b/modules/wallet.go index 35e376fe7b..74e1e2473d 100644 --- a/modules/wallet.go +++ b/modules/wallet.go @@ -200,7 +200,7 @@ type ( // UnconfirmedParents returns any unconfirmed parents the transaction set that // is being built by the transaction builder could have. - UnconfirmedParents() []types.Transaction + UnconfirmedParents() ([]types.Transaction, error) // View returns the incomplete transaction along with all of its // parents. diff --git a/modules/wallet/transactionbuilder.go b/modules/wallet/transactionbuilder.go index a6b01399a1..49cec7c8f3 100644 --- a/modules/wallet/transactionbuilder.go +++ b/modules/wallet/transactionbuilder.go @@ -384,7 +384,15 @@ func (tb *transactionBuilder) FundSiafunds(amount types.Currency) error { // UnconfirmedParents returns the unconfirmed parents of the transaction set // that is being constructed by the transaction builder. -func (tb *transactionBuilder) UnconfirmedParents() (parents []types.Transaction) { +func (tb *transactionBuilder) UnconfirmedParents() (parents []types.Transaction, err error) { + // Currently we don't need to call UnconfirmedParents after the transaction + // was signed so we don't allow doing that. If for some reason our + // requirements change, we can remove this check. The only downside is, + // that it might lead to transactions being returned that are not actually + // parents in case the signed transaction already has child transactions. + if tb.signed { + return nil, errBuilderAlreadySigned + } addedParents := make(map[types.TransactionID]struct{}) for _, p := range tb.parents { for _, sci := range p.SiacoinInputs { diff --git a/modules/wallet/transactionbuilder_test.go b/modules/wallet/transactionbuilder_test.go index 7529288907..3470cfacfa 100644 --- a/modules/wallet/transactionbuilder_test.go +++ b/modules/wallet/transactionbuilder_test.go @@ -486,7 +486,10 @@ func TestUnconfirmedParents(t *testing.T) { // UnconfirmedParents should return the transactions of the transaction set // we used to send money to ourselves. 
- parents := b.UnconfirmedParents() + parents, err := b.UnconfirmedParents() + if err != nil { + t.Fatal(err) + } if len(tSet) != len(parents) { t.Fatal("parents should have same length as unconfirmed transaction set") } From 9b3ecde8ab0bc7a3339468e38562609baa0cc25f Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 25 Apr 2018 11:41:06 -0400 Subject: [PATCH 112/212] add build.Retry to fullyConnectNodes --- siatest/testgroup.go | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/siatest/testgroup.go b/siatest/testgroup.go index d6d77a1626..682691fb3a 100644 --- a/siatest/testgroup.go +++ b/siatest/testgroup.go @@ -186,15 +186,23 @@ func fullyConnectNodes(nodes []*TestNode) error { // Fully connect the nodes for i, nodeA := range nodes { for _, nodeB := range nodes[i+1:] { - isPeer, err := nodeA.hasPeer(nodeB) + err := build.Retry(100, 100*time.Millisecond, func() error { + if err := nodeA.GatewayConnectPost(nodeB.GatewayAddress()); err != nil && err != client.ErrPeerExists { + return errors.AddContext(err, "failed to connect to peer") + } + isPeer1, err1 := nodeA.hasPeer(nodeB) + isPeer2, err2 := nodeB.hasPeer(nodeA) + if err1 != nil || err2 != nil { + return build.ExtendErr("couldn't determine if nodeA and nodeB are connected", + errors.Compose(err1, err2)) + } + if isPeer1 && isPeer2 { + return nil + } + return errors.New("nodeA and nodeB are not peers of each other") + }) if err != nil { - return build.ExtendErr("couldn't determine if nodeB is a peer of nodeA", err) - } - if isPeer { - continue - } - if err := nodeA.GatewayConnectPost(nodeB.GatewayAddress()); err != nil && err != client.ErrPeerExists { - return errors.AddContext(err, "failed to connect to peer") + return err } } } From e7e276c170d84a000220bbeb6fe6c53cce8c0ee2 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 16 Mar 2018 10:55:55 -0400 Subject: [PATCH 113/212] Remove renter's merkle roots from ram --- modules/renter/proto/contract.go | 63 ++++++++++++++++++--------- modules/renter/proto/contract_test.go | 18 ++++++-- modules/renter/proto/editor.go | 10 ++++- modules/renter/proto/renew.go | 8 +++- 4 files changed, 72 insertions(+), 27 deletions(-) diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index d7f8b7e650..6598141bc6 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -2,16 +2,17 @@ package proto import ( "encoding/json" - "errors" "io" "os" "path/filepath" "sync" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/errors" "github.com/NebulousLabs/writeaheadlog" ) @@ -129,7 +130,8 @@ type SafeContract struct { // merkleRoots are the Merkle roots of each sector stored on the host that // relate to this contract. - merkleRoots []crypto.Hash + //merkleRoots []crypto.Hash + numMerkleRoots int // unappliedTxns are the transactions that were written to the WAL but not // applied to the contract file. @@ -206,6 +208,28 @@ func (c *SafeContract) Utility() modules.ContractUtility { return c.header.Utility } +// merkleRoots returns the contracts merkle roots. 
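
Rough numbers behind moving the Merkle roots out of RAM, assuming Sia's 4 MiB sectors and 32-byte hashes: a contract covering 1 TiB of data spans about 262,144 sectors, whose roots alone occupy roughly 8 MiB, so keeping them only on disk and reading them back sequentially (as the function below does) keeps renter memory usage flat as contracts grow.
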
+func (c *SafeContract) merkleRoots() ([]crypto.Hash, error) { + merkleRoots := make([]crypto.Hash, 0, c.numMerkleRoots) + if _, err := c.f.Seek(contractHeaderSize, io.SeekStart); err != nil { + return merkleRoots, err + } + for { + var root crypto.Hash + if _, err := io.ReadFull(c.f, root[:]); err == io.EOF { + break + } else if err != nil { + return merkleRoots, errors.AddContext(err, "failed to read root from disk") + } + merkleRoots = append(merkleRoots, root) + } + // Sanity check: should have read exactly numMerkleRoots roots. + if len(merkleRoots) != c.numMerkleRoots { + build.Critical("Number of merkle roots on disk doesn't match numMerkleRoots") + } + return merkleRoots, nil +} + func (c *SafeContract) makeUpdateSetHeader(h contractHeader) writeaheadlog.Update { c.headerMu.Lock() id := c.header.ID() @@ -250,10 +274,9 @@ func (c *SafeContract) applySetRoot(root crypto.Hash, index int) error { if _, err := c.f.WriteAt(root[:], rootOffset); err != nil { return err } - if len(c.merkleRoots) <= index { - c.merkleRoots = append(c.merkleRoots, make([]crypto.Hash, 1+index-len(c.merkleRoots))...) + if c.numMerkleRoots <= index { + c.numMerkleRoots++ } - c.merkleRoots[index] = root return nil } @@ -267,10 +290,9 @@ func (c *SafeContract) recordUploadIntent(rev types.FileContractRevision, root c newHeader.StorageSpending = newHeader.StorageSpending.Add(storageCost) newHeader.UploadSpending = newHeader.UploadSpending.Add(bandwidthCost) - rootIndex := len(c.merkleRoots) t, err := c.wal.NewTransaction([]writeaheadlog.Update{ c.makeUpdateSetHeader(newHeader), - c.makeUpdateSetRoot(root, rootIndex), + c.makeUpdateSetRoot(root, c.numMerkleRoots), }) if err != nil { return nil, err @@ -291,11 +313,10 @@ func (c *SafeContract) commitUpload(t *writeaheadlog.Transaction, signedTxn type newHeader.StorageSpending = newHeader.StorageSpending.Add(storageCost) newHeader.UploadSpending = newHeader.UploadSpending.Add(bandwidthCost) - rootIndex := len(c.merkleRoots) if err := c.applySetHeader(newHeader); err != nil { return err } - if err := c.applySetRoot(root, rootIndex); err != nil { + if err := c.applySetRoot(root, c.numMerkleRoots); err != nil { return err } if err := c.f.Sync(); err != nil { @@ -429,10 +450,10 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha return modules.RenterContract{}, err } sc := &SafeContract{ - header: h, - merkleRoots: roots, - f: f, - wal: cs.wal, + header: h, + numMerkleRoots: len(roots), + f: f, + wal: cs.wal, } cs.mu.Lock() cs.contracts[h.ID()] = sc @@ -455,7 +476,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo return err } // read merkleRoots - var merkleRoots []crypto.Hash + numMerkleRoots := 0 if _, err := f.Seek(contractHeaderSize, io.SeekStart); err != nil { return err } @@ -466,7 +487,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo } else if err != nil { return err } - merkleRoots = append(merkleRoots, root) + numMerkleRoots++ } // add relevant unapplied transactions var unappliedTxns []*writeaheadlog.Transaction @@ -497,11 +518,11 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo } // add to set cs.contracts[header.ID()] = &SafeContract{ - header: header, - merkleRoots: merkleRoots, - unappliedTxns: unappliedTxns, - f: f, - wal: cs.wal, + header: header, + numMerkleRoots: numMerkleRoots, + unappliedTxns: unappliedTxns, + f: f, + wal: cs.wal, } return nil } @@ -530,7 +551,7 @@ func (cs *ContractSet) ConvertV130Contract(c 
V130Contract, cr V130CachedRevision return errors.New("contract set is missing contract that was just added") } defer cs.Return(sc) - if len(cr.MerkleRoots) == len(sc.merkleRoots)+1 { + if len(cr.MerkleRoots) == sc.numMerkleRoots+1 { root := cr.MerkleRoots[len(cr.MerkleRoots)-1] _, err = sc.recordUploadIntent(cr.Revision, root, types.ZeroCurrency, types.ZeroCurrency) } else { diff --git a/modules/renter/proto/contract_test.go b/modules/renter/proto/contract_test.go index 1d57b17ee5..74b529c213 100644 --- a/modules/renter/proto/contract_test.go +++ b/modules/renter/proto/contract_test.go @@ -71,9 +71,13 @@ func TestContractUncommittedTxn(t *testing.T) { // the state of the contract should match the initial state // NOTE: can't use reflect.DeepEqual for the header because it contains // types.Currency fields + merkleRoots, err := sc.merkleRoots() + if err != nil { + t.Fatal("failed to get merkle roots", err) + } if !bytes.Equal(encoding.Marshal(sc.header), encoding.Marshal(initialHeader)) { t.Fatal("contractHeader should match initial contractHeader") - } else if !reflect.DeepEqual(sc.merkleRoots, initialRoots) { + } else if !reflect.DeepEqual(merkleRoots, initialRoots) { t.Fatal("Merkle roots should match initial Merkle roots") } @@ -91,9 +95,13 @@ func TestContractUncommittedTxn(t *testing.T) { t.Fatal("WAL transaction changed") } // the state of the contract should match the initial state + merkleRoots, err = sc.merkleRoots() + if err != nil { + t.Fatal("failed to get merkle roots:", err) + } if !bytes.Equal(encoding.Marshal(sc.header), encoding.Marshal(initialHeader)) { t.Fatal("contractHeader should match initial contractHeader", sc.header, initialHeader) - } else if !reflect.DeepEqual(sc.merkleRoots, initialRoots) { + } else if !reflect.DeepEqual(merkleRoots, initialRoots) { t.Fatal("Merkle roots should match initial Merkle roots") } @@ -107,9 +115,13 @@ func TestContractUncommittedTxn(t *testing.T) { t.Fatal("expected 0 unappliedTxns, got", len(sc.unappliedTxns)) } // the state of the contract should now match the revised state + merkleRoots, err = sc.merkleRoots() + if err != nil { + t.Fatal("failed to get merkle roots:", err) + } if !bytes.Equal(encoding.Marshal(sc.header), encoding.Marshal(revisedHeader)) { t.Fatal("contractHeader should match revised contractHeader", sc.header, revisedHeader) - } else if !reflect.DeepEqual(sc.merkleRoots, revisedRoots) { + } else if !reflect.DeepEqual(merkleRoots, revisedRoots) { t.Fatal("Merkle roots should match revised Merkle roots") } } diff --git a/modules/renter/proto/editor.go b/modules/renter/proto/editor.go index 34736a91e6..f0d66d286f 100644 --- a/modules/renter/proto/editor.go +++ b/modules/renter/proto/editor.go @@ -89,15 +89,21 @@ func (he *Editor) Upload(data []byte) (_ modules.RenterContract, _ crypto.Hash, return modules.RenterContract{}, crypto.Hash{}, errors.New("contract has insufficient collateral to support upload") } + // Get merkle roots + oldRoots, err := sc.merkleRoots() + if err != nil { + return modules.RenterContract{}, crypto.Hash{}, err + } + // calculate the new Merkle root sectorRoot := crypto.MerkleRoot(data) - newRoots := append(sc.merkleRoots, sectorRoot) + newRoots := append(oldRoots, sectorRoot) merkleRoot := cachedMerkleRoot(newRoots) // create the action and revision actions := []modules.RevisionAction{{ Type: modules.ActionInsert, - SectorIndex: uint64(len(sc.merkleRoots)), + SectorIndex: uint64(sc.numMerkleRoots), Data: data, }} rev := newUploadRevision(contract.LastRevision(), merkleRoot, sectorPrice, 
sectorCollateral) diff --git a/modules/renter/proto/renew.go b/modules/renter/proto/renew.go index 89ca870f6d..20a49d209c 100644 --- a/modules/renter/proto/renew.go +++ b/modules/renter/proto/renew.go @@ -283,8 +283,14 @@ func (cs *ContractSet) Renew(oldContract *SafeContract, params ContractParams, t SiafundFee: types.Tax(startHeight, fc.Payout), } + // Get old roots + oldRoots, err := oldContract.merkleRoots() + if err != nil { + return modules.RenterContract{}, err + } + // Add contract to set. - meta, err := cs.managedInsertContract(header, oldContract.merkleRoots) + meta, err := cs.managedInsertContract(header, oldRoots) if err != nil { return modules.RenterContract{}, err } From 51f9b0cf8fdf6001dae818cc4361d47466ee5761 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 20 Mar 2018 19:35:31 -0400 Subject: [PATCH 114/212] add merkleRoots helper struct --- crypto/merkle.go | 6 + modules/renter/proto/contract.go | 79 +++----- modules/renter/proto/contract_test.go | 6 +- modules/renter/proto/editor.go | 11 +- modules/renter/proto/merkleroots.go | 248 +++++++++++++++++++++++ modules/renter/proto/merkleroots_test.go | 63 ++++++ modules/renter/proto/renew.go | 2 +- 7 files changed, 347 insertions(+), 68 deletions(-) create mode 100644 modules/renter/proto/merkleroots.go create mode 100644 modules/renter/proto/merkleroots_test.go diff --git a/crypto/merkle.go b/crypto/merkle.go index 5c84e5909d..cdd3341195 100644 --- a/crypto/merkle.go +++ b/crypto/merkle.go @@ -82,6 +82,12 @@ func (ct *CachedMerkleTree) Push(h Hash) { ct.CachedTree.Push(h[:]) } +// PushSubTree is a redefinition of merkletree.CachedTree.PushSubTree, with the +// added type safety of only accepting a hash. +func (ct *CachedMerkleTree) PushSubTree(height int, h Hash) error { + return ct.CachedTree.PushSubTree(height, h[:]) +} + // Root is a redefinition of merkletree.CachedTree.Root, returning a Hash // instead of a []byte. func (ct *CachedMerkleTree) Root() (h Hash) { diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index 6598141bc6..a3e29a0df6 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -2,12 +2,10 @@ package proto import ( "encoding/json" - "io" "os" "path/filepath" "sync" - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/encoding" "github.com/NebulousLabs/Sia/modules" @@ -128,10 +126,8 @@ type SafeContract struct { headerMu sync.Mutex header contractHeader - // merkleRoots are the Merkle roots of each sector stored on the host that - // relate to this contract. - //merkleRoots []crypto.Hash - numMerkleRoots int + // merkleRoots are the sector roots covered by this contract. + merkleRoots *merkleRoots // unappliedTxns are the transactions that were written to the WAL but not // applied to the contract file. @@ -208,28 +204,6 @@ func (c *SafeContract) Utility() modules.ContractUtility { return c.header.Utility } -// merkleRoots returns the contracts merkle roots. 
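A rough sense of why these patches move the roots out of memory: every sector contributes one 32-byte Merkle root, so holding the full merkleRoots slice in RAM grows linearly with contract size. A back-of-the-envelope sketch, assuming 4 MiB sectors (an assumption, not stated in this diff):

package main

import "fmt"

func main() {
	// Assumptions: 4 MiB sectors and one 32-byte Merkle root per sector.
	const sectorSize = 4 << 20
	const hashSize = 32

	for _, stored := range []int64{100 << 30, 1 << 40, 10 << 40} {
		sectors := stored / sectorSize
		ramBytes := sectors * hashSize
		fmt.Printf("%5d GiB stored -> %8d roots -> %6.1f MiB of hashes in RAM\n",
			stored>>30, sectors, float64(ramBytes)/(1<<20))
	}
}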
-func (c *SafeContract) merkleRoots() ([]crypto.Hash, error) { - merkleRoots := make([]crypto.Hash, 0, c.numMerkleRoots) - if _, err := c.f.Seek(contractHeaderSize, io.SeekStart); err != nil { - return merkleRoots, err - } - for { - var root crypto.Hash - if _, err := io.ReadFull(c.f, root[:]); err == io.EOF { - break - } else if err != nil { - return merkleRoots, errors.AddContext(err, "failed to read root from disk") - } - merkleRoots = append(merkleRoots, root) - } - // Sanity check: should have read exactly numMerkleRoots roots. - if len(merkleRoots) != c.numMerkleRoots { - build.Critical("Number of merkle roots on disk doesn't match numMerkleRoots") - } - return merkleRoots, nil -} - func (c *SafeContract) makeUpdateSetHeader(h contractHeader) writeaheadlog.Update { c.headerMu.Lock() id := c.header.ID() @@ -274,8 +248,11 @@ func (c *SafeContract) applySetRoot(root crypto.Hash, index int) error { if _, err := c.f.WriteAt(root[:], rootOffset); err != nil { return err } - if c.numMerkleRoots <= index { - c.numMerkleRoots++ + numMerkleRoots := c.merkleRoots.len() + if index == numMerkleRoots { + c.merkleRoots.push(root) + } else { + return c.merkleRoots.insert(index, root) } return nil } @@ -292,7 +269,7 @@ func (c *SafeContract) recordUploadIntent(rev types.FileContractRevision, root c t, err := c.wal.NewTransaction([]writeaheadlog.Update{ c.makeUpdateSetHeader(newHeader), - c.makeUpdateSetRoot(root, c.numMerkleRoots), + c.makeUpdateSetRoot(root, c.merkleRoots.len()), }) if err != nil { return nil, err @@ -316,7 +293,7 @@ func (c *SafeContract) commitUpload(t *writeaheadlog.Transaction, signedTxn type if err := c.applySetHeader(newHeader); err != nil { return err } - if err := c.applySetRoot(root, c.numMerkleRoots); err != nil { + if err := c.applySetRoot(root, c.merkleRoots.len()); err != nil { return err } if err := c.f.Sync(); err != nil { @@ -441,8 +418,9 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha return modules.RenterContract{}, err } // write roots - for i, root := range roots { - if _, err := f.WriteAt(root[:], contractHeaderSize+crypto.HashSize*int64(i)); err != nil { + merkleRoots := newMerkleRoots(f) + for _, root := range roots { + if err := merkleRoots.push(root); err != nil { return modules.RenterContract{}, err } } @@ -450,10 +428,10 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha return modules.RenterContract{}, err } sc := &SafeContract{ - header: h, - numMerkleRoots: len(roots), - f: f, - wal: cs.wal, + header: h, + merkleRoots: merkleRoots, + f: f, + wal: cs.wal, } cs.mu.Lock() cs.contracts[h.ID()] = sc @@ -476,19 +454,10 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo return err } // read merkleRoots - numMerkleRoots := 0 - if _, err := f.Seek(contractHeaderSize, io.SeekStart); err != nil { + merkleRoots, err := loadExistingMerkleRoots(f) + if err != nil { return err } - for { - var root crypto.Hash - if _, err := io.ReadFull(f, root[:]); err == io.EOF { - break - } else if err != nil { - return err - } - numMerkleRoots++ - } // add relevant unapplied transactions var unappliedTxns []*writeaheadlog.Transaction for _, t := range walTxns { @@ -518,11 +487,11 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo } // add to set cs.contracts[header.ID()] = &SafeContract{ - header: header, - numMerkleRoots: numMerkleRoots, - unappliedTxns: unappliedTxns, - f: f, - wal: cs.wal, + header: header, + merkleRoots: merkleRoots, + 
unappliedTxns: unappliedTxns, + f: f, + wal: cs.wal, } return nil } @@ -551,7 +520,7 @@ func (cs *ContractSet) ConvertV130Contract(c V130Contract, cr V130CachedRevision return errors.New("contract set is missing contract that was just added") } defer cs.Return(sc) - if len(cr.MerkleRoots) == sc.numMerkleRoots+1 { + if len(cr.MerkleRoots) == sc.merkleRoots.len()+1 { root := cr.MerkleRoots[len(cr.MerkleRoots)-1] _, err = sc.recordUploadIntent(cr.Revision, root, types.ZeroCurrency, types.ZeroCurrency) } else { diff --git a/modules/renter/proto/contract_test.go b/modules/renter/proto/contract_test.go index 74b529c213..802e7936d0 100644 --- a/modules/renter/proto/contract_test.go +++ b/modules/renter/proto/contract_test.go @@ -71,7 +71,7 @@ func TestContractUncommittedTxn(t *testing.T) { // the state of the contract should match the initial state // NOTE: can't use reflect.DeepEqual for the header because it contains // types.Currency fields - merkleRoots, err := sc.merkleRoots() + merkleRoots, err := sc.merkleRoots.merkleRoots() if err != nil { t.Fatal("failed to get merkle roots", err) } @@ -95,7 +95,7 @@ func TestContractUncommittedTxn(t *testing.T) { t.Fatal("WAL transaction changed") } // the state of the contract should match the initial state - merkleRoots, err = sc.merkleRoots() + merkleRoots, err = sc.merkleRoots.merkleRoots() if err != nil { t.Fatal("failed to get merkle roots:", err) } @@ -115,7 +115,7 @@ func TestContractUncommittedTxn(t *testing.T) { t.Fatal("expected 0 unappliedTxns, got", len(sc.unappliedTxns)) } // the state of the contract should now match the revised state - merkleRoots, err = sc.merkleRoots() + merkleRoots, err = sc.merkleRoots.merkleRoots() if err != nil { t.Fatal("failed to get merkle roots:", err) } diff --git a/modules/renter/proto/editor.go b/modules/renter/proto/editor.go index f0d66d286f..d85868282a 100644 --- a/modules/renter/proto/editor.go +++ b/modules/renter/proto/editor.go @@ -89,21 +89,14 @@ func (he *Editor) Upload(data []byte) (_ modules.RenterContract, _ crypto.Hash, return modules.RenterContract{}, crypto.Hash{}, errors.New("contract has insufficient collateral to support upload") } - // Get merkle roots - oldRoots, err := sc.merkleRoots() - if err != nil { - return modules.RenterContract{}, crypto.Hash{}, err - } - // calculate the new Merkle root sectorRoot := crypto.MerkleRoot(data) - newRoots := append(oldRoots, sectorRoot) - merkleRoot := cachedMerkleRoot(newRoots) + merkleRoot := sc.merkleRoots.newRoot(sectorRoot) // create the action and revision actions := []modules.RevisionAction{{ Type: modules.ActionInsert, - SectorIndex: uint64(sc.numMerkleRoots), + SectorIndex: uint64(sc.merkleRoots.len()), Data: data, }} rev := newUploadRevision(contract.LastRevision(), merkleRoot, sectorPrice, sectorCollateral) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go new file mode 100644 index 0000000000..7df14b1c43 --- /dev/null +++ b/modules/renter/proto/merkleroots.go @@ -0,0 +1,248 @@ +package proto + +import ( + "io" + "os" + + "github.com/NebulousLabs/Sia/build" + "github.com/NebulousLabs/Sia/crypto" + "github.com/NebulousLabs/errors" +) + +// merkleRootsCacheHeight is the height of the subTrees in cachedSubTrees. A +// height of 7 means that 128 sector roots are covered by a single cached +// subTree. +const merkleRootsCacheHeight = 7 + +type ( + // merkleRoots is a helper struct that makes it easier to add/insert/remove + // merkleRoots within a SafeContract. 
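With merkleRootsCacheHeight fixed at 7, one cachedSubTree stands in for 2^7 = 128 sector roots, so at most 127 roots ever sit in uncachedRoots. A small sketch of that bookkeeping, using only the constant shown above:

package main

import "fmt"

const merkleRootsCacheHeight = 7
const rootsPerCachedSubTree = 1 << merkleRootsCacheHeight // 128

func main() {
	// For n sector roots, the in-memory state is one 32-byte sum per full
	// group of 128 roots plus the trailing remainder of individual roots.
	for _, n := range []int{0, 127, 128, 1000, 100000} {
		cached := n / rootsPerCachedSubTree
		uncached := n % rootsPerCachedSubTree
		fmt.Printf("n=%6d cachedSubTrees=%4d uncachedRoots=%3d\n", n, cached, uncached)
	}
}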
+ merkleRoots struct { + // cachedSubTrees are cached trees that can be used to more efficiently + // compute the merkle root of a contract. The cached trees are not + // persisted and are computed after startup. + cachedSubTrees []*cachedSubTree + // uncachedRoots contains the sector roots that are not part of a + // cached subTree. The uncachedRoots slice should never get longer than + // 2^merkleRootsCacheHeight since that would simply result in a new + // cached subTree in cachedSubTrees. + uncachedRoots []crypto.Hash + + // file is the file of the safe contract that contains the root. + file *os.File + // numMerkleRoots is the number of merkle roots in file. + numMerkleRoots int + } + + cachedSubTree struct { + height int + sum crypto.Hash + } +) + +// loadExistingMerkleRoots reads creates a merkleRoots object from existing +// merkle roots. +func loadExistingMerkleRoots(file *os.File) (mr *merkleRoots, err error) { + mr = &merkleRoots{ + file: file, + } + // Get the number of roots stored in the file. + mr.numMerkleRoots, err = mr.lenFromFile() + if err != nil { + return + } + // Seek to the first root's offset. + if _, err = file.Seek(rootIndexToOffset(0), io.SeekStart); err != nil { + return + } + // Read the roots from the file without reading all of them at once. + for i := 0; i < mr.numMerkleRoots; i++ { + var root crypto.Hash + if _, err = io.ReadFull(file, root[:]); err == io.EOF { + break + } else if err != nil { + return + } + + // Append the root to the unachedRoots + mr.uncachedRoots = append(mr.uncachedRoots, root) + + // If the uncachedRoots grew too large we add them to the cache. + if len(mr.uncachedRoots) == 1<= mr.numMerkleRoots { + build.Critical("can't delete non-existing root") + return nil + } + // If i is the index of the last element we call deleteLastRoot. + if i == mr.numMerkleRoots-1 { + return mr.deleteLastRoot() + } + // - swap root at i with last root of mr.uncachedRoots. If that is not + // possible because len(mr.uncachedRoots) == 0 we need to delete the last + // cache and append its elements to mr.uncachedRoots before we swap. + // - if root at i is in a cache we need to reconstruct that cache after swapping. + // - call deleteLastRoot to get rid of the swapped element at the end of mr.u + panic("not implemented yet") +} + +// deleteLastRoot deletes the last sector root of the contract. +func (mr *merkleRoots) deleteLastRoot() error { + // - Truncate file + // - If len(mr.cachedSubTrees) == 0 delete the last subtree, load its + // elements from disk and append them to mr.uncachedRoots + panic("not implemented yet") +} + +// insert inserts a root by replacing a root at an existing index. +func (mr *merkleRoots) insert(index int, root crypto.Hash) error { + if index > mr.numMerkleRoots { + return errors.New("can't insert at a index greater than the number of roots") + } + panic("not yet implemented") +} + +// lenFromFile returns the number of merkle roots by computing it from the +// filesize. +func (mr *merkleRoots) lenFromFile() (int, error) { + offset, err := mr.file.Seek(0, io.SeekEnd) + if err != nil { + return 0, err + } + // If we haven't written a single root yet we just return 0. + rootStart := rootIndexToOffset(0) + if offset < rootStart { + return 0, nil + } + + // Sanity check contract file length. + if (offset-rootStart)%crypto.HashSize != 0 { + build.Critical("contract file has unexpected length and might be corrupted.") + } + return int((offset - rootStart) / crypto.HashSize), nil +} + +// len returns the number of merkle roots. 
It should always return the same +// number as lenFromFile. +func (mr *merkleRoots) len() int { + return mr.numMerkleRoots +} + +// push appends a merkle root to the end of the contract. If the number of +// uncached merkle roots grows too big we cache them in a new subTree. +func (mr *merkleRoots) push(root crypto.Hash) error { + // Sanity check the number of uncached roots before adding a new one. + if len(mr.uncachedRoots) == 1< Date: Wed, 21 Mar 2018 10:55:41 -0400 Subject: [PATCH 115/212] add insert method to merkleRoots --- modules/renter/proto/merkleroots.go | 58 ++++++++++++++++++++++-- modules/renter/proto/merkleroots_test.go | 57 +++++++++++++++++++++++ 2 files changed, 110 insertions(+), 5 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index 7df14b1c43..cf63a1f783 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -68,7 +68,7 @@ func loadExistingMerkleRoots(file *os.File) (mr *merkleRoots, err error) { mr.uncachedRoots = append(mr.uncachedRoots, root) // If the uncachedRoots grew too large we add them to the cache. - if len(mr.uncachedRoots) == 1< mr.numMerkleRoots { return errors.New("can't insert at a index greater than the number of roots") } - panic("not yet implemented") + // Replaced the root on disk. + _, err := mr.file.WriteAt(root[:], rootIndexToOffset(index)) + if err != nil { + return errors.AddContext(err, "failed to insert root on disk") + } + + // Find out if the root is in mr.cachedSubTree or mr.uncachedRoots. + i, cached := mr.isIndexCached(index) + // If the root was not cached we can simply replace it in mr.uncachedRoots. + if !cached { + mr.uncachedRoots[i] = root + return nil + } + // If the root was cached we need to rebuild the cache. + if err := mr.rebuildCachedTree(i); err != nil { + return errors.AddContext(err, "failed to rebuild cache for inserted root") + } + return nil +} + +// isIndexCached determines if the root at index i is already cached in +// mr.cachedSubTree or if it is still in mr.uncachedRoots. It will return true +// or false and the index of the root in the corresponding data structure. +func (mr *merkleRoots) isIndexCached(i int) (int, bool) { + if i/(1< Date: Wed, 21 Mar 2018 11:49:49 -0400 Subject: [PATCH 116/212] add deleteLastRoot method for merkleRoots --- modules/renter/proto/merkleroots.go | 66 ++++++++++++++------- modules/renter/proto/merkleroots_test.go | 74 +++++++++++++++++++++++- 2 files changed, 118 insertions(+), 22 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index cf63a1f783..a8c3c59fe6 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -1,6 +1,7 @@ package proto import ( + "fmt" "io" "os" @@ -126,10 +127,27 @@ func (mr *merkleRoots) delete(i int) error { // deleteLastRoot deletes the last sector root of the contract. func (mr *merkleRoots) deleteLastRoot() error { - // - Truncate file - // - If len(mr.cachedSubTrees) == 0 delete the last subtree, load its - // elements from disk and append them to mr.uncachedRoots - panic("not implemented yet") + // Decrease the numMerkleRoots counter. + mr.numMerkleRoots-- + // Truncate file to avoid interpreting trailing data as valid. + if err := mr.file.Truncate(rootIndexToOffset(mr.numMerkleRoots)); err != nil { + return errors.AddContext(err, "failed to delete last root from file") + } + // If the last element is uncached we can simply remove it from the slice. 
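deleteLastRoot above removes a root by decrementing the counter and truncating the contract file at the offset where that root began (header size plus 32 bytes per remaining root). A standalone sketch of the truncate step on a throwaway file, with a made-up headerSize standing in for contractHeaderSize:

package main

import (
	"fmt"
	"log"
	"os"
)

const headerSize = 128 // placeholder for contractHeaderSize
const hashSize = 32    // one merkle root on disk

func offsetOfRoot(i int) int64 { return headerSize + hashSize*int64(i) }

func main() {
	f, err := os.CreateTemp("", "roots")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	// Grow the file to the size of a header plus 3 roots (contents don't
	// matter for this sketch).
	if err := f.Truncate(offsetOfRoot(3)); err != nil {
		log.Fatal(err)
	}

	// Deleting the last root: cut the file right where root #2 ends.
	numRoots := 3
	numRoots--
	if err := f.Truncate(offsetOfRoot(numRoots)); err != nil {
		log.Fatal(err)
	}
	fi, _ := f.Stat()
	fmt.Println(fi.Size() == offsetOfRoot(2)) // true
}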
+ if len(mr.uncachedRoots) > 0 { + mr.uncachedRoots = mr.uncachedRoots[:len(mr.uncachedRoots)-1] + return nil + } + // If it is not uncached we need to delete the last cached tree and load + // its elements into mr.uncachedRoots. + mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] + rootIndex := len(mr.cachedSubTrees) * (1 << merkleRootsCacheHeight) + roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) + if err != nil { + return errors.AddContext(err, "failed to read cached tree's roots") + } + mr.uncachedRoots = append(mr.uncachedRoots, roots...) + return nil } // insert inserts a root by replacing a root at an existing index. @@ -255,24 +273,35 @@ func (mr *merkleRoots) newRoot(newRoot crypto.Hash) crypto.Hash { // merkleRoots reads all the merkle roots from disk and returns them. This is // not very fast and should only be used for testing purposes. -func (mr *merkleRoots) merkleRoots() ([]crypto.Hash, error) { - merkleRoots := make([]crypto.Hash, 0, mr.numMerkleRoots) - if _, err := mr.file.Seek(rootIndexToOffset(0), io.SeekStart); err != nil { +func (mr *merkleRoots) merkleRoots() (roots []crypto.Hash, err error) { + // Get roots. + roots, err = mr.merkleRootsFromIndex(0, mr.numMerkleRoots) + if err != nil { + return nil, err + } + // Sanity check: should have read exactly numMerkleRoots roots. + if len(roots) != mr.numMerkleRoots { + build.Critical(fmt.Sprintf("Number of merkle roots on disk (%v) doesn't match numMerkleRoots (%v)", + len(roots), mr.numMerkleRoots)) + } + return +} + +// merkleRootsFrom index readds all the merkle roots in range [from;to) +func (mr *merkleRoots) merkleRootsFromIndex(from, to int) ([]crypto.Hash, error) { + merkleRoots := make([]crypto.Hash, 0, mr.numMerkleRoots-1) + if _, err := mr.file.Seek(rootIndexToOffset(from), io.SeekStart); err != nil { return merkleRoots, err } - for { + for i := from; to-i > 0; i++ { var root crypto.Hash if _, err := io.ReadFull(mr.file, root[:]); err == io.EOF { - break + return nil, io.ErrUnexpectedEOF } else if err != nil { return merkleRoots, errors.AddContext(err, "failed to read root from disk") } merkleRoots = append(merkleRoots, root) } - // Sanity check: should have read exactly numMerkleRoots roots. - if len(merkleRoots) != mr.numMerkleRoots { - build.Critical("Number of merkle roots on disk doesn't match numMerkleRoots") - } return merkleRoots, nil } @@ -281,14 +310,9 @@ func (mr *merkleRoots) rebuildCachedTree(index int) error { // Find the index of the first root of the cached tree on disk. rootIndex := index * (1 << merkleRootsCacheHeight) // Read all the roots necessary for creating the cached tree. 
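merkleRootsFromIndex above reads the half-open range [from, to) of 32-byte roots straight out of the contract file. A stdlib-only version of the same loop over any io.ReaderAt (the real code seeks the shared *os.File instead and wraps errors with extra context):

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
)

const hashSize = 32

type hash [hashSize]byte

// readRoots returns the roots in [from, to) from r, where the roots region
// starts at base and every root occupies hashSize bytes.
func readRoots(r io.ReaderAt, base int64, from, to int) ([]hash, error) {
	roots := make([]hash, 0, to-from)
	for i := from; i < to; i++ {
		var h hash
		if _, err := r.ReadAt(h[:], base+int64(i)*hashSize); err != nil {
			return nil, err
		}
		roots = append(roots, h)
	}
	return roots, nil
}

func main() {
	// Fake roots region: 4 roots whose first byte is their index.
	buf := make([]byte, 4*hashSize)
	for i := 0; i < 4; i++ {
		buf[i*hashSize] = byte(i)
	}
	roots, err := readRoots(bytes.NewReader(buf), 0, 1, 3)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(roots), roots[0][0], roots[1][0]) // 2 1 2
}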
- roots := make([]crypto.Hash, 0, (1 << merkleRootsCacheHeight)) - for i := 0; i < (1 << merkleRootsCacheHeight); i++ { - var root crypto.Hash - _, err := mr.file.ReadAt(root[:], rootIndexToOffset(rootIndex+i)) - if err != nil { - return errors.AddContext(err, "failed to read sector required for rebuild from disk") - } - roots = append(roots, root) + roots, err := mr.merkleRootsFromIndex(rootIndex, rootIndex+(1< Date: Wed, 21 Mar 2018 13:14:14 -0400 Subject: [PATCH 117/212] add delete method to merkleRoots --- modules/renter/proto/merkleroots.go | 34 ++++++++--- modules/renter/proto/merkleroots_test.go | 76 ++++++++++++++++++++++++ 2 files changed, 103 insertions(+), 7 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index a8c3c59fe6..f5fcca29e0 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -117,12 +117,32 @@ func (mr *merkleRoots) delete(i int) error { if i == mr.numMerkleRoots-1 { return mr.deleteLastRoot() } - // - swap root at i with last root of mr.uncachedRoots. If that is not - // possible because len(mr.uncachedRoots) == 0 we need to delete the last - // cache and append its elements to mr.uncachedRoots before we swap. - // - if root at i is in a cache we need to reconstruct that cache after swapping. - // - call deleteLastRoot to get rid of the swapped element at the end of mr.u - panic("not implemented yet") + // If we don't have any uncached roots we need to delete the last cached + // tree and add its elements to the uncached roots. + if len(mr.uncachedRoots) == 0 { + mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] + rootIndex := len(mr.cachedSubTrees) * (1 << merkleRootsCacheHeight) + roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) + if err != nil { + return errors.AddContext(err, "failed to read cached tree's roots") + } + mr.uncachedRoots = append(mr.uncachedRoots, roots...) + } + // Swap the root at index i with the last root in mr.uncachedRoots. + _, err := mr.file.WriteAt(mr.uncachedRoots[len(mr.uncachedRoots)-1][:], rootIndexToOffset(i)) + if err != nil { + return errors.AddContext(err, "failed to swap root to delete with last one") + } + // If the deleted root was cached we need to rebuild that cache. + if cacheIndex, cached := mr.isIndexCached(i); cached { + err = mr.rebuildCachedTree(cacheIndex) + } + if err != nil { + return errors.AddContext(err, "failed to rebuild cached tree") + } + // Now that the element we want to delete is the last root we can simply + // delete it by calling mr.deleteLastRoot. + return mr.deleteLastRoot() } // deleteLastRoot deletes the last sector root of the contract. 
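The delete method above is a swap-with-last removal: the root being deleted is overwritten on disk with the final root, at most one cachedSubTree has to be rebuilt, and the file then shrinks by one root via deleteLastRoot. The same idea on a plain in-memory slice, ignoring the caching and persistence:

package main

import "fmt"

// deleteSwap removes index i in O(1) by moving the last element into its
// place, which is why the on-disk version above only ever rebuilds the one
// cached subTree that covers index i. Ordering of the remaining roots changes.
func deleteSwap(roots []int, i int) []int {
	roots[i] = roots[len(roots)-1]
	return roots[:len(roots)-1]
}

func main() {
	roots := []int{10, 11, 12, 13, 14}
	fmt.Println(deleteSwap(roots, 1)) // [10 14 12 13]
}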
@@ -289,7 +309,7 @@ func (mr *merkleRoots) merkleRoots() (roots []crypto.Hash, err error) { // merkleRootsFrom index readds all the merkle roots in range [from;to) func (mr *merkleRoots) merkleRootsFromIndex(from, to int) ([]crypto.Hash, error) { - merkleRoots := make([]crypto.Hash, 0, mr.numMerkleRoots-1) + merkleRoots := make([]crypto.Hash, 0, to-from) if _, err := mr.file.Seek(rootIndexToOffset(from), io.SeekStart); err != nil { return merkleRoots, err } diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index 94af3ed949..e8f8697962 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -190,3 +190,79 @@ func TestDeleteLastRoot(t *testing.T) { t.Fatal("expected 0 cached roots but was", len(merkleRoots.cachedSubTrees)) } } + +// TestDeleteLastRoot tests the deleteLastRoot method by creating many roots +// and deleting random indices until there are no more roots left. +func TestDelete(t *testing.T) { + dir := build.TempDir(t.Name()) + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal(err) + } + filePath := path.Join(dir, "file.dat") + file, err := os.Create(filePath) + if err != nil { + t.Fatal(err) + } + + // Create many sector roots. + numMerkleRoots := 1000 + merkleRoots := newMerkleRoots(file) + for i := 0; i < numMerkleRoots; i++ { + hash := crypto.Hash{} + copy(hash[:], fastrand.Bytes(crypto.HashSize)[:]) + merkleRoots.push(hash) + } + + for merkleRoots.numMerkleRoots > 0 { + // Randomly choose a root to delete. + deleteIndex := fastrand.Intn(merkleRoots.numMerkleRoots) + + // Get some metrics to be able to check if delete was working as expected. + numRoots := merkleRoots.numMerkleRoots + numCached := len(merkleRoots.cachedSubTrees) + numUncached := len(merkleRoots.uncachedRoots) + cachedIndex, cached := merkleRoots.isIndexCached(deleteIndex) + + if err := merkleRoots.delete(deleteIndex); err != nil { + t.Fatal("failed to delete random index", deleteIndex, err) + } + // Number of roots should have decreased. + if merkleRoots.numMerkleRoots != numRoots-1 { + t.Fatal("number of roots in memory should have decreased") + } + // Number of roots on disk should have decreased. + if roots, err := merkleRoots.merkleRoots(); err != nil { + t.Fatal("failed to get roots from disk") + } else if len(roots) != numRoots-1 { + t.Fatal("number of roots on disk should have decreased") + } + // If the number of uncached roots was >0 the cached roots should be + // the same and the number of uncached roots should have decreased by 1. + if numUncached > 0 && !(len(merkleRoots.cachedSubTrees) == numCached && len(merkleRoots.uncachedRoots) == numUncached-1) { + t.Fatal("deletion of uncached root failed") + } + // If the number of uncached roots was 0, there should be 1 less cached + // root and the uncached roots should have length + // 2^merkleRootsCacheHeight-1. 
+ if numUncached == 0 && !(len(merkleRoots.cachedSubTrees) == numCached-1 && len(merkleRoots.uncachedRoots) == (1< cachedIndex { + subTreeLen := int(1 << merkleRootsCacheHeight) + from := cachedIndex * (1 << merkleRootsCacheHeight) + rooots, err := merkleRoots.merkleRoots() + roots, err := merkleRoots.merkleRootsFromIndex(from, from+subTreeLen) + if err != nil { + println("from", from) + println("len", len(rooots)) + println("expectedlen", merkleRoots.numMerkleRoots) + t.Fatal("failed to read roots of subTree", err) + } + if merkleRoots.cachedSubTrees[cachedIndex].sum != newCachedSubTree(roots).sum { + t.Fatal("new cached root sum doesn't match expected sum") + } + } + } +} From a9745b1fbed2f64f5715c98900b2f670443e1c9e Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 22 Mar 2018 10:19:26 -0400 Subject: [PATCH 118/212] skip tests if testing.Short --- modules/renter/proto/contract.go | 8 +------- modules/renter/proto/merkleroots.go | 3 +++ modules/renter/proto/merkleroots_test.go | 16 ++++++++++++---- 3 files changed, 16 insertions(+), 11 deletions(-) diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index a3e29a0df6..3f2255cf97 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -248,13 +248,7 @@ func (c *SafeContract) applySetRoot(root crypto.Hash, index int) error { if _, err := c.f.WriteAt(root[:], rootOffset); err != nil { return err } - numMerkleRoots := c.merkleRoots.len() - if index == numMerkleRoots { - c.merkleRoots.push(root) - } else { - return c.merkleRoots.insert(index, root) - } - return nil + return c.merkleRoots.insert(index, root) } func (c *SafeContract) recordUploadIntent(rev types.FileContractRevision, root crypto.Hash, storageCost, bandwidthCost types.Currency) (*writeaheadlog.Transaction, error) { diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index f5fcca29e0..4af5921271 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -175,6 +175,9 @@ func (mr *merkleRoots) insert(index int, root crypto.Hash) error { if index > mr.numMerkleRoots { return errors.New("can't insert at a index greater than the number of roots") } + if index == mr.numMerkleRoots { + return mr.push(root) + } // Replaced the root on disk. _, err := mr.file.WriteAt(root[:], rootIndexToOffset(index)) if err != nil { diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index e8f8697962..31527b6388 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -14,6 +14,9 @@ import ( // TestLoadExistingMerkleRoots tests if it is possible to load existing merkle // roots from disk. func TestLoadExistingMerkleRoots(t *testing.T) { + if testing.Short() { + t.SkipNow() + } // Create a file for the test. dir := build.TempDir(t.Name()) if err := os.MkdirAll(dir, 0755); err != nil { @@ -66,6 +69,9 @@ func TestLoadExistingMerkleRoots(t *testing.T) { // TestInsertMerkleRoot tests the merkleRoots' insert method. func TestInsertMerkleRoot(t *testing.T) { + if testing.Short() { + t.SkipNow() + } dir := build.TempDir(t.Name()) if err := os.MkdirAll(dir, 0755); err != nil { t.Fatal(err) @@ -120,6 +126,9 @@ func TestInsertMerkleRoot(t *testing.T) { // TestDeleteLastRoot tests the deleteLastRoot method. 
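The testing.Short guards added above let these disk-heavy tests be skipped with go test -short while full runs still exercise them. The guard in isolation, with a hypothetical test name:

package proto

import "testing"

// TestSomethingExpensive shows the pattern used above: when the -short flag
// is set the test exits immediately as skipped, otherwise it runs in full.
func TestSomethingExpensive(t *testing.T) {
	if testing.Short() {
		t.SkipNow()
	}
	// ... create temp files, push thousands of roots, etc. ...
}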
func TestDeleteLastRoot(t *testing.T) { + if testing.Short() { + t.SkipNow() + } dir := build.TempDir(t.Name()) if err := os.MkdirAll(dir, 0755); err != nil { t.Fatal(err) @@ -194,6 +203,9 @@ func TestDeleteLastRoot(t *testing.T) { // TestDeleteLastRoot tests the deleteLastRoot method by creating many roots // and deleting random indices until there are no more roots left. func TestDelete(t *testing.T) { + if testing.Short() { + t.SkipNow() + } dir := build.TempDir(t.Name()) if err := os.MkdirAll(dir, 0755); err != nil { t.Fatal(err) @@ -252,12 +264,8 @@ func TestDelete(t *testing.T) { if cached && len(merkleRoots.cachedSubTrees) > cachedIndex { subTreeLen := int(1 << merkleRootsCacheHeight) from := cachedIndex * (1 << merkleRootsCacheHeight) - rooots, err := merkleRoots.merkleRoots() roots, err := merkleRoots.merkleRootsFromIndex(from, from+subTreeLen) if err != nil { - println("from", from) - println("len", len(rooots)) - println("expectedlen", merkleRoots.numMerkleRoots) t.Fatal("failed to read roots of subTree", err) } if merkleRoots.cachedSubTrees[cachedIndex].sum != newCachedSubTree(roots).sum { From f396bf277c0c75bd22c8874c6e7efa2789f3c190 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 22 Mar 2018 16:13:26 -0400 Subject: [PATCH 119/212] add nondeterministic test --- modules/renter/proto/merkleroots_test.go | 48 ++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index 31527b6388..cc858bfa04 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -274,3 +274,51 @@ func TestDelete(t *testing.T) { } } } + +// TestMerkleRootsRandom creates a large number of merkle roots and runs random +// valid operations on them that shouldn't result in any errors. +func TestMerkleRootsRandom(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + dir := build.TempDir(t.Name()) + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatal(err) + } + filePath := path.Join(dir, "file.dat") + file, err := os.Create(filePath) + if err != nil { + t.Fatal(err) + } + + // Create many sector roots. + numMerkleRoots := 10000 + merkleRoots := newMerkleRoots(file) + for i := 0; i < numMerkleRoots; i++ { + hash := crypto.Hash{} + copy(hash[:], fastrand.Bytes(crypto.HashSize)[:]) + merkleRoots.push(hash) + } + + // Randomly insert or delete elements. 
+ for i := 0; i < numMerkleRoots; i++ { + operation := fastrand.Intn(2) + + // Delete + if operation == 0 { + index := fastrand.Intn(merkleRoots.numMerkleRoots) + if err := merkleRoots.delete(index); err != nil { + t.Fatalf("failed to delete %v: %v", index, err) + } + continue + } + + // Insert + var hash crypto.Hash + copy(hash[:], fastrand.Bytes(len(hash))) + index := fastrand.Intn(merkleRoots.numMerkleRoots + 1) + if err := merkleRoots.insert(index, hash); err != nil { + t.Fatalf("failed to insert %v at %v: %v", hash, index, err) + } + } +} From d1c9b91cad09a1bad622acd8d309c0de6cb36de5 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 10:31:35 -0400 Subject: [PATCH 120/212] implement review comments --- modules/renter/proto/merkleroots.go | 109 +++++++++++++---------- modules/renter/proto/merkleroots_test.go | 109 +++++++++++++++++++++-- 2 files changed, 162 insertions(+), 56 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index 4af5921271..edd0f84fc3 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -1,6 +1,10 @@ +// TODO currently the cached trees are not persisted and we build them at +// startup. For petabytes of data this might take a long time. + package proto import ( + "bufio" "fmt" "io" "os" @@ -15,13 +19,16 @@ import ( // subTree. const merkleRootsCacheHeight = 7 +// merkleRootsPerCache is the number of merkle roots in a cached subTree of +// merkleRootsCacheHeight height. +const merkleRootsPerCache = 1 << merkleRootsCacheHeight + type ( // merkleRoots is a helper struct that makes it easier to add/insert/remove // merkleRoots within a SafeContract. merkleRoots struct { // cachedSubTrees are cached trees that can be used to more efficiently - // compute the merkle root of a contract. The cached trees are not - // persisted and are computed after startup. + // compute the merkle root of a contract. cachedSubTrees []*cachedSubTree // uncachedRoots contains the sector roots that are not part of a // cached subTree. The uncachedRoots slice should never get longer than @@ -35,45 +42,44 @@ type ( numMerkleRoots int } + // cachedSubTree is a cached subTree of a merkle tree. A height of 0 means + // that the sum is the hash of a leaf. A subTree of height 1 means sum is + // the root of 2 leaves. A subTree of height 2 contains the root of 4 + // leaves and so on. cachedSubTree struct { - height int - sum crypto.Hash + height int // height of the subTree + sum crypto.Hash // root of the subTree } ) // loadExistingMerkleRoots reads creates a merkleRoots object from existing // merkle roots. -func loadExistingMerkleRoots(file *os.File) (mr *merkleRoots, err error) { - mr = &merkleRoots{ +func loadExistingMerkleRoots(file *os.File) (*merkleRoots, error) { + mr := &merkleRoots{ file: file, } // Get the number of roots stored in the file. + var err error mr.numMerkleRoots, err = mr.lenFromFile() if err != nil { - return + return nil, err } // Seek to the first root's offset. - if _, err = file.Seek(rootIndexToOffset(0), io.SeekStart); err != nil { - return + if _, err = file.Seek(fileOffsetFromRootIndex(0), io.SeekStart); err != nil { + return nil, err } // Read the roots from the file without reading all of them at once. 
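Wrapping the file in a bufio.Reader, as the added line below does, turns the many 32-byte root reads into a few buffered reads while still decoding one root at a time. A stripped-down version of that loop, assuming the reader already sits at the first root:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"log"
)

const hashSize = 32

func main() {
	// Pretend this is the roots region of a contract file holding 3 roots.
	file := bytes.NewReader(make([]byte, 3*hashSize))

	r := bufio.NewReader(file) // batches the underlying reads
	n := 0
	for {
		var root [hashSize]byte
		if _, err := io.ReadFull(r, root[:]); err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		n++
	}
	fmt.Println("read", n, "roots") // read 3 roots
}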
+ r := bufio.NewReader(file) for i := 0; i < mr.numMerkleRoots; i++ { var root crypto.Hash - if _, err = io.ReadFull(file, root[:]); err == io.EOF { + if _, err = io.ReadFull(r, root[:]); err == io.EOF { break } else if err != nil { - return + return nil, err } - // Append the root to the unachedRoots - mr.uncachedRoots = append(mr.uncachedRoots, root) - - // If the uncachedRoots grew too large we add them to the cache. - if len(mr.uncachedRoots) == (1 << merkleRootsCacheHeight) { - st := newCachedSubTree(mr.uncachedRoots) - mr.cachedSubTrees = append(mr.cachedSubTrees, st) - mr.uncachedRoots = mr.uncachedRoots[:0] - } + // Append the root to the uncachedRoots + mr.appendRoot(root) } return mr, nil } @@ -82,7 +88,7 @@ func loadExistingMerkleRoots(file *os.File) (mr *merkleRoots, err error) { // 2^merkleRootsCacheHeight roots. func newCachedSubTree(roots []crypto.Hash) *cachedSubTree { // Sanity check the input length. - if len(roots) != (1 << merkleRootsCacheHeight) { + if len(roots) != merkleRootsPerCache { build.Critical("can't create a cached subTree from the provided number of roots") } return &cachedSubTree{ @@ -100,9 +106,9 @@ func newMerkleRoots(file *os.File) *merkleRoots { } } -// rootIndexToOffset calculates the offset of the merkle root at index i from +// fileOffsetFromRootIndex calculates the offset of the merkle root at index i from // the beginning of the contract file. -func rootIndexToOffset(i int) int64 { +func fileOffsetFromRootIndex(i int) int64 { return contractHeaderSize + crypto.HashSize*int64(i) } @@ -121,7 +127,7 @@ func (mr *merkleRoots) delete(i int) error { // tree and add its elements to the uncached roots. if len(mr.uncachedRoots) == 0 { mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] - rootIndex := len(mr.cachedSubTrees) * (1 << merkleRootsCacheHeight) + rootIndex := len(mr.cachedSubTrees) * merkleRootsPerCache roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) if err != nil { return errors.AddContext(err, "failed to read cached tree's roots") @@ -129,7 +135,7 @@ func (mr *merkleRoots) delete(i int) error { mr.uncachedRoots = append(mr.uncachedRoots, roots...) } // Swap the root at index i with the last root in mr.uncachedRoots. - _, err := mr.file.WriteAt(mr.uncachedRoots[len(mr.uncachedRoots)-1][:], rootIndexToOffset(i)) + _, err := mr.file.WriteAt(mr.uncachedRoots[len(mr.uncachedRoots)-1][:], fileOffsetFromRootIndex(i)) if err != nil { return errors.AddContext(err, "failed to swap root to delete with last one") } @@ -150,7 +156,7 @@ func (mr *merkleRoots) deleteLastRoot() error { // Decrease the numMerkleRoots counter. mr.numMerkleRoots-- // Truncate file to avoid interpreting trailing data as valid. - if err := mr.file.Truncate(rootIndexToOffset(mr.numMerkleRoots)); err != nil { + if err := mr.file.Truncate(fileOffsetFromRootIndex(mr.numMerkleRoots)); err != nil { return errors.AddContext(err, "failed to delete last root from file") } // If the last element is uncached we can simply remove it from the slice. @@ -161,7 +167,7 @@ func (mr *merkleRoots) deleteLastRoot() error { // If it is not uncached we need to delete the last cached tree and load // its elements into mr.uncachedRoots. 
mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] - rootIndex := len(mr.cachedSubTrees) * (1 << merkleRootsCacheHeight) + rootIndex := len(mr.cachedSubTrees) * merkleRootsPerCache roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) if err != nil { return errors.AddContext(err, "failed to read cached tree's roots") @@ -179,7 +185,7 @@ func (mr *merkleRoots) insert(index int, root crypto.Hash) error { return mr.push(root) } // Replaced the root on disk. - _, err := mr.file.WriteAt(root[:], rootIndexToOffset(index)) + _, err := mr.file.WriteAt(root[:], fileOffsetFromRootIndex(index)) if err != nil { return errors.AddContext(err, "failed to insert root on disk") } @@ -205,29 +211,29 @@ func (mr *merkleRoots) isIndexCached(i int) (int, bool) { if i/(1< 0; i++ { var root crypto.Hash - if _, err := io.ReadFull(mr.file, root[:]); err == io.EOF { + if _, err := io.ReadFull(r, root[:]); err == io.EOF { return nil, io.ErrUnexpectedEOF } else if err != nil { return merkleRoots, errors.AddContext(err, "failed to read root from disk") @@ -331,7 +344,7 @@ func (mr *merkleRoots) merkleRootsFromIndex(from, to int) ([]crypto.Hash, error) // rebuildCachedTree rebuilds the tree in mr.cachedSubTree at index i. func (mr *merkleRoots) rebuildCachedTree(index int) error { // Find the index of the first root of the cached tree on disk. - rootIndex := index * (1 << merkleRootsCacheHeight) + rootIndex := index * merkleRootsPerCache // Read all the roots necessary for creating the cached tree. roots, err := mr.merkleRootsFromIndex(rootIndex, rootIndex+(1< 0 { + // 1% chance to reload the roots and check if they are consistent. + if fastrand.Intn(100) == 0 { + loadedRoots, err := loadExistingMerkleRoots(merkleRoots.file) + if err != nil { + t.Fatal("failed to load existing roots", err) + } + if err := cmpRoots(loadedRoots, merkleRoots); err != nil { + t.Fatal(err) + } + } // Randomly choose a root to delete. deleteIndex := fastrand.Intn(merkleRoots.numMerkleRoots) @@ -262,8 +345,8 @@ func TestDelete(t *testing.T) { // If the deleted root was cached we expect the cache to have the // correct, updated value. if cached && len(merkleRoots.cachedSubTrees) > cachedIndex { - subTreeLen := int(1 << merkleRootsCacheHeight) - from := cachedIndex * (1 << merkleRootsCacheHeight) + subTreeLen := merkleRootsPerCache + from := cachedIndex * merkleRootsPerCache roots, err := merkleRoots.merkleRootsFromIndex(from, from+subTreeLen) if err != nil { t.Fatal("failed to read roots of subTree", err) @@ -302,6 +385,16 @@ func TestMerkleRootsRandom(t *testing.T) { // Randomly insert or delete elements. for i := 0; i < numMerkleRoots; i++ { + // 1% chance to reload the roots and check if they are consistent. 
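The 1% reload check introduced below re-parses the on-disk roots in the middle of the test and compares them against the in-memory state, so divergence is caught without paying for a reload on every iteration. A rough sketch of that probabilistic invariant check, with math/rand standing in for fastrand and a trivial comparison standing in for cmpRoots:

package main

import (
	"fmt"
	"math/rand"
)

// checkConsistent stands in for cmpRoots: the on-disk view is authoritative
// and must match what the in-memory bookkeeping believes.
func checkConsistent(onDisk, inMemory []int) error {
	if len(onDisk) != len(inMemory) {
		return fmt.Errorf("length mismatch: %d vs %d", len(onDisk), len(inMemory))
	}
	for i := range onDisk {
		if onDisk[i] != inMemory[i] {
			return fmt.Errorf("root %d differs", i)
		}
	}
	return nil
}

func main() {
	onDisk := []int{1, 2, 3}
	inMemory := []int{1, 2, 3}
	for i := 0; i < 1000; i++ {
		// Roughly 1% of iterations pay the cost of a full compare.
		if rand.Intn(100) == 0 {
			if err := checkConsistent(onDisk, inMemory); err != nil {
				panic(err)
			}
		}
		// ... random insert/delete operations would go here ...
	}
	fmt.Println("invariants held")
}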
+ if fastrand.Intn(100) == 0 { + loadedRoots, err := loadExistingMerkleRoots(merkleRoots.file) + if err != nil { + t.Fatal("failed to load existing roots") + } + if err := cmpRoots(loadedRoots, merkleRoots); err != nil { + t.Fatal(err) + } + } operation := fastrand.Intn(2) // Delete From 62253c208a774853c9a4b4c17d3889429bf4f6d6 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 10 Apr 2018 13:40:43 -0400 Subject: [PATCH 121/212] use reflect.DeepEqual for cached subTree comparison --- modules/renter/proto/merkleroots_test.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index e40cb648c2..928520cdfb 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path" + "reflect" "testing" "github.com/NebulousLabs/Sia/build" @@ -47,11 +48,8 @@ func cmpRoots(m1, m2 *merkleRoots) error { } } for i := 0; i < len(m1.cachedSubTrees); i++ { - if m1.cachedSubTrees[i].height != m2.cachedSubTrees[i].height { - return errors.New("cached root height doesn't match") - } - if m1.cachedSubTrees[i].sum != m2.cachedSubTrees[i].sum { - return errors.New("cached root sum doesn't match") + if !reflect.DeepEqual(m1.cachedSubTrees[i], m2.cachedSubTrees[i]) { + return fmt.Errorf("cached trees at index %v don't match", i) } } return nil From 92dd6b67ba510f1aea9f2890e5b5a2866ee0abab Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 20 Apr 2018 17:04:30 -0400 Subject: [PATCH 122/212] fix TestDelete --- modules/renter/proto/merkleroots.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index edd0f84fc3..f03e7fc639 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -139,9 +139,12 @@ func (mr *merkleRoots) delete(i int) error { if err != nil { return errors.AddContext(err, "failed to swap root to delete with last one") } - // If the deleted root was cached we need to rebuild that cache. - if cacheIndex, cached := mr.isIndexCached(i); cached { - err = mr.rebuildCachedTree(cacheIndex) + // If the deleted root was not cached we swap the roots in memory too. + // Otherwise we rebuild the cachedSubTree. + if index, cached := mr.isIndexCached(i); !cached { + mr.uncachedRoots[index] = mr.uncachedRoots[len(mr.uncachedRoots)-1] + } else { + err = mr.rebuildCachedTree(index) } if err != nil { return errors.AddContext(err, "failed to rebuild cached tree") @@ -208,7 +211,7 @@ func (mr *merkleRoots) insert(index int, root crypto.Hash) error { // mr.cachedSubTree or if it is still in mr.uncachedRoots. It will return true // or false and the index of the root in the corresponding data structure. func (mr *merkleRoots) isIndexCached(i int) (int, bool) { - if i/(1< Date: Wed, 25 Apr 2018 12:29:35 -0400 Subject: [PATCH 123/212] add coments to the merkleRoots and the shared file --- modules/renter/proto/contract.go | 7 +++++++ modules/renter/proto/merkleroots.go | 11 ++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index 3f2255cf97..e450c5665b 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -133,6 +133,13 @@ type SafeContract struct { // applied to the contract file. 
unappliedTxns []*writeaheadlog.Transaction + // file is the file of the safe contract that contains the roots. This + // file is usually shared with the SafeContract which means multiple + // threads could potentially write to the same file. That's why the + // SafeContract should never modify the file beyond the + // contractHeaderSize and the merkleRoots should never modify data + // before that. Both should also use WriteAt and ReadAt instead of + // Write and Read. f *os.File // TODO: use a dependency for this wal *writeaheadlog.WAL mu sync.Mutex diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index f03e7fc639..28a2a62e9f 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -26,6 +26,9 @@ const merkleRootsPerCache = 1 << merkleRootsCacheHeight type ( // merkleRoots is a helper struct that makes it easier to add/insert/remove // merkleRoots within a SafeContract. + // Modifying the merkleRoots is not ACID. This means that the SafeContract + // has to make sure it uses the WAL correctly to guarantee ACID updates to + // the underlying file. merkleRoots struct { // cachedSubTrees are cached trees that can be used to more efficiently // compute the merkle root of a contract. @@ -36,7 +39,13 @@ type ( // cached subTree in cachedSubTrees. uncachedRoots []crypto.Hash - // file is the file of the safe contract that contains the root. + // file is the file of the safe contract that contains the roots. This + // file is usually shared with the SafeContract which means multiple + // threads could potentially write to the same file. That's why the + // SafeContract should never modify the file beyond the + // contractHeaderSize and the merkleRoots should never modify data + // before that. Both should also use WriteAt and ReadAt instead of + // Write and Read. file *os.File // numMerkleRoots is the number of merkle roots in file. numMerkleRoots int From be833a350b6de8f415a81224ef222abeba12e020 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 25 Apr 2018 12:51:12 -0400 Subject: [PATCH 124/212] add helper function for deleting the last cached tree and appending its elements to the uncached roots --- modules/renter/proto/merkleroots.go | 53 ++++++++++++++---------- modules/renter/proto/merkleroots_test.go | 2 +- 2 files changed, 31 insertions(+), 24 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index 28a2a62e9f..0e39ae64f3 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -1,8 +1,8 @@ +package proto + // TODO currently the cached trees are not persisted and we build them at // startup. For petabytes of data this might take a long time. -package proto - import ( "bufio" "fmt" @@ -121,6 +121,17 @@ func fileOffsetFromRootIndex(i int) int64 { return contractHeaderSize + crypto.HashSize*int64(i) } +// appendRoot appends a root to the in-memory structure of the merkleRoots. If +// the length of the uncachedRoots grows too large they will be compressed into +// a cachedSubTree. +func (mr *merkleRoots) appendRoot(root crypto.Hash) { + mr.uncachedRoots = append(mr.uncachedRoots, root) + if len(mr.uncachedRoots) == merkleRootsPerCache { + mr.cachedSubTrees = append(mr.cachedSubTrees, newCachedSubTree(mr.uncachedRoots)) + mr.uncachedRoots = mr.uncachedRoots[:0] + } +} + // delete deletes the sector root at a certain index. 
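The comments added above spell out the sharing rule for the contract file: the SafeContract only touches bytes before contractHeaderSize, the merkleRoots only touches bytes from there on, and both stick to ReadAt/WriteAt so neither depends on a shared seek offset. A small demonstration of why offset-based access keeps the two regions independent, with an arbitrary headerSize:

package main

import (
	"fmt"
	"log"
	"os"
)

const headerSize = 64 // placeholder for contractHeaderSize

func main() {
	f, err := os.CreateTemp("", "contract")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	// Writer 1 (the header owner) stays in [0, headerSize).
	if _, err := f.WriteAt([]byte("header"), 0); err != nil {
		log.Fatal(err)
	}
	// Writer 2 (the roots owner) stays in [headerSize, EOF). Neither call
	// consults or moves the file's seek offset, so the two writers cannot
	// interleave through it.
	if _, err := f.WriteAt([]byte("root-0"), headerSize); err != nil {
		log.Fatal(err)
	}

	buf := make([]byte, 6)
	f.ReadAt(buf, headerSize)
	fmt.Println(string(buf)) // root-0
}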
func (mr *merkleRoots) delete(i int) error { // Check if the index is correct @@ -135,13 +146,9 @@ func (mr *merkleRoots) delete(i int) error { // If we don't have any uncached roots we need to delete the last cached // tree and add its elements to the uncached roots. if len(mr.uncachedRoots) == 0 { - mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] - rootIndex := len(mr.cachedSubTrees) * merkleRootsPerCache - roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) - if err != nil { - return errors.AddContext(err, "failed to read cached tree's roots") + if err := mr.moveLastCachedSubTreeToUncached(); err != nil { + return err } - mr.uncachedRoots = append(mr.uncachedRoots, roots...) } // Swap the root at index i with the last root in mr.uncachedRoots. _, err := mr.file.WriteAt(mr.uncachedRoots[len(mr.uncachedRoots)-1][:], fileOffsetFromRootIndex(i)) @@ -177,14 +184,12 @@ func (mr *merkleRoots) deleteLastRoot() error { return nil } // If it is not uncached we need to delete the last cached tree and load - // its elements into mr.uncachedRoots. - mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] - rootIndex := len(mr.cachedSubTrees) * merkleRootsPerCache - roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) - if err != nil { - return errors.AddContext(err, "failed to read cached tree's roots") + // its elements into mr.uncachedRoots. This should give us + // merkleRootsPerCache-1 uncached roots since we already truncated the file + // by 1 root. + if err := mr.moveLastCachedSubTreeToUncached(); err != nil { + return err } - mr.uncachedRoots = append(mr.uncachedRoots, roots...) return nil } @@ -254,15 +259,17 @@ func (mr *merkleRoots) len() int { return mr.numMerkleRoots } -// appendRoot appends a root to the in-memory structure of the merkleRoots. If -// the length of the uncachedRoots grows too large they will be compressed into -// a cachedSubTree. -func (mr *merkleRoots) appendRoot(root crypto.Hash) { - mr.uncachedRoots = append(mr.uncachedRoots, root) - if len(mr.uncachedRoots) == merkleRootsPerCache { - mr.cachedSubTrees = append(mr.cachedSubTrees, newCachedSubTree(mr.uncachedRoots)) - mr.uncachedRoots = mr.uncachedRoots[:0] +// moveLastCachedSubTreeToUncached deletes the last cached subTree and appends +// its elements to the uncached roots. +func (mr *merkleRoots) moveLastCachedSubTreeToUncached() error { + mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] + rootIndex := len(mr.cachedSubTrees) * merkleRootsPerCache + roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) + if err != nil { + return errors.AddContext(err, "failed to read cached tree's roots") } + mr.uncachedRoots = append(mr.uncachedRoots, roots...) + return nil } // push appends a merkle root to the end of the contract. If the number of diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index 928520cdfb..dc0376922f 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -251,7 +251,7 @@ func TestDeleteLastRoot(t *testing.T) { t.Fatal("roots on disk don't match number of roots in memory") } // There should be 2^merkleRootsCacheHeight - 1 uncached roots now. 
- if len(merkleRoots.uncachedRoots) != (1< Date: Wed, 25 Apr 2018 14:02:03 -0400 Subject: [PATCH 125/212] rename methods and use normal tree instead of cached tree --- modules/renter/proto/editor.go | 2 +- modules/renter/proto/merkleroots.go | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/modules/renter/proto/editor.go b/modules/renter/proto/editor.go index d85868282a..78b9700cf3 100644 --- a/modules/renter/proto/editor.go +++ b/modules/renter/proto/editor.go @@ -91,7 +91,7 @@ func (he *Editor) Upload(data []byte) (_ modules.RenterContract, _ crypto.Hash, // calculate the new Merkle root sectorRoot := crypto.MerkleRoot(data) - merkleRoot := sc.merkleRoots.newRoot(sectorRoot) + merkleRoot := sc.merkleRoots.checkNewRoot(sectorRoot) // create the action and revision actions := []modules.RevisionAction{{ diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index 0e39ae64f3..0c9dd29850 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -88,7 +88,7 @@ func loadExistingMerkleRoots(file *os.File) (*merkleRoots, error) { } // Append the root to the uncachedRoots - mr.appendRoot(root) + mr.appendRootMemory(root) } return mr, nil } @@ -121,10 +121,10 @@ func fileOffsetFromRootIndex(i int) int64 { return contractHeaderSize + crypto.HashSize*int64(i) } -// appendRoot appends a root to the in-memory structure of the merkleRoots. If +// appendRootMemory appends a root to the in-memory structure of the merkleRoots. If // the length of the uncachedRoots grows too large they will be compressed into // a cachedSubTree. -func (mr *merkleRoots) appendRoot(root crypto.Hash) { +func (mr *merkleRoots) appendRootMemory(root crypto.Hash) { mr.uncachedRoots = append(mr.uncachedRoots, root) if len(mr.uncachedRoots) == merkleRootsPerCache { mr.cachedSubTrees = append(mr.cachedSubTrees, newCachedSubTree(mr.uncachedRoots)) @@ -285,7 +285,7 @@ func (mr *merkleRoots) push(root crypto.Hash) error { return err } // Add the root to the unached roots. - mr.appendRoot(root) + mr.appendRootMemory(root) // Increment the number of roots. mr.numMerkleRoots++ @@ -294,22 +294,22 @@ func (mr *merkleRoots) push(root crypto.Hash) error { // root returns the root of the merkle roots. func (mr *merkleRoots) root() crypto.Hash { - tree := crypto.NewCachedTree(sectorHeight) + tree := crypto.NewTree() for _, st := range mr.cachedSubTrees { - if err := tree.PushSubTree(st.height, st.sum); err != nil { + if err := tree.PushSubTree(st.height, st.sum[:]); err != nil { // This should never fail. build.Critical(err) } } for _, root := range mr.uncachedRoots { - tree.Push(root) + tree.Push(root[:]) } return tree.Root() } -// newRoot returns the root of the merkleTree after appending the newRoot +// checkNewRoot returns the root of the merkleTree after appending the checkNewRoot // without actually appending it. 
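root and checkNewRoot above can mix cached subTree sums with individual roots because, in a Merkle tree, the hash of a subtree is a faithful stand-in for all of its leaves. A toy demonstration with plain SHA-256 (ignoring the domain-separation prefixes a production Merkle tree implementation typically hashes in):

package main

import (
	"crypto/sha256"
	"fmt"
)

// parent combines two child hashes into their Merkle parent.
func parent(a, b [32]byte) [32]byte {
	return sha256.Sum256(append(a[:], b[:]...))
}

func main() {
	l1 := sha256.Sum256([]byte("sector 1"))
	l2 := sha256.Sum256([]byte("sector 2"))
	l3 := sha256.Sum256([]byte("sector 3"))
	l4 := sha256.Sum256([]byte("sector 4"))

	// Root computed from all four leaves at once.
	full := parent(parent(l1, l2), parent(l3, l4))

	// Root computed from a cached height-1 subtree sum plus the remaining
	// leaves; the cached sum replaces l1 and l2 entirely.
	cachedSum := parent(l1, l2)
	fromCache := parent(cachedSum, parent(l3, l4))

	fmt.Println(full == fromCache) // true
}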
-func (mr *merkleRoots) newRoot(newRoot crypto.Hash) crypto.Hash { +func (mr *merkleRoots) checkNewRoot(newRoot crypto.Hash) crypto.Hash { tree := crypto.NewCachedTree(sectorHeight) for _, st := range mr.cachedSubTrees { if err := tree.PushSubTree(st.height, st.sum); err != nil { From 55210a52bd045cafee5440758040af05682ae028 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 25 Apr 2018 14:10:53 -0400 Subject: [PATCH 126/212] refactor method names and comments --- modules/renter/proto/merkleroots.go | 13 ++++++------- modules/renter/proto/merkleroots_test.go | 2 +- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index 0c9dd29850..e41a2a1f5b 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -264,7 +264,7 @@ func (mr *merkleRoots) len() int { func (mr *merkleRoots) moveLastCachedSubTreeToUncached() error { mr.cachedSubTrees = mr.cachedSubTrees[:len(mr.cachedSubTrees)-1] rootIndex := len(mr.cachedSubTrees) * merkleRootsPerCache - roots, err := mr.merkleRootsFromIndex(rootIndex, mr.numMerkleRoots) + roots, err := mr.merkleRootsFromIndexFromDisk(rootIndex, mr.numMerkleRoots) if err != nil { return errors.AddContext(err, "failed to read cached tree's roots") } @@ -325,11 +325,10 @@ func (mr *merkleRoots) checkNewRoot(newRoot crypto.Hash) crypto.Hash { return tree.Root() } -// merkleRoots reads all the merkle roots from disk and returns them. This is -// not very fast and should only be used for testing purposes. +// merkleRoots reads all the merkle roots from disk and returns them. func (mr *merkleRoots) merkleRoots() (roots []crypto.Hash, err error) { // Get roots. - roots, err = mr.merkleRootsFromIndex(0, mr.numMerkleRoots) + roots, err = mr.merkleRootsFromIndexFromDisk(0, mr.numMerkleRoots) if err != nil { return nil, err } @@ -341,8 +340,8 @@ func (mr *merkleRoots) merkleRoots() (roots []crypto.Hash, err error) { return } -// merkleRootsFrom index readds all the merkle roots in range [from;to) -func (mr *merkleRoots) merkleRootsFromIndex(from, to int) ([]crypto.Hash, error) { +// merkleRootsFrom index reads all the merkle roots in range [from;to) +func (mr *merkleRoots) merkleRootsFromIndexFromDisk(from, to int) ([]crypto.Hash, error) { merkleRoots := make([]crypto.Hash, 0, to-from) if _, err := mr.file.Seek(fileOffsetFromRootIndex(from), io.SeekStart); err != nil { return merkleRoots, err @@ -365,7 +364,7 @@ func (mr *merkleRoots) rebuildCachedTree(index int) error { // Find the index of the first root of the cached tree on disk. rootIndex := index * merkleRootsPerCache // Read all the roots necessary for creating the cached tree. 
- roots, err := mr.merkleRootsFromIndex(rootIndex, rootIndex+(1< cachedIndex { subTreeLen := merkleRootsPerCache from := cachedIndex * merkleRootsPerCache - roots, err := merkleRoots.merkleRootsFromIndex(from, from+subTreeLen) + roots, err := merkleRoots.merkleRootsFromIndexFromDisk(from, from+subTreeLen) if err != nil { t.Fatal("failed to read roots of subTree", err) } From 042b1a546dbd9f72e1d57d0a8042588d193c8fc4 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 25 Apr 2018 16:14:41 -0400 Subject: [PATCH 127/212] introduce fileSections to guarantee that the safecontract and merkleroots don't modify the wrong section of the file --- modules/renter/proto/contract.go | 48 ++++++------- modules/renter/proto/contractset.go | 2 +- modules/renter/proto/filesection.go | 91 ++++++++++++++++++++++++ modules/renter/proto/merkleroots.go | 61 ++++++++-------- modules/renter/proto/merkleroots_test.go | 27 ++++--- 5 files changed, 156 insertions(+), 73 deletions(-) create mode 100644 modules/renter/proto/filesection.go diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index e450c5665b..17a36e6f70 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -133,16 +133,9 @@ type SafeContract struct { // applied to the contract file. unappliedTxns []*writeaheadlog.Transaction - // file is the file of the safe contract that contains the roots. This - // file is usually shared with the SafeContract which means multiple - // threads could potentially write to the same file. That's why the - // SafeContract should never modify the file beyond the - // contractHeaderSize and the merkleRoots should never modify data - // before that. Both should also use WriteAt and ReadAt instead of - // Write and Read. - f *os.File // TODO: use a dependency for this - wal *writeaheadlog.WAL - mu sync.Mutex + headerFile *fileSection + wal *writeaheadlog.WAL + mu sync.Mutex } // Metadata returns the metadata of a renter contract @@ -194,7 +187,7 @@ func (c *SafeContract) UpdateUtility(utility modules.ContractUtility) error { return err } // Sync the change to disk. - if err := c.f.Sync(); err != nil { + if err := c.headerFile.Sync(); err != nil { return err } // Signal that the update has been applied. 
@@ -241,7 +234,7 @@ func (c *SafeContract) makeUpdateSetRoot(root crypto.Hash, index int) writeahead func (c *SafeContract) applySetHeader(h contractHeader) error { headerBytes := make([]byte, contractHeaderSize) copy(headerBytes, encoding.Marshal(h)) - if _, err := c.f.WriteAt(headerBytes, 0); err != nil { + if _, err := c.headerFile.WriteAt(headerBytes, 0); err != nil { return err } c.headerMu.Lock() @@ -251,10 +244,6 @@ func (c *SafeContract) applySetHeader(h contractHeader) error { } func (c *SafeContract) applySetRoot(root crypto.Hash, index int) error { - rootOffset := contractHeaderSize + crypto.HashSize*int64(index) - if _, err := c.f.WriteAt(root[:], rootOffset); err != nil { - return err - } return c.merkleRoots.insert(index, root) } @@ -297,7 +286,7 @@ func (c *SafeContract) commitUpload(t *writeaheadlog.Transaction, signedTxn type if err := c.applySetRoot(root, c.merkleRoots.len()); err != nil { return err } - if err := c.f.Sync(); err != nil { + if err := c.headerFile.Sync(); err != nil { return err } if err := t.SignalUpdatesApplied(); err != nil { @@ -340,7 +329,7 @@ func (c *SafeContract) commitDownload(t *writeaheadlog.Transaction, signedTxn ty if err := c.applySetHeader(newHeader); err != nil { return err } - if err := c.f.Sync(); err != nil { + if err := c.headerFile.Sync(); err != nil { return err } if err := t.SignalUpdatesApplied(); err != nil { @@ -374,7 +363,7 @@ func (c *SafeContract) commitTxns() error { } } } - if err := c.f.Sync(); err != nil { + if err := c.headerFile.Sync(); err != nil { return err } if err := t.SignalUpdatesApplied(); err != nil { @@ -410,16 +399,15 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha if err != nil { return modules.RenterContract{}, err } - // preallocate space for header + roots - if err := f.Truncate(contractHeaderSize + crypto.HashSize*int64(len(roots))); err != nil { - return modules.RenterContract{}, err - } + // create fileSections + headerSection := newFileSection(f, 0, contractHeaderSize) + rootsSection := newFileSection(f, contractHeaderSize+1, -1) // write header - if _, err := f.WriteAt(encoding.Marshal(h), 0); err != nil { + if _, err := headerSection.WriteAt(encoding.Marshal(h), 0); err != nil { return modules.RenterContract{}, err } // write roots - merkleRoots := newMerkleRoots(f) + merkleRoots := newMerkleRoots(rootsSection) for _, root := range roots { if err := merkleRoots.push(root); err != nil { return modules.RenterContract{}, err @@ -431,7 +419,7 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha sc := &SafeContract{ header: h, merkleRoots: merkleRoots, - f: f, + headerFile: headerSection, wal: cs.wal, } cs.mu.Lock() @@ -447,6 +435,9 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo if err != nil { return err } + headerSection := newFileSection(f, 0, contractHeaderSize) + rootsSection := newFileSection(f, contractHeaderSize+1, -1) + // read header var header contractHeader if err := encoding.NewDecoder(f).Decode(&header); err != nil { @@ -454,8 +445,9 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo } else if err := header.validate(); err != nil { return err } + // read merkleRoots - merkleRoots, err := loadExistingMerkleRoots(f) + merkleRoots, err := loadExistingMerkleRoots(rootsSection) if err != nil { return err } @@ -491,7 +483,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo header: header, merkleRoots: merkleRoots, unappliedTxns: 
unappliedTxns, - f: f, + headerFile: headerSection, wal: cs.wal, } return nil diff --git a/modules/renter/proto/contractset.go b/modules/renter/proto/contractset.go index 883ec77e58..33a78607bf 100644 --- a/modules/renter/proto/contractset.go +++ b/modules/renter/proto/contractset.go @@ -135,7 +135,7 @@ func (cs *ContractSet) ViewAll() []modules.RenterContract { // Close closes all contracts in a contract set, this means rendering it unusable for I/O func (cs *ContractSet) Close() error { for _, c := range cs.contracts { - c.f.Close() + c.headerFile.Close() } _, err := cs.wal.CloseIncomplete() return err diff --git a/modules/renter/proto/filesection.go b/modules/renter/proto/filesection.go new file mode 100644 index 0000000000..a1a52fce25 --- /dev/null +++ b/modules/renter/proto/filesection.go @@ -0,0 +1,91 @@ +package proto + +import ( + "os" +) + +// fileSection is a helper struct that is used to split a file up in multiple +// sections. This guarantees that each part of the file can only write to and +// read from its corresponding section. +type fileSection struct { + f *os.File + start int64 + end int64 +} + +// newFileSection creates a new fileSection from a file and the provided bounds +// of the section. +func newFileSection(f *os.File, start, end int64) *fileSection { + if start < 0 { + panic("filesection can't start at an index < 0") + } + if end < start && end != -1 { + panic("the end of a filesection can't be before the start") + } + return &fileSection{ + f: f, + start: start, + end: end, + } +} + +// Close calls Close on the fileSection's underlying file. +func (f *fileSection) Close() error { + return f.f.Close() +} + +// Size uses the underlying file's stats to return the size of the sector. +func (f *fileSection) Size() (int64, error) { + info, err := f.f.Stat() + if err != nil { + return 0, err + } + size := info.Size() - f.start + if size < 0 { + size = 0 + } + if size > f.end-f.start && f.end != -1 { + size = f.end - f.start + } + return size, nil +} + +// ReadAt calls the fileSection's underlying file's ReadAt method if the read +// happens within the bounds of the section. Otherwise it returns io.EOF. +func (f *fileSection) ReadAt(b []byte, off int64) (int, error) { + if off < 0 { + panic("can't read from an offset before the section start") + } + if f.start+off+int64(len(b)) > f.end && f.end != -1 { + panic("can't read from an offset after the section end") + } + return f.f.ReadAt(b, f.start+off) +} + +// Sync calls Sync on the fileSection's underlying file. +func (f *fileSection) Sync() error { + return f.f.Sync() +} + +// Truncate calls Truncate on the fileSection's underlying file. +func (f *fileSection) Truncate(size int64) error { + if f.start+size < f.start { + panic("can't truncate file to be smaller than the section start") + } + if f.start+size > f.end && f.end != -1 { + panic("can't truncate file to be bigger than the section") + } + return f.f.Truncate(f.start + size) +} + +// WriteAt calls the fileSection's underlying file's WriteAt method if the +// write happens within the bounds of the section. Otherwise it returns io.EOF. 
+func (f *fileSection) WriteAt(b []byte, off int64) (int, error) { + if off < 0 { + panic("can't read from an offset before the section start") + } + if f.start+off+int64(len(b)) > f.end && f.end != -1 { + panic("can't read from an offset after the section end") + } + return f.f.WriteAt(b, off+f.start) +} diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index e41a2a1f5b..2fdc6b9bd3 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -4,10 +4,9 @@ package proto // startup. For petabytes of data this might take a long time. import ( - "bufio" + "bytes" "fmt" "io" - "os" "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/crypto" @@ -39,14 +38,8 @@ type ( // cached subTree in cachedSubTrees. uncachedRoots []crypto.Hash - // file is the file of the safe contract that contains the roots. This - // file is usually shared with the SafeContract which means multiple - // threads could potentially write to the same file. That's why the - // SafeContract should never modify the file beyond the - // contractHeaderSize and the merkleRoots should never modify data - // before that. Both should also use WriteAt and ReadAt instead of - // Write and Read. - file *os.File + // rootsFile is the rootsFile of the safe contract that contains the roots. + rootsFile *fileSection // numMerkleRoots is the number of merkle roots in file. numMerkleRoots int } @@ -63,9 +56,9 @@ type ( // loadExistingMerkleRoots reads creates a merkleRoots object from existing // merkle roots. -func loadExistingMerkleRoots(file *os.File) (*merkleRoots, error) { +func loadExistingMerkleRoots(file *fileSection) (*merkleRoots, error) { mr := &merkleRoots{ - file: file, + rootsFile: file, } // Get the number of roots stored in the file. var err error @@ -73,12 +66,17 @@ func loadExistingMerkleRoots(file *os.File) (*merkleRoots, error) { if err != nil { return nil, err } - // Seek to the first root's offset. - if _, err = file.Seek(fileOffsetFromRootIndex(0), io.SeekStart); err != nil { + // Get the size of the file and read it. + var size int64 + if size, err = file.Size(); err != nil { + return nil, err + } + fileData := make([]byte, size) + if _, err := file.ReadAt(fileData, 0); err != nil { return nil, err } // Read the roots from the file without reading all of them at once. - r := bufio.NewReader(file) + r := bytes.NewBuffer(fileData) for i := 0; i < mr.numMerkleRoots; i++ { var root crypto.Hash if _, err = io.ReadFull(r, root[:]); err == io.EOF { @@ -109,16 +107,16 @@ func newCachedSubTree(roots []crypto.Hash) *cachedSubTree { // newMerkleRoots creates a new merkleRoots object. This doesn't load existing // roots from file and will assume that the file doesn't contain any roots. // Don't use this on a file that contains roots. -func newMerkleRoots(file *os.File) *merkleRoots { +func newMerkleRoots(file *fileSection) *merkleRoots { return &merkleRoots{ - file: file, + rootsFile: file, } } // fileOffsetFromRootIndex calculates the offset of the merkle root at index i from // the beginning of the contract file. func fileOffsetFromRootIndex(i int) int64 { - return contractHeaderSize + crypto.HashSize*int64(i) + return crypto.HashSize * int64(i) } // appendRootMemory appends a root to the in-memory structure of the merkleRoots. If @@ -151,7 +149,7 @@ func (mr *merkleRoots) delete(i int) error { } } // Swap the root at index i with the last root in mr.uncachedRoots. 
- _, err := mr.file.WriteAt(mr.uncachedRoots[len(mr.uncachedRoots)-1][:], fileOffsetFromRootIndex(i)) + _, err := mr.rootsFile.WriteAt(mr.uncachedRoots[len(mr.uncachedRoots)-1][:], fileOffsetFromRootIndex(i)) if err != nil { return errors.AddContext(err, "failed to swap root to delete with last one") } @@ -175,7 +173,7 @@ func (mr *merkleRoots) deleteLastRoot() error { // Decrease the numMerkleRoots counter. mr.numMerkleRoots-- // Truncate file to avoid interpreting trailing data as valid. - if err := mr.file.Truncate(fileOffsetFromRootIndex(mr.numMerkleRoots)); err != nil { + if err := mr.rootsFile.Truncate(fileOffsetFromRootIndex(mr.numMerkleRoots)); err != nil { return errors.AddContext(err, "failed to delete last root from file") } // If the last element is uncached we can simply remove it from the slice. @@ -202,7 +200,7 @@ func (mr *merkleRoots) insert(index int, root crypto.Hash) error { return mr.push(root) } // Replaced the root on disk. - _, err := mr.file.WriteAt(root[:], fileOffsetFromRootIndex(index)) + _, err := mr.rootsFile.WriteAt(root[:], fileOffsetFromRootIndex(index)) if err != nil { return errors.AddContext(err, "failed to insert root on disk") } @@ -236,21 +234,16 @@ func (mr *merkleRoots) isIndexCached(i int) (int, bool) { // lenFromFile returns the number of merkle roots by computing it from the // filesize. func (mr *merkleRoots) lenFromFile() (int, error) { - stat, err := mr.file.Stat() + size, err := mr.rootsFile.Size() if err != nil { return 0, err } - size := stat.Size() - // If we haven't written a single root yet we just return 0. - if size < contractHeaderSize { - return 0, nil - } // Sanity check contract file length. - if (size-contractHeaderSize)%crypto.HashSize != 0 { + if size%crypto.HashSize != 0 { return 0, errors.New("contract file has unexpected length and might be corrupted") } - return int((size - contractHeaderSize) / crypto.HashSize), nil + return int(size / crypto.HashSize), nil } // len returns the number of merkle roots. It should always return the same @@ -281,7 +274,7 @@ func (mr *merkleRoots) push(root crypto.Hash) error { } // Calculate the root offset within the file and write it to disk. rootOffset := fileOffsetFromRootIndex(mr.len()) - if _, err := mr.file.WriteAt(root[:], rootOffset); err != nil { + if _, err := mr.rootsFile.WriteAt(root[:], rootOffset); err != nil { return err } // Add the root to the unached roots. @@ -343,10 +336,12 @@ func (mr *merkleRoots) merkleRoots() (roots []crypto.Hash, err error) { // merkleRootsFrom index reads all the merkle roots in range [from;to) func (mr *merkleRoots) merkleRootsFromIndexFromDisk(from, to int) ([]crypto.Hash, error) { merkleRoots := make([]crypto.Hash, 0, to-from) - if _, err := mr.file.Seek(fileOffsetFromRootIndex(from), io.SeekStart); err != nil { - return merkleRoots, err + // Get the size of the file and read it. 
+ fileData := make([]byte, fileOffsetFromRootIndex(to)-fileOffsetFromRootIndex(from)) + if _, err := mr.rootsFile.ReadAt(fileData, fileOffsetFromRootIndex(from)); err != nil { + return nil, err } - r := bufio.NewReader(mr.file) + r := bytes.NewBuffer(fileData) for i := from; to-i > 0; i++ { var root crypto.Hash if _, err := io.ReadFull(r, root[:]); err == io.EOF { diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index 8bbe22ecc3..b2be9f8dc1 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -73,7 +73,8 @@ func TestLoadExistingMerkleRoots(t *testing.T) { } // Create sector roots. - merkleRoots := newMerkleRoots(file) + rootSection := newFileSection(file, 0, -1) + merkleRoots := newMerkleRoots(rootSection) for i := 0; i < 200; i++ { hash := crypto.Hash{} copy(hash[:], fastrand.Bytes(crypto.HashSize)[:]) @@ -81,7 +82,7 @@ func TestLoadExistingMerkleRoots(t *testing.T) { } // Load the existing file using LoadExistingMerkleRoots - merkleRoots2, err := loadExistingMerkleRoots(file) + merkleRoots2, err := loadExistingMerkleRoots(rootSection) if err != nil { t.Fatal(err) } @@ -127,7 +128,8 @@ func TestInsertMerkleRoot(t *testing.T) { } // Create sector roots. - merkleRoots := newMerkleRoots(file) + rootSection := newFileSection(file, 0, -1) + merkleRoots := newMerkleRoots(rootSection) for i := 0; i < 200; i++ { hash := crypto.Hash{} copy(hash[:], fastrand.Bytes(crypto.HashSize)[:]) @@ -151,7 +153,7 @@ func TestInsertMerkleRoot(t *testing.T) { } // Reload the roots. The in-memory structure and the roots on disk should // still be consistent. - loadedRoots, err := loadExistingMerkleRoots(merkleRoots.file) + loadedRoots, err := loadExistingMerkleRoots(merkleRoots.rootsFile) if err != nil { t.Fatal("failed to load existing roots", err) } @@ -176,7 +178,7 @@ func TestInsertMerkleRoot(t *testing.T) { } // Reload the roots. The in-memory structure and the roots on disk should // still be consistent. - loadedRoots, err = loadExistingMerkleRoots(merkleRoots.file) + loadedRoots, err = loadExistingMerkleRoots(merkleRoots.rootsFile) if err != nil { t.Fatal("failed to load existing roots", err) } @@ -204,7 +206,8 @@ func TestDeleteLastRoot(t *testing.T) { // makes the first delete remove a uncached root and the second delete has // to remove a cached tree. numMerkleRoots := merkleRootsPerCache + 1 - merkleRoots := newMerkleRoots(file) + rootSection := newFileSection(file, 0, -1) + merkleRoots := newMerkleRoots(rootSection) for i := 0; i < numMerkleRoots; i++ { hash := crypto.Hash{} copy(hash[:], fastrand.Bytes(crypto.HashSize)[:]) @@ -262,7 +265,7 @@ func TestDeleteLastRoot(t *testing.T) { // Reload the roots. The in-memory structure and the roots on disk should // still be consistent. - loadedRoots, err := loadExistingMerkleRoots(merkleRoots.file) + loadedRoots, err := loadExistingMerkleRoots(merkleRoots.rootsFile) if err != nil { t.Fatal("failed to load existing roots", err) } @@ -289,7 +292,8 @@ func TestDelete(t *testing.T) { // Create many sector roots. numMerkleRoots := 1000 - merkleRoots := newMerkleRoots(file) + rootSection := newFileSection(file, 0, -1) + merkleRoots := newMerkleRoots(rootSection) for i := 0; i < numMerkleRoots; i++ { hash := crypto.Hash{} copy(hash[:], fastrand.Bytes(crypto.HashSize)[:]) @@ -299,7 +303,7 @@ func TestDelete(t *testing.T) { for merkleRoots.numMerkleRoots > 0 { // 1% chance to reload the roots and check if they are consistent. 
if fastrand.Intn(100) == 0 { - loadedRoots, err := loadExistingMerkleRoots(merkleRoots.file) + loadedRoots, err := loadExistingMerkleRoots(merkleRoots.rootsFile) if err != nil { t.Fatal("failed to load existing roots", err) } @@ -374,7 +378,8 @@ func TestMerkleRootsRandom(t *testing.T) { // Create many sector roots. numMerkleRoots := 10000 - merkleRoots := newMerkleRoots(file) + rootSection := newFileSection(file, 0, -1) + merkleRoots := newMerkleRoots(rootSection) for i := 0; i < numMerkleRoots; i++ { hash := crypto.Hash{} copy(hash[:], fastrand.Bytes(crypto.HashSize)[:]) @@ -385,7 +390,7 @@ func TestMerkleRootsRandom(t *testing.T) { for i := 0; i < numMerkleRoots; i++ { // 1% chance to reload the roots and check if they are consistent. if fastrand.Intn(100) == 0 { - loadedRoots, err := loadExistingMerkleRoots(merkleRoots.file) + loadedRoots, err := loadExistingMerkleRoots(merkleRoots.rootsFile) if err != nil { t.Fatal("failed to load existing roots") } From 2383a09e0cc42db1ffdb21bcda1e25c056a73d75 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 25 Apr 2018 18:49:15 -0400 Subject: [PATCH 128/212] don't allow shrinking of fileSection if it has reached its max section size --- modules/renter/proto/filesection.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/modules/renter/proto/filesection.go b/modules/renter/proto/filesection.go index a1a52fce25..c6d5becfd7 100644 --- a/modules/renter/proto/filesection.go +++ b/modules/renter/proto/filesection.go @@ -69,6 +69,13 @@ func (f *fileSection) Sync() error { // Truncate calls Truncate on the fileSection's underlying file. func (f *fileSection) Truncate(size int64) error { + currentSize, err := f.Size() + if err != nil { + return err + } + if currentSize == f.end-f.start && f.end != -1 { + panic("can't shrink section that has reached its max size unless it has no end boundary") + } if f.start+size < f.start { panic("can't truncate file to be smaller than the section start") } From e3fe91934ce6283aeb352f8101cf7b530f6f0382 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Thu, 26 Apr 2018 16:44:04 +0200 Subject: [PATCH 129/212] Add siac command to get data from /host/contracts endpoint --- cmd/siac/hostcmd.go | 45 +++++++++++++++++++++++++++++++++++++++++ cmd/siac/main.go | 14 +++++++------ node/api/client/host.go | 7 +++++++ 3 files changed, 60 insertions(+), 6 deletions(-) diff --git a/cmd/siac/hostcmd.go b/cmd/siac/hostcmd.go index 514887de1a..580aa484f1 100644 --- a/cmd/siac/hostcmd.go +++ b/cmd/siac/hostcmd.go @@ -73,6 +73,18 @@ To configure the host to accept new contracts, set acceptingcontracts to true: Run: wrap(hostconfigcmd), } + hostContractCmd = &cobra.Command{ + Use: "contracts", + Short: "Show host contracts", + Long: `Show host contracts sorted by expiration height. + +Available output types: + value: show financial information + status: show status information +`, + Run: wrap(hostcontractcmd), + } + hostFolderAddCmd = &cobra.Command{ Use: "add [path] [size]", Short: "Add a storage folder to the host", @@ -384,6 +396,39 @@ func hostconfigcmd(param, value string) { fmt.Printf("Estimated conversion rate: %v%%\n", eg.ConversionRate) } +// hostcontractcmd is the handler for the command `siac host contracts [type]`. 
+func hostcontractcmd() { + switch hostContractOutputType { + case "value", "status": + break + default: + die("\"" + hostContractOutputType + "\" is not a format") + + } + cg, err := httpClient.HostContractInfoGet() + if err != nil { + die("Could not fetch host contract info:", err) + } + sort.Slice(cg.Contracts, func(i, j int) bool { return cg.Contracts[i].ExpirationHeight < cg.Contracts[j].ExpirationHeight }) + w := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0) + if hostContractOutputType == "value" { + fmt.Fprintf(w, "Obligation Id\tObligation Status\tContract Cost\tLocked Collateral\tRisked Collateral\tPotential Revenue\tExpiration Height\tTransaction Fees\n") + for _, so := range cg.Contracts { + potentialRevenue := so.PotentialDownloadRevenue.Add(so.PotentialUploadRevenue).Add(so.PotentialStorageRevenue) + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", so.ObligationId, strings.TrimPrefix(so.ObligationStatus, "obligation"), currencyUnits(so.ContractCost), currencyUnits(so.LockedCollateral), + currencyUnits(so.RiskedCollateral), currencyUnits(potentialRevenue), so.ExpirationHeight, currencyUnits(so.TransactionFeesAdded)) + } + } + if hostContractOutputType == "status" { + fmt.Fprintf(w, "Obligation ID\tObligation Status\tExpiration Height\tOrigin Confirmed\tRevision Constructed\tRevision Confirmed\tProof Constructed\tProof Confirmed\n") + for _, so := range cg.Contracts { + fmt.Fprintf(w, "%s\t%s\t%d\t%t\t%t\t%t\t%t\t%t\n", so.ObligationId, strings.TrimPrefix(so.ObligationStatus, "obligation"), so.ExpirationHeight, so.OriginConfirmed, + so.RevisionConstructed, so.RevisionConfirmed, so.ProofConstructed, so.ProofConfirmed) + } + } + w.Flush() +} + // hostannouncecmd is the handler for the command `siac host announce`. // Announces yourself as a host to the network. Optionally takes an address to // announce as. diff --git a/cmd/siac/main.go b/cmd/siac/main.go index 99a722d4eb..9be3c44bb5 100644 --- a/cmd/siac/main.go +++ b/cmd/siac/main.go @@ -13,11 +13,12 @@ import ( var ( // Flags. - hostVerbose bool // display additional host info - initForce bool // destroy and reencrypt the wallet on init if it already exists - initPassword bool // supply a custom password when creating a wallet - renterListVerbose bool // Show additional info about uploaded files. - renterShowHistory bool // Show download history in addition to download queue. + hostContractOutputType string // output type for host contracts + hostVerbose bool // display additional host info + initForce bool // destroy and reencrypt the wallet on init if it already exists + initPassword bool // supply a custom password when creating a wallet + renterListVerbose bool // Show additional info about uploaded files. + renterShowHistory bool // Show download history in addition to download queue. 
) var ( @@ -87,10 +88,11 @@ func main() { updateCmd.AddCommand(updateCheckCmd) root.AddCommand(hostCmd) - hostCmd.AddCommand(hostConfigCmd, hostAnnounceCmd, hostFolderCmd, hostSectorCmd) + hostCmd.AddCommand(hostConfigCmd, hostAnnounceCmd, hostFolderCmd, hostContractCmd, hostSectorCmd) hostFolderCmd.AddCommand(hostFolderAddCmd, hostFolderRemoveCmd, hostFolderResizeCmd) hostSectorCmd.AddCommand(hostSectorDeleteCmd) hostCmd.Flags().BoolVarP(&hostVerbose, "verbose", "v", false, "Display detailed host info") + hostContractCmd.Flags().StringVarP(&hostContractOutputType, "type", "t", "value", "Select output type") root.AddCommand(hostdbCmd) hostdbCmd.AddCommand(hostdbViewCmd) diff --git a/node/api/client/host.go b/node/api/client/host.go index ddb9dd26c0..441589d2f0 100644 --- a/node/api/client/host.go +++ b/node/api/client/host.go @@ -63,6 +63,13 @@ func (c *Client) HostAnnounceAddrPost(address modules.NetAddress) (err error) { return } +// HostContractInfoGet uses the /host/contracts endpoint to get information +// about contracts on the host. +func (c *Client) HostContractInfoGet() (cg api.ContractInfoGET, err error) { + err = c.get("/host/contracts", &cg) + return +} + // HostEstimateScoreGet requests the /host/estimatescore endpoint. func (c *Client) HostEstimateScoreGet(param, value string) (eg api.HostEstimateScoreGET, err error) { err = c.get(fmt.Sprintf("/host/estimatescore?%v=%v", param, value), &eg) From 1829ba593305f9309d307728ddb5fd7e9405c9ad Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 26 Apr 2018 14:14:33 -0400 Subject: [PATCH 130/212] don't read all the roots at once --- modules/renter/proto/consts.go | 4 ++ modules/renter/proto/merkleroots.go | 97 ++++++++++++++++++----------- 2 files changed, 65 insertions(+), 36 deletions(-) diff --git a/modules/renter/proto/consts.go b/modules/renter/proto/consts.go index d210ee744d..cdbd623022 100644 --- a/modules/renter/proto/consts.go +++ b/modules/renter/proto/consts.go @@ -11,6 +11,10 @@ import ( const ( // contractExtension is the extension given to contract files. contractExtension = ".contract" + + // rootsDiskLoadBulkSize is the max number of roots we read from disk at + // once to avoid using up all the ram. + rootsDiskLoadBulkSize = 1024 * crypto.HashSize // 32 kib ) var ( diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index 2fdc6b9bd3..d8e7bb1ac6 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -4,7 +4,6 @@ package proto // startup. For petabytes of data this might take a long time. import ( - "bytes" "fmt" "io" @@ -54,6 +53,22 @@ type ( } ) +// parseRootsFromData takes some data and splits it up into sector roots. It will return an error if the size of the data is not a multiple of crypto.HashSize. +func parseRootsFromData(b []byte) ([]crypto.Hash, error) { + var roots []crypto.Hash + if len(b)%crypto.HashSize != 0 { + return roots, errors.New("roots have unexpected length and might be corrupted") + } + + var root crypto.Hash + for len(b) > 0 { + copy(root[:], b[:crypto.HashSize]) + roots = append(roots, root) + b = b[crypto.HashSize:] + } + return roots, nil +} + // loadExistingMerkleRoots reads creates a merkleRoots object from existing // merkle roots. func loadExistingMerkleRoots(file *fileSection) (*merkleRoots, error) { @@ -66,27 +81,26 @@ func loadExistingMerkleRoots(file *fileSection) (*merkleRoots, error) { if err != nil { return nil, err } - // Get the size of the file and read it. 
- var size int64 - if size, err = file.Size(); err != nil { - return nil, err - } - fileData := make([]byte, size) - if _, err := file.ReadAt(fileData, 0); err != nil { - return nil, err - } // Read the roots from the file without reading all of them at once. - r := bytes.NewBuffer(fileData) - for i := 0; i < mr.numMerkleRoots; i++ { - var root crypto.Hash - if _, err = io.ReadFull(r, root[:]); err == io.EOF { + readOff := int64(0) + rootsData := make([]byte, rootsDiskLoadBulkSize) + for { + n, err := file.ReadAt(rootsData, readOff) + if err == io.ErrUnexpectedEOF && n == 0 { break - } else if err != nil { + } + if err == io.EOF && n == 0 { + break + } + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { return nil, err } - - // Append the root to the uncachedRoots - mr.appendRootMemory(root) + roots, err := parseRootsFromData(rootsData[:n]) + if err != nil { + return nil, err + } + mr.appendRootMemory(roots...) + readOff += int64(n) } return mr, nil } @@ -122,11 +136,13 @@ func fileOffsetFromRootIndex(i int) int64 { // appendRootMemory appends a root to the in-memory structure of the merkleRoots. If // the length of the uncachedRoots grows too large they will be compressed into // a cachedSubTree. -func (mr *merkleRoots) appendRootMemory(root crypto.Hash) { - mr.uncachedRoots = append(mr.uncachedRoots, root) - if len(mr.uncachedRoots) == merkleRootsPerCache { - mr.cachedSubTrees = append(mr.cachedSubTrees, newCachedSubTree(mr.uncachedRoots)) - mr.uncachedRoots = mr.uncachedRoots[:0] +func (mr *merkleRoots) appendRootMemory(roots ...crypto.Hash) { + for _, root := range roots { + mr.uncachedRoots = append(mr.uncachedRoots, root) + if len(mr.uncachedRoots) == merkleRootsPerCache { + mr.cachedSubTrees = append(mr.cachedSubTrees, newCachedSubTree(mr.uncachedRoots)) + mr.uncachedRoots = mr.uncachedRoots[:0] + } } } @@ -336,20 +352,29 @@ func (mr *merkleRoots) merkleRoots() (roots []crypto.Hash, err error) { // merkleRootsFrom index reads all the merkle roots in range [from;to) func (mr *merkleRoots) merkleRootsFromIndexFromDisk(from, to int) ([]crypto.Hash, error) { merkleRoots := make([]crypto.Hash, 0, to-from) - // Get the size of the file and read it. - fileData := make([]byte, fileOffsetFromRootIndex(to)-fileOffsetFromRootIndex(from)) - if _, err := mr.rootsFile.ReadAt(fileData, fileOffsetFromRootIndex(from)); err != nil { - return nil, err - } - r := bytes.NewBuffer(fileData) - for i := from; to-i > 0; i++ { - var root crypto.Hash - if _, err := io.ReadFull(r, root[:]); err == io.EOF { - return nil, io.ErrUnexpectedEOF - } else if err != nil { - return merkleRoots, errors.AddContext(err, "failed to read root from disk") + remainingData := fileOffsetFromRootIndex(to) - fileOffsetFromRootIndex(from) + readOff := fileOffsetFromRootIndex(from) + var rootsData []byte + for remainingData > 0 { + if remainingData > rootsDiskLoadBulkSize { + rootsData = make([]byte, rootsDiskLoadBulkSize) + } else { + rootsData = make([]byte, remainingData) + } + n, err := mr.rootsFile.ReadAt(rootsData, readOff) + if err == io.ErrUnexpectedEOF || err == io.EOF { + return nil, errors.New("merkleRootsFromIndexFromDisk failed: roots have unexpected length") + } + if err != nil { + return nil, err + } + roots, err := parseRootsFromData(rootsData) + if err != nil { + return nil, err } - merkleRoots = append(merkleRoots, root) + merkleRoots = append(merkleRoots, roots...) 
+ readOff += int64(n) + remainingData -= int64(n) } return merkleRoots, nil } From 99e2c82c59d68a024b84bfe9698bba23c2a14014 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 26 Apr 2018 16:00:51 -0400 Subject: [PATCH 131/212] idempotent delete --- modules/renter/proto/merkleroots.go | 90 ++++++++++++------------ modules/renter/proto/merkleroots_test.go | 28 ++++++-- 2 files changed, 69 insertions(+), 49 deletions(-) diff --git a/modules/renter/proto/merkleroots.go b/modules/renter/proto/merkleroots.go index d8e7bb1ac6..c9e8d7222a 100644 --- a/modules/renter/proto/merkleroots.go +++ b/modules/renter/proto/merkleroots.go @@ -146,51 +146,31 @@ func (mr *merkleRoots) appendRootMemory(roots ...crypto.Hash) { } } -// delete deletes the sector root at a certain index. -func (mr *merkleRoots) delete(i int) error { - // Check if the index is correct - if i >= mr.numMerkleRoots { - build.Critical("can't delete non-existing root") +// delete deletes the sector root at a certain index by replacing it with the +// last root and truncates the file to truncateSize after that. This ensures +// that the operation is indempotent. +func (mr *merkleRoots) delete(i int, lastRoot crypto.Hash, truncateSize int64) error { + // Swap the element at index i with the lastRoot. This might actually + // increase mr.numMerkleRoots since there is a chance that i points to an + // index after the end of the file. That's why the insert is executed first + // before truncating the file or decreasing the numMerkleRoots field. + if err := mr.insert(i, lastRoot); err != nil { + return errors.AddContext(err, "failed to swap deleted root with newRoot") + } + // Truncate the file to truncateSize. + if err := mr.rootsFile.Truncate(truncateSize); err != nil { + return errors.AddContext(err, "failed to truncate file") + } + // Adjust the numMerkleRoots field. If the number of roots didn't change we + // are done. + rootsBefore := mr.numMerkleRoots + mr.numMerkleRoots = int(truncateSize / crypto.HashSize) + if rootsBefore == mr.numMerkleRoots { return nil } - // If i is the index of the last element we call deleteLastRoot. - if i == mr.numMerkleRoots-1 { - return mr.deleteLastRoot() - } - // If we don't have any uncached roots we need to delete the last cached - // tree and add its elements to the uncached roots. - if len(mr.uncachedRoots) == 0 { - if err := mr.moveLastCachedSubTreeToUncached(); err != nil { - return err - } - } - // Swap the root at index i with the last root in mr.uncachedRoots. - _, err := mr.rootsFile.WriteAt(mr.uncachedRoots[len(mr.uncachedRoots)-1][:], fileOffsetFromRootIndex(i)) - if err != nil { - return errors.AddContext(err, "failed to swap root to delete with last one") - } - // If the deleted root was not cached we swap the roots in memory too. - // Otherwise we rebuild the cachedSubTree. - if index, cached := mr.isIndexCached(i); !cached { - mr.uncachedRoots[index] = mr.uncachedRoots[len(mr.uncachedRoots)-1] - } else { - err = mr.rebuildCachedTree(index) - } - if err != nil { - return errors.AddContext(err, "failed to rebuild cached tree") - } - // Now that the element we want to delete is the last root we can simply - // delete it by calling mr.deleteLastRoot. - return mr.deleteLastRoot() -} - -// deleteLastRoot deletes the last sector root of the contract. -func (mr *merkleRoots) deleteLastRoot() error { - // Decrease the numMerkleRoots counter. - mr.numMerkleRoots-- - // Truncate file to avoid interpreting trailing data as valid. 
- if err := mr.rootsFile.Truncate(fileOffsetFromRootIndex(mr.numMerkleRoots)); err != nil { - return errors.AddContext(err, "failed to delete last root from file") + // Sanity check the number of roots. + if rootsBefore != mr.numMerkleRoots+1 { + build.Critical("a delete should never delete more than one root at once") } // If the last element is uncached we can simply remove it from the slice. if len(mr.uncachedRoots) > 0 { @@ -209,8 +189,15 @@ func (mr *merkleRoots) deleteLastRoot() error { // insert inserts a root by replacing a root at an existing index. func (mr *merkleRoots) insert(index int, root crypto.Hash) error { - if index > mr.numMerkleRoots { - return errors.New("can't insert at a index greater than the number of roots") + // If the index does point to an offset beyond the end of the file we fill + // in the blanks with empty merkle roots. This usually just means that the + // machine crashed during the recovery process and that the next few + // updates are probably going to be delete operations that take care of the + // blank roots. + for index > mr.numMerkleRoots { + if err := mr.push(crypto.Hash{}); err != nil { + return errors.AddContext(err, "failed to extend roots") + } } if index == mr.numMerkleRoots { return mr.push(root) @@ -379,6 +366,19 @@ func (mr *merkleRoots) merkleRootsFromIndexFromDisk(from, to int) ([]crypto.Hash return merkleRoots, nil } +// prepareDelete is a helper function that returns the lastRoot and trunateSize +// arguments for a certain index to call delete with. +func (mr *merkleRoots) prepareDelete(index int) (lastRoot crypto.Hash, truncateSize int64, err error) { + roots, err := mr.merkleRootsFromIndexFromDisk(mr.numMerkleRoots-1, mr.numMerkleRoots) + if err != nil { + return crypto.Hash{}, 0, errors.AddContext(err, "failed to get last root") + } + if len(roots) != 1 { + return crypto.Hash{}, 0, fmt.Errorf("expected exactly 1 root but got %v", len(roots)) + } + return roots[0], int64((mr.numMerkleRoots - 1) * crypto.HashSize), nil +} + // rebuildCachedTree rebuilds the tree in mr.cachedSubTree at index i. func (mr *merkleRoots) rebuildCachedTree(index int) error { // Find the index of the first root of the cached tree on disk. diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index b2be9f8dc1..8ef47433e2 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -215,7 +215,11 @@ func TestDeleteLastRoot(t *testing.T) { } // Delete the last sector root. This should call deleteLastRoot internally. - if err := merkleRoots.delete(numMerkleRoots - 1); err != nil { + lastRoot, truncateSize, err := merkleRoots.prepareDelete(numMerkleRoots - 1) + if err != nil { + t.Fatal(err) + } + if err := merkleRoots.delete(numMerkleRoots-1, lastRoot, truncateSize); err != nil { t.Fatal("failed to delete last root", err) } numMerkleRoots-- @@ -239,7 +243,11 @@ func TestDeleteLastRoot(t *testing.T) { } // Delete the last sector root again. This time a cached root should be deleted too. 
- if err := merkleRoots.delete(numMerkleRoots - 1); err != nil { + lastRoot, truncateSize, err = merkleRoots.prepareDelete(numMerkleRoots - 1) + if err != nil { + t.Fatal(err) + } + if err := merkleRoots.delete(numMerkleRoots-1, lastRoot, truncateSize); err != nil { t.Fatal("failed to delete last root", err) } numMerkleRoots-- @@ -320,7 +328,15 @@ func TestDelete(t *testing.T) { numUncached := len(merkleRoots.uncachedRoots) cachedIndex, cached := merkleRoots.isIndexCached(deleteIndex) - if err := merkleRoots.delete(deleteIndex); err != nil { + // Call delete twice to make sure it's idempotent. + lastRoot, truncateSize, err := merkleRoots.prepareDelete(deleteIndex) + if err != nil { + t.Fatal(err) + } + if err := merkleRoots.delete(deleteIndex, lastRoot, truncateSize); err != nil { + t.Fatal("failed to delete random index", deleteIndex, err) + } + if err := merkleRoots.delete(deleteIndex, lastRoot, truncateSize); err != nil { t.Fatal("failed to delete random index", deleteIndex, err) } // Number of roots should have decreased. @@ -403,7 +419,11 @@ func TestMerkleRootsRandom(t *testing.T) { // Delete if operation == 0 { index := fastrand.Intn(merkleRoots.numMerkleRoots) - if err := merkleRoots.delete(index); err != nil { + lastRoot, truncateSize, err := merkleRoots.prepareDelete(index) + if err != nil { + t.Fatal(err) + } + if err := merkleRoots.delete(index, lastRoot, truncateSize); err != nil { t.Fatalf("failed to delete %v: %v", index, err) } continue From b8b5b9a7cf0ab5c878b4be97fbb7611a767be480 Mon Sep 17 00:00:00 2001 From: Niels Castien Date: Thu, 26 Apr 2018 22:18:31 +0200 Subject: [PATCH 132/212] Implement review comments --- cmd/siac/hostcmd.go | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/cmd/siac/hostcmd.go b/cmd/siac/hostcmd.go index 580aa484f1..a354e8bab2 100644 --- a/cmd/siac/hostcmd.go +++ b/cmd/siac/hostcmd.go @@ -398,33 +398,28 @@ func hostconfigcmd(param, value string) { // hostcontractcmd is the handler for the command `siac host contracts [type]`. 
func hostcontractcmd() { - switch hostContractOutputType { - case "value", "status": - break - default: - die("\"" + hostContractOutputType + "\" is not a format") - - } cg, err := httpClient.HostContractInfoGet() if err != nil { die("Could not fetch host contract info:", err) } sort.Slice(cg.Contracts, func(i, j int) bool { return cg.Contracts[i].ExpirationHeight < cg.Contracts[j].ExpirationHeight }) w := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0) - if hostContractOutputType == "value" { + switch hostContractOutputType { + case "value": fmt.Fprintf(w, "Obligation Id\tObligation Status\tContract Cost\tLocked Collateral\tRisked Collateral\tPotential Revenue\tExpiration Height\tTransaction Fees\n") for _, so := range cg.Contracts { potentialRevenue := so.PotentialDownloadRevenue.Add(so.PotentialUploadRevenue).Add(so.PotentialStorageRevenue) fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%d\t%s\n", so.ObligationId, strings.TrimPrefix(so.ObligationStatus, "obligation"), currencyUnits(so.ContractCost), currencyUnits(so.LockedCollateral), currencyUnits(so.RiskedCollateral), currencyUnits(potentialRevenue), so.ExpirationHeight, currencyUnits(so.TransactionFeesAdded)) } - } - if hostContractOutputType == "status" { + case "status": fmt.Fprintf(w, "Obligation ID\tObligation Status\tExpiration Height\tOrigin Confirmed\tRevision Constructed\tRevision Confirmed\tProof Constructed\tProof Confirmed\n") for _, so := range cg.Contracts { fmt.Fprintf(w, "%s\t%s\t%d\t%t\t%t\t%t\t%t\t%t\n", so.ObligationId, strings.TrimPrefix(so.ObligationStatus, "obligation"), so.ExpirationHeight, so.OriginConfirmed, so.RevisionConstructed, so.RevisionConfirmed, so.ProofConstructed, so.ProofConfirmed) } + default: + die("\"" + hostContractOutputType + "\" is not a format") } w.Flush() } From c0a0c9e002ae6f65fc687d90f016d27154c8cab6 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 26 Apr 2018 16:27:06 -0400 Subject: [PATCH 133/212] implement review changes --- modules/renter/proto/consts.go | 4 ++++ modules/renter/proto/filesection.go | 18 +++++++----------- modules/renter/proto/merkleroots_test.go | 7 ++++++- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/modules/renter/proto/consts.go b/modules/renter/proto/consts.go index cdbd623022..cd5e99e185 100644 --- a/modules/renter/proto/consts.go +++ b/modules/renter/proto/consts.go @@ -15,6 +15,10 @@ const ( // rootsDiskLoadBulkSize is the max number of roots we read from disk at // once to avoid using up all the ram. rootsDiskLoadBulkSize = 1024 * crypto.HashSize // 32 kib + + // remainingFile is a constant used to indicate that a fileSection can access + // the whole remaining file instead of being boung to a certain end offset. 
+ remainingFile = -1 ) var ( diff --git a/modules/renter/proto/filesection.go b/modules/renter/proto/filesection.go index c6d5becfd7..04edeb20e1 100644 --- a/modules/renter/proto/filesection.go +++ b/modules/renter/proto/filesection.go @@ -19,7 +19,7 @@ func newFileSection(f *os.File, start, end int64) *fileSection { if start < 0 { panic("filesection can't start at an index < 0") } - if end < start && end != -1 { + if end < start && end != remainingFile { panic("the end of a filesection can't be before the start") } return &fileSection{ @@ -44,7 +44,7 @@ func (f *fileSection) Size() (int64, error) { if size < 0 { size = 0 } - if size > f.end-f.start && f.end != -1 { + if size > f.end-f.start && f.end != remainingFile { size = f.end - f.start } return size, nil @@ -56,7 +56,7 @@ func (f *fileSection) ReadAt(b []byte, off int64) (int, error) { if off < 0 { panic("can't read from an offset before the section start") } - if f.start+off+int64(len(b)) > f.end && f.end != -1 { + if f.start+off+int64(len(b)) > f.end && f.end != remainingFile { panic("can't read from an offset after the section end") } return f.f.ReadAt(b, f.start+off) @@ -69,17 +69,13 @@ func (f *fileSection) Sync() error { // Truncate calls Truncate on the fileSection's underlying file. func (f *fileSection) Truncate(size int64) error { - currentSize, err := f.Size() - if err != nil { - return err - } - if currentSize == f.end-f.start && f.end != -1 { - panic("can't shrink section that has reached its max size unless it has no end boundary") + if f.end != remainingFile { + panic("can't truncate a file that has a end != remainingFile") } if f.start+size < f.start { panic("can't truncate file to be smaller than the section start") } - if f.start+size > f.end && f.end != -1 { + if f.start+size > f.end && f.end != remainingFile { panic("can't truncate file to be bigger than the section") } return f.f.Truncate(f.start + size) @@ -91,7 +87,7 @@ func (f *fileSection) WriteAt(b []byte, off int64) (int, error) { if off < 0 { panic("can't read from an offset before the section start") } - if f.start+off+int64(len(b)) > f.end && f.end != -1 { + if f.start+off+int64(len(b)) > f.end && f.end != remainingFile { panic("can't read from an offset after the section end") } return f.f.WriteAt(b, off+f.start) diff --git a/modules/renter/proto/merkleroots_test.go b/modules/renter/proto/merkleroots_test.go index 8ef47433e2..180a44c4fa 100644 --- a/modules/renter/proto/merkleroots_test.go +++ b/modules/renter/proto/merkleroots_test.go @@ -139,8 +139,13 @@ func TestInsertMerkleRoot(t *testing.T) { // Replace the last root with a new hash. It shouldn't be cached and // therefore no cached tree needs to be updated. newHash := crypto.Hash{} + insertIndex := merkleRoots.len() - 1 copy(newHash[:], fastrand.Bytes(crypto.HashSize)[:]) - if err := merkleRoots.insert(merkleRoots.len()-1, newHash); err != nil { + if err := merkleRoots.insert(insertIndex, newHash); err != nil { + t.Fatal("failed to insert root", err) + } + // Insert again at the same index to make sure insert is idempotent. + if err := merkleRoots.insert(insertIndex, newHash); err != nil { t.Fatal("failed to insert root", err) } // Check if the last root matches the new hash. 
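The fileSection type introduced in the patches above is what keeps the contract header and the Merkle roots from stepping on each other: every offset passed to ReadAt and WriteAt is relative to the section's start, a bounded section refuses to touch bytes outside its [start, end) range, and only a section whose end is remainingFile may grow with the file. The snippet below is an illustrative sketch, not part of any patch in this series; because it uses the unexported helpers it would live in package proto, and the 100-byte boundary is an arbitrary value picked for the example (the real code uses contractHeaderSize).

package proto

import "os"

// exampleSections is a sketch (not part of the patch series) showing how a
// file is split into a bounded section and an open-ended section.
func exampleSections(f *os.File) error {
	// The first 100 bytes belong to s1; everything after them belongs to s2.
	s1 := newFileSection(f, 0, 100)
	s2 := newFileSection(f, 100, remainingFile)

	// Offsets are section-relative, so this write lands at byte 100 of f.
	if _, err := s2.WriteAt([]byte("example data"), 0); err != nil {
		return err
	}

	// s1 can never report a size larger than 100 bytes, no matter how big
	// the underlying file grows; s2 reports whatever lies past byte 100.
	if _, err := s1.Size(); err != nil {
		return err
	}
	_, err := s2.Size()
	return err
}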
From d12fefefb961b5544094d630ffe1589ec2ce266e Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 27 Apr 2018 10:50:52 -0400 Subject: [PATCH 134/212] fix typo, use constant, remove unreachable block --- modules/renter/proto/consts.go | 2 +- modules/renter/proto/contract.go | 2 +- modules/renter/proto/filesection.go | 3 --- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/modules/renter/proto/consts.go b/modules/renter/proto/consts.go index cd5e99e185..8dbfe25488 100644 --- a/modules/renter/proto/consts.go +++ b/modules/renter/proto/consts.go @@ -17,7 +17,7 @@ const ( rootsDiskLoadBulkSize = 1024 * crypto.HashSize // 32 kib // remainingFile is a constant used to indicate that a fileSection can access - // the whole remaining file instead of being boung to a certain end offset. + // the whole remaining file instead of being bound to a certain end offset. remainingFile = -1 ) diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index 17a36e6f70..c95d76549b 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -436,7 +436,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo return err } headerSection := newFileSection(f, 0, contractHeaderSize) - rootsSection := newFileSection(f, contractHeaderSize+1, -1) + rootsSection := newFileSection(f, contractHeaderSize+1, remainingFile) // read header var header contractHeader diff --git a/modules/renter/proto/filesection.go b/modules/renter/proto/filesection.go index 04edeb20e1..bf28a263c9 100644 --- a/modules/renter/proto/filesection.go +++ b/modules/renter/proto/filesection.go @@ -75,9 +75,6 @@ func (f *fileSection) Truncate(size int64) error { if f.start+size < f.start { panic("can't truncate file to be smaller than the section start") } - if f.start+size > f.end && f.end != remainingFile { - panic("can't truncate file to be bigger than the section") - } return f.f.Truncate(f.start + size) } From 0bef2d6928de63a98dbe5e2d1ce0825d9091e24d Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 27 Apr 2018 12:58:08 -0400 Subject: [PATCH 135/212] add testing or the fileSection --- modules/renter/proto/contract.go | 4 +- modules/renter/proto/filesection_test.go | 314 +++++++++++++++++++++++ 2 files changed, 316 insertions(+), 2 deletions(-) create mode 100644 modules/renter/proto/filesection_test.go diff --git a/modules/renter/proto/contract.go b/modules/renter/proto/contract.go index c95d76549b..727ba272bb 100644 --- a/modules/renter/proto/contract.go +++ b/modules/renter/proto/contract.go @@ -401,7 +401,7 @@ func (cs *ContractSet) managedInsertContract(h contractHeader, roots []crypto.Ha } // create fileSections headerSection := newFileSection(f, 0, contractHeaderSize) - rootsSection := newFileSection(f, contractHeaderSize+1, -1) + rootsSection := newFileSection(f, contractHeaderSize, -1) // write header if _, err := headerSection.WriteAt(encoding.Marshal(h), 0); err != nil { return modules.RenterContract{}, err @@ -436,7 +436,7 @@ func (cs *ContractSet) loadSafeContract(filename string, walTxns []*writeaheadlo return err } headerSection := newFileSection(f, 0, contractHeaderSize) - rootsSection := newFileSection(f, contractHeaderSize+1, remainingFile) + rootsSection := newFileSection(f, contractHeaderSize, remainingFile) // read header var header contractHeader diff --git a/modules/renter/proto/filesection_test.go b/modules/renter/proto/filesection_test.go new file mode 100644 index 0000000000..ba22a72cdf --- /dev/null +++ 
b/modules/renter/proto/filesection_test.go @@ -0,0 +1,314 @@ +package proto + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/NebulousLabs/Sia/build" + "github.com/NebulousLabs/fastrand" +) + +// SafeReadAt is a wrapper for ReadAt that recovers from a potential panic and +// returns it as an error. +func (f *fileSection) SafeReadAt(b []byte, off int64) (n int, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + } + }() + return f.ReadAt(b, off) +} + +// SafeTruncate is a wrapper for Truncate that recovers from a potential panic +// and returns it as an error. +func (f *fileSection) SafeTruncate(size int64) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + } + }() + return f.Truncate(size) +} + +// SafeWriteAt is a wrapper for WriteAt that recovers from a potential panic +// and returns it as an error. +func (f *fileSection) SafeWriteAt(b []byte, off int64) (n int, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + } + }() + return f.WriteAt(b, off) +} + +// TestFileSectionBoundariesValidReadWrites uses valid read and write +// operations on the fileSection to make sure that the data is written to and +// read from the section correctly without corrupting other sections. +func TestFileSectionBoundariesValidReadWrites(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + testDir := build.TempDir(t.Name()) + if err := os.MkdirAll(testDir, 0700); err != nil { + t.Fatal(err) + } + testFile, err := os.Create(filepath.Join(testDir, "testfile.dat")) + if err != nil { + t.Fatal(err) + } + + // Create 3 sections. + s1Size := 100 + s2Size := 100 + s1 := newFileSection(testFile, 0, int64(s1Size)) + s2 := newFileSection(testFile, int64(s1Size), int64(s1Size+s2Size)) + s3 := newFileSection(testFile, int64(s1Size+s2Size), remainingFile) + + // Write as much data to the sections as they can fit. + s1Data := fastrand.Bytes(s1Size) + s2Data := fastrand.Bytes(s2Size) + s3Data := fastrand.Bytes(s2Size) // s3 has an infinite size so we just write s2Size bytes. + + n, err := s1.SafeWriteAt(s1Data, 0) + if err != nil { + t.Fatal(err) + } + if n != len(s1Data) { + t.Fatalf("expected %v bytes to be written instead of %v", len(s1Data), n) + } + n, err = s2.SafeWriteAt(s2Data, 0) + if err != nil { + t.Fatal(err) + } + if n != len(s2Data) { + t.Fatalf("expected %v bytes to be written instead of %v", len(s2Data), n) + } + n, err = s3.SafeWriteAt(s3Data, 0) + if err != nil { + t.Fatal(err) + } + if n != len(s3Data) { + t.Fatalf("expected %v bytes to be written instead of %v", len(s3Data), n) + } + + // Read the written data from the file and check if it matches. 
+ readS1Data := make([]byte, len(s1Data)) + readS2Data := make([]byte, len(s2Data)) + readS3Data := make([]byte, len(s3Data)) + _, err = s1.SafeReadAt(readS1Data, 0) + if err != nil { + t.Fatal(err) + } + _, err = s2.SafeReadAt(readS2Data, 0) + if err != nil { + t.Fatal(err) + } + _, err = s3.SafeReadAt(readS3Data, 0) + if err != nil { + t.Fatal(err) + } + fi, err := testFile.Stat() + if err != nil { + t.Fatal(err) + } + size := fi.Size() + size1, err := s1.Size() + if err != nil { + t.Fatal(err) + } + size2, err := s2.Size() + if err != nil { + t.Fatal(err) + } + size3, err := s3.Size() + if err != nil { + t.Fatal(err) + } + if size1 != int64(s1Size) { + t.Fatalf("expected size to be %v but was %v", s1Size, size1) + } + if size2 != int64(s2Size) { + t.Fatalf("expected size to be %v but was %v", s2Size, size2) + } + if size3 != int64(s2Size) { + t.Fatalf("expected size to be %v but was %v", s2Size, size3) + } + if size != size1+size2+size3 { + t.Fatalf("total size should be %v but was %v", size, size1+size2+size3) + } + + if !bytes.Equal(s1Data, readS1Data) { + t.Fatal("the read data doesn't match the written data") + } + if !bytes.Equal(s2Data, readS2Data) { + t.Fatal("the read data doesn't match the written data") + } + if !bytes.Equal(s3Data, readS3Data) { + t.Fatal("the read data doesn't match the written data") + } + + // Read the written data directly from the underlying file and check again. + _, err = testFile.ReadAt(readS1Data, 0) + if err != nil { + t.Fatal(err) + } + _, err = testFile.ReadAt(readS2Data, int64(s1Size)) + if err != nil { + t.Fatal(err) + } + _, err = testFile.ReadAt(readS3Data, int64(s1Size+s2Size)) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(s1Data, readS1Data) { + t.Fatal("the read data doesn't match the written data") + } + if !bytes.Equal(s2Data, readS2Data) { + t.Fatal("the read data doesn't match the written data") + } + if !bytes.Equal(s3Data, readS3Data) { + t.Fatal("the read data doesn't match the written data") + } +} + +// TestFileSectionBoundariesInvalidReadWrites tries a variation of invalid read +// and write operations on the section to make sure the caller can't write to +// neighboring sections by accident. +func TestFileSectionBoundariesInvalidReadWrites(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + testDir := build.TempDir(t.Name()) + if err := os.MkdirAll(testDir, 0700); err != nil { + t.Fatal(err) + } + testFile, err := os.Create(filepath.Join(testDir, "testfile.dat")) + if err != nil { + t.Fatal(err) + } + + // Create 3 sections. + s1Size := 100 + s2Size := 100 + s1 := newFileSection(testFile, 0, int64(s1Size)) + s2 := newFileSection(testFile, int64(s1Size), int64(s1Size+s2Size)) + + // Fill the file with some random data + randomData := fastrand.Bytes(s1Size + s2Size + 100) + if _, err := testFile.WriteAt(randomData, 0); err != nil { + t.Fatal(err) + } + // Create some random data for the following calls to write. That data + // should never be written since all the calls should fail. + data := fastrand.Bytes(1) + + // Try a number of invalid read and write operations. They should all fail. 
+ if _, err := s1.SafeWriteAt(data, int64(s1Size)); err == nil { + t.Fatal("sector shouldn't be able to write data beyond its end boundary") + } + if _, err := s2.SafeWriteAt(data, -1); err == nil { + t.Fatal("sector shouldn't be able to write data below its start boundary") + } + if _, err := s1.SafeReadAt(data, int64(s1Size)); err == nil { + t.Fatal("sector shouldn't be able to read data beyond its end boundary") + } + if _, err := s2.SafeReadAt(data, -1); err == nil { + t.Fatal("sector shouldn't be able to read data below its start boundary") + } + + // The file should still have the same random data from the beginning. + fileData, err := ioutil.ReadAll(testFile) + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fileData, randomData) { + t.Fatal("file data doesn't match the initial data") + } +} + +// TestFileSectionTruncate checks if file sections without an open end boundary +// can be truncated and makes sure that the last section can't truncate the +// file below its boundary. +func TestFileSectionTruncate(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + testDir := build.TempDir(t.Name()) + if err := os.MkdirAll(testDir, 0700); err != nil { + t.Fatal(err) + } + testFile, err := os.Create(filepath.Join(testDir, "testfile.dat")) + if err != nil { + t.Fatal(err) + } + + // Create 3 sections. + s1Size := 100 + s2Size := 100 + s1 := newFileSection(testFile, 0, int64(s1Size)) + s2 := newFileSection(testFile, int64(s1Size), int64(s1Size+s2Size)) + s3 := newFileSection(testFile, int64(s1Size+s2Size), remainingFile) + + // Write as much data to the sections as they can fit. + s1Data := fastrand.Bytes(s1Size) + s2Data := fastrand.Bytes(s2Size) + s3Data := fastrand.Bytes(s2Size) // s3 has an infinite size so we just write s2Size bytes. + + n, err := s1.SafeWriteAt(s1Data, 0) + if err != nil { + t.Fatal(err) + } + if n != len(s1Data) { + t.Fatalf("expected %v bytes to be written instead of %v", len(s1Data), n) + } + n, err = s2.SafeWriteAt(s2Data, 0) + if err != nil { + t.Fatal(err) + } + if n != len(s2Data) { + t.Fatalf("expected %v bytes to be written instead of %v", len(s2Data), n) + } + n, err = s3.SafeWriteAt(s3Data, 0) + if err != nil { + t.Fatal(err) + } + if n != len(s3Data) { + t.Fatalf("expected %v bytes to be written instead of %v", len(s3Data), n) + } + + // Try to truncate s1 and s2. That shouldn't be possible. + if err := s1.SafeTruncate(int64(fastrand.Intn(s1Size + 1))); err == nil { + t.Fatal("it shouldn't be possible to truncate a section with a fixed end boundary.") + } + if err := s2.SafeTruncate(int64(fastrand.Intn(s2Size + 1))); err == nil { + t.Fatal("it shouldn't be possible to truncate a section with a fixed end boundary.") + } + + // Try to truncate s3 to size 0. This should be possible and also reduce + // the total file size. + if err := s3.SafeTruncate(0); err != nil { + t.Fatal("failed to truncate s3", err) + } + fi, err := testFile.Stat() + if err != nil { + t.Fatal(err) + } + if fi.Size() != int64(s1Size+s2Size) { + t.Fatalf("expected size after truncate is %v but was %v", s1Size+s2Size, fi.Size()) + } + size, err := s3.Size() + if err != nil { + t.Fatal(err) + } + if size != 0 { + t.Fatalf("size was %v but should be %v", size, 0) + } + // Try to truncate s3 below its start. That shouldn't be possible. 
+ if err := s3.SafeTruncate(-1); err == nil { + t.Fatal("it shouldn't be possible to truncate a section to a negative size") + } +} From 98424a0abfb045c29b2e6b20d9d9941e90b6fe08 Mon Sep 17 00:00:00 2001 From: lukechampine Date: Sun, 29 Apr 2018 19:16:33 -0400 Subject: [PATCH 136/212] add UnlockHash.Scan method --- types/encoding.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/types/encoding.go b/types/encoding.go index 045b4063e6..fdc229bab6 100644 --- a/types/encoding.go +++ b/types/encoding.go @@ -1158,3 +1158,14 @@ func (uh *UnlockHash) LoadString(strUH string) error { copy(uh[:], byteUnlockHash[:]) return nil } + +// Scan implements the fmt.Scanner interface, allowing UnlockHash values to be +// scanned from text. +func (uh *UnlockHash) Scan(s fmt.ScanState, ch rune) error { + s.SkipSpace() + tok, err := s.Token(false, nil) + if err != nil { + return err + } + return uh.LoadString(string(tok)) +} From 093cae50a030efbab10a889d5b6186a673d6f309 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 11:17:58 -0400 Subject: [PATCH 137/212] add test --- types/encoding_test.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/types/encoding_test.go b/types/encoding_test.go index bf9765deba..d900ecc230 100644 --- a/types/encoding_test.go +++ b/types/encoding_test.go @@ -718,3 +718,20 @@ func TestTransactionMarshalSiaSize(t *testing.T) { t.Errorf("sizes do not match: expected %v, got %v", len(encoding.Marshal(txn)), txn.MarshalSiaSize()) } } + +// TestUnlockHashScan checks if the fmt.Scanner implementation of UnlockHash +// works as expected. +func TestUnlockHashScan(t *testing.T) { + // Create a random unlock hash. + uh := UnlockHash{} + copy(uh[:], fastrand.Bytes(crypto.HashSize)) + // Convert it to a string. + uhStr := uh.String() + // Scan the hash from the string. + scannedHash := UnlockHash{} + fmt.Sscan(uhStr, &scannedHash) + // Check if they are equal. + if !bytes.Equal(uh[:], scannedHash[:]) { + t.Fatal("scanned hash is not equal to original hash") + } +} From 25fcd94898975db0fea1616cb007bc77dd4fe7c4 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 12:36:40 -0400 Subject: [PATCH 138/212] make test more idiomatic --- types/encoding_test.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/types/encoding_test.go b/types/encoding_test.go index d900ecc230..19069b2f3d 100644 --- a/types/encoding_test.go +++ b/types/encoding_test.go @@ -723,13 +723,11 @@ func TestTransactionMarshalSiaSize(t *testing.T) { // works as expected. func TestUnlockHashScan(t *testing.T) { // Create a random unlock hash. - uh := UnlockHash{} - copy(uh[:], fastrand.Bytes(crypto.HashSize)) - // Convert it to a string. - uhStr := uh.String() - // Scan the hash from the string. - scannedHash := UnlockHash{} - fmt.Sscan(uhStr, &scannedHash) + var uh UnlockHash + fastrand.Read(uh[:]) + // Convert it to a string and parse the string using Sscan. + var scannedHash UnlockHash + fmt.Sscan(uh.String(), &scannedHash) // Check if they are equal. 
if !bytes.Equal(uh[:], scannedHash[:]) { t.Fatal("scanned hash is not equal to original hash") From 3b688547356d525e3725e34297d00fe200299a5c Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 30 Apr 2018 18:03:53 -0400 Subject: [PATCH 139/212] Improve BucketProcessedTransactions usage --- modules/wallet/database.go | 24 +++++++++++++++++++----- modules/wallet/update.go | 11 ++++++++++- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index 1eba37d439..eb4340b8fe 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -295,6 +295,9 @@ func decodeProcessedTransaction(ptBytes []byte, pt *modules.ProcessedTransaction return err } +func dbDeleteTransactionIndex(tx *bolt.Tx, txid types.TransactionID) error { + return dbDelete(tx.Bucket(bucketProcessedTxnIndex), txid) +} func dbPutTransactionIndex(tx *bolt.Tx, txid types.TransactionID, key []byte) error { return dbPut(tx.Bucket(bucketProcessedTxnIndex), txid, key) } @@ -346,18 +349,29 @@ func dbAppendProcessedTransaction(tx *bolt.Tx, pt modules.ProcessedTransaction) } func dbGetLastProcessedTransaction(tx *bolt.Tx) (pt modules.ProcessedTransaction, err error) { - _, val := tx.Bucket(bucketProcessedTransactions).Cursor().Last() + seq := tx.Bucket(bucketProcessedTransactions).Sequence() + keyBytes := make([]byte, 8) + binary.BigEndian.PutUint64(keyBytes, seq) + val := tx.Bucket(bucketProcessedTransactions).Get(keyBytes) err = decodeProcessedTransaction(val, &pt) return } func dbDeleteLastProcessedTransaction(tx *bolt.Tx) error { - // delete the last entry in the bucket. Note that we don't need to - // decrement the sequence integer; we only care that the next integer is - // larger than the previous one. + // Get the last processed txn. + pt, err := dbGetLastProcessedTransaction(tx) + if err != nil { + return errors.New("can't delete from empty bucket") + } + // Delete its txid from the index bucket. + if err := dbDeleteTransactionIndex(tx, pt.TransactionID); err != nil { + return errors.AddContext(err, "couldn't delete txn index") + } + // Delete the last processed txn and decrement the sequence. b := tx.Bucket(bucketProcessedTransactions) + seq := b.Sequence() key, _ := b.Cursor().Last() - return b.Delete(key) + return errors.Compose(b.SetSequence(seq-1), b.Delete(key)) } func dbGetProcessedTransaction(tx *bolt.Tx, index uint64) (pt modules.ProcessedTransaction, err error) { diff --git a/modules/wallet/update.go b/modules/wallet/update.go index 9b7b6e93d6..3531591a1d 100644 --- a/modules/wallet/update.go +++ b/modules/wallet/update.go @@ -3,6 +3,7 @@ package wallet import ( "math" + "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/errors" @@ -169,6 +170,7 @@ func (w *Wallet) revertHistory(tx *bolt.Tx, reverted []types.Block) error { txid := block.Transactions[i].ID() pt, err := dbGetLastProcessedTransaction(tx) if err != nil { + build.Critical("can't revert transaction because the bucket is empty") break // bucket is empty } if txid == pt.TransactionID { @@ -182,7 +184,14 @@ func (w *Wallet) revertHistory(tx *bolt.Tx, reverted []types.Block) error { // Remove the miner payout transaction if applicable. for i, mp := range block.MinerPayouts { - if w.isWalletAddress(mp.UnlockHash) { + // If the transaction is relevant to the wallet, it will be the + // most recent transaction in bucketProcessedTransactions. 
+ pt, err := dbGetLastProcessedTransaction(tx) + if err != nil { + build.Critical("can't revert miner payout because the bucket is empty") + break // bucket is empty + } + if types.TransactionID(block.ID()) == pt.TransactionID { w.log.Println("Miner payout has been reverted due to a reorg:", block.MinerPayoutID(uint64(i)), "::", mp.Value.HumanString()) if err := dbDeleteLastProcessedTransaction(tx); err != nil { w.log.Severe("Could not revert transaction:", err) From baef80d5a43240ebc3834b24c05d0190a5907c81 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 11:10:44 -0400 Subject: [PATCH 140/212] remove sanity check --- modules/wallet/update.go | 3 - siatest/wallet/wallet.go | 1 + siatest/wallet/wallet_test.go | 118 ++++++++++++++++++++++++++++++++++ 3 files changed, 119 insertions(+), 3 deletions(-) create mode 100644 siatest/wallet/wallet.go create mode 100644 siatest/wallet/wallet_test.go diff --git a/modules/wallet/update.go b/modules/wallet/update.go index 3531591a1d..f870ecc636 100644 --- a/modules/wallet/update.go +++ b/modules/wallet/update.go @@ -3,7 +3,6 @@ package wallet import ( "math" - "github.com/NebulousLabs/Sia/build" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/errors" @@ -170,7 +169,6 @@ func (w *Wallet) revertHistory(tx *bolt.Tx, reverted []types.Block) error { txid := block.Transactions[i].ID() pt, err := dbGetLastProcessedTransaction(tx) if err != nil { - build.Critical("can't revert transaction because the bucket is empty") break // bucket is empty } if txid == pt.TransactionID { @@ -188,7 +186,6 @@ func (w *Wallet) revertHistory(tx *bolt.Tx, reverted []types.Block) error { // most recent transaction in bucketProcessedTransactions. pt, err := dbGetLastProcessedTransaction(tx) if err != nil { - build.Critical("can't revert miner payout because the bucket is empty") break // bucket is empty } if types.TransactionID(block.ID()) == pt.TransactionID { diff --git a/siatest/wallet/wallet.go b/siatest/wallet/wallet.go new file mode 100644 index 0000000000..23a7507327 --- /dev/null +++ b/siatest/wallet/wallet.go @@ -0,0 +1 @@ +package wallet diff --git a/siatest/wallet/wallet_test.go b/siatest/wallet/wallet_test.go new file mode 100644 index 0000000000..427c96f927 --- /dev/null +++ b/siatest/wallet/wallet_test.go @@ -0,0 +1,118 @@ +package wallet + +import ( + "errors" + "path/filepath" + "testing" + "time" + + "github.com/NebulousLabs/Sia/build" + "github.com/NebulousLabs/Sia/siatest" + "github.com/NebulousLabs/Sia/types" +) + +func TestTransactionReorg(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + + // Create 2 miners + testdir, err := siatest.TestDir(t.Name()) + if err != nil { + t.Fatal(err) + } + + // Create two miners + miner1, err := siatest.NewNode(siatest.Miner(filepath.Join(testdir, "miner1"))) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := miner1.Close(); err != nil { + t.Fatal(err) + } + }() + // miner1 sends a txn to itself and mines it. + uc, err := miner1.WalletAddressGet() + if err != nil { + t.Fatal(err) + } + wsp, err := miner1.WalletSiacoinsPost(types.SiacoinPrecision, uc.Address) + if err != nil { + t.Fatal(err) + } + println("\nTransactions sent") + for _, tid := range wsp.TransactionIDs { + println(" ", tid.String()) + } + println("") + blocks := 1 + for i := 0; i < blocks; i++ { + if err := miner1.MineBlock(); err != nil { + t.Fatal(err) + } + } + // wait until the transaction from before shows up as processed. 
+ txn := wsp.TransactionIDs[len(wsp.TransactionIDs)-1] + err = build.Retry(100, 100*time.Millisecond, func() error { + cg, err := miner1.ConsensusGet() + if err != nil { + return err + } + wtg, err := miner1.WalletTransactionsGet(1, cg.Height) + if err != nil { + return err + } + for _, t := range wtg.ConfirmedTransactions { + if t.TransactionID == txn { + return nil + } + } + return errors.New("txn isn't processed yet") + }) + if err != nil { + t.Fatal(err) + } + println("\ncreate node 2") + miner2, err := siatest.NewNode(siatest.Miner(filepath.Join(testdir, "miner2"))) + if err != nil { + t.Fatal(err) + } + defer func() { + if err := miner2.Close(); err != nil { + t.Fatal(err) + } + }() + + // miner2 mines 2 blocks now to create a longer chain than miner1. + for i := 0; i < blocks+1; i++ { + if err := miner2.MineBlock(); err != nil { + t.Fatal(err) + } + } + // miner1 and miner2 connect. This should cause a reorg that reverts the + // transaction from before. + println("\nconnecting") + if err := miner1.GatewayConnectPost(miner2.GatewayAddress()); err != nil { + t.Fatal(err) + } + err = build.Retry(100, 100*time.Millisecond, func() error { + cg, err := miner1.ConsensusGet() + if err != nil { + return err + } + wtg, err := miner1.WalletTransactionsGet(1, cg.Height) + if err != nil { + return err + } + for _, t := range wtg.ConfirmedTransactions { + if t.TransactionID == txn { + return errors.New("txn is still processed") + } + } + return nil + }) + if err != nil { + t.Fatal(err) + } +} From 03a260f34c53be248a00c9715517eedd025e5c76 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 17:02:21 -0400 Subject: [PATCH 141/212] remove leftover printlns and cursor.last call --- modules/wallet/database.go | 5 +++-- siatest/wallet/wallet_test.go | 10 ++-------- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/modules/wallet/database.go b/modules/wallet/database.go index eb4340b8fe..ea8a01ce42 100644 --- a/modules/wallet/database.go +++ b/modules/wallet/database.go @@ -370,8 +370,9 @@ func dbDeleteLastProcessedTransaction(tx *bolt.Tx) error { // Delete the last processed txn and decrement the sequence. b := tx.Bucket(bucketProcessedTransactions) seq := b.Sequence() - key, _ := b.Cursor().Last() - return errors.Compose(b.SetSequence(seq-1), b.Delete(key)) + keyBytes := make([]byte, 8) + binary.BigEndian.PutUint64(keyBytes, seq) + return errors.Compose(b.SetSequence(seq-1), b.Delete(keyBytes)) } func dbGetProcessedTransaction(tx *bolt.Tx, index uint64) (pt modules.ProcessedTransaction, err error) { diff --git a/siatest/wallet/wallet_test.go b/siatest/wallet/wallet_test.go index 427c96f927..7b31a20176 100644 --- a/siatest/wallet/wallet_test.go +++ b/siatest/wallet/wallet_test.go @@ -11,12 +11,13 @@ import ( "github.com/NebulousLabs/Sia/types" ) +// TestTransactionReorg makes sure that a processedTransaction isn't returned +// by the API after bein reverted. 
func TestTransactionReorg(t *testing.T) { if testing.Short() { t.SkipNow() } - // Create 2 miners testdir, err := siatest.TestDir(t.Name()) if err != nil { t.Fatal(err) @@ -41,11 +42,6 @@ func TestTransactionReorg(t *testing.T) { if err != nil { t.Fatal(err) } - println("\nTransactions sent") - for _, tid := range wsp.TransactionIDs { - println(" ", tid.String()) - } - println("") blocks := 1 for i := 0; i < blocks; i++ { if err := miner1.MineBlock(); err != nil { @@ -73,7 +69,6 @@ func TestTransactionReorg(t *testing.T) { if err != nil { t.Fatal(err) } - println("\ncreate node 2") miner2, err := siatest.NewNode(siatest.Miner(filepath.Join(testdir, "miner2"))) if err != nil { t.Fatal(err) @@ -92,7 +87,6 @@ func TestTransactionReorg(t *testing.T) { } // miner1 and miner2 connect. This should cause a reorg that reverts the // transaction from before. - println("\nconnecting") if err := miner1.GatewayConnectPost(miner2.GatewayAddress()); err != nil { t.Fatal(err) } From f699ec909df05c8c5ad651ace6660a1e15c3861f Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 13 Apr 2018 14:17:39 -0400 Subject: [PATCH 142/212] add scans to queue in random order --- modules/renter/hostdb/scan.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index 2b23299a0a..e5454e44fa 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -15,18 +15,24 @@ import ( "github.com/NebulousLabs/fastrand" ) -// queueScan will add a host to the queue to be scanned. +// queueScan will add a host to the queue to be scanned. The host will be added +// at a random position which means that the order in which queueScan is called +// is not necessarily the order in which the hosts get scanned. That guarantees +// a random scan order during the initial scan. func (hdb *HostDB) queueScan(entry modules.HostDBEntry) { // If this entry is already in the scan pool, can return immediately. _, exists := hdb.scanMap[entry.PublicKey.String()] if exists { return } - - // Add the entry to a waitlist, then check if any thread is currently - // emptying the waitlist. If not, spawn a thread to empty the waitlist. + // Add the entry to a random position in the waitlist. hdb.scanMap[entry.PublicKey.String()] = struct{}{} hdb.scanList = append(hdb.scanList, entry) + i := len(hdb.scanList) - 1 + j := fastrand.Intn(i) + hdb.scanList[i], hdb.scanList[j] = hdb.scanList[j], hdb.scanList[i] + // Check if any thread is currently emptying the waitlist. If not, spawn a + // thread to empty the waitlist. if hdb.scanWait { // Another thread is emptying the scan list, nothing to worry about. return From 8fb3485d0172024338336e06ed2f00112ee77333 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Fri, 13 Apr 2018 15:17:35 -0400 Subject: [PATCH 143/212] Speedup initial host scan --- modules/renter/hostdb/consts.go | 6 ++++++ modules/renter/hostdb/hostdb.go | 12 +++++++----- modules/renter/hostdb/scan.go | 31 ++++++++++++++++++++++++++++--- 3 files changed, 41 insertions(+), 8 deletions(-) diff --git a/modules/renter/hostdb/consts.go b/modules/renter/hostdb/consts.go index 054bb93d03..647ebea559 100644 --- a/modules/renter/hostdb/consts.go +++ b/modules/renter/hostdb/consts.go @@ -34,6 +34,12 @@ const ( // scans start getting compressed. minScans = 12 + // minScansForSpeedup is the number of successful scan that needs to be + // completed before the dial up timeout for scans is reduced. 
This ensures + // that we have a sufficient sample size of scans for estimating the worst + // case timeout. + minScansForSpeedup = 100 + // recentInteractionWeightLimit caps the number of recent interactions as a // percentage of the historic interactions, to be certain that a large // amount of activity in a short period of time does not overwhelm the diff --git a/modules/renter/hostdb/hostdb.go b/modules/renter/hostdb/hostdb.go index 62c56cc717..f73ef2b179 100644 --- a/modules/renter/hostdb/hostdb.go +++ b/modules/renter/hostdb/hostdb.go @@ -10,6 +10,7 @@ import ( "os" "path/filepath" "sync" + "time" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter/hostdb/hosttree" @@ -48,11 +49,12 @@ type HostDB struct { // handful of goroutines constantly waiting on the channel for hosts to // scan. The scan map is used to prevent duplicates from entering the scan // pool. - initialScanComplete bool - scanList []modules.HostDBEntry - scanMap map[string]struct{} - scanWait bool - scanningThreads int + successfulScans uint64 + initialScanTimeout time.Duration + scanList []modules.HostDBEntry + scanMap map[string]struct{} + scanWait bool + scanningThreads int blockHeight types.BlockHeight lastChange modules.ConsensusChangeID diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index e5454e44fa..3464cea803 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -253,12 +253,25 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { hdb.mu.RUnlock() var settings modules.HostExternalSettings + var latency time.Duration err := func() error { + // During the initial scan we choose a shorter timeout once we have + // scanned a certain number of hosts successfully to finish the scan + // more quickly. + timeout := hostRequestTimeout + hdb.mu.RLock() + if !hdb.initialScanComplete && hdb.successfulScans > minScansForSpeedup && hdb.initialScanTimeout < hostRequestTimeout { + timeout = hdb.initialScanTimeout + } + hdb.mu.RUnlock() + dialer := &net.Dialer{ Cancel: hdb.tg.StopChan(), - Timeout: hostRequestTimeout, + Timeout: timeout, } + start := time.Now() conn, err := dialer.Dial("tcp", string(netAddr)) + latency = time.Since(start) if err != nil { return err } @@ -281,19 +294,31 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { copy(pubkey[:], pubKey.Key) return crypto.ReadSignedObject(conn, &settings, maxSettingsLen, pubkey) }() + success := false if err != nil { hdb.log.Debugf("Scan of host at %v failed: %v", netAddr, err) } else { hdb.log.Debugf("Scan of host at %v succeeded.", netAddr) entry.HostExternalSettings = settings + success = true } + hdb.mu.Lock() + defer hdb.mu.Unlock() // Update the host tree to have a new entry, including the new error. Then // delete the entry from the scan map as the scan has been successful. - hdb.mu.Lock() hdb.updateEntry(entry, err) - hdb.mu.Unlock() + + // Update the initial scan timeout if the scan was successful and if the + // latency for this scan was greater than the current timeout. We want to + // find the worst case latency. 
+ if success { + hdb.successfulScans++ + if latency > hdb.initialScanTimeout { + hdb.initialScanTimeout = 2 * latency + } + } } // waitForScans is a helper function that blocks until the hostDB's scanList is From b0175d82b4e66d56dd0fa69150d4e08d6d1bc3a7 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 26 Apr 2018 18:25:55 -0400 Subject: [PATCH 144/212] fix queueScan --- modules/renter/hostdb/scan.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index 3464cea803..de0d1b5236 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -28,9 +28,11 @@ func (hdb *HostDB) queueScan(entry modules.HostDBEntry) { // Add the entry to a random position in the waitlist. hdb.scanMap[entry.PublicKey.String()] = struct{}{} hdb.scanList = append(hdb.scanList, entry) - i := len(hdb.scanList) - 1 - j := fastrand.Intn(i) - hdb.scanList[i], hdb.scanList[j] = hdb.scanList[j], hdb.scanList[i] + if len(hdb.scanList) > 1 { + i := len(hdb.scanList) - 1 + j := fastrand.Intn(i) + hdb.scanList[i], hdb.scanList[j] = hdb.scanList[j], hdb.scanList[i] + } // Check if any thread is currently emptying the waitlist. If not, spawn a // thread to empty the waitlist. if hdb.scanWait { From 12c638ea32c67a07ca251c179d45fc02796aa65f Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 11:24:47 -0400 Subject: [PATCH 145/212] add comment --- modules/renter/hostdb/scan.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index de0d1b5236..627f4f08c5 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -317,6 +317,9 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { // find the worst case latency. if success { hdb.successfulScans++ + // If the latency is greater than the initialScanTimeout we use twice + // the latency for the new timeout. This gives us a little more wiggle + // room for outliers in case most hosts have a similar latency. if latency > hdb.initialScanTimeout { hdb.initialScanTimeout = 2 * latency } From f9f57ddca3e804a3005824a118ad037da4c1bd34 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 13:41:53 -0400 Subject: [PATCH 146/212] reduce minScansForSpeedup to 25 --- modules/renter/hostdb/consts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/hostdb/consts.go b/modules/renter/hostdb/consts.go index 647ebea559..8cac990192 100644 --- a/modules/renter/hostdb/consts.go +++ b/modules/renter/hostdb/consts.go @@ -38,7 +38,7 @@ const ( // completed before the dial up timeout for scans is reduced. This ensures // that we have a sufficient sample size of scans for estimating the worst // case timeout. 
- minScansForSpeedup = 100 + minScansForSpeedup = 25 // recentInteractionWeightLimit caps the number of recent interactions as a // percentage of the historic interactions, to be certain that a large From 9a1d9a61e7919c417bfdced2e70636352d92c39b Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 16:19:09 -0400 Subject: [PATCH 147/212] use median instead of worst case --- modules/renter/hostdb/consts.go | 5 +++++ modules/renter/hostdb/hostdb.go | 12 ++++++------ modules/renter/hostdb/scan.go | 34 ++++++++++++++++++--------------- 3 files changed, 30 insertions(+), 21 deletions(-) diff --git a/modules/renter/hostdb/consts.go b/modules/renter/hostdb/consts.go index 8cac990192..611e83a668 100644 --- a/modules/renter/hostdb/consts.go +++ b/modules/renter/hostdb/consts.go @@ -40,6 +40,11 @@ const ( // case timeout. minScansForSpeedup = 25 + // scanSpeedupMedianMultiplier is the number with which the median of the + // initial scans is multiplied to speedup the intial scan after + // minScansForSpeedup successful scans. + scanSpeedupMedianMultiplier = 5 + // recentInteractionWeightLimit caps the number of recent interactions as a // percentage of the historic interactions, to be certain that a large // amount of activity in a short period of time does not overwhelm the diff --git a/modules/renter/hostdb/hostdb.go b/modules/renter/hostdb/hostdb.go index f73ef2b179..83042347d7 100644 --- a/modules/renter/hostdb/hostdb.go +++ b/modules/renter/hostdb/hostdb.go @@ -49,12 +49,12 @@ type HostDB struct { // handful of goroutines constantly waiting on the channel for hosts to // scan. The scan map is used to prevent duplicates from entering the scan // pool. - successfulScans uint64 - initialScanTimeout time.Duration - scanList []modules.HostDBEntry - scanMap map[string]struct{} - scanWait bool - scanningThreads int + initialScanComplete bool + initialScanLatencies []time.Duration + scanList []modules.HostDBEntry + scanMap map[string]struct{} + scanWait bool + scanningThreads int blockHeight types.BlockHeight lastChange modules.ConsensusChangeID diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index 627f4f08c5..e666f37402 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -6,6 +6,7 @@ package hostdb import ( "net" + "sort" "time" "github.com/NebulousLabs/Sia/build" @@ -257,13 +258,18 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { var settings modules.HostExternalSettings var latency time.Duration err := func() error { - // During the initial scan we choose a shorter timeout once we have - // scanned a certain number of hosts successfully to finish the scan - // more quickly. timeout := hostRequestTimeout hdb.mu.RLock() - if !hdb.initialScanComplete && hdb.successfulScans > minScansForSpeedup && hdb.initialScanTimeout < hostRequestTimeout { - timeout = hdb.initialScanTimeout + if !hdb.initialScanComplete && len(hdb.initialScanLatencies) == minScansForSpeedup { + // During an initial scan, when we have at least minScansForSpeedup + // active scans in initialScanLatencies, we use + // 5*median(initialScanLatencies) as the new hostRequestTimeout to + // speedup the scanning process. 
+ timeout = hdb.initialScanLatencies[len(hdb.initialScanLatencies)/2] + timeout *= scanSpeedupMedianMultiplier + if hostRequestTimeout < timeout { + timeout = hostRequestTimeout + } } hdb.mu.RUnlock() @@ -312,16 +318,14 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { // delete the entry from the scan map as the scan has been successful. hdb.updateEntry(entry, err) - // Update the initial scan timeout if the scan was successful and if the - // latency for this scan was greater than the current timeout. We want to - // find the worst case latency. - if success { - hdb.successfulScans++ - // If the latency is greater than the initialScanTimeout we use twice - // the latency for the new timeout. This gives us a little more wiggle - // room for outliers in case most hosts have a similar latency. - if latency > hdb.initialScanTimeout { - hdb.initialScanTimeout = 2 * latency + // Add the scan to the initialScanLatencies if it was successful. + if success && len(hdb.initialScanLatencies) < minScansForSpeedup { + hdb.initialScanLatencies = append(hdb.initialScanLatencies, latency) + // If the slice has reached its maximum size we sort it. + if len(hdb.initialScanLatencies) == minScansForSpeedup { + sort.Slice(hdb.initialScanLatencies, func(i, j int) bool { + return hdb.initialScanLatencies[i] < hdb.initialScanLatencies[j] + }) } } } From 65a1b781b3af431eee001645331c69242bcd631e Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 13:45:36 -0400 Subject: [PATCH 148/212] refactor success boolean --- modules/renter/hostdb/scan.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index e666f37402..e09d690bee 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -302,15 +302,14 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { copy(pubkey[:], pubKey.Key) return crypto.ReadSignedObject(conn, &settings, maxSettingsLen, pubkey) }() - success := false if err != nil { hdb.log.Debugf("Scan of host at %v failed: %v", netAddr, err) } else { hdb.log.Debugf("Scan of host at %v succeeded.", netAddr) entry.HostExternalSettings = settings - success = true } + success := err == nil hdb.mu.Lock() defer hdb.mu.Unlock() From c0fe35c1898c9aa7c8ef287b8e4253441a1a5bf9 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 18 Apr 2018 19:35:34 -0400 Subject: [PATCH 149/212] add test for stream caching --- siatest/renter/renter_test.go | 42 +++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 3e9c7981a4..3872156653 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -3,7 +3,9 @@ package renter import ( "sync" "testing" + "time" + "github.com/NebulousLabs/Sia/crypto" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/modules/renter" "github.com/NebulousLabs/Sia/node" @@ -39,8 +41,9 @@ func TestRenter(t *testing.T) { name string test func(*testing.T, *siatest.TestGroup) }{ - {"UploadDownload", testUploadDownload}, - {"DownloadMultipleLargeSectors", testDownloadMultipleLargeSectors}, + {"TestRenterStreamingCache", testRenterStreamingCache}, + {"TestUploadDownload", testUploadDownload}, + {"TestDownloadMultipleLargeSectors", testDownloadMultipleLargeSectors}, {"TestRenterLocalRepair", testRenterLocalRepair}, {"TestRenterRemoteRepair", testRenterRemoteRepair}, } @@ -269,3 +272,38 @@ func 
testRenterRemoteRepair(t *testing.T, tg *siatest.TestGroup) { t.Fatal("Failed to download file", err) } } + +// testRenterStreamingCache checks if the chunk cache works correctly. +func testRenterStreamingCache(t *testing.T, tg *siatest.TestGroup) { + // Grab the first of the group's renters + r := tg.Renters()[0] + + // Set fileSize and redundancy for upload + dataPieces := uint64(1) + parityPieces := uint64(len(tg.Hosts())) - dataPieces + + // Set the bandwidth limit to 1 chunk per second. + pieceSize := modules.SectorSize - crypto.TwofishOverhead + chunkSize := int64(pieceSize * dataPieces) + if err := r.RenterPostRateLimit(chunkSize, chunkSize); err != nil { + t.Fatal(err) + } + + // Upload a file that is a single chunk big. + _, remoteFile, err := r.UploadNewFileBlocking(int(chunkSize), dataPieces, parityPieces) + if err != nil { + t.Fatal(err) + } + + // Download the same chunk 250 times. This should take at least 250 seconds + // without caching but not more than 30 with caching. + start := time.Now() + for i := 0; i < 250; i++ { + if _, err := r.Stream(remoteFile); err != nil { + t.Fatal(err) + } + if time.Since(start) > time.Second*30 { + t.Fatal("download took longer than 30 seconds") + } + } +} From 096492ece3d189cc5550522698c6960d54d9c678 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 26 Apr 2018 17:48:34 -0400 Subject: [PATCH 150/212] fix cache and useragent check --- modules/renter/downloadcache.go | 2 +- node/api/routes.go | 20 +++----------------- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 4b2bdc5bc7..7cda44b761 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -13,7 +13,7 @@ import ( // endpoint download. // TODO this won't be necessary anymore once we have partial downloads. func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { - if udc.download.staticDestinationType == destinationTypeSeekStream { + if udc.download.staticDestinationType != destinationTypeSeekStream { // We only cache streaming chunks since browsers and media players tend to only request a few kib at once when streaming data. That way we can prevent scheduling the same chunk for download over and over. return } diff --git a/node/api/routes.go b/node/api/routes.go index bd7d0ec204..1d868c45f2 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -1,7 +1,6 @@ package api import ( - "context" "net/http" "strings" "time" @@ -10,10 +9,6 @@ import ( "github.com/julienschmidt/httprouter" ) -// unrestrictedContextKey is a context key that is set to allow a route to be -// called without the Sia-Agent being set. -type unrestrictedContextKey struct{} - // buildHttpRoutes sets up and returns an * httprouter.Router. 
// it connected the Router to the given api using the required // parameters: requiredUserAgent and requiredPassword @@ -90,7 +85,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.GET("/renter/download/*siapath", RequirePassword(api.renterDownloadHandler, requiredPassword)) router.GET("/renter/downloadasync/*siapath", RequirePassword(api.renterDownloadAsyncHandler, requiredPassword)) router.POST("/renter/rename/*siapath", RequirePassword(api.renterRenameHandler, requiredPassword)) - router.GET("/renter/stream/*siapath", Unrestricted(api.renterStreamHandler)) + router.GET("/renter/stream/*siapath", api.renterStreamHandler) router.POST("/renter/upload/*siapath", RequirePassword(api.renterUploadHandler, requiredPassword)) // HostDB endpoints. @@ -201,16 +196,7 @@ func RequirePassword(h httprouter.Handle, password string) httprouter.Handle { } } -// Unrestricted can be used to whitelist api routes from requiring the -// Sia-Agent to be set. -func Unrestricted(h httprouter.Handle) httprouter.Handle { - return httprouter.Handle(func(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { - req = req.WithContext(context.WithValue(req.Context(), unrestrictedContextKey{}, 0)) - h(w, req, ps) - }) -} - -// isUnrestricted checks if a context has the unrestrictedContextKey set. +// isUnrestricted checks if a request may bypass the useragent check. func isUnrestricted(req *http.Request) bool { - return req.Context().Value(unrestrictedContextKey{}) != nil + return strings.HasPrefix(req.URL.Path, "/renter/stream/") } From 1718f70d2a1064554387146714359b1e9e6019d8 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 1 May 2018 17:28:32 -0400 Subject: [PATCH 151/212] add tpool/raw endpoint to client package and add siatest/wallet to Makefile --- Makefile | 2 +- node/api/client/transactionpool.go | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 002bf1b673..82176f9df4 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ pkgs = ./build ./cmd/siac ./cmd/siad ./compatibility ./crypto ./encoding ./modul ./modules/gateway ./modules/host ./modules/host/contractmanager ./modules/renter ./modules/renter/contractor \ ./modules/renter/hostdb ./modules/renter/hostdb/hosttree ./modules/renter/proto ./modules/miner ./modules/wallet \ ./modules/transactionpool ./node ./node/api ./persist ./siatest ./siatest/consensus ./siatest/renter \ - ./node/api/server ./sync ./types + ./siatest/wallet ./node/api/server ./sync ./types # fmt calls go fmt on all packages. fmt: diff --git a/node/api/client/transactionpool.go b/node/api/client/transactionpool.go index cc0089c254..d9228e5a91 100644 --- a/node/api/client/transactionpool.go +++ b/node/api/client/transactionpool.go @@ -1,9 +1,25 @@ package client -import "github.com/NebulousLabs/Sia/node/api" +import ( + "net/url" + + "github.com/NebulousLabs/Sia/encoding" + "github.com/NebulousLabs/Sia/node/api" + "github.com/NebulousLabs/Sia/types" +) // TransactionPoolFeeGet uses the /tpool/fee endpoint to get a fee estimation. func (c *Client) TransactionPoolFeeGet() (tfg api.TpoolFeeGET, err error) { err = c.get("/tpool/fee", &tfg) return } + +// TransactionPoolRawPost uses the /tpool/raw endpoint to send a raw +// transaction to the transaction pool. 
+func (c *Client) TransactionPoolRawPost(txn types.Transaction, parents types.Transaction) (err error) { + values := url.Values{} + values.Set("transaction", string(encoding.Marshal(txn))) + values.Set("parents", string(encoding.Marshal(parents))) + err = c.post("/tpool/raw", values.Encode(), nil) + return +} From d2b84ae5c6a4e62212953493c52416313f0c72f7 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 16:05:56 -0400 Subject: [PATCH 152/212] Reduce impact of host collateral on scoring the host --- modules/renter/hostdb/hostweight.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/modules/renter/hostdb/hostweight.go b/modules/renter/hostdb/hostweight.go index 6aec7319f9..45f47589fa 100644 --- a/modules/renter/hostdb/hostweight.go +++ b/modules/renter/hostdb/hostweight.go @@ -14,9 +14,9 @@ var ( // weight to be very large. baseWeight = types.NewCurrency(new(big.Int).Exp(big.NewInt(10), big.NewInt(80), nil)) - // collateralExponentiation is the number of times that the collateral is - // multiplied into the price. - collateralExponentiation = 1 + // collateralExponentiation is the power to which we raise the weight + // during collateral adjustment. + collateralExponentiation = 0.75 // minCollateral is the amount of collateral we weight all hosts as having, // even if they do not have any collateral. This is to temporarily prop up @@ -85,10 +85,7 @@ func (hdb *HostDB) collateralAdjustments(entry modules.HostDBEntry) float64 { actual := float64(actualU64) // Exponentiate the results. - weight := float64(1) - for i := 0; i < collateralExponentiation; i++ { - weight *= actual / base - } + weight := math.Pow(actual/base, collateralExponentiation) // Add in penalties for low MaxCollateral. Hosts should be willing to pay // for at least 100 GB of collateral on a contract. From 35b59218c0c9bd04033f69debb4c1250fb117816 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 16:25:29 -0400 Subject: [PATCH 153/212] extend synchronization check error message --- siatest/testgroup.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/siatest/testgroup.go b/siatest/testgroup.go index dd5073a4fd..726c469af9 100644 --- a/siatest/testgroup.go +++ b/siatest/testgroup.go @@ -340,8 +340,10 @@ func synchronizationCheck(miner *TestNode, nodes map[*TestNode]struct{}) error { // If the miner's height is greater than the node's we need to // wait a bit longer for them to sync. if mcg.Height > ncg.Height { - return fmt.Errorf("the node didn't catch up to the miner's height %v %v", - mcg.Height, ncg.Height) + gwgMiner, errMiner := miner.GatewayGet() + gwgNode, errNode := node.GatewayGet() + return errors.Compose(fmt.Errorf("the node didn't catch up to the miner's height %v %v but have %v and %v peers respectively", + mcg.Height, ncg.Height, len(gwgNode.Peers), len(gwgMiner.Peers)), errMiner, errNode) } // If the miner's height is smaller than the node's we need a // bit longer for them to sync. 
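The collateral adjustment in hostweight.go above now scales sub-linearly: with collateralExponentiation at 0.75, the score multiplier becomes (actual/base)^0.75 instead of the old linear ratio, which compresses both the bonus for very high collateral and the penalty for low collateral toward 1. A minimal standalone sketch of the difference, using a few arbitrary collateral-to-base ratios chosen only for illustration:
```
package main

import (
	"fmt"
	"math"
)

func main() {
	// Example collateral/base ratios, chosen only to illustrate the curve.
	for _, ratio := range []float64{0.5, 1, 2, 8, 32} {
		linear := ratio                   // old behaviour: ratio^1
		dampened := math.Pow(ratio, 0.75) // new behaviour: ratio^0.75
		fmt.Printf("ratio %6.2f  linear %7.2f  dampened %7.2f\n", ratio, linear, dampened)
	}
}
```
At a ratio of 32 the old weighting multiplies the score by 32, while the dampened curve multiplies it by roughly 13.5, so collateral still matters but no longer dominates the host score.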
From 4b66b592b60d748ddf508049d1694feb83503267 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 17:38:41 -0400 Subject: [PATCH 154/212] add sanity check for host scan speedup --- modules/renter/hostdb/scan.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/renter/hostdb/scan.go b/modules/renter/hostdb/scan.go index e09d690bee..0c38381f21 100644 --- a/modules/renter/hostdb/scan.go +++ b/modules/renter/hostdb/scan.go @@ -260,6 +260,9 @@ func (hdb *HostDB) managedScanHost(entry modules.HostDBEntry) { err := func() error { timeout := hostRequestTimeout hdb.mu.RLock() + if len(hdb.initialScanLatencies) > minScansForSpeedup { + build.Critical("initialScanLatencies should never be greater than minScansForSpeedup") + } if !hdb.initialScanComplete && len(hdb.initialScanLatencies) == minScansForSpeedup { // During an initial scan, when we have at least minScansForSpeedup // active scans in initialScanLatencies, we use From 3499efe26a6148051f1a90fe83cdc92a7a1823a9 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 11:14:16 -0400 Subject: [PATCH 155/212] Add response type to /consensus/blocks endpoint docu --- doc/API.md | 41 +++++++++++++++++++++++++++++++++++++++++ doc/api/Consensus.md | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/doc/API.md b/doc/API.md index 6c360d62ac..50499db2e7 100644 --- a/doc/API.md +++ b/doc/API.md @@ -196,6 +196,47 @@ height ``` +###### Response +The JSON formatted block or a standard error response. +``` +{ + "minerpayouts": [ + { + "unlockhash": "68a0607b15f21ce643bb47075e539b9e938d7a8e0870951a3c872a2d89ae05194ee3a0f18680", + "value": "299999000000000000000000000000" + } + ], + "nonce": [ + 253, + 253, + 0, + 0, + 0, + 0, + 0, + 100 + ], + "parentid": "25f6e3b9295a61f69fcb956aca9f0076234ecf2e02d399db5448b6e22f26e81c", + "timestamp": 1433626546, + "transactions": [ + { + "arbitrarydata": [ + "Tm9uU2lhQ9ikb2Lc6jDRfLhhjPflTQ==" + ], + "filecontractrevisions": [], + "filecontracts": [], + "minerfees": [], + "siacoininputs": [], + "siacoinoutputs": [], + "siafundinputs": [], + "siafundoutputs": [], + "storageproofs": [], + "transactionsignatures": [] + } + ] +} +``` + #### /consensus/validate/transactionset [POST] validates a set of transactions using the current utxo set. diff --git a/doc/api/Consensus.md b/doc/api/Consensus.md index d23f658b01..5840abc7bd 100644 --- a/doc/api/Consensus.md +++ b/doc/api/Consensus.md @@ -66,6 +66,47 @@ height ``` +###### Response +The JSON formatted block or a standard error response. +``` +{ + "minerpayouts": [ + { + "unlockhash": "68a0607b15f21ce643bb47075e539b9e938d7a8e0870951a3c872a2d89ae05194ee3a0f18680", + "value": "299999000000000000000000000000" + } + ], + "nonce": [ + 253, + 253, + 0, + 0, + 0, + 0, + 0, + 100 + ], + "parentid": "25f6e3b9295a61f69fcb956aca9f0076234ecf2e02d399db5448b6e22f26e81c", + "timestamp": 1433626546, + "transactions": [ + { + "arbitrarydata": [ + "Tm9uU2lhQ9ikb2Lc6jDRfLhhjPflTQ==" + ], + "filecontractrevisions": [], + "filecontracts": [], + "minerfees": [], + "siacoininputs": [], + "siacoinoutputs": [], + "siafundinputs": [], + "siafundoutputs": [], + "storageproofs": [], + "transactionsignatures": [] + } + ] +} +``` + #### /consensus/validate/transactionset [POST] validates a set of transactions using the current utxo set. 
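For clients that consume the documented /consensus/blocks response directly rather than through the Go api client, a minimal sketch of fetching a block by height and decoding a subset of the documented fields is shown below. It assumes a locally running siad on the default API address localhost:9980 and the usual Sia-Agent user-agent requirement; the height value is arbitrary and the struct mirrors only a few of the JSON fields from the example above.
```
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// blockResp mirrors a subset of the JSON fields documented above.
type blockResp struct {
	ParentID     string `json:"parentid"`
	Timestamp    int64  `json:"timestamp"`
	MinerPayouts []struct {
		UnlockHash string `json:"unlockhash"`
		Value      string `json:"value"`
	} `json:"minerpayouts"`
}

func main() {
	// Arbitrary height for illustration; siad is assumed to listen on the default API address.
	req, err := http.NewRequest("GET", "http://localhost:9980/consensus/blocks?height=1000", nil)
	if err != nil {
		panic(err)
	}
	// Most siad endpoints reject requests that do not identify as Sia-Agent.
	req.Header.Set("User-Agent", "Sia-Agent")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var b blockResp
	if err := json.NewDecoder(resp.Body).Decode(&b); err != nil {
		panic(err)
	}
	fmt.Println(b.ParentID, b.Timestamp, len(b.MinerPayouts))
}
```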
From 16ab8023a85d981a4b09e2345bc07e969cd668c3 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 11:49:46 -0400 Subject: [PATCH 156/212] add BlockID to /consensus/blocks endpoint --- node/api/client/consensus.go | 8 ++++---- node/api/consensus.go | 11 ++++++++++- siatest/consensus/consensus_test.go | 6 ++++++ 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/node/api/client/consensus.go b/node/api/client/consensus.go index bd60ea4afa..ef98ee9301 100644 --- a/node/api/client/consensus.go +++ b/node/api/client/consensus.go @@ -14,13 +14,13 @@ func (c *Client) ConsensusGet() (cg api.ConsensusGET, err error) { } // ConsensusBlocksIDGet requests the /consensus/blocks api resource -func (c *Client) ConsensusBlocksIDGet(id types.BlockID) (block types.Block, err error) { - err = c.get("/consensus/blocks?id="+id.String(), &block) +func (c *Client) ConsensusBlocksIDGet(id types.BlockID) (cbg api.ConsensusBlocksGet, err error) { + err = c.get("/consensus/blocks?id="+id.String(), &cbg) return } // ConsensusBlocksHeightGet requests the /consensus/blocks api resource -func (c *Client) ConsensusBlocksHeightGet(height types.BlockHeight) (block types.Block, err error) { - err = c.get("/consensus/blocks?height="+fmt.Sprint(height), &block) +func (c *Client) ConsensusBlocksHeightGet(height types.BlockHeight) (cbg api.ConsensusBlocksGet, err error) { + err = c.get("/consensus/blocks?height="+fmt.Sprint(height), &cbg) return } diff --git a/node/api/consensus.go b/node/api/consensus.go index b435decd7f..17a82d2cae 100644 --- a/node/api/consensus.go +++ b/node/api/consensus.go @@ -25,6 +25,12 @@ type ConsensusHeadersGET struct { BlockID types.BlockID `json:"blockid"` } +// ConsensusBlocksGet wraps a types.Block and adds an id field. +type ConsensusBlocksGet struct { + BlockID types.BlockID `json:"id"` + types.Block +} + // consensusHandler handles the API calls to /consensus. 
func (api *API) consensusHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { cbid := api.cs.CurrentBlock().ID() @@ -77,7 +83,10 @@ func (api *API) consensusBlocksHandler(w http.ResponseWriter, req *http.Request, return } // Write response - WriteJSON(w, b) + WriteJSON(w, ConsensusBlocksGet{ + b.ID(), + b, + }) } // consensusValidateTransactionsetHandler handles the API calls to diff --git a/siatest/consensus/consensus_test.go b/siatest/consensus/consensus_test.go index 6095b3155a..5b16417eaa 100644 --- a/siatest/consensus/consensus_test.go +++ b/siatest/consensus/consensus_test.go @@ -85,6 +85,9 @@ func TestConsensusBlocksIDGet(t *testing.T) { } // Make sure all of the fields are initialized and not empty var zeroID types.BlockID + if block.BlockID != cg.CurrentBlock { + t.Fatal("BlockID wasn't set correctly") + } if block.ParentID == zeroID { t.Fatal("ParentID wasn't set correctly") } @@ -103,6 +106,9 @@ func TestConsensusBlocksIDGet(t *testing.T) { if err != nil { t.Fatal("Failed to retrieve block", err) } + if block.BlockID != block2.BlockID { + t.Fatal("BlockID wasn't set correctly") + } // block and block2 should be the same if block.ParentID != block2.ParentID { t.Fatal("ParentIDs don't match") From 7d29da60dce9195f0519b49893ac4f1aa00dd367 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 13:31:56 -0400 Subject: [PATCH 157/212] add height to /consensus/blocks endpoint --- modules/consensus.go | 6 +++--- modules/consensus/consensusset.go | 5 +++-- node/api/consensus.go | 8 +++++--- siatest/consensus/consensus_test.go | 8 +++++++- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/modules/consensus.go b/modules/consensus.go index 4a24d722fa..6a59df4b96 100644 --- a/modules/consensus.go +++ b/modules/consensus.go @@ -190,9 +190,9 @@ type ( // bool to indicate whether that block exists. BlockAtHeight(types.BlockHeight) (types.Block, bool) - // BlocksByID returns a block found for a given ID, with a bool to - // indicate whether that block exists. - BlockByID(types.BlockID) (types.Block, bool) + // BlocksByID returns a block found for a given ID and its height, with + // a bool to indicate whether that block exists. + BlockByID(types.BlockID) (types.Block, types.BlockHeight, bool) // ChildTarget returns the target required to extend the current heaviest // fork. This function is typically used by miners looking to extend the diff --git a/modules/consensus/consensusset.go b/modules/consensus/consensusset.go index 07a5ed1136..8dfd471f00 100644 --- a/modules/consensus/consensusset.go +++ b/modules/consensus/consensusset.go @@ -203,17 +203,18 @@ func (cs *ConsensusSet) BlockAtHeight(height types.BlockHeight) (block types.Blo } // BlockByID returns the block for a given BlockID. -func (cs *ConsensusSet) BlockByID(id types.BlockID) (block types.Block, exists bool) { +func (cs *ConsensusSet) BlockByID(id types.BlockID) (block types.Block, height types.BlockHeight, exists bool) { _ = cs.db.View(func(tx *bolt.Tx) error { pb, err := getBlockMap(tx, id) if err != nil { return err } block = pb.Block + height = pb.Height exists = true return nil }) - return block, exists + return block, height, exists } // ChildTarget returns the target for the child of a block. diff --git a/node/api/consensus.go b/node/api/consensus.go index 17a82d2cae..b3720c4602 100644 --- a/node/api/consensus.go +++ b/node/api/consensus.go @@ -27,7 +27,8 @@ type ConsensusHeadersGET struct { // ConsensusBlocksGet wraps a types.Block and adds an id field. 
type ConsensusBlocksGet struct { - BlockID types.BlockID `json:"id"` + BlockID types.BlockID `json:"id"` + BlockHeight types.BlockHeight `json:"height"` types.Block } @@ -57,6 +58,7 @@ func (api *API) consensusBlocksHandler(w http.ResponseWriter, req *http.Request, } var b types.Block + var h types.BlockHeight var exists bool // Handle request by id @@ -66,11 +68,10 @@ func (api *API) consensusBlocksHandler(w http.ResponseWriter, req *http.Request, WriteError(w, Error{"failed to unmarshal blockid"}, http.StatusBadRequest) return } - b, exists = api.cs.BlockByID(bid) + b, h, exists = api.cs.BlockByID(bid) } // Handle request by height if height != "" { - var h uint64 if _, err := fmt.Sscan(height, &h); err != nil { WriteError(w, Error{"failed to parse block height"}, http.StatusBadRequest) return @@ -85,6 +86,7 @@ func (api *API) consensusBlocksHandler(w http.ResponseWriter, req *http.Request, // Write response WriteJSON(w, ConsensusBlocksGet{ b.ID(), + h, b, }) } diff --git a/siatest/consensus/consensus_test.go b/siatest/consensus/consensus_test.go index 5b16417eaa..89fa79cbb4 100644 --- a/siatest/consensus/consensus_test.go +++ b/siatest/consensus/consensus_test.go @@ -88,6 +88,9 @@ func TestConsensusBlocksIDGet(t *testing.T) { if block.BlockID != cg.CurrentBlock { t.Fatal("BlockID wasn't set correctly") } + if block.BlockHeight != cg.Height { + t.Fatal("BlockHeight wasn't set correctly") + } if block.ParentID == zeroID { t.Fatal("ParentID wasn't set correctly") } @@ -106,10 +109,13 @@ func TestConsensusBlocksIDGet(t *testing.T) { if err != nil { t.Fatal("Failed to retrieve block", err) } + // block and block2 should be the same if block.BlockID != block2.BlockID { t.Fatal("BlockID wasn't set correctly") } - // block and block2 should be the same + if block.BlockHeight != block2.BlockHeight { + t.Fatal("BlockID wasn't set correctly") + } if block.ParentID != block2.ParentID { t.Fatal("ParentIDs don't match") } From 2b929404c0612d44fceda7f66b592cc12b52cbd7 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 13:34:29 -0400 Subject: [PATCH 158/212] update docu --- doc/API.md | 2 ++ doc/api/Consensus.md | 2 ++ 2 files changed, 4 insertions(+) diff --git a/doc/API.md b/doc/API.md index 50499db2e7..b92b2919be 100644 --- a/doc/API.md +++ b/doc/API.md @@ -200,6 +200,8 @@ height The JSON formatted block or a standard error response. ``` { + "height": 1, + "id": "0000000018492dfe2a1b2da6ca3534a757796573f84f0eb0eb5f88d75cd10f9f", "minerpayouts": [ { "unlockhash": "68a0607b15f21ce643bb47075e539b9e938d7a8e0870951a3c872a2d89ae05194ee3a0f18680", diff --git a/doc/api/Consensus.md b/doc/api/Consensus.md index 5840abc7bd..ecda632213 100644 --- a/doc/api/Consensus.md +++ b/doc/api/Consensus.md @@ -70,6 +70,8 @@ height The JSON formatted block or a standard error response. ``` { + "height": 1, + "id": "0000000018492dfe2a1b2da6ca3534a757796573f84f0eb0eb5f88d75cd10f9f", "minerpayouts": [ { "unlockhash": "68a0607b15f21ce643bb47075e539b9e938d7a8e0870951a3c872a2d89ae05194ee3a0f18680", From 789497c5894fe9526e12f3deed498905e78ad480 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 13:42:48 -0400 Subject: [PATCH 159/212] collapse nonce field --- doc/API.md | 11 +---------- doc/api/Consensus.md | 11 +---------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/doc/API.md b/doc/API.md index b92b2919be..6563531041 100644 --- a/doc/API.md +++ b/doc/API.md @@ -208,16 +208,7 @@ The JSON formatted block or a standard error response. 
"value": "299999000000000000000000000000" } ], - "nonce": [ - 253, - 253, - 0, - 0, - 0, - 0, - 0, - 100 - ], + "nonce": [253,253,0,0,0,0,0,100], "parentid": "25f6e3b9295a61f69fcb956aca9f0076234ecf2e02d399db5448b6e22f26e81c", "timestamp": 1433626546, "transactions": [ diff --git a/doc/api/Consensus.md b/doc/api/Consensus.md index ecda632213..b4d730c833 100644 --- a/doc/api/Consensus.md +++ b/doc/api/Consensus.md @@ -78,16 +78,7 @@ The JSON formatted block or a standard error response. "value": "299999000000000000000000000000" } ], - "nonce": [ - 253, - 253, - 0, - 0, - 0, - 0, - 0, - 100 - ], + "nonce": [253,253,0,0,0,0,0,100], "parentid": "25f6e3b9295a61f69fcb956aca9f0076234ecf2e02d399db5448b6e22f26e81c", "timestamp": 1433626546, "transactions": [ From 64b90e7397858d6e4e7e992af087aa27cab944fc Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 14:22:28 -0400 Subject: [PATCH 160/212] different block example --- doc/API.md | 73 ++++++++++++++++++++++++++++++++++++-------- doc/api/Consensus.md | 73 ++++++++++++++++++++++++++++++++++++-------- 2 files changed, 120 insertions(+), 26 deletions(-) diff --git a/doc/API.md b/doc/API.md index 6563531041..29418f9a17 100644 --- a/doc/API.md +++ b/doc/API.md @@ -200,31 +200,78 @@ height The JSON formatted block or a standard error response. ``` { - "height": 1, - "id": "0000000018492dfe2a1b2da6ca3534a757796573f84f0eb0eb5f88d75cd10f9f", + "height": 20032, + "id": "00000000000033b9eb57fa63a51adeea857e70f6415ebbfe5df2a01f0d0477f4", "minerpayouts": [ { - "unlockhash": "68a0607b15f21ce643bb47075e539b9e938d7a8e0870951a3c872a2d89ae05194ee3a0f18680", - "value": "299999000000000000000000000000" + "unlockhash": "c199cd180e19ef7597bcf4beecdd4f211e121d085e24432959c42bdf9030e32b9583e1c2727c", + "value": "279978000000000000000000000000" } ], - "nonce": [253,253,0,0,0,0,0,100], - "parentid": "25f6e3b9295a61f69fcb956aca9f0076234ecf2e02d399db5448b6e22f26e81c", - "timestamp": 1433626546, + "nonce": [4,12,219,7,0,0,0,0], + "parentid": "0000000000009615e8db750eb1226aa5e629bfa7badbfe0b79607ec8b918a44c", + "timestamp": 1444516982, "transactions": [ + { + // ... 
+ } { - "arbitrarydata": [ - "Tm9uU2lhQ9ikb2Lc6jDRfLhhjPflTQ==" - ], + "arbitrarydata": [], "filecontractrevisions": [], "filecontracts": [], "minerfees": [], - "siacoininputs": [], - "siacoinoutputs": [], + "siacoininputs": [ + { + "parentid": "24cbeb9df7eb2d81d0025168fc94bd179909d834f49576e65b51feceaf957a64", + "unlockconditions": { + "publickeys": [ + { + "algorithm": "ed25519", + "key": "QET8w7WRbGfcnnpKd1nuQfE3DuNUUq9plyoxwQYDK4U=" + } + ], + "signaturesrequired": 1, + "timelock": 0 + } + } + ], + "siacoinoutputs": [ + { + "unlockhash": "d54f500f6c1774d518538dbe87114fe6f7e6c76b5bc8373a890b12ce4b8909a336106a4cd6db", + "value": "1010000000000000000000000000" + }, + { + "unlockhash": "48a56b19bd0be4f24190640acbd0bed9669ea9c18823da2645ec1ad9652f10b06c5d4210f971", + "value": "5780000000000000000000000000" + } + ], "siafundinputs": [], "siafundoutputs": [], "storageproofs": [], - "transactionsignatures": [] + "transactionsignatures": [ + { + "coveredfields": { + "arbitrarydata": [], + "filecontractrevisions": [], + "filecontracts": [], + "minerfees": [], + "siacoininputs": [], + "siacoinoutputs": [], + "siafundinputs": [], + "siafundoutputs": [], + "storageproofs": [], + "transactionsignatures": [], + "wholetransaction": true + }, + "parentid": "24cbeb9df7eb2d81d0025168fc94bd179909d834f49576e65b51feceaf957a64", + "publickeyindex": 0, + "signature": "pByLGMlvezIZWVZmHQs/ynGETETNbxcOY/kr6uivYgqZqCcKTJ0JkWhcFaKJU+3DEA7JAloLRNZe3PTklD3tCQ==", + "timelock": 0 + } + ] + }, + { + // ... } ] } diff --git a/doc/api/Consensus.md b/doc/api/Consensus.md index b4d730c833..c57c6e0aff 100644 --- a/doc/api/Consensus.md +++ b/doc/api/Consensus.md @@ -70,31 +70,78 @@ height The JSON formatted block or a standard error response. ``` { - "height": 1, - "id": "0000000018492dfe2a1b2da6ca3534a757796573f84f0eb0eb5f88d75cd10f9f", + "height": 20032, + "id": "00000000000033b9eb57fa63a51adeea857e70f6415ebbfe5df2a01f0d0477f4", "minerpayouts": [ { - "unlockhash": "68a0607b15f21ce643bb47075e539b9e938d7a8e0870951a3c872a2d89ae05194ee3a0f18680", - "value": "299999000000000000000000000000" + "unlockhash": "c199cd180e19ef7597bcf4beecdd4f211e121d085e24432959c42bdf9030e32b9583e1c2727c", + "value": "279978000000000000000000000000" } ], - "nonce": [253,253,0,0,0,0,0,100], - "parentid": "25f6e3b9295a61f69fcb956aca9f0076234ecf2e02d399db5448b6e22f26e81c", - "timestamp": 1433626546, + "nonce": [4,12,219,7,0,0,0,0], + "parentid": "0000000000009615e8db750eb1226aa5e629bfa7badbfe0b79607ec8b918a44c", + "timestamp": 1444516982, "transactions": [ + { + // ... 
+ } { - "arbitrarydata": [ - "Tm9uU2lhQ9ikb2Lc6jDRfLhhjPflTQ==" - ], + "arbitrarydata": [], "filecontractrevisions": [], "filecontracts": [], "minerfees": [], - "siacoininputs": [], - "siacoinoutputs": [], + "siacoininputs": [ + { + "parentid": "24cbeb9df7eb2d81d0025168fc94bd179909d834f49576e65b51feceaf957a64", + "unlockconditions": { + "publickeys": [ + { + "algorithm": "ed25519", + "key": "QET8w7WRbGfcnnpKd1nuQfE3DuNUUq9plyoxwQYDK4U=" + } + ], + "signaturesrequired": 1, + "timelock": 0 + } + } + ], + "siacoinoutputs": [ + { + "unlockhash": "d54f500f6c1774d518538dbe87114fe6f7e6c76b5bc8373a890b12ce4b8909a336106a4cd6db", + "value": "1010000000000000000000000000" + }, + { + "unlockhash": "48a56b19bd0be4f24190640acbd0bed9669ea9c18823da2645ec1ad9652f10b06c5d4210f971", + "value": "5780000000000000000000000000" + } + ], "siafundinputs": [], "siafundoutputs": [], "storageproofs": [], - "transactionsignatures": [] + "transactionsignatures": [ + { + "coveredfields": { + "arbitrarydata": [], + "filecontractrevisions": [], + "filecontracts": [], + "minerfees": [], + "siacoininputs": [], + "siacoinoutputs": [], + "siafundinputs": [], + "siafundoutputs": [], + "storageproofs": [], + "transactionsignatures": [], + "wholetransaction": true + }, + "parentid": "24cbeb9df7eb2d81d0025168fc94bd179909d834f49576e65b51feceaf957a64", + "publickeyindex": 0, + "signature": "pByLGMlvezIZWVZmHQs/ynGETETNbxcOY/kr6uivYgqZqCcKTJ0JkWhcFaKJU+3DEA7JAloLRNZe3PTklD3tCQ==", + "timelock": 0 + } + ] + }, + { + // ... } ] } From b6968905a22b62d4806f236ac481e113fc7cb6a1 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 3 May 2018 10:31:06 -0400 Subject: [PATCH 161/212] Move managedArchiveContracts to threadedContractMaintenance --- modules/renter/contractor/contracts.go | 4 ++++ modules/renter/contractor/update.go | 11 ----------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 21376290d5..1e0a1b895c 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -265,6 +265,10 @@ func (c *Contractor) threadedContractMaintenance() { return } defer c.tg.Done() + + // Archive contracts that need to be archived before doing additional maintenance. + c.managedArchiveContracts() + // Nothing to do if there are no hosts. c.mu.RLock() wantedHosts := c.allowance.Hosts diff --git a/modules/renter/contractor/update.go b/modules/renter/contractor/update.go index b07292be87..1950947f6c 100644 --- a/modules/renter/contractor/update.go +++ b/modules/renter/contractor/update.go @@ -7,17 +7,7 @@ import ( // managedArchiveContracts will figure out which contracts are no longer needed // and move them to the historic set of contracts. -// -// TODO: This function should be performed by threadedContractMaintenance. -// threadedContractMaintenance will currently quit if there are no hosts, but it -// should at least run this code before quitting. func (c *Contractor) managedArchiveContracts() { - err := c.tg.Add() - if err != nil { - return - } - defer c.tg.Done() - // Determine the current block height. c.mu.RLock() currentHeight := c.blockHeight @@ -90,6 +80,5 @@ func (c *Contractor) ProcessConsensusChange(cc modules.ConsensusChange) { // maintenance. 
if cc.Synced { go c.threadedContractMaintenance() - go c.managedArchiveContracts() } } From 6d9706867f1ce1572a66357fb4e8836b2b99a2c8 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 2 May 2018 16:47:25 -0400 Subject: [PATCH 162/212] add field names when initializing ConsensusBlockGet --- node/api/consensus.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/api/consensus.go b/node/api/consensus.go index b3720c4602..ee17c0a26c 100644 --- a/node/api/consensus.go +++ b/node/api/consensus.go @@ -85,9 +85,9 @@ func (api *API) consensusBlocksHandler(w http.ResponseWriter, req *http.Request, } // Write response WriteJSON(w, ConsensusBlocksGet{ - b.ID(), - h, - b, + BlockID: b.ID(), + BlockHeight: h, + Block: b, }) } From 235049beb342543b04f85ac899af0d19c890c284 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 3 May 2018 16:52:13 -0400 Subject: [PATCH 163/212] more logging when deleting a SafeContract from disk --- modules/renter/contractor/persist.go | 5 +- modules/renter/proto/contractset.go | 5 +- modules/renter/proto/contractset_test.go | 60 +++++++++++++++--------- 3 files changed, 44 insertions(+), 26 deletions(-) diff --git a/modules/renter/contractor/persist.go b/modules/renter/contractor/persist.go index 6fdea0e9fd..61179b265e 100644 --- a/modules/renter/contractor/persist.go +++ b/modules/renter/contractor/persist.go @@ -9,6 +9,8 @@ import ( "github.com/NebulousLabs/Sia/modules/renter/proto" "github.com/NebulousLabs/Sia/persist" "github.com/NebulousLabs/Sia/types" + + "github.com/NebulousLabs/errors" ) // contractorPersist defines what Contractor data persists across sessions. @@ -141,6 +143,5 @@ func convertPersist(dir string) error { } // delete the journal file - os.RemoveAll(journalPath) - return nil + return errors.AddContext(os.Remove(journalPath), "failed to remove journal file") } diff --git a/modules/renter/proto/contractset.go b/modules/renter/proto/contractset.go index 33a78607bf..e2087b59d2 100644 --- a/modules/renter/proto/contractset.go +++ b/modules/renter/proto/contractset.go @@ -64,7 +64,10 @@ func (cs *ContractSet) Delete(c *SafeContract) { cs.mu.Unlock() safeContract.mu.Unlock() // delete contract file - os.Remove(filepath.Join(cs.dir, c.header.ID().String()+contractExtension)) + err := os.Remove(filepath.Join(cs.dir, c.header.ID().String()+contractExtension)) + if err != nil { + build.Critical("Failed to delete SafeContract from disk:", err) + } } // IDs returns the FileContractID of each contract in the set. The contracts diff --git a/modules/renter/proto/contractset_test.go b/modules/renter/proto/contractset_test.go index aca119f7c4..31a2a9a05a 100644 --- a/modules/renter/proto/contractset_test.go +++ b/modules/renter/proto/contractset_test.go @@ -5,6 +5,9 @@ import ( "testing" "time" + "github.com/NebulousLabs/Sia/build" + "github.com/NebulousLabs/Sia/crypto" + "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/fastrand" @@ -24,7 +27,12 @@ func (cs *ContractSet) mustAcquire(t *testing.T, id types.FileContractID) *SafeC // TestContractSet tests that the ContractSet type is safe for concurrent use. 
func TestContractSet(t *testing.T) { // create contract set - c1 := &SafeContract{header: contractHeader{Transaction: types.Transaction{ + testDir := build.TempDir(t.Name()) + cs, err := NewContractSet(testDir, modules.ProdDependencies) + if err != nil { + t.Fatal(err) + } + header1 := contractHeader{Transaction: types.Transaction{ FileContractRevisions: []types.FileContractRevision{{ ParentID: types.FileContractID{1}, NewValidProofOutputs: []types.SiacoinOutput{{}, {}}, @@ -32,9 +40,8 @@ func TestContractSet(t *testing.T) { PublicKeys: []types.SiaPublicKey{{}, {}}, }, }}, - }}} - id1 := c1.header.ID() - c2 := &SafeContract{header: contractHeader{Transaction: types.Transaction{ + }} + header2 := contractHeader{Transaction: types.Transaction{ FileContractRevisions: []types.FileContractRevision{{ ParentID: types.FileContractID{2}, NewValidProofOutputs: []types.SiacoinOutput{{}, {}}, @@ -42,17 +49,21 @@ func TestContractSet(t *testing.T) { PublicKeys: []types.SiaPublicKey{{}, {}}, }, }}, - }}} - id2 := c2.header.ID() - cs := &ContractSet{ - contracts: map[types.FileContractID]*SafeContract{ - id1: c1, - id2: c2, - }, + }} + id1 := header1.ID() + id2 := header2.ID() + + _, err = cs.managedInsertContract(header1, []crypto.Hash{}) + if err != nil { + t.Fatal(err) + } + _, err = cs.managedInsertContract(header2, []crypto.Hash{}) + if err != nil { + t.Fatal(err) } // uncontested acquire/release - c1 = cs.mustAcquire(t, id1) + c1 := cs.mustAcquire(t, id1) cs.Return(c1) // 100 concurrent serialized mutations @@ -84,11 +95,13 @@ func TestContractSet(t *testing.T) { cs.Return(c1) // delete and reinsert id2 - c2 = cs.mustAcquire(t, id2) + c2 := cs.mustAcquire(t, id2) cs.Delete(c2) - cs.mu.Lock() - cs.contracts[id2] = c2 - cs.mu.Unlock() + roots, err := c2.merkleRoots.merkleRoots() + if err != nil { + t.Fatal(err) + } + cs.managedInsertContract(c2.header, roots) // call all the methods in parallel haphazardly funcs := []func(){ @@ -99,7 +112,7 @@ func TestContractSet(t *testing.T) { func() { cs.Return(cs.mustAcquire(t, id1)) }, func() { cs.Return(cs.mustAcquire(t, id2)) }, func() { - c3 := &SafeContract{header: contractHeader{ + header3 := contractHeader{ Transaction: types.Transaction{ FileContractRevisions: []types.FileContractRevision{{ ParentID: types.FileContractID{3}, @@ -109,12 +122,13 @@ func TestContractSet(t *testing.T) { }, }}, }, - }} - id3 := c3.header.ID() - cs.mu.Lock() - cs.contracts[id3] = c3 - cs.mu.Unlock() - cs.mustAcquire(t, id3) + } + id3 := header3.ID() + _, err := cs.managedInsertContract(header3, []crypto.Hash{}) + if err != nil { + t.Fatal(err) + } + c3 := cs.mustAcquire(t, id3) cs.Delete(c3) }, } From b9aaeb4596399b8588c72ad6280bd6e82034818c Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 3 May 2018 17:39:21 -0400 Subject: [PATCH 164/212] close safecontract file before deleting it --- modules/renter/proto/contractset.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/renter/proto/contractset.go b/modules/renter/proto/contractset.go index e2087b59d2..96d8505e4a 100644 --- a/modules/renter/proto/contractset.go +++ b/modules/renter/proto/contractset.go @@ -1,7 +1,6 @@ package proto import ( - "errors" "os" "path/filepath" "sync" @@ -11,6 +10,7 @@ import ( "github.com/NebulousLabs/Sia/types" "github.com/NebulousLabs/ratelimit" + "github.com/NebulousLabs/errors" "github.com/NebulousLabs/writeaheadlog" ) @@ -64,7 +64,8 @@ func (cs *ContractSet) Delete(c *SafeContract) { cs.mu.Unlock() safeContract.mu.Unlock() // delete contract file - 
err := os.Remove(filepath.Join(cs.dir, c.header.ID().String()+contractExtension)) + path := filepath.Join(cs.dir, c.header.ID().String()+contractExtension) + err := errors.Compose(safeContract.headerFile.Close(), os.Remove(path)) if err != nil { build.Critical("Failed to delete SafeContract from disk:", err) } From 4e328eaee781e33a9f6fc398df12cc9bd1ff4528 Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 1 May 2018 15:54:36 -0400 Subject: [PATCH 165/212] added cacheData struct and updated unfinishedDownloadChunk struct field chunkCache, updated code where referenced --- modules/renter/downloadcache.go | 14 +++++++++++--- modules/renter/downloadchunk.go | 9 ++++++++- modules/renter/renter.go | 8 ++++---- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 7cda44b761..31c1311a3d 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -9,12 +9,16 @@ import ( "github.com/NebulousLabs/errors" ) +var cd cacheData + // addChunkToCache adds the chunk to the cache if the download is a streaming // endpoint download. // TODO this won't be necessary anymore once we have partial downloads. func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { if udc.download.staticDestinationType != destinationTypeSeekStream { - // We only cache streaming chunks since browsers and media players tend to only request a few kib at once when streaming data. That way we can prevent scheduling the same chunk for download over and over. + // We only cache streaming chunks since browsers and media players tend + // to only request a few kib at once when streaming data. That way we can + // prevent scheduling the same chunk for download over and over. return } udc.cacheMu.Lock() @@ -27,7 +31,10 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { } delete(udc.chunkCache, key) } - udc.chunkCache[udc.staticCacheID] = data + + cd.data = data + cd.timestamp = time.Now() + udc.chunkCache[udc.staticCacheID] = cd udc.cacheMu.Unlock() } @@ -41,7 +48,8 @@ func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { udc.mu.Lock() defer udc.mu.Unlock() r.cmu.Lock() - data, cached := r.chunkCache[udc.staticCacheID] + cd, cached := r.chunkCache[udc.staticCacheID] + data := cd.data r.cmu.Unlock() if !cached { return false diff --git a/modules/renter/downloadchunk.go b/modules/renter/downloadchunk.go index 69962b3215..658976f2a8 100644 --- a/modules/renter/downloadchunk.go +++ b/modules/renter/downloadchunk.go @@ -22,6 +22,13 @@ type downloadPieceInfo struct { root crypto.Hash } +// cacheData contatins the data and the timestamp for the unfinished +// download chunks +type cacheData struct { + data []byte + timestamp time.Time +} + // unfinishedDownloadChunk contains a chunk for a download that is in progress. // // TODO: Currently, if a standby worker is needed, all of the standby workers @@ -75,7 +82,7 @@ type unfinishedDownloadChunk struct { mu sync.Mutex // Caching related fields - chunkCache map[string][]byte + chunkCache map[string]cacheData cacheMu *sync.Mutex } diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 250b3e1d6c..2ef8906799 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -189,7 +189,7 @@ type Renter struct { lastEstimation modules.RenterPriceEstimation // Utilities. 
- chunkCache map[string][]byte + chunkCache map[string]cacheData cmu *sync.Mutex cs modules.ConsensusSet deps modules.Dependencies @@ -363,10 +363,10 @@ func validateSiapath(siapath string) error { return ErrEmptyFilename } if siapath == ".." { - return errors.New("siapath cannot be ..") + return errors.New("siapath cannot be .. ") } if siapath == "." { - return errors.New("siapath cannot be .") + return errors.New("siapath cannot be . ") } // check prefix if strings.HasPrefix(siapath, "/") { @@ -426,7 +426,7 @@ func NewCustomRenter(g modules.Gateway, cs modules.ConsensusSet, tpool modules.T workerPool: make(map[types.FileContractID]*worker), - chunkCache: make(map[string][]byte), + chunkCache: make(map[string]cacheData), cmu: new(sync.Mutex), cs: cs, deps: deps, From 166d32d0f0117c35c709cb68e7e4ca7978392e9c Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 1 May 2018 16:13:59 -0400 Subject: [PATCH 166/212] updated addChunkToCache for loop to find oldest entry and delete --- modules/renter/downloadcache.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 31c1311a3d..fb724d4f33 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -25,11 +25,17 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { // Prune cache if necessary. // TODO insteado of deleting a 'random' key, delete the // least-recently-accessed element of the cache. - for key := range udc.chunkCache { - if len(udc.chunkCache) < downloadCacheSize { - break + if len(udc.chunkCache) >= downloadCacheSize { + var oldestKey string + oldestTime := time.Now().Second() + + for key := range udc.chunkCache { + if udc.chunkCache[key].timestamp.Second() < oldestTime { + oldestTime = udc.chunkCache[key].timestamp.Second() + oldestKey = key + } } - delete(udc.chunkCache, key) + delete(udc.chunkCache, oldestKey) } cd.data = data From 340e4002bc5a903e12436bffcc1ebe0289d74cfb Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 1 May 2018 16:46:49 -0400 Subject: [PATCH 167/212] updated comments, changed addChunkToCache cacheMu unlock to defer statement --- modules/renter/downloadcache.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index fb724d4f33..76b3ece618 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -22,9 +22,9 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { return } udc.cacheMu.Lock() + defer udc.cacheMu.Unlock() + // Prune cache if necessary. - // TODO insteado of deleting a 'random' key, delete the - // least-recently-accessed element of the cache. if len(udc.chunkCache) >= downloadCacheSize { var oldestKey string oldestTime := time.Now().Second() @@ -41,7 +41,6 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { cd.data = data cd.timestamp = time.Now() udc.chunkCache[udc.staticCacheID] = cd - udc.cacheMu.Unlock() } // managedTryCache tries to retrieve the chunk from the renter's cache. 
If From a553eb834b5aaec5bd5184afe5ee3127058cecd7 Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 1 May 2018 17:26:40 -0400 Subject: [PATCH 168/212] definied cd locally in addChunkToCache instead of globally as it was failing test-long, test-long passing now --- modules/renter/downloadcache.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 76b3ece618..dccdb18a5d 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -9,8 +9,6 @@ import ( "github.com/NebulousLabs/errors" ) -var cd cacheData - // addChunkToCache adds the chunk to the cache if the download is a streaming // endpoint download. // TODO this won't be necessary anymore once we have partial downloads. @@ -38,8 +36,10 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { delete(udc.chunkCache, oldestKey) } - cd.data = data - cd.timestamp = time.Now() + cd := cacheData{ + data: data, + timestamp: time.Now(), + } udc.chunkCache[udc.staticCacheID] = cd } From f8ac68d70daabf130468fe45ffcb716e4a1de8f0 Mon Sep 17 00:00:00 2001 From: MSevey Date: Wed, 2 May 2018 16:10:00 -0400 Subject: [PATCH 169/212] resolving edits from pull request --- modules/renter/downloadcache.go | 25 +++++++++++++------------ modules/renter/downloadchunk.go | 14 +++++++------- modules/renter/renter.go | 4 ++-- 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index dccdb18a5d..a1987c5705 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -23,24 +23,25 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { defer udc.cacheMu.Unlock() // Prune cache if necessary. - if len(udc.chunkCache) >= downloadCacheSize { + for len(udc.chunkCache) >= downloadCacheSize { var oldestKey string - oldestTime := time.Now().Second() + oldestTime := time.Now() - for key := range udc.chunkCache { - if udc.chunkCache[key].timestamp.Second() < oldestTime { - oldestTime = udc.chunkCache[key].timestamp.Second() - oldestKey = key + // TODO: turn this from a structure where you loop over every element (O(n) per access) to a min heap (O(log n) per access). + // currently not a issue due to cache size remaining small (<20) + for id, chunk := range udc.chunkCache { + if chunk.lastAccess.Before(oldestTime) { + oldestTime = chunk.lastAccess + oldestKey = id } } delete(udc.chunkCache, oldestKey) } - cd := cacheData{ - data: data, - timestamp: time.Now(), + udc.chunkCache[udc.staticCacheID] = cacheData{ + data: data, + lastAccess: time.Now(), } - udc.chunkCache[udc.staticCacheID] = cd } // managedTryCache tries to retrieve the chunk from the renter's cache. 
If @@ -54,14 +55,14 @@ func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { defer udc.mu.Unlock() r.cmu.Lock() cd, cached := r.chunkCache[udc.staticCacheID] - data := cd.data + cd.lastAccess = time.Now() r.cmu.Unlock() if !cached { return false } start := udc.staticFetchOffset end := start + udc.staticFetchLength - _, err := udc.destination.WriteAt(data[start:end], udc.staticWriteOffset) + _, err := udc.destination.WriteAt(cd.data[start:end], udc.staticWriteOffset) if err != nil { r.log.Println("WARN: failed to write cached chunk to destination:", err) udc.fail(errors.AddContext(err, "failed to write cached chunk to destination")) diff --git a/modules/renter/downloadchunk.go b/modules/renter/downloadchunk.go index 658976f2a8..b6c1e8643e 100644 --- a/modules/renter/downloadchunk.go +++ b/modules/renter/downloadchunk.go @@ -14,6 +14,13 @@ import ( "github.com/NebulousLabs/errors" ) +// cacheData contatins the data and the timestamp for the unfinished +// download chunks +type cacheData struct { + data []byte + lastAccess time.Time +} + // downloadPieceInfo contains all the information required to download and // recover a piece of a chunk from a host. It is a value in a map where the key // is the file contract id. @@ -22,13 +29,6 @@ type downloadPieceInfo struct { root crypto.Hash } -// cacheData contatins the data and the timestamp for the unfinished -// download chunks -type cacheData struct { - data []byte - timestamp time.Time -} - // unfinishedDownloadChunk contains a chunk for a download that is in progress. // // TODO: Currently, if a standby worker is needed, all of the standby workers diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 2ef8906799..3e28fc1ac2 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -363,10 +363,10 @@ func validateSiapath(siapath string) error { return ErrEmptyFilename } if siapath == ".." { - return errors.New("siapath cannot be .. ") + return errors.New("siapath cannot be '..'") } if siapath == "." { - return errors.New("siapath cannot be . ") + return errors.New("siapath cannot be '.'") } // check prefix if strings.HasPrefix(siapath, "/") { From 3466b6bcbb2952203d49673e825a124a4fb8c235 Mon Sep 17 00:00:00 2001 From: MSevey Date: Thu, 3 May 2018 12:28:52 -0400 Subject: [PATCH 170/212] Reinsert lastAccess into map within managedTryCache --- modules/renter/downloadcache.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index a1987c5705..917d85a55f 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -27,7 +27,8 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { var oldestKey string oldestTime := time.Now() - // TODO: turn this from a structure where you loop over every element (O(n) per access) to a min heap (O(log n) per access). + // TODO: turn this from a structure where you loop over every element + // (O(n) per access) to a min heap (O(log n) per access). 
// currently not a issue due to cache size remaining small (<20) for id, chunk := range udc.chunkCache { if chunk.lastAccess.Before(oldestTime) { @@ -56,6 +57,7 @@ func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { r.cmu.Lock() cd, cached := r.chunkCache[udc.staticCacheID] cd.lastAccess = time.Now() + r.chunkCache[udc.staticCacheID] = cd r.cmu.Unlock() if !cached { return false From d192e6e9b7fbbbedc8eec8946cc444de7d985930 Mon Sep 17 00:00:00 2001 From: MSevey Date: Thu, 3 May 2018 12:30:05 -0400 Subject: [PATCH 171/212] create unit test file for updates to addChunkToCache --- modules/renter/downloadcache_test.go | 37 ++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 modules/renter/downloadcache_test.go diff --git a/modules/renter/downloadcache_test.go b/modules/renter/downloadcache_test.go new file mode 100644 index 0000000000..fd734a376b --- /dev/null +++ b/modules/renter/downloadcache_test.go @@ -0,0 +1,37 @@ +package renter + +import ( + "testing" + "time" +) + +// Stream uses the streaming endpoint to download a file. +func TestAddChunkToCache(t *testing.T) { + var udc *unfinishedDownloadChunk + + data := []byte{1, 2, 3, 4} + // Fill Cache + for i := 0; i < downloadCacheSize; i++ { + udc.addChunkToCache(data) + time.Sleep(1 * time.Millisecond) + } + + // Get oldest key + var oldestKey string + oldestTime := time.Now() + + for id, chunk := range udc.chunkCache { + if chunk.lastAccess.Before(oldestTime) { + oldestTime = chunk.lastAccess + oldestKey = id + } + } + + udc.addChunkToCache(data) + + // check if the oldestKey was removed + if _, ok := udc.chunkCache[oldestKey]; ok { + t.Errorf("Expected ok to be false instead it was %v", ok) + } + +} From 1ee9da0b5728309091cc1c162aea25f77c91635f Mon Sep 17 00:00:00 2001 From: MSevey Date: Thu, 3 May 2018 13:52:45 -0400 Subject: [PATCH 172/212] TestAddChunkToCache is now passing --- modules/renter/downloadcache_test.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/renter/downloadcache_test.go b/modules/renter/downloadcache_test.go index fd734a376b..5385dfe3fc 100644 --- a/modules/renter/downloadcache_test.go +++ b/modules/renter/downloadcache_test.go @@ -5,11 +5,15 @@ import ( "time" ) -// Stream uses the streaming endpoint to download a file. 
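// A minimal sketch of the min-heap eviction mentioned in the addChunkToCache
// TODO above. This is illustrative only and not part of the patch series: the
// names cacheEntry, cacheHeap and evictOldest are invented here, it assumes
// "container/heap" and "time" are imported, and a real implementation would
// also need heap.Fix whenever managedTryCache refreshes lastAccess.
type cacheEntry struct {
	id         string
	lastAccess time.Time
	index      int // maintained by the heap.Interface methods
}

type cacheHeap []*cacheEntry

func (h cacheHeap) Len() int           { return len(h) }
func (h cacheHeap) Less(i, j int) bool { return h[i].lastAccess.Before(h[j].lastAccess) }
func (h cacheHeap) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
	h[i].index, h[j].index = i, j
}

func (h *cacheHeap) Push(x interface{}) {
	e := x.(*cacheEntry)
	e.index = len(*h)
	*h = append(*h, e)
}

func (h *cacheHeap) Pop() interface{} {
	old := *h
	n := len(old)
	e := old[n-1]
	*h = old[:n-1]
	return e
}

// evictOldest removes the least-recently-accessed chunk in O(log n) instead of
// scanning the whole map.
func evictOldest(h *cacheHeap, cache map[string]*cacheData) {
	if h.Len() == 0 {
		return
	}
	e := heap.Pop(h).(*cacheEntry)
	delete(cache, e.id)
}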
+// TestAddChunkToCache tests that the oldest chunk is removed func TestAddChunkToCache(t *testing.T) { - var udc *unfinishedDownloadChunk + var udc unfinishedDownloadChunk + udc.download = &download{ + staticDestinationType: "httpseekstream", + } data := []byte{1, 2, 3, 4} + // Fill Cache for i := 0; i < downloadCacheSize; i++ { udc.addChunkToCache(data) @@ -27,9 +31,10 @@ func TestAddChunkToCache(t *testing.T) { } } + // Add additional chunk to force deletion of a chunk udc.addChunkToCache(data) - // check if the oldestKey was removed + // check if the chunk with the oldestKey was removed if _, ok := udc.chunkCache[oldestKey]; ok { t.Errorf("Expected ok to be false instead it was %v", ok) } From 0935c2dce8c34bef13c363988f28daa5ce37ce7d Mon Sep 17 00:00:00 2001 From: MSevey Date: Thu, 3 May 2018 15:03:23 -0400 Subject: [PATCH 173/212] set chunkCache field in udc to pointer --- modules/renter/downloadcache.go | 9 ++++++--- modules/renter/downloadchunk.go | 2 +- modules/renter/renter.go | 4 ++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/modules/renter/downloadcache.go b/modules/renter/downloadcache.go index 917d85a55f..46db707949 100644 --- a/modules/renter/downloadcache.go +++ b/modules/renter/downloadcache.go @@ -39,7 +39,7 @@ func (udc *unfinishedDownloadChunk) addChunkToCache(data []byte) { delete(udc.chunkCache, oldestKey) } - udc.chunkCache[udc.staticCacheID] = cacheData{ + udc.chunkCache[udc.staticCacheID] = &cacheData{ data: data, lastAccess: time.Now(), } @@ -56,12 +56,15 @@ func (r *Renter) managedTryCache(udc *unfinishedDownloadChunk) bool { defer udc.mu.Unlock() r.cmu.Lock() cd, cached := r.chunkCache[udc.staticCacheID] - cd.lastAccess = time.Now() - r.chunkCache[udc.staticCacheID] = cd r.cmu.Unlock() if !cached { return false } + + // chunk exists, updating lastAccess and reinserting into map + cd.lastAccess = time.Now() + r.chunkCache[udc.staticCacheID] = cd + start := udc.staticFetchOffset end := start + udc.staticFetchLength _, err := udc.destination.WriteAt(cd.data[start:end], udc.staticWriteOffset) diff --git a/modules/renter/downloadchunk.go b/modules/renter/downloadchunk.go index b6c1e8643e..68d240913f 100644 --- a/modules/renter/downloadchunk.go +++ b/modules/renter/downloadchunk.go @@ -82,7 +82,7 @@ type unfinishedDownloadChunk struct { mu sync.Mutex // Caching related fields - chunkCache map[string]cacheData + chunkCache map[string]*cacheData cacheMu *sync.Mutex } diff --git a/modules/renter/renter.go b/modules/renter/renter.go index 3e28fc1ac2..f953887336 100644 --- a/modules/renter/renter.go +++ b/modules/renter/renter.go @@ -189,7 +189,7 @@ type Renter struct { lastEstimation modules.RenterPriceEstimation // Utilities. - chunkCache map[string]cacheData + chunkCache map[string]*cacheData cmu *sync.Mutex cs modules.ConsensusSet deps modules.Dependencies @@ -426,7 +426,7 @@ func NewCustomRenter(g modules.Gateway, cs modules.ConsensusSet, tpool modules.T workerPool: make(map[types.FileContractID]*worker), - chunkCache: make(map[string]cacheData), + chunkCache: make(map[string]*cacheData), cmu: new(sync.Mutex), cs: cs, deps: deps, From c5cfd642028c543eeea58c7a630e5f1f1c40d1bb Mon Sep 17 00:00:00 2001 From: MSevey Date: Thu, 3 May 2018 15:31:19 -0400 Subject: [PATCH 174/212] update TestAddChunkToCache to fix failing test. 
Test-long now passing as well --- modules/renter/downloadcache_test.go | 29 +++++++++++++--------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/modules/renter/downloadcache_test.go b/modules/renter/downloadcache_test.go index 5385dfe3fc..ebb3153f4a 100644 --- a/modules/renter/downloadcache_test.go +++ b/modules/renter/downloadcache_test.go @@ -1,41 +1,38 @@ package renter import ( + "strconv" + "sync" "testing" "time" ) // TestAddChunkToCache tests that the oldest chunk is removed func TestAddChunkToCache(t *testing.T) { - var udc unfinishedDownloadChunk - udc.download = &download{ - staticDestinationType: "httpseekstream", + udc := &unfinishedDownloadChunk{ + download: &download{ + staticDestinationType: "httpseekstream", + }, + chunkCache: make(map[string]*cacheData), + cacheMu: new(sync.Mutex), } data := []byte{1, 2, 3, 4} // Fill Cache for i := 0; i < downloadCacheSize; i++ { + udc.staticCacheID = strconv.Itoa(i) udc.addChunkToCache(data) time.Sleep(1 * time.Millisecond) } - // Get oldest key - var oldestKey string - oldestTime := time.Now() - - for id, chunk := range udc.chunkCache { - if chunk.lastAccess.Before(oldestTime) { - oldestTime = chunk.lastAccess - oldestKey = id - } - } - // Add additional chunk to force deletion of a chunk + udc.staticCacheID = strconv.Itoa(downloadCacheSize) udc.addChunkToCache(data) - // check if the chunk with the oldestKey was removed - if _, ok := udc.chunkCache[oldestKey]; ok { + // check if the chunk with staticCacheID = "0" was removed + // as that would have been the first to be added + if _, ok := udc.chunkCache["0"]; ok { t.Errorf("Expected ok to be false instead it was %v", ok) } From d1694d9247774883ad0782cbf4e13c538b60ad5c Mon Sep 17 00:00:00 2001 From: MSevey Date: Fri, 4 May 2018 13:35:27 -0400 Subject: [PATCH 175/212] resolve PR edits from chris --- modules/renter/downloadcache_test.go | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/modules/renter/downloadcache_test.go b/modules/renter/downloadcache_test.go index ebb3153f4a..0b4320ad89 100644 --- a/modules/renter/downloadcache_test.go +++ b/modules/renter/downloadcache_test.go @@ -11,29 +11,26 @@ import ( func TestAddChunkToCache(t *testing.T) { udc := &unfinishedDownloadChunk{ download: &download{ - staticDestinationType: "httpseekstream", + staticDestinationType: destinationTypeSeekStream, }, chunkCache: make(map[string]*cacheData), cacheMu: new(sync.Mutex), } - data := []byte{1, 2, 3, 4} - // Fill Cache for i := 0; i < downloadCacheSize; i++ { udc.staticCacheID = strconv.Itoa(i) - udc.addChunkToCache(data) + udc.addChunkToCache([]byte{}) time.Sleep(1 * time.Millisecond) } // Add additional chunk to force deletion of a chunk udc.staticCacheID = strconv.Itoa(downloadCacheSize) - udc.addChunkToCache(data) + udc.addChunkToCache([]byte{}) // check if the chunk with staticCacheID = "0" was removed // as that would have been the first to be added if _, ok := udc.chunkCache["0"]; ok { - t.Errorf("Expected ok to be false instead it was %v", ok) + t.Error("The least recently accessed chunk wasn't pruned from the cache") } - } From cb58cbf86048212db77845e6d18c1118d7971b70 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Sat, 5 May 2018 22:40:25 -0400 Subject: [PATCH 176/212] Remove siapath Prefix in client library before using it --- node/api/client/renter.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/node/api/client/renter.go b/node/api/client/renter.go index 9d0c84b61a..3964ffb27f 100644 
--- a/node/api/client/renter.go +++ b/node/api/client/renter.go @@ -4,6 +4,7 @@ import ( "fmt" "net/url" "strconv" + "strings" "github.com/NebulousLabs/Sia/modules" "github.com/NebulousLabs/Sia/node/api" @@ -17,6 +18,7 @@ func (c *Client) RenterContractsGet() (rc api.RenterContracts, err error) { // RenterDeletePost uses the /renter/delete endpoint to delete a file. func (c *Client) RenterDeletePost(siaPath string) (err error) { + siaPath = strings.TrimPrefix(siaPath, "/") err = c.post(fmt.Sprintf("/renter/delete/%s", siaPath), "", nil) return err } @@ -24,6 +26,7 @@ func (c *Client) RenterDeletePost(siaPath string) (err error) { // RenterDownloadGet uses the /renter/download endpoint to download a file to a // destination on disk. func (c *Client) RenterDownloadGet(siaPath, destination string, offset, length uint64, async bool) (err error) { + siaPath = strings.TrimPrefix(siaPath, "/") query := fmt.Sprintf("%s?destination=%s&offset=%d&length=%d&httpresp=false&async=%v", siaPath, destination, offset, length, async) err = c.get("/renter/download/"+query, nil) @@ -33,6 +36,7 @@ func (c *Client) RenterDownloadGet(siaPath, destination string, offset, length u // RenterDownloadFullGet uses the /renter/download endpoint to download a full // file. func (c *Client) RenterDownloadFullGet(siaPath, destination string, async bool) (err error) { + siaPath = strings.TrimPrefix(siaPath, "/") query := fmt.Sprintf("%s?destination=%s&httpresp=false&async=%v", siaPath, destination, async) err = c.get("/renter/download/"+query, nil) @@ -48,6 +52,7 @@ func (c *Client) RenterDownloadsGet() (rdq api.RenterDownloadQueue, err error) { // RenterDownloadHTTPResponseGet uses the /renter/download endpoint to download // a file and return its data. func (c *Client) RenterDownloadHTTPResponseGet(siaPath string, offset, length uint64) (resp []byte, err error) { + siaPath = strings.TrimPrefix(siaPath, "/") query := fmt.Sprintf("%s?offset=%d&length=%d&httpresp=true", siaPath, offset, length) resp, err = c.getRawResponse("/renter/download/" + query) return @@ -100,13 +105,16 @@ func (c *Client) RenterPostRateLimit(readBPS, writeBPS int64) (err error) { // RenterRenamePost uses the /renter/rename/:siapath endpoint to rename a file. func (c *Client) RenterRenamePost(siaPathOld, siaPathNew string) (err error) { - err = c.post("/renter/rename/"+siaPathOld, "newsiapath="+siaPathNew, nil) + siaPathOld = strings.TrimPrefix(siaPathOld, "/") + siaPathNew = strings.TrimPrefix(siaPathNew, "/") + err = c.post("/renter/rename/"+siaPathOld, "newsiapath=/"+siaPathNew, nil) return } // RenterStreamGet uses the /renter/stream endpoint to download data as a // stream. func (c *Client) RenterStreamGet(siaPath string) (resp []byte, err error) { + siaPath = strings.TrimPrefix(siaPath, "/") resp, err = c.getRawResponse("/renter/stream/" + siaPath) return } @@ -114,25 +122,28 @@ func (c *Client) RenterStreamGet(siaPath string) (resp []byte, err error) { // RenterStreamPartialGet uses the /renter/stream endpoint to download a part // of data as a stream. 
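// Illustrative note on the strings.TrimPrefix calls added throughout this
// file (an aside, not part of the patch itself): siapaths are now accepted
// with or without a leading slash, so for example both
//
//	c.RenterDeletePost("foo/bar")
//	c.RenterDeletePost("/foo/bar")
//
// build the same request path, /renter/delete/foo/bar.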
func (c *Client) RenterStreamPartialGet(siaPath string, start, end uint64) (resp []byte, err error) { + siaPath = strings.TrimPrefix(siaPath, "/") resp, err = c.getRawPartialResponse("/renter/stream/"+siaPath, start, end) return } // RenterUploadPost uses the /renter/upload endpoint to upload a file func (c *Client) RenterUploadPost(path, siaPath string, dataPieces, parityPieces uint64) (err error) { + siaPath = strings.TrimPrefix(siaPath, "/") values := url.Values{} values.Set("source", path) values.Set("datapieces", strconv.FormatUint(dataPieces, 10)) values.Set("paritypieces", strconv.FormatUint(parityPieces, 10)) - err = c.post(fmt.Sprintf("/renter/upload%v", siaPath), values.Encode(), nil) + err = c.post(fmt.Sprintf("/renter/upload/%v", siaPath), values.Encode(), nil) return } // RenterUploadDefaultPost uses the /renter/upload endpoint with default // redundancy settings to upload a file. func (c *Client) RenterUploadDefaultPost(path, siaPath string) (err error) { + siaPath = strings.TrimPrefix(siaPath, "/") values := url.Values{} values.Set("source", path) - err = c.post(fmt.Sprintf("/renter/upload%v", siaPath), values.Encode(), nil) + err = c.post(fmt.Sprintf("/renter/upload/%v", siaPath), values.Encode(), nil) return } From b8438ee191591522d60bc33f4906674e90eb1643 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Sun, 6 May 2018 13:43:32 -0400 Subject: [PATCH 177/212] Fix violation of locking conventions in FileList call --- modules/renter/files.go | 34 ++++++++++++++++++++++------------ modules/renter/files_test.go | 33 ++++++++++++++++----------------- 2 files changed, 38 insertions(+), 29 deletions(-) diff --git a/modules/renter/files.go b/modules/renter/files.go index c2d53bdd23..6f264cc0b6 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -89,10 +89,10 @@ func (f *file) numChunks() uint64 { } // available indicates whether the file is ready to be downloaded. -func (f *file) available(contractStatus func(types.FileContractID) (offline bool, goodForRenew bool)) bool { +func (f *file) available(offline map[types.FileContractID]bool) bool { chunkPieces := make([]int, f.numChunks()) for _, fc := range f.contracts { - if offline, _ := contractStatus(fc.ID); offline { + if offline[fc.ID] { continue } for _, p := range fc.Pieces { @@ -135,7 +135,7 @@ func (f *file) uploadProgress() float64 { // becomes available when this redundancy is >= 1. Assumes that every piece is // unique within a file contract. -1 is returned if the file has size 0. It // takes one argument, a map of offline contracts for this file. -func (f *file) redundancy(contractStatus func(types.FileContractID) (bool, bool)) float64 { +func (f *file) redundancy(offlineMap map[types.FileContractID]bool, goodForRenewMap map[types.FileContractID]bool) float64 { if f.size == 0 { return -1 } @@ -149,7 +149,8 @@ func (f *file) redundancy(contractStatus func(types.FileContractID) (bool, bool) return -1 } for _, fc := range f.contracts { - offline, goodForRenew := contractStatus(fc.ID) + offline := offlineMap[fc.ID] + goodForRenew := goodForRenewMap[fc.ID] // do not count pieces from the contract if the contract is offline if offline { @@ -257,21 +258,30 @@ func (r *Renter) DeleteFile(nickname string) error { // FileList returns all of the files that the renter has. 
func (r *Renter) FileList() []modules.FileInfo { + // Get all the files and their contracts var files []*file + contractIDs := make(map[types.FileContractID]struct{}) lockID := r.mu.RLock() for _, f := range r.files { files = append(files, f) + for cid := range f.contracts { + contractIDs[cid] = struct{}{} + } } r.mu.RUnlock(lockID) - contractStatus := func(id types.FileContractID) (offline bool, goodForRenew bool) { - id = r.hostContractor.ResolveID(id) - cu, ok := r.hostContractor.ContractUtility(id) - offline = r.hostContractor.IsOffline(id) - goodForRenew = ok && cu.GoodForRenew - return + // Build 2 maps that map every contract id to its offline and goodForRenew + // status. + goodForRenew := make(map[types.FileContractID]bool) + offline := make(map[types.FileContractID]bool) + for cid := range contractIDs { + resolvedID := r.hostContractor.ResolveID(cid) + cu, ok := r.hostContractor.ContractUtility(resolvedID) + goodForRenew[cid] = ok && cu.GoodForRenew + offline[cid] = r.hostContractor.IsOffline(resolvedID) } + // Build the list of FileInfos. var fileList []modules.FileInfo for _, f := range files { lockID := r.mu.RLock() @@ -287,8 +297,8 @@ func (r *Renter) FileList() []modules.FileInfo { LocalPath: localPath, Filesize: f.size, Renewing: renewing, - Available: f.available(contractStatus), - Redundancy: f.redundancy(contractStatus), + Available: f.available(offline), + Redundancy: f.redundancy(offline, goodForRenew), UploadedBytes: f.uploadedBytes(), UploadProgress: f.uploadProgress(), Expiration: f.expiration(), diff --git a/modules/renter/files_test.go b/modules/renter/files_test.go index cb0b5bc3e6..80f50f535d 100644 --- a/modules/renter/files_test.go +++ b/modules/renter/files_test.go @@ -44,9 +44,7 @@ func TestFileAvailable(t *testing.T) { erasureCode: rsc, pieceSize: 100, } - neverOffline := func(types.FileContractID) (bool, bool) { - return false, true - } + neverOffline := make(map[types.FileContractID]bool) if f.available(neverOffline) { t.Error("file should not be available") @@ -62,9 +60,8 @@ func TestFileAvailable(t *testing.T) { t.Error("file should be available") } - specificOffline := func(fcid types.FileContractID) (bool, bool) { - return fcid == fc.ID, true - } + specificOffline := make(map[types.FileContractID]bool) + specificOffline[fc.ID] = true if f.available(specificOffline) { t.Error("file should not be available") } @@ -109,9 +106,12 @@ func TestFileUploadProgressPinning(t *testing.T) { // with varying number of filecontracts and erasure code settings. func TestFileRedundancy(t *testing.T) { nDatas := []int{1, 2, 10} - neverOffline := func(types.FileContractID) (bool, bool) { - return false, true + neverOffline := make(map[types.FileContractID]bool) + goodForRenew := make(map[types.FileContractID]bool) + for i := 0; i < 5; i++ { + goodForRenew[types.FileContractID{byte(i)}] = true } + for _, nData := range nDatas { rsc, _ := NewRSCode(nData, 10) f := &file{ @@ -121,7 +121,7 @@ func TestFileRedundancy(t *testing.T) { erasureCode: rsc, } // Test that an empty file has 0 redundancy. 
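// Illustrative aside (not part of the original test): redundancy is, roughly,
// the piece count of the worst-covered chunk, ignoring offline contracts,
// divided by erasureCode.MinPieces(). With an (nData, 10) code MinPieces() is
// nData, so a file whose weakest chunk holds nData+1 usable pieces has
// redundancy (nData+1)/nData -- which is where the expectedR values asserted
// below come from.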
- if r := f.redundancy(neverOffline); r != 0 { + if r := f.redundancy(neverOffline, goodForRenew); r != 0 { t.Error("expected 0 redundancy, got", r) } // Test that a file with 1 filecontract that has a piece for every chunk but @@ -137,7 +137,7 @@ func TestFileRedundancy(t *testing.T) { fc.Pieces = append(fc.Pieces, pd) } f.contracts[fc.ID] = fc - if r := f.redundancy(neverOffline); r != 0 { + if r := f.redundancy(neverOffline, goodForRenew); r != 0 { t.Error("expected 0 redundancy, got", r) } // Test that adding another filecontract with a piece for every chunk but one @@ -153,7 +153,7 @@ func TestFileRedundancy(t *testing.T) { fc.Pieces = append(fc.Pieces, pd) } f.contracts[fc.ID] = fc - if r := f.redundancy(neverOffline); r != 0 { + if r := f.redundancy(neverOffline, goodForRenew); r != 0 { t.Error("expected 0 redundancy, got", r) } // Test that adding a file contract with a piece for the missing chunk @@ -169,7 +169,7 @@ func TestFileRedundancy(t *testing.T) { f.contracts[fc.ID] = fc // 1.0 / MinPieces because the chunk with the least number of pieces has 1 piece. expectedR := 1.0 / float64(f.erasureCode.MinPieces()) - if r := f.redundancy(neverOffline); r != expectedR { + if r := f.redundancy(neverOffline, goodForRenew); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } // Test that adding a file contract that has erasureCode.MinPieces() pieces @@ -188,7 +188,7 @@ func TestFileRedundancy(t *testing.T) { f.contracts[fc.ID] = fc // 1+MinPieces / MinPieces because the chunk with the least number of pieces has 1+MinPieces pieces. expectedR = float64(1+f.erasureCode.MinPieces()) / float64(f.erasureCode.MinPieces()) - if r := f.redundancy(neverOffline); r != expectedR { + if r := f.redundancy(neverOffline, goodForRenew); r != expectedR { t.Errorf("expected %f redundancy, got %f", expectedR, r) } @@ -205,10 +205,9 @@ func TestFileRedundancy(t *testing.T) { } } f.contracts[fc.ID] = fc - specificOffline := func(fcid types.FileContractID) (bool, bool) { - return fcid == fc.ID, true - } - if r := f.redundancy(specificOffline); r != expectedR { + specificOffline := make(map[types.FileContractID]bool) + specificOffline[fc.ID] = true + if r := f.redundancy(specificOffline, goodForRenew); r != expectedR { t.Errorf("expected redundancy to ignore offline file contracts, wanted %f got %f", expectedR, r) } } From 40a6dc9ba3e884465a59a78a0dccb6977524c2af Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Sun, 6 May 2018 15:00:23 -0400 Subject: [PATCH 178/212] fix more convention violations --- modules/renter/contractor/allowance.go | 8 +- modules/renter/contractor/contractor.go | 34 +-- modules/renter/contractor/contractor_test.go | 8 +- modules/renter/contractor/contracts.go | 283 +++++++++--------- modules/renter/contractor/downloader.go | 4 +- modules/renter/contractor/editor.go | 4 +- .../contractor/host_integration_test.go | 8 +- modules/renter/contractor/update.go | 6 +- modules/renter/contractor/uptime.go | 2 +- modules/renter/contractor/uptime_test.go | 2 +- 10 files changed, 178 insertions(+), 181 deletions(-) diff --git a/modules/renter/contractor/allowance.go b/modules/renter/contractor/allowance.go index 4b1ed00a00..eba21a3100 100644 --- a/modules/renter/contractor/allowance.go +++ b/modules/renter/contractor/allowance.go @@ -86,8 +86,8 @@ func (c *Contractor) managedCancelAllowance() error { c.log.Println("INFO: canceling allowance") // first need to invalidate any active editors/downloaders // NOTE: this code is the same as in managedRenewContracts + ids := 
c.staticContracts.IDs() c.mu.Lock() - ids := c.contracts.IDs() for _, id := range ids { // we aren't renewing, but we don't want new editors or downloaders to // be created @@ -128,13 +128,13 @@ func (c *Contractor) managedCancelAllowance() error { c.managedInterruptContractMaintenance() // Cycle through all contracts and delete them. - ids = c.contracts.IDs() + ids = c.staticContracts.IDs() for _, id := range ids { - contract, exists := c.contracts.Acquire(id) + contract, exists := c.staticContracts.Acquire(id) if !exists { continue } - c.contracts.Delete(contract) + c.staticContracts.Delete(contract) } return nil } diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 743a3acd23..1937e9fe6a 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -60,9 +60,9 @@ type Contractor struct { renewing map[types.FileContractID]bool // prevent revising during renewal revising map[types.FileContractID]bool // prevent overlapping revisions - contracts *proto.ContractSet - oldContracts map[types.FileContractID]modules.RenterContract - renewedIDs map[types.FileContractID]types.FileContractID + staticContracts *proto.ContractSet + oldContracts map[types.FileContractID]modules.RenterContract + renewedIDs map[types.FileContractID]types.FileContractID } // readlockResolveID returns the ID of the most recent renewal of id. @@ -89,7 +89,7 @@ func (c *Contractor) PeriodSpending() modules.ContractorSpending { defer c.mu.RUnlock() var spending modules.ContractorSpending - for _, contract := range c.contracts.ViewAll() { + for _, contract := range c.staticContracts.ViewAll() { // Calculate ContractFees spending.ContractFees = spending.ContractFees.Add(contract.ContractFee) spending.ContractFees = spending.ContractFees.Add(contract.TxnFee) @@ -126,7 +126,7 @@ func (c *Contractor) PeriodSpending() modules.ContractorSpending { func (c *Contractor) ContractByID(id types.FileContractID) (modules.RenterContract, bool) { c.mu.RLock() defer c.mu.RUnlock() - return c.contracts.View(c.readlockResolveID(id)) + return c.staticContracts.View(c.readlockResolveID(id)) } // Contracts returns the contracts formed by the contractor in the current @@ -135,14 +135,12 @@ func (c *Contractor) ContractByID(id types.FileContractID) (modules.RenterContra func (c *Contractor) Contracts() []modules.RenterContract { c.mu.RLock() defer c.mu.RUnlock() - return c.contracts.ViewAll() + return c.staticContracts.ViewAll() } // ContractUtility returns the utility fields for the given contract. func (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { - c.mu.RLock() - defer c.mu.RUnlock() - return c.readlockContractUtility(id) + return c.managedContractUtility(id) } // CurrentPeriod returns the height at which the current allowance period @@ -164,7 +162,7 @@ func (c *Contractor) ResolveID(id types.FileContractID) types.FileContractID { // SetRateLimits sets the bandwidth limits for connections created by the // contractSet. func (c *Contractor) SetRateLimits(readBPS, writeBPS int64, packetSize uint64) { - c.contracts.SetRateLimits(readBPS, writeBPS, packetSize) + c.staticContracts.SetRateLimits(readBPS, writeBPS, packetSize) } // Close closes the Contractor. 
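// Aside on the contracts -> staticContracts rename in this patch (inferred
// from the surrounding changes, not stated in the commit message): the
// "static" prefix marks fields that are set once at construction and carry
// their own synchronization, so they may be used without holding c.mu. That
// is what lets ContractUtility above delegate straight to
// managedContractUtility, which only takes c.mu briefly to resolve the
// renewed ID before querying the contract set:
//
//	c.mu.RLock()
//	id = c.readlockResolveID(id)
//	c.mu.RUnlock()
//	rc, exists := c.staticContracts.View(id) // ContractSet locks internally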
@@ -225,18 +223,18 @@ func NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb host interruptMaintenance: make(chan struct{}), - contracts: contractSet, - downloaders: make(map[types.FileContractID]*hostDownloader), - editors: make(map[types.FileContractID]*hostEditor), - oldContracts: make(map[types.FileContractID]modules.RenterContract), - renewedIDs: make(map[types.FileContractID]types.FileContractID), - renewing: make(map[types.FileContractID]bool), - revising: make(map[types.FileContractID]bool), + staticContracts: contractSet, + downloaders: make(map[types.FileContractID]*hostDownloader), + editors: make(map[types.FileContractID]*hostEditor), + oldContracts: make(map[types.FileContractID]modules.RenterContract), + renewedIDs: make(map[types.FileContractID]types.FileContractID), + renewing: make(map[types.FileContractID]bool), + revising: make(map[types.FileContractID]bool), } // Close the contract set and logger upon shutdown. c.tg.AfterStop(func() { - if err := c.contracts.Close(); err != nil { + if err := c.staticContracts.Close(); err != nil { c.log.Println("Failed to close contract set:", err) } if err := c.log.Close(); err != nil { diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go index a0b10b2ae8..7d645cf9d2 100644 --- a/modules/renter/contractor/contractor_test.go +++ b/modules/renter/contractor/contractor_test.go @@ -459,7 +459,7 @@ func TestIntegrationSetAllowance(t *testing.T) { t.Fatal(err) } c.mu.Lock() - clen := c.contracts.Len() + clen := c.staticContracts.Len() c.mu.Unlock() if clen != 1 { t.Fatal("expected 1 contract, got", clen) @@ -505,9 +505,9 @@ func TestIntegrationSetAllowance(t *testing.T) { // delete one of the contracts and set allowance with Funds*2; should // trigger 1 renewal and 1 new contract c.mu.Lock() - ids := c.contracts.IDs() - contract, _ := c.contracts.Acquire(ids[0]) - c.contracts.Delete(contract) + ids := c.staticContracts.IDs() + contract, _ := c.staticContracts.Acquire(ids[0]) + c.staticContracts.Delete(contract) c.mu.Unlock() a.Funds = a.Funds.Mul64(2) err = c.SetAllowance(a) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 21376290d5..9490a66450 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -28,9 +28,12 @@ func (c *Contractor) contractEndHeight() types.BlockHeight { return c.currentPeriod + c.allowance.Period } -// readlockContractUtility returns the ContractUtility for a contract with a given id. -func (c *Contractor) readlockContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { - rc, exists := c.contracts.View(c.readlockResolveID(id)) +// managedContractUtility returns the ContractUtility for a contract with a given id. +func (c *Contractor) managedContractUtility(id types.FileContractID) (modules.ContractUtility, bool) { + c.mu.RLock() + id = c.readlockResolveID(id) + c.mu.RUnlock() + rc, exists := c.staticContracts.View(id) if !exists { return modules.ContractUtility{}, false } @@ -94,7 +97,7 @@ func (c *Contractor) managedMarkContractsUtility() error { } // Update utility fields for each contract. - for _, contract := range c.contracts.ViewAll() { + for _, contract := range c.staticContracts.ViewAll() { utility := func() (u modules.ContractUtility) { // Start the contract in good standing. 
u.GoodForUpload = true @@ -184,7 +187,7 @@ func (c *Contractor) managedNewContract(host modules.HostDBEntry, contractFundin // create transaction builder txnBuilder := c.wallet.StartTransaction() - contract, err := c.contracts.FormContract(params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) + contract, err := c.staticContracts.FormContract(params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) if err != nil { txnBuilder.Drop() return modules.RenterContract{}, err @@ -202,9 +205,7 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. // For convenience contract := sc.Metadata() // Sanity check - should not be renewing a bad contract. - c.mu.RLock() - utility, ok := c.readlockContractUtility(contract.ID) - c.mu.RUnlock() + utility, ok := c.managedContractUtility(contract.ID) if !ok || !utility.GoodForRenew { c.log.Critical(fmt.Sprintf("Renewing a contract that has been marked as !GoodForRenew %v/%v", ok, utility.GoodForRenew)) @@ -241,7 +242,7 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. // execute negotiation protocol txnBuilder := c.wallet.StartTransaction() - newContract, err := c.contracts.Renew(sc, params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) + newContract, err := c.staticContracts.Renew(sc, params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) if err != nil { txnBuilder.Drop() // return unused outputs to wallet return modules.RenterContract{}, err @@ -310,139 +311,139 @@ func (c *Contractor) threadedContractMaintenance() { var fundsAvailable types.Currency var renewSet []renewal refreshSet := make(map[types.FileContractID]struct{}) - func() { - c.mu.RLock() - defer c.mu.RUnlock() - - // Grab the end height that should be used for the contracts. - endHeight = c.currentPeriod + c.allowance.Period - - // Determine how many funds have been used already in this billing - // cycle, and how many funds are remaining. We have to calculate these - // numbers separately to avoid underflow, and then re-join them later to - // get the full picture for how many funds are available. - var fundsUsed types.Currency - for _, contract := range c.contracts.ViewAll() { - // Calculate the cost of the contract line. - contractLineCost := contract.TotalCost - // TODO: add previous contracts here - // Check if the contract is expiring. The funds in the contract are - // handled differently based on this information. - if c.blockHeight+c.allowance.RenewWindow >= contract.EndHeight { - // The contract is expiring. Some of the funds are locked down - // to renew the contract, and then the remaining funds can be - // allocated to 'availableFunds'. - fundsUsed = fundsUsed.Add(contractLineCost).Sub(contract.RenterFunds) - fundsAvailable = fundsAvailable.Add(contract.RenterFunds) - } else { - // The contract is not expiring. None of the funds in the - // contract are available to renew or form contracts. - fundsUsed = fundsUsed.Add(contractLineCost) - } + c.mu.RLock() + currentPeriod := c.currentPeriod + allowance := c.allowance + blockHeight := c.blockHeight + c.mu.RUnlock() + + // Grab the end height that should be used for the contracts. + endHeight = currentPeriod + allowance.Period + + // Determine how many funds have been used already in this billing + // cycle, and how many funds are remaining. We have to calculate these + // numbers separately to avoid underflow, and then re-join them later to + // get the full picture for how many funds are available. 
+ var fundsUsed types.Currency + for _, contract := range c.staticContracts.ViewAll() { + // Calculate the cost of the contract line. + contractLineCost := contract.TotalCost + // TODO: add previous contracts here + + // Check if the contract is expiring. The funds in the contract are + // handled differently based on this information. + if c.blockHeight+allowance.RenewWindow >= contract.EndHeight { + // The contract is expiring. Some of the funds are locked down + // to renew the contract, and then the remaining funds can be + // allocated to 'availableFunds'. + fundsUsed = fundsUsed.Add(contractLineCost).Sub(contract.RenterFunds) + fundsAvailable = fundsAvailable.Add(contract.RenterFunds) + } else { + // The contract is not expiring. None of the funds in the + // contract are available to renew or form contracts. + fundsUsed = fundsUsed.Add(contractLineCost) } + } - // Add any unspent funds from the allowance to the available funds. If - // the allowance has been decreased, it's possible that we actually need - // to reduce the number of funds available to compensate. - if fundsAvailable.Add(c.allowance.Funds).Cmp(fundsUsed) > 0 { - fundsAvailable = fundsAvailable.Add(c.allowance.Funds).Sub(fundsUsed) + // Add any unspent funds from the allowance to the available funds. If + // the allowance has been decreased, it's possible that we actually need + // to reduce the number of funds available to compensate. + if fundsAvailable.Add(allowance.Funds).Cmp(fundsUsed) > 0 { + fundsAvailable = fundsAvailable.Add(allowance.Funds).Sub(fundsUsed) + } else { + // Figure out how much we need to remove from fundsAvailable to + // clear the allowance. + overspend := fundsUsed.Sub(allowance.Funds).Sub(fundsAvailable) + if fundsAvailable.Cmp(overspend) > 0 { + // We still have some funds available. + fundsAvailable = fundsAvailable.Sub(overspend) } else { - // Figure out how much we need to remove from fundsAvailable to - // clear the allowance. - overspend := fundsUsed.Sub(c.allowance.Funds).Sub(fundsAvailable) - if fundsAvailable.Cmp(overspend) > 0 { - // We still have some funds available. - fundsAvailable = fundsAvailable.Sub(overspend) + // The overspend exceeds the available funds, set available + // funds to zero. + fundsAvailable = types.ZeroCurrency + } + } + + // Iterate through the contracts again, figuring out which contracts to + // renew and how much extra funds to renew them with. + for _, contract := range c.staticContracts.ViewAll() { + utility, ok := c.managedContractUtility(contract.ID) + if !ok || !utility.GoodForRenew { + continue + } + if blockHeight+allowance.RenewWindow >= contract.EndHeight { + // This contract needs to be renewed because it is going to + // expire soon. First step is to calculate how much money should + // be used in the renewal, based on how much of the contract + // funds (including previous contracts this billing cycle due to + // financial resets) were spent throughout this billing cycle. + // + // The amount we care about is the total amount that was spent + // on uploading, downloading, and storage throughout the billing + // cycle. This is calculated by starting with the total cost and + // subtracting out all of the fees, and then all of the unused + // money that was allocated (the RenterFunds). + renewAmount := contract.TotalCost.Sub(contract.ContractFee).Sub(contract.TxnFee).Sub(contract.SiafundFee).Sub(contract.RenterFunds) + // TODO: add previous contracts here + + // Get an estimate for how much the fees will cost. 
+ // + // TODO: Look up this host in the hostdb to figure out what the + // actual fees will be. + estimatedFees := contract.ContractFee.Add(contract.TxnFee).Add(contract.SiafundFee) + renewAmount = renewAmount.Add(estimatedFees) + + // Determine if there is enough funds available to suppliement + // with a 33% bonus, and if there is, add a 33% bonus. + moneyBuffer := renewAmount.Div64(3) + if moneyBuffer.Cmp(fundsAvailable) < 0 { + renewAmount = renewAmount.Add(moneyBuffer) + fundsAvailable = fundsAvailable.Sub(moneyBuffer) } else { - // The overspend exceeds the available funds, set available - // funds to zero. - fundsAvailable = types.ZeroCurrency + c.log.Println("WARN: performing a limited renew due to low allowance") } - } - // Iterate through the contracts again, figuring out which contracts to - // renew and how much extra funds to renew them with. - for _, contract := range c.contracts.ViewAll() { - utility, ok := c.readlockContractUtility(contract.ID) - if !ok || !utility.GoodForRenew { + // The contract needs to be renewed because it is going to + // expire soon, and we need to refresh the time. + renewSet = append(renewSet, renewal{ + id: contract.ID, + amount: renewAmount, + }) + } else { + // Check if the contract has exhausted its funding and requires + // premature renewal. + host, _ := c.hdb.Host(contract.HostPublicKey) + + // Skip this host if its prices are too high. + // managedMarkContractsUtility should make this redundant, but + // this is here for extra safety. + if host.StoragePrice.Cmp(maxStoragePrice) > 0 || host.UploadBandwidthPrice.Cmp(maxUploadPrice) > 0 { continue } - if c.blockHeight+c.allowance.RenewWindow >= contract.EndHeight { - // This contract needs to be renewed because it is going to - // expire soon. First step is to calculate how much money should - // be used in the renewal, based on how much of the contract - // funds (including previous contracts this billing cycle due to - // financial resets) were spent throughout this billing cycle. - // - // The amount we care about is the total amount that was spent - // on uploading, downloading, and storage throughout the billing - // cycle. This is calculated by starting with the total cost and - // subtracting out all of the fees, and then all of the unused - // money that was allocated (the RenterFunds). - renewAmount := contract.TotalCost.Sub(contract.ContractFee).Sub(contract.TxnFee).Sub(contract.SiafundFee).Sub(contract.RenterFunds) - // TODO: add previous contracts here - - // Get an estimate for how much the fees will cost. - // - // TODO: Look up this host in the hostdb to figure out what the - // actual fees will be. - estimatedFees := contract.ContractFee.Add(contract.TxnFee).Add(contract.SiafundFee) - renewAmount = renewAmount.Add(estimatedFees) - - // Determine if there is enough funds available to suppliement - // with a 33% bonus, and if there is, add a 33% bonus. - moneyBuffer := renewAmount.Div64(3) - if moneyBuffer.Cmp(fundsAvailable) < 0 { - renewAmount = renewAmount.Add(moneyBuffer) - fundsAvailable = fundsAvailable.Sub(moneyBuffer) - } else { - c.log.Println("WARN: performing a limited renew due to low allowance") - } - - // The contract needs to be renewed because it is going to - // expire soon, and we need to refresh the time. - renewSet = append(renewSet, renewal{ - id: contract.ID, - amount: renewAmount, - }) - } else { - // Check if the contract has exhausted its funding and requires - // premature renewal. 
- c.mu.RUnlock() - host, _ := c.hdb.Host(contract.HostPublicKey) - c.mu.RLock() - - // Skip this host if its prices are too high. - // managedMarkContractsUtility should make this redundant, but - // this is here for extra safety. - if host.StoragePrice.Cmp(maxStoragePrice) > 0 || host.UploadBandwidthPrice.Cmp(maxUploadPrice) > 0 { - continue - } - blockBytes := types.NewCurrency64(modules.SectorSize * uint64(contract.EndHeight-c.blockHeight)) - sectorStoragePrice := host.StoragePrice.Mul(blockBytes) - sectorBandwidthPrice := host.UploadBandwidthPrice.Mul64(modules.SectorSize) - sectorPrice := sectorStoragePrice.Add(sectorBandwidthPrice) - percentRemaining, _ := big.NewRat(0, 1).SetFrac(contract.RenterFunds.Big(), contract.TotalCost.Big()).Float64() - if contract.RenterFunds.Cmp(sectorPrice.Mul64(3)) < 0 || percentRemaining < minContractFundRenewalThreshold { - // This contract does need to be refreshed. Make sure there - // are enough funds available to perform the refresh, and - // then execute. - refreshAmount := contract.TotalCost.Mul64(2) - if refreshAmount.Cmp(fundsAvailable) < 0 { - refreshSet[contract.ID] = struct{}{} - renewSet = append(renewSet, renewal{ - id: contract.ID, - amount: refreshAmount, - }) - } else { - c.log.Println("WARN: cannot refresh empty contract due to low allowance.") - } + blockBytes := types.NewCurrency64(modules.SectorSize * uint64(contract.EndHeight-c.blockHeight)) + sectorStoragePrice := host.StoragePrice.Mul(blockBytes) + sectorBandwidthPrice := host.UploadBandwidthPrice.Mul64(modules.SectorSize) + sectorPrice := sectorStoragePrice.Add(sectorBandwidthPrice) + percentRemaining, _ := big.NewRat(0, 1).SetFrac(contract.RenterFunds.Big(), contract.TotalCost.Big()).Float64() + if contract.RenterFunds.Cmp(sectorPrice.Mul64(3)) < 0 || percentRemaining < minContractFundRenewalThreshold { + // This contract does need to be refreshed. Make sure there + // are enough funds available to perform the refresh, and + // then execute. + refreshAmount := contract.TotalCost.Mul64(2) + if refreshAmount.Cmp(fundsAvailable) < 0 { + refreshSet[contract.ID] = struct{}{} + renewSet = append(renewSet, renewal{ + id: contract.ID, + amount: refreshAmount, + }) + } else { + c.log.Println("WARN: cannot refresh empty contract due to low allowance.") } } } - }() + } if len(renewSet) != 0 { c.log.Printf("renewing %v contracts", len(renewSet)) } @@ -480,18 +481,16 @@ func (c *Contractor) threadedContractMaintenance() { } // Fetch the contract that we are renewing. - oldContract, exists := c.contracts.Acquire(id) + oldContract, exists := c.staticContracts.Acquire(id) if !exists { return } // Return the contract if it's not useful for renewing. - c.mu.RLock() - oldUtility, ok := c.readlockContractUtility(id) - c.mu.RUnlock() + oldUtility, ok := c.managedContractUtility(id) if !ok || !oldUtility.GoodForRenew { c.log.Printf("Contract %v slated for renew is marked not good for renew %v/%v", id, ok, oldUtility.GoodForRenew) - c.contracts.Return(oldContract) + c.staticContracts.Return(oldContract) return } // Perform the actual renew. 
If the renew fails, return the @@ -499,7 +498,7 @@ func (c *Contractor) threadedContractMaintenance() { newContract, err := c.managedRenew(oldContract, amount, endHeight) if err != nil { c.log.Printf("WARN: failed to renew contract %v: %v\n", id, err) - c.contracts.Return(oldContract) + c.staticContracts.Return(oldContract) return } c.log.Printf("Renewed contract %v\n", id) @@ -534,7 +533,7 @@ func (c *Contractor) threadedContractMaintenance() { c.mu.Lock() defer c.mu.Unlock() // Delete the old contract. - c.contracts.Delete(oldContract) + c.staticContracts.Delete(oldContract) // Store the contract in the record of historic contracts. c.oldContracts[id] = oldContract.Metadata() // Add a mapping from the old contract to the new contract. @@ -568,13 +567,13 @@ func (c *Contractor) threadedContractMaintenance() { // Count the number of contracts which are good for uploading, and then make // more as needed to fill the gap. - c.mu.RLock() uploadContracts := 0 - for _, id := range c.contracts.IDs() { - if cu, ok := c.readlockContractUtility(id); ok && cu.GoodForUpload { + for _, id := range c.staticContracts.IDs() { + if cu, ok := c.managedContractUtility(id); ok && cu.GoodForUpload { uploadContracts++ } } + c.mu.RLock() neededContracts := int(c.allowance.Hosts) - uploadContracts c.mu.RUnlock() if neededContracts <= 0 { @@ -586,7 +585,7 @@ func (c *Contractor) threadedContractMaintenance() { // formation with. c.mu.RLock() var exclude []types.SiaPublicKey - for _, contract := range c.contracts.ViewAll() { + for _, contract := range c.staticContracts.ViewAll() { exclude = append(exclude, contract.HostPublicKey) } initialContractFunds := c.allowance.Funds.Div64(c.allowance.Hosts).Div64(3) @@ -649,10 +648,10 @@ func (c *Contractor) threadedContractMaintenance() { // updateContractUtility is a helper function that acquires a contract, updates // its ContractUtility and returns the contract again. func (c *Contractor) updateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { - safeContract, ok := c.contracts.Acquire(id) + safeContract, ok := c.staticContracts.Acquire(id) if !ok { return errors.New("failed to acquire contract for update") } - defer c.contracts.Return(safeContract) + defer c.staticContracts.Return(safeContract) return safeContract.UpdateUtility(utility) } diff --git a/modules/renter/contractor/downloader.go b/modules/renter/contractor/downloader.go index d7642d20fe..af048182d0 100644 --- a/modules/renter/contractor/downloader.go +++ b/modules/renter/contractor/downloader.go @@ -121,7 +121,7 @@ func (c *Contractor) Downloader(id types.FileContractID, cancel <-chan struct{}) } // Fetch the contract and host. 
- contract, haveContract := c.contracts.View(id) + contract, haveContract := c.staticContracts.View(id) if !haveContract { return nil, errors.New("no record of that contract") } @@ -157,7 +157,7 @@ func (c *Contractor) Downloader(id types.FileContractID, cancel <-chan struct{}) }() // create downloader - d, err := c.contracts.NewDownloader(host, contract.ID, c.hdb, cancel) + d, err := c.staticContracts.NewDownloader(host, contract.ID, c.hdb, cancel) if err != nil { return nil, err } diff --git a/modules/renter/contractor/editor.go b/modules/renter/contractor/editor.go index d3fe1686ca..b2d911f8e4 100644 --- a/modules/renter/contractor/editor.go +++ b/modules/renter/contractor/editor.go @@ -137,7 +137,7 @@ func (c *Contractor) Editor(id types.FileContractID, cancel <-chan struct{}) (_ // Check that the contract and host are both available, and run some brief // sanity checks to see that the host is not swindling us. - contract, haveContract := c.contracts.View(id) + contract, haveContract := c.staticContracts.View(id) if !haveContract { return nil, errors.New("no record of that contract") } @@ -171,7 +171,7 @@ func (c *Contractor) Editor(id types.FileContractID, cancel <-chan struct{}) (_ }() // Create the editor. - e, err := c.contracts.NewEditor(host, contract.ID, height, c.hdb, cancel) + e, err := c.staticContracts.NewEditor(host, contract.ID, height, c.hdb, cancel) if err != nil { return nil, err } diff --git a/modules/renter/contractor/host_integration_test.go b/modules/renter/contractor/host_integration_test.go index db6928146f..e73ec5d4d1 100644 --- a/modules/renter/contractor/host_integration_test.go +++ b/modules/renter/contractor/host_integration_test.go @@ -354,12 +354,12 @@ func TestIntegrationRenew(t *testing.T) { t.Fatal(err) } c.mu.Unlock() - oldContract, _ := c.contracts.Acquire(contract.ID) + oldContract, _ := c.staticContracts.Acquire(contract.ID) contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+200) if err != nil { t.Fatal(err) } - c.contracts.Return(oldContract) + c.staticContracts.Return(oldContract) // check renewed contract if contract.EndHeight != c.blockHeight+200 { @@ -390,12 +390,12 @@ func TestIntegrationRenew(t *testing.T) { t.Fatal(err) } c.mu.Unlock() - oldContract, _ = c.contracts.Acquire(contract.ID) + oldContract, _ = c.staticContracts.Acquire(contract.ID) contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+100) if err != nil { t.Fatal(err) } - c.contracts.Return(oldContract) + c.staticContracts.Return(oldContract) if contract.EndHeight != c.blockHeight+100 { t.Fatal(contract.EndHeight) } diff --git a/modules/renter/contractor/update.go b/modules/renter/contractor/update.go index b07292be87..a5d0b20361 100644 --- a/modules/renter/contractor/update.go +++ b/modules/renter/contractor/update.go @@ -26,7 +26,7 @@ func (c *Contractor) managedArchiveContracts() { // Loop through the current set of contracts and migrate any expired ones to // the set of old contracts. var expired []types.FileContractID - for _, contract := range c.contracts.ViewAll() { + for _, contract := range c.staticContracts.ViewAll() { if currentHeight > contract.EndHeight { id := contract.ID c.mu.Lock() @@ -44,8 +44,8 @@ func (c *Contractor) managedArchiveContracts() { // Delete all the expired contracts from the contract set. 
for _, id := range expired {
-		if sc, ok := c.contracts.Acquire(id); ok {
-			c.contracts.Delete(sc)
+		if sc, ok := c.staticContracts.Acquire(id); ok {
+			c.staticContracts.Delete(sc)
 		}
 	}
 }
diff --git a/modules/renter/contractor/uptime.go b/modules/renter/contractor/uptime.go
index 0ffa551cd0..61d3d855c5 100644
--- a/modules/renter/contractor/uptime.go
+++ b/modules/renter/contractor/uptime.go
@@ -28,7 +28,7 @@ var uptimeWindow = func() time.Duration {
 // IsOffline indicates whether a contract's host should be considered offline,
 // based on its scan metrics.
 func (c *Contractor) IsOffline(id types.FileContractID) bool {
-	contract, ok := c.contracts.View(id)
+	contract, ok := c.staticContracts.View(id)
 	if !ok {
 		// No contract, assume offline.
 		return true
diff --git a/modules/renter/contractor/uptime_test.go b/modules/renter/contractor/uptime_test.go
index 0f56ce49b4..fda92148f0 100644
--- a/modules/renter/contractor/uptime_test.go
+++ b/modules/renter/contractor/uptime_test.go
@@ -31,7 +31,7 @@ func TestIntegrationReplaceOffline(t *testing.T) {
 	// Block until the contract is registered.
 	err = build.Retry(50, 100*time.Millisecond, func() error {
 		c.mu.Lock()
-		lenC := c.contracts.Len()
+		lenC := c.staticContracts.Len()
 		c.mu.Unlock()
 		if lenC < 1 {
 			return errors.New("allowance forming seems to have failed")

From 6122ce692cfbd5f35376b79a106c22476f544d3f Mon Sep 17 00:00:00 2001
From: Christopher Schinnerl
Date: Sun, 6 May 2018 19:13:05 -0400
Subject: [PATCH 179/212] Don't embed types.Block in ConsensusBlocksGet

---
 node/api/consensus.go               | 20 ++++++++++++++------
 siatest/consensus/consensus_test.go |  8 ++++----
 2 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/node/api/consensus.go b/node/api/consensus.go
index ee17c0a26c..26d2187b88 100644
--- a/node/api/consensus.go
+++ b/node/api/consensus.go
@@ -27,9 +27,13 @@ type ConsensusHeadersGET struct {
 // ConsensusBlocksGet wraps a types.Block and adds an id field.
 type ConsensusBlocksGet struct {
-	BlockID     types.BlockID     `json:"id"`
-	BlockHeight types.BlockHeight `json:"height"`
-	types.Block
+	ID           types.BlockID         `json:"id"`
+	Height       types.BlockHeight     `json:"height"`
+	ParentID     types.BlockID         `json:"parentid"`
+	Nonce        types.BlockNonce      `json:"nonce"`
+	Timestamp    types.Timestamp       `json:"timestamp"`
+	MinerPayouts []types.SiacoinOutput `json:"minerpayouts"`
+	Transactions []types.Transaction   `json:"transactions"`
 }

 // consensusHandler handles the API calls to /consensus.
@@ -85,9 +89,13 @@ func (api *API) consensusBlocksHandler(w http.ResponseWriter, req *http.Request, } // Write response WriteJSON(w, ConsensusBlocksGet{ - BlockID: b.ID(), - BlockHeight: h, - Block: b, + ID: b.ID(), + Height: h, + ParentID: b.ParentID, + Nonce: b.Nonce, + Timestamp: b.Timestamp, + MinerPayouts: b.MinerPayouts, + Transactions: b.Transactions, }) } diff --git a/siatest/consensus/consensus_test.go b/siatest/consensus/consensus_test.go index 89fa79cbb4..42e4bc0c4b 100644 --- a/siatest/consensus/consensus_test.go +++ b/siatest/consensus/consensus_test.go @@ -85,10 +85,10 @@ func TestConsensusBlocksIDGet(t *testing.T) { } // Make sure all of the fields are initialized and not empty var zeroID types.BlockID - if block.BlockID != cg.CurrentBlock { + if block.ID != cg.CurrentBlock { t.Fatal("BlockID wasn't set correctly") } - if block.BlockHeight != cg.Height { + if block.Height != cg.Height { t.Fatal("BlockHeight wasn't set correctly") } if block.ParentID == zeroID { @@ -110,10 +110,10 @@ func TestConsensusBlocksIDGet(t *testing.T) { t.Fatal("Failed to retrieve block", err) } // block and block2 should be the same - if block.BlockID != block2.BlockID { + if block.ID != block2.ID { t.Fatal("BlockID wasn't set correctly") } - if block.BlockHeight != block2.BlockHeight { + if block.Height != block2.Height { t.Fatal("BlockID wasn't set correctly") } if block.ParentID != block2.ParentID { From 7c370fbf5ab1950321ed98fde9f5aa37ca5dc0ed Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Sun, 6 May 2018 19:21:35 -0400 Subject: [PATCH 180/212] remove unnecessary locking --- modules/renter/contractor/contractor.go | 2 -- modules/renter/contractor/contractor_test.go | 2 -- 2 files changed, 4 deletions(-) diff --git a/modules/renter/contractor/contractor.go b/modules/renter/contractor/contractor.go index 1937e9fe6a..05bb2d2716 100644 --- a/modules/renter/contractor/contractor.go +++ b/modules/renter/contractor/contractor.go @@ -133,8 +133,6 @@ func (c *Contractor) ContractByID(id types.FileContractID) (modules.RenterContra // allowance period. Only contracts formed with currently online hosts are // returned. func (c *Contractor) Contracts() []modules.RenterContract { - c.mu.RLock() - defer c.mu.RUnlock() return c.staticContracts.ViewAll() } diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go index 7d645cf9d2..982078d9af 100644 --- a/modules/renter/contractor/contractor_test.go +++ b/modules/renter/contractor/contractor_test.go @@ -458,9 +458,7 @@ func TestIntegrationSetAllowance(t *testing.T) { if err != nil { t.Fatal(err) } - c.mu.Lock() clen := c.staticContracts.Len() - c.mu.Unlock() if clen != 1 { t.Fatal("expected 1 contract, got", clen) } From e5d03a2aeb10714c6a8fc20345b46e19970f004e Mon Sep 17 00:00:00 2001 From: David Vorick Date: Mon, 7 May 2018 11:01:56 -0400 Subject: [PATCH 181/212] update abuse minimums with the price of the siacoin having risen so much, the abuse minimums should be reduced substantially to reflect that. At some point we'll have a method for removing the abuse minimums, but currently we have no way to measure what might count as an abusive minimum. 
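
For scale, a minimal sketch of what the touched minimums work out to at TB-month granularity. The tbMonth figure below (blocks per month times bytes per TB) is an assumption made only for illustration and is not taken from this patch:

	package main

	import (
		"fmt"

		"github.com/NebulousLabs/Sia/types"
	)

	func main() {
		// Assumed scale factor: ~4032 blocks per month, 1e12 bytes per TB.
		tbMonth := uint64(4032) * uint64(1e12)

		oldMinCollateral := types.SiacoinPrecision.Mul64(5).Div64(tbMonth)  // roughly 5 SC per TB-month
		newMinCollateral := types.SiacoinPrecision.Div64(5).Div64(tbMonth)  // roughly 0.2 SC per TB-month
		oldMinTotalPrice := types.SiacoinPrecision.Mul64(25).Div64(tbMonth) // roughly 25 SC per TB-month
		newMinTotalPrice := types.SiacoinPrecision.Mul64(1).Div64(tbMonth)  // roughly 1 SC per TB-month

		fmt.Println(oldMinCollateral, newMinCollateral, oldMinTotalPrice, newMinTotalPrice)
	}
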
--- modules/renter/hostdb/hostweight.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/renter/hostdb/hostweight.go b/modules/renter/hostdb/hostweight.go index 45f47589fa..c9ef79431f 100644 --- a/modules/renter/hostdb/hostweight.go +++ b/modules/renter/hostdb/hostweight.go @@ -21,7 +21,7 @@ var ( // minCollateral is the amount of collateral we weight all hosts as having, // even if they do not have any collateral. This is to temporarily prop up // weak / cheap hosts on the network while the network is bootstrapping. - minCollateral = types.SiacoinPrecision.Mul64(5).Div64(tbMonth) + minCollateral = types.SiacoinPrecision.Div64(5).Div64(tbMonth) // Set a minimum price, below which setting lower prices will no longer put // this host at an advatnage. This price is considered the bar for @@ -30,7 +30,7 @@ var ( // // NOTE: This needs to be intelligently adjusted down as the practical price // of storage changes, and as the price of the siacoin changes. - minTotalPrice = types.SiacoinPrecision.Mul64(25).Div64(tbMonth) + minTotalPrice = types.SiacoinPrecision.Mul64(1).Div64(tbMonth) // priceDiveNormalization reduces the raw value of the price so that not so // many digits are needed when operating on the weight. This also allows the From 11828d3b20af9e768f7fbba65ee91f90b27d5639 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Sun, 6 May 2018 19:26:04 -0400 Subject: [PATCH 182/212] add locking --- modules/renter/files.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/renter/files.go b/modules/renter/files.go index 6f264cc0b6..5fb1b6c7b4 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -264,9 +264,11 @@ func (r *Renter) FileList() []modules.FileInfo { lockID := r.mu.RLock() for _, f := range r.files { files = append(files, f) + f.mu.RLock() for cid := range f.contracts { contractIDs[cid] = struct{}{} } + f.mu.RUnlock() } r.mu.RUnlock(lockID) From 0e1e21577c6a55f318663745ea0aa8dc453ac979 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 7 May 2018 11:23:34 -0400 Subject: [PATCH 183/212] fix race --- modules/renter/contractor/contracts.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 9490a66450..248885562b 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -333,7 +333,7 @@ func (c *Contractor) threadedContractMaintenance() { // Check if the contract is expiring. The funds in the contract are // handled differently based on this information. - if c.blockHeight+allowance.RenewWindow >= contract.EndHeight { + if blockHeight+allowance.RenewWindow >= contract.EndHeight { // The contract is expiring. Some of the funds are locked down // to renew the contract, and then the remaining funds can be // allocated to 'availableFunds'. 
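
This race fix follows the same pattern as the earlier staticContracts and managedContractUtility changes: snapshot the shared Contractor fields once under the read lock, release it, and work only with the local copies afterwards. A minimal sketch of that pattern, using made-up names rather than the real Contractor type:

	package main

	import (
		"fmt"
		"sync"
	)

	// contractor is a stand-in type; only the locking pattern is meant to be
	// representative of the fix above.
	type contractor struct {
		mu          sync.RWMutex
		blockHeight uint64
		renewWindow uint64
	}

	// needsRenew copies the shared fields under the read lock and then uses
	// only the copies, so nothing reads c.blockHeight unsynchronized.
	func (c *contractor) needsRenew(endHeight uint64) bool {
		c.mu.RLock()
		blockHeight := c.blockHeight
		renewWindow := c.renewWindow
		c.mu.RUnlock()

		return blockHeight+renewWindow >= endHeight
	}

	func main() {
		c := &contractor{blockHeight: 150000, renewWindow: 1000}
		fmt.Println(c.needsRenew(150500))
	}
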
From aa550e828eab4fc0010bce4cf30df4b64e773579 Mon Sep 17 00:00:00 2001
From: David Vorick
Date: Mon, 7 May 2018 11:39:54 -0400
Subject: [PATCH 184/212] turn flaky test into a vlong test

---
 modules/host/storageobligations_smoke_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/host/storageobligations_smoke_test.go b/modules/host/storageobligations_smoke_test.go
index 217561aa46..72de17a701 100644
--- a/modules/host/storageobligations_smoke_test.go
+++ b/modules/host/storageobligations_smoke_test.go
@@ -587,7 +587,7 @@ func TestMultiSectorStorageObligationStack(t *testing.T) {
 // TestAutoRevisionSubmission checks that the host correctly submits a file
 // contract revision to the consensus set.
 func TestAutoRevisionSubmission(t *testing.T) {
-	if testing.Short() {
+	if testing.Short() || !build.VLONG {
 		t.SkipNow()
 	}
 	t.Parallel()

From ab26844222222688b510537d14767af29c050c5f Mon Sep 17 00:00:00 2001
From: David Vorick
Date: Mon, 7 May 2018 11:44:29 -0400
Subject: [PATCH 185/212] add information to ndf with appveyor

---
 modules/renter/contractor/contractor_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go
index a0b10b2ae8..bd64d42bfc 100644
--- a/modules/renter/contractor/contractor_test.go
+++ b/modules/renter/contractor/contractor_test.go
@@ -370,7 +370,7 @@ func TestAllowanceSpending(t *testing.T) {
 	}

 	if newReportedSpending.Unspent.Cmp(reportedSpending.Unspent) <= 0 {
-		t.Fatal("expected newReportedSpending to have more unspent")
+		t.Fatal("expected newReportedSpending to have more unspent", newReportedSpending, reportedSpending)
 	}
 }

From 753a42eba5ecd41da38f2f3a69d56c23719e9e9d Mon Sep 17 00:00:00 2001
From: Christopher Schinnerl
Date: Mon, 7 May 2018 15:39:06 -0400
Subject: [PATCH 186/212] fix race in contractor

---
 modules/renter/contractor/contracts.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go
index 37aaedbd3e..71b80f7173 100644
--- a/modules/renter/contractor/contracts.go
+++ b/modules/renter/contractor/contracts.go
@@ -426,7 +426,7 @@ func (c *Contractor) threadedContractMaintenance() {
 			continue
 		}

-		blockBytes := types.NewCurrency64(modules.SectorSize * uint64(contract.EndHeight-c.blockHeight))
+		blockBytes := types.NewCurrency64(modules.SectorSize * uint64(contract.EndHeight-blockHeight))
 		sectorStoragePrice := host.StoragePrice.Mul(blockBytes)
 		sectorBandwidthPrice := host.UploadBandwidthPrice.Mul64(modules.SectorSize)
 		sectorPrice := sectorStoragePrice.Add(sectorBandwidthPrice)
From 06e15fb120628e142cb64fd5efd6999ba5ef3d39 Mon Sep 17 00:00:00 2001
From: Christopher Schinnerl
Date: Sun, 18 Feb 2018 21:29:08 -0500
Subject: [PATCH 187/212] Simplify download handler and remove hardcoded sleep

---
 modules/renter.go          |   4 +
 modules/renter/download.go | 182 ++++++++++++++++++++-----------------
 node/api/renter.go         |  29 ++----
 3 files changed, 110 insertions(+), 105 deletions(-)

diff --git a/modules/renter.go b/modules/renter.go
index 670fee335e..00dd2acd08 100644
--- a/modules/renter.go
+++ b/modules/renter.go
@@ -315,6 +315,10 @@ type Renter interface {
 	// downloads of `offset` and `length` type.
 	Download(params RenterDownloadParameters) error

+	// DownloadAsync performs a download according to the parameters passed without
+	// blocking, including downloads of `offset` and `length` type.
+ DownloadAsync(params RenterDownloadParameters) error + // DownloadHistory lists all the files that have been scheduled for download. DownloadHistory() []DownloadInfo diff --git a/modules/renter/download.go b/modules/renter/download.go index 922a02aefd..b3ca060d7f 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -233,6 +233,105 @@ func (d *download) Err() (err error) { return err } +// Download performs a file download using the passed parameters and blocks +// until the download is finished. +func (r *Renter) Download(p modules.RenterDownloadParameters) error { + return r.managedDownload(p, false) +} + +// DownloadAsync performs a file download using the passed parameters without +// blocking until the download is finished. +func (r *Renter) DownloadAsync(p modules.RenterDownloadParameters) error { + return r.managedDownload(p, true) +} + +// managedDownload performs a file download using the passed parameters. +func (r *Renter) managedDownload(p modules.RenterDownloadParameters, async bool) error { + // Lookup the file associated with the nickname. + lockID := r.mu.RLock() + file, exists := r.files[p.SiaPath] + r.mu.RUnlock(lockID) + if !exists { + return fmt.Errorf("no file with that path: %s", p.SiaPath) + } + + // Validate download parameters. + isHTTPResp := p.Httpwriter != nil + if p.Async && isHTTPResp { + return errors.New("cannot async download to http response") + } + if isHTTPResp && p.Destination != "" { + return errors.New("destination cannot be specified when downloading to http response") + } + if !isHTTPResp && p.Destination == "" { + return errors.New("destination not supplied") + } + if p.Destination != "" && !filepath.IsAbs(p.Destination) { + return errors.New("destination must be an absolute path") + } + if p.Offset == file.size { + return errors.New("offset equals filesize") + } + // Sentinel: if length == 0, download the entire file. + if p.Length == 0 { + p.Length = file.size - p.Offset + } + // Check whether offset and length is valid. + if p.Offset < 0 || p.Offset+p.Length > file.size { + return fmt.Errorf("offset and length combination invalid, max byte is at index %d", file.size-1) + } + + // Instantiate the correct downloadWriter implementation. + var dw downloadDestination + var destinationType string + if isHTTPResp { + dw = newDownloadDestinationWriteCloserFromWriter(p.Httpwriter) + destinationType = "http stream" + } else { + osFile, err := os.OpenFile(p.Destination, os.O_CREATE|os.O_WRONLY, os.FileMode(file.mode)) + if err != nil { + return err + } + dw = osFile + destinationType = "file" + } + + // Create the download object. + d, err := r.newDownload(downloadParams{ + destination: dw, + destinationType: destinationType, + destinationString: p.Destination, + file: file, + + latencyTarget: 25e3 * time.Millisecond, // TODO: high default until full latency support is added. + length: p.Length, + needsMemory: true, + offset: p.Offset, + overdrive: 3, // TODO: moderate default until full overdrive support is added. + priority: 5, // TODO: moderate default until full priority support is added. + }) + if err != nil { + return err + } + + // Add the download object to the download queue. + r.downloadHistoryMu.Lock() + r.downloadHistory = append(r.downloadHistory, d) + r.downloadHistoryMu.Unlock() + + // If async is true we can return right away. + if async { + return nil + } + // Otherwise we block until the download has completed. 
+ select { + case <-d.completeChan: + return d.Err() + case <-r.tg.StopChan(): + return errors.New("download interrupted by shutdown") + } +} + // newDownload creates and initializes a download based on the provided // parameters. func (r *Renter) newDownload(params downloadParams) (*download, error) { @@ -371,89 +470,6 @@ func (r *Renter) newDownload(params downloadParams) (*download, error) { return d, nil } -// Download performs a file download using the passed parameters. -func (r *Renter) Download(p modules.RenterDownloadParameters) error { - // Lookup the file associated with the nickname. - lockID := r.mu.RLock() - file, exists := r.files[p.SiaPath] - r.mu.RUnlock(lockID) - if !exists { - return fmt.Errorf("no file with that path: %s", p.SiaPath) - } - - // Validate download parameters. - isHTTPResp := p.Httpwriter != nil - if p.Async && isHTTPResp { - return errors.New("cannot async download to http response") - } - if isHTTPResp && p.Destination != "" { - return errors.New("destination cannot be specified when downloading to http response") - } - if !isHTTPResp && p.Destination == "" { - return errors.New("destination not supplied") - } - if p.Destination != "" && !filepath.IsAbs(p.Destination) { - return errors.New("destination must be an absolute path") - } - if p.Offset == file.size { - return errors.New("offset equals filesize") - } - // Sentinel: if length == 0, download the entire file. - if p.Length == 0 { - p.Length = file.size - p.Offset - } - // Check whether offset and length is valid. - if p.Offset < 0 || p.Offset+p.Length > file.size { - return fmt.Errorf("offset and length combination invalid, max byte is at index %d", file.size-1) - } - - // Instantiate the correct downloadWriter implementation. - var dw downloadDestination - var destinationType string - if isHTTPResp { - dw = newDownloadDestinationWriteCloserFromWriter(p.Httpwriter) - destinationType = "http stream" - } else { - osFile, err := os.OpenFile(p.Destination, os.O_CREATE|os.O_WRONLY, os.FileMode(file.mode)) - if err != nil { - return err - } - dw = osFile - destinationType = "file" - } - - // Create the download object. - d, err := r.newDownload(downloadParams{ - destination: dw, - destinationType: destinationType, - destinationString: p.Destination, - file: file, - - latencyTarget: 25e3 * time.Millisecond, // TODO: high default until full latency support is added. - length: p.Length, - needsMemory: true, - offset: p.Offset, - overdrive: 3, // TODO: moderate default until full overdrive support is added. - priority: 5, // TODO: moderate default until full priority support is added. - }) - if err != nil { - return err - } - - // Add the download object to the download queue. - r.downloadHistoryMu.Lock() - r.downloadHistory = append(r.downloadHistory, d) - r.downloadHistoryMu.Unlock() - - // Block until the download has completed. - select { - case <-d.completeChan: - return d.Err() - case <-r.tg.StopChan(): - return errors.New("download interrupted by shutdown") - } -} - // DownloadHistory returns the list of downloads that have been performed. Will // include downloads that have not yet completed. Downloads will be roughly, but // not precisely, sorted according to start time. 
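
A minimal sketch of how a caller uses the split entry points after this change. The renter value, sia path, and destination below are assumptions for illustration, not taken from the patch:

	package renterexample

	import (
		"log"

		"github.com/NebulousLabs/Sia/modules"
		"github.com/NebulousLabs/Sia/modules/renter"
	)

	func fetch(r *renter.Renter) {
		params := modules.RenterDownloadParameters{
			SiaPath:     "photos/vacation", // hypothetical sia path
			Destination: "/tmp/vacation",   // must be an absolute path
		}

		// Download blocks until the data has been written out or the renter
		// shuts down.
		if err := r.Download(params); err != nil {
			log.Fatal(err)
		}

		// DownloadAsync only queues the download; progress and completion are
		// observed through r.DownloadHistory().
		if err := r.DownloadAsync(params); err != nil {
			log.Fatal(err)
		}
	}
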
diff --git a/node/api/renter.go b/node/api/renter.go index c3b60715ca..79475c4394 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -407,30 +407,15 @@ func (api *API) renterDownloadHandler(w http.ResponseWriter, req *http.Request, WriteError(w, Error{err.Error()}, http.StatusBadRequest) return } - - if params.Async { // Create goroutine if `async` param set. - // check for errors for 5 seconds to catch validation errors (no file with - // that path, invalid parameters, insufficient hosts, etc) - errchan := make(chan error) - go func() { - errchan <- api.renter.Download(params) - }() - select { - case err = <-errchan: - if err != nil { - WriteError(w, Error{"download failed: " + err.Error()}, http.StatusInternalServerError) - return - } - case <-time.After(time.Millisecond * 100): - } + if params.Async { + err = api.renter.DownloadAsync(params) } else { - err := api.renter.Download(params) - if err != nil { - WriteError(w, Error{"download failed: " + err.Error()}, http.StatusInternalServerError) - return - } + err = api.renter.Download(params) + } + if err != nil { + WriteError(w, Error{"download failed: " + err.Error()}, http.StatusInternalServerError) + return } - if params.Httpwriter == nil { // `httpresp=true` causes writes to w before this line is run, automatically // adding `200 Status OK` code to response. Calling this results in a From 14c9a2b9e6c142c82b8321be843a733ad49cebc9 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 7 Mar 2018 11:56:34 -0500 Subject: [PATCH 188/212] move select to Download --- modules/renter/download.go | 52 ++++++++++++++++++++------------------ 1 file changed, 28 insertions(+), 24 deletions(-) diff --git a/modules/renter/download.go b/modules/renter/download.go index b3ca060d7f..5edd2d2637 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -236,41 +236,54 @@ func (d *download) Err() (err error) { // Download performs a file download using the passed parameters and blocks // until the download is finished. func (r *Renter) Download(p modules.RenterDownloadParameters) error { - return r.managedDownload(p, false) + d, err := r.managedDownload(p) + if err != nil { + return err + } + // Block until the download has completed + select { + case <-d.completeChan: + return d.Err() + case <-r.tg.StopChan(): + return errors.New("download interrupted by shutdown") + } } // DownloadAsync performs a file download using the passed parameters without // blocking until the download is finished. func (r *Renter) DownloadAsync(p modules.RenterDownloadParameters) error { - return r.managedDownload(p, true) + _, err := r.managedDownload(p) + return err } -// managedDownload performs a file download using the passed parameters. -func (r *Renter) managedDownload(p modules.RenterDownloadParameters, async bool) error { +// managedDownload performs a file download using the passed parameters and +// returns the download object and an error that indicates if the download +// setup was successful. +func (r *Renter) managedDownload(p modules.RenterDownloadParameters) (*download, error) { // Lookup the file associated with the nickname. lockID := r.mu.RLock() file, exists := r.files[p.SiaPath] r.mu.RUnlock(lockID) if !exists { - return fmt.Errorf("no file with that path: %s", p.SiaPath) + return nil, fmt.Errorf("no file with that path: %s", p.SiaPath) } // Validate download parameters. 
isHTTPResp := p.Httpwriter != nil if p.Async && isHTTPResp { - return errors.New("cannot async download to http response") + return nil, errors.New("cannot async download to http response") } if isHTTPResp && p.Destination != "" { - return errors.New("destination cannot be specified when downloading to http response") + return nil, errors.New("destination cannot be specified when downloading to http response") } if !isHTTPResp && p.Destination == "" { - return errors.New("destination not supplied") + return nil, errors.New("destination not supplied") } if p.Destination != "" && !filepath.IsAbs(p.Destination) { - return errors.New("destination must be an absolute path") + return nil, errors.New("destination must be an absolute path") } if p.Offset == file.size { - return errors.New("offset equals filesize") + return nil, errors.New("offset equals filesize") } // Sentinel: if length == 0, download the entire file. if p.Length == 0 { @@ -278,7 +291,7 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters, async bool) } // Check whether offset and length is valid. if p.Offset < 0 || p.Offset+p.Length > file.size { - return fmt.Errorf("offset and length combination invalid, max byte is at index %d", file.size-1) + return nil, fmt.Errorf("offset and length combination invalid, max byte is at index %d", file.size-1) } // Instantiate the correct downloadWriter implementation. @@ -290,7 +303,7 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters, async bool) } else { osFile, err := os.OpenFile(p.Destination, os.O_CREATE|os.O_WRONLY, os.FileMode(file.mode)) if err != nil { - return err + return nil, err } dw = osFile destinationType = "file" @@ -311,7 +324,7 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters, async bool) priority: 5, // TODO: moderate default until full priority support is added. }) if err != nil { - return err + return nil, err } // Add the download object to the download queue. @@ -319,17 +332,8 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters, async bool) r.downloadHistory = append(r.downloadHistory, d) r.downloadHistoryMu.Unlock() - // If async is true we can return right away. - if async { - return nil - } - // Otherwise we block until the download has completed. - select { - case <-d.completeChan: - return d.Err() - case <-r.tg.StopChan(): - return errors.New("download interrupted by shutdown") - } + // Return the download object + return d, nil } // newDownload creates and initializes a download based on the provided From ed1823363028ee61fdff9ab6027ad8d2cad4efeb Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 7 May 2018 15:21:34 -0400 Subject: [PATCH 189/212] rename newDownload to managedNewDownload --- modules/renter/download.go | 6 +++--- modules/renter/downloadstreamer.go | 2 +- modules/renter/uploadchunk.go | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/renter/download.go b/modules/renter/download.go index 5edd2d2637..0c9a6f1ae4 100644 --- a/modules/renter/download.go +++ b/modules/renter/download.go @@ -310,7 +310,7 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters) (*download, } // Create the download object. 
- d, err := r.newDownload(downloadParams{ + d, err := r.managedNewDownload(downloadParams{ destination: dw, destinationType: destinationType, destinationString: p.Destination, @@ -336,9 +336,9 @@ func (r *Renter) managedDownload(p modules.RenterDownloadParameters) (*download, return d, nil } -// newDownload creates and initializes a download based on the provided +// managedNewDownload creates and initializes a download based on the provided // parameters. -func (r *Renter) newDownload(params downloadParams) (*download, error) { +func (r *Renter) managedNewDownload(params downloadParams) (*download, error) { // Input validation. if params.file == nil { return nil, errors.New("no file provided when requesting download") diff --git a/modules/renter/downloadstreamer.go b/modules/renter/downloadstreamer.go index 9789ea5932..75fb96963f 100644 --- a/modules/renter/downloadstreamer.go +++ b/modules/renter/downloadstreamer.go @@ -73,7 +73,7 @@ func (s *streamer) Read(p []byte) (n int, err error) { // Download data buffer := bytes.NewBuffer([]byte{}) - d, err := s.r.newDownload(downloadParams{ + d, err := s.r.managedNewDownload(downloadParams{ destination: newDownloadDestinationWriteCloserFromWriter(buffer), destinationType: destinationTypeSeekStream, destinationString: "httpresponse", diff --git a/modules/renter/uploadchunk.go b/modules/renter/uploadchunk.go index ed44d8e0d1..abf7149c4c 100644 --- a/modules/renter/uploadchunk.go +++ b/modules/renter/uploadchunk.go @@ -135,7 +135,7 @@ func (r *Renter) managedDownloadLogicalChunkData(chunk *unfinishedUploadChunk) e // Create the download. buf := downloadDestinationBuffer(make([]byte, chunk.length)) - d, err := r.newDownload(downloadParams{ + d, err := r.managedNewDownload(downloadParams{ destination: buf, destinationType: "buffer", file: chunk.renterFile, From bada92004fe096466dd4fb97bfd3db3334d3872f Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 8 May 2018 10:40:16 -0400 Subject: [PATCH 190/212] Create /renter/file/*siapath route and corresponding renterFileHandler and GetFile() for API --- modules/renter.go | 3 +++ modules/renter/files.go | 14 ++++++++++++++ node/api/renter.go | 7 +++++++ node/api/routes.go | 1 + 4 files changed, 25 insertions(+) diff --git a/modules/renter.go b/modules/renter.go index 00dd2acd08..672d6ac803 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -325,6 +325,9 @@ type Renter interface { // FileList returns information on all of the files stored by the renter. FileList() []FileInfo + // GetFile returns information on specific file requested by user + GetFile() File + // Host provides the DB entry and score breakdown for the requested host. Host(pk types.SiaPublicKey) (HostDBEntry, bool) diff --git a/modules/renter/files.go b/modules/renter/files.go index 5fb1b6c7b4..40359d1d69 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -311,6 +311,20 @@ func (r *Renter) FileList() []modules.FileInfo { return fileList } +// GetFile returns file from siaPath provided by user. +func (r *Renter) GetFile(siaPath string) *file { + lockID := r.mu.Lock() + defer r.mu.Unlock(lockID) + + // Check that currentName exists and newName doesn't. + file, exists := r.files[siaPath] + if !exists { + return nil + } + + return file +} + // RenameFile takes an existing file and changes the nickname. The original // file must exist, and there must not be any file that already has the // replacement nickname. 
diff --git a/node/api/renter.go b/node/api/renter.go index 79475c4394..10c7488696 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -373,6 +373,13 @@ func (api *API) renterRenameHandler(w http.ResponseWriter, req *http.Request, ps WriteSuccess(w) } +// renterFileHandler handles the API call to return specific file. +func (api *API) renterFileHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { + WriteJSON(w, RenterFiles{ + Files: api.renter.GetFile(strings.TrimPrefix(ps.ByName("siapath"), "/"), + }) +} + // renterFilesHandler handles the API call to list all of the files. func (api *API) renterFilesHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { WriteJSON(w, RenterFiles{ diff --git a/node/api/routes.go b/node/api/routes.go index 1d868c45f2..3db2408801 100644 --- a/node/api/routes.go +++ b/node/api/routes.go @@ -72,6 +72,7 @@ func (api *API) buildHTTPRoutes(requiredUserAgent string, requiredPassword strin router.GET("/renter/contracts", api.renterContractsHandler) router.GET("/renter/downloads", api.renterDownloadsHandler) router.GET("/renter/files", api.renterFilesHandler) + router.GET("/renter/file/*siapath", api.renterFileHandler) router.GET("/renter/prices", api.renterPricesHandler) // TODO: re-enable these routes once the new .sia format has been From 6ca84891eefcd4ca0483b44d1b8fdb88ed5d24da Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 8 May 2018 10:59:30 -0400 Subject: [PATCH 191/212] Create RenterFileGet() and RenterFile struct for API call --- modules/renter.go | 2 +- modules/renter/files.go | 2 +- node/api/client/renter.go | 7 +++++++ node/api/renter.go | 7 ++++++- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/modules/renter.go b/modules/renter.go index 672d6ac803..fa9035a012 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -325,7 +325,7 @@ type Renter interface { // FileList returns information on all of the files stored by the renter. FileList() []FileInfo - // GetFile returns information on specific file requested by user + // GetFile returns information on specific file queried by user GetFile() File // Host provides the DB entry and score breakdown for the requested host. diff --git a/modules/renter/files.go b/modules/renter/files.go index 40359d1d69..d1b2ca5853 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -311,7 +311,7 @@ func (r *Renter) FileList() []modules.FileInfo { return fileList } -// GetFile returns file from siaPath provided by user. +// GetFile returns file from siaPath queried by user. func (r *Renter) GetFile(siaPath string) *file { lockID := r.mu.Lock() defer r.mu.Unlock(lockID) diff --git a/node/api/client/renter.go b/node/api/client/renter.go index 3964ffb27f..4f25385344 100644 --- a/node/api/client/renter.go +++ b/node/api/client/renter.go @@ -58,6 +58,13 @@ func (c *Client) RenterDownloadHTTPResponseGet(siaPath string, offset, length ui return } +// RenterFileGet requests the /renter/files resource. +func (c *Client) RenterFileGet(siaPath string) (rf api.RenterFile, err error) { + siaPath = strings.TrimPrefix(siaPath, "/") + err = c.get("/renter/files/"+siaPath, &rf) + return +} + // RenterFilesGet requests the /renter/files resource. 
func (c *Client) RenterFilesGet() (rf api.RenterFiles, err error) { err = c.get("/renter/files", &rf) diff --git a/node/api/renter.go b/node/api/renter.go index 10c7488696..a7f24bc57f 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -123,6 +123,11 @@ type ( Downloads []DownloadInfo `json:"downloads"` } + // RenterFile lists the file queried. + RenterFile struct { + File *file `json:"file"` + } + // RenterFiles lists the files known to the renter. RenterFiles struct { Files []modules.FileInfo `json:"files"` @@ -375,7 +380,7 @@ func (api *API) renterRenameHandler(w http.ResponseWriter, req *http.Request, ps // renterFileHandler handles the API call to return specific file. func (api *API) renterFileHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - WriteJSON(w, RenterFiles{ + WriteJSON(w, RenterFile{ Files: api.renter.GetFile(strings.TrimPrefix(ps.ByName("siapath"), "/"), }) } From a6708b5532f2cc50b2d66d72e777e8e52e55bd95 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 7 May 2018 10:41:36 -0400 Subject: [PATCH 192/212] Add tg.Add to exported wallet methods --- modules/wallet/money.go | 15 +++++++++++++++ modules/wallet/wallet.go | 20 ++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/modules/wallet/money.go b/modules/wallet/money.go index e2259c826f..3ec4544ba1 100644 --- a/modules/wallet/money.go +++ b/modules/wallet/money.go @@ -18,6 +18,11 @@ type sortedOutputs struct { // DustThreshold returns the quantity per byte below which a Currency is // considered to be Dust. func (w *Wallet) DustThreshold() types.Currency { + if err := w.tg.Add(); err != nil { + return types.Currency{} + } + defer w.tg.Done() + minFee, _ := w.tpool.FeeEstimation() return minFee.Mul64(3) } @@ -25,6 +30,11 @@ func (w *Wallet) DustThreshold() types.Currency { // ConfirmedBalance returns the balance of the wallet according to all of the // confirmed transactions. func (w *Wallet) ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance types.Currency, siafundClaimBalance types.Currency) { + if err := w.tg.Add(); err != nil { + return + } + defer w.tg.Done() + // dustThreshold has to be obtained separate from the lock dustThreshold := w.DustThreshold() @@ -61,6 +71,11 @@ func (w *Wallet) ConfirmedBalance() (siacoinBalance types.Currency, siafundBalan // the unconfirmed transaction set. Refund outputs are included in this // reporting. func (w *Wallet) UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency) { + if err := w.tg.Add(); err != nil { + return + } + defer w.tg.Done() + // dustThreshold has to be obtained separate from the lock dustThreshold := w.DustThreshold() diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index f16368d0fb..932cb177ae 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -114,6 +114,11 @@ type Wallet struct { // Height return the internal processed consensus height of the wallet func (w *Wallet) Height() types.BlockHeight { + if err := w.tg.Add(); err != nil { + return types.BlockHeight(0) + } + defer w.tg.Done() + w.mu.Lock() defer w.mu.Unlock() @@ -223,6 +228,11 @@ func (w *Wallet) Close() error { // AllAddresses returns all addresses that the wallet is able to spend from, // including unseeded addresses. Addresses are returned sorted in byte-order. 
func (w *Wallet) AllAddresses() []types.UnlockHash { + if err := w.tg.Add(); err != nil { + return []types.UnlockHash{} + } + defer w.tg.Done() + w.mu.RLock() defer w.mu.RUnlock() @@ -239,6 +249,11 @@ func (w *Wallet) AllAddresses() []types.UnlockHash { // Rescanning reports whether the wallet is currently rescanning the // blockchain. func (w *Wallet) Rescanning() bool { + if err := w.tg.Add(); err != nil { + return false + } + defer w.tg.Done() + rescanning := !w.scanLock.TryLock() if !rescanning { w.scanLock.Unlock() @@ -255,6 +270,11 @@ func (w *Wallet) Settings() modules.WalletSettings { // SetSettings will update the settings for the wallet. func (w *Wallet) SetSettings(s modules.WalletSettings) { + if err := w.tg.Add(); err != nil { + return + } + defer w.tg.Done() + w.mu.Lock() w.defragDisabled = s.NoDefrag w.mu.Unlock() From b9beb786fae77948b1004c92c4d4d6f4ab00a5b9 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 7 May 2018 14:47:37 -0400 Subject: [PATCH 193/212] slowly descending into madness + adding error return value to exported wallet methods --- modules/consensus/accept_bench_test.go | 5 +- modules/consensus/accept_test.go | 20 +++- modules/consensus/accept_txntypes_test.go | 45 ++++++-- modules/consensus/consensusset_test.go | 5 +- modules/consensus/validtransaction_test.go | 10 +- modules/explorer/info_test.go | 15 ++- modules/explorer/update_test.go | 10 +- modules/host/announce.go | 25 ++-- modules/host/announce_test.go | 6 +- modules/host/host.go | 5 +- modules/host/negotiateformcontract.go | 11 +- modules/host/negotiaterenewcontract.go | 11 +- modules/host/storageobligations.go | 21 +++- modules/host/storageobligations_smoke_test.go | 7 +- modules/host/update_test.go | 5 +- modules/miner/blockmanager.go | 8 +- modules/miner/miner.go | 5 +- modules/miner/miner_test.go | 10 +- modules/miner/testminer.go | 14 ++- modules/renter/contractor/contractor_test.go | 13 ++- modules/renter/contractor/contracts.go | 10 +- modules/renter/contractor/dependencies.go | 6 +- .../contractor/host_integration_test.go | 12 +- modules/renter/contractor/negotiate_test.go | 10 +- modules/transactionpool/accept_test.go | 40 +++++-- .../transactionpool/transactionpool_test.go | 5 +- modules/wallet.go | 36 +++--- modules/wallet/defrag.go | 5 +- modules/wallet/defrag_test.go | 11 +- modules/wallet/encrypt.go | 17 ++- modules/wallet/encrypt_test.go | 88 +++++++++++--- modules/wallet/money.go | 44 ++++--- modules/wallet/money_test.go | 30 ++++- modules/wallet/seed.go | 8 +- modules/wallet/seed_test.go | 65 ++++++++--- modules/wallet/transactionbuilder.go | 17 ++- modules/wallet/transactionbuilder_test.go | 107 ++++++++++++++---- modules/wallet/transactions.go | 31 +++-- modules/wallet/transactions_test.go | 75 +++++++++--- modules/wallet/unseeded_test.go | 20 +++- modules/wallet/update_test.go | 20 +++- modules/wallet/wallet.go | 38 ++++--- modules/wallet/wallet_test.go | 73 +++++++++--- node/api/hostdb_test.go | 6 +- node/api/server_helpers_test.go | 12 +- node/api/wallet.go | 80 ++++++++++--- node/api/wallet_test.go | 79 ++++++++++--- 47 files changed, 931 insertions(+), 265 deletions(-) diff --git a/modules/consensus/accept_bench_test.go b/modules/consensus/accept_bench_test.go index 67ff63fbaa..7849dfcf67 100644 --- a/modules/consensus/accept_bench_test.go +++ b/modules/consensus/accept_bench_test.go @@ -129,7 +129,10 @@ func BenchmarkAcceptSmallBlocks(b *testing.B) { for j := 0; j < b.N; j++ { // Create a transaction with a miner fee, a normal siacoin output, and // a funded file 
contract. - txnBuilder := cst.wallet.StartTransaction() + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + b.Fatal(err) + } err = txnBuilder.FundSiacoins(types.NewCurrency64(125e6)) if err != nil { b.Fatal(err) diff --git a/modules/consensus/accept_test.go b/modules/consensus/accept_test.go index 2b215c449e..67f9a67b97 100644 --- a/modules/consensus/accept_test.go +++ b/modules/consensus/accept_test.go @@ -504,7 +504,10 @@ func TestIntegrationDoSBlockHandling(t *testing.T) { // Mine a block that is valid except for containing a buried invalid // transaction. The transaction has more siacoin inputs than outputs. - txnBuilder := cst.wallet.StartTransaction() + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(types.NewCurrency64(50)) if err != nil { t.Fatal(err) @@ -794,7 +797,10 @@ func TestBuriedBadTransaction(t *testing.T) { // Create a good transaction using the wallet. txnValue := types.NewCurrency64(1200) - txnBuilder := cst.wallet.StartTransaction() + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(txnValue) if err != nil { t.Fatal(err) @@ -890,7 +896,10 @@ func TestTaxHardfork(t *testing.T) { } // Create and fund a transaction with a file contract. - txnBuilder := cst.wallet.StartTransaction() + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(payout) if err != nil { t.Fatal(err) @@ -937,7 +946,10 @@ func TestTaxHardfork(t *testing.T) { NewValidProofOutputs: fc.ValidProofOutputs, NewMissedProofOutputs: fc.MissedProofOutputs, } - txnBuilder = cst.wallet.StartTransaction() + txnBuilder, err = cst.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } txnBuilder.AddFileContractRevision(fcr) txnSet, err = txnBuilder.Sign(true) if err != nil { diff --git a/modules/consensus/accept_txntypes_test.go b/modules/consensus/accept_txntypes_test.go index 3aa2349411..64044d5d60 100644 --- a/modules/consensus/accept_txntypes_test.go +++ b/modules/consensus/accept_txntypes_test.go @@ -107,8 +107,11 @@ func (cst *consensusSetTester) testSpendSiacoinsBlock() { // Create a block containing a transaction with a valid siacoin output. txnValue := types.NewCurrency64(1200) - txnBuilder := cst.wallet.StartTransaction() - err := txnBuilder.FundSiacoins(txnValue) + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + panic(err) + } + err = txnBuilder.FundSiacoins(txnValue) if err != nil { panic(err) } @@ -197,8 +200,11 @@ func (cst *consensusSetTester) testValidStorageProofBlocks() { // Submit a transaction with the file contract. oldSiafundPool := cst.cs.dbGetSiafundPool() - txnBuilder := cst.wallet.StartTransaction() - err := txnBuilder.FundSiacoins(payout) + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + panic(err) + } + err = txnBuilder.FundSiacoins(payout) if err != nil { panic(err) } @@ -241,7 +247,10 @@ func (cst *consensusSetTester) testValidStorageProofBlocks() { HashSet: hashSet, } copy(sp.Segment[:], segment) - txnBuilder = cst.wallet.StartTransaction() + txnBuilder, err = cst.wallet.StartTransaction() + if err != nil { + panic(err) + } txnBuilder.AddStorageProof(sp) txnSet, err = txnBuilder.Sign(true) if err != nil { @@ -322,8 +331,11 @@ func (cst *consensusSetTester) testMissedStorageProofBlocks() { // Submit a transaction with the file contract. 
oldSiafundPool := cst.cs.dbGetSiafundPool() - txnBuilder := cst.wallet.StartTransaction() - err := txnBuilder.FundSiacoins(payout) + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + panic(err) + } + err = txnBuilder.FundSiacoins(payout) if err != nil { panic(err) } @@ -452,8 +464,11 @@ func (cst *consensusSetTester) testFileContractRevision() { } // Submit a transaction with the file contract. - txnBuilder := cst.wallet.StartTransaction() - err := txnBuilder.FundSiacoins(payout) + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + panic(err) + } + err = txnBuilder.FundSiacoins(payout) if err != nil { panic(err) } @@ -518,7 +533,10 @@ func (cst *consensusSetTester) testFileContractRevision() { HashSet: hashSet, } copy(sp.Segment[:], segment) - txnBuilder = cst.wallet.StartTransaction() + txnBuilder, err = cst.wallet.StartTransaction() + if err != nil { + panic(err) + } txnBuilder.AddStorageProof(sp) txnSet, err = txnBuilder.Sign(true) if err != nil { @@ -562,8 +580,11 @@ func (cst *consensusSetTester) testSpendSiafunds() { // Create a block containing a transaction with a valid siafund output. txnValue := types.NewCurrency64(3) - txnBuilder := cst.wallet.StartTransaction() - err := txnBuilder.FundSiafunds(txnValue) + txnBuilder, err := cst.wallet.StartTransaction() + if err != nil { + panic(err) + } + err = txnBuilder.FundSiafunds(txnValue) if err != nil { panic(err) } diff --git a/modules/consensus/consensusset_test.go b/modules/consensus/consensusset_test.go index 1ac7e5e2ba..caf9f9cb1d 100644 --- a/modules/consensus/consensusset_test.go +++ b/modules/consensus/consensusset_test.go @@ -69,7 +69,10 @@ func (cst *consensusSetTester) addSiafunds() { } // Check that the siafunds made it to the wallet. - _, siafundBalance, _ := cst.wallet.ConfirmedBalance() + _, siafundBalance, _, err := cst.wallet.ConfirmedBalance() + if err != nil { + panic(err) + } if !siafundBalance.Equals64(1e3) { panic("wallet does not have the siafunds") } diff --git a/modules/consensus/validtransaction_test.go b/modules/consensus/validtransaction_test.go index ebf238eacc..d516940e54 100644 --- a/modules/consensus/validtransaction_test.go +++ b/modules/consensus/validtransaction_test.go @@ -126,7 +126,10 @@ func TestStorageProofBoundaries(t *testing.T) { // Create a transaction around the file contract and add it to the // transaction pool. - b := cst.wallet.StartTransaction() + b, err := cst.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = b.FundSiacoins(types.NewCurrency64(500)) if err != nil { t.Fatal(err) @@ -255,7 +258,10 @@ func TestEmptyStorageProof(t *testing.T) { // Create a transaction around the file contract and add it to the // transaction pool. - b := cst.wallet.StartTransaction() + b, err := cst.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = b.FundSiacoins(types.NewCurrency64(500)) if err != nil { t.Fatal(err) diff --git a/modules/explorer/info_test.go b/modules/explorer/info_test.go index 7537db6aeb..bc8714c061 100644 --- a/modules/explorer/info_test.go +++ b/modules/explorer/info_test.go @@ -87,7 +87,10 @@ func TestFileContractPayoutsMissingProof(t *testing.T) { } // Create and fund valid file contracts. 
- builder := et.wallet.StartTransaction() + builder, err := et.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } payout := types.NewCurrency64(1e9) err = builder.FundSiacoins(payout) if err != nil { @@ -189,7 +192,10 @@ func TestFileContractsPayoutValidProof(t *testing.T) { // Submit a transaction with the file contract. //oldSiafundPool := cst.cs.dbGetSiafundPool() - builder := et.wallet.StartTransaction() + builder, err := et.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = builder.FundSiacoins(payout) if err != nil { t.Fatal(err) @@ -224,7 +230,10 @@ func TestFileContractsPayoutValidProof(t *testing.T) { HashSet: hashSet, } copy(sp.Segment[:], segment) - builder = et.wallet.StartTransaction() + builder, err = et.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } builder.AddStorageProof(sp) tSet, err = builder.Sign(true) if err != nil { diff --git a/modules/explorer/update_test.go b/modules/explorer/update_test.go index 5bd9fb92de..6711e9d2dd 100644 --- a/modules/explorer/update_test.go +++ b/modules/explorer/update_test.go @@ -51,7 +51,10 @@ func TestIntegrationExplorerFileContractMetrics(t *testing.T) { // Put a file contract into the chain, and check that the explorer // correctly does all of the counting. - builder := et.wallet.StartTransaction() + builder, err := et.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } builder.FundSiacoins(types.NewCurrency64(5e9)) fcOutputs := []types.SiacoinOutput{{Value: types.NewCurrency64(4805e6)}} fc := types.FileContract{ @@ -102,7 +105,10 @@ func TestIntegrationExplorerFileContractMetrics(t *testing.T) { // Put a second file into the explorer to check that multiple files are // handled well. - builder = et.wallet.StartTransaction() + builder, err = et.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } builder.FundSiacoins(types.NewCurrency64(1e9)) fcOutputs = []types.SiacoinOutput{{Value: types.NewCurrency64(961e6)}} fc = types.FileContract{ diff --git a/modules/host/announce.go b/modules/host/announce.go index c765df9031..aea0f640c5 100644 --- a/modules/host/announce.go +++ b/modules/host/announce.go @@ -14,22 +14,26 @@ var ( // errUnknownAddress is returned if the host is unable to determine a // public address for itself to use in the announcement. - errUnknownAddress = errors.New("host cannot announce, does not seem to have a valid address.") + errUnknownAddress = errors.New("host cannot announce, does not seem to have a valid address") ) // managedAnnounce creates an announcement transaction and submits it to the network. -func (h *Host) managedAnnounce(addr modules.NetAddress) error { +func (h *Host) managedAnnounce(addr modules.NetAddress) (err error) { // The wallet needs to be unlocked to add fees to the transaction, and the // host needs to have an active unlock hash that renters can make payment // to. - if !h.wallet.Unlocked() { + unlocked, err := h.wallet.Unlocked() + if err != nil { + return err + } + if !unlocked { return errAnnWalletLocked } h.mu.Lock() pubKey := h.publicKey secKey := h.secretKey - err := h.checkUnlockHash() + err = h.checkUnlockHash() h.mu.Unlock() if err != nil { return err @@ -43,26 +47,31 @@ func (h *Host) managedAnnounce(addr modules.NetAddress) error { } // Create a transaction, with a fee, that contains the full announcement. 
- txnBuilder := h.wallet.StartTransaction() + txnBuilder, err := h.wallet.StartTransaction() + if err != nil { + return err + } + defer func() { + if err != nil { + txnBuilder.Drop() + } + }() _, fee := h.tpool.FeeEstimation() fee = fee.Mul64(600) // Estimated txn size (in bytes) of a host announcement. err = txnBuilder.FundSiacoins(fee) if err != nil { - txnBuilder.Drop() return err } _ = txnBuilder.AddMinerFee(fee) _ = txnBuilder.AddArbitraryData(signedAnnouncement) txnSet, err := txnBuilder.Sign(true) if err != nil { - txnBuilder.Drop() return err } // Add the transactions to the transaction pool. err = h.tpool.AcceptTransactionSet(txnSet) if err != nil { - txnBuilder.Drop() return err } diff --git a/modules/host/announce_test.go b/modules/host/announce_test.go index 2f94479084..23656e0134 100644 --- a/modules/host/announce_test.go +++ b/modules/host/announce_test.go @@ -183,7 +183,11 @@ func TestHostAnnounceCheckUnlockHash(t *testing.T) { t.Fatal("host did not set a new unlock hash after announce with reset wallet") } hasAddr := false - for _, addr := range ht.wallet.AllAddresses() { + addrs, err := ht.wallet.AllAddresses() + if err != nil { + t.Fatal(err) + } + for _, addr := range addrs { if addr == newUnlockHash { hasAddr = true break diff --git a/modules/host/host.go b/modules/host/host.go index c58bea406a..ef70b9e861 100644 --- a/modules/host/host.go +++ b/modules/host/host.go @@ -180,7 +180,10 @@ type Host struct { // from the wallet. That may fail due to the wallet being locked, in which case // an error is returned. func (h *Host) checkUnlockHash() error { - addrs := h.wallet.AllAddresses() + addrs, err := h.wallet.AllAddresses() + if err != nil { + return err + } hasAddr := false for _, addr := range addrs { if h.unlockHash == addr { diff --git a/modules/host/negotiateformcontract.go b/modules/host/negotiateformcontract.go index 133d7d8005..25fa196643 100644 --- a/modules/host/negotiateformcontract.go +++ b/modules/host/negotiateformcontract.go @@ -38,10 +38,17 @@ func (h *Host) managedAddCollateral(settings modules.HostExternalSettings, txnSe parents := txnSet[:len(txnSet)-1] fc := txn.FileContracts[0] hostPortion := contractCollateral(settings, fc) - builder = h.wallet.RegisterTransaction(txn, parents) + builder, err = h.wallet.RegisterTransaction(txn, parents) + if err != nil { + return + } + defer func() { + if err != nil { + builder.Drop() + } + }() err = builder.FundSiacoins(hostPortion) if err != nil { - builder.Drop() return nil, nil, nil, nil, extendErr("could not add collateral: ", ErrorInternal(err.Error())) } diff --git a/modules/host/negotiaterenewcontract.go b/modules/host/negotiaterenewcontract.go index 25d12e3640..6bffbe8383 100644 --- a/modules/host/negotiaterenewcontract.go +++ b/modules/host/negotiaterenewcontract.go @@ -51,10 +51,17 @@ func (h *Host) managedAddRenewCollateral(so storageObligation, settings modules. 
parents := txnSet[:len(txnSet)-1] fc := txn.FileContracts[0] hostPortion := renewContractCollateral(so, settings, fc) - builder = h.wallet.RegisterTransaction(txn, parents) + builder, err = h.wallet.RegisterTransaction(txn, parents) + if err != nil { + return + } + defer func() { + if err != nil { + builder.Drop() + } + }() err = builder.FundSiacoins(hostPortion) if err != nil { - builder.Drop() return nil, nil, nil, nil, extendErr("could not add collateral: ", ErrorInternal(err.Error())) } diff --git a/modules/host/storageobligations.go b/modules/host/storageobligations.go index 833a7d00bf..e301f3a5e5 100644 --- a/modules/host/storageobligations.go +++ b/modules/host/storageobligations.go @@ -735,13 +735,18 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { revisionTxnIndex := len(so.RevisionTransactionSet) - 1 revisionParents := so.RevisionTransactionSet[:revisionTxnIndex] revisionTxn := so.RevisionTransactionSet[revisionTxnIndex] - builder := h.wallet.RegisterTransaction(revisionTxn, revisionParents) + builder, err := h.wallet.RegisterTransaction(revisionTxn, revisionParents) + if err != nil { + h.log.Println("Error registering transaction:", err) + return + } _, feeRecommendation := h.tpool.FeeEstimation() if so.value().Div64(2).Cmp(feeRecommendation) < 0 { // There's no sense submitting the revision if the fee is more than // half of the anticipated revenue - fee market went up // unexpectedly, and the money that the renter paid to cover the // fees is no longer enough. + builder.Drop() return } txnSize := uint64(len(encoding.MarshalAll(so.RevisionTransactionSet)) + 300) @@ -749,18 +754,22 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { err = builder.FundSiacoins(requiredFee) if err != nil { h.log.Println("Error funding transaction fees", err) + builder.Drop() } builder.AddMinerFee(requiredFee) if err != nil { h.log.Println("Error adding miner fees", err) + builder.Drop() } feeAddedRevisionTransactionSet, err := builder.Sign(true) if err != nil { h.log.Println("Error signing transaction", err) + builder.Drop() } err = h.tpool.AcceptTransactionSet(feeAddedRevisionTransactionSet) if err != nil { h.log.Println("Error submitting transaction to transaction pool", err) + builder.Drop() } so.TransactionFeesAdded = so.TransactionFeesAdded.Add(requiredFee) // return @@ -834,12 +843,17 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { copy(sp.Segment[:], base) // Create and build the transaction with the storage proof. - builder := h.wallet.StartTransaction() + builder, err := h.wallet.StartTransaction() + if err != nil { + h.log.Println("Failed to start transaction:", err) + return + } _, feeRecommendation := h.tpool.FeeEstimation() if so.value().Cmp(feeRecommendation) < 0 { // There's no sense submitting the storage proof if the fee is more // than the anticipated revenue. 
h.log.Debugln("Host not submitting storage proof due to a value that does not sufficiently exceed the fee cost") + builder.Drop() return } txnSize := uint64(len(encoding.Marshal(sp)) + 300) @@ -847,6 +861,7 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { err = builder.FundSiacoins(requiredFee) if err != nil { h.log.Println("Host error when funding a storage proof transaction fee:", err) + builder.Drop() return } builder.AddMinerFee(requiredFee) @@ -854,11 +869,13 @@ func (h *Host) threadedHandleActionItem(soid types.FileContractID) { storageProofSet, err := builder.Sign(true) if err != nil { h.log.Println("Host error when signing the storage proof transaction:", err) + builder.Drop() return } err = h.tpool.AcceptTransactionSet(storageProofSet) if err != nil { h.log.Println("Host unable to submit storage proof transaction to transaction pool:", err) + builder.Drop() return } so.TransactionFeesAdded = so.TransactionFeesAdded.Add(requiredFee) diff --git a/modules/host/storageobligations_smoke_test.go b/modules/host/storageobligations_smoke_test.go index 72de17a701..3cfae4b3e2 100644 --- a/modules/host/storageobligations_smoke_test.go +++ b/modules/host/storageobligations_smoke_test.go @@ -31,12 +31,15 @@ func randSector() (crypto.Hash, []byte) { // contract that will form the foundation of a storage obligation. func (ht *hostTester) newTesterStorageObligation() (storageObligation, error) { // Create the file contract that will be used in the obligation. - builder := ht.wallet.StartTransaction() + builder, err := ht.wallet.StartTransaction() + if err != nil { + return storageObligation{}, err + } // Fund the file contract with a payout. The payout needs to be big enough // that the expected revenue is larger than the fee that the host may end // up paying. payout := types.SiacoinPrecision.Mul64(1e3) - err := builder.FundSiacoins(payout) + err = builder.FundSiacoins(payout) if err != nil { return storageObligation{}, err } diff --git a/modules/host/update_test.go b/modules/host/update_test.go index b04e687245..0032748c98 100644 --- a/modules/host/update_test.go +++ b/modules/host/update_test.go @@ -32,7 +32,10 @@ func TestStorageProof(t *testing.T) { ValidProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(0)}}, MissedProofOutputs: []types.SiacoinOutput{{Value: types.NewCurrency64(1)}, {Value: types.NewCurrency64(0)}}, } - txnBuilder := ht.wallet.StartTransaction() + txnBuilder, err := ht.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(fc.Payout) if err != nil { t.Fatal(err) diff --git a/modules/miner/blockmanager.go b/modules/miner/blockmanager.go index 403113bd2b..59084da181 100644 --- a/modules/miner/blockmanager.go +++ b/modules/miner/blockmanager.go @@ -81,13 +81,17 @@ func (m *Miner) HeaderForWork() (types.BlockHeader, types.Target, error) { defer m.mu.Unlock() // Return a blank header with an error if the wallet is locked. - if !m.wallet.Unlocked() { + unlocked, err := m.wallet.Unlocked() + if err != nil { + return types.BlockHeader{}, types.Target{}, err + } + if !unlocked { return types.BlockHeader{}, types.Target{}, modules.ErrLockedWallet } // Check that the wallet has been initialized, and that the miner has // successfully fetched an address. 
- err := m.checkAddress() + err = m.checkAddress() if err != nil { return types.BlockHeader{}, types.Target{}, err } diff --git a/modules/miner/miner.go b/modules/miner/miner.go index ec04c960ff..ee1f1f770e 100644 --- a/modules/miner/miner.go +++ b/modules/miner/miner.go @@ -249,7 +249,10 @@ func (m *Miner) Close() error { // checkAddress checks that the miner has an address, fetching an address from // the wallet if not. func (m *Miner) checkAddress() error { - addrs := m.wallet.AllAddresses() + addrs, err := m.wallet.AllAddresses() + if err != nil { + return err + } hasAddr := false for _, addr := range addrs { if m.persist.Address == addr { diff --git a/modules/miner/miner_test.go b/modules/miner/miner_test.go index 05d49d4d48..a369d809ac 100644 --- a/modules/miner/miner_test.go +++ b/modules/miner/miner_test.go @@ -104,7 +104,10 @@ func TestIntegrationMiner(t *testing.T) { } // Check that the wallet has money. - siacoins, _, _ := mt.wallet.ConfirmedBalance() + siacoins, _, _, err := mt.wallet.ConfirmedBalance() + if err != nil { + t.Error(err) + } if siacoins.IsZero() { t.Error("expecting mining full balance to not be zero") } @@ -117,7 +120,10 @@ func TestIntegrationMiner(t *testing.T) { t.Fatal(err) } } - morecoins, _, _ := mt.wallet.ConfirmedBalance() + morecoins, _, _, err := mt.wallet.ConfirmedBalance() + if err != nil { + t.Error(err) + } if siacoins.Cmp(morecoins) >= 0 { t.Error("wallet is not gaining balance while mining") } diff --git a/modules/miner/testminer.go b/modules/miner/testminer.go index 22c9c3390b..2c9d24b512 100644 --- a/modules/miner/testminer.go +++ b/modules/miner/testminer.go @@ -49,7 +49,11 @@ func solveBlock(b types.Block, target types.Target) (types.Block, bool) { func (m *Miner) BlockForWork() (b types.Block, t types.Target, err error) { // Check if the wallet is unlocked. If the wallet is unlocked, make sure // that the miner has a recent address. 
- if !m.wallet.Unlocked() { + unlocked, err := m.wallet.Unlocked() + if err != nil { + return types.Block{}, types.Target{}, err + } + if !unlocked { err = modules.ErrLockedWallet return } @@ -85,10 +89,14 @@ func (m *Miner) FindBlock() (types.Block, error) { m.mu.Lock() defer m.mu.Unlock() - if !m.wallet.Unlocked() { + unlocked, err := m.wallet.Unlocked() + if err != nil { + return err + } + if !unlocked { return modules.ErrLockedWallet } - err := m.checkAddress() + err = m.checkAddress() if err != nil { return err } diff --git a/modules/renter/contractor/contractor_test.go b/modules/renter/contractor/contractor_test.go index e14eff8108..9afa37f0a9 100644 --- a/modules/renter/contractor/contractor_test.go +++ b/modules/renter/contractor/contractor_test.go @@ -24,8 +24,8 @@ func (newStub) Synced() bool { return true } func (newStub) Unsubscribe(modules.ConsensusSetSubscriber) { return } // wallet stubs -func (newStub) NextAddress() (uc types.UnlockConditions, err error) { return } -func (newStub) StartTransaction() modules.TransactionBuilder { return nil } +func (newStub) NextAddress() (uc types.UnlockConditions, err error) { return } +func (newStub) StartTransaction() (tb modules.TransactionBuilder, err error) { return } // transaction pool stubs func (newStub) AcceptTransactionSet([]types.Transaction) error { return nil } @@ -310,7 +310,10 @@ func TestAllowanceSpending(t *testing.T) { } } } - balance, _, _ := w.ConfirmedBalance() + balance, _, _, err := w.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } spent := minerRewards.Sub(balance) if spent.Cmp(testAllowance.Funds) > 0 { t.Fatal("contractor spent too much money: spent", spent.HumanString(), "allowance funds:", testAllowance.Funds.HumanString()) @@ -535,9 +538,9 @@ func (ws *testWalletShim) NextAddress() (types.UnlockConditions, error) { ws.nextAddressCalled = true return types.UnlockConditions{}, nil } -func (ws *testWalletShim) StartTransaction() modules.TransactionBuilder { +func (ws *testWalletShim) StartTransaction() (modules.TransactionBuilder, error) { ws.startTxnCalled = true - return nil + return nil, nil } // TestWalletBridge tests the walletBridge type. diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 71b80f7173..f54dddba53 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -185,7 +185,10 @@ func (c *Contractor) managedNewContract(host modules.HostDBEntry, contractFundin c.mu.RUnlock() // create transaction builder - txnBuilder := c.wallet.StartTransaction() + txnBuilder, err := c.wallet.StartTransaction() + if err != nil { + return modules.RenterContract{}, err + } contract, err := c.staticContracts.FormContract(params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) if err != nil { @@ -241,7 +244,10 @@ func (c *Contractor) managedRenew(sc *proto.SafeContract, contractFunding types. 
c.mu.RUnlock() // execute negotiation protocol - txnBuilder := c.wallet.StartTransaction() + txnBuilder, err := c.wallet.StartTransaction() + if err != nil { + return modules.RenterContract{}, err + } newContract, err := c.staticContracts.Renew(sc, params, txnBuilder, c.tpool, c.hdb, c.tg.StopChan()) if err != nil { txnBuilder.Drop() // return unused outputs to wallet diff --git a/modules/renter/contractor/dependencies.go b/modules/renter/contractor/dependencies.go index d2261d82ca..442831db06 100644 --- a/modules/renter/contractor/dependencies.go +++ b/modules/renter/contractor/dependencies.go @@ -21,11 +21,11 @@ type ( // transactionBuilder. walletShim interface { NextAddress() (types.UnlockConditions, error) - StartTransaction() modules.TransactionBuilder + StartTransaction() (modules.TransactionBuilder, error) } wallet interface { NextAddress() (types.UnlockConditions, error) - StartTransaction() transactionBuilder + StartTransaction() (transactionBuilder, error) } transactionBuilder interface { AddArbitraryData([]byte) uint64 @@ -75,7 +75,7 @@ func (ws *WalletBridge) NextAddress() (types.UnlockConditions, error) { return w // StartTransaction creates a new transactionBuilder that can be used to create // and sign a transaction. -func (ws *WalletBridge) StartTransaction() transactionBuilder { return ws.W.StartTransaction() } +func (ws *WalletBridge) StartTransaction() (transactionBuilder, error) { return ws.W.StartTransaction() } // stdPersist implements the persister interface. The filename required by // these functions is internal to stdPersist. diff --git a/modules/renter/contractor/host_integration_test.go b/modules/renter/contractor/host_integration_test.go index e73ec5d4d1..06315f6a6c 100644 --- a/modules/renter/contractor/host_integration_test.go +++ b/modules/renter/contractor/host_integration_test.go @@ -32,7 +32,11 @@ func newTestingWallet(testdir string, cs modules.ConsensusSet, tp modules.Transa return nil, err } key := crypto.GenerateTwofishKey() - if !w.Encrypted() { + encrypted, err := w.Encrypted() + if err != nil { + return nil, err + } + if !encrypted { _, err = w.Encrypt(key) if err != nil { return nil, err @@ -126,7 +130,11 @@ func newTestingTrio(name string) (modules.Host, *Contractor, modules.TestMiner, return nil, nil, nil, err } key := crypto.GenerateTwofishKey() - if !w.Encrypted() { + encrypted, err := w.Encrypted() + if err != nil { + return nil, nil, nil, err + } + if !encrypted { _, err = w.Encrypt(key) if err != nil { return nil, nil, nil, err diff --git a/modules/renter/contractor/negotiate_test.go b/modules/renter/contractor/negotiate_test.go index f43c4936f5..8b01e8ae78 100644 --- a/modules/renter/contractor/negotiate_test.go +++ b/modules/renter/contractor/negotiate_test.go @@ -139,7 +139,10 @@ func TestNegotiateContract(t *testing.T) { RevisionNumber: 0, } - txnBuilder := ct.wallet.StartTransaction() + txnBuilder, err := ct.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(fc.Payout) if err != nil { t.Fatal(err) @@ -210,7 +213,10 @@ func TestReviseContract(t *testing.T) { {Value: types.ZeroCurrency, UnlockHash: types.UnlockHash{}}, } - txnBuilder := ct.wallet.StartTransaction() + txnBuilder, err := ct.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(fc.Payout) if err != nil { t.Fatal(err) diff --git a/modules/transactionpool/accept_test.go b/modules/transactionpool/accept_test.go index 40b98f3ffe..fae20bf18c 100644 --- a/modules/transactionpool/accept_test.go +++ 
b/modules/transactionpool/accept_test.go @@ -74,7 +74,10 @@ func TestConflictingTransactionSets(t *testing.T) { // Fund a partial transaction. fund := types.NewCurrency64(30e6) - txnBuilder := tpt.wallet.StartTransaction() + txnBuilder, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) @@ -335,7 +338,10 @@ func TestTransactionSuperset(t *testing.T) { // Fund a partial transaction. fund := types.NewCurrency64(30e6) - txnBuilder := tpt.wallet.StartTransaction() + txnBuilder, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) @@ -394,7 +400,10 @@ func TestTransactionSubset(t *testing.T) { // Fund a partial transaction. fund := types.NewCurrency64(30e6) - txnBuilder := tpt.wallet.StartTransaction() + txnBuilder, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) @@ -441,7 +450,10 @@ func TestTransactionChild(t *testing.T) { // Fund a partial transaction. fund := types.NewCurrency64(30e6) - txnBuilder := tpt.wallet.StartTransaction() + txnBuilder, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(fund) if err != nil { t.Fatal(err) @@ -510,7 +522,10 @@ func TestAcceptFCAndConflictingRevision(t *testing.T) { defer tpt.Close() // Create and fund a valid file contract. - builder := tpt.wallet.StartTransaction() + builder, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } payout := types.NewCurrency64(1e9) err = builder.FundSiacoins(payout) if err != nil { @@ -566,7 +581,10 @@ func TestPartialConfirmation(t *testing.T) { defer tpt.Close() // Create and fund a valid file contract. - builder := tpt.wallet.StartTransaction() + builder, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } payout := types.NewCurrency64(1e9) err = builder.FundSiacoins(payout) if err != nil { @@ -644,7 +662,10 @@ func TestPartialConfirmationWeave(t *testing.T) { // Create a transaction with a single output to a fully controlled address. emptyUH := types.UnlockConditions{}.UnlockHash() - builder1 := tpt.wallet.StartTransaction() + builder1, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } funding1 := types.NewCurrency64(1e9) err = builder1.FundSiacoins(funding1) if err != nil { @@ -672,7 +693,10 @@ func TestPartialConfirmationWeave(t *testing.T) { // Create a second output to the fully controlled address, to fund the // second transaction in the weave. 
- builder2 := tpt.wallet.StartTransaction() + builder2, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } funding2 := types.NewCurrency64(2e9) err = builder2.FundSiacoins(funding2) if err != nil { diff --git a/modules/transactionpool/transactionpool_test.go b/modules/transactionpool/transactionpool_test.go index 3df5e3523c..c07a4d6e8a 100644 --- a/modules/transactionpool/transactionpool_test.go +++ b/modules/transactionpool/transactionpool_test.go @@ -161,7 +161,10 @@ func TestGetTransaction(t *testing.T) { value := types.NewCurrency64(35e6) fee := types.NewCurrency64(3e2) emptyUH := types.UnlockConditions{}.UnlockHash() - txnBuilder := tpt.wallet.StartTransaction() + txnBuilder, err := tpt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = txnBuilder.FundSiacoins(value) if err != nil { t.Fatal(err) diff --git a/modules/wallet.go b/modules/wallet.go index 74e1e2473d..46ab24c04f 100644 --- a/modules/wallet.go +++ b/modules/wallet.go @@ -41,6 +41,10 @@ var ( // ErrLowBalance is returned if the wallet does not have enough funds to // complete the desired action. ErrLowBalance = errors.New("insufficient balance") + + // ErrWalletShutdown is returned when a method can't continue execution due + // to the wallet shutting down. + ErrWalletShutdown = errors.New("wallet is shutting down") ) type ( @@ -239,7 +243,7 @@ type ( // Encrypted returns whether or not the wallet has been encrypted yet. // After being encrypted for the first time, the wallet can only be // unlocked using the encryption password. - Encrypted() bool + Encrypted() (bool, error) // InitFromSeed functions like Encrypt, but using a specified seed. // Unlike Encrypt, the blockchain will be scanned to determine the @@ -266,7 +270,7 @@ type ( // Unlocked returns true if the wallet is currently unlocked, false // otherwise. - Unlocked() bool + Unlocked() (bool, error) } // KeyManager manages wallet keys, including the use of seeds, creating and @@ -276,7 +280,7 @@ type ( // AllAddresses returns all addresses that the wallet is able to spend // from, including unseeded addresses. Addresses are returned sorted in // byte-order. - AllAddresses() []types.UnlockHash + AllAddresses() ([]types.UnlockHash, error) // AllSeeds returns all of the seeds that are being tracked by the // wallet, including the primary seed. Only the primary seed is used to @@ -342,30 +346,30 @@ type ( // ConfirmedBalance returns the confirmed balance of the wallet, minus // any outgoing transactions. ConfirmedBalance will include unconfirmed // refund transactions. - ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance types.Currency, siacoinClaimBalance types.Currency) + ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance types.Currency, siacoinClaimBalance types.Currency, err error) // UnconfirmedBalance returns the unconfirmed balance of the wallet. // Outgoing funds and incoming funds are reported separately. Refund // outputs are included, meaning that sending a single coin to // someone could result in 'outgoing: 12, incoming: 11'. Siafunds are // not considered in the unconfirmed balance. 
- UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency) + UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency, err error) // Height returns the wallet's internal processed consensus height - Height() types.BlockHeight + Height() (types.BlockHeight, error) // AddressTransactions returns all of the transactions that are related // to a given address. - AddressTransactions(types.UnlockHash) []ProcessedTransaction + AddressTransactions(types.UnlockHash) ([]ProcessedTransaction, error) // AddressUnconfirmedHistory returns all of the unconfirmed // transactions related to a given address. - AddressUnconfirmedTransactions(types.UnlockHash) []ProcessedTransaction + AddressUnconfirmedTransactions(types.UnlockHash) ([]ProcessedTransaction, error) // Transaction returns the transaction with the given id. The bool // indicates whether the transaction is in the wallet database. The // wallet only stores transactions that are related to the wallet. - Transaction(types.TransactionID) (ProcessedTransaction, bool) + Transaction(types.TransactionID) (ProcessedTransaction, bool, error) // Transactions returns all of the transactions that were confirmed at // heights [startHeight, endHeight]. Unconfirmed transactions are not @@ -374,25 +378,25 @@ type ( // UnconfirmedTransactions returns all unconfirmed transactions // relative to the wallet. - UnconfirmedTransactions() []ProcessedTransaction + UnconfirmedTransactions() ([]ProcessedTransaction, error) // RegisterTransaction takes a transaction and its parents and returns // a TransactionBuilder which can be used to expand the transaction. - RegisterTransaction(t types.Transaction, parents []types.Transaction) TransactionBuilder + RegisterTransaction(t types.Transaction, parents []types.Transaction) (TransactionBuilder, error) // Rescanning reports whether the wallet is currently rescanning the // blockchain. - Rescanning() bool + Rescanning() (bool, error) // Settings returns the Wallet's current settings. - Settings() WalletSettings + Settings() (WalletSettings, error) // SetSettings sets the Wallet's settings. - SetSettings(WalletSettings) + SetSettings(WalletSettings) error // StartTransaction is a convenience method that calls // RegisterTransaction(types.Transaction{}, nil) - StartTransaction() TransactionBuilder + StartTransaction() (TransactionBuilder, error) // SendSiacoins is a tool for sending siacoins from the wallet to an // address. Sending money usually results in multiple transactions. The @@ -411,7 +415,7 @@ type ( // DustThreshold returns the quantity per byte below which a Currency is // considered to be Dust. - DustThreshold() types.Currency + DustThreshold() (types.Currency, error) } // WalletSettings control the behavior of the Wallet. diff --git a/modules/wallet/defrag.go b/modules/wallet/defrag.go index 39f937c35b..38232b40a2 100644 --- a/modules/wallet/defrag.go +++ b/modules/wallet/defrag.go @@ -16,7 +16,10 @@ var ( // wallet outputs into a single new address. 
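// The interface changes above turn nearly every wallet accessor into a
// (value, error) pair, with modules.ErrWalletShutdown returned once the
// wallet has begun closing. A minimal caller-side sketch of the pattern the
// rest of this patch applies at each call site; the example package,
// sendExample, and its parameters are hypothetical, and transaction fees and
// transaction-pool submission are omitted for brevity.
package example

import (
	"github.com/NebulousLabs/Sia/modules"
	"github.com/NebulousLabs/Sia/types"
)

func sendExample(w modules.Wallet, amount types.Currency, dest types.UnlockHash) (err error) {
	unlocked, err := w.Unlocked()
	if err != nil {
		// Typically modules.ErrWalletShutdown while the wallet is closing.
		return err
	}
	if !unlocked {
		return modules.ErrLockedWallet
	}
	txnBuilder, err := w.StartTransaction()
	if err != nil {
		return err
	}
	// Drop the builder on any later failure so the outputs it reserved are
	// returned to the wallet, mirroring managedAnnounce above.
	defer func() {
		if err != nil {
			txnBuilder.Drop()
		}
	}()
	if err = txnBuilder.FundSiacoins(amount); err != nil {
		return err
	}
	txnBuilder.AddSiacoinOutput(types.SiacoinOutput{Value: amount, UnlockHash: dest})
	_, err = txnBuilder.Sign(true)
	return err
}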
func (w *Wallet) managedCreateDefragTransaction() ([]types.Transaction, error) { // dustThreshold and minFee have to be obtained separate from the lock - dustThreshold := w.DustThreshold() + dustThreshold, err := w.DustThreshold() + if err != nil { + return nil, err + } minFee, _ := w.tpool.FeeEstimation() w.mu.Lock() diff --git a/modules/wallet/defrag_test.go b/modules/wallet/defrag_test.go index 70affa1c9c..cb12ee31bf 100644 --- a/modules/wallet/defrag_test.go +++ b/modules/wallet/defrag_test.go @@ -71,7 +71,10 @@ func TestDefragWalletDust(t *testing.T) { dustOutputValue := types.NewCurrency64(10000) noutputs := defragThreshold + 1 - tbuilder := wt.wallet.StartTransaction() + tbuilder, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = tbuilder.FundSiacoins(dustOutputValue.Mul64(uint64(noutputs))) if err != nil { t.Fatal(err) @@ -164,7 +167,11 @@ func TestDefragOutputExhaustion(t *testing.T) { fee := types.SiacoinPrecision.Mul64(10) numOutputs := defragThreshold + 1 - tbuilder := wt.wallet.StartTransaction() + tbuilder, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } + tbuilder.FundSiacoins(txnValue.Mul64(uint64(numOutputs)).Add(fee)) for i := 0; i < numOutputs; i++ { diff --git a/modules/wallet/encrypt.go b/modules/wallet/encrypt.go index 22336e2b78..29176a9ba9 100644 --- a/modules/wallet/encrypt.go +++ b/modules/wallet/encrypt.go @@ -274,13 +274,16 @@ func (w *Wallet) wipeSecrets() { } // Encrypted returns whether or not the wallet has been encrypted. -func (w *Wallet) Encrypted() bool { +func (w *Wallet) Encrypted() (bool, error) { + if err := w.tg.Add(); err != nil { + return false, err + } w.mu.Lock() defer w.mu.Unlock() if build.DEBUG && w.unlocked && !w.encrypted { panic("wallet is both unlocked and unencrypted") } - return w.encrypted + return w.encrypted, nil } // Encrypt will create a primary seed for the wallet and encrypt it using @@ -391,15 +394,21 @@ func (w *Wallet) InitFromSeed(masterKey crypto.TwofishKey, seed modules.Seed) er } // Unlocked indicates whether the wallet is locked or unlocked. -func (w *Wallet) Unlocked() bool { +func (w *Wallet) Unlocked() (bool, error) { + if err := w.tg.Add(); err != nil { + return false, err + } w.mu.RLock() defer w.mu.RUnlock() - return w.unlocked + return w.unlocked, nil } // Lock will erase all keys from memory and prevent the wallet from spending // coins until it is unlocked. func (w *Wallet) Lock() error { + if err := w.tg.Add(); err != nil { + return err + } w.mu.Lock() defer w.mu.Unlock() if !w.unlocked { diff --git a/modules/wallet/encrypt_test.go b/modules/wallet/encrypt_test.go index f57ebd9e91..92d707ffbc 100644 --- a/modules/wallet/encrypt_test.go +++ b/modules/wallet/encrypt_test.go @@ -20,10 +20,18 @@ import ( // unlocking are all happening in the correct order and returning the correct // errors. func postEncryptionTesting(m modules.TestMiner, w *Wallet, masterKey crypto.TwofishKey) { - if !w.Encrypted() { + encrypted, err := w.Encrypted() + if err != nil { + panic(err) + } + unlocked, err := w.Unlocked() + if err != nil { + panic(err) + } + if !encrypted { panic("wallet is not encrypted when starting postEncryptionTesting") } - if w.Unlocked() { + if unlocked { panic("wallet is unlocked when starting postEncryptionTesting") } if len(w.seeds) != 0 { @@ -31,7 +39,7 @@ func postEncryptionTesting(m modules.TestMiner, w *Wallet, masterKey crypto.Twof } // Try unlocking and using the wallet. 
- err := w.Unlock(masterKey) + err = w.Unlock(masterKey) if err != nil { panic(err) } @@ -47,7 +55,10 @@ func postEncryptionTesting(m modules.TestMiner, w *Wallet, masterKey crypto.Twof panic(err) } } - siacoinBal, _, _ := w.ConfirmedBalance() + siacoinBal, _, _, err := w.ConfirmedBalance() + if err != nil { + panic(err) + } if siacoinBal.IsZero() { panic("wallet balance reported as 0 after maturing some mined blocks") } @@ -83,7 +94,10 @@ func postEncryptionTesting(m modules.TestMiner, w *Wallet, masterKey crypto.Twof if err != nil { panic(err) } - siacoinBal2, _, _ := w.ConfirmedBalance() + siacoinBal2, _, _, err := w.ConfirmedBalance() + if err != nil { + panic(err) + } if siacoinBal2.Cmp(siacoinBal) >= 0 { panic("balance did not increase") } @@ -101,7 +115,11 @@ func TestIntegrationPreEncryption(t *testing.T) { } // Check that the wallet knows it's not encrypted. - if wt.wallet.Encrypted() { + encrypted, err := wt.wallet.Encrypted() + if err != nil { + t.Fatal(err) + } + if encrypted { t.Error("wallet is reporting that it has been encrypted") } err = wt.wallet.Lock() @@ -120,10 +138,19 @@ func TestIntegrationPreEncryption(t *testing.T) { if err != nil { t.Fatal(err) } - if w1.Encrypted() { + encrypted, err = w1.Encrypted() + if encrypted { t.Error("wallet is reporting that it has been encrypted when no such action has occurred") } - if w1.Unlocked() { + unlocked, err := w1.Unlocked() + if err != nil { + t.Fatal(err) + } + unlocked, err = w1.Unlocked() + if err != nil { + t.Fatal(err) + } + if unlocked { t.Error("new wallet is not being treated as locked") } w1.Close() @@ -210,13 +237,19 @@ func TestLock(t *testing.T) { } // Lock the wallet. - siacoinBalance, _, _ := wt.wallet.ConfirmedBalance() + siacoinBalance, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Error(err) + } err = wt.wallet.Lock() if err != nil { t.Error(err) } // Compare to the original balance. 
- siacoinBalance2, _, _ := wt.wallet.ConfirmedBalance() + siacoinBalance2, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Error(err) + } if !siacoinBalance2.Equals(siacoinBalance) { t.Error("siacoin balance reporting changed upon closing the wallet") } @@ -243,7 +276,10 @@ func TestLock(t *testing.T) { if err != nil { t.Fatal(err) } - siacoinBalance3, _, _ := wt.wallet.ConfirmedBalance() + siacoinBalance3, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Error(err) + } if siacoinBalance3.Cmp(siacoinBalance2) <= 0 { t.Error("balance should increase after a block was mined") } @@ -266,7 +302,10 @@ func TestInitFromSeedConcurrentUnlock(t *testing.T) { if err != nil { t.Fatal(err) } - origBal, _, _ := wt.wallet.ConfirmedBalance() + origBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } // create a blank wallet dir := filepath.Join(build.TempDir(modules.WalletDir, t.Name()+"-new"), modules.WalletDir) @@ -296,7 +335,10 @@ func TestInitFromSeedConcurrentUnlock(t *testing.T) { } // starting balance should match the original wallet - newBal, _, _ := w.ConfirmedBalance() + newBal, _, _, err := w.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if newBal.Cmp(origBal) != 0 { t.Log(w.UnconfirmedBalance()) t.Fatalf("wallet should have correct balance after loading seed: wanted %v, got %v", origBal, newBal) @@ -358,7 +400,10 @@ func TestInitFromSeed(t *testing.T) { if err != nil { t.Fatal(err) } - origBal, _, _ := wt.wallet.ConfirmedBalance() + origBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } // create a blank wallet dir := filepath.Join(build.TempDir(modules.WalletDir, "TestInitFromSeed1"), modules.WalletDir) @@ -375,7 +420,10 @@ func TestInitFromSeed(t *testing.T) { t.Fatal(err) } // starting balance should match the original wallet - newBal, _, _ := w.ConfirmedBalance() + newBal, _, _, err := w.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if newBal.Cmp(origBal) != 0 { t.Log(w.UnconfirmedBalance()) t.Fatalf("wallet should have correct balance after loading seed: wanted %v, got %v", origBal, newBal) @@ -447,7 +495,10 @@ func TestChangeKey(t *testing.T) { var newKey crypto.TwofishKey fastrand.Read(newKey[:]) - origBal, _, _ := wt.wallet.ConfirmedBalance() + origBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } err = wt.wallet.ChangeKey(wt.walletMasterKey, newKey) if err != nil { @@ -468,7 +519,10 @@ func TestChangeKey(t *testing.T) { if err != nil { t.Fatal(err) } - newBal, _, _ := wt.wallet.ConfirmedBalance() + newBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if newBal.Cmp(origBal) != 0 { t.Fatal("wallet with changed key did not have the same balance") } diff --git a/modules/wallet/money.go b/modules/wallet/money.go index 3ec4544ba1..3c7e638183 100644 --- a/modules/wallet/money.go +++ b/modules/wallet/money.go @@ -17,26 +17,29 @@ type sortedOutputs struct { // DustThreshold returns the quantity per byte below which a Currency is // considered to be Dust. -func (w *Wallet) DustThreshold() types.Currency { +func (w *Wallet) DustThreshold() (types.Currency, error) { if err := w.tg.Add(); err != nil { - return types.Currency{} + return types.Currency{}, modules.ErrWalletShutdown } defer w.tg.Done() minFee, _ := w.tpool.FeeEstimation() - return minFee.Mul64(3) + return minFee.Mul64(3), nil } // ConfirmedBalance returns the balance of the wallet according to all of the // confirmed transactions. 
-func (w *Wallet) ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance types.Currency, siafundClaimBalance types.Currency) { +func (w *Wallet) ConfirmedBalance() (siacoinBalance types.Currency, siafundBalance types.Currency, siafundClaimBalance types.Currency, err error) { if err := w.tg.Add(); err != nil { - return + return types.ZeroCurrency, types.ZeroCurrency, types.ZeroCurrency, modules.ErrWalletShutdown } defer w.tg.Done() // dustThreshold has to be obtained separate from the lock - dustThreshold := w.DustThreshold() + dustThreshold, err := w.DustThreshold() + if err != nil { + return types.ZeroCurrency, types.ZeroCurrency, types.ZeroCurrency, modules.ErrWalletShutdown + } w.mu.Lock() defer w.mu.Unlock() @@ -70,14 +73,17 @@ func (w *Wallet) ConfirmedBalance() (siacoinBalance types.Currency, siafundBalan // UnconfirmedBalance returns the number of outgoing and incoming siacoins in // the unconfirmed transaction set. Refund outputs are included in this // reporting. -func (w *Wallet) UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency) { +func (w *Wallet) UnconfirmedBalance() (outgoingSiacoins types.Currency, incomingSiacoins types.Currency, err error) { if err := w.tg.Add(); err != nil { - return + return types.ZeroCurrency, types.ZeroCurrency, modules.ErrWalletShutdown } defer w.tg.Done() // dustThreshold has to be obtained separate from the lock - dustThreshold := w.DustThreshold() + dustThreshold, err := w.DustThreshold() + if err != nil { + return types.ZeroCurrency, types.ZeroCurrency, modules.ErrWalletShutdown + } w.mu.Lock() defer w.mu.Unlock() @@ -101,6 +107,7 @@ func (w *Wallet) UnconfirmedBalance() (outgoingSiacoins types.Currency, incoming // is submitted to the transaction pool and is also returned. func (w *Wallet) SendSiacoins(amount types.Currency, dest types.UnlockHash) (txns []types.Transaction, err error) { if err := w.tg.Add(); err != nil { + err = modules.ErrWalletShutdown return nil, err } defer w.tg.Done() @@ -120,7 +127,10 @@ func (w *Wallet) SendSiacoins(amount types.Currency, dest types.UnlockHash) (txn UnlockHash: dest, } - txnBuilder := w.StartTransaction() + txnBuilder, err := w.StartTransaction() + if err != nil { + return nil, err + } defer func() { if err != nil { txnBuilder.Drop() @@ -159,6 +169,7 @@ func (w *Wallet) SendSiacoins(amount types.Currency, dest types.UnlockHash) (txn func (w *Wallet) SendSiacoinsMulti(outputs []types.SiacoinOutput) (txns []types.Transaction, err error) { w.log.Println("Beginning call to SendSiacoinsMulti") if err := w.tg.Add(); err != nil { + err = modules.ErrWalletShutdown return nil, err } defer w.tg.Done() @@ -170,7 +181,10 @@ func (w *Wallet) SendSiacoinsMulti(outputs []types.SiacoinOutput) (txns []types. return nil, modules.ErrLockedWallet } - txnBuilder := w.StartTransaction() + txnBuilder, err := w.StartTransaction() + if err != nil { + return nil, err + } defer func() { if err != nil { txnBuilder.Drop() @@ -229,6 +243,7 @@ func (w *Wallet) SendSiacoinsMulti(outputs []types.SiacoinOutput) (txns []types. // is submitted to the transaction pool and is also returned. 
func (w *Wallet) SendSiafunds(amount types.Currency, dest types.UnlockHash) ([]types.Transaction, error) { if err := w.tg.Add(); err != nil { + err = modules.ErrWalletShutdown return nil, err } defer w.tg.Done() @@ -247,8 +262,11 @@ func (w *Wallet) SendSiafunds(amount types.Currency, dest types.UnlockHash) ([]t UnlockHash: dest, } - txnBuilder := w.StartTransaction() - err := txnBuilder.FundSiacoins(tpoolFee) + txnBuilder, err := w.StartTransaction() + if err != nil { + return nil, err + } + err = txnBuilder.FundSiacoins(tpoolFee) if err != nil { return nil, err } diff --git a/modules/wallet/money_test.go b/modules/wallet/money_test.go index 9a0c65a6a1..510e11278e 100644 --- a/modules/wallet/money_test.go +++ b/modules/wallet/money_test.go @@ -21,8 +21,14 @@ func TestSendSiacoins(t *testing.T) { // Get the initial balance - should be 1 block. The unconfirmed balances // should be 0. - confirmedBal, _, _ := wt.wallet.ConfirmedBalance() - unconfirmedOut, unconfirmedIn := wt.wallet.UnconfirmedBalance() + confirmedBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } + unconfirmedOut, unconfirmedIn, err := wt.wallet.UnconfirmedBalance() + if err != nil { + t.Fatal(err) + } if !confirmedBal.Equals(types.CalculateCoinbase(1)) { t.Error("unexpected confirmed balance") } @@ -43,8 +49,14 @@ func TestSendSiacoins(t *testing.T) { if err != nil { t.Fatal(err) } - confirmedBal2, _, _ := wt.wallet.ConfirmedBalance() - unconfirmedOut2, unconfirmedIn2 := wt.wallet.UnconfirmedBalance() + confirmedBal2, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } + unconfirmedOut2, unconfirmedIn2, err := wt.wallet.UnconfirmedBalance() + if err != nil { + t.Fatal(err) + } if !confirmedBal2.Equals(confirmedBal) { t.Error("confirmed balance changed without introduction of blocks") } @@ -58,8 +70,14 @@ func TestSendSiacoins(t *testing.T) { if err != nil { t.Fatal(err) } - confirmedBal3, _, _ := wt.wallet.ConfirmedBalance() - unconfirmedOut3, unconfirmedIn3 := wt.wallet.UnconfirmedBalance() + confirmedBal3, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } + unconfirmedOut3, unconfirmedIn3, err := wt.wallet.UnconfirmedBalance() + if err != nil { + t.Fatal(err) + } if !confirmedBal3.Equals(confirmedBal2.Add(types.CalculateCoinbase(2)).Sub(sendValue).Sub(tpoolFee)) { t.Error("confirmed balance did not adjust to the expected value") } diff --git a/modules/wallet/seed.go b/modules/wallet/seed.go index 3d9d319109..15be39fec7 100644 --- a/modules/wallet/seed.go +++ b/modules/wallet/seed.go @@ -405,7 +405,11 @@ func (w *Wallet) SweepSeed(seed modules.Seed) (coins, funds types.Currency, err var txnCoins, txnFunds types.Currency // construct a transaction that spends the outputs - tb := w.StartTransaction() + tb, err := w.StartTransaction() + if err != nil { + return types.ZeroCurrency, types.ZeroCurrency, err + } + var sweptCoins, sweptFunds types.Currency // total values of swept outputs for _, output := range txnSiacoinOutputs { // construct a siacoin input that spends the output @@ -512,7 +516,7 @@ func (w *Wallet) SweepSeed(seed modules.Seed) (coins, funds types.Currency, err // submit the transactions err = w.tpool.AcceptTransactionSet(txnSet) if err != nil { - return + return types.ZeroCurrency, types.ZeroCurrency, err } w.log.Println("Creating a transaction set to sweep a seed, IDs:") diff --git a/modules/wallet/seed_test.go b/modules/wallet/seed_test.go index e01a4f5939..dcb6cdf242 100644 --- a/modules/wallet/seed_test.go +++ 
b/modules/wallet/seed_test.go @@ -125,7 +125,10 @@ func TestLoadSeed(t *testing.T) { t.Fatal(err) } // Balance of wallet should be 0. - siacoinBal, _, _ := w.ConfirmedBalance() + siacoinBal, _, _, err := w.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !siacoinBal.Equals64(0) { t.Error("fresh wallet should not have a balance") } @@ -147,7 +150,10 @@ func TestLoadSeed(t *testing.T) { t.Error("AllSeeds returned the wrong seed") } - siacoinBal2, _, _ := w.ConfirmedBalance() + siacoinBal2, _, _, err := w.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if siacoinBal2.Cmp64(0) <= 0 { t.Error("wallet failed to load a seed with money in it") } @@ -213,7 +219,10 @@ func TestSweepSeedCoins(t *testing.T) { t.Fatal(err) } // starting balance should be 0. - siacoinBal, _, _ := w.ConfirmedBalance() + siacoinBal, _, _, err := w.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !siacoinBal.IsZero() { t.Error("fresh wallet should not have a balance") } @@ -225,7 +234,10 @@ func TestSweepSeedCoins(t *testing.T) { } // new wallet should have exactly 'sweptCoins' coins - _, incoming := w.UnconfirmedBalance() + _, incoming, err := w.UnconfirmedBalance() + if err != nil { + t.Fatal(err) + } if incoming.Cmp(sweptCoins) != 0 { t.Fatalf("wallet should have correct balance after sweeping seed: wanted %v, got %v", sweptCoins, incoming) } @@ -250,7 +262,10 @@ func TestSweepSeedFunds(t *testing.T) { t.Error(err) } - _, siafundBal, _ := wt.wallet.ConfirmedBalance() + _, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if siafundBal.Cmp(types.NewCurrency64(2000)) != 0 { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } @@ -279,7 +294,10 @@ func TestSweepSeedFunds(t *testing.T) { for i := types.BlockHeight(0); i < types.MaturityDelay; i++ { wt.addBlockNoPayout() } - oldCoinBalance, siafundBal, _ := wt.wallet.ConfirmedBalance() + oldCoinBalance, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if siafundBal.Cmp(types.NewCurrency64(1988)) != 0 { t.Errorf("expecting balance of %v after sending siafunds to the seed, got %v", 1988, siafundBal) } @@ -299,7 +317,10 @@ func TestSweepSeedFunds(t *testing.T) { wt.addBlockNoPayout() // Wallet balance should have decreased to pay for the sweep transaction. 
- newCoinBalance, _, _ := wt.wallet.ConfirmedBalance() + newCoinBalance, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if newCoinBalance.Cmp(oldCoinBalance) >= 0 { t.Error("expecting balance to go down; instead, increased by", newCoinBalance.Sub(oldCoinBalance)) } @@ -325,7 +346,10 @@ func TestSweepSeedSentFunds(t *testing.T) { t.Error(err) } - _, siafundBal, _ := wt.wallet.ConfirmedBalance() + _, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if siafundBal.Cmp(types.NewCurrency64(2000)) != 0 { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } @@ -367,7 +391,10 @@ func TestSweepSeedSentFunds(t *testing.T) { for i := types.BlockHeight(0); i < types.MaturityDelay; i++ { wt.addBlockNoPayout() } - oldCoinBalance, siafundBal, _ := wt.wallet.ConfirmedBalance() + oldCoinBalance, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if expected := 2000 - 12 - 10; siafundBal.Cmp(types.NewCurrency64(uint64(expected))) != 0 { t.Errorf("expecting balance of %v after sending siafunds to the seed, got %v", expected, siafundBal) } @@ -387,7 +414,10 @@ func TestSweepSeedSentFunds(t *testing.T) { wt.addBlockNoPayout() // Wallet balance should have decreased to pay for the sweep transaction. - newCoinBalance, _, _ := wt.wallet.ConfirmedBalance() + newCoinBalance, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if newCoinBalance.Cmp(oldCoinBalance) >= 0 { t.Error("expecting balance to go down; instead, increased by", newCoinBalance.Sub(oldCoinBalance)) } @@ -412,7 +442,10 @@ func TestSweepSeedCoinsAndFunds(t *testing.T) { t.Error(err) } - _, siafundBal, _ := wt.wallet.ConfirmedBalance() + _, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if siafundBal.Cmp(types.NewCurrency64(2000)) != 0 { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } @@ -442,7 +475,10 @@ func TestSweepSeedCoinsAndFunds(t *testing.T) { for i := types.BlockHeight(0); i < types.MaturityDelay; i++ { wt.addBlockNoPayout() } - oldCoinBalance, siafundBal, _ := wt.wallet.ConfirmedBalance() + oldCoinBalance, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if siafundBal.Cmp(types.NewCurrency64(1988)) != 0 { t.Errorf("expecting balance of %v after sending siafunds to the seed, got %v", 1988, siafundBal) } @@ -462,7 +498,10 @@ func TestSweepSeedCoinsAndFunds(t *testing.T) { wt.addBlockNoPayout() // Wallet balance should have decreased to pay for the sweep transaction. - newCoinBalance, _, _ := wt.wallet.ConfirmedBalance() + newCoinBalance, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if newCoinBalance.Cmp(oldCoinBalance) <= 0 { t.Error("expecting balance to go up; instead, decreased by", oldCoinBalance.Sub(newCoinBalance)) } diff --git a/modules/wallet/transactionbuilder.go b/modules/wallet/transactionbuilder.go index 49cec7c8f3..c96226f02d 100644 --- a/modules/wallet/transactionbuilder.go +++ b/modules/wallet/transactionbuilder.go @@ -118,7 +118,10 @@ func (w *Wallet) checkOutput(tx *bolt.Tx, currentHeight types.BlockHeight, id ty // on the transaction builder. 
func (tb *transactionBuilder) FundSiacoins(amount types.Currency) error { // dustThreshold has to be obtained separate from the lock - dustThreshold := tb.wallet.DustThreshold() + dustThreshold, err := tb.wallet.DustThreshold() + if err != nil { + return err + } tb.wallet.mu.Lock() defer tb.wallet.mu.Unlock() @@ -660,14 +663,20 @@ func (w *Wallet) registerTransaction(t types.Transaction, parents []types.Transa // modules.TransactionBuilder which can be used to expand the transaction. The // most typical call is 'RegisterTransaction(types.Transaction{}, nil)', which // registers a new transaction without parents. -func (w *Wallet) RegisterTransaction(t types.Transaction, parents []types.Transaction) modules.TransactionBuilder { +func (w *Wallet) RegisterTransaction(t types.Transaction, parents []types.Transaction) (modules.TransactionBuilder, error) { + if err := w.tg.Add(); err != nil { + return nil, err + } w.mu.Lock() defer w.mu.Unlock() - return w.registerTransaction(t, parents) + return w.registerTransaction(t, parents), nil } // StartTransaction is a convenience function that calls // RegisterTransaction(types.Transaction{}, nil). -func (w *Wallet) StartTransaction() modules.TransactionBuilder { +func (w *Wallet) StartTransaction() (modules.TransactionBuilder, error) { + if err := w.tg.Add(); err != nil { + return nil, err + } return w.RegisterTransaction(types.Transaction{}, nil) } diff --git a/modules/wallet/transactionbuilder_test.go b/modules/wallet/transactionbuilder_test.go index 3470cfacfa..52e33260f9 100644 --- a/modules/wallet/transactionbuilder_test.go +++ b/modules/wallet/transactionbuilder_test.go @@ -54,7 +54,10 @@ func TestViewAdded(t *testing.T) { // but do not sign the transaction. The format of this test mimics the way // that the host-renter protocol behaves when building a file contract // transaction. - b := wt.wallet.StartTransaction() + b, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } txnFund := types.NewCurrency64(100e9) err = b.FundSiacoins(txnFund) if err != nil { @@ -67,7 +70,10 @@ func TestViewAdded(t *testing.T) { // Create a second builder that extends the first, unsigned transaction. Do // not sign the transaction, but do give the extensions to the original // builder. - b2 := wt.wallet.RegisterTransaction(unfinishedTxn, unfinishedParents) + b2, err := wt.wallet.RegisterTransaction(unfinishedTxn, unfinishedParents) + if err != nil { + t.Fatal(err) + } err = b2.FundSiacoins(txnFund) if err != nil { t.Fatal(err) @@ -144,7 +150,10 @@ func TestDoubleSignError(t *testing.T) { defer wt.closeWt() // Create a transaction, add money to it, and then call sign twice. - b := wt.wallet.StartTransaction() + b, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } txnFund := types.NewCurrency64(100e9) err = b.FundSiacoins(txnFund) if err != nil { @@ -191,8 +200,14 @@ func TestConcurrentBuilders(t *testing.T) { } // Get a baseline balance for the wallet. - startingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() - startingOutgoing, startingIncoming := wt.wallet.UnconfirmedBalance() + startingSCConfirmed, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } + startingOutgoing, startingIncoming, err := wt.wallet.UnconfirmedBalance() + if err != nil { + t.Fatal(err) + } if !startingOutgoing.IsZero() { t.Fatal(startingOutgoing) } @@ -201,8 +216,14 @@ func TestConcurrentBuilders(t *testing.T) { } // Create two builders at the same time, then add money to each. 
- builder1 := wt.wallet.StartTransaction() - builder2 := wt.wallet.StartTransaction() + builder1, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } + builder2, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } // Fund each builder with a siacoin output that is smaller than all of the // outputs that the wallet should currently have. funding := types.NewCurrency64(10e3).Mul(types.SiacoinPrecision) @@ -216,7 +237,10 @@ func TestConcurrentBuilders(t *testing.T) { } // Get a second reading on the wallet's balance. - fundedSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() + fundedSCConfirmed, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !startingSCConfirmed.Equals(fundedSCConfirmed) { t.Fatal("confirmed siacoin balance changed when no blocks have been mined", startingSCConfirmed, fundedSCConfirmed) } @@ -284,9 +308,15 @@ func TestConcurrentBuildersSingleOutput(t *testing.T) { if err != nil { t.Fatal(err) } - scBal, _, _ := wt.wallet.ConfirmedBalance() + scBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } // Use a custom builder so that there is no transaction fee. - builder := wt.wallet.StartTransaction() + builder, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } err = builder.FundSiacoins(scBal) if err != nil { t.Fatal(err) @@ -312,8 +342,14 @@ func TestConcurrentBuildersSingleOutput(t *testing.T) { } // Get a baseline balance for the wallet. - startingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() - startingOutgoing, startingIncoming := wt.wallet.UnconfirmedBalance() + startingSCConfirmed, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } + startingOutgoing, startingIncoming, err := wt.wallet.UnconfirmedBalance() + if err != nil { + t.Fatal(err) + } if !startingOutgoing.IsZero() { t.Fatal(startingOutgoing) } @@ -322,8 +358,14 @@ func TestConcurrentBuildersSingleOutput(t *testing.T) { } // Create two builders at the same time, then add money to each. - builder1 := wt.wallet.StartTransaction() - builder2 := wt.wallet.StartTransaction() + builder1, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } + builder2, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } // Fund each builder with a siacoin output. funding := types.NewCurrency64(10e3).Mul(types.SiacoinPrecision) err = builder1.FundSiacoins(funding) @@ -337,7 +379,10 @@ func TestConcurrentBuildersSingleOutput(t *testing.T) { } // Get a second reading on the wallet's balance. - fundedSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() + fundedSCConfirmed, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !startingSCConfirmed.Equals(fundedSCConfirmed) { t.Fatal("confirmed siacoin balance changed when no blocks have been mined", startingSCConfirmed, fundedSCConfirmed) } @@ -397,8 +442,14 @@ func TestParallelBuilders(t *testing.T) { } // Get a baseline balance for the wallet. 
- startingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() - startingOutgoing, startingIncoming := wt.wallet.UnconfirmedBalance() + startingSCConfirmed, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } + startingOutgoing, startingIncoming, err := wt.wallet.UnconfirmedBalance() + if err != nil { + t.Fatal(err) + } if !startingOutgoing.IsZero() { t.Fatal(startingOutgoing) } @@ -413,8 +464,11 @@ func TestParallelBuilders(t *testing.T) { wg.Add(1) go func() { // Create the builder and fund the transaction. - builder := wt.wallet.StartTransaction() - err := builder.FundSiacoins(funding) + builder, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } + err = builder.FundSiacoins(funding) if err != nil { t.Fatal(err) } @@ -444,7 +498,10 @@ func TestParallelBuilders(t *testing.T) { } // Check the final balance. - endingSCConfirmed, _, _ := wt.wallet.ConfirmedBalance() + endingSCConfirmed, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } expected := startingSCConfirmed.Sub(funding.Mul(types.NewCurrency64(uint64(outputsDesired)))) if !expected.Equals(endingSCConfirmed) { t.Fatal("did not get the expected ending balance", expected, endingSCConfirmed, startingSCConfirmed) @@ -468,7 +525,10 @@ func TestUnconfirmedParents(t *testing.T) { if err != nil { t.Fatal("Failed to get address", err) } - siacoins, _, _ := wt.wallet.ConfirmedBalance() + siacoins, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } tSet, err := wt.wallet.SendSiacoins(siacoins.Sub(types.SiacoinPrecision), uc.UnlockHash()) if err != nil { t.Fatal("Failed to send coins", err) @@ -477,7 +537,10 @@ func TestUnconfirmedParents(t *testing.T) { // Create a transaction. That transaction should use siacoin outputs from // the unconfirmed transactions in tSet as inputs and is therefore a child // of tSet. - b := wt.wallet.StartTransaction() + b, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } txnFund := types.NewCurrency64(1e3) err = b.FundSiacoins(txnFund) if err != nil { diff --git a/modules/wallet/transactions.go b/modules/wallet/transactions.go index 5b7e698a6c..3436ba62f6 100644 --- a/modules/wallet/transactions.go +++ b/modules/wallet/transactions.go @@ -18,7 +18,10 @@ var ( // AddressTransactions returns all of the wallet transactions associated with a // single unlock hash. -func (w *Wallet) AddressTransactions(uh types.UnlockHash) (pts []modules.ProcessedTransaction) { +func (w *Wallet) AddressTransactions(uh types.UnlockHash) (pts []modules.ProcessedTransaction, err error) { + if err := w.tg.Add(); err != nil { + return []modules.ProcessedTransaction{}, err + } // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -32,12 +35,15 @@ func (w *Wallet) AddressTransactions(uh types.UnlockHash) (pts []modules.Process } pts = append(pts, pt) } - return pts + return pts, nil } // AddressUnconfirmedTransactions returns all of the unconfirmed wallet transactions // related to a specific address. 
-func (w *Wallet) AddressUnconfirmedTransactions(uh types.UnlockHash) (pts []modules.ProcessedTransaction) { +func (w *Wallet) AddressUnconfirmedTransactions(uh types.UnlockHash) (pts []modules.ProcessedTransaction, err error) { + if err := w.tg.Add(); err != nil { + return []modules.ProcessedTransaction{}, err + } // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -63,12 +69,15 @@ func (w *Wallet) AddressUnconfirmedTransactions(uh types.UnlockHash) (pts []modu pts = append(pts, pt) } } - return pts + return pts, err } // Transaction returns the transaction with the given id. 'False' is returned // if the transaction does not exist. -func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTransaction, found bool) { +func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTransaction, found bool, err error) { + if err := w.tg.Add(); err != nil { + return modules.ProcessedTransaction{}, false, err + } // ensure durability of reported transaction w.mu.Lock() defer w.mu.Unlock() @@ -77,7 +86,7 @@ func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTran // Get the keyBytes for the given txid keyBytes, err := dbGetTransactionIndex(w.dbTx, txid) if err != nil { - return modules.ProcessedTransaction{}, false + return modules.ProcessedTransaction{}, false, nil } // Retrieve the transaction @@ -88,6 +97,9 @@ func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTran // Transactions returns all transactions relevant to the wallet that were // confirmed in the range [startHeight, endHeight]. func (w *Wallet) Transactions(startHeight, endHeight types.BlockHeight) (pts []modules.ProcessedTransaction, err error) { + if err := w.tg.Add(); err != nil { + return nil, err + } // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -185,8 +197,11 @@ func (w *Wallet) Transactions(startHeight, endHeight types.BlockHeight) (pts []m // UnconfirmedTransactions returns the set of unconfirmed transactions that are // relevant to the wallet. -func (w *Wallet) UnconfirmedTransactions() []modules.ProcessedTransaction { +func (w *Wallet) UnconfirmedTransactions() ([]modules.ProcessedTransaction, error) { + if err := w.tg.Add(); err != nil { + return nil, err + } w.mu.RLock() defer w.mu.RUnlock() - return w.unconfirmedProcessedTransactions + return w.unconfirmedProcessedTransactions, nil } diff --git a/modules/wallet/transactions_test.go b/modules/wallet/transactions_test.go index bca7ec2816..2a04f46d93 100644 --- a/modules/wallet/transactions_test.go +++ b/modules/wallet/transactions_test.go @@ -46,7 +46,11 @@ func TestIntegrationTransactions(t *testing.T) { } // Two transactions added to unconfirmed pool - 1 to fund the exact output, // and 1 to hold the exact output. 
- if len(wt.wallet.UnconfirmedTransactions()) != 2 { + utxns, err := wt.wallet.UnconfirmedTransactions() + if err != nil { + t.Fatal(err) + } + if len(utxns) != 2 { t.Error("was expecting 4 unconfirmed transactions") } @@ -145,7 +149,10 @@ func TestIntegrationTransaction(t *testing.T) { } defer wt.closeWt() - _, exists := wt.wallet.Transaction(types.TransactionID{}) + _, exists, err := wt.wallet.Transaction(types.TransactionID{}) + if err != nil { + t.Fatal(err) + } if exists { t.Error("able to query a nonexisting transction") } @@ -162,7 +169,10 @@ func TestIntegrationTransaction(t *testing.T) { } // sendTxns[0] is the set-up transaction, sendTxns[1] contains the sentValue output - txn, exists := wt.wallet.Transaction(sendTxns[1].ID()) + txn, exists, err := wt.wallet.Transaction(sendTxns[1].ID()) + if err != nil { + t.Fatal(err) + } if !exists { t.Fatal("unable to query transaction") } @@ -191,7 +201,10 @@ func TestIntegrationTransaction(t *testing.T) { t.Fatal(err) } - txn, exists = wt.wallet.Transaction(sendTxns[1].ID()) + txn, exists, err = wt.wallet.Transaction(sendTxns[1].ID()) + if err != nil { + t.Fatal(err) + } if !exists { t.Fatal("unable to query transaction") } @@ -294,11 +307,18 @@ func TestIntegrationAddressTransactions(t *testing.T) { } // Check the confirmed balance of the address. - addrHist := wt.wallet.AddressTransactions(addr) + addrHist, err := wt.wallet.AddressTransactions(addr) + if err != nil { + t.Fatal(err) + } if len(addrHist) != 0 { t.Error("address should be empty - no confirmed transactions") } - if len(wt.wallet.AddressUnconfirmedTransactions(addr)) == 0 { + utxns, err := wt.wallet.AddressUnconfirmedTransactions(addr) + if err != nil { + t.Fatal(err) + } + if len(utxns) == 0 { t.Error("addresses unconfirmed transactions should not be empty") } b, _ := wt.miner.FindBlock() @@ -306,11 +326,18 @@ func TestIntegrationAddressTransactions(t *testing.T) { if err != nil { t.Fatal(err) } - addrHist = wt.wallet.AddressTransactions(addr) + addrHist, err = wt.wallet.AddressTransactions(addr) + if err != nil { + t.Fatal(err) + } if len(addrHist) == 0 { t.Error("address history should have some transactions") } - if len(wt.wallet.AddressUnconfirmedTransactions(addr)) != 0 { + utxns, err = wt.wallet.AddressUnconfirmedTransactions(addr) + if err != nil { + t.Fatal(err) + } + if len(utxns) != 0 { t.Error("addresses unconfirmed transactions should be empty") } } @@ -344,11 +371,18 @@ func TestAddressTransactionRevertedBlock(t *testing.T) { t.Fatal(err) } - addrHist := wt.wallet.AddressTransactions(addr) + addrHist, err := wt.wallet.AddressTransactions(addr) + if err != nil { + t.Fatal(err) + } if len(addrHist) == 0 { t.Error("address history should have some transactions") } - if len(wt.wallet.AddressUnconfirmedTransactions(addr)) != 0 { + utxns, err := wt.wallet.AddressUnconfirmedTransactions(addr) + if err != nil { + t.Fatal(err) + } + if len(utxns) != 0 { t.Error("addresses unconfirmed transactions should be empty") } @@ -359,11 +393,18 @@ func TestAddressTransactionRevertedBlock(t *testing.T) { } wt.wallet.mu.Unlock() - addrHist = wt.wallet.AddressTransactions(addr) + addrHist, err = wt.wallet.AddressTransactions(addr) + if err != nil { + t.Fatal(err) + } if len(addrHist) > 0 { t.Error("address history should should be empty") } - if len(wt.wallet.AddressUnconfirmedTransactions(addr)) > 0 { + utxns, err = wt.wallet.AddressUnconfirmedTransactions(addr) + if err != nil { + t.Fatal(err) + } + if len(utxns) > 0 { t.Error("addresses unconfirmed transactions should have some 
transactions") } } @@ -471,7 +512,10 @@ func BenchmarkAddressTransactions(b *testing.B) { b.ResetTimer() b.Run("indexed", func(b *testing.B) { for i := 0; i < b.N; i++ { - txns := wt.wallet.AddressTransactions(searchAddr) + txns, err := wt.wallet.AddressTransactions(searchAddr) + if err != nil { + b.Fatal(err) + } if len(txns) != 1 { b.Fatal(len(txns)) } @@ -480,7 +524,10 @@ func BenchmarkAddressTransactions(b *testing.B) { b.Run("indexed-nosync", func(b *testing.B) { wt.wallet.db.NoSync = true for i := 0; i < b.N; i++ { - txns := wt.wallet.AddressTransactions(searchAddr) + txns, err := wt.wallet.AddressTransactions(searchAddr) + if err != nil { + b.Fatal(err) + } if len(txns) != 1 { b.Fatal(len(txns)) } diff --git a/modules/wallet/unseeded_test.go b/modules/wallet/unseeded_test.go index 87730cffc2..7d3ef20474 100644 --- a/modules/wallet/unseeded_test.go +++ b/modules/wallet/unseeded_test.go @@ -26,7 +26,10 @@ func TestIntegrationLoad1of1Siag(t *testing.T) { t.Error(err) } - _, siafundBal, _ := wt.wallet.ConfirmedBalance() + _, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !siafundBal.Equals64(2000) { t.Error("expecting a siafund balance of 2000 from the 1of1 key") } @@ -40,7 +43,10 @@ func TestIntegrationLoad1of1Siag(t *testing.T) { if err != nil { t.Fatal(err) } - _, siafundBal, _ = wt.wallet.ConfirmedBalance() + _, siafundBal, _, err = wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !siafundBal.Equals64(1988) { t.Error("expecting balance of 1988 after sending siafunds to the void") } @@ -65,7 +71,10 @@ func TestIntegrationLoad2of3Siag(t *testing.T) { t.Error(err) } - _, siafundBal, _ := wt.wallet.ConfirmedBalance() + _, siafundBal, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !siafundBal.Equals64(7000) { t.Error("expecting a siafund balance of 7000 from the 2of3 key") } @@ -79,7 +88,10 @@ func TestIntegrationLoad2of3Siag(t *testing.T) { if err != nil { t.Fatal(err) } - _, siafundBal, _ = wt.wallet.ConfirmedBalance() + _, siafundBal, _, err = wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !siafundBal.Equals64(6988) { t.Error("expecting balance of 6988 after sending siafunds to the void") } diff --git a/modules/wallet/update_test.go b/modules/wallet/update_test.go index 5cfc39233c..1f863c2107 100644 --- a/modules/wallet/update_test.go +++ b/modules/wallet/update_test.go @@ -26,7 +26,10 @@ func TestUpdate(t *testing.T) { } // since the miner is mining into a wallet address, the wallet should have // added a new transaction - _, ok := wt.wallet.Transaction(types.TransactionID(b.ID())) + _, ok, err := wt.wallet.Transaction(types.TransactionID(b.ID())) + if err != nil { + t.Fatal(err) + } if !ok { t.Fatal("no record of miner transaction") } @@ -36,7 +39,10 @@ func TestUpdate(t *testing.T) { RevertedBlocks: []types.Block{b}, }) // transaction should no longer be present - _, ok = wt.wallet.Transaction(types.TransactionID(b.ID())) + _, ok, err = wt.wallet.Transaction(types.TransactionID(b.ID())) + if err != nil { + t.Fatal(err) + } if ok { t.Fatal("miner transaction was not removed after block was reverted") } @@ -59,14 +65,20 @@ func TestUpdate(t *testing.T) { } // transaction should be present - _, ok = wt.wallet.Transaction(txnSet[0].ID()) + _, ok, err = wt.wallet.Transaction(txnSet[0].ID()) + if err != nil { + t.Fatal(err) + } if !ok { t.Fatal("no record of transaction") } // revert all the blocks wt.wallet.ProcessConsensusChange(revertCC) - _, ok = 
wt.wallet.Transaction(txnSet[0].ID()) + _, ok, err = wt.wallet.Transaction(txnSet[0].ID()) + if err != nil { + t.Fatal(err) + } if ok { t.Fatal("transaction was not removed") } diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index 932cb177ae..eeecaeeb4a 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -113,9 +113,9 @@ type Wallet struct { } // Height return the internal processed consensus height of the wallet -func (w *Wallet) Height() types.BlockHeight { +func (w *Wallet) Height() (types.BlockHeight, error) { if err := w.tg.Add(); err != nil { - return types.BlockHeight(0) + return types.BlockHeight(0), modules.ErrWalletShutdown } defer w.tg.Done() @@ -127,9 +127,9 @@ func (w *Wallet) Height() types.BlockHeight { return encoding.Unmarshal(tx.Bucket(bucketWallet).Get(keyConsensusHeight), &height) }) if err != nil { - return types.BlockHeight(0) + return types.BlockHeight(0), err } - return types.BlockHeight(height) + return types.BlockHeight(height), nil } // New creates a new wallet, loading any known addresses from the input file @@ -210,7 +210,10 @@ func (w *Wallet) Close() error { // Once the wallet is locked it cannot be unlocked except using the // unexported unlock method (w.Unlock returns an error if the wallet's // ThreadGroup is stopped). - if w.Unlocked() { + w.mu.RLock() + unlocked := w.unlocked + w.mu.RUnlock() + if unlocked { if err := w.Lock(); err != nil { errs = append(errs, err) } @@ -227,9 +230,9 @@ func (w *Wallet) Close() error { // AllAddresses returns all addresses that the wallet is able to spend from, // including unseeded addresses. Addresses are returned sorted in byte-order. -func (w *Wallet) AllAddresses() []types.UnlockHash { +func (w *Wallet) AllAddresses() ([]types.UnlockHash, error) { if err := w.tg.Add(); err != nil { - return []types.UnlockHash{} + return []types.UnlockHash{}, modules.ErrWalletShutdown } defer w.tg.Done() @@ -243,14 +246,14 @@ func (w *Wallet) AllAddresses() []types.UnlockHash { sort.Slice(addrs, func(i, j int) bool { return bytes.Compare(addrs[i][:], addrs[j][:]) < 0 }) - return addrs + return addrs, nil } // Rescanning reports whether the wallet is currently rescanning the // blockchain. -func (w *Wallet) Rescanning() bool { +func (w *Wallet) Rescanning() (bool, error) { if err := w.tg.Add(); err != nil { - return false + return false, modules.ErrWalletShutdown } defer w.tg.Done() @@ -258,24 +261,29 @@ func (w *Wallet) Rescanning() bool { if !rescanning { w.scanLock.Unlock() } - return rescanning + return rescanning, nil } // Settings returns the wallet's current settings -func (w *Wallet) Settings() modules.WalletSettings { +func (w *Wallet) Settings() (modules.WalletSettings, error) { + if err := w.tg.Add(); err != nil { + return modules.WalletSettings{}, modules.ErrWalletShutdown + } + defer w.tg.Done() return modules.WalletSettings{ NoDefrag: w.defragDisabled, - } + }, nil } // SetSettings will update the settings for the wallet. 
-func (w *Wallet) SetSettings(s modules.WalletSettings) { +func (w *Wallet) SetSettings(s modules.WalletSettings) error { if err := w.tg.Add(); err != nil { - return + return modules.ErrWalletShutdown } defer w.tg.Done() w.mu.Lock() w.defragDisabled = s.NoDefrag w.mu.Unlock() + return nil } diff --git a/modules/wallet/wallet_test.go b/modules/wallet/wallet_test.go index 31d3d8f84b..b20d20054e 100644 --- a/modules/wallet/wallet_test.go +++ b/modules/wallet/wallet_test.go @@ -186,7 +186,10 @@ func TestAllAddresses(t *testing.T) { wt.wallet.keys[types.UnlockHash{2}] = spendableKey{} wt.wallet.keys[types.UnlockHash{4}] = spendableKey{} wt.wallet.keys[types.UnlockHash{3}] = spendableKey{} - addrs := wt.wallet.AllAddresses() + addrs, err := wt.wallet.AllAddresses() + if err != nil { + t.Fatal(err) + } for i := range addrs { if addrs[i][0] != byte(i) { t.Error("address sorting failed:", i, addrs[i][0]) @@ -235,7 +238,11 @@ func TestRescanning(t *testing.T) { defer wt.closeWt() // A fresh wallet should not be rescanning. - if wt.wallet.Rescanning() { + rescanning, err := wt.wallet.Rescanning() + if err != nil { + t.Fatal(err) + } + if rescanning { t.Fatal("fresh wallet should not report that a scan is underway") } @@ -253,7 +260,11 @@ func TestRescanning(t *testing.T) { // wait for goroutine to start, after which Rescanning should return true time.Sleep(time.Millisecond * 10) - if !wt.wallet.Rescanning() { + rescanning, err = wt.wallet.Rescanning() + if err != nil { + t.Fatal(err) + } + if !rescanning { t.Fatal("wallet should report that a scan is underway") } @@ -264,7 +275,11 @@ func TestRescanning(t *testing.T) { } // Rescanning should now return false again - if wt.wallet.Rescanning() { + rescanning, err = wt.wallet.Rescanning() + if err != nil { + t.Fatal(err) + } + if rescanning { t.Fatal("wallet should not report that a scan is underway") } } @@ -339,7 +354,10 @@ func TestAdvanceLookaheadNoRescan(t *testing.T) { } defer wt.closeWt() - builder := wt.wallet.StartTransaction() + builder, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } payout := types.ZeroCurrency // Get the current progress @@ -423,7 +441,10 @@ func TestAdvanceLookaheadForceRescan(t *testing.T) { if err != nil { t.Fatal("Couldn't fetch primary seed from db") } - startBal, _, _ := wt.wallet.ConfirmedBalance() + startBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } // Send coins to an address with a high seed index, just outside the // lookahead range. 
It will not be initially detected, but later the @@ -432,7 +453,10 @@ func TestAdvanceLookaheadForceRescan(t *testing.T) { farAddr := generateSpendableKey(wt.wallet.primarySeed, highIndex).UnlockConditions.UnlockHash() farPayout := types.SiacoinPrecision.Mul64(8888) - builder := wt.wallet.StartTransaction() + builder, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } builder.AddSiacoinOutput(types.SiacoinOutput{ UnlockHash: farAddr, Value: farPayout, @@ -452,12 +476,18 @@ func TestAdvanceLookaheadForceRescan(t *testing.T) { t.Fatal(err) } wt.addBlockNoPayout() - newBal, _, _ := wt.wallet.ConfirmedBalance() + newBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !startBal.Sub(newBal).Equals(farPayout) { t.Fatal("wallet should not recognize coins sent to very high seed index") } - builder = wt.wallet.StartTransaction() + builder, err = wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } var payout types.Currency // choose 10 keys in the lookahead and remember them @@ -500,7 +530,10 @@ func TestAdvanceLookaheadForceRescan(t *testing.T) { time.Sleep(time.Second * 2) // Check that high seed index txn was discovered in the rescan - rescanBal, _, _ := wt.wallet.ConfirmedBalance() + rescanBal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !rescanBal.Equals(startBal) { t.Fatal("wallet did not discover txn after rescan") } @@ -557,8 +590,14 @@ func TestDistantWallets(t *testing.T) { } // The second wallet's balance should update accordingly. - w1bal, _, _ := wt.wallet.ConfirmedBalance() - w2bal, _, _ := w2.ConfirmedBalance() + w1bal, _, _, err := wt.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } + w2bal, _, _, err := w2.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if !w1bal.Equals(w2bal) { t.Fatal("balances do not match:", w1bal, w2bal) @@ -566,7 +605,10 @@ func TestDistantWallets(t *testing.T) { // Send coins to an address with a very high seed index, outside the // lookahead range. w2 should not detect it. 
- tbuilder := wt.wallet.StartTransaction() + tbuilder, err := wt.wallet.StartTransaction() + if err != nil { + t.Fatal(err) + } farAddr := generateSpendableKey(wt.wallet.primarySeed, lookaheadBuffer*10).UnlockConditions.UnlockHash() value := types.SiacoinPrecision.Mul64(1e3) tbuilder.AddSiacoinOutput(types.SiacoinOutput{ @@ -587,7 +629,10 @@ func TestDistantWallets(t *testing.T) { } wt.addBlockNoPayout() - if newBal, _, _ := w2.ConfirmedBalance(); !newBal.Equals(w2bal.Sub(value)) { + if newBal, _, _, err := w2.ConfirmedBalance(); !newBal.Equals(w2bal.Sub(value)) { + if err != nil { + t.Fatal(err) + } t.Fatal("wallet should not recognize coins sent to very high seed index") } } diff --git a/node/api/hostdb_test.go b/node/api/hostdb_test.go index f037a63cc9..43cd46d926 100644 --- a/node/api/hostdb_test.go +++ b/node/api/hostdb_test.go @@ -262,7 +262,11 @@ func assembleHostPort(key crypto.TwofishKey, hostHostname string, testdir string if err != nil { return nil, err } - if !w.Encrypted() { + encrypted, err := w.Encrypted() + if err != nil { + return nil, err + } + if !encrypted { _, err = w.Encrypt(key) if err != nil { return nil, err diff --git a/node/api/server_helpers_test.go b/node/api/server_helpers_test.go index 8de1f0e1a9..c87e83136d 100644 --- a/node/api/server_helpers_test.go +++ b/node/api/server_helpers_test.go @@ -165,7 +165,11 @@ func assembleServerTester(key crypto.TwofishKey, testdir string) (*serverTester, if err != nil { return nil, err } - if !w.Encrypted() { + encrypted, err := w.Encrypted() + if err != nil { + return nil, err + } + if !encrypted { _, err = w.Encrypt(key) if err != nil { return nil, err @@ -245,7 +249,11 @@ func assembleAuthenticatedServerTester(requiredPassword string, key crypto.Twofi if err != nil { return nil, err } - if !w.Encrypted() { + encrypted, err := w.Encrypted() + if err != nil { + return nil, err + } + if !encrypted { _, err = w.Encrypt(key) if err != nil { return nil, err diff --git a/node/api/wallet.go b/node/api/wallet.go index c48ddcdbba..5793bcaa56 100644 --- a/node/api/wallet.go +++ b/node/api/wallet.go @@ -2,6 +2,7 @@ package api import ( "encoding/json" + "fmt" "net/http" "path/filepath" "strconv" @@ -122,14 +123,46 @@ func encryptionKeys(seedStr string) (validKeys []crypto.TwofishKey) { // walletHander handles API calls to /wallet. 
func (api *API) walletHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { - siacoinBal, siafundBal, siaclaimBal := api.wallet.ConfirmedBalance() - siacoinsOut, siacoinsIn := api.wallet.UnconfirmedBalance() - dustThreshold := api.wallet.DustThreshold() + siacoinBal, siafundBal, siaclaimBal, err := api.wallet.ConfirmedBalance() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) + return + } + siacoinsOut, siacoinsIn, err := api.wallet.UnconfirmedBalance() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) + return + } + dustThreshold, err := api.wallet.DustThreshold() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) + return + } + encrypted, err := api.wallet.Encrypted() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) + return + } + unlocked, err := api.wallet.Unlocked() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) + return + } + rescanning, err := api.wallet.Rescanning() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) + return + } + height, err := api.wallet.Height() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet: %v", err)}, http.StatusBadRequest) + return + } WriteJSON(w, WalletGET{ - Encrypted: api.wallet.Encrypted(), - Unlocked: api.wallet.Unlocked(), - Rescanning: api.wallet.Rescanning(), - Height: api.wallet.Height(), + Encrypted: encrypted, + Unlocked: unlocked, + Rescanning: rescanning, + Height: height, ConfirmedSiacoinBalance: siacoinBal, UnconfirmedOutgoingSiacoins: siacoinsOut, @@ -179,8 +212,13 @@ func (api *API) walletAddressHandler(w http.ResponseWriter, req *http.Request, _ // walletAddressHandler handles API calls to /wallet/addresses. 
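// The repeated checks above could be collapsed into a small helper; this is a
// sketch only (the helper name is hypothetical), and the patch keeps each
// check explicit inside the handler:
func writeWalletError(w http.ResponseWriter, route string, err error) bool {
	if err != nil {
		WriteError(w, Error{fmt.Sprintf("Error when calling %v: %v", route, err)}, http.StatusBadRequest)
		return true
	}
	return false
}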
func (api *API) walletAddressesHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { + addresses, err := api.wallet.AllAddresses() + if err != nil { + WriteError(w, Error{fmt.Sprintf("Error when calling /wallet/addresses: %v", err)}, http.StatusBadRequest) + return + } WriteJSON(w, WalletAddressesGET{ - Addresses: api.wallet.AllAddresses(), + Addresses: addresses, }) } @@ -482,11 +520,15 @@ func (api *API) walletTransactionHandler(w http.ResponseWriter, req *http.Reques jsonID := "\"" + ps.ByName("id") + "\"" err := id.UnmarshalJSON([]byte(jsonID)) if err != nil { - WriteError(w, Error{"error when calling /wallet/history: " + err.Error()}, http.StatusBadRequest) + WriteError(w, Error{"error when calling /wallet/transaction/id:" + err.Error()}, http.StatusBadRequest) return } - txn, ok := api.wallet.Transaction(id) + txn, ok, err := api.wallet.Transaction(id) + if err != nil { + WriteError(w, Error{"error when calling /wallet/transaction/id:" + err.Error()}, http.StatusBadRequest) + return + } if !ok { WriteError(w, Error{"error when calling /wallet/transaction/:id : transaction not found"}, http.StatusBadRequest) return @@ -519,7 +561,11 @@ func (api *API) walletTransactionsHandler(w http.ResponseWriter, req *http.Reque WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) return } - unconfirmedTxns := api.wallet.UnconfirmedTransactions() + unconfirmedTxns, err := api.wallet.UnconfirmedTransactions() + if err != nil { + WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) + return + } WriteJSON(w, WalletTransactionsGET{ ConfirmedTransactions: confirmedTxns, @@ -539,8 +585,16 @@ func (api *API) walletTransactionsAddrHandler(w http.ResponseWriter, req *http.R return } - confirmedATs := api.wallet.AddressTransactions(addr) - unconfirmedATs := api.wallet.AddressUnconfirmedTransactions(addr) + confirmedATs, err := api.wallet.AddressTransactions(addr) + if err != nil { + WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) + return + } + unconfirmedATs, err := api.wallet.AddressUnconfirmedTransactions(addr) + if err != nil { + WriteError(w, Error{"error when calling /wallet/transactions: " + err.Error()}, http.StatusBadRequest) + return + } WriteJSON(w, WalletTransactionsGETaddr{ ConfirmedTransactions: confirmedATs, UnconfirmedTransactions: unconfirmedATs, diff --git a/node/api/wallet_test.go b/node/api/wallet_test.go index c1021fcaf8..370ee9d8ff 100644 --- a/node/api/wallet_test.go +++ b/node/api/wallet_test.go @@ -2,7 +2,6 @@ package api import ( "encoding/json" - "errors" "fmt" "net/url" "os" @@ -19,6 +18,7 @@ import ( "github.com/NebulousLabs/Sia/modules/transactionpool" "github.com/NebulousLabs/Sia/modules/wallet" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/errors" "github.com/NebulousLabs/fastrand" ) @@ -119,7 +119,11 @@ func TestWalletEncrypt(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. - if !st.wallet.Unlocked() { + unlocked, err := st.wallet.Unlocked() + if err != nil { + t.Error(err) + } + if !unlocked { t.Error("wallet is not unlocked") } @@ -147,7 +151,11 @@ func TestWalletEncrypt(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. 
- if !st2.wallet.Unlocked() { + unlocked, err = st2.wallet.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } } @@ -213,7 +221,11 @@ func TestWalletBlankEncrypt(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. - if !w.Unlocked() { + unlocked, err := w.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } } @@ -296,7 +308,11 @@ func TestIntegrationWalletInitSeed(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. - if !w.Unlocked() { + unlocked, err := w.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } } @@ -515,8 +531,11 @@ func TestIntegrationWalletLoadSeedPOST(t *testing.T) { } // Record starting balances. - oldBal, _, _ := st.wallet.ConfirmedBalance() - w2bal, _, _ := w2.ConfirmedBalance() + oldBal, _, _, err1 := st.wallet.ConfirmedBalance() + w2bal, _, _, err2 := w2.ConfirmedBalance() + if errs := errors.Compose(err1, err2); errs != nil { + t.Fatal(errs) + } if w2bal.IsZero() { t.Fatal("second wallet's balance should not be zero") } @@ -532,7 +551,10 @@ func TestIntegrationWalletLoadSeedPOST(t *testing.T) { t.Fatal(err) } // First wallet should now have balance of both wallets - bal, _, _ := st.wallet.ConfirmedBalance() + bal, _, _, err := st.wallet.ConfirmedBalance() + if err != nil { + t.Fatal(err) + } if exp := oldBal.Add(w2bal); !bal.Equals(exp) { t.Fatalf("wallet did not load seed correctly: expected %v coins, got %v", exp, bal) } @@ -1017,7 +1039,11 @@ func TestWalletReset(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. - if !st.wallet.Unlocked() { + unlocked, err := st.wallet.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } @@ -1045,7 +1071,11 @@ func TestWalletReset(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. - if !st2.wallet.Unlocked() { + unlocked, err = st2.wallet.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } } @@ -1257,7 +1287,11 @@ func TestWalletChangePassword(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. - if !st.wallet.Unlocked() { + unlocked, err := st.wallet.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } @@ -1270,7 +1304,11 @@ func TestWalletChangePassword(t *testing.T) { t.Fatal(err) } // wallet should still be unlocked - if !st.wallet.Unlocked() { + unlocked, err = st.wallet.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Fatal("changepassword locked the wallet") } @@ -1285,7 +1323,11 @@ func TestWalletChangePassword(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. - if !st.wallet.Unlocked() { + unlocked, err = st.wallet.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } @@ -1313,7 +1355,11 @@ func TestWalletChangePassword(t *testing.T) { t.Fatal(err) } // Check that the wallet actually unlocked. 
- if !st2.wallet.Unlocked() { + unlocked, err = st2.wallet.Unlocked() + if err != nil { + t.Fatal(err) + } + if !unlocked { t.Error("wallet is not unlocked") } } @@ -1471,7 +1517,10 @@ func TestWalletGETDust(t *testing.T) { t.Fatal(err) } - dt := st.wallet.DustThreshold() + dt, err := st.wallet.DustThreshold() + if err != nil { + t.Fatal(err) + } if !dt.Equals(wg.DustThreshold) { t.Fatal("dustThreshold mismatch") } From 98eca0997fee08bfa6e0afd57f0397d7f7ec9750 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 7 May 2018 16:50:07 -0400 Subject: [PATCH 194/212] add missing w.tg.Done --- modules/wallet/encrypt.go | 3 +++ modules/wallet/transactionbuilder.go | 2 ++ modules/wallet/transactions.go | 5 +++++ 3 files changed, 10 insertions(+) diff --git a/modules/wallet/encrypt.go b/modules/wallet/encrypt.go index 29176a9ba9..b68b0cc83c 100644 --- a/modules/wallet/encrypt.go +++ b/modules/wallet/encrypt.go @@ -278,6 +278,7 @@ func (w *Wallet) Encrypted() (bool, error) { if err := w.tg.Add(); err != nil { return false, err } + w.tg.Done() w.mu.Lock() defer w.mu.Unlock() if build.DEBUG && w.unlocked && !w.encrypted { @@ -398,6 +399,7 @@ func (w *Wallet) Unlocked() (bool, error) { if err := w.tg.Add(); err != nil { return false, err } + defer w.tg.Done() w.mu.RLock() defer w.mu.RUnlock() return w.unlocked, nil @@ -409,6 +411,7 @@ func (w *Wallet) Lock() error { if err := w.tg.Add(); err != nil { return err } + defer w.tg.Done() w.mu.Lock() defer w.mu.Unlock() if !w.unlocked { diff --git a/modules/wallet/transactionbuilder.go b/modules/wallet/transactionbuilder.go index c96226f02d..4fe1d4de66 100644 --- a/modules/wallet/transactionbuilder.go +++ b/modules/wallet/transactionbuilder.go @@ -667,6 +667,7 @@ func (w *Wallet) RegisterTransaction(t types.Transaction, parents []types.Transa if err := w.tg.Add(); err != nil { return nil, err } + defer w.tg.Done() w.mu.Lock() defer w.mu.Unlock() return w.registerTransaction(t, parents), nil @@ -678,5 +679,6 @@ func (w *Wallet) StartTransaction() (modules.TransactionBuilder, error) { if err := w.tg.Add(); err != nil { return nil, err } + defer w.tg.Done() return w.RegisterTransaction(types.Transaction{}, nil) } diff --git a/modules/wallet/transactions.go b/modules/wallet/transactions.go index 3436ba62f6..ed31916e1d 100644 --- a/modules/wallet/transactions.go +++ b/modules/wallet/transactions.go @@ -22,6 +22,7 @@ func (w *Wallet) AddressTransactions(uh types.UnlockHash) (pts []modules.Process if err := w.tg.Add(); err != nil { return []modules.ProcessedTransaction{}, err } + w.tg.Done() // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -44,6 +45,7 @@ func (w *Wallet) AddressUnconfirmedTransactions(uh types.UnlockHash) (pts []modu if err := w.tg.Add(); err != nil { return []modules.ProcessedTransaction{}, err } + w.tg.Done() // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -78,6 +80,7 @@ func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTran if err := w.tg.Add(); err != nil { return modules.ProcessedTransaction{}, false, err } + w.tg.Done() // ensure durability of reported transaction w.mu.Lock() defer w.mu.Unlock() @@ -100,6 +103,7 @@ func (w *Wallet) Transactions(startHeight, endHeight types.BlockHeight) (pts []m if err := w.tg.Add(); err != nil { return nil, err } + w.tg.Done() // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -201,6 +205,7 @@ func (w *Wallet) UnconfirmedTransactions() ([]modules.ProcessedTransaction, erro if err 
:= w.tg.Add(); err != nil { return nil, err } + w.tg.Done() w.mu.RLock() defer w.mu.RUnlock() return w.unconfirmedProcessedTransactions, nil From 18aec0ce1215d3a6eca10c1bbb2b0035e292daa1 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Mon, 7 May 2018 18:14:56 -0400 Subject: [PATCH 195/212] add managedLock and managedUnlocked --- modules/wallet/encrypt.go | 97 +++++++++++++++++++-------------- modules/wallet/transactions.go | 10 ++-- modules/wallet/wallet.go | 7 +-- node/api/server_helpers_test.go | 2 +- 4 files changed, 63 insertions(+), 53 deletions(-) diff --git a/modules/wallet/encrypt.go b/modules/wallet/encrypt.go index b68b0cc83c..9bda4f4ad2 100644 --- a/modules/wallet/encrypt.go +++ b/modules/wallet/encrypt.go @@ -278,7 +278,7 @@ func (w *Wallet) Encrypted() (bool, error) { if err := w.tg.Add(); err != nil { return false, err } - w.tg.Done() + defer w.tg.Done() w.mu.Lock() defer w.mu.Unlock() if build.DEBUG && w.unlocked && !w.encrypted { @@ -412,19 +412,43 @@ func (w *Wallet) Lock() error { return err } defer w.tg.Done() - w.mu.Lock() - defer w.mu.Unlock() - if !w.unlocked { - return modules.ErrLockedWallet + return w.managedLock() +} + +// ChangeKey changes the wallet's encryption key from masterKey to newKey. +func (w *Wallet) ChangeKey(masterKey crypto.TwofishKey, newKey crypto.TwofishKey) error { + if err := w.tg.Add(); err != nil { + return err } - w.log.Println("INFO: Locking wallet.") + defer w.tg.Done() - // Wipe all of the seeds and secret keys. They will be replaced upon - // calling 'Unlock' again. Note that since the public keys are not wiped, - // we can continue processing blocks. - w.wipeSecrets() - w.unlocked = false - return nil + return w.managedChangeKey(masterKey, newKey) +} + +// Unlock will decrypt the wallet seed and load all of the addresses into +// memory. +func (w *Wallet) Unlock(masterKey crypto.TwofishKey) error { + // By having the wallet's ThreadGroup track the Unlock method, we ensure + // that Unlock will never unlock the wallet once the ThreadGroup has been + // stopped. Without this precaution, the wallet's Close method would be + // unsafe because it would theoretically be possible for another function + // to Unlock the wallet in the short interval after Close calls w.Lock + // and before Close calls w.mu.Lock. + if err := w.tg.Add(); err != nil { + return err + } + defer w.tg.Done() + + if !w.scanLock.TryLock() { + return errScanInProgress + } + defer w.scanLock.Unlock() + + w.log.Println("INFO: Unlocking wallet.") + + // Initialize all of the keys in the wallet under a lock. While holding the + // lock, also grab the subscriber status. + return w.managedUnlock(masterKey) } // managedChangeKey safely performs the database operations required to change @@ -557,38 +581,27 @@ func (w *Wallet) managedChangeKey(masterKey crypto.TwofishKey, newKey crypto.Two return nil } -// ChangeKey changes the wallet's encryption key from masterKey to newKey. -func (w *Wallet) ChangeKey(masterKey crypto.TwofishKey, newKey crypto.TwofishKey) error { - if err := w.tg.Add(); err != nil { - return err +// managedLock will erase all keys from memory and prevent the wallet from +// spending coins until it is unlocked. +func (w *Wallet) managedLock() error { + w.mu.Lock() + defer w.mu.Unlock() + if !w.unlocked { + return modules.ErrLockedWallet } - defer w.tg.Done() + w.log.Println("INFO: Locking wallet.") - return w.managedChangeKey(masterKey, newKey) + // Wipe all of the seeds and secret keys. They will be replaced upon + // calling 'Unlock' again. 
Note that since the public keys are not wiped, + // we can continue processing blocks. + w.wipeSecrets() + w.unlocked = false + return nil } -// Unlock will decrypt the wallet seed and load all of the addresses into -// memory. -func (w *Wallet) Unlock(masterKey crypto.TwofishKey) error { - // By having the wallet's ThreadGroup track the Unlock method, we ensure - // that Unlock will never unlock the wallet once the ThreadGroup has been - // stopped. Without this precaution, the wallet's Close method would be - // unsafe because it would theoretically be possible for another function - // to Unlock the wallet in the short interval after Close calls w.Lock - // and before Close calls w.mu.Lock. - if err := w.tg.Add(); err != nil { - return err - } - defer w.tg.Done() - - if !w.scanLock.TryLock() { - return errScanInProgress - } - defer w.scanLock.Unlock() - - w.log.Println("INFO: Unlocking wallet.") - - // Initialize all of the keys in the wallet under a lock. While holding the - // lock, also grab the subscriber status. - return w.managedUnlock(masterKey) +// managedUnlocked indicates whether the wallet is locked or unlocked. +func (w *Wallet) managedUnlocked() bool { + w.mu.RLock() + defer w.mu.RUnlock() + return w.unlocked } diff --git a/modules/wallet/transactions.go b/modules/wallet/transactions.go index ed31916e1d..9d2e66a395 100644 --- a/modules/wallet/transactions.go +++ b/modules/wallet/transactions.go @@ -22,7 +22,7 @@ func (w *Wallet) AddressTransactions(uh types.UnlockHash) (pts []modules.Process if err := w.tg.Add(); err != nil { return []modules.ProcessedTransaction{}, err } - w.tg.Done() + defer w.tg.Done() // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -45,7 +45,7 @@ func (w *Wallet) AddressUnconfirmedTransactions(uh types.UnlockHash) (pts []modu if err := w.tg.Add(); err != nil { return []modules.ProcessedTransaction{}, err } - w.tg.Done() + defer w.tg.Done() // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -80,7 +80,7 @@ func (w *Wallet) Transaction(txid types.TransactionID) (pt modules.ProcessedTran if err := w.tg.Add(); err != nil { return modules.ProcessedTransaction{}, false, err } - w.tg.Done() + defer w.tg.Done() // ensure durability of reported transaction w.mu.Lock() defer w.mu.Unlock() @@ -103,7 +103,7 @@ func (w *Wallet) Transactions(startHeight, endHeight types.BlockHeight) (pts []m if err := w.tg.Add(); err != nil { return nil, err } - w.tg.Done() + defer w.tg.Done() // ensure durability of reported transactions w.mu.Lock() defer w.mu.Unlock() @@ -205,7 +205,7 @@ func (w *Wallet) UnconfirmedTransactions() ([]modules.ProcessedTransaction, erro if err := w.tg.Add(); err != nil { return nil, err } - w.tg.Done() + defer w.tg.Done() w.mu.RLock() defer w.mu.RUnlock() return w.unconfirmedProcessedTransactions, nil diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index eeecaeeb4a..46ddbd1e01 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -210,11 +210,8 @@ func (w *Wallet) Close() error { // Once the wallet is locked it cannot be unlocked except using the // unexported unlock method (w.Unlock returns an error if the wallet's // ThreadGroup is stopped). 
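// Most exported wallet methods touched in this changeset follow the same
// guard shape; a minimal sketch of the pattern (the method name here is
// illustrative, the fields are the ones used above):
func (w *Wallet) exampleGuardedCall() (bool, error) {
	if err := w.tg.Add(); err != nil {
		// The ThreadGroup refuses new work once the wallet is shutting down.
		return false, err
	}
	defer w.tg.Done()
	w.mu.RLock()
	defer w.mu.RUnlock()
	return w.unlocked, nil
}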
- w.mu.RLock() - unlocked := w.unlocked - w.mu.RUnlock() - if unlocked { - if err := w.Lock(); err != nil { + if w.managedUnlocked() { + if err := w.managedLock(); err != nil { errs = append(errs, err) } } diff --git a/node/api/server_helpers_test.go b/node/api/server_helpers_test.go index c87e83136d..2330ca3365 100644 --- a/node/api/server_helpers_test.go +++ b/node/api/server_helpers_test.go @@ -73,7 +73,7 @@ func (srv *Server) Close() error { for _, mod := range mods { if mod.c != nil { if closeErr := mod.c.Close(); closeErr != nil { - err = errors.Extend(err, fmt.Errorf("%v.Close failed: %v", mod.name, err)) + err = errors.Extend(err, fmt.Errorf("%v.Close failed: %v", mod.name, closeErr)) } } } From 4dd3d084d7f3c04ca299b74bda89682a645faf5f Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 8 May 2018 12:07:54 -0400 Subject: [PATCH 196/212] Rename GetFile() to File(), update function code --- modules/renter.go | 6 ++-- modules/renter/files.go | 56 ++++++++++++++++++++++++++++++----- node/api/client/renter.go | 2 +- node/api/renter.go | 4 +-- siatest/renter/renter_test.go | 1 + 5 files changed, 56 insertions(+), 13 deletions(-) diff --git a/modules/renter.go b/modules/renter.go index fa9035a012..27a02681e0 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -322,12 +322,12 @@ type Renter interface { // DownloadHistory lists all the files that have been scheduled for download. DownloadHistory() []DownloadInfo + // File returns information on specific file queried by user + File(siaPath string) (FileInfo, error) + // FileList returns information on all of the files stored by the renter. FileList() []FileInfo - // GetFile returns information on specific file queried by user - GetFile() File - // Host provides the DB entry and score breakdown for the requested host. Host(pk types.SiaPublicKey) (HostDBEntry, bool) diff --git a/modules/renter/files.go b/modules/renter/files.go index d1b2ca5853..70448a5d57 100644 --- a/modules/renter/files.go +++ b/modules/renter/files.go @@ -311,18 +311,60 @@ func (r *Renter) FileList() []modules.FileInfo { return fileList } -// GetFile returns file from siaPath queried by user. -func (r *Renter) GetFile(siaPath string) *file { - lockID := r.mu.Lock() - defer r.mu.Unlock(lockID) +// File returns file from siaPath queried by user. +// Update based on FileList +func (r *Renter) File(siaPath string) (modules.FileInfo, error) { + var fileInfo modules.FileInfo - // Check that currentName exists and newName doesn't. + // Get the file and its contracs + contractIDs := make(map[types.FileContractID]struct{}) + lockID := r.mu.RLock() file, exists := r.files[siaPath] if !exists { - return nil + return fileInfo, ErrUnknownPath + } + file.mu.RLock() + for cid := range file.contracts { + contractIDs[cid] = struct{}{} } + file.mu.RUnlock() + r.mu.RUnlock(lockID) + + // Build 2 maps that map every contract id to its offline and goodForRenew + // status. 
+ goodForRenew := make(map[types.FileContractID]bool) + offline := make(map[types.FileContractID]bool) + for cid := range contractIDs { + resolvedID := r.hostContractor.ResolveID(cid) + cu, ok := r.hostContractor.ContractUtility(resolvedID) + goodForRenew[cid] = ok && cu.GoodForRenew + offline[cid] = r.hostContractor.IsOffline(resolvedID) + } + + // Build the FileInfo + lockID = r.mu.RLock() + file.mu.RLock() + renewing := true + var localPath string + tf, exists := r.tracking[file.name] + if exists { + localPath = tf.RepairPath + } + fileInfo = modules.FileInfo{ + SiaPath: file.name, + LocalPath: localPath, + Filesize: file.size, + Renewing: renewing, + Available: file.available(offline), + Redundancy: file.redundancy(offline, goodForRenew), + UploadedBytes: file.uploadedBytes(), + UploadProgress: file.uploadProgress(), + Expiration: file.expiration(), + } + file.mu.RUnlock() + r.mu.RUnlock(lockID) - return file + return fileInfo, nil } // RenameFile takes an existing file and changes the nickname. The original diff --git a/node/api/client/renter.go b/node/api/client/renter.go index 4f25385344..3a116389a9 100644 --- a/node/api/client/renter.go +++ b/node/api/client/renter.go @@ -61,7 +61,7 @@ func (c *Client) RenterDownloadHTTPResponseGet(siaPath string, offset, length ui // RenterFileGet requests the /renter/files resource. func (c *Client) RenterFileGet(siaPath string) (rf api.RenterFile, err error) { siaPath = strings.TrimPrefix(siaPath, "/") - err = c.get("/renter/files/"+siaPath, &rf) + err = c.get("/renter/file/"+siaPath, &rf) return } diff --git a/node/api/renter.go b/node/api/renter.go index a7f24bc57f..4be26a1169 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -125,7 +125,7 @@ type ( // RenterFile lists the file queried. RenterFile struct { - File *file `json:"file"` + File FileInfo `json:"file"` } // RenterFiles lists the files known to the renter. @@ -381,7 +381,7 @@ func (api *API) renterRenameHandler(w http.ResponseWriter, req *http.Request, ps // renterFileHandler handles the API call to return specific file. func (api *API) renterFileHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { WriteJSON(w, RenterFile{ - Files: api.renter.GetFile(strings.TrimPrefix(ps.ByName("siapath"), "/"), + Files: api.renter.File(strings.TrimPrefix(ps.ByName("siapath"), "/"), }) } diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 3872156653..8314d3bcdb 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -43,6 +43,7 @@ func TestRenter(t *testing.T) { }{ {"TestRenterStreamingCache", testRenterStreamingCache}, {"TestUploadDownload", testUploadDownload}, + // Add test for single file api {"TestDownloadMultipleLargeSectors", testDownloadMultipleLargeSectors}, {"TestRenterLocalRepair", testRenterLocalRepair}, {"TestRenterRemoteRepair", testRenterRemoteRepair}, From a004e4fab9ddd7d4d549d1b3ae8eec0d01617f21 Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 8 May 2018 13:10:39 -0400 Subject: [PATCH 197/212] Add siatest in renter package for single file API --- node/api/renter.go | 9 ++++++--- siatest/renter.go | 9 +++++++++ siatest/renter/renter_test.go | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 3 deletions(-) diff --git a/node/api/renter.go b/node/api/renter.go index 4be26a1169..35956c3433 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -125,7 +125,8 @@ type ( // RenterFile lists the file queried. 
RenterFile struct { - File FileInfo `json:"file"` + File modules.FileInfo `json:"file"` + Error error `json:"error"` } // RenterFiles lists the files known to the renter. @@ -379,9 +380,11 @@ func (api *API) renterRenameHandler(w http.ResponseWriter, req *http.Request, ps } // renterFileHandler handles the API call to return specific file. -func (api *API) renterFileHandler(w http.ResponseWriter, req *http.Request, _ httprouter.Params) { +func (api *API) renterFileHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { + file, err := api.renter.File(strings.TrimPrefix(ps.ByName("siapath"), "/")) WriteJSON(w, RenterFile{ - Files: api.renter.File(strings.TrimPrefix(ps.ByName("siapath"), "/"), + File: file, + Error: err, }) } diff --git a/siatest/renter.go b/siatest/renter.go index 76ed1e4230..224c709ab6 100644 --- a/siatest/renter.go +++ b/siatest/renter.go @@ -131,6 +131,15 @@ func (tn *TestNode) DownloadInfo(lf *LocalFile, rf *RemoteFile) (*api.DownloadIn return di, err } +// File returns the file queried by the user +func (tn *TestNode) File(siaPath string) (modules.FileInfo, error) { + rf, err := tn.RenterFileGet(siapath) + if err != nil { + return nil, err + } + return rf, err +} + // Files lists the files tracked by the renter func (tn *TestNode) Files() ([]modules.FileInfo, error) { rf, err := tn.RenterFilesGet() diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 8314d3bcdb..5dab48ddfd 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -44,6 +44,7 @@ func TestRenter(t *testing.T) { {"TestRenterStreamingCache", testRenterStreamingCache}, {"TestUploadDownload", testUploadDownload}, // Add test for single file api + {"TestSingleFileGet", testSingleFileGet}, {"TestDownloadMultipleLargeSectors", testDownloadMultipleLargeSectors}, {"TestRenterLocalRepair", testRenterLocalRepair}, {"TestRenterRemoteRepair", testRenterRemoteRepair}, @@ -103,6 +104,37 @@ func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { } } +// testSingleFileUpload is a subtest that uses an existing TestGroup to test if +// using the signle file API endpoint works +func testSingleFileUpload(t *testing.T, tg *siatest.TestGroup) { + // Grab the first of the group's renters + renter := tg.Renters()[0] + // Upload file, creating a piece for each host in the group + dataPieces := uint64(1) + parityPieces := uint64(len(tg.Hosts())) - dataPieces + fileSize := 100 + siatest.Fuzz() + localFile, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) + if err != nil { + t.Fatal("Failed to upload a file for testing: ", err) + } + + files, err := tg.Files() + if err != nil { + t.Fatal("Failed to get renter files: ", err) + } + + var file modules.FileInfo + for f := range files { + file, err = tg.File(f.SiaPath) + if err != nil { + t.Fatal("Failed to request single file", err) + } + if file != f { + t.Fatal("Single file queries does not match file previously requested.") + } + } +} + // testDownloadMultipleLargeSectors downloads multiple large files (>5 Sectors) // in parallel and makes sure that the downloads are blocking each other. 
func testDownloadMultipleLargeSectors(t *testing.T, tg *siatest.TestGroup) { From 95daddb81949f12900c7fee4027deffe9f63db00 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 8 May 2018 14:28:00 -0400 Subject: [PATCH 198/212] switch to nebulous threadgroup --- modules/wallet/persist.go | 4 +--- modules/wallet/wallet.go | 10 ++++++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/modules/wallet/persist.go b/modules/wallet/persist.go index 4dabdc9865..5cc88b8a5e 100644 --- a/modules/wallet/persist.go +++ b/modules/wallet/persist.go @@ -123,9 +123,7 @@ func (w *Wallet) initPersist() error { if err != nil { return err } - w.tg.AfterStop(func() { w.db.Close() }) - - return nil + return w.tg.AfterStop(func() error { return w.db.Close() }) } // createBackup copies the wallet database to dst. diff --git a/modules/wallet/wallet.go b/modules/wallet/wallet.go index 46ddbd1e01..d0c1c2a4b5 100644 --- a/modules/wallet/wallet.go +++ b/modules/wallet/wallet.go @@ -19,6 +19,7 @@ import ( "github.com/NebulousLabs/Sia/persist" siasync "github.com/NebulousLabs/Sia/sync" "github.com/NebulousLabs/Sia/types" + "github.com/NebulousLabs/threadgroup" ) const ( @@ -105,7 +106,7 @@ type Wallet struct { // The wallet's ThreadGroup tells tracked functions to shut down and // blocks until they have all exited before returning from Close. - tg siasync.ThreadGroup + tg threadgroup.ThreadGroup // defragDisabled determines if the wallet is set to defrag outputs once it // reaches a certain threshold @@ -187,13 +188,18 @@ func NewCustomWallet(cs modules.ConsensusSet, tpool modules.TransactionPool, per } // make sure we commit on shutdown - w.tg.AfterStop(func() { + err = w.tg.AfterStop(func() error { err := w.dbTx.Commit() if err != nil { w.log.Println("ERROR: failed to apply database update:", err) w.dbTx.Rollback() + return err } + return nil }) + if err != nil { + return nil, err + } go w.threadedDBUpdate() return w, nil From 0aa308996a84be58170d4bd14f9ed52b057f1955 Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 8 May 2018 14:34:56 -0400 Subject: [PATCH 199/212] Update API documentation, fix test-long no passing --- doc/API.md | 54 ++++++++++++++++------- doc/api/Renter.md | 81 +++++++++++++++++++++++++++++------ siatest/renter.go | 6 +-- siatest/renter/renter_test.go | 13 +++--- 4 files changed, 115 insertions(+), 39 deletions(-) diff --git a/doc/API.md b/doc/API.md index 29418f9a17..f8866edc33 100644 --- a/doc/API.md +++ b/doc/API.md @@ -845,20 +845,21 @@ description of the byte encoding. 
Renter ------ -| Route | HTTP verb | -| ----------------------------------------------------------------------- | --------- | -| [/renter](#renter-get) | GET | -| [/renter](#renter-post) | POST | -| [/renter/contracts](#rentercontracts-get) | GET | -| [/renter/downloads](#renterdownloads-get) | GET | -| [/renter/prices](#renterprices-get) | GET | -| [/renter/files](#renterfiles-get) | GET | -| [/renter/delete/*___siapath___](#renterdeletesiapath-post) | POST | -| [/renter/download/*___siapath___](#renterdownloadsiapath-get) | GET | -| [/renter/downloadasync/*___siapath___](#renterdownloadasyncsiapath-get) | GET | -| [/renter/rename/*___siapath___](#renterrenamesiapath-post) | POST | -| [/renter/stream/*___siapath___](#renterstreamsiapath-get) | GET | -| [/renter/upload/*___siapath___](#renteruploadsiapath-post) | POST | +| Route | HTTP verb | +| ------------------------------------------------------------------------------------- | --------- | +| [/renter](#renter-get) | GET | +| [/renter](#renter-post) | POST | +| [/renter/contracts](#rentercontracts-get) | GET | +| [/renter/downloads](#renterdownloads-get) | GET | +| [/renter/prices](#renterprices-get) | GET | +| [/renter/files](#renterfiles-get) | GET | +| [/renter/file/*___siapath___](#renterfile___siapath___-get) | GET | +| [/renter/delete/*___siapath___](#renterdeletesiapath-post) | POST | +| [/renter/download/*___siapath___](#renterdownloadsiapath-get) | GET | +| [/renter/downloadasync/*___siapath___](#renterdownloadasyncsiapath-get) | GET | +| [/renter/rename/*___siapath___](#renterrenamesiapath-post) | POST | +| [/renter/stream/*___siapath___](#renterstreamsiapath-get) | GET | +| [/renter/upload/*___siapath___](#renteruploadsiapath-post) | POST | For examples and detailed descriptions of request and response parameters, refer to [Renter.md](/doc/api/Renter.md). @@ -989,11 +990,34 @@ lists the status of all files. } ``` +#### /renter/file/*__siapath__ [GET] + +lists the status of specified file. + +###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-4) +```javascript +{ + "file": [ + { + "siapath": "foo/bar.txt", + "localpath": "/home/foo/bar.txt", + "filesize": 8192, // bytes + "available": true, + "renewing": true, + "redundancy": 5, + "bytesuploaded": 209715200, // total bytes uploaded + "uploadprogress": 100, // percent + "expiration": 60000 + } + ] +} +``` + #### /renter/prices [GET] lists the estimated prices of performing various storage and data operations. -###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-4) +###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-5) ```javascript { "downloadterabyte": "1234", // hastings diff --git a/doc/api/Renter.md b/doc/api/Renter.md index 525b72216a..e40f1d4e8a 100644 --- a/doc/api/Renter.md +++ b/doc/api/Renter.md @@ -19,20 +19,21 @@ allocated funds. 
Index ----- -| Route | HTTP verb | -| ----------------------------------------------------------------------- | --------- | -| [/renter](#renter-get) | GET | -| [/renter](#renter-post) | POST | -| [/renter/contracts](#rentercontracts-get) | GET | -| [/renter/downloads](#renterdownloads-get) | GET | -| [/renter/files](#renterfiles-get) | GET | -| [/renter/prices](#renter-prices-get) | GET | -| [/renter/delete/___*siapath___](#renterdelete___siapath___-post) | POST | -| [/renter/download/___*siapath___](#renterdownload__siapath___-get) | GET | -| [/renter/downloadasync/___*siapath___](#renterdownloadasync__siapath___-get) | GET | -| [/renter/rename/___*siapath___](#renterrename___siapath___-post) | POST | -| [/renter/stream/___*siapath___](#renterstreamsiapath-get) | GET | -| [/renter/upload/___*siapath___](#renterupload___siapath___-post) | POST | +| Route | HTTP verb | +| -------------------------------------------------------------------------------------------- | --------- | +| [/renter](#renter-get) | GET | +| [/renter](#renter-post) | POST | +| [/renter/contracts](#rentercontracts-get) | GET | +| [/renter/downloads](#renterdownloads-get) | GET | +| [/renter/files](#renterfiles-get) | GET | +| [/renter/file/*___siapath___](#renterfile___siapath___-get) | GET | +| [/renter/prices](#renter-prices-get) | GET | +| [/renter/delete/___*siapath___](#renterdelete___siapath___-post) | POST | +| [/renter/download/___*siapath___](#renterdownload__siapath___-get) | GET | +| [/renter/downloadasync/___*siapath___](#renterdownloadasync__siapath___-get) | GET | +| [/renter/rename/___*siapath___](#renterrename___siapath___-post) | POST | +| [/renter/stream/___*siapath___](#renterstreamsiapath-get) | GET | +| [/renter/upload/___*siapath___](#renterupload___siapath___-post) | POST | #### /renter [GET] @@ -309,6 +310,58 @@ lists the status of all files. } ``` +#### /renter/file/*___siapath___ [GET] + +lists the statu of specified file. + +###### JSON Response +```javascript +{ + "file": [ + { + // Path to the file in the renter on the network. + "siapath": "foo/bar.txt", + + // Path to the local file on disk. + "localpath": "/home/foo/bar.txt", + + // Size of the file in bytes. + "filesize": 8192, // bytes + + // true if the file is available for download. Files may be available + // before they are completely uploaded. + "available": true, + + // true if the file's contracts will be automatically renewed by the + // renter. + "renewing": true, + + // Average redundancy of the file on the network. Redundancy is + // calculated by dividing the amount of data uploaded in the file's open + // contracts by the size of the file. Redundancy does not necessarily + // correspond to availability. Specifically, a redundancy >= 1 does not + // indicate the file is available as there could be a chunk of the file + // with 0 redundancy. + "redundancy": 5, + + // Total number of bytes successfully uploaded via current file contracts. + // This number includes padding and rendundancy, so a file with a size of + // 8192 bytes might be padded to 40 MiB and, with a redundancy of 5, + // encoded to 200 MiB for upload. + "uploadedbytes": 209715200, // bytes + + // Percentage of the file uploaded, including redundancy. Uploading has + // completed when uploadprogress is 100. Files may be available for + // download before upload progress is 100. + "uploadprogress": 100, // percent + + // Block height at which the file ceases availability. 
+ "expiration": 60000 + } + ] +} +``` + #### /renter/prices [GET] lists the estimated prices of performing various storage and data operations. diff --git a/siatest/renter.go b/siatest/renter.go index 224c709ab6..12b7fe3ed7 100644 --- a/siatest/renter.go +++ b/siatest/renter.go @@ -133,11 +133,11 @@ func (tn *TestNode) DownloadInfo(lf *LocalFile, rf *RemoteFile) (*api.DownloadIn // File returns the file queried by the user func (tn *TestNode) File(siaPath string) (modules.FileInfo, error) { - rf, err := tn.RenterFileGet(siapath) + rf, err := tn.RenterFileGet(siaPath) if err != nil { - return nil, err + return rf.File, err } - return rf, err + return rf.File, err } // Files lists the files tracked by the renter diff --git a/siatest/renter/renter_test.go b/siatest/renter/renter_test.go index 5dab48ddfd..bfc12a18c6 100644 --- a/siatest/renter/renter_test.go +++ b/siatest/renter/renter_test.go @@ -43,7 +43,6 @@ func TestRenter(t *testing.T) { }{ {"TestRenterStreamingCache", testRenterStreamingCache}, {"TestUploadDownload", testUploadDownload}, - // Add test for single file api {"TestSingleFileGet", testSingleFileGet}, {"TestDownloadMultipleLargeSectors", testDownloadMultipleLargeSectors}, {"TestRenterLocalRepair", testRenterLocalRepair}, @@ -104,28 +103,28 @@ func testUploadDownload(t *testing.T, tg *siatest.TestGroup) { } } -// testSingleFileUpload is a subtest that uses an existing TestGroup to test if +// testSingleFileGet is a subtest that uses an existing TestGroup to test if // using the signle file API endpoint works -func testSingleFileUpload(t *testing.T, tg *siatest.TestGroup) { +func testSingleFileGet(t *testing.T, tg *siatest.TestGroup) { // Grab the first of the group's renters renter := tg.Renters()[0] // Upload file, creating a piece for each host in the group dataPieces := uint64(1) parityPieces := uint64(len(tg.Hosts())) - dataPieces fileSize := 100 + siatest.Fuzz() - localFile, remoteFile, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) + _, _, err := renter.UploadNewFileBlocking(fileSize, dataPieces, parityPieces) if err != nil { t.Fatal("Failed to upload a file for testing: ", err) } - files, err := tg.Files() + files, err := renter.Files() if err != nil { t.Fatal("Failed to get renter files: ", err) } var file modules.FileInfo - for f := range files { - file, err = tg.File(f.SiaPath) + for _, f := range files { + file, err = renter.File(f.SiaPath) if err != nil { t.Fatal("Failed to request single file", err) } From 27a041ade6ae08220a237a6327652cd457f4d8a7 Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 8 May 2018 14:54:11 -0400 Subject: [PATCH 200/212] Fix comments and documentation --- doc/API.md | 30 +++++++++++++++--------------- doc/api/Renter.md | 32 ++++++++++++++++---------------- node/api/client/renter.go | 2 +- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/doc/API.md b/doc/API.md index f8866edc33..ffe2c0eb0b 100644 --- a/doc/API.md +++ b/doc/API.md @@ -845,21 +845,21 @@ description of the byte encoding. 
Renter ------ -| Route | HTTP verb | -| ------------------------------------------------------------------------------------- | --------- | -| [/renter](#renter-get) | GET | -| [/renter](#renter-post) | POST | -| [/renter/contracts](#rentercontracts-get) | GET | -| [/renter/downloads](#renterdownloads-get) | GET | -| [/renter/prices](#renterprices-get) | GET | -| [/renter/files](#renterfiles-get) | GET | -| [/renter/file/*___siapath___](#renterfile___siapath___-get) | GET | -| [/renter/delete/*___siapath___](#renterdeletesiapath-post) | POST | -| [/renter/download/*___siapath___](#renterdownloadsiapath-get) | GET | -| [/renter/downloadasync/*___siapath___](#renterdownloadasyncsiapath-get) | GET | -| [/renter/rename/*___siapath___](#renterrenamesiapath-post) | POST | -| [/renter/stream/*___siapath___](#renterstreamsiapath-get) | GET | -| [/renter/upload/*___siapath___](#renteruploadsiapath-post) | POST | +| Route | HTTP verb | +| --------------------------------------------------------------------------| --------- | +| [/renter](#renter-get) | GET | +| [/renter](#renter-post) | POST | +| [/renter/contracts](#rentercontracts-get) | GET | +| [/renter/downloads](#renterdownloads-get) | GET | +| [/renter/prices](#renterprices-get) | GET | +| [/renter/files](#renterfiles-get) | GET | +| [/renter/file/*___siapath___](#renterfile___siapath___-get) | GET | +| [/renter/delete/*___siapath___](#renterdeletesiapath-post) | POST | +| [/renter/download/*___siapath___](#renterdownloadsiapath-get) | GET | +| [/renter/downloadasync/*___siapath___](#renterdownloadasyncsiapath-get) | GET | +| [/renter/rename/*___siapath___](#renterrenamesiapath-post) | POST | +| [/renter/stream/*___siapath___](#renterstreamsiapath-get) | GET | +| [/renter/upload/*___siapath___](#renteruploadsiapath-post) | POST | For examples and detailed descriptions of request and response parameters, refer to [Renter.md](/doc/api/Renter.md). diff --git a/doc/api/Renter.md b/doc/api/Renter.md index e40f1d4e8a..667d51f2d2 100644 --- a/doc/api/Renter.md +++ b/doc/api/Renter.md @@ -19,21 +19,21 @@ allocated funds. 
Index ----- -| Route | HTTP verb | -| -------------------------------------------------------------------------------------------- | --------- | -| [/renter](#renter-get) | GET | -| [/renter](#renter-post) | POST | -| [/renter/contracts](#rentercontracts-get) | GET | -| [/renter/downloads](#renterdownloads-get) | GET | -| [/renter/files](#renterfiles-get) | GET | -| [/renter/file/*___siapath___](#renterfile___siapath___-get) | GET | -| [/renter/prices](#renter-prices-get) | GET | -| [/renter/delete/___*siapath___](#renterdelete___siapath___-post) | POST | -| [/renter/download/___*siapath___](#renterdownload__siapath___-get) | GET | -| [/renter/downloadasync/___*siapath___](#renterdownloadasync__siapath___-get) | GET | -| [/renter/rename/___*siapath___](#renterrename___siapath___-post) | POST | -| [/renter/stream/___*siapath___](#renterstreamsiapath-get) | GET | -| [/renter/upload/___*siapath___](#renterupload___siapath___-post) | POST | +| Route | HTTP verb | +| ------------------------------------------------------------------------------- | --------- | +| [/renter](#renter-get) | GET | +| [/renter](#renter-post) | POST | +| [/renter/contracts](#rentercontracts-get) | GET | +| [/renter/downloads](#renterdownloads-get) | GET | +| [/renter/files](#renterfiles-get) | GET | +| [/renter/file/*___siapath___](#renterfile___siapath___-get) | GET | +| [/renter/prices](#renter-prices-get) | GET | +| [/renter/delete/___*siapath___](#renterdelete___siapath___-post) | POST | +| [/renter/download/___*siapath___](#renterdownload__siapath___-get) | GET | +| [/renter/downloadasync/___*siapath___](#renterdownloadasync__siapath___-get) | GET | +| [/renter/rename/___*siapath___](#renterrename___siapath___-post) | POST | +| [/renter/stream/___*siapath___](#renterstreamsiapath-get) | GET | +| [/renter/upload/___*siapath___](#renterupload___siapath___-post) | POST | #### /renter [GET] @@ -312,7 +312,7 @@ lists the status of all files. #### /renter/file/*___siapath___ [GET] -lists the statu of specified file. +lists the status of specified file. ###### JSON Response ```javascript diff --git a/node/api/client/renter.go b/node/api/client/renter.go index 3a116389a9..a96cbc8b66 100644 --- a/node/api/client/renter.go +++ b/node/api/client/renter.go @@ -58,7 +58,7 @@ func (c *Client) RenterDownloadHTTPResponseGet(siaPath string, offset, length ui return } -// RenterFileGet requests the /renter/files resource. +// RenterFileGet uses the /renter/file/:siapath endpoint to query a file. func (c *Client) RenterFileGet(siaPath string) (rf api.RenterFile, err error) { siaPath = strings.TrimPrefix(siaPath, "/") err = c.get("/renter/file/"+siaPath, &rf) From 781d66f0dee85ff1bccd1463dcb5398d3b80bf81 Mon Sep 17 00:00:00 2001 From: MSevey Date: Tue, 8 May 2018 15:43:23 -0400 Subject: [PATCH 201/212] Update renterFileHandler() error handling --- node/api/renter.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/node/api/renter.go b/node/api/renter.go index 35956c3433..5621254d8f 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -382,9 +382,12 @@ func (api *API) renterRenameHandler(w http.ResponseWriter, req *http.Request, ps // renterFileHandler handles the API call to return specific file. 
func (api *API) renterFileHandler(w http.ResponseWriter, req *http.Request, ps httprouter.Params) { file, err := api.renter.File(strings.TrimPrefix(ps.ByName("siapath"), "/")) + if err != nil { + WriteError(w, Error{err.Error()}, http.StatusBadRequest) + return + } WriteJSON(w, RenterFile{ - File: file, - Error: err, + File: file, }) } From 4009bb55f64128642595bd76fc72a99a7e339f7f Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 9 May 2018 13:04:16 -0400 Subject: [PATCH 202/212] Fix contractor locking violation --- modules/renter/contractor/contracts.go | 16 ++++++---------- .../renter/contractor/host_integration_test.go | 8 ++------ 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/modules/renter/contractor/contracts.go b/modules/renter/contractor/contracts.go index 71b80f7173..f037bb761c 100644 --- a/modules/renter/contractor/contracts.go +++ b/modules/renter/contractor/contracts.go @@ -145,9 +145,7 @@ func (c *Contractor) managedMarkContractsUtility() error { }() // Apply changes. - c.mu.Lock() - err := c.updateContractUtility(contract.ID, utility) - c.mu.Unlock() + err := c.managedUpdateContractUtility(contract.ID, utility) if err != nil { return err } @@ -509,12 +507,11 @@ func (c *Contractor) threadedContractMaintenance() { // Update the utility values for the new contract, and for the old // contract. - c.mu.Lock() newUtility := modules.ContractUtility{ GoodForUpload: true, GoodForRenew: true, } - if err := c.updateContractUtility(newContract.ID, newUtility); err != nil { + if err := c.managedUpdateContractUtility(newContract.ID, newUtility); err != nil { c.log.Println("Failed to update the contract utilities", err) return } @@ -524,7 +521,6 @@ func (c *Contractor) threadedContractMaintenance() { c.log.Println("Failed to update the contract utilities", err) return } - c.mu.Unlock() // If the contract is a mid-cycle renew, add the contract line to // the new contract. The contract line is not included/extended if // we are just renewing because the contract is expiring. @@ -617,8 +613,7 @@ func (c *Contractor) threadedContractMaintenance() { } // Add this contract to the contractor and save. - c.mu.Lock() - err = c.updateContractUtility(newContract.ID, modules.ContractUtility{ + err = c.managedUpdateContractUtility(newContract.ID, modules.ContractUtility{ GoodForUpload: true, GoodForRenew: true, }) @@ -626,6 +621,7 @@ func (c *Contractor) threadedContractMaintenance() { c.log.Println("Failed to update the contract utilities", err) return } + c.mu.Lock() err = c.saveSync() c.mu.Unlock() if err != nil { @@ -649,9 +645,9 @@ func (c *Contractor) threadedContractMaintenance() { } } -// updateContractUtility is a helper function that acquires a contract, updates +// managedUpdateContractUtility is a helper function that acquires a contract, updates // its ContractUtility and returns the contract again. 
-func (c *Contractor) updateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { +func (c *Contractor) managedUpdateContractUtility(id types.FileContractID, utility modules.ContractUtility) error { safeContract, ok := c.staticContracts.Acquire(id) if !ok { return errors.New("failed to acquire contract for update") diff --git a/modules/renter/contractor/host_integration_test.go b/modules/renter/contractor/host_integration_test.go index e73ec5d4d1..19ded71e2b 100644 --- a/modules/renter/contractor/host_integration_test.go +++ b/modules/renter/contractor/host_integration_test.go @@ -348,12 +348,10 @@ func TestIntegrationRenew(t *testing.T) { } // renew the contract - c.mu.Lock() - err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + err = c.managedUpdateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } - c.mu.Unlock() oldContract, _ := c.staticContracts.Acquire(contract.ID) contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+200) if err != nil { @@ -384,12 +382,10 @@ func TestIntegrationRenew(t *testing.T) { } // renew to a lower height - c.mu.Lock() - err = c.updateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) + err = c.managedUpdateContractUtility(contract.ID, modules.ContractUtility{GoodForRenew: true}) if err != nil { t.Fatal(err) } - c.mu.Unlock() oldContract, _ = c.staticContracts.Acquire(contract.ID) contract, err = c.managedRenew(oldContract, types.SiacoinPrecision.Mul64(50), c.blockHeight+100) if err != nil { From 84aeea5e99dbe8280dabf91d273c16d99c52321f Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Sat, 5 May 2018 22:27:36 -0400 Subject: [PATCH 203/212] Always call Drop on transactionbuilder if method returns an error --- modules/wallet/money.go | 9 +++++++-- modules/wallet/seed.go | 5 +++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/modules/wallet/money.go b/modules/wallet/money.go index e2259c826f..b9ee74786b 100644 --- a/modules/wallet/money.go +++ b/modules/wallet/money.go @@ -212,7 +212,7 @@ func (w *Wallet) SendSiacoinsMulti(outputs []types.SiacoinOutput) (txns []types. // SendSiafunds creates a transaction sending 'amount' to 'dest'. The transaction // is submitted to the transaction pool and is also returned. 
-func (w *Wallet) SendSiafunds(amount types.Currency, dest types.UnlockHash) ([]types.Transaction, error) { +func (w *Wallet) SendSiafunds(amount types.Currency, dest types.UnlockHash) (txns []types.Transaction, err error) { if err := w.tg.Add(); err != nil { return nil, err } @@ -233,7 +233,12 @@ func (w *Wallet) SendSiafunds(amount types.Currency, dest types.UnlockHash) ([]t } txnBuilder := w.StartTransaction() - err := txnBuilder.FundSiacoins(tpoolFee) + defer func() { + if err != nil { + txnBuilder.Drop() + } + }() + err = txnBuilder.FundSiacoins(tpoolFee) if err != nil { return nil, err } diff --git a/modules/wallet/seed.go b/modules/wallet/seed.go index 3d9d319109..b0e0b4319e 100644 --- a/modules/wallet/seed.go +++ b/modules/wallet/seed.go @@ -406,6 +406,11 @@ func (w *Wallet) SweepSeed(seed modules.Seed) (coins, funds types.Currency, err // construct a transaction that spends the outputs tb := w.StartTransaction() + defer func() { + if err != nil { + tb.Drop() + } + }() var sweptCoins, sweptFunds types.Currency // total values of swept outputs for _, output := range txnSiacoinOutputs { // construct a siacoin input that spends the output From 70f6e7c63c2ca4aaca8fbbdb97765521f1ee6afd Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Wed, 9 May 2018 15:44:59 -0400 Subject: [PATCH 204/212] skip test with disk io in short test --- modules/renter/proto/contractset_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/renter/proto/contractset_test.go b/modules/renter/proto/contractset_test.go index 31a2a9a05a..aecb5b48dd 100644 --- a/modules/renter/proto/contractset_test.go +++ b/modules/renter/proto/contractset_test.go @@ -26,6 +26,9 @@ func (cs *ContractSet) mustAcquire(t *testing.T, id types.FileContractID) *SafeC // TestContractSet tests that the ContractSet type is safe for concurrent use. func TestContractSet(t *testing.T) { + if testing.Short() { + t.SkipNow() + } // create contract set testDir := build.TempDir(t.Name()) cs, err := NewContractSet(testDir, modules.ProdDependencies) From b8980da21e6af15b3c3c00f2569224c27adbee8c Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Tue, 8 May 2018 19:35:31 -0400 Subject: [PATCH 205/212] clear download destination after download is done --- modules/renter/downloadstreamer.go | 6 ++++++ modules/renter/uploadchunk.go | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/modules/renter/downloadstreamer.go b/modules/renter/downloadstreamer.go index 75fb96963f..d07990e75c 100644 --- a/modules/renter/downloadstreamer.go +++ b/modules/renter/downloadstreamer.go @@ -90,6 +90,12 @@ func (s *streamer) Read(p []byte) (n int, err error) { return 0, errors.AddContext(err, "failed to create new download") } + // Set the in-memory buffer to nil just to be safe in case of a memory + // leak. + defer func() { + d.destination = nil + }() + // Block until the download has completed. select { case <-d.completeChan: diff --git a/modules/renter/uploadchunk.go b/modules/renter/uploadchunk.go index abf7149c4c..df165d96f5 100644 --- a/modules/renter/uploadchunk.go +++ b/modules/renter/uploadchunk.go @@ -151,6 +151,12 @@ func (r *Renter) managedDownloadLogicalChunkData(chunk *unfinishedUploadChunk) e return err } + // Set the in-memory buffer to nil just to be safe in case of a memory + // leak. + defer func() { + d.destination = nil + }() + // Wait for the download to complete. 
select { case <-d.completeChan: From ecaf70ed98c1124b6bdc0c8c447f66f3436fcb9e Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 10 May 2018 11:22:57 -0400 Subject: [PATCH 206/212] change logicalChunkData to not be a continuous block of memory but shards instead --- modules/renter.go | 4 ++++ modules/renter/downloaddestination.go | 30 +++++++++++++++++++---- modules/renter/erasure.go | 18 ++++++++++++++ modules/renter/shardreader.go | 34 +++++++++++++++++++++++++++ modules/renter/uploadchunk.go | 16 ++++++------- 5 files changed, 90 insertions(+), 12 deletions(-) create mode 100644 modules/renter/shardreader.go diff --git a/modules/renter.go b/modules/renter.go index 00dd2acd08..d954c638de 100644 --- a/modules/renter.go +++ b/modules/renter.go @@ -28,6 +28,10 @@ type ErasureCoder interface { // containing parity data. Encode(data []byte) ([][]byte, error) + // EncodeShards encodes the input data like Encode but accepts an already + // sharded input. + EncodeShards(data [][]byte) ([][]byte, error) + // Recover recovers the original data from pieces and writes it to w. // pieces should be identical to the slice returned by Encode (length and // order must be preserved), but with missing elements set to nil. n is diff --git a/modules/renter/downloaddestination.go b/modules/renter/downloaddestination.go index ece3e01dd6..0427850141 100644 --- a/modules/renter/downloaddestination.go +++ b/modules/renter/downloaddestination.go @@ -20,6 +20,8 @@ import ( "errors" "io" "sync" + + "github.com/NebulousLabs/Sia/modules" ) // downloadDestination is a wrapper for the different types of writing that we @@ -37,7 +39,20 @@ type downloadDestination interface { // downloadDestinationBuffer writes logical chunk data to an in-memory buffer. // This buffer is primarily used when performing repairs on uploads. -type downloadDestinationBuffer []byte +type downloadDestinationBuffer [][]byte + +// NewDownloadDestinationBuffer allocates the necessary number of shards for +// the downloadDestinationBuffer and returns the new buffer. +func NewDownloadDestinationBuffer(length uint64) downloadDestinationBuffer { + // Round length up to next multiple of SectorSize. + length += modules.SectorSize - length%modules.SectorSize + buf := make([][]byte, length/modules.SectorSize) + for i := 0; length > 0; i++ { + buf[i] = make([]byte, modules.SectorSize) + length -= modules.SectorSize + } + return buf +} // Close implements Close for the downloadDestination interface. func (dw downloadDestinationBuffer) Close() error { @@ -46,11 +61,18 @@ func (dw downloadDestinationBuffer) Close() error { // WriteAt writes the provided data to the downloadDestinationBuffer. 
func (dw downloadDestinationBuffer) WriteAt(data []byte, offset int64) (int, error) { - if len(data)+int(offset) > len(dw) || offset < 0 { + if uint64(len(data)+int(offset)) > uint64(len(dw))*modules.SectorSize || offset < 0 { return 0, errors.New("write at specified offset exceeds buffer size") } - i := copy(dw[offset:], data) - return i, nil + written := len(data) + for len(data) > 0 { + shardIndex := offset / int64(modules.SectorSize) + sliceIndex := offset % int64(modules.SectorSize) + n := copy(dw[shardIndex][sliceIndex:], data) + data = data[n:] + offset += int64(n) + } + return written, nil } // downloadDestinationWriteCloser is a downloadDestination that writes to an diff --git a/modules/renter/erasure.go b/modules/renter/erasure.go index 0ab12659fc..02d30e2078 100644 --- a/modules/renter/erasure.go +++ b/modules/renter/erasure.go @@ -6,6 +6,7 @@ import ( "github.com/klauspost/reedsolomon" "github.com/NebulousLabs/Sia/modules" + "github.com/NebulousLabs/errors" ) // rsCode is a Reed-Solomon encoder/decoder. It implements the @@ -40,6 +41,23 @@ func (rs *rsCode) Encode(data []byte) ([][]byte, error) { return pieces, nil } +// EncodeShards creates the parity shards for an already sharded input. +func (rs *rsCode) EncodeShards(pieces [][]byte) ([][]byte, error) { + // Check that the caller provided the minimum amount of pieces. + if len(pieces) != rs.MinPieces() { + return nil, errors.New("too few data pieces given") + } + // Add the parity shards to pieces. + for len(pieces) < rs.NumPieces() { + pieces = append(pieces, make([]byte, modules.SectorSize)) + } + err := rs.enc.Encode(pieces) + if err != nil { + return nil, err + } + return pieces, nil +} + // Recover recovers the original data from pieces and writes it to w. // pieces should be identical to the slice returned by Encode (length and // order must be preserved), but with missing elements set to nil. diff --git a/modules/renter/shardreader.go b/modules/renter/shardreader.go new file mode 100644 index 0000000000..8e039f0ebb --- /dev/null +++ b/modules/renter/shardreader.go @@ -0,0 +1,34 @@ +package renter + +import ( + "io" +) + +// shardReader is a helper struct that can read data into shards of +// modules.SectorSize instead of whole byte slices. +type shardReader struct { + r io.ReaderAt +} + +// NewShardReader creates a new shardReader from an object that implements the +// ReaderAt interface. +func NewShardReader(r io.ReaderAt) *shardReader { + return &shardReader{ + r: r, + } +} + +// ReadAt reads data into a slice of shards from a certain offset. +func (sr *shardReader) ReadAt(d [][]byte, offset int64) (int, error) { + var n int + for len(d) > 0 { + read, err := sr.r.ReadAt(d[0], offset) + if err != nil { + return 0, err + } + d = d[1:] + offset += int64(read) + n += read + } + return n, nil +} diff --git a/modules/renter/uploadchunk.go b/modules/renter/uploadchunk.go index df165d96f5..342827bf2d 100644 --- a/modules/renter/uploadchunk.go +++ b/modules/renter/uploadchunk.go @@ -42,7 +42,7 @@ type unfinishedUploadChunk struct { // The logical data is the data that is presented to the user when the user // requests the chunk. The physical data is all of the pieces that get // stored across the network. - logicalChunkData []byte + logicalChunkData [][]byte physicalChunkData [][]byte // Worker synchronization fields. The mutex only protects these fields. @@ -134,7 +134,7 @@ func (r *Renter) managedDownloadLogicalChunkData(chunk *unfinishedUploadChunk) e } // Create the download. 
- buf := downloadDestinationBuffer(make([]byte, chunk.length)) + buf := NewDownloadDestinationBuffer(chunk.length) d, err := r.managedNewDownload(downloadParams{ destination: buf, destinationType: "buffer", @@ -167,7 +167,7 @@ func (r *Renter) managedDownloadLogicalChunkData(chunk *unfinishedUploadChunk) e buf = nil return d.Err() } - chunk.logicalChunkData = []byte(buf) + chunk.logicalChunkData = [][]byte(buf) return nil } @@ -221,7 +221,7 @@ func (r *Renter) managedFetchAndRepairChunk(chunk *unfinishedUploadChunk) { // fact to reduce the total memory required to create the physical data. // That will also change the amount of memory we need to allocate, and the // number of times we need to return memory. - chunk.physicalChunkData, err = chunk.renterFile.erasureCode.Encode(chunk.logicalChunkData) + chunk.physicalChunkData, err = chunk.renterFile.erasureCode.EncodeShards(chunk.logicalChunkData) chunk.logicalChunkData = nil r.memoryManager.Return(erasureCodingMemory) chunk.memoryReleased += erasureCodingMemory @@ -300,15 +300,15 @@ func (r *Renter) managedFetchLogicalChunkData(chunk *unfinishedUploadChunk) erro // TODO: Once we have enabled support for small chunks, we should stop // needing to ignore the EOF errors, because the chunk size should always // match the tail end of the file. Until then, we ignore io.EOF. - chunk.logicalChunkData = make([]byte, chunk.length) - _, err = osFile.ReadAt(chunk.logicalChunkData, chunk.offset) + sr := NewShardReader(osFile) + buf := [][]byte(NewDownloadDestinationBuffer(chunk.length)) + _, err = sr.ReadAt(buf, chunk.offset) if err != nil && err != io.EOF && download { - chunk.logicalChunkData = nil return r.managedDownloadLogicalChunkData(chunk) } else if err != nil && err != io.EOF { - chunk.logicalChunkData = nil return errors.Extend(err, errors.New("failed to read file locally")) } + chunk.logicalChunkData = buf // Data successfully read from disk. return nil From 3e85b4fa6a3cb9b02ff67301082af5e2d09792ad Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 10 May 2018 12:45:22 -0400 Subject: [PATCH 207/212] account for twofish overhead --- modules/renter/downloaddestination.go | 12 +++++++----- modules/renter/erasure.go | 6 +++--- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/modules/renter/downloaddestination.go b/modules/renter/downloaddestination.go index 0427850141..92727165c8 100644 --- a/modules/renter/downloaddestination.go +++ b/modules/renter/downloaddestination.go @@ -45,11 +45,13 @@ type downloadDestinationBuffer [][]byte // the downloadDestinationBuffer and returns the new buffer. func NewDownloadDestinationBuffer(length uint64) downloadDestinationBuffer { // Round length up to next multiple of SectorSize. - length += modules.SectorSize - length%modules.SectorSize - buf := make([][]byte, length/modules.SectorSize) - for i := 0; length > 0; i++ { - buf[i] = make([]byte, modules.SectorSize) - length -= modules.SectorSize + if length%pieceSize != 0 { + length += pieceSize - length%pieceSize + } + buf := make([][]byte, 0, length/pieceSize) + for length > 0 { + buf = append(buf, make([]byte, pieceSize)) + length -= pieceSize } return buf } diff --git a/modules/renter/erasure.go b/modules/renter/erasure.go index 02d30e2078..21e074f46d 100644 --- a/modules/renter/erasure.go +++ b/modules/renter/erasure.go @@ -1,12 +1,12 @@ package renter import ( + "fmt" "io" "github.com/klauspost/reedsolomon" "github.com/NebulousLabs/Sia/modules" - "github.com/NebulousLabs/errors" ) // rsCode is a Reed-Solomon encoder/decoder. 
It implements the @@ -45,11 +45,11 @@ func (rs *rsCode) Encode(data []byte) ([][]byte, error) { func (rs *rsCode) EncodeShards(pieces [][]byte) ([][]byte, error) { // Check that the caller provided the minimum amount of pieces. if len(pieces) != rs.MinPieces() { - return nil, errors.New("too few data pieces given") + return nil, fmt.Errorf("invalid number of pieces given %v %v", len(pieces), rs.MinPieces()) } // Add the parity shards to pieces. for len(pieces) < rs.NumPieces() { - pieces = append(pieces, make([]byte, modules.SectorSize)) + pieces = append(pieces, make([]byte, pieceSize)) } err := rs.enc.Encode(pieces) if err != nil { From 9360c8bca82d7c143e54e23ac2f7aa824ade06e9 Mon Sep 17 00:00:00 2001 From: Matthew Sevey Date: Thu, 10 May 2018 16:56:39 -0400 Subject: [PATCH 208/212] Address Chris's comments in PR --- doc/API.md | 24 +++++++------- doc/api/Renter.md | 82 ++++++++++++++++++++++------------------------ node/api/renter.go | 3 +- 3 files changed, 52 insertions(+), 57 deletions(-) diff --git a/doc/API.md b/doc/API.md index ffe2c0eb0b..572148aee0 100644 --- a/doc/API.md +++ b/doc/API.md @@ -997,19 +997,17 @@ lists the status of specified file. ###### JSON Response [(with comments)](/doc/api/Renter.md#json-response-4) ```javascript { - "file": [ - { - "siapath": "foo/bar.txt", - "localpath": "/home/foo/bar.txt", - "filesize": 8192, // bytes - "available": true, - "renewing": true, - "redundancy": 5, - "bytesuploaded": 209715200, // total bytes uploaded - "uploadprogress": 100, // percent - "expiration": 60000 - } - ] + "file": { + "siapath": "foo/bar.txt", + "localpath": "/home/foo/bar.txt", + "filesize": 8192, // bytes + "available": true, + "renewing": true, + "redundancy": 5, + "bytesuploaded": 209715200, // total bytes uploaded + "uploadprogress": 100, // percent + "expiration": 60000 + } } ``` diff --git a/doc/api/Renter.md b/doc/api/Renter.md index 667d51f2d2..cae898964d 100644 --- a/doc/api/Renter.md +++ b/doc/api/Renter.md @@ -317,48 +317,46 @@ lists the status of specified file. ###### JSON Response ```javascript { - "file": [ - { - // Path to the file in the renter on the network. - "siapath": "foo/bar.txt", - - // Path to the local file on disk. - "localpath": "/home/foo/bar.txt", - - // Size of the file in bytes. - "filesize": 8192, // bytes - - // true if the file is available for download. Files may be available - // before they are completely uploaded. - "available": true, - - // true if the file's contracts will be automatically renewed by the - // renter. - "renewing": true, - - // Average redundancy of the file on the network. Redundancy is - // calculated by dividing the amount of data uploaded in the file's open - // contracts by the size of the file. Redundancy does not necessarily - // correspond to availability. Specifically, a redundancy >= 1 does not - // indicate the file is available as there could be a chunk of the file - // with 0 redundancy. - "redundancy": 5, - - // Total number of bytes successfully uploaded via current file contracts. - // This number includes padding and rendundancy, so a file with a size of - // 8192 bytes might be padded to 40 MiB and, with a redundancy of 5, - // encoded to 200 MiB for upload. - "uploadedbytes": 209715200, // bytes - - // Percentage of the file uploaded, including redundancy. Uploading has - // completed when uploadprogress is 100. Files may be available for - // download before upload progress is 100. - "uploadprogress": 100, // percent - - // Block height at which the file ceases availability. 
- "expiration": 60000 - } - ] + "file": { + // Path to the file in the renter on the network. + "siapath": "foo/bar.txt", + + // Path to the local file on disk. + "localpath": "/home/foo/bar.txt", + + // Size of the file in bytes. + "filesize": 8192, // bytes + + // true if the file is available for download. Files may be available + // before they are completely uploaded. + "available": true, + + // true if the file's contracts will be automatically renewed by the + // renter. + "renewing": true, + + // Average redundancy of the file on the network. Redundancy is + // calculated by dividing the amount of data uploaded in the file's open + // contracts by the size of the file. Redundancy does not necessarily + // correspond to availability. Specifically, a redundancy >= 1 does not + // indicate the file is available as there could be a chunk of the file + // with 0 redundancy. + "redundancy": 5, + + // Total number of bytes successfully uploaded via current file contracts. + // This number includes padding and rendundancy, so a file with a size of + // 8192 bytes might be padded to 40 MiB and, with a redundancy of 5, + // encoded to 200 MiB for upload. + "uploadedbytes": 209715200, // bytes + + // Percentage of the file uploaded, including redundancy. Uploading has + // completed when uploadprogress is 100. Files may be available for + // download before upload progress is 100. + "uploadprogress": 100, // percent + + // Block height at which the file ceases availability. + "expiration": 60000 + } } ``` diff --git a/node/api/renter.go b/node/api/renter.go index 5621254d8f..2feaf088d5 100644 --- a/node/api/renter.go +++ b/node/api/renter.go @@ -125,8 +125,7 @@ type ( // RenterFile lists the file queried. RenterFile struct { - File modules.FileInfo `json:"file"` - Error error `json:"error"` + File modules.FileInfo `json:"file"` } // RenterFiles lists the files known to the renter. From 400332726a5c5eab6054a326363f671cda2eb0de Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 10 May 2018 15:44:16 -0400 Subject: [PATCH 209/212] add ReadFrom to buffer --- modules/renter/downloaddestination.go | 16 ++++++++++++- modules/renter/shardreader.go | 34 --------------------------- modules/renter/uploadchunk.go | 6 ++--- 3 files changed, 18 insertions(+), 38 deletions(-) delete mode 100644 modules/renter/shardreader.go diff --git a/modules/renter/downloaddestination.go b/modules/renter/downloaddestination.go index 92727165c8..1ccad322b7 100644 --- a/modules/renter/downloaddestination.go +++ b/modules/renter/downloaddestination.go @@ -61,9 +61,23 @@ func (dw downloadDestinationBuffer) Close() error { return nil } +// ReadFrom reads data from a io.Reader until the buffer is full. +func (dw downloadDestinationBuffer) ReadFrom(r io.Reader) (int64, error) { + var n int64 + for len(dw) > 0 { + read, err := io.ReadFull(r, dw[0]) + if err != nil { + return 0, err + } + dw = dw[1:] + n += int64(read) + } + return n, nil +} + // WriteAt writes the provided data to the downloadDestinationBuffer. 
func (dw downloadDestinationBuffer) WriteAt(data []byte, offset int64) (int, error) { - if uint64(len(data)+int(offset)) > uint64(len(dw))*modules.SectorSize || offset < 0 { + if uint64(len(data))+uint64(offset) > uint64(len(dw))*pieceSize || offset < 0 { return 0, errors.New("write at specified offset exceeds buffer size") } written := len(data) diff --git a/modules/renter/shardreader.go b/modules/renter/shardreader.go deleted file mode 100644 index 8e039f0ebb..0000000000 --- a/modules/renter/shardreader.go +++ /dev/null @@ -1,34 +0,0 @@ -package renter - -import ( - "io" -) - -// shardReader is a helper struct that can read data into shards of -// modules.SectorSize instead of whole byte slices. -type shardReader struct { - r io.ReaderAt -} - -// NewShardReader creates a new shardReader from an object that implements the -// ReaderAt interface. -func NewShardReader(r io.ReaderAt) *shardReader { - return &shardReader{ - r: r, - } -} - -// ReadAt reads data into a slice of shards from a certain offset. -func (sr *shardReader) ReadAt(d [][]byte, offset int64) (int, error) { - var n int - for len(d) > 0 { - read, err := sr.r.ReadAt(d[0], offset) - if err != nil { - return 0, err - } - d = d[1:] - offset += int64(read) - n += read - } - return n, nil -} diff --git a/modules/renter/uploadchunk.go b/modules/renter/uploadchunk.go index 342827bf2d..e4e4774b20 100644 --- a/modules/renter/uploadchunk.go +++ b/modules/renter/uploadchunk.go @@ -300,9 +300,9 @@ func (r *Renter) managedFetchLogicalChunkData(chunk *unfinishedUploadChunk) erro // TODO: Once we have enabled support for small chunks, we should stop // needing to ignore the EOF errors, because the chunk size should always // match the tail end of the file. Until then, we ignore io.EOF. - sr := NewShardReader(osFile) - buf := [][]byte(NewDownloadDestinationBuffer(chunk.length)) - _, err = sr.ReadAt(buf, chunk.offset) + buf := NewDownloadDestinationBuffer(chunk.length) + sr := io.NewSectionReader(osFile, chunk.offset, int64(chunk.length)) + _, err = buf.ReadFrom(sr) if err != nil && err != io.EOF && download { return r.managedDownloadLogicalChunkData(chunk) } else if err != nil && err != io.EOF { From ec459dd6363b211f248209d4a56a84ed083f1f1d Mon Sep 17 00:00:00 2001 From: Thomas Bennett Date: Thu, 10 May 2018 14:16:46 -0700 Subject: [PATCH 210/212] bumping version and changelog for 1.3.3 --- CHANGELOG.md | 12 ++++++++++-- README.md | 2 +- build/version.go | 2 +- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6a62542c5a..039bc496c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,16 +1,24 @@ Version History --------------- +May 2018: + +v1.3.3 (patch release) +- Add Streaming API endpoints +- Faster contract formation +- Improved wallet scaling + + March 2018: -v1.3.2 (minor release) +v1.3.2 (patch release) - Improve renter throughput and stability - Reduce host I/O when idle - Add /tpool/confirmed endpoint December 2017: -v1.3.1 (minor release) +v1.3.1 (patch release) - Add new efficient, reliable contract format - Faster and smoother file repairs - Fix difficulty adjustment hardfork diff --git a/README.md b/README.md index 547c1fc05b..e5e6a6d0cd 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -# [![Sia Logo](http://sia.tech/img/svg/sia-green-logo.svg)](http://sia.tech) v1.3.2 (Capricorn) +# [![Sia Logo](http://sia.tech/img/svg/sia-green-logo.svg)](http://sia.tech) v1.3.3 (Capricorn) [![Build 
Status](https://travis-ci.org/NebulousLabs/Sia.svg?branch=master)](https://travis-ci.org/NebulousLabs/Sia) [![GoDoc](https://godoc.org/github.com/NebulousLabs/Sia?status.svg)](https://godoc.org/github.com/NebulousLabs/Sia) diff --git a/build/version.go b/build/version.go index 3442c61eb7..6cf8585f79 100644 --- a/build/version.go +++ b/build/version.go @@ -14,7 +14,7 @@ const ( MaxEncodedVersionLength = 100 // Version is the current version of siad. - Version = "1.3.2" + Version = "1.3.3" ) // IsVersion returns whether str is a valid version number. From 6eb04dac9844cf3b4db38598fc33388503b6bc93 Mon Sep 17 00:00:00 2001 From: Christopher Schinnerl Date: Thu, 10 May 2018 17:18:23 -0400 Subject: [PATCH 211/212] change to io.ReadFull --- modules/renter/downloaddestination.go | 2 +- modules/renter/uploadchunk.go | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/modules/renter/downloaddestination.go b/modules/renter/downloaddestination.go index 1ccad322b7..51a3a2bd33 100644 --- a/modules/renter/downloaddestination.go +++ b/modules/renter/downloaddestination.go @@ -67,7 +67,7 @@ func (dw downloadDestinationBuffer) ReadFrom(r io.Reader) (int64, error) { for len(dw) > 0 { read, err := io.ReadFull(r, dw[0]) if err != nil { - return 0, err + return n, err } dw = dw[1:] n += int64(read) diff --git a/modules/renter/uploadchunk.go b/modules/renter/uploadchunk.go index e4e4774b20..ae12331444 100644 --- a/modules/renter/uploadchunk.go +++ b/modules/renter/uploadchunk.go @@ -303,9 +303,11 @@ func (r *Renter) managedFetchLogicalChunkData(chunk *unfinishedUploadChunk) erro buf := NewDownloadDestinationBuffer(chunk.length) sr := io.NewSectionReader(osFile, chunk.offset, int64(chunk.length)) _, err = buf.ReadFrom(sr) - if err != nil && err != io.EOF && download { + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF && download { + r.log.Debugln("failed to read file, downloading instead:", err) return r.managedDownloadLogicalChunkData(chunk) - } else if err != nil && err != io.EOF { + } else if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + r.log.Debugln("failed to read file locally:", err) return errors.Extend(err, errors.New("failed to read file locally")) } chunk.logicalChunkData = buf From e3a4646747c4ddab376baf0656d567baf3187f37 Mon Sep 17 00:00:00 2001 From: Thomas Bennett Date: Thu, 10 May 2018 14:28:30 -0700 Subject: [PATCH 212/212] fix spacing --- CHANGELOG.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 039bc496c2..19d8f0f7b9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,6 @@ v1.3.3 (patch release) - Faster contract formation - Improved wallet scaling - March 2018: v1.3.2 (patch release)
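As a closing illustration of the single-file endpoint introduced in this series, here is a minimal sketch of querying it through the node/api/client package. It is only a sketch: the helper name printFileStatus and the package name are illustrative, and it assumes a siad node is running and that the *client.Client has already been configured with that node's API address (construction elided).

```go
package example

import (
	"fmt"
	"log"

	"github.com/NebulousLabs/Sia/node/api/client"
)

// printFileStatus queries /renter/file/<siapath> via RenterFileGet and prints
// the returned file metadata. The client c is assumed to already point at a
// running siad API.
func printFileStatus(c *client.Client, siaPath string) {
	rf, err := c.RenterFileGet(siaPath) // GET /renter/file/<siapath>
	if err != nil {
		log.Fatal("failed to query file: ", err)
	}
	// rf.File is a modules.FileInfo carrying the same information documented
	// in doc/api/Renter.md (siapath, filesize, available, redundancy, ...).
	fmt.Printf("%+v\n", rf.File)
}
```

RenterFileGet returns an api.RenterFile whose File field mirrors the JSON response documented above, so callers get the status of a single file without listing the whole /renter/files collection.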