diff --git a/.codeclimate.yml b/.codeclimate.yml index de527033803..119e60250b0 100644 --- a/.codeclimate.yml +++ b/.codeclimate.yml @@ -11,6 +11,12 @@ exclude_paths: engines: fixme: enabled: true + config: + strings: + - FIXME + - HACK + - XXX + - BUG golint: enabled: true govet: diff --git a/Rules.mk b/Rules.mk index 69721a57b25..441e7ec4a61 100644 --- a/Rules.mk +++ b/Rules.mk @@ -67,7 +67,6 @@ include $(dir)/Rules.mk # core targets # # -------------------- # - build: $(TGT_BIN) .PHONY: build @@ -143,6 +142,7 @@ help: @echo ' test_go_short' @echo ' test_go_expensive' @echo ' test_go_race' + @echo ' test_go_megacheck' @echo ' test_sharness_short' @echo ' test_sharness_expensive' @echo ' test_sharness_race' diff --git a/blocks/blocks_test.go b/blocks/blocks_test.go index c13d8368fa6..e984a177258 100644 --- a/blocks/blocks_test.go +++ b/blocks/blocks_test.go @@ -91,9 +91,8 @@ func TestManualHash(t *testing.T) { u.Debug = true - block, err = NewBlockWithCid(data, c) + _, err = NewBlockWithCid(data, c) if err != ErrWrongHash { t.Fatal(err) } - } diff --git a/blocks/blockstore/arc_cache_test.go b/blocks/blockstore/arc_cache_test.go index 8790423801c..e6f35144d01 100644 --- a/blocks/blockstore/arc_cache_test.go +++ b/blocks/blockstore/arc_cache_test.go @@ -30,7 +30,7 @@ func testArcCached(ctx context.Context, bs Blockstore) (*arccache, error) { func createStores(t *testing.T) (*arccache, Blockstore, *callbackDatastore) { cd := &callbackDatastore{f: func() {}, ds: ds.NewMapDatastore()} bs := NewBlockstore(syncds.MutexWrap(cd)) - arc, err := testArcCached(nil, bs) + arc, err := testArcCached(context.TODO(), bs) if err != nil { t.Fatal(err) } diff --git a/blocks/blockstore/blockstore.go b/blocks/blockstore/blockstore.go index ac4b8740504..e1c7dcf3542 100644 --- a/blocks/blockstore/blockstore.go +++ b/blocks/blockstore/blockstore.go @@ -102,10 +102,6 @@ func NewBlockstore(d ds.Batching) Blockstore { type blockstore struct { datastore ds.Batching - lk sync.RWMutex - gcreq int32 - gcreqlk sync.Mutex - rehash bool } @@ -246,9 +242,8 @@ func NewGCLocker() GCLocker { } type gclocker struct { - lk sync.RWMutex - gcreq int32 - gcreqlk sync.Mutex + lk sync.RWMutex + gcreq int32 } // Unlocker represents an object which can Unlock diff --git a/blocks/blockstore/bloom_cache.go b/blocks/blockstore/bloom_cache.go index 47f5ac018a8..8bcf962fe57 100644 --- a/blocks/blockstore/bloom_cache.go +++ b/blocks/blockstore/bloom_cache.go @@ -118,7 +118,7 @@ func (b *bloomcache) hasCached(k *cid.Cid) (has bool, ok bool) { } if b.BloomActive() { blr := b.bloom.HasTS(k.Bytes()) - if blr == false { // not contained in bloom is only conclusive answer bloom gives + if !blr { // not contained in bloom is only conclusive answer bloom gives b.hits.Inc() return false, true } diff --git a/blocks/blockstore/bloom_cache_test.go b/blocks/blockstore/bloom_cache_test.go index f021efd8e7c..85046e27053 100644 --- a/blocks/blockstore/bloom_cache_test.go +++ b/blocks/blockstore/bloom_cache_test.go @@ -34,6 +34,9 @@ func TestPutManyAddsToBloom(t *testing.T) { defer cancel() cachedbs, err := testBloomCached(ctx, bs) + if err != nil { + t.Fatal(err) + } select { case <-cachedbs.rebuildChan: @@ -49,7 +52,7 @@ func TestPutManyAddsToBloom(t *testing.T) { if err != nil { t.Fatal(err) } - if has == false { + if !has { t.Fatal("added block is reported missing") } @@ -57,7 +60,7 @@ func TestPutManyAddsToBloom(t *testing.T) { if err != nil { t.Fatal(err) } - if has == true { + if has { t.Fatal("not added block is 
reported to be in blockstore") } } diff --git a/blocks/blockstore/caching_test.go b/blocks/blockstore/caching_test.go index 3c3c195467f..16066ad18c9 100644 --- a/blocks/blockstore/caching_test.go +++ b/blocks/blockstore/caching_test.go @@ -1,26 +1,29 @@ package blockstore -import "testing" +import ( + "context" + "testing" +) func TestCachingOptsLessThanZero(t *testing.T) { opts := DefaultCacheOpts() opts.HasARCCacheSize = -1 - if _, err := CachedBlockstore(nil, nil, opts); err == nil { + if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil { t.Error("wrong ARC setting was not detected") } opts = DefaultCacheOpts() opts.HasBloomFilterSize = -1 - if _, err := CachedBlockstore(nil, nil, opts); err == nil { + if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil { t.Error("negative bloom size was not detected") } opts = DefaultCacheOpts() opts.HasBloomFilterHashes = -1 - if _, err := CachedBlockstore(nil, nil, opts); err == nil { + if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil { t.Error("negative hashes setting was not detected") } } @@ -29,7 +32,7 @@ func TestBloomHashesAtZero(t *testing.T) { opts := DefaultCacheOpts() opts.HasBloomFilterHashes = 0 - if _, err := CachedBlockstore(nil, nil, opts); err == nil { + if _, err := CachedBlockstore(context.TODO(), nil, opts); err == nil { t.Error("zero hashes setting with positive size was not detected") } } diff --git a/blocks/set/set.go b/blocks/set/set.go index 66467315f4c..d71712dcc06 100644 --- a/blocks/set/set.go +++ b/blocks/set/set.go @@ -4,14 +4,11 @@ package set import ( - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" "github.com/ipfs/go-ipfs/blocks/bloom" ) -var log = logging.Logger("blockset") - // BlockSet represents a mutable set of blocks CIDs. type BlockSet interface { AddBlock(*cid.Cid) diff --git a/blocks/set/set_test.go b/blocks/set/set_test.go index 5239b32122f..ecb29163742 100644 --- a/blocks/set/set_test.go +++ b/blocks/set/set_test.go @@ -25,15 +25,15 @@ func exampleKeys() []*cid.Cid { func checkSet(set BlockSet, keySlice []*cid.Cid, t *testing.T) { for i, key := range keySlice { if i&tReAdd == 0 { - if set.HasKey(key) == false { + if !set.HasKey(key) { t.Error("key should be in the set") } } else if i&tRemove == 0 { - if set.HasKey(key) == true { + if set.HasKey(key) { t.Error("key shouldn't be in the set") } } else if i&tAdd == 0 { - if set.HasKey(key) == false { + if !set.HasKey(key) { t.Error("key should be in the set") } } @@ -70,7 +70,7 @@ func TestSetWorks(t *testing.T) { bloom := set.GetBloomFilter() for _, key := range addedKeys { - if bloom.Find(key.Bytes()) == false { + if !bloom.Find(key.Bytes()) { t.Error("bloom doesn't contain expected key") } } diff --git a/blockservice/blockservice.go b/blockservice/blockservice.go index 294b541338e..66701bc9d23 100644 --- a/blockservice/blockservice.go +++ b/blockservice/blockservice.go @@ -172,7 +172,7 @@ func (s *blockService) GetBlock(ctx context.Context, c *cid.Cid) (blocks.Block, // the returned channel. // NB: No guarantees are made about order. 
func (s *blockService) GetBlocks(ctx context.Context, ks []*cid.Cid) <-chan blocks.Block { - out := make(chan blocks.Block, 0) + out := make(chan blocks.Block) go func() { defer close(out) var misses []*cid.Cid diff --git a/cmd/ipfs/daemon.go b/cmd/ipfs/daemon.go index ca5e2730b54..daa8ec793b6 100644 --- a/cmd/ipfs/daemon.go +++ b/cmd/ipfs/daemon.go @@ -201,11 +201,9 @@ func daemonFunc(req cmds.Request, res cmds.Response) { ctx := req.InvocContext() go func() { - select { - case <-req.Context().Done(): - fmt.Println("Received interrupt signal, shutting down...") - fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)") - } + <-req.Context().Done() + fmt.Println("Received interrupt signal, shutting down...") + fmt.Println("(Hit ctrl-c again to force-shutdown the daemon.)") }() // check transport encryption flag. @@ -418,7 +416,6 @@ func daemonFunc(req cmds.Request, res cmds.Response) { res.SetError(err, cmds.ErrNormal) } } - return } // serveHTTPApi collects options, creates listener, prints status message and starts serving requests diff --git a/cmd/ipfs/ipfs.go b/cmd/ipfs/ipfs.go index f8c903346f4..d90fce79c31 100644 --- a/cmd/ipfs/ipfs.go +++ b/cmd/ipfs/ipfs.go @@ -44,12 +44,6 @@ func init() { } } -// isLocal returns true if the command should only be run locally (not sent to daemon), otherwise false -func isLocal(cmd *cmds.Command) bool { - _, found := localMap[cmd] - return found -} - // NB: when necessary, properties are described using negatives in order to // provide desirable defaults type cmdDetails struct { @@ -85,11 +79,10 @@ func (d *cmdDetails) Loggable() map[string]interface{} { } } -func (d *cmdDetails) usesConfigAsInput() bool { return !d.doesNotUseConfigAsInput } -func (d *cmdDetails) doesNotPreemptAutoUpdate() bool { return !d.preemptsAutoUpdate } -func (d *cmdDetails) canRunOnClient() bool { return !d.cannotRunOnClient } -func (d *cmdDetails) canRunOnDaemon() bool { return !d.cannotRunOnDaemon } -func (d *cmdDetails) usesRepo() bool { return !d.doesNotUseRepo } +func (d *cmdDetails) usesConfigAsInput() bool { return !d.doesNotUseConfigAsInput } +func (d *cmdDetails) canRunOnClient() bool { return !d.cannotRunOnClient } +func (d *cmdDetails) canRunOnDaemon() bool { return !d.cannotRunOnDaemon } +func (d *cmdDetails) usesRepo() bool { return !d.doesNotUseRepo } // "What is this madness!?" you ask. Our commands have the unfortunate problem of // not being able to run on all the same contexts. This map describes these diff --git a/cmd/ipfs/main.go b/cmd/ipfs/main.go index 3e1c529d171..5914cd28291 100644 --- a/cmd/ipfs/main.go +++ b/cmd/ipfs/main.go @@ -37,17 +37,12 @@ import ( // log is the command logger var log = logging.Logger("cmd/ipfs") -var ( - errUnexpectedApiOutput = errors.New("api returned unexpected output") - errApiVersionMismatch = errors.New("api version mismatch") - errRequestCanceled = errors.New("request canceled") -) +var errRequestCanceled = errors.New("request canceled") const ( EnvEnableProfiling = "IPFS_PROF" cpuProfile = "ipfs.cpuprof" heapProfile = "ipfs.memprof" - errorFormat = "ERROR: %v\n\n" ) type cmdInvocation struct { @@ -492,7 +487,7 @@ func startProfiling() (func(), error) { } pprof.StartCPUProfile(ofi) go func() { - for _ = range time.NewTicker(time.Second * 30).C { + for range time.NewTicker(time.Second * 30).C { err := writeHeapProfileToFile() if err != nil { log.Error(err) @@ -546,7 +541,7 @@ func (ih *IntrHandler) Handle(handler func(count int, ih *IntrHandler), sigs ... 
go func() { defer ih.wg.Done() count := 0 - for _ = range ih.sig { + for range ih.sig { count++ handler(count, ih) } diff --git a/cmd/ipfswatch/main.go b/cmd/ipfswatch/main.go index 6f37d588d43..6c941014798 100644 --- a/cmd/ipfswatch/main.go +++ b/cmd/ipfswatch/main.go @@ -7,6 +7,7 @@ import ( "os" "os/signal" "path/filepath" + "syscall" commands "github.com/ipfs/go-ipfs/commands" core "github.com/ipfs/go-ipfs/core" @@ -99,7 +100,7 @@ func run(ipfsPath, watchPath string) error { } interrupts := make(chan os.Signal) - signal.Notify(interrupts, os.Interrupt, os.Kill) + signal.Notify(interrupts, os.Interrupt, syscall.SIGTERM) for { select { @@ -167,10 +168,7 @@ func addTree(w *fsnotify.Watcher, root string) error { } return nil }) - if err != nil { - return err - } - return nil + return err } func IsDirectory(path string) (bool, error) { diff --git a/commands/cli/helptext.go b/commands/cli/helptext.go index 5a36c15f622..7d43e3e2e0a 100644 --- a/commands/cli/helptext.go +++ b/commands/cli/helptext.go @@ -16,9 +16,6 @@ const ( variadicArg = "%v..." shortFlag = "-%v" longFlag = "--%v" - optionType = "(%v)" - - whitespace = "\r\n\t " indentStr = " " ) @@ -295,9 +292,7 @@ func optionText(cmd ...*cmds.Command) []string { // get a slice of the options we want to list out options := make([]cmds.Option, 0) for _, c := range cmd { - for _, opt := range c.Options { - options = append(options, opt) - } + options = append(options, c.Options...) } // add option names to output (with each name aligned) @@ -427,13 +422,6 @@ func align(lines []string) []string { return lines } -func indent(lines []string, prefix string) []string { - for i, line := range lines { - lines[i] = prefix + indentString(line, prefix) - } - return lines -} - func indentString(line string, prefix string) string { return prefix + strings.Replace(line, "\n", "\n"+prefix, -1) } diff --git a/commands/cli/parse.go b/commands/cli/parse.go index 597809fcb38..ef9acf7e2d0 100644 --- a/commands/cli/parse.go +++ b/commands/cli/parse.go @@ -59,11 +59,8 @@ func Parse(input []string, stdin *os.File, root *cmds.Command) (cmds.Request, *c } err = cmd.CheckArguments(req) - if err != nil { - return req, cmd, path, err - } - return req, cmd, path, nil + return req, cmd, path, err } func ParseArgs(req cmds.Request, inputs []string, stdin *os.File, argDefs []cmds.Argument, root *cmds.Command) ([]string, []files.File, error) { diff --git a/commands/cli/parse_test.go b/commands/cli/parse_test.go index 5b86a3f243e..107116fba6e 100644 --- a/commands/cli/parse_test.go +++ b/commands/cli/parse_test.go @@ -204,7 +204,7 @@ func TestArgumentParsing(t *testing.T) { test := func(cmd words, f *os.File, res words) { if f != nil { - if _, err := f.Seek(0, os.SEEK_SET); err != nil { + if _, err := f.Seek(0, io.SeekStart); err != nil { t.Fatal(err) } } diff --git a/commands/command_test.go b/commands/command_test.go index 373a3242afc..a61de267b70 100644 --- a/commands/command_test.go +++ b/commands/command_test.go @@ -3,7 +3,6 @@ package commands import "testing" func noop(req Request, res Response) { - return } func TestOptionValidation(t *testing.T) { diff --git a/commands/files/multipartfile.go b/commands/files/multipartfile.go index 21e0d44c143..073569e8a59 100644 --- a/commands/files/multipartfile.go +++ b/commands/files/multipartfile.go @@ -10,7 +10,6 @@ import ( const ( multipartFormdataType = "multipart/form-data" - multipartMixedType = "multipart/mixed" applicationDirectory = "application/x-directory" applicationSymlink = "application/symlink" diff --git 
a/commands/http/handler.go b/commands/http/handler.go index 7138d4ddbff..ec40d136708 100644 --- a/commands/http/handler.go +++ b/commands/http/handler.go @@ -47,12 +47,9 @@ const ( extraContentLengthHeader = "X-Content-Length" uaHeader = "User-Agent" contentTypeHeader = "Content-Type" - contentDispHeader = "Content-Disposition" - transferEncodingHeader = "Transfer-Encoding" applicationJson = "application/json" applicationOctetStream = "application/octet-stream" plainText = "text/plain" - originHeader = "origin" ) var AllowedExposedHeadersArr = []string{streamHeader, channelHeader, extraContentLengthHeader} diff --git a/core/bootstrap.go b/core/bootstrap.go index 21e971792ef..65138cfa63a 100644 --- a/core/bootstrap.go +++ b/core/bootstrap.go @@ -147,10 +147,7 @@ func bootstrapRound(ctx context.Context, host host.Host, cfg BootstrapConfig) er defer log.EventBegin(ctx, "bootstrapStart", id).Done() log.Debugf("%s bootstrapping to %d nodes: %s", id, numToDial, randSubset) - if err := bootstrapConnect(ctx, host, randSubset); err != nil { - return err - } - return nil + return bootstrapConnect(ctx, host, randSubset) } func bootstrapConnect(ctx context.Context, ph host.Host, peers []pstore.PeerInfo) error { diff --git a/core/builder.go b/core/builder.go index 010b34d9b2a..2603914a65c 100644 --- a/core/builder.go +++ b/core/builder.go @@ -65,6 +65,7 @@ func (cfg *BuildCfg) fillDefaults() error { if cfg.Repo == nil { var d ds.Datastore d = ds.NewMapDatastore() + if cfg.NilRepo { d = ds.NewNullDatastore() } @@ -230,10 +231,5 @@ func setupNode(ctx context.Context, n *IpfsNode, cfg *BuildCfg) error { } n.Resolver = path.NewBasicResolver(n.DAG) - err = n.loadFilesRoot() - if err != nil { - return err - } - - return nil + return n.loadFilesRoot() } diff --git a/core/commands/active.go b/core/commands/active.go index 14f08afcd23..7a3163a75b9 100644 --- a/core/commands/active.go +++ b/core/commands/active.go @@ -70,7 +70,7 @@ Lists running and recently run commands. var live time.Duration if req.Active { - live = time.Now().Sub(req.StartTime) + live = time.Since(req.StartTime) } else { live = req.EndTime.Sub(req.StartTime) } diff --git a/core/commands/block.go b/core/commands/block.go index 117b7398e6b..de71c493e22 100644 --- a/core/commands/block.go +++ b/core/commands/block.go @@ -288,10 +288,7 @@ It takes a list of base58 encoded multihashs to remove. } err := util.ProcRmOutput(outChan, res.Stdout(), res.Stderr()) - if err != nil { - return nil, err - } - return nil, nil + return nil, err }, }, Type: util.RemovedBlock{}, diff --git a/core/commands/bootstrap.go b/core/commands/bootstrap.go index d3c5a93e056..8006e4e40c7 100644 --- a/core/commands/bootstrap.go +++ b/core/commands/bootstrap.go @@ -315,7 +315,6 @@ var bootstrapListCmd = &cmds.Command{ return } res.SetOutput(&BootstrapOutput{config.BootstrapPeerStrings(peers)}) - return }, Type: BootstrapOutput{}, Marshalers: cmds.MarshalerMap{ diff --git a/core/commands/files/files.go b/core/commands/files/files.go index aac2618da57..f36fe3f1c81 100644 --- a/core/commands/files/files.go +++ b/core/commands/files/files.go @@ -472,7 +472,7 @@ Examples: return } - _, err = rfd.Seek(int64(offset), os.SEEK_SET) + _, err = rfd.Seek(int64(offset), io.SeekStart) if err != nil { res.SetError(err, cmds.ErrNormal) return @@ -651,7 +651,7 @@ stat' on the file or any of its ancestors. 
return } - _, err = wfd.Seek(int64(offset), os.SEEK_SET) + _, err = wfd.Seek(int64(offset), io.SeekStart) if err != nil { log.Error("seekfail: ", err) res.SetError(err, cmds.ErrNormal) @@ -669,7 +669,7 @@ stat' on the file or any of its ancestors. r = io.LimitReader(r, int64(count)) } - n, err := io.Copy(wfd, input) + n, err := io.Copy(wfd, r) if err != nil { res.SetError(err, cmds.ErrNormal) return diff --git a/core/commands/swarm.go b/core/commands/swarm.go index e8265da80b5..d862d6acd1f 100644 --- a/core/commands/swarm.go +++ b/core/commands/swarm.go @@ -676,6 +676,10 @@ remove your filters from the ipfs config file. } removed, err := filtersRemove(r, cfg, req.Arguments()) + if err != nil { + res.SetError(err, cmds.ErrNormal) + return + } res.SetOutput(&stringList{removed}) }, diff --git a/core/core.go b/core/core.go index 3b0b9ded176..525dce20c7d 100644 --- a/core/core.go +++ b/core/core.go @@ -73,7 +73,7 @@ import ( ) const IpnsValidatorTag = "ipns" -const kSizeBlockstoreWriteCache = 100 + const kReprovideFrequency = time.Hour * 12 const discoveryConnTimeout = time.Second * 30 @@ -83,8 +83,7 @@ type mode int const ( // zero value is not a valid mode, must be explicitly set - invalidMode mode = iota - localMode + localMode mode = iota offlineMode onlineMode ) @@ -341,12 +340,7 @@ func (n *IpfsNode) startOnlineServicesWithHost(ctx context.Context, host p2phost n.Namesys = namesys.NewNameSystem(n.Routing, n.Repo.Datastore(), size) // setup ipns republishing - err = n.setupIpnsRepublisher() - if err != nil { - return err - } - - return nil + return n.setupIpnsRepublisher() } // getCacheSize returns cache life and cache size diff --git a/core/corehttp/gateway_test.go b/core/corehttp/gateway_test.go index 005f2844b02..5ef5d0aee6f 100644 --- a/core/corehttp/gateway_test.go +++ b/core/corehttp/gateway_test.go @@ -427,11 +427,6 @@ func TestIPNSHostnameBacklinks(t *testing.T) { req.Host = "example.net" req.Header.Set("X-Ipfs-Gateway-Prefix", "/bad-prefix") - res, err = doWithoutRedirect(req) - if err != nil { - t.Fatal(err) - } - // make request to directory listing with evil prefix req, err = http.NewRequest("GET", ts.URL, nil) if err != nil { diff --git a/core/corerouting/core.go b/core/corerouting/core.go index 71c0e9248ec..b54753140d4 100644 --- a/core/corerouting/core.go +++ b/core/corerouting/core.go @@ -18,12 +18,7 @@ import ( // default and 2) to avoid a circular dependency (it needs to be referenced in // the core if it's going to be the default) -var ( - errHostMissing = errors.New("supernode routing client requires a Host component") - errIdentityMissing = errors.New("supernode routing server requires a peer ID identity") - errPeerstoreMissing = errors.New("supernode routing server requires a peerstore") - errServersMissing = errors.New("supernode routing client requires at least 1 server peer") -) +var errServersMissing = errors.New("supernode routing client requires at least 1 server peer") // SupernodeServer returns a configuration for a routing server that stores // routing records to the provided datastore. 
Only routing records are store in diff --git a/core/coreunix/add_test.go b/core/coreunix/add_test.go index 4026373c73d..c1d182ddeaa 100644 --- a/core/coreunix/add_test.go +++ b/core/coreunix/add_test.go @@ -120,14 +120,10 @@ func TestAddGCLive(t *testing.T) { pipew.Close() // receive next object from adder - select { - case o := <-out: - addedHashes[o.(*AddedObject).Hash] = struct{}{} - } + o := <-out + addedHashes[o.(*AddedObject).Hash] = struct{}{} - select { - case <-gcstarted: - } + <-gcstarted for r := range gcout { if r.Error != nil { @@ -197,7 +193,7 @@ func testAddWPosInfo(t *testing.T, rawLeaves bool) { t.Fatal(err) } }() - for _ = range adder.Out { + for range adder.Out { } exp := 0 diff --git a/exchange/bitswap/bitswap.go b/exchange/bitswap/bitswap.go index e7a20008ba7..86e53dc2f51 100644 --- a/exchange/bitswap/bitswap.go +++ b/exchange/bitswap/bitswap.go @@ -37,7 +37,6 @@ const ( // TODO: if a 'non-nice' strategy is implemented, consider increasing this value maxProvidersPerRequest = 3 providerRequestTimeout = time.Second * 10 - hasBlockTimeout = time.Second * 15 provideTimeout = time.Second * 15 sizeBatchRequestChan = 32 // kMaxPriority is the max priority as defined by the bitswap protocol diff --git a/exchange/bitswap/bitswap_test.go b/exchange/bitswap/bitswap_test.go index 86271f111c9..3229b183be8 100644 --- a/exchange/bitswap/bitswap_test.go +++ b/exchange/bitswap/bitswap_test.go @@ -199,7 +199,7 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { if err != nil { errs <- err } - for _ = range outch { + for range outch { } }(inst) } @@ -226,16 +226,6 @@ func PerformDistributionTest(t *testing.T, numInstances, numBlocks int) { } } -func getOrFail(bitswap Instance, b blocks.Block, t *testing.T, wg *sync.WaitGroup) { - if _, err := bitswap.Blockstore().Get(b.Cid()); err != nil { - _, err := bitswap.Exchange.GetBlock(context.Background(), b.Cid()) - if err != nil { - t.Fatal(err) - } - } - wg.Done() -} - // TODO simplify this test. get to the _essence_! func TestSendToWantingPeer(t *testing.T) { if testing.Short() { @@ -611,14 +601,14 @@ func TestBitswapLedgerTwoWay(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blk, err := instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) + _, err = instances[1].Exchange.GetBlock(ctx, blocks[0].Cid()) if err != nil { t.Fatal(err) } ctx, cancel = context.WithTimeout(context.Background(), time.Second*5) defer cancel() - blk, err = instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) + blk, err := instances[0].Exchange.GetBlock(ctx, blocks[1].Cid()) if err != nil { t.Fatal(err) } diff --git a/exchange/bitswap/decision/ledger.go b/exchange/bitswap/decision/ledger.go index cb93f0e95fe..3826b73524b 100644 --- a/exchange/bitswap/decision/ledger.go +++ b/exchange/bitswap/decision/ledger.go @@ -27,9 +27,6 @@ type ledger struct { // Accounting tracks bytes sent and recieved. Accounting debtRatio - // firstExchnage is the time of the first data exchange. - firstExchange time.Time - // lastExchange is the time of the last data exchange. 
lastExchange time.Time diff --git a/exchange/bitswap/message/message.go b/exchange/bitswap/message/message.go index ac567792949..a0bc2215ab8 100644 --- a/exchange/bitswap/message/message.go +++ b/exchange/bitswap/message/message.go @@ -220,19 +220,13 @@ func (m *impl) ToProtoV1() *pb.Message { func (m *impl) ToNetV0(w io.Writer) error { pbw := ggio.NewDelimitedWriter(w) - if err := pbw.WriteMsg(m.ToProtoV0()); err != nil { - return err - } - return nil + return pbw.WriteMsg(m.ToProtoV0()) } func (m *impl) ToNetV1(w io.Writer) error { pbw := ggio.NewDelimitedWriter(w) - if err := pbw.WriteMsg(m.ToProtoV1()); err != nil { - return err - } - return nil + return pbw.WriteMsg(m.ToProtoV1()) } func (m *impl) Loggable() map[string]interface{} { diff --git a/exchange/bitswap/testutils.go b/exchange/bitswap/testutils.go index 588dca1849a..fa5e7f9401c 100644 --- a/exchange/bitswap/testutils.go +++ b/exchange/bitswap/testutils.go @@ -88,8 +88,6 @@ func (i *Instance) SetBlockstoreLatency(t time.Duration) time.Duration { // just a much better idea. func Session(ctx context.Context, net tn.Network, p testutil.Identity) Instance { bsdelay := delay.Fixed(0) - const bloomSize = 512 - const writeCacheElems = 100 adapter := net.Adapter(p) dstore := ds_sync.MutexWrap(datastore2.WithDelay(ds.NewMapDatastore(), bsdelay)) diff --git a/exchange/bitswap/wantmanager.go b/exchange/bitswap/wantmanager.go index 0825e8cfc97..bdb9db636ae 100644 --- a/exchange/bitswap/wantmanager.go +++ b/exchange/bitswap/wantmanager.go @@ -55,16 +55,6 @@ func NewWantManager(ctx context.Context, network bsnet.BitSwapNetwork) *WantMana } } -type msgPair struct { - to peer.ID - msg bsmsg.BitSwapMessage -} - -type cancellation struct { - who peer.ID - blk *cid.Cid -} - type msgQueue struct { p peer.ID diff --git a/exchange/offline/offline.go b/exchange/offline/offline.go index 399af0f58f2..a70201c6416 100644 --- a/exchange/offline/offline.go +++ b/exchange/offline/offline.go @@ -42,7 +42,7 @@ func (_ *offlineExchange) Close() error { } func (e *offlineExchange) GetBlocks(ctx context.Context, ks []*cid.Cid) (<-chan blocks.Block, error) { - out := make(chan blocks.Block, 0) + out := make(chan blocks.Block) go func() { defer close(out) var misses []*cid.Cid diff --git a/exchange/offline/offline_test.go b/exchange/offline/offline_test.go index d2f877a9406..efdf2c7b184 100644 --- a/exchange/offline/offline_test.go +++ b/exchange/offline/offline_test.go @@ -67,7 +67,7 @@ func TestGetBlocks(t *testing.T) { } var count int - for _ = range received { + for range received { count++ } if len(expected) != count { diff --git a/filestore/fsrefstore.go b/filestore/fsrefstore.go index 46cc39b7f48..f1db5b6a8d3 100644 --- a/filestore/fsrefstore.go +++ b/filestore/fsrefstore.go @@ -162,7 +162,7 @@ func (f *FileManager) readDataObj(c *cid.Cid, d *pb.DataObj) ([]byte, error) { } defer fi.Close() - _, err = fi.Seek(int64(d.GetOffset()), os.SEEK_SET) + _, err = fi.Seek(int64(d.GetOffset()), io.SeekStart) if err != nil { return nil, &CorruptReferenceError{StatusFileError, err} } diff --git a/fuse/ipns/common.go b/fuse/ipns/common.go index bfce194af2f..073324faded 100644 --- a/fuse/ipns/common.go +++ b/fuse/ipns/common.go @@ -33,9 +33,6 @@ func InitializeKeyspace(n *core.IpfsNode, key ci.PrivKey) error { } pub := nsys.NewRoutingPublisher(n.Routing, n.Repo.Datastore()) - if err := pub.Publish(ctx, key, path.FromCid(nodek)); err != nil { - return err - } - return nil + return pub.Publish(ctx, key, path.FromCid(nodek)) } diff --git a/fuse/ipns/ipns_test.go 
b/fuse/ipns/ipns_test.go index 28bc97a8c92..b7117505d35 100644 --- a/fuse/ipns/ipns_test.go +++ b/fuse/ipns/ipns_test.go @@ -198,7 +198,7 @@ func TestFilePersistence(t *testing.T) { mnt.Close() t.Log("Closed, opening new fs") - node, mnt = setupIpnsTest(t, node) + _, mnt = setupIpnsTest(t, node) defer mnt.Close() rbuf, err := ioutil.ReadFile(mnt.Dir + fname) diff --git a/fuse/ipns/ipns_unix.go b/fuse/ipns/ipns_unix.go index c24874e3feb..b955c320c1c 100644 --- a/fuse/ipns/ipns_unix.go +++ b/fuse/ipns/ipns_unix.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "io" "os" core "github.com/ipfs/go-ipfs/core" @@ -346,7 +347,7 @@ func (dir *Directory) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { } func (fi *File) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error { - _, err := fi.fi.Seek(req.Offset, os.SEEK_SET) + _, err := fi.fi.Seek(req.Offset, io.SeekStart) if err != nil { return err } @@ -473,7 +474,7 @@ func (fi *FileNode) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse. return nil, fuse.ENOTSUP } - _, err := fd.Seek(0, os.SEEK_END) + _, err := fd.Seek(0, io.SeekEnd) if err != nil { log.Error("seek reset failed: ", err) return nil, err diff --git a/fuse/node/mount_unix.go b/fuse/node/mount_unix.go index 2177eafb818..8345a9f9411 100644 --- a/fuse/node/mount_unix.go +++ b/fuse/node/mount_unix.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "strings" - "time" core "github.com/ipfs/go-ipfs/core" ipns "github.com/ipfs/go-ipfs/fuse/ipns" @@ -18,10 +17,6 @@ import ( var log = logging.Logger("node") -// amount of time to wait for mount errors -// TODO is this non-deterministic? -const mountTimeout = time.Second - // fuseNoDirectory used to check the returning fuse error const fuseNoDirectory = "fusermount: failed to access mountpoint" @@ -49,12 +44,7 @@ func Mount(node *core.IpfsNode, fsdir, nsdir string) error { return err } - var err error - if err = doMount(node, fsdir, nsdir); err != nil { - return err - } - - return nil + return doMount(node, fsdir, nsdir) } func doMount(node *core.IpfsNode, fsdir, nsdir string) error { diff --git a/fuse/readonly/readonly_unix.go b/fuse/readonly/readonly_unix.go index 7b470abd56c..e8004c81949 100644 --- a/fuse/readonly/readonly_unix.go +++ b/fuse/readonly/readonly_unix.go @@ -85,7 +85,6 @@ func (*Root) ReadDirAll(ctx context.Context) ([]fuse.Dirent, error) { type Node struct { Ipfs *core.IpfsNode Nd *mdag.ProtoNode - fd *uio.DagReader cached *ftpb.Data } @@ -190,7 +189,7 @@ func (s *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadR if err != nil { return err } - o, err := r.Seek(req.Offset, os.SEEK_SET) + o, err := r.Seek(req.Offset, io.SeekStart) lm["res_offset"] = o if err != nil { return err diff --git a/importer/balanced/balanced_test.go b/importer/balanced/balanced_test.go index c225f7b7592..4efb03b894c 100644 --- a/importer/balanced/balanced_test.go +++ b/importer/balanced/balanced_test.go @@ -6,14 +6,12 @@ import ( "io" "io/ioutil" mrand "math/rand" - "os" "testing" chunk "github.com/ipfs/go-ipfs/importer/chunk" h "github.com/ipfs/go-ipfs/importer/helpers" dag "github.com/ipfs/go-ipfs/merkledag" mdtest "github.com/ipfs/go-ipfs/merkledag/test" - pin "github.com/ipfs/go-ipfs/pin" uio "github.com/ipfs/go-ipfs/unixfs/io" "context" @@ -62,12 +60,6 @@ func TestSizeBasedSplit(t *testing.T) { testFileConsistency(t, 31*4095, 4096) } -func dup(b []byte) []byte { - o := make([]byte, len(b)) - copy(o, b) - return o -} - func testFileConsistency(t *testing.T, nbytes int64, blksize int64) { ds 
:= mdtest.Mock() nd, should := getTestDag(t, ds, nbytes, blksize) @@ -131,11 +123,6 @@ func dagrArrComp(t *testing.T, r io.Reader, should []byte) { } } -type dagservAndPinner struct { - ds dag.DAGService - mp pin.Pinner -} - func TestIndirectBlocks(t *testing.T) { ds := mdtest.Mock() dag, buf := getTestDag(t, ds, 1024*1024, 512) @@ -166,7 +153,7 @@ func TestSeekingBasic(t *testing.T) { } start := int64(4000) - n, err := rs.Seek(start, os.SEEK_SET) + n, err := rs.Seek(start, io.SeekStart) if err != nil { t.Fatal(err) } @@ -194,7 +181,7 @@ func TestSeekToBegin(t *testing.T) { t.Fatal("Copy didnt copy enough bytes") } - seeked, err := rs.Seek(0, os.SEEK_SET) + seeked, err := rs.Seek(0, io.SeekStart) if err != nil { t.Fatal(err) } @@ -222,7 +209,7 @@ func TestSeekToAlmostBegin(t *testing.T) { t.Fatal("Copy didnt copy enough bytes") } - seeked, err := rs.Seek(1, os.SEEK_SET) + seeked, err := rs.Seek(1, io.SeekStart) if err != nil { t.Fatal(err) } @@ -243,7 +230,7 @@ func TestSeekEnd(t *testing.T) { t.Fatal(err) } - seeked, err := rs.Seek(0, os.SEEK_END) + seeked, err := rs.Seek(0, io.SeekEnd) if err != nil { t.Fatal(err) } @@ -262,7 +249,7 @@ func TestSeekEndSingleBlockFile(t *testing.T) { t.Fatal(err) } - seeked, err := rs.Seek(0, os.SEEK_END) + seeked, err := rs.Seek(0, io.SeekEnd) if err != nil { t.Fatal(err) } @@ -285,7 +272,7 @@ func TestSeekingStress(t *testing.T) { for i := 0; i < 50; i++ { offset := mrand.Intn(int(nbytes)) l := int(nbytes) - offset - n, err := rs.Seek(int64(offset), os.SEEK_SET) + n, err := rs.Seek(int64(offset), io.SeekStart) if err != nil { t.Fatal(err) } @@ -323,7 +310,7 @@ func TestSeekingConsistency(t *testing.T) { for coff := nbytes - 4096; coff >= 0; coff -= 4096 { t.Log(coff) - n, err := rs.Seek(coff, os.SEEK_SET) + n, err := rs.Seek(coff, io.SeekStart) if err != nil { t.Fatal(err) } diff --git a/importer/helpers/helpers.go b/importer/helpers/helpers.go index 317fd60d0a9..643ec412419 100644 --- a/importer/helpers/helpers.go +++ b/importer/helpers/helpers.go @@ -5,7 +5,6 @@ import ( "fmt" "os" - chunk "github.com/ipfs/go-ipfs/importer/chunk" dag "github.com/ipfs/go-ipfs/merkledag" pi "github.com/ipfs/go-ipfs/thirdparty/posinfo" ft "github.com/ipfs/go-ipfs/unixfs" @@ -18,7 +17,6 @@ import ( var BlockSizeLimit = 1048576 // 1 MB // rough estimates on expected sizes -var roughDataBlockSize = chunk.DefaultBlockSize var roughLinkBlockSize = 1 << 13 // 8KB var roughLinkSize = 34 + 8 + 5 // sha256 multihash + size + no name + protobuf framing @@ -113,11 +111,8 @@ func (n *UnixfsNode) AddChild(child *UnixfsNode, db *DagBuilderHelper) error { } _, err = db.batch.Add(childnode) - if err != nil { - return err - } - return nil + return err } // Removes the child node at the given index diff --git a/importer/importer.go b/importer/importer.go index c99cb4fbf1d..2c9e5db7c8e 100644 --- a/importer/importer.go +++ b/importer/importer.go @@ -13,12 +13,9 @@ import ( trickle "github.com/ipfs/go-ipfs/importer/trickle" dag "github.com/ipfs/go-ipfs/merkledag" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" ) -var log = logging.Logger("importer") - // Builds a DAG from the given file, writing created blocks to disk as they are // created func BuildDagFromFile(fpath string, ds dag.DAGService) (node.Node, error) { diff --git a/importer/trickle/trickle_test.go b/importer/trickle/trickle_test.go index 6abb9a01cd2..8af15095af0 100644 --- a/importer/trickle/trickle_test.go +++ 
b/importer/trickle/trickle_test.go @@ -7,14 +7,12 @@ import ( "io" "io/ioutil" mrand "math/rand" - "os" "testing" chunk "github.com/ipfs/go-ipfs/importer/chunk" h "github.com/ipfs/go-ipfs/importer/helpers" merkledag "github.com/ipfs/go-ipfs/merkledag" mdtest "github.com/ipfs/go-ipfs/merkledag/test" - pin "github.com/ipfs/go-ipfs/pin" ft "github.com/ipfs/go-ipfs/unixfs" uio "github.com/ipfs/go-ipfs/unixfs/io" @@ -126,11 +124,6 @@ func arrComp(a, b []byte) error { return nil } -type dagservAndPinner struct { - ds merkledag.DAGService - mp pin.Pinner -} - func TestIndirectBlocks(t *testing.T) { splitter := chunk.SizeSplitterGen(512) nbytes := 1024 * 1024 @@ -178,7 +171,7 @@ func TestSeekingBasic(t *testing.T) { } start := int64(4000) - n, err := rs.Seek(start, os.SEEK_SET) + n, err := rs.Seek(start, io.SeekStart) if err != nil { t.Fatal(err) } @@ -222,7 +215,7 @@ func TestSeekToBegin(t *testing.T) { t.Fatal("Copy didnt copy enough bytes") } - seeked, err := rs.Seek(0, os.SEEK_SET) + seeked, err := rs.Seek(0, io.SeekStart) if err != nil { t.Fatal(err) } @@ -266,7 +259,7 @@ func TestSeekToAlmostBegin(t *testing.T) { t.Fatal("Copy didnt copy enough bytes") } - seeked, err := rs.Seek(1, os.SEEK_SET) + seeked, err := rs.Seek(1, io.SeekStart) if err != nil { t.Fatal(err) } @@ -302,7 +295,7 @@ func TestSeekEnd(t *testing.T) { t.Fatal(err) } - seeked, err := rs.Seek(0, os.SEEK_END) + seeked, err := rs.Seek(0, io.SeekEnd) if err != nil { t.Fatal(err) } @@ -328,7 +321,7 @@ func TestSeekEndSingleBlockFile(t *testing.T) { t.Fatal(err) } - seeked, err := rs.Seek(0, os.SEEK_END) + seeked, err := rs.Seek(0, io.SeekEnd) if err != nil { t.Fatal(err) } @@ -358,7 +351,7 @@ func TestSeekingStress(t *testing.T) { for i := 0; i < 50; i++ { offset := mrand.Intn(int(nbytes)) l := int(nbytes) - offset - n, err := rs.Seek(int64(offset), os.SEEK_SET) + n, err := rs.Seek(int64(offset), io.SeekStart) if err != nil { t.Fatal(err) } @@ -403,7 +396,7 @@ func TestSeekingConsistency(t *testing.T) { for coff := nbytes - 4096; coff >= 0; coff -= 4096 { t.Log(coff) - n, err := rs.Seek(coff, os.SEEK_SET) + n, err := rs.Seek(coff, io.SeekStart) if err != nil { t.Fatal(err) } @@ -566,31 +559,3 @@ func TestAppendSingleBytesToEmpty(t *testing.T) { t.Fatal(err) } } - -func printDag(nd *merkledag.ProtoNode, ds merkledag.DAGService, indent int) { - pbd, err := ft.FromBytes(nd.Data()) - if err != nil { - panic(err) - } - - for i := 0; i < indent; i++ { - fmt.Print(" ") - } - fmt.Printf("{size = %d, type = %s, nc = %d", pbd.GetFilesize(), pbd.GetType().String(), len(pbd.GetBlocksizes())) - if len(nd.Links()) > 0 { - fmt.Println() - } - for _, lnk := range nd.Links() { - child, err := lnk.GetNode(context.Background(), ds) - if err != nil { - panic(err) - } - printDag(child.(*merkledag.ProtoNode), ds, indent+1) - } - if len(nd.Links()) > 0 { - for i := 0; i < indent; i++ { - fmt.Print(" ") - } - } - fmt.Println("}") -} diff --git a/keystore/keystore.go b/keystore/keystore.go index 8424dbafa58..acb8cb3cc8b 100644 --- a/keystore/keystore.go +++ b/keystore/keystore.go @@ -104,11 +104,8 @@ func (ks *FSKeystore) Put(name string, k ci.PrivKey) error { defer fi.Close() _, err = fi.Write(b) - if err != nil { - return err - } - return nil + return err } // Get retrieve a key from the Keystore diff --git a/merkledag/merkledag.go b/merkledag/merkledag.go index b7a6ccf15ff..1a29b56e4db 100644 --- a/merkledag/merkledag.go +++ b/merkledag/merkledag.go @@ -12,12 +12,10 @@ import ( offline "github.com/ipfs/go-ipfs/exchange/offline" ipldcbor 
"gx/ipfs/QmNrbCt8j9DT5W9Pmjy2SdudT9k8GpaDr4sRuFix3BXhgR/go-ipld-cbor" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" ) -var log = logging.Logger("merkledag") var ErrNotFound = fmt.Errorf("merkledag: not found") // DAGService is an IPFS Merkle DAG service. diff --git a/merkledag/merkledag_test.go b/merkledag/merkledag_test.go index da43bdb674a..2182f909884 100644 --- a/merkledag/merkledag_test.go +++ b/merkledag/merkledag_test.go @@ -209,12 +209,6 @@ func runBatchFetchTest(t *testing.T, read io.Reader) { } } -func assertCanGet(t *testing.T, ds DAGService, n node.Node) { - if _, err := ds.Get(context.Background(), n.Cid()); err != nil { - t.Fatal(err) - } -} - func TestCantGet(t *testing.T) { ds := dstest.Mock() a := NodeWithData([]byte("A")) diff --git a/merkledag/node.go b/merkledag/node.go index fa575097a2a..4ef49718414 100644 --- a/merkledag/node.go +++ b/merkledag/node.go @@ -215,9 +215,8 @@ func (n *ProtoNode) SetData(d []byte) { // that. If a link of the same name existed, it is removed. func (n *ProtoNode) UpdateNodeLink(name string, that *ProtoNode) (*ProtoNode, error) { newnode := n.Copy().(*ProtoNode) - err := newnode.RemoveNodeLink(name) - err = nil // ignore error - err = newnode.AddNodeLink(name, that) + _ = newnode.RemoveNodeLink(name) // ignore error + err := newnode.AddNodeLink(name, that) return newnode, err } diff --git a/mfs/dir.go b/mfs/dir.go index 60cae39c773..fdfb49538d9 100644 --- a/mfs/dir.go +++ b/mfs/dir.go @@ -326,12 +326,7 @@ func (d *Directory) Unlink(name string) error { delete(d.childDirs, name) delete(d.files, name) - err := d.dirbuilder.RemoveChild(d.ctx, name) - if err != nil { - return err - } - - return nil + return d.dirbuilder.RemoveChild(d.ctx, name) } func (d *Directory) Flush() error { diff --git a/mfs/mfs_test.go b/mfs/mfs_test.go index 11a13b8e0b0..fa2e7c53d73 100644 --- a/mfs/mfs_test.go +++ b/mfs/mfs_test.go @@ -396,6 +396,9 @@ func TestMfsFile(t *testing.T) { // assert size is as expected size, err := fi.Size() + if err != nil { + t.Fatal(err) + } if size != int64(fisize) { t.Fatal("size isnt correct") } @@ -419,12 +422,15 @@ func TestMfsFile(t *testing.T) { // make sure size hasnt changed size, err = wfd.Size() + if err != nil { + t.Fatal(err) + } if size != int64(fisize) { t.Fatal("size isnt correct") } // seek back to beginning - ns, err := wfd.Seek(0, os.SEEK_SET) + ns, err := wfd.Seek(0, io.SeekStart) if err != nil { t.Fatal(err) } @@ -561,13 +567,9 @@ func actorMakeFile(d *Directory) error { return err } - err = wfd.Close() - if err != nil { - return err - } - - return nil + return wfd.Close() } + func actorMkdir(d *Directory) error { d, err := randomWalk(d, rand.Intn(7)) if err != nil { @@ -575,31 +577,8 @@ func actorMkdir(d *Directory) error { } _, err = d.Mkdir(randomName()) - if err != nil { - return err - } - - return nil -} - -func actorRemoveFile(d *Directory) error { - d, err := randomWalk(d, rand.Intn(7)) - if err != nil { - return err - } - - ents, err := d.List(context.Background()) - if err != nil { - return err - } - - if len(ents) == 0 { - return nil - } - - re := ents[rand.Intn(len(ents))] - return d.Unlink(re.Name) + return err } func randomFile(d *Directory) (*File, error) { @@ -895,7 +874,7 @@ func readFile(rt *Root, path string, offset int64, buf []byte) error { return err } - _, err = fd.Seek(offset, os.SEEK_SET) + _, err = fd.Seek(offset, 
io.SeekStart) if err != nil { return err } diff --git a/mfs/ops.go b/mfs/ops.go index f84540a6a7c..0d02cbb08da 100644 --- a/mfs/ops.go +++ b/mfs/ops.go @@ -65,12 +65,7 @@ func Mv(r *Root, src, dst string) error { return err } - err = srcDirObj.Unlink(srcFname) - if err != nil { - return err - } - - return nil + return srcDirObj.Unlink(srcFname) } func lookupDir(r *Root, path string) (*Directory, error) { diff --git a/mfs/system.go b/mfs/system.go index 4ed84d83b27..934a3261036 100644 --- a/mfs/system.go +++ b/mfs/system.go @@ -170,12 +170,6 @@ type Republisher struct { lastpub *cid.Cid } -func (rp *Republisher) getVal() *cid.Cid { - rp.lk.Lock() - defer rp.lk.Unlock() - return rp.val -} - // NewRepublisher creates a new Republisher object to republish the given root // using the given short and long time intervals func NewRepublisher(ctx context.Context, pf PubFunc, tshort, tlong time.Duration) *Republisher { @@ -197,13 +191,6 @@ func (p *Republisher) setVal(c *cid.Cid) { p.val = c } -func (p *Republisher) pubNow() { - select { - case p.pubnowch <- nil: - default: - } -} - func (p *Republisher) WaitPub() { p.lk.Lock() consistent := p.lastpub == p.val diff --git a/mk/golang.mk b/mk/golang.mk index cf8b7e421de..ada0c977124 100644 --- a/mk/golang.mk +++ b/mk/golang.mk @@ -13,6 +13,7 @@ CHECK_GO := go-pkg-name=$(shell go list $(go-tags) github.com/ipfs/go-ipfs/$(1)) go-main-name=$(notdir $(call go-pkg-name,$(1)))$(?exe) go-curr-pkg-tgt=$(d)/$(call go-main-name,$(d)) +go-pkgs-novendor=$(shell go list github.com/ipfs/go-ipfs/... | grep -v /Godeps/) go-tags=$(if $(GOTAGS), -tags="$(call join-with,$(space),$(GOTAGS))") go-flags-with-tags=$(GOFLAGS)$(go-tags) @@ -39,6 +40,11 @@ test_go_fmt: .PHONY: test_go_fmt TEST_GO += test_go_fmt +test_go_megacheck: + @go get honnef.co/go/tools/cmd/megacheck + @for pkg in $(go-pkgs-novendor); do megacheck "$$pkg"; done +.PHONY: megacheck + test_go: $(TEST_GO) check_go_version: diff --git a/namesys/publisher.go b/namesys/publisher.go index 6c64372f066..cba463492d5 100644 --- a/namesys/publisher.go +++ b/namesys/publisher.go @@ -160,17 +160,11 @@ func PutRecordToRouting(ctx context.Context, k ci.PrivKey, value path.Path, seqn errs <- PublishPublicKey(ctx, r, namekey, k.GetPublic()) }() - err = waitOnErrChan(ctx, errs) - if err != nil { - return err - } - - err = waitOnErrChan(ctx, errs) - if err != nil { + if err := waitOnErrChan(ctx, errs); err != nil { return err } - return nil + return waitOnErrChan(ctx, errs) } func waitOnErrChan(ctx context.Context, errs chan error) error { @@ -192,12 +186,7 @@ func PublishPublicKey(ctx context.Context, r routing.ValueStore, k string, pubk // Store associated public key timectx, cancel := context.WithTimeout(ctx, PublishPutValTimeout) defer cancel() - err = r.PutValue(timectx, k, pkbytes) - if err != nil { - return err - } - - return nil + return r.PutValue(timectx, k, pkbytes) } func PublishEntry(ctx context.Context, r routing.ValueStore, ipnskey string, rec *pb.IpnsEntry) error { @@ -211,11 +200,7 @@ func PublishEntry(ctx context.Context, r routing.ValueStore, ipnskey string, rec log.Debugf("Storing ipns entry at: %s", ipnskey) // Store ipns entry at "/ipns/"+b58(h(pubkey)) - if err := r.PutValue(timectx, ipnskey, data); err != nil { - return err - } - - return nil + return r.PutValue(timectx, ipnskey, data) } func CreateRoutingEntryData(pk ci.PrivKey, val path.Path, seq uint64, eol time.Time) (*pb.IpnsEntry, error) { @@ -349,12 +334,7 @@ func InitializeKeyspace(ctx context.Context, ds dag.DAGService, pub Publisher, p 
return err } - err = pub.Publish(ctx, key, path.FromCid(nodek)) - if err != nil { - return err - } - - return nil + return pub.Publish(ctx, key, path.FromCid(nodek)) } func IpnsKeysForID(id peer.ID) (name, ipns string) { diff --git a/pin/gc/gc.go b/pin/gc/gc.go index ff1ca8a3511..f92e8eead84 100644 --- a/pin/gc/gc.go +++ b/pin/gc/gc.go @@ -9,13 +9,10 @@ import ( dag "github.com/ipfs/go-ipfs/merkledag" pin "github.com/ipfs/go-ipfs/pin" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" ) -var log = logging.Logger("gc") - // Result represents an incremental output from a garbage collection // run. It contains either an error, or the cid of a removed object. type Result struct { diff --git a/pin/pin.go b/pin/pin.go index 482d070fd50..4270884d9de 100644 --- a/pin/pin.go +++ b/pin/pin.go @@ -528,9 +528,7 @@ func (p *pinner) InternalPins() []*cid.Cid { p.lock.Lock() defer p.lock.Unlock() var out []*cid.Cid - for _, c := range p.internalPin.Keys() { - out = append(out, c) - } + out = append(out, p.internalPin.Keys()...) return out } diff --git a/pin/pin_test.go b/pin/pin_test.go index bb90ea08907..072761f0a43 100644 --- a/pin/pin_test.go +++ b/pin/pin_test.go @@ -183,8 +183,8 @@ func TestIsPinnedLookup(t *testing.T) { // TODO does pinner need to share datastore with blockservice? p := NewPinner(dstore, dserv, dserv) - aNodes := make([]*mdag.ProtoNode, aBranchLen, aBranchLen) - aKeys := make([]*cid.Cid, aBranchLen, aBranchLen) + aNodes := make([]*mdag.ProtoNode, aBranchLen) + aKeys := make([]*cid.Cid, aBranchLen) for i := 0; i < aBranchLen; i++ { a, _ := randNode() if i >= 1 { diff --git a/repo/config/config.go b/repo/config/config.go index fa94d1e3f07..411cecc681f 100644 --- a/repo/config/config.go +++ b/repo/config/config.go @@ -10,11 +10,8 @@ import ( "strings" "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/mitchellh/go-homedir" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" ) -var log = logging.Logger("config") - // Config is used to load ipfs config files. type Config struct { Identity Identity // local node's peer identity diff --git a/repo/config/supernode.go b/repo/config/supernode.go index de403fadd7d..a985040b26f 100644 --- a/repo/config/supernode.go +++ b/repo/config/supernode.go @@ -20,13 +20,6 @@ var DefaultSNRServers = []string{ "/ip4/178.62.61.185/tcp/4002/ipfs/QmVw6fGNqBixZE4bewRLT2VXX7fAHUHs8JyidDiJ1P7RUN", } -func initSNRConfig() (*SupernodeClientConfig, error) { - // TODO perform validation - return &SupernodeClientConfig{ - Servers: DefaultSNRServers, - }, nil -} - func (gcr *SupernodeClientConfig) ServerIPFSAddrs() ([]ipfsaddr.IPFSAddr, error) { var addrs []ipfsaddr.IPFSAddr for _, server := range gcr.Servers { diff --git a/repo/fsrepo/defaultds.go b/repo/fsrepo/defaultds.go index 9252d21ce6c..ea41235eab4 100644 --- a/repo/fsrepo/defaultds.go +++ b/repo/fsrepo/defaultds.go @@ -41,16 +41,6 @@ func openDefaultDatastore(r *FSRepo) (repo.Datastore, error) { return nil, fmt.Errorf("unable to open flatfs datastore: %v", err) } - // Add our PeerID to metrics paths to keep them unique - // - // As some tests just pass a zero-value Config to fsrepo.Init, - // cope with missing PeerID. - id := r.config.Identity.PeerID - if id == "" { - // the tests pass in a zero Config; cope with it - id = fmt.Sprintf("uninitialized_%p", r) - } - prefix := "ipfs.fsrepo.datastore." 
metricsBlocks := measure.New(prefix+"blocks", blocksDS) metricsLevelDB := measure.New(prefix+"leveldb", leveldbDS) diff --git a/repo/fsrepo/fsrepo.go b/repo/fsrepo/fsrepo.go index 07b25e1bd68..53358ada256 100644 --- a/repo/fsrepo/fsrepo.go +++ b/repo/fsrepo/fsrepo.go @@ -37,11 +37,6 @@ var RepoVersion = 5 var migrationInstructions = `See https://github.com/ipfs/fs-repo-migrations/blob/master/run.md Sorry for the inconvenience. In the future, these will run automatically.` -var errIncorrectRepoFmt = `Repo has incorrect version: %s -Program version is: %s -Please run the ipfs migration tool before continuing. -` + migrationInstructions - var programTooLowMessage = `Your programs version (%d) is lower than your repos (%d). Please update ipfs to a version that supports the existing repo, or run a migration in reverse. @@ -411,10 +406,7 @@ func (r *FSRepo) Close() error { // logging.Configure(logging.Output(os.Stderr)) r.closed = true - if err := r.lockfile.Close(); err != nil { - return err - } - return nil + return r.lockfile.Close() } // Result when not Open is undefined. The method may panic if it pleases. diff --git a/repo/fsrepo/fsrepo_test.go b/repo/fsrepo/fsrepo_test.go index def6739fe0f..d6387eec821 100644 --- a/repo/fsrepo/fsrepo_test.go +++ b/repo/fsrepo/fsrepo_test.go @@ -100,7 +100,7 @@ func TestDatastorePersistsFromRepoToRepo(t *testing.T) { actual, ok := v.([]byte) assert.True(ok, t, "value should be the []byte from r1's Put") assert.Nil(r2.Close(), t) - assert.True(bytes.Compare(expected, actual) == 0, t, "data should match") + assert.True(bytes.Equal(expected, actual), t, "data should match") } func TestOpenMoreThanOnceInSameProcess(t *testing.T) { diff --git a/repo/fsrepo/migrations/unpack.go b/repo/fsrepo/migrations/unpack.go index ef142c3f5ef..5b563071f65 100644 --- a/repo/fsrepo/migrations/unpack.go +++ b/repo/fsrepo/migrations/unpack.go @@ -70,11 +70,8 @@ func writeToPath(rc io.Reader, out string) error { defer binfi.Close() _, err = io.Copy(binfi, rc) - if err != nil { - return err - } - return nil + return err } func unpackZip(dist, binnom, path, out string) error { diff --git a/repo/fsrepo/serialize/serialize.go b/repo/fsrepo/serialize/serialize.go index 33ca9f63fc0..13fb2b95bd8 100644 --- a/repo/fsrepo/serialize/serialize.go +++ b/repo/fsrepo/serialize/serialize.go @@ -9,13 +9,10 @@ import ( "path/filepath" "github.com/ipfs/go-ipfs/repo/config" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" "gx/ipfs/QmdYwCmx8pZRkzdcd8MhmLJqYVoVTC1aGsy5Q4reMGLNLg/atomicfile" ) -var log = logging.Logger("fsrepo") - // ReadConfigFile reads the config from `filename` into `cfg`. 
func ReadConfigFile(filename string, cfg interface{}) error { f, err := os.Open(filename) diff --git a/routing/mock/centralized_server.go b/routing/mock/centralized_server.go index 4d824e67362..afa250b9ac2 100644 --- a/routing/mock/centralized_server.go +++ b/routing/mock/centralized_server.go @@ -66,7 +66,7 @@ func (rs *s) Providers(c *cid.Cid) []pstore.PeerInfo { return ret } for _, r := range records { - if time.Now().Sub(r.Created) > rs.delayConf.ValueVisibility.Get() { + if time.Since(r.Created) > rs.delayConf.ValueVisibility.Get() { ret = append(ret, r.Peer) } } diff --git a/routing/mock/centralized_test.go b/routing/mock/centralized_test.go index 69b451ce3f6..3c51340d68a 100644 --- a/routing/mock/centralized_test.go +++ b/routing/mock/centralized_test.go @@ -45,7 +45,7 @@ func TestClientFindProviders(t *testing.T) { providersFromClient := client.FindProvidersAsync(context.Background(), k, max) isInClient := false for pi := range providersFromClient { - if pi.ID == pi.ID { + if pi.ID == pi.ID { // <-- typo? isInClient = true } } @@ -72,7 +72,7 @@ func TestClientOverMax(t *testing.T) { providersFromClient := client.FindProvidersAsync(context.Background(), k, max) i := 0 - for _ = range providersFromClient { + for range providersFromClient { i++ } if i != max { @@ -128,7 +128,7 @@ func TestCanceledContext(t *testing.T) { providers := client.FindProvidersAsync(ctx, k, max) numProvidersReturned := 0 - for _ = range providers { + for range providers { numProvidersReturned++ } t.Log(numProvidersReturned) diff --git a/routing/none/none_client.go b/routing/none/none_client.go index 35f3dedea51..66767fdd026 100644 --- a/routing/none/none_client.go +++ b/routing/none/none_client.go @@ -7,15 +7,12 @@ import ( repo "github.com/ipfs/go-ipfs/repo" routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" p2phost "gx/ipfs/QmUywuGNZoUKV8B9iyvup9bPkLiMrhTsyVMkeSXW5VxAfC/go-libp2p-host" pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" peer "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) -var log = logging.Logger("mockrouter") - type nilclient struct { } diff --git a/routing/offline/offline.go b/routing/offline/offline.go index 8c91a1c43e8..2be67792021 100644 --- a/routing/offline/offline.go +++ b/routing/offline/offline.go @@ -10,7 +10,6 @@ import ( routing "gx/ipfs/QmNdaQ8itUU9jEZUwTsG4gHMaPmRfi6FEe89QjQAFbep3M/go-libp2p-routing" ci "gx/ipfs/QmP1DfoUjiWH2ZBo1PBH6FupdBucbDepx3HpWmEY6JMUpY/go-libp2p-crypto" ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" record "gx/ipfs/QmWYCqr6UDqqD1bfRybaAPtbAqcN3TSJpveaBXMwbQ3ePZ/go-libp2p-record" pb "gx/ipfs/QmWYCqr6UDqqD1bfRybaAPtbAqcN3TSJpveaBXMwbQ3ePZ/go-libp2p-record/pb" pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" @@ -19,8 +18,6 @@ import ( "gx/ipfs/QmdS9KpbDyPrieswibZhkod1oXqRwZJrUPzxCofAMWpFGq/go-libp2p-peer" ) -var log = logging.Logger("offlinerouting") - var ErrOffline = errors.New("routing system in offline mode") func NewOfflineRouter(dstore ds.Datastore, privkey ci.PrivKey) routing.IpfsRouting { diff --git a/routing/offline/offline_test.go b/routing/offline/offline_test.go index a847a2814c9..f4ccb272907 100644 --- a/routing/offline/offline_test.go +++ b/routing/offline/offline_test.go @@ -15,17 
+15,19 @@ func TestOfflineRouterStorage(t *testing.T) { privkey, _, _ := testutil.RandTestKeyPair(128) offline := NewOfflineRouter(nds, privkey) - err := offline.PutValue(ctx, "key", []byte("testing 1 2 3")) - if err != nil { + if err := offline.PutValue(ctx, "key", []byte("testing 1 2 3")); err != nil { t.Fatal(err) } val, err := offline.GetValue(ctx, "key") + if err != nil { + t.Fatal(err) + } if !bytes.Equal([]byte("testing 1 2 3"), val) { t.Fatal("OfflineRouter does not properly store") } - val, err = offline.GetValue(ctx, "notHere") + _, err = offline.GetValue(ctx, "notHere") if err == nil { t.Fatal("Router should throw errors for unfound records") } diff --git a/routing/supernode/proxy/standard.go b/routing/supernode/proxy/standard.go index da816025432..eaa9e078674 100644 --- a/routing/supernode/proxy/standard.go +++ b/routing/supernode/proxy/standard.go @@ -104,10 +104,7 @@ func (px *standard) sendMessage(ctx context.Context, m *dhtpb.Message, remote pe } defer s.Close() pbw := ggio.NewDelimitedWriter(s) - if err := pbw.WriteMsg(m); err != nil { - return err - } - return nil + return pbw.WriteMsg(m) } // SendRequest sends the request to each remote sequentially (randomized order), diff --git a/routing/supernode/server.go b/routing/supernode/server.go index 5d42231d048..95e70e0be95 100644 --- a/routing/supernode/server.go +++ b/routing/supernode/server.go @@ -10,7 +10,6 @@ import ( datastore "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" dhtpb "gx/ipfs/QmRmroYSdievxnjiuy99C8BzShNstdEWcEF3LQHF7fUbez/go-libp2p-kad-dht/pb" - record "gx/ipfs/QmWYCqr6UDqqD1bfRybaAPtbAqcN3TSJpveaBXMwbQ3ePZ/go-libp2p-record" pb "gx/ipfs/QmWYCqr6UDqqD1bfRybaAPtbAqcN3TSJpveaBXMwbQ3ePZ/go-libp2p-record/pb" pstore "gx/ipfs/QmXZSd1qR5BxZkPyuwfT5jpqQFScZccoZvDneXsKzCNHWX/go-libp2p-peerstore" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" @@ -140,10 +139,7 @@ func putRoutingRecord(ds datastore.Datastore, k string, value *pb.Record) error } dskey := dshelp.NewKeyFromBinary([]byte(k)) // TODO namespace - if err := ds.Put(dskey, data); err != nil { - return err - } - return nil + return ds.Put(dskey, data) } func putRoutingProviders(ds datastore.Datastore, k string, newRecords []*dhtpb.Message_Peer) error { @@ -204,20 +200,3 @@ func getRoutingProviders(ds datastore.Datastore, k string) ([]*dhtpb.Message_Pee func providerKey(k string) datastore.Key { return datastore.KeyWithNamespaces([]string{"routing", "providers", k}) } - -func verify(ps pstore.Peerstore, r *pb.Record) error { - v := make(record.Validator) - v["pk"] = record.PublicKeyValidator - p := peer.ID(r.GetAuthor()) - pk := ps.PubKey(p) - if pk == nil { - return fmt.Errorf("do not have public key for %s", p) - } - if err := record.CheckRecordSig(r, pk); err != nil { - return err - } - if err := v.VerifyRecord(r); err != nil { - return err - } - return nil -} diff --git a/test/integration/three_legged_cat_test.go b/test/integration/three_legged_cat_test.go index 1713e54b4c1..07ebbe7ed7b 100644 --- a/test/integration/three_legged_cat_test.go +++ b/test/integration/three_legged_cat_test.go @@ -65,7 +65,6 @@ func TestThreeLeggedCat100MBMacbookCoastToCoast(t *testing.T) { func RunThreeLeggedCat(data []byte, conf testutil.LatencyConfig) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - const numPeers = 3 // create network mn := mocknet.New(ctx) diff --git a/test/supernode_client/main.go b/test/supernode_client/main.go index 5b64547699e..3bd4f77fb62 100644 --- 
a/test/supernode_client/main.go +++ b/test/supernode_client/main.go @@ -64,8 +64,8 @@ func run() error { return err } repoPath := gopath.Join(cwd, config.DefaultPathName) - if err := ensureRepoInitialized(repoPath); err != nil { - } + _ = ensureRepoInitialized(repoPath) + repo, err := fsrepo.Open(repoPath) if err != nil { // owned by node return err @@ -233,26 +233,6 @@ func runFileCattingWorker(ctx context.Context, n *core.IpfsNode) error { return nil } -func toPeerInfos(bpeers []config.BootstrapPeer) ([]pstore.PeerInfo, error) { - var peers []pstore.PeerInfo - for _, bootstrap := range bpeers { - p, err := toPeerInfo(bootstrap) - if err != nil { - return nil, err - } - peers = append(peers, p) - } - return peers, nil -} - -func toPeerInfo(bootstrap config.BootstrapPeer) (p pstore.PeerInfo, err error) { - p = pstore.PeerInfo{ - ID: bootstrap.ID(), - Addrs: []ma.Multiaddr{bootstrap.Multiaddr()}, - } - return p, nil -} - func cmdCtx(node *core.IpfsNode, repoPath string) commands.Context { return commands.Context{ Online: true, diff --git a/thirdparty/tar/extractor.go b/thirdparty/tar/extractor.go index ebbedd59399..b84926bb249 100644 --- a/thirdparty/tar/extractor.go +++ b/thirdparty/tar/extractor.go @@ -79,12 +79,7 @@ func (te *Extractor) extractDir(h *tar.Header, depth int) error { te.Path = path } - err := os.MkdirAll(path, 0755) - if err != nil { - return err - } - - return nil + return os.MkdirAll(path, 0755) } func (te *Extractor) extractSymlink(h *tar.Header) error { @@ -112,12 +107,7 @@ func (te *Extractor) extractFile(h *tar.Header, r *tar.Reader, depth int, rootEx } defer file.Close() - err = copyWithProgress(file, r, te.Progress) - if err != nil { - return err - } - - return nil + return copyWithProgress(file, r, te.Progress) } func copyWithProgress(to io.Writer, from io.Reader, cb func(int64) int64) error { diff --git a/unixfs/hamt/hamt_stress_test.go b/unixfs/hamt/hamt_stress_test.go index 76357b23d02..6044631b9fa 100644 --- a/unixfs/hamt/hamt_stress_test.go +++ b/unixfs/hamt/hamt_stress_test.go @@ -1,13 +1,10 @@ package hamt import ( - "bufio" "context" "fmt" "math/rand" "os" - "strconv" - "strings" "testing" "time" @@ -191,7 +188,8 @@ func genOpSet(seed int64, keep, temp []string) []testOp { } // executes the given op set with a repl to allow easier debugging -func debugExecuteOpSet(ds dag.DAGService, width int, ops []testOp) (*HamtShard, error) { +/*func debugExecuteOpSet(ds dag.DAGService, width int, ops []testOp) (*HamtShard, error) { + s, err := NewHamtShard(ds, width) if err != nil { return nil, err @@ -238,7 +236,7 @@ mainloop: run = -1 } case "lookop": - for k := 0; k < len(ops); k++ { + for k = 0; k < len(ops); k++ { if ops[k].Val == parts[1] { fmt.Printf(" Op %d: %s %s\n", k, opnames[ops[k].Op], parts[1]) } @@ -289,4 +287,4 @@ func readCommand() string { scan := bufio.NewScanner(os.Stdin) scan.Scan() return scan.Text() -} +}*/ diff --git a/unixfs/hamt/hamt_test.go b/unixfs/hamt/hamt_test.go index 9f834a5aefc..77997d2fd18 100644 --- a/unixfs/hamt/hamt_test.go +++ b/unixfs/hamt/hamt_test.go @@ -6,7 +6,6 @@ import ( "math/rand" "os" "sort" - "strings" "testing" "time" @@ -138,7 +137,7 @@ func TestBasicSet(t *testing.T) { func TestDirBuilding(t *testing.T) { ds := mdtest.Mock() - s, _ := NewHamtShard(ds, 256) + _, _ = NewHamtShard(ds, 256) _, s, err := makeDir(ds, 200) if err != nil { @@ -161,7 +160,7 @@ func TestDirBuilding(t *testing.T) { func TestShardReload(t *testing.T) { ds := mdtest.Mock() - s, _ := NewHamtShard(ds, 256) + _, _ = NewHamtShard(ds, 256) ctx, cancel 
:= context.WithCancel(context.Background()) defer cancel() @@ -494,21 +493,6 @@ func TestSetHamtChild(t *testing.T) { } } -func printDag(ds dag.DAGService, nd *dag.ProtoNode, depth int) { - padding := strings.Repeat(" ", depth) - fmt.Println("{") - for _, l := range nd.Links() { - fmt.Printf("%s%s: %s", padding, l.Name, l.Cid.String()) - ch, err := ds.Get(context.Background(), l.Cid) - if err != nil { - panic(err) - } - - printDag(ds, ch.(*dag.ProtoNode), depth+1) - } - fmt.Println(padding + "}") -} - func printDiff(ds dag.DAGService, a, b *dag.ProtoNode) { diff, err := dagutils.Diff(context.TODO(), ds, a, b) if err != nil { diff --git a/unixfs/io/dagreader_test.go b/unixfs/io/dagreader_test.go index b57426e38e6..3ac82fc5fae 100644 --- a/unixfs/io/dagreader_test.go +++ b/unixfs/io/dagreader_test.go @@ -2,8 +2,8 @@ package io import ( "bytes" + "io" "io/ioutil" - "os" "strings" "testing" @@ -54,7 +54,7 @@ func TestSeekAndRead(t *testing.T) { } for i := 255; i >= 0; i-- { - reader.Seek(int64(i), os.SEEK_SET) + reader.Seek(int64(i), io.SeekStart) if reader.Offset() != int64(i) { t.Fatal("expected offset to be increased by one after read") @@ -100,14 +100,14 @@ func TestRelativeSeek(t *testing.T) { t.Fatalf("expected to read: %d at %d, read %d", i, reader.Offset()-1, out) } if i != 255 { - _, err := reader.Seek(3, os.SEEK_CUR) + _, err := reader.Seek(3, io.SeekCurrent) if err != nil { t.Fatal(err) } } } - _, err = reader.Seek(4, os.SEEK_END) + _, err = reader.Seek(4, io.SeekEnd) if err != nil { t.Fatal(err) } @@ -120,7 +120,7 @@ func TestRelativeSeek(t *testing.T) { if int(out) != 255-i { t.Fatalf("expected to read: %d at %d, read %d", 255-i, reader.Offset()-1, out) } - reader.Seek(-5, os.SEEK_CUR) // seek 4 bytes but we read one byte every time so 5 bytes + reader.Seek(-5, io.SeekCurrent) // seek 4 bytes but we read one byte every time so 5 bytes } } diff --git a/unixfs/io/pbdagreader.go b/unixfs/io/pbdagreader.go index a5a53ffa25a..0b75fd916c0 100644 --- a/unixfs/io/pbdagreader.go +++ b/unixfs/io/pbdagreader.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "os" mdag "github.com/ipfs/go-ipfs/merkledag" ft "github.com/ipfs/go-ipfs/unixfs" @@ -185,7 +184,7 @@ func (dr *pbDagReader) Offset() int64 { // recreations that need to happen. 
func (dr *pbDagReader) Seek(offset int64, whence int) (int64, error) { switch whence { - case os.SEEK_SET: + case io.SeekStart: if offset < 0 { return -1, errors.New("Invalid offset") } @@ -226,7 +225,7 @@ func (dr *pbDagReader) Seek(offset int64, whence int) (int64, error) { } // set proper offset within child readseeker - n, err := dr.buf.Seek(left, os.SEEK_SET) + n, err := dr.buf.Seek(left, io.SeekStart) if err != nil { return -1, err } @@ -238,13 +237,13 @@ func (dr *pbDagReader) Seek(offset int64, whence int) (int64, error) { } dr.offset = offset return offset, nil - case os.SEEK_CUR: + case io.SeekCurrent: // TODO: be smarter here noffset := dr.offset + offset - return dr.Seek(noffset, os.SEEK_SET) - case os.SEEK_END: + return dr.Seek(noffset, io.SeekStart) + case io.SeekEnd: noffset := int64(dr.pbdata.GetFilesize()) - offset - return dr.Seek(noffset, os.SEEK_SET) + return dr.Seek(noffset, io.SeekStart) default: return 0, errors.New("invalid whence") } diff --git a/unixfs/mod/dagmodifier.go b/unixfs/mod/dagmodifier.go index c531caa1594..090cdb59300 100644 --- a/unixfs/mod/dagmodifier.go +++ b/unixfs/mod/dagmodifier.go @@ -5,7 +5,6 @@ import ( "context" "errors" "io" - "os" chunk "github.com/ipfs/go-ipfs/importer/chunk" help "github.com/ipfs/go-ipfs/importer/helpers" @@ -14,7 +13,6 @@ import ( ft "github.com/ipfs/go-ipfs/unixfs" uio "github.com/ipfs/go-ipfs/unixfs/io" - logging "gx/ipfs/QmSpJByNKFX1sCsHBEp3R73FL4NF6FnQTEGyNAXHm2GS52/go-log" cid "gx/ipfs/QmYhQaCYEcaPPjxJX7YcPcVKkQfRy6sJ7B3XmGFk82XYdQ/go-cid" proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto" node "gx/ipfs/Qmb3Hm9QDFmfYuET4pu7Kyg8JV78jFa1nvZx5vnCZsK4ck/go-ipld-format" @@ -26,8 +24,6 @@ var ErrUnrecognizedWhence = errors.New("unrecognized whence") // 2MB var writebufferSize = 1 << 21 -var log = logging.Logger("dagio") - // DagModifier is the only struct licensed and able to correctly // perform surgery on a DAG 'file' // Dear god, please rename this to something more pleasant @@ -340,7 +336,7 @@ func (dm *DagModifier) readPrep() error { return err } - i, err := dr.Seek(int64(dm.curWrOff), os.SEEK_SET) + i, err := dr.Seek(int64(dm.curWrOff), io.SeekStart) if err != nil { cancel() return err @@ -397,11 +393,11 @@ func (dm *DagModifier) Seek(offset int64, whence int) (int64, error) { var newoffset uint64 switch whence { - case os.SEEK_CUR: + case io.SeekCurrent: newoffset = dm.curWrOff + uint64(offset) - case os.SEEK_SET: + case io.SeekStart: newoffset = uint64(offset) - case os.SEEK_END: + case io.SeekEnd: newoffset = uint64(fisize) - uint64(offset) default: return 0, ErrUnrecognizedWhence diff --git a/unixfs/mod/dagmodifier_test.go b/unixfs/mod/dagmodifier_test.go index ecc9be644d1..d7b3f326707 100644 --- a/unixfs/mod/dagmodifier_test.go +++ b/unixfs/mod/dagmodifier_test.go @@ -1,36 +1,21 @@ package mod import ( + "context" "fmt" + "io" "io/ioutil" - "os" "testing" - "github.com/ipfs/go-ipfs/blocks/blockstore" - bs "github.com/ipfs/go-ipfs/blockservice" - "github.com/ipfs/go-ipfs/exchange/offline" h "github.com/ipfs/go-ipfs/importer/helpers" trickle "github.com/ipfs/go-ipfs/importer/trickle" - mdag "github.com/ipfs/go-ipfs/merkledag" ft "github.com/ipfs/go-ipfs/unixfs" uio "github.com/ipfs/go-ipfs/unixfs/io" testu "github.com/ipfs/go-ipfs/unixfs/test" - context "context" - ds "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore" - "gx/ipfs/QmRWDav6mzWseLWeYfVd5fvUKiVe9xNH29YfMF438fG364/go-datastore/sync" u "gx/ipfs/QmWbjfz3u6HkAdPh34dgPchGbQjob6LXLhAeCGii2TX69n/go-ipfs-util" 
) -func getMockDagServAndBstore(t testing.TB) (mdag.DAGService, blockstore.Blockstore) { - dstore := ds.NewMapDatastore() - tsds := sync.MutexWrap(dstore) - bstore := blockstore.NewBlockstore(tsds) - bserv := bs.New(bstore, offline.Exchange(bstore)) - dserv := mdag.NewDAGService(bserv) - return dserv, bstore -} - func testModWrite(t *testing.T, beg, size uint64, orig []byte, dm *DagModifier) []byte { newdata := make([]byte, size) r := u.NewTimeSeededRand() @@ -112,7 +97,7 @@ func TestDagModifierBasic(t *testing.T) { beg = uint64(len(b)) length = 3000 t.Log("Testing pure append") - b = testModWrite(t, beg, length, b, dagmod) + _ = testModWrite(t, beg, length, b, dagmod) // Verify reported length node, err := dagmod.GetNode() @@ -384,7 +369,7 @@ func TestDagTruncate(t *testing.T) { t.Fatal("size was incorrect!") } - _, err = dagmod.Seek(0, os.SEEK_SET) + _, err = dagmod.Seek(0, io.SeekStart) if err != nil { t.Fatal(err) } @@ -450,7 +435,7 @@ func TestSparseWrite(t *testing.T) { t.Fatal("incorrect write amount") } - _, err = dagmod.Seek(0, os.SEEK_SET) + _, err = dagmod.Seek(0, io.SeekStart) if err != nil { t.Fatal(err) } @@ -479,7 +464,7 @@ func TestSeekPastEndWrite(t *testing.T) { buf := make([]byte, 5000) u.NewTimeSeededRand().Read(buf[2500:]) - nseek, err := dagmod.Seek(2500, os.SEEK_SET) + nseek, err := dagmod.Seek(2500, io.SeekStart) if err != nil { t.Fatal(err) } @@ -497,7 +482,7 @@ func TestSeekPastEndWrite(t *testing.T) { t.Fatal("incorrect write amount") } - _, err = dagmod.Seek(0, os.SEEK_SET) + _, err = dagmod.Seek(0, io.SeekStart) if err != nil { t.Fatal(err) } @@ -525,7 +510,7 @@ func TestRelativeSeek(t *testing.T) { for i := 0; i < 64; i++ { dagmod.Write([]byte{byte(i)}) - if _, err := dagmod.Seek(1, os.SEEK_CUR); err != nil { + if _, err := dagmod.Seek(1, io.SeekCurrent); err != nil { t.Fatal(err) } } @@ -576,17 +561,26 @@ func TestEndSeek(t *testing.T) { t.Fatal(err) } - offset, err := dagmod.Seek(0, os.SEEK_CUR) + offset, err := dagmod.Seek(0, io.SeekCurrent) + if err != nil { + t.Fatal(err) + } if offset != 100 { t.Fatal("expected the relative seek 0 to return current location") } - offset, err = dagmod.Seek(0, os.SEEK_SET) + offset, err = dagmod.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } if offset != 0 { t.Fatal("expected the absolute seek to set offset at 0") } - offset, err = dagmod.Seek(0, os.SEEK_END) + offset, err = dagmod.Seek(0, io.SeekEnd) + if err != nil { + t.Fatal(err) + } if offset != 100 { t.Fatal("expected the end seek to set offset at end") } @@ -612,7 +606,7 @@ func TestReadAndSeek(t *testing.T) { } readBuf := make([]byte, 4) - offset, err := dagmod.Seek(0, os.SEEK_SET) + offset, err := dagmod.Seek(0, io.SeekStart) if offset != 0 { t.Fatal("expected offset to be 0") } @@ -636,7 +630,7 @@ func TestReadAndSeek(t *testing.T) { } // skip 4 - _, err = dagmod.Seek(1, os.SEEK_CUR) + _, err = dagmod.Seek(1, io.SeekCurrent) if err != nil { t.Fatalf("error: %s, offset %d, reader offset %d", err, dagmod.curWrOff, dagmod.read.Offset()) } @@ -676,7 +670,7 @@ func TestCtxRead(t *testing.T) { if err != nil { t.Fatal(err) } - dagmod.Seek(0, os.SEEK_SET) + dagmod.Seek(0, io.SeekStart) readBuf := make([]byte, 4) _, err = dagmod.CtxReadFull(ctx, readBuf)
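The recurring change across the unixfs/io and unixfs/mod hunks above is the move from the deprecated os.SEEK_SET/SEEK_CUR/SEEK_END constants to io.SeekStart/io.SeekCurrent/io.SeekEnd (added in Go 1.7). A minimal standalone sketch, not taken from the patch, using strings.Reader as a stand-in for any io.Seeker such as a DagReader or DagModifier:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("hello world")

	pos, err := r.Seek(6, io.SeekStart) // was: r.Seek(6, os.SEEK_SET)
	if err != nil {
		panic(err)
	}
	fmt.Println(pos) // 6

	pos, _ = r.Seek(2, io.SeekCurrent) // was: os.SEEK_CUR
	fmt.Println(pos)                   // 8

	pos, _ = r.Seek(-5, io.SeekEnd) // was: os.SEEK_END
	fmt.Println(pos)                // 6
}

The io constants carry the same numeric values (0, 1, 2) as the os ones, so the substitution is behaviour-preserving and simply drops the os import.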
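Several hunks (sendMessage in routing/supernode/proxy/standard.go, putRoutingRecord in routing/supernode/server.go, and the tar extractor) collapse a trailing check-then-return-nil into a direct return of the call, which is the simplification gosimple (bundled in megacheck) suggests. A hedged before/after sketch with hypothetical function names:

package main

import "os"

// before: the error is inspected only to be passed through unchanged.
func makeDirOld(path string) error {
	err := os.MkdirAll(path, 0755)
	if err != nil {
		return err
	}
	return nil
}

// after: when nothing runs between the call and the return, return it directly.
func makeDirNew(path string) error {
	return os.MkdirAll(path, 0755)
}

func main() {
	for _, f := range []func(string) error{makeDirOld, makeDirNew} {
		if err := f(os.TempDir()); err != nil {
			panic(err)
		}
	}
}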
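Two smaller simplifications appear in the routing/mock changes: time.Now().Sub(t) becomes time.Since(t), and `for _ = range ch` becomes `for range ch` when only the iteration count matters. An illustrative sketch (the channel and timestamp below are made up, not taken from the tests):

package main

import (
	"fmt"
	"time"
)

func main() {
	created := time.Now().Add(-2 * time.Second)
	// time.Since(created) is exactly time.Now().Sub(created).
	fmt.Println(time.Since(created) > time.Second) // true

	providers := make(chan int, 3)
	providers <- 1
	providers <- 2
	providers <- 3
	close(providers)

	count := 0
	for range providers { // was: for _ = range providers
		count++
	}
	fmt.Println(count) // 3
}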
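The test changes in routing/offline/offline_test.go and unixfs/mod/dagmodifier_test.go also stop discarding errors: a returned error is checked before the value is used, or the value is dropped with `_` when only the error matters. A small sketch of both shapes, again using strings.Reader as a stand-in rather than the real routers or readers:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("0123456789")

	// Check the error before trusting the returned offset (cf. TestEndSeek).
	offset, err := r.Seek(0, io.SeekEnd)
	if err != nil {
		panic(err)
	}
	if offset != 10 {
		panic(fmt.Sprintf("expected end offset 10, got %d", offset))
	}

	// Discard the value explicitly when only the error is interesting
	// (cf. the "notHere" lookup in offline_test.go).
	if _, err := r.Seek(-99, io.SeekCurrent); err == nil {
		panic("expected an error for a negative position")
	}
}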