@@ -0,0 +1,245 @@
package cluster_test

import (
"context"
"fmt"
"os"
"testing"
"time"

icid "github.com/ipfs/go-cid"
icore "github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/repo/fsrepo"
"github.com/textileio/go-textile/core"
"github.com/textileio/go-textile/ipfs"
"github.com/textileio/go-textile/keypair"
"github.com/textileio/go-textile/pb"
"github.com/textileio/go-textile/repo/config"
)

// vars holds state shared across the ordered cluster tests in this file.
// Go runs tests in source order, so TestInitCluster prepares the repos,
// TestStartCluster boots the nodes, and the later tests consume them.
var vars = struct {
	repoPath1 string // repo directory for the first test node
	repoPath2 string // repo directory for the second test node

	node1 *core.Textile // first cluster peer, created in TestStartCluster
	node2 *core.Textile // second cluster peer, created in TestStartCluster

	cid icid.Cid // CID pinned to node1, inspected by TestTextileClusterSync
}{
	repoPath1: "testdata/.cluster1",
	repoPath2: "testdata/.cluster2",

	cid: icid.Undef,
}

// TestInitCluster creates two fresh repos configured as IPFS Cluster
// peers and points each one at the other as a cluster bootstrap.
func TestInitCluster(t *testing.T) {
	_ = os.RemoveAll(vars.repoPath1)
	_ = os.RemoveAll(vars.repoPath2)

	swarmPort1 := core.GetRandomPort()
	swarmPort2 := core.GetRandomPort()

	// initialize node1's repo with a random account
	if err := core.InitRepo(core.InitConfig{
		Account:              keypair.Random(),
		RepoPath:             vars.repoPath1,
		ApiAddr:              fmt.Sprintf("127.0.0.1:%s", core.GetRandomPort()),
		SwarmPorts:           swarmPort1,
		Cluster:              true,
		ClusterBindMultiaddr: "/ip4/0.0.0.0/tcp/9096",
		Debug:                true,
	}); err != nil {
		t.Fatalf("init node1 failed: %s", err)
	}

	// initialize node2's repo with a random account
	if err := core.InitRepo(core.InitConfig{
		Account:              keypair.Random(),
		RepoPath:             vars.repoPath2,
		ApiAddr:              fmt.Sprintf("127.0.0.1:%s", core.GetRandomPort()),
		SwarmPorts:           swarmPort2,
		Cluster:              true,
		ClusterBindMultiaddr: "/ip4/0.0.0.0/tcp/9097",
		Debug:                true,
	}); err != nil {
		t.Fatalf("init node2 failed: %s", err)
	}

	// cross-wire the bootstraps: each node bootstraps from the other
	addr1, err := getPeerAddress(vars.repoPath1, swarmPort1)
	if err != nil {
		t.Fatal(err)
	}
	addr2, err := getPeerAddress(vars.repoPath2, swarmPort2)
	if err != nil {
		t.Fatal(err)
	}
	if err := updateClusterBootstraps(vars.repoPath1, []string{addr2}); err != nil {
		t.Fatal(err)
	}
	if err := updateClusterBootstraps(vars.repoPath2, []string{addr1}); err != nil {
		t.Fatal(err)
	}
}

// TestStartCluster boots both nodes, waits for their IPFS and cluster
// layers to come online, and pins a fixture file to node1 so later
// tests can verify cluster state.
func TestStartCluster(t *testing.T) {
	var err error
	vars.node1, err = core.NewTextile(core.RunConfig{
		RepoPath: vars.repoPath1,
		Debug:    true,
	})
	if err != nil {
		t.Fatalf("create node1 failed: %s", err)
	}
	vars.node2, err = core.NewTextile(core.RunConfig{
		RepoPath: vars.repoPath2,
		Debug:    true,
	})
	if err != nil {
		t.Fatalf("create node2 failed: %s", err)
	}

	// set cluster logs to debug on both nodes
	level := &pb.LogLevel{
		Systems: map[string]pb.LogLevel_Level{
			"cluster": pb.LogLevel_DEBUG,
		},
	}
	err = vars.node1.SetLogLevel(level)
	if err != nil {
		t.Fatal(err)
	}
	err = vars.node2.SetLogLevel(level)
	if err != nil {
		t.Fatal(err)
	}

	// start node1 and block until both IPFS and the cluster are ready
	err = vars.node1.Start()
	if err != nil {
		t.Fatalf("start node1 failed: %s", err)
	}
	<-vars.node1.OnlineCh()
	<-vars.node1.Cluster().Ready()

	// let node1 warm up (time.Sleep is the idiomatic one-shot wait;
	// the original built a time.NewTimer just to read its channel once)
	time.Sleep(time.Second * 5)

	err = vars.node2.Start()
	if err != nil {
		t.Fatalf("start node2 failed: %s", err)
	}
	<-vars.node2.OnlineCh()
	<-vars.node2.Cluster().Ready()

	// let node2 warm up
	time.Sleep(time.Second * 5)

	// pin some data to node1; TestTextileClusterSync checks its status
	cid, err := pinTestData(vars.node1.Ipfs())
	if err != nil {
		t.Fatal(err)
	}
	vars.cid = *cid
}

// TestTextileClusterPeers verifies that each node sees the other in its
// cluster peer set.
func TestTextileClusterPeers(t *testing.T) {
	ctx, cancel := context.WithTimeout(vars.node1.Ipfs().Context(), time.Minute)
	defer cancel()

	node2ID := vars.node2.Ipfs().Identity.Pretty()
	foundNode2 := false
	for _, p := range vars.node1.Cluster().Peers(ctx) {
		if p.ID.Pretty() == node2ID {
			foundNode2 = true
			break
		}
	}
	if !foundNode2 {
		t.Fatal("node2 not found in node1's peers")
	}

	node1ID := vars.node1.Ipfs().Identity.Pretty()
	foundNode1 := false
	for _, p := range vars.node2.Cluster().Peers(ctx) {
		if p.ID.Pretty() == node1ID {
			foundNode1 = true
			break
		}
	}
	if !foundNode1 {
		t.Fatal("node1 not found in node2's peers")
	}
}

// TestTextileClusterSync exercises the cluster sync paths on node1 and
// then checks the status of the CID pinned in TestStartCluster.
func TestTextileClusterSync(t *testing.T) {
	ctx, cancel := context.WithTimeout(vars.node1.Ipfs().Context(), time.Minute)
	defer cancel()

	_, err := vars.node1.Cluster().SyncAll(ctx)
	if err != nil {
		t.Fatalf("sync all failed: %s", err)
	}

	err = vars.node1.Cluster().StateSync(ctx)
	if err != nil {
		t.Fatalf("state sync failed: %s", err)
	}

	info, err := vars.node1.Cluster().Status(ctx, vars.cid)
	if err != nil {
		t.Fatal(err)
	}
	// t.Log attributes the output to this test and honors -v,
	// unlike the bare fmt.Println the original used
	t.Log(info.String())
}

// TestTextileCluster_Stop shuts both nodes down cleanly.
func TestTextileCluster_Stop(t *testing.T) {
	if err := vars.node1.Stop(); err != nil {
		t.Fatalf("stop node1 failed: %s", err)
	}
	if err := vars.node2.Stop(); err != nil {
		t.Fatalf("stop node2 failed: %s", err)
	}
}

// TestTextileCluster_Teardown drops the shared node references so the
// stopped nodes can be garbage collected once the suite finishes.
func TestTextileCluster_Teardown(t *testing.T) {
	vars.node1 = nil
	vars.node2 = nil
}

// getPeerAddress reads the IPFS peer ID out of the repo at repoPath and
// formats a loopback swarm multiaddress for it on the given port.
func getPeerAddress(repoPath, swarmPort string) (string, error) {
	rep, err := fsrepo.Open(repoPath)
	if err != nil {
		return "", err
	}
	defer rep.Close()

	peerID, err := rep.GetConfigKey("Identity.PeerID")
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("/ip4/127.0.0.1/tcp/%s/ipfs/%s", swarmPort, peerID), nil
}

// updateClusterBootstraps overwrites the cluster bootstrap list in the
// textile config stored at repoPath and writes it back to disk.
func updateClusterBootstraps(repoPath string, bootstraps []string) error {
	cfg, err := config.Read(repoPath)
	if err != nil {
		return err
	}
	cfg.Cluster.Bootstraps = bootstraps
	return config.Write(repoPath, cfg)
}

// pinTestData adds a fixture image to the given IPFS node (pinned, not
// wrapped) and returns its CID.
func pinTestData(node *icore.IpfsNode) (*icid.Cid, error) {
	file, err := os.Open("../mill/testdata/image.jpeg")
	if err != nil {
		return nil, err
	}
	defer file.Close()

	return ipfs.AddData(node, file, true, false)
}
Empty file.
@@ -30,9 +30,7 @@ func Daemon(repoPath string, pinCode string, docs bool, debug bool) error {
return fmt.Errorf(fmt.Sprintf("create node failed: %s", err))
}

gateway.Host = &gateway.Gateway{
Node: node,
}
gateway.Host = &gateway.Gateway{Node: node}

err = startNode(docs)
if err != nil {
@@ -44,14 +42,17 @@ func Daemon(repoPath string, pinCode string, docs bool, debug bool) error {
quit := make(chan os.Signal)
signal.Notify(quit, os.Interrupt)
<-quit

fmt.Println("Interrupted")
fmt.Printf("Shutting down...")

err = stopNode()
if err != nil && err != core.ErrStopped {
fmt.Println(err.Error())
} else {
fmt.Print("done\n")
}

os.Exit(1)
return nil
}
@@ -75,7 +75,7 @@ func FileAdd(path string, threadID string, caption string, group bool, verbose b
pth = path
}

fi, err = os.Stat(path)
fi, err = os.Stat(pth)
if err != nil {
return err
}
@@ -436,6 +436,9 @@ Stacks may include:
initCafeOpen := initCmd.Flag("cafe-open", "Open the p2p cafe service for other peers").Bool()
initCafeURL := initCmd.Flag("cafe-url", "Specify a custom URL of this cafe, e.g., https://mycafe.com").Envar("CAFE_HOST_URL").String()
initCafeNeighborURL := initCmd.Flag("cafe-neighbor-url", "Specify the URL of a secondary cafe. Must return cafe info, e.g., via a Gateway: https://my-gateway.yolo.com/cafe, or a cafe API: https://my-cafe.yolo.com").Envar("CAFE_HOST_NEIGHBOR_URL").String()
initIpfsCluster := initCmd.Flag("cluster", "Treat the node as an IPFS Cluster peer").Bool()
initIpfsClusterBindMultiaddr := initCmd.Flag("cluster-bind-maddr", "Set the IPFS Cluster multiaddrs").Default("/ip4/0.0.0.0/tcp/9096").String()
Copy link
Collaborator

@hsanjuan hsanjuan Jun 19, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

technically cluster won't bind to anything as it re-uses your already existing ipfs peer.

Copy link
Member Author

@sanderpick sanderpick Jun 19, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah, ok. That makes sense. I can remove this flag then.

initIpfsClusterPeers := initCmd.Flag("cluster-peer", "IPFS Cluster peers to sync with").Strings()
cmds[initCmd.FullCommand()] = func() error {
kp, err := keypair.Parse(*initAccountSeed)
if err != nil {
@@ -453,21 +456,24 @@ Stacks may include:
}

config := core.InitConfig{
Account: account,
PinCode: *initPin, // @todo rename to pin
RepoPath: repo, // @todo rename to repo
SwarmPorts: *initIpfsSwarmPorts,
ApiAddr: *initApiBindAddr,
CafeApiAddr: *initCafeApiBindAddr,
GatewayAddr: *initGatewayBindAddr,
ProfilingAddr: *initProfilingBindAddr,
IsMobile: false,
IsServer: *initIpfsServerMode,
LogToDisk: *initLogFiles,
Debug: *logDebug,
CafeOpen: *initCafeOpen,
CafeURL: *initCafeURL,
CafeNeighborURL: *initCafeNeighborURL,
Account: account,
PinCode: *initPin, // @todo rename to pin
RepoPath: repo, // @todo rename to repo
SwarmPorts: *initIpfsSwarmPorts,
ApiAddr: *initApiBindAddr,
CafeApiAddr: *initCafeApiBindAddr,
GatewayAddr: *initGatewayBindAddr,
ProfilingAddr: *initProfilingBindAddr,
IsMobile: false,
IsServer: *initIpfsServerMode,
LogToDisk: *initLogFiles,
Debug: *logDebug,
CafeOpen: *initCafeOpen,
CafeURL: *initCafeURL,
CafeNeighborURL: *initCafeNeighborURL,
Cluster: *initIpfsCluster,
ClusterBindMultiaddr: *initIpfsClusterBindMultiaddr,
ClusterPeers: *initIpfsClusterPeers,
}

return InitCommand(config)
@@ -1192,5 +1198,9 @@ func getRepo(repo string) (string, error) {
}
repo = filepath.Join(appDir, "repo")
}
return repo, nil
expanded, err := homedir.Expand(repo)
sanderpick marked this conversation as resolved.
Show resolved Hide resolved
if err != nil {
return "", err
}
return expanded, nil
}
@@ -18,7 +18,7 @@ func ThreadAdd(name string, key string, tipe string, sharing string, whitelist [
var body []byte
if schema == "" {
if schemaFile != "" {
path, err := homedir.Expand(string(schemaFile))
path, err := homedir.Expand(schemaFile)
if err != nil {
return err
}
@@ -0,0 +1,114 @@
package core

import (
"context"
"time"

util "github.com/ipfs/go-ipfs-util"
ipfscluster "github.com/ipfs/ipfs-cluster"
capi "github.com/ipfs/ipfs-cluster/api"
"github.com/ipfs/ipfs-cluster/consensus/raft"
"github.com/ipfs/ipfs-cluster/monitor/pubsubmon"
"github.com/ipfs/ipfs-cluster/observations"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/textileio/go-textile/cluster"
)

// clusterExists reports whether this repo has been initialized as an
// IPFS Cluster peer, i.e. whether a cluster config file exists on disk.
func (t *Textile) clusterExists() bool {
	return util.FileExists(cluster.ConfigPath(t.repoPath))
}

// startCluster creates all the necessary things to produce the cluster object
func (t *Textile) startCluster() error {
cfgMgr, cfgs, err := cluster.MakeAndLoadConfigs(t.repoPath)
if err != nil {
return err
}
defer cfgMgr.Shutdown()

cfgs.ClusterCfg.LeaveOnShutdown = false

tracker, err := cluster.SetupPinTracker(
"map",
t.node.PeerHost,
cfgs.MaptrackerCfg,
cfgs.StatelessTrackerCfg,
cfgs.ClusterCfg.Peername,
)
if err != nil {
return err
}

informer, alloc, err := cluster.SetupAllocation(
"disk-freespace",
cfgs.DiskInfCfg,
cfgs.NumpinInfCfg,
)
if err != nil {
return err
}

ipfscluster.ReadyTimeout = raft.DefaultWaitForLeaderTimeout + 5*time.Second
Copy link
Collaborator

@hsanjuan hsanjuan Jun 19, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think you can ignore this (like set to 5 seconds or leave default). Raft is not involved anyways.

Copy link
Member Author

@sanderpick sanderpick Jun 19, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👌


cons, err := cluster.SetupConsensus(
t.node.PeerHost,
t.node.DHT,
t.node.PubSub,
cfgs.CrdtCfg,
t.node.Repo.Datastore(),
)
if err != nil {
return err
}

tracer, err := observations.SetupTracing(cfgs.TracingCfg)
if err != nil {
return err
}

var peersF func(context.Context) ([]peer.ID, error)
mon, err := pubsubmon.New(t.node.Context(), cfgs.PubsubmonCfg, t.node.PubSub, peersF)
if err != nil {
return err
}

connector, err := cluster.NewConnector(t.node, func(ctx context.Context) []*capi.ID {
return t.cluster.Peers(ctx)
})
if err != nil {
return err
}

t.cluster, err = ipfscluster.NewCluster(
t.node.Context(),
t.node.PeerHost,
t.node.DHT,
cfgs.ClusterCfg,
t.node.Repo.Datastore(),
cons,
nil,
connector,
tracker,
mon,
alloc,
informer,
tracer,
)
if err != nil {
return err
}

bootstraps, err := cluster.ParseBootstraps(t.config.Cluster.Bootstraps)
if err != nil {
return err
}

// noop if no bootstraps
// if bootstrapping fails, consensus will never be ready
// and timeout. So this can happen in background and we
Copy link
Collaborator

@hsanjuan hsanjuan Jun 19, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this is not true for CRDT consensus. it will work regardless of bootstrap. Old comment in cluster, we will fix it.

Copy link
Member Author

@sanderpick sanderpick Jun 19, 2019

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

👌

// avoid worrying about error handling here (since Cluster
// will realize).
go cluster.Bootstrap(t.node.Context(), t.cluster, cons, bootstraps)

return nil
}
@@ -21,6 +21,28 @@ func (t *Textile) Config() *config.Config {
return t.config
}

// ConvertHeadersToCorsOptions converts http headers into the format that cors options accepts.
// Headers that are absent or empty leave the corresponding option nil.
func ConvertHeadersToCorsOptions(headers config.HTTPHeaders) cors.Options {
	// pick returns the header's values, or nil when missing/empty, which
	// matches the zero value of the cors.Options fields.
	pick := func(key string) []string {
		if vals, ok := headers[key]; ok && len(vals) > 0 {
			return vals
		}
		return nil
	}

	return cors.Options{
		AllowedOrigins: pick("Access-Control-Allow-Origin"),
		AllowedMethods: pick("Access-Control-Allow-Methods"),
		AllowedHeaders: pick("Access-Control-Allow-Headers"),
	}
}

// GetRandomPort returns a port within the acceptable range
func GetRandomPort() string {
rand.Seed(time.Now().UTC().UnixNano())
@@ -70,12 +92,15 @@ func applyTextileConfigOptions(init InitConfig) error {
conf.Cafe.Host.URL = init.CafeURL
conf.Cafe.Host.NeighborURL = init.CafeNeighborURL

// cluster settings
conf.Cluster.Bootstraps = init.ClusterPeers

// write to disk
return config.Write(init.RepoPath, conf)
}

// applySwarmPortConfigOption sets custom swarm ports (tcp and ws)
func applySwarmPortConfigOption(rep repo.Repo, ports string) error {
// applyIPFSSwarmPorts writes swarm ports (tcp and ws) to the IPFS config
func applyIPFSSwarmPorts(rep repo.Repo, ports string) error {
var parts []string
if ports != "" {
parts = strings.Split(ports, ",")
@@ -177,25 +202,3 @@ func ensureProfile(profile profile, repoPath string) error {

return rep.SetConfig(conf)
}

// ConvertHeadersToCorsOptions converts http headers into the format that cors options accepts
func ConvertHeadersToCorsOptions(headers config.HTTPHeaders) cors.Options {
options := cors.Options{}

control, ok := headers["Access-Control-Allow-Origin"]
if ok && len(control) > 0 {
options.AllowedOrigins = control
}

control, ok = headers["Access-Control-Allow-Methods"]
if ok && len(control) > 0 {
options.AllowedMethods = control
}

control, ok = headers["Access-Control-Allow-Headers"]
if ok && len(control) > 0 {
options.AllowedHeaders = control
}

return options
}
@@ -0,0 +1,50 @@
package core

import (
"context"

"github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/core/node/libp2p"
"github.com/ipfs/go-ipfs/plugin/loader"
"github.com/ipfs/go-ipfs/repo/fsrepo"
)

// createIPFS creates an IPFS node rooted at the textile repo and swaps
// it into t.node, closing any previously created node first.
// NOTE(review): the plugins parameter is unused in this body —
// presumably loaded by the caller; confirm and drop if truly unneeded.
func (t *Textile) createIPFS(plugins *loader.PluginLoader, online bool) error {
	rep, err := fsrepo.Open(t.repoPath)
	if err != nil {
		return err
	}

	// servers participate fully in the DHT; all other nodes are DHT clients
	routing := libp2p.DHTClientOption
	if t.Server() {
		routing = libp2p.DHTOption
	}

	// NOTE(review): the cancel func is discarded, so this context can
	// never be cancelled (go vet's lostcancel check flags this) —
	// consider storing it (e.g. on t.cancel) so Stop can tear it down.
	cctx, _ := context.WithCancel(context.Background())
	nd, err := core.NewNode(cctx, &core.BuildCfg{
		Repo:      rep,
		Permanent: true, // temporary way to signify that node is permanent
		Online:    online,
		ExtraOpts: map[string]bool{
			"pubsub": true,
			"ipnsps": true,
			"mplex":  true,
		},
		Routing: routing,
	})
	if err != nil {
		return err
	}
	nd.IsDaemon = true

	// close the old node before adopting the new one
	if t.node != nil {
		err = t.node.Close()
		if err != nil {
			return err
		}
	}
	t.node = nd

	return nil
}
@@ -16,13 +16,13 @@ import (
oldcmds "github.com/ipfs/go-ipfs/commands"
"github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/core/corerepo"
"github.com/ipfs/go-ipfs/core/node/libp2p"
"github.com/ipfs/go-ipfs/plugin/loader"
"github.com/ipfs/go-ipfs/repo/fsrepo"
ipld "github.com/ipfs/go-ipld-format"
logging "github.com/ipfs/go-log"
ipfscluster "github.com/ipfs/ipfs-cluster"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/textileio/go-textile/broadcast"
"github.com/textileio/go-textile/cluster"
"github.com/textileio/go-textile/ipfs"
"github.com/textileio/go-textile/keypair"
"github.com/textileio/go-textile/pb"
@@ -48,21 +48,24 @@ const kSyncAccountFreq = time.Hour

// InitConfig is used to setup a textile node
type InitConfig struct {
Account *keypair.Full
PinCode string
RepoPath string
SwarmPorts string
ApiAddr string
CafeApiAddr string
GatewayAddr string
ProfilingAddr string
IsMobile bool
IsServer bool
LogToDisk bool
Debug bool
CafeOpen bool
CafeURL string
CafeNeighborURL string
Account *keypair.Full
PinCode string
RepoPath string
SwarmPorts string
ApiAddr string
CafeApiAddr string
GatewayAddr string
ProfilingAddr string
IsMobile bool
IsServer bool
LogToDisk bool
Debug bool
CafeOpen bool
CafeURL string
CafeNeighborURL string // Deprecated
Cluster bool
ClusterBindMultiaddr string
ClusterPeers []string
}

// MigrateConfig is used to define options during a major migration
@@ -88,6 +91,7 @@ type Textile struct {
cancel context.CancelFunc
node *core.IpfsNode
datastore repo.Datastore
cluster *ipfscluster.Cluster
started bool
loadedThreads []*Thread
online chan struct{}
@@ -140,21 +144,19 @@ func InitRepo(conf InitConfig) error {
return err
}

rep, err := fsrepo.Open(conf.RepoPath)
if err != nil {
return err
}
defer func() {
if err := rep.Close(); err != nil {
log.Error(err.Error())
// init cluster
if conf.Cluster {
err = cluster.InitCluster(conf.RepoPath, conf.ClusterBindMultiaddr)
if err != nil {
return err
}
}()
}

// apply ipfs config opts
err = applySwarmPortConfigOption(rep, conf.SwarmPorts)
rep, err := fsrepo.Open(conf.RepoPath)
if err != nil {
return err
}
defer rep.Close()

sqliteDb, err := db.Create(conf.RepoPath, conf.PinCode)
if err != nil {
@@ -183,6 +185,11 @@ func InitRepo(conf InitConfig) error {
return err
}

err = applyIPFSSwarmPorts(rep, conf.SwarmPorts)
if err != nil {
return err
}

return applyTextileConfigOptions(conf)
}

@@ -385,6 +392,14 @@ func (t *Textile) Start() error {
log.Errorf(err.Error())
}
go t.cafeOutbox.Flush()

// start cluster
if t.clusterExists() {
err = t.startCluster()
if err != nil {
log.Errorf(err.Error())
}
}
}()

for _, mod := range t.datastore.Threads().List().Items {
@@ -434,6 +449,14 @@ func (t *Textile) Stop() error {
return err
}

// shutdown cluster
if t.cluster != nil {
err = t.cluster.Shutdown(t.node.Context())
if err != nil {
return err
}
}

// close ipfs node
err = t.node.Close()
if err != nil {
@@ -498,6 +521,11 @@ func (t *Textile) Ipfs() *core.IpfsNode {
return t.node
}

// Cluster returns the underlying ipfs cluster
func (t *Textile) Cluster() *ipfscluster.Cluster {
return t.cluster
}

// OnlineCh returns the online channel
func (t *Textile) OnlineCh() <-chan struct{} {
return t.online
@@ -609,46 +637,6 @@ func (t *Textile) cafeService() *CafeService {
return t.cafe
}

// createIPFS creates an IPFS node
func (t *Textile) createIPFS(plugins *loader.PluginLoader, online bool) error {
rep, err := fsrepo.Open(t.repoPath)
if err != nil {
return err
}

routing := libp2p.DHTOption
if t.Mobile() {
routing = libp2p.DHTClientOption
}

cctx, _ := context.WithCancel(context.Background())
nd, err := core.NewNode(cctx, &core.BuildCfg{
Repo: rep,
Permanent: true, // temporary way to signify that node is permanent
Online: online,
ExtraOpts: map[string]bool{
"pubsub": true,
"ipnsps": true,
"mplex": true,
},
Routing: routing,
})
if err != nil {
return err
}
nd.IsDaemon = true

if t.node != nil {
err = t.node.Close()
if err != nil {
return err
}
}
t.node = nd

return nil
}

// runJobs runs each message queue
func (t *Textile) runJobs() {
var freq time.Duration
@@ -21,33 +21,46 @@ require (
github.com/go-openapi/swag v0.19.0 // indirect
github.com/gogo/protobuf v1.2.1
github.com/golang/protobuf v1.3.1
github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
github.com/hashicorp/go-uuid v1.0.1 // indirect
github.com/hsanjuan/go-libp2p-http v0.0.5 // indirect
github.com/ipfs/go-cid v0.0.2
github.com/ipfs/go-ipfs v0.4.21
github.com/ipfs/go-datastore v0.0.5
github.com/ipfs/go-ipfs v0.4.22-0.20190613191811-0fabf0b92809
github.com/ipfs/go-ipfs-addr v0.0.1
github.com/ipfs/go-ipfs-cmds v0.0.8
github.com/ipfs/go-ipfs-config v0.0.3
github.com/ipfs/go-ipfs-cmds v0.0.10
github.com/ipfs/go-ipfs-config v0.0.6
github.com/ipfs/go-ipfs-files v0.0.3
github.com/ipfs/go-ipfs-util v0.0.1
github.com/ipfs/go-ipld-format v0.0.2
github.com/ipfs/go-log v0.0.1
github.com/ipfs/go-merkledag v0.0.3
github.com/ipfs/go-path v0.0.4
github.com/ipfs/go-unixfs v0.0.6
github.com/ipfs/interface-go-ipfs-core v0.0.8
github.com/ipfs/go-merkledag v0.1.0
github.com/ipfs/go-path v0.0.7
github.com/ipfs/go-unixfs v0.1.0
github.com/ipfs/interface-go-ipfs-core v0.1.0
github.com/ipfs/ipfs-cluster v0.11.0-rc4
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99
github.com/libp2p/go-libp2p-crypto v0.0.2
github.com/libp2p/go-libp2p-core v0.0.3
github.com/libp2p/go-libp2p-crypto v0.1.0
github.com/libp2p/go-libp2p-gorpc v0.1.0
github.com/libp2p/go-libp2p-host v0.1.0
github.com/libp2p/go-libp2p-interface-connmgr v0.0.5 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.1.0
github.com/libp2p/go-libp2p-net v0.0.2
github.com/libp2p/go-libp2p-peer v0.1.1
github.com/libp2p/go-libp2p-peerstore v0.0.6
github.com/libp2p/go-libp2p-protocol v0.0.1
github.com/libp2p/go-libp2p-record v0.0.1
github.com/libp2p/go-libp2p-peer v0.2.0
github.com/libp2p/go-libp2p-peerstore v0.1.0
github.com/libp2p/go-libp2p-protocol v0.1.0
github.com/libp2p/go-libp2p-pubsub v0.1.0
github.com/libp2p/go-libp2p-record v0.1.0
github.com/mailru/easyjson v0.0.0-20190403194419-1ea4449da983 // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.1.2
github.com/multiformats/go-multiaddr v0.0.4
github.com/multiformats/go-multihash v0.0.5
github.com/multiformats/go-multihash v0.0.6
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f
github.com/onsi/ginkgo v1.8.0
github.com/onsi/gomega v1.5.0
github.com/prometheus/common v0.4.1
github.com/rs/cors v1.6.0
github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd
github.com/segmentio/ksuid v1.0.2
@@ -59,11 +72,10 @@ require (
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.1.0
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f
golang.org/x/image v0.0.0-20190321063152-3fc05d484e9f // indirect
google.golang.org/appengine v1.4.0 // indirect
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd // indirect
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3
)

replace github.com/ipfs/go-ipfs => github.com/sanderpick/go-ipfs v0.4.22-0.20190606034924-0478a0eca246
replace github.com/ugorji/go/codec => github.com/ugorji/go v1.1.2
485 go.sum

Large diffs are not rendered by default.

@@ -7,7 +7,7 @@ import (

"github.com/ipfs/go-ipfs/core"
"github.com/ipfs/go-ipfs/core/coreapi"
inet "github.com/libp2p/go-libp2p-net"
inet "github.com/libp2p/go-libp2p-core/network"
)

// SwarmConnect opens a direct connection to a list of peer multi addresses
@@ -19,6 +19,7 @@ type Config struct {
Logs Logs // local node's log settings
IsMobile bool // local node is setup for mobile
IsServer bool // local node is setup for a server w/ a public IP
Cluster Cluster // local node's IPFS Cluster settings
Cafe Cafe // local node cafe settings
}

@@ -36,11 +37,6 @@ type Addresses struct {
Profiling string // bind address of the profiling API
}

type SwarmPorts struct {
TCP string // TCP address port
WS string // WS address port
}

// HTTPHeaders to customise things like COR
type HTTPHeaders = map[string][]string

@@ -60,17 +56,22 @@ type Logs struct {
LogToDisk bool // when true, sends all logs to rolling files on disk
}

// IPFS Cluster settings
type Cluster struct {
Bootstraps []string
}

// Cafe settings
type Cafe struct {
Host CafeHost
}

// CafeHost settings
type CafeHost struct {
Open bool // When true, other peers can register with this node for cafe services.
URL string // Override the resolved URL of this cafe, useful for load HTTPS and/or load balancers
NeighborURL string // Specifies the URL of a secondary cafe. Must return cafe info.
SizeLimit int64 // Maximum file size limit to accept for POST requests in bytes.
Open bool // When true, other peers can register with this node for cafe services
URL string // Override the resolved URL of this cafe, useful for HTTPS and/or load balancers
NeighborURL string // Deprecated
SizeLimit int64 // Maximum file size limit to accept for POST requests in bytes
}

// Init returns the default textile config
@@ -147,12 +148,13 @@ func Init() (*Config, error) {
Logs: Logs{
LogToDisk: true,
},
Cluster: Cluster{
Bootstraps: []string{},
},
Cafe: Cafe{
Host: CafeHost{
Open: false,
URL: "",
NeighborURL: "",
SizeLimit: 0,
Open: false,
SizeLimit: 0,
},
},
IsMobile: false,
@@ -6,6 +6,7 @@ import (

ipfs "github.com/ipfs/go-ipfs"
native "github.com/ipfs/go-ipfs-config"
"github.com/libp2p/go-libp2p-core/peer"
)

// DefaultServerFilters is a list of IPv4 and IPv6 prefixes that are private, local only, or unrouteable.
@@ -49,7 +50,7 @@ var TextileBootstrapAddresses = []string{
}

// TextileBootstrapPeers returns the (parsed) set of Textile bootstrap peers.
func TextileBootstrapPeers() ([]native.BootstrapPeer, error) {
func TextileBootstrapPeers() ([]peer.AddrInfo, error) {
ps, err := native.ParseBootstrapPeers(TextileBootstrapAddresses)
if err != nil {
return nil, fmt.Errorf(`failed to parse hardcoded bootstrap peers: %s
@@ -20,7 +20,7 @@ import (
logging "github.com/ipfs/go-log"
iface "github.com/ipfs/interface-go-ipfs-core"
ctxio "github.com/jbenet/go-context/io"
inet "github.com/libp2p/go-libp2p-net"
inet "github.com/libp2p/go-libp2p-core/network"
peer "github.com/libp2p/go-libp2p-peer"
protocol "github.com/libp2p/go-libp2p-protocol"
"github.com/textileio/go-textile/crypto"
@@ -7,7 +7,8 @@ import (
"time"

ggio "github.com/gogo/protobuf/io"
inet "github.com/libp2p/go-libp2p-net"
"github.com/libp2p/go-libp2p-core/helpers"
inet "github.com/libp2p/go-libp2p-core/network"
peer "github.com/libp2p/go-libp2p-peer"
"github.com/textileio/go-textile/pb"
)
@@ -133,7 +134,7 @@ func (ms *messageSender) SendMessage(ctx context.Context, pmes *pb.Envelope) err
}

if ms.singleMes > streamReuseTries {
go inet.FullClose(ms.s)
go helpers.FullClose(ms.s)
ms.s = nil
} else if retry {
ms.singleMes++
@@ -181,7 +182,7 @@ func (ms *messageSender) SendRequest(ctx context.Context, pmes *pb.Envelope) (*p
}

if ms.singleMes > streamReuseTries {
go inet.FullClose(ms.s)
go helpers.FullClose(ms.s)
ms.s = nil
} else if retry {
ms.singleMes++
@@ -30,6 +30,9 @@ go test -coverprofile=core.cover.out ./core
# mobile
go test -coverprofile=mobile.cover.out ./mobile

# cluster
go test -coverprofile=cluster.cover.out ./cluster

# gateway
go test -coverprofile=gateway.cover.out ./gateway