Skip to content

Commit

Permalink
Merge e824aea into cb5012c
Browse files Browse the repository at this point in the history
  • Loading branch information
hsanjuan committed Nov 30, 2017
2 parents cb5012c + e824aea commit 852c89b
Show file tree
Hide file tree
Showing 28 changed files with 559 additions and 89 deletions.
2 changes: 2 additions & 0 deletions .dockerignore
@@ -0,0 +1,2 @@
Dockerfile
Dockerfile-*
25 changes: 20 additions & 5 deletions Dockerfile
@@ -1,7 +1,7 @@
FROM golang:1.9-stretch AS builder
MAINTAINER Hector Sanjuan <hector@protocol.ai>

# This build state just builds the cluster binaries
# This dockerfile builds and runs ipfs-cluster-service.

ENV GOPATH /go
ENV SRC_PATH $GOPATH/src/github.com/ipfs/ipfs-cluster
Expand All @@ -10,8 +10,20 @@ COPY . $SRC_PATH
WORKDIR $SRC_PATH
RUN make install

ENV SUEXEC_VERSION v0.2
ENV TINI_VERSION v0.16.1
RUN set -x \
&& cd /tmp \
&& git clone https://github.com/ncopa/su-exec.git \
&& cd su-exec \
&& git checkout -q $SUEXEC_VERSION \
&& make \
&& cd /tmp \
&& wget -q -O tini https://github.com/krallin/tini/releases/download/$TINI_VERSION/tini \
&& chmod +x tini

#------------------------------------------------------
FROM ipfs/go-ipfs
FROM busybox:1-glibc
MAINTAINER Hector Sanjuan <hector@protocol.ai>

# This is the container which just puts the previously
Expand All @@ -27,13 +39,16 @@ EXPOSE 9096

COPY --from=builder $GOPATH/bin/ipfs-cluster-service /usr/local/bin/ipfs-cluster-service
COPY --from=builder $GOPATH/bin/ipfs-cluster-ctl /usr/local/bin/ipfs-cluster-ctl
COPY --from=builder $SRC_PATH/docker/entrypoint.sh /usr/local/bin/start-daemons.sh
COPY --from=builder $SRC_PATH/docker/entrypoint.sh /usr/local/bin/entrypoint.sh
COPY --from=builder /tmp/su-exec/su-exec /sbin/su-exec
COPY --from=builder /tmp/tini /sbin/tini

RUN mkdir -p $IPFS_CLUSTER_PATH && \
chown 1000:100 $IPFS_CLUSTER_PATH
adduser -D -h $IPFS_CLUSTER_PATH -u 1000 -G users ipfs && \
chown ipfs:users $IPFS_CLUSTER_PATH

VOLUME $IPFS_CLUSTER_PATH
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start-daemons.sh"]
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/entrypoint.sh"]

# Defaults for ipfs-cluster-service would go here
CMD []
41 changes: 41 additions & 0 deletions Dockerfile-bundle
@@ -0,0 +1,41 @@
FROM golang:1.9-stretch AS builder
MAINTAINER Hector Sanjuan <hector@protocol.ai>

# This dockerfile builds cluster and runs it along with go-ipfs.
# It re-uses the latest go-ipfs:release container.

# This builder just builds the cluster binaries
ENV GOPATH /go
ENV SRC_PATH $GOPATH/src/github.com/ipfs/ipfs-cluster

COPY . $SRC_PATH
WORKDIR $SRC_PATH
RUN make install

#------------------------------------------------------
FROM ipfs/go-ipfs:release
MAINTAINER Hector Sanjuan <hector@protocol.ai>

# This is the container which just puts the previously
# built binaries on the go-ipfs-container.

ENV GOPATH /go
ENV SRC_PATH /go/src/github.com/ipfs/ipfs-cluster
ENV IPFS_CLUSTER_PATH /data/ipfs-cluster

EXPOSE 9094
EXPOSE 9095
EXPOSE 9096

COPY --from=builder $GOPATH/bin/ipfs-cluster-service /usr/local/bin/ipfs-cluster-service
COPY --from=builder $GOPATH/bin/ipfs-cluster-ctl /usr/local/bin/ipfs-cluster-ctl
COPY --from=builder $SRC_PATH/docker/start-daemons.sh /usr/local/bin/start-daemons.sh

RUN mkdir -p $IPFS_CLUSTER_PATH && \
chown 1000:100 $IPFS_CLUSTER_PATH

VOLUME $IPFS_CLUSTER_PATH
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/start-daemons.sh"]

# Defaults for ipfs-cluster-service would go here
CMD []
2 changes: 1 addition & 1 deletion Dockerfile-test
Expand Up @@ -16,7 +16,7 @@ RUN cd /tmp && \


#------------------------------------------------------
FROM ipfs/go-ipfs
FROM ipfs/go-ipfs:master
MAINTAINER Hector Sanjuan <hector@protocol.ai>

# This is the container which just puts the previously
Expand Down
4 changes: 2 additions & 2 deletions README.md
Expand Up @@ -60,15 +60,15 @@ Note that since IPFS Cluster is evolving fast, these builds may not contain the

### Docker

You can build or download an automated build of the ipfs-cluster docker container. This container runs both the IPFS daemon and `ipfs-cluster-service` and includes `ipfs-cluster-ctl`. To launch the latest published version on Docker run:
You can build or download an automated build of the ipfs-cluster docker container. This container runs `ipfs-cluster-service` and includes `ipfs-cluster-ctl`. To launch the latest published version on Docker run:

`$ docker run ipfs/ipfs-cluster`

To build the container manually you can:

`$ docker build . -t ipfs-cluster`

You can mount your local ipfs-cluster configuration and data folder by passing `-v /data/ipfs-cluster your-local-ipfs-cluster-folder` to Docker.
You can mount your local ipfs-cluster configuration and data folder by passing `-v <your-local-ipfs-cluster-folder>:/data/ipfs-cluster` to Docker. Otherwise, a new configuration will be generated. In that case, you can point it to the right IPFS location by setting `IPFS_API` like `--env IPFS_API="/ip4/1.2.3.4/tcp/5001"`.

### Install from the snap store

Expand Down
53 changes: 46 additions & 7 deletions api/rest/restapi.go
Expand Up @@ -224,6 +224,12 @@ func (api *API) routes() []route {
"/pins/sync",
api.syncAllHandler,
},
{
"RecoverAll",
"POST",
"/pins/recover",
api.recoverAllHandler,
},
{
"Status",
"GET",
Expand Down Expand Up @@ -463,15 +469,48 @@ func (api *API) syncHandler(w http.ResponseWriter, r *http.Request) {
}
}

func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
var pinInfo types.GlobalPinInfoSerial
func (api *API) recoverAllHandler(w http.ResponseWriter, r *http.Request) {
queryValues := r.URL.Query()
local := queryValues.Get("local")
if local == "true" {
var pinInfos []types.PinInfoSerial
err := api.rpcClient.Call("",
"Cluster",
"Recover",
c,
&pinInfo)
sendResponse(w, err, pinInfo)
"RecoverAllLocal",
struct{}{},
&pinInfos)
sendResponse(w, err, pinInfos)
} else {
sendErrorResponse(w, 400, "only requests with parameter local=true are supported")
}
}

func (api *API) recoverHandler(w http.ResponseWriter, r *http.Request) {
if c := parseCidOrError(w, r); c.Cid != "" {
queryValues := r.URL.Query()
local := queryValues.Get("local")

		// Is it RESTful to return two different types
		// depending on a flag? Should PinInfo
		// be converted to a GlobalPinInfo?

if local == "true" {
var pinInfo types.PinInfoSerial
err := api.rpcClient.Call("",
"Cluster",
"RecoverLocal",
c,
&pinInfo)
sendResponse(w, err, pinInfo)
} else {
var pinInfo types.GlobalPinInfoSerial
err := api.rpcClient.Call("",
"Cluster",
"Recover",
c,
&pinInfo)
sendResponse(w, err, pinInfo)
}
}
}

Expand Down
18 changes: 18 additions & 0 deletions api/rest/restapi_test.go
Expand Up @@ -306,3 +306,21 @@ func TestAPIRecoverEndpoint(t *testing.T) {
t.Error("expected different status")
}
}

func TestAPIRecoverAllEndpoint(t *testing.T) {
rest := testAPI(t)
defer rest.Shutdown()

var resp []api.PinInfoSerial
makePost(t, "/pins/recover?local=true", []byte{}, &resp)

if len(resp) != 0 {
t.Fatal("bad response length")
}

var errResp api.Error
makePost(t, "/pins/recover", []byte{}, &errResp)
if errResp.Code != 400 {
t.Error("expected a different error")
}
}
4 changes: 1 addition & 3 deletions api/types.go
Expand Up @@ -143,9 +143,7 @@ func (gpis GlobalPinInfoSerial) ToGlobalPinInfo() GlobalPinInfo {
return gpi
}

// PinInfo holds information about local pins. PinInfo is
// serialized when requesting the Global status, therefore
// we cannot use *cid.Cid.
// PinInfo holds information about local pins.
type PinInfo struct {
Cid *cid.Cid
Peer peer.ID
Expand Down
8 changes: 7 additions & 1 deletion cluster.go
Expand Up @@ -889,11 +889,17 @@ func (c *Cluster) Sync(h *cid.Cid) (api.GlobalPinInfo, error) {
return c.globalPinInfoCid("SyncLocal", h)
}

// RecoverLocal triggers a recover operation for a given Cid
// RecoverLocal triggers a recover operation for a given Cid.
func (c *Cluster) RecoverLocal(h *cid.Cid) (api.PinInfo, error) {
return c.tracker.Recover(h)
}

// RecoverAllLocal triggers a recover operation for all Cids tracked
// by this peer.
func (c *Cluster) RecoverAllLocal() ([]api.PinInfo, error) {
return c.tracker.RecoverAll()
}

// Recover triggers a recover operation for a given Cid in all
// cluster peers.
func (c *Cluster) Recover(h *cid.Cid) (api.GlobalPinInfo, error) {
Expand Down
29 changes: 27 additions & 2 deletions cluster_test.go
Expand Up @@ -85,12 +85,12 @@ func (ipfs *mockConnector) FreeSpace() (uint64, error) { retu
func (ipfs *mockConnector) RepoSize() (uint64, error) { return 0, nil }

func testingCluster(t *testing.T) (*Cluster, *mockAPI, *mockConnector, *mapstate.MapState, *maptracker.MapPinTracker) {
clusterCfg, _, _, consensusCfg, monCfg, _ := testingConfigs()
clusterCfg, _, _, consensusCfg, trackerCfg, monCfg, _ := testingConfigs()

api := &mockAPI{}
ipfs := &mockConnector{}
st := mapstate.NewMapState()
tracker := maptracker.NewMapPinTracker(clusterCfg.ID)
tracker := maptracker.NewMapPinTracker(trackerCfg, clusterCfg.ID)
monCfg.CheckInterval = 2 * time.Second
mon, _ := basic.NewMonitor(monCfg)
alloc := ascendalloc.NewAllocator()
Expand Down Expand Up @@ -293,3 +293,28 @@ func TestVersion(t *testing.T) {
t.Error("bad Version()")
}
}

func TestClusterRecoverAllLocal(t *testing.T) {
cl, _, _, _, _ := testingCluster(t)
defer cleanRaft()
defer cl.Shutdown()

c, _ := cid.Decode(test.TestCid1)
err := cl.Pin(api.PinCid(c))
if err != nil {
t.Fatal("pin should have worked:", err)
}

time.Sleep(time.Second)

recov, err := cl.RecoverAllLocal()
if err != nil {
t.Error("did not expect an error")
}
if len(recov) != 1 {
t.Fatal("there should be only one pin")
}
if recov[0].Status != api.TrackerStatusPinned {
t.Error("the pin should have been recovered")
}
}
4 changes: 4 additions & 0 deletions config/config.go
Expand Up @@ -283,6 +283,10 @@ func (cfg *Manager) LoadJSON(bs []byte) error {
return err
}
logger.Debugf("%s section configuration loaded", name)
} else {
logger.Warningf("%s section is empty, generating default", name)
component.SetBaseDir(dir)
component.Default()
}
}
return nil
Expand Down
21 changes: 16 additions & 5 deletions config_test.go
Expand Up @@ -6,6 +6,7 @@ import (
"github.com/ipfs/ipfs-cluster/informer/disk"
"github.com/ipfs/ipfs-cluster/ipfsconn/ipfshttp"
"github.com/ipfs/ipfs-cluster/monitor/basic"
"github.com/ipfs/ipfs-cluster/pintracker/maptracker"
)

var testingClusterSecret, _ = DecodeClusterSecret("2588b80d5cb05374fa142aed6cbb047d1f4ef8ef15e37eba68c65b9d30df67ed")
Expand Down Expand Up @@ -59,6 +60,14 @@ var testingIpfsCfg = []byte(`{
"proxy_idle_timeout": "1m0s"
}`)

var testingTrackerCfg = []byte(`
{
"pinning_timeout": "30s",
"unpinning_timeout": "15s",
"max_pin_queue_size": 4092
}
`)

var testingMonCfg = []byte(`{
"check_interval": "2s"
}`)
Expand All @@ -68,26 +77,28 @@ var testingDiskInfCfg = []byte(`{
"metric_type": "freespace"
}`)

func testingConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *basic.Config, *disk.Config) {
clusterCfg, apiCfg, ipfsCfg, consensusCfg, monCfg, diskInfCfg := testingEmptyConfigs()
func testingConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *maptracker.Config, *basic.Config, *disk.Config) {
clusterCfg, apiCfg, ipfsCfg, consensusCfg, trackerCfg, monCfg, diskInfCfg := testingEmptyConfigs()
clusterCfg.LoadJSON(testingClusterCfg)
apiCfg.LoadJSON(testingAPICfg)
ipfsCfg.LoadJSON(testingIpfsCfg)
consensusCfg.LoadJSON(testingRaftCfg)
trackerCfg.LoadJSON(testingTrackerCfg)
monCfg.LoadJSON(testingMonCfg)
diskInfCfg.LoadJSON(testingDiskInfCfg)

return clusterCfg, apiCfg, ipfsCfg, consensusCfg, monCfg, diskInfCfg
return clusterCfg, apiCfg, ipfsCfg, consensusCfg, trackerCfg, monCfg, diskInfCfg
}

func testingEmptyConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *basic.Config, *disk.Config) {
func testingEmptyConfigs() (*Config, *rest.Config, *ipfshttp.Config, *raft.Config, *maptracker.Config, *basic.Config, *disk.Config) {
clusterCfg := &Config{}
apiCfg := &rest.Config{}
ipfshttpCfg := &ipfshttp.Config{}
consensusCfg := &raft.Config{}
trackerCfg := &maptracker.Config{}
monCfg := &basic.Config{}
diskInfCfg := &disk.Config{}
return clusterCfg, apiCfg, ipfshttpCfg, consensusCfg, monCfg, diskInfCfg
return clusterCfg, apiCfg, ipfshttpCfg, consensusCfg, trackerCfg, monCfg, diskInfCfg
}

// func TestConfigDefault(t *testing.T) {
Expand Down

0 comments on commit 852c89b

Please sign in to comment.