Skip to content
This repository has been archived by the owner on Nov 30, 2021. It is now read-only.

Commit

Permalink
chore(store): bump Ceph to "giant" release
Browse files Browse the repository at this point in the history
  • Loading branch information
carmstrong committed Nov 5, 2014
1 parent 816e718 commit 9fc6637
Show file tree
Hide file tree
Showing 9 changed files with 104 additions and 25 deletions.
3 changes: 2 additions & 1 deletion database/tests/database_test.go
Expand Up @@ -24,8 +24,9 @@ func TestDatabase(t *testing.T) {
// run mock ceph containers
cephName := "deis-ceph-" + tag
mock.RunMockCeph(t, cephName, cli, etcdPort)
defer cli.CmdRm("-f", cephName+"-monitor")
defer cli.CmdRm("-f", "-v", cephName+"-monitor")
defer cli.CmdRm("-f", "-v", cephName+"-daemon")
defer cli.CmdRm("-f", cephName+"-metadata")
defer cli.CmdRm("-f", cephName+"-gateway")

// run database container
Expand Down
6 changes: 3 additions & 3 deletions docs/managing_deis/add_remove_host.rst
Expand Up @@ -265,7 +265,7 @@ Reminder: make sure you're logged into the machine you're removing from the clus
deis-store-metadata
This is actually all that's necessary. Ceph provides a ``ceph mds rm`` command, but has no
documentation for it. See: http://docs.ceph.com/docs/giant/rados/operations/control/#mds-subsystem

Removing the host from etcd
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Expand All @@ -274,5 +274,5 @@ The etcd cluster still has an entry for the host we've removed, so we'll need to
This can be achieved by making a request to the etcd API. See `remove machines`_ for details.

.. _`remove machines`: https://coreos.com/docs/distributed-configuration/etcd-api/#remove-machines
.. _`removing monitors`: http://ceph.com/docs/giant/rados/operations/add-or-rm-mons/#removing-monitors
.. _`removing OSDs`: http://docs.ceph.com/docs/giant/rados/operations/add-or-rm-osds/#removing-osds-manual
8 changes: 7 additions & 1 deletion docs/managing_deis/store_metadata_settings.rst
Expand Up @@ -17,7 +17,13 @@ Considerations: none

Settings set by store-metadata
------------------------------
The following etcd keys are set by the store-metadata component, typically in its /bin/boot script.

=================================== ==============================================
setting description
=================================== ==============================================
/deis/store/filesystemSetupComplete Set when the Ceph filesystem setup is complete
=================================== ==============================================

Settings used by store-metadata
-------------------------------
Expand Down
12 changes: 6 additions & 6 deletions docs/troubleshooting_deis/index.rst
Expand Up @@ -14,10 +14,10 @@ A deis-store component fails to start
The store component is the most complex component of Deis. As such, there are many ways for it to fail.
Recall that the store components represent Ceph services as follows:

* ``store-monitor``: http://ceph.com/docs/giant/man/8/ceph-mon/
* ``store-daemon``: http://ceph.com/docs/giant/man/8/ceph-osd/
* ``store-gateway``: http://ceph.com/docs/giant/radosgw/
* ``store-metadata``: http://ceph.com/docs/giant/man/8/ceph-mds/
* ``store-volume``: a system service which mounts a `Ceph FS`_ volume to be used by the controller and logger components

Log output for store components can be viewed with ``deisctl status store-<component>`` (such as
Expand Down Expand Up @@ -135,7 +135,7 @@ Other issues

Running into something not detailed here? Please `open an issue`_ or hop into #deis on Freenode IRC and we'll help!

.. _`Ceph FS`: https://ceph.com/docs/giant/cephfs/
.. _`open an issue`: https://github.com/deis/deis/issues/new
.. _`troubleshooting`: http://docs.ceph.com/docs/giant/rados/troubleshooting/

3 changes: 2 additions & 1 deletion registry/tests/registry_test.go
Expand Up @@ -36,8 +36,9 @@ func TestRegistry(t *testing.T) {
// run mock ceph containers
cephName := "deis-ceph-" + tag
mock.RunMockCeph(t, cephName, cli, etcdPort)
defer cli.CmdRm("-f", cephName+"-monitor")
defer cli.CmdRm("-f", "-v", cephName+"-monitor")
defer cli.CmdRm("-f", "-v", cephName+"-daemon")
defer cli.CmdRm("-f", cephName+"-metadata")
defer cli.CmdRm("-f", cephName+"-gateway")

host, port := utils.HostAddress(), utils.RandomPort()
Expand Down
2 changes: 1 addition & 1 deletion store/base/build.sh
Expand Up @@ -24,7 +24,7 @@ curl -sSL https://s3-us-west-2.amazonaws.com/opdemand/confd-git-0e563e5 -o /usr/
chmod +x /usr/local/bin/confd

# Trust the Ceph release key, then point apt at the Ceph "giant" release repo.
curl -sSL 'https://ceph.com/git/?p=ceph.git;a=blob_plain;f=keys/release.asc' | apt-key add -
echo "deb http://ceph.com/debian-giant trusty main" > /etc/apt/sources.list.d/ceph.list

apt-get update && apt-get install -yq ceph

Expand Down
29 changes: 18 additions & 11 deletions store/gateway/bin/boot
Expand Up @@ -35,21 +35,28 @@ if ! etcdctl --no-sync -C $ETCD get /deis/store/defaultPoolsConfigured >/dev/nul
echo "store-gateway: setting pg_num values for default pools..."
# set_until_success <pool> <pg_num>
# Raise pg_num (and then pgp_num) for the given pool to the desired value,
# retrying every 5s until Ceph accepts the change. A value that already
# matches is skipped, so re-running this boot script is idempotent.
function set_until_success {
  # ceph returns non-zero while the cluster is still settling; retry rather than abort
  set +e

  echo "store-gateway: checking pool $1..."
  if ! ceph osd pool get $1 pg_num | grep "pg_num: $2" ; then
    ceph osd pool set $1 pg_num $2 2>/dev/null
    PG_SET=$?
    until [[ $PG_SET -eq 0 ]]; do
      sleep 5
      ceph osd pool set $1 pg_num $2 2>/dev/null
      PG_SET=$?
    done
  fi

  # pgp_num must be raised to match pg_num; same retry loop
  if ! ceph osd pool get $1 pgp_num | grep "pgp_num: $2" ; then
    ceph osd pool set $1 pgp_num $2 2>/dev/null
    PGP_SET=$?
    until [[ $PGP_SET -eq 0 ]]; do
      sleep 5
      ceph osd pool set $1 pgp_num $2 2>/dev/null
      PGP_SET=$?
    done
  fi

  set -e
}

Expand Down
38 changes: 37 additions & 1 deletion store/metadata/bin/boot
Expand Up @@ -3,14 +3,50 @@

ETCD_PORT=${ETCD_PORT:-4001}
ETCD="$HOST:$ETCD_PORT"
ETCD_PATH=${ETCD_PATH:-/deis/store}

HOSTNAME=`hostname`
MDS_NAME=$HOSTNAME

# wait for confd to render the initial Ceph configuration templates
until confd -onetime -node $ETCD -config-file /app/confd.toml >/dev/null 2>&1 ; do
  echo "store-metadata: waiting for confd to write initial templates..."
  sleep 5
done

if ! etcdctl --no-sync -C $ETCD get ${ETCD_PATH}/filesystemSetupComplete >/dev/null 2>&1 ; then
  echo "store-metadata: The Ceph filesystem hasn't been created. Trying to obtain the lock to set up..."
  # we need to obtain a lock so we can ensure only one machine is trying to
  # deploy the cluster; if the lock key already holds our hostname we crashed
  # mid-setup and may safely resume
  if etcdctl --no-sync -C $ETCD mk ${ETCD_PATH}/filesystemSetupLock $HOSTNAME >/dev/null 2>&1 \
  || [[ `etcdctl --no-sync -C $ETCD get ${ETCD_PATH}/filesystemSetupLock` == "$HOSTNAME" ]] ; then
    echo "store-metadata: obtained the lock to proceed with setting up."

    # use ${ETCD_PATH} consistently with every other key read in this script
    PG_NUM=`etcdctl --no-sync -C $ETCD get ${ETCD_PATH}/pgNum`

    # even though we know setup hasn't completed, we could be upgrading an older
    # cluster which has the pools but not the filesystemSetupComplete key
    if ! ceph osd lspools | grep " data," ; then
      ceph osd pool create data ${PG_NUM}
    fi

    if ! ceph osd lspools | grep metadata ; then
      ceph osd pool create metadata ${PG_NUM}
    fi

    if ceph fs ls | grep "No filesystems enabled" ; then
      ceph fs new deis metadata data
    fi

    # mark setup as complete so other hosts (and future boots) skip this branch
    echo "store-metadata: filesystem setup complete."
    etcdctl --no-sync -C $ETCD set ${ETCD_PATH}/filesystemSetupComplete youBetcha >/dev/null
  else
    until etcdctl --no-sync -C $ETCD get ${ETCD_PATH}/filesystemSetupComplete >/dev/null 2>&1 ; do
      echo "store-metadata: waiting for another metadata to complete setup..."
      sleep 5
    done
  fi
fi

# Check to see if we are a new MDS
if [ ! -e /var/lib/ceph/mds/ceph-$MDS_NAME/keyring ]; then
mkdir -p /var/lib/ceph/mds/ceph-${MDS_NAME}
Expand Down
28 changes: 28 additions & 0 deletions tests/mock/mock.go
Expand Up @@ -67,6 +67,9 @@ func RunMockCeph(t *testing.T, name string, cli *client.DockerCli, etcdPort stri
daemonName := name + "-daemon"
RunMockCephDaemon(t, daemonName, etcdPort)

metadataName := name + "-metadata"
RunMockCephMetadata(t, metadataName, etcdPort)

gatewayName := name + "-gateway"
RunMockCephGateway(t, gatewayName, utils.RandomPort(), etcdPort)
}
Expand Down Expand Up @@ -122,6 +125,31 @@ func RunMockCephDaemon(t *testing.T, name string, etcdPort string) {
}
}

// RunMockCephMetadata starts a mock Ceph MDS (deis/store-metadata) on the
// host network and blocks until the daemon logs "mds.0.1 active_start".
// The caller is responsible for removing the container (e.g. via CmdRm).
func RunMockCephMetadata(t *testing.T, name string, etcdPort string) {
	cli, stdout, stdoutPipe := dockercli.NewClient()
	cephImage := "deis/store-metadata:" + utils.BuildTag()
	ipaddr := utils.HostAddress()
	fmt.Printf("--- Running deis/mock-ceph-metadata at %s\n", ipaddr)
	// RunContainer blocks for the container's lifetime, so it runs in a
	// goroutine; the error travels over a buffered channel instead of a
	// shared variable to avoid a data race with the check below.
	errCh := make(chan error, 1)
	go func() {
		// remove any stale container left over from a previous run
		_ = cli.CmdRm("-f", name)
		errCh <- dockercli.RunContainer(cli,
			"--name", name,
			"--rm",
			"-e", "ETCD_PORT="+etcdPort,
			"-e", "HOST="+ipaddr,
			"--net=host",
			cephImage)
	}()
	// wait until the MDS reports it is active before returning
	dockercli.PrintToStdout(t, stdout, stdoutPipe, "mds.0.1 active_start")
	// non-blocking receive: the container is normally still running here, so
	// only an early startup failure will already have produced an error
	select {
	case err := <-errCh:
		if err != nil {
			t.Fatal(err)
		}
	default:
	}
}

// RunMockCephGateway starts a mock S3 endpoint used for component testing
func RunMockCephGateway(t *testing.T, name string, port string, etcdPort string) {
var err error
Expand Down

0 comments on commit 9fc6637

Please sign in to comment.