Permalink
Browse files

Fix the bootstrap

Fix a bunch of things.

Signed-off-by: Sébastien Han <sebastien.han@enovance.com>
  • Loading branch information...
leseb committed Apr 14, 2015
1 parent 21ea860 commit 5ad59f1efe5852ca93b1d73be09145488fc89883
Showing with 26 additions and 29 deletions.
  1. +19 −15 init.sh
  2. +0 −2 install.sh
  3. +7 −12 run.sh
34 init.sh
@@ -6,39 +6,43 @@ rm -f /etc/ceph/*

# Short hostname used as the mon/osd/mds node name throughout the bootstrap.
MASTER=$(hostname -s)

# First IPv4 address on eth0 (assumes the interface is literally named
# "eth0" — TODO confirm on hosts with predictable NIC names like ens*/enp*).
ip=$(ip -4 -o a | grep eth0 | awk '{print $4}' | cut -d'/' -f1)
# Make the short hostname resolvable locally; ceph-deploy relies on it.
echo "$ip $MASTER" >> /etc/hosts

# Create a new single-node ceph cluster and bring up the initial monitor.
# NOTE(review): 'mon create-initial' normally already creates the monitor and
# gathers keys, so the following 'mon create' + 'gatherkeys' look redundant —
# confirm against the ceph-deploy version in use.
ceph-deploy --overwrite-conf new "${MASTER}"
ceph-deploy --overwrite-conf mon create-initial "${MASTER}"
ceph-deploy --overwrite-conf mon create "${MASTER}"
ceph-deploy gatherkeys "${MASTER}"

# Create and register three OSDs (ids 0-2), each backed by the directory
# /var/lib/ceph/osd/ceph-<id> (bind-mounted in by run.sh).
for i in 0 1 2
do
  ceph osd create                     # allocate the next free OSD id (expected == ${i})
  ceph-osd -i "${i}" --mkfs --mkkey   # initialise the data dir and generate its key
  ceph auth add "osd.${i}" osd 'allow *' mon 'allow rwx' -i "/var/lib/ceph/osd/ceph-${i}/keyring"
  ceph osd crush add "${i}" 1 root=default host="${MASTER}"   # weight 1, single host
  ceph-osd -i "${i}" -k "/var/lib/ceph/osd/ceph-${i}/keyring" # start the OSD daemon
done

# Single-node tuning: allow replicas on the same host (chooseleaf type 0),
# small 100 MB journal, one replica, 8 PGs/PGPs for new pools.
# NOTE(review): these lines are appended to ceph.conf *after* the cluster and
# OSDs were created, so running daemons only pick them up via the mon restart
# below — confirm this ordering is intended.
echo "osd crush chooseleaf type = 0" >> /etc/ceph/ceph.conf
echo "osd journal size = 100" >> /etc/ceph/ceph.conf
echo "osd pool default size = 1" >> /etc/ceph/ceph.conf
echo "osd pool default pgp num = 8" >> /etc/ceph/ceph.conf
echo "osd pool default pg num = 8" >> /etc/ceph/ceph.conf

# Restart the monitor so it re-reads the ceph.conf settings appended above.
/sbin/service ceph -c /etc/ceph/ceph.conf stop mon.${MASTER}
/sbin/service ceph -c /etc/ceph/ceph.conf start mon.${MASTER}

#start ceph
#service ceph restart
# Drop the default 'rbd' pool to a single replica (single-node cluster).
ceph osd pool set rbd size 1

# Show which ceph daemons are running at this point.
ps -ef |grep ceph
# NOTE(review): this osd.0 creation duplicates what the "for i in 0 1 2" loop
# above already did — it looks like leftover from an earlier version of this
# script (diff residue). Confirm before relying on it; re-running mkfs/auth add
# for an existing osd.0 may fail or be a no-op.
ceph osd create
ceph-osd -i 0 --mkfs --mkkey
ceph auth add osd.0 osd 'allow *' mon 'allow rwx' -i /var/lib/ceph/osd/ceph-0/keyring
ceph osd crush add 0 1 root=default host=${MASTER}
ceph-osd -i 0 -k /var/lib/ceph/osd/ceph-0/keyring

# Sanity check before creating the filesystem: dump the CRUSH/OSD tree and the
# cluster health (a clean run should report HEALTH_OK).
ceph osd tree
ceph health

# Provision CephFS: one data pool and one metadata pool (4 PGs each), then the
# filesystem itself, then a metadata server on the master node.
for pool in cephfs_data cephfs_metadata; do
  ceph osd pool create "${pool}" 4
done
ceph fs new cephfs cephfs_metadata cephfs_data
ceph-deploy mds create "${MASTER}"
ceph-deploy --overwrite-conf mds create "${MASTER}"

# Show the running ceph daemons for debugging.
ps -ef |grep ceph

# Final OSD map, then give the cluster time to settle and tail its events.
ceph osd dump
sleep 120

ceph -w
@@ -9,5 +9,3 @@ cat ~/.ssh/id_rsa.pub |awk '{print $1, $2, "Generated"}' >> ~/.ssh/authorized_ke

# Install ceph-deploy and enable the EPEL repository (quietly), then install
# ceph itself — kept as two steps so the EPEL repo definition is in place
# before the second install runs.
yum install -y -q ceph-deploy epel-release
yum install -y ceph


19 run.sh
@@ -1,17 +1,12 @@
# Build the ceph container image from the local Dockerfile.
docker build -t centos_ceph_pkg .
# Host-side directories shared with the container.
mkdir -p /home/etc
mkdir -p /home/etc/ceph
mkdir -p /home/disks

# Provision three 6 GB loopback-backed XFS "disks", one per OSD.
# WARNING: dd overwrites /home/disks/d<i> from scratch on every run.
for i in 0 1 2
do
umount /tmp/ceph_disk${i}
dd if=/dev/zero of=/home/disks/d${i} bs=1024M count=6 conv=notrunc
mkfs -t xfs -f /home/disks/d${i}
mkdir -p /tmp/ceph_disk${i}
mount -t xfs -o loop /home/disks/d${i} /tmp/ceph_disk${i}
done

# Run the bootstrap (init.sh) inside the container, with each loopback disk
# bind-mounted as an OSD data dir; --privileged and host networking are needed
# for the ceph daemons.
docker run --privileged --net=host -i -t -v /tmp/ceph_disk0:/var/lib/ceph/osd/ceph-0 -v /tmp/ceph_disk1:/var/lib/ceph/osd/ceph-1 -v /tmp/ceph_disk2:/var/lib/ceph/osd/ceph-2 -v /etc/ceph:/etc/ceph -t centos_ceph_pkg /bin/bash /init.sh
# NOTE(review): this single-disk variant appears to be the *old* version of the
# provisioning above (flattened diff residue) — it uses /ceph/disks, one 1.25 GB
# disk, and mounts only ceph-0. Confirm which variant is current before use.
mkdir -p /ceph/disks

umount /tmp/ceph_disk0
dd if=/dev/zero of=/ceph/disks/d0 bs=256M count=5 conv=notrunc
mkfs -t xfs -f /ceph/disks/d0
mkdir -p /tmp/ceph_disk0
mount -t xfs -o loop /ceph/disks/d0 /tmp/ceph_disk0

docker run --privileged --net=host -i -t -v /tmp/ceph_disk0:/var/lib/ceph/osd/ceph-0 -v /etc/ceph:/etc/ceph -t centos_ceph_pkg /bin/bash /init.sh

0 comments on commit 5ad59f1

Please sign in to comment.