Ceph mon and osd
- add possibility to use puppet-librarian-simple in library
- add fuel main repo
- add dummy disks for osd
- add osds
- add mons
- change node names to solar-dev
cvieri committed Oct 29, 2015
1 parent 08327cd commit 0b5872e
Showing 16 changed files with 300 additions and 20 deletions.
6 changes: 3 additions & 3 deletions examples/library_ceph/README.md
@@ -3,15 +3,15 @@ Current example will do following things:
- fetch fuel-library from github
- use ./update_modules.sh to fetch librarian dependencies
- generate ceph keys on a solar-dev1
- install ceph-mon on solar-dev1 (INPROGRESS)
- install ceph-osd on solar-dev2 (TODO)
- install ceph-mon on solar-dev1
- install ceph-osd on solar-dev2
- implement removal mechanism for ceph-mon/ceph-osd (TODO)


To use it:

```
python exaples/library_ceph/ceph.py
python examples/library_ceph/ceph.py
solar ch stage && solar ch process
solar or run-once last -w 120
```
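Once `solar or run-once last -w 120` reports success, the simplest sanity check is to look at the cluster state on the mon node. A minimal verification sketch, assuming the Vagrant-based solar-dev boxes this example targets (the `vagrant ssh` invocation and the expectation of one mon plus one loop-backed OSD are assumptions, not part of this commit):

```
# Hypothetical post-run check (assumes the Vagrant solar-dev environment;
# ceph-mon runs on solar-dev1, ceph-osd on solar-dev2).
vagrant ssh solar-dev1 -c 'sudo ceph -s'        # overall health and mon quorum
vagrant ssh solar-dev1 -c 'sudo ceph osd tree'  # the dummy loop/nbd-backed OSD should be listed
```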
88 changes: 79 additions & 9 deletions examples/library_ceph/ceph.py
@@ -1,4 +1,4 @@

from solar import events as evapi
from solar.core.resource import virtual_resource as vr
from solar.interfaces.db import get_db

@@ -36,40 +36,110 @@
def deploy():
db.clear()
resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 2})
first_node, second_node = [x for x in resources if x.name.startswith('node')]
first_node, second_node = [x for x in resources if x.name.startswith('solar-dev')]
first_transp = next(x for x in resources if x.name.startswith('transport'))

hosts1 = vr.create('hosts_file1', 'resources/hosts_file', {})[0]
hosts2 = vr.create('hosts_file2', 'resources/hosts_file', {})[0]


first_node.connect(hosts1, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})

second_node.connect(hosts1, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})

second_node.connect(hosts2, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})


first_node.connect(hosts2, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})

library = vr.create('library1', 'resources/fuel_library', {})[0]
first_node.connect(library)
library1 = vr.create('library1', 'resources/fuel_library', {})[0]
library2 = vr.create('library2', 'resources/fuel_library',
{'temp_directory': '/tmp/solar',
'puppet_modules': '/etc/fuel/modules',
'git':{'branch': 'master', 'repository': 'https://github.com/stackforge/fuel-library'},
'librarian_puppet_simple': 'true'})[0]

keys = vr.create('ceph_key', 'resources/ceph_keys', {})[0]
first_node.connect(keys)

remote_file = vr.create('ceph_key2', 'resources/remote_file',
{'dest': '/var/lib/astute/'})[0]
second_node.connect(remote_file)
keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'})
first_transp.connect(remote_file, {'transports': 'remote'})

remote_file = vr.create('ceph_key2', 'resources/remote_file',
{'dest': '/var/lib/astute/'})[0]
second_node.connect(remote_file)
keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'})
first_transp.connect(remote_file, {'transports': 'remote'})

nbd_loop_device = vr.create('nbd_loop_device1', 'resources/nbd_loop_device',
{'path': '/tmp/1', 'id': '0'
})[0]

second_node.connect(nbd_loop_device, {})

ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon',
{'storage': STORAGE,
'keystone': KEYSTONE,
'network_scheme': NETWORK_SCHEMA,
'ceph_monitor_nodes': NETWORK_METADATA,
'ceph_primary_monitor_node': NETWORK_METADATA,
'role': 'controller',
'role': 'primary-controller',
})[0]

managed_apt = vr.create(
ceph_osd = vr.create('ceph_osd2', 'resources/ceph_osd',
{'storage': STORAGE,
'keystone': KEYSTONE,
'network_scheme': NETWORK_SCHEMA,
'ceph_monitor_nodes': NETWORK_METADATA,
'ceph_primary_monitor_node': NETWORK_METADATA,
'role': 'ceph-osd',
})[0]

managed_apt1 = vr.create(
'managed_apt1', 'templates/mos_repos.yaml',
{'node': first_node.name, 'index': 0})[-1]

keys.connect(ceph_mon, {})
managed_apt2 = vr.create(
'managed_apt2', 'templates/mos_repos.yaml',
{'node': second_node.name, 'index': 1})[-1]


first_node.connect(library1, {})
second_node.connect(library2, {})

# ceph_mon.connect(ceph_osd, {})

first_node.connect(ceph_mon,
{'ip': ['ip', 'public_vip', 'management_vip']})
library.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
managed_apt.connect(ceph_mon, {})
second_node.connect(ceph_osd,
{'ip': ['ip', 'public_vip', 'management_vip']})
library1.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
library2.connect(ceph_osd, {'puppet_modules': 'puppet_modules'})

evapi.add_dep(second_node.name, ceph_osd.name, actions=('run',))
evapi.add_dep(first_node.name, ceph_mon.name, actions=('run',))
evapi.add_dep(keys.name, ceph_mon.name, actions=('run',))
evapi.add_dep(remote_file.name, ceph_osd.name, actions=('run',))
evapi.add_dep(managed_apt1.name, ceph_mon.name, actions=('run',))
evapi.add_dep(managed_apt2.name, ceph_osd.name, actions=('run',))
evapi.add_dep(ceph_mon.name, ceph_osd.name, actions=('run',))
evapi.add_dep(nbd_loop_device.name, ceph_osd.name, actions=('run',))


if __name__ == '__main__':
deploy()
3 changes: 3 additions & 0 deletions resources/ceph_keys/actions/run.sh
@@ -12,6 +12,9 @@ function generate_ssh_keys {
else
echo "Key $key_path already exists"
fi

### FIXME: Dirty hack to allow scp under vagrant user ###
chmod +r $key_path
}

generate_ssh_keys
6 changes: 4 additions & 2 deletions resources/ceph_mon/actions/run.pp
@@ -27,6 +27,8 @@
$use_ceph = false
}

class {'firewall': }

if $use_ceph {
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
@@ -76,7 +78,7 @@
hasrestart => true,
}

Class['ceph'] ~> Service['cinder-volume']
Class['firewall'] -> Class['ceph'] ~> Service['cinder-volume']
Class['ceph'] ~> Service['cinder-backup']
}

@@ -89,7 +91,7 @@
hasrestart => true,
}

Class['ceph'] ~> Service['glance-api']
Class['firewall'] -> Class['ceph'] ~> Service['glance-api']
}

}
59 changes: 59 additions & 0 deletions resources/ceph_osd/actions/run.pp
@@ -0,0 +1,59 @@
notice('MODULAR: ceph-osd.pp')

# Pulling hiera
$storage_hash = hiera('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_neutron = hiera('use_neutron', false)
#$mp_hash = hiera('mp')
$verbose = pick($storage_hash['verbose'], true)
$debug = pick($storage_hash['debug'], hiera('debug', true))
$use_monit = false
$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false)
$keystone_hash = hiera('keystone', {})
$access_hash = hiera('access', {})
$network_scheme = hiera_hash('network_scheme')
$neutron_mellanox = hiera('neutron_mellanox', false)
$syslog_hash = hiera('syslog', {})
$use_syslog = hiera('use_syslog', true)
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config($network_scheme)
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')

class {'firewall': } ->

class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => $storage_hash['objects_ceph'],
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => hiera('syslog_log_facility_ceph','LOG_LOCAL0'),
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph'],
}

$osd_devices = split($::osd_devices_list, ' ')
# Class Ceph is already defined, so it will do its thing.
notify {"ceph_osd: ${osd_devices}": }
notify {"osd_devices: ${::osd_devices_list}": }
# TODO(bogdando) add monit ceph-osd services monitoring, if required

#################################################################

# vim: set ts=2 sw=2 et :
38 changes: 38 additions & 0 deletions resources/ceph_osd/meta.yaml
@@ -0,0 +1,38 @@
id: ceph_osd
handler: puppetv2
version: 1.0.0
input:
ip:
schema: str!
value:
public_vip:
schema: str!
value:
management_vip:
schema: str!
value:
use_syslog:
schema: bool
value: true
keystone:
schema: {'admin_token': 'str'}
value: {}
ceph_monitor_nodes:
schema: []
value: []
ceph_primary_monitor_node:
schema: []
value: []
storage:
schema: {}
value: {}
network_scheme:
schema: {}
value: {}
role:
schema: str!
value:
puppet_modules:
schema: str!
value:
tags: []
7 changes: 7 additions & 0 deletions resources/fuel_library/actions/run.sh
@@ -1,6 +1,8 @@
#!/bin/bash

mkdir -p {{temp_directory}}
use_librarian_puppet_simple={{librarian_puppet_simple}}


pushd {{temp_directory}}
if [ ! -d fuel-library ]
Expand All @@ -11,10 +13,15 @@ else
git pull
popd
fi

[ -n "$use_librarian_puppet_simple" ] && gem install librarian-puppet-simple --no-ri --no-rdoc

pushd ./fuel-library/deployment
./update_modules.sh
popd

[ -n "$use_librarian_puppet_simple" ] && gem uninstall -x librarian-puppet-simple

mkdir -p {{puppet_modules}}
cp -r ./fuel-library/deployment/puppet/* {{puppet_modules}}
popd
3 changes: 3 additions & 0 deletions resources/fuel_library/meta.yaml
@@ -15,4 +15,7 @@ input:
puppet_modules:
schema: str!
value: /etc/fuel/modules
librarian_puppet_simple:
schema: str!
value:
tags: []
2 changes: 1 addition & 1 deletion resources/managed_apt/actions/run.yaml
@@ -3,5 +3,5 @@
tasks:
- shell: echo 'Managed by solar' > /etc/apt/sources.list
when: {{ensure_other_removed}}
- shell: wget -qO - {{gpg_key}} | sudo apt-key add -
- shell: apt-get update
when: {{ensure_other_removed}}
4 changes: 4 additions & 0 deletions resources/managed_apt/meta.yaml
@@ -15,3 +15,7 @@ input:
ensure_other_removed:
schema: bool
value: true
gpg_key:
schema: [str!]
value:

18 changes: 18 additions & 0 deletions resources/nbd_loop_device/actions/remove.yaml
@@ -0,0 +1,18 @@
- hosts: [{{ host }}]
sudo: yes
tasks:
- name: remove nbd-server
apt: name=nbd-server state=absent
- name: remove nbd-client
apt: name=nbd-client state=absent
- name: find loop device
shell: losetup -a|grep "{{path}}"|awk -F':' '{print $1}'
register: loop_device
- name: if loop device exists, delete it
command: sudo losetup -d {% raw %}{{item}}{% endraw %}
when: loop_device|success
with_items: loop_device.stdout_lines
- name: remove file
file: path={{path}} state=absent
- name: kill all nbd processes
shell: "killall nbd-client; killall nbd-server"
52 changes: 52 additions & 0 deletions resources/nbd_loop_device/actions/run.yaml
@@ -0,0 +1,52 @@
- hosts: [{{ host }}]
sudo: yes
tasks:
- name: install nbd-server
apt: name=nbd-server state=latest
- name: install nbd-client
apt: name=nbd-client state=latest
- name: prepare a file
command: truncate -s 10G {{path}} creates={{path}}
- name: check if loop for file is already created
shell: losetup -a|grep {{path}}
register: loop_created
ignore_errors: True
- name: if loop is not created, create it
command: losetup -f {{path}}
when: loop_created|failed
- name: find loop device
shell: losetup -a|grep '{{path}}'|awk -F':' '{print $1}'
register: loop_device
- name: check if loop device has gpt
shell: "parted {{ '{{' }} loop_device.stdout }} print | grep 'Partition Table: gpt'"
register: gpt_created
ignore_errors: True
- name: set gpt label
shell: "echo yes | parted {{ '{{' }} loop_device.stdout }} mklabel gpt"
ignore_errors: True
when: gpt_created|failed
- name: check if guid set
shell: "sgdisk -i1 {{ '{{' }} loop_device.stdout }} | grep 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D'"
register: guid_set
ignore_errors: True
- name: set guid
shell: "sgdisk -n 1:0:9G -t 1:4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D -p {{ '{{' }} loop_device.stdout }}"
ignore_errors: True
when: guid_set|failed
- name: Creates directory
file: path=/mnt/dev state=directory owner=nbd
- name: Create symlink to loop device
file: src={{ '{{' }} loop_device.stdout }} dest=/mnt{{ '{{' }} loop_device.stdout }} state=link
- name: is nbd served
shell: "netstat -ln | grep '127.0.0.1:4242 '"
register: nbd_served
ignore_errors: True
- name: configure loop as nbd
shell: nbd-server 127.0.0.1@4242 /mnt{{ '{{' }} loop_device.stdout }} -c
ignore_errors: True
when: nbd_served|failed
- name: Set owner for loop
shell: "chown nbd /mnt{{ '{{' }} loop_device.stdout }}"
- name: connect to nbd
shell: nbd-client 127.0.0.1 4242 /dev/nbd{{id}}
ignore_errors: True
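
For readers who want to see what this playbook does to the node, here is a rough manual equivalent; an illustrative sketch only, mirroring the defaults used above (backing file /tmp/1, nbd device id 0 as wired up for nbd_loop_device1 in ceph.py) and assuming an Ubuntu box with the nbd packages available:

```
# Rough manual equivalent of resources/nbd_loop_device/actions/run.yaml
# (a sketch; paths, sizes and the Ceph OSD partition GUID mirror the
# playbook above, not an officially supported procedure).
sudo apt-get install -y nbd-server nbd-client
truncate -s 10G /tmp/1                        # backing file for the dummy OSD disk
sudo losetup -f /tmp/1                        # attach it to the first free loop device
loop=$(losetup -a | grep '/tmp/1' | awk -F':' '{print $1}')
echo yes | sudo parted "$loop" mklabel gpt    # GPT label so sgdisk can partition it
sudo sgdisk -n 1:0:9G -t 1:4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D -p "$loop"
sudo mkdir -p /mnt/dev && sudo chown nbd /mnt/dev
sudo ln -s "$loop" "/mnt$loop"                # expose the loop device under /mnt for nbd-server
sudo nbd-server 127.0.0.1@4242 "/mnt$loop" -c # serve it on localhost:4242
sudo chown nbd "/mnt$loop"
sudo nbd-client 127.0.0.1 4242 /dev/nbd0      # /dev/nbd0 is the device ceph-osd will consume
```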
