Ceph mon and osd #292

Open. Wants to merge 1 commit into base: master
8 changes: 4 additions & 4 deletions examples/library_ceph/README.md
@@ -3,15 +3,15 @@ Current example will do the following things:
- fetch fuel-library from github
- use ./update_modules.sh to fetch librarian dependencies
- generate ceph keys on solar-dev1
- install ceph-mon on solar-dev1 (INPROGRESS)
- install ceph-osd on solar-dev2 (TODO)
- install ceph-mon on solar-dev1
- install ceph-osd on solar-dev2
- implement removal mechanism for ceph-mon/ceph-osd (TODO)

- configure a 10GB hdd for use with ceph-osd (the disk must be added manually)

To use it:

```
python exaples/library_ceph/ceph.py
python examples/library_ceph/ceph.py
solar ch stage && solar ch process
solar or run-once last -w 120
```
61 changes: 52 additions & 9 deletions examples/library_ceph/ceph.py
@@ -1,4 +1,4 @@

from solar import events as evapi
from solar.core.resource import virtual_resource as vr
from solar.interfaces.db import get_db

@@ -36,40 +36,83 @@
def deploy():
db.clear()
resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 2})
first_node, second_node = [x for x in resources if x.name.startswith('node')]
first_node, second_node = [x for x in resources if x.name.startswith('solar-dev')]
first_transp = next(x for x in resources if x.name.startswith('transport'))

host1, host2 = [x for x in resources if x.name.startswith('hosts_file')]

library = vr.create('library1', 'resources/fuel_library', {})[0]
first_node.connect(library)
library1 = vr.create('library1', 'resources/fuel_library', {})[0]
library2 = vr.create('library2', 'resources/fuel_library',
{'temp_directory': '/tmp/solar',
'puppet_modules': '/etc/fuel/modules',
'git':{'branch': 'master', 'repository': 'https://github.com/stackforge/fuel-library'},
'librarian_puppet_simple': 'true'})[0]

keys = vr.create('ceph_key', 'resources/ceph_keys', {})[0]
first_node.connect(keys)

remote_file = vr.create('ceph_key2', 'resources/remote_file',
{'dest': '/var/lib/astute/'})[0]
second_node.connect(remote_file)
keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'})
first_transp.connect(remote_file, {'transports': 'remote'})

ceph_disk = vr.create('ceph_disk1', 'resources/ceph_disk',
{'disk_name': '/dev/vdb'})[0]

second_node.connect(ceph_disk, {})

ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon',
{'storage': STORAGE,
'keystone': KEYSTONE,
'network_scheme': NETWORK_SCHEMA,
'ceph_monitor_nodes': NETWORK_METADATA,
'ceph_primary_monitor_node': NETWORK_METADATA,
'role': 'controller',
'role': 'primary-controller',
})[0]

ceph_osd = vr.create('ceph_osd2', 'resources/ceph_osd',
{'storage': STORAGE,
'keystone': KEYSTONE,
'network_scheme': NETWORK_SCHEMA,
'ceph_monitor_nodes': NETWORK_METADATA,
'ceph_primary_monitor_node': NETWORK_METADATA,
'role': 'ceph-osd',
})[0]

managed_apt = vr.create(
managed_apt1 = vr.create(
'managed_apt1', 'templates/mos_repos.yaml',
{'node': first_node.name, 'index': 0})[-1]

keys.connect(ceph_mon, {})
managed_apt2 = vr.create(
'managed_apt2', 'templates/mos_repos.yaml',
{'node': second_node.name, 'index': 1})[-1]

first_node.connect(library1, {})
second_node.connect(library2, {})

first_node.connect(ceph_mon,
{'ip': ['ip', 'public_vip', 'management_vip']})
library.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
managed_apt.connect(ceph_mon, {})
second_node.connect(ceph_osd,
{'ip': ['ip', 'public_vip', 'management_vip']})
library1.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
library2.connect(ceph_osd, {'puppet_modules': 'puppet_modules'})

evapi.add_dep(second_node.name, ceph_osd.name, actions=('run',))
evapi.add_dep(first_node.name, ceph_mon.name, actions=('run',))
evapi.add_dep(keys.name, ceph_mon.name, actions=('run',))
evapi.add_dep(remote_file.name, ceph_osd.name, actions=('run',))
evapi.add_dep(managed_apt1.name, ceph_mon.name, actions=('run',))
evapi.add_dep(managed_apt2.name, ceph_osd.name, actions=('run',))
evapi.add_dep(ceph_mon.name, ceph_osd.name, actions=('run',))
evapi.add_dep(ceph_disk.name, ceph_osd.name, actions=('run',))


if __name__ == '__main__':
deploy()
5 changes: 5 additions & 0 deletions resources/ceph_disk/README.md
@@ -0,0 +1,5 @@
# Prepare disk for ceph osd deployment

Takes a single parameter, the disk name, and configures that disk for OSD deployment:
- create a GPT partition label
- set the partition GUID
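
A minimal usage sketch, assuming the same `vr.create`/`connect` API used in `examples/library_ceph/ceph.py` (the `osd_node` name is illustrative):

```
from solar.core.resource import virtual_resource as vr

# Create the disk resource and attach it to the node that will run ceph-osd.
# The run action labels the disk with GPT and tags partition 1 with the
# Ceph OSD partition type GUID.
ceph_disk = vr.create('ceph_disk1', 'resources/ceph_disk',
                      {'disk_name': '/dev/vdb'})[0]
osd_node.connect(ceph_disk, {})  # osd_node: an existing node resource
```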
19 changes: 19 additions & 0 deletions resources/ceph_disk/actions/run.yaml
@@ -0,0 +1,19 @@
- hosts: [{{ host }}]
sudo: yes
tasks:
- name: check if disk has gpt
shell: "parted {{disk_name}} print | grep 'Partition Table: gpt'"
register: gpt_created
ignore_errors: True
- name: set gpt label
shell: "echo yes | parted {{disk_name}} mklabel gpt"
ignore_errors: True
when: gpt_created|failed
- name: check if guid set
shell: "sgdisk -i1 {{disk_name}} | grep 'Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D'"
register: guid_set
ignore_errors: True
- name: set guid
shell: "sgdisk -n 1:0:9G -t 1:4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D -p {{disk_name}}"
ignore_errors: True
when: guid_set|failed
7 changes: 7 additions & 0 deletions resources/ceph_disk/meta.yaml
@@ -0,0 +1,7 @@
id: ceph_disk
handler: ansible
version: 1.0.0
input:
disk_name:
schema: str
value: '/dev/vdb'
9 changes: 9 additions & 0 deletions resources/ceph_keys/README.md
@@ -0,0 +1,9 @@
# Prepare ceph keys

This resource prepares ceph keys for future ceph cluster deployment.

Takes 3 parameters:
- target_directory, default - '/var/lib/astute/'
- key_name, default - 'ceph'
- path, default - '/var/lib/astute/ceph/'
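
A minimal usage sketch, assuming the `vr.create`/`connect` API from `examples/library_ceph/ceph.py`; the defaults above apply when no inputs are passed:

```
from solar.core.resource import virtual_resource as vr

# Generate the keys on the node that will host the ceph monitor.
keys = vr.create('ceph_key', 'resources/ceph_keys', {})[0]
first_node.connect(keys)  # first_node: an existing node resource
```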

3 changes: 3 additions & 0 deletions resources/ceph_keys/actions/run.sh
Original file line number Diff line number Diff line change
@@ -12,6 +12,9 @@ function generate_ssh_keys {
else
echo 'Key $key_path already exists'
fi

### FIXME: Dirty hack to allow scp under vagrant user ###
chmod +r $key_path
}

generate_ssh_keys
42 changes: 42 additions & 0 deletions resources/ceph_mon/README.md
@@ -0,0 +1,42 @@
# Deploy ceph mon

This resource deploys ceph mon.

Example:

```
STORAGE = {'objects_ceph': True,
'osd_pool_size': 2,
'pg_num': 128}

KEYSTONE = {'admin_token': 'abcde'}


NETWORK_SCHEMA = {
'endpoints': {'eth1': {'IP': ['10.0.0.3/24']}},
'roles': {'ceph/replication': 'eth1',
'ceph/public': 'eth1'}
}

NETWORK_METADATA = yaml.load("""
solar-dev1:
uid: '1'
fqdn: solar-dev1
network_roles:
ceph/public: 10.0.0.3
ceph/replication: 10.0.0.3
node_roles:
- ceph-mon
name: solar-dev1

""")

ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon',
{'storage': STORAGE,
'keystone': KEYSTONE,
'network_scheme': NETWORK_SCHEMA,
'ceph_monitor_nodes': NETWORK_METADATA,
'ceph_primary_monitor_node': NETWORK_METADATA,
'role': 'primary-controller',
})[0]
```
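
The monitor also needs to be wired to a node, the fuel-library modules, and the generated keys before its run action is triggered; a sketch following the connections made in `examples/library_ceph/ceph.py` (assumes the `first_node`, `library1` and `keys` resources already exist, as in that script):

```
from solar import events as evapi

first_node.connect(ceph_mon,
                   {'ip': ['ip', 'public_vip', 'management_vip']})
library1.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
evapi.add_dep(keys.name, ceph_mon.name, actions=('run',))
```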
8 changes: 4 additions & 4 deletions resources/ceph_mon/actions/run.pp
@@ -1,6 +1,5 @@
notice('MODULAR: ceph/mon.pp')


$storage_hash = hiera('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
@@ -27,6 +26,8 @@
$use_ceph = false
}

class {'firewall': }

if $use_ceph {
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
@@ -76,7 +77,7 @@
hasrestart => true,
}

Class['ceph'] ~> Service['cinder-volume']
Class['firewall'] -> Class['ceph'] ~> Service['cinder-volume']
Class['ceph'] ~> Service['cinder-backup']
}

@@ -89,7 +90,6 @@
hasrestart => true,
}

Class['ceph'] ~> Service['glance-api']
Class['firewall'] -> Class['ceph'] ~> Service['glance-api']
}

}
3 changes: 3 additions & 0 deletions resources/ceph_mon/meta.yaml
@@ -1,6 +1,9 @@
id: ceph_mon
handler: puppetv2
version: 1.0.0
actions:
run: run.pp
update: run.pp
input:
ip:
schema: str!
42 changes: 42 additions & 0 deletions resources/ceph_osd/README.md
@@ -0,0 +1,42 @@
# Deploy ceph osd

This resource deploys ceph osd with pre-prepared disk drives.

Example:

```
STORAGE = {'objects_ceph': True,
'osd_pool_size': 2,
'pg_num': 128}

KEYSTONE = {'admin_token': 'abcde'}


NETWORK_SCHEMA = {
'endpoints': {'eth1': {'IP': ['10.0.0.3/24']}},
'roles': {'ceph/replication': 'eth1',
'ceph/public': 'eth1'}
}

NETWORK_METADATA = yaml.load("""
solar-dev1:
uid: '1'
fqdn: solar-dev1
network_roles:
ceph/public: 10.0.0.3
ceph/replication: 10.0.0.3
node_roles:
- ceph-mon
name: solar-dev1

""")

ceph_osd = vr.create('ceph_osd2', 'resources/ceph_osd',
{'storage': STORAGE,
'keystone': KEYSTONE,
'network_scheme': NETWORK_SCHEMA,
'ceph_monitor_nodes': NETWORK_METADATA,
'ceph_primary_monitor_node': NETWORK_METADATA,
'role': 'ceph-osd',
})[0]
```
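
The OSD additionally depends on a prepared disk and a deployed monitor; a sketch following the dependencies declared in `examples/library_ceph/ceph.py` (assumes the `second_node`, `library2`, `ceph_disk` and `ceph_mon` resources already exist, as in that script):

```
from solar import events as evapi

second_node.connect(ceph_osd,
                    {'ip': ['ip', 'public_vip', 'management_vip']})
library2.connect(ceph_osd, {'puppet_modules': 'puppet_modules'})
evapi.add_dep(ceph_mon.name, ceph_osd.name, actions=('run',))
evapi.add_dep(ceph_disk.name, ceph_osd.name, actions=('run',))
```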
59 changes: 59 additions & 0 deletions resources/ceph_osd/actions/run.pp
@@ -0,0 +1,59 @@
notice('MODULAR: ceph-osd.pp')

# Pulling hiera
$storage_hash = hiera('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_neutron = hiera('use_neutron', false)
#$mp_hash = hiera('mp')
$verbose = pick($storage_hash['verbose'], true)
$debug = pick($storage_hash['debug'], hiera('debug', true))
$use_monit = false
$auto_assign_floating_ip = hiera('auto_assign_floating_ip', false)
$keystone_hash = hiera('keystone', {})
$access_hash = hiera('access', {})
$network_scheme = hiera_hash('network_scheme')
$neutron_mellanox = hiera('neutron_mellanox', false)
$syslog_hash = hiera('syslog', {})
$use_syslog = hiera('use_syslog', true)
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config($network_scheme)
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')

class {'firewall': } ->

class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => $storage_hash['objects_ceph'],
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => hiera('syslog_log_facility_ceph','LOG_LOCAL0'),
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph'],
}

$osd_devices = split($::osd_devices_list, ' ')
# Class Ceph is already defined so it will do its thing.
notify {"ceph_osd: ${osd_devices}": }
notify {"osd_devices: ${::osd_devices_list}": }
# TODO(bogdando) add monit ceph-osd services monitoring, if required

#################################################################

# vim: set ts=2 sw=2 et :