ceph_pool: refact module
remove complexity about current defaults in running cluster

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
(cherry picked from commit 29fc115)
guits committed Oct 2, 2020
1 parent 7fc7893 commit cb44f65
Showing 7 changed files with 97 additions and 102 deletions.
168 changes: 79 additions & 89 deletions library/ceph_pool.py
@@ -51,8 +51,8 @@
         If 'present' is used, the module creates a pool if it doesn't exist
         or update it if it already exists.
         If 'absent' is used, the module will simply delete the pool.
-        If 'list' is used, the module will return all details about the existing pools
-        (json formatted).
+        If 'list' is used, the module will return all details about the
+        existing pools. (json formatted).
     required: false
     choices: ['present', 'absent', 'list']
     default: present
@@ -247,27 +247,7 @@ def generate_get_config_cmd(param, cluster, user, user_key, container_image=None):
     return cmd
 
 
-def get_default_running_config(module, cluster, user, user_key, output_format='json', container_image=None):  # noqa E501
-    '''
-    Get some default values set in the cluster
-    '''
-
-    params = ['osd_pool_default_size', 'osd_pool_default_min_size', 'osd_pool_default_pg_num', 'osd_pool_default_pgp_num']  # noqa E501
-
-    default_running_values = {}
-
-    for param in params:
-        rc, cmd, out, err = exec_commands(module, generate_get_config_cmd(param, cluster, user, user_key, container_image=container_image))  # noqa E501
-
-        if rc == 0:
-            default_running_values[param] = out.strip()
-        else:
-            return rc, cmd, out, err
-
-    return rc, cmd, default_running_values, err
-
-
-def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None):  # noqa E501
+def get_application_pool(cluster, name, user, user_key, output_format='json', container_image=None):
     '''
     Get application type enabled on a given pool
     '''
@@ -319,6 +299,23 @@ def get_pool_details(module, cluster, name, user, user_key, output_format='json', container_image=None):
 
     _rc, _cmd, application_pool, _err = exec_commands(module, get_application_pool(cluster, name, user, user_key, container_image=container_image))  # noqa E501
 
+    # This is a trick because "target_size_ratio" isn't present at the same level in the dict
+    # ie:
+    # {
+    #     'pg_num': 8,
+    #     'pgp_num': 8,
+    #     'pg_autoscale_mode': 'on',
+    #     'options': {
+    #         'target_size_ratio': 0.1
+    #     }
+    # }
+    # If 'target_size_ratio' is present in 'options', we set it, this way we end up
+    # with a dict containing all needed keys at the same level.
+    if 'target_size_ratio' in out['options'].keys():
+        out['target_size_ratio'] = out['options']['target_size_ratio']
+    else:
+        out['target_size_ratio'] = None
+
     application = list(json.loads(application_pool.strip()).keys())
 
     if len(application) == 0:
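
For illustration, a minimal standalone sketch of the hoisting above — `pool_details` stands in for the parsed `out` dict, and the sample values are assumptions:

```python
# Sketch of the flattening done in get_pool_details(): the value under
# options['target_size_ratio'] is copied to the top level so that all
# comparable keys live at the same depth.
pool_details = {
    'pg_num': 8,
    'pgp_num': 8,
    'pg_autoscale_mode': 'on',
    'options': {'target_size_ratio': 0.1},
}

# Equivalent to the if/else above: .get() returns None when the key is absent.
pool_details['target_size_ratio'] = pool_details['options'].get('target_size_ratio')

print(pool_details['target_size_ratio'])  # 0.1 (None when the option is unset)
```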
@@ -335,15 +332,12 @@ def compare_pool_config(user_pool_config, running_pool_details):
     '''
 
     delta = {}
-    filter_keys = ['pg_num', 'pg_placement_num', 'size', 'pg_autoscale_mode']
+    filter_keys = [ 'pg_num', 'pg_placement_num', 'size', 'pg_autoscale_mode', 'target_size_ratio']
     for key in filter_keys:
-        if str(running_pool_details[key]) != user_pool_config[key]['value']:
+        if str(running_pool_details[key]) != user_pool_config[key]['value'] and user_pool_config[key]['value']:
             delta[key] = user_pool_config[key]
 
-    if str(running_pool_details['options'].get('target_size_ratio')) != user_pool_config['target_size_ratio']['value'] and user_pool_config['target_size_ratio']['value'] is not None:  # noqa E501
-        delta['target_size_ratio'] = user_pool_config['target_size_ratio']
-
-    if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] is not None:  # noqa E501
+    if running_pool_details['application'] != user_pool_config['application']['value'] and user_pool_config['application']['value'] != None:
         delta['application'] = {}
         delta['application']['new_application'] = user_pool_config['application']['value']  # noqa E501
         # to be improved (for update_pools()...)
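
A sketch of the comparison rule this hunk introduces: a key only lands in `delta` when the user actually supplied a value, so parameters left unset in the playbook no longer trigger pool updates. The dict contents below are illustrative:

```python
# Mirror of compare_pool_config()'s key loop with made-up sample data.
running_pool_details = {'pg_num': 8, 'pg_placement_num': 8, 'size': 3,
                        'pg_autoscale_mode': 'on', 'target_size_ratio': None}
user_pool_config = {
    'pg_num': {'value': '16'},            # differs and is set -> goes in delta
    'pg_placement_num': {'value': None},  # unset by the user -> skipped
    'size': {'value': '3'},               # equals the running value -> skipped
    'pg_autoscale_mode': {'value': None},
    'target_size_ratio': {'value': None},
}

delta = {}
for key in ['pg_num', 'pg_placement_num', 'size', 'pg_autoscale_mode', 'target_size_ratio']:
    if str(running_pool_details[key]) != user_pool_config[key]['value'] and user_pool_config[key]['value']:
        delta[key] = user_pool_config[key]

print(delta)  # {'pg_num': {'value': '16'}}
```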
@@ -375,10 +369,16 @@ def create_pool(cluster, name, user, user_key, user_pool_config, container_image=None):
     Create a new pool
     '''
 
-    args = ['create', user_pool_config['pool_name']['value'], '--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value'], user_pool_config['type']['value']]  # noqa E501
+    args = [ 'create', user_pool_config['pool_name']['value'], user_pool_config['type']['value'] ]
+
+    if user_pool_config['pg_autoscale_mode']['value'] != 'on':
+        args.extend(['--pg_num', user_pool_config['pg_num']['value'], '--pgp_num', user_pool_config['pgp_num']['value']])
 
     if user_pool_config['type']['value'] == 'replicated':
-        args.extend([user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--size', user_pool_config['size']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value']])  # noqa E501
+        args.extend([ user_pool_config['crush_rule']['value'], '--expected_num_objects', user_pool_config['expected_num_objects']['value'], '--autoscale-mode', user_pool_config['pg_autoscale_mode']['value'] ])
+
+    if user_pool_config['size']['value'] and user_pool_config['type']['value'] == "replicated":
+        args.extend(['--size', user_pool_config['size']['value']])
 
     elif user_pool_config['type']['value'] == 'erasure':
         args.extend([user_pool_config['erasure_profile']['value']])
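
A hedged walk-through of the new argument construction, assuming a replicated pool with the autoscaler on: `--pg_num`/`--pgp_num` are omitted so the autoscaler decides, and `--size` is only appended for replicated pools when the user actually set it. `cfg` mirrors the shape of `user_pool_config`, and in the module these args are appended to the `ceph osd pool` base command:

```python
# Illustrative values; only the construction logic mirrors the module.
cfg = {
    'pool_name': {'value': 'rbd'},
    'type': {'value': 'replicated'},
    'pg_autoscale_mode': {'value': 'on'},
    'pg_num': {'value': None},
    'pgp_num': {'value': None},
    'crush_rule': {'value': 'replicated_rule'},
    'expected_num_objects': {'value': '0'},
    'size': {'value': '3'},
}

args = ['create', cfg['pool_name']['value'], cfg['type']['value']]
if cfg['pg_autoscale_mode']['value'] != 'on':
    args.extend(['--pg_num', cfg['pg_num']['value'], '--pgp_num', cfg['pgp_num']['value']])
if cfg['type']['value'] == 'replicated':
    args.extend([cfg['crush_rule']['value'], '--expected_num_objects',
                 cfg['expected_num_objects']['value'],
                 '--autoscale-mode', cfg['pg_autoscale_mode']['value']])
if cfg['size']['value'] and cfg['type']['value'] == 'replicated':
    args.extend(['--size', cfg['size']['value']])

print(' '.join(args))
# create rbd replicated replicated_rule --expected_num_objects 0 --autoscale-mode on --size 3
```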
@@ -462,8 +462,8 @@ def run_module():
         details=dict(type='bool', required=False, default=False),
         size=dict(type='str', required=False),
         min_size=dict(type='str', required=False),
-        pg_num=dict(type='str', required=False, default=None),
-        pgp_num=dict(type='str', required=False, default=None),
+        pg_num=dict(type='str', required=False),
+        pgp_num=dict(type='str', required=False),
         pg_autoscale_mode=dict(type='str', required=False, default='on'),
         target_size_ratio=dict(type='str', required=False, default=None),
         pool_type=dict(type='str', required=False, default='replicated', choices=['replicated', 'erasure', '1', '3']),  # noqa E501
@@ -475,16 +475,18 @@
 
     module = AnsibleModule(
         argument_spec=module_args,
-        supports_check_mode=True,
+        supports_check_mode=True
     )
 
     # Gather module parameters in variables
     cluster = module.params.get('cluster')
     name = module.params.get('name')
     state = module.params.get('state')
     details = module.params.get('details')
-    pg_num = module.params.get('pg')
-    pgp_num = module.params.get('pgp')
+    size = module.params.get('size')
+    min_size = module.params.get('min_size')
+    pg_num = module.params.get('pg_num')
+    pgp_num = module.params.get('pgp_num')
     pg_autoscale_mode = module.params.get('pg_autoscale_mode')
     target_size_ratio = module.params.get('target_size_ratio')
     application = module.params.get('application')
@@ -512,16 +514,18 @@
     expected_num_objects = module.params.get('expected_num_objects')
 
     user_pool_config = {
-        'pool_name': {'value': name},
-        'pg_num': {'value': pg_num, 'cli_set_opt': 'pg_num'},
-        'pgp_num': {'value': pgp_num, 'cli_set_opt': 'pgp_num'},
-        'pg_autoscale_mode': {'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode'},  # noqa E501
-        'target_size_ratio': {'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio'},  # noqa E501
-        'application': {'value': application},
-        'type': {'value': pool_type},
-        'erasure_profile': {'value': erasure_profile},
-        'crush_rule': {'value': rule_name, 'cli_set_opt': 'crush_rule'},
-        'expected_num_objects': {'value': expected_num_objects}
+        'pool_name': { 'value': name },
+        'pg_num': { 'value': pg_num, 'cli_set_opt': 'pg_num' },
+        'pgp_num': { 'value': pgp_num, 'cli_set_opt': 'pgp_num' },
+        'pg_autoscale_mode': { 'value': pg_autoscale_mode, 'cli_set_opt': 'pg_autoscale_mode' },
+        'target_size_ratio': { 'value': target_size_ratio, 'cli_set_opt': 'target_size_ratio' },
+        'application': {'value': application },
+        'type': { 'value': pool_type },
+        'erasure_profile': { 'value': erasure_profile },
+        'crush_rule': { 'value': rule_name, 'cli_set_opt': 'crush_rule' },
+        'expected_num_objects': { 'value': expected_num_objects },
+        'size': { 'value': size },
+        'min_size': { 'value': min_size, 'cli_set_opt': 'size' }
     }
 
     if module.check_mode:
@@ -545,65 +549,51 @@
     keyring_filename = cluster + '.' + user + '.keyring'
     user_key = os.path.join("/etc/ceph/", keyring_filename)
 
-    def_opt = {
-        'size': {
-            'conf_name': 'osd_pool_default_size',
-            'cli_set_opt': 'size'
-        },
-        'min_size': {
-            'conf_name': 'osd_pool_default_min_size',
-            'cli_set_opt': 'min_size'
-        },
-        'pg_num': {
-            'conf_name': 'osd_pool_default_pg_num',
-            'cli_set_opt': 'pg_num'
-        },
-        'pgp_num': {
-            'conf_name': 'osd_pool_default_pgp_num',
-            'cli_set_opt': 'pgp_num'
-        }
-    }
-
     if state == "present":
-        rc, cmd, default_running_ceph_config, err = get_default_running_config(module, cluster, user, user_key, container_image=container_image)  # noqa E501
+        rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))
         if rc == 0:
-            for k, v in def_opt.items():
-                if module.params[k] is None:
-                    user_pool_config[k] = {'value': default_running_ceph_config[v['conf_name']], 'cli_set_opt': v['cli_set_opt']}  # noqa E501
-                else:
-                    user_pool_config[k] = {'value': module.params.get(k), 'cli_set_opt': v['cli_set_opt']}  # noqa E501
-            rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))  # noqa E501
-            if rc == 0:
-                running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image)  # noqa E501
-                user_pool_config['pg_placement_num'] = {'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num'}  # noqa E501
-                delta = compare_pool_config(user_pool_config, running_pool_details[2])  # noqa E501
-                if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys():  # noqa E501
-                    rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image)  # noqa E501
-                    if rc == 0:
-                        changed = True
-                else:
-                    out = "Pool {} already exists and there is nothing to update.".format(name)  # noqa E501
-            else:
-                rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image))  # noqa E501
-                if user_pool_config['application']['value'] is not None:
-                    _rc, _cmd, _out, _err = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image))  # noqa E501
-                changed = True
+            running_pool_details = get_pool_details(module, cluster, name, user, user_key, container_image=container_image)
+            user_pool_config['pg_placement_num'] = { 'value': str(running_pool_details[2]['pg_placement_num']), 'cli_set_opt': 'pgp_num' }
+            delta = compare_pool_config(user_pool_config, running_pool_details[2])
+            if len(delta) > 0 and running_pool_details[2]['erasure_code_profile'] == "" and 'size' not in delta.keys():
+                rc, cmd, out, err = update_pool(module, cluster, name, user, user_key, delta, container_image=container_image)
+                if rc == 0:
+                    changed = True
+            else:
+                out = "Pool {} already exists and there is nothing to update.".format(name)
+        else:
+            rc, cmd, out, err = exec_commands(module, create_pool(cluster, name, user, user_key, user_pool_config=user_pool_config, container_image=container_image))
+            if user_pool_config['application']['value'] != None:
+                rc, _, _, _ = exec_commands(module, enable_application_pool(cluster, name, user_pool_config['application']['value'], user, user_key, container_image=container_image))
+            if user_pool_config['min_size']['value'] != None:
+                pass
+            changed = True

elif state == "list":
rc, cmd, out, err = exec_commands(module, list_pools(cluster, name, user, user_key, details, container_image=container_image)) # noqa E501
if rc != 0:
out = "Couldn't list pool(s) present on the cluster"

elif state == "absent":
rc, cmd, out, err = exec_commands(module, check_pool_exist(cluster, name, user, user_key, container_image=container_image))
rc, cmd, out, err = exec_commands(module,
check_pool_exist(cluster,
name, user,
user_key, container_image=container_image))
if rc == 0:
rc, cmd, out, err = exec_commands(module, remove_pool(cluster, name, user, user_key, container_image=container_image))
rc, cmd, out, err = exec_commands(module,
remove_pool(cluster,
name,
user,
user_key,
container_image=
container_image))
changed = True
else:
rc = 0
out = "Skipped, since pool {} doesn't exist".format(name)

exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd, changed=changed) # noqa E501
exit_module(module=module, out=out, rc=rc, cmd=cmd, err=err, startd=startd,
changed=changed)


def main():
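
The net effect of the refactor, in short: rather than backfilling unset parameters from `ceph config get` lookups of `osd_pool_default_*`, the module now leaves them unset and lets the cluster apply its own defaults. A standalone sketch of that idea — `build_flags` is illustrative, not module API:

```python
# Stand-in for the new behavior: only emit CLI flags for values the user set;
# anything left as None is simply not passed, so Ceph's defaults win.
def build_flags(pg_num=None, size=None):
    flags = []
    if pg_num:
        flags += ['--pg_num', str(pg_num)]
    if size:
        flags += ['--size', str(size)]
    return flags

print(build_flags())           # [] -> cluster defaults / autoscaler apply
print(build_flags(pg_num=32))  # ['--pg_num', '32']
```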
4 changes: 2 additions & 2 deletions roles/ceph-client/tasks/create_users_keys.yml
@@ -76,8 +76,8 @@
   ceph_pool:
     name: "{{ item.name }}"
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
-    pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
+    pg_num: "{{ item.pg_num | default(omit) }}"
+    pgp_num: "{{ item.pgp_num | default(omit) }}"
     size: "{{ item.size | default(omit) }}"
     min_size: "{{ item.min_size | default(omit) }}"
     pool_type: "{{ item.type | default('replicated') }}"
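
This role change, like the ones in the files below, swaps the computed `osd_pool_default_*` fallbacks for `default(omit)`, which drops the parameter from the task entirely. A short sketch of what omission looks like from the module side (names are illustrative):

```python
# When a task parameter is omitted, it never reaches the module, so
# module.params yields None and the matching CLI flag is skipped.
params = {'name': 'images'}    # pg_num / pgp_num omitted in the playbook
pg_num = params.get('pg_num')  # -> None
print('--pg_num {}'.format(pg_num) if pg_num else 'pg_num left to Ceph')
```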
3 changes: 1 addition & 2 deletions roles/ceph-iscsi-gw/tasks/common.yml
@@ -56,8 +56,7 @@
   ceph_pool:
     name: "{{ iscsi_pool_name }}"
     cluster: "{{ cluster }}"
-    pg_num: "{{ osd_pool_default_pg_num }}"
-    size: "{{ iscsi_pool_size | default(osd_pool_default_size) }}"
+    size: "{{ iscsi_pool_size | default(omit) }}"
     application: "rbd"
   run_once: true
   delegate_to: "{{ groups[mon_group_name][0] }}"
4 changes: 2 additions & 2 deletions roles/ceph-mds/tasks/create_mds_filesystems.yml
@@ -7,8 +7,8 @@
   ceph_pool:
     name: "{{ item.name }}"
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
-    pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
+    pg_num: "{{ item.pg_num | default(omit) }}"
+    pgp_num: "{{ item.pgp_num | default(omit) }}"
     size: "{{ item.size | default(omit) }}"
     min_size: "{{ item.min_size | default(omit) }}"
     pool_type: "{{ item.type | default('replicated') }}"
4 changes: 2 additions & 2 deletions roles/ceph-osd/tasks/openstack_config.yml
@@ -5,8 +5,8 @@
   ceph_pool:
     name: "{{ item.name }}"
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.pg_num | default(osd_pool_default_pg_num) if not item.0.pg_autoscale_mode | default(False) | bool else 16 }}"
-    pgp_num: "{{ item.pgp_num | default(item.pg_num) | default(osd_pool_default_pg_num) if not item.pg_autoscale_mode | default(False) | bool else omit }}"
+    pg_num: "{{ item.pg_num | default(omit) }}"
+    pgp_num: "{{ item.pgp_num | default(omit) }}"
     size: "{{ item.size | default(omit) }}"
     min_size: "{{ item.min_size | default(omit) }}"
     pool_type: "{{ item.type | default('replicated') }}"
12 changes: 7 additions & 5 deletions roles/ceph-rgw/tasks/rgw_create_pools.yml
@@ -32,8 +32,9 @@
     name: "{{ item.key }}"
     state: present
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
-    pgp_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
+    pg_num: "{{ item.value.pg_num | default(omit) }}"
+    pgp_num: "{{ item.value.pgp_num | default(omit) }}"
+    size: "{{ item.value.size | default(omit) }}"
     pool_type: erasure
     erasure_profile: "{{ item.value.ec_profile }}"
     application: rgw
@@ -51,9 +52,10 @@
     name: "{{ item.key }}"
     state: present
     cluster: "{{ cluster }}"
-    pg_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
-    pgp_num: "{{ item.value.pg_num | default(osd_pool_default_pg_num) }}"
-    size: "{{ item.value.size | default(osd_pool_default_size) }}"
+    pg_num: "{{ item.value.pg_num | default(omit) }}"
+    pgp_num: "{{ item.value.pgp_num | default(omit) }}"
+    size: "{{ item.value.size | default(omit) }}"
+    min_size: "{{ item.value.min_size | default(omit) }}"
     pool_type: replicated
     rule_name: "{{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
     application: rgw
4 changes: 4 additions & 0 deletions tests/library/test_ceph_pool.py
@@ -81,6 +81,10 @@ def setup_method(self):
             'expected_num_objects': 0,
             'fast_read': False,
             'options': {},
+            # 'target_size_ratio' is a key present in the dict above
+            # 'options': {}
+            # see comment in get_pool_details() for more details
+            'target_size_ratio': None,
             'application_metadata': {
                 'rbd': {}
             },
