Skip to content

Commit

Permalink
ceph-rgw: allow specifying crush rule on pool
Browse files Browse the repository at this point in the history
We already support specifying a custom crush rule during pool creation
in the ceph-osd role but not in the ceph-rgw role.
This patch adds the missing code to implement this feature.
Note this is only available for replicated pools, not erasure-coded
pools. The rule must also exist prior to the pool creation.

Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1855439

Signed-off-by: Dimitri Savineau <dsavinea@redhat.com>
(cherry picked from commit cb8f023)
  • Loading branch information
dsavineau authored and guits committed Aug 17, 2020
1 parent 8ebe813 commit e9c6028
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 1 deletion.
4 changes: 4 additions & 0 deletions group_vars/rgws.yml.sample
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,9 @@ dummy:
# each pool.
# - If a pool's type is 'ec', k and m values must be set via
# the ec_k, and ec_m variables.
# - The rule_name key can be used with a specific crush rule value (must exist).
# If the key doesn't exist it falls back to the default replicated_rule.
# This only works for replicated pool type not erasure.

#rgw_create_pools:
# "{{ rgw_zone }}.rgw.buckets.data":
Expand All @@ -74,6 +77,7 @@ dummy:
# pg_num: 8
# size: 3
# type: replicated
# rule_name: foo


##########
Expand Down
4 changes: 4 additions & 0 deletions roles/ceph-rgw/defaults/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,9 @@ copy_admin_key: false
# each pool.
# - If a pool's type is 'ec', k and m values must be set via
# the ec_k, and ec_m variables.
# - The rule_name key can be used with a specific crush rule value (must exist).
# If the key doesn't exist it falls back to the default replicated_rule.
# This only works for replicated pool type not erasure.

#rgw_create_pools:
# "{{ rgw_zone }}.rgw.buckets.data":
Expand All @@ -66,6 +69,7 @@ copy_admin_key: false
# pg_num: 8
# size: 3
# type: replicated
# rule_name: foo


##########
Expand Down
15 changes: 14 additions & 1 deletion roles/ceph-rgw/tasks/rgw_create_pools.yml
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@
- item.value.type == 'ec'

- name: create replicated pools for rgw
command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated"
command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool create {{ item.key }} {{ item.value.pg_num | default(osd_pool_default_pg_num) }} replicated {{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
changed_when: false
register: result
retries: 60
Expand All @@ -60,6 +60,19 @@
- item.value.type is not defined or item.value.type == 'replicated'
- item.value.size | default(osd_pool_default_size) != ceph_osd_pool_default_size

- name: customize replicated pool crush_rule
command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool set {{ item.key }} crush_rule {{ item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name) }}"
register: result
retries: 60
delay: 3
until: result is succeeded
loop: "{{ rgw_create_pools | dict2items }}"
delegate_to: "{{ groups[mon_group_name][0] }}"
changed_when: false
when:
- item.value.type is not defined or item.value.type == 'replicated'
- item.value.rule_name | default(ceph_osd_pool_default_crush_rule_name)

- name: set the rgw_create_pools pools application to rgw
command: "{{ container_exec_cmd }} ceph --connect-timeout 10 --cluster {{ cluster }} osd pool application enable {{ item.key }} rgw"
register: result
Expand Down

0 comments on commit e9c6028

Please sign in to comment.