Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add new container scenario #3308

Merged
merged 23 commits into from Nov 27, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
e7035d2
ceph_key: rework container support
leseb Nov 16, 2018
ba23c67
Add new container scenario
leseb Nov 8, 2018
686edcf
client: do not use a dummy container anymore
leseb Nov 17, 2018
76896a5
ceph_key: use the right container runtime binary
leseb Nov 18, 2018
1b5fdd5
ceph_key: remove set-uid support
leseb Nov 16, 2018
3079fb1
test_build_key_path_bootstrap_osd: fix
leseb Nov 16, 2018
94c2276
test_lookup_ceph_initial_entities: fix
leseb Nov 16, 2018
7c06f25
ceph_key: fix rstrip for python 3
leseb Nov 19, 2018
caacdd2
testinfra: add support for podman
leseb Nov 19, 2018
40a5247
testinfra: linting
leseb Nov 19, 2018
8ca0423
iscsi: expose /dev/log in the container
leseb Nov 19, 2018
bebb746
site: choose the right container runtime binary
leseb Nov 19, 2018
f83dcf2
infra playbooks: use the right container binary
leseb Nov 19, 2018
2b3b43a
rolling_update: update ceph_key task for container
leseb Nov 19, 2018
fba39be
site: symlink site-docker to site-container
leseb Nov 20, 2018
70e00fa
ceph-defaults: use podman on Fedora only
leseb Nov 20, 2018
2cc4e99
defaults: declare container_binary
leseb Nov 20, 2018
a871b27
defaults: play set_radosgw_address.yml only on rgw nodes
guits Nov 22, 2018
aa90e7a
shrink-osd: add missing CEPH_BINARY
leseb Nov 22, 2018
6074a3a
container-common: remove leftover
leseb Nov 26, 2018
c2a9605
fix template generation
leseb Nov 26, 2018
575e8b4
ceph_key: fix after rebase
leseb Nov 27, 2018
0b1aa7e
ceph-osd fix batch with container binary
leseb Nov 27, 2018
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 2 additions & 0 deletions group_vars/all.yml.sample
Expand Up @@ -543,6 +543,8 @@ dummy:
#ceph_docker_enable_centos_extra_repo: false
#ceph_docker_on_openstack: false
#containerized_deployment: False
#container_binary:


############
# KV store #
Expand Down
2 changes: 2 additions & 0 deletions group_vars/rhcs.yml.sample
Expand Up @@ -543,6 +543,8 @@ ceph_docker_registry: "registry.access.redhat.com/rhceph/"
#ceph_docker_enable_centos_extra_repo: false
#ceph_docker_on_openstack: false
#containerized_deployment: False
#container_binary:


############
# KV store #
Expand Down
26 changes: 18 additions & 8 deletions infrastructure-playbooks/rolling_update.yml
Expand Up @@ -177,7 +177,8 @@
- not containerized_deployment

- name: container | waiting for the containerized monitor to join the quorum...
command: docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
register: ceph_health_raw
until: >
hostvars[mon_host]['ansible_hostname'] in (ceph_health_raw.stdout | from_json)["quorum_names"] or
Expand Down Expand Up @@ -214,8 +215,8 @@
when: not containerized_deployment

- name: set containerized osd flags
command: |
docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd set {{ item }}
command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }} ceph --cluster {{ cluster }} osd set {{ item }}
with_items:
- noout
- norebalance
Expand All @@ -232,6 +233,10 @@
serial: 1
become: True
tasks:
- import_role:
name: ceph-defaults
private: false

- name: non container - get current fsid
command: "ceph --cluster {{ cluster }} fsid"
register: cluster_uuid_non_container
Expand All @@ -240,7 +245,8 @@
- not containerized_deployment

- name: container - get current fsid
command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} fsid"
command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} fsid
register: cluster_uuid_container
delegate_to: "{{ groups[mon_group_name][0] }}"
when:
Expand Down Expand Up @@ -275,7 +281,10 @@
osd: allow *
mds: allow *
cluster: "{{ cluster }}"
containerized: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
environment:
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_UID: "{{ ceph_uid if containerized_deployment else 'ceph' }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
when:
- containerized_deployment
- cephx
Expand Down Expand Up @@ -425,7 +434,7 @@

- name: set_fact docker_exec_cmd_osd
set_fact:
docker_exec_cmd_update_osd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment

Expand Down Expand Up @@ -481,7 +490,7 @@

- name: set_fact docker_exec_cmd_osd
set_fact:
docker_exec_cmd_update_osd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
docker_exec_cmd_update_osd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment

Expand Down Expand Up @@ -834,9 +843,10 @@
- import_role:
name: ceph-defaults
private: false

- name: set_fact docker_exec_cmd_status
set_fact:
docker_exec_cmd_status: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
docker_exec_cmd_status: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when:
- containerized_deployment

Expand Down
4 changes: 2 additions & 2 deletions infrastructure-playbooks/shrink-mon.yml
Expand Up @@ -79,9 +79,9 @@
when:
- item != mon_to_kill

- name: set_fact docker_exec_cmd build docker exec command (containerized)
- name: "set_fact docker_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[mon_host]['ansible_hostname'] }}"
when: containerized_deployment

- name: exit playbook, if can not connect to the cluster
Expand Down
12 changes: 6 additions & 6 deletions infrastructure-playbooks/shrink-osd-ceph-disk.yml
Expand Up @@ -57,9 +57,9 @@
private: false

# post-task for preceding import
- name: set_fact docker_exec_cmd build docker exec command (containerized)
- name: "set_fact docker_exec_cmd build {{ container_binary }} exec command (containerized)"
set_fact:
docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment

- name: exit playbook, if can not connect to the cluster
Expand Down Expand Up @@ -102,7 +102,7 @@
# NOTE(leseb): using '>' is the only way I could have the command working
- name: find osd device based on the id
shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
list | awk -v pattern=osd.{{ item.0 }} '$0 ~ pattern {print $1}'
with_together:
Expand All @@ -115,7 +115,7 @@

- name: find osd dedicated devices - container
shell: >
docker run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ container_binary }} run --privileged=true -v /dev:/dev --entrypoint /usr/sbin/ceph-disk
{{ ceph_docker_registry }}/{{ ceph_docker_image }}:{{ ceph_docker_image_tag }}
list | grep osd.{{ item.0 }} | grep -Eo '/dev/([hsv]d[a-z]{1,2})[0-9]{1,2}|/dev/nvme[0-9]n[0-9]p[0-9]'
with_together:
Expand Down Expand Up @@ -162,7 +162,7 @@

- name: zap ceph osd disks
shell: |
docker run --rm \
{{ container_binary }} run --rm \
--privileged=true \
--name ceph-osd-zap-{{ hostvars[item.1]['ansible_hostname'] }}-{{ item.0.stdout }} \
-v /dev/:/dev/ \
Expand All @@ -184,7 +184,7 @@
pkname=$(lsblk --nodeps -no PKNAME "${osd}");
then
echo zapping ceph osd partitions "${osd}";
docker run --rm \
{{ container_binary }} run --rm \
--privileged=true \
--name ceph-osd-zap-{{ hostvars[item.0]['ansible_hostname'] }}-$(basename "${osd}") \
-v /dev/:/dev/ \
Expand Down
4 changes: 3 additions & 1 deletion infrastructure-playbooks/shrink-osd.yml
Expand Up @@ -63,7 +63,7 @@
# post-task for preceding import
- name: set_fact docker_exec_cmd build docker exec command (containerized)
set_fact:
docker_exec_cmd: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
docker_exec_cmd: "{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }}"
when: containerized_deployment

- name: exit playbook, if can not connect to the cluster
Expand Down Expand Up @@ -91,6 +91,7 @@
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
with_items: "{{ osd_hosts }}"
delegate_to: "{{ item }}"
register: osd_volumes
Expand Down Expand Up @@ -135,6 +136,7 @@
environment:
CEPH_VOLUME_DEBUG: 1
CEPH_CONTAINER_IMAGE: "{{ ceph_docker_registry + '/' + ceph_docker_image + ':' + ceph_docker_image_tag if containerized_deployment else None }}"
CEPH_CONTAINER_BINARY: "{{ container_binary }}"
delegate_to: "{{ item.host }}"
with_items: "{{ osd_host_volumes_to_kill_non_container }}"

Expand Down
Expand Up @@ -26,7 +26,7 @@
when: ireallymeanit != 'yes'


- name: make sure docker is present and started
- name: gather facts

hosts:
- "{{ mon_group_name|default('mons') }}"
Expand Down Expand Up @@ -210,6 +210,10 @@
become: true
tasks:

- import_role:
name: ceph-defaults
private: false

# pre-tasks for following importing
- name: collect running osds and ceph-disk unit(s)
shell: |
Expand Down Expand Up @@ -274,7 +278,7 @@

- name: check if containerized osds are already running
command: >
docker ps --filter='name=ceph-osd'
{{ container_binary }} ps --filter='name=ceph-osd'
changed_when: false
failed_when: false
register: osd_running
Expand Down Expand Up @@ -313,12 +317,14 @@

# post-task for preceding import -
- name: get num_pgs
command: docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster "{{ cluster }}" -s --format json
command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json
register: ceph_pgs
delegate_to: "{{ groups[mon_group_name][0] }}"

- name: container - waiting for clean pgs...
command: "docker exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json"
command: >
{{ container_binary }} exec ceph-mon-{{ hostvars[groups[mon_group_name][0]]['ansible_hostname'] }} ceph --cluster {{ cluster }} -s --format json
register: ceph_health_post
until: >
(((ceph_health_post.stdout | from_json).pgmap.pgs_by_state | length) > 0)
Expand Down