This repository has been archived by the owner on Feb 29, 2024. It is now read-only.
/
main.yml
461 lines (413 loc) · 16.1 KB
/
main.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
# We're going to try putting files in `local_working_dir`, so make
# sure it exists first.
- name: Ensure local working dir exists
  delegate_to: localhost
  file:
    path: "{{ local_working_dir }}"
    state: directory

# Generate MAC addresses for the undercloud node.
- name: get MACs for the undercloud
  generate_macs:
    nodes:
      - "{{ undercloud_node }}"
    networks: "{{ networks }}"
  register: undercloud_mac_map
# Check if the undercloud volume exists. If not, we call out to
# [fetch_image.yml](fetch_image.yml.html) to download the image.
# `ignore_errors` + `register` turns the virsh failure into a flag
# (`undercloud_vol_check is failed`) consumed by the blocks below.
- name: Check if undercloud volume exists
  command: >
    virsh vol-info --pool '{{ libvirt_volume_pool }}'
    '{{ undercloud_node.name }}.qcow2'
  ignore_errors: true
  changed_when: false
  register: undercloud_vol_check
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
# Everything in this block only runs when the undercloud volume does
# not already exist in the libvirt pool.
- when: undercloud_vol_check is failed
  environment:
    LIBGUESTFS_BACKEND: direct
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
  block:
    # Conditionally include a playbook for all the images specified
    # in options that downloads, cache and extract if tar archived
    # only if the images aren't already in volume pool
    - name: Fetch the images
      include_role:
        name: fetch-images

    # Conditionally include a playbook for all the images specified
    # in options that updates images with the repos provided via the
    # release config.
    - include_tasks: inject_repos.yml
      when: update_images|bool or devmode|bool

    # inject the gating repo generated by ansible-role-tripleo-gate
    - include_tasks: inject_gating_repo.yml
      when: compressed_gating_repo is defined and compressed_gating_repo

    # Converts an overcloud-full.qcow2 into a undercloud.qcow2
    - include_tasks: convert_image.yml
      when: overcloud_as_undercloud|bool or baseos_as_undercloud|bool

    # Update images after we have converted the overcloud-full to an
    # undercloud image when using devmode. This also clones tripleo-ci
    # on the undercloud image.
    - include_tasks: update_image.yml
      when: devmode|bool

    # Inject updated overcloud and ipa images into our converted undercloud
    # image.
    # NOTE: default([]) (not default('')) so the loop gets a list and the
    # `length` test below cannot fail on an undefined variable.
    - name: Inject additional images
      command: >
        virt-customize -a {{ working_dir }}/undercloud.qcow2
        --upload {{ working_dir }}/{{ item }}:/home/{{ undercloud_user }}/{{ item }}
        --run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/{{ item }}'
      changed_when: true
      with_items: "{{ inject_images | default([]) }}"
      when:
        - overcloud_as_undercloud|bool or use_external_images|bool
        - inject_images | default([]) | length > 0
# This copies the `instackenv.json` configuration file that we
# generated in the overcloud setup role to the undercloud host.
- name: Copy instackenv.json to appliance
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --upload {{ working_dir }}/instackenv.json:/home/{{ undercloud_user }}/instackenv.json
    --run-command 'chown {{ undercloud_user }}:{{ undercloud_user }} /home/{{ undercloud_user }}/instackenv.json'
  when: inject_instackenv|bool
# Copy the undercloud public key to the virthost, because we're going
# to inject it into the undercloud image in the next task.
- name: Copy undercloud ssh public key to working dir
  copy:
    src: "{{ undercloud_key }}.pub"
    dest: "{{ working_dir }}/id_rsa_undercloud.pub"

# Copy the virt host private key to `$HOME/.ssh/id_rsa_virt_power` for
# VirtualBMC be able to access the hypervisor where the VMs are located
- name: Copy virt host ssh private key to working dir
  when: release not in ['newton']
  copy:
    src: "{{ virt_power_key }}"
    dest: "{{ working_dir }}/id_rsa_virt_power"

# When using qemu:///system, the vbmc will need to ssh back to the virthost
# as the root user to perform power operations.
# NOTE(review): the file lookup raises if the .pub file is missing — the
# trailing default('') only covers an empty lookup result, not a missing
# file. Confirm the key exists whenever libvirt_uri is qemu:///system.
- name: Add virt power key to root authorized keys if using qemu:///system
  authorized_key:
    user: root
    key: "{{ lookup('file', virt_power_key|quote + '.pub')|default('') }}"
  when: libvirt_uri == "qemu:///system"
  become: true
# Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
# and `undercloud_user` user on the undercloud.
- name: Inject undercloud ssh public key to appliance
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --mkdir {{ item.homedir }}/.ssh/
    --upload '{{ working_dir }}/id_rsa_undercloud.pub:{{ item.homedir }}/.ssh/authorized_keys'
    --run-command 'chown -R {{ item.owner }}:{{ item.group }} {{ item.homedir }}/.ssh'
    --run-command 'chmod 0700 {{ item.homedir }}/.ssh'
    --run-command 'chmod 0600 {{ item.homedir }}/.ssh/authorized_keys'
  with_items:
    - homedir: /root
      owner: root
      group: root
    - homedir: '/home/{{ undercloud_user }}'
      owner: '{{ undercloud_user }}'
      group: '{{ undercloud_user }}'

# This copies the `id_rsa_virt_power` private key that we generated
# in the overcloud setup role to the undercloud host to be used by
# VirtualBMC+libvirt to access the virthost.
- name: Copy id_rsa_virt_power to appliance
  when: release not in ['newton']
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --upload '{{ working_dir }}/id_rsa_virt_power:/root/.ssh/id_rsa_virt_power'
    --run-command 'chown root:root /root/.ssh/id_rsa_virt_power'
    --run-command 'chmod 0600 /root/.ssh/id_rsa_virt_power'
# Render the optional user-supplied customization script onto the
# virthost. mode is quoted: a bare 0755 is parsed as the octal
# integer 493 by YAML, not the permission string Ansible expects.
- name: Create undercloud customize script
  template:
    src: "{{ undercloud_customize_script }}"
    dest: "{{ working_dir }}/undercloud-customize.sh"
    mode: "0755"
  when: undercloud_customize_script is defined

# This allows to run a customization script on the
# undercloud image, to cover any extra needs.
- name: Perform extra undercloud customizations
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --run '{{ working_dir }}/undercloud-customize.sh'
  when: undercloud_customize_script is defined

# This allows to run a customization script on the
# overcloud image, to cover any extra needs.
- name: Perform extra overcloud customizations
  include_tasks: customize_overcloud.yml
  when: overcloud_customize_script is defined

# Perform an SELinux relabel on the undercloud image to avoid problems
# caused by bad labelling, since by default the undercloud runs in
# enforcing mode.
- name: Perform selinux relabel on undercloud image
  command: >
    virt-customize -a {{ working_dir }}/undercloud.qcow2
    --selinux-relabel
# NOTE(trown) Nested blocks do not seem to work as expected so instead using
# conditionals with AND to simulate the same thing.
# Resize the undercloud image if it was not converted from an overcloud
# image
- when:
    - undercloud_vol_check is failed
    - not overcloud_as_undercloud|bool
  block:
    - name: >
        Determine if the undercloud image is a whole disk image
        so we can resize it appropriately
      command: >
        virt-filesystems -a {{ working_dir }}/undercloud.qcow2
      environment:
        LIBGUESTFS_BACKEND: direct
      register: undercloud_partitions

- when:
    - undercloud_vol_check is failed
    - not overcloud_as_undercloud|bool
    - undercloud_partitions.stdout == '/dev/sda1'
  block:
    # Handle the resize for the whole disk image case
    - name: Resize undercloud image (create target image)
      command: >
        qemu-img create -f qcow2 -o preallocation=off
        '{{ working_dir }}/undercloud-resized.qcow2'
        '{{ flavors[undercloud_node.flavor].disk }}G'
    - name: Resize undercloud image (call virt-resize)
      command: >
        virt-resize --expand /dev/sda1
        '{{ working_dir }}/undercloud.qcow2'
        '{{ working_dir }}/undercloud-resized.qcow2'
      environment:
        LIBGUESTFS_BACKEND: direct
        LIBGUESTFS_DEBUG: 1
        LIBGUESTFS_TRACE: 1
    - name: Rename resized image to original name
      command: >
        mv -f '{{ working_dir }}/undercloud-resized.qcow2'
        '{{ working_dir }}/undercloud.qcow2'

- when:
    - undercloud_vol_check is failed
    - not overcloud_as_undercloud|bool
    - undercloud_partitions.stdout == '/dev/sda'
  block:
    # Handle the resize for the partition image case
    - name: Resize undercloud image (expand the image)
      command: >
        qemu-img resize
        '{{ working_dir }}/undercloud.qcow2'
        '{{ flavors[undercloud_node.flavor].disk }}G'
    # The error message previously referenced the unset variable
    # $FSTYPE; the shell variable defined above is FS_TYPE.
    - name: Resize undercloud image (expand the FS)
      command: >
        virt-customize -a '{{ working_dir }}/undercloud.qcow2'
        --run-command 'FS_TYPE=`findmnt -o FSTYPE -fn /`;
        if [ "$FS_TYPE" = "xfs" ]; then xfs_growfs /;
        elif [ "$FS_TYPE" = "ext4" ]; then resize2fs /dev/sda;
        else echo "ERROR: Unknown filesystem $FS_TYPE, cannot resize.";
        exit 1; fi'
      environment:
        LIBGUESTFS_BACKEND: direct
        LIBGUESTFS_DEBUG: 1
        LIBGUESTFS_TRACE: 1
# Build the environment dict used by later libguestfs/virsh tasks;
# the direct backend is only needed when running as root.
- name: Set libvirt environment when using root to run tasks
  set_fact:
    libvirt_environment:
      LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
      LIBGUESTFS_BACKEND: "direct"
    cacheable: true
  when: ssh_user == "root"

- name: Set libvirt environment when not using root to run tasks
  set_fact:
    libvirt_environment:
      LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
    cacheable: true
  when: ssh_user != "root"

# NOTE(trown) We use the overcloud-full initramfs and kernel as DIB
# seems a bit smarter about extracting them than virt-get-kernel and
# the partition image is simply a converted overcloud-full
- name: Extract the kernel and initramfs from the undercloud image
  command: >
    virt-copy-out -a '{{ working_dir }}/undercloud.qcow2'
    '/home/{{ undercloud_user }}/overcloud-full.vmlinuz'
    '/home/{{ undercloud_user }}/overcloud-full.initrd'
    '{{ working_dir }}'
  environment: "{{ libvirt_environment }}"
  when: not undercloud_use_custom_boot_images|bool
- when:
    - not undercloud_use_custom_boot_images|bool
    - not overcloud_as_undercloud|bool
  block:
    # NOTE(ykarel) This is required to get the undercloud specific
    # kernel when not using overcloud_as_undercloud.
    - name: Extract the kernel and initramfs from the undercloud image
      command: >
        virt-get-kernel -a '{{ working_dir }}/undercloud.qcow2' --unversioned-names
        --output '{{ working_dir }}'
      environment:
        LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

    # NOTE(trown) The undercloudvm template expects this to be
    # named overcloud-full.vmlinuz. We can update the devmode case
    # to not require this step
    - name: rename undercloud kernel
      command: >
        mv '{{ working_dir }}/vmlinuz'
        '{{ working_dir }}/overcloud-full.vmlinuz'

    # NOTE(trown) The undercloudvm template expects this to be
    # named overcloud-full.initrd. We can update the devmode case
    # to not require this step
    - name: rename undercloud initramfs
      command: >
        mv '{{ working_dir }}/initramfs'
        '{{ working_dir }}/overcloud-full.initrd'

    # NOTE(trown): This is a bit of a hack to get the undercloud vm
    # template to use the external kernel and initrd. We should
    # instead use a different var for this and set it in the devmode
    # case as well.
    - name: Set overcloud_as_undercloud to true
      set_fact:
        overcloud_as_undercloud: true
        cacheable: true
- when: undercloud_vol_check is failed
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
  block:
    # Create a libvirt volume and upload the undercloud image to
    # libvirt.
    - name: Create undercloud volume
      command: >
        virsh vol-create-as {{ libvirt_volume_pool }}
        {{ undercloud_node.name }}.qcow2
        {{ flavors[undercloud_node.flavor].disk }}G --format qcow2
    # Run async with polling: the upload of a multi-GB image can be
    # slow; 600s cap with a 10s poll interval.
    - name: Upload undercloud volume to storage pool
      command: >
        virsh -k 0 vol-upload --pool '{{ libvirt_volume_pool }}'
        '{{ undercloud_node.name }}.qcow2'
        '{{ working_dir }}/undercloud.qcow2'
      async: 600
      poll: 10

# Define (but do not start) the undercloud virtual machine.
- name: Define undercloud vm
  virt:
    name: "{{ undercloud_node.name }}"
    command: define
    xml: "{{ lookup('template', 'undercloudvm.xml.j2') }}"
    uri: "{{ libvirt_uri }}"

# Make sure we can read the image file after the copy
- name: Ensure file permissions if root used as task runner
  file:
    path: "{{ working_dir }}"
    owner: "{{ non_root_user }}"
    group: "{{ non_root_user }}"
    mode: "a+x"
    recurse: true
    state: 'directory'
  when: non_root_chown|bool
# This block only runs when ansible version < 2.3, where `autostart`
# was not accepted together with `state` on the virt module.
# NOTE(review): the `version_compare` test was renamed to `version` in
# Ansible 2.5 and the old alias later removed — confirm the minimum
# Ansible version before reusing this role on modern Ansible.
- block:
    # Start the undercloud virtual machine.
    - name: Start undercloud vm
      virt:
        name: "{{ undercloud_node.name }}"
        command: start
        state: running
        uri: "{{ libvirt_uri }}"
    # Configure the undercloud virtual machine to be
    # automatically started at boot.
    - name: Configure undercloud vm to start at virthost boot
      virt:
        name: "{{ undercloud_node.name }}"
        command: autostart
        uri: "{{ libvirt_uri }}"
  when: ansible_version.full|version_compare('2.3','<')

# Start the undercloud virtual machine and make it
# automatically start for ansible-version >= 2.3
- name: Start undercloud vm
  virt:
    name: "{{ undercloud_node.name }}"
    command: start
    autostart: true
    state: running
    uri: "{{ libvirt_uri }}"
  when: ansible_version.full|version_compare('2.3','>=')
# Get the ip address of the undercloud. This will retry several times
# (`undercloud_ip_retries`) until the undercloud is ready. The script
# works by getting the MAC address of the first undercloud interface,
# and then looking that up in the kernel ARP table.
- name: Get undercloud vm ip address
  script: "get-undercloud-ip.sh {{ undercloud_node.name }}"
  register: undercloud_vm_ip_result
  until: undercloud_vm_ip_result is success
  retries: "{{ undercloud_ip_retries }}"
  delay: 10
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- name: Set_fact for undercloud ip
  set_fact:
    undercloud_ip: "{{ undercloud_vm_ip_result.stdout_lines[0] }}"
    cacheable: true

- name: Wait until ssh is available on undercloud node
  wait_for:
    host: "{{ undercloud_ip }}"
    state: started
    port: 22
    timeout: 600

# Add the undercloud to the in-memory inventory.
- name: Add undercloud vm to inventory
  add_host:
    name: undercloud
    groups: undercloud
    ansible_host: undercloud
    ansible_fqdn: undercloud
    ansible_user: '{{ undercloud_user }}'
    ansible_private_key_file: "{{ undercloud_key }}"
    ansible_ssh_extra_args: '-F "{{ local_working_dir }}/ssh.config.ansible"'
    undercloud_ip: "{{ undercloud_ip }}"

# Render the ssh config used by the extra args above.
- name: Generate ssh configuration
  delegate_to: localhost
  template:
    src: ssh.config.j2
    dest: "{{ local_working_dir }}/ssh.config.ansible"
- when: enable_port_forward_for_tripleo_ui|bool
  block:
    # TO-DO weshayutin
    # In the upcoming release of ansible 2.4 this should be moved to
    # iptables_raw
    # - name: ensure the required tcp ports are open on the virthost
    - name: configure iptables
      iptables:
        table: filter
        chain: INPUT
        action: insert
        protocol: tcp
        match: tcp
        ctstate: NEW
        jump: ACCEPT
        destination_port: "{{ item }}"
      become: true
      with_items:
        - 6385
        - 5000
        - 5050
        - 8004
        - 8080
        - 9000
        - 8989
        - 8774
        - 3000
        - 8181
        - 8443
        - 443
    # mode is quoted: bare 0644 would be parsed as octal int 420.
    - name: Create ssh tunnel systemd service
      template:
        src: "{{ ssh_tunnel_service_file }}"
        dest: "/etc/systemd/system/ssh-tunnel.service"
        mode: "0644"
      become: true
    - name: reload the systemctl daemon after file update
      shell: systemctl daemon-reload
      become: true
      tags:
        - skip_ansible_lint
    - name: Enable ssh tunnel service
      service:
        name: ssh-tunnel
        enabled: true
        state: restarted
        become: true