/
main.yml
178 lines (156 loc) · 6.15 KB
/
main.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
---
# Probe the EC2 instance-metadata hostname, which only resolves from inside
# EC2; the result tells us whether this controller runs inside AWS.
- name: determine if we are inside AWS EC2
  command: 'curl -s http://instance-data.ec2.internal'
  # a failure just means "not inside EC2" -- never fail the play for it
  failed_when: false
  # read-only probe: never report a change
  changed_when: false
  register: ec2_probe
# When controlling from inside EC2 we can (and should) reach instances by
# their private addresses instead of the public ones.
- name: configure EC2 parameters for inventory when controlling from inside EC2
  set_fact:
    origin_ci_aws_destination_variable: 'private_dns_name'
    origin_ci_aws_vpc_destination_variable: 'private_ip_address'
    origin_ci_aws_host_address_variable: 'private_ip'
  # 'is succeeded' is the supported test syntax; the '| succeeded' filter
  # form is deprecated and removed in newer Ansible releases
  when: ec2_probe is succeeded
# The EC2 dynamic inventory script caches AWS API responses; keep that cache
# next to the inventory it serves.
- name: determine where to put the AWS API cache
  set_fact:
    origin_ci_aws_cache_dir: '{{ origin_ci_inventory_dir }}/.ec2_cache'
- name: ensure we have a place to put the AWS API cache
  file:
    path: '{{ origin_ci_aws_cache_dir }}'
    state: directory
# ec2.py is the classic AWS dynamic-inventory script; it must be executable
# so Ansible can run it when enumerating hosts.
- name: place the EC2 dynamic inventory script
  copy:
    src: 'ec2.py'
    dest: '{{ origin_ci_inventory_dir }}/ec2.py'
    mode: 'a+rx'
# ec2.ini is templated (ec2.j2) so it can embed the cache dir and the
# destination-variable facts chosen above.
- name: place the EC2 dynamic inventory configuration
  template:
    src: 'ec2.j2'
    dest: '{{ origin_ci_inventory_dir }}/ec2.ini'
# Static mapping consumed by the inventory tooling to turn EC2 tags into
# Ansible groups.
- name: place the EC2 tag to group mappings
  copy:
    src: 'tag_to_group_mappings.ini'
    dest: '{{ origin_ci_inventory_dir }}/tag_to_group_mappings'
# Find every ready AMI matching the requested OS and image stage, oldest
# first (ascending creationDate) so that the newest image wins below.
- name: determine which AMI to use
  ec2_ami_find:
    region: '{{ origin_ci_aws_region }}'
    ami_tags:
      operating_system: '{{ origin_ci_aws_ami_os }}'
      image_stage: '{{ origin_ci_aws_ami_stage }}'
      ready: 'yes'
    sort: 'creationDate'
    sort_order: ascending
    no_result_action: fail
  register: ami_facts
# set_fact inside a loop keeps the LAST matching item; combined with the
# ascending sort above this selects the newest AMI not tagged for QE.
# (Renamed from a duplicate "determine which AMI to use" for clearer logs.)
- name: choose the newest AMI that is not tagged for QE
  set_fact:
    origin_ci_aws_ami_id: '{{ item.ami_id }}'
  with_items: '{{ ami_facts.results }}'
  when: "'qe' not in item.tags"
# Discover subnets tagged for the master, but only when the caller has not
# already pinned explicit subnet IDs.
- name: determine which subnets are available
  ec2_vpc_subnet_facts:
    region: '{{ origin_ci_aws_region }}'
    # the filter KEY itself is templated ('tag:<key>'), so the whole filters
    # mapping is built as a Jinja string that parses as a YAML flow mapping
    filters: "{ 'tag:{{ origin_ci_aws_identifying_tag_key }}': '{{ origin_ci_aws_master_subnet_tag_value }}' }"
  register: subnet_facts
  when: origin_ci_aws_master_subnet_ids is not defined
- name: determine which subnets to use for the master
  set_fact:
    origin_ci_aws_master_subnet_ids: "{{ subnet_facts.subnets | map(attribute='id') | list }}"
  when: origin_ci_aws_master_subnet_ids is not defined
# Same pattern for security groups: discover by tag unless explicitly given.
- name: determine which security groups are available
  ec2_group_facts:
    region: '{{ origin_ci_aws_region }}'
    filters: "{ 'tag:{{ origin_ci_aws_identifying_tag_key }}': '{{ origin_ci_aws_master_security_group_tag_value }}' }"
  register: security_group_facts
  when: origin_ci_aws_master_security_group_ids is not defined
- name: determine which security group to use
  set_fact:
    origin_ci_aws_master_security_group_ids: "{{ security_group_facts.security_groups | map(attribute='group_id') | list }}"
  when: origin_ci_aws_master_security_group_ids is not defined
# Launch the all-in-one instance (master + node + etcd, per the empty
# openshift_* marker tags) and block until it is running.
- name: provision an AWS EC2 instance
  ec2:
    region: '{{ origin_ci_aws_region }}'
    key_name: '{{ origin_ci_aws_keypair_name }}'
    image: '{{ origin_ci_aws_ami_id }}'
    group_id: '{{ origin_ci_aws_master_security_group_ids }}'
    # only the first discovered subnet is used for the single instance
    vpc_subnet_id: '{{ origin_ci_aws_master_subnet_ids[0] }}'
    instance_type: '{{ origin_ci_aws_master_instance_type }}'
    instance_tags:
      Name: '{{ origin_ci_aws_instance_name }}'
      openshift_master: ''
      openshift_node: ''
      openshift_etcd: ''
    volumes:
      # root volume
      - device_name: '/dev/sda1'
        volume_type: 'gp2'
        volume_size: 25
        delete_on_termination: true
      # secondary volume (e.g. container/docker storage)
      - device_name: '/dev/sdb'
        volume_type: 'gp2'
        volume_size: 25
        delete_on_termination: true
    wait: true
    wait_timeout: 600
  register: ec2
# Pull the reachable address out of the launch result, using whichever
# address field was selected earlier for this control location.
- name: determine the host address
  set_fact:
    origin_ci_aws_host: "{{ ec2.instances[0][origin_ci_aws_host_address_variable] }}"
# Bare AMIs still use the stock 'ec2-user' account; prepared images have
# the 'origin' user baked in. One task with an inline conditional replaces
# the previous pair of mutually-exclusive tasks.
- name: determine the default user to use for SSH
  set_fact:
    origin_ci_aws_ssh_user: "{{ 'ec2-user' if origin_ci_aws_ami_stage == 'bare' else 'origin' }}"
# Persist per-host variables into host_vars/<host>.yml so later plays and
# ad-hoc runs can reach the new instance. NOTE(review): 'content' here is a
# mapping, not a string -- Ansible serializes it when writing the file;
# confirm the emitted format is what downstream consumers parse.
- name: update variables for the host
  copy:
    content:
      origin_ci_aws_host: '{{ origin_ci_aws_host }}'
      origin_ci_aws_hostname: '{{ origin_ci_aws_hostname }}'
      origin_ci_aws_instance_name: '{{ origin_ci_aws_instance_name }}'
      origin_ci_aws_instance_id: '{{ ec2.instance_ids[0] }}'
      origin_ci_aws_ami_os: '{{ origin_ci_aws_ami_os }}'
      origin_ci_aws_ami_stage: '{{ origin_ci_aws_ami_stage }}'
      ansible_ssh_private_key_file: '{{ origin_ci_aws_private_key_path }}'
      ansible_ssh_user: '{{ origin_ci_aws_ssh_user }}'
      # ephemeral CI host: skip host-key checks and keep the session alive
      ansible_ssh_extra_args: '-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o IdentitiesOnly=yes -o ConnectTimeout=0 -o ServerAliveInterval=30'
      # 0 disables Ansible's connection timeout entirely
      ansible_timeout: 0
      openshift_schedulable: '{{ openshift_schedulable }}'
      openshift_node_labels: '{{ openshift_node_labels }}'
    dest: '{{ origin_ci_inventory_dir }}/host_vars/{{ origin_ci_aws_host }}.yml'
# Choose which SSH config file(s) to manage: 'discrete' keeps everything in
# the inventory dir; 'update' additionally edits the user's ~/.ssh/config.
# Any other strategy leaves origin_ci_ssh_config_files undefined, so the
# two tasks intentionally cannot be merged with a simple if/else.
- name: determine where updated SSH configuration should go
  set_fact:
    origin_ci_ssh_config_files: ['{{ origin_ci_inventory_dir }}/.ssh_config']
  when: origin_ci_ssh_config_strategy == 'discrete'
- name: determine where updated SSH configuration should go
  set_fact:
    origin_ci_ssh_config_files: ['{{ origin_ci_inventory_dir }}/.ssh_config', '{{ ansible_env.HOME }}/.ssh/config']
  when: origin_ci_ssh_config_strategy == 'update'
# 'state: touch' creates missing files (and bumps mtime on every run, so
# this task always reports changed).
- name: ensure the targeted SSH configuration file exists
  file:
    path: '{{ item }}'
    state: touch
  with_items: '{{ origin_ci_ssh_config_files }}'
# Write a managed Host stanza for the new instance into each targeted SSH
# config file. The marker embeds the hostname so multiple hosts can coexist
# in one file without clobbering each other's blocks.
- name: update the SSH configuration
  blockinfile:
    dest: '{{ item }}'
    # '|' (literal block) is required here: the previous '>' (folded block)
    # joins equally-indented lines with spaces, collapsing the stanza onto
    # one line and producing an invalid ssh_config entry.
    block: |
      Host {{ origin_ci_aws_hostname }} {{ origin_ci_aws_host }}
      HostName {{ origin_ci_aws_host }}
      User {{ origin_ci_aws_ssh_user }}
      Port 22
      UserKnownHostsFile /dev/null
      StrictHostKeyChecking no
      PasswordAuthentication no
      IdentityFile {{ origin_ci_aws_private_key_path }}
      IdentitiesOnly yes
      LogLevel FATAL
    state: present
    marker: '# {mark} ANSIBLE MANAGED BLOCK FOR HOST {{ origin_ci_aws_hostname }}'
  with_items: '{{ origin_ci_ssh_config_files }}'
# Block until sshd on the new instance is actually answering. Matching the
# SSH protocol banner (search_regex) is stronger than a bare port check:
# the TCP port can accept connections before sshd is ready to authenticate.
- name: wait for SSH to be available
  wait_for:
    host: '{{ origin_ci_aws_host }}'
    port: 22
    delay: 10
    timeout: 600
    state: 'started'
    search_regex: 'OpenSSH'