This repository has been archived by the owner on Feb 29, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 79
/
main.yml
167 lines (140 loc) · 5.45 KB
/
main.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
# This directory is used to store a variety of files generated
# during the deploy process (ansible inventory, ssh configuration, ssh
# key files, etc)
local_working_dir: "{{ lookup('env', 'HOME') }}/.quickstart"
# this will define the user that ansible will connect with
ansible_user: stack
# This is where we store generated artifacts (like ssh config files,
# keys, etc). Defaults to the ansible_user's home directory.
working_dir: "/home/{{ ansible_user }}"
# This is a directory on the virthost in which we store the downloaded
# undercloud image.
image_cache_dir: "/var/cache/tripleo-quickstart/images"
# This determines whether to download a pre-built undercloud.qcow2 or
# whether to instead use an overcloud-full.qcow2 and convert it on
# the fly. The default is to use a pre-built undercloud.qcow2.
overcloud_as_undercloud: false
# These defaults are used if there are no flavor-specific
# overrides configured. Units: disk in GB, memory in MB.
default_disk: 50
default_memory: 8192
# Setting controller and compute nodes to 2 vcpus, so we have more processing
# power to run the tests
default_vcpu: 2
# The undercloud needs more than the default amount of memory
# and disk.
undercloud_memory: 12288
undercloud_disk: 50
# Setting undercloud to 6 cpus, so we continue to have around 10 vcpus
# by default
undercloud_vcpu: 6
# The default deployment has flavors for compute, controllers, ceph
# nodes, and undercloud nodes. All flavors defined in the `flavors`
# key will be created with an `oooq_` prefix to avoid conflicts with
# the pre-defined flavors created by `openstack install undercloud`.
# Each flavor may be overridden individually (e.g. `compute_memory`);
# otherwise the `default_*` values above apply.
flavors:
  compute:
    memory: '{{compute_memory|default(default_memory)}}'
    disk: '{{compute_disk|default(default_disk)}}'
    vcpu: '{{compute_vcpu|default(default_vcpu)}}'
  control:
    memory: '{{control_memory|default(default_memory)}}'
    disk: '{{control_disk|default(default_disk)}}'
    vcpu: '{{control_vcpu|default(default_vcpu)}}'
  ceph:
    memory: '{{ceph_memory|default(default_memory)}}'
    disk: '{{ceph_disk|default(default_disk)}}'
    vcpu: '{{ceph_vcpu|default(default_vcpu)}}'
  blockstorage:
    memory: '{{block_memory|default(default_memory)}}'
    disk: '{{block_disk|default(default_disk)}}'
    vcpu: '{{block_vcpu|default(default_vcpu)}}'
  objectstorage:
    memory: '{{objectstorage_memory|default(default_memory)}}'
    disk: '{{objectstorage_disk|default(default_disk)}}'
    vcpu: '{{objectstorage_vcpu|default(default_vcpu)}}'
    extradisks: true
  undercloud:
    # The undercloud_* variables are defined unconditionally above, so a
    # default() fallback is unnecessary. The previous
    # '|default(undercloud_memory)' was a self-referential no-op (the
    # filter's fallback was the same variable being filtered).
    memory: '{{undercloud_memory}}'
    disk: '{{undercloud_disk}}'
    vcpu: '{{undercloud_vcpu}}'
# We create a single undercloud node.
undercloud_node:
  name: undercloud
  flavor: undercloud
# The overcloud will have three controllers, one compute node,
# and a ceph storage node. Each entry references a flavor defined
# in the `flavors` mapping above.
overcloud_nodes:
  - name: control_0
    flavor: control
  - name: control_1
    flavor: control
  - name: control_2
    flavor: control
  - name: compute_0
    flavor: compute
  - name: ceph_0
    flavor: ceph
# Describe our virtual networks. These networks will be attached to
# the undercloud node and to the overcloud nodes (except for the
# "external" network) in the order in which they are defined. The
# playbooks expect to find both an "external" network and a
# "overcloud" network.
external_network_cidr: 192.168.23.0/24
networks:
  # NOTE(review): nthhost/ipaddr are the netaddr-backed Ansible filters;
  # they require python-netaddr on the control node.
  - name: external
    bridge: brext
    forward_mode: nat
    # Gateway address: first host of the external CIDR.
    address: "{{ external_network_cidr|nthhost(1) }}"
    netmask: "{{ external_network_cidr|ipaddr('netmask') }}"
    # DHCP pool: hosts 10 through 50 of the external CIDR.
    dhcp_range:
      - "{{ external_network_cidr|nthhost(10) }}"
      - "{{ external_network_cidr|nthhost(50) }}"
    nat_port_range:
      - 1024
      - 65535
  - name: overcloud
    bridge: brovc
# Enable network isolation with single-nic-vlans for virtualized deployments
undercloud_external_network_cidr: 10.0.0.1/24
undercloud_external_network_cidr6: 2001:db8:fd00:1000::1/64
undercloud_networks:
  external:
    # IPv4/IPv6 addresses are derived from the CIDRs above (first host).
    address: "{{ undercloud_external_network_cidr|nthhost(1) }}"
    netmask: "{{ undercloud_external_network_cidr|ipaddr('netmask') }}"
    address6: "{{ undercloud_external_network_cidr6|nthhost(1) }}"
    device_type: ovs
    type: OVSIntPort
    ovs_bridge: br-ctlplane
    # Double-quoting is deliberate: the consumer expects the literal
    # string "tag=10" including quotes.
    ovs_options: '"tag=10"'
    tag: 10
network_isolation: true
enable_pacemaker: false
ipv6: false
# This enables the deployment of the overcloud with SSL.
ssl_overcloud: false
# If ssl_overcloud is True, then the overcloud public vip must be explicitly
# specified as part of the deployment configuration. Note that the VIP used has
# to be set accordingly with the `undercloud_external_network_cidr`.
overcloud_public_vip: 10.0.0.5
overcloud_public_vip6: 2001:db8:fd00:1000::14
# Set this to `true` if you want your undercloud and overcloud vms to
# have a VNC console available.
enable_vnc_console: false
# We have some version specific behaviors, so we need a release variable
#
# TODO(trown): It would be better to write a release into the image itself
# and set this variable from there.
release: newton
# This option controls whether or not to use the repo setup for TripleO
# development. Since this requires some other options to be functional,
# it is best to specify release master-tripleo-ci rather than
# just flipping this to true.
devmode: false
# Tuned profile set while provisioning remote hosts to optimize for deployment
tuned_profile: 'virtual-host'
# This is the name of the user the `provision` role will create on the
# remote host.
non_root_user: stack
# Path for volume storage (libvirt storage pool location), under the
# remote working directory defined above.
libvirt_volume_path: "{{ working_dir }}/pool"