forked from hagleitn/Openstack-Devstack2
-
Notifications
You must be signed in to change notification settings - Fork 0
/
anvil.ini
400 lines (298 loc) · 13.8 KB
/
anvil.ini
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
# Anvil's local configuration
# When a value looks like a bash variable + default then it is parsed like a bash
# variable and will perform similar lookups. Ie ${SQL_HOST:-localhost} will
# look in environment variable SQL_HOST and if that does not exist then
# localhost will be used instead.
#
# We also allow for simple referencing of other variables, similar to bash
# variables to occur when a keys value like the following format are found:
#
# web_host = ${RUNNING_HOST:-http://$(X:Y)}
#
# For this example, the RUNNING_HOST environment variable will be referenced.
# If it is not found (no value exists), then "http://$(X:Y)" will be
# examined and found to contain an expression (denoted by "$(X:Y)").
#
# Then in that expression there are components of the format "X:Y" which the
# configuration class will attempt to resolve those values by looking up in the
# configuration file for a value in section "X" with option "Y" and replacing the
# retrieved value for what was previously "$(X:Y)". Multiple of these "expressions"
# are allowed and each will have its expression "text" replaced with the resolved
# value before the final value for the original variable is determined.
#
# For this example if the section X with option Y contained value "1.2.3.4" then
# the final string would be "http://1.2.3.4" which would then be cached as the value
# for option web_host.
[DEFAULT]
# Which run type to use.
# For forking/daemonizing mode use: anvil.runners.fork:ForkRunner
# For screen mode use: anvil.runners.screen:ScreenRunner
# For upstart mode use: anvil.runners.upstart:UpstartRunner
run_type = ${RUN_TYPE:-anvil.runners.fork:ForkRunner}
# How many seconds to wait until a service comes online before using it.
# For example, before uploading to glance we need keystone and glance to be online.
# Sometimes this takes 5 to 10 seconds to start these up....
service_wait_seconds = ${SERVICE_WAIT_SECONDS:-5}
[upstart]
# These flags are used for starting components under upstart (if default/run_type is upstart)
respawn = 1
# Note that we support component start/stop and "all"
# Start and stop, this is the "all" event name
start_event = all_os_start
stop_event = all_os_stop
[host]
# Set api host endpoint
# If this is empty in code we will try to determine your network ip.
ip = ${HOST_IP:-}
[rabbit]
# Where is rabbit located?
rabbit_host = ${RABBIT_HOST:-$(host:ip)}
# Which rabbit user should be used
rabbit_userid = ${RABBIT_USER:-guest}
[qpid]
# Where is qpid located?
qpid_hostname = ${QPID_HOST:-$(host:ip)}
# Which qpid user should be used
qpid_username = ${QPID_USER:-}
[db]
# Where you db is located at and how to access it.
sql_host = ${SQL_HOST:-localhost}
sql_user = ${SQL_USER:-root}
port = ${SQL_PORT:-3306}
# What type of database is this?
type = ${SQL_TYPE:-mysql}
[keystone]
# Where is the keystone auth host at?
keystone_auth_host = ${KEYSTONE_AUTH_HOST:-$(host:ip)}
keystone_auth_port = ${KEYSTONE_AUTH_PORT:-35357}
keystone_auth_protocol = ${KEYSTONE_AUTH_PROTOCOL:-http}
# Where is the keystone service host at?
keystone_service_host = ${KEYSTONE_SERVICE_HOST:-$(host:ip)}
keystone_service_port = ${KEYSTONE_SERVICE_PORT:-5000}
keystone_service_protocol = ${KEYSTONE_SERVICE_PROTOCOL:-http}
[glance]
# See: https://bugs.launchpad.net/anvil/+bug/1002093
eliminate_pip_gits = 1
glance_host = ${GLANCE_HOST:-$(host:ip)}
glance_port = ${GLANCE_HOSTPORT:-9292}
glance_protocol = ${GLANCE_PROTOCOL:-http}
# Specify a comma-separated list of images to download and install into glance.
image_urls = http://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-uec.tar.gz,
http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz,
[nova]
# Should nova be in verbose mode?
verbose = ${NOVA_VERBOSE:-1}
# Force backing images to raw format?
force_raw_images = 1
# Should we setup root wrap or not
do_root_wrap = 0
# Set api_rate_limit = 0 (or blank) to turn OFF rate limiting
api_rate_limit = ${API_RATE_LIMIT:-}
# Allow the admin api to be accessible?
allow_admin_api = 1
# Currently novaclient needs you to specify the *compute api* version.
nova_version = ${NOVA_VERSION:-1.1}
# Which scheduler will nova be running with?
scheduler = ${NOVA_SCHEDULER:-nova.scheduler.filter_scheduler.FilterScheduler}
# Network settings
# Very useful to read over:
# http://docs.openstack.org/trunk/openstack-compute/admin/content/configuring-networking-on-the-compute-node.html
# A fixed network will be created for you (unless disabled)
enable_fixed = 1
fixed_range = ${NOVA_FIXED_RANGE:-10.0.0.0/24}
fixed_network_size = ${NOVA_FIXED_NETWORK_SIZE:-256}
# Which network manager and which interface should be used
network_manager = ${NET_MAN:-nova.network.manager.FlatDHCPManager}
public_interface = ${PUBLIC_INTERFACE:-eth0}
# DHCP Warning: If your flat interface device uses DHCP, there will be a hiccup while the network
# is moved from the flat interface to the flat network bridge. This will happen when you launch
# your first instance. Upon launch you will lose all connectivity to the node, and the vm launch will probably fail.
#
# If you are running on a single node and don't need to access the VMs from devices other than
# that node, you can set the flat interface to the same value as FLAT_NETWORK_BRIDGE.
# This will stop the network hiccup from occurring.
flat_interface = ${FLAT_INTERFACE:-eth0}
vlan_interface = ${VLAN_INTERFACE:-$(nova:public_interface)}
flat_network_bridge = ${FLAT_NETWORK_BRIDGE:-br100}
# If using a flat manager (not dhcp) then you probably want this on
flat_injected = 0
# A floating network will be created for you (unless disabled)
enable_floating = 1
floating_range = ${FLOATING_RANGE:-172.24.4.224/28}
test_floating_pool = ${TEST_FLOATING_POOL:-test}
test_floating_range = ${TEST_FLOATING_RANGE:-192.168.253.0/29}
# TODO document these
vncproxy_url = ${VNCPROXY_URL:-http://$(host:ip):6080/vnc_auto.html}
xvpvncproxy_url = ${XVPVNCPROXY_URL:-http://$(host:ip):6081/console}
vncserver_proxyclient_address = ${VNCSERVER_PROXYCLIENT_ADDRESS:-}
ec2_dmz_host = ${EC2_DMZ_HOST:-$(host:ip)}
# Adjust this if you want to have libvirt's vnc accessible
# Ie, making it 0.0.0.0 will allow easier access from external machines.
vncserver_listen = ${VNCSERVER_LISTEN:-127.0.0.1}
# This decides which firewall driver to use:
# The default here should work with linux + iptables + libvirt special sauce...
libvirt_firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver
# This is just a firewall based on iptables, for non-libvirt and xen server usage
basic_firewall_driver = nova.virt.firewall.IptablesFirewallDriver
# Volume settings
volume_group = ${VOLUME_GROUP:-nova-volumes}
volume_backing_file = ${VOLUME_BACKING_FILE:-}
volume_backing_file_size = ${VOLUME_BACKING_FILE_SIZE:-2052M}
volume_name_prefix = ${VOLUME_NAME_PREFIX:-volume-}
volume_name_postfix = ${VOLUME_NAME_POSTFIX:-%08x}
# How instances will be named
instance_name_prefix = ${INSTANCE_NAME_PREFIX:-instance-}
instance_name_postfix = ${INSTANCE_NAME_POSTFIX:-%08x}
# Where instances will be stored, defaults to $NOVA_DIR/instances
instances_path = ${INSTANCES_PATH:-}
# Are we setup in multihost mode?
# Multi-host is a mode where each compute node runs its own network node.
# This allows network operations and routing for a VM to occur on the server that is running the VM - removing a SPOF and bandwidth bottleneck.
multi_host = ${MULTI_HOST:-0}
# Virtualization settings
# Drivers known (libvirt, xenserver, vmware, baremetal)
# Defaults to libvirt (the most compatible) if unknown.
virt_driver = ${VIRT_DRIVER:-libvirt}
# Only useful if above virt_driver is "libvirt"
# Types known (qemu, kvm, xen, uml, lxc)
# Defaults to qemu (the most compatible) if unknown (or blank).
libvirt_type = ${LIBVIRT_TYPE:-}
# What type of image service will be used?
img_service = ${IMG_SERVICE:-nova.image.glance.GlanceImageService}
# Ensure base images checksummed
checksum_base_images = 1
# Only applicable if using glance...
glance_server = ${GLANCE_SERVER:-$(glance:glance_host):$(glance:glance_port)}
# Used however you want - ensure you know nova's conf file format if you use this!
extra_flags = ${NOVA_EXTRA_FLAGS:-}
# Xen server/api settings???
# Need more docs here...
xa_connection_url = ${XA_CONNECTION_URL:-http://169.254.0.1:80/}
xa_connection_username = ${XA_CONNECTION_USERNAME:-root}
xs_firewall_driver = ${XS_FIREWALL_DRIVER:-$(nova:basic_firewall_driver)}
xs_flat_interface = ${XS_FLAT_INTERFACE:-eth1}
xs_flat_network_bridge = ${XS_FLAT_NETWORK_BRIDGE:-xapi1}
[extern]
# Set the ec2 url so euca2ools works
# Typically like http://localhost:8773/services/Cloud
# If blank we will generate this.
ec2_url = ${EC2_URL:-}
# Set the s3 url so euca2ools works
# Typically like http://localhost:3333/services/Cloud
# If blank we will generate this.
s3_url = ${S3_URL:-}
# Not used (currently)??
ec2_user_id = 42
ec2_cert_fn = ~/cert.pm
[git]
# Compute service git repo
nova_repo = git://github.com/openstack/nova.git
nova_branch = master
# Storage service git repo
swift_repo = git://github.com/openstack/swift.git
swift_branch = master
# Image catalog service git repo
glance_repo = git://github.com/openstack/glance.git
glance_branch = master
# Python glance client library
glanceclient_repo = git://github.com/openstack/python-glanceclient.git
glanceclient_branch = master
# Unified auth system (manages accounts/tokens) git repo
keystone_repo = git://github.com/openstack/keystone.git
keystone_branch = master
# A websockets/html5 or flash powered VNC console for vm instances
novnc_repo = git://github.com/kanaka/noVNC.git
novnc_branch = master
# Django powered web control panel for openstack
horizon_repo = git://github.com/openstack/horizon.git
horizon_branch = master
# Python keystone client library to nova that horizon uses
keystoneclient_repo = git://github.com/openstack/python-keystoneclient.git
keystoneclient_branch = master
# Python client library to nova that horizon (and others) use
novaclient_repo = git://github.com/openstack/python-novaclient.git
novaclient_branch = master
# Quantum service git repo
quantum_repo = git://github.com/openstack/quantum.git
quantum_branch = master
# Quantum client git repo
quantum_client_repo = git://github.com/openstack/python-quantumclient.git
quantum_client_branch = master
# Melange service
melange_repo = git://github.com/openstack/melange.git
melange_branch = master
# Python melange client library
melangeclient_repo = git://github.com/openstack/python-melangeclient.git
melangeclient_branch = master
# Python openstack client library
openstackclient_repo = git://github.com/openstack/python-openstackclient.git
openstackclient_branch = master
[melange]
# Default Melange Port
m_port = ${M_PORT:-9898}
# Default Melange Host
m_host = ${M_HOST:-$(host:ip)}
# Melange MAC Address Range
m_mac_range = ${M_MAC_RANGE:-FE-EE-DD-00-00-00/24}
[quantum]
# Where your quantum host is at
q_host = ${Q_HOST:-$(host:ip)}
# Which port your quantum host is at
q_port = ${Q_PORT:-9696}
# Which type of quantum plugin you will be using
q_plugin = ${Q_PLUGIN:-openvswitch}
# Default OVS bridge name
ovs_bridge = ${OVS_BRIDGE:-br-int}
# OVS bridge external name
ovs_bridge_external_name = ${OVS_BRIDGE_EXTERNAL_NAME:-$(quantum:ovs_bridge)}
[horizon]
# What user will apache be serving from.
#
# Root will typically not work (for apache on most distros)
# sudo adduser <username> then sudo adduser <username> admin will be what you want to set this up (in ubuntu)
# I typically use user "horizon" for ubuntu and the runtime user (who will have sudo access) for RHEL.
#
# NOTE: If blank the currently executing user will be used.
apache_user = ${APACHE_USER:-}
# This is the group of the previous user (adjust as needed)
apache_group = ${APACHE_GROUP:-$(horizon:apache_user)}
# Port horizon should run on
port = ${HORIZON_PORT:-80}
# See: https://bugs.launchpad.net/anvil/+bug/1002093
eliminate_pip_gits = 1
[swift]
# By default the location of swift drives and objects is located inside the swift source directory.
data_location = ${SWIFT_DATA_LOCATION:-data}
# TBD
swift_user = ${SWIFT_USER:-root}
# TBD
swift_group = ${SWIFT_GROUP:-$(swift:swift_user)}
# We will create a loop-back disk formatted as XFS to store the swift data.
# By default the disk size is 1 gigabyte.
loopback_disk_size = ${SWIFT_LOOPBACK_DISK_SIZE:-1G}
# The ring uses a configurable number of bits from a path's MD5 hash as a
# partition index that designates a device. The number of bits kept from the
# hash is known as the partition power, and 2 to the partition power indicates
# the partition count. Partitioning the full MD5 hash ring allows other parts
# of the cluster to work in batches of items at once which ends up either more
# efficient or at least less complex than working with each item separately or
# the entire cluster all at once. By default we define 9 for the partition count (which means 512).
partition_power_size = ${SWIFT_PARTITION_POWER_SIZE:-9}
[passwords]
# This section is where passwords could be stored. This section also has special meaning
# in code in that the configuration class we use will look in this section for passwords
# and if no password is found (ie an empty string) then the user will be prompted to enter
# a password, if they do not enter one (or its blank) then one will be generated for the user.
# NOTE: You will need to send the same MYSQL_PASSWORD to every host if you are doing a multi-node openstack installation.
sql = ${MYSQL_PASSWORD:-}
# Change the rabbit password since the default is "guest"
rabbit = ${RABBIT_PASSWORD:-}
# This password will be used by horizon and keystone as the admin password
horizon_keystone_admin = ${ADMIN_PASSWORD:-}
# Openstack components need to have an admin token to validate user tokens.
service_token = ${SERVICE_TOKEN:-}
service_password = ${SERVICE_PASSWORD:-}
# The xen api connection password
xenapi_connection = ${XENAPI_CONNECTION:-}
# Swift hash is a random unique string for a swift cluster that can never change.
swift_hash = ${SWIFT_HASH:-}