# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
try:
    collectionsAbc = collections.abc
except AttributeError:
    collectionsAbc = collections
import csv
import datetime
import errno
import getpass
import glob
import hashlib
import logging
import multiprocessing
import netaddr
import os
import os.path
import pwd
import re
import shutil
import simplejson
import six
import socket
import subprocess
import sys
import tempfile
import time
import yaml
import ansible_runner
from heatclient.common import event_utils
from heatclient.common import template_utils
from heatclient.common import utils as heat_utils
from heatclient.exc import HTTPNotFound
from osc_lib import exceptions as oscexc
from osc_lib.i18n import _
from oslo_concurrency import processutils
from six.moves import configparser
from heatclient import exc as hc_exc
from six.moves.urllib import error as url_error
from six.moves.urllib import request
from tripleoclient import constants
from tripleoclient import exceptions

LOG = logging.getLogger(__name__ + ".utils")

# NOTE(cloudnull): This is setting the FileExistsError for py2 environments.
#                  When we no longer support py2 (centos7) this should be
#                  removed.
try:
    FileExistsError = FileExistsError
except NameError:
    FileExistsError = OSError


class Pushd(object):
    """Simple context manager to change directories and then return."""

    def __init__(self, directory):
        """This context manager will enter and exit directories.

        >>> with Pushd(directory='/tmp'):
        ...     with open('file', 'w') as f:
        ...         f.write('test')

        :param directory: path to change directory to
        :type directory: `string`
        """
        self.dir = directory
        self.pwd = self.cwd = os.getcwd()

    def __enter__(self):
        os.chdir(self.dir)
        self.cwd = os.getcwd()
        return self

    def __exit__(self, *args):
        if self.pwd != self.cwd:
            os.chdir(self.pwd)


class TempDirs(object):
    """Simple context manager to manage temp directories."""

    def __init__(self, dir_path=None, dir_prefix='tripleo', cleanup=True,
                 chdir=True):
        """This context manager will create, push, and cleanup temp directories.

        >>> with TempDirs() as t:
        ...     with open('file', 'w') as f:
        ...         f.write('test')
        ...     print(t)
        ...     os.mkdir('testing')
        ...     with open(os.path.join(t, 'file')) as w:
        ...         print(w.read())
        ...     with open('testing/file', 'w') as f:
        ...         f.write('things')
        ...     with open(os.path.join(t, 'testing/file')) as w:
        ...         print(w.read())

        :param dir_path: path to create the temp directory
        :type dir_path: `string`
        :param dir_prefix: prefix to add to a temp directory
        :type dir_prefix: `string`
        :param cleanup: when enabled the temp directory will be
                        removed on exit.
        :type cleanup: `boolean`
        :param chdir: Change to/from the created temporary dir on enter/exit.
        :type chdir: `boolean`
        """
        # NOTE(cloudnull): kwargs for tempfile.mkdtemp are created
        #                  because args are not processed correctly
        #                  in py2. When we drop py2 support (cent7)
        #                  these args can be removed and used directly
        #                  in the `tempfile.mkdtemp` function.
        tempdir_kwargs = dict()
        if dir_path:
            tempdir_kwargs['dir'] = dir_path

        if dir_prefix:
            tempdir_kwargs['prefix'] = dir_prefix

        self.dir = tempfile.mkdtemp(**tempdir_kwargs)
        self.pushd = Pushd(directory=self.dir)
        self.cleanup = cleanup
        self.chdir = chdir

    def __enter__(self):
        if self.chdir:
            self.pushd.__enter__()
        return self.dir

    def __exit__(self, *args):
        if self.chdir:
            self.pushd.__exit__()

        if self.cleanup:
            self.clean()
        else:
            LOG.warning("Not cleaning temporary directory [ %s ]" % self.dir)

    def clean(self):
        shutil.rmtree(self.dir, ignore_errors=True)
        LOG.info("Temporary directory [ %s ] cleaned up" % self.dir)


def _encode_envvars(env):
    """Encode a hash of values.

    :param env: A hash of key=value items.
    :type env: `dict`.
    """
    for key, value in env.items():
        env[key] = six.text_type(value)
    return env
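
# Example (illustrative; py3 repr shown). Non-string values are coerced to
# text so they can be exported as environment variables:
#
#   >>> _encode_envvars({'ANSIBLE_FORKS': 25, 'ANSIBLE_PIPELINING': True})
#   {'ANSIBLE_FORKS': '25', 'ANSIBLE_PIPELINING': 'True'}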


def makedirs(dir_path):
    """Recursively make directories and log the interaction.

    :param dir_path: full path of the directories to make.
    :type dir_path: `string`
    :returns: `boolean`
    """
    try:
        os.makedirs(dir_path)
    except FileExistsError:
        LOG.debug(
            'Directory "{}" was not created because it'
            ' already exists.'.format(
                dir_path
            )
        )
        return False
    else:
        LOG.debug('Directory "{}" was created.'.format(dir_path))
        return True
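
# Example (illustrative; the path is hypothetical). The second call is a
# no-op and reports False:
#
#   >>> makedirs('/tmp/tripleo-example')
#   True
#   >>> makedirs('/tmp/tripleo-example')
#   False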


def playbook_limit_parse(limit_nodes):
    """Return a parsed string for limits.

    This will sanitize user inputs so that we guarantee what is provided is
    expected to be functional. If limit_nodes is None, this function will
    return None.

    :returns: String
    """
    if not limit_nodes:
        return limit_nodes

    return ':'.join([i.strip() for i in re.split(',| |:', limit_nodes) if i])
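
# Example (illustrative). Mixed comma/space/colon separators are normalized
# to the colon-delimited form Ansible expects for --limit:
#
#   >>> playbook_limit_parse('compute-0, compute-1 controller-0')
#   'compute-0:compute-1:controller-0'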


def playbook_verbosity(self):
    """Return an integer for playbook verbosity levels.

    :param self: Class object used to interpret the runtime state.
    :type self: Object

    :returns: Integer
    """
    if self.app.options.debug:
        return 3
    if self.app_args.verbose_level <= 1:
        return 0
    return self.app_args.verbose_level


def run_ansible_playbook(playbook, inventory, workdir, playbook_dir=None,
                         connection='smart', output_callback='tripleo_dense',
                         ssh_user='root', key=None, module_path=None,
                         limit_hosts=None, tags=None, skip_tags=None,
                         verbosity=0, quiet=False, extra_vars=None,
                         extra_vars_file=None, plan='overcloud',
                         gathering_policy='smart', extra_env_variables=None,
                         parallel_run=False,
                         callback_whitelist=constants.ANSIBLE_CWL,
                         ansible_cfg=None, ansible_timeout=30,
                         reproduce_command=False,
                         timeout=None, forks=None):
"""Simple wrapper for ansible-playbook.
:param playbook: Playbook filename.
:type playbook: String
:param inventory: Either proper inventory file, or a coma-separated list.
:type inventory: String
:param workdir: Location of the working directory.
:type workdir: String
:param playbook_dir: Location of the playbook directory.
(defaults to workdir).
:type playbook_dir: String
:param connection: Connection type (local, smart, etc).
:type connection: String
:param output_callback: Callback for output format. Defaults to
"tripleo_dense".
:type output_callback: String
:param callback_whitelist: Comma separated list of callback plugins.
Defaults to
"tripleo_dense,tripleo_profile_tasks,
tripleo_states".
Custom output_callback is also whitelisted.
:type callback_whitelist: String
:param ssh_user: User for the ssh connection.
:type ssh_user: String
:param key: Private key to use for the ssh connection.
:type key: String
:param module_path: Location of the ansible module and library.
:type module_path: String
:param limit_hosts: Limit the execution to the hosts.
:type limit_hosts: String
:param tags: Run specific tags.
:type tags: String
:param skip_tags: Skip specific tags.
:type skip_tags: String
:param verbosity: Verbosity level for Ansible execution.
:type verbosity: Integer
:param quiet: Disable all output (Defaults to False)
:type quiet: Boolean
:param extra_vars: Set additional variables as a Dict or the absolute
path of a JSON or YAML file type.
:type extra_vars: Either a Dict or the absolute path of JSON or YAML
:param extra_vars_file: Set additional ansible variables using an
extravar file.
:type extra_vars_file: Dictionary
:param plan: Plan name (Defaults to "overcloud").
:type plan: String
:param gathering_policy: This setting controls the default policy of
fact gathering ('smart', 'implicit', 'explicit').
:type gathering_facts: String
:param extra_env_variables: Dict option to extend or override any of the
default environment variables.
:type extra_env_variables: Dict
:param parallel_run: Isolate playbook execution when playbooks are to be
executed with multi-processing.
:type parallel_run: Boolean
:param ansible_cfg: Path to an ansible configuration file. One will be
generated in the artifact path if this option is None.
:type ansible_cfg: String
:param ansible_timeout: Timeout for ansible connections.
:type ansible_timeout: int
:param reproduce_command: Enable or disable option to reproduce ansible
commands upon failure. This option will produce
a bash script that can reproduce a failing
playbook command which is helpful for debugging
and retry purposes.
:type reproduce_command: Boolean
:param timeout: Timeout for ansible to finish playbook execution (minutes).
:type timeout: int
"""
    def _playbook_check(play):
        if not os.path.exists(play):
            play = os.path.join(playbook_dir, play)
            if not os.path.exists(play):
                raise RuntimeError('No such playbook: {}'.format(play))
        LOG.debug('Ansible playbook {} found'.format(play))
        return play

    def _inventory(inventory):
        if inventory:
            if isinstance(inventory, six.string_types):
                # check is file path
                if os.path.exists(inventory):
                    return inventory
            elif isinstance(inventory, dict):
                inventory = yaml.safe_dump(
                    inventory,
                    default_flow_style=False
                )

            return ansible_runner.utils.dump_artifact(
                inventory,
                ansible_artifact_path,
                constants.ANSIBLE_HOSTS_FILENAME
            )

    def _running_ansible_msg(playbook, timeout=None):
        if timeout and timeout > 0:
            return ('Running Ansible playbook with timeout %sm: %s,' %
                    (timeout, playbook))
        else:
            return ('Running Ansible playbook: %s,' % playbook)
    if not playbook_dir:
        playbook_dir = workdir

    # Ensure that the ansible-runner env exists
    runner_env = os.path.join(workdir, 'env')
    makedirs(runner_env)

    if extra_vars_file:
        runner_extra_vars = os.path.join(runner_env, 'extravars')
        with open(runner_extra_vars, 'w') as f:
            f.write(yaml.safe_dump(extra_vars_file, default_flow_style=False))

    if timeout and timeout > 0:
        settings_file = os.path.join(runner_env, 'settings')
        timeout_value = timeout * 60
        if os.path.exists(settings_file):
            with open(settings_file, 'r') as f:
                settings_object = yaml.safe_load(f.read())
                settings_object['job_timeout'] = timeout_value
        else:
            settings_object = {'job_timeout': timeout_value}

        with open(settings_file, 'w') as f:
            f.write(yaml.safe_dump(settings_object, default_flow_style=False))

    if isinstance(playbook, (list, set)):
        verified_playbooks = [_playbook_check(play=i) for i in playbook]
        playbook = os.path.join(workdir, 'tripleo-multi-playbook.yaml')
        with open(playbook, 'w') as f:
            f.write(
                yaml.safe_dump(
                    [{'import_playbook': i} for i in verified_playbooks],
                    default_flow_style=False
                )
            )

        LOG.info(
            _running_ansible_msg(playbook, timeout) +
            ' multi-playbook execution: {}'
            ' Working directory: {},'
            ' Playbook directory: {}'.format(
                verified_playbooks,
                workdir,
                playbook_dir
            )
        )
    else:
        playbook = _playbook_check(play=playbook)
        LOG.info(
            _running_ansible_msg(playbook, timeout) +
            ' Working directory: {},'
            ' Playbook directory: {}'.format(
                workdir,
                playbook_dir
            )
        )

    if limit_hosts:
        LOG.info(
            'Running ansible with the following limit: {}'.format(
                limit_hosts
            )
        )
    cwd = os.getcwd()
    ansible_fact_path = os.path.join(
        os.path.join(
            tempfile.gettempdir(),
            'tripleo-ansible'
        ),
        'fact_cache'
    )
    makedirs(ansible_fact_path)

    if output_callback not in callback_whitelist.split(','):
        callback_whitelist = ','.join([callback_whitelist, output_callback])

    if not forks:
        forks = min(multiprocessing.cpu_count() * 4, 100)

    env = dict()
    env['ANSIBLE_SSH_ARGS'] = (
        '-o UserKnownHostsFile={} '
        '-o StrictHostKeyChecking=no '
        '-o ControlMaster=auto '
        '-o ControlPersist=30m '
        '-o ServerAliveInterval=64 '
        '-o ServerAliveCountMax=1024 '
        '-o Compression=no '
        '-o TCPKeepAlive=yes '
        '-o VerifyHostKeyDNS=no '
        '-o ForwardX11=no '
        '-o ForwardAgent=yes '
        '-o PreferredAuthentications=publickey '
        '-T'
    ).format(os.devnull)
    env['ANSIBLE_DISPLAY_FAILED_STDERR'] = True
    env['ANSIBLE_FORKS'] = forks
    env['ANSIBLE_TIMEOUT'] = ansible_timeout
    env['ANSIBLE_GATHER_TIMEOUT'] = 45
    env['ANSIBLE_SSH_RETRIES'] = 3
    env['ANSIBLE_PIPELINING'] = True
    env['ANSIBLE_SCP_IF_SSH'] = True
    env['ANSIBLE_REMOTE_USER'] = ssh_user
    env['ANSIBLE_STDOUT_CALLBACK'] = output_callback
    env['ANSIBLE_LIBRARY'] = os.path.expanduser(
        '{}/.ansible/plugins/modules:'
        '{}:{}:'
        '/usr/share/ansible/tripleo-plugins/modules:'
        '/usr/share/ansible/plugins/modules:'
        '/usr/share/ceph-ansible/library:'
        '/usr/share/ansible-modules:'
        '{}/library:'
        '{}/library'.format(
            constants.CLOUD_HOME_DIR,
            os.path.join(workdir, 'modules'),
            os.path.join(cwd, 'modules'),
            constants.DEFAULT_VALIDATIONS_BASEDIR,
            constants.DEFAULT_VALIDATIONS_LEGACY_BASEDIR
        )
    )
    env['ANSIBLE_LOOKUP_PLUGINS'] = os.path.expanduser(
        '{}/.ansible/plugins/lookup:'
        '{}:{}:'
        '/usr/share/ansible/tripleo-plugins/lookup:'
        '/usr/share/ansible/plugins/lookup:'
        '/usr/share/ceph-ansible/plugins/lookup:'
        '{}/lookup_plugins:'
        '{}/lookup_plugins'.format(
            constants.CLOUD_HOME_DIR,
            os.path.join(workdir, 'lookup'),
            os.path.join(cwd, 'lookup'),
            constants.DEFAULT_VALIDATIONS_BASEDIR,
            constants.DEFAULT_VALIDATIONS_LEGACY_BASEDIR
        )
    )
    env['ANSIBLE_CALLBACK_PLUGINS'] = os.path.expanduser(
        '{}/.ansible/plugins/callback:'
        '{}:{}:'
        '/usr/share/ansible/tripleo-plugins/callback:'
        '/usr/share/ansible/plugins/callback:'
        '/usr/share/ceph-ansible/plugins/callback:'
        '{}/callback_plugins:'
        '{}/callback_plugins'.format(
            constants.CLOUD_HOME_DIR,
            os.path.join(workdir, 'callback'),
            os.path.join(cwd, 'callback'),
            constants.DEFAULT_VALIDATIONS_BASEDIR,
            constants.DEFAULT_VALIDATIONS_LEGACY_BASEDIR
        )
    )
    env['ANSIBLE_ACTION_PLUGINS'] = os.path.expanduser(
        '{}/.ansible/plugins/action:'
        '{}:{}:'
        '/usr/share/ansible/tripleo-plugins/action:'
        '/usr/share/ansible/plugins/action:'
        '/usr/share/ceph-ansible/plugins/actions:'
        '{}/action_plugins:'
        '{}/action_plugins'.format(
            constants.CLOUD_HOME_DIR,
            os.path.join(workdir, 'action'),
            os.path.join(cwd, 'action'),
            constants.DEFAULT_VALIDATIONS_BASEDIR,
            constants.DEFAULT_VALIDATIONS_LEGACY_BASEDIR
        )
    )
    env['ANSIBLE_FILTER_PLUGINS'] = os.path.expanduser(
        '{}/.ansible/plugins/filter:'
        '{}:{}:'
        '/usr/share/ansible/tripleo-plugins/filter:'
        '/usr/share/ansible/plugins/filter:'
        '/usr/share/ceph-ansible/plugins/filter:'
        '{}/filter_plugins:'
        '{}/filter_plugins'.format(
            constants.CLOUD_HOME_DIR,
            os.path.join(workdir, 'filter'),
            os.path.join(cwd, 'filter'),
            constants.DEFAULT_VALIDATIONS_BASEDIR,
            constants.DEFAULT_VALIDATIONS_LEGACY_BASEDIR
        )
    )
    env['ANSIBLE_ROLES_PATH'] = os.path.expanduser(
        '{}/.ansible/roles:'
        '{}:{}:'
        '/usr/share/ansible/tripleo-roles:'
        '/usr/share/ansible/roles:'
        '/usr/share/ceph-ansible/roles:'
        '/etc/ansible/roles:'
        '{}/roles:'
        '{}/roles'.format(
            constants.CLOUD_HOME_DIR,
            os.path.join(workdir, 'roles'),
            os.path.join(cwd, 'roles'),
            constants.DEFAULT_VALIDATIONS_BASEDIR,
            constants.DEFAULT_VALIDATIONS_LEGACY_BASEDIR
        )
    )
    env['ANSIBLE_CALLBACK_WHITELIST'] = callback_whitelist
    env['ANSIBLE_RETRY_FILES_ENABLED'] = False
    env['ANSIBLE_HOST_KEY_CHECKING'] = False
    env['ANSIBLE_TRANSPORT'] = connection
    env['ANSIBLE_CACHE_PLUGIN_TIMEOUT'] = 7200

    if connection == 'local':
        env['ANSIBLE_PYTHON_INTERPRETER'] = sys.executable

    if gathering_policy in ('smart', 'explicit', 'implicit'):
        env['ANSIBLE_GATHERING'] = gathering_policy

    if module_path:
        env['ANSIBLE_LIBRARY'] = ':'.join(
            [env['ANSIBLE_LIBRARY'], module_path]
        )

    env['TRIPLEO_PLAN_NAME'] = plan
    get_uid = int(os.getenv('SUDO_UID', os.getuid()))
    try:
        user_pwd = pwd.getpwuid(get_uid)
    except (KeyError, TypeError):
        home = constants.CLOUD_HOME_DIR
    else:
        home = user_pwd.pw_dir

    env['ANSIBLE_LOG_PATH'] = os.path.join(home, 'ansible.log')

    if key:
        env['ANSIBLE_PRIVATE_KEY_FILE'] = key

    # NOTE(cloudnull): Re-apply the original environment ensuring that
    #                  anything defined on the CLI is set accordingly.
    env.update(os.environ.copy())

    if extra_env_variables:
        if not isinstance(extra_env_variables, dict):
            msg = "extra_env_variables must be a dict"
            LOG.error(msg)
            raise SystemError(msg)
        else:
            env.update(extra_env_variables)

    if 'ANSIBLE_CONFIG' not in env and not ansible_cfg:
        config_download = os.path.join(constants.DEFAULT_WORK_DIR, plan)
        makedirs(config_download)
        ansible_cfg = os.path.join(config_download, 'ansible.cfg')
        config = configparser.ConfigParser()
        if os.path.isfile(ansible_cfg):
            config.read(ansible_cfg)

        if 'defaults' not in config.sections():
            config.add_section('defaults')

        config.set('defaults', 'internal_poll_interval', '0.01')
        with open(ansible_cfg, 'w') as f:
            config.write(f)

        env['ANSIBLE_CONFIG'] = ansible_cfg
    elif 'ANSIBLE_CONFIG' not in env and ansible_cfg:
        env['ANSIBLE_CONFIG'] = ansible_cfg
    command_path = None

    with TempDirs(chdir=False) as ansible_artifact_path:
        r_opts = {
            'private_data_dir': workdir,
            'project_dir': playbook_dir,
            'inventory': _inventory(inventory),
            'envvars': _encode_envvars(env=env),
            'playbook': playbook,
            'verbosity': verbosity,
            'quiet': quiet,
            'extravars': extra_vars,
            'fact_cache': ansible_fact_path,
            'fact_cache_type': 'jsonfile',
            'artifact_dir': ansible_artifact_path,
            'rotate_artifacts': 256
        }

        if skip_tags:
            r_opts['skip_tags'] = skip_tags

        if tags:
            r_opts['tags'] = tags

        if limit_hosts:
            r_opts['limit'] = limit_hosts

        if parallel_run:
            r_opts['directory_isolation_base_path'] = ansible_artifact_path

        runner_config = ansible_runner.runner_config.RunnerConfig(**r_opts)
        runner_config.prepare()
        # NOTE(cloudnull): overload the output callback after prepare
        #                  to define the specific format we want.
        #                  This is only required until PR
        #                  https://github.com/ansible/ansible-runner/pull/387
        #                  is merged and released. After this PR has been
        #                  made available to us, this line should be removed.
        runner_config.env['ANSIBLE_STDOUT_CALLBACK'] = \
            r_opts['envvars']['ANSIBLE_STDOUT_CALLBACK']
        runner = ansible_runner.Runner(config=runner_config)

        if reproduce_command:
            command_path = os.path.join(
                playbook_dir,
                "ansible-playbook-command.sh"
            )
            with open(command_path, 'w') as f:
                f.write('#!/usr/bin/env bash\n')
                f.write('echo -e "Exporting environment variables"\n')
                for key, value in r_opts['envvars'].items():
                    f.write('export {}="{}"\n'.format(key, value))
                f.write('echo -e "Running Ansible command"\n')
                f.write('{} $@\n'.format(' '.join(runner_config.command)))
            os.chmod(command_path, 0o750)

        try:
            status, rc = runner.run()
        finally:
            # NOTE(cloudnull): After a playbook executes, ensure the log
            #                  file, if it exists, was created with
            #                  appropriate ownership.
            _log_path = r_opts['envvars']['ANSIBLE_LOG_PATH']
            if os.path.isfile(_log_path):
                os.chown(_log_path, get_uid, -1)

        if rc != 0:
            err_msg = (
                'Ansible execution failed. playbook: {},'
                ' Run Status: {},'
                ' Return Code: {}'.format(
                    playbook,
                    status,
                    rc
                )
            )
            if command_path:
                err_msg += (
                    ', To rerun the failed command manually execute the'
                    ' following script: {}'.format(
                        command_path
                    )
                )

            if not quiet:
                LOG.error(err_msg)

            raise RuntimeError(err_msg)

        LOG.info(
            'Ansible execution success. playbook: {}'.format(
                playbook))
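
# Usage sketch (illustrative only; the playbook name, inventory string, and
# working directory below are hypothetical):
#
#   run_ansible_playbook(
#       playbook='cli-deploy.yaml',
#       inventory='localhost,',
#       workdir='/home/stack/overcloud-deploy',
#       connection='local',
#       verbosity=1
#   )
#
# A non-zero return code raises RuntimeError; with reproduce_command=True the
# error message points at a generated ansible-playbook-command.sh that can be
# re-executed for debugging.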


def convert(data):
    """Recursively convert dictionary keys and values to strings."""
    if isinstance(data, six.string_types):
        return str(data)
    elif isinstance(data, collectionsAbc.Mapping):
        return dict(map(convert, six.iteritems(data)))
    elif isinstance(data, collectionsAbc.Iterable):
        return type(data)(map(convert, data))
    else:
        return data
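
# Example (illustrative; py3 repr shown). Nested text values come back as
# native str while non-string scalars are left untouched:
#
#   >>> convert({u'name': [u'compute-0', 1]})
#   {'name': ['compute-0', 1]}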


def bracket_ipv6(address):
    """Put a bracket around address if it is valid IPv6.

    Return it unchanged if it is a hostname or IPv4 address.
    """
    try:
        socket.inet_pton(socket.AF_INET6, address)
        return "[%s]" % address
    except socket.error:
        return address
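
# Examples (illustrative):
#
#   >>> bracket_ipv6('2001:db8::1')
#   '[2001:db8::1]'
#   >>> bracket_ipv6('192.0.2.1')
#   '192.0.2.1'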


def is_valid_ip(ip):
    """Return True if the IP is either v4 or v6.

    Return False if invalid.
    """
    return netaddr.valid_ipv4(ip) or netaddr.valid_ipv6(ip)


def is_loopback(host):
    """Return True if the IP or the host is a loopback.

    Return False if not.
    """
    loopbacks = ['127', '::1']
    for l in loopbacks:
        if host.startswith(l):
            return True

    return False
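
# Examples (illustrative). Note the check is a simple string-prefix match on
# '127' / '::1', not a full address parse:
#
#   >>> is_valid_ip('192.0.2.1')
#   True
#   >>> is_loopback('127.0.0.1')
#   True
#   >>> is_loopback('192.0.2.1')
#   False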


def get_host_ips(host, type=None):
    """Look up a host and return a list of IPs.

    :param host: Host to look up
    :type host: string
    :param type: Type of socket (e.g. socket.AF_INET, socket.AF_INET6)
    :type type: int (socket address family constant)
    """
    ips = set()
    if type:
        types = (type,)
    else:
        types = (socket.AF_INET, socket.AF_INET6)

    for t in types:
        try:
            res = socket.getaddrinfo(host, None, t, socket.SOCK_STREAM)
        except socket.error:
            continue

        nips = set([x[4][0] for x in res])
        ips.update(nips)

    return list(ips)
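
# Example sketch (result order and content depend on the local resolver;
# 'localhost' commonly resolves to both loopback families):
#
#   >>> sorted(get_host_ips('localhost'))
#   ['127.0.0.1', '::1']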


def get_single_ip(host, allow_loopback=False, ip_version=4):
    """Translate a hostname into a single IP address if it is a valid IP.

    :param host: IP or hostname or FQDN to look up
    :type host: string
    :param allow_loopback: Whether or not a loopback IP can be returned.
                           Defaults to False.
    :type allow_loopback: boolean
    :param ip_version: IP version of the address to look up (4 or 6).
    :type ip_version: int

    Return the host unchanged if it is already an IPv4 or IPv6 address.
    """
    ip = host
    if not is_valid_ip(host):
        type = socket.AF_INET6 if ip_version == 6 else socket.AF_INET
        ips = get_host_ips(host, type=type)
        if not ips:
            raise exceptions.LookupError('No IP was found for the host: '
                                         '%s' % host)
        else:
            ip = ips[0]

        if len(ips) > 1:
            raise exceptions.LookupError('More than one IP was found for the '
                                         'host %s: %s' % (host, ips))

        if not allow_loopback and is_loopback(ip):
            raise exceptions.LookupError('IP address for host %s is a loopback'
                                         ' IP: %s' % (host, ip))

        if not is_valid_ip(ip):
            raise exceptions.LookupError('IP address for host %s is not a '
                                         'valid IP: %s' % (host, ip))

    return ip
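
# Example sketch (the hostname is hypothetical and assumed to resolve to
# exactly one non-loopback IPv4 address):
#
#   >>> get_single_ip('192.0.2.5')
#   '192.0.2.5'
#   >>> get_single_ip('undercloud.example.com')
#   '192.0.2.5'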


def write_env_file(env_data, env_file, registry_overwrites):
    """Write the tht env file as yaml"""

    data = {'parameter_defaults': env_data}
    if registry_overwrites:
        data['resource_registry'] = registry_overwrites

    with open(env_file, "w") as f:
        dumper = yaml.dumper.SafeDumper
        dumper.ignore_aliases = lambda self, data: True
        yaml.dump(data, f, default_flow_style=False, Dumper=dumper)
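
# Example (illustrative; the parameter and registry entries are hypothetical):
#
#   write_env_file(
#       {'NtpServer': 'pool.ntp.org'},
#       'parameters.yaml',
#       {'OS::TripleO::Services::Example': 'OS::Heat::None'}
#   )
#
# produces a parameters.yaml along these lines:
#
#   parameter_defaults:
#     NtpServer: pool.ntp.org
#   resource_registry:
#     OS::TripleO::Services::Example: OS::Heat::None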


def store_cli_param(command_name, parsed_args):
    """Write the CLI parameters into a history file."""

    # The command name is the part after "openstack" with spaces. Switching
    # to "-" makes it easier to read. "openstack undercloud install" will be
    # stored as "undercloud-install" for example.
    command_name = command_name.replace(" ", "-")

    history_path = os.path.join(constants.CLOUD_HOME_DIR, '.tripleo')
    makedirs(history_path)
    if os.path.isdir(history_path):
        try:
            with open(os.path.join(history_path,
                                   'history'), 'a') as history:
                args = parsed_args.__dict__.copy()
                used_args = ', '.join('%s=%s' % (key, value)
                                      for key, value in args.items())
                history.write(' '.join([str(datetime.datetime.now()),
                                        str(command_name), used_args, "\n"]))
        except IOError as e:
            messages = _("Unable to write into TripleO history file: "
                         "{0}, {1}").format(history_path, e)
            raise IOError(messages)
    else:
        raise exceptions.InvalidConfiguration(_("Target path %s is not a "
                                                "directory") % history_path)


def create_tempest_deployer_input(config_name='tempest-deployer-input.conf'):
    config = configparser.ConfigParser()

    # Create required sections
    for section in ('auth', 'compute', 'compute-feature-enabled', 'identity',
                    'image', 'network', 'object-storage', 'orchestration',
                    'volume', 'volume-feature-enabled'):
        config.add_section(section)

    # Dynamic credentials means tempest will create the required credentials
    # if a test requires a new account to work; tempest will create one just
    # for that test.
    config.set('auth', 'use_dynamic_credentials', 'true')

    # Does the test environment support obtaining instance serial console
    # output? (default: true)
    # set in [nova.serial_console]->enabled
    config.set('compute-feature-enabled', 'console_output', 'false')

    # Role required for users to be able to manage stacks
    # (default: 'heat_stack_owner')
    # keystone role-list returns this role
    config.set('orchestration', 'stack_owner_role', 'swiftoperator')

    # Name of the backend1 (must be declared in cinder.conf)
    # (default: 'BACKEND_1')
    # set in [cinder]->enabled_backends
    config.set('volume', 'backend1_name', 'tripleo_iscsi')

    # Update bootable status of a volume. Not implemented on icehouse
    # (default: false)
    # python-cinderclient supports set-bootable
    config.set('volume-feature-enabled', 'bootable', 'true')

    # Fix region value because TripleO is using a non-standard value
    for section in ('compute', 'identity', 'image', 'network',
                    'object-storage', 'orchestration', 'volume'):
        config.set(section, 'region', 'regionOne')

    with open(config_name, 'w+') as config_file:
        config.write(config_file)
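
# The generated file is written to the current working directory and looks
# along these lines (excerpt):
#
#   [auth]
#   use_dynamic_credentials = true
#
#   [compute-feature-enabled]
#   console_output = false
#
#   [orchestration]
#   stack_owner_role = swiftoperator
#   region = regionOne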


def wait_for_stack_ready(orchestration_client, stack_name, marker=None,
                         action='CREATE', nested_depth=2,
                         max_retries=10):
    """Check the status of an orchestration stack.

    Get the status of an orchestration stack and check whether it is complete
    or failed.

    :param orchestration_client: Instance of Orchestration client
    :type orchestration_client: heatclient.v1.client.Client
    :param stack_name: Name or UUID of stack to retrieve
    :type stack_name: string
    :param marker: UUID of the last stack event before the current action
    :type marker: string
    :param action: Current action to check the stack for COMPLETE
    :type action: string
    :param nested_depth: Max depth to look for events
    :type nested_depth: int
    :param max_retries: Number of retries in the case of server problems
    :type max_retries: int
    """
    log = logging.getLogger(__name__ + ".wait_for_stack_ready")
    stack = get_stack(orchestration_client, stack_name)
    if not stack:
        return False

    stack_name = "%s/%s" % (stack.stack_name, stack.id)

    retries = 0
    while retries <= max_retries:
        try:
            stack_status, msg = event_utils.poll_for_events(
                orchestration_client, stack_name, action=action,
                poll_period=5, marker=marker, out=sys.stdout,
                nested_depth=nested_depth)
            print(msg)
            return stack_status == '%s_COMPLETE' % action
        except hc_exc.HTTPException as e:
            if e.code in [500, 503, 504]:
                retries += 1
                log.warning("Server issue while waiting for stack to be "
                            "ready. Attempting retry {} of {}".format(
                                retries, max_retries))
                time.sleep(retries * 5)
                continue
            log.error("Error occurred while waiting for stack to be ready.")
            raise e

    raise RuntimeError(
        "wait_for_stack_ready: Max retries {} reached".format(max_retries))


def get_stack_output_item(stack, item):
    if not stack:
        return None

    for output in stack.to_dict().get('outputs', {}):
        if output['output_key'] == item:
            return output['output_value']
    # item not found in outputs
    return None
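
# Example sketch (assumes a stack object whose outputs include KeystoneURL;
# the URL is hypothetical):
#
#   >>> get_stack_output_item(stack, 'KeystoneURL')
#   'http://192.0.2.10:5000'
#   >>> get_stack_output_item(stack, 'UnknownKey') is None
#   True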


def get_overcloud_endpoint(stack):
    return get_stack_output_item(stack, 'KeystoneURL')


def get_service_ips(stack):
    service_ips = {}
    for output in stack.to_dict().get('outputs', {}):
        service_ips[output['output_key']] = output['output_value']
    return service_ips


def get_endpoint_map(stack):
    endpoint_map = get_stack_output_item(stack, 'EndpointMap')
    if not endpoint_map:
        endpoint_map = {}