# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import contextlib
import copy as obj_copy
import ctypes
import email
import errno
import glob
import grp
import gzip
import hashlib
import json
import os
import os.path
import platform
import pwd
import random
import re
import shutil
import socket
import stat
import string
import subprocess
import sys
import tempfile
import time

from base64 import b64decode, b64encode

from six.moves.urllib import parse as urlparse

import six
import yaml

from cloudinit import importer
from cloudinit import log as logging
from cloudinit import mergers
from cloudinit import safeyaml
from cloudinit import type_utils
from cloudinit import url_helper
from cloudinit import version

from cloudinit.settings import (CFG_BUILTIN)
_DNS_REDIRECT_IP = None
LOG = logging.getLogger(__name__)

# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
    os.sep: '_',
}
FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)

TRUE_STRINGS = ('true', '1', 'on', 'yes')
FALSE_STRINGS = ('off', '0', 'no', 'false')


# Helper utils to see if running in a container
CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')


def decode_binary(blob, encoding='utf-8'):
    # Converts a binary type into a text type using given encoding.
    if isinstance(blob, six.text_type):
        return blob
    return blob.decode(encoding)


def encode_text(text, encoding='utf-8'):
    # Converts a text string into a binary type using given encoding.
    if isinstance(text, six.binary_type):
        return text
    return text.encode(encoding)


def b64d(source):
    # Base64 decode some data, accepting bytes or unicode/str, and returning
    # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
    decoded = b64decode(source)
    try:
        return decoded.decode('utf-8')
    except UnicodeDecodeError:
        return decoded


def b64e(source):
    # Base64 encode some data, accepting bytes or unicode/str; the encoded
    # value is always returned as str/unicode.
    if not isinstance(source, bytes):
        source = source.encode('utf-8')
    return b64encode(source).decode('utf-8')
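

# Illustrative round-trips for the helpers above (values follow from the code
# above; not executed here):
#   b64e('hello')            -> 'aGVsbG8='
#   b64d('aGVsbG8=')         -> 'hello'        (utf-8 decodable, so str)
#   b64d(b64e(b'\xff\x00'))  -> b'\xff\x00'    (not utf-8, so bytes)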


def fully_decoded_payload(part):
    # In Python 3, decoding the payload will ironically hand us a bytes
    # object.  'decode' means to decode according to
    # Content-Transfer-Encoding, not according to any charset in the
    # Content-Type.  So, if we end up with bytes, first try to decode to str
    # via CT charset, and failing that, try utf-8 using surrogate escapes.
    cte_payload = part.get_payload(decode=True)
    if (six.PY3 and
            part.get_content_maintype() == 'text' and
            isinstance(cte_payload, bytes)):
        charset = part.get_charset() or 'utf-8'
        return cte_payload.decode(charset, errors='surrogateescape')
    return cte_payload


# Path for DMI Data
DMI_SYS_PATH = "/sys/class/dmi/id"

# dmidecode and /sys/class/dmi/id/* use different names for the same value,
# this allows us to refer to them by one canonical name
DMIDECODE_TO_DMI_SYS_MAPPING = {
    'baseboard-asset-tag': 'board_asset_tag',
    'baseboard-manufacturer': 'board_vendor',
    'baseboard-product-name': 'board_name',
    'baseboard-serial-number': 'board_serial',
    'baseboard-version': 'board_version',
    'bios-release-date': 'bios_date',
    'bios-vendor': 'bios_vendor',
    'bios-version': 'bios_version',
    'chassis-asset-tag': 'chassis_asset_tag',
    'chassis-manufacturer': 'chassis_vendor',
    'chassis-serial-number': 'chassis_serial',
    'chassis-version': 'chassis_version',
    'system-manufacturer': 'sys_vendor',
    'system-product-name': 'product_name',
    'system-serial-number': 'product_serial',
    'system-uuid': 'product_uuid',
    'system-version': 'product_version',
}


class ProcessExecutionError(IOError):

    MESSAGE_TMPL = ('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Reason: %(reason)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r')

    def __init__(self, stdout=None, stderr=None,
                 exit_code=None, cmd=None,
                 description=None, reason=None):
        if not cmd:
            self.cmd = '-'
        else:
            self.cmd = cmd

        if not description:
            self.description = 'Unexpected error while running command.'
        else:
            self.description = description

        if not isinstance(exit_code, six.integer_types):
            self.exit_code = '-'
        else:
            self.exit_code = exit_code

        if not stderr:
            self.stderr = ''
        else:
            self.stderr = stderr

        if not stdout:
            self.stdout = ''
        else:
            self.stdout = stdout

        if reason:
            self.reason = reason
        else:
            self.reason = '-'

        message = self.MESSAGE_TMPL % {
            'description': self.description,
            'cmd': self.cmd,
            'exit_code': self.exit_code,
            'stdout': self.stdout,
            'stderr': self.stderr,
            'reason': self.reason,
        }
        IOError.__init__(self, message)
        # For backward compatibility with Python 2.
        if not hasattr(self, 'message'):
            self.message = message


class SeLinuxGuard(object):

    def __init__(self, path, recursive=False):
        # Late import since it might not always
        # be possible to use this
        try:
            self.selinux = importer.import_module('selinux')
        except ImportError:
            self.selinux = None
        self.path = path
        self.recursive = recursive

    def __enter__(self):
        if self.selinux and self.selinux.is_selinux_enabled():
            return True
        else:
            return False

    def __exit__(self, excp_type, excp_value, excp_traceback):
        if not self.selinux or not self.selinux.is_selinux_enabled():
            return
        if not os.path.lexists(self.path):
            return

        path = os.path.realpath(self.path)
        # path should be a string, not unicode
        if six.PY2:
            path = str(path)
        try:
            stats = os.lstat(path)
            self.selinux.matchpathcon(path, stats[stat.ST_MODE])
        except OSError:
            return

        LOG.debug("Restoring selinux mode for %s (recursive=%s)",
                  path, self.recursive)
        self.selinux.restorecon(path, recursive=self.recursive)


class MountFailedError(Exception):
    pass


class DecompressionError(Exception):
    pass


def ExtendedTemporaryFile(**kwargs):
    fh = tempfile.NamedTemporaryFile(**kwargs)
    # Replace its unlink with a quiet version
    # that does not raise errors when the
    # file to unlink has been unlinked elsewhere..
    LOG.debug("Created temporary file %s", fh.name)
    fh.unlink = del_file

    # Add a new method that will unlink
    # right 'now' but still lets the exit
    # method attempt to remove it (which will
    # not throw due to our del file being quiet
    # about files that are not there)
    def unlink_now():
        fh.unlink(fh.name)

    setattr(fh, 'unlink_now', unlink_now)
    return fh


def fork_cb(child_cb, *args, **kwargs):
    fid = os.fork()
    if fid == 0:
        try:
            child_cb(*args, **kwargs)
            os._exit(0)
        except Exception:
            logexc(LOG, "Failed forking and calling callback %s",
                   type_utils.obj_name(child_cb))
            os._exit(1)
    else:
        LOG.debug("Forked child %s who will run callback %s",
                  fid, type_utils.obj_name(child_cb))


def is_true(val, addons=None):
    if isinstance(val, (bool)):
        return val is True
    check_set = list(TRUE_STRINGS)
    if addons:
        check_set = list(check_set) + addons
    if six.text_type(val).lower().strip() in check_set:
        return True
    return False


def is_false(val, addons=None):
    if isinstance(val, (bool)):
        return val is False
    check_set = list(FALSE_STRINGS)
    if addons:
        check_set = list(check_set) + addons
    if six.text_type(val).lower().strip() in check_set:
        return True
    return False


def translate_bool(val, addons=None):
    if not val:
        # This handles empty lists and false and
        # other things that python believes are false
        return False
    # If its already a boolean skip
    if isinstance(val, (bool)):
        return val
    return is_true(val, addons)
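

# Illustrative behavior of the truthiness helpers above (not executed):
#   is_true("Yes")        -> True    (matches TRUE_STRINGS, case-insensitive)
#   is_false("0")         -> True
#   translate_bool(None)  -> False   (empty/falsy python values)
#   translate_bool("on")  -> True    (delegates to is_true)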


def rand_str(strlen=32, select_from=None):
    if not select_from:
        select_from = string.ascii_letters + string.digits
    return "".join([random.choice(select_from) for _x in range(0, strlen)])


def read_conf(fname):
    try:
        return load_yaml(load_file(fname), default={})
    except IOError as e:
        if e.errno == errno.ENOENT:
            return {}
        else:
            raise


# Merges X lists, and then keeps the
# unique ones, but orders by sort order
# instead of by the original order
def uniq_merge_sorted(*lists):
    return sorted(uniq_merge(*lists))


# Merges X lists and then iterates over those
# and only keeps the unique items (order preserving)
# and returns that merged and uniqued list as the
# final result.
#
# Note: if any entry is a string it will be
# split on commas and empty entries will be
# evicted and merged in accordingly.
def uniq_merge(*lists):
    combined_list = []
    for a_list in lists:
        if isinstance(a_list, six.string_types):
            a_list = a_list.strip().split(",")
            # Kickout the empty ones
            a_list = [a for a in a_list if len(a)]
        combined_list.extend(a_list)
    return uniq_list(combined_list)
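

# Illustrative results (not executed): strings are split on commas, empties
# dropped, order of first appearance preserved:
#   uniq_merge("a,b", ["b", "c"])   -> ['a', 'b', 'c']
#   uniq_merge_sorted("c,a", ["b"]) -> ['a', 'b', 'c']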


def clean_filename(fn):
    for (k, v) in FN_REPLACEMENTS.items():
        fn = fn.replace(k, v)
    removals = []
    for k in fn:
        if k not in FN_ALLOWED:
            removals.append(k)
    for k in removals:
        fn = fn.replace(k, '')
    fn = fn.strip()
    return fn


def decomp_gzip(data, quiet=True, decode=True):
    try:
        buf = six.BytesIO(encode_text(data))
        with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
            if decode:
                return decode_binary(gh.read())
            else:
                return gh.read()
    except Exception as e:
        if quiet:
            return data
        else:
            raise DecompressionError(six.text_type(e))


def extract_usergroup(ug_pair):
    if not ug_pair:
        return (None, None)
    ug_parted = ug_pair.split(':', 1)
    u = ug_parted[0].strip()
    if len(ug_parted) == 2:
        g = ug_parted[1].strip()
    else:
        g = None
    if not u or u == "-1" or u.lower() == "none":
        u = None
    if not g or g == "-1" or g.lower() == "none":
        g = None
    return (u, g)


def find_modules(root_dir):
    entries = dict()
    for fname in glob.glob(os.path.join(root_dir, "*.py")):
        if not os.path.isfile(fname):
            continue
        modname = os.path.basename(fname)[0:-3]
        modname = modname.strip()
        if modname and modname.find(".") == -1:
            entries[fname] = modname
    return entries


def multi_log(text, console=True, stderr=True,
              log=None, log_level=logging.DEBUG):
    if stderr:
        sys.stderr.write(text)
    if console:
        conpath = "/dev/console"
        if os.path.exists(conpath):
            with open(conpath, 'w') as wfh:
                wfh.write(text)
                wfh.flush()
        else:
            # A container may lack /dev/console (arguably a container bug).
            # If it does not exist, then write output to stdout.  this will
            # result in duplicate stderr and stdout messages if stderr was
            # True.
            #
            # even though upstart or systemd might have set up output to go
            # to /dev/console, the user may have configured elsewhere via
            # cloud-config 'output'.  If there is /dev/console, messages will
            # still get there.
            sys.stdout.write(text)
    if log:
        if text[-1] == "\n":
            log.log(log_level, text[:-1])
        else:
            log.log(log_level, text)


def load_json(text, root_types=(dict,)):
    decoded = json.loads(decode_binary(text))
    if not isinstance(decoded, tuple(root_types)):
        expected_types = ", ".join([str(t) for t in root_types])
        raise TypeError("(%s) root types expected, got %s instead"
                        % (expected_types, type(decoded)))
    return decoded


def is_ipv4(instr):
    """determine if input string is a ipv4 address. return boolean."""
    toks = instr.split('.')
    if len(toks) != 4:
        return False

    try:
        toks = [x for x in toks if int(x) < 256 and int(x) >= 0]
    except ValueError:
        return False

    return len(toks) == 4


def get_cfg_option_bool(yobj, key, default=False):
    if key not in yobj:
        return default
    return translate_bool(yobj[key])


def get_cfg_option_str(yobj, key, default=None):
    if key not in yobj:
        return default
    val = yobj[key]
    if not isinstance(val, six.string_types):
        val = str(val)
    return val


def get_cfg_option_int(yobj, key, default=0):
    return int(get_cfg_option_str(yobj, key, default=default))


def system_info():
    return {
        'platform': platform.platform(),
        'release': platform.release(),
        'python': platform.python_version(),
        'uname': platform.uname(),
        'dist': platform.linux_distribution(),
    }


def get_cfg_option_list(yobj, key, default=None):
    """
    Gets the C{key} config option from C{yobj} as a list of strings. If the
    key is present as a single string it will be returned as a list with one
    string arg.

    @param yobj: The configuration object.
    @param key: The configuration key to get.
    @param default: The default to return if key is not found.
    @return: The configuration option as a list of strings or default if key
    is not found.
    """
    if key not in yobj:
        return default
    if yobj[key] is None:
        return []
    val = yobj[key]
    if isinstance(val, (list)):
        cval = [v for v in val]
        return cval
    if not isinstance(val, six.string_types):
        val = str(val)
    return [val]


# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
def get_cfg_by_path(yobj, keyp, default=None):
    cur = yobj
    for tok in keyp:
        if tok not in cur:
            return default
        cur = cur[tok]
    return cur
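

# Illustrative lookups (not executed):
#   get_cfg_by_path({'a': {'b': 1}}, ('a', 'b'))          -> 1
#   get_cfg_by_path({'a': {'b': 1}}, ('a', 'x'), 'dflt')  -> 'dflt'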


def fixup_output(cfg, mode):
    (outfmt, errfmt) = get_output_cfg(cfg, mode)
    redirect_output(outfmt, errfmt)


# redirect_output(outfmt, errfmt, orig_out, orig_err)
# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
# fmt can be:
#    > FILEPATH
#    >> FILEPATH
#    | program [ arg1 [ arg2 [ ... ] ] ]
#
# with a '|', arguments are passed to shell, so one level of
# shell escape is required.
#
# if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non empty and true
# value then output will not be redirected (useful for debugging).
#
def redirect_output(outfmt, errfmt, o_out=None, o_err=None):

    if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
        LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
        return

    if not o_out:
        o_out = sys.stdout
    if not o_err:
        o_err = sys.stderr

    if outfmt:
        LOG.debug("Redirecting %s to %s", o_out, outfmt)
        (mode, arg) = outfmt.split(" ", 1)
        if mode == ">" or mode == ">>":
            owith = "ab"
            if mode == ">":
                owith = "wb"
            new_fp = open(arg, owith)
        elif mode == "|":
            proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
            new_fp = proc.stdin
        else:
            raise TypeError("Invalid type for output format: %s" % outfmt)

        if o_out:
            os.dup2(new_fp.fileno(), o_out.fileno())

        if errfmt == outfmt:
            LOG.debug("Redirecting %s to %s", o_err, outfmt)
            os.dup2(new_fp.fileno(), o_err.fileno())
            return

    if errfmt:
        LOG.debug("Redirecting %s to %s", o_err, errfmt)
        (mode, arg) = errfmt.split(" ", 1)
        if mode == ">" or mode == ">>":
            owith = "ab"
            if mode == ">":
                owith = "wb"
            new_fp = open(arg, owith)
        elif mode == "|":
            proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
            new_fp = proc.stdin
        else:
            raise TypeError("Invalid type for error format: %s" % errfmt)

        if o_err:
            os.dup2(new_fp.fileno(), o_err.fileno())


def make_url(scheme, host, port=None,
             path='', params='', query='', fragment=''):

    pieces = []
    pieces.append(scheme or '')

    netloc = ''
    if host:
        netloc = str(host)

    if port is not None:
        netloc += ":" + "%s" % (port)

    pieces.append(netloc or '')
    pieces.append(path or '')
    pieces.append(params or '')
    pieces.append(query or '')
    pieces.append(fragment or '')

    return urlparse.urlunparse(pieces)


def mergemanydict(srcs, reverse=False):
    if reverse:
        srcs = reversed(srcs)
    merged_cfg = {}
    for cfg in srcs:
        if cfg:
            # Figure out which mergers to apply...
            mergers_to_apply = mergers.dict_extract_mergers(cfg)
            if not mergers_to_apply:
                mergers_to_apply = mergers.default_mergers()
            merger = mergers.construct(mergers_to_apply)
            merged_cfg = merger.merge(merged_cfg, cfg)
    return merged_cfg


@contextlib.contextmanager
def chdir(ndir):
    curr = os.getcwd()
    try:
        os.chdir(ndir)
        yield ndir
    finally:
        os.chdir(curr)


@contextlib.contextmanager
def umask(n_msk):
    old = os.umask(n_msk)
    try:
        yield old
    finally:
        os.umask(old)


@contextlib.contextmanager
def tempdir(**kwargs):
    # This seems like it was only added in python 3.2
    # Make it since its useful...
    # See: http://bugs.python.org/file12970/tempdir.patch
    tdir = tempfile.mkdtemp(**kwargs)
    try:
        yield tdir
    finally:
        del_dir(tdir)


def center(text, fill, max_len):
    return '{0:{fill}{align}{size}}'.format(text, fill=fill,
                                            align="^", size=max_len)


def del_dir(path):
    LOG.debug("Recursively deleting %s", path)
    shutil.rmtree(path)


def runparts(dirp, skip_no_exist=True, exe_prefix=None):
    if skip_no_exist and not os.path.isdir(dirp):
        return

    failed = []
    attempted = []

    if exe_prefix is None:
        prefix = []
    elif isinstance(exe_prefix, str):
        prefix = [str(exe_prefix)]
    elif isinstance(exe_prefix, list):
        prefix = exe_prefix
    else:
        raise TypeError("exe_prefix must be None, str, or list")

    for exe_name in sorted(os.listdir(dirp)):
        exe_path = os.path.join(dirp, exe_name)
        if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
            attempted.append(exe_path)
            try:
                subp(prefix + [exe_path], capture=False)
            except ProcessExecutionError as e:
                logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
                failed.append(e)

    if failed and attempted:
        raise RuntimeError('Runparts: %s failures in %s attempted commands'
                           % (len(failed), len(attempted)))


# read_optional_seed
# returns boolean indicating success or failure (presence of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
# 'meta-data' entries
def read_optional_seed(fill, base="", ext="", timeout=5):
    try:
        (md, ud) = read_seeded(base, ext, timeout)
        fill['user-data'] = ud
        fill['meta-data'] = md
        return True
    except url_helper.UrlError as e:
        if e.code == url_helper.NOT_FOUND:
            return False
        raise


def fetch_ssl_details(paths=None):
    ssl_details = {}
    # Lookup in these locations for ssl key/cert files
    ssl_cert_paths = [
        '/var/lib/cloud/data/ssl',
        '/var/lib/cloud/instance/data/ssl',
    ]
    if paths:
        ssl_cert_paths.extend([
            os.path.join(paths.get_ipath_cur('data'), 'ssl'),
            os.path.join(paths.get_cpath('data'), 'ssl'),
        ])
    ssl_cert_paths = uniq_merge(ssl_cert_paths)
    ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
    cert_file = None
    for d in ssl_cert_paths:
        if os.path.isfile(os.path.join(d, 'cert.pem')):
            cert_file = os.path.join(d, 'cert.pem')
            break
    key_file = None
    for d in ssl_cert_paths:
        if os.path.isfile(os.path.join(d, 'key.pem')):
            key_file = os.path.join(d, 'key.pem')
            break
    if cert_file and key_file:
        ssl_details['cert_file'] = cert_file
        ssl_details['key_file'] = key_file
    elif cert_file:
        ssl_details['cert_file'] = cert_file
    return ssl_details


def load_tfile_or_url(*args, **kwargs):
    return(decode_binary(read_file_or_url(*args, **kwargs).contents))


def read_file_or_url(url, timeout=5, retries=10,
                     headers=None, data=None, sec_between=1, ssl_details=None,
                     headers_cb=None, exception_cb=None):
    url = url.lstrip()
    if url.startswith("/"):
        url = "file://%s" % url
    if url.lower().startswith("file://"):
        if data:
            LOG.warn("Unable to post data to file resource %s", url)
        file_path = url[len("file://"):]
        try:
            contents = load_file(file_path, decode=False)
        except IOError as e:
            code = e.errno
            if e.errno == errno.ENOENT:
                code = url_helper.NOT_FOUND
            raise url_helper.UrlError(cause=e, code=code, headers=None)
        return url_helper.FileResponse(file_path, contents=contents)
    else:
        return url_helper.readurl(url,
                                  timeout=timeout,
                                  retries=retries,
                                  headers=headers,
                                  headers_cb=headers_cb,
                                  data=data,
                                  sec_between=sec_between,
                                  ssl_details=ssl_details,
                                  exception_cb=exception_cb)


def load_yaml(blob, default=None, allowed=(dict,)):
    loaded = default
    try:
        LOG.debug("Attempting to load yaml from string "
                  "of length %s with allowed root types %s",
                  len(blob), allowed)
        converted = safeyaml.load(blob)
        if not isinstance(converted, allowed):
            # Yes this will just be caught, but thats ok for now...
            raise TypeError(("Yaml load allows %s root types,"
                             " but got %s instead") %
                            (allowed, type_utils.obj_name(converted)))
        loaded = converted
    except (yaml.YAMLError, TypeError, ValueError):
        if len(blob) == 0:
            LOG.debug("load_yaml given empty string, returning default")
        else:
            logexc(LOG, "Failed loading yaml blob")
    return loaded


def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
    if base.startswith("/"):
        base = "file://%s" % base

    # default retries for file is 0. for network is 10
    if base.startswith("file://"):
        retries = file_retries

    if base.find("%s") >= 0:
        ud_url = base % ("user-data" + ext)
        md_url = base % ("meta-data" + ext)
    else:
        ud_url = "%s%s%s" % (base, "user-data", ext)
        md_url = "%s%s%s" % (base, "meta-data", ext)

    md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
    md = load_yaml(md_resp.contents, default={})

    ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
    ud = ud_resp.contents

    return (md, ud)


def read_conf_d(confd):
    # Get reverse sorted list (later trumps newer)
    confs = sorted(os.listdir(confd), reverse=True)

    # Remove anything not ending in '.cfg'
    confs = [f for f in confs if f.endswith(".cfg")]

    # Remove anything not a file
    confs = [f for f in confs
             if os.path.isfile(os.path.join(confd, f))]

    # Load them all so that they can be merged
    cfgs = []
    for fn in confs:
        cfgs.append(read_conf(os.path.join(confd, fn)))

    return mergemanydict(cfgs)


def read_conf_with_confd(cfgfile):
    cfg = read_conf(cfgfile)

    confd = False
    if "conf_d" in cfg:
        confd = cfg['conf_d']
        if confd:
            if not isinstance(confd, six.string_types):
                raise TypeError(("Config file %s contains 'conf_d' "
                                 "with non-string type %s") %
                                (cfgfile, type_utils.obj_name(confd)))
    elif os.path.isdir("%s.d" % cfgfile):
        confd = "%s.d" % cfgfile

    if not confd or not os.path.isdir(confd):
        return cfg

    # Conf.d settings override input configuration
    confd_cfg = read_conf_d(confd)
    return mergemanydict([confd_cfg, cfg])


def read_cc_from_cmdline(cmdline=None):
    # this should support reading cloud-config information from
    # the kernel command line.  It is intended to support content of the
    # format:
    #  cc: <yaml content here> [end_cc]
    # this would include:
    # cc: ssh_import_id: [smoser, kirkland]\\n
    # cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
    # cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
    if cmdline is None:
        cmdline = get_cmdline()

    tag_begin = "cc:"
    tag_end = "end_cc"
    begin_l = len(tag_begin)
    end_l = len(tag_end)
    clen = len(cmdline)
    tokens = []
    begin = cmdline.find(tag_begin)
    while begin >= 0:
        end = cmdline.find(tag_end, begin + begin_l)
        if end < 0:
            end = clen
        tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
                                                                    "\n"))

        begin = cmdline.find(tag_begin, end + end_l)

    return '\n'.join(tokens)
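

# Illustrative example (not executed): for a kernel command line of
#   root=/dev/sda cc: ssh_import_id: [smoser] end_cc
# read_cc_from_cmdline() returns everything between the 'cc:' and 'end_cc'
# tags ("ssh_import_id: [smoser] "), with any '\\n' sequences turned into
# real newlines.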


def dos2unix(contents):
    # find first end of line
    pos = contents.find('\n')
    if pos <= 0 or contents[pos - 1] != '\r':
        return contents
    return contents.replace('\r\n', '\n')


def get_hostname_fqdn(cfg, cloud):
    # return the hostname and fqdn from 'cfg'.  If not found in cfg,
    # then fall back to data from cloud
    if "fqdn" in cfg:
        # user specified a fqdn.  Default hostname then is based off that
        fqdn = cfg['fqdn']
        hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
    else:
        if "hostname" in cfg and cfg['hostname'].find('.') > 0:
            # user specified hostname, and it had '.' in it
            # be nice to them.  set fqdn and hostname from that
            fqdn = cfg['hostname']
            hostname = cfg['hostname'][:fqdn.find('.')]
        else:
            # no fqdn set, get fqdn from cloud.
            # get hostname from cfg if available otherwise cloud
            fqdn = cloud.get_hostname(fqdn=True)
            if "hostname" in cfg:
                hostname = cfg['hostname']
            else:
                hostname = cloud.get_hostname()
    return (hostname, fqdn)


def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
    """
    For each host a single line should be present with
    the following information:

      IP_address canonical_hostname [aliases...]

    Fields of the entry are separated by any number of blanks and/or tab
    characters.  Text from a "#" character until the end of the line is a
    comment, and is ignored.  Host names may contain only alphanumeric
    characters, minus signs ("-"), and periods (".").  They must begin with
    an alphabetic character and end with an alphanumeric character.
    Optional aliases provide for name changes, alternate spellings, shorter
    hostnames, or generic hostnames (for example, localhost).
    """
    fqdn = None
    for line in load_file(filename).splitlines():
        hashpos = line.find("#")
        if hashpos >= 0:
            line = line[0:hashpos]
        line = line.strip()
        if not line:
            continue

        # If there are fewer than 3 entries
        # (IP_address, canonical_hostname, alias)
        # then ignore this line
        toks = line.split()
        if len(toks) < 3:
            continue

        if hostname in toks[2:]:
            fqdn = toks[1]
            break
    return fqdn


def get_cmdline_url(names=('cloud-config-url', 'url'),
                    starts=b"#cloud-config", cmdline=None):
    if cmdline is None:
        cmdline = get_cmdline()

    data = keyval_str_to_dict(cmdline)
    url = None
    key = None
    for key in names:
        if key in data:
            url = data[key]
            break

    if not url:
        return (None, None, None)

    resp = read_file_or_url(url)
    # allow callers to pass starts as text when comparing to bytes contents
    starts = encode_text(starts)
    if resp.ok() and resp.contents.startswith(starts):
        return (key, url, resp.contents)

    return (key, url, None)


def is_resolvable(name):
    """determine if a url is resolvable, return a boolean
    This also attempts to be resilient against dns redirection.

    Note, that normal nsswitch resolution is used here.  So in order
    to avoid any utilization of 'search' entries in /etc/resolv.conf
    we have to append '.'.

    The top level 'invalid' domain is invalid per RFC.  And example.com
    should also not exist.  The random entry will be resolved inside
    the search list.
    """
    global _DNS_REDIRECT_IP
    if _DNS_REDIRECT_IP is None:
        badips = set()
        badnames = ("does-not-exist.example.com.", "example.invalid.",
                    rand_str())
        badresults = {}
        for iname in badnames:
            try:
                result = socket.getaddrinfo(iname, None, 0, 0,
                                            socket.SOCK_STREAM,
                                            socket.AI_CANONNAME)
                badresults[iname] = []
                for (_fam, _stype, _proto, cname, sockaddr) in result:
                    badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
                    badips.add(sockaddr[0])
            except (socket.gaierror, socket.error):
                pass
        _DNS_REDIRECT_IP = badips
        if badresults:
            LOG.debug("detected dns redirection: %s", badresults)

    try:
        result = socket.getaddrinfo(name, None)
        # check first result's sockaddr field
        addr = result[0][4][0]
        if addr in _DNS_REDIRECT_IP:
            return False
        return True
    except (socket.gaierror, socket.error):
        return False


def get_hostname():
    hostname = socket.gethostname()
    return hostname


def gethostbyaddr(ip):
    try:
        return socket.gethostbyaddr(ip)[0]
    except socket.herror:
        return None


def is_resolvable_url(url):
    """determine if this url is resolvable (existing or ip)."""
    return is_resolvable(urlparse.urlparse(url).hostname)


def search_for_mirror(candidates):
    """
    Search through a list of mirror urls for one that works
    This needs to return quickly.
    """
    for cand in candidates:
        try:
            if is_resolvable_url(cand):
                return cand
        except Exception:
            pass
    return None


def close_stdin():
    """
    reopen stdin as /dev/null so even subprocesses or other os level things
    get /dev/null as input.

    if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
    value then input will not be closed (useful for debugging).
    """
    if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDIN")):
        return
    with open(os.devnull) as fp:
        os.dup2(fp.fileno(), sys.stdin.fileno())


def find_devs_with(criteria=None, oformat='device',
                   tag=None, no_cache=False, path=None):
    """
    find devices matching given criteria (via blkid)
    criteria can be *one* of:
      TYPE=<filesystem>
      LABEL=<label>
      UUID=<uuid>
    """
    blk_id_cmd = ['blkid']
    options = []
    if criteria:
        # Search for block devices with tokens named NAME that
        # have the value 'value' and display any devices which are found.
        # Common values for NAME include TYPE, LABEL, and UUID.
        # If there are no devices specified on the command line,
        # all block devices will be searched; otherwise,
        # only search the devices specified by the user.
        options.append("-t%s" % (criteria))
    if tag:
        # For each (specified) device, show only the tags that match tag.
        options.append("-s%s" % (tag))
    if no_cache:
        # If you want to start with a clean cache
        # (i.e. don't report devices previously scanned
        # but not necessarily available at this time), specify /dev/null.
        options.extend(["-c", "/dev/null"])
    if oformat:
        # Display blkid's output using the specified format.
        # The format parameter may be:
        # full, value, list, device, udev, export
        options.append('-o%s' % (oformat))
    if path:
        options.append(path)
    cmd = blk_id_cmd + options
    # See man blkid for why 2 is added
    (out, _err) = subp(cmd, rcs=[0, 2])
    entries = []
    for line in out.splitlines():
        line = line.strip()
        if line:
            entries.append(line)
    return entries


def peek_file(fname, max_bytes):
    LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
    with open(fname, 'rb') as ifh:
        return ifh.read(max_bytes)


def uniq_list(in_list):
    out_list = []
    for i in in_list:
        if i in out_list:
            continue
        else:
            out_list.append(i)
    return out_list


def load_file(fname, read_cb=None, quiet=False, decode=True):
    LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
    ofh = six.BytesIO()
    try:
        with open(fname, 'rb') as ifh:
            pipe_in_out(ifh, ofh, chunk_cb=read_cb)
    except IOError as e:
        if not quiet:
            raise
        if e.errno != errno.ENOENT:
            raise
    contents = ofh.getvalue()
    LOG.debug("Read %s bytes from %s", len(contents), fname)
    if decode:
        return decode_binary(contents)
    else:
        return contents


def get_cmdline():
    if 'DEBUG_PROC_CMDLINE' in os.environ:
        cmdline = os.environ["DEBUG_PROC_CMDLINE"]
    else:
        try:
            cmdline = load_file("/proc/cmdline").strip()
        except Exception:
            cmdline = ""
    return cmdline


def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
    bytes_piped = 0
    while True:
        data = in_fh.read(chunk_size)
        if len(data) == 0:
            break
        else:
            out_fh.write(data)
            bytes_piped += len(data)
            if chunk_cb:
                chunk_cb(bytes_piped)
    out_fh.flush()
    return bytes_piped


def chownbyid(fname, uid=None, gid=None):
    if uid in [None, -1] and gid in [None, -1]:
        # Nothing to do
        return
    LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
    os.chown(fname, uid, gid)


def chownbyname(fname, user=None, group=None):
    uid = -1
    gid = -1
    try:
        if user:
            uid = pwd.getpwnam(user).pw_uid
        if group:
            gid = grp.getgrnam(group).gr_gid
    except KeyError as e:
        raise OSError("Unknown user or group: %s" % (e))
    chownbyid(fname, uid, gid)


# Always returns well formatted values
# cfg is expected to have an entry 'output' in it, which is a dictionary
# that includes entries for 'init', 'config', 'final' or 'all'
#   init: /var/log/cloud.out
#   config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
#   final:
#     output: "| logger -p"
#     error: "> /dev/null"
# this returns the specific 'mode' entry, cleanly formatted, with value
def get_output_cfg(cfg, mode):
    ret = [None, None]
    if not cfg or 'output' not in cfg:
        return ret

    outcfg = cfg['output']
    if mode in outcfg:
        modecfg = outcfg[mode]
    else:
        if 'all' not in outcfg:
            return ret
        # if there is a 'all' item in the output list
        # then it applies to all users of this (init, config, final)
        modecfg = outcfg['all']

    # if value is a string, it specifies stdout and stderr
    if isinstance(modecfg, str):
        ret = [modecfg, modecfg]

    # if its a list, then we expect (stdout, stderr)
    if isinstance(modecfg, list):
        if len(modecfg) > 0:
            ret[0] = modecfg[0]
        if len(modecfg) > 1:
            ret[1] = modecfg[1]

    # if it is a dictionary, expect 'output' and 'error'
    # items, which indicate out and error
    if isinstance(modecfg, dict):
        if 'output' in modecfg:
            ret[0] = modecfg['output']
        if 'error' in modecfg:
            ret[1] = modecfg['error']

    # if err's entry == "&1", then make it same as stdout
    # as in shell syntax of "echo foo >/dev/null 2>&1"
    if ret[1] == "&1":
        ret[1] = ret[0]

    swlist = [">>", ">", "|"]
    for i in range(len(ret)):
        if not ret[i]:
            continue
        val = ret[i].lstrip()
        found = False
        for s in swlist:
            if val.startswith(s):
                val = "%s %s" % (s, val[len(s):].strip())
                found = True
                break
        if not found:
            # default behavior is append
            val = "%s %s" % (">>", val.strip())
        ret[i] = val

    return ret
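

# Illustrative mapping from a cloud-config 'output' section to what
# get_output_cfg() returns for mode 'init' (not executed):
#   cfg = {'output': {'all': '| tee -a /var/log/cloud-init-output.log'}}
#   get_output_cfg(cfg, 'init')
#     -> ['| tee -a /var/log/cloud-init-output.log',
#         '| tee -a /var/log/cloud-init-output.log']
#   cfg = {'output': {'init': ['>> /tmp/out.log', '/tmp/err.log']}}
#   get_output_cfg(cfg, 'init')
#     -> ['>> /tmp/out.log', '>> /tmp/err.log']   (append is the default)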


def logexc(log, msg, *args):
    # Setting this here allows this to change
    # levels easily (not always error level)
    # or even desirable to have that much junk
    # coming out to a non-debug stream
    if msg:
        log.warn(msg, *args)
    # Debug gets the full trace.  However, nose has a bug whereby its
    # logcapture plugin doesn't properly handle the case where there is no
    # actual exception.  To avoid tracebacks during the test suite then, we'll
    # do the actual exc_info extraction here, and if there is no exception in
    # flight, we'll just pass in None.
    exc_info = sys.exc_info()
    if exc_info == (None, None, None):
        exc_info = None
    log.debug(msg, exc_info=exc_info, *args)


def hash_blob(blob, routine, mlen=None):
    hasher = hashlib.new(routine)
    hasher.update(encode_text(blob))
    digest = hasher.hexdigest()
    # Don't get too long now
    if mlen is not None:
        return digest[0:mlen]
    else:
        return digest


def is_user(name):
    try:
        if pwd.getpwnam(name):
            return True
    except KeyError:
        return False


def is_group(name):
    try:
        if grp.getgrnam(name):
            return True
    except KeyError:
        return False


def rename(src, dest):
    LOG.debug("Renaming %s to %s", src, dest)
    # TODO(harlowja) use a se guard here??
    os.rename(src, dest)


def ensure_dirs(dirlist, mode=0o755):
    for d in dirlist:
        ensure_dir(d, mode)


def read_write_cmdline_url(target_fn):
    if not os.path.exists(target_fn):
        try:
            (key, url, content) = get_cmdline_url()
        except Exception:
            logexc(LOG, "Failed fetching command line url")
            return
        try:
            if key and content:
                write_file(target_fn, content, mode=0o600)
                LOG.debug(("Wrote to %s with contents of command line"
                           " url %s (len=%s)"), target_fn, url, len(content))
            elif key and not content:
                LOG.debug(("Command line key %s with url"
                           " %s had no contents"), key, url)
        except Exception:
            logexc(LOG, "Failed writing url content to %s", target_fn)


def yaml_dumps(obj, explicit_start=True, explicit_end=True):
    return yaml.safe_dump(obj,
                          line_break="\n",
                          indent=4,
                          explicit_start=explicit_start,
                          explicit_end=explicit_end)


def ensure_dir(path, mode=None):
    if not os.path.isdir(path):
        # Make the dir and adjust the mode
        with SeLinuxGuard(os.path.dirname(path), recursive=True):
            os.makedirs(path)
        chmod(path, mode)
    else:
        # Just adjust the mode
        chmod(path, mode)


@contextlib.contextmanager
def unmounter(umount):
    try:
        yield umount
    finally:
        if umount:
            umount_cmd = ["umount", umount]
            subp(umount_cmd)


def mounts():
    mounted = {}
    try:
        # Go through mounts to see what is already mounted
        if os.path.exists("/proc/mounts"):
            mount_locs = load_file("/proc/mounts").splitlines()
            method = 'proc'
        else:
            (mountoutput, _err) = subp("mount")
            mount_locs = mountoutput.splitlines()
            method = 'mount'
        mountre = r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$'
        for mpline in mount_locs:
            # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
            # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
            if method == 'proc':
                (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
            else:
                m = re.search(mountre, mpline)
                if not m:
                    continue
                dev = m.group(1)
                mp = m.group(2)
                fstype = m.group(3)
                opts = m.group(4)
            # If the name of the mount point contains spaces these
            # can be escaped as '\040', so undo that..
            mp = mp.replace("\\040", " ")
            mounted[dev] = {
                'fstype': fstype,
                'mountpoint': mp,
                'opts': opts,
            }
        LOG.debug("Fetched %s mounts from %s", mounted, method)
    except (IOError, OSError):
        logexc(LOG, "Failed fetching mount points")
    return mounted


def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
    """
    Mount the device, call method 'callback' passing the directory
    in which it was mounted, then unmount.  Return whatever 'callback'
    returned.  If data != None, also pass data to callback.

    mtype is a filesystem type.  it may be a list, string (a single fsname)
    or a list of fsnames.
    """

    if isinstance(mtype, str):
        mtypes = [mtype]
    elif isinstance(mtype, (list, tuple)):
        mtypes = list(mtype)
    elif mtype is None:
        mtypes = None

    # clean up 'mtype' input a bit based on platform.
    platsys = platform.system().lower()
    if platsys == "linux":
        if mtypes is None:
            mtypes = ["auto"]
    elif platsys.endswith("bsd"):
        if mtypes is None:
            mtypes = ['ufs', 'cd9660', 'vfat']
        for index, mtype in enumerate(mtypes):
            if mtype == "iso9660":
                mtypes[index] = "cd9660"
    else:
        # we cannot do a smart "auto", so just call 'mount' once with no -t
        mtypes = ['']

    mounted = mounts()
    with tempdir() as tmpd:
        umount = False
        if device in mounted:
            mountpoint = mounted[device]['mountpoint']
        else:
            failure_reason = None
            for mtype in mtypes:
                mountpoint = None
                try:
                    mountcmd = ['mount']
                    mountopts = []
                    if rw:
                        mountopts.append('rw')
                    else:
                        mountopts.append('ro')
                    if sync:
                        # This seems like the safe approach to do
                        # (ie where this is on by default)
                        mountopts.append("sync")
                    if mountopts:
                        mountcmd.extend(["-o", ",".join(mountopts)])
                    if mtype:
                        mountcmd.extend(['-t', mtype])
                    mountcmd.append(device)
                    mountcmd.append(tmpd)
                    subp(mountcmd)
                    umount = tmpd  # This forces it to be unmounted (when set)
                    mountpoint = tmpd
                    break
                except (IOError, OSError) as exc:
                    LOG.debug("Failed mount of '%s' as '%s': %s",
                              device, mtype, exc)
                    failure_reason = exc
            if not mountpoint:
                raise MountFailedError("Failed mounting %s to %s due to: %s" %
                                       (device, tmpd, failure_reason))

        # Be nice and ensure it ends with a slash
        if not mountpoint.endswith("/"):
            mountpoint += "/"
        with unmounter(umount):
            if data is None:
                ret = callback(mountpoint)
            else:
                ret = callback(mountpoint, data)
            return ret


def get_builtin_cfg():
    # Deep copy so that others can't modify
    return obj_copy.deepcopy(CFG_BUILTIN)


def sym_link(source, link, force=False):
    LOG.debug("Creating symbolic link from %r => %r", link, source)
    if force and os.path.exists(link):
        del_file(link)
    os.symlink(source, link)


def del_file(path):
    LOG.debug("Attempting to remove %s", path)
    try:
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise e


def copy(src, dest):
    LOG.debug("Copying %s to %s", src, dest)
    shutil.copy(src, dest)


def time_rfc2822():
    try:
        ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
    except Exception:
        ts = "??"
    return ts


def uptime():
    uptime_str = '??'
    method = 'unknown'
    try:
        if os.path.exists("/proc/uptime"):
            method = '/proc/uptime'
            contents = load_file("/proc/uptime").strip()
            if contents:
                uptime_str = contents.split()[0]
        else:
            method = 'ctypes'
            libc = ctypes.CDLL('/lib/libc.so.7')
            size = ctypes.c_size_t()
            buf = ctypes.c_int()
            size.value = ctypes.sizeof(buf)
            libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
                              ctypes.byref(size), None, 0)
            now = time.time()
            bootup = buf.value
            uptime_str = now - bootup
    except Exception:
        logexc(LOG, "Unable to read uptime using method: %s" % method)
    return uptime_str


def append_file(path, content):
    write_file(path, content, omode="ab", mode=None)


def ensure_file(path, mode=0o644):
    write_file(path, content='', omode="ab", mode=mode)


def safe_int(possible_int):
    try:
        return int(possible_int)
    except (ValueError, TypeError):
        return None


def chmod(path, mode):
    real_mode = safe_int(mode)
    if path and real_mode:
        os.chmod(path, real_mode)


def write_file(filename, content, mode=0o644, omode="wb"):
    """
    Writes a file with the given content and sets the file mode as specified.
    Restores the SELinux context if possible.

    @param filename: The full path of the file to write.
    @param content: The content to write to the file.
    @param mode: The filesystem mode to set on the file.
    @param omode: The open mode used when opening the file (w, wb, a, etc.)
    """
    ensure_dir(os.path.dirname(filename))
    if 'b' in omode.lower():
        content = encode_text(content)
        write_type = 'bytes'
    else:
        content = decode_binary(content)
        write_type = 'characters'
    LOG.debug("Writing to %s - %s: [%s] %s %s",
              filename, omode, mode, len(content), write_type)
    with SeLinuxGuard(path=filename):
        with open(filename, omode) as fh:
            fh.write(content)
            fh.flush()
    chmod(filename, mode)


def delete_dir_contents(dirname):
    """
    Deletes all contents of a directory without deleting the directory itself.

    @param dirname: The directory whose contents should be deleted.
    """
    for node in os.listdir(dirname):
        node_fullpath = os.path.join(dirname, node)
        if os.path.isdir(node_fullpath):
            del_dir(node_fullpath)
        else:
            del_file(node_fullpath)


def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
         logstring=False):
    if rcs is None:
        rcs = [0]
    try:
        if not logstring:
            LOG.debug(("Running command %s with allowed return codes %s"
                       " (shell=%s, capture=%s)"), args, rcs, shell, capture)
        else:
            LOG.debug(("Running hidden command to protect sensitive "
                       "input/output logstring: %s"), logstring)

        if not capture:
            stdout = None
            stderr = None
        else:
            stdout = subprocess.PIPE
            stderr = subprocess.PIPE
        stdin = subprocess.PIPE
        kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
                   env=env, shell=shell)
        if six.PY3:
            # Use this so subprocess output will be (Python 3) str, not bytes.
            kws['universal_newlines'] = True
        sp = subprocess.Popen(args, **kws)
        (out, err) = sp.communicate(data)
    except OSError as e:
        raise ProcessExecutionError(cmd=args, reason=e)
    rc = sp.returncode
    if rc not in rcs:
        raise ProcessExecutionError(stdout=out, stderr=err,
                                    exit_code=rc,
                                    cmd=args)
    # Just ensure blank instead of none?? (iff capturing)
    if not out and capture:
        out = ''
    if not err and capture:
        err = ''
    return (out, err)


def make_header(comment_char="#", base='created'):
    ci_ver = version.version_string()
    header = str(comment_char)
    header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
    header += " on %s" % time_rfc2822()
    return header


def abs_join(*paths):
    return os.path.abspath(os.path.join(*paths))


# shellify, takes a list of commands
#  for each entry in the list
#    if it is an array, shell protect it (with single ticks)
#    if it is a string, do nothing
def shellify(cmdlist, add_header=True):
    content = ''
    if add_header:
        content += "#!/bin/sh\n"
    escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
    cmds_made = 0
    for args in cmdlist:
        # If the item is a list, wrap all items in single tick.
        # If its not, then just write it directly.
        if isinstance(args, list):
            fixed = []
            for f in args:
                fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
            content = "%s%s\n" % (content, ' '.join(fixed))
            cmds_made += 1
        elif isinstance(args, six.string_types):
            content = "%s%s\n" % (content, args)
            cmds_made += 1
        else:
            raise RuntimeError(("Unable to shellify type %s"
                                " which is not a list or string")
                               % (type_utils.obj_name(args)))
    LOG.debug("Shellified %s commands.", cmds_made)
    return content
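

# Illustrative output (not executed):
#   shellify([['ls', '-l'], 'echo hi'])
# produces the script text:
#   #!/bin/sh
#   'ls' '-l'
#   echo hi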


def strip_prefix_suffix(line, prefix=None, suffix=None):
    if prefix and line.startswith(prefix):
        line = line[len(prefix):]
    if suffix and line.endswith(suffix):
        line = line[:-len(suffix)]
    return line


def is_container():
    """
    Checks to see if this code is running in a container of some sort
    """
    for helper in CONTAINER_TESTS:
        try:
            # try to run a helper program. if it returns true/zero
            # then we're inside a container. otherwise, no
            subp([helper])
            return True
        except (IOError, OSError):
            pass

    # this code is largely from the logic in
    # ubuntu's /etc/init/container-detect.conf
    try:
        # Detect old-style libvirt
        # Detect OpenVZ containers
        pid1env = get_proc_env(1)
        if "container" in pid1env:
            return True
        if "LIBVIRT_LXC_UUID" in pid1env:
            return True
    except (IOError, OSError):
        pass

    # Detect OpenVZ containers
    if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
        return True

    try:
        # Detect Vserver containers
        lines = load_file("/proc/self/status").splitlines()
        for line in lines:
            if line.startswith("VxID:"):
                (_key, val) = line.strip().split(":", 1)
                if val != "0":
                    return True
    except (IOError, OSError):
        pass

    return False


def get_proc_env(pid):
    """
    Return the environment in a dict that a given process id was started with.
    """

    env = {}
    fn = os.path.join("/proc/", str(pid), "environ")
    try:
        contents = load_file(fn)
        toks = contents.split("\x00")
        for tok in toks:
            if tok == "":
                continue
            (name, val) = tok.split("=", 1)
            env[name] = val
    except (IOError, OSError):
        pass
    return env
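

# Illustrative example (not executed): /proc/<pid>/environ holds NUL-separated
# "NAME=value" pairs, so for raw contents of
#   'container=lxc\x00PATH=/usr/bin\x00'
# get_proc_env() would return {'container': 'lxc', 'PATH': '/usr/bin'}.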


def keyval_str_to_dict(kvstring):
    ret = {}
    for tok in kvstring.split():
        try:
            (key, val) = tok.split("=", 1)
        except ValueError:
            key = tok
            val = True
        ret[key] = val
    return ret


def is_partition(device):
    if device.startswith("/dev/"):
        device = device[5:]

    return os.path.isfile("/sys/class/block/%s/partition" % device)


def expand_package_list(version_fmt, pkgs):
    # we will accept tuples, lists of tuples, or just plain lists
    if not isinstance(pkgs, list):
        pkgs = [pkgs]

    pkglist = []
    for pkg in pkgs:
        if isinstance(pkg, six.string_types):
            pkglist.append(pkg)
            continue

        if isinstance(pkg, (tuple, list)):
            if len(pkg) < 1 or len(pkg) > 2:
                raise RuntimeError("Invalid package & version tuple.")

            if len(pkg) == 2 and pkg[1]:
                pkglist.append(version_fmt % tuple(pkg))
                continue

            pkglist.append(pkg[0])

        else:
            raise RuntimeError("Invalid package type.")

    return pkglist
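

# Illustrative expansion (not executed), using an apt-style version format:
#   expand_package_list('%s=%s', ['vim', ('tmux', '2.1')])
#     -> ['vim', 'tmux=2.1']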


def parse_mount_info(path, mountinfo_lines, log=LOG):
    """Return the mount information for PATH given the lines from
    /proc/$$/mountinfo."""

    path_elements = [e for e in path.split('/') if e]
    devpth = None
    fs_type = None
    match_mount_point = None
    match_mount_point_elements = None
    for i, line in enumerate(mountinfo_lines):
        parts = line.split()

        # Completely fail if there is anything in any line that is
        # unexpected, as continuing to parse past a bad line could
        # cause an incorrect result to be returned, so it's better to
        # return nothing than an incorrect result.

        # The minimum number of elements in a valid line is 10.
        if len(parts) < 10:
            log.debug("Line %d has too few columns (%d): %s",
                      i + 1, len(parts), line)
            return None

        mount_point = parts[4]
        mount_point_elements = [e for e in mount_point.split('/') if e]

        # Ignore mounts deeper than the path in question.
        if len(mount_point_elements) > len(path_elements):
            continue

        # Ignore mounts where the common path is not the same.
        l = min(len(mount_point_elements), len(path_elements))
        if mount_point_elements[0:l] != path_elements[0:l]:
            continue

        # Ignore mount points higher than an already seen mount
        # point.
        if (match_mount_point_elements is not None and
                len(match_mount_point_elements) > len(mount_point_elements)):
            continue

        # Find the '-' which terminates a list of optional columns to
        # find the filesystem type and the path to the device.  See
        # man 5 proc for the format of this file.
        try:
            i = parts.index('-')
        except ValueError:
            log.debug("Did not find column named '-' in line %d: %s",
                      i + 1, line)
            return None

        # Get the path to the device.
        try:
            fs_type = parts[i + 1]
            devpth = parts[i + 2]
        except IndexError:
            log.debug("Too few columns after '-' column in line %d: %s",
                      i + 1, line)
            return None

        match_mount_point = mount_point
        match_mount_point_elements = mount_point_elements

    if devpth and fs_type and match_mount_point:
        return (devpth, fs_type, match_mount_point)
    else:
        return None


def parse_mtab(path):
    """On older kernels there's no /proc/$$/mountinfo, so use mtab."""
    for line in load_file("/etc/mtab").splitlines():
        devpth, mount_point, fs_type = line.split()[:3]
        if mount_point == path:
            return devpth, fs_type, mount_point
    return None


def parse_mount(path):
    (mountoutput, _err) = subp("mount")
    mount_locs = mountoutput.splitlines()
    for line in mount_locs:
        m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line)
        if not m:
            continue
        devpth = m.group(1)
        mount_point = m.group(2)
        fs_type = m.group(3)
        if mount_point == path:
            return devpth, fs_type, mount_point
    return None


def get_mount_info(path, log=LOG):
    # Use /proc/$$/mountinfo to find the device where path is mounted.
    # This is done because with a btrfs filesystem using os.stat(path)
    # does not return the ID of the device.
    #
    # Here, / has a device of 18 (decimal).
    #
    # $ stat /
    #   File: '/'
    #   Size: 234      Blocks: 0     IO Block: 4096   directory
    # Device: 12h/18d  Inode: 256    Links: 1
    # Access: (0755/drwxr-xr-x)  Uid: (0/root)  Gid: (0/root)
    # Access: 2013-01-13 07:31:04.358011255 +0000
    # Modify: 2013-01-13 18:48:25.930011255 +0000
    # Change: 2013-01-13 18:48:25.930011255 +0000
    #  Birth: -
    #
    # Find where / is mounted:
    #
    # $ mount | grep ' / '
    # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
    #
    # And the device ID for /dev/vda1 is not 18:
    #
    # $ ls -l /dev/vda1
    # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
    #
    # So use /proc/$$/mountinfo to find the device underlying the
    # input path.
    mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
    if os.path.exists(mountinfo_path):
        lines = load_file(mountinfo_path).splitlines()
        return parse_mount_info(path, lines, log)
    elif os.path.exists("/etc/mtab"):
        return parse_mtab(path)
    else:
        return parse_mount(path)


def which(program):
    # Return path of program for execution if found in path
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

    _fpath, _ = os.path.split(program)
    if _fpath:
        if is_exe(program):
            return program
    else:
        for path in os.environ.get("PATH", "").split(os.pathsep):
            path = path.strip('"')
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file

    return None


def log_time(logfunc, msg, func, args=None, kwargs=None, get_uptime=False):
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}

    start = time.time()

    ustart = None
    if get_uptime:
        try:
            ustart = float(uptime())
        except ValueError:
            pass

    try:
        ret = func(*args, **kwargs)
    finally:
        delta = time.time() - start
        udelta = None
        if ustart is not None:
            try:
                udelta = float(uptime()) - ustart
            except ValueError:
                pass

        tmsg = " took %0.3f seconds" % delta
        if get_uptime:
            if isinstance(udelta, (float)):
                tmsg += " (%0.2f)" % udelta
            else:
                tmsg += " (N/A)"
        try:
            logfunc(msg + tmsg)
        except Exception:
            pass
    return ret


def expand_dotted_devname(dotted):
    toks = dotted.rsplit(".", 1)
    if len(toks) > 1:
        return toks
    else:
        return (dotted, None)


def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
    # return a dictionary populated with keys in 'required' and 'optional'
    # by reading files in prefix + delim + entry
    if required is None:
        required = []
    if optional is None:
        optional = []

    missing = []
    ret = {}
    for f in required + optional:
        try:
            ret[f] = load_file(base + delim + f, quiet=False, decode=False)
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            if f in required:
                missing.append(f)

    if len(missing):
        raise ValueError("Missing required files: %s" % ','.join(missing))

    return ret


def read_meminfo(meminfo="/proc/meminfo", raw=False):
    # read a /proc/meminfo style file and return
    # a dict with 'total', 'free', and 'available'
    mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
    kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
            'MemAvailable:': 'available'}
    ret = {}
    for line in load_file(meminfo).splitlines():
        try:
            key, value, unit = line.split()
        except ValueError:
            key, value = line.split()
            unit = 'B'
        if raw:
            ret[key] = int(value) * mpliers[unit]
        elif key in kmap:
            ret[kmap[key]] = int(value) * mpliers[unit]

    return ret
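

# Illustrative parse (not executed): for a /proc/meminfo containing
#   MemTotal:        1024000 kB
#   MemFree:          512000 kB
#   MemAvailable:     768000 kB
# read_meminfo() returns
#   {'total': 1048576000, 'free': 524288000, 'available': 786432000}
# (the kB figures multiplied by 1024).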


def human2bytes(size):
    """Convert human string or integer to size in bytes
    10M => 10485760
    .5G => 536870912
    """
    size_in = size
    if size.endswith("B"):
        size = size[:-1]

    mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}

    num = size
    mplier = 'B'
    for m in mpliers:
        if size.endswith(m):
            mplier = m
            num = size[0:-len(m)]

    try:
        num = float(num)
    except ValueError:
        raise ValueError("'%s' is not valid input." % size_in)

    if num < 0:
        raise ValueError("'%s': cannot be negative" % size_in)

    return int(num * mpliers[mplier])
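

# Illustrative conversions (from the docstring above; not executed):
#   human2bytes("10M")  -> 10485760     (10 * 2**20)
#   human2bytes(".5G")  -> 536870912    (0.5 * 2**30)
#   human2bytes("2KB")  -> 2048         (trailing "B" is stripped first)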


def _read_dmi_syspath(key):
    """
    Reads dmi data from /sys/class/dmi/id
    """
    if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
        return None
    mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
    dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
    LOG.debug("querying dmi data %s", dmi_key_path)
    try:
        if not os.path.exists(dmi_key_path):
            LOG.debug("did not find %s", dmi_key_path)
            return None

        key_data = load_file(dmi_key_path)
        if not key_data:
            LOG.debug("%s did not return any data", dmi_key_path)
            return None

        LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
        return key_data.strip()

    except Exception:
        logexc(LOG, "failed read of %s", dmi_key_path)
        return None


def _call_dmidecode(key, dmidecode_path):
    """
    Calls out to dmidecode to get the data out.  This is mostly for supporting
    OS's without /sys/class/dmi/id support.
    """
    try:
        cmd = [dmidecode_path, "--string", key]
        (result, _err) = subp(cmd)
        LOG.debug("dmidecode returned '%s' for '%s'", result, key)
        return result
    except (IOError, OSError) as _err:
        LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err)
        return None


def read_dmi_data(key):
    """
    Wrapper for reading DMI data.

    This will do the following (returning the first that produces a
    result):
        1) Use a mapping to translate `key` from dmidecode naming to
           sysfs naming and look in /sys/class/dmi/... for a value.
        2) Use `key` as a sysfs key directly and look in /sys/class/dmi/...
        3) Fall-back to passing `key` to `dmidecode --string`.

    If all of the above fail to find a value, None will be returned.
    """
    syspath_value = _read_dmi_syspath(key)
    if syspath_value is not None:
        return syspath_value

    dmidecode_path = which('dmidecode')
    if dmidecode_path:
        return _call_dmidecode(key, dmidecode_path)

    LOG.warn("did not find either path %s or dmidecode command",
             DMI_SYS_PATH)
    return None
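

# Illustrative lookup (not executed): read_dmi_data('system-uuid') first tries
# /sys/class/dmi/id/product_uuid (via DMIDECODE_TO_DMI_SYS_MAPPING), and only
# falls back to running `dmidecode --string system-uuid` if the sysfs path
# yields nothing; None is returned when neither source provides a value.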


def message_from_string(string):
    if sys.version_info[:2] < (2, 7):
        return email.message_from_file(six.StringIO(string))
    return email.message_from_string(string)