# Author: Juerg Haefliger <juerg.haefliger@hp.com>
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
LOG = logging.getLogger(__name__)

# Helps cleanup filenames to ensure they aren't FS incompatible
FN_REPLACEMENTS = {
    os.sep: '_',
}

TRUE_STRINGS = ('true', '1', 'on', 'yes')
FALSE_STRINGS = ('off', '0', 'no', 'false')

CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')


def decode_binary(blob, encoding='utf-8'):
    # Converts a binary type into a text type using given encoding.
    if isinstance(blob, six.text_type):
        return blob
    return blob.decode(encoding)


def encode_text(text, encoding='utf-8'):
    # Converts a text string into a binary type using given encoding.
    if isinstance(text, six.binary_type):
        return text
    return text.encode(encoding)


def b64d(source):
    # Base64 decode some data, accepting bytes or unicode/str, and returning
    # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
    decoded = b64decode(source)
    try:
        return decoded.decode('utf-8')
    except UnicodeDecodeError:
        return decoded


def b64e(source):
    # Base64 encode some data, accepting bytes or unicode/str, and returning
    # str/unicode of the encoded result.
    if not isinstance(source, bytes):
        source = source.encode('utf-8')
    return b64encode(source).decode('utf-8')
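# Illustrative round trip (not part of the original source; values follow
# standard base64): b64e('hi') returns 'aGk=', and b64d('aGk=') returns 'hi'.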


def fully_decoded_payload(part):
    # In Python 3, decoding the payload will ironically hand us a bytes object.
    # 'decode' means to decode according to Content-Transfer-Encoding, not
    # according to any charset in the Content-Type.  So, if we end up with
    # bytes, first try to decode to str via CT charset, and failing that, try
    # utf-8 using surrogate escapes.
    cte_payload = part.get_payload(decode=True)
            part.get_content_maintype() == 'text' and
            isinstance(cte_payload, bytes)):
        charset = part.get_charset() or 'utf-8'
        return cte_payload.decode(charset, errors='surrogateescape')
    return cte_payload

# dmidecode and /sys/class/dmi/id/* use different names for the same value,
# this allows us to refer to them by one canonical name
DMIDECODE_TO_DMI_SYS_MAPPING = {
    'baseboard-asset-tag': 'board_asset_tag',
    'baseboard-manufacturer': 'board_vendor',
    'baseboard-product-name': 'board_name',
    'baseboard-serial-number': 'board_serial',
    'baseboard-version': 'board_version',
    'bios-release-date': 'bios_date',
    'bios-vendor': 'bios_vendor',
    'bios-version': 'bios_version',
    'chassis-asset-tag': 'chassis_asset_tag',
    'chassis-manufacturer': 'chassis_vendor',
    'chassis-serial-number': 'chassis_serial',
    'chassis-version': 'chassis_version',
    'system-manufacturer': 'sys_vendor',
    'system-product-name': 'product_name',
    'system-serial-number': 'product_serial',
    'system-uuid': 'product_uuid',
    'system-version': 'product_version',
}
    MESSAGE_TMPL = ('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Reason: %(reason)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r')

    def __init__(self, stdout=None, stderr=None,
                 exit_code=None, cmd=None,
                 description=None, reason=None):
        if not cmd:
            self.cmd = '-'
        else:
            self.cmd = cmd

        if not description:
            self.description = 'Unexpected error while running command.'
        else:
            self.description = description

            self.exit_code = '-'
        else:
            self.exit_code = exit_code

        if not stderr:
            self.stderr = ''
        else:
            self.stderr = stderr

        if not stdout:
            self.stdout = ''
        else:
            self.stdout = stdout

        message = self.MESSAGE_TMPL % {
            'description': self.description,
            'cmd': self.cmd,
            'exit_code': self.exit_code,
            'stdout': self.stdout,
            'stderr': self.stderr,
        # For backward compatibility with Python 2.
        if not hasattr(self, 'message'):
            self.message = message
        # Late import since it might not always
        # be possible to use this
        try:
            self.selinux = importer.import_module('selinux')
        except ImportError:
            self.selinux = None
        if not self.selinux or not self.selinux.is_selinux_enabled():
            return
        if not os.path.lexists(self.path):
            return

        path = os.path.realpath(self.path)
        # path should be a string, not unicode
        try:
            stats = os.lstat(path)
            self.selinux.matchpathcon(path, stats[stat.ST_MODE])
        except OSError:
            return

        LOG.debug("Restoring selinux mode for %s (recursive=%s)",
                  path, self.recursive)
        self.selinux.restorecon(path, recursive=self.recursive)
    fh = tempfile.NamedTemporaryFile(**kwargs)
    # Replace its unlink with a quiet version
    # that does not raise errors when the
    # file to unlink has been unlinked elsewhere..
    LOG.debug("Created temporary file %s", fh.name)
    fh.unlink = del_file
    # right 'now' but still lets the exit
    # method attempt to remove it (which will
    # not throw due to our del file being quiet
    # about files that are not there)
    def unlink_now():
        fh.unlink(fh.name)


def is_true(val, addons=None):
    if isinstance(val, (bool)):
        return val is True
        check_set = list(check_set) + addons
    if six.text_type(val).lower().strip() in check_set:


def is_false(val, addons=None):
    if isinstance(val, (bool)):
        return val is False
        check_set = list(check_set) + addons
    if six.text_type(val).lower().strip() in check_set:
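# Illustrative behavior (assuming the elided lines above compare against
# TRUE_STRINGS / FALSE_STRINGS defined earlier):
#   is_true("Yes") -> True, is_true("0") -> False, is_false("off") -> True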


# Merges X lists, and then keeps the
# unique ones, but orders by sort order
# instead of by the original order
def uniq_merge_sorted(*lists):
    return sorted(uniq_merge(*lists))


# Merges X lists and then iterates over those
# and only keeps the unique items (order preserving)
# and returns that merged and uniqued list as the
# final result.
#
# Note: if any entry is a string it will be
# split on commas and empty entries will be
# evicted and merged in accordingly.
def uniq_merge(*lists):
    combined_list = []
    for a_list in lists:
            a_list = a_list.strip().split(",")
            # Kickout the empty ones
            a_list = [a for a in a_list if len(a)]
        combined_list.extend(a_list)
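# Illustrative result (assuming the elided tail of uniq_merge de-duplicates
# the combined list): uniq_merge("a,b", ["b", "c"]) -> ['a', 'b', 'c']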
    removals = []
    for k in fn:
        if k not in FN_ALLOWED:
            removals.append(k)
    for k in removals:
        fn = fn.replace(k, '')
    fn = fn.strip()
    return fn


def extract_usergroup(ug_pair):
    if not ug_pair:
        return (None, None)
    ug_parted = ug_pair.split(':', 1)
    u = ug_parted[0].strip()
    if len(ug_parted) == 2:
        g = ug_parted[1].strip()
    else:
        g = None
    if not u or u == "-1" or u.lower() == "none":
        u = None
    if not g or g == "-1" or g.lower() == "none":
        g = None
    return (u, g)
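# Illustrative (not in the original source):
#   extract_usergroup("ubuntu:admin") -> ('ubuntu', 'admin')
#   extract_usergroup("root") -> ('root', None)
#   extract_usergroup("-1:none") -> (None, None)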

def find_modules(root_dir):
    entries = dict()
    for fname in glob.glob(os.path.join(root_dir, "*.py")):
        if not os.path.isfile(fname):
            continue
        modname = os.path.basename(fname)[0:-3]
        modname = modname.strip()
        if modname and modname.find(".") == -1:
            entries[fname] = modname
    return entries


def multi_log(text, console=True, stderr=True,
              log=None, log_level=logging.DEBUG):
                wfh.write(text)
                wfh.flush()
        else:
            # A container may lack /dev/console (arguably a container bug). If
            # it does not exist, then write output to stdout. This will result
            # in duplicate stderr and stdout messages if stderr was True.
            # even though upstart or systemd might have set up output to go to
            # /dev/console, the user may have configured elsewhere via
            # cloud-config 'output'. If there is /dev/console, messages will
            # still get there.
            sys.stdout.write(text)
        if text[-1] == "\n":
            log.log(log_level, text[:-1])
        else:
            log.log(log_level, text)
    if not isinstance(decoded, tuple(root_types)):
        expected_types = ", ".join([str(t) for t in root_types])
        raise TypeError("(%s) root types expected, got %s instead"
                        % (expected_types, type(decoded)))
    return decoded


def get_cfg_option_int(yobj, key, default=0):
    return int(get_cfg_option_str(yobj, key, default=default))


def system_info():
    return {
        'platform': platform.platform(),
        'release': platform.release(),
        'python': platform.python_version(),
        'uname': platform.uname(),
    """
    Gets the C{key} config option from C{yobj} as a list of strings. If the
    key is present as a single string it will be returned as a list with one
    string arg.

    @param yobj: The configuration object.
    @param key: The configuration key to get.
    @param default: The default to return if key is not found.
    @return: The configuration option as a list of strings or default if key
        is not found.
    """

# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
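# Illustrative (not in the original source):
#   get_cfg_by_path({'a': {'b': 5}}, ('a', 'b')) -> 5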

def fixup_output(cfg, mode):
    (outfmt, errfmt) = get_output_cfg(cfg, mode)
    redirect_output(outfmt, errfmt)


# redirect_output(outfmt, errfmt, orig_out, orig_err)
# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
# fmt can be:
#    > FILEPATH
#    >> FILEPATH
#    | program [ arg1 [ arg2 [ ... ] ] ]
#
# with a '|', arguments are passed to shell, so one level of
# shell escape is required.
#
# if _CLOUD_INIT_SAVE_STDOUT is set in environment to a non empty and true
# value then output/input will not be closed (useful for debugging).
#
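# Illustrative outfmt/errfmt values matching the formats above (not from the
# original source):
#   ">> /var/log/cloud-init-output.log"        (append to a file)
#   "| tee -a /var/log/cloud-init-output.log"  (pipe to a program)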

    if is_true(os.environ.get("_CLOUD_INIT_SAVE_STDOUT")):
        LOG.debug("Not redirecting output due to _CLOUD_INIT_SAVE_STDOUT")
        return

        (mode, arg) = outfmt.split(" ", 1)
        if mode == ">" or mode == ">>":
            owith = "ab"
            if mode == ">":
                owith = "wb"
            new_fp = open(arg, owith)
        elif mode == "|":
            proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
        (mode, arg) = errfmt.split(" ", 1)
        if mode == ">" or mode == ">>":
            owith = "ab"
            if mode == ">":
                owith = "wb"
            new_fp = open(arg, owith)
        elif mode == "|":
            proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)

        if o_err:
            os.dup2(new_fp.fileno(), o_err.fileno())


def make_url(scheme, host, port=None,
             path='', params='', query='', fragment=''):

    pieces = []
    pieces.append(scheme or '')

    netloc = ''
    if host:
        netloc = str(host)

    if port is not None:
        netloc += ":" + "%s" % (port)

    pieces.append(netloc or '')
    pieces.append(path or '')
    pieces.append(params or '')
    pieces.append(query or '')
    pieces.append(fragment or '')

    return urlparse.urlunparse(pieces)
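# Illustrative (not in the original source):
#   make_url('http', 'example.com', 8080, '/path', query='a=1')
#   -> 'http://example.com:8080/path?a=1'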


    merged_cfg = {}
    for cfg in srcs:
        if cfg:
            # Figure out which mergers to apply...
            mergers_to_apply = mergers.dict_extract_mergers(cfg)
            if not mergers_to_apply:
                mergers_to_apply = mergers.default_mergers()
@contextlib.contextmanager
def chdir(ndir):
    curr = os.getcwd()
    try:
        os.chdir(ndir)
        yield ndir
    finally:
        os.chdir(curr)


@contextlib.contextmanager
def umask(n_msk):
    old = os.umask(n_msk)
    try:
        yield old
    finally:
        os.umask(old)
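# Illustrative usage of the context managers above (not from the original
# source):
#   with umask(0o077), chdir('/tmp'):
#       ...  # work in /tmp with a restrictive umask; both restored on exit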


@contextlib.contextmanager
def tempdir(**kwargs):
    # This seems like it was only added in python 3.2
    # Make it since it's useful...
    # See: http://bugs.python.org/file12970/tempdir.patch
    tdir = tempfile.mkdtemp(**kwargs)


def center(text, fill, max_len):
    return '{0:{fill}{align}{size}}'.format(text, fill=fill,
                                            align="^", size=max_len)


def del_dir(path):
    LOG.debug("Recursively deleting %s", path)
    shutil.rmtree(path)

    if exe_prefix is None:
        prefix = []
    elif isinstance(exe_prefix, str):
        prefix = [str(exe_prefix)]
    elif isinstance(exe_prefix, list):
        prefix = exe_prefix
    else:
        raise TypeError("exe_prefix must be None, str, or list")

    for exe_name in sorted(os.listdir(dirp)):
        exe_path = os.path.join(dirp, exe_name)
        if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):

# read_optional_seed
# returns boolean indicating success or failure (presence of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
# 'meta-data' entries

def fetch_ssl_details(paths=None):
    ssl_details = {}
    # Lookup in these locations for ssl key/cert files
    ssl_cert_paths = [
        '/var/lib/cloud/data/ssl',
        '/var/lib/cloud/instance/data/ssl',
    ]
    if paths:
        ssl_cert_paths.extend([
            os.path.join(paths.get_ipath_cur('data'), 'ssl'),
            os.path.join(paths.get_cpath('data'), 'ssl'),
        ])
    ssl_cert_paths = uniq_merge(ssl_cert_paths)
    ssl_cert_paths = [d for d in ssl_cert_paths if d and os.path.isdir(d)]
    cert_file = None
    for d in ssl_cert_paths:
        if os.path.isfile(os.path.join(d, 'cert.pem')):
            cert_file = os.path.join(d, 'cert.pem')
            break
    key_file = None
    for d in ssl_cert_paths:
        if os.path.isfile(os.path.join(d, 'key.pem')):
            key_file = os.path.join(d, 'key.pem')
            break
    if cert_file and key_file:
        ssl_details['cert_file'] = cert_file
        ssl_details['key_file'] = key_file
    elif cert_file:
        ssl_details['cert_file'] = cert_file
    return ssl_details


def load_tfile_or_url(*args, **kwargs):
    return(decode_binary(read_file_or_url(*args, **kwargs).contents))

        except IOError as e:
            code = e.errno
            if e.errno == errno.ENOENT:
                code = url_helper.NOT_FOUND
            raise url_helper.UrlError(cause=e, code=code, headers=None)
        return url_helper.FileResponse(file_path, contents=contents)
    else:
        return url_helper.readurl(url,
                                  timeout=timeout,
                                  retries=retries,
                                  headers=headers,

    LOG.debug("Attempting to load yaml from string "
              "of length %s with allowed root types %s",
        if not isinstance(converted, allowed):
            # Yes this will just be caught, but that's ok for now...
        if len(blob) == 0:
            LOG.debug("load_yaml given empty string, returning default")
        else:
            logexc(LOG, "Failed loading yaml blob")

    # default retries for file is 0. for network is 10
    if base.startswith("file://"):
        retries = file_retries

    if base.find("%s") >= 0:
        ud_url = base % ("user-data" + ext)
        md_url = base % ("meta-data" + ext)
        ud_url = "%s%s%s" % (base, "user-data", ext)
        md_url = "%s%s%s" % (base, "meta-data", ext)

    # Remove anything not a file
    confs = [f for f in confs
             if os.path.isfile(os.path.join(confd, f))]

            raise TypeError(("Config file %s contains 'conf_d' "
                             "with non-string type %s") %

def read_cc_from_cmdline(cmdline=None):
    # this should support reading cloud-config information from
    # the kernel command line.  It is intended to support content of the
    # format:
    #   cc: <yaml content here> [end_cc]
    # this would include:
    # cc: ssh_import_id: [smoser, kirkland]\\n
    # cc: ssh_import_id: [smoser, bob]\\nruncmd: [ [ ls, -l ], echo hi ] end_cc
    # cc:ssh_import_id: [smoser] end_cc cc:runcmd: [ [ ls, -l ] ] end_cc
    if cmdline is None:
        cmdline = get_cmdline()

    begin = cmdline.find(tag_begin)
    while begin >= 0:
        end = cmdline.find(tag_end, begin + begin_l)
        if end < 0:
            end = clen
    pos = contents.find('\n')
    if pos <= 0 or contents[pos - 1] != '\r':
        return contents
    return contents.replace('\r\n', '\n')
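# Illustrative (not in the original source): dos2unix("a\r\nb\r\n") returns
# "a\nb\n"; content whose first newline is not preceded by '\r' is returned
# unchanged.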


def get_hostname_fqdn(cfg, cloud):
    # return the hostname and fqdn from 'cfg'.  If not found in cfg,
    # then fall back to data from cloud
    if "fqdn" in cfg:
        # user specified a fqdn.  Default hostname then is based off that
        fqdn = cfg['fqdn']
    else:
        if "hostname" in cfg and cfg['hostname'].find('.') > 0:
            # user specified hostname, and it had '.' in it
            # be nice to them.  set fqdn and hostname from that
            fqdn = cfg['hostname']
            hostname = cfg['hostname'][:fqdn.find('.')]
            # get hostname from cfg if available otherwise cloud
            fqdn = cloud.get_hostname(fqdn=True)
            if "hostname" in cfg:
                hostname = cfg['hostname']
            else:
                hostname = cloud.get_hostname()
    """
    For each host a single line should be present with
      the following information:
    characters.  Text from a "#" character until the end of the line is a
    comment, and is ignored. Host names may contain only alphanumeric
    characters, minus signs ("-"), and periods (".").  They must begin with
    an alphabetic character and end with an alphanumeric character.
    Optional aliases provide for name changes, alternate spellings, shorter
    hostnames, or generic hostnames (for example, localhost).
    """
    for line in load_file(filename).splitlines():
        hashpos = line.find("#")
        if hashpos >= 0:
            line = line[0:hashpos]
    if cmdline is None:
        cmdline = get_cmdline()

    data = keyval_str_to_dict(cmdline)
    url = None
    key = None
    for key in names:
        if key in data:
            url = data[key]
            break

    if not url:
        return (None, None, None)

    # allow callers to pass starts as text when comparing to bytes contents
    starts = encode_text(starts)
    if resp.ok() and resp.contents.startswith(starts):
        return (key, url, resp.contents)
    This also attempts to be resilient against dns redirection.

    Note, that normal nsswitch resolution is used here.  So in order
    to avoid any utilization of 'search' entries in /etc/resolv.conf

    The top level 'invalid' domain is invalid per RFC.  And example.com
    should also not exist.  The random entry will be resolved inside
    the search list.
    """
    if _DNS_REDIRECT_IP is None:
        badips = set()
        badnames = ("does-not-exist.example.com.", "example.invalid.",
                    rand_str())
        badresults = {}
        for iname in badnames:
            try:
                result = socket.getaddrinfo(iname, None, 0, 0,
                                            socket.SOCK_STREAM,
                                            socket.AI_CANONNAME)
                for (_fam, _stype, _proto, cname, sockaddr) in result:
                    badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
                    badips.add(sockaddr[0])
        result = socket.getaddrinfo(name, None)
        # check first result's sockaddr field
        addr = result[0][4][0]
        if addr in _DNS_REDIRECT_IP:
            return False

def get_hostname():
    hostname = socket.gethostname()
    return hostname


def gethostbyaddr(ip):
    try:
        return socket.gethostbyaddr(ip)[0]
    except socket.herror:
        return None


    """
    Search through a list of mirror urls for one that works
    This needs to return quickly.
    """
    for cand in candidates:
        try:
            if is_resolvable_url(cand):
                return cand
    """
    reopen stdin as /dev/null so even subprocesses or other os level things get
    /dev/null as input.

    if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty and true
    value then input will not be closed (useful for debugging).
        return
    with open(os.devnull) as fp:
        os.dup2(fp.fileno(), sys.stdin.fileno())
    """
    find devices matching given criteria (via blkid)
    criteria can be *one* of:
        TYPE=<filesystem>
        LABEL=<label>
        UUID=<uuid>
    """
        # have the value 'value' and display any devices which are found.
        # Common values for NAME include TYPE, LABEL, and UUID.
        # If there are no devices specified on the command line,
        # only search the devices specified by the user.
        options.append("-t%s" % (criteria))
    if tag:
        # For each (specified) device, show only the tags that match tag.
        options.append("-s%s" % (tag))
    if no_cache:
        # If you want to start with a clean cache
        # (i.e. don't report devices previously scanned
        # but not necessarily available at this time), specify /dev/null.
        options.extend(["-c", "/dev/null"])
    if oformat:
        # The format parameter may be:
        # full, value, list, device, udev, export
        options.append('-o%s' % (oformat))
    if path:
        options.append(path)
    cmd = blk_id_cmd + options
    entries = []
    for line in out.splitlines():
        line = line.strip()
        if line:
            entries.append(line)
    return entries
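# Illustrative command built above (not from the original source): with
# criteria='LABEL=config-2' and oformat='device', the blkid invocation is
# roughly: blkid -tLABEL=config-2 -odevice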


def peek_file(fname, max_bytes):
    LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes)
    with open(fname, 'rb') as ifh:
        return ifh.read(max_bytes)


def uniq_list(in_list):
    out_list = []
    for i in in_list:
        if i in out_list:
            continue
        else:
            out_list.append(i)
    return out_list

    try:
        with open(fname, 'rb') as ifh:
            pipe_in_out(ifh, ofh, chunk_cb=read_cb)
    except IOError as e:
        if not quiet:
            raise
        if e.errno != errno.ENOENT:
            raise
    contents = ofh.getvalue()
    LOG.debug("Read %s bytes from %s", len(contents), fname)

def get_cmdline():
    if 'DEBUG_PROC_CMDLINE' in os.environ:
        cmdline = os.environ["DEBUG_PROC_CMDLINE"]
        try:
            cmdline = load_file("/proc/cmdline").strip()
        except:
            cmdline = ""
    return cmdline


def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
    bytes_piped = 0
    while True:
        data = in_fh.read(chunk_size)
            break
        else:
            out_fh.write(data)
            bytes_piped += len(data)
            if chunk_cb:
                chunk_cb(bytes_piped)
    out_fh.flush()
    return bytes_piped

        return
    LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
    os.chown(fname, uid, gid)


def chownbyname(fname, user=None, group=None):
    uid = -1
    gid = -1
    try:
        if user:
            uid = pwd.getpwnam(user).pw_uid
        if group:
            gid = grp.getgrnam(group).gr_gid

# Always returns well formatted values
# cfg is expected to have an entry 'output' in it, which is a dictionary
# that includes entries for 'init', 'config', 'final' or 'all'
#   init: /var/log/cloud.out
#   config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
#   final:
#     output: "| logger -p"
#     error: "> /dev/null"
# this returns the specific 'mode' entry, cleanly formatted, with value
        return ret

    outcfg = cfg['output']
    if mode in outcfg:
        modecfg = outcfg[mode]
    else:
        if 'all' not in outcfg:
            return ret
        # if there is a 'all' item in the output list
        # then it applies to all users of this (init, config, final)
        modecfg = outcfg['all']

    # if value is a string, it specifies stdout and stderr
    if isinstance(modecfg, str):
        ret = [modecfg, modecfg]

    # if its a list, then we expect (stdout, stderr)
    if isinstance(modecfg, list):
        if len(modecfg) > 0:
            ret[0] = modecfg[0]
        if len(modecfg) > 1:
            ret[1] = modecfg[1]

    # if it is a dictionary, expect 'output' and 'error'
    # items, which indicate out and error
    if isinstance(modecfg, dict):
        if 'output' in modecfg:
            ret[0] = modecfg['output']
        if 'error' in modecfg:
            ret[1] = modecfg['error']

    # if err's entry == "&1", then make it same as stdout
    # as in shell syntax of "echo foo >/dev/null 2>&1"
    if ret[1] == "&1":
        ret[1] = ret[0]

    swlist = [">>", ">", "|"]
    for i in range(len(ret)):
        if not ret[i]:
            continue
        val = ret[i].lstrip()
        found = False
        for s in swlist:
            if val.startswith(s):
                val = "%s %s" % (s, val[len(s):].strip())
                found = True
                break
        if not found:
            # default behavior is append
            val = "%s %s" % (">>", val.strip())
        ret[i] = val

    return ret
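# Illustrative (not in the original source): with the sample 'config' entry
# shown above, get_output_cfg(cfg, 'config') would normalize both entries to
# [">> /var/log/cloud-config.out", ">> /var/log/cloud-config.err"].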


    # Setting this here allows this to change
    # levels easily (not always error level)
    # or even desirable to have that much junk
    # coming out to a non-debug stream
    if msg:
        log.warn(msg, *args)
    # Debug gets the full trace.  However, nose has a bug whereby its
    # logcapture plugin doesn't properly handle the case where there is no
    # actual exception.  To avoid tracebacks during the test suite then, we'll
    # do the actual exc_info extraction here, and if there is no exception in
    # flight, we'll just pass in None.
    exc_info = sys.exc_info()
    if exc_info == (None, None, None):
        exc_info = None
    log.debug(msg, exc_info=exc_info, *args)
    digest = hasher.hexdigest()
    # Don't get too long now
    if mlen is not None:
        return digest[0:mlen]
    else:
        return digest


def is_user(name):
    try:
        if pwd.getpwnam(name):
            return True
    except KeyError:
        return False


def is_group(name):
    try:
        if grp.getgrnam(name):
            return True
    except KeyError:
        return False

def read_write_cmdline_url(target_fn):
    if not os.path.exists(target_fn):
        try:
            (key, url, content) = get_cmdline_url()
        except:
            logexc(LOG, "Failed fetching command line url")
            return
        try:
            if key and content:
                LOG.debug(("Wrote to %s with contents of command line"
                           " url %s (len=%s)"), target_fn, url, len(content))
            elif key and not content:
                LOG.debug(("Command line key %s with url"
                           " %s had no contents"), key, url)
        except:
            logexc(LOG, "Failed writing url content to %s", target_fn)


def yaml_dumps(obj, explicit_start=True, explicit_end=True):
    return yaml.safe_dump(obj,
                          line_break="\n",
                          indent=4,
                          explicit_start=explicit_start,
                          explicit_end=explicit_end,
        chmod(path, mode)
    else:
        # Just adjust the mode
        chmod(path, mode)

@contextlib.contextmanager
            (mountoutput, _err) = subp("mount")
            mount_locs = mountoutput.splitlines()
            method = 'mount'
        # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered)
        # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates)
                    dev = m.group(1)
                    mp = m.group(2)
                    fstype = m.group(3)
                    opts = m.group(4)
            mounted[dev] = {
                'fstype': fstype,
                'mountpoint': mp,
                'opts': opts,
            }
    """
    Mount the device, call method 'callback' passing the directory
    in which it was mounted, then unmount.  Return whatever 'callback'
    returned.  If data != None, also pass data to callback.

    mtype is a filesystem type.  it may be a string (a single fsname)
    or a list of fsnames.

    if isinstance(mtype, str):
        mtypes = [mtype]
    elif isinstance(mtype, (list, tuple)):
        mtypes = list(mtype)
    elif mtype is None:
        mtypes = None

    # clean up 'mtype' input a bit based on platform.
        if mtypes is None:
            mtypes = ['ufs', 'cd9660', 'vfat']
        for index, mtype in enumerate(mtypes):
            if mtype == "iso9660":
                mtypes[index] = "cd9660"
    else:
        # we cannot do a smart "auto", so just call 'mount' once with no -t
        mtypes = ['']
    mounted = mounts()
    with tempdir() as tmpd:
        umount = False
        if device in mounted:
            for mtype in mtypes:
                mountpoint = None
                try:
                    mountcmd = ['mount']
                    mountopts = []
                    if rw:
                        mountopts.append('rw')
                    else:
                        mountopts.append('ro')
                    if sync:
                        # This seems like the safe approach to do
                        # (ie where this is on by default)
                        mountopts.append("sync")
                    if mountopts:
                        mountcmd.extend(["-o", ",".join(mountopts)])
                    if mtype:
                        mountcmd.extend(['-t', mtype])
                    mountcmd.append(device)
                    mountcmd.append(tmpd)
                    subp(mountcmd)
                    umount = tmpd  # This forces it to be unmounted (when set)
                    mountpoint = tmpd
                    break
                except (IOError, OSError) as exc:
                    LOG.debug("Failed mount of '%s' as '%s': %s",
                              device, mtype, exc)
        # Be nice and ensure it ends with a slash
        if not mountpoint.endswith("/"):
            mountpoint += "/"
            if data is None:
                ret = callback(mountpoint)
            else:
                ret = callback(mountpoint, data)
            return ret


def get_builtin_cfg():
    # Deep copy so that others can't modify
        os.unlink(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise e


def copy(src, dest):
    LOG.debug("Copying %s to %s", src, dest)
    shutil.copy(src, dest)


def time_rfc2822():
    try:
        ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
    except:
        ts = "??"
    return ts


def uptime():
            contents = load_file("/proc/uptime").strip()
            if contents:
                uptime_str = contents.split()[0]
        else:
            libc = ctypes.CDLL('/lib/libc.so.7')
            size = ctypes.c_size_t()
            buf = ctypes.c_int()
            size.value = ctypes.sizeof(buf)
            libc.sysctlbyname("kern.boottime", ctypes.byref(buf),
                              ctypes.byref(size), None, 0)


def append_file(path, content):
    write_file(path, content, omode="ab", mode=None)

    """
    Writes a file with the given content and sets the file mode as specified.
    Restores the SELinux context if possible.

    @param filename: The full path of the file to write.
    @param content: The content to write to the file.
    @param mode: The filesystem mode to set on the file.
    if 'b' in omode.lower():
        content = encode_text(content)
        write_type = 'bytes'
    else:
        content = decode_binary(content)
        write_type = 'characters'
    LOG.debug("Writing to %s - %s: [%s] %s %s",
              filename, omode, mode, len(content), write_type)
    @param dirname: The directory whose contents should be deleted.
    """
    for node in os.listdir(dirname):
        node_fullpath = os.path.join(dirname, node)
        if os.path.isdir(node_fullpath):
            del_dir(node_fullpath)
        else:
            del_file(node_fullpath)

def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
         logstring=False):
        LOG.debug(("Running hidden command to protect sensitive "
                   "input/output logstring: %s"), logstring)

    if not capture:
        stdout = None
        stderr = None
    else:
        stdout = subprocess.PIPE
        stderr = subprocess.PIPE
    stdin = subprocess.PIPE
    kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
               env=env, shell=shell)
    if six.PY3:
        # Use this so subprocess output will be (Python 3) str, not bytes.
        kws['universal_newlines'] = True
    sp = subprocess.Popen(args, **kws)
                                     exit_code=rc,
                                     cmd=args)
    # Just ensure blank instead of none?? (iff capturing)
    if not out and capture:

def make_header(comment_char="#", base='created'):
    ci_ver = version.version_string()
    header = str(comment_char)
    header += " %s by cloud-init v. %s" % (base.title(), ci_ver)
    header += " on %s" % time_rfc2822()
    return header
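# Illustrative output (not from the original source; version and date vary):
#   make_header("#") -> "# Created by cloud-init v. <version> on <timestamp>"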


# shellify, takes a list of commands
#  for each entry in the list
#    if it is an array, shell protect it (with single ticks)
#    if it is a string, do nothing
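# Illustrative (not part of the original comments): shellify(["ls -l",
# ["echo", "hello world"]]) would produce a "#!/bin/sh" script whose lines
# are `ls -l` and `echo 'hello world'`.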
def shellify(cmdlist, add_header=True):
    content = ''
    if add_header:
        content += "#!/bin/sh\n"
    escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
        # If the item is a list, wrap all items in single tick.
        # If it's not, then just write it directly.


def strip_prefix_suffix(line, prefix=None, suffix=None):
    if prefix and line.startswith(prefix):
        line = line[len(prefix):]
    if suffix and line.endswith(suffix):
        line = line[:-len(suffix)]
    return line
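# Illustrative (not in the original source):
#   strip_prefix_suffix("[[abc]]", prefix="[[", suffix="]]") -> "abc"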


        try:
            # try to run a helper program. if it returns true/zero
            # then we're inside a container. otherwise, no
            pass

    # this code is largely from the logic in
    # ubuntu's /etc/init/container-detect.conf
    try:
        # Detect old-style libvirt
        # Detect OpenVZ containers
        pid1env = get_proc_env(1)
        if "container" in pid1env:
            return True
        if "LIBVIRT_LXC_UUID" in pid1env:
            return True

    # Detect OpenVZ containers
    if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
        return True

    try:
        # Detect Vserver containers
        lines = load_file("/proc/self/status").splitlines()
        for line in lines:
            if line.startswith("VxID:"):
                (_key, val) = line.strip().split(":", 1)
                if val != "0":
                    return True
    """
    Return the environment in a dict that a given process id was started with.
    """

    env = {}
    fn = os.path.join("/proc/", str(pid), "environ")
    try:
        contents = load_file(fn)
        for tok in toks:
            if tok == "":
                continue
            (name, val) = tok.split("=", 1)

def keyval_str_to_dict(kvstring):
    ret = {}
    for tok in kvstring.split():
        try:
            (key, val) = tok.split("=", 1)
        except ValueError:
            key = tok
            val = True
        ret[key] = val
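# Illustrative (not in the original source; assumes the elided tail returns
# 'ret'):
#   keyval_str_to_dict("ro root=/dev/sda1 quiet")
#   -> {'ro': True, 'root': '/dev/sda1', 'quiet': True}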


def is_partition(device):
    if device.startswith("/dev/"):
        device = device[5:]

    return os.path.isfile("/sys/class/block/%s/partition" % device)


def expand_package_list(version_fmt, pkgs):
    # we will accept tuples, lists of tuples, or just plain lists
    if not isinstance(pkgs, list):
        pkgs = [pkgs]

    pkglist = []
    for pkg in pkgs:
        if isinstance(pkg, (tuple, list)):
            if len(pkg) < 1 or len(pkg) > 2:
                raise RuntimeError("Invalid package & version tuple.")
            if len(pkg) == 2 and pkg[1]:
                pkglist.append(version_fmt % tuple(pkg))
                continue
            pkglist.append(pkg[0])

        else:
            raise RuntimeError("Invalid package type.")

def parse_mount_info(path, mountinfo_lines, log=LOG):
    """Return the mount information for PATH given the lines from
    /proc/$$/mountinfo."""

    path_elements = [e for e in path.split('/') if e]
    devpth = None
    fs_type = None
    match_mount_point = None
    match_mount_point_elements = None
        # Completely fail if there is anything in any line that is
        # unexpected, as continuing to parse past a bad line could
        # cause an incorrect result to be returned, so it's better to
        # return nothing than an incorrect result.

        # The minimum number of elements in a valid line is 10.
        if len(parts) < 10:
            log.debug("Line %d has too few columns (%d): %s",
                      i + 1, len(parts), line)
            return None

        mount_point = parts[4]
        mount_point_elements = [e for e in mount_point.split('/') if e]

        # Ignore mounts deeper than the path in question.
        if len(mount_point_elements) > len(path_elements):
            continue

        # Ignore mounts where the common path is not the same.
        l = min(len(mount_point_elements), len(path_elements))
        if mount_point_elements[0:l] != path_elements[0:l]:
            continue

        # Ignore mount points higher than an already seen mount
        # point.
        if (match_mount_point_elements is not None and
            continue

        # Find the '-' which terminates a list of optional columns to
        # find the filesystem type and the path to the device.  See
        # man 5 proc for the format of this file.
        try:
            i = parts.index('-')
        except ValueError:
            return None

        # Get the path to the device.
        try:
            fs_type = parts[i + 1]
            devpth = parts[i + 2]
        except IndexError:
            return None

        match_mount_point = mount_point
        match_mount_point_elements = mount_point_elements

    if devpth and fs_type and match_mount_point:
        return (devpth, fs_type, match_mount_point)
    else:
        return None

def parse_mtab(path):
    """On older kernels there's no /proc/$$/mountinfo, so use mtab."""
    for line in load_file("/etc/mtab").splitlines():
        devpth, mount_point, fs_type = line.split()[:3]
        if mount_point == path:
            return devpth, fs_type, mount_point
    return None


def parse_mount(path):
    (mountoutput, _err) = subp("mount")
    mount_locs = mountoutput.splitlines()
    for line in mount_locs:
        devpth = m.group(1)
        mount_point = m.group(2)
        fs_type = m.group(3)
        if mount_point == path:
            return devpth, fs_type, mount_point
    return None

def get_mount_info(path, log=LOG):
    # Use /proc/$$/mountinfo to find the device where path is mounted.
    # This is done because with a btrfs filesystem using os.stat(path)
    # does not return the ID of the device.
    #
    # Here, / has a device of 18 (decimal).
    #
    # $ stat /
    #   File: '/'
    #   Size: 234        Blocks: 0       IO Block: 4096   directory
    # Device: 12h/18d    Inode: 256      Links: 1
    # Access: (0755/drwxr-xr-x)  Uid: (0/root)  Gid: (0/root)
    # Access: 2013-01-13 07:31:04.358011255 +0000
    # Modify: 2013-01-13 18:48:25.930011255 +0000
    # Change: 2013-01-13 18:48:25.930011255 +0000
    #  Birth: -
    #
    # Find where / is mounted:
    #
    # $ mount | grep ' / '
    # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo)
    #
    # And the device ID for /dev/vda1 is not 18:
    #
    # $ ls -l /dev/vda1
    # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1
    #
    # So use /proc/$$/mountinfo to find the device underlying the
    # input path.
    mountinfo_path = '/proc/%s/mountinfo' % os.getpid()
    if os.path.exists(mountinfo_path):
        lines = load_file(mountinfo_path).splitlines()
        return parse_mount_info(path, lines, log)

def which(program):
    # Return path of program for execution if found in path
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        path = path.strip('"')
        exe_file = os.path.join(path, program)
        if is_exe(exe_file):
            return exe_file

    return None
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}

    start = time.time()

    ustart = None
    if get_uptime:
        try:
            ustart = float(uptime())
        except ValueError:
            pass

    try:
        ret = func(*args, **kwargs)
    finally:
        delta = time.time() - start
        if ustart is not None:
            try:
                udelta = float(uptime()) - ustart
            except ValueError:
        if isinstance(udelta, (float)):
            tmsg += " (%0.2f)" % udelta
        else:
            tmsg += " (N/A)"

def expand_dotted_devname(dotted):
    toks = dotted.rsplit(".", 1)
    if len(toks) > 1:
        return toks
    else:
        return (dotted, None)
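# Illustrative (not in the original source):
#   expand_dotted_devname("eth0.100") -> ['eth0', '100']
#   expand_dotted_devname("eth0") -> ('eth0', None)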


def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
    # return a dictionary populated with keys in 'required' and 'optional'
    # by reading files in prefix + delim + entry
    if required is None:
        required = []
    if optional is None:
        optional = []

    missing = []
    ret = {}
    for f in required + optional:
        try:
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            if f in required:
                missing.append(f)

    if len(missing):
        raise ValueError("Missing required files: %s", ','.join(missing))

    return ret


def read_meminfo(meminfo="/proc/meminfo", raw=False):
    # read a /proc/meminfo style file and return
    # a dict with 'total', 'free', and 'available'
    kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
            'MemAvailable:': 'available'}
    ret = {}
    for line in load_file(meminfo).splitlines():
        try:
            key, value, unit = line.split()
        except ValueError:
            key, value = line.split()
            unit = 'B'
        if raw:
            ret[key] = int(value) * mpliers[unit]
        elif key in kmap:
            ret[kmap[key]] = int(value) * mpliers[unit]

    return ret


def human2bytes(size):
    """Convert human string or integer to size in bytes
    10M => 10485760
    .5G => 536870912
    """
    size_in = size
    if size.endswith("B"):
        size = size[:-1]

    mpliers = {'B': 1, 'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}

    num = size
    mplier = 'B'
    for m in mpliers:
        if size.endswith(m):
            mplier = m
            num = size[0:-len(m)]

    try:
        num = float(num)
    except ValueError:
        raise ValueError("'%s' is not valid input." % size_in)

    if num < 0:
        raise ValueError("'%s': cannot be negative" % size_in)

    return int(num * mpliers[mplier])
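# Illustrative (not in the original source):
#   human2bytes("512K") -> 524288
#   human2bytes("1.5G") -> 1610612736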
    if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
        return None
    mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
    dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)


def _call_dmidecode(key, dmidecode_path):
    """
    Calls out to dmidecode to get the data out.  This is mostly for supporting
    OS's without /sys/class/dmi/id support.
    """
    try:
        cmd = [dmidecode_path, "--string", key]
        (result, _err) = subp(cmd)
    Wrapper for reading DMI data.

    This will do the following (returning the first that produces a
    result):
        1) Use a mapping to translate `key` from dmidecode naming to
           sysfs naming and look in /sys/class/dmi/... for a value.
        2) Use `key` as a sysfs key directly and look in /sys/class/dmi/...
        3) Fall-back to passing `key` to `dmidecode --string`.

    If all of the above fail to find a value, None will be returned.
    syspath_value = _read_dmi_syspath(key)
    if syspath_value is not None:
        return syspath_value

    dmidecode_path = which('dmidecode')
    if dmidecode_path:
        return _call_dmidecode(key, dmidecode_path)