|
import json
import uuid

from copy import deepcopy
from functools import reduce
from collections import defaultdict

try:
    # ``Mapping`` moved to ``collections.abc`` in Python 3.3 and was removed
    # from ``collections`` in Python 3.10.
    from collections.abc import Mapping
except ImportError:  # pragma: no cover - Python 2 fallback
    from collections import Mapping

from .schema import TransformationTypes, ARG_MAP
from .transformer import BaseTransformer
| 10 | + |
| 11 | + |
def update_nested_dict(d, u):
    """
    Recursively merge mapping ``u`` into ``d`` in place and return ``d``.

    Nested mappings are merged key-by-key, lists are concatenated onto any
    existing (truthy) list, and every other value simply overwrites whatever
    ``d`` held before.
    """
    for key, new_value in u.items():
        if isinstance(new_value, Mapping):
            # Merge one level deeper; start from an empty dict when absent.
            d[key] = update_nested_dict(d.get(key, {}), new_value)
        elif isinstance(new_value, list):
            if d.get(key):
                d[key] += new_value
            else:
                d[key] = new_value
        else:
            d[key] = new_value
    return d
| 25 | + |
| 26 | + |
def lookup_nested_dict(dic, key, *keys):
    """
    Look up ``dic[key][keys[0]]...`` through nested dicts, returning ``None``
    on any missing key.

    Fix: the original recursed into ``dic.get(key, None)`` without checking
    the result, so a missing *intermediate* key raised ``AttributeError``
    (``None`` has no ``.get``) instead of returning ``None``.

    :param dic: The (possibly nested) dict to search
    :param key: The first key of the path
    :param keys: Any remaining keys of the path
    :returns: The value at the path, or ``None`` if any component is missing
    """
    if dic is None:
        return None
    if keys:
        return lookup_nested_dict(dic.get(key), *keys)
    return dic.get(key)
| 31 | + |
| 32 | + |
class MarathonTransformer(BaseTransformer):
    """
    A transformer for Marathon Apps

    To use this class:

    .. code-block:: python

        transformer = MarathonTransformer('./app.json')
        output = transformer.ingest_containers()
        print(json.dumps(output, indent=4))

    """
    # Fixed: this transformer ingests Marathon app definitions, so its input
    # type is MARATHON; the original COMPOSE value looks like a copy-paste
    # slip from the compose transformer (every ARG_MAP lookup below keys on
    # TransformationTypes.MARATHON.value).
    input_type = TransformationTypes.MARATHON.value

    def __init__(self, filename=None):
        """
        We override ``.__init__()`` on purpose, we need to record the docker
        option data.

        :param filename: The file to be loaded
        :type filename: str
        """
        if filename:
            self._filename = filename
            self.stream = self._read_file(filename)
        else:
            self.stream = None

    def _read_stream(self, stream):
        """
        Read the JSON document from an open stream.

        :param stream: A file-like object containing a JSON document
        :returns: The parsed JSON data
        """
        return json.load(stream)

    def _lookup_parameter(self, container, key, common_type=None):
        """
        Look up a ``docker run`` keyword from the
        ``container.docker.parameters`` list.

        :param container: The container in question
        :param key: The parameter key name being looked up
        :param common_type: The expected common type; when ``list``, all
            matching values are returned instead of only the first
        :returns: The matching value(s), or ``None`` when absent
        """
        if not container.get('container', {}).get('docker', {}).get('parameters'):
            return
        params = container['container']['docker']['parameters']

        # Super hacky - log-opt is a sub option of the logging directive of
        # everything else; return the raw param dicts for both keys and let
        # ingest_logging() pick them apart.
        if key == 'log-driver':
            return [p for p in params if p['key'] in ('log-opt', 'log-driver')]

        matching_params = [p['value'] for p in params if p['key'] == key]

        if matching_params:
            if common_type == list:
                return matching_params
            return matching_params[0]

    def flatten_container(self, container):
        """
        Accepts a marathon container and pulls the nested values up to the
        top level, keyed by their dotted path (e.g. ``container.docker.image``).
        """
        for names in ARG_MAP.values():
            marathon_name = names[TransformationTypes.MARATHON.value]['name']
            if marathon_name and '.' in marathon_name:
                parts = marathon_name.split('.')

                if parts[-2] == 'parameters':
                    # Special lookup for docker parameters
                    common_type = names[TransformationTypes.MARATHON.value].get('type')
                    result = self._lookup_parameter(container, parts[-1], common_type)
                else:
                    result = lookup_nested_dict(container, *parts)
                if result:
                    container[marathon_name] = result
        return container

    def ingest_containers(self, containers=None):
        """
        Normalize the input into a list of flattened app definitions.

        Accepts a single app dict, a list of apps, or the Marathon groups
        API output (``{'apps': [...]}``).
        """
        containers = containers or self.stream or {}
        # Accept groups api output
        if 'apps' in containers:
            containers = containers['apps']
        elif isinstance(containers, dict):
            containers = [containers]
        return [self.flatten_container(container) for container in containers]

    def emit_containers(self, containers, verbose=True):
        """
        Emits the applications and sorts containers by name

        :param containers: List of the container definitions
        :type containers: list of dict

        :param verbose: Print out newlines and indented JSON
        :type verbose: bool

        :returns: The text output
        :rtype: str
        """
        # ``or ''`` guards against a missing/None id, which would raise
        # TypeError when compared against a string under Python 3.
        containers = sorted(containers, key=lambda c: c.get('id') or '')
        if verbose:
            return json.dumps(containers, indent=4, sort_keys=True)
        return json.dumps(containers)

    def validate(self, container):
        """
        Fill in required Marathon fields and re-nest the dotted keys produced
        by flatten_container()/the emit_* methods.
        """
        # Ensure container name
        container['id'] = container.get('id', str(uuid.uuid4()))

        container_data = defaultdict(lambda: defaultdict(dict))
        container_data.update(container)

        # Find keys with periods in the name; these are keys that we delete
        # and create the corresponding nested entry for.
        for key, value in deepcopy(container_data).items():
            if key.startswith('container.'):
                parts = key.split('.')
                if parts[-2] == 'parameters':
                    # Parameter values are already lists of {'key','value'}
                    # dicts, so nest only up to the 'parameters' key itself.
                    parts = parts[:-1]
                data = reduce(lambda x, y: {y: x}, reversed(parts + [value]))
                update_nested_dict(container_data, data)
                del container_data[key]

        # Sort the parameters in a deterministic way: by value, then
        # (stably) by key.
        if container_data['container']['docker'].get('parameters'):
            old_params = container_data['container']['docker']['parameters']
            sorted_values = sorted(old_params, key=lambda p: p.get('value'))
            sorted_keys = sorted(sorted_values, key=lambda p: p.get('key'))
            container_data['container']['docker']['parameters'] = sorted_keys

        # Set requirePorts if any hostPorts are specified.
        if container_data['container']['docker'].get('portMappings'):
            host_ports = {
                p.get('hostPort', 0)
                for p in container_data['container']['docker']['portMappings']}
            container_data['requirePorts'] = bool(host_ports.difference({0}))

        # Assume the network mode is BRIDGE if unspecified
        if container_data.get('container', {}).get('docker', {}).get('network') == 'HOST':
            if container_data['container']['docker'].get('portMappings'):
                container_data['ports'] = [
                    p.get('containerPort') or p.get('hostPort')
                    for p in container_data['container']['docker']['portMappings']]
                container_data['requirePorts'] = True
        else:
            container_data['container']['docker']['network'] = 'BRIDGE'

        container_data['container']['docker']['forcePullImage'] = True
        container_data['container']['type'] = 'DOCKER'
        container_data['acceptedResourceRoles'] = []
        container_data['fetch'] = []

        return container_data

    def ingest_name(self, name):
        # Marathon ids are paths such as '/group/app'; keep the last segment.
        return name.split('/')[-1]

    def emit_links(self, links):
        return ['/{0}'.format(link) for link in links]

    @staticmethod
    def _parse_port_mapping(mapping):
        # 'host_port' is only emitted when the input mapping specified one.
        output = {
            'container_port': int(mapping['containerPort']),
            'protocol': mapping.get('protocol')
        }
        if 'hostPort' in mapping:
            output['host_port'] = int(mapping['hostPort'])
        return output

    def ingest_port_mappings(self, port_mappings):
        """
        Transform the port mappings to base schema mappings

        :param port_mappings: The port mappings
        :type port_mappings: list of dict
        :return: The base schema mappings
        :rtype: list of dict
        """
        return [self._parse_port_mapping(mapping) for mapping in port_mappings]

    def emit_port_mappings(self, port_mappings):
        return [
            {
                'containerPort': mapping['container_port'],
                'hostPort': mapping.get('host_port', 0),
                'protocol': mapping.get('protocol', 'tcp')
            }
            for mapping in port_mappings]

    def ingest_memory(self, memory):
        # Shift left by 20 bits: MiB -> bytes.
        return memory << 20

    def emit_memory(self, memory):
        # Shift right by 20 bits: bytes -> MiB, clamped to a floor of 4
        # (presumably Marathon's minimum mem value - TODO confirm).
        mem_in_mb = memory >> 20
        if mem_in_mb < 4:
            return 4
        return mem_in_mb

    def ingest_cpu(self, cpu):
        return float(cpu * 1024)

    def emit_cpu(self, cpu):
        # True division; the original ``float(cpu/1024)`` floor-divides
        # before converting when given ints under Python 2.
        return cpu / 1024.0

    def ingest_environment(self, environment):
        return environment

    def emit_environment(self, environment):
        return environment

    def ingest_command(self, command):
        return ' '.join(command)

    def emit_command(self, command):
        return command.split()

    def ingest_entrypoint(self, entrypoint):
        return entrypoint

    def emit_entrypoint(self, entrypoint):
        return [{'key': 'entrypoint', 'value': entrypoint}]

    def ingest_volumes_from(self, volumes_from):
        return volumes_from

    def emit_volumes_from(self, volumes_from):
        return [{'key': 'volumes-from', 'value': vol} for vol in volumes_from]

    def _convert_volume(self, volume):
        """
        This is for ingesting the "volumes" of an app description
        """
        return {
            'host': volume.get('hostPath'),
            'container': volume.get('containerPath'),
            'readonly': volume.get('mode') == 'RO',
        }

    def ingest_volumes(self, volumes):
        return [self._convert_volume(volume) for volume in volumes]

    @staticmethod
    def _build_volume(volume):
        """
        Given a generic volume definition, create the volumes element
        """
        return {
            'hostPath': volume.get('host'),
            'containerPath': volume.get('container'),
            'mode': 'RO' if volume.get('readonly') else 'RW'
        }

    def emit_volumes(self, volumes):
        return [self._build_volume(volume) for volume in volumes]

    def ingest_logging(self, logging):
        # Super hacky continued - _lookup_parameter() returned the raw
        # log-driver/log-opt parameter dicts; split them back apart here.
        # NOTE(review): assumes at least one 'log-driver' entry is present,
        # otherwise the [0] raises IndexError - confirm against callers.
        return {
            'driver': [p['value'] for p in logging if p['key'] == 'log-driver'][0],
            # maxsplit=1 so option values that themselves contain '='
            # (e.g. 'env=FOO=bar') survive the round trip.
            'options': dict(
                p['value'].split('=', 1) for p in logging if p['key'] == 'log-opt')
        }

    def emit_logging(self, logging):
        output = [{
            'key': 'log-driver',
            'value': logging.get('driver')
        }]
        options = logging.get('options')
        if options and isinstance(options, dict):
            for opt_key, opt_value in options.items():
                output.append({
                    'key': 'log-opt',
                    'value': '{k}={v}'.format(k=opt_key, v=opt_value)
                })
        return output

    def emit_dns(self, dns):
        return [{'key': 'dns', 'value': serv} for serv in dns]

    def emit_domain(self, domain):
        return [{'key': 'dns-search', 'value': d} for d in domain]

    def emit_work_dir(self, work_dir):
        return [{'key': 'workdir', 'value': work_dir}]

    def emit_network(self, network):
        return [{'key': 'net', 'value': net} for net in network]

    def ingest_net_mode(self, net_mode):
        return net_mode.lower()

    def emit_net_mode(self, net_mode):
        return net_mode.upper()

    def emit_user(self, user):
        return [{'key': 'user', 'value': user}]
0 commit comments