Merge pull request #39153 from mgfritch/cephadm-flake8
mgr/cephadm: introduce flake8

Reviewed-by: Sebastian Wagner <sebastian.wagner@suse.com>
sebastian-philipp committed Feb 8, 2021
2 parents 46493eb + 80990f5 commit d4d3d17
Showing 32 changed files with 279 additions and 237 deletions.
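The change set below wires a `flake8` environment into the mgr test suite (via `CMakeLists.txt`) and then fixes the violations the linter reports across the cephadm module. The tox environment itself is presumably defined in `src/pybind/mgr/tox.ini`, which is not reproduced in this excerpt; a minimal sketch of what such an environment usually looks like (section contents and rule choices here are hypothetical, not the PR's actual configuration):

```ini
# Hypothetical sketch only -- the real flake8 environment added by this PR
# is not shown in this excerpt.
[testenv:flake8]
deps = flake8
commands = flake8 cephadm

[flake8]
max-line-length = 100
exclude = .tox,venv
```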
2 changes: 1 addition & 1 deletion src/pybind/mgr/CMakeLists.txt
@@ -6,7 +6,7 @@ if(WITH_MGR_ROOK_CLIENT)
endif()
if(WITH_TESTS)
include(AddCephTest)
- add_tox_test(mgr ${CMAKE_CURRENT_SOURCE_DIR} TOX_ENVS py3 mypy)
+ add_tox_test(mgr ${CMAKE_CURRENT_SOURCE_DIR} TOX_ENVS py3 mypy flake8)
endif()

# Location needs to match default setting for mgr_module_path, currently:
10 changes: 7 additions & 3 deletions src/pybind/mgr/cephadm/__init__.py
@@ -1,6 +1,10 @@
- import os
+ from .module import CephadmOrchestrator

+ __all__ = [
+     "CephadmOrchestrator",
+ ]
+
+ import os
if 'UNITTEST' in os.environ:
    import tests
-
- from .module import CephadmOrchestrator
+     __all__.append(tests.__name__)
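Most of this reshuffle serves pyflakes' F401 ("imported but unused"): a re-export in a package `__init__.py` looks unused to the linter unless the name appears in `__all__`, and appending `tests.__name__` extends that allowance to the unit-test-only import. A hedged, generic illustration of the pattern (module names are hypothetical):

```python
# mypackage/__init__.py -- illustration of the F401/__all__ pattern above.
import os

from .core import Engine        # re-export; F401 without the __all__ below

__all__ = [
    "Engine",                   # marks the re-export as deliberate
]

if 'DEBUG' in os.environ:       # optional import extends the public API
    import debug_tools          # hypothetical extras-only module
    __all__.append(debug_tools.__name__)
```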
8 changes: 4 additions & 4 deletions src/pybind/mgr/cephadm/inventory.py
@@ -199,14 +199,14 @@ class HostCache():
4. `last_etc_ceph_ceph_conf` O(hosts)
- Stores the last refresh time for the /etc/ceph/ceph.conf. Used
+ Stores the last refresh time for the /etc/ceph/ceph.conf. Used
to avoid deploying new configs when failing over to a new mgr.
5. `scheduled_daemon_actions`: O(daemons)
Used to run daemon actions after deploying a daemon. We need to
store it persistently, in order to stay consistent across
- MGR failovers.
+ MGR failovers.
"""

def __init__(self, mgr):
@@ -218,7 +218,7 @@ def __init__(self, mgr):
self.facts = {} # type: Dict[str, Dict[str, Any]]
self.last_facts_update = {} # type: Dict[str, datetime.datetime]
self.osdspec_previews = {} # type: Dict[str, List[Dict[str, Any]]]
- self.osdspec_last_applied = {} # type: Dict[str, Dict[str, datetime.datetime]]
+ self.osdspec_last_applied = {}  # type: Dict[str, Dict[str, datetime.datetime]]
self.networks = {} # type: Dict[str, Dict[str, List[str]]]
self.last_device_update = {} # type: Dict[str, datetime.datetime]
self.last_device_change = {} # type: Dict[str, datetime.datetime]
@@ -624,7 +624,7 @@ def osdspec_needs_apply(self, host: str, spec: ServiceSpec) -> bool:
created = self.mgr.spec_store.get_created(spec)
if created and created > self.last_device_change[host]:
return True
- return self.osdspec_last_applied[host][spec.service_name()] < self.last_device_change[host];
+ return self.osdspec_last_applied[host][spec.service_name()] < self.last_device_change[host]

def update_last_etc_ceph_ceph_conf(self, host: str) -> None:
if not self.mgr.last_monmap:
5 changes: 2 additions & 3 deletions src/pybind/mgr/cephadm/migrations.py
@@ -1,5 +1,5 @@
import logging
- from typing import TYPE_CHECKING, Iterator, Union
+ from typing import TYPE_CHECKING, Iterator

from ceph.deployment.service_spec import PlacementSpec, ServiceSpec, HostPlacementSpec
from cephadm.schedule import HostAssignment
@@ -109,8 +109,7 @@ def to_hostname(d: DaemonDescription) -> HostPlacementSpec:
return HostPlacementSpec(d.hostname, '', '')

old_hosts = {h.hostname: h for h in spec.placement.hosts}
- new_hosts = [to_hostname(d) for d in existing_daemons
- ]
+ new_hosts = [to_hostname(d) for d in existing_daemons]

new_placement = PlacementSpec(
hosts=new_hosts,
51 changes: 22 additions & 29 deletions src/pybind/mgr/cephadm/module.py
@@ -5,14 +5,13 @@
import shlex
from collections import defaultdict
from configparser import ConfigParser
- from contextlib import contextmanager
from functools import wraps
from tempfile import TemporaryDirectory
from threading import Event

import string
from typing import List, Dict, Optional, Callable, Tuple, TypeVar, \
-     Any, Set, TYPE_CHECKING, cast, Iterator, NamedTuple, Sequence
+     Any, Set, TYPE_CHECKING, cast, NamedTuple, Sequence

import datetime
import os
@@ -25,19 +24,19 @@
from ceph.deployment.drive_group import DriveGroupSpec
from ceph.deployment.service_spec import \
NFSServiceSpec, ServiceSpec, PlacementSpec, assert_valid_host, \
-     HostPlacementSpec, HA_RGWSpec
+     HostPlacementSpec
from ceph.utils import str_to_datetime, datetime_to_str, datetime_now
from cephadm.serve import CephadmServe
from cephadm.services.cephadmservice import CephadmDaemonSpec

from mgr_module import MgrModule, HandleCommandResult, Option
- from mgr_util import create_self_signed_cert, verify_tls, ServerConfigException
+ from mgr_util import create_self_signed_cert
import secrets
import orchestrator
from orchestrator import OrchestratorError, OrchestratorValidationError, HostSpec, \
-     CLICommandMeta, OrchestratorEvent, set_exception_subject, DaemonDescription
+     CLICommandMeta, DaemonDescription
from orchestrator._interface import GenericSpec
- from orchestrator._interface import daemon_type_to_service, service_to_daemon_types
+ from orchestrator._interface import daemon_type_to_service

from . import remotes
from . import utils
@@ -48,7 +47,7 @@
from .services.iscsi import IscsiService
from .services.ha_rgw import HA_RGWService
from .services.nfs import NFSService
- from .services.osd import RemoveUtil, OSDRemovalQueue, OSDService, OSD, NotFoundError
+ from .services.osd import OSDRemovalQueue, OSDService, OSD, NotFoundError
from .services.monitoring import GrafanaService, AlertmanagerService, PrometheusService, \
NodeExporterService
from .schedule import HostAssignment
@@ -69,16 +68,10 @@ def remoto_has_connection(self: Any) -> bool:
from remoto.backends import BaseConnection
BaseConnection.has_connection = remoto_has_connection
import remoto.process
- import execnet.gateway_bootstrap
except ImportError as e:
remoto = None
remoto_import_error = str(e)

- try:
-     from typing import List
- except ImportError:
-     pass
-
logger = logging.getLogger(__name__)

T = TypeVar('T')
@@ -524,7 +517,7 @@ def _trigger_osd_removal(self) -> None:
# if _ANY_ osd that is currently in the queue appears to be empty,
# start the removal process
if int(osd.get('osd')) in self.to_remove_osds.as_osd_ids():
self.log.debug(f"Found empty osd. Starting removal process")
self.log.debug('Found empty osd. Starting removal process')
# if the osd that is now empty is also part of the removal queue
# start the process
self._kick_serve_loop()
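The hunk above drops a stray `f` prefix from a string with no placeholders, which flake8 reports as F541 (f-string is missing placeholders). In miniature:

```python
# F541 in one line: an f-string with nothing to interpolate.
msg = f"Found empty osd. Starting removal process"   # flagged: F541
msg = "Found empty osd. Starting removal process"    # plain string is fine
```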
@@ -637,10 +630,10 @@ def validate_ssh_config_content(self, ssh_config: Optional[str]) -> None:
if ssh_config is None or len(ssh_config.strip()) == 0:
raise OrchestratorValidationError('ssh_config cannot be empty')
# StrictHostKeyChecking is [yes|no] ?
- l = re.findall(r'StrictHostKeyChecking\s+.*', ssh_config)
- if not l:
+ res = re.findall(r'StrictHostKeyChecking\s+.*', ssh_config)
+ if not res:
raise OrchestratorValidationError('ssh_config requires StrictHostKeyChecking')
- for s in l:
+ for s in res:
if 'ask' in s.lower():
raise OrchestratorValidationError(f'ssh_config cannot contain: \'{s}\'')
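Renaming `l` to `res` addresses pycodestyle's E741 (ambiguous variable name): in many fonts `l` is indistinguishable from `1` or `I`. A self-contained miniature of the same fix:

```python
import re

ssh_config = "StrictHostKeyChecking yes"                     # sample input
l = re.findall(r'StrictHostKeyChecking\s+.*', ssh_config)    # E741: 'l'
res = re.findall(r'StrictHostKeyChecking\s+.*', ssh_config)  # lint-clean
assert l == res
```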

@@ -913,10 +906,10 @@ def check_host(self, host: str, addr: Optional[str] = None) -> Tuple[int, str, s
error_ok=True, no_fsid=True)
if code:
return 1, '', ('check-host failed:\n' + '\n'.join(err))
- except OrchestratorError as e:
+ except OrchestratorError:
self.log.exception(f"check-host failed for '{host}'")
- return 1, '', ('check-host failed:\n' +
-                f"Host '{host}' not found. Use 'ceph orch host ls' to see all managed hosts.")
+ return 1, '', ('check-host failed:\n'
+                + f"Host '{host}' not found. Use 'ceph orch host ls' to see all managed hosts.")
# if we have an outstanding health alert for this host, give the
# serve thread a kick
if 'CEPHADM_HOST_CHECK_FAILED' in self.health_checks:
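Several hunks in this file move a trailing `+` or `and` to the start of the continuation line. pycodestyle has a matched pair of checks here, W503 (line break before binary operator) and W504 (line break after it), of which at most one is normally enabled; current PEP 8 recommends breaking before the operator, which is presumably the convention this PR's flake8 configuration enforces. The otherwise-identical line pairs elsewhere in the diff (in docstrings) differ only in trailing whitespace, pycodestyle's W291/W293. Both operator layouts are valid Python:

```python
subtotal, tax = 100, 8

total = (subtotal +     # operator ends the line (W504 would flag this)
         tax)

total = (subtotal       # operator starts the next line (PEP 8 style)
         + tax)
```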
@@ -1184,8 +1177,8 @@ def _hosts_with_daemon_inventory(self) -> List[HostSpec]:
"""
return [
h for h in self.inventory.all_specs()
- if self.cache.host_had_daemon_refresh(h.hostname) and
-    h.status.lower() not in ['maintenance', 'offline']
+ if self.cache.host_had_daemon_refresh(h.hostname)
+    and h.status.lower() not in ['maintenance', 'offline']
]

def _add_host(self, spec):
@@ -1295,8 +1288,8 @@ def _host_ok_to_stop(self, hostname: str, force: bool = False) -> Tuple[int, str
return 1, '\n'.join(error_notifications)

if notifications:
- return 0, (f'It is presumed safe to stop host {hostname}. ' +
-            'Note the following:\n\n' + '\n'.join(notifications))
+ return 0, (f'It is presumed safe to stop host {hostname}. '
+            + 'Note the following:\n\n' + '\n'.join(notifications))
return 0, f'It is presumed safe to stop host {hostname}'

@trivial_completion
@@ -1333,15 +1326,15 @@ def enter_host_maintenance(self, hostname: str, force: bool = False) -> str:
Placing a host into maintenance disables the cluster's ceph target in systemd
and stops all ceph daemons. If the host is an osd host we apply the noout flag
- for the host subtree in crush to prevent data movement during a host maintenance
+ for the host subtree in crush to prevent data movement during a host maintenance
window.
:param hostname: (str) name of the host (must match an inventory hostname)
:raises OrchestratorError: Hostname is invalid, host is already in maintenance
"""
if len(self.cache.get_hosts()) == 1:
raise OrchestratorError(f"Maintenance feature is not supported on single node clusters")
raise OrchestratorError("Maintenance feature is not supported on single node clusters")

# if upgrade is active, deny
if self.upgrade.upgrade_state:
@@ -1402,7 +1395,7 @@ def exit_host_maintenance(self, hostname: str) -> str:
"""Exit maintenance mode and return a host to an operational state
Returning from maintnenance will enable the clusters systemd target and
- start it, and remove any noout that has been added for the host if the
+ start it, and remove any noout that has been added for the host if the
host has osd daemons
:param hostname: (str) host name
@@ -1541,7 +1534,7 @@ def describe_service(self, service_type: Optional[str] = None, service_name: Opt
sm[n].container_image_name = 'mix'
if dd.daemon_type == 'haproxy' or dd.daemon_type == 'keepalived':
# ha-rgw has 2 daemons running per host
- sm[n].size = sm[n].size*2
+ sm[n].size = sm[n].size * 2
for n, spec in self.spec_store.specs.items():
if n in sm:
continue
Expand All @@ -1560,7 +1553,7 @@ def describe_service(self, service_type: Optional[str] = None, service_name: Opt
sm[n].rados_config_location = spec.rados_config_location()
if spec.service_type == 'ha-rgw':
# ha-rgw has 2 daemons running per host
- sm[n].size = sm[n].size*2
+ sm[n].size = sm[n].size * 2
return list(sm.values())

@trivial_completion
4 changes: 2 additions & 2 deletions src/pybind/mgr/cephadm/remotes.py
@@ -27,5 +27,5 @@ def choose_python():


if __name__ == '__channelexec__':
- for item in channel: # type: ignore
-     channel.send(eval(item)) # type: ignore
+ for item in channel: # type: ignore # noqa: F821
+     channel.send(eval(item)) # type: ignore # noqa: F821
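The added `# noqa: F821` markers silence pyflakes' undefined-name check. `channel` is injected by the execnet remoting layer only when this file runs as a channel script (`__channelexec__`), so no static definition exists for the linter to find. A self-contained illustration of probing for such a runtime-injected name:

```python
# F821 fires on names with no visible definition; '# noqa: F821'
# suppresses the report for a single line.
try:
    channel  # noqa: F821  (provided by execnet at execution time)
except NameError:
    channel = None  # running outside execnet: fall back to a stand-in
```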
25 changes: 13 additions & 12 deletions src/pybind/mgr/cephadm/schedule.py
@@ -3,7 +3,7 @@
from typing import List, Optional, Callable, Iterable, TypeVar, Set

import orchestrator
- from ceph.deployment.service_spec import PlacementSpec, HostPlacementSpec, ServiceSpec
+ from ceph.deployment.service_spec import HostPlacementSpec, ServiceSpec
from orchestrator._interface import DaemonDescription
from orchestrator import OrchestratorValidationError

@@ -124,9 +124,10 @@ def place(self):
logger.debug('Provided hosts: %s' % candidates)
# if asked to place even number of mons, deploy 1 less
if self.spec.service_type == 'mon' and (len(candidates) % 2) == 0:
+ count = len(candidates) - 1
logger.info("Deploying %s monitor(s) instead of %s so monitors may achieve consensus" % (
-     len(candidates) - 1, len(candidates)))
- return candidates[0:len(candidates)-1]
+     count, len(candidates)))
+ return candidates[0:count]

# do not deploy ha-rgw on hosts that don't support virtual ips
if self.spec.service_type == 'ha-rgw' and self.filter_new_host:
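The even-count special case reflects monitor quorum math: a majority of 4 is 3, so 4 mons tolerate only 1 failure, exactly like 3 mons, and the extra daemon buys no availability. A worked example of the placement arithmetic (host names are hypothetical):

```python
candidates = ['host1', 'host2', 'host3', 'host4']

count = len(candidates)
if count % 2 == 0:          # even number of mons: deploy one fewer
    count -= 1

print(candidates[0:count])  # ['host1', 'host2', 'host3']
```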
@@ -271,29 +272,29 @@ def hosts_with_daemons(self, candidates: List[HostPlacementSpec]) -> List[HostPl
return existing


- def merge_hostspecs(l: List[HostPlacementSpec], r: List[HostPlacementSpec]) -> Iterable[HostPlacementSpec]:
+ def merge_hostspecs(lh: List[HostPlacementSpec], rh: List[HostPlacementSpec]) -> Iterable[HostPlacementSpec]:
"""
- Merge two lists of HostPlacementSpec by hostname. always returns `l` first.
+ Merge two lists of HostPlacementSpec by hostname. always returns `lh` first.
>>> list(merge_hostspecs([HostPlacementSpec(hostname='h', name='x', network='')],
... [HostPlacementSpec(hostname='h', name='y', network='')]))
[HostPlacementSpec(hostname='h', network='', name='x')]
"""
- l_names = {h.hostname for h in l}
- yield from l
- yield from (h for h in r if h.hostname not in l_names)
+ lh_names = {h.hostname for h in lh}
+ yield from lh
+ yield from (h for h in rh if h.hostname not in lh_names)


- def difference_hostspecs(l: List[HostPlacementSpec], r: List[HostPlacementSpec]) -> List[HostPlacementSpec]:
+ def difference_hostspecs(lh: List[HostPlacementSpec], rh: List[HostPlacementSpec]) -> List[HostPlacementSpec]:
"""
- returns l "minus" r by hostname.
+ returns lh "minus" rh by hostname.
>>> list(difference_hostspecs([HostPlacementSpec(hostname='h1', name='x', network=''),
... HostPlacementSpec(hostname='h2', name='y', network='')],
... [HostPlacementSpec(hostname='h2', name='', network='')]))
[HostPlacementSpec(hostname='h1', network='', name='x')]
"""
- r_names = {h.hostname for h in r}
- return [h for h in l if h.hostname not in r_names]
+ rh_names = {h.hostname for h in rh}
+ return [h for h in lh if h.hostname not in rh_names]
5 changes: 2 additions & 3 deletions src/pybind/mgr/cephadm/serve.py
@@ -1,4 +1,3 @@
- import datetime
import json
import logging
from collections import defaultdict
@@ -99,7 +98,7 @@ def serve(self) -> None:
def _serve_sleep(self) -> None:
sleep_interval = 600
self.log.debug('Sleeping for %d seconds', sleep_interval)
- ret = self.mgr.event.wait(sleep_interval)
+ self.mgr.event.wait(sleep_interval)
self.mgr.event.clear()

def _update_paused_health(self) -> None:
@@ -329,7 +328,7 @@ def update_osdspec_previews(self, search_host: str = '') -> None:
# query OSDSpecs for host <search host> and generate/get the preview
# There can be multiple previews for one host due to multiple OSDSpecs.
previews.extend(self.mgr.osd_service.get_previews(search_host))
self.log.debug(f"Loading OSDSpec previews to HostCache")
self.log.debug(f'Loading OSDSpec previews to HostCache for host <{search_host}>')
self.mgr.cache.osdspec_previews[search_host] = previews
# Unset global 'pending' flag for host
self.mgr.cache.loading_osdspec_preview.remove(search_host)
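Dropping the `ret =` assignment fixes pyflakes' F841 (local variable is assigned to but never used): `Event.wait()` is called purely to block until the next serve-loop wakeup, and its boolean result is deliberately ignored. In miniature:

```python
from threading import Event

event = Event()
event.set()             # let wait() return immediately for this demo
ret = event.wait(0.1)   # F841: 'ret' is assigned to but never used
event.wait(0.1)         # lint-clean: called for the side effect only
```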
