Skip to content

Commit

Permalink
Merge PR #44228 into master
Browse files Browse the repository at this point in the history
* refs/pull/44228/head:
	qa/suites/orch/cephadm/osds: test 'ceph cephadm osd activate'
	mgr/cephadm/services/osd: skip found osds that already have daemons
	mgr/cephadm: allow activation of OSDs that have previously started

Reviewed-by: Sebastian Wagner <sewagner@redhat.com>
  • Loading branch information
liewegas committed Dec 17, 2021
2 parents 1db2388 + 867bf04 commit 12bf02a
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 3 deletions.
20 changes: 20 additions & 0 deletions qa/suites/orch/cephadm/osds/2-ops/rmdir-reactivate.yaml
@@ -0,0 +1,20 @@
# qa/suites/orch/cephadm/osds/2-ops/rmdir-reactivate.yaml
#
# Exercise 'ceph cephadm osd activate': stop an OSD daemon, remove its
# cephadm-managed instance (keeping the underlying device data and its
# auth key), then ask cephadm to rediscover and reactivate it.
# NOTE(review): indentation below reconstructed to canonical cephadm.shell
# task layout — the scraped source had all YAML nesting stripped.
tasks:
- cephadm.shell:
    host.a:
      - |
        set -e
        set -x
        ceph orch ps
        HOST=$(hostname -s)
        OSD=$(ceph orch ps $HOST | grep osd | head -n 1 | awk '{print $1}')
        echo "host $HOST, osd $OSD"
        # stop the daemon and wait until the orchestrator agrees it is down
        ceph orch daemon stop $OSD
        while ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
        # preserve the OSD's auth key before removing the daemon instance
        ceph auth export $OSD > k
        ceph orch daemon rm $OSD --force
        ceph orch ps --refresh
        while ceph orch ps | grep $OSD ; do sleep 5 ; done
        # restore the key, then reactivate the still-present OSD on this host
        ceph auth add $OSD -i k
        ceph cephadm osd activate $HOST
        while ! ceph orch ps | grep $OSD | grep running ; do sleep 5 ; done
- cephadm.healthy:
4 changes: 2 additions & 2 deletions src/pybind/mgr/cephadm/inventory.py
Expand Up @@ -825,9 +825,9 @@ def get_daemon(self, daemon_name: str, host: Optional[str] = None) -> orchestrat

raise orchestrator.OrchestratorError(f'Unable to find {daemon_name} daemon(s)')

def has_daemon(self, daemon_name: str, host: Optional[str] = None) -> bool:
    """Return True if the named daemon is known to this cache.

    Thin predicate wrapper around ``get_daemon``: any
    ``OrchestratorError`` (daemon not found) is translated into False.

    :param daemon_name: daemon identifier, e.g. ``osd.3``
    :param host: optionally restrict the lookup to a single host
                 (forwarded to ``get_daemon``); ``None`` searches all hosts
    :return: True if the daemon exists, False otherwise
    """
    # The scraped diff interleaved the pre- and post-merge lines here;
    # this is the clean merged version (host-aware lookup).
    try:
        self.get_daemon(daemon_name, host)
    except orchestrator.OrchestratorError:
        return False
    return True
Expand Down
2 changes: 1 addition & 1 deletion src/pybind/mgr/cephadm/module.py
Expand Up @@ -2134,7 +2134,7 @@ def get_osd_uuid_map(self, only_up=False):
osd_id = o.get('osd')
if osd_id is None:
raise OrchestratorError("Could not retrieve osd_id from osd_map")
if not only_up or (o['up_from'] > 0):
if not only_up:
r[str(osd_id)] = o.get('uuid', '')
return r

Expand Down
8 changes: 8 additions & 0 deletions src/pybind/mgr/cephadm/services/osd.py
Expand Up @@ -125,6 +125,10 @@ async def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: st
if osd_id in before_osd_uuid_map and osd_id not in replace_osd_ids:
# if it exists but is part of the replacement operation, don't skip
continue
if self.mgr.cache.has_daemon(f'osd.{osd_id}', host):
# cephadm daemon instance already exists
logger.debug(f'osd id {osd_id} daemon already exists')
continue
if osd_id not in osd_uuid_map:
logger.debug('osd id {} does not exist in cluster'.format(osd_id))
continue
Expand Down Expand Up @@ -162,6 +166,10 @@ async def deploy_osd_daemons_for_existing_osds(self, host: str, service_name: st
if osd_id in before_osd_uuid_map and osd_id not in replace_osd_ids:
# if it exists but is part of the replacement operation, don't skip
continue
if self.mgr.cache.has_daemon(f'osd.{osd_id}', host):
# cephadm daemon instance already exists
logger.debug(f'osd id {osd_id} daemon already exists')
continue
if osd_id not in osd_uuid_map:
logger.debug('osd id {} does not exist in cluster'.format(osd_id))
continue
Expand Down

0 comments on commit 12bf02a

Please sign in to comment.