Skip to content

Commit 361c69d

Browse files
cephadm: refactor get_container_info to use new listing apis
Thoroughly refactor the get_container_info function so that it uses the new listing functions and updaters, with the aim of avoiding redundant operations that can waste resources. A fairly heavy rework of the tests was needed too. Signed-off-by: John Mulligan <[email protected]>
1 parent 37269ae commit 361c69d

File tree

2 files changed

+107
-55
lines changed

2 files changed

+107
-55
lines changed

src/cephadm/cephadm.py

Lines changed: 65 additions & 51 deletions
Original file line numberDiff line numberDiff line change
def get_container_info(
    ctx: CephadmContext, daemon_filter: str, by_name: bool
) -> Optional[ContainerInfo]:
    """Return container info for a daemon matching the given filter.

    Looks up daemons on the host via the listing API, picks the best
    match (preferring a running daemon), and returns its container
    information, falling back to inspecting the container image when
    the daemon's container is not running.

    :param ctx: Cephadm context
    :param daemon_filter: daemon name or type
    :param by_name: must be set to True if daemon name is provided
    :return: Container information or None
    """
    # A daemon name always contains a '.' (type.id); reject early so we
    # don't silently match by type when the caller intended a name.
    if by_name and '.' not in daemon_filter:
        logger.warning(
            f'Trying to get container info using invalid daemon name {daemon_filter}'
        )
        return None

    # configure filters: fsid and (daemon name or daemon type)
    kwargs = {
        'fsid': ctx.fsid,
        ('daemon_name' if by_name else 'daemon_type'): daemon_filter,
    }
    # use keep_container_info to cache the ContainerInfo generated
    # during the loop and hopefully avoid having to perform the same
    # lookup right away.
    _cinfo_key = '_container_info'
    _updater = CoreStatusUpdater(keep_container_info=_cinfo_key)
    matching_daemons = [
        _updater.expand(ctx, entry)
        for entry in daemons_matching(ctx, **kwargs)
    ]

    if not matching_daemons:
        # no matches at all
        logger.debug(
            'no daemons match: daemon_filter=%r, by_name=%r',
            daemon_filter,
            by_name,
        )
        return None
    if by_name and len(matching_daemons) > 1:
        # too many matches while searching by name
        logger.warning(
            f'Found multiple daemons sharing same name: {daemon_filter}'
        )
    # Prefer to take the first daemon we find that is actually running, or
    # just the first in the list if none are running
    # (key reminder: false (0) sorts before true (1))
    matching_daemons = sorted(
        matching_daemons, key=lambda d: d.get('state') != 'running'
    )

    matched_daemon = matching_daemons[0]
    is_running = matched_daemon.get('state') == 'running'
    image_name = matched_daemon.get('container_image_name', '')
    if is_running:
        cinfo = matched_daemon.get(_cinfo_key)
        if cinfo:
            # found a cached ContainerInfo while getting daemon statuses
            return cinfo
        return get_container_stats(
            ctx,
            DaemonIdentity.from_name(
                matched_daemon['fsid'], matched_daemon['name']
            ),
        )
    elif image_name:
        # this daemon's container is not running. the regular container inspect
        # command will not work. Fall back to inspecting the container image
        assert isinstance(image_name, str)
        return parsed_container_image_stats(ctx, image_name)
    # not running, but no image name to look up!
    logger.debug(
        'bad daemon state: no image, not running: %r', matched_daemon
    )
    return None
src/cephadm/tests/test_cephadm.py

Lines changed: 42 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -786,10 +786,32 @@ def test_get_container_info(
786786
output,
787787
funkypatch,
788788
):
789+
import cephadmlib.listing
790+
import cephadmlib.daemon_identity
791+
789792
ctx = _cephadm.CephadmContext()
790793
ctx.fsid = '00000000-0000-0000-0000-0000deadbeef'
791794
ctx.container_engine = mock_podman()
792-
funkypatch.patch('cephadm.list_daemons').return_value = daemon_list
795+
796+
def _dms(*args, **kwargs):
797+
for val in daemon_list:
798+
yield cephadmlib.listing.DaemonEntry(
799+
cephadmlib.daemon_identity.DaemonIdentity.from_name(
800+
val['fsid'], val['name']
801+
),
802+
val,
803+
'',
804+
)
805+
806+
funkypatch.patch('cephadmlib.listing.daemons').side_effect = _dms
807+
funkypatch.patch('cephadmlib.systemd.check_unit').return_value = (
808+
True,
809+
'running' if container_stats else 'stopped',
810+
'',
811+
)
812+
funkypatch.patch('cephadmlib.call_wrappers.call').side_effect = (
813+
ValueError
814+
)
793815
cinfo = (
794816
_cephadm.ContainerInfo(*container_stats)
795817
if container_stats
@@ -803,9 +825,18 @@ def test_get_container_info(
803825
)
804826

805827
def test_get_container_info_daemon_down(self, funkypatch):
828+
import cephadmlib.listing
829+
import cephadmlib.daemon_identity
830+
funkypatch.patch('cephadmlib.systemd.check_unit').return_value = (True, 'stopped', '')
831+
funkypatch.patch('cephadmlib.call_wrappers.call').side_effect = ValueError
806832
_get_stats_by_name = funkypatch.patch('cephadmlib.container_engines.parsed_container_image_stats')
807833
_get_stats = funkypatch.patch('cephadmlib.container_types.get_container_stats')
808-
_list_daemons = funkypatch.patch('cephadm.list_daemons')
834+
_daemons = funkypatch.patch('cephadmlib.listing.daemons')
835+
836+
class FakeUpdater(cephadmlib.listing.NoOpDaemonStatusUpdater):
837+
def __init__(self, *args, **kwargs):
838+
pass
839+
funkypatch.patch('cephadmlib.listing_updaters.CoreStatusUpdater', dest=FakeUpdater)
809840

810841
ctx = _cephadm.CephadmContext()
811842
ctx.fsid = '5e39c134-dfc5-11ee-a344-5254000ee071'
@@ -844,7 +875,14 @@ def test_get_container_info_daemon_down(self, funkypatch):
844875
"deployed": "2024-03-11T17:37:23.520061Z",
845876
"configured": "2024-03-11T17:37:28.494075Z"
846877
}
847-
_list_daemons.return_value = [down_osd_json]
878+
879+
_current_daemons = []
880+
def _dms(*args, **kwargs):
881+
for d in _current_daemons:
882+
ident = cephadmlib.daemon_identity.DaemonIdentity.from_name(d['fsid'], d['name'])
883+
yield cephadmlib.listing.DaemonEntry(ident, d, '')
884+
_daemons.side_effect = _dms
885+
_current_daemons[:] = [down_osd_json]
848886

849887
expected_container_info = _cephadm.ContainerInfo(
850888
container_id='',
@@ -867,7 +905,7 @@ def test_get_container_info_daemon_down(self, funkypatch):
867905
up_osd_json = copy.deepcopy(down_osd_json)
868906
up_osd_json['state'] = 'running'
869907
_get_stats.return_value = _cephadm.ContainerInfo('container_id', 'image_name','image_id','the_past','')
870-
_list_daemons.return_value = [down_osd_json, up_osd_json]
908+
_current_daemons[:] = [down_osd_json, up_osd_json]
871909

872910
expected_container_info = _cephadm.ContainerInfo(
873911
container_id='container_id',

0 commit comments

Comments
 (0)