Commit 5e5b4b9

Merge pull request ceph#56674 from adk3798/wrong-grafana-uid

mgr/cephadm: pass daemon's current image when reconfiguring

Reviewed-by: Laura Flores <[email protected]>

2 parents 860cfcf + 5922d20, commit 5e5b4b9

File tree: 4 files changed, +77 −13 lines changed

  src/pybind/mgr/cephadm/module.py
  src/pybind/mgr/cephadm/serve.py
  src/pybind/mgr/cephadm/tests/test_cephadm.py
  src/pybind/mgr/cephadm/tests/test_services.py
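In brief: deploy/reconfig calls now thread a use_current_daemon_image flag from _create_daemon through _run_cephadm down to _get_container_image, so a reconfig reuses the image the daemon is already running instead of the (possibly newer) default. A minimal sketch of that fallback, using simplified stand-ins for the cephadm cache types and illustrative image names, not the real signatures:

```python
from typing import Dict, Optional


class DaemonDescription:
    """Stand-in for cephadm's daemon metadata (illustrative only)."""
    def __init__(self, container_image_name: Optional[str] = None):
        self.container_image_name = container_image_name


def pick_image(cache: Dict[str, DaemonDescription],
               daemon_name: str,
               default_image: str,
               use_current_daemon_image: bool = False) -> str:
    # On reconfig, prefer the image recorded for the running daemon
    # (what "cephadm ls" last reported) so config-file ownership matches
    # the UID baked into that image rather than the post-upgrade default.
    if use_current_daemon_image:
        dd = cache.get(daemon_name)
        if dd is not None and dd.container_image_name:
            return dd.container_image_name
    return default_image


# Reconfiguring a daemon still on the old image keeps that image:
cache = {'grafana.host1': DaemonDescription('quay.io/ceph/grafana:old')}
assert pick_image(cache, 'grafana.host1', 'quay.io/ceph/grafana:new',
                  use_current_daemon_image=True) == 'quay.io/ceph/grafana:old'
```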

src/pybind/mgr/cephadm/module.py
Lines changed: 19 additions & 1 deletion

@@ -1591,9 +1591,27 @@ def _client_keyring_rm(
         self._kick_serve_loop()
         return HandleCommandResult()

-    def _get_container_image(self, daemon_name: str) -> Optional[str]:
+    def _get_container_image(self, daemon_name: str, use_current_daemon_image: bool = False) -> Optional[str]:
         daemon_type = daemon_name.split('.', 1)[0]  # type: ignore
         image: Optional[str] = None
+        # Try to use current image if specified. This is necessary
+        # because, if we're reconfiguring the daemon, we can
+        # run into issues during upgrade. For example if the default grafana
+        # image is changing and we pass the new image name when doing
+        # the reconfig, we could end up using the UID required by the
+        # new image as owner for the config files we write, while the
+        # unit.run will still reference the old image that requires those
+        # config files to be owned by a different UID
+        # Note that "current image" just means the one we picked up
+        # when we last ran "cephadm ls" on the host
+        if use_current_daemon_image:
+            try:
+                dd = self.cache.get_daemon(daemon_name=daemon_name)
+                if dd.container_image_name:
+                    return dd.container_image_name
+            except OrchestratorError:
+                self.log.debug(f'Could not find daemon {daemon_name} in cache '
+                               'while searching for its image')
         if daemon_type in CEPH_IMAGE_TYPES:
             # get container image
             image = str(self.get_foreign_ceph_option(
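To make the failure mode in the comment concrete, a small self-checking sketch; the UID values are invented, since the real ones depend on the grafana images involved:

```python
# Hypothetical illustration of the UID mismatch described above. The UIDs
# are made up; the point is only that config files chowned for the new
# image's UID become unreadable when the old image (still referenced by
# unit.run) starts the container.
OLD_IMAGE_UID = 472   # uid the currently running grafana image expects (assumed)
NEW_IMAGE_UID = 8765  # uid the new default image expects (assumed)


def daemon_can_read_config(uid_used_for_files: int, uid_in_unit_run: int) -> bool:
    """Return True if the restarted daemon can read its config files."""
    return uid_used_for_files == uid_in_unit_run


# Before this fix: reconfig chowns files for the new image, but the
# container still runs the old one.
assert not daemon_can_read_config(NEW_IMAGE_UID, OLD_IMAGE_UID)
# After this fix: the reconfig uses the daemon's current image for both.
assert daemon_can_read_config(OLD_IMAGE_UID, OLD_IMAGE_UID)
```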

src/pybind/mgr/cephadm/serve.py
Lines changed: 8 additions & 2 deletions

@@ -1374,6 +1374,7 @@ async def _create_daemon(self,
                 ),
                 config_blobs=daemon_spec.final_config,
             ).dump_json_str(),
+            use_current_daemon_image=reconfig,
         )

         if daemon_spec.daemon_type == 'agent':
@@ -1518,11 +1519,12 @@ async def _run_cephadm_json(self,
                                 error_ok: Optional[bool] = False,
                                 image: Optional[str] = "",
                                 log_output: Optional[bool] = True,
+                                use_current_daemon_image: bool = False,
                                 ) -> Any:
         try:
             out, err, code = await self._run_cephadm(
                 host, entity, command, args, no_fsid=no_fsid, error_ok=error_ok,
-                image=image, log_output=log_output)
+                image=image, log_output=log_output, use_current_daemon_image=use_current_daemon_image)
             if code:
                 raise OrchestratorError(f'host {host} `cephadm {command}` returned {code}: {err}')
         except Exception as e:
@@ -1547,6 +1549,7 @@ async def _run_cephadm(self,
                            env_vars: Optional[List[str]] = None,
                            log_output: Optional[bool] = True,
                            timeout: Optional[int] = None,  # timeout in seconds
+                           use_current_daemon_image: bool = False,
                            ) -> Tuple[List[str], List[str], int]:
         """
         Run cephadm on the remote host with the given command + args
@@ -1567,7 +1570,10 @@ async def _run_cephadm(self,
         # Skip the image check for daemons deployed that are not ceph containers
         if not str(entity).startswith(bypass_image):
             if not image and entity is not cephadmNoImage:
-                image = self.mgr._get_container_image(entity)
+                image = self.mgr._get_container_image(
+                    entity,
+                    use_current_daemon_image=use_current_daemon_image
+                )

         final_args = []
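Condensed, the flag threads through three layers; a sketch with heavily abbreviated signatures (the real methods are async and take many more parameters):

```python
from typing import List, Optional

DEFAULT_IMAGE = 'quay.io/ceph/ceph:latest'  # illustrative default


def _get_container_image(entity: str,
                         use_current_daemon_image: bool = False) -> Optional[str]:
    # Prefer the daemon's cached image on reconfig, else the configured
    # default (elided here; see module.py above).
    return DEFAULT_IMAGE


def _run_cephadm(host: str, entity: str, command: str, args: List[str],
                 image: Optional[str] = '',
                 use_current_daemon_image: bool = False) -> None:
    if not image:
        image = _get_container_image(
            entity, use_current_daemon_image=use_current_daemon_image)
    print(f'would run cephadm {command} on {host} with image {image}')


def _create_daemon(host: str, daemon_name: str, reconfig: bool = False) -> None:
    # The reconfig flag directly drives the image choice.
    _run_cephadm(host, daemon_name, 'deploy', [],
                 use_current_daemon_image=reconfig)


_create_daemon('host1', 'grafana.host1', reconfig=True)
```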

src/pybind/mgr/cephadm/tests/test_cephadm.py
Lines changed: 27 additions & 10 deletions

@@ -127,10 +127,12 @@ async def _ceph_volume_list(s, host, entity, cmd, **kwargs):
                 [host]).stdout == f"Created osd(s) 1 on host '{host}'"
             assert _run_cephadm.mock_calls == [
                 mock.call(host, 'osd', 'ceph-volume',
-                          ['--', 'lvm', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
-                mock.call(host, f'osd.{osd_id}', ['_orch', 'deploy'], [], stdin=mock.ANY),
+                          ['--', 'lvm', 'list', '--format', 'json'],
+                          no_fsid=False, error_ok=False, image='', log_output=True, use_current_daemon_image=False),
+                mock.call(host, f'osd.{osd_id}', ['_orch', 'deploy'], [], stdin=mock.ANY, use_current_daemon_image=False),
                 mock.call(host, 'osd', 'ceph-volume',
-                          ['--', 'raw', 'list', '--format', 'json'], no_fsid=False, error_ok=False, image='', log_output=True),
+                          ['--', 'raw', 'list', '--format', 'json'],
+                          no_fsid=False, error_ok=False, image='', log_output=True, use_current_daemon_image=False),
             ]
             dd = cephadm_module.cache.get_daemon(f'osd.{osd_id}', host=host)
             assert dd.name() == f'osd.{osd_id}'
@@ -524,6 +526,7 @@ def test_daemon_check_extra_config(self, _run_cephadm, cephadm_module: CephadmOr
                         },
                     },
                 }),
+                use_current_daemon_image=True,
             )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -578,6 +581,7 @@ def test_mon_crush_location_deployment(self, _run_cephadm, cephadm_module: Cepha
                         "crush_location": "datacenter=a",
                     },
                 }),
+                use_current_daemon_image=False,
             )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -619,6 +623,7 @@ def test_extra_container_args(self, _run_cephadm, cephadm_module: CephadmOrchest
                         "keyring": "[client.crash.test]\nkey = None\n",
                     },
                 }),
+                use_current_daemon_image=False,
             )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -660,6 +665,7 @@ def test_extra_entrypoint_args(self, _run_cephadm, cephadm_module: CephadmOrches
                     },
                     "config_blobs": {},
                 }),
+                use_current_daemon_image=False,
             )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -709,6 +715,7 @@ def test_extra_entrypoint_and_container_args(self, _run_cephadm, cephadm_module:
                     },
                     "config_blobs": {},
                 }),
+                use_current_daemon_image=False,
             )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -762,6 +769,7 @@ def test_extra_entrypoint_and_container_args_with_spaces(self, _run_cephadm, cep
                     },
                     "config_blobs": {},
                 }),
+                use_current_daemon_image=False,
             )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -1107,9 +1115,13 @@ def test_apply_osd_save(self, _run_cephadm, cephadm_module: CephadmOrchestrator)
             env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=foo'], error_ok=True,
             stdin='{"config": "", "keyring": ""}')
         _run_cephadm.assert_any_call(
-            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
+            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'],
+            image='', no_fsid=False, error_ok=False, log_output=True, use_current_daemon_image=False
+        )
         _run_cephadm.assert_any_call(
-            'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
+            'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'],
+            image='', no_fsid=False, error_ok=False, log_output=True, use_current_daemon_image=False
+        )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
     def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
@@ -1147,11 +1159,16 @@ def test_apply_osd_save_non_collocated(self, _run_cephadm, cephadm_module: Cepha
                 '--no-auto', '/dev/sdb', '--db-devices', '/dev/sdc',
                 '--wal-devices', '/dev/sdd', '--yes', '--no-systemd'],
             env_vars=['CEPH_VOLUME_OSDSPEC_AFFINITY=noncollocated'],
-            error_ok=True, stdin='{"config": "", "keyring": ""}')
+            error_ok=True, stdin='{"config": "", "keyring": ""}',
+        )
         _run_cephadm.assert_any_call(
-            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
+            'test', 'osd', 'ceph-volume', ['--', 'lvm', 'list', '--format', 'json'],
+            image='', no_fsid=False, error_ok=False, log_output=True, use_current_daemon_image=False
+        )
         _run_cephadm.assert_any_call(
-            'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'], image='', no_fsid=False, error_ok=False, log_output=True)
+            'test', 'osd', 'ceph-volume', ['--', 'raw', 'list', '--format', 'json'],
+            image='', no_fsid=False, error_ok=False, log_output=True, use_current_daemon_image=False
+        )

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
     @mock.patch("cephadm.module.SpecStore.save")
@@ -2298,10 +2315,10 @@ def test_ceph_volume_no_filter_for_batch(self, _run_cephadm, cephadm_module: Cep
         assert _run_cephadm.mock_calls == [
             mock.call('test', 'osd', 'ceph-volume',
                       ['--', 'inventory', '--format=json-pretty', '--filter-for-batch'], image='',
-                      no_fsid=False, error_ok=False, log_output=False),
+                      no_fsid=False, error_ok=False, log_output=False, use_current_daemon_image=False),
             mock.call('test', 'osd', 'ceph-volume',
                       ['--', 'inventory', '--format=json-pretty'], image='',
-                      no_fsid=False, error_ok=False, log_output=False),
+                      no_fsid=False, error_ok=False, log_output=False, use_current_daemon_image=False),
         ]

     @mock.patch("cephadm.serve.CephadmServe._run_cephadm")
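Why every existing assertion had to grow the new keyword: unittest.mock compares the full argument list, so once the call sites pass use_current_daemon_image explicitly, an assert_called_with that omits it no longer matches. A minimal, runnable demonstration:

```python
from unittest import mock

m = mock.Mock()
m('test', 'osd', image='', use_current_daemon_image=False)

# Exact match required: the assertion must list every kwarg the call used.
m.assert_called_with('test', 'osd', image='', use_current_daemon_image=False)

# Omitting the new kwarg raises AssertionError, as the stale tests did:
try:
    m.assert_called_with('test', 'osd', image='')
except AssertionError:
    print('stale assertion no longer matches')
```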

src/pybind/mgr/cephadm/tests/test_services.py
Lines changed: 23 additions & 0 deletions

@@ -345,6 +345,7 @@ def test_iscsi_config(self, _get_trusted_ips, _get_name, _run_cephadm, cephadm_m
                         },
                     }
                 }),
+                use_current_daemon_image=False,
             )

@@ -480,6 +481,7 @@ def test_nvmeof_config(self, _get_name, _run_cephadm, cephadm_module: CephadmOrc
                     }
                 }
             }),
+            use_current_daemon_image=False,
         )

@@ -590,6 +592,7 @@ def test_alertmanager_config(
                     "peers": [],
                 }
             }),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -685,6 +688,7 @@ def get_root_cert():
                     'web_config': '/etc/alertmanager/web.yml',
                 }
             }),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -819,6 +823,7 @@ def test_prometheus_config_security_disabled(self, _run_cephadm, cephadm_module:
                     'ip_to_bind_to': '1.2.3.1',
                 },
             }),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -997,6 +1002,7 @@ def gen_cert(host, addr):
                     'web_config': '/etc/prometheus/web.yml',
                 },
             }),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -1065,6 +1071,7 @@ def test_loki_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
                     },
                 },
             }),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -1123,6 +1130,7 @@ def test_promtail_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator
                     },
                 },
             }),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -1234,6 +1242,7 @@ def test_grafana_config(self, _run_cephadm, cephadm_module: CephadmOrchestrator)
                     "files": files,
                 },
             }),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('{}'))
@@ -1408,6 +1417,7 @@ def test_monitoring_ports(self, _run_cephadm, cephadm_module: CephadmOrchestrato
                     },
                     "config_blobs": {},
                 }),
+                use_current_daemon_image=True,
             )

@@ -1514,6 +1524,7 @@ def test_snmp_v2c_deployment(self, _run_cephadm, cephadm_module: CephadmOrchestr
                     },
                     "config_blobs": config,
                 }),
+                use_current_daemon_image=False,
             )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -1561,6 +1572,7 @@ def test_snmp_v2c_with_port(self, _run_cephadm, cephadm_module: CephadmOrchestra
                     },
                     "config_blobs": config,
                 }),
+                use_current_daemon_image=False,
             )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -1612,6 +1624,7 @@ def test_snmp_v3nopriv_deployment(self, _run_cephadm, cephadm_module: CephadmOrc
                     },
                     "config_blobs": config,
                 }),
+                use_current_daemon_image=False,
             )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -1668,6 +1681,7 @@ def test_snmp_v3priv_deployment(self, _run_cephadm, cephadm_module: CephadmOrche
                     },
                     "config_blobs": config,
                 }),
+                use_current_daemon_image=False,
             )

@@ -2735,6 +2749,7 @@ def test_jaeger_query(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
                     },
                     "config_blobs": config,
                 }),
+                use_current_daemon_image=False,
             )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -2774,6 +2789,7 @@ def test_jaeger_collector_es_deploy(self, _run_cephadm, cephadm_module: CephadmO
                     },
                     "config_blobs": es_config,
                 }),
+                use_current_daemon_image=False,
             )
         with with_service(cephadm_module, collector_spec):
             _run_cephadm.assert_called_with(
@@ -2801,6 +2817,7 @@ def test_jaeger_collector_es_deploy(self, _run_cephadm, cephadm_module: CephadmO
                     },
                     "config_blobs": collector_config,
                 }),
+                use_current_daemon_image=False,
             )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -2840,6 +2857,7 @@ def test_jaeger_agent(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
                     },
                     "config_blobs": collector_config,
                 }),
+                use_current_daemon_image=False,
             )
         with with_service(cephadm_module, agent_spec):
            _run_cephadm.assert_called_with(
@@ -2867,6 +2885,7 @@ def test_jaeger_agent(self, _run_cephadm, cephadm_module: CephadmOrchestrator):
                     },
                     "config_blobs": agent_config,
                 }),
+                use_current_daemon_image=False,
             )

@@ -2923,6 +2942,7 @@ def test_deploy_custom_container(
                     },
                 }
             ),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.serve.CephadmServe._run_cephadm")
@@ -3009,6 +3029,7 @@ def test_deploy_custom_container_with_init_ctrs(
             ['_orch', 'deploy'],
             [],
             stdin=json.dumps(expected),
+            use_current_daemon_image=False,
         )

@@ -3059,6 +3080,7 @@ def test_deploy_smb(
             ['_orch', 'deploy'],
             [],
             stdin=json.dumps(expected),
+            use_current_daemon_image=False,
         )

     @patch("cephadm.module.CephadmOrchestrator.get_unique_name")
@@ -3128,4 +3150,5 @@ def test_deploy_smb_join_dns(
             ['_orch', 'deploy'],
             [],
             stdin=json.dumps(expected),
+            use_current_daemon_image=False,
         )
