3 | 3 | from dataclasses import replace
4 | 4 | from datetime import timedelta
5 | 5 | import os
| 6 | +from pathlib import PurePath
6 | 7 | from unittest.mock import AsyncMock, patch
| 8 | +from uuid import uuid4
7 | 9 |
8 | | -from aiohasupervisor.models.mounts import CIFSMountResponse, MountsInfo, MountState
| 10 | +from aiohasupervisor.models.mounts import (
| 11 | +    CIFSMountResponse,
| 12 | +    MountsInfo,
| 13 | +    MountState,
| 14 | +    MountType,
| 15 | +    MountUsage,
| 16 | +    NFSMountResponse,
| 17 | +)
9 | 18 | import pytest
10 | 19 |
11 | 20 | from homeassistant.components.hassio import DOMAIN
18 | 27 |
19 | 28 | from tests.common import MockConfigEntry, async_fire_time_changed
20 | 29 | from tests.test_util.aiohttp import AiohttpClientMocker
| 30 | +from tests.typing import WebSocketGenerator
21 | 31 |
22 | 32 | MOCK_ENVIRON = {"SUPERVISOR": "127.0.0.1", "SUPERVISOR_TOKEN": "abcdefgh"}
23 | 33 |
@@ -230,16 +240,16 @@ async def test_mount_binary_sensor( |
230 | 240 |     assert hass.states.get(entity_id) is None
231 | 241 |
232 | 242 |     # Add a mount.
233 | | -    mock_mounts = [
| 243 | +    mock_mounts: list[CIFSMountResponse | NFSMountResponse] = [
234 | 244 |         CIFSMountResponse(
235 | 245 |             share="files",
236 | 246 |             server="1.2.3.4",
237 | 247 |             name="NAS",
238 | | -            type="cifs",
239 | | -            usage="share",
| 248 | +            type=MountType.CIFS,
| 249 | +            usage=MountUsage.SHARE,
240 | 250 |             read_only=False,
241 | 251 |             state=MountState.ACTIVE,
242 | | -            user_path="/share/nas",
| 252 | +            user_path=PurePath("/share/nas"),
243 | 253 |         )
244 | 254 |     ]
245 | 255 |     supervisor_client.mounts.info = AsyncMock(
@@ -282,3 +292,115 @@ async def test_mount_binary_sensor( |
282 | 292 |     async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=1000))
283 | 293 |     await hass.async_block_till_done(wait_background_tasks=True)
284 | 294 |     assert hass.states.get(entity_id) is not None
| 295 | + |
| 296 | + |
| 297 | +async def test_mount_refresh_after_issue(
| 298 | +    hass: HomeAssistant,
| 299 | +    entity_registry: er.EntityRegistry,
| 300 | +    supervisor_client: AsyncMock,
| 301 | +    hass_ws_client: WebSocketGenerator,
| 302 | +) -> None:
| 303 | + """Test hassio mount state is refreshed after an issue was send by the supervisor.""" |
| 304 | +    # Add a mount.
| 305 | +    mock_mounts: list[CIFSMountResponse | NFSMountResponse] = [
| 306 | +        CIFSMountResponse(
| 307 | +            share="files",
| 308 | +            server="1.2.3.4",
| 309 | +            name="NAS",
| 310 | +            type=MountType.CIFS,
| 311 | +            usage=MountUsage.SHARE,
| 312 | +            read_only=False,
| 313 | +            state=MountState.ACTIVE,
| 314 | +            user_path=PurePath("/share/nas"),
| 315 | +        )
| 316 | +    ]
| 317 | +    supervisor_client.mounts.info = AsyncMock(
| 318 | +        return_value=MountsInfo(default_backup_mount=None, mounts=mock_mounts)
| 319 | +    )
| 320 | +
| 321 | +    config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
| 322 | +    config_entry.add_to_hass(hass)
| 323 | +
| 324 | +    with patch.dict(os.environ, MOCK_ENVIRON):
| 325 | +        result = await async_setup_component(
| 326 | +            hass,
| 327 | +            "hassio",
| 328 | +            {"http": {"server_port": 9999, "server_host": "127.0.0.1"}, "hassio": {}},
| 329 | +        )
| 330 | +        assert result
| 331 | +    await hass.async_block_till_done()
| 332 | +
| 333 | +    # Enable the entity.
| 334 | +    entity_id = "binary_sensor.nas_connected"
| 335 | +    entity_registry.async_update_entity(entity_id, disabled_by=None)
| 336 | +    await hass.config_entries.async_reload(config_entry.entry_id)
| 337 | +    await hass.async_block_till_done()
| 338 | +
| 339 | +    # Test new entity.
| 340 | +    entity = hass.states.get(entity_id)
| 341 | +    assert entity is not None
| 342 | +    assert entity.state == "on"
| 343 | +
| 344 | +    # Change mount state to failed, send a Supervisor issue event, and verify the entity's state.
| 345 | +    mock_mounts[0] = replace(mock_mounts[0], state=MountState.FAILED)
| 346 | +    client = await hass_ws_client(hass)
| 347 | +    issue_uuid = uuid4().hex
| 348 | +    await client.send_json(
| 349 | +        {
| 350 | +            "id": 1,
| 351 | +            "type": "supervisor/event",
| 352 | +            "data": {
| 353 | +                "event": "issue_changed",
| 354 | +                "data": {
| 355 | +                    "uuid": issue_uuid,
| 356 | +                    "type": "mount_failed",
| 357 | +                    "context": "mount",
| 358 | +                    "reference": "nas",
| 359 | +                    "suggestions": [
| 360 | +                        {
| 361 | +                            "uuid": uuid4().hex,
| 362 | +                            "type": "execute_reload",
| 363 | +                            "context": "mount",
| 364 | +                            "reference": "nas",
| 365 | +                        },
| 366 | +                        {
| 367 | +                            "uuid": uuid4().hex,
| 368 | +                            "type": "execute_remove",
| 369 | +                            "context": "mount",
| 370 | +                            "reference": "nas",
| 371 | +                        },
| 372 | +                    ],
| 373 | +                },
| 374 | +            },
| 375 | +        }
| 376 | +    )
| 377 | +    msg = await client.receive_json()
| 378 | +    assert msg["success"]
| 379 | +    await hass.async_block_till_done(wait_background_tasks=True)
| 380 | +    entity = hass.states.get(entity_id)
| 381 | +    assert entity is not None
| 382 | +    assert entity.state == "off"
| 383 | +
| 384 | +    # Change mount state back to active, send the matching issue_removed event, and verify the entity's state.
| 385 | +    mock_mounts[0] = replace(mock_mounts[0], state=MountState.ACTIVE)
| 386 | +    await client.send_json(
| 387 | +        {
| 388 | +            "id": 2,
| 389 | +            "type": "supervisor/event",
| 390 | +            "data": {
| 391 | +                "event": "issue_removed",
| 392 | +                "data": {
| 393 | +                    "uuid": issue_uuid,
| 394 | +                    "type": "mount_failed",
| 395 | +                    "context": "mount",
| 396 | +                    "reference": "nas",
| 397 | +                },
| 398 | +            },
| 399 | +        }
| 400 | +    )
| 401 | +    msg = await client.receive_json()
| 402 | +    assert msg["success"]
| 403 | +    await hass.async_block_till_done(wait_background_tasks=True)
| 404 | +    entity = hass.states.get(entity_id)
| 405 | +    assert entity is not None
| 406 | +    assert entity.state == "on"