From d877386777c743fb1adb9c7d2957f99efad39f8f Mon Sep 17 00:00:00 2001 From: Sahas Subramanian Date: Thu, 15 May 2025 16:39:11 +0200 Subject: [PATCH 1/2] Fix `succeeded_power` calculation in PV power distribution This was reported incorrectly earlier using a variable that was never set. That variable and related (unused) channels have been removed. This also updates the PV pool tests to ensure that the distribution results are accurate. Signed-off-by: Sahas Subramanian --- .../_pv_inverter_manager.py | 9 +- .../_pv_pool/test_pv_pool_control_methods.py | 104 +++++++++++++++++- 2 files changed, 103 insertions(+), 10 deletions(-) diff --git a/src/frequenz/sdk/microgrid/_power_distributing/_component_managers/_pv_inverter_manager/_pv_inverter_manager.py b/src/frequenz/sdk/microgrid/_power_distributing/_component_managers/_pv_inverter_manager/_pv_inverter_manager.py index f9afc1f39..4814ebe97 100644 --- a/src/frequenz/sdk/microgrid/_power_distributing/_component_managers/_pv_inverter_manager/_pv_inverter_manager.py +++ b/src/frequenz/sdk/microgrid/_power_distributing/_component_managers/_pv_inverter_manager/_pv_inverter_manager.py @@ -8,7 +8,7 @@ import logging from datetime import timedelta -from frequenz.channels import Broadcast, LatestValueCache, Sender +from frequenz.channels import LatestValueCache, Sender from frequenz.client.microgrid import ( ApiClientError, ComponentCategory, @@ -67,9 +67,6 @@ def __init__( self._component_data_caches: dict[ ComponentId, LatestValueCache[InverterData] ] = {} - self._target_power = Power.zero() - self._target_power_channel = Broadcast[Request](name="target_power") - self._target_power_tx = self._target_power_channel.new_sender() self._task: asyncio.Task[None] | None = None @override @@ -241,7 +238,7 @@ async def _set_api_power( # pylint: disable=too-many-locals failed_components=failed_components, succeeded_components=succeeded_components, failed_power=failed_power, - succeeded_power=self._target_power - failed_power, + succeeded_power=request.power - failed_power - remaining_power, excess_power=remaining_power, request=request, ) @@ -250,7 +247,7 @@ async def _set_api_power( # pylint: disable=too-many-locals await self._results_sender.send( Success( succeeded_components=succeeded_components, - succeeded_power=self._target_power, + succeeded_power=request.power - remaining_power, excess_power=remaining_power, request=request, ) diff --git a/tests/timeseries/_pv_pool/test_pv_pool_control_methods.py b/tests/timeseries/_pv_pool/test_pv_pool_control_methods.py index 3bea88145..96bea5c77 100644 --- a/tests/timeseries/_pv_pool/test_pv_pool_control_methods.py +++ b/tests/timeseries/_pv_pool/test_pv_pool_control_methods.py @@ -151,6 +151,8 @@ async def test_setting_power( # pylint: disable=too-many-statements bounds_rx, lambda x: x.bounds is not None and x.bounds.lower.as_watts() == -100000.0, ) + dist_results_rx = pv_pool.power_distribution_results.new_receiver() + self._assert_report(latest_report, power=None, lower=-100000.0, upper=0.0) await pv_pool.propose_power(Power.from_watts(-80000.0)) await self._recv_reports_until( @@ -172,6 +174,18 @@ async def test_setting_power( # pylint: disable=too-many-statements mocker.call(inv_ids[2], -25000.0), mocker.call(inv_ids[3], -25000.0), ] + dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.from_watts(-80000.0) + assert dist_results.excess_power == Power.zero() + 
assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(18), + ComponentId(28), + ComponentId(38), + } set_power.reset_mock() await pv_pool.propose_power(Power.from_watts(-4000.0)) @@ -194,6 +208,18 @@ async def test_setting_power( # pylint: disable=too-many-statements mocker.call(inv_ids[2], -1000.0), mocker.call(inv_ids[3], -1000.0), ] + dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.from_watts(-4000.0) + assert dist_results.excess_power == Power.zero() + assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(18), + ComponentId(28), + ComponentId(38), + } # After failing 1 inverter, bounds should go down and power shouldn't be # distributed to that inverter. @@ -205,6 +231,17 @@ async def test_setting_power( # pylint: disable=too-many-statements self._assert_report( await bounds_rx.receive(), power=-4000.0, lower=-80000.0, upper=0.0 ) + dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.from_watts(-4000.0) + assert dist_results.excess_power == Power.zero() + assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(28), + ComponentId(38), + } set_power.reset_mock() await pv_pool.propose_power(Power.from_watts(-70000.0)) @@ -227,6 +264,17 @@ async def test_setting_power( # pylint: disable=too-many-statements mocker.call(inv_ids[2], -30000.0), mocker.call(inv_ids[3], -30000.0), ] + dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.from_watts(-70000.0) + assert dist_results.excess_power == Power.zero() + assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(28), + ComponentId(38), + } # After the failed inverter recovers, bounds should go back up and power # should be distributed to all inverters @@ -238,17 +286,29 @@ async def test_setting_power( # pylint: disable=too-many-statements self._assert_report( await bounds_rx.receive(), power=-70000.0, lower=-100000.0, upper=0.0 ) + dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.from_watts(-70000.0) + assert dist_results.excess_power == Power.zero() + assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(18), + ComponentId(28), + ComponentId(38), + } set_power.reset_mock() - await pv_pool.propose_power(Power.from_watts(-90000.0)) + await pv_pool.propose_power(Power.from_watts(-200000.0)) await self._recv_reports_until( bounds_rx, lambda x: x.target_power is not None - and x.target_power.as_watts() == -90000.0, + and x.target_power.as_watts() == -100000.0, ) self._assert_report( - await bounds_rx.receive(), power=-90000.0, lower=-100000.0, upper=0.0 + await bounds_rx.receive(), power=-100000.0, lower=-100000.0, upper=0.0 ) await asyncio.sleep(0.0) @@ -258,8 +318,20 @@ async def test_setting_power( # pylint: disable=too-many-statements mocker.call(inv_ids[0], -10000.0), mocker.call(inv_ids[1], -20000.0), mocker.call(inv_ids[2], -30000.0), - mocker.call(inv_ids[3], -30000.0), + mocker.call(inv_ids[3], -40000.0), ] + 
dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.from_watts(-100000.0) + assert dist_results.excess_power == Power.zero() + assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(18), + ComponentId(28), + ComponentId(38), + } # Setting 0 power should set all inverters to 0 set_power.reset_mock() @@ -281,6 +353,18 @@ async def test_setting_power( # pylint: disable=too-many-statements mocker.call(inv_ids[2], 0.0), mocker.call(inv_ids[3], 0.0), ] + dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.zero() + assert dist_results.excess_power == Power.zero() + assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(18), + ComponentId(28), + ComponentId(38), + } # Resetting the power should lead to default (full) power getting set for all # inverters. @@ -301,3 +385,15 @@ async def test_setting_power( # pylint: disable=too-many-statements mocker.call(inv_ids[2], -30_000.0), mocker.call(inv_ids[3], -40_000.0), ] + dist_results = await dist_results_rx.receive() + assert isinstance( + dist_results, _power_distributing.Success + ), f"Expected a success, got {dist_results}" + assert dist_results.succeeded_power == Power.from_watts(-100000.0) + assert dist_results.excess_power == Power.zero() + assert dist_results.succeeded_components == { + ComponentId(8), + ComponentId(18), + ComponentId(28), + ComponentId(38), + } From 02fbd213127019ab625f23f1bfaa3e4ed614f5c9 Mon Sep 17 00:00:00 2001 From: Sahas Subramanian Date: Thu, 15 May 2025 16:57:06 +0200 Subject: [PATCH 2/2] Update release notes Signed-off-by: Sahas Subramanian --- RELEASE_NOTES.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 77990748b..919b44181 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -47,7 +47,7 @@ ## Bug Fixes -- Components used to be just forgotten by the power manager when all proposals are withdrawn, leaving them at their last set power values. This has been fixed by getting the power manager to set the components to their default powers, based on the component category (according to the table below), as the last step. +- The power manager used to just forgot components when all proposals to them are withdrawn, leaving them at their last set power values. This has been fixed by getting the power manager to set the components to their default powers, based on the component category (according to the table below), as the last step. | component category | default power | @@ -58,4 +58,6 @@ - PV Pool instances can now be created in sites without any PV. This allows for writing generic code that works for all locations, that depends on the PV power formula, for example. +- `Success/PartialFailure` results from `PVPool.power_distribution_results` now report correct `succeeded_power` values. + - The `find_first_descendant_component` method in the component graph was allowing non-root components to be used as the root component during traversal. This was leading to confusing behaviour when the root component couldn't be identified deterministically. For example, if the root category was specified as a meter, it could start traversing from a different meter each time. 
It is no longer possible to specify a root category, and the method now always traverses from the `GRID` component.
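
For illustration (not part of the patch): a minimal sketch of how an application could consume the corrected `power_distribution_results`, mirroring the assertions in the updated tests. The `new_pv_pool(priority=...)` construction and the `frequenz.quantities` import path are assumptions and may differ in the SDK version in use:

from frequenz.quantities import Power  # assumption: Power may live elsewhere in older SDK versions
from frequenz.sdk import microgrid


async def watch_pv_distribution_results() -> None:
    """Print how much of each PV power proposal was actually applied."""
    # Assumption: the application has already initialized the microgrid API;
    # `new_pv_pool` and its `priority` argument follow the SDK's other pool
    # factories and may differ in name or signature.
    pv_pool = microgrid.new_pv_pool(priority=5)
    results_rx = pv_pool.power_distribution_results.new_receiver()

    await pv_pool.propose_power(Power.from_watts(-80_000.0))

    async for result in results_rx:
        # Success and PartialFailure results carry these fields (see the test
        # updates above); other result types are skipped here.
        succeeded = getattr(result, "succeeded_power", None)
        if succeeded is None:
            continue
        print(
            f"requested={result.request.power} "
            f"succeeded={succeeded} "  # now request.power minus failed and excess power
            f"excess={result.excess_power}"
        )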