
Commit bf267ba

zonca and sd109 authored
Skip volumes with the keep property set to true in volumes_for_cluster (#176)
* Skip volumes with the keep property set to true in volumes_for_cluster (see zonca/jupyterhub-deploy-kubernetes-jetstream#93 (comment))
* Run auto-formatter
* Use top-level variable for volume property
* Add section on user-configurable volume cleanup behaviour
* Fix formatting
* Add unit test for volume property filter
* Rename volumes_for_cluster -> filtered_volumes_for_cluster
* Fix comment typos
* Make formatter happy

Co-authored-by: sd109 <[email protected]>
1 parent 1732b56 commit bf267ba

File tree: 4 files changed (+82, −8 lines)

4 files changed

+82
-8
lines changed

.github/workflows/test-pr.yaml

Lines changed: 2 additions & 2 deletions

```diff
@@ -73,11 +73,11 @@ jobs:
       - name: Provision Azimuth
         uses: azimuth-cloud/azimuth-config/.github/actions/provision@devel

-      # # Run the tests
+      # Run the tests
       - name: Run Azimuth tests
         uses: azimuth-cloud/azimuth-config/.github/actions/test@devel

-       # Tear down the environment
+      # Tear down the environment
       - name: Destroy Azimuth
         uses: azimuth-cloud/azimuth-config/.github/actions/destroy@devel
         if: ${{ always() }}
```

README.md

Lines changed: 16 additions & 1 deletion

````diff
@@ -56,6 +56,21 @@ metadata:

 > **NOTE**: Any value other than `delete` means volumes will be kept.

+### User-configurable behaviour
+
+Annotations on the Kubernetes resources are only available to administrators with
+access to the Cluster API management cluster's Kubernetes API; therefore, the Janitor
+also provides an alternative user-facing mechanism for marking volumes which should
+not be deleted during cluster clean-up. This is done by adding a property to the
+OpenStack volume using:
+
+```
+openstack volume set --property janitor.capi.azimuth-cloud.com/keep='true' <volume-name-or-id>
+```
+
+Any value other than 'true' will result in the volume being deleted when the workload
+cluster is deleted.
+
 ## How it works

 `cluster-api-janitor-openstack` watches for `OpenStackCluster`s being created and adds its
@@ -66,7 +81,7 @@ Cluster API `Cluster`, from being removed until the finalizer is removed.
 (specifically, it waits for the `deletionTimestamp` to be set, indicating that a deletion
 has been requested), at which point it uses the credential from
 `OpenStackCluster.spec.identityRef` to remove any dangling resources that were created by
-the OCCM or Cinder CSI with the same cluster name as the cluster being deleted.
+the OCCM or Cinder CSI with the same cluster name as the cluster being deleted.
 The cluster name is determined by the `cluster.x-k8s.io/cluster-name` label on the
 OpenStackCluster resource, if present.
 If the label is not set, the name of the OpenStackCluster resource (`metadata.name`) is
````
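
If you prefer to mark a volume from Python instead of the CLI, a minimal sketch using the openstacksdk library follows. This is not part of the commit: the cloud and volume names are hypothetical, and it assumes, as the CLI does, that volume properties set with `--property` are stored as volume metadata.

```python
# Illustrative only: set the Janitor's keep property via openstacksdk rather
# than the `openstack` CLI. Cloud and volume names below are hypothetical.
import openstack

# Property name defined by this commit in capi_janitor/openstack/operator.py.
KEEP_PROPERTY = "janitor.capi.azimuth-cloud.com/keep"

# Assumes a configured clouds.yaml entry named "my-cloud".
conn = openstack.connect(cloud="my-cloud")

volume = conn.block_storage.find_volume("my-data-volume")
# CLI volume "properties" are volume metadata under the hood, which is
# what the Janitor inspects when filtering volumes during cleanup.
conn.block_storage.set_volume_metadata(volume, **{KEEP_PROPERTY: "true"})
```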

capi_janitor/openstack/operator.py

Lines changed: 16 additions & 4 deletions

```diff
@@ -31,6 +31,11 @@
 RETRY_ANNOTATION = "janitor.capi.stackhpc.com/retry"
 RETRY_DEFAULT_DELAY = int(os.environ.get("CAPI_JANITOR_RETRY_DEFAULT_DELAY", "60"))

+# The property on the OpenStack volume resource which, if set to 'true',
+# will instruct the Janitor to ignore this volume when cleaning up cluster
+# resources.
+OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY = "janitor.capi.azimuth-cloud.com/keep"
+

 @kopf.on.startup()
 async def on_startup(**kwargs):
@@ -103,14 +108,19 @@ async def secgroups_for_cluster(resource, cluster):
         yield sg


-async def volumes_for_cluster(resource, cluster):
+async def filtered_volumes_for_cluster(resource, cluster):
     """
     Async iterator for volumes belonging to the specified cluster.
     """
     async for vol in resource.list():
         # CSI Cinder sets metadata on the volumes that we can look for
         owner = vol.metadata.get("cinder.csi.openstack.org/cluster")
-        if owner and owner == cluster:
+        # Skip volumes with the keep property set to true
+        if (
+            owner
+            and owner == cluster
+            and vol.metadata.get(OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY) != "true"
+        ):
             yield vol
@@ -228,7 +238,7 @@ async def purge_openstack_resources(
     )
     logger.info("deleted snapshots for persistent volume claims")
     check_volumes = await try_delete(
-        logger, volumes, volumes_for_cluster(volumes_detail, name)
+        logger, volumes, filtered_volumes_for_cluster(volumes_detail, name)
     )
     logger.info("deleted volumes for persistent volume claims")
@@ -239,7 +249,9 @@ async def purge_openstack_resources(
         raise ResourcesStillPresentError("loadbalancers", name)
     if check_secgroups and not await empty(secgroups_for_cluster(secgroups, name)):
         raise ResourcesStillPresentError("security-groups", name)
-    if check_volumes and not await empty(volumes_for_cluster(volumes_detail, name)):
+    if check_volumes and not await empty(
+        filtered_volumes_for_cluster(volumes_detail, name)
+    ):
         raise ResourcesStillPresentError("volumes", name)
     if check_snapshots and not await empty(
         snapshots_for_cluster(snapshots_detail, name)
```
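
The new predicate reads as "owned by this cluster and not explicitly kept". Below is a minimal, self-contained sketch of the same rule; plain dicts stand in for the easykube/OpenStack API objects used by the real code, so the names here are illustrative, not the repository's own.

```python
# Illustrative sketch of this commit's volume-filtering rule,
# using plain dicts instead of the real OpenStack API objects.
import asyncio

# Property name introduced by this commit.
OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY = "janitor.capi.azimuth-cloud.com/keep"


async def list_volumes():
    # Stand-in for `resource.list()`: yields fake volumes as plain dicts.
    for vol in [
        {"name": "kept", "metadata": {
            "cinder.csi.openstack.org/cluster": "c1",
            OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY: "true",
        }},
        {"name": "deleted", "metadata": {"cinder.csi.openstack.org/cluster": "c1"}},
        {"name": "other", "metadata": {"cinder.csi.openstack.org/cluster": "c2"}},
    ]:
        yield vol


async def filtered_volumes_for_cluster(volumes, cluster):
    # Same rule as the real code: owned by this cluster AND not marked keep=true.
    async for vol in volumes:
        owner = vol["metadata"].get("cinder.csi.openstack.org/cluster")
        keep = vol["metadata"].get(OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY)
        if owner == cluster and keep != "true":
            yield vol


async def main():
    async for vol in filtered_volumes_for_cluster(list_volumes(), "c1"):
        print(vol["name"])  # prints only: deleted


asyncio.run(main())
```

As in the real implementation, only the exact string `'true'` protects a volume; any other value leaves it eligible for deletion, which matches the README wording added above.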

capi_janitor/tests/openstack/test_operator.py

Lines changed: 48 additions & 1 deletion

```diff
@@ -4,8 +4,10 @@
 from unittest import mock

 import easykube
+from easykube.rest.util import PropertyDict

-from capi_janitor.openstack import operator
+from capi_janitor.openstack import operator, openstack
+from capi_janitor.openstack.operator import OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY


 class TestOperator(unittest.IsolatedAsyncioTestCase):
@@ -145,3 +147,48 @@ async def test_on_openstackcluster_event_calls_purge(
                 mock.call("removed janitor finalizer from cluster"),
             ]
         )
+
+    @mock.patch.object(openstack, "Resource")
+    async def test_user_keep_volumes_filter(self, mock_volumes_resource):
+        # Arrange
+        async def _list_volumes():
+            test_volumes = [
+                {
+                    "id": "123",
+                    "name": "volume-1",
+                    "metadata": {
+                        "cinder.csi.openstack.org/cluster": "cluster-1",
+                        OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY: "anything-but-true",
+                    },
+                },
+                {
+                    "id": "456",
+                    "name": "volume-2",
+                    "metadata": {
+                        "cinder.csi.openstack.org/cluster": "cluster-1",
+                        OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY: "true",
+                    },
+                },
+                {
+                    "id": "789",
+                    "name": "volume-3",
+                    "metadata": {
+                        "cinder.csi.openstack.org/cluster": "cluster-2",
+                        OPENSTACK_USER_VOLUMES_RECLAIM_PROPERTY: "true",
+                    },
+                },
+            ]
+            for volume in map(PropertyDict, test_volumes):
+                yield volume
+
+        mock_volumes_resource.list.return_value = _list_volumes()
+        # Act
+        filtered_volumes = [
+            v
+            async for v in operator.filtered_volumes_for_cluster(
+                mock_volumes_resource, "cluster-1"
+            )
+        ]
+        # Assert
+        self.assertEqual(len(filtered_volumes), 1)
+        self.assertEqual(filtered_volumes[0].get("name"), "volume-1")
```
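
Worth noting about the test above: `mock_volumes_resource.list.return_value` is a single async generator instance, so the mocked `list()` can only be consumed once; each test run builds a fresh one. Assuming the repository's standard layout, the new case can be run in isolation with `python -m unittest -k test_user_keep_volumes_filter` (the `-k` pattern filter is available from Python 3.7), and since `TestOperator` extends `unittest.IsolatedAsyncioTestCase`, each async test runs in its own event loop.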
