Commit c48e308

Merge pull request #23 from SomethingGeneric/copilot/fix-7d16f053-71ca-4b6e-b8e9-a1832bfe7b8e
[WIP] Add per-host snapshot quota in `vm_mapping`
2 parents (611aee5 + ea678bd) · commit c48e308

3 files changed (+68, -11 lines)

README.md

Lines changed: 10 additions & 0 deletions

@@ -176,6 +176,7 @@ Maps Ansible inventory host names to Proxmox VM IDs and nodes:
 ```toml
 # VM Mapping Configuration for miniupdate
 # Maps Ansible inventory host names to Proxmox VM IDs and nodes
+# Optional: Set max_snapshots per VM to limit snapshot count for capacity-limited storage
 
 [vms.web1]
 node = "pve-node1"
@@ -184,12 +185,21 @@ vmid = 100
 [vms.web2]
 node = "pve-node1"
 vmid = 101
+# Optional: Limit to 2 snapshots for VMs on capacity-limited storage (e.g., small SSDs)
+max_snapshots = 2
 
 [vms.db1]
 node = "pve-node2"
 vmid = 200
 ```
 
+**Per-Host Snapshot Quota:**
+- Use `max_snapshots` to set a maximum number of automated snapshots per VM
+- When set, miniupdate will keep only the N newest snapshots and delete older ones
+- Useful for VMs on capacity-limited storage backends (e.g., small SSDs)
+- Takes precedence over the global `snapshot_retention_days` setting
+- If not set, the global time-based retention policy applies
+
 ### inventory.yml (Ansible Format)
 
 ```yaml
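
The precedence rules added to the README above can be illustrated with a short standalone sketch (not part of this commit). It assumes the default `pre-update` snapshot-name prefix and uses hypothetical snapshot names and dates; only the selection behaviour mirrors what is documented:

```python
from datetime import datetime, timedelta

# Illustrative only -- not code from the commit. Snapshot names follow the
# "<prefix>-YYYYMMDD-HHMMSS" pattern used by miniupdate's automated snapshots.
PREFIX = "pre-update"
snapshots = [
    "pre-update-20240102-020000",
    "pre-update-20240110-020000",
    "pre-update-20240111-020000",
    "pre-update-20240112-020000",
]

def parse_time(name: str) -> datetime:
    # Strip "<prefix>-" and parse the trailing timestamp, as the cleanup code does.
    return datetime.strptime(name[len(PREFIX) + 1:], "%Y%m%d-%H%M%S")

newest_first = sorted(snapshots, key=parse_time, reverse=True)

# Per-host quota (max_snapshots = 2): keep only the 2 newest, regardless of age.
delete_by_quota = newest_first[2:]

# Global policy (snapshot_retention_days = 7): delete anything older than the
# cutoff. Used only when max_snapshots is not set for the VM.
cutoff = datetime(2024, 1, 13) - timedelta(days=7)
delete_by_age = [s for s in newest_first if parse_time(s) < cutoff]

print(delete_by_quota)  # ['pre-update-20240110-020000', 'pre-update-20240102-020000']
print(delete_by_age)    # ['pre-update-20240102-020000']
```

With `max_snapshots = 2`, the third-newest snapshot is deleted even though it is still inside the 7-day window, which is why the per-host quota is the right lever for capacity-limited storage.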

miniupdate/update_automator.py

Lines changed: 39 additions & 8 deletions
@@ -486,14 +486,14 @@ def _handle_reboot_and_verification(self, host: Host, vm_mapping: Optional[VMMap
         return None  # Success - no error report needed
 
     def _cleanup_old_snapshots(self, vm_mapping: VMMapping):
-        """Clean up old automated snapshots."""
+        """Clean up old automated snapshots based on retention policy or count limit."""
         try:
-            retention_days = self.update_config.get('snapshot_retention_days', 7)
             prefix = self.update_config.get('snapshot_name_prefix', 'pre-update')
 
             snapshots = self.proxmox_client.list_snapshots(vm_mapping.node, vm_mapping.vmid)
-            cutoff_time = datetime.now() - timedelta(days=retention_days)
 
+            # Filter to only automated snapshots with valid timestamps
+            automated_snapshots = []
             for snapshot in snapshots:
                 snap_name = snapshot.get('name', '')
                 if not snap_name.startswith(prefix):
@@ -503,14 +503,45 @@ def _cleanup_old_snapshots(self, vm_mapping: VMMapping):
                 try:
                     timestamp_str = snap_name[len(prefix) + 1:]  # Remove prefix and dash
                     snap_time = datetime.strptime(timestamp_str, '%Y%m%d-%H%M%S')
-
-                    if snap_time < cutoff_time:
-                        logger.info(f"Deleting old snapshot {snap_name} for VM {vm_mapping.vmid}")
-                        self.proxmox_client.delete_snapshot(vm_mapping.node, vm_mapping.vmid, snap_name)
-
+                    automated_snapshots.append({
+                        'name': snap_name,
+                        'time': snap_time
+                    })
                 except (ValueError, IndexError) as e:
                     logger.debug(f"Could not parse snapshot timestamp for {snap_name}: {e}")
                     continue
+
+            # Sort by timestamp, newest first
+            automated_snapshots.sort(key=lambda x: x['time'], reverse=True)
+
+            snapshots_to_delete = []
+
+            # Check per-host max_snapshots limit first (takes precedence)
+            if vm_mapping.max_snapshots is not None:
+                # Keep only the newest max_snapshots, delete the rest
+                if len(automated_snapshots) > vm_mapping.max_snapshots:
+                    snapshots_to_delete = automated_snapshots[vm_mapping.max_snapshots:]
+                    logger.info(f"Per-host snapshot quota: keeping {vm_mapping.max_snapshots} newest snapshots "
+                                f"for VM {vm_mapping.vmid}, deleting {len(snapshots_to_delete)} older ones")
+            else:
+                # Fall back to time-based retention policy
+                retention_days = self.update_config.get('snapshot_retention_days', 7)
+                cutoff_time = datetime.now() - timedelta(days=retention_days)
+
+                snapshots_to_delete = [
+                    snap for snap in automated_snapshots
+                    if snap['time'] < cutoff_time
+                ]
+
+                if snapshots_to_delete:
+                    logger.info(f"Time-based retention: deleting {len(snapshots_to_delete)} snapshots "
+                                f"older than {retention_days} days for VM {vm_mapping.vmid}")
+
+            # Delete the snapshots
+            for snap in snapshots_to_delete:
+                snap_name = snap['name']
+                logger.info(f"Deleting old snapshot {snap_name} for VM {vm_mapping.vmid}")
+                self.proxmox_client.delete_snapshot(vm_mapping.node, vm_mapping.vmid, snap_name)
 
         except Exception as e:
             logger.warning(f"Failed to cleanup old snapshots for VM {vm_mapping.vmid}: {e}")
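
Because the new selection logic lives inside `_cleanup_old_snapshots` next to the Proxmox API calls, it cannot be exercised without a client. A possible refactoring sketch (hypothetical, not part of this commit) pulls the quota-versus-retention decision out into a pure helper that mirrors the branches above and could be unit tested directly:

```python
from datetime import datetime, timedelta
from typing import Dict, List, Optional

# Hypothetical helper, not in the commit: restates the precedence logic of
# _cleanup_old_snapshots as a pure function over {'name': str, 'time': datetime}
# entries, so it can be tested without a Proxmox client.
def select_snapshots_to_delete(automated_snapshots: List[Dict],
                               max_snapshots: Optional[int],
                               retention_days: int = 7,
                               now: Optional[datetime] = None) -> List[Dict]:
    snaps = sorted(automated_snapshots, key=lambda s: s['time'], reverse=True)
    if max_snapshots is not None:
        # Per-host quota takes precedence: keep only the N newest snapshots.
        return snaps[max_snapshots:]
    # Fall back to the global time-based retention policy.
    cutoff = (now or datetime.now()) - timedelta(days=retention_days)
    return [s for s in snaps if s['time'] < cutoff]
```

The commit keeps the logic inline; the sketch only shows how the decision could be isolated if test coverage is wanted later.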

miniupdate/vm_mapping.py

Lines changed: 19 additions & 3 deletions
@@ -18,6 +18,7 @@ class VMMapping(NamedTuple):
     node: str
     vmid: int
     host_name: str
+    max_snapshots: Optional[int] = None
 
 
 class VMMapper:
@@ -72,6 +73,7 @@ def _load_mappings(self) -> Dict[str, VMMapping]:
 
             node = vm_info.get('node')
             vmid = vm_info.get('vmid')
+            max_snapshots = vm_info.get('max_snapshots')
 
             if not node or not vmid:
                 logger.warning(f"Incomplete VM mapping for {host_name}: "
@@ -84,10 +86,22 @@ def _load_mappings(self) -> Dict[str, VMMapping]:
                 logger.warning(f"Invalid vmid for {host_name}: {vmid}")
                 continue
 
+            # Validate max_snapshots if provided
+            if max_snapshots is not None:
+                try:
+                    max_snapshots = int(max_snapshots)
+                    if max_snapshots < 0:
+                        logger.warning(f"Invalid max_snapshots for {host_name}: must be >= 0")
+                        max_snapshots = None
+                except ValueError:
+                    logger.warning(f"Invalid max_snapshots for {host_name}: {max_snapshots}")
+                    max_snapshots = None
+
             mappings[host_name] = VMMapping(
                 node=node,
                 vmid=vmid,
-                host_name=host_name
+                host_name=host_name,
+                max_snapshots=max_snapshots
             )
 
         logger.info(f"Loaded VM mappings for {len(mappings)} hosts")
@@ -126,7 +140,8 @@ def create_example_vm_mapping(path: str = "vm_mapping.toml.example") -> None:
            },
            "web2": {
                "node": "pve-node1",
-                "vmid": 101
+                "vmid": 101,
+                "max_snapshots": 2  # Optional: limit to 2 snapshots for capacity-limited storage
            },
            "db1": {
                "node": "pve-node2",
@@ -138,6 +153,7 @@ def create_example_vm_mapping(path: str = "vm_mapping.toml.example") -> None:
     with open(path, 'w', encoding='utf-8') as f:
         # Write with comments
         f.write("# VM Mapping Configuration for miniupdate\n")
-        f.write("# Maps Ansible inventory host names to Proxmox VM IDs and nodes\n\n")
+        f.write("# Maps Ansible inventory host names to Proxmox VM IDs and nodes\n")
+        f.write("# Optional: Set max_snapshots per VM to limit snapshot count for capacity-limited storage\n\n")
 
         toml.dump(example_config, f)
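
For reference, the validation added to `_load_mappings` normalizes a raw `max_snapshots` value from `vm_mapping.toml` as follows; this is an illustrative restatement (not code from the commit) of the rules visible in the diff above:

```python
import logging
from typing import Any, Optional

logger = logging.getLogger(__name__)

# Illustrative restatement of the loader's normalization rules.
def normalize_max_snapshots(value: Any, host_name: str) -> Optional[int]:
    if value is None:
        return None  # field absent: the global time-based retention applies
    try:
        value = int(value)
    except ValueError:
        logger.warning(f"Invalid max_snapshots for {host_name}: {value}")
        return None
    if value < 0:
        logger.warning(f"Invalid max_snapshots for {host_name}: must be >= 0")
        return None
    return value

# normalize_max_snapshots(2, "web2")      -> 2
# normalize_max_snapshots(-1, "web2")     -> None (falls back to time-based retention)
# normalize_max_snapshots("oops", "web2") -> None
```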
