From b58fd0eda881db0277373d91ec006e0629d656d9 Mon Sep 17 00:00:00 2001
From: Paul Laffitte
Date: Wed, 21 May 2025 15:38:01 +0200
Subject: [PATCH] feat: add pvecontrol vm restore command

---
 src/pvecontrol/actions/vm.py     | 40 +++++++++++++++++++++++++++++---
 src/pvecontrol/models/cluster.py |  1 +
 src/pvecontrol/models/vm.py      |  6 +++++
 3 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/src/pvecontrol/actions/vm.py b/src/pvecontrol/actions/vm.py
index c902ae9..7a2804e 100644
--- a/src/pvecontrol/actions/vm.py
+++ b/src/pvecontrol/actions/vm.py
@@ -2,10 +2,11 @@
 import sys
 
 import click
+import proxmoxer.core
 
 from pvecontrol.utils import print_task
-from pvecontrol.cli import ResourceGroup, migration_related_command
-from pvecontrol.models.vm import COLUMNS
+from pvecontrol.cli import ResourceGroup, migration_related_command, task_related_command
+from pvecontrol.models.vm import PVEVm, COLUMNS
 from pvecontrol.models.cluster import PVECluster
 
 
@@ -77,12 +78,45 @@ def migrate(ctx, vmid, target, online, follow, wait, dry_run):
         # Follow the created task
         # pylint: disable=duplicate-code
         proxmox.refresh()
-        _task = proxmox.find_task(upid)
         print_task(proxmox, upid, follow, wait)
     else:
         print("Dry run, skipping migration")
 
 
+@root.command()
+@click.argument("vmid", type=int)
+@click.option("-t", "--target", metavar="NODEID", required=True, help="ID of the target node")
+@click.option(
+    "-a",
+    "--archive",
+    metavar="ARCHIVE",
+    required=True,
+    help="The archive to restore: either the filesystem path to a .tar or .vma file, or a Proxmox storage backup volume identifier.",
+)
+@click.option(
+    "-s",
+    "--storage",
+    metavar="STORAGE",
+    help="Target storage ID where the VM's disks will be created (defaults to the storage from the backup configuration).",
+)
+@click.option("--force", is_flag=True, help="Overwrite an existing VM")
+@task_related_command
+@click.pass_context
+def restore(ctx, vmid, target, archive, storage, force, follow, wait):
+    """Restore a VM from a backup archive"""
+
+    proxmox = PVECluster.create_from_config(ctx.obj["args"].cluster)
+
+    try:
+        upid = PVEVm.create(proxmox, vmid, target, archive=archive, storage=storage, force=force)
+        proxmox.refresh()
+        print_task(proxmox, upid, follow, wait)
+    except proxmoxer.core.ResourceException as e:
+        logging.error("Error restoring VM: %s", e)
+        sys.exit(1)
+
+
+# FIXME: merge with PVECluster.get_vm()
 def _get_vm(proxmox, vmid):
     for v in proxmox.vms:
         logging.debug("_get_vm: %s", v)
diff --git a/src/pvecontrol/models/cluster.py b/src/pvecontrol/models/cluster.py
index 150733e..e0b0e48 100644
--- a/src/pvecontrol/models/cluster.py
+++ b/src/pvecontrol/models/cluster.py
@@ -153,6 +153,7 @@ def get_vm(self, vm_id):
         result = None
         node_name = None
 
+        # FIXME: wouldn't it be easier AND faster to iterate over all nodes and VMs directly?
         for vm in self.resources_vms:
             if vm["vmid"] == vm_id:
                 node_name = vm["node"]
diff --git a/src/pvecontrol/models/vm.py b/src/pvecontrol/models/vm.py
index 74d692b..669fe40 100644
--- a/src/pvecontrol/models/vm.py
+++ b/src/pvecontrol/models/vm.py
@@ -77,6 +77,12 @@ def migrate(self, target, online=False):
         upid = self._api.nodes(self.node).qemu(self.vmid).migrate.post(**options)
         return upid
 
+    @staticmethod
+    def create(proxmox, vmid, target, **options):
+        if "force" in options:
+            options["force"] = 1 if options["force"] else 0  # the Proxmox API encodes booleans as 0/1
+        return proxmox.api.nodes(target).qemu().post(vmid=vmid, **options)
+
     def get_backup_jobs(self, proxmox):
         vm_backup_jobs = []
         for backup_job in proxmox.backup_jobs:
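
Notes (not part of the commit message):

Example invocation of the new command. The cluster, node, storage and
archive names below are made up for illustration, and this assumes the
existing top-level --cluster option:

    pvecontrol --cluster my-cluster vm restore 105 --target pve-node1 \
        --archive local:backup/vzdump-qemu-105-2025_05_21-03_00_00.vma.zst \
        --storage local-lvm --force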
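
The new PVEVm.create() helper boils down to a single POST on the qemu
endpoint of the target node. A minimal standalone sketch of the
equivalent raw proxmoxer call (host, credentials, VMID and volume
identifier are made up for illustration):

    from proxmoxer import ProxmoxAPI

    # Connect to any cluster node; the restore runs on the target node.
    api = ProxmoxAPI("pve.example.com", user="root@pam",
                     password="secret", verify_ssl=False)

    # POST /nodes/{node}/qemu with an "archive" parameter triggers a
    # restore instead of a plain VM creation; the Proxmox API encodes
    # booleans as 0/1, hence force=1.
    upid = api.nodes("pve-node1").qemu.post(
        vmid=105,
        archive="local:backup/vzdump-qemu-105-2025_05_21-03_00_00.vma.zst",
        storage="local-lvm",
        force=1,
    )
    print(upid)  # UPID of the restore task, consumed by print_task()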
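
One behavioral note, worth a quick test: when --storage is omitted, the
option reaches PVEVm.create() as None. From reading requests' form
encoding, None-valued fields are dropped before the request is sent, so
the API should fall back to the storage recorded in the backup
configuration, as the help text promises.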