diff --git a/cmd/prefilter-manual/main.go b/cmd/prefilter-manual/main.go
new file mode 100644
index 0000000..6f0d1a8
--- /dev/null
+++ b/cmd/prefilter-manual/main.go
@@ -0,0 +1,59 @@
+package main
+
+import (
+ "context"
+ "flag"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/tis24dev/proxsave/internal/backup"
+ "github.com/tis24dev/proxsave/internal/logging"
+ "github.com/tis24dev/proxsave/internal/types"
+)
+
+func parseLogLevel(raw string) types.LogLevel {
+ switch strings.ToLower(strings.TrimSpace(raw)) {
+ case "debug":
+ return types.LogLevelDebug
+ case "info", "":
+ return types.LogLevelInfo
+ case "warning", "warn":
+ return types.LogLevelWarning
+ case "error":
+ return types.LogLevelError
+ default:
+ return types.LogLevelInfo
+ }
+}
+
+func main() {
+ var (
+ root string
+ maxSize int64
+ levelLabel string
+ )
+
+ flag.StringVar(&root, "root", "/tmp/test_prefilter", "Root directory to run prefilter on")
+ flag.Int64Var(&maxSize, "max-size", 8*1024*1024, "Max file size (bytes) to prefilter")
+ flag.StringVar(&levelLabel, "log-level", "info", "Log level: debug|info|warn|error")
+ flag.Parse()
+
+ root = filepath.Clean(strings.TrimSpace(root))
+ if root == "" || root == "." {
+ root = string(os.PathSeparator)
+ }
+
+ logger := logging.New(parseLogLevel(levelLabel), false)
+ logger.SetOutput(os.Stdout)
+
+ cfg := backup.OptimizationConfig{
+ EnablePrefilter: true,
+ PrefilterMaxFileSizeBytes: maxSize,
+ }
+
+ if err := backup.ApplyOptimizations(context.Background(), logger, root, cfg); err != nil {
+ logger.Error("Prefilter failed: %v", err)
+ os.Exit(1)
+ }
+}
diff --git a/docs/BACKUP_ENV_MAPPING.md b/docs/BACKUP_ENV_MAPPING.md
index 9d1057e..db10996 100644
--- a/docs/BACKUP_ENV_MAPPING.md
+++ b/docs/BACKUP_ENV_MAPPING.md
@@ -88,12 +88,15 @@ WEBHOOK_TIMEOUT = SAME
## Go-only variables (new)
SYSTEM_ROOT_PREFIX = NEW (Go-only) → Override system root for collection (testing/chroot). Empty or "/" uses the real root.
+NOTE: PBS restore behavior is selected interactively during `--restore` and is intentionally not configured via `backup.env`.
BACKUP_PBS_S3_ENDPOINTS = NEW (Go-only) → Collect `s3.cfg` and S3 endpoint snapshots (PBS).
BACKUP_PBS_NODE_CONFIG = NEW (Go-only) → Collect `node.cfg` and node snapshots (PBS).
BACKUP_PBS_ACME_ACCOUNTS = NEW (Go-only) → Collect `acme/accounts.cfg` and ACME account snapshots (PBS).
BACKUP_PBS_ACME_PLUGINS = NEW (Go-only) → Collect `acme/plugins.cfg` and ACME plugin snapshots (PBS).
BACKUP_PBS_METRIC_SERVERS = NEW (Go-only) → Collect `metricserver.cfg` (PBS).
BACKUP_PBS_TRAFFIC_CONTROL = NEW (Go-only) → Collect `traffic-control.cfg` and traffic-control snapshots (PBS).
+BACKUP_PBS_NOTIFICATIONS = NEW (Go-only) → Collect `notifications.cfg` and notification snapshots (PBS).
+BACKUP_PBS_NOTIFICATIONS_PRIV = NEW (Go-only) → Collect `notifications-priv.cfg` (PBS notification secrets/credentials).
BACKUP_PBS_NETWORK_CONFIG = NEW (Go-only) → Collect `network.cfg` and network snapshots (PBS), independent from BACKUP_NETWORK_CONFIGS (system).
## Renamed variables / Supported aliases in Go
diff --git a/docs/CLI_REFERENCE.md b/docs/CLI_REFERENCE.md
index 4aea693..eae8335 100644
--- a/docs/CLI_REFERENCE.md
+++ b/docs/CLI_REFERENCE.md
@@ -789,6 +789,9 @@ CONFIG_FILE=/etc/pbs/prod.env ./build/proxsave
# Force dry-run mode
DRY_RUN=true ./build/proxsave
+# PBS restore behavior
+# Selected interactively during `--restore` on PBS hosts (Merge vs Clean 1:1).
+
# Set debug level
DEBUG_LEVEL=extreme ./build/proxsave --log-level debug
diff --git a/docs/CONFIGURATION.md b/docs/CONFIGURATION.md
index 33deac6..6603768 100644
--- a/docs/CONFIGURATION.md
+++ b/docs/CONFIGURATION.md
@@ -6,6 +6,7 @@ Complete reference for all 200+ configuration variables in `configs/backup.env`.
- [Configuration File Location](#configuration-file-location)
- [General Settings](#general-settings)
+- [Restore (PBS)](#restore-pbs)
- [Security Settings](#security-settings)
- [Disk Space](#disk-space)
- [Storage Paths](#storage-paths)
@@ -74,6 +75,25 @@ PROFILING_ENABLED=true # true | false (profiles written under LOG_PA
---
+## Restore (PBS)
+
+PBS restore behavior is chosen **interactively at restore time** on PBS hosts (not via `backup.env`).
+
+You will be asked to choose a behavior:
+- **Merge (existing PBS)**: intended for restoring onto an already operational PBS; ProxSave applies supported PBS categories via `proxmox-backup-manager` without deleting existing objects that are not in the backup.
+- **Clean 1:1 (fresh PBS install)**: intended for restoring onto a new, clean PBS; ProxSave attempts to make supported PBS objects match the backup (may remove objects that exist on the system but are not in the backup).
+
+ProxSave applies supported PBS staged categories via API automatically (and may fall back to file-based staged apply only in **Clean 1:1** mode).
+
+**Current API coverage**:
+- Node + traffic control (`pbs_host`)
+- Datastores + S3 endpoints (`datastore_pbs`)
+- Remotes (`pbs_remotes`)
+- Jobs (sync/verify/prune) (`pbs_jobs`)
+- Notifications endpoints/matchers (`pbs_notifications`)
+
+---
+
## Security Settings
```bash
@@ -321,7 +341,29 @@ PREFILTER_MAX_FILE_SIZE_MB=8 # Skip prefilter for files >8MB
- **Smart chunking**: Splits large files for parallel processing
- **Deduplication**: Detects duplicate data blocks (reduces storage)
-- **Prefilter**: Analyzes small files before compression (optimizes algorithm selection)
+- **Prefilter**: Applies safe, semantic-preserving normalization to small text/JSON files to improve compression (e.g. removes CR from CRLF line endings and minifies JSON). It does **not** reorder, de-indent, or strip structured configuration files, and it avoids touching Proxmox/PBS structured config paths (e.g. `etc/pve/**`, `etc/proxmox-backup/**`).
+
+### Prefilter (`ENABLE_PREFILTER`) — details and risks
+
+**What it does** (on the *staged* backup tree, before compression):
+- Removes `\r` from CRLF text files (`.txt`, `.log`, `.md`, `.conf`, `.cfg`, `.ini`) to normalize line endings
+- Minifies JSON (`.json`) while keeping valid JSON semantics
+
+**What it does not do**:
+- It does **not** reorder lines, remove indentation, or otherwise rewrite whitespace/ordering-sensitive structured configs.
+- It does **not** follow symlinks (symlinks are skipped).
+- It skips Proxmox/PBS structured configuration paths where formatting/order matters, such as:
+ - `etc/pve/**`
+ - `etc/proxmox-backup/**`
+ - `etc/systemd/system/**`
+ - `etc/ssh/**`
+ - `etc/pam.d/**`
+
+**Why you might disable it** (even though it's safe):
+- If you need maximum fidelity (bit-for-bit) of text/JSON formatting as originally collected (CRLF preservation, JSON pretty-printing, etc.)
+- If you prefer the most conservative pipeline possible (forensics/compliance)
+
+**Important**: Prefilter never edits files on the host system — it only operates on the temporary staging directory that will be archived.
---
@@ -938,6 +980,24 @@ BACKUP_VM_CONFIGS=true # VM/CT config files
# PBS datastore configs
BACKUP_DATASTORE_CONFIGS=true # Datastore definitions
+# S3 endpoints (used by S3 datastores)
+BACKUP_PBS_S3_ENDPOINTS=true # s3.cfg (S3 endpoints, used by S3 datastores)
+
+# Node/global config
+BACKUP_PBS_NODE_CONFIG=true # node.cfg (global PBS settings)
+
+# ACME
+BACKUP_PBS_ACME_ACCOUNTS=true # acme/accounts.cfg
+BACKUP_PBS_ACME_PLUGINS=true # acme/plugins.cfg
+
+# Integrations
+BACKUP_PBS_METRIC_SERVERS=true # metricserver.cfg
+BACKUP_PBS_TRAFFIC_CONTROL=true # traffic-control.cfg
+
+# Notifications
+BACKUP_PBS_NOTIFICATIONS=true # notifications.cfg (targets/matchers/endpoints)
+BACKUP_PBS_NOTIFICATIONS_PRIV=true # notifications-priv.cfg (secrets/credentials for endpoints)
+
# User and permissions
BACKUP_USER_CONFIGS=true # PBS users and tokens
@@ -953,6 +1013,9 @@ BACKUP_VERIFICATION_JOBS=true # Backup verification schedules
# Tape backup
BACKUP_TAPE_CONFIGS=true # Tape library configuration
+# Network configuration (PBS)
+BACKUP_PBS_NETWORK_CONFIG=true # network.cfg (PBS), independent from BACKUP_NETWORK_CONFIGS (system)
+
# Prune schedules
BACKUP_PRUNE_SCHEDULES=true # Retention prune schedules
diff --git a/docs/RESTORE_DIAGRAMS.md b/docs/RESTORE_DIAGRAMS.md
index 5007fb8..3dca90a 100644
--- a/docs/RESTORE_DIAGRAMS.md
+++ b/docs/RESTORE_DIAGRAMS.md
@@ -138,6 +138,8 @@ flowchart TD
style CheckboxMenu fill:#87CEEB
```
+**Note (PBS)**: ProxSave applies supported PBS staged categories via `proxmox-backup-manager` by default. In **Clean 1:1** mode it may fall back to writing staged `*.cfg` files directly to `/etc/proxmox-backup` when API apply is unavailable or fails.
+
---
## Service Management Flow
diff --git a/docs/RESTORE_GUIDE.md b/docs/RESTORE_GUIDE.md
index bbc30aa..bac43c2 100644
--- a/docs/RESTORE_GUIDE.md
+++ b/docs/RESTORE_GUIDE.md
@@ -86,7 +86,7 @@ Restore operations are organized into **20–22 categories** (PBS = 20, PVE = 22
Each category is handled in one of three ways:
- **Normal**: extracted directly to `/` (system paths) after safety backup
-- **Staged**: extracted to `/tmp/proxsave/restore-stage-*` and then applied in a controlled way (file copy/validation or `pvesh`); when staged files are written to system paths, ProxSave applies them **atomically** and enforces the final permissions/ownership (including for any created parent directories; not left to `umask`)
+- **Staged**: extracted to `/tmp/proxsave/restore-stage-*` and then applied in a controlled way (file copy/validation or API apply: `pvesh`/`pveum` on PVE, `proxmox-backup-manager` on PBS); when staged files are written to system paths, ProxSave applies them **atomically** and enforces the final permissions/ownership (including for any created parent directories; not left to `umask`)
- **Export-only**: extracted to an export directory for manual review (never written to system paths)
### PVE-Specific Categories (11 categories)
@@ -107,17 +107,23 @@ Each category is handled in one of three ways:
### PBS-Specific Categories (9 categories)
+**PBS staged apply behavior**: During restore on PBS, ProxSave prompts you to choose how to reconcile PBS objects:
+- **Merge (existing PBS)**: intended for restoring onto an already operational PBS; applies supported PBS categories via `proxmox-backup-manager` without deleting existing objects that are not in the backup.
+- **Clean 1:1 (fresh PBS install)**: intended for restoring onto a new, clean PBS; attempts to make supported PBS objects match the backup (may remove objects not in the backup).
+
+API apply is automatic for supported PBS staged categories; ProxSave may fall back to file-based staged apply only in **Clean 1:1** mode.
+
| Category | Name | Description | Paths |
|----------|------|-------------|-------|
| `pbs_config` | PBS Config Export | **Export-only** copy of /etc/proxmox-backup (never written to system) | `./etc/proxmox-backup/` |
-| `pbs_host` | PBS Host & Integrations | **Staged** node settings, ACME, proxy, metric servers and traffic control | `./etc/proxmox-backup/node.cfg`
`./etc/proxmox-backup/proxy.cfg`
`./etc/proxmox-backup/acme/accounts.cfg`
`./etc/proxmox-backup/acme/plugins.cfg`
`./etc/proxmox-backup/metricserver.cfg`
`./etc/proxmox-backup/traffic-control.cfg` |
-| `datastore_pbs` | PBS Datastore Configuration | **Staged** datastore definitions (incl. S3 endpoints) | `./etc/proxmox-backup/datastore.cfg`
`./etc/proxmox-backup/s3.cfg` |
+| `pbs_host` | PBS Host & Integrations | **Staged** node settings, ACME, proxy, metric servers and traffic control (API/file apply) | `./etc/proxmox-backup/node.cfg`
`./etc/proxmox-backup/proxy.cfg`
`./etc/proxmox-backup/acme/accounts.cfg`
`./etc/proxmox-backup/acme/plugins.cfg`
`./etc/proxmox-backup/metricserver.cfg`
`./etc/proxmox-backup/traffic-control.cfg`
`./var/lib/proxsave-info/commands/pbs/node_config.json`
`./var/lib/proxsave-info/commands/pbs/acme_accounts.json`
`./var/lib/proxsave-info/commands/pbs/acme_plugins.json`
`./var/lib/proxsave-info/commands/pbs/acme_account_*_info.json`
`./var/lib/proxsave-info/commands/pbs/acme_plugin_*_config.json`
`./var/lib/proxsave-info/commands/pbs/traffic_control.json` |
+| `datastore_pbs` | PBS Datastore Configuration | **Staged** datastore definitions (incl. S3 endpoints) (API/file apply) | `./etc/proxmox-backup/datastore.cfg`
`./etc/proxmox-backup/s3.cfg`
`./var/lib/proxsave-info/commands/pbs/datastore_list.json`
`./var/lib/proxsave-info/commands/pbs/datastore_*_status.json`
`./var/lib/proxsave-info/commands/pbs/s3_endpoints.json`
`./var/lib/proxsave-info/commands/pbs/s3_endpoint_*_buckets.json`
`./var/lib/proxsave-info/commands/pbs/pbs_datastore_inventory.json` |
| `maintenance_pbs` | PBS Maintenance | Maintenance settings | `./etc/proxmox-backup/maintenance.cfg` |
-| `pbs_jobs` | PBS Jobs | **Staged** sync/verify/prune jobs | `./etc/proxmox-backup/sync.cfg`
`./etc/proxmox-backup/verification.cfg`
`./etc/proxmox-backup/prune.cfg` |
-| `pbs_remotes` | PBS Remotes | **Staged** remotes for sync/verify (may include credentials) | `./etc/proxmox-backup/remote.cfg` |
-| `pbs_notifications` | PBS Notifications | **Staged** notification targets and matchers | `./etc/proxmox-backup/notifications.cfg`
`./etc/proxmox-backup/notifications-priv.cfg` |
-| `pbs_access_control` | PBS Access Control | **Staged** access control + secrets restored 1:1 (root@pam safety rail) | `./etc/proxmox-backup/user.cfg`
`./etc/proxmox-backup/domains.cfg`
`./etc/proxmox-backup/acl.cfg`
`./etc/proxmox-backup/token.cfg`
`./etc/proxmox-backup/shadow.json`
`./etc/proxmox-backup/token.shadow`
`./etc/proxmox-backup/tfa.json` |
-| `pbs_tape` | PBS Tape Backup | **Staged** tape config, jobs and encryption keys | `./etc/proxmox-backup/tape.cfg`
`./etc/proxmox-backup/tape-job.cfg`
`./etc/proxmox-backup/media-pool.cfg`
`./etc/proxmox-backup/tape-encryption-keys.json` |
+| `pbs_jobs` | PBS Jobs | **Staged** sync/verify/prune jobs (API/file apply) | `./etc/proxmox-backup/sync.cfg`
`./etc/proxmox-backup/verification.cfg`
`./etc/proxmox-backup/prune.cfg`
`./var/lib/proxsave-info/commands/pbs/sync_jobs.json`
`./var/lib/proxsave-info/commands/pbs/verification_jobs.json`
`./var/lib/proxsave-info/commands/pbs/prune_jobs.json`
`./var/lib/proxsave-info/commands/pbs/gc_jobs.json` |
+| `pbs_remotes` | PBS Remotes | **Staged** remotes for sync/verify (may include credentials) (API/file apply) | `./etc/proxmox-backup/remote.cfg`
`./var/lib/proxsave-info/commands/pbs/remote_list.json` |
+| `pbs_notifications` | PBS Notifications | **Staged** notification targets and matchers (API/file apply) | `./etc/proxmox-backup/notifications.cfg`
`./etc/proxmox-backup/notifications-priv.cfg`
`./var/lib/proxsave-info/commands/pbs/notification_targets.json`
`./var/lib/proxsave-info/commands/pbs/notification_matchers.json`
`./var/lib/proxsave-info/commands/pbs/notification_endpoints_*.json` |
+| `pbs_access_control` | PBS Access Control | **Staged** access control + secrets restored 1:1 (root@pam safety rail) | `./etc/proxmox-backup/user.cfg`
`./etc/proxmox-backup/domains.cfg`
`./etc/proxmox-backup/acl.cfg`
`./etc/proxmox-backup/token.cfg`
`./etc/proxmox-backup/shadow.json`
`./etc/proxmox-backup/token.shadow`
`./etc/proxmox-backup/tfa.json`
`./var/lib/proxsave-info/commands/pbs/user_list.json`
`./var/lib/proxsave-info/commands/pbs/realms_ldap.json`
`./var/lib/proxsave-info/commands/pbs/realms_ad.json`
`./var/lib/proxsave-info/commands/pbs/realms_openid.json`
`./var/lib/proxsave-info/commands/pbs/acl_list.json` |
+| `pbs_tape` | PBS Tape Backup | **Staged** tape config, jobs and encryption keys | `./etc/proxmox-backup/tape.cfg`
`./etc/proxmox-backup/tape-job.cfg`
`./etc/proxmox-backup/media-pool.cfg`
`./etc/proxmox-backup/tape-encryption-keys.json`
`./var/lib/proxsave-info/commands/pbs/tape_drives.json`
`./var/lib/proxsave-info/commands/pbs/tape_changers.json`
`./var/lib/proxsave-info/commands/pbs/tape_pools.json` |
### Common Categories (11 categories)
@@ -225,10 +231,10 @@ Select restore mode:
- `zfs` - ZFS configuration
**PBS Categories**:
-- `datastore_pbs` - Datastore definitions (staged apply)
+- `datastore_pbs` - Datastore definitions (staged apply; API preferred, file fallback in Clean 1:1)
- `maintenance_pbs` - Maintenance settings
-- `pbs_jobs` - Sync/verify/prune jobs (staged apply)
-- `pbs_remotes` - Remotes for sync jobs (staged apply)
+- `pbs_jobs` - Sync/verify/prune jobs (staged apply; API preferred, file fallback in Clean 1:1)
+- `pbs_remotes` - Remotes for sync jobs (staged apply; API preferred, file fallback in Clean 1:1)
- `filesystem` - /etc/fstab
- `storage_stack` - Storage stack config (mount prerequisites)
- `zfs` - ZFS configuration
@@ -2371,6 +2377,28 @@ systemctl restart proxmox-backup proxmox-backup-proxy
---
+**Issue: "Bad Request (400) parsing /etc/proxmox-backup/datastore.cfg ... duplicate property 'gc-schedule'"**
+
+**Cause**: `datastore.cfg` is malformed (multiple datastore definitions merged into a single block). This typically happens if the file lost its structure (header/order/indentation), leading PBS to interpret keys like `gc-schedule`, `notification-mode`, or `path` as duplicated **within the same datastore**.
+
+**Restore behavior**:
+- ProxSave detects this condition during staged apply.
+- If `var/lib/proxsave-info/commands/pbs/pbs_datastore_inventory.json` is available in the backup, ProxSave will use its embedded snapshot of the original `datastore.cfg` to recover a valid configuration.
+- If recovery is not possible, ProxSave will **leave the existing** `/etc/proxmox-backup/datastore.cfg` unchanged to avoid breaking PBS.
+
+**Manual diagnosis**:
+```bash
+nl -ba /etc/proxmox-backup/datastore.cfg | sed -n '1,120p'
+
+# Look for duplicate keys inside the same datastore block:
+awk '
+/^datastore: /{ds=$2; delete seen}
+/^[[:space:]]*[A-Za-z0-9-]+[[:space:]]+/{key=$1; if(seen[key]++) printf "DUP datastore=%s key=%s line=%d: %s\n", ds, key, NR, $0}
+' /etc/proxmox-backup/datastore.cfg
+```
+
+---
+
**Issue: "unable to read prune/verification job config ... syntax error (expected header)"**
**Cause**: PBS job config files (`/etc/proxmox-backup/prune.cfg`, `/etc/proxmox-backup/verification.cfg`) are empty or malformed. PBS expects a section header at the first non-comment line; an empty file can trigger parse errors.
@@ -2671,7 +2699,7 @@ tar -xzf /path/to/decrypted.tar.gz ./specific/file/path
A: Yes:
- **Extraction**: ProxSave preserves UID/GID, mode bits and timestamps (mtime/atime) for extracted entries.
-- **Staged categories**: files are extracted under `/tmp/proxsave/restore-stage-*` and then applied to system paths using atomic replace; ProxSave explicitly applies mode bits (not left to `umask`) and preserves/derives ownership/group to match expected system defaults (important on PBS, where `proxmox-backup-proxy` runs as `backup`; ProxSave also repairs common `root:root` group regressions by inheriting the destination parent directory's group).
+- **Staged categories**: files are extracted under `/tmp/proxsave/restore-stage-*` and then applied to system paths using atomic replace; ProxSave explicitly applies mode bits (not left to `umask`) and preserves/derives ownership/group to match expected system defaults (important on PBS, where `proxmox-backup-proxy` runs as `backup`; ProxSave also repairs common `root:root` group regressions by inheriting the destination parent directory's group). On supported filesystems, staged writes also `fsync()` the temporary file and the destination directory to reduce the risk of incomplete writes after a crash/power loss.
- **ctime**: Cannot be set (kernel-managed).
---
diff --git a/docs/RESTORE_TECHNICAL.md b/docs/RESTORE_TECHNICAL.md
index c7da91b..2bd5c02 100644
--- a/docs/RESTORE_TECHNICAL.md
+++ b/docs/RESTORE_TECHNICAL.md
@@ -869,6 +869,31 @@ func extractSelectiveArchive(
---
+#### Phase 10: Staged Apply (PVE/PBS)
+
+After extraction, **staged categories** are applied from the staging directory under `/tmp/proxsave/restore-stage-*`.
+
+**PBS staged apply**:
+- Selected interactively during restore on PBS hosts: **Merge (existing PBS)** vs **Clean 1:1 (fresh PBS install)**.
+- ProxSave applies supported PBS categories via `proxmox-backup-manager`.
+ - **Merge**: create/update only (no deletions of existing objects not in the backup).
+ - **Clean 1:1**: attempts 1:1 reconciliation (may remove objects not present in the backup).
+- If API apply is unavailable or fails, ProxSave may fall back to writing staged `*.cfg` files directly to `/etc/proxmox-backup` (**Clean 1:1 only**).
+
+**Current PBS API coverage**:
+- `pbs_host`: node + traffic control
+- `datastore_pbs`: datastores + S3 endpoints
+- `pbs_remotes`: remotes
+- `pbs_jobs`: sync/verify/prune jobs
+- `pbs_notifications`: notification endpoints/matchers
+
+Other PBS categories remain file-based (e.g. access control, tape, proxy/ACME/metricserver).
+
+**Key code paths**:
+- `internal/orchestrator/pbs_staged_apply.go` (`maybeApplyPBSConfigsFromStage`)
+- `internal/orchestrator/restore_notifications.go` (`maybeApplyNotificationsFromStage`, `pbs_notifications`)
+- `internal/orchestrator/pbs_api_apply.go` / `internal/orchestrator/pbs_notifications_api_apply.go` (API apply engines)
+
## Category System
### Category Definition Structure
@@ -1072,6 +1097,8 @@ func shouldStopPBSServices(categories []Category) bool {
}
```
+**API apply note**: When ProxSave applies PBS staged categories via API (`proxmox-backup-manager`), it may start PBS services again during the **staged apply** phase (even if services were stopped earlier for safe file extraction).
+
### Error Handling Philosophy
**Stop Phase**: **FAIL-FAST**
diff --git a/internal/backup/collector.go b/internal/backup/collector.go
index 8fb0d5a..676a9f8 100644
--- a/internal/backup/collector.go
+++ b/internal/backup/collector.go
@@ -162,6 +162,8 @@ type CollectorConfig struct {
BackupPBSAcmePlugins bool
BackupPBSMetricServers bool
BackupPBSTrafficControl bool
+ BackupPBSNotifications bool
+ BackupPBSNotificationsPriv bool
BackupUserConfigs bool
BackupRemoteConfigs bool
BackupSyncJobs bool
@@ -251,7 +253,7 @@ func (c *CollectorConfig) Validate() error {
c.BackupPVEBackupFiles || c.BackupCephConfig ||
c.BackupDatastoreConfigs || c.BackupPBSS3Endpoints || c.BackupPBSNodeConfig ||
c.BackupPBSAcmeAccounts || c.BackupPBSAcmePlugins || c.BackupPBSMetricServers ||
- c.BackupPBSTrafficControl || c.BackupUserConfigs || c.BackupRemoteConfigs ||
+ c.BackupPBSTrafficControl || c.BackupPBSNotifications || c.BackupUserConfigs || c.BackupRemoteConfigs ||
c.BackupSyncJobs || c.BackupVerificationJobs || c.BackupTapeConfigs ||
c.BackupPBSNetworkConfig || c.BackupPruneSchedules || c.BackupPxarFiles ||
c.BackupNetworkConfigs || c.BackupAptSources || c.BackupCronJobs ||
@@ -334,15 +336,17 @@ func GetDefaultCollectorConfig() *CollectorConfig {
BackupDatastoreConfigs: true,
BackupPBSS3Endpoints: true,
BackupPBSNodeConfig: true,
- BackupPBSAcmeAccounts: true,
- BackupPBSAcmePlugins: true,
- BackupPBSMetricServers: true,
- BackupPBSTrafficControl: true,
- BackupUserConfigs: true,
- BackupRemoteConfigs: true,
- BackupSyncJobs: true,
- BackupVerificationJobs: true,
- BackupTapeConfigs: true,
+ BackupPBSAcmeAccounts: true,
+ BackupPBSAcmePlugins: true,
+ BackupPBSMetricServers: true,
+ BackupPBSTrafficControl: true,
+ BackupPBSNotifications: true,
+ BackupPBSNotificationsPriv: true,
+ BackupUserConfigs: true,
+ BackupRemoteConfigs: true,
+ BackupSyncJobs: true,
+ BackupVerificationJobs: true,
+ BackupTapeConfigs: true,
BackupPBSNetworkConfig: true,
BackupPruneSchedules: true,
BackupPxarFiles: true,
diff --git a/internal/backup/collector_collectall_test.go b/internal/backup/collector_collectall_test.go
index aca9062..1bac94e 100644
--- a/internal/backup/collector_collectall_test.go
+++ b/internal/backup/collector_collectall_test.go
@@ -59,9 +59,9 @@ func TestCollectorCollectAll_PVEBranchWrapsCollectionError(t *testing.T) {
collector := NewCollector(logger, cfg, t.TempDir(), types.ProxmoxVE, false)
err := collector.CollectAll(context.Background())
if err == nil {
- t.Fatalf("expected error, got %v", err)
+ t.Fatalf("expected error, got nil")
}
- if err == nil || !strings.Contains(err.Error(), "PVE collection failed:") {
+ if !strings.Contains(err.Error(), "PVE collection failed:") {
t.Fatalf("expected wrapped PVE collection error, got %v", err)
}
}
diff --git a/internal/backup/collector_pbs.go b/internal/backup/collector_pbs.go
index 6759b00..98212e9 100644
--- a/internal/backup/collector_pbs.go
+++ b/internal/backup/collector_pbs.go
@@ -214,6 +214,11 @@ func (c *Collector) collectPBSDirectories(ctx context.Context, root string) erro
if !c.config.BackupPBSTrafficControl {
extraExclude = append(extraExclude, "traffic-control.cfg")
}
+ if !c.config.BackupPBSNotifications {
+ extraExclude = append(extraExclude, "notifications.cfg", "notifications-priv.cfg")
+ } else if !c.config.BackupPBSNotificationsPriv {
+ extraExclude = append(extraExclude, "notifications-priv.cfg")
+ }
if !c.config.BackupUserConfigs {
// User-related configs are intentionally excluded together.
extraExclude = append(extraExclude, "user.cfg", "acl.cfg", "domains.cfg")
@@ -281,6 +286,17 @@ func (c *Collector) collectPBSDirectories(ctx context.Context, root string) erro
c.pbsManifest["traffic-control.cfg"] = c.collectPBSConfigFile(ctx, root, "traffic-control.cfg",
"Traffic control rules", c.config.BackupPBSTrafficControl, "BACKUP_PBS_TRAFFIC_CONTROL")
+ // Notifications (targets/endpoints + matcher routing; secrets are stored in notifications-priv.cfg)
+ c.pbsManifest["notifications.cfg"] = c.collectPBSConfigFile(ctx, root, "notifications.cfg",
+ "Notifications configuration", c.config.BackupPBSNotifications, "BACKUP_PBS_NOTIFICATIONS")
+ privEnabled := c.config.BackupPBSNotifications && c.config.BackupPBSNotificationsPriv
+ privDisableHint := "BACKUP_PBS_NOTIFICATIONS_PRIV"
+ if !c.config.BackupPBSNotifications {
+ privDisableHint = "BACKUP_PBS_NOTIFICATIONS"
+ }
+ c.pbsManifest["notifications-priv.cfg"] = c.collectPBSConfigFile(ctx, root, "notifications-priv.cfg",
+ "Notifications secrets", privEnabled, privDisableHint)
+
// User configuration
c.pbsManifest["user.cfg"] = c.collectPBSConfigFile(ctx, root, "user.cfg",
"User configuration", c.config.BackupUserConfigs, "BACKUP_USER_CONFIGS")
@@ -381,7 +397,10 @@ func (c *Collector) collectPBSCommands(ctx context.Context, datastores []pbsData
}
// Notifications (targets, matchers, endpoints)
- c.collectPBSNotificationSnapshots(ctx, commandsDir)
+ if c.config.BackupPBSNotifications {
+ c.collectPBSNotificationSnapshots(ctx, commandsDir)
+ c.writePBSNotificationSummary(commandsDir)
+ }
// User list
if c.config.BackupUserConfigs {
diff --git a/internal/backup/collector_pbs_commands_coverage_test.go b/internal/backup/collector_pbs_commands_coverage_test.go
index 9cac09a..28bb64c 100644
--- a/internal/backup/collector_pbs_commands_coverage_test.go
+++ b/internal/backup/collector_pbs_commands_coverage_test.go
@@ -50,17 +50,18 @@ func TestCollectPBSCommandsWritesExpectedOutputs(t *testing.T) {
"datastore_store1_status.json",
"acme_accounts.json",
"acme_plugins.json",
- "notification_targets.json",
- "notification_matchers.json",
- "notification_endpoints_smtp.json",
- "notification_endpoints_sendmail.json",
- "notification_endpoints_gotify.json",
- "notification_endpoints_webhook.json",
- "user_list.json",
- "realms_ldap.json",
- "realms_ad.json",
- "realms_openid.json",
- "acl_list.json",
+ "notification_targets.json",
+ "notification_matchers.json",
+ "notification_endpoints_smtp.json",
+ "notification_endpoints_sendmail.json",
+ "notification_endpoints_gotify.json",
+ "notification_endpoints_webhook.json",
+ "notifications_summary.json",
+ "user_list.json",
+ "realms_ldap.json",
+ "realms_ad.json",
+ "realms_openid.json",
+ "acl_list.json",
"remote_list.json",
"sync_jobs.json",
"verification_jobs.json",
diff --git a/internal/backup/collector_pbs_notifications_summary.go b/internal/backup/collector_pbs_notifications_summary.go
new file mode 100644
index 0000000..180e163
--- /dev/null
+++ b/internal/backup/collector_pbs_notifications_summary.go
@@ -0,0 +1,220 @@
+package backup
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+)
+
+type pbsNotificationSnapshotSummary struct {
+ Present bool `json:"present"`
+ Bytes int64 `json:"bytes,omitempty"`
+ Total int `json:"total,omitempty"`
+ BuiltIn int `json:"built_in,omitempty"`
+ Custom int `json:"custom,omitempty"`
+ Names []string `json:"names,omitempty"`
+ Error string `json:"error,omitempty"`
+}
+
+type pbsNotificationsConfigFilesSummary struct {
+ NotificationsCfg ManifestEntry `json:"notifications_cfg"`
+ NotificationsPrivCfg ManifestEntry `json:"notifications_priv_cfg"`
+}
+
+type pbsNotificationsSummary struct {
+ GeneratedAt time.Time `json:"generated_at"`
+ Enabled bool `json:"enabled"`
+ PrivEnabled bool `json:"priv_enabled"`
+
+ ConfigFiles *pbsNotificationsConfigFilesSummary `json:"config_files,omitempty"`
+
+ Targets pbsNotificationSnapshotSummary `json:"targets"`
+ Matchers pbsNotificationSnapshotSummary `json:"matchers"`
+ Endpoints map[string]pbsNotificationSnapshotSummary `json:"endpoints"`
+
+ Notes []string `json:"notes,omitempty"`
+ Warnings []string `json:"warnings,omitempty"`
+}
+
+func (c *Collector) writePBSNotificationSummary(commandsDir string) {
+ if c == nil {
+ return
+ }
+
+ summary := pbsNotificationsSummary{
+ GeneratedAt: time.Now().UTC(),
+ Enabled: c.config != nil && c.config.BackupPBSNotifications,
+ PrivEnabled: c.config != nil && c.config.BackupPBSNotifications && c.config.BackupPBSNotificationsPriv,
+ Endpoints: make(map[string]pbsNotificationSnapshotSummary),
+ }
+
+ if c.pbsManifest != nil {
+ summary.ConfigFiles = &pbsNotificationsConfigFilesSummary{
+ NotificationsCfg: c.pbsManifest["notifications.cfg"],
+ NotificationsPrivCfg: c.pbsManifest["notifications-priv.cfg"],
+ }
+ }
+
+ summary.Targets = summarizePBSNotificationSnapshot(filepath.Join(commandsDir, "notification_targets.json"))
+ summary.Matchers = summarizePBSNotificationSnapshot(filepath.Join(commandsDir, "notification_matchers.json"))
+ for _, typ := range []string{"smtp", "sendmail", "gotify", "webhook"} {
+ summary.Endpoints[typ] = summarizePBSNotificationSnapshot(filepath.Join(commandsDir, fmt.Sprintf("notification_endpoints_%s.json", typ)))
+ }
+
+ if summary.ConfigFiles != nil {
+ cfg := summary.ConfigFiles.NotificationsCfg
+ priv := summary.ConfigFiles.NotificationsPrivCfg
+
+ if cfg.Status != StatusCollected && cfg.Status != StatusDisabled {
+ if summary.Targets.Total > 0 || sumEndpointTotals(summary.Endpoints) > 0 {
+ summary.Warnings = append(summary.Warnings, "Notification objects detected in snapshots, but notifications.cfg was not collected (check BACKUP_PBS_NOTIFICATIONS and exclusions).")
+ }
+ }
+
+ if priv.Status == StatusDisabled {
+ summary.Notes = append(summary.Notes, "notifications-priv.cfg backup is disabled (BACKUP_PBS_NOTIFICATIONS_PRIV=false); endpoint credentials/secrets will not be included.")
+ } else if priv.Status != StatusCollected {
+ if summary.Targets.Custom > 0 || sumEndpointCustom(summary.Endpoints) > 0 {
+ summary.Warnings = append(summary.Warnings, "Custom notification endpoints/targets detected, but notifications-priv.cfg was not collected; restore may require re-entering secrets/credentials.")
+ }
+ }
+ }
+
+ // Log a concise snapshot summary to the console, plus any notes/warnings collected above.
+ if c.logger != nil {
+ c.logger.Info("PBS notifications snapshot summary: targets=%d matchers=%d endpoints=%d",
+ summary.Targets.Total,
+ summary.Matchers.Total,
+ sumEndpointTotals(summary.Endpoints),
+ )
+ for _, note := range summary.Notes {
+ c.logger.Info("PBS notifications: %s", note)
+ }
+ for _, warning := range summary.Warnings {
+ c.logger.Warning("PBS notifications: %s", warning)
+ }
+ }
+
+ out, err := json.MarshalIndent(summary, "", " ")
+ if err != nil {
+ c.logger.Debug("PBS notifications summary skipped: marshal error: %v", err)
+ return
+ }
+
+ if err := c.writeReportFile(filepath.Join(commandsDir, "notifications_summary.json"), out); err != nil {
+ c.logger.Debug("PBS notifications summary write failed: %v", err)
+ }
+}
+
+func sumEndpointTotals(endpoints map[string]pbsNotificationSnapshotSummary) int {
+ total := 0
+ for _, s := range endpoints {
+ total += s.Total
+ }
+ return total
+}
+
+func sumEndpointCustom(endpoints map[string]pbsNotificationSnapshotSummary) int {
+ total := 0
+ for _, s := range endpoints {
+ total += s.Custom
+ }
+ return total
+}
+
+func summarizePBSNotificationSnapshot(path string) pbsNotificationSnapshotSummary {
+ raw, err := os.ReadFile(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return pbsNotificationSnapshotSummary{Present: false}
+ }
+ return pbsNotificationSnapshotSummary{
+ Present: false,
+ Error: err.Error(),
+ }
+ }
+
+ summary := pbsNotificationSnapshotSummary{
+ Present: true,
+ Bytes: int64(len(raw)),
+ }
+
+ trimmed := strings.TrimSpace(string(raw))
+ if trimmed == "" {
+ return summary
+ }
+
+ var payload any
+ if err := json.Unmarshal([]byte(trimmed), &payload); err != nil {
+ summary.Error = fmt.Sprintf("invalid json: %v", err)
+ return summary
+ }
+
+ // Unwrap proxmox-backup-manager JSON envelope (common shape: {"data":[...], ...}).
+ if m, ok := payload.(map[string]any); ok {
+ if data, ok := m["data"]; ok {
+ payload = data
+ }
+ }
+
+ items, ok := payload.([]any)
+ if !ok {
+ summary.Error = "unexpected json shape (expected list)"
+ return summary
+ }
+
+ summary.Total = len(items)
+
+ names := make([]string, 0, len(items))
+ for _, item := range items {
+ entry, ok := item.(map[string]any)
+ if !ok {
+ continue
+ }
+
+ name := firstString(entry, "name", "id", "target", "matcher")
+ if name != "" {
+ names = append(names, name)
+ }
+
+ origin := strings.ToLower(strings.TrimSpace(firstString(entry, "origin")))
+ switch {
+ case strings.Contains(origin, "built"):
+ summary.BuiltIn++
+ case strings.Contains(origin, "custom"):
+ summary.Custom++
+ }
+ }
+
+ sort.Strings(names)
+ if len(names) > 100 {
+ names = names[:100]
+ }
+ if len(names) > 0 {
+ summary.Names = names
+ }
+
+ return summary
+}
+
+func firstString(entry map[string]any, keys ...string) string {
+ for _, key := range keys {
+ v, ok := entry[key]
+ if !ok {
+ continue
+ }
+ s, ok := v.(string)
+ if !ok {
+ continue
+ }
+ s = strings.TrimSpace(s)
+ if s != "" {
+ return s
+ }
+ }
+ return ""
+}
diff --git a/internal/backup/collector_pve.go b/internal/backup/collector_pve.go
index 756356a..f8a6efa 100644
--- a/internal/backup/collector_pve.go
+++ b/internal/backup/collector_pve.go
@@ -1326,7 +1326,7 @@ func (pw *patternWriter) Write(path string, info os.FileInfo) error {
func (pw *patternWriter) Close() error {
var err error
if pw.writer != nil {
- if flushErr := pw.writer.Flush(); flushErr != nil && err == nil {
+ if flushErr := pw.writer.Flush(); flushErr != nil {
err = flushErr
}
}
@@ -1556,6 +1556,9 @@ func (c *Collector) copyIfExists(source, target, description string) error {
}
func (c *Collector) aggregateBackupHistory(ctx context.Context, jobsDir, target string) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
entries, err := os.ReadDir(jobsDir)
if err != nil {
return err
@@ -1563,6 +1566,9 @@ func (c *Collector) aggregateBackupHistory(ctx context.Context, jobsDir, target
var buffers []json.RawMessage
for _, entry := range entries {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
if entry.IsDir() {
continue
}
@@ -1599,6 +1605,9 @@ func (c *Collector) aggregateBackupHistory(ctx context.Context, jobsDir, target
}
func (c *Collector) aggregateReplicationStatus(ctx context.Context, replicationDir, target string) error {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
entries, err := os.ReadDir(replicationDir)
if err != nil {
return err
@@ -1606,6 +1615,9 @@ func (c *Collector) aggregateReplicationStatus(ctx context.Context, replicationD
var buffers []json.RawMessage
for _, entry := range entries {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
if entry.IsDir() {
continue
}
diff --git a/internal/backup/optimizations.go b/internal/backup/optimizations.go
index c4e8892..f31943f 100644
--- a/internal/backup/optimizations.go
+++ b/internal/backup/optimizations.go
@@ -9,7 +9,6 @@ import (
"io"
"os"
"path/filepath"
- "sort"
"strings"
"github.com/tis24dev/proxsave/internal/logging"
@@ -310,7 +309,37 @@ func prefilterFiles(ctx context.Context, logger *logging.Logger, root string, ma
}
logger.Debug("Prefiltering files under %s (max size %d bytes)", root, maxSize)
- var processed int
+ type prefilterStats struct {
+ scanned int
+ optimized int
+ skippedStructured int
+ skippedSymlink int
+ }
+ var stats prefilterStats
+
+ isStructuredConfigPath := func(path string) bool {
+ rel, err := filepath.Rel(root, path)
+ if err != nil {
+ return false
+ }
+ rel = filepath.ToSlash(filepath.Clean(rel))
+ rel = strings.TrimPrefix(rel, "./")
+ switch {
+ case strings.HasPrefix(rel, "etc/proxmox-backup/"):
+ return true
+ case strings.HasPrefix(rel, "etc/pve/"):
+ return true
+ case strings.HasPrefix(rel, "etc/ssh/"):
+ return true
+ case strings.HasPrefix(rel, "etc/pam.d/"):
+ return true
+ case strings.HasPrefix(rel, "etc/systemd/system/"):
+ return true
+ default:
+ return false
+ }
+ }
+
err := filepath.WalkDir(root, func(path string, d os.DirEntry, err error) error {
if err != nil {
return err
@@ -322,27 +351,39 @@ func prefilterFiles(ctx context.Context, logger *logging.Logger, root string, ma
return nil
}
- info, err := d.Info()
- if err != nil {
+ info, err := os.Lstat(path)
+ if err != nil || info == nil {
+ return nil
+ }
+ if info.Mode()&os.ModeSymlink != 0 {
+ stats.skippedSymlink++
+ return nil
+ }
+ if !info.Mode().IsRegular() {
return nil
}
if info.Size() == 0 || info.Size() > maxSize {
return nil
}
+ stats.scanned++
ext := strings.ToLower(filepath.Ext(path))
switch ext {
case ".txt", ".log", ".md":
- if err := normalizeTextFile(path); err == nil {
- processed++
+ if changed, err := normalizeTextFile(path); err == nil && changed {
+ stats.optimized++
}
case ".conf", ".cfg", ".ini":
- if err := normalizeConfigFile(path); err == nil {
- processed++
+ if isStructuredConfigPath(path) {
+ stats.skippedStructured++
+ return nil
+ }
+ if changed, err := normalizeConfigFile(path); err == nil && changed {
+ stats.optimized++
}
case ".json":
- if err := minifyJSON(path); err == nil {
- processed++
+ if changed, err := minifyJSON(path); err == nil && changed {
+ stats.optimized++
}
}
return nil
@@ -352,52 +393,43 @@ func prefilterFiles(ctx context.Context, logger *logging.Logger, root string, ma
return fmt.Errorf("prefilter walk failed: %w", err)
}
- logger.Info("Prefilter completed: %d files optimized", processed)
+ logger.Info("Prefilter completed: optimized=%d scanned=%d skipped_structured=%d skipped_symlink=%d", stats.optimized, stats.scanned, stats.skippedStructured, stats.skippedSymlink)
return nil
}
-func normalizeTextFile(path string) error {
+func normalizeTextFile(path string) (bool, error) {
data, err := os.ReadFile(path)
if err != nil {
- return err
+ return false, err
}
normalized := bytes.ReplaceAll(data, []byte("\r"), nil)
if bytes.Equal(data, normalized) {
- return nil
+ return false, nil
}
- return os.WriteFile(path, normalized, defaultChunkFilePerm)
+ return true, os.WriteFile(path, normalized, defaultChunkFilePerm)
}
-func normalizeConfigFile(path string) error {
- data, err := os.ReadFile(path)
- if err != nil {
- return err
- }
- lines := strings.Split(string(data), "\n")
- filtered := lines[:0]
- for _, line := range lines {
- line = strings.TrimSpace(line)
- if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") {
- continue
- }
- filtered = append(filtered, line)
- }
- sort.Strings(filtered)
- return os.WriteFile(path, []byte(strings.Join(filtered, "\n")), defaultChunkFilePerm)
+func normalizeConfigFile(path string) (bool, error) {
+ // Config files can be whitespace/ordering-sensitive (e.g. section headers).
+ // Only perform safe, semantic-preserving normalization here.
+ return normalizeTextFile(path)
}
-func minifyJSON(path string) error {
+func minifyJSON(path string) (bool, error) {
data, err := os.ReadFile(path)
if err != nil {
- return err
+ return false, err
}
var tmp any
if err := json.Unmarshal(data, &tmp); err != nil {
- return err
+ return false, err
}
minified, err := json.Marshal(tmp)
if err != nil {
- return err
+ return false, err
+ }
+ if bytes.Equal(bytes.TrimSpace(data), minified) {
+ return false, nil
}
- return os.WriteFile(path, minified, defaultChunkFilePerm)
+ return true, os.WriteFile(path, minified, defaultChunkFilePerm)
}
diff --git a/internal/backup/optimizations_helpers_test.go b/internal/backup/optimizations_helpers_test.go
index 2c8032e..7c6fffa 100644
--- a/internal/backup/optimizations_helpers_test.go
+++ b/internal/backup/optimizations_helpers_test.go
@@ -5,6 +5,7 @@ import (
"encoding/json"
"os"
"path/filepath"
+ "strings"
"testing"
)
@@ -46,8 +47,10 @@ func TestNormalizeTextFileAndConfigAndJSON(t *testing.T) {
if err := os.WriteFile(textPath, []byte("line1\r\nline2\r\n"), 0o640); err != nil {
t.Fatalf("write text: %v", err)
}
- if err := normalizeTextFile(textPath); err != nil {
+ if changed, err := normalizeTextFile(textPath); err != nil {
t.Fatalf("normalizeTextFile: %v", err)
+ } else if !changed {
+ t.Fatalf("expected text to be normalized")
}
data, _ := os.ReadFile(textPath)
if bytes.Contains(data, []byte("\r")) {
@@ -55,24 +58,31 @@ func TestNormalizeTextFileAndConfigAndJSON(t *testing.T) {
}
cfgPath := filepath.Join(tmp, "app.conf")
- cfgContent := "#comment\nz=1\n\n;ignored\na=2\n"
+ cfgContent := "#comment\r\nz=1\r\n\r\n;ignored\r\na=2\r\n"
if err := os.WriteFile(cfgPath, []byte(cfgContent), 0o640); err != nil {
t.Fatalf("write conf: %v", err)
}
- if err := normalizeConfigFile(cfgPath); err != nil {
+ if changed, err := normalizeConfigFile(cfgPath); err != nil {
t.Fatalf("normalizeConfigFile: %v", err)
+ } else if !changed {
+ t.Fatalf("expected config to be normalized")
}
cfgData, _ := os.ReadFile(cfgPath)
- if string(cfgData) != "a=2\nz=1" {
- t.Fatalf("config not normalized/sorted, got %q", cfgData)
+ if bytes.Contains(cfgData, []byte("\r")) {
+ t.Fatalf("expected CR removed from config, got %q", cfgData)
+ }
+ if string(cfgData) != strings.ReplaceAll(cfgContent, "\r", "") {
+ t.Fatalf("config contents changed unexpectedly, got %q", cfgData)
}
jsonPath := filepath.Join(tmp, "data.json")
if err := os.WriteFile(jsonPath, []byte("{\n \"a\": 1,\n \"b\": 2\n}\n"), 0o640); err != nil {
t.Fatalf("write json: %v", err)
}
- if err := minifyJSON(jsonPath); err != nil {
+ if changed, err := minifyJSON(jsonPath); err != nil {
t.Fatalf("minifyJSON: %v", err)
+ } else if !changed {
+ t.Fatalf("expected JSON to be minified")
}
jdata, _ := os.ReadFile(jsonPath)
if bytes.Contains(jdata, []byte(" ")) || bytes.Contains(jdata, []byte("\n")) {
@@ -82,7 +92,7 @@ func TestNormalizeTextFileAndConfigAndJSON(t *testing.T) {
if err := os.WriteFile(jsonPath, []byte("{invalid"), 0o640); err != nil {
t.Fatalf("write invalid json: %v", err)
}
- if err := minifyJSON(jsonPath); err == nil {
+ if _, err := minifyJSON(jsonPath); err == nil {
t.Fatalf("expected error for invalid json")
}
}
@@ -95,8 +105,10 @@ func TestMinifyJSONKeepsData(t *testing.T) {
if err := os.WriteFile(path, payload, 0o640); err != nil {
t.Fatalf("write json: %v", err)
}
- if err := minifyJSON(path); err != nil {
+ if changed, err := minifyJSON(path); err != nil {
t.Fatalf("minifyJSON: %v", err)
+ } else if !changed {
+ t.Fatalf("expected JSON to be minified")
}
roundTrip, _ := os.ReadFile(path)
var decoded map[string]int
diff --git a/internal/backup/optimizations_structured_test.go b/internal/backup/optimizations_structured_test.go
new file mode 100644
index 0000000..1a46a16
--- /dev/null
+++ b/internal/backup/optimizations_structured_test.go
@@ -0,0 +1,77 @@
+package backup
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/tis24dev/proxsave/internal/logging"
+ "github.com/tis24dev/proxsave/internal/types"
+)
+
+func TestPrefilterSkipsStructuredConfigs(t *testing.T) {
+ tmp := t.TempDir()
+
+ // Create a structured PBS config (the prefilter must skip it, preserving its exact bytes)
+ pbsDir := filepath.Join(tmp, "etc", "proxmox-backup")
+ if err := os.MkdirAll(pbsDir, 0o755); err != nil {
+ t.Fatalf("mkdir: %v", err)
+ }
+
+ pbsCfg := filepath.Join(pbsDir, "datastore.cfg")
+ pbsContent := "datastore: Test\n\tpath /mnt/test\n\tcomment Test DS\n"
+ if err := os.WriteFile(pbsCfg, []byte(pbsContent), 0o640); err != nil {
+ t.Fatalf("write pbs config: %v", err)
+ }
+
+ // Create normal config with CRLF (should be normalized)
+ normalCfg := filepath.Join(tmp, "etc", "normal.cfg")
+ normalContent := "option1\r\noption2\r\n"
+ if err := os.WriteFile(normalCfg, []byte(normalContent), 0o640); err != nil {
+ t.Fatalf("write normal config: %v", err)
+ }
+
+ // Create log file with CRLF (should be normalized)
+ logDir := filepath.Join(tmp, "var", "log")
+ if err := os.MkdirAll(logDir, 0o755); err != nil {
+ t.Fatalf("mkdir log: %v", err)
+ }
+ logFile := filepath.Join(logDir, "test.log")
+ logContent := "line1\r\nline2\r\n"
+ if err := os.WriteFile(logFile, []byte(logContent), 0o640); err != nil {
+ t.Fatalf("write log: %v", err)
+ }
+
+ // Run prefilter
+ logger := logging.New(types.LogLevelError, false)
+ if err := prefilterFiles(context.Background(), logger, tmp, 8*1024*1024); err != nil {
+ t.Fatalf("prefilterFiles: %v", err)
+ }
+
+ // Verify PBS config unchanged (TABs preserved)
+ pbsAfter, _ := os.ReadFile(pbsCfg)
+ if string(pbsAfter) != pbsContent {
+ t.Fatalf("PBS config was modified!\nExpected: %q\nGot: %q", pbsContent, string(pbsAfter))
+ }
+ if !strings.Contains(string(pbsAfter), "\t") {
+ t.Fatalf("PBS config lost TAB indentation")
+ }
+
+ // Verify normal config normalized (CRLF removed)
+ normalAfter, _ := os.ReadFile(normalCfg)
+ if strings.Contains(string(normalAfter), "\r") {
+ t.Fatalf("Normal config still has CRLF: %q", normalAfter)
+ }
+ expectedNormal := strings.ReplaceAll(normalContent, "\r", "")
+ if string(normalAfter) != expectedNormal {
+ t.Fatalf("Normal config not normalized correctly\nExpected: %q\nGot: %q", expectedNormal, string(normalAfter))
+ }
+
+ // Verify log normalized (CRLF removed)
+ logAfter, _ := os.ReadFile(logFile)
+ if strings.Contains(string(logAfter), "\r") {
+ t.Fatalf("Log file still has CRLF: %q", logAfter)
+ }
+}
diff --git a/internal/backup/optimizations_test.go b/internal/backup/optimizations_test.go
index b3ae733..1cd7e0c 100644
--- a/internal/backup/optimizations_test.go
+++ b/internal/backup/optimizations_test.go
@@ -40,7 +40,8 @@ func TestApplyOptimizationsRunsAllStages(t *testing.T) {
dupB := mustWriteFile(filepath.Join("dup", "two.txt"), "identical data")
logFile := mustWriteFile(filepath.Join("logs", "app.log"), "line one\r\nline two\r\n")
- confFile := mustWriteFile(filepath.Join("conf", "settings.conf"), "# comment\nkey=value\n\n;ignored\nalpha=beta\n")
+ confOriginal := "# comment\nkey=value\n\n;ignored\nalpha=beta\n"
+ confFile := mustWriteFile(filepath.Join("conf", "settings.conf"), confOriginal)
jsonFile := mustWriteFile(filepath.Join("meta", "data.json"), "{\n \"a\": 1,\n \"b\": 2\n}\n")
chunkTarget := mustWriteFile("chunk.bin", string(bytes.Repeat([]byte("x"), 96)))
@@ -75,7 +76,7 @@ func TestApplyOptimizationsRunsAllStages(t *testing.T) {
t.Fatalf("symlink data mismatch, got %q", data)
}
- // Prefilter should strip CR characters and comments/sort config files.
+ // Prefilter should strip CR characters and keep config files semantically intact.
logContents, err := os.ReadFile(logFile)
if err != nil {
t.Fatalf("read log file: %v", err)
@@ -87,7 +88,7 @@ func TestApplyOptimizationsRunsAllStages(t *testing.T) {
if err != nil {
t.Fatalf("read config file: %v", err)
}
- if string(confContents) != "alpha=beta\nkey=value" {
+ if string(confContents) != confOriginal {
t.Fatalf("unexpected config contents: %q", confContents)
}
jsonContents, err := os.ReadFile(jsonFile)
diff --git a/internal/config/config.go b/internal/config/config.go
index 8c4cf28..50094b1 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -199,30 +199,32 @@ type Config struct {
CephConfigPath string
// PBS-specific collection options
- BackupDatastoreConfigs bool
- BackupPBSS3Endpoints bool
- BackupPBSNodeConfig bool
- BackupPBSAcmeAccounts bool
- BackupPBSAcmePlugins bool
- BackupPBSMetricServers bool
- BackupPBSTrafficControl bool
- BackupUserConfigs bool
- BackupRemoteConfigs bool
- BackupSyncJobs bool
- BackupVerificationJobs bool
- BackupTapeConfigs bool
- BackupPBSNetworkConfig bool
- BackupPruneSchedules bool
- BackupPxarFiles bool
- PxarDatastoreConcurrency int
- PxarIntraConcurrency int
- PxarScanFanoutLevel int
- PxarScanMaxRoots int
- PxarStopOnCap bool
- PxarEnumWorkers int
- PxarEnumBudgetMs int
- PxarFileIncludePatterns []string
- PxarFileExcludePatterns []string
+ BackupDatastoreConfigs bool
+ BackupPBSS3Endpoints bool
+ BackupPBSNodeConfig bool
+ BackupPBSAcmeAccounts bool
+ BackupPBSAcmePlugins bool
+ BackupPBSMetricServers bool
+ BackupPBSTrafficControl bool
+ BackupPBSNotifications bool
+ BackupPBSNotificationsPriv bool
+ BackupUserConfigs bool
+ BackupRemoteConfigs bool
+ BackupSyncJobs bool
+ BackupVerificationJobs bool
+ BackupTapeConfigs bool
+ BackupPBSNetworkConfig bool
+ BackupPruneSchedules bool
+ BackupPxarFiles bool
+ PxarDatastoreConcurrency int
+ PxarIntraConcurrency int
+ PxarScanFanoutLevel int
+ PxarScanMaxRoots int
+ PxarStopOnCap bool
+ PxarEnumWorkers int
+ PxarEnumBudgetMs int
+ PxarFileIncludePatterns []string
+ PxarFileExcludePatterns []string
// System collection options
BackupNetworkConfigs bool
@@ -680,6 +682,8 @@ func (c *Config) parsePBSSettings() {
c.BackupPBSAcmePlugins = c.getBool("BACKUP_PBS_ACME_PLUGINS", true)
c.BackupPBSMetricServers = c.getBool("BACKUP_PBS_METRIC_SERVERS", true)
c.BackupPBSTrafficControl = c.getBool("BACKUP_PBS_TRAFFIC_CONTROL", true)
+ c.BackupPBSNotifications = c.getBool("BACKUP_PBS_NOTIFICATIONS", true)
+ c.BackupPBSNotificationsPriv = c.getBool("BACKUP_PBS_NOTIFICATIONS_PRIV", c.BackupPBSNotifications)
c.BackupUserConfigs = c.getBool("BACKUP_USER_CONFIGS", true)
c.BackupRemoteConfigs = c.getBoolWithFallback([]string{"BACKUP_REMOTE_CONFIGS", "BACKUP_REMOTE_CFG"}, true)
c.BackupSyncJobs = c.getBool("BACKUP_SYNC_JOBS", true)
diff --git a/internal/config/templates/backup.env b/internal/config/templates/backup.env
index 7a7cd71..01244a2 100644
--- a/internal/config/templates/backup.env
+++ b/internal/config/templates/backup.env
@@ -296,6 +296,8 @@ BACKUP_PBS_ACME_ACCOUNTS=true # acme/accounts.cfg
BACKUP_PBS_ACME_PLUGINS=true # acme/plugins.cfg
BACKUP_PBS_METRIC_SERVERS=true # metricserver.cfg
BACKUP_PBS_TRAFFIC_CONTROL=true # traffic-control.cfg
+BACKUP_PBS_NOTIFICATIONS=true # notifications.cfg (targets/matchers/endpoints)
+BACKUP_PBS_NOTIFICATIONS_PRIV=true # notifications-priv.cfg (secrets/credentials for endpoints)
BACKUP_USER_CONFIGS=true
BACKUP_REMOTE_CONFIGS=true
BACKUP_SYNC_JOBS=true
diff --git a/internal/config/upgrade.go b/internal/config/upgrade.go
index 755f12c..d319a4a 100644
--- a/internal/config/upgrade.go
+++ b/internal/config/upgrade.go
@@ -558,17 +558,3 @@ func findClosingQuoteLine(lines []string, start int) (int, error) {
}
return 0, fmt.Errorf("closing quote not found")
}
-
-func renderEnvValue(key string, value envValue) []string {
- if value.kind == envValueKindBlock {
- lines := []string{fmt.Sprintf("%s=\"", key)}
- lines = append(lines, value.blockLines...)
- lines = append(lines, "\"")
- return lines
- }
- line := fmt.Sprintf("%s=%s", key, value.rawValue)
- if value.comment != "" {
- line += " " + value.comment
- }
- return []string{line}
-}
diff --git a/internal/orchestrator/bundle_test.go b/internal/orchestrator/bundle_test.go
index 6a9305c..9462b4b 100644
--- a/internal/orchestrator/bundle_test.go
+++ b/internal/orchestrator/bundle_test.go
@@ -90,11 +90,12 @@ func TestCreateBundle_CreatesValidTarArchive(t *testing.T) {
}
expectedContent := testData[""]
- if header.Name == "backup.tar.sha256" {
+ switch header.Name {
+ case "backup.tar.sha256":
expectedContent = testData[".sha256"]
- } else if header.Name == "backup.tar.metadata" {
+ case "backup.tar.metadata":
expectedContent = testData[".metadata"]
- } else if header.Name == "backup.tar.metadata.sha256" {
+ case "backup.tar.metadata.sha256":
expectedContent = testData[".metadata.sha256"]
}
diff --git a/internal/orchestrator/categories.go b/internal/orchestrator/categories.go
index 053bb83..56483d8 100644
--- a/internal/orchestrator/categories.go
+++ b/internal/orchestrator/categories.go
@@ -178,6 +178,12 @@ func GetAllCategories() []Category {
"./etc/proxmox-backup/acme/plugins.cfg",
"./etc/proxmox-backup/metricserver.cfg",
"./etc/proxmox-backup/traffic-control.cfg",
+ "./var/lib/proxsave-info/commands/pbs/node_config.json",
+ "./var/lib/proxsave-info/commands/pbs/acme_accounts.json",
+ "./var/lib/proxsave-info/commands/pbs/acme_plugins.json",
+ "./var/lib/proxsave-info/commands/pbs/acme_account_*_info.json",
+ "./var/lib/proxsave-info/commands/pbs/acme_plugin_*_config.json",
+ "./var/lib/proxsave-info/commands/pbs/traffic_control.json",
},
},
{
@@ -188,6 +194,11 @@ func GetAllCategories() []Category {
Paths: []string{
"./etc/proxmox-backup/datastore.cfg",
"./etc/proxmox-backup/s3.cfg",
+ "./var/lib/proxsave-info/commands/pbs/datastore_list.json",
+ "./var/lib/proxsave-info/commands/pbs/datastore_*_status.json",
+ "./var/lib/proxsave-info/commands/pbs/s3_endpoints.json",
+ "./var/lib/proxsave-info/commands/pbs/s3_endpoint_*_buckets.json",
+ "./var/lib/proxsave-info/commands/pbs/pbs_datastore_inventory.json",
},
},
{
@@ -208,6 +219,10 @@ func GetAllCategories() []Category {
"./etc/proxmox-backup/sync.cfg",
"./etc/proxmox-backup/verification.cfg",
"./etc/proxmox-backup/prune.cfg",
+ "./var/lib/proxsave-info/commands/pbs/sync_jobs.json",
+ "./var/lib/proxsave-info/commands/pbs/verification_jobs.json",
+ "./var/lib/proxsave-info/commands/pbs/prune_jobs.json",
+ "./var/lib/proxsave-info/commands/pbs/gc_jobs.json",
},
},
{
@@ -217,6 +232,7 @@ func GetAllCategories() []Category {
Type: CategoryTypePBS,
Paths: []string{
"./etc/proxmox-backup/remote.cfg",
+ "./var/lib/proxsave-info/commands/pbs/remote_list.json",
},
},
{
@@ -227,6 +243,9 @@ func GetAllCategories() []Category {
Paths: []string{
"./etc/proxmox-backup/notifications.cfg",
"./etc/proxmox-backup/notifications-priv.cfg",
+ "./var/lib/proxsave-info/commands/pbs/notification_targets.json",
+ "./var/lib/proxsave-info/commands/pbs/notification_matchers.json",
+ "./var/lib/proxsave-info/commands/pbs/notification_endpoints_*.json",
},
},
{
@@ -242,6 +261,11 @@ func GetAllCategories() []Category {
"./etc/proxmox-backup/shadow.json",
"./etc/proxmox-backup/token.shadow",
"./etc/proxmox-backup/tfa.json",
+ "./var/lib/proxsave-info/commands/pbs/user_list.json",
+ "./var/lib/proxsave-info/commands/pbs/realms_ldap.json",
+ "./var/lib/proxsave-info/commands/pbs/realms_ad.json",
+ "./var/lib/proxsave-info/commands/pbs/realms_openid.json",
+ "./var/lib/proxsave-info/commands/pbs/acl_list.json",
},
},
{
@@ -254,6 +278,9 @@ func GetAllCategories() []Category {
"./etc/proxmox-backup/tape-job.cfg",
"./etc/proxmox-backup/media-pool.cfg",
"./etc/proxmox-backup/tape-encryption-keys.json",
+ "./var/lib/proxsave-info/commands/pbs/tape_drives.json",
+ "./var/lib/proxsave-info/commands/pbs/tape_changers.json",
+ "./var/lib/proxsave-info/commands/pbs/tape_pools.json",
},
},
@@ -443,14 +470,17 @@ func GetCategoriesForSystem(systemType string) []Category {
all := GetAllCategories()
var categories []Category
- for _, cat := range all {
- if systemType == "pve" {
- // PVE system: include PVE and common categories
+ switch systemType {
+ case "pve":
+ // PVE system: include PVE and common categories
+ for _, cat := range all {
if cat.Type == CategoryTypePVE || cat.Type == CategoryTypeCommon {
categories = append(categories, cat)
}
- } else if systemType == "pbs" {
- // PBS system: include PBS and common categories
+ }
+ case "pbs":
+ // PBS system: include PBS and common categories
+ for _, cat := range all {
if cat.Type == CategoryTypePBS || cat.Type == CategoryTypeCommon {
categories = append(categories, cat)
}
@@ -539,14 +569,15 @@ func GetStorageModeCategories(systemType string) []Category {
all := GetAllCategories()
var categories []Category
- if systemType == "pve" {
+ switch systemType {
+ case "pve":
// PVE: cluster + storage + jobs + zfs + filesystem + storage stack
for _, cat := range all {
if cat.ID == "pve_cluster" || cat.ID == "storage_pve" || cat.ID == "pve_jobs" || cat.ID == "zfs" || cat.ID == "filesystem" || cat.ID == "storage_stack" {
categories = append(categories, cat)
}
}
- } else if systemType == "pbs" {
+ case "pbs":
// PBS: config export + datastore + maintenance + jobs + remotes + zfs + filesystem + storage stack
for _, cat := range all {
if cat.ID == "pbs_config" || cat.ID == "datastore_pbs" || cat.ID == "maintenance_pbs" || cat.ID == "pbs_jobs" || cat.ID == "pbs_remotes" || cat.ID == "zfs" || cat.ID == "filesystem" || cat.ID == "storage_stack" {
diff --git a/internal/orchestrator/compatibility.go b/internal/orchestrator/compatibility.go
index 8e541cd..cc2eb3f 100644
--- a/internal/orchestrator/compatibility.go
+++ b/internal/orchestrator/compatibility.go
@@ -124,11 +124,12 @@ func GetSystemInfo() map[string]string {
info["type_name"] = GetSystemTypeString(systemType)
// Get version information
- if systemType == SystemTypePVE {
+ switch systemType {
+ case SystemTypePVE:
if content, err := compatFS.ReadFile("/etc/pve-release"); err == nil {
info["version"] = strings.TrimSpace(string(content))
}
- } else if systemType == SystemTypePBS {
+ case SystemTypePBS:
if content, err := compatFS.ReadFile("/etc/proxmox-backup-release"); err == nil {
info["version"] = strings.TrimSpace(string(content))
}
diff --git a/internal/orchestrator/decrypt.go b/internal/orchestrator/decrypt.go
index 92e9e6b..c995530 100644
--- a/internal/orchestrator/decrypt.go
+++ b/internal/orchestrator/decrypt.go
@@ -80,8 +80,8 @@ func RunDecryptWorkflowWithDeps(ctx context.Context, deps *Deps, version string)
done := logging.DebugStart(logger, "decrypt workflow", "version=%s", version)
defer func() { done(err) }()
- ui := newCLIWorkflowUI(bufio.NewReader(os.Stdin), logger)
- return runDecryptWorkflowWithUI(ctx, cfg, logger, version, ui)
+ ui := newCLIWorkflowUI(bufio.NewReader(os.Stdin), logger)
+ return runDecryptWorkflowWithUI(ctx, cfg, logger, version, ui)
}
// RunDecryptWorkflow is the legacy entrypoint that builds default deps.
@@ -652,7 +652,7 @@ func copyRawArtifactsToWorkdirWithLogger(ctx context.Context, cand *decryptCandi
func decryptArchiveWithPrompts(ctx context.Context, reader *bufio.Reader, encryptedPath, outputPath string, logger *logging.Logger) error {
ui := newCLIWorkflowUI(reader, logger)
displayName := filepath.Base(encryptedPath)
- return decryptArchiveWithSecretPrompt(ctx, encryptedPath, outputPath, displayName, logger, ui.PromptDecryptSecret)
+ return decryptArchiveWithSecretPrompt(ctx, encryptedPath, outputPath, displayName, ui.PromptDecryptSecret)
}
func parseIdentityInput(input string) ([]age.Identity, error) {
diff --git a/internal/orchestrator/decrypt_test.go b/internal/orchestrator/decrypt_test.go
index d663df7..59be13c 100644
--- a/internal/orchestrator/decrypt_test.go
+++ b/internal/orchestrator/decrypt_test.go
@@ -34,19 +34,19 @@ func TestBuildDecryptPathOptions(t *testing.T) {
wantPaths []string
wantLabel []string
}{
- {
- name: "all paths enabled",
- cfg: &config.Config{
- BackupPath: "/backup/local",
- SecondaryEnabled: true,
- SecondaryPath: "/backup/secondary",
- CloudEnabled: true,
- CloudRemote: "/backup/cloud",
- },
- wantCount: 3,
- wantPaths: []string{"/backup/local", "/backup/secondary", "/backup/cloud"},
- wantLabel: []string{"Local backups", "Secondary backups", "Cloud backups"},
+ {
+ name: "all paths enabled",
+ cfg: &config.Config{
+ BackupPath: "/backup/local",
+ SecondaryEnabled: true,
+ SecondaryPath: "/backup/secondary",
+ CloudEnabled: true,
+ CloudRemote: "/backup/cloud",
},
+ wantCount: 3,
+ wantPaths: []string{"/backup/local", "/backup/secondary", "/backup/cloud"},
+ wantLabel: []string{"Local backups", "Secondary backups", "Cloud backups"},
+ },
{
name: "only local path",
cfg: &config.Config{
@@ -91,28 +91,28 @@ func TestBuildDecryptPathOptions(t *testing.T) {
wantPaths: []string{"/backup/local"},
wantLabel: []string{"Local backups"},
},
- {
- name: "cloud with rclone remote included",
- cfg: &config.Config{
- BackupPath: "/backup/local",
- CloudEnabled: true,
- CloudRemote: "gdrive:backups", // rclone remote
- },
- wantCount: 2,
- wantPaths: []string{"/backup/local", "gdrive:backups"},
- wantLabel: []string{"Local backups", "Cloud backups (rclone)"},
+ {
+ name: "cloud with rclone remote included",
+ cfg: &config.Config{
+ BackupPath: "/backup/local",
+ CloudEnabled: true,
+ CloudRemote: "gdrive:backups", // rclone remote
},
- {
- name: "cloud with local absolute path included",
- cfg: &config.Config{
- BackupPath: "/backup/local",
- CloudEnabled: true,
- CloudRemote: "/mnt/cloud/backups",
- },
- wantCount: 2,
- wantPaths: []string{"/backup/local", "/mnt/cloud/backups"},
- wantLabel: []string{"Local backups", "Cloud backups"},
+ wantCount: 2,
+ wantPaths: []string{"/backup/local", "gdrive:backups"},
+ wantLabel: []string{"Local backups", "Cloud backups (rclone)"},
+ },
+ {
+ name: "cloud with local absolute path included",
+ cfg: &config.Config{
+ BackupPath: "/backup/local",
+ CloudEnabled: true,
+ CloudRemote: "/mnt/cloud/backups",
},
+ wantCount: 2,
+ wantPaths: []string{"/backup/local", "/mnt/cloud/backups"},
+ wantLabel: []string{"Local backups", "Cloud backups"},
+ },
{
name: "secondary enabled but path empty",
cfg: &config.Config{
@@ -135,17 +135,17 @@ func TestBuildDecryptPathOptions(t *testing.T) {
wantPaths: []string{"/backup/local"},
wantLabel: []string{"Local backups"},
},
- {
- name: "cloud absolute with colon allowed",
- cfg: &config.Config{
- BackupPath: "/backup/local",
- CloudEnabled: true,
- CloudRemote: "/mnt/backups:foo",
- },
- wantCount: 2,
- wantPaths: []string{"/backup/local", "/mnt/backups:foo"},
- wantLabel: []string{"Local backups", "Cloud backups"},
+ {
+ name: "cloud absolute with colon allowed",
+ cfg: &config.Config{
+ BackupPath: "/backup/local",
+ CloudEnabled: true,
+ CloudRemote: "/mnt/backups:foo",
},
+ wantCount: 2,
+ wantPaths: []string{"/backup/local", "/mnt/backups:foo"},
+ wantLabel: []string{"Local backups", "Cloud backups"},
+ },
{
name: "all paths empty",
cfg: &config.Config{},
@@ -2588,7 +2588,7 @@ func TestInspectRcloneMetadataManifest_RcloneFails(t *testing.T) {
// copyRawArtifactsToWorkdirWithLogger coverage tests
// =====================================
-func TestCopyRawArtifactsToWorkdir_NilContext(t *testing.T) {
+func TestCopyRawArtifactsToWorkdir_ContextWorks(t *testing.T) {
origFS := restoreFS
restoreFS = osFS{}
t.Cleanup(func() { restoreFS = origFS })
@@ -2612,8 +2612,7 @@ func TestCopyRawArtifactsToWorkdir_NilContext(t *testing.T) {
RawChecksumPath: "",
}
- // Pass nil context - function should use context.Background()
- staged, err := copyRawArtifactsToWorkdirWithLogger(nil, cand, workDir, nil)
+ staged, err := copyRawArtifactsToWorkdirWithLogger(context.TODO(), cand, workDir, nil)
if err != nil {
t.Fatalf("copyRawArtifactsToWorkdirWithLogger error: %v", err)
}
diff --git a/internal/orchestrator/decrypt_tui.go b/internal/orchestrator/decrypt_tui.go
index b0f23a8..655f7f5 100644
--- a/internal/orchestrator/decrypt_tui.go
+++ b/internal/orchestrator/decrypt_tui.go
@@ -340,12 +340,21 @@ func preparePlainBundleTUI(ctx context.Context, cand *decryptCandidate, version
func decryptArchiveWithTUIPrompts(ctx context.Context, encryptedPath, outputPath, displayName, configPath, buildSig string, logger *logging.Logger) error {
var promptError string
+ if ctx == nil {
+ ctx = context.Background()
+ }
for {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
identities, err := promptDecryptIdentity(displayName, configPath, buildSig, promptError)
if err != nil {
return err
}
+ if err := ctx.Err(); err != nil {
+ return err
+ }
if err := decryptWithIdentity(encryptedPath, outputPath, identities...); err != nil {
var noMatch *age.NoIdentityMatchError
if errors.Is(err, age.ErrIncorrectIdentity) || errors.As(err, &noMatch) {
diff --git a/internal/orchestrator/decrypt_workflow_ui.go b/internal/orchestrator/decrypt_workflow_ui.go
index 24d0377..a57d45a 100644
--- a/internal/orchestrator/decrypt_workflow_ui.go
+++ b/internal/orchestrator/decrypt_workflow_ui.go
@@ -137,7 +137,7 @@ func ensureWritablePathWithUI(ctx context.Context, ui DecryptWorkflowUI, targetP
}
}
-func decryptArchiveWithSecretPrompt(ctx context.Context, encryptedPath, outputPath, displayName string, logger *logging.Logger, prompt func(ctx context.Context, displayName, previousError string) (string, error)) error {
+func decryptArchiveWithSecretPrompt(ctx context.Context, encryptedPath, outputPath, displayName string, prompt func(ctx context.Context, displayName, previousError string) (string, error)) error {
promptError := ""
for {
secret, err := prompt(ctx, displayName, promptError)
@@ -246,7 +246,7 @@ func preparePlainBundleWithUI(ctx context.Context, cand *decryptCandidate, versi
if strings.TrimSpace(displayName) == "" {
displayName = filepath.Base(manifestCopy.ArchivePath)
}
- if err := decryptArchiveWithSecretPrompt(ctx, staged.ArchivePath, plainArchivePath, displayName, logger, ui.PromptDecryptSecret); err != nil {
+ if err := decryptArchiveWithSecretPrompt(ctx, staged.ArchivePath, plainArchivePath, displayName, ui.PromptDecryptSecret); err != nil {
cleanup()
return nil, err
}
diff --git a/internal/orchestrator/deps.go b/internal/orchestrator/deps.go
index cb64194..648e20b 100644
--- a/internal/orchestrator/deps.go
+++ b/internal/orchestrator/deps.go
@@ -197,7 +197,6 @@ func NewWithDeps(deps Deps) *Orchestrator {
base.Time = deps.Time
}
if deps.Config != nil {
- base.Config = deps.Config
base.DryRun = deps.Config.DryRun
}
diff --git a/internal/orchestrator/directory_recreation.go b/internal/orchestrator/directory_recreation.go
index 19dc9f3..611aaff 100644
--- a/internal/orchestrator/directory_recreation.go
+++ b/internal/orchestrator/directory_recreation.go
@@ -294,7 +294,7 @@ func createPBSDatastoreStructure(basePath, datastoreName string, logger *logging
// If the datastore already contains chunk/index data, avoid any modifications to prevent touching real backup data.
// We only validate and report issues.
if hasData {
- if warn := validatePBSDatastoreReadOnly(basePath, logger); warn != "" {
+ if warn := validatePBSDatastoreReadOnly(basePath); warn != "" {
logger.Warning("PBS datastore preflight: %s", warn)
}
logger.Info("PBS datastore preflight: datastore %s appears to contain data; skipping directory/permission changes to avoid risking datastore contents", datastoreName)
@@ -371,7 +371,7 @@ func createPBSDatastoreStructure(basePath, datastoreName string, logger *logging
return changed, nil
}
-func validatePBSDatastoreReadOnly(datastorePath string, logger *logging.Logger) string {
+func validatePBSDatastoreReadOnly(datastorePath string) string {
if datastorePath == "" {
return "datastore path is empty"
}
@@ -843,15 +843,16 @@ func isIgnorableOwnershipError(err error) bool {
func RecreateDirectoriesFromConfig(systemType SystemType, logger *logging.Logger) error {
logger.Info("Recreating directory structures from configuration...")
- if systemType == SystemTypePVE {
+ switch systemType {
+ case SystemTypePVE:
if err := RecreateStorageDirectories(logger); err != nil {
return fmt.Errorf("recreate PVE storage directories: %w", err)
}
- } else if systemType == SystemTypePBS {
+ case SystemTypePBS:
if err := RecreateDatastoreDirectories(logger); err != nil {
return fmt.Errorf("recreate PBS datastore directories: %w", err)
}
- } else {
+ default:
logger.Debug("Unknown system type, skipping directory recreation")
}
diff --git a/internal/orchestrator/fs_atomic.go b/internal/orchestrator/fs_atomic.go
index 476c845..879f0ce 100644
--- a/internal/orchestrator/fs_atomic.go
+++ b/internal/orchestrator/fs_atomic.go
@@ -75,7 +75,7 @@ func ensureDirExistsWithInheritedMeta(dir string) error {
return nil
}
return fmt.Errorf("path exists but is not a directory: %s", dir)
- } else if err != nil && !errors.Is(err, os.ErrNotExist) {
+ } else if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("stat %s: %w", dir, err)
}
@@ -123,7 +123,7 @@ func ensureDirExistsWithInheritedMeta(dir string) error {
continue
}
return fmt.Errorf("path exists but is not a directory: %s", p)
- } else if err != nil && !errors.Is(err, os.ErrNotExist) {
+ } else if !errors.Is(err, os.ErrNotExist) {
return fmt.Errorf("stat %s: %w", p, err)
}
diff --git a/internal/orchestrator/mount_guard.go b/internal/orchestrator/mount_guard.go
index bc352f9..037811d 100644
--- a/internal/orchestrator/mount_guard.go
+++ b/internal/orchestrator/mount_guard.go
@@ -220,6 +220,13 @@ func maybeApplyPBSDatastoreMountGuards(ctx context.Context, logger *logging.Logg
}
func guardMountPoint(ctx context.Context, guardTarget string) error {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
target := filepath.Clean(strings.TrimSpace(guardTarget))
if target == "" || target == "." || target == string(os.PathSeparator) {
return fmt.Errorf("invalid guard target: %q", guardTarget)
diff --git a/internal/orchestrator/network_apply_workflow_ui.go b/internal/orchestrator/network_apply_workflow_ui.go
index 14e931b..b150c89 100644
--- a/internal/orchestrator/network_apply_workflow_ui.go
+++ b/internal/orchestrator/network_apply_workflow_ui.go
@@ -267,7 +267,7 @@ func applyNetworkWithRollbackWithUI(ctx context.Context, ui RestoreWorkflowUI, l
if strings.TrimSpace(iface) != "" {
if cur, err := currentNetworkEndpoint(ctx, iface, 2*time.Second); err == nil {
- if tgt, err := targetNetworkEndpointFromConfig(logger, iface); err == nil {
+ if tgt, err := targetNetworkEndpointFromConfig(iface); err == nil {
logger.Info("Network plan: %s -> %s", cur.summary(), tgt.summary())
}
}
@@ -275,7 +275,7 @@ func applyNetworkWithRollbackWithUI(ctx context.Context, ui RestoreWorkflowUI, l
if diagnosticsDir != "" {
logging.DebugStep(logger, "network safe apply (ui)", "Write network plan (current -> target)")
- if planText, err := buildNetworkPlanReport(ctx, logger, iface, source, 2*time.Second); err != nil {
+ if planText, err := buildNetworkPlanReport(ctx, iface, source, 2*time.Second); err != nil {
logger.Debug("Network plan build failed: %v", err)
} else if strings.TrimSpace(planText) != "" {
if path, err := writeNetworkTextReportFile(diagnosticsDir, "plan.txt", planText+"\n"); err != nil {
diff --git a/internal/orchestrator/network_health.go b/internal/orchestrator/network_health.go
index 2c7faed..8b583f5 100644
--- a/internal/orchestrator/network_health.go
+++ b/internal/orchestrator/network_health.go
@@ -89,17 +89,6 @@ type networkHealthOptions struct {
LocalPortChecks []tcpPortCheck
}
-func defaultNetworkHealthOptions() networkHealthOptions {
- return networkHealthOptions{
- SystemType: SystemTypeUnknown,
- Logger: nil,
- CommandTimeout: 3 * time.Second,
- EnableGatewayPing: true,
- ForceSSHRouteCheck: false,
- EnableDNSResolve: true,
- }
-}
-
type tcpPortCheck struct {
Name string
Address string
diff --git a/internal/orchestrator/network_plan.go b/internal/orchestrator/network_plan.go
index 7c07711..11c1eb9 100644
--- a/internal/orchestrator/network_plan.go
+++ b/internal/orchestrator/network_plan.go
@@ -7,8 +7,6 @@ import (
"sort"
"strings"
"time"
-
- "github.com/tis24dev/proxsave/internal/logging"
)
type networkEndpoint struct {
@@ -33,7 +31,7 @@ func (e networkEndpoint) summary() string {
return fmt.Sprintf("iface=%s ip=%s gw=%s", iface, addrs, gw)
}
-func buildNetworkPlanReport(ctx context.Context, logger *logging.Logger, iface, source string, timeout time.Duration) (string, error) {
+func buildNetworkPlanReport(ctx context.Context, iface, source string, timeout time.Duration) (string, error) {
if strings.TrimSpace(iface) == "" {
return fmt.Sprintf("Network plan\n\n- Management interface: n/a\n- Detection source: %s\n", strings.TrimSpace(source)), nil
}
@@ -42,7 +40,7 @@ func buildNetworkPlanReport(ctx context.Context, logger *logging.Logger, iface,
}
current, _ := currentNetworkEndpoint(ctx, iface, timeout)
- target, _ := targetNetworkEndpointFromConfig(logger, iface)
+ target, _ := targetNetworkEndpointFromConfig(iface)
var b strings.Builder
b.WriteString("Network plan\n\n")
@@ -77,7 +75,7 @@ func currentNetworkEndpoint(ctx context.Context, iface string, timeout time.Dura
return ep, nil
}
-func targetNetworkEndpointFromConfig(logger *logging.Logger, iface string) (networkEndpoint, error) {
+func targetNetworkEndpointFromConfig(iface string) (networkEndpoint, error) {
ep := networkEndpoint{Interface: strings.TrimSpace(iface)}
if ep.Interface == "" {
return ep, fmt.Errorf("empty interface")
diff --git a/internal/orchestrator/orchestrator.go b/internal/orchestrator/orchestrator.go
index ddab036..ccaa936 100644
--- a/internal/orchestrator/orchestrator.go
+++ b/internal/orchestrator/orchestrator.go
@@ -231,7 +231,7 @@ func (o *Orchestrator) logStep(step int, format string, args ...interface{}) {
if len(args) > 0 {
message = fmt.Sprintf(format, args...)
}
- o.logger.Step("%s", message)
+ o.logger.Step("[%d] %s", step, message)
}
// SetUpdateInfo records version update information discovered by the CLI layer.
@@ -1054,6 +1054,13 @@ func (s *BackupStats) toPrometheusMetrics() *metrics.BackupMetrics {
}
func (o *Orchestrator) createBundle(ctx context.Context, archivePath string) (bundlePath string, err error) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ if err := ctx.Err(); err != nil {
+ return "", err
+ }
+
logger := o.logger
fs := o.filesystem()
dir := filepath.Dir(archivePath)
@@ -1073,6 +1080,9 @@ func (o *Orchestrator) createBundle(ctx context.Context, archivePath string) (bu
}
for _, file := range associated[:3] {
+ if err := ctx.Err(); err != nil {
+ return "", err
+ }
if _, err := fs.Stat(filepath.Join(dir, file)); err != nil {
return "", fmt.Errorf("associated file not found: %s: %w", file, err)
}
@@ -1093,6 +1103,9 @@ func (o *Orchestrator) createBundle(ctx context.Context, archivePath string) (bu
// Add each associated file to the tar archive
for _, filename := range associated {
+ if err := ctx.Err(); err != nil {
+ return "", err
+ }
filePath := filepath.Join(dir, filename)
// Get file info
@@ -1119,7 +1132,7 @@ func (o *Orchestrator) createBundle(ctx context.Context, archivePath string) (bu
return "", fmt.Errorf("failed to open %s: %w", filename, err)
}
- if _, err := io.Copy(tw, file); err != nil {
+ if _, err := io.Copy(tw, &contextReader{ctx: ctx, r: file}); err != nil {
file.Close()
return "", fmt.Errorf("failed to write %s to tar: %w", filename, err)
}
@@ -1144,6 +1157,18 @@ func (o *Orchestrator) createBundle(ctx context.Context, archivePath string) (bu
return bundlePath, nil
}
+type contextReader struct {
+ ctx context.Context
+ r io.Reader
+}
+
+func (cr *contextReader) Read(p []byte) (int, error) {
+ if err := cr.ctx.Err(); err != nil {
+ return 0, err
+ }
+ return cr.r.Read(p)
+}
+
func (o *Orchestrator) removeAssociatedFiles(archivePath string) error {
logger := o.logger
fs := o.filesystem()
@@ -1482,6 +1507,8 @@ func applyCollectorOverrides(cc *backup.CollectorConfig, cfg *config.Config) {
cc.BackupPBSAcmePlugins = cfg.BackupPBSAcmePlugins
cc.BackupPBSMetricServers = cfg.BackupPBSMetricServers
cc.BackupPBSTrafficControl = cfg.BackupPBSTrafficControl
+ cc.BackupPBSNotifications = cfg.BackupPBSNotifications
+ cc.BackupPBSNotificationsPriv = cfg.BackupPBSNotificationsPriv
cc.BackupUserConfigs = cfg.BackupUserConfigs
cc.BackupRemoteConfigs = cfg.BackupRemoteConfigs
cc.BackupSyncJobs = cfg.BackupSyncJobs
diff --git a/internal/orchestrator/pbs_api_apply.go b/internal/orchestrator/pbs_api_apply.go
new file mode 100644
index 0000000..830bcc4
--- /dev/null
+++ b/internal/orchestrator/pbs_api_apply.go
@@ -0,0 +1,695 @@
+package orchestrator
+
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"os"
	"sort"
	"strings"
	"time"

	"github.com/tis24dev/proxsave/internal/logging"
)
+
// normalizeProxmoxCfgKey canonicalizes a Proxmox config key for CLI use:
// surrounding whitespace is dropped, letters are lower-cased, and
// underscores become dashes (the flag spelling proxmox-backup-manager
// expects).
func normalizeProxmoxCfgKey(key string) string {
	trimmed := strings.TrimSpace(key)
	return strings.ReplaceAll(strings.ToLower(trimmed), "_", "-")
}
+
+func buildProxmoxManagerFlags(entries []proxmoxNotificationEntry, skipKeys ...string) []string {
+ if len(entries) == 0 {
+ return nil
+ }
+ skip := make(map[string]struct{}, len(skipKeys)+2)
+ for _, k := range skipKeys {
+ skip[normalizeProxmoxCfgKey(k)] = struct{}{}
+ }
+ // Common no-op keys
+ skip["digest"] = struct{}{}
+ skip["name"] = struct{}{}
+
+ args := make([]string, 0, len(entries)*2)
+ for _, kv := range entries {
+ key := normalizeProxmoxCfgKey(kv.Key)
+ if key == "" {
+ continue
+ }
+ if _, ok := skip[key]; ok {
+ continue
+ }
+ value := strings.TrimSpace(kv.Value)
+ args = append(args, "--"+key)
+ args = append(args, value)
+ }
+ return args
+}
+
+func popEntryValue(entries []proxmoxNotificationEntry, keys ...string) (value string, remaining []proxmoxNotificationEntry, ok bool) {
+ if len(entries) == 0 || len(keys) == 0 {
+ return "", entries, false
+ }
+ want := make(map[string]struct{}, len(keys))
+ for _, k := range keys {
+ want[normalizeProxmoxCfgKey(k)] = struct{}{}
+ }
+
+ remaining = make([]proxmoxNotificationEntry, 0, len(entries))
+ for _, kv := range entries {
+ key := normalizeProxmoxCfgKey(kv.Key)
+ if _, match := want[key]; match && !ok {
+ value = strings.TrimSpace(kv.Value)
+ ok = true
+ continue
+ }
+ remaining = append(remaining, kv)
+ }
+ return value, remaining, ok
+}
+
// runPBSManagerRedacted executes proxmox-backup-manager with args. On
// failure, the returned error embeds the full command line with sensitive
// material scrubbed: values belonging to any flag in redactFlags are
// redacted via redactCLIArgs, and the argument positions listed in
// redactIndexes are blanked outright. The raw command output is returned
// in both the success and failure cases so callers can inspect it.
func runPBSManagerRedacted(ctx context.Context, args []string, redactFlags []string, redactIndexes []int) ([]byte, error) {
	out, err := restoreCmd.Run(ctx, "proxmox-backup-manager", args...)
	if err == nil {
		return out, nil
	}
	// Build a safe-to-log copy of the args before embedding them in the error.
	redacted := redactCLIArgs(args, redactFlags)
	for _, idx := range redactIndexes {
		if idx >= 0 && idx < len(redacted) {
			redacted[idx] = ""
		}
	}
	return out, fmt.Errorf("proxmox-backup-manager %s failed: %w", strings.Join(redacted, " "), err)
}
+
// runPBSManager executes proxmox-backup-manager with args and no redaction
// of the command line in error messages.
func runPBSManager(ctx context.Context, args ...string) ([]byte, error) {
	return runPBSManagerRedacted(ctx, args, nil, nil)
}
+
// runPBSManagerSensitive executes proxmox-backup-manager with args,
// redacting the values of redactFlags if the command line ends up in an
// error message.
func runPBSManagerSensitive(ctx context.Context, args []string, redactFlags ...string) ([]byte, error) {
	return runPBSManagerRedacted(ctx, args, redactFlags, nil)
}
+
// unwrapPBSJSONData peels the {"data": ...} envelope that
// proxmox-backup-manager wraps around its JSON output. Blank input yields
// nil; input that is not a JSON object, or an object without a non-empty
// "data" member, is returned as-is (whitespace-trimmed).
func unwrapPBSJSONData(raw []byte) []byte {
	trimmed := strings.TrimSpace(string(raw))
	if len(trimmed) == 0 {
		return nil
	}

	var envelope map[string]json.RawMessage
	if json.Unmarshal([]byte(trimmed), &envelope) != nil {
		// Not a JSON object (e.g. a bare array): pass through untouched.
		return []byte(trimmed)
	}
	if payload, found := envelope["data"]; found && strings.TrimSpace(string(payload)) != "" {
		return payload
	}
	return []byte(trimmed)
}
+
// bytesTrimSpace trims leading and trailing whitespace from b.
// It uses bytes.TrimSpace directly, avoiding the two full buffer copies the
// previous string round-trip performed; note the result may alias b's
// backing array (callers here only inspect length/content, never mutate).
func bytesTrimSpace(b []byte) []byte {
	return bytes.TrimSpace(b)
}
+
+func parsePBSListIDs(raw []byte, candidateKeys ...string) ([]string, error) {
+ data := unwrapPBSJSONData(raw)
+ if len(data) == 0 {
+ return nil, nil
+ }
+
+ keys := make([]string, 0, len(candidateKeys))
+ for _, k := range candidateKeys {
+ k = strings.TrimSpace(k)
+ if k == "" {
+ continue
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) == 0 {
+ return nil, fmt.Errorf("no candidate keys provided for PBS list ID parsing")
+ }
+
+ var rows []map[string]any
+ if err := json.Unmarshal(data, &rows); err != nil {
+ return nil, err
+ }
+
+ out := make([]string, 0, len(rows))
+ seen := make(map[string]struct{}, len(rows))
+ for idx, row := range rows {
+ id := ""
+ for _, k := range keys {
+ v, ok := row[k]
+ if !ok || v == nil {
+ continue
+ }
+ s, ok := v.(string)
+ if !ok {
+ continue
+ }
+ id = strings.TrimSpace(s)
+ if id != "" {
+ break
+ }
+ }
+ if id == "" {
+ available := make([]string, 0, len(row))
+ for k := range row {
+ available = append(available, k)
+ }
+ sort.Strings(available)
+ return nil, fmt.Errorf("failed to parse PBS list row %d: none of %v present as non-empty string (available keys: %v)", idx, keys, available)
+ }
+ if _, ok := seen[id]; ok {
+ continue
+ }
+ seen[id] = struct{}{}
+ out = append(out, id)
+ }
+ sort.Strings(out)
+ return out, nil
+}
+
// ensurePBSServicesForAPI checks the preconditions for applying PBS
// configuration through proxmox-backup-manager — a real system filesystem,
// root privileges, and a working proxmox-backup-manager binary — and then
// attempts (best-effort) to start the PBS services so subsequent API calls
// can reach a live daemon.
func ensurePBSServicesForAPI(ctx context.Context, logger *logging.Logger) error {
	if logger == nil {
		logger = logging.GetDefaultLogger()
	}

	// Refuse to run against a non-system (test/in-memory) filesystem or
	// without root: API apply mutates live system state.
	if !isRealRestoreFS(restoreFS) {
		return fmt.Errorf("non-system filesystem in use")
	}
	if os.Geteuid() != 0 {
		return fmt.Errorf("requires root privileges")
	}

	if _, err := restoreCmd.Run(ctx, "proxmox-backup-manager", "version"); err != nil {
		return fmt.Errorf("proxmox-backup-manager not available: %w", err)
	}

	// Best-effort: ensure services are started before API apply.
	// Time budget: two service starts plus one verification, with headroom.
	startCtx, cancel := context.WithTimeout(ctx, 2*serviceStartTimeout+serviceVerifyTimeout+5*time.Second)
	defer cancel()
	if err := startPBSServices(startCtx, logger); err != nil {
		return err
	}
	return nil
}
+
// applyPBSRemoteCfgViaAPI replays the staged etc/proxmox-backup/remote.cfg
// through "proxmox-backup-manager remote ...". Each staged section is
// created, falling back to update when create fails (e.g. the remote
// already exists). With strict set, remotes present on the system but
// absent from the staged file are removed first; removal failures are
// logged and skipped. A missing staged file is a no-op.
func applyPBSRemoteCfgViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	remoteRaw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/remote.cfg")
	if err != nil {
		return err
	}
	if !present {
		// Nothing staged: leave the current remote configuration untouched.
		return nil
	}
	sections, err := parseProxmoxNotificationSections(remoteRaw)
	if err != nil {
		return fmt.Errorf("parse staged remote.cfg: %w", err)
	}

	// Index desired remotes by section name (the remote ID).
	desired := make(map[string]proxmoxNotificationSection, len(sections))
	for _, s := range sections {
		name := strings.TrimSpace(s.Name)
		if name == "" {
			continue
		}
		desired[name] = s
	}

	if strict {
		// Strict (1:1) mode: drop remotes not present in the staged config.
		out, err := runPBSManager(ctx, "remote", "list", "--output-format=json")
		if err != nil {
			return err
		}
		current, err := parsePBSListIDs(out, "id", "name")
		if err != nil {
			return fmt.Errorf("parse remote list: %w", err)
		}
		for _, id := range current {
			if _, ok := desired[id]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "remote", "remove", id); err != nil {
				logger.Warning("PBS API apply: remote remove %s failed (continuing): %v", id, err)
			}
		}
	}

	// Apply in sorted ID order for deterministic behavior and logging.
	ids := make([]string, 0, len(desired))
	for id := range desired {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	for _, id := range ids {
		s := desired[id]
		flags := buildProxmoxManagerFlags(s.Entries)
		// Create first; if that fails, assume it exists and try update.
		// The --password value is redacted in any error message.
		createArgs := append([]string{"remote", "create", id}, flags...)
		if _, err := runPBSManagerSensitive(ctx, createArgs, "--password"); err != nil {
			updateArgs := append([]string{"remote", "update", id}, flags...)
			if _, upErr := runPBSManagerSensitive(ctx, updateArgs, "--password"); upErr != nil {
				return fmt.Errorf("remote %s: %v (create) / %v (update)", id, err, upErr)
			}
		}
	}

	return nil
}
+
// applyPBSS3CfgViaAPI replays the staged etc/proxmox-backup/s3.cfg through
// "proxmox-backup-manager s3 endpoint ...". Each staged section is created,
// falling back to update when create fails. With strict set, endpoints on
// the system that are absent from the staged file are removed first
// (failures logged and skipped). A missing staged file is a no-op.
func applyPBSS3CfgViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	s3Raw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/s3.cfg")
	if err != nil {
		return err
	}
	if !present {
		return nil
	}
	sections, err := parseProxmoxNotificationSections(s3Raw)
	if err != nil {
		return fmt.Errorf("parse staged s3.cfg: %w", err)
	}

	// Index desired endpoints by section name (the endpoint ID).
	desired := make(map[string]proxmoxNotificationSection, len(sections))
	for _, s := range sections {
		id := strings.TrimSpace(s.Name)
		if id == "" {
			continue
		}
		desired[id] = s
	}

	if strict {
		// Strict (1:1) mode: drop endpoints not present in the staged config.
		out, err := runPBSManager(ctx, "s3", "endpoint", "list", "--output-format=json")
		if err != nil {
			return err
		}
		current, err := parsePBSListIDs(out, "id", "name")
		if err != nil {
			return fmt.Errorf("parse s3 endpoint list: %w", err)
		}
		for _, id := range current {
			if _, ok := desired[id]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "s3", "endpoint", "remove", id); err != nil {
				logger.Warning("PBS API apply: s3 endpoint remove %s failed (continuing): %v", id, err)
			}
		}
	}

	// Apply in sorted ID order for deterministic behavior and logging.
	ids := make([]string, 0, len(desired))
	for id := range desired {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	for _, id := range ids {
		s := desired[id]
		flags := buildProxmoxManagerFlags(s.Entries)
		// Create first; fall back to update. Credential flags are redacted
		// in any error message.
		createArgs := append([]string{"s3", "endpoint", "create", id}, flags...)
		if _, err := runPBSManagerSensitive(ctx, createArgs, "--access-key", "--secret-key"); err != nil {
			updateArgs := append([]string{"s3", "endpoint", "update", id}, flags...)
			if _, upErr := runPBSManagerSensitive(ctx, updateArgs, "--access-key", "--secret-key"); upErr != nil {
				return fmt.Errorf("s3 endpoint %s: %v (create) / %v (update)", id, err, upErr)
			}
		}
	}

	return nil
}
+
// applyPBSDatastoreCfgViaAPI replays the staged
// etc/proxmox-backup/datastore.cfg through "proxmox-backup-manager
// datastore ...". Existing datastores are updated; missing ones are
// created. Path handling is deliberately conservative: when an existing
// datastore's path differs from the staged one, strict mode removes and
// recreates it, while non-strict mode keeps the current path and only
// applies the remaining flags. With strict set, datastores absent from the
// staged file are removed first (failures logged and skipped). A missing
// staged file is a no-op.
func applyPBSDatastoreCfgViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	dsRaw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/datastore.cfg")
	if err != nil {
		return err
	}
	if !present {
		return nil
	}
	sections, err := parseProxmoxNotificationSections(dsRaw)
	if err != nil {
		return fmt.Errorf("parse staged datastore.cfg: %w", err)
	}

	// Index desired datastores by section name.
	desired := make(map[string]proxmoxNotificationSection, len(sections))
	for _, s := range sections {
		name := strings.TrimSpace(s.Name)
		if name == "" {
			continue
		}
		desired[name] = s
	}

	// Snapshot the current datastores and their paths (best-effort; a list
	// failure simply leaves currentPaths empty and everything is created).
	// The name may appear under "name", "store", or "id" depending on the
	// PBS version's JSON output.
	type dsRow struct {
		Name  string `json:"name"`
		Store string `json:"store"`
		ID    string `json:"id"`
		Path  string `json:"path"`
	}
	currentPaths := make(map[string]string)
	if out, err := runPBSManager(ctx, "datastore", "list", "--output-format=json"); err == nil {
		var rows []dsRow
		if err := json.Unmarshal(unwrapPBSJSONData(out), &rows); err == nil {
			for _, row := range rows {
				name := strings.TrimSpace(row.Name)
				if name == "" {
					name = strings.TrimSpace(row.Store)
				}
				if name == "" {
					name = strings.TrimSpace(row.ID)
				}
				if name == "" {
					continue
				}
				currentPaths[name] = strings.TrimSpace(row.Path)
			}
		}
	}

	if strict {
		// Strict (1:1) mode: drop datastores not present in the staged config.
		current := make([]string, 0, len(currentPaths))
		for name := range currentPaths {
			current = append(current, name)
		}
		sort.Strings(current)
		for _, name := range current {
			if _, ok := desired[name]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "datastore", "remove", name); err != nil {
				logger.Warning("PBS API apply: datastore remove %s failed (continuing): %v", name, err)
			}
		}
	}

	// Apply in sorted name order for deterministic behavior and logging.
	names := make([]string, 0, len(desired))
	for name := range desired {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		s := desired[name]
		// "path" is a positional argument to "datastore create", not a flag,
		// so pop it out of the entries before building the flag list.
		path, entries, ok := popEntryValue(s.Entries, "path")
		if !ok || strings.TrimSpace(path) == "" {
			logger.Warning("PBS API apply: datastore %s missing path; skipping", name)
			continue
		}
		flags := buildProxmoxManagerFlags(entries)
		if currentPath, exists := currentPaths[name]; exists {
			if currentPath != "" && strings.TrimSpace(currentPath) != strings.TrimSpace(path) {
				if strict {
					// Strict mode enforces the staged path: remove + recreate.
					if _, err := runPBSManager(ctx, "datastore", "remove", name); err != nil {
						return fmt.Errorf("datastore %s: path mismatch (%s != %s) and remove failed: %w", name, currentPath, path, err)
					}
					createArgs := append([]string{"datastore", "create", name, path}, flags...)
					if _, err := runPBSManager(ctx, createArgs...); err != nil {
						return fmt.Errorf("datastore %s: recreate after path mismatch failed: %w", name, err)
					}
					continue
				}
				logger.Warning("PBS API apply: datastore %s path mismatch (%s != %s); leaving path unchanged (use Clean 1:1 restore to enforce 1:1)", name, currentPath, path)
			}

			updateArgs := append([]string{"datastore", "update", name}, flags...)
			if _, err := runPBSManager(ctx, updateArgs...); err != nil {
				return fmt.Errorf("datastore %s: update failed: %w", name, err)
			}
			continue
		}

		// Not present: create, falling back to update in case the list
		// snapshot was stale or unavailable.
		createArgs := append([]string{"datastore", "create", name, path}, flags...)
		if _, err := runPBSManager(ctx, createArgs...); err != nil {
			updateArgs := append([]string{"datastore", "update", name}, flags...)
			if _, upErr := runPBSManager(ctx, updateArgs...); upErr != nil {
				return fmt.Errorf("datastore %s: %v (create) / %v (update)", name, err, upErr)
			}
		}
	}

	return nil
}
+
// applyPBSSyncCfgViaAPI replays the staged etc/proxmox-backup/sync.cfg
// through "proxmox-backup-manager sync-job ...". Each staged section is
// created, falling back to update when create fails. With strict set,
// sync jobs absent from the staged file are removed first (failures logged
// and skipped). A missing staged file is a no-op.
func applyPBSSyncCfgViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	raw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/sync.cfg")
	if err != nil {
		return err
	}
	if !present {
		return nil
	}
	sections, err := parseProxmoxNotificationSections(raw)
	if err != nil {
		return fmt.Errorf("parse staged sync.cfg: %w", err)
	}

	// Index desired sync jobs by section name (the job ID).
	desired := make(map[string]proxmoxNotificationSection, len(sections))
	for _, s := range sections {
		id := strings.TrimSpace(s.Name)
		if id == "" {
			continue
		}
		desired[id] = s
	}

	if strict {
		// Strict (1:1) mode: drop jobs not present in the staged config.
		out, err := runPBSManager(ctx, "sync-job", "list", "--output-format=json")
		if err != nil {
			return err
		}
		current, err := parsePBSListIDs(out, "id", "name")
		if err != nil {
			return fmt.Errorf("parse sync-job list: %w", err)
		}
		for _, id := range current {
			if _, ok := desired[id]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "sync-job", "remove", id); err != nil {
				logger.Warning("PBS API apply: sync-job remove %s failed (continuing): %v", id, err)
			}
		}
	}

	// Apply in sorted ID order for deterministic behavior and logging.
	ids := make([]string, 0, len(desired))
	for id := range desired {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	for _, id := range ids {
		s := desired[id]
		flags := buildProxmoxManagerFlags(s.Entries)
		// Create first; fall back to update when the job already exists.
		createArgs := append([]string{"sync-job", "create", id}, flags...)
		if _, err := runPBSManager(ctx, createArgs...); err != nil {
			updateArgs := append([]string{"sync-job", "update", id}, flags...)
			if _, upErr := runPBSManager(ctx, updateArgs...); upErr != nil {
				return fmt.Errorf("sync-job %s: %v (create) / %v (update)", id, err, upErr)
			}
		}
	}

	return nil
}
+
// applyPBSVerificationCfgViaAPI replays the staged
// etc/proxmox-backup/verification.cfg through "proxmox-backup-manager
// verify-job ...". Each staged section is created, falling back to update
// when create fails. With strict set, verify jobs absent from the staged
// file are removed first (failures logged and skipped). A missing staged
// file is a no-op.
func applyPBSVerificationCfgViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	raw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/verification.cfg")
	if err != nil {
		return err
	}
	if !present {
		return nil
	}
	sections, err := parseProxmoxNotificationSections(raw)
	if err != nil {
		return fmt.Errorf("parse staged verification.cfg: %w", err)
	}

	// Index desired verify jobs by section name (the job ID).
	desired := make(map[string]proxmoxNotificationSection, len(sections))
	for _, s := range sections {
		id := strings.TrimSpace(s.Name)
		if id == "" {
			continue
		}
		desired[id] = s
	}

	if strict {
		// Strict (1:1) mode: drop jobs not present in the staged config.
		out, err := runPBSManager(ctx, "verify-job", "list", "--output-format=json")
		if err != nil {
			return err
		}
		current, err := parsePBSListIDs(out, "id", "name")
		if err != nil {
			return fmt.Errorf("parse verify-job list: %w", err)
		}
		for _, id := range current {
			if _, ok := desired[id]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "verify-job", "remove", id); err != nil {
				logger.Warning("PBS API apply: verify-job remove %s failed (continuing): %v", id, err)
			}
		}
	}

	// Apply in sorted ID order for deterministic behavior and logging.
	ids := make([]string, 0, len(desired))
	for id := range desired {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	for _, id := range ids {
		s := desired[id]
		flags := buildProxmoxManagerFlags(s.Entries)
		// Create first; fall back to update when the job already exists.
		createArgs := append([]string{"verify-job", "create", id}, flags...)
		if _, err := runPBSManager(ctx, createArgs...); err != nil {
			updateArgs := append([]string{"verify-job", "update", id}, flags...)
			if _, upErr := runPBSManager(ctx, updateArgs...); upErr != nil {
				return fmt.Errorf("verify-job %s: %v (create) / %v (update)", id, err, upErr)
			}
		}
	}

	return nil
}
+
// applyPBSPruneCfgViaAPI replays the staged etc/proxmox-backup/prune.cfg
// through "proxmox-backup-manager prune-job ...". Each staged section is
// created, falling back to update when create fails. With strict set,
// prune jobs absent from the staged file are removed first (failures
// logged and skipped). A missing staged file is a no-op.
func applyPBSPruneCfgViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	raw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/prune.cfg")
	if err != nil {
		return err
	}
	if !present {
		return nil
	}
	sections, err := parseProxmoxNotificationSections(raw)
	if err != nil {
		return fmt.Errorf("parse staged prune.cfg: %w", err)
	}

	// Index desired prune jobs by section name (the job ID).
	desired := make(map[string]proxmoxNotificationSection, len(sections))
	for _, s := range sections {
		id := strings.TrimSpace(s.Name)
		if id == "" {
			continue
		}
		desired[id] = s
	}

	if strict {
		// Strict (1:1) mode: drop jobs not present in the staged config.
		out, err := runPBSManager(ctx, "prune-job", "list", "--output-format=json")
		if err != nil {
			return err
		}
		current, err := parsePBSListIDs(out, "id", "name")
		if err != nil {
			return fmt.Errorf("parse prune-job list: %w", err)
		}
		for _, id := range current {
			if _, ok := desired[id]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "prune-job", "remove", id); err != nil {
				logger.Warning("PBS API apply: prune-job remove %s failed (continuing): %v", id, err)
			}
		}
	}

	// Apply in sorted ID order for deterministic behavior and logging.
	ids := make([]string, 0, len(desired))
	for id := range desired {
		ids = append(ids, id)
	}
	sort.Strings(ids)
	for _, id := range ids {
		s := desired[id]
		flags := buildProxmoxManagerFlags(s.Entries)
		// Create first; fall back to update when the job already exists.
		createArgs := append([]string{"prune-job", "create", id}, flags...)
		if _, err := runPBSManager(ctx, createArgs...); err != nil {
			updateArgs := append([]string{"prune-job", "update", id}, flags...)
			if _, upErr := runPBSManager(ctx, updateArgs...); upErr != nil {
				return fmt.Errorf("prune-job %s: %v (create) / %v (update)", id, err, upErr)
			}
		}
	}

	return nil
}
+
// applyPBSTrafficControlCfgViaAPI replays the staged
// etc/proxmox-backup/traffic-control.cfg through "proxmox-backup-manager
// traffic-control ...". Each staged section is created, falling back to
// update when create fails. With strict set, rules absent from the staged
// file are removed first (failures logged and skipped). A missing staged
// file is a no-op.
func applyPBSTrafficControlCfgViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	raw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/traffic-control.cfg")
	if err != nil {
		return err
	}
	if !present {
		return nil
	}
	sections, err := parseProxmoxNotificationSections(raw)
	if err != nil {
		return fmt.Errorf("parse staged traffic-control.cfg: %w", err)
	}

	// Index desired rules by section name.
	desired := make(map[string]proxmoxNotificationSection, len(sections))
	for _, s := range sections {
		name := strings.TrimSpace(s.Name)
		if name == "" {
			continue
		}
		desired[name] = s
	}

	if strict {
		// Strict (1:1) mode: drop rules not present in the staged config.
		// Note: for traffic-control "name" is preferred over "id".
		out, err := runPBSManager(ctx, "traffic-control", "list", "--output-format=json")
		if err != nil {
			return err
		}
		current, err := parsePBSListIDs(out, "name", "id")
		if err != nil {
			return fmt.Errorf("parse traffic-control list: %w", err)
		}
		for _, name := range current {
			if _, ok := desired[name]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "traffic-control", "remove", name); err != nil {
				logger.Warning("PBS API apply: traffic-control remove %s failed (continuing): %v", name, err)
			}
		}
	}

	// Apply in sorted name order for deterministic behavior and logging.
	names := make([]string, 0, len(desired))
	for name := range desired {
		names = append(names, name)
	}
	sort.Strings(names)
	for _, name := range names {
		s := desired[name]
		flags := buildProxmoxManagerFlags(s.Entries)
		// Create first; fall back to update when the rule already exists.
		createArgs := append([]string{"traffic-control", "create", name}, flags...)
		if _, err := runPBSManager(ctx, createArgs...); err != nil {
			updateArgs := append([]string{"traffic-control", "update", name}, flags...)
			if _, upErr := runPBSManager(ctx, updateArgs...); upErr != nil {
				return fmt.Errorf("traffic-control %s: %v (create) / %v (update)", name, err, upErr)
			}
		}
	}

	return nil
}
+
// applyPBSNodeCfgViaAPI replays the staged etc/proxmox-backup/node.cfg
// through "proxmox-backup-manager node update". Only the first parsed
// section is applied, since node update always targets the local node.
// A missing or empty staged file is a no-op.
func applyPBSNodeCfgViaAPI(ctx context.Context, stageRoot string) error {
	raw, present, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/node.cfg")
	if err != nil {
		return err
	}
	if !present {
		return nil
	}
	sections, err := parseProxmoxNotificationSections(raw)
	if err != nil {
		return fmt.Errorf("parse staged node.cfg: %w", err)
	}
	if len(sections) == 0 {
		return nil
	}
	// node update applies to the local node; use the first section.
	flags := buildProxmoxManagerFlags(sections[0].Entries)
	args := append([]string{"node", "update"}, flags...)
	if _, err := runPBSManager(ctx, args...); err != nil {
		return err
	}
	return nil
}
diff --git a/internal/orchestrator/pbs_notifications_api_apply.go b/internal/orchestrator/pbs_notifications_api_apply.go
new file mode 100644
index 0000000..032debb
--- /dev/null
+++ b/internal/orchestrator/pbs_notifications_api_apply.go
@@ -0,0 +1,243 @@
+package orchestrator
+
+import (
+ "context"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/tis24dev/proxsave/internal/logging"
+)
+
// applyPBSNotificationsViaAPI reconciles PBS notification endpoints and matchers
// from the staged notifications.cfg / notifications-priv.cfg using
// proxmox-backup-manager. Secrets from the -priv file are merged into their
// matching public endpoint section and redacted in logged command lines.
// When strict is true (clean 1:1 mode) endpoints/matchers absent from the
// staged config are removed first; in merge mode existing objects are kept.
// A missing staged notifications.cfg is not an error.
func applyPBSNotificationsViaAPI(ctx context.Context, logger *logging.Logger, stageRoot string, strict bool) error {
	cfgRaw, cfgPresent, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/notifications.cfg")
	if err != nil {
		return err
	}
	if !cfgPresent {
		return nil
	}
	// The -priv file holds secrets (passwords/tokens) and may legitimately be absent.
	privRaw, _, err := readStageFileOptional(stageRoot, "etc/proxmox-backup/notifications-priv.cfg")
	if err != nil {
		return err
	}

	cfgSections, err := parseProxmoxNotificationSections(cfgRaw)
	if err != nil {
		return fmt.Errorf("parse staged notifications.cfg: %w", err)
	}
	privSections, err := parseProxmoxNotificationSections(privRaw)
	if err != nil {
		return fmt.Errorf("parse staged notifications-priv.cfg: %w", err)
	}

	// Index private (secret) entries by "type:name" so they can be merged into
	// the matching public endpoint section, and track which flags must be
	// redacted when the resulting command line is logged.
	privByKey := make(map[string][]proxmoxNotificationEntry)
	privRedactFlagsByKey := make(map[string][]string)
	for _, s := range privSections {
		if strings.TrimSpace(s.Type) == "" || strings.TrimSpace(s.Name) == "" {
			continue
		}
		key := fmt.Sprintf("%s:%s", strings.TrimSpace(s.Type), strings.TrimSpace(s.Name))
		privByKey[key] = append([]proxmoxNotificationEntry{}, s.Entries...)
		privRedactFlagsByKey[key] = append([]string(nil), notificationRedactFlagsFromEntries(s.Entries)...)
	}

	// endpointSection is a staged endpoint prepared for CLI apply: its
	// positional arguments have been popped out of Entries, and redaction
	// metadata (flag names plus positional-argument indices) is attached.
	type endpointSection struct {
		section      proxmoxNotificationSection
		redactFlags  []string
		redactIndex  []int
		positional   []string
		sectionKey   string
		endpointType string
	}

	var endpoints []endpointSection
	var matchers []proxmoxNotificationSection

	for _, s := range cfgSections {
		typ := strings.TrimSpace(s.Type)
		name := strings.TrimSpace(s.Name)
		if typ == "" || name == "" {
			continue
		}
		switch typ {
		case "smtp", "sendmail", "gotify", "webhook":
			key := fmt.Sprintf("%s:%s", typ, name)
			if priv, ok := privByKey[key]; ok && len(priv) > 0 {
				s.Entries = append(s.Entries, priv...)
			}
			redactFlags := notificationRedactFlags(s)
			if extra := privRedactFlagsByKey[key]; len(extra) > 0 {
				redactFlags = append(redactFlags, extra...)
			}

			pos := []string{}
			entries := s.Entries

			// Each endpoint type requires certain values as positional CLI
			// arguments rather than --flags; pop them out of the entry list.
			// Endpoints missing a mandatory value are skipped with a warning.
			switch typ {
			case "smtp":
				recipients, remaining, ok := popEntryValue(entries, "recipients", "mailto", "mail-to")
				if !ok || strings.TrimSpace(recipients) == "" {
					logger.Warning("PBS notifications API apply: smtp endpoint %s missing recipients; skipping", name)
					continue
				}
				pos = append(pos, recipients)
				s.Entries = remaining
			case "sendmail":
				mailto, remaining, ok := popEntryValue(entries, "mailto", "mail-to", "recipients")
				if !ok || strings.TrimSpace(mailto) == "" {
					logger.Warning("PBS notifications API apply: sendmail endpoint %s missing mailto; skipping", name)
					continue
				}
				pos = append(pos, mailto)
				s.Entries = remaining
			case "gotify":
				server, remaining, ok := popEntryValue(entries, "server")
				if !ok || strings.TrimSpace(server) == "" {
					logger.Warning("PBS notifications API apply: gotify endpoint %s missing server; skipping", name)
					continue
				}
				token, remaining2, ok := popEntryValue(remaining, "token")
				if !ok || strings.TrimSpace(token) == "" {
					logger.Warning("PBS notifications API apply: gotify endpoint %s missing token; skipping", name)
					continue
				}
				pos = append(pos, server, token)
				s.Entries = remaining2
			case "webhook":
				url, remaining, ok := popEntryValue(entries, "url")
				if !ok || strings.TrimSpace(url) == "" {
					logger.Warning("PBS notifications API apply: webhook endpoint %s missing url; skipping", name)
					continue
				}
				pos = append(pos, url)
				s.Entries = remaining
			}

			redactIndex := []int(nil)
			if typ == "gotify" {
				// proxmox-backup-manager notification endpoint gotify create/update
				// Argv layout: notification(0) endpoint(1) gotify(2) create(3)
				// <name>(4) <server>(5) <token>(6) — index 6 is the secret token.
				redactIndex = []int{6}
			}

			endpoints = append(endpoints, endpointSection{
				section:      s,
				redactFlags:  redactFlags,
				redactIndex:  redactIndex,
				positional:   pos,
				sectionKey:   key,
				endpointType: typ,
			})
		case "matcher":
			matchers = append(matchers, s)
		default:
			logger.Warning("PBS notifications API apply: unknown section %q (%s); skipping", typ, name)
		}
	}

	// In strict mode, remove matchers first so endpoint cleanup isn't blocked by references.
	desiredMatchers := make(map[string]proxmoxNotificationSection, len(matchers))
	for _, m := range matchers {
		name := strings.TrimSpace(m.Name)
		if name == "" {
			continue
		}
		desiredMatchers[name] = m
	}

	matcherNames := make([]string, 0, len(desiredMatchers))
	for name := range desiredMatchers {
		matcherNames = append(matcherNames, name)
	}
	sort.Strings(matcherNames)

	if strict {
		out, err := runPBSManager(ctx, "notification", "matcher", "list", "--output-format=json")
		if err != nil {
			return err
		}
		current, err := parsePBSListIDs(out, "name", "id")
		if err != nil {
			return fmt.Errorf("parse matcher list: %w", err)
		}
		for _, name := range current {
			if _, ok := desiredMatchers[name]; ok {
				continue
			}
			if _, err := runPBSManager(ctx, "notification", "matcher", "remove", name); err != nil {
				// Built-in matchers may not be removable; keep going.
				logger.Warning("PBS notifications API apply: matcher remove %s failed (continuing): %v", name, err)
			}
		}
	}

	// Endpoints first (matchers refer to targets/endpoints).
	// Processed per type so strict-mode list/remove can query the matching
	// `notification endpoint <type> list` subcommand.
	for _, typ := range []string{"smtp", "sendmail", "gotify", "webhook"} {
		desiredNames := make(map[string]endpointSection)
		for _, e := range endpoints {
			if e.endpointType != typ {
				continue
			}
			name := strings.TrimSpace(e.section.Name)
			if name == "" {
				continue
			}
			desiredNames[name] = e
		}

		// Sorted names keep the apply order (and logs/tests) deterministic.
		names := make([]string, 0, len(desiredNames))
		for name := range desiredNames {
			names = append(names, name)
		}
		sort.Strings(names)

		if strict {
			out, err := runPBSManager(ctx, "notification", "endpoint", typ, "list", "--output-format=json")
			if err != nil {
				return err
			}
			current, err := parsePBSListIDs(out, "name", "id")
			if err != nil {
				return fmt.Errorf("parse endpoint list (%s): %w", typ, err)
			}
			for _, name := range current {
				if _, ok := desiredNames[name]; ok {
					continue
				}
				if _, err := runPBSManager(ctx, "notification", "endpoint", typ, "remove", name); err != nil {
					// Built-in endpoints may not be removable; keep going.
					logger.Warning("PBS notifications API apply: endpoint remove %s:%s failed (continuing): %v", typ, name, err)
				}
			}
		}

		// Try create first, fall back to update if the object already exists;
		// only fail when both attempts error out.
		for _, name := range names {
			e := desiredNames[name]
			flags := buildProxmoxManagerFlags(e.section.Entries)
			createArgs := append([]string{"notification", "endpoint", typ, "create", name}, e.positional...)
			createArgs = append(createArgs, flags...)
			if _, err := runPBSManagerRedacted(ctx, createArgs, e.redactFlags, e.redactIndex); err != nil {
				updateArgs := append([]string{"notification", "endpoint", typ, "update", name}, e.positional...)
				updateArgs = append(updateArgs, flags...)
				if _, upErr := runPBSManagerRedacted(ctx, updateArgs, e.redactFlags, e.redactIndex); upErr != nil {
					return fmt.Errorf("endpoint %s:%s: %v (create) / %v (update)", typ, name, err, upErr)
				}
			}
		}
	}

	// Then matchers. Same create-then-update fallback as for endpoints.
	for _, name := range matcherNames {
		m := desiredMatchers[name]
		flags := buildProxmoxManagerFlags(m.Entries)
		createArgs := append([]string{"notification", "matcher", "create", name}, flags...)
		if _, err := runPBSManager(ctx, createArgs...); err != nil {
			updateArgs := append([]string{"notification", "matcher", "update", name}, flags...)
			if _, upErr := runPBSManager(ctx, updateArgs...); upErr != nil {
				return fmt.Errorf("matcher %s: %v (create) / %v (update)", name, err, upErr)
			}
		}
	}

	return nil
}
diff --git a/internal/orchestrator/pbs_notifications_api_apply_test.go b/internal/orchestrator/pbs_notifications_api_apply_test.go
new file mode 100644
index 0000000..51a0ca7
--- /dev/null
+++ b/internal/orchestrator/pbs_notifications_api_apply_test.go
@@ -0,0 +1,63 @@
+package orchestrator
+
+import (
+ "context"
+ "os"
+ "reflect"
+ "testing"
+
+ "github.com/tis24dev/proxsave/internal/logging"
+ "github.com/tis24dev/proxsave/internal/types"
+)
+
// TestApplyPBSNotificationsViaAPI_CreatesEndpointAndMatcher verifies that a
// staged notifications.cfg plus notifications-priv.cfg is translated into the
// expected proxmox-backup-manager invocations: the secret from the -priv file
// is merged into the smtp endpoint create call (recipients as a positional
// argument), and the matcher is created after the endpoint.
func TestApplyPBSNotificationsViaAPI_CreatesEndpointAndMatcher(t *testing.T) {
	// Swap in fakes for the command runner and filesystem; restore originals on cleanup.
	origCmd := restoreCmd
	origFS := restoreFS
	t.Cleanup(func() {
		restoreCmd = origCmd
		restoreFS = origFS
	})

	fakeFS := NewFakeFS()
	t.Cleanup(func() { _ = os.RemoveAll(fakeFS.Root) })
	restoreFS = fakeFS

	stageRoot := "/stage"

	// Public endpoint config plus a matcher referencing it.
	if err := fakeFS.WriteFile(stageRoot+"/etc/proxmox-backup/notifications.cfg", []byte(
		"smtp: Gmail-relay\n"+
			" recipients user@example.com\n"+
			" from-address pbs@example.com\n"+
			" server smtp.gmail.com\n"+
			" port 587\n"+
			" username user\n"+
			"\n"+
			"matcher: default-matcher\n"+
			" target Gmail-relay\n",
	), 0o640); err != nil {
		t.Fatalf("write staged notifications.cfg: %v", err)
	}
	// Matching -priv section holding the secret that must be merged into the create call.
	if err := fakeFS.WriteFile(stageRoot+"/etc/proxmox-backup/notifications-priv.cfg", []byte(
		"smtp: Gmail-relay\n"+
			" password secret123\n",
	), 0o600); err != nil {
		t.Fatalf("write staged notifications-priv.cfg: %v", err)
	}

	// Record every CLI invocation instead of executing it.
	runner := &fakeCommandRunner{}
	restoreCmd = runner

	logger := logging.New(types.LogLevelDebug, false)
	// strict=false → merge mode: no list/remove calls expected, only creates.
	if err := applyPBSNotificationsViaAPI(context.Background(), logger, stageRoot, false); err != nil {
		t.Fatalf("applyPBSNotificationsViaAPI error: %v", err)
	}

	want := []string{
		"proxmox-backup-manager notification endpoint smtp create Gmail-relay user@example.com --from-address pbs@example.com --server smtp.gmail.com --port 587 --username user --password secret123",
		"proxmox-backup-manager notification matcher create default-matcher --target Gmail-relay",
	}
	if !reflect.DeepEqual(runner.calls, want) {
		t.Fatalf("calls=%v want %v", runner.calls, want)
	}
}
+
diff --git a/internal/orchestrator/pbs_restore_behavior.go b/internal/orchestrator/pbs_restore_behavior.go
new file mode 100644
index 0000000..a55b57d
--- /dev/null
+++ b/internal/orchestrator/pbs_restore_behavior.go
@@ -0,0 +1,33 @@
+package orchestrator
+
// PBSRestoreBehavior controls how PBS objects are reconciled during staged apply.
// It is intentionally chosen at restore time (UI), not via backup.env.
type PBSRestoreBehavior int

const (
	PBSRestoreBehaviorUnspecified PBSRestoreBehavior = iota
	PBSRestoreBehaviorMerge
	PBSRestoreBehaviorClean
)

// String returns the short machine-friendly label for the behavior.
// Unknown values (including the zero value) map to "unspecified".
func (b PBSRestoreBehavior) String() string {
	if b == PBSRestoreBehaviorMerge {
		return "merge"
	}
	if b == PBSRestoreBehaviorClean {
		return "clean-1to1"
	}
	return "unspecified"
}

// DisplayName returns the human-readable label shown in the restore UI.
// Unknown values (including the zero value) map to "Unspecified".
func (b PBSRestoreBehavior) DisplayName() string {
	if b == PBSRestoreBehaviorMerge {
		return "Merge (existing PBS)"
	}
	if b == PBSRestoreBehaviorClean {
		return "Clean 1:1 (fresh PBS install)"
	}
	return "Unspecified"
}
diff --git a/internal/orchestrator/pbs_staged_apply.go b/internal/orchestrator/pbs_staged_apply.go
index 3111daa..be72037 100644
--- a/internal/orchestrator/pbs_staged_apply.go
+++ b/internal/orchestrator/pbs_staged_apply.go
@@ -2,6 +2,7 @@ package orchestrator
import (
"context"
+ "encoding/json"
"errors"
"fmt"
"os"
@@ -39,34 +40,151 @@ func maybeApplyPBSConfigsFromStage(ctx context.Context, logger *logging.Logger,
return nil
}
- if plan.HasCategoryID("datastore_pbs") {
- if err := applyPBSS3CfgFromStage(ctx, logger, stageRoot); err != nil {
- logger.Warning("PBS staged apply: s3.cfg: %v", err)
+ behavior := plan.PBSRestoreBehavior
+ strict := behavior == PBSRestoreBehaviorClean
+ allowFileFallback := behavior == PBSRestoreBehaviorClean
+
+ needsAPI := plan.HasCategoryID("pbs_host") || plan.HasCategoryID("datastore_pbs") || plan.HasCategoryID("pbs_remotes") || plan.HasCategoryID("pbs_jobs")
+ apiAvailable := false
+ if needsAPI {
+ if err := ensurePBSServicesForAPI(ctx, logger); err != nil {
+ if allowFileFallback {
+ logger.Warning("PBS API apply unavailable; falling back to file-based staged apply where possible: %v", err)
+ } else {
+ logger.Warning("PBS API apply unavailable; skipping API-applied PBS categories (merge mode): %v", err)
+ }
+ } else {
+ apiAvailable = true
}
- if err := applyPBSDatastoreCfgFromStage(ctx, logger, stageRoot); err != nil {
- logger.Warning("PBS staged apply: datastore.cfg: %v", err)
+ }
+
+ if plan.HasCategoryID("pbs_host") {
+ // Always restore file-only configs (no stable API coverage yet).
+ // ACME should be applied before node config (node.cfg references ACME accounts/plugins).
+ for _, rel := range []string{
+ "etc/proxmox-backup/acme/accounts.cfg",
+ "etc/proxmox-backup/acme/plugins.cfg",
+ "etc/proxmox-backup/metricserver.cfg",
+ "etc/proxmox-backup/proxy.cfg",
+ } {
+ if err := applyPBSConfigFileFromStage(ctx, logger, stageRoot, rel); err != nil {
+ logger.Warning("PBS staged apply: %s: %v", rel, err)
+ }
+ }
+
+ if apiAvailable {
+ if err := applyPBSTrafficControlCfgViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ logger.Warning("PBS API apply: traffic-control failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based traffic-control.cfg")
+ _ = applyPBSConfigFileFromStage(ctx, logger, stageRoot, "etc/proxmox-backup/traffic-control.cfg")
+ }
+ }
+ if err := applyPBSNodeCfgViaAPI(ctx, stageRoot); err != nil {
+ logger.Warning("PBS API apply: node config failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based node.cfg")
+ _ = applyPBSConfigFileFromStage(ctx, logger, stageRoot, "etc/proxmox-backup/node.cfg")
+ }
+ }
+ } else if allowFileFallback {
+ for _, rel := range []string{
+ "etc/proxmox-backup/traffic-control.cfg",
+ "etc/proxmox-backup/node.cfg",
+ } {
+ if err := applyPBSConfigFileFromStage(ctx, logger, stageRoot, rel); err != nil {
+ logger.Warning("PBS staged apply: %s: %v", rel, err)
+ }
+ }
+ } else {
+ logging.DebugStep(logger, "pbs staged apply", "Skipping node.cfg/traffic-control.cfg: merge mode requires PBS API apply")
}
}
- if plan.HasCategoryID("pbs_jobs") {
- if err := applyPBSJobConfigsFromStage(ctx, logger, stageRoot); err != nil {
- logger.Warning("PBS staged apply: job configs: %v", err)
+
+ if plan.HasCategoryID("datastore_pbs") {
+ if apiAvailable {
+ if err := applyPBSS3CfgViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ logger.Warning("PBS API apply: s3.cfg failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based s3.cfg")
+ _ = applyPBSS3CfgFromStage(ctx, logger, stageRoot)
+ }
+ }
+ if err := applyPBSDatastoreCfgViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ logger.Warning("PBS API apply: datastore.cfg failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based datastore.cfg")
+ _ = applyPBSDatastoreCfgFromStage(ctx, logger, stageRoot)
+ }
+ }
+ } else if allowFileFallback {
+ if err := applyPBSS3CfgFromStage(ctx, logger, stageRoot); err != nil {
+ logger.Warning("PBS staged apply: s3.cfg: %v", err)
+ }
+ if err := applyPBSDatastoreCfgFromStage(ctx, logger, stageRoot); err != nil {
+ logger.Warning("PBS staged apply: datastore.cfg: %v", err)
+ }
+ } else {
+ logging.DebugStep(logger, "pbs staged apply", "Skipping datastore.cfg/s3.cfg: merge mode requires PBS API apply")
}
}
+
if plan.HasCategoryID("pbs_remotes") {
- if err := applyPBSRemoteCfgFromStage(ctx, logger, stageRoot); err != nil {
- logger.Warning("PBS staged apply: remote.cfg: %v", err)
+ if apiAvailable {
+ if err := applyPBSRemoteCfgViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ logger.Warning("PBS API apply: remote.cfg failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based remote.cfg")
+ _ = applyPBSRemoteCfgFromStage(ctx, logger, stageRoot)
+ }
+ }
+ } else if allowFileFallback {
+ if err := applyPBSRemoteCfgFromStage(ctx, logger, stageRoot); err != nil {
+ logger.Warning("PBS staged apply: remote.cfg: %v", err)
+ }
+ } else {
+ logging.DebugStep(logger, "pbs staged apply", "Skipping remote.cfg: merge mode requires PBS API apply")
}
}
- if plan.HasCategoryID("pbs_host") {
- if err := applyPBSHostConfigsFromStage(ctx, logger, stageRoot); err != nil {
- logger.Warning("PBS staged apply: host configs: %v", err)
+
+ if plan.HasCategoryID("pbs_jobs") {
+ if apiAvailable {
+ if err := applyPBSSyncCfgViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ logger.Warning("PBS API apply: sync jobs failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based job configs")
+ _ = applyPBSJobConfigsFromStage(ctx, logger, stageRoot)
+ }
+ }
+ if err := applyPBSVerificationCfgViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ logger.Warning("PBS API apply: verification jobs failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based job configs")
+ _ = applyPBSJobConfigsFromStage(ctx, logger, stageRoot)
+ }
+ }
+ if err := applyPBSPruneCfgViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ logger.Warning("PBS API apply: prune jobs failed: %v", err)
+ if allowFileFallback {
+ logger.Warning("PBS staged apply: falling back to file-based job configs")
+ _ = applyPBSJobConfigsFromStage(ctx, logger, stageRoot)
+ }
+ }
+ } else if allowFileFallback {
+ if err := applyPBSJobConfigsFromStage(ctx, logger, stageRoot); err != nil {
+ logger.Warning("PBS staged apply: job configs: %v", err)
+ }
+ } else {
+ logging.DebugStep(logger, "pbs staged apply", "Skipping sync/verification/prune configs: merge mode requires PBS API apply")
}
}
+
if plan.HasCategoryID("pbs_tape") {
if err := applyPBSTapeConfigsFromStage(ctx, logger, stageRoot); err != nil {
logger.Warning("PBS staged apply: tape configs: %v", err)
}
}
+
return nil
}
@@ -84,27 +202,6 @@ func applyPBSS3CfgFromStage(ctx context.Context, logger *logging.Logger, stageRo
return applyPBSConfigFileFromStage(ctx, logger, stageRoot, "etc/proxmox-backup/s3.cfg")
}
-func applyPBSHostConfigsFromStage(ctx context.Context, logger *logging.Logger, stageRoot string) (err error) {
- done := logging.DebugStart(logger, "pbs staged apply host configs", "stage=%s", stageRoot)
- defer func() { done(err) }()
-
- // ACME should be applied before node.cfg (node.cfg references ACME account/plugins).
- paths := []string{
- "etc/proxmox-backup/acme/accounts.cfg",
- "etc/proxmox-backup/acme/plugins.cfg",
- "etc/proxmox-backup/metricserver.cfg",
- "etc/proxmox-backup/traffic-control.cfg",
- "etc/proxmox-backup/proxy.cfg",
- "etc/proxmox-backup/node.cfg",
- }
- for _, rel := range paths {
- if err := applyPBSConfigFileFromStage(ctx, logger, stageRoot, rel); err != nil {
- logger.Warning("PBS staged apply: %s: %v", rel, err)
- }
- }
- return nil
-}
-
func applyPBSTapeConfigsFromStage(ctx context.Context, logger *logging.Logger, stageRoot string) (err error) {
done := logging.DebugStart(logger, "pbs staged apply tape configs", "stage=%s", stageRoot)
defer func() { done(err) }()
@@ -170,6 +267,32 @@ func applyPBSDatastoreCfgFromStage(ctx context.Context, logger *logging.Logger,
return nil
}
+ if reason := detectPBSDatastoreCfgDuplicateKeys(blocks); reason != "" {
+ logger.Warning("PBS staged apply: staged datastore.cfg looks invalid (%s); attempting recovery from pbs_datastore_inventory.json", reason)
+ if recovered, src, recErr := loadPBSDatastoreCfgFromInventory(stageRoot); recErr != nil {
+ logger.Warning("PBS staged apply: unable to recover datastore.cfg from inventory (%v); leaving current configuration unchanged", recErr)
+ return nil
+ } else if strings.TrimSpace(recovered) == "" {
+ logger.Warning("PBS staged apply: recovered datastore.cfg from %s is empty; leaving current configuration unchanged", src)
+ return nil
+ } else {
+ normalized, fixed = normalizePBSDatastoreCfgContent(recovered)
+ if fixed > 0 {
+ logger.Warning("PBS staged apply: recovered datastore.cfg normalization fixed %d malformed line(s) (properties must be indented)", fixed)
+ }
+ blocks, err = parsePBSDatastoreCfgBlocks(normalized)
+ if err != nil {
+ logger.Warning("PBS staged apply: recovered datastore.cfg from %s is still invalid (%v); leaving current configuration unchanged", src, err)
+ return nil
+ }
+ if reason := detectPBSDatastoreCfgDuplicateKeys(blocks); reason != "" {
+ logger.Warning("PBS staged apply: recovered datastore.cfg from %s still looks invalid (%s); leaving current configuration unchanged", src, reason)
+ return nil
+ }
+ logger.Info("PBS staged apply: datastore.cfg recovered from %s", src)
+ }
+ }
+
var applyBlocks []pbsDatastoreBlock
var deferred []pbsDatastoreBlock
for _, b := range blocks {
@@ -213,6 +336,90 @@ func applyPBSDatastoreCfgFromStage(ctx context.Context, logger *logging.Logger,
return nil
}
// pbsDatastoreInventoryRestoreLite is a minimal decode target for
// pbs_datastore_inventory.json: only the fields needed to recover a
// datastore.cfg — a verbatim file snapshot (preferred) plus a per-datastore
// summary list (fallback).
type pbsDatastoreInventoryRestoreLite struct {
	// Files maps logical file names (e.g. "pbs_datastore_cfg") to their captured content.
	Files map[string]struct {
		Content string `json:"content"`
	} `json:"files"`
	// Datastores summarizes each datastore; used to synthesize a minimal
	// datastore.cfg when no verbatim snapshot is present.
	Datastores []struct {
		Name string `json:"name"`
		Path string `json:"path"`
		Comment string `json:"comment"`
	} `json:"datastores"`
}
+
+func loadPBSDatastoreCfgFromInventory(stageRoot string) (string, string, error) {
+ inventoryPath := filepath.Join(stageRoot, "var/lib/proxsave-info/commands/pbs/pbs_datastore_inventory.json")
+ raw, err := restoreFS.ReadFile(inventoryPath)
+ if err != nil {
+ return "", "", fmt.Errorf("read inventory %s: %w", inventoryPath, err)
+ }
+ trimmed := strings.TrimSpace(string(raw))
+ if trimmed == "" {
+ return "", "", fmt.Errorf("inventory %s is empty", inventoryPath)
+ }
+
+ var report pbsDatastoreInventoryRestoreLite
+ if err := json.Unmarshal([]byte(trimmed), &report); err != nil {
+ return "", "", fmt.Errorf("parse inventory %s: %w", inventoryPath, err)
+ }
+
+ if report.Files != nil {
+ if snap := strings.TrimSpace(report.Files["pbs_datastore_cfg"].Content); snap != "" {
+ return report.Files["pbs_datastore_cfg"].Content, "pbs_datastore_inventory.json.files[pbs_datastore_cfg].content", nil
+ }
+ }
+
+ // Fallback: generate a minimal datastore.cfg from the inventory's datastore list.
+ var out strings.Builder
+ for _, ds := range report.Datastores {
+ name := strings.TrimSpace(ds.Name)
+ path := strings.TrimSpace(ds.Path)
+ if name == "" || path == "" {
+ continue
+ }
+ if out.Len() > 0 {
+ out.WriteString("\n")
+ }
+ out.WriteString(fmt.Sprintf("datastore: %s\n", name))
+ if comment := strings.TrimSpace(ds.Comment); comment != "" {
+ out.WriteString(fmt.Sprintf(" comment %s\n", comment))
+ }
+ out.WriteString(fmt.Sprintf(" path %s\n", path))
+ }
+
+ generated := strings.TrimSpace(out.String())
+ if generated == "" {
+ return "", "", fmt.Errorf("inventory %s contains no usable datastore definitions", inventoryPath)
+ }
+ return out.String(), "pbs_datastore_inventory.json.datastores", nil
+}
+
+func detectPBSDatastoreCfgDuplicateKeys(blocks []pbsDatastoreBlock) string {
+ for _, block := range blocks {
+ seen := map[string]int{}
+ for _, line := range block.Lines {
+ trimmed := strings.TrimSpace(line)
+ if trimmed == "" || strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, "datastore:") {
+ continue
+ }
+
+ fields := strings.Fields(trimmed)
+ if len(fields) == 0 {
+ continue
+ }
+ key := strings.TrimSpace(fields[0])
+ if key == "" {
+ continue
+ }
+ seen[key]++
+ if seen[key] > 1 {
+ return fmt.Sprintf("datastore %s has duplicate key %q", strings.TrimSpace(block.Name), key)
+ }
+ }
+ }
+ return ""
+}
+
func parsePBSDatastoreCfgBlocks(content string) ([]pbsDatastoreBlock, error) {
var blocks []pbsDatastoreBlock
var current *pbsDatastoreBlock
@@ -293,7 +500,7 @@ func shouldApplyPBSDatastoreBlock(block pbsDatastoreBlock, logger *logging.Logge
}
if hasData {
- if warn := validatePBSDatastoreReadOnly(path, logger); warn != "" {
+ if warn := validatePBSDatastoreReadOnly(path); warn != "" && logger != nil {
logger.Warning("PBS datastore preflight: %s", warn)
}
return true, ""
diff --git a/internal/orchestrator/pbs_staged_apply_test.go b/internal/orchestrator/pbs_staged_apply_test.go
index ecb7abb..0ee7ab7 100644
--- a/internal/orchestrator/pbs_staged_apply_test.go
+++ b/internal/orchestrator/pbs_staged_apply_test.go
@@ -3,6 +3,7 @@ package orchestrator
import (
"context"
"os"
+ "strings"
"testing"
)
@@ -76,3 +77,71 @@ func TestShouldApplyPBSDatastoreBlock_AllowsMountLikePathsOnRootFS(t *testing.T)
t.Fatalf("expected datastore block to be applied, got ok=false reason=%q", reason)
}
}
+
// TestApplyPBSDatastoreCfgFromStage_RecoversFromInventoryWhenFlattened verifies
// that when the staged datastore.cfg is "flattened" (section headers separated
// from their properties, producing duplicate keys), the apply path falls back
// to the verbatim snapshot stored in pbs_datastore_inventory.json and restores
// a well-formed two-block datastore.cfg with the original paths intact.
func TestApplyPBSDatastoreCfgFromStage_RecoversFromInventoryWhenFlattened(t *testing.T) {
	// Swap in a fake filesystem; restore the original on cleanup.
	origFS := restoreFS
	t.Cleanup(func() { restoreFS = origFS })

	fakeFS := NewFakeFS()
	t.Cleanup(func() { _ = os.RemoveAll(fakeFS.Root) })
	restoreFS = fakeFS

	stageRoot := "/stage"

	// This is a representative "flattened" datastore.cfg produced by an unsafe prefilter
	// (headers separated from their respective properties).
	staged := strings.Join([]string{
		"comment Local ext4 disk datastore",
		"comment Synology NFS sync target",
		"datastore: Data1",
		"datastore: Synology-Archive",
		"gc-schedule 05:00",
		"gc-schedule 06:30",
		"notification-mode notification-system",
		"notification-mode notification-system",
		"path /mnt/Synology_NFS/PBS_Backup",
		"path /mnt/datastore/Data1",
		"",
	}, "\n")
	if err := fakeFS.WriteFile(stageRoot+"/etc/proxmox-backup/datastore.cfg", []byte(staged), 0o640); err != nil {
		t.Fatalf("write staged datastore.cfg: %v", err)
	}

	// Inventory contains a verbatim snapshot of the original datastore.cfg, which should be preferred.
	inventory := `{"files":{"pbs_datastore_cfg":{"content":"datastore: Synology-Archive\n comment Synology NFS sync target\n gc-schedule 05:00\n notification-mode notification-system\n path /mnt/Synology_NFS/PBS_Backup\n\ndatastore: Data1\n comment Local ext4 disk datastore\n gc-schedule 06:30\n notification-mode notification-system\n path /mnt/datastore/Data1\n"}}}`
	if err := fakeFS.WriteFile(stageRoot+"/var/lib/proxsave-info/commands/pbs/pbs_datastore_inventory.json", []byte(inventory), 0o640); err != nil {
		t.Fatalf("write inventory: %v", err)
	}

	if err := applyPBSDatastoreCfgFromStage(context.Background(), newTestLogger(), stageRoot); err != nil {
		t.Fatalf("applyPBSDatastoreCfgFromStage error: %v", err)
	}

	// The restored file must parse into two clean blocks with no duplicate keys.
	out, err := fakeFS.ReadFile("/etc/proxmox-backup/datastore.cfg")
	if err != nil {
		t.Fatalf("read restored datastore.cfg: %v", err)
	}

	blocks, err := parsePBSDatastoreCfgBlocks(string(out))
	if err != nil {
		t.Fatalf("parse restored datastore.cfg: %v", err)
	}
	if len(blocks) != 2 {
		t.Fatalf("expected 2 datastore blocks, got %d", len(blocks))
	}
	if reason := detectPBSDatastoreCfgDuplicateKeys(blocks); reason != "" {
		t.Fatalf("restored datastore.cfg still has duplicate keys: %s", reason)
	}

	// Verify the expected datastore paths are preserved.
	paths := map[string]string{}
	for _, b := range blocks {
		paths[b.Name] = b.Path
	}
	if paths["Synology-Archive"] != "/mnt/Synology_NFS/PBS_Backup" {
		t.Fatalf("Synology-Archive path=%q", paths["Synology-Archive"])
	}
	if paths["Data1"] != "/mnt/datastore/Data1" {
		t.Fatalf("Data1 path=%q", paths["Data1"])
	}
}
diff --git a/internal/orchestrator/restore.go b/internal/orchestrator/restore.go
index 0369703..2f14d20 100644
--- a/internal/orchestrator/restore.go
+++ b/internal/orchestrator/restore.go
@@ -26,16 +26,15 @@ import (
var ErrRestoreAborted = errors.New("restore workflow aborted by user")
var (
- serviceStopTimeout = 45 * time.Second
- serviceStopNoBlockTimeout = 15 * time.Second
- serviceStartTimeout = 30 * time.Second
- serviceVerifyTimeout = 30 * time.Second
- serviceStatusCheckTimeout = 5 * time.Second
- servicePollInterval = 500 * time.Millisecond
- serviceRetryDelay = 500 * time.Millisecond
- restoreLogSequence uint64
- restoreGlob = filepath.Glob
- prepareDecryptedBackupFunc = prepareDecryptedBackup
+ serviceStopTimeout = 45 * time.Second
+ serviceStopNoBlockTimeout = 15 * time.Second
+ serviceStartTimeout = 30 * time.Second
+ serviceVerifyTimeout = 30 * time.Second
+ serviceStatusCheckTimeout = 5 * time.Second
+ servicePollInterval = 500 * time.Millisecond
+ serviceRetryDelay = 500 * time.Millisecond
+ restoreLogSequence uint64
+ restoreGlob = filepath.Glob
)
// RestoreAbortInfo contains information about an aborted restore with network rollback.
@@ -763,8 +762,17 @@ func confirmRestoreAction(ctx context.Context, reader *bufio.Reader, cand *decry
manifest := cand.Manifest
fmt.Println()
fmt.Printf("Selected backup: %s (%s)\n", cand.DisplayBase, manifest.CreatedAt.Format("2006-01-02 15:04:05"))
- fmt.Println("Restore destination: / (system root; original paths will be preserved)")
- fmt.Println("WARNING: This operation will overwrite configuration files on this system.")
+ cleanDest := filepath.Clean(strings.TrimSpace(dest))
+ if cleanDest == "" || cleanDest == "." {
+ cleanDest = string(os.PathSeparator)
+ }
+ if cleanDest == string(os.PathSeparator) {
+ fmt.Println("Restore destination: / (system root; original paths will be preserved)")
+ fmt.Println("WARNING: This operation will overwrite configuration files on this system.")
+ } else {
+ fmt.Printf("Restore destination: %s (original paths will be preserved under this directory)\n", cleanDest)
+ fmt.Printf("WARNING: This operation will overwrite existing files under %s.\n", cleanDest)
+ }
fmt.Println("Type RESTORE to proceed or 0 to cancel.")
for {
@@ -978,7 +986,7 @@ func applyVMConfigs(ctx context.Context, entries []vmEntry, logger *logging.Logg
logger.Warning("VM apply aborted: %v", err)
return applied, failed
}
- target := fmt.Sprintf("/nodes/%s/%s/%s/config", detectNodeForVM(vm), vm.Kind, vm.VMID)
+ target := fmt.Sprintf("/nodes/%s/%s/%s/config", detectNodeForVM(), vm.Kind, vm.VMID)
args := []string{"set", target, "--filename", vm.Path}
if err := runPvesh(ctx, logger, args); err != nil {
logger.Warning("Failed to apply %s (vmid=%s kind=%s): %v", target, vm.VMID, vm.Kind, err)
@@ -995,7 +1003,7 @@ func applyVMConfigs(ctx context.Context, entries []vmEntry, logger *logging.Logg
return applied, failed
}
-func detectNodeForVM(vm vmEntry) string {
+func detectNodeForVM() string {
host, _ := os.Hostname()
host = shortHost(host)
if host != "" {
@@ -1564,7 +1572,7 @@ func extractTarEntry(tarReader *tar.Reader, header *tar.Header, destRoot string,
case tar.TypeSymlink:
return extractSymlink(target, header, cleanDestRoot, logger)
case tar.TypeLink:
- return extractHardlink(target, header, cleanDestRoot, logger)
+ return extractHardlink(target, header, cleanDestRoot)
default:
logger.Debug("Skipping unsupported file type %d: %s", header.Typeflag, header.Name)
return nil
@@ -1715,7 +1723,7 @@ func extractSymlink(target string, header *tar.Header, destRoot string, logger *
}
// extractHardlink creates a hard link
-func extractHardlink(target string, header *tar.Header, destRoot string, logger *logging.Logger) error {
+func extractHardlink(target string, header *tar.Header, destRoot string) error {
// Validate hard link target
linkName := header.Linkname
diff --git a/internal/orchestrator/restore_access_control.go b/internal/orchestrator/restore_access_control.go
index efdda05..118168f 100644
--- a/internal/orchestrator/restore_access_control.go
+++ b/internal/orchestrator/restore_access_control.go
@@ -91,6 +91,13 @@ func maybeApplyAccessControlFromStage(ctx context.Context, logger *logging.Logge
}
func applyPBSAccessControlFromStage(ctx context.Context, logger *logging.Logger, stageRoot string) (err error) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
done := logging.DebugStart(logger, "pbs access control apply", "stage=%s", stageRoot)
defer func() { done(err) }()
@@ -283,17 +290,17 @@ func applyPBSACLFromStage(logger *logging.Logger, stagedACL string) error {
// - header-style (section + indented keys)
// - colon-delimited line format (acl::::)
if pbsConfigHasHeader(raw) {
- return applyPBSACLSectionFormat(logger, raw)
+ return applyPBSACLSectionFormat(raw)
}
if isPBSACLLineFormat(raw) {
- return applyPBSACLLineFormat(logger, raw)
+ return applyPBSACLLineFormat(raw)
}
logger.Warning("PBS access control: staged acl.cfg has unknown format; skipping apply")
return nil
}
-func applyPBSACLSectionFormat(logger *logging.Logger, raw string) error {
+func applyPBSACLSectionFormat(raw string) error {
backupSections, err := parseProxmoxNotificationSections(raw)
if err != nil {
return fmt.Errorf("parse staged acl.cfg: %w", err)
@@ -369,7 +376,7 @@ func parsePBSACLLine(line string) (pbsACLLine, bool) {
}, true
}
-func applyPBSACLLineFormat(logger *logging.Logger, raw string) error {
+func applyPBSACLLineFormat(raw string) error {
var outLines []string
var hasRootAdmin bool
@@ -667,6 +674,13 @@ func mustMarshalRaw(v any) json.RawMessage {
}
func applyPVEAccessControlFromStage(ctx context.Context, logger *logging.Logger, stageRoot string) (err error) {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
done := logging.DebugStart(logger, "pve access control apply", "stage=%s", stageRoot)
defer func() { done(err) }()
diff --git a/internal/orchestrator/restore_errors_test.go b/internal/orchestrator/restore_errors_test.go
index d324e4c..cad33a8 100644
--- a/internal/orchestrator/restore_errors_test.go
+++ b/internal/orchestrator/restore_errors_test.go
@@ -1011,8 +1011,7 @@ func TestExtractHardlink_AbsoluteTargetRejectedError(t *testing.T) {
Typeflag: tar.TypeLink,
}
- logger := logging.New(logging.GetDefaultLogger().GetLevel(), false)
- err := extractHardlink("/tmp/link", header, "/tmp", logger)
+ err := extractHardlink("/tmp/link", header, "/tmp")
if err == nil || !strings.Contains(err.Error(), "absolute hardlink target not allowed") {
t.Fatalf("expected absolute target error, got: %v", err)
}
@@ -1043,8 +1042,7 @@ func TestExtractHardlink_LinkCreationFails(t *testing.T) {
}
linkPath := filepath.Join(fakeFS.Root, "link")
- logger := logging.New(logging.GetDefaultLogger().GetLevel(), false)
- err := extractHardlink(linkPath, header, fakeFS.Root, logger)
+ err := extractHardlink(linkPath, header, fakeFS.Root)
if err == nil || !strings.Contains(err.Error(), "hardlink") {
t.Fatalf("expected link creation error, got: %v", err)
}
diff --git a/internal/orchestrator/restore_firewall.go b/internal/orchestrator/restore_firewall.go
index 9e655c1..64c7419 100644
--- a/internal/orchestrator/restore_firewall.go
+++ b/internal/orchestrator/restore_firewall.go
@@ -233,7 +233,7 @@ func maybeApplyPVEFirewallWithUI(
return nil
}
- if err := restartPVEFirewallService(ctx, logger); err != nil {
+ if err := restartPVEFirewallService(ctx); err != nil {
logger.Warning("PVE firewall restore: reload/restart failed: %v", err)
}
@@ -300,7 +300,7 @@ func applyPVEFirewallFromStage(logger *logging.Logger, stageRoot string) (applie
applied = append(applied, destFirewall)
}
}
- } else if err != nil && !errors.Is(err, os.ErrNotExist) {
+ } else if !errors.Is(err, os.ErrNotExist) {
return applied, fmt.Errorf("stat staged firewall config %s: %w", stageFirewall, err)
}
@@ -382,7 +382,7 @@ func selectStageHostFirewall(logger *logging.Logger, stageRoot string) (path str
return "", "", false, nil
}
-func restartPVEFirewallService(ctx context.Context, logger *logging.Logger) error {
+func restartPVEFirewallService(ctx context.Context) error {
timeoutCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
diff --git a/internal/orchestrator/restore_ha.go b/internal/orchestrator/restore_ha.go
index 5341941..d69db66 100644
--- a/internal/orchestrator/restore_ha.go
+++ b/internal/orchestrator/restore_ha.go
@@ -278,7 +278,7 @@ func stageHasPVEHAConfig(stageRoot string) (bool, error) {
for _, candidate := range candidates {
if _, err := restoreFS.Stat(candidate); err == nil {
return true, nil
- } else if err != nil && !errors.Is(err, os.ErrNotExist) {
+ } else if !errors.Is(err, os.ErrNotExist) {
return false, fmt.Errorf("stat %s: %w", candidate, err)
}
}
diff --git a/internal/orchestrator/restore_notifications.go b/internal/orchestrator/restore_notifications.go
index 6181c09..3774a5d 100644
--- a/internal/orchestrator/restore_notifications.go
+++ b/internal/orchestrator/restore_notifications.go
@@ -56,7 +56,29 @@ func maybeApplyNotificationsFromStage(ctx context.Context, logger *logging.Logge
if !plan.HasCategoryID("pbs_notifications") {
return nil
}
- return applyPBSNotificationsFromStage(ctx, logger, stageRoot)
+ behavior := plan.PBSRestoreBehavior
+ strict := behavior == PBSRestoreBehaviorClean
+ allowFileFallback := behavior == PBSRestoreBehaviorClean
+
+ if err := ensurePBSServicesForAPI(ctx, logger); err != nil {
+ if allowFileFallback {
+ logger.Warning("PBS notifications API apply unavailable; falling back to file-based apply: %v", err)
+ return applyPBSNotificationsFromStage(ctx, logger, stageRoot)
+ }
+ logger.Warning("PBS notifications API apply unavailable; skipping apply (merge mode): %v", err)
+ return nil
+ }
+
+ if err := applyPBSNotificationsViaAPI(ctx, logger, stageRoot, strict); err != nil {
+ if allowFileFallback {
+ logger.Warning("PBS notifications API apply failed; falling back to file-based apply: %v", err)
+ return applyPBSNotificationsFromStage(ctx, logger, stageRoot)
+ }
+ logger.Warning("PBS notifications API apply failed; skipping apply (merge mode): %v", err)
+ return nil
+ }
+ logger.Info("PBS notifications applied via API (%s)", behavior.DisplayName())
+ return nil
case SystemTypePVE:
if !plan.HasCategoryID("pve_notifications") {
return nil
diff --git a/internal/orchestrator/restore_plan.go b/internal/orchestrator/restore_plan.go
index 3c88564..6d54716 100644
--- a/internal/orchestrator/restore_plan.go
+++ b/internal/orchestrator/restore_plan.go
@@ -13,6 +13,7 @@ type RestorePlan struct {
NormalCategories []Category
StagedCategories []Category
ExportCategories []Category
+ PBSRestoreBehavior PBSRestoreBehavior
ClusterBackup bool
ClusterSafeMode bool
NeedsClusterRestore bool
@@ -80,4 +81,3 @@ func (p *RestorePlan) HasCategoryID(id string) bool {
}
return hasCategoryID(p.NormalCategories, id) || hasCategoryID(p.StagedCategories, id) || hasCategoryID(p.ExportCategories, id)
}
-
diff --git a/internal/orchestrator/restore_sdn.go b/internal/orchestrator/restore_sdn.go
index 5cd81bf..01c9c59 100644
--- a/internal/orchestrator/restore_sdn.go
+++ b/internal/orchestrator/restore_sdn.go
@@ -12,6 +12,8 @@ import (
)
func maybeApplyPVESDNFromStage(ctx context.Context, logger *logging.Logger, plan *RestorePlan, stageRoot string, dryRun bool) (err error) {
+ _ = ctx // reserved for future timeouts/cancellation hooks
+
if plan == nil || plan.SystemType != SystemTypePVE || !plan.HasCategoryID("pve_sdn") {
return nil
}
@@ -94,7 +96,7 @@ func applyPVESDNFromStage(logger *logging.Logger, stageRoot string) (applied []s
applied = append(applied, destSDN)
}
}
- } else if err != nil && !errors.Is(err, os.ErrNotExist) {
+ } else if !errors.Is(err, os.ErrNotExist) {
return applied, fmt.Errorf("stat staged sdn %s: %w", stageSDN, err)
}
diff --git a/internal/orchestrator/restore_test.go b/internal/orchestrator/restore_test.go
index c9d008d..80a1f10 100644
--- a/internal/orchestrator/restore_test.go
+++ b/internal/orchestrator/restore_test.go
@@ -512,28 +512,26 @@ func TestExtractDirectory_Success(t *testing.T) {
// --------------------------------------------------------------------------
func TestExtractHardlink_AbsoluteTargetRejected(t *testing.T) {
- logger := logging.New(types.LogLevelDebug, false)
header := &tar.Header{
Name: "link",
Linkname: "/absolute/path",
Typeflag: tar.TypeLink,
}
- err := extractHardlink("/tmp/dest", header, "/tmp/dest", logger)
+ err := extractHardlink("/tmp/dest", header, "/tmp/dest")
if err == nil || !strings.Contains(err.Error(), "absolute hardlink target not allowed") {
t.Fatalf("expected absolute target error, got: %v", err)
}
}
func TestExtractHardlink_EscapesRoot(t *testing.T) {
- logger := logging.New(types.LogLevelDebug, false)
header := &tar.Header{
Name: "link",
Linkname: "../../../etc/passwd",
Typeflag: tar.TypeLink,
}
- err := extractHardlink("/tmp/dest/link", header, "/tmp/dest", logger)
+ err := extractHardlink("/tmp/dest/link", header, "/tmp/dest")
if err == nil || !strings.Contains(err.Error(), "escapes root") {
t.Fatalf("expected escape error, got: %v", err)
}
@@ -544,7 +542,6 @@ func TestExtractHardlink_Success(t *testing.T) {
t.Cleanup(func() { restoreFS = orig })
restoreFS = osFS{}
- logger := logging.New(types.LogLevelDebug, false)
destRoot := t.TempDir()
originalFile := filepath.Join(destRoot, "original.txt")
linkFile := filepath.Join(destRoot, "link.txt")
@@ -559,7 +556,7 @@ func TestExtractHardlink_Success(t *testing.T) {
Typeflag: tar.TypeLink,
}
- if err := extractHardlink(linkFile, header, destRoot, logger); err != nil {
+ if err := extractHardlink(linkFile, header, destRoot); err != nil {
t.Fatalf("extractHardlink failed: %v", err)
}
@@ -1002,10 +999,7 @@ func TestReadVMName_FileNotFound(t *testing.T) {
// --------------------------------------------------------------------------
func TestDetectNodeForVM_ReturnsHostname(t *testing.T) {
- entry := vmEntry{
- Path: "/export/etc/pve/nodes/node1/qemu-server/100.conf",
- }
- node := detectNodeForVM(entry)
+ node := detectNodeForVM()
// detectNodeForVM returns the current hostname, not the node from path
if node == "" {
t.Fatalf("expected non-empty node from hostname")
diff --git a/internal/orchestrator/restore_tui.go b/internal/orchestrator/restore_tui.go
index be03115..8b41afd 100644
--- a/internal/orchestrator/restore_tui.go
+++ b/internal/orchestrator/restore_tui.go
@@ -143,6 +143,90 @@ func selectRestoreModeTUI(systemType SystemType, configPath, buildSig, backupSum
return selected, nil
}
+func selectPBSRestoreBehaviorTUI(configPath, buildSig, backupSummary string) (PBSRestoreBehavior, error) {
+ app := newTUIApp()
+ var selected PBSRestoreBehavior
+ var aborted bool
+
+ list := tview.NewList().ShowSecondaryText(true)
+ list.SetMainTextColor(tcell.ColorWhite).
+ SetSelectedTextColor(tcell.ColorWhite).
+ SetSelectedBackgroundColor(tui.ProxmoxOrange)
+
+ list.AddItem(
+ "1) Merge (existing PBS)",
+ "Restore onto an already operational PBS. Avoids API-side deletions of existing PBS objects that are not in the backup.",
+ 0,
+ nil,
+ )
+ list.AddItem(
+ "2) Clean 1:1 (fresh PBS install)",
+ "Restore onto a new, clean PBS installation. Tries to make PBS configuration match the backup (may remove objects not in the backup).",
+ 0,
+ nil,
+ )
+
+ list.SetSelectedFunc(func(index int, mainText, secondaryText string, shortcut rune) {
+ switch index {
+ case 0:
+ selected = PBSRestoreBehaviorMerge
+ case 1:
+ selected = PBSRestoreBehaviorClean
+ default:
+ selected = PBSRestoreBehaviorUnspecified
+ }
+ if selected != PBSRestoreBehaviorUnspecified {
+ app.Stop()
+ }
+ })
+ list.SetDoneFunc(func() {
+ aborted = true
+ app.Stop()
+ })
+
+ form := components.NewForm(app)
+ listItem := components.NewListFormItem(list).
+ SetLabel("Select PBS restore behavior").
+ SetFieldHeight(6)
+ form.Form.AddFormItem(listItem)
+ form.Form.SetFocus(0)
+
+ form.SetOnCancel(func() {
+ aborted = true
+ })
+ form.AddCancelButton("Cancel")
+ enableFormNavigation(form, nil)
+
+ // Selected backup summary
+ summaryText := strings.TrimSpace(backupSummary)
+ var summaryView tview.Primitive
+ if summaryText != "" {
+ summary := tview.NewTextView().
+ SetText(fmt.Sprintf("Selected backup: %s", summaryText)).
+ SetWrap(true).
+ SetTextColor(tcell.ColorWhite)
+ summary.SetBorder(false)
+ summaryView = summary
+ } else {
+ summaryView = tview.NewBox()
+ }
+
+ content := tview.NewFlex().
+ SetDirection(tview.FlexRow).
+ AddItem(summaryView, 2, 0, false).
+ AddItem(form.Form, 0, 1, true)
+
+ page := buildRestoreWizardPage("PBS restore behavior", configPath, buildSig, content)
+ app.SetRoot(page, true).SetFocus(form.Form)
+ if err := app.Run(); err != nil {
+ return PBSRestoreBehaviorUnspecified, err
+ }
+ if aborted || selected == PBSRestoreBehaviorUnspecified {
+ return PBSRestoreBehaviorUnspecified, ErrRestoreAborted
+ }
+ return selected, nil
+}
+
func filterAndSortCategoriesForSystem(available []Category, systemType SystemType) []Category {
relevant := make([]Category, 0, len(available))
for _, cat := range available {
@@ -328,7 +412,7 @@ func maybeRepairNICNamesTUI(ctx context.Context, logger *logging.Logger, archive
return &nicRepairResult{AppliedAt: nowRestore(), SkippedReason: plan.SkippedReason}
}
- if plan != nil && !plan.Mapping.IsEmpty() {
+ if !plan.Mapping.IsEmpty() {
logging.DebugStep(logger, "NIC repair", "Detect persistent NIC naming overrides (udev/systemd)")
overrides, err := detectNICNamingOverrideRules(logger)
if err != nil {
@@ -756,35 +840,6 @@ func promptYesNoTUIWithCountdown(ctx context.Context, logger *logging.Logger, ti
return result, nil
}
-func promptOkTUI(title, configPath, buildSig, message, okLabel string) error {
- app := newTUIApp()
-
- infoText := tview.NewTextView().
- SetText(message).
- SetWrap(true).
- SetTextColor(tcell.ColorWhite).
- SetDynamicColors(true)
-
- form := components.NewForm(app)
- form.SetOnSubmit(func(values map[string]string) error {
- return nil
- })
- form.SetOnCancel(func() {})
- form.AddSubmitButton(okLabel)
- form.AddCancelButton("Close")
- enableFormNavigation(form, nil)
-
- content := tview.NewFlex().
- SetDirection(tview.FlexRow).
- AddItem(infoText, 0, 1, false).
- AddItem(form.Form, 3, 0, true)
-
- page := buildRestoreWizardPage(title, configPath, buildSig, content)
- form.SetParentView(page)
-
- return app.SetRoot(page, true).SetFocus(form.Form).Run()
-}
-
func promptNetworkCommitTUI(timeout time.Duration, health networkHealthReport, nicRepair *nicRepairResult, diagnosticsDir, configPath, buildSig string) (bool, error) {
app := newTUIApp()
var committed bool
diff --git a/internal/orchestrator/restore_workflow_ui.go b/internal/orchestrator/restore_workflow_ui.go
index 871ad9b..f90c1ad 100644
--- a/internal/orchestrator/restore_workflow_ui.go
+++ b/internal/orchestrator/restore_workflow_ui.go
@@ -129,6 +129,22 @@ func runRestoreWorkflowWithUI(ctx context.Context, cfg *config.Config, logger *l
plan := PlanRestore(candidate.Manifest, selectedCategories, systemType, mode)
+ if plan.SystemType == SystemTypePBS &&
+ (plan.HasCategoryID("pbs_host") ||
+ plan.HasCategoryID("datastore_pbs") ||
+ plan.HasCategoryID("pbs_remotes") ||
+ plan.HasCategoryID("pbs_jobs") ||
+ plan.HasCategoryID("pbs_notifications") ||
+ plan.HasCategoryID("pbs_access_control") ||
+ plan.HasCategoryID("pbs_tape")) {
+ behavior, err := ui.SelectPBSRestoreBehavior(ctx)
+ if err != nil {
+ return err
+ }
+ plan.PBSRestoreBehavior = behavior
+ logger.Info("PBS restore behavior: %s", behavior.DisplayName())
+ }
+
clusterBackup := strings.EqualFold(strings.TrimSpace(candidate.Manifest.ClusterMode), "cluster")
if plan.NeedsClusterRestore && clusterBackup {
logger.Info("Backup marked as cluster node; enabling guarded restore options for pve_cluster")
@@ -762,13 +778,14 @@ func runRestoreWorkflowWithUI(ctx context.Context, cfg *config.Config, logger *l
logger.Info("")
logger.Info("IMPORTANT: You may need to restart services for changes to take effect.")
- if systemType == SystemTypePVE {
+ switch systemType {
+ case SystemTypePVE:
if needsClusterRestore && clusterServicesStopped {
logger.Info(" PVE services were stopped/restarted during restore; verify status with: pvecm status")
} else {
logger.Info(" PVE services: systemctl restart pve-cluster pvedaemon pveproxy")
}
- } else if systemType == SystemTypePBS {
+ case SystemTypePBS:
if pbsServicesStopped {
logger.Info(" PBS services were stopped/restarted during restore; verify status with: systemctl status proxmox-backup proxmox-backup-proxy")
} else {
diff --git a/internal/orchestrator/restore_workflow_ui_helpers_test.go b/internal/orchestrator/restore_workflow_ui_helpers_test.go
index dc03218..f2a03724 100644
--- a/internal/orchestrator/restore_workflow_ui_helpers_test.go
+++ b/internal/orchestrator/restore_workflow_ui_helpers_test.go
@@ -7,40 +7,46 @@ import (
)
type fakeRestoreWorkflowUI struct {
- mode RestoreMode
- categories []Category
- confirmRestore bool
- confirmCompatible bool
- clusterMode ClusterRestoreMode
- continueNoSafety bool
+ mode RestoreMode
+ categories []Category
+ pbsBehavior PBSRestoreBehavior
+ confirmRestore bool
+ confirmCompatible bool
+ clusterMode ClusterRestoreMode
+ continueNoSafety bool
continuePBSServices bool
- confirmFstabMerge bool
- exportNode string
- applyVMConfigs bool
- applyStorageCfg bool
- applyDatacenterCfg bool
- confirmAction bool
- networkCommit bool
-
- modeErr error
- categoriesErr error
- confirmRestoreErr error
- confirmCompatibleErr error
- clusterModeErr error
- continueNoSafetyErr error
+ confirmFstabMerge bool
+ exportNode string
+ applyVMConfigs bool
+ applyStorageCfg bool
+ applyDatacenterCfg bool
+ confirmAction bool
+ networkCommit bool
+
+ modeErr error
+ categoriesErr error
+ pbsBehaviorErr error
+ confirmRestoreErr error
+ confirmCompatibleErr error
+ clusterModeErr error
+ continueNoSafetyErr error
continuePBSServicesErr error
- confirmFstabMergeErr error
- confirmActionErr error
- repairNICNamesErr error
- networkCommitErr error
+ confirmFstabMergeErr error
+ confirmActionErr error
+ repairNICNamesErr error
+ networkCommitErr error
}
func (f *fakeRestoreWorkflowUI) RunTask(ctx context.Context, title, initialMessage string, run func(ctx context.Context, report ProgressReporter) error) error {
return run(ctx, nil)
}
-func (f *fakeRestoreWorkflowUI) ShowMessage(ctx context.Context, title, message string) error { return nil }
-func (f *fakeRestoreWorkflowUI) ShowError(ctx context.Context, title, message string) error { return nil }
+func (f *fakeRestoreWorkflowUI) ShowMessage(ctx context.Context, title, message string) error {
+ return nil
+}
+func (f *fakeRestoreWorkflowUI) ShowError(ctx context.Context, title, message string) error {
+ return nil
+}
func (f *fakeRestoreWorkflowUI) SelectBackupSource(ctx context.Context, options []decryptPathOption) (decryptPathOption, error) {
return decryptPathOption{}, fmt.Errorf("unexpected SelectBackupSource call")
@@ -62,7 +68,16 @@ func (f *fakeRestoreWorkflowUI) SelectCategories(ctx context.Context, available
return f.categories, f.categoriesErr
}
-func (f *fakeRestoreWorkflowUI) ShowRestorePlan(ctx context.Context, config *SelectiveRestoreConfig) error { return nil }
+func (f *fakeRestoreWorkflowUI) SelectPBSRestoreBehavior(ctx context.Context) (PBSRestoreBehavior, error) {
+ if f.pbsBehavior == PBSRestoreBehaviorUnspecified && f.pbsBehaviorErr == nil {
+ return PBSRestoreBehaviorClean, nil
+ }
+ return f.pbsBehavior, f.pbsBehaviorErr
+}
+
+func (f *fakeRestoreWorkflowUI) ShowRestorePlan(ctx context.Context, config *SelectiveRestoreConfig) error {
+ return nil
+}
func (f *fakeRestoreWorkflowUI) ConfirmRestore(ctx context.Context) (bool, error) {
return f.confirmRestore, f.confirmRestoreErr
@@ -115,4 +130,3 @@ func (f *fakeRestoreWorkflowUI) RepairNICNames(ctx context.Context, archivePath
func (f *fakeRestoreWorkflowUI) PromptNetworkCommit(ctx context.Context, remaining time.Duration, health networkHealthReport, nicRepair *nicRepairResult, diagnosticsDir string) (bool, error) {
return f.networkCommit, f.networkCommitErr
}
-
diff --git a/internal/orchestrator/selective.go b/internal/orchestrator/selective.go
index 18f7583..4b05ea2 100644
--- a/internal/orchestrator/selective.go
+++ b/internal/orchestrator/selective.go
@@ -158,11 +158,12 @@ func ShowRestoreModeMenuWithReader(ctx context.Context, reader *bufio.Reader, lo
fmt.Println("Select restore mode:")
fmt.Println(" [1] FULL restore - Restore everything from backup")
- if systemType == SystemTypePVE {
+ switch systemType {
+ case SystemTypePVE:
fmt.Println(" [2] STORAGE only - PVE cluster + storage + jobs + mounts")
- } else if systemType == SystemTypePBS {
+ case SystemTypePBS:
fmt.Println(" [2] DATASTORE only - PBS datastore definitions + sync/verify/prune jobs + mounts")
- } else {
+ default:
fmt.Println(" [2] STORAGE/DATASTORE only - Storage or datastore configuration")
}
diff --git a/internal/orchestrator/workflow_ui.go b/internal/orchestrator/workflow_ui.go
index e951c5c..03db940 100644
--- a/internal/orchestrator/workflow_ui.go
+++ b/internal/orchestrator/workflow_ui.go
@@ -55,6 +55,7 @@ type RestoreWorkflowUI interface {
PromptDecryptSecret(ctx context.Context, displayName, previousError string) (string, error)
SelectRestoreMode(ctx context.Context, systemType SystemType) (RestoreMode, error)
SelectCategories(ctx context.Context, available []Category, systemType SystemType) ([]Category, error)
+ SelectPBSRestoreBehavior(ctx context.Context) (PBSRestoreBehavior, error)
ShowRestorePlan(ctx context.Context, config *SelectiveRestoreConfig) error
ConfirmRestore(ctx context.Context) (bool, error)
diff --git a/internal/orchestrator/workflow_ui_cli.go b/internal/orchestrator/workflow_ui_cli.go
index 940c3b1..1d303c7 100644
--- a/internal/orchestrator/workflow_ui_cli.go
+++ b/internal/orchestrator/workflow_ui_cli.go
@@ -184,6 +184,32 @@ func (u *cliWorkflowUI) SelectCategories(ctx context.Context, available []Catego
return ShowCategorySelectionMenuWithReader(ctx, u.reader, u.logger, available, systemType)
}
+func (u *cliWorkflowUI) SelectPBSRestoreBehavior(ctx context.Context) (PBSRestoreBehavior, error) {
+ fmt.Println()
+ fmt.Println("PBS restore reconciliation:")
+ fmt.Println(" [1] Merge (existing PBS) - Restore onto an already operational PBS (avoids API-side deletions of existing PBS objects not in the backup).")
+ fmt.Println(" [2] Clean 1:1 (fresh PBS install) - Restore onto a new, clean PBS and try to make configuration match the backup (may remove existing PBS objects not in the backup).")
+ fmt.Println(" [0] Exit")
+
+ for {
+ fmt.Print("Choice: ")
+ line, err := input.ReadLineWithContext(ctx, u.reader)
+ if err != nil {
+ return PBSRestoreBehaviorUnspecified, err
+ }
+ switch strings.TrimSpace(line) {
+ case "1":
+ return PBSRestoreBehaviorMerge, nil
+ case "2":
+ return PBSRestoreBehaviorClean, nil
+ case "0":
+ return PBSRestoreBehaviorUnspecified, ErrRestoreAborted
+ default:
+ fmt.Println("Please enter 1, 2 or 0.")
+ }
+ }
+}
+
func (u *cliWorkflowUI) ShowRestorePlan(ctx context.Context, config *SelectiveRestoreConfig) error {
ShowRestorePlan(u.logger, config)
return nil
diff --git a/internal/orchestrator/workflow_ui_tui_restore.go b/internal/orchestrator/workflow_ui_tui_restore.go
index 7c53681..350bd82 100644
--- a/internal/orchestrator/workflow_ui_tui_restore.go
+++ b/internal/orchestrator/workflow_ui_tui_restore.go
@@ -21,6 +21,10 @@ func (u *tuiWorkflowUI) SelectCategories(ctx context.Context, available []Catego
return selectCategoriesTUI(available, systemType, u.configPath, u.buildSig)
}
+func (u *tuiWorkflowUI) SelectPBSRestoreBehavior(ctx context.Context) (PBSRestoreBehavior, error) {
+ return selectPBSRestoreBehaviorTUI(u.configPath, u.buildSig, strings.TrimSpace(u.selectedBackupSummary))
+}
+
func (u *tuiWorkflowUI) ShowRestorePlan(ctx context.Context, config *SelectiveRestoreConfig) error {
return showRestorePlanTUI(config, u.configPath, u.buildSig)
}
@@ -147,4 +151,3 @@ func (u *tuiWorkflowUI) ConfirmApplyDatacenterCfg(ctx context.Context, datacente
message := fmt.Sprintf("Datacenter configuration found:\n\n%s\n\nApply datacenter.cfg via pvesh now?", strings.TrimSpace(datacenterCfgPath))
return promptYesNoTUIFunc("Apply datacenter.cfg", u.configPath, u.buildSig, message, "Apply via API", "Skip")
}
-