From ebaebe0aad001576cb9c99c060533bec567837c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Sun, 30 Nov 2025 18:53:32 +0100 Subject: [PATCH 01/30] config import via json --- README.md | 345 ++++++++++++++++++ app/server/modules/backups/backups.service.ts | 13 +- app/server/modules/lifecycle/startup.ts | 180 ++++++++- .../notifications/notifications.service.ts | 16 +- .../repositories/repositories.service.ts | 42 ++- app/server/utils/crypto.ts | 5 + docker-compose.yml | 11 +- zerobyte.config.json | 231 ++++++++++++ 8 files changed, 822 insertions(+), 21 deletions(-) create mode 100644 zerobyte.config.json diff --git a/README.md b/README.md index f2f3652e..a40507bd 100644 --- a/README.md +++ b/README.md @@ -37,6 +37,350 @@ Zerobyte is a backup automation tool that helps you save your data across multip In order to run Zerobyte, you need to have Docker and Docker Compose installed on your server. Then, you can use the provided `docker-compose.yml` file to start the application. +### Configure Zerobyte via Config File + + +You can pre-configure backup sources (volumes), destinations (repositories), backup schedules, notification destinations and admin user using a config file (`zerobyte.config.json` by default (mounted in /app dir), or set `ZEROBYTE_CONFIG_PATH`). + +Secrets/credentials in the config file can reference environment variables using `${VAR_NAME}` syntax for secure injection. + +> **ℹ️ Config File Behavior** +> +> The config file is applied on startup using a **create-only** approach: +> - Resources defined in the config are only created if they don't already exist in the database +> - Existing resources with the same name are **not overwritten** - a warning is logged and the config entry is skipped +> - Changes made via the UI are preserved across container restarts +> - To update a resource from config, either modify it via the UI or delete it first +> +> This means the config file serves as "initial setup" rather than "desired state sync". + +#### zerobyte.config.json Structure + +```json +{ + "volumes": [ + // Array of volume objects. Each must have a unique "name" and a "config" matching one of the types below. + ], + "repositories": [ + // Array of repository objects. Each must have a unique "name" and a "config" matching one of the types below. + // Optionally, "compressionMode" ("auto", "off", "max") + ], + "backupSchedules": [ + // Array of backup schedule objects as described below. + ], + "notificationDestinations": [ + // Array of notification destination objects as described below. 
+ ] +} +``` + +##### Volume Types + +- **Local Directory** + ```json + { + "name": "local-volume", + "config": { + "backend": "directory", + "path": "/data", + "readOnly": true + } + } + ``` + +- **NFS** + ```json + { + "name": "nfs-volume", + "config": { + "backend": "nfs", + "server": "nfs.example.com", + "exportPath": "/data", + "port": 2049, + "version": "4", + "readOnly": false + } + } + ``` + +- **SMB** + ```json + { + "name": "smb-volume", + "config": { + "backend": "smb", + "server": "smb.example.com", + "share": "shared", + "username": "user", + "password": "${SMB_PASSWORD}", + "vers": "3.0", + "domain": "WORKGROUP", + "port": 445, + "readOnly": false + } + } + ``` + +- **WebDAV** + ```json + { + "name": "webdav-volume", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "readOnly": false, + "ssl": true + } + } + ``` + +##### Repository Types + +- **Local Directory** + ```json + { + "name": "local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories" + }, + "compressionMode": "auto" + } + ``` + > **Note for importing existing local repositories:** If you're importing an existing repository (e.g., from a backup or migration), include the `name` field in `config` with the original subfolder name. The actual restic repo is stored at `{path}/{name}`. You can find this value in an exported config under `repositories[].config.name`. + +- **S3-Compatible** + ```json + { + "name": "backup-repo", + "config": { + "backend": "s3", + "bucket": "mybucket", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + }, + "compressionMode": "auto" + } + ``` + +- **Google Cloud Storage** + ```json + { + "name": "gcs-repo", + "config": { + "backend": "gcs", + "bucket": "mybucket", + "projectId": "my-gcp-project", + "credentialsJson": "${GCS_CREDENTIALS}" + } + } + ``` + +- **Azure Blob Storage** + ```json + { + "name": "azure-repo", + "config": { + "backend": "azure", + "container": "mycontainer", + "accountName": "myaccount", + "accountKey": "${AZURE_KEY}" + } + } + ``` + +- **WebDAV, rclone, SFTP, REST, etc.** + (See documentation for required fields; all support env variable secrets.) 
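+
+  For illustration, an SFTP repository entry might look like the following (a sketch; see the bundled `zerobyte.config.json` for a complete sample, as exact field names vary by backend):
+  ```json
+  {
+    "name": "sftp-repo",
+    "config": {
+      "backend": "sftp",
+      "host": "sftp.example.com",
+      "port": 22,
+      "user": "sftpuser",
+      "privateKey": "${SFTP_PRIVATE_KEY}",
+      "path": "/backups"
+    }
+  }
+  ```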
+ +##### Backup Schedules + +- **Example:** + ```json + { + "volume": "local-volume", + "repository": "local-repo", + "cronExpression": "0 2 * * *", + "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, + "includePatterns": ["important-folder"], + "excludePatterns": ["*.tmp", "*.log"], + "enabled": true, + "notifications": ["slack-alerts", "email-admin"] + } + ``` +- **Fields:** + - `volume`: Name of the source volume + - `repository`: Name of the destination repository + - `cronExpression`: Cron string for schedule + - `retentionPolicy`: Object with retention rules (e.g., keepLast, keepDaily) + - `includePatterns`/`excludePatterns`: Arrays of patterns + - `enabled`: Boolean + - `notifications`: Array of notification destination names (strings) or detailed objects: + - Simple: `["slack-alerts", "email-admin"]` + - Detailed: `[{"name": "slack-alerts", "notifyOnStart": false, "notifyOnSuccess": true, "notifyOnFailure": true}]` + +##### Notification Destinations + +- **Examples:** + - **Slack** + ```json + { + "name": "slack-alerts", + "type": "slack", + "config": { + "webhookUrl": "${SLACK_WEBHOOK_URL}", + "channel": "#backups", + "username": "zerobyte", + "iconEmoji": ":floppy_disk:" + } + } + ``` + - **Email** + ```json + { + "name": "email-admin", + "type": "email", + "config": { + "smtpHost": "smtp.example.com", + "smtpPort": 587, + "username": "admin@example.com", + "password": "${EMAIL_PASSWORD}", + "from": "zerobyte@example.com", + "to": ["admin@example.com"], + "useTLS": true + } + } + ``` + - **Discord** + ```json + { + "name": "discord-backups", + "type": "discord", + "config": { + "webhookUrl": "${DISCORD_WEBHOOK_URL}", + "username": "zerobyte", + "avatarUrl": "https://example.com/avatar.png", + "threadId": "1234567890" + } + } + ``` + - **Gotify** + ```json + { + "name": "gotify-notify", + "type": "gotify", + "config": { + "serverUrl": "https://gotify.example.com", + "token": "${GOTIFY_TOKEN}", + "path": "/message", + "priority": 5 + } + } + ``` + - **ntfy** + ```json + { + "name": "ntfy-notify", + "type": "ntfy", + "config": { + "serverUrl": "https://ntfy.example.com", + "topic": "zerobyte-backups", + "priority": "high", + "username": "ntfyuser", + "password": "${NTFY_PASSWORD}" + } + } + ``` + - **Pushover** + ```json + { + "name": "pushover-notify", + "type": "pushover", + "config": { + "userKey": "${PUSHOVER_USER_KEY}", + "apiToken": "${PUSHOVER_API_TOKEN}", + "devices": "phone,tablet", + "priority": 1 + } + } + ``` + - **Telegram** + ```json + { + "name": "telegram-notify", + "type": "telegram", + "config": { + "botToken": "${TELEGRAM_BOT_TOKEN}", + "chatId": "123456789" + } + } + ``` + - **Custom (shoutrrr)** + ```json + { + "name": "custom-shoutrrr", + "type": "custom", + "config": { + "shoutrrrUrl": "${SHOUTRRR_URL}" + } + } + ``` + +- **Fields:** + - `name`: Unique name for the notification config + - `type`: Notification type (email, slack, discord, gotify, ntfy, pushover, telegram, custom) + - `config`: Type-specific config, secrets via `${ENV_VAR}` + +##### Admin Setup (Automated) + +- **Example:** + ```json + { + "admin": { + "username": "admin", + "password": "${ADMIN_PASSWORD}", + "recoveryKey": "${RECOVERY_KEY}" + } + } + ``` +- **Fields:** + - `username`: Admin username to create on first startup + - `password`: Admin password (can use `${ENV_VAR}`) + - `recoveryKey`: Optional recovery key (can use `${ENV_VAR}`) - if provided, the UI prompt to download recovery key will be skipped + +**On first startup, Zerobyte will automatically create the admin user from the 
config file.** + +> **⚠️ About the Recovery Key** +> +> The recovery key is a 64-character hex string that serves two critical purposes: +> 1. **Restic repository password** - Used to encrypt all your backup data +> 2. **Database encryption key** - Used to encrypt credentials stored in Zerobyte's database +> +> **If you lose this key, you will lose access to all your backups and stored credentials.** +> +> **Generating a recovery key ahead of time:** +> ```bash +> # Using OpenSSL (Linux/macOS) +> openssl rand -hex 32 +> +> # Using Python +> python3 -c "import secrets; print(secrets.token_hex(32))" +> ``` +> +> **Retrieving from an existing instance:** +> - Download via UI: Settings → Download Recovery Key +> - Or read directly from the container: `docker exec zerobyte cat /var/lib/zerobyte/data/restic.pass` + +--- + +**Notes:** +- All secrets (passwords, keys) can use `${ENV_VAR}` syntax to inject from environment variables. +- All paths must be accessible inside the container (mount host paths as needed). +- `readOnly` is supported for all volume types that allow it, including local directories. + ```yaml services: zerobyte: @@ -54,6 +398,7 @@ services: volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte + - ./zerobyte.config.json:/app/zerobyte.config.json:ro # Mount your config file ``` > [!WARNING] diff --git a/app/server/modules/backups/backups.service.ts b/app/server/modules/backups/backups.service.ts index 89aa3ade..dd276754 100644 --- a/app/server/modules/backups/backups.service.ts +++ b/app/server/modules/backups/backups.service.ts @@ -1,4 +1,4 @@ -import { eq } from "drizzle-orm"; +import { and, eq } from "drizzle-orm"; import cron from "node-cron"; import { CronExpressionParser } from "cron-parser"; import { NotFoundError, BadRequestError, ConflictError } from "http-errors-enhanced"; @@ -77,6 +77,17 @@ const createSchedule = async (data: CreateBackupScheduleBody) => { throw new NotFoundError("Repository not found"); } + const existingSchedule = await db.query.backupSchedulesTable.findFirst({ + where: and( + eq(backupSchedulesTable.volumeId, volume.id), + eq(backupSchedulesTable.repositoryId, repository.id) + ), + }); + + if (existingSchedule) { + throw new ConflictError(`A backup schedule for volume '${volume.name}' and repository '${repository.name}' already exists`); + } + const nextBackupAt = calculateNextRun(data.cronExpression); const [newSchedule] = await db diff --git a/app/server/modules/lifecycle/startup.ts b/app/server/modules/lifecycle/startup.ts index ea5b6f98..77538b00 100644 --- a/app/server/modules/lifecycle/startup.ts +++ b/app/server/modules/lifecycle/startup.ts @@ -1,7 +1,7 @@ import { Scheduler } from "../../core/scheduler"; import { and, eq, or } from "drizzle-orm"; import { db } from "../../db/db"; -import { volumesTable } from "../../db/schema"; +import { volumesTable, usersTable, repositoriesTable, notificationDestinationsTable } from "../../db/schema"; import { logger } from "../../utils/logger"; import { restic } from "../../utils/restic"; import { volumeService } from "../volumes/volume.service"; @@ -12,13 +12,191 @@ import { BackupExecutionJob } from "../../jobs/backup-execution"; import { CleanupSessionsJob } from "../../jobs/cleanup-sessions"; export const startup = async () => { + let configFileVolumes = []; + let configFileRepositories = []; + let configFileBackupSchedules = []; + let configFileNotificationDestinations = []; + let configFileAdmin = null; + try { + const configPath = process.env.ZEROBYTE_CONFIG_PATH || 
"zerobyte.config.json"; + const fs = await import("node:fs/promises"); + const path = await import("node:path"); + const configFullPath = path.resolve(process.cwd(), configPath); + if (await fs.stat(configFullPath).then(() => true, () => false)) { + const raw = await fs.readFile(configFullPath, "utf-8"); + const config = JSON.parse(raw); + + function interpolate(obj) { + if (typeof obj === "string") { + return obj.replace(/\$\{([^}]+)\}/g, (_, v) => process.env[v] || ""); + } else if (Array.isArray(obj)) { + return obj.map(interpolate); + } else if (obj && typeof obj === "object") { + return Object.fromEntries(Object.entries(obj).map(([k, v]) => [k, interpolate(v)])); + } + return obj; + } + configFileVolumes = interpolate(config.volumes || []); + configFileRepositories = interpolate(config.repositories || []); + configFileBackupSchedules = interpolate(config.backupSchedules || []); + configFileNotificationDestinations = interpolate(config.notificationDestinations || []); + configFileAdmin = interpolate(config.admin || null); + } + } catch (e) { + logger.warn(`No config file loaded or error parsing config: ${e.message}`); + } + await Scheduler.start(); await Scheduler.clear(); + try { + const fs = await import("node:fs/promises"); + const { RESTIC_PASS_FILE } = await import("../../core/constants.js"); + if (configFileAdmin && configFileAdmin.recoveryKey) { + await fs.writeFile(RESTIC_PASS_FILE, configFileAdmin.recoveryKey, { mode: 0o600 }); + logger.info(`Recovery key written from config to ${RESTIC_PASS_FILE}`); + } + } catch (err) { + const e = err instanceof Error ? err : new Error(String(err)); + logger.error(`Failed to write recovery key from config: ${e.message}`); + } + await restic.ensurePassfile().catch((err) => { logger.error(`Error ensuring restic passfile exists: ${err.message}`); }); + try { + for (const v of configFileVolumes) { + try { + await volumeService.createVolume(v.name, v.config); + logger.info(`Initialized volume from config: ${v.name}`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Volume ${v.name} not created: ${err.message}`); + } + } + const repoServiceModule = await import("../repositories/repositories.service"); + for (const r of configFileRepositories) { + try { + await repoServiceModule.repositoriesService.createRepository(r.name, r.config, r.compressionMode); + logger.info(`Initialized repository from config: ${r.name}`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Repository ${r.name} not created: ${err.message}`); + } + } + const notificationsServiceModule = await import("../notifications/notifications.service"); + for (const n of configFileNotificationDestinations) { + try { + await notificationsServiceModule.notificationsService.createDestination(n.name, n.config); + logger.info(`Initialized notification destination from config: ${n.name}`); + } catch (e) { + const err = e instanceof Error ? 
e : new Error(String(e)); + logger.warn(`Notification destination ${n.name} not created: ${err.message}`); + } + } + + const backupServiceModule = await import("../backups/backups.service"); + for (const s of configFileBackupSchedules) { + const volumeName = s.volume || s.volumeName; + const volume = await db.query.volumesTable.findFirst({ + where: eq(volumesTable.name, volumeName), + }); + if (!volume) { + logger.warn(`Backup schedule not created: Volume '${volumeName}' not found`); + continue; + } + const repositoryName = s.repository || s.repositoryName; + const repository = await db.query.repositoriesTable.findFirst({ + where: eq(repositoriesTable.name, repositoryName), + }); + if (!repository) { + logger.warn(`Backup schedule not created: Repository '${repositoryName}' not found`); + continue; + } + if (volume.status !== "mounted") { + try { + await volumeService.mountVolume(volume.name); + logger.info(`Mounted volume ${volume.name} for backup schedule`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Could not mount volume ${volume.name}: ${err.message}`); + continue; + } + } + let createdSchedule; + try { + createdSchedule = await backupServiceModule.backupsService.createSchedule({ + ...s, + volumeId: volume.id, + repositoryId: repository.id, + }); + logger.info(`Initialized backup schedule from config: ${s.cronExpression || s.name}`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Backup schedule not created: ${err.message}`); + continue; + } + + if (createdSchedule && s.notifications && Array.isArray(s.notifications) && s.notifications.length > 0) { + try { + const assignments: Array<{ + destinationId: number; + notifyOnStart: boolean; + notifyOnSuccess: boolean; + notifyOnFailure: boolean; + }> = []; + for (const notif of s.notifications) { + const destName = typeof notif === 'string' ? notif : notif.name; + const dest = await db.query.notificationDestinationsTable.findFirst({ + where: eq(notificationDestinationsTable.name, destName), + }); + if (dest) { + assignments.push({ + destinationId: dest.id, + notifyOnStart: typeof notif === 'object' ? (notif.notifyOnStart ?? true) : true, + notifyOnSuccess: typeof notif === 'object' ? (notif.notifyOnSuccess ?? true) : true, + notifyOnFailure: typeof notif === 'object' ? (notif.notifyOnFailure ?? true) : true, + }); + } else { + logger.warn(`Notification destination '${destName}' not found for schedule`); + } + } + if (assignments.length > 0) { + await notificationsServiceModule.notificationsService.updateScheduleNotifications(createdSchedule.id, assignments); + logger.info(`Assigned ${assignments.length} notification(s) to backup schedule`); + } + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Failed to assign notifications to schedule: ${err.message}`); + } + } + + } + + try { + const { authService } = await import("../auth/auth.service"); + if (configFileAdmin && configFileAdmin.username && configFileAdmin.password) { + const hasUsers = await authService.hasUsers(); + if (!hasUsers) { + const { user } = await authService.register(configFileAdmin.username, configFileAdmin.password); + logger.info(`Admin user '${configFileAdmin.username}' created from config.`); + if (configFileAdmin.recoveryKey) { + await db.update(usersTable).set({ hasDownloadedResticPassword: true }).where(eq(usersTable.id, user.id)); + } + } + } else { + logger.warn("Admin config missing required fields (username, password). 
Skipping automated admin setup."); + } + } catch (err) { + const e = err instanceof Error ? err : new Error(String(err)); + logger.error(`Automated admin setup failed: ${e.message}`); + } + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.error(`Failed to initialize from config: ${err.message}`); + } + const volumes = await db.query.volumesTable.findMany({ where: or( eq(volumesTable.status, "mounted"), diff --git a/app/server/modules/notifications/notifications.service.ts b/app/server/modules/notifications/notifications.service.ts index 4099d8b4..360bca2f 100644 --- a/app/server/modules/notifications/notifications.service.ts +++ b/app/server/modules/notifications/notifications.service.ts @@ -38,42 +38,42 @@ async function encryptSensitiveFields(config: NotificationConfig): Promise { const encryptConfig = async (config: RepositoryConfig): Promise => { const encryptedConfig: Record = { ...config }; - if (config.customPassword) { + if (config.customPassword && !cryptoUtils.isEncrypted(config.customPassword)) { encryptedConfig.customPassword = await cryptoUtils.encrypt(config.customPassword); } switch (config.backend) { case "s3": case "r2": - encryptedConfig.accessKeyId = await cryptoUtils.encrypt(config.accessKeyId); - encryptedConfig.secretAccessKey = await cryptoUtils.encrypt(config.secretAccessKey); + if (!cryptoUtils.isEncrypted(config.accessKeyId)) { + encryptedConfig.accessKeyId = await cryptoUtils.encrypt(config.accessKeyId); + } + if (!cryptoUtils.isEncrypted(config.secretAccessKey)) { + encryptedConfig.secretAccessKey = await cryptoUtils.encrypt(config.secretAccessKey); + } break; case "gcs": - encryptedConfig.credentialsJson = await cryptoUtils.encrypt(config.credentialsJson); + if (!cryptoUtils.isEncrypted(config.credentialsJson)) { + encryptedConfig.credentialsJson = await cryptoUtils.encrypt(config.credentialsJson); + } break; case "azure": - encryptedConfig.accountKey = await cryptoUtils.encrypt(config.accountKey); + if (!cryptoUtils.isEncrypted(config.accountKey)) { + encryptedConfig.accountKey = await cryptoUtils.encrypt(config.accountKey); + } break; case "rest": - if (config.username) { + if (config.username && !cryptoUtils.isEncrypted(config.username)) { encryptedConfig.username = await cryptoUtils.encrypt(config.username); } - if (config.password) { + if (config.password && !cryptoUtils.isEncrypted(config.password)) { encryptedConfig.password = await cryptoUtils.encrypt(config.password); } break; case "sftp": - encryptedConfig.privateKey = await cryptoUtils.encrypt(config.privateKey); + if (!cryptoUtils.isEncrypted(config.privateKey)) { + encryptedConfig.privateKey = await cryptoUtils.encrypt(config.privateKey); + } break; } @@ -62,7 +72,8 @@ const createRepository = async (name: string, config: RepositoryConfig, compress } const id = crypto.randomUUID(); - const shortId = generateShortId(); + + const shortId = (config.backend === "local" && config.name) ? config.name : generateShortId(); let processedConfig = config; if (config.backend === "local") { @@ -94,12 +105,23 @@ const createRepository = async (name: string, config: RepositoryConfig, compress const result = await restic .snapshots(encryptedConfig) .then(() => ({ error: null })) - .catch((error) => ({ error })); + .catch((err) => ({ error: err })); error = result.error; } else { const initResult = await restic.init(encryptedConfig); error = initResult.error; + + if (error) { + const errorStr = typeof error === "string" ? 
error : (error as Error)?.message || ""; + if (errorStr.includes("config file already exists")) { + const verifyResult = await restic + .snapshots(encryptedConfig) + .then(() => ({ error: null })) + .catch((err) => ({ error: err })); + error = verifyResult.error; + } + } } if (!error) { diff --git a/app/server/utils/crypto.ts b/app/server/utils/crypto.ts index 651bebe9..877f16a6 100644 --- a/app/server/utils/crypto.ts +++ b/app/server/utils/crypto.ts @@ -5,6 +5,10 @@ const algorithm = "aes-256-gcm" as const; const keyLength = 32; const encryptionPrefix = "encv1"; +const isEncrypted = (val?: string): boolean => { + return typeof val === "string" && val.startsWith(encryptionPrefix); +}; + /** * Given a string, encrypts it using a randomly generated salt */ @@ -56,6 +60,7 @@ const decrypt = async (encryptedData: string) => { }; export const cryptoUtils = { + isEncrypted, encrypt, decrypt, }; diff --git a/docker-compose.yml b/docker-compose.yml index 2478d6bf..6e265361 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,7 +17,7 @@ services: volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte - + - ./app:/app/app - ~/.config/rclone:/root/.config/rclone # - /run/docker/plugins:/run/docker/plugins @@ -36,8 +36,17 @@ services: - SYS_ADMIN ports: - "4096:4096" + environment: + - ACCESS_KEY_ID=your-access-key-id + - SECRET_ACCESS_KEY=your-secret-access-key + - SMB_PASSWORD=your-smb-password + - WEBDAV_PASSWORD=your-webdav-password + - GCS_CREDENTIALS=your-gcs-credentials-json + - AZURE_KEY=your-azure-key volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte:rshared + - ./zerobyte.config.json:/app/zerobyte.config.json:ro + - ./mydata:/mydata:ro - /run/docker/plugins:/run/docker/plugins - /var/run/docker.sock:/var/run/docker.sock diff --git a/zerobyte.config.json b/zerobyte.config.json new file mode 100644 index 00000000..9d6403df --- /dev/null +++ b/zerobyte.config.json @@ -0,0 +1,231 @@ +{ + "volumes": [ + { + "name": "local-volume", + "config": { + "backend": "directory", + "path": "/mydata", + "readOnly": true + } + }, + { + "name": "nfs-volume", + "config": { + "backend": "nfs", + "server": "nfs.example.com", + "exportPath": "/data", + "port": 2049, + "version": "4", + "readOnly": false + } + }, + { + "name": "smb-volume", + "config": { + "backend": "smb", + "server": "smb.example.com", + "share": "shared", + "username": "user", + "password": "${SMB_PASSWORD}", + "vers": "3.0", + "domain": "WORKGROUP", + "port": 445, + "readOnly": false + } + }, + { + "name": "webdav-volume", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "readOnly": false, + "ssl": true + } + }, + { + "name": "sftp-volume", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "password": "${SFTP_PASSWORD}", + "privateKey": "${SFTP_PRIVATE_KEY}", + "path": "/data", + "readOnly": false + } + } + ], + "repositories": [ + { + "name": "local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories" + }, + "compressionMode": "auto" + }, + { + "name": "s3-repo", + "config": { + "backend": "s3", + "bucket": "mybucket", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + }, + "compressionMode": "auto" + }, + { + "name": "gcs-repo", + "config": { + "backend": "gcs", + "bucket": "mybucket", + "projectId": "my-gcp-project", + 
"credentialsJson": "${GCS_CREDENTIALS}" + } + }, + { + "name": "azure-repo", + "config": { + "backend": "azure", + "container": "mycontainer", + "accountName": "myaccount", + "accountKey": "${AZURE_KEY}" + } + }, + { + "name": "rclone-repo", + "config": { + "backend": "rclone", + "remote": "myremote", + "path": "backups/zerobyte" + } + }, + { + "name": "webdav-repo", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "ssl": true + } + }, + { + "name": "sftp-repo", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "password": "${SFTP_PASSWORD}", + "privateKey": "${SFTP_PRIVATE_KEY}", + "path": "/backups" + } + } + ], + "backupSchedules": [ + { + "volume": "local-volume", + "repository": "local-repo", + "cronExpression": "0 2 * * *", + "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, + "includePatterns": ["important-folder"], + "excludePatterns": ["*.tmp", "*.log"], + "enabled": true, + "notifications": ["slack-alerts", "email-admin", "discord-backups", "gotify-notify", "ntfy-notify", "pushover-notify", "telegram-notify", "custom-shoutrrr"] + } + ], + "notificationDestinations": [ + { + "name": "slack-alerts", + "type": "slack", + "config": { + "webhookUrl": "${SLACK_WEBHOOK_URL}", + "channel": "#backups", + "username": "zerobyte", + "iconEmoji": ":floppy_disk:" + } + }, + { + "name": "email-admin", + "type": "email", + "config": { + "smtpHost": "smtp.example.com", + "smtpPort": 587, + "username": "admin@example.com", + "password": "${EMAIL_PASSWORD}", + "from": "zerobyte@example.com", + "to": ["admin@example.com"], + "useTLS": true + } + }, + { + "name": "discord-backups", + "type": "discord", + "config": { + "webhookUrl": "${DISCORD_WEBHOOK_URL}", + "username": "zerobyte", + "avatarUrl": "https://example.com/avatar.png", + "threadId": "1234567890" + } + }, + { + "name": "gotify-notify", + "type": "gotify", + "config": { + "serverUrl": "https://gotify.example.com", + "token": "${GOTIFY_TOKEN}", + "path": "/message", + "priority": 5 + } + }, + { + "name": "ntfy-notify", + "type": "ntfy", + "config": { + "serverUrl": "https://ntfy.example.com", + "topic": "zerobyte-backups", + "priority": "high", + "username": "ntfyuser", + "password": "${NTFY_PASSWORD}" + } + }, + { + "name": "pushover-notify", + "type": "pushover", + "config": { + "userKey": "${PUSHOVER_USER_KEY}", + "apiToken": "${PUSHOVER_API_TOKEN}", + "devices": "phone,tablet", + "priority": 1 + } + }, + { + "name": "telegram-notify", + "type": "telegram", + "config": { + "botToken": "${TELEGRAM_BOT_TOKEN}", + "chatId": "123456789" + } + }, + { + "name": "custom-shoutrrr", + "type": "custom", + "config": { + "shoutrrrUrl": "${SHOUTRRR_URL}" + } + } + ], + "admin": { + "username": "admin", + "password": "${ADMIN_PASSWORD}", + "recoveryKey": "${RECOVERY_KEY}" + } +} From 8f540164dfc96849f8b3462e97072efa39c004d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Mon, 1 Dec 2025 12:23:47 +0100 Subject: [PATCH 02/30] avoid conflict with export PR --- app/server/utils/crypto.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/server/utils/crypto.ts b/app/server/utils/crypto.ts index 877f16a6..1ed05d2a 100644 --- a/app/server/utils/crypto.ts +++ b/app/server/utils/crypto.ts @@ -60,7 +60,7 @@ const decrypt = async (encryptedData: string) => { }; export const cryptoUtils = { - isEncrypted, encrypt, decrypt, + 
isEncrypted, }; From d76734634556543f82604263359b17cb443a5944 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Mon, 1 Dec 2025 12:29:30 +0100 Subject: [PATCH 03/30] admin passwordHash import support --- README.md | 19 +++++++++++++++++-- app/server/modules/lifecycle/startup.ts | 23 ++++++++++++++++++----- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index a40507bd..30b9fc28 100644 --- a/README.md +++ b/README.md @@ -336,7 +336,7 @@ Secrets/credentials in the config file can reference environment variables using ##### Admin Setup (Automated) -- **Example:** +- **Example (new instance):** ```json { "admin": { @@ -346,11 +346,26 @@ Secrets/credentials in the config file can reference environment variables using } } ``` + +- **Example (migration from another instance):** + ```json + { + "admin": { + "username": "admin", + "passwordHash": "$argon2id$v=19$m=19456,t=2,p=1$...", + "recoveryKey": "${RECOVERY_KEY}" + } + } + ``` + - **Fields:** - `username`: Admin username to create on first startup - - `password`: Admin password (can use `${ENV_VAR}`) + - `password`: Admin password for new instances (can use `${ENV_VAR}`) + - `passwordHash`: Pre-hashed password for migration (exported from another instance) - `recoveryKey`: Optional recovery key (can use `${ENV_VAR}`) - if provided, the UI prompt to download recovery key will be skipped +> **Note:** Use either `password` OR `passwordHash`, not both. The `passwordHash` option is useful when migrating from another Zerobyte instance using an exported config with `includePasswordHash=true`. + **On first startup, Zerobyte will automatically create the admin user from the config file.** > **⚠️ About the Recovery Key** diff --git a/app/server/modules/lifecycle/startup.ts b/app/server/modules/lifecycle/startup.ts index 77538b00..432ec81c 100644 --- a/app/server/modules/lifecycle/startup.ts +++ b/app/server/modules/lifecycle/startup.ts @@ -176,17 +176,30 @@ export const startup = async () => { try { const { authService } = await import("../auth/auth.service"); - if (configFileAdmin && configFileAdmin.username && configFileAdmin.password) { + if (configFileAdmin && configFileAdmin.username && (configFileAdmin.password || configFileAdmin.passwordHash)) { const hasUsers = await authService.hasUsers(); if (!hasUsers) { - const { user } = await authService.register(configFileAdmin.username, configFileAdmin.password); - logger.info(`Admin user '${configFileAdmin.username}' created from config.`); + let userId: number; + if (configFileAdmin.passwordHash) { + // Import with existing password hash (migration from another instance) + const [user] = await db.insert(usersTable).values({ + username: configFileAdmin.username, + passwordHash: configFileAdmin.passwordHash, + }).returning(); + userId = user.id; + logger.info(`Admin user '${configFileAdmin.username}' imported with password hash from config.`); + } else { + // Create new user with plaintext password + const { user } = await authService.register(configFileAdmin.username, configFileAdmin.password); + userId = user.id; + logger.info(`Admin user '${configFileAdmin.username}' created from config.`); + } if (configFileAdmin.recoveryKey) { - await db.update(usersTable).set({ hasDownloadedResticPassword: true }).where(eq(usersTable.id, user.id)); + await db.update(usersTable).set({ hasDownloadedResticPassword: true }).where(eq(usersTable.id, userId)); } } } else { - logger.warn("Admin config missing required fields (username, password). 
Skipping automated admin setup."); + logger.warn("Admin config missing required fields (username, password or passwordHash). Skipping automated admin setup."); } } catch (err) { const e = err instanceof Error ? err : new Error(String(err)); From 34123becd040bfbdcb2ae9eccfa5631803086e48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Tue, 2 Dec 2025 09:59:04 +0100 Subject: [PATCH 04/30] examples corrected --- README.md | 20 ++++++++++---------- zerobyte.config.json | 34 ++++++++++------------------------ 2 files changed, 20 insertions(+), 34 deletions(-) diff --git a/README.md b/README.md index 30b9fc28..9840fbb0 100644 --- a/README.md +++ b/README.md @@ -229,8 +229,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "slack-alerts", - "type": "slack", "config": { + "type": "slack", "webhookUrl": "${SLACK_WEBHOOK_URL}", "channel": "#backups", "username": "zerobyte", @@ -242,8 +242,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "email-admin", - "type": "email", "config": { + "type": "email", "smtpHost": "smtp.example.com", "smtpPort": 587, "username": "admin@example.com", @@ -258,8 +258,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "discord-backups", - "type": "discord", "config": { + "type": "discord", "webhookUrl": "${DISCORD_WEBHOOK_URL}", "username": "zerobyte", "avatarUrl": "https://example.com/avatar.png", @@ -271,8 +271,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "gotify-notify", - "type": "gotify", "config": { + "type": "gotify", "serverUrl": "https://gotify.example.com", "token": "${GOTIFY_TOKEN}", "path": "/message", @@ -284,8 +284,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "ntfy-notify", - "type": "ntfy", "config": { + "type": "ntfy", "serverUrl": "https://ntfy.example.com", "topic": "zerobyte-backups", "priority": "high", @@ -298,8 +298,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "pushover-notify", - "type": "pushover", "config": { + "type": "pushover", "userKey": "${PUSHOVER_USER_KEY}", "apiToken": "${PUSHOVER_API_TOKEN}", "devices": "phone,tablet", @@ -311,8 +311,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "telegram-notify", - "type": "telegram", "config": { + "type": "telegram", "botToken": "${TELEGRAM_BOT_TOKEN}", "chatId": "123456789" } @@ -322,8 +322,8 @@ Secrets/credentials in the config file can reference environment variables using ```json { "name": "custom-shoutrrr", - "type": "custom", "config": { + "type": "custom", "shoutrrrUrl": "${SHOUTRRR_URL}" } } @@ -331,8 +331,8 @@ Secrets/credentials in the config file can reference environment variables using - **Fields:** - `name`: Unique name for the notification config - - `type`: Notification type (email, slack, discord, gotify, ntfy, pushover, telegram, custom) - - `config`: Type-specific config, secrets via `${ENV_VAR}` + - `config.type`: Notification type (email, slack, discord, gotify, ntfy, pushover, telegram, custom) + - `config`: Type-specific config with `type` field, secrets via `${ENV_VAR}` ##### Admin Setup (Automated) diff --git a/zerobyte.config.json b/zerobyte.config.json index 9d6403df..1323b939 100644 --- a/zerobyte.config.json +++ b/zerobyte.config.json @@ -45,19 +45,6 @@ 
"readOnly": false, "ssl": true } - }, - { - "name": "sftp-volume", - "config": { - "backend": "sftp", - "host": "sftp.example.com", - "port": 22, - "username": "user", - "password": "${SFTP_PASSWORD}", - "privateKey": "${SFTP_PRIVATE_KEY}", - "path": "/data", - "readOnly": false - } } ], "repositories": [ @@ -123,8 +110,7 @@ "backend": "sftp", "host": "sftp.example.com", "port": 22, - "username": "user", - "password": "${SFTP_PASSWORD}", + "user": "sftpuser", "privateKey": "${SFTP_PRIVATE_KEY}", "path": "/backups" } @@ -139,14 +125,14 @@ "includePatterns": ["important-folder"], "excludePatterns": ["*.tmp", "*.log"], "enabled": true, - "notifications": ["slack-alerts", "email-admin", "discord-backups", "gotify-notify", "ntfy-notify", "pushover-notify", "telegram-notify", "custom-shoutrrr"] + "notifications": ["slack-alerts"] } ], "notificationDestinations": [ { "name": "slack-alerts", - "type": "slack", "config": { + "type": "slack", "webhookUrl": "${SLACK_WEBHOOK_URL}", "channel": "#backups", "username": "zerobyte", @@ -155,8 +141,8 @@ }, { "name": "email-admin", - "type": "email", "config": { + "type": "email", "smtpHost": "smtp.example.com", "smtpPort": 587, "username": "admin@example.com", @@ -168,8 +154,8 @@ }, { "name": "discord-backups", - "type": "discord", "config": { + "type": "discord", "webhookUrl": "${DISCORD_WEBHOOK_URL}", "username": "zerobyte", "avatarUrl": "https://example.com/avatar.png", @@ -178,8 +164,8 @@ }, { "name": "gotify-notify", - "type": "gotify", "config": { + "type": "gotify", "serverUrl": "https://gotify.example.com", "token": "${GOTIFY_TOKEN}", "path": "/message", @@ -188,8 +174,8 @@ }, { "name": "ntfy-notify", - "type": "ntfy", "config": { + "type": "ntfy", "serverUrl": "https://ntfy.example.com", "topic": "zerobyte-backups", "priority": "high", @@ -199,8 +185,8 @@ }, { "name": "pushover-notify", - "type": "pushover", "config": { + "type": "pushover", "userKey": "${PUSHOVER_USER_KEY}", "apiToken": "${PUSHOVER_API_TOKEN}", "devices": "phone,tablet", @@ -209,16 +195,16 @@ }, { "name": "telegram-notify", - "type": "telegram", "config": { + "type": "telegram", "botToken": "${TELEGRAM_BOT_TOKEN}", "chatId": "123456789" } }, { "name": "custom-shoutrrr", - "type": "custom", "config": { + "type": "custom", "shoutrrrUrl": "${SHOUTRRR_URL}" } } From a4d1cc079111b973d67c3e63775f6afedecfc603 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Tue, 2 Dec 2025 10:07:14 +0100 Subject: [PATCH 05/30] enhance config interpolation and validation for admin user setup --- app/server/modules/lifecycle/startup.ts | 80 +++++++++++++++---------- 1 file changed, 49 insertions(+), 31 deletions(-) diff --git a/app/server/modules/lifecycle/startup.ts b/app/server/modules/lifecycle/startup.ts index 432ec81c..45d2568c 100644 --- a/app/server/modules/lifecycle/startup.ts +++ b/app/server/modules/lifecycle/startup.ts @@ -12,39 +12,45 @@ import { BackupExecutionJob } from "../../jobs/backup-execution"; import { CleanupSessionsJob } from "../../jobs/cleanup-sessions"; export const startup = async () => { - let configFileVolumes = []; - let configFileRepositories = []; - let configFileBackupSchedules = []; - let configFileNotificationDestinations = []; - let configFileAdmin = null; - try { - const configPath = process.env.ZEROBYTE_CONFIG_PATH || "zerobyte.config.json"; - const fs = await import("node:fs/promises"); - const path = await import("node:path"); - const configFullPath = path.resolve(process.cwd(), configPath); - if (await fs.stat(configFullPath).then(() => 
true, () => false)) { - const raw = await fs.readFile(configFullPath, "utf-8"); - const config = JSON.parse(raw); + let configFileVolumes = []; + let configFileRepositories = []; + let configFileBackupSchedules = []; + let configFileNotificationDestinations = []; + let configFileAdmin = null; + try { + const configPath = process.env.ZEROBYTE_CONFIG_PATH || "zerobyte.config.json"; + const fs = await import("node:fs/promises"); + const path = await import("node:path"); + const configFullPath = path.resolve(process.cwd(), configPath); + if (await fs.stat(configFullPath).then(() => true, () => false)) { + const raw = await fs.readFile(configFullPath, "utf-8"); + const config = JSON.parse(raw); - function interpolate(obj) { - if (typeof obj === "string") { - return obj.replace(/\$\{([^}]+)\}/g, (_, v) => process.env[v] || ""); - } else if (Array.isArray(obj)) { - return obj.map(interpolate); - } else if (obj && typeof obj === "object") { - return Object.fromEntries(Object.entries(obj).map(([k, v]) => [k, interpolate(v)])); - } - return obj; + function interpolate(obj) { + if (typeof obj === "string") { + return obj.replace(/\$\{([^}]+)\}/g, (_, v) => { + if (process.env[v] === undefined) { + logger.warn(`Environment variable '${v}' is not defined. Replacing with empty string.`); + return ""; + } + return process.env[v]; + }); + } else if (Array.isArray(obj)) { + return obj.map(interpolate); + } else if (obj && typeof obj === "object") { + return Object.fromEntries(Object.entries(obj).map(([k, v]) => [k, interpolate(v)])); } - configFileVolumes = interpolate(config.volumes || []); - configFileRepositories = interpolate(config.repositories || []); - configFileBackupSchedules = interpolate(config.backupSchedules || []); - configFileNotificationDestinations = interpolate(config.notificationDestinations || []); - configFileAdmin = interpolate(config.admin || null); + return obj; } - } catch (e) { - logger.warn(`No config file loaded or error parsing config: ${e.message}`); + configFileVolumes = interpolate(config.volumes || []); + configFileRepositories = interpolate(config.repositories || []); + configFileBackupSchedules = interpolate(config.backupSchedules || []); + configFileNotificationDestinations = interpolate(config.notificationDestinations || []); + configFileAdmin = interpolate(config.admin || null); } + } catch (e) { + logger.warn(`No config file loaded or error parsing config: ${e.message}`); + } await Scheduler.start(); await Scheduler.clear(); @@ -53,7 +59,15 @@ export const startup = async () => { const fs = await import("node:fs/promises"); const { RESTIC_PASS_FILE } = await import("../../core/constants.js"); if (configFileAdmin && configFileAdmin.recoveryKey) { - await fs.writeFile(RESTIC_PASS_FILE, configFileAdmin.recoveryKey, { mode: 0o600 }); + const recoveryKey = configFileAdmin.recoveryKey; + if ( + typeof recoveryKey !== "string" || + recoveryKey.length !== 64 || + !/^[a-fA-F0-9]{64}$/.test(recoveryKey) + ) { + throw new Error("Recovery key must be a 64-character hex string"); + } + await fs.writeFile(RESTIC_PASS_FILE, recoveryKey, { mode: 0o600 }); logger.info(`Recovery key written from config to ${RESTIC_PASS_FILE}`); } } catch (err) { @@ -177,6 +191,10 @@ export const startup = async () => { try { const { authService } = await import("../auth/auth.service"); if (configFileAdmin && configFileAdmin.username && (configFileAdmin.password || configFileAdmin.passwordHash)) { + if (configFileAdmin.password && configFileAdmin.passwordHash) { + logger.error("Config error: Both 
'password' and 'passwordHash' provided for admin user. Use only one."); + throw new Error("Invalid admin configuration"); + } const hasUsers = await authService.hasUsers(); if (!hasUsers) { let userId: number; @@ -198,7 +216,7 @@ export const startup = async () => { await db.update(usersTable).set({ hasDownloadedResticPassword: true }).where(eq(usersTable.id, userId)); } } - } else { + } else if (configFileAdmin) { logger.warn("Admin config missing required fields (username, password or passwordHash). Skipping automated admin setup."); } } catch (err) { From 3c7dd856df1927936094cc75948ebc7adc91636f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Tue, 2 Dec 2025 12:20:25 +0100 Subject: [PATCH 06/30] add missing env vars to docker-compose.yml --- docker-compose.yml | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 6e265361..68ace2c5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -37,12 +37,29 @@ services: ports: - "4096:4096" environment: + # Cloud storage credentials - ACCESS_KEY_ID=your-access-key-id - SECRET_ACCESS_KEY=your-secret-access-key - - SMB_PASSWORD=your-smb-password - - WEBDAV_PASSWORD=your-webdav-password - GCS_CREDENTIALS=your-gcs-credentials-json - AZURE_KEY=your-azure-key + # Volume credentials + - SMB_PASSWORD=your-smb-password + - WEBDAV_PASSWORD=your-webdav-password + # SFTP credentials (for repositories) + - SFTP_PRIVATE_KEY=your-sftp-private-key + # Notification credentials + - SLACK_WEBHOOK_URL=your-slack-webhook-url + - EMAIL_PASSWORD=your-email-password + - DISCORD_WEBHOOK_URL=your-discord-webhook-url + - GOTIFY_TOKEN=your-gotify-token + - NTFY_PASSWORD=your-ntfy-password + - PUSHOVER_USER_KEY=your-pushover-user-key + - PUSHOVER_API_TOKEN=your-pushover-api-token + - TELEGRAM_BOT_TOKEN=your-telegram-bot-token + - SHOUTRRR_URL=your-shoutrrr-url + # Admin credentials + - ADMIN_PASSWORD=your-admin-password + - RECOVERY_KEY=your-64-char-hex-recovery-key volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte:rshared From 1de82fa8633b8d53a85a94e445a7aefa8fd0de12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Tue, 2 Dec 2025 12:55:35 +0100 Subject: [PATCH 07/30] better handle importing of existing local repository config + documentation --- README.md | 16 +++++++++++++++- .../modules/repositories/repositories.service.ts | 15 ++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9840fbb0..dde16c55 100644 --- a/README.md +++ b/README.md @@ -151,7 +151,21 @@ Secrets/credentials in the config file can reference environment variables using "compressionMode": "auto" } ``` - > **Note for importing existing local repositories:** If you're importing an existing repository (e.g., from a backup or migration), include the `name` field in `config` with the original subfolder name. The actual restic repo is stored at `{path}/{name}`. You can find this value in an exported config under `repositories[].config.name`. + > **Note for importing existing local repositories:** If you're migrating an existing repository (e.g., from a backup or another Zerobyte instance), include the `name` field in `config` with the original subfolder name, and set `isExistingRepository: true`. The actual restic repo is stored at `{path}/{name}`. 
+ > + > **Example (migration):** + > ```json + > { + > "name": "my-local-repo", + > "config": { + > "backend": "local", + > "path": "/var/lib/zerobyte/repositories", + > "name": "abc123", + > "isExistingRepository": true + > } + > } + > ``` + > You can find the `config.name` value in an exported config under `repositories[].config.name`. This value must be unique across all repositories. - **S3-Compatible** ```json diff --git a/app/server/modules/repositories/repositories.service.ts b/app/server/modules/repositories/repositories.service.ts index d7dde170..03bee4cc 100644 --- a/app/server/modules/repositories/repositories.service.ts +++ b/app/server/modules/repositories/repositories.service.ts @@ -73,7 +73,20 @@ const createRepository = async (name: string, config: RepositoryConfig, compress const id = crypto.randomUUID(); - const shortId = (config.backend === "local" && config.name) ? config.name : generateShortId(); + // Determine shortId: use provided config.name for local repo migrations, otherwise generate + let shortId: string; + if (config.backend === "local" && config.name?.length) { + // User provided a name (migration scenario) - check for conflicts + shortId = config.name; + const existingByShortId = await db.query.repositoriesTable.findFirst({ + where: eq(repositoriesTable.shortId, shortId), + }); + if (existingByShortId) { + throw new ConflictError(`A repository with shortId '${shortId}' already exists. The shortId is used as the subdirectory name for local repositories.`); + } + } else { + shortId = generateShortId(); + } let processedConfig = config; if (config.backend === "local") { From abe6eb79d83e6198985408a9e1430f6fe0cd9a4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Wed, 3 Dec 2025 23:25:53 +0100 Subject: [PATCH 08/30] removed duplicate isencrypted check as its handled inside encrypt already --- .../notifications/notifications.service.ts | 16 ++++++++-------- .../modules/repositories/repositories.service.ts | 16 +++------------- app/server/utils/crypto.ts | 5 ----- 3 files changed, 11 insertions(+), 26 deletions(-) diff --git a/app/server/modules/notifications/notifications.service.ts b/app/server/modules/notifications/notifications.service.ts index 360bca2f..4099d8b4 100644 --- a/app/server/modules/notifications/notifications.service.ts +++ b/app/server/modules/notifications/notifications.service.ts @@ -38,42 +38,42 @@ async function encryptSensitiveFields(config: NotificationConfig): Promise { const encryptConfig = async (config: RepositoryConfig): Promise => { const encryptedConfig: Record = { ...config }; - if (config.customPassword && !cryptoUtils.isEncrypted(config.customPassword)) { + if (config.customPassword) { encryptedConfig.customPassword = await cryptoUtils.encrypt(config.customPassword); } switch (config.backend) { case "s3": case "r2": - if (!cryptoUtils.isEncrypted(config.accessKeyId)) { encryptedConfig.accessKeyId = await cryptoUtils.encrypt(config.accessKeyId); - } - if (!cryptoUtils.isEncrypted(config.secretAccessKey)) { encryptedConfig.secretAccessKey = await cryptoUtils.encrypt(config.secretAccessKey); - } break; case "gcs": - if (!cryptoUtils.isEncrypted(config.credentialsJson)) { encryptedConfig.credentialsJson = await cryptoUtils.encrypt(config.credentialsJson); - } break; case "azure": - if (!cryptoUtils.isEncrypted(config.accountKey)) { encryptedConfig.accountKey = await cryptoUtils.encrypt(config.accountKey); - } break; case "rest": - if (config.username && !cryptoUtils.isEncrypted(config.username)) { + if 
(config.username) { encryptedConfig.username = await cryptoUtils.encrypt(config.username); } - if (config.password && !cryptoUtils.isEncrypted(config.password)) { + if (config.password) { encryptedConfig.password = await cryptoUtils.encrypt(config.password); } break; case "sftp": - if (!cryptoUtils.isEncrypted(config.privateKey)) { encryptedConfig.privateKey = await cryptoUtils.encrypt(config.privateKey); - } break; } diff --git a/app/server/utils/crypto.ts b/app/server/utils/crypto.ts index 1ed05d2a..651bebe9 100644 --- a/app/server/utils/crypto.ts +++ b/app/server/utils/crypto.ts @@ -5,10 +5,6 @@ const algorithm = "aes-256-gcm" as const; const keyLength = 32; const encryptionPrefix = "encv1"; -const isEncrypted = (val?: string): boolean => { - return typeof val === "string" && val.startsWith(encryptionPrefix); -}; - /** * Given a string, encrypts it using a randomly generated salt */ @@ -62,5 +58,4 @@ const decrypt = async (encryptedData: string) => { export const cryptoUtils = { encrypt, decrypt, - isEncrypted, }; From 1d4263046c0105ed1fa6ef10d13293fd9e107008 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Thu, 4 Dec 2025 13:31:08 +0100 Subject: [PATCH 09/30] enhance repository creation logic to check for existing repositories and handle errors appropriately --- .../repositories/repositories.service.ts | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/app/server/modules/repositories/repositories.service.ts b/app/server/modules/repositories/repositories.service.ts index 887fc018..2b68d351 100644 --- a/app/server/modules/repositories/repositories.service.ts +++ b/app/server/modules/repositories/repositories.service.ts @@ -25,14 +25,14 @@ const encryptConfig = async (config: RepositoryConfig): Promise true) + .catch(() => false); + + if (repoExists && !config.isExistingRepository) { + throw new ConflictError( + `A restic repository already exists at this location. ` + + `If you want to use the existing repository, set "isExistingRepository": true in the config.` + ); + } + + if (!repoExists && config.isExistingRepository) { + throw new InternalServerError( + `Cannot access existing repository. Verify the path/credentials are correct and the repository exists.` + ); + } + const [created] = await db .insert(repositoriesTable) .values({ @@ -104,27 +122,9 @@ const createRepository = async (name: string, config: RepositoryConfig, compress let error: string | null = null; - if (config.isExistingRepository) { - const result = await restic - .snapshots(encryptedConfig) - .then(() => ({ error: null })) - .catch((err) => ({ error: err })); - - error = result.error; - } else { + if (!repoExists) { const initResult = await restic.init(encryptedConfig); error = initResult.error; - - if (error) { - const errorStr = typeof error === "string" ? error : (error as Error)?.message || ""; - if (errorStr.includes("config file already exists")) { - const verifyResult = await restic - .snapshots(encryptedConfig) - .then(() => ({ error: null })) - .catch((err) => ({ error: err })); - error = verifyResult.error; - } - } } if (!error) { From 7f124f81cbb8230211789da528ac8f5749976839 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Thu, 18 Dec 2025 15:31:27 +0100 Subject: [PATCH 10/30] refactor the import logic - Updated README to reflect changes in config file structure and usage. - Modified error messages for user registration to be more generic. 
- Cleaned up startup logic to utilize the new config import functionality when requested.
- Adjusted JSON config structure to support user definitions.
---
 README.md                                     |  51 ++-
 app/server/modules/auth/auth.service.ts       |   2 +-
 app/server/modules/backups/backups.service.ts |  11 -
 app/server/modules/lifecycle/config-import.ts | 404 ++++++++++++++++++
 app/server/modules/lifecycle/startup.ts       | 215 +---------
 docker-compose.yml                            |   2 +-
 zerobyte.config.json                          |  13 +-
 7 files changed, 455 insertions(+), 243 deletions(-)
 create mode 100644 app/server/modules/lifecycle/config-import.ts

diff --git a/README.md b/README.md
index 55bf3cdf..e07807d0 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,9 @@ In order to run Zerobyte, you need to have Docker and Docker Compose installed o
 
 ### Configure Zerobyte via Config File
 
-You can pre-configure backup sources (volumes), destinations (repositories), backup schedules, notification destinations and admin user using a config file (`zerobyte.config.json` by default (mounted in /app dir), or set `ZEROBYTE_CONFIG_PATH`).
+You can pre-configure backup sources (volumes), destinations (repositories), backup schedules, notification destinations, and initial users using a config file (by default `zerobyte.config.json` in the `/app` directory; override the path with `ZEROBYTE_CONFIG_PATH`).
+
+Config import is opt-in. Enable it by setting `ZEROBYTE_CONFIG_IMPORT=true`.
 
 Secrets/credentials in the config file can reference environment variables using `${VAR_NAME}` syntax for secure injection.
 
@@ -58,6 +60,7 @@ Secrets/credentials in the config file can reference environment variables using
 ```json
 {
+  "recoveryKey": "${RECOVERY_KEY}",
   "volumes": [
     // Array of volume objects. Each must have a unique "name" and a "config" matching one of the types below.
   ],
   "repositories": [
     // Array of repository objects. Each must have a unique "name" and a "config" matching one of the types below.
     // Optionally, "compressionMode" ("auto", "off", "max")
   ],
   "backupSchedules": [
     // Array of backup schedule objects as described below.
   ],
   "notificationDestinations": [
     // Array of notification destination objects as described below.
+  ],
+  "users": [
+    // Array of user objects. Each must have a unique "username".
+    // Note: Zerobyte currently supports a single user; only the first entry is applied.
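+    // Example: { "username": "my-user", "password": "${ADMIN_PASSWORD}" }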
] } ``` @@ -215,6 +222,7 @@ Secrets/credentials in the config file can reference environment variables using - **Example:** ```json { + "name": "local-volume-local-repo", "volume": "local-volume", "repository": "local-repo", "cronExpression": "0 2 * * *", @@ -226,6 +234,7 @@ Secrets/credentials in the config file can reference environment variables using } ``` - **Fields:** + - `name`: Unique name of the schedule - `volume`: Name of the source volume - `repository`: Name of the destination repository - `cronExpression`: Cron string for schedule @@ -234,7 +243,7 @@ Secrets/credentials in the config file can reference environment variables using - `enabled`: Boolean - `notifications`: Array of notification destination names (strings) or detailed objects: - Simple: `["slack-alerts", "email-admin"]` - - Detailed: `[{"name": "slack-alerts", "notifyOnStart": false, "notifyOnSuccess": true, "notifyOnFailure": true}]` + - Detailed: `[{"name": "slack-alerts", "notifyOnStart": false, "notifyOnSuccess": true, "notifyOnWarning": true, "notifyOnFailure": true}]` ##### Notification Destinations @@ -348,39 +357,47 @@ Secrets/credentials in the config file can reference environment variables using - `config.type`: Notification type (email, slack, discord, gotify, ntfy, pushover, telegram, custom) - `config`: Type-specific config with `type` field, secrets via `${ENV_VAR}` -##### Admin Setup (Automated) +##### User Setup (Automated) + +Zerobyte currently supports a **single user**. If multiple entries are provided in `users[]`, only the first one will be applied. - **Example (new instance):** ```json { - "admin": { - "username": "admin", - "password": "${ADMIN_PASSWORD}", - "recoveryKey": "${RECOVERY_KEY}" - } + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "my-user", + "password": "${ADMIN_PASSWORD}" + } + ] } ``` - **Example (migration from another instance):** ```json { - "admin": { - "username": "admin", - "passwordHash": "$argon2id$v=19$m=19456,t=2,p=1$...", - "recoveryKey": "${RECOVERY_KEY}" - } + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "my-user", + "passwordHash": "$argon2id$v=19$m=19456,t=2,p=1$..." + } + ] } ``` - **Fields:** - - `username`: Admin username to create on first startup - - `password`: Admin password for new instances (can use `${ENV_VAR}`) - - `passwordHash`: Pre-hashed password for migration (exported from another instance) - `recoveryKey`: Optional recovery key (can use `${ENV_VAR}`) - if provided, the UI prompt to download recovery key will be skipped + - `users[]`: List of users to create on first startup (create-only). Only the first user is applied. + - `users[].username`: Username + - `users[].password`: Plaintext password for new instances (can use `${ENV_VAR}`) + - `users[].passwordHash`: Pre-hashed password for migration (exported from another instance) + - `users[].hasDownloadedResticPassword`: Optional boolean; defaults to `true` when `recoveryKey` is provided > **Note:** Use either `password` OR `passwordHash`, not both. The `passwordHash` option is useful when migrating from another Zerobyte instance using an exported config with `includePasswordHash=true`. 
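+
+For example, a minimal `docker-compose.yml` fragment that enables config import (a sketch; substitute your own secrets and paths):
+
+```yaml
+services:
+  zerobyte:
+    environment:
+      - ZEROBYTE_CONFIG_IMPORT=true
+      - ADMIN_PASSWORD=your-admin-password
+      - RECOVERY_KEY=your-64-char-hex-recovery-key
+    volumes:
+      - ./zerobyte.config.json:/app/zerobyte.config.json:ro
+```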
-**On first startup, Zerobyte will automatically create the admin user from the config file.** +**On first startup, Zerobyte will automatically create users from the config file.** > **⚠️ About the Recovery Key** > diff --git a/app/server/modules/auth/auth.service.ts b/app/server/modules/auth/auth.service.ts index 4a97c0df..19cadff3 100644 --- a/app/server/modules/auth/auth.service.ts +++ b/app/server/modules/auth/auth.service.ts @@ -13,7 +13,7 @@ export class AuthService { const [existingUser] = await db.select().from(usersTable); if (existingUser) { - throw new Error("Admin user already exists"); + throw new Error("A user already exists"); } const passwordHash = await Bun.password.hash(password, { diff --git a/app/server/modules/backups/backups.service.ts b/app/server/modules/backups/backups.service.ts index 6a7e5952..584fada7 100644 --- a/app/server/modules/backups/backups.service.ts +++ b/app/server/modules/backups/backups.service.ts @@ -88,17 +88,6 @@ const createSchedule = async (data: CreateBackupScheduleBody) => { throw new NotFoundError("Repository not found"); } - const existingSchedule = await db.query.backupSchedulesTable.findFirst({ - where: and( - eq(backupSchedulesTable.volumeId, volume.id), - eq(backupSchedulesTable.repositoryId, repository.id) - ), - }); - - if (existingSchedule) { - throw new ConflictError(`A backup schedule for volume '${volume.name}' and repository '${repository.name}' already exists`); - } - const nextBackupAt = calculateNextRun(data.cronExpression); const [newSchedule] = await db diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts new file mode 100644 index 00000000..d5e9ae09 --- /dev/null +++ b/app/server/modules/lifecycle/config-import.ts @@ -0,0 +1,404 @@ +import { eq } from "drizzle-orm"; +import slugify from "slugify"; +import { db } from "../../db/db"; +import { usersTable } from "../../db/schema"; +import { logger } from "../../utils/logger"; +import { volumeService } from "../volumes/volume.service"; +import type { NotificationConfig } from "~/schemas/notifications"; +import type { RepositoryConfig } from "~/schemas/restic"; +import type { BackendConfig } from "~/schemas/volumes"; + +const isRecord = (value: unknown): value is Record => typeof value === "object" && value !== null; + +const asStringArray = (value: unknown): string[] => { + if (!Array.isArray(value)) return []; + return value.filter((item): item is string => typeof item === "string"); +}; + +type RetentionPolicy = { + keepLast?: number; + keepHourly?: number; + keepDaily?: number; + keepWeekly?: number; + keepMonthly?: number; + keepYearly?: number; + keepWithinDuration?: string; +}; + +type ImportConfig = { + volumes: unknown[]; + repositories: unknown[]; + backupSchedules: unknown[]; + notificationDestinations: unknown[]; + users: unknown[]; + recoveryKey: string | null; +}; + +function interpolateEnvVars(value: unknown): unknown { + if (typeof value === "string") { + return value.replace(/\$\{([^}]+)\}/g, (_, v) => { + if (process.env[v] === undefined) { + logger.warn(`Environment variable '${v}' is not defined. 
Replacing with empty string.`);
+        return "";
+      }
+      return process.env[v];
+    });
+  }
+  if (Array.isArray(value)) {
+    return value.map(interpolateEnvVars);
+  }
+  if (value && typeof value === "object") {
+    return Object.fromEntries(Object.entries(value).map(([k, v]) => [k, interpolateEnvVars(v)]));
+  }
+  return value;
+}
+
+async function loadConfigFromFile(): Promise<unknown> {
+  try {
+    const configPath = process.env.ZEROBYTE_CONFIG_PATH || "zerobyte.config.json";
+    const fs = await import("node:fs/promises");
+    const path = await import("node:path");
+    const configFullPath = path.resolve(process.cwd(), configPath);
+    try {
+      const raw = await fs.readFile(configFullPath, "utf-8");
+      return JSON.parse(raw);
+    } catch (error) {
+      if (isRecord(error) && error.code === "ENOENT") return null;
+      throw error;
+    }
+  } catch (error) {
+    const err = error instanceof Error ? error : new Error(String(error));
+    logger.warn(`No config file loaded or error parsing config: ${err.message}`);
+    return null;
+  }
+}
+
+function parseImportConfig(configRaw: unknown): ImportConfig {
+  const root = isRecord(configRaw) ? configRaw : {};
+  const config = isRecord(root.config) ? (root.config as Record<string, unknown>) : root;
+
+  const volumes = interpolateEnvVars(config.volumes || []);
+  const repositories = interpolateEnvVars(config.repositories || []);
+  const backupSchedules = interpolateEnvVars(config.backupSchedules || []);
+  const notificationDestinations = interpolateEnvVars(config.notificationDestinations || []);
+  const users = interpolateEnvVars(config.users || []);
+  const recoveryKeyRaw = interpolateEnvVars(config.recoveryKey || null);
+
+  return {
+    volumes: Array.isArray(volumes) ? volumes : [],
+    repositories: Array.isArray(repositories) ? repositories : [],
+    backupSchedules: Array.isArray(backupSchedules) ? backupSchedules : [],
+    notificationDestinations: Array.isArray(notificationDestinations) ? notificationDestinations : [],
+    users: Array.isArray(users) ? users : [],
+    recoveryKey: typeof recoveryKeyRaw === "string" ? recoveryKeyRaw : null,
+  };
+}
+
+async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise<void> {
+  try {
+    const fs = await import("node:fs/promises");
+    const { RESTIC_PASS_FILE } = await import("../../core/constants.js");
+    if (!recoveryKey) return;
+
+    if (typeof recoveryKey !== "string" || recoveryKey.length !== 64 || !/^[a-fA-F0-9]{64}$/.test(recoveryKey)) {
+      throw new Error("Recovery key must be a 64-character hex string");
+    }
+    const passFileExists = await fs.stat(RESTIC_PASS_FILE).then(
+      () => true,
+      () => false,
+    );
+    if (passFileExists) {
+      logger.info(`Restic passfile already exists at ${RESTIC_PASS_FILE}; skipping config recovery key write`);
+      return;
+    }
+    await fs.writeFile(RESTIC_PASS_FILE, recoveryKey, { mode: 0o600 });
+    logger.info(`Recovery key written from config to ${RESTIC_PASS_FILE}`);
+  } catch (err) {
+    const e = err instanceof Error ? err : new Error(String(err));
+    logger.error(`Failed to write recovery key from config: ${e.message}`);
+  }
+}
+
+async function importVolumes(volumes: unknown[]): Promise<void> {
+  for (const v of volumes) {
+    try {
+      if (!isRecord(v) || typeof v.name !== "string" || !isRecord(v.config) || typeof v.config.backend !== "string") {
+        throw new Error("Invalid volume entry");
+      }
+      await volumeService.createVolume(v.name, v.config as BackendConfig);
+      logger.info(`Initialized volume from config: ${v.name}`);
+    } catch (e) {
+      const err = e instanceof Error ?
e : new Error(String(e)); + logger.warn(`Volume not created: ${err.message}`); + } + } +} + +async function importRepositories(repositories: unknown[]): Promise { + const repoServiceModule = await import("../repositories/repositories.service"); + for (const r of repositories) { + try { + if (!isRecord(r) || typeof r.name !== "string" || !isRecord(r.config) || typeof r.config.backend !== "string") { + throw new Error("Invalid repository entry"); + } + const compressionMode = + r.compressionMode === "auto" || r.compressionMode === "off" || r.compressionMode === "max" + ? r.compressionMode + : undefined; + await repoServiceModule.repositoriesService.createRepository( + r.name, + r.config as RepositoryConfig, + compressionMode, + ); + logger.info(`Initialized repository from config: ${r.name}`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Repository not created: ${err.message}`); + } + } +} + +async function importNotificationDestinations(notificationDestinations: unknown[]): Promise { + const notificationsServiceModule = await import("../notifications/notifications.service"); + for (const n of notificationDestinations) { + try { + if (!isRecord(n) || typeof n.name !== "string" || !isRecord(n.config) || typeof n.config.type !== "string") { + throw new Error("Invalid notification destination entry"); + } + await notificationsServiceModule.notificationsService.createDestination(n.name, n.config as NotificationConfig); + logger.info(`Initialized notification destination from config: ${n.name}`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Notification destination not created: ${err.message}`); + } + } +} + +function getScheduleVolumeName(schedule: Record): string | null { + return typeof schedule.volume === "string" ? schedule.volume : typeof schedule.volumeName === "string" ? schedule.volumeName : null; +} + +function getScheduleRepositoryName(schedule: Record): string | null { + return typeof schedule.repository === "string" + ? schedule.repository + : typeof schedule.repositoryName === "string" + ? schedule.repositoryName + : null; +} + +type ScheduleNotificationAssignment = { + destinationId: number; + notifyOnStart: boolean; + notifyOnSuccess: boolean; + notifyOnWarning: boolean; + notifyOnFailure: boolean; +}; + +function buildScheduleNotificationAssignments( + notifications: unknown[], + destinationBySlug: Map, +): ScheduleNotificationAssignment[] { + const assignments: ScheduleNotificationAssignment[] = []; + + for (const notif of notifications) { + const destName = typeof notif === "string" ? notif : isRecord(notif) ? notif.name : null; + if (typeof destName !== "string" || destName.length === 0) { + logger.warn("Notification destination missing name for schedule"); + continue; + } + const destSlug = slugify(destName, { lower: true, strict: true }); + const dest = destinationBySlug.get(destSlug); + if (!dest) { + logger.warn(`Notification destination '${destName}' not found for schedule`); + continue; + } + assignments.push({ + destinationId: dest.id, + notifyOnStart: isRecord(notif) && typeof notif.notifyOnStart === "boolean" ? notif.notifyOnStart : true, + notifyOnSuccess: isRecord(notif) && typeof notif.notifyOnSuccess === "boolean" ? notif.notifyOnSuccess : true, + notifyOnWarning: isRecord(notif) && typeof notif.notifyOnWarning === "boolean" ? notif.notifyOnWarning : true, + notifyOnFailure: isRecord(notif) && typeof notif.notifyOnFailure === "boolean" ? 
notif.notifyOnFailure : true, + }); + } + + return assignments; +} + +async function attachScheduleNotifications( + scheduleId: number, + notifications: unknown[], + destinationBySlug: Map, + notificationsServiceModule: typeof import("../notifications/notifications.service"), +): Promise { + try { + const assignments = buildScheduleNotificationAssignments(notifications, destinationBySlug); + if (assignments.length === 0) return; + + await notificationsServiceModule.notificationsService.updateScheduleNotifications(scheduleId, assignments); + logger.info(`Assigned ${assignments.length} notification(s) to backup schedule`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Failed to assign notifications to schedule: ${err.message}`); + } +} + +async function importBackupSchedules(backupSchedules: unknown[]): Promise { + const backupServiceModule = await import("../backups/backups.service"); + const notificationsServiceModule = await import("../notifications/notifications.service"); + if (!Array.isArray(backupSchedules) || backupSchedules.length === 0) return; + + const volumes = await db.query.volumesTable.findMany(); + const repositories = await db.query.repositoriesTable.findMany(); + const destinations = await db.query.notificationDestinationsTable.findMany(); + + const volumeByName = new Map(volumes.map((v) => [v.name, v] as const)); + const repoByName = new Map(repositories.map((r) => [r.name, r] as const)); + const destinationBySlug = new Map(destinations.map((d) => [d.name, d] as const)); + + for (const s of backupSchedules) { + if (!isRecord(s)) { + continue; + } + const volumeName = getScheduleVolumeName(s); + if (typeof volumeName !== "string" || volumeName.length === 0) { + logger.warn("Backup schedule not created: Missing volume name"); + continue; + } + const volume = volumeByName.get(volumeName); + if (!volume) { + logger.warn(`Backup schedule not created: Volume '${volumeName}' not found`); + continue; + } + + const repositoryName = getScheduleRepositoryName(s); + if (typeof repositoryName !== "string" || repositoryName.length === 0) { + logger.warn("Backup schedule not created: Missing repository name"); + continue; + } + const repository = repoByName.get(repositoryName); + if (!repository) { + logger.warn(`Backup schedule not created: Repository '${repositoryName}' not found`); + continue; + } + + const scheduleName = typeof s.name === "string" && s.name.length > 0 ? s.name : `${volumeName}-${repositoryName}`; + if (typeof s.cronExpression !== "string" || s.cronExpression.length === 0) { + logger.warn(`Backup schedule not created: Missing cronExpression for '${scheduleName}'`); + continue; + } + + if (volume.status !== "mounted") { + try { + await volumeService.mountVolume(volume.name); + volumeByName.set(volume.name, { ...volume, status: "mounted" }); + logger.info(`Mounted volume ${volume.name} for backup schedule`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Could not mount volume ${volume.name}: ${err.message}`); + continue; + } + } + + let createdSchedule: { id: number } | null = null; + try { + const retentionPolicy = isRecord(s.retentionPolicy) ? (s.retentionPolicy as RetentionPolicy) : undefined; + createdSchedule = await backupServiceModule.backupsService.createSchedule({ + name: scheduleName, + volumeId: volume.id, + repositoryId: repository.id, + enabled: typeof s.enabled === "boolean" ? 
s.enabled : true, + cronExpression: s.cronExpression, + retentionPolicy, + excludePatterns: asStringArray(s.excludePatterns), + excludeIfPresent: asStringArray(s.excludeIfPresent), + includePatterns: asStringArray(s.includePatterns), + }); + logger.info(`Initialized backup schedule from config: ${scheduleName}`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Backup schedule not created: ${err.message}`); + continue; + } + + if (createdSchedule && Array.isArray(s.notifications) && s.notifications.length > 0) { + await attachScheduleNotifications(createdSchedule.id, s.notifications, destinationBySlug, notificationsServiceModule); + } + } +} + +async function setupInitialUser(users: unknown[], recoveryKey: string | null): Promise { + try { + const { authService } = await import("../auth/auth.service"); + const hasUsers = await authService.hasUsers(); + if (hasUsers) return; + if (!Array.isArray(users) || users.length === 0) return; + + if (users.length > 1) { + logger.warn( + "Multiple users provided in config. Zerobyte currently supports a single initial user; extra entries will be ignored.", + ); + } + + for (const u of users) { + if (!isRecord(u)) continue; + if (typeof u.username !== "string" || u.username.length === 0) continue; + + if (typeof u.passwordHash === "string" && u.passwordHash.length > 0) { + try { + await db.insert(usersTable).values({ + username: u.username, + passwordHash: u.passwordHash, + hasDownloadedResticPassword: + typeof u.hasDownloadedResticPassword === "boolean" ? u.hasDownloadedResticPassword : Boolean(recoveryKey), + }); + logger.info(`User '${u.username}' imported with password hash from config.`); + break; + } catch (error) { + const err = error instanceof Error ? error : new Error(String(error)); + logger.warn(`User '${u.username}' not imported: ${err.message}`); + } + continue; + } + + if (typeof u.password === "string" && u.password.length > 0) { + try { + const { user } = await authService.register(u.username, u.password); + const hasDownloadedResticPassword = + typeof u.hasDownloadedResticPassword === "boolean" ? u.hasDownloadedResticPassword : Boolean(recoveryKey); + if (hasDownloadedResticPassword) { + await db.update(usersTable).set({ hasDownloadedResticPassword }).where(eq(usersTable.id, user.id)); + } + logger.info(`User '${u.username}' created from config.`); + break; + } catch (error) { + const err = error instanceof Error ? error : new Error(String(error)); + logger.warn(`User '${u.username}' not created: ${err.message}`); + } + continue; + } + + logger.warn(`User '${u.username}' missing passwordHash/password; skipping`); + } + } catch (err) { + const e = err instanceof Error ? err : new Error(String(err)); + logger.error(`Automated user setup failed: ${e.message}`); + } +} + +export async function applyConfigImportFromFile(): Promise { + const configRaw = await loadConfigFromFile(); + const config = parseImportConfig(configRaw); + + await writeRecoveryKeyFromConfig(config.recoveryKey); + + try { + await importVolumes(config.volumes); + await importRepositories(config.repositories); + await importNotificationDestinations(config.notificationDestinations); + await importBackupSchedules(config.backupSchedules); + await setupInitialUser(config.users, config.recoveryKey); + } catch (e) { + const err = e instanceof Error ? 
e : new Error(String(e)); + logger.error(`Failed to initialize from config: ${err.message}`); + } +} diff --git a/app/server/modules/lifecycle/startup.ts b/app/server/modules/lifecycle/startup.ts index bb0a7ae6..fd6a96f5 100644 --- a/app/server/modules/lifecycle/startup.ts +++ b/app/server/modules/lifecycle/startup.ts @@ -1,7 +1,7 @@ import { Scheduler } from "../../core/scheduler"; import { and, eq, or } from "drizzle-orm"; import { db } from "../../db/db"; -import { volumesTable, usersTable, repositoriesTable, notificationDestinationsTable } from "../../db/schema"; +import { volumesTable } from "../../db/schema"; import { logger } from "../../utils/logger"; import { restic } from "../../utils/restic"; import { volumeService } from "../volumes/volume.service"; @@ -10,224 +10,23 @@ import { VolumeHealthCheckJob } from "../../jobs/healthchecks"; import { RepositoryHealthCheckJob } from "../../jobs/repository-healthchecks"; import { BackupExecutionJob } from "../../jobs/backup-execution"; import { CleanupSessionsJob } from "../../jobs/cleanup-sessions"; +import { applyConfigImportFromFile } from "./config-import"; export const startup = async () => { - let configFileVolumes = []; - let configFileRepositories = []; - let configFileBackupSchedules = []; - let configFileNotificationDestinations = []; - let configFileAdmin = null; - try { - const configPath = process.env.ZEROBYTE_CONFIG_PATH || "zerobyte.config.json"; - const fs = await import("node:fs/promises"); - const path = await import("node:path"); - const configFullPath = path.resolve(process.cwd(), configPath); - if (await fs.stat(configFullPath).then(() => true, () => false)) { - const raw = await fs.readFile(configFullPath, "utf-8"); - const config = JSON.parse(raw); - - function interpolate(obj) { - if (typeof obj === "string") { - return obj.replace(/\$\{([^}]+)\}/g, (_, v) => { - if (process.env[v] === undefined) { - logger.warn(`Environment variable '${v}' is not defined. Replacing with empty string.`); - return ""; - } - return process.env[v]; - }); - } else if (Array.isArray(obj)) { - return obj.map(interpolate); - } else if (obj && typeof obj === "object") { - return Object.fromEntries(Object.entries(obj).map(([k, v]) => [k, interpolate(v)])); - } - return obj; - } - configFileVolumes = interpolate(config.volumes || []); - configFileRepositories = interpolate(config.repositories || []); - configFileBackupSchedules = interpolate(config.backupSchedules || []); - configFileNotificationDestinations = interpolate(config.notificationDestinations || []); - configFileAdmin = interpolate(config.admin || null); - } - } catch (e) { - logger.warn(`No config file loaded or error parsing config: ${e.message}`); - } - await Scheduler.start(); await Scheduler.clear(); - try { - const fs = await import("node:fs/promises"); - const { RESTIC_PASS_FILE } = await import("../../core/constants.js"); - if (configFileAdmin && configFileAdmin.recoveryKey) { - const recoveryKey = configFileAdmin.recoveryKey; - if ( - typeof recoveryKey !== "string" || - recoveryKey.length !== 64 || - !/^[a-fA-F0-9]{64}$/.test(recoveryKey) - ) { - throw new Error("Recovery key must be a 64-character hex string"); - } - await fs.writeFile(RESTIC_PASS_FILE, recoveryKey, { mode: 0o600 }); - logger.info(`Recovery key written from config to ${RESTIC_PASS_FILE}`); - } - } catch (err) { - const e = err instanceof Error ? 
err : new Error(String(err)); - logger.error(`Failed to write recovery key from config: ${e.message}`); + if (process.env.ZEROBYTE_CONFIG_IMPORT === "true") { + logger.info("Config import enabled (ZEROBYTE_CONFIG_IMPORT=true)"); + await applyConfigImportFromFile(); + } else { + logger.info("Config import skipped (set ZEROBYTE_CONFIG_IMPORT=true to enable)"); } await restic.ensurePassfile().catch((err) => { logger.error(`Error ensuring restic passfile exists: ${err.message}`); }); - try { - for (const v of configFileVolumes) { - try { - await volumeService.createVolume(v.name, v.config); - logger.info(`Initialized volume from config: ${v.name}`); - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Volume ${v.name} not created: ${err.message}`); - } - } - const repoServiceModule = await import("../repositories/repositories.service"); - for (const r of configFileRepositories) { - try { - await repoServiceModule.repositoriesService.createRepository(r.name, r.config, r.compressionMode); - logger.info(`Initialized repository from config: ${r.name}`); - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Repository ${r.name} not created: ${err.message}`); - } - } - const notificationsServiceModule = await import("../notifications/notifications.service"); - for (const n of configFileNotificationDestinations) { - try { - await notificationsServiceModule.notificationsService.createDestination(n.name, n.config); - logger.info(`Initialized notification destination from config: ${n.name}`); - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Notification destination ${n.name} not created: ${err.message}`); - } - } - - const backupServiceModule = await import("../backups/backups.service"); - for (const s of configFileBackupSchedules) { - const volumeName = s.volume || s.volumeName; - const volume = await db.query.volumesTable.findFirst({ - where: eq(volumesTable.name, volumeName), - }); - if (!volume) { - logger.warn(`Backup schedule not created: Volume '${volumeName}' not found`); - continue; - } - const repositoryName = s.repository || s.repositoryName; - const repository = await db.query.repositoriesTable.findFirst({ - where: eq(repositoriesTable.name, repositoryName), - }); - if (!repository) { - logger.warn(`Backup schedule not created: Repository '${repositoryName}' not found`); - continue; - } - if (volume.status !== "mounted") { - try { - await volumeService.mountVolume(volume.name); - logger.info(`Mounted volume ${volume.name} for backup schedule`); - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Could not mount volume ${volume.name}: ${err.message}`); - continue; - } - } - let createdSchedule; - try { - createdSchedule = await backupServiceModule.backupsService.createSchedule({ - ...s, - volumeId: volume.id, - repositoryId: repository.id, - }); - logger.info(`Initialized backup schedule from config: ${s.cronExpression || s.name}`); - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Backup schedule not created: ${err.message}`); - continue; - } - - if (createdSchedule && s.notifications && Array.isArray(s.notifications) && s.notifications.length > 0) { - try { - const assignments: Array<{ - destinationId: number; - notifyOnStart: boolean; - notifyOnSuccess: boolean; - notifyOnFailure: boolean; - }> = []; - for (const notif of s.notifications) { - const destName = typeof notif === 'string' ? 
notif : notif.name; - const dest = await db.query.notificationDestinationsTable.findFirst({ - where: eq(notificationDestinationsTable.name, destName), - }); - if (dest) { - assignments.push({ - destinationId: dest.id, - notifyOnStart: typeof notif === 'object' ? (notif.notifyOnStart ?? true) : true, - notifyOnSuccess: typeof notif === 'object' ? (notif.notifyOnSuccess ?? true) : true, - notifyOnFailure: typeof notif === 'object' ? (notif.notifyOnFailure ?? true) : true, - }); - } else { - logger.warn(`Notification destination '${destName}' not found for schedule`); - } - } - if (assignments.length > 0) { - await notificationsServiceModule.notificationsService.updateScheduleNotifications(createdSchedule.id, assignments); - logger.info(`Assigned ${assignments.length} notification(s) to backup schedule`); - } - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Failed to assign notifications to schedule: ${err.message}`); - } - } - - } - - try { - const { authService } = await import("../auth/auth.service"); - if (configFileAdmin && configFileAdmin.username && (configFileAdmin.password || configFileAdmin.passwordHash)) { - if (configFileAdmin.password && configFileAdmin.passwordHash) { - logger.error("Config error: Both 'password' and 'passwordHash' provided for admin user. Use only one."); - throw new Error("Invalid admin configuration"); - } - const hasUsers = await authService.hasUsers(); - if (!hasUsers) { - let userId: number; - if (configFileAdmin.passwordHash) { - // Import with existing password hash (migration from another instance) - const [user] = await db.insert(usersTable).values({ - username: configFileAdmin.username, - passwordHash: configFileAdmin.passwordHash, - }).returning(); - userId = user.id; - logger.info(`Admin user '${configFileAdmin.username}' imported with password hash from config.`); - } else { - // Create new user with plaintext password - const { user } = await authService.register(configFileAdmin.username, configFileAdmin.password); - userId = user.id; - logger.info(`Admin user '${configFileAdmin.username}' created from config.`); - } - if (configFileAdmin.recoveryKey) { - await db.update(usersTable).set({ hasDownloadedResticPassword: true }).where(eq(usersTable.id, userId)); - } - } - } else if (configFileAdmin) { - logger.warn("Admin config missing required fields (username, password or passwordHash). Skipping automated admin setup."); - } - } catch (err) { - const e = err instanceof Error ? err : new Error(String(err)); - logger.error(`Automated admin setup failed: ${e.message}`); - } - } catch (e) { - const err = e instanceof Error ? 
e : new Error(String(e)); - logger.error(`Failed to initialize from config: ${err.message}`); - } - const volumes = await db.query.volumesTable.findMany({ where: or( eq(volumesTable.status, "mounted"), diff --git a/docker-compose.yml b/docker-compose.yml index acdbd17e..cd132c35 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,7 +20,7 @@ services: volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte - + - ./app:/app/app - ~/.config/rclone:/root/.config/rclone # - /run/docker/plugins:/run/docker/plugins diff --git a/zerobyte.config.json b/zerobyte.config.json index 1323b939..6e30e340 100644 --- a/zerobyte.config.json +++ b/zerobyte.config.json @@ -118,6 +118,7 @@ ], "backupSchedules": [ { + "name": "local-volume-local-repo", "volume": "local-volume", "repository": "local-repo", "cronExpression": "0 2 * * *", @@ -209,9 +210,11 @@ } } ], - "admin": { - "username": "admin", - "password": "${ADMIN_PASSWORD}", - "recoveryKey": "${RECOVERY_KEY}" - } + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "admin", + "password": "${ADMIN_PASSWORD}" + } + ] } From 7b2bb251fd1a7bb83bc971c323fa2edfafdd61c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Sun, 21 Dec 2025 14:06:37 +0100 Subject: [PATCH 11/30] moved documentation into separate example sub directory --- README.md | 397 +----------------- docker-compose.yml | 29 -- examples/README.md | 1 + examples/config-file-import/.env.example | 23 + examples/config-file-import/.gitignore | 2 + examples/config-file-import/README.md | 388 +++++++++++++++++ .../config-file-import/docker-compose.yml | 22 + .../zerobyte.config.example.json | 0 8 files changed, 442 insertions(+), 420 deletions(-) create mode 100644 examples/config-file-import/.env.example create mode 100644 examples/config-file-import/.gitignore create mode 100644 examples/config-file-import/README.md create mode 100644 examples/config-file-import/docker-compose.yml rename zerobyte.config.json => examples/config-file-import/zerobyte.config.example.json (100%) diff --git a/README.md b/README.md index ae261fbb..4c4cc2a1 100644 --- a/README.md +++ b/README.md @@ -37,396 +37,6 @@ Zerobyte is a backup automation tool that helps you save your data across multip In order to run Zerobyte, you need to have Docker and Docker Compose installed on your server. Then, you can use the provided `docker-compose.yml` file to start the application. -### Configure Zerobyte via Config File - - -You can pre-configure backup sources (volumes), destinations (repositories), backup schedules, notification destinations and initial users using a config file (`zerobyte.config.json` by default (mounted in /app dir), or set `ZEROBYTE_CONFIG_PATH`). - -Config import is opt-in. Enable it by setting `ZEROBYTE_CONFIG_IMPORT=true`. - -Secrets/credentials in the config file can reference environment variables using `${VAR_NAME}` syntax for secure injection. - -> **ℹ️ Config File Behavior** -> -> The config file is applied on startup using a **create-only** approach: -> - Resources defined in the config are only created if they don't already exist in the database -> - Existing resources with the same name are **not overwritten** - a warning is logged and the config entry is skipped -> - Changes made via the UI are preserved across container restarts -> - To update a resource from config, either modify it via the UI or delete it first -> -> This means the config file serves as "initial setup" rather than "desired state sync". 
- -#### zerobyte.config.json Structure - -```json -{ - "recoveryKey": "${RECOVERY_KEY}", - "volumes": [ - // Array of volume objects. Each must have a unique "name" and a "config" matching one of the types below. - ], - "repositories": [ - // Array of repository objects. Each must have a unique "name" and a "config" matching one of the types below. - // Optionally, "compressionMode" ("auto", "off", "max") - ], - "backupSchedules": [ - // Array of backup schedule objects as described below. - ], - "notificationDestinations": [ - // Array of notification destination objects as described below. - ], - "users": [ - // Array of user objects. Each must have a unique "username". - // Note: Zerobyte currently supports a single user; only the first entry is applied. - ] -} -``` - -##### Volume Types - -- **Local Directory** - ```json - { - "name": "local-volume", - "config": { - "backend": "directory", - "path": "/data", - "readOnly": true - } - } - ``` - -- **NFS** - ```json - { - "name": "nfs-volume", - "config": { - "backend": "nfs", - "server": "nfs.example.com", - "exportPath": "/data", - "port": 2049, - "version": "4", - "readOnly": false - } - } - ``` - -- **SMB** - ```json - { - "name": "smb-volume", - "config": { - "backend": "smb", - "server": "smb.example.com", - "share": "shared", - "username": "user", - "password": "${SMB_PASSWORD}", - "vers": "3.0", - "domain": "WORKGROUP", - "port": 445, - "readOnly": false - } - } - ``` - -- **WebDAV** - ```json - { - "name": "webdav-volume", - "config": { - "backend": "webdav", - "server": "webdav.example.com", - "path": "/remote.php/webdav", - "username": "user", - "password": "${WEBDAV_PASSWORD}", - "port": 80, - "readOnly": false, - "ssl": true - } - } - ``` - -##### Repository Types - -- **Local Directory** - ```json - { - "name": "local-repo", - "config": { - "backend": "local", - "path": "/var/lib/zerobyte/repositories" - }, - "compressionMode": "auto" - } - ``` - > **Note for importing existing local repositories:** If you're migrating an existing repository (e.g., from a backup or another Zerobyte instance), include the `name` field in `config` with the original subfolder name, and set `isExistingRepository: true`. The actual restic repo is stored at `{path}/{name}`. - > - > **Example (migration):** - > ```json - > { - > "name": "my-local-repo", - > "config": { - > "backend": "local", - > "path": "/var/lib/zerobyte/repositories", - > "name": "abc123", - > "isExistingRepository": true - > } - > } - > ``` - > You can find the `config.name` value in an exported config under `repositories[].config.name`. This value must be unique across all repositories. - -- **S3-Compatible** - ```json - { - "name": "backup-repo", - "config": { - "backend": "s3", - "bucket": "mybucket", - "accessKeyId": "${ACCESS_KEY_ID}", - "secretAccessKey": "${SECRET_ACCESS_KEY}" - }, - "compressionMode": "auto" - } - ``` - -- **Google Cloud Storage** - ```json - { - "name": "gcs-repo", - "config": { - "backend": "gcs", - "bucket": "mybucket", - "projectId": "my-gcp-project", - "credentialsJson": "${GCS_CREDENTIALS}" - } - } - ``` - -- **Azure Blob Storage** - ```json - { - "name": "azure-repo", - "config": { - "backend": "azure", - "container": "mycontainer", - "accountName": "myaccount", - "accountKey": "${AZURE_KEY}" - } - } - ``` - -- **WebDAV, rclone, SFTP, REST, etc.** - (See documentation for required fields; all support env variable secrets.) 
- -##### Backup Schedules - -- **Example:** - ```json - { - "name": "local-volume-local-repo", - "volume": "local-volume", - "repository": "local-repo", - "cronExpression": "0 2 * * *", - "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, - "includePatterns": ["important-folder"], - "excludePatterns": ["*.tmp", "*.log"], - "enabled": true, - "notifications": ["slack-alerts", "email-admin"] - } - ``` -- **Fields:** - - `name`: Unique name of the schedule - - `volume`: Name of the source volume - - `repository`: Name of the destination repository - - `cronExpression`: Cron string for schedule - - `retentionPolicy`: Object with retention rules (e.g., keepLast, keepDaily) - - `includePatterns`/`excludePatterns`: Arrays of patterns - - `enabled`: Boolean - - `notifications`: Array of notification destination names (strings) or detailed objects: - - Simple: `["slack-alerts", "email-admin"]` - - Detailed: `[{"name": "slack-alerts", "notifyOnStart": false, "notifyOnSuccess": true, "notifyOnWarning": true, "notifyOnFailure": true}]` - -##### Notification Destinations - -- **Examples:** - - **Slack** - ```json - { - "name": "slack-alerts", - "config": { - "type": "slack", - "webhookUrl": "${SLACK_WEBHOOK_URL}", - "channel": "#backups", - "username": "zerobyte", - "iconEmoji": ":floppy_disk:" - } - } - ``` - - **Email** - ```json - { - "name": "email-admin", - "config": { - "type": "email", - "smtpHost": "smtp.example.com", - "smtpPort": 587, - "username": "admin@example.com", - "password": "${EMAIL_PASSWORD}", - "from": "zerobyte@example.com", - "to": ["admin@example.com"], - "useTLS": true - } - } - ``` - - **Discord** - ```json - { - "name": "discord-backups", - "config": { - "type": "discord", - "webhookUrl": "${DISCORD_WEBHOOK_URL}", - "username": "zerobyte", - "avatarUrl": "https://example.com/avatar.png", - "threadId": "1234567890" - } - } - ``` - - **Gotify** - ```json - { - "name": "gotify-notify", - "config": { - "type": "gotify", - "serverUrl": "https://gotify.example.com", - "token": "${GOTIFY_TOKEN}", - "path": "/message", - "priority": 5 - } - } - ``` - - **ntfy** - ```json - { - "name": "ntfy-notify", - "config": { - "type": "ntfy", - "serverUrl": "https://ntfy.example.com", - "topic": "zerobyte-backups", - "priority": "high", - "username": "ntfyuser", - "password": "${NTFY_PASSWORD}" - } - } - ``` - - **Pushover** - ```json - { - "name": "pushover-notify", - "config": { - "type": "pushover", - "userKey": "${PUSHOVER_USER_KEY}", - "apiToken": "${PUSHOVER_API_TOKEN}", - "devices": "phone,tablet", - "priority": 1 - } - } - ``` - - **Telegram** - ```json - { - "name": "telegram-notify", - "config": { - "type": "telegram", - "botToken": "${TELEGRAM_BOT_TOKEN}", - "chatId": "123456789" - } - } - ``` - - **Custom (shoutrrr)** - ```json - { - "name": "custom-shoutrrr", - "config": { - "type": "custom", - "shoutrrrUrl": "${SHOUTRRR_URL}" - } - } - ``` - -- **Fields:** - - `name`: Unique name for the notification config - - `config.type`: Notification type (email, slack, discord, gotify, ntfy, pushover, telegram, custom) - - `config`: Type-specific config with `type` field, secrets via `${ENV_VAR}` - -##### User Setup (Automated) - -Zerobyte currently supports a **single user**. If multiple entries are provided in `users[]`, only the first one will be applied. 
- -- **Example (new instance):** - ```json - { - "recoveryKey": "${RECOVERY_KEY}", - "users": [ - { - "username": "my-user", - "password": "${ADMIN_PASSWORD}" - } - ] - } - ``` - -- **Example (migration from another instance):** - ```json - { - "recoveryKey": "${RECOVERY_KEY}", - "users": [ - { - "username": "my-user", - "passwordHash": "$argon2id$v=19$m=19456,t=2,p=1$..." - } - ] - } - ``` - -- **Fields:** - - `recoveryKey`: Optional recovery key (can use `${ENV_VAR}`) - if provided, the UI prompt to download recovery key will be skipped - - `users[]`: List of users to create on first startup (create-only). Only the first user is applied. - - `users[].username`: Username - - `users[].password`: Plaintext password for new instances (can use `${ENV_VAR}`) - - `users[].passwordHash`: Pre-hashed password for migration (exported from another instance) - - `users[].hasDownloadedResticPassword`: Optional boolean; defaults to `true` when `recoveryKey` is provided - -> **Note:** Use either `password` OR `passwordHash`, not both. The `passwordHash` option is useful when migrating from another Zerobyte instance using an exported config with `includePasswordHash=true`. - -**On first startup, Zerobyte will automatically create users from the config file.** - -> **⚠️ About the Recovery Key** -> -> The recovery key is a 64-character hex string that serves two critical purposes: -> 1. **Restic repository password** - Used to encrypt all your backup data -> 2. **Database encryption key** - Used to encrypt credentials stored in Zerobyte's database -> -> **If you lose this key, you will lose access to all your backups and stored credentials.** -> -> **Generating a recovery key ahead of time:** -> ```bash -> # Using OpenSSL (Linux/macOS) -> openssl rand -hex 32 -> -> # Using Python -> python3 -c "import secrets; print(secrets.token_hex(32))" -> ``` -> -> **Retrieving from an existing instance:** -> - Download via UI: Settings → Download Recovery Key -> - Or read directly from the container: `docker exec zerobyte cat /var/lib/zerobyte/data/restic.pass` - ---- - -**Notes:** -- All secrets (passwords, keys) can use `${ENV_VAR}` syntax to inject from environment variables. -- All paths must be accessible inside the container (mount host paths as needed). -- `readOnly` is supported for all volume types that allow it, including local directories. - ```yaml services: zerobyte: @@ -444,7 +54,6 @@ services: volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte - - ./zerobyte.config.json:/app/zerobyte.config.json:ro # Mount your config file ``` > [!WARNING] @@ -493,6 +102,12 @@ If you need remote mount capabilities, keep the original configuration with `cap See [examples/README.md](examples/README.md) for runnable, copy/paste-friendly examples. +### Config file import (Infrastructure as Code) + +If you want Zerobyte to create volumes, repositories, schedules, notification destinations, and an initial user from a JSON file on startup, check the following example: + +- [examples/config-file-import/README.md](examples/config-file-import/README.md) + ## Adding your first volume Zerobyte supports multiple volume backends including NFS, SMB, WebDAV, and local directories. A volume represents the source data you want to back up and monitor. 
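For orientation, the volume `config` shapes accepted by the importer correspond roughly to the following union. This is a sketch inferred from the documented JSON examples, not the actual `BackendConfig` type exported from `~/schemas/volumes`:

```ts
// Sketch of the documented volume backend configs (inferred, not authoritative).
type BackendConfigSketch =
  | { backend: "directory"; path: string; readOnly?: boolean }
  | { backend: "nfs"; server: string; exportPath: string; port?: number; version?: string; readOnly?: boolean }
  | {
      backend: "smb";
      server: string;
      share: string;
      username: string;
      password: string;
      vers?: string;
      domain?: string;
      port?: number;
      readOnly?: boolean;
    }
  | {
      backend: "webdav";
      server: string;
      path: string;
      username: string;
      password: string;
      port?: number;
      ssl?: boolean;
      readOnly?: boolean;
    };
```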
diff --git a/docker-compose.yml b/docker-compose.yml index 4a0f2578..90a43015 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,14 +12,11 @@ services: - SYS_ADMIN environment: - NODE_ENV=development - - ZEROBYTE_CONFIG_IMPORT=true ports: - "4096:4096" volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte - - ./mydata:/mydata:ro - - ./zerobyte.config.json:/app/zerobyte.config.json:ro - ./app:/app/app - ~/.config/rclone:/root/.config/rclone @@ -36,33 +33,7 @@ services: - SYS_ADMIN ports: - "4096:4096" - environment: - # Cloud storage credentials - - ACCESS_KEY_ID=your-access-key-id - - SECRET_ACCESS_KEY=your-secret-access-key - - GCS_CREDENTIALS=your-gcs-credentials-json - - AZURE_KEY=your-azure-key - # Volume credentials - - SMB_PASSWORD=your-smb-password - - WEBDAV_PASSWORD=your-webdav-password - # SFTP credentials (for repositories) - - SFTP_PRIVATE_KEY=your-sftp-private-key - # Notification credentials - - SLACK_WEBHOOK_URL=your-slack-webhook-url - - EMAIL_PASSWORD=your-email-password - - DISCORD_WEBHOOK_URL=your-discord-webhook-url - - GOTIFY_TOKEN=your-gotify-token - - NTFY_PASSWORD=your-ntfy-password - - PUSHOVER_USER_KEY=your-pushover-user-key - - PUSHOVER_API_TOKEN=your-pushover-api-token - - TELEGRAM_BOT_TOKEN=your-telegram-bot-token - - SHOUTRRR_URL=your-shoutrrr-url - # Admin credentials - - ADMIN_PASSWORD=your-admin-password - - RECOVERY_KEY=your-64-char-hex-recovery-key volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte - - ./zerobyte.config.json:/app/zerobyte.config.json:ro - - ./mydata:/mydata:ro - ~/.config/rclone:/root/.config/rclone diff --git a/examples/README.md b/examples/README.md index 8d2e7657..0ae69c9a 100644 --- a/examples/README.md +++ b/examples/README.md @@ -11,6 +11,7 @@ This folder contains runnable, copy/paste-friendly examples for running Zerobyte - [Bind-mount a local directory](directory-bind-mount/README.md) — back up a host folder by mounting it into the container. - [Mount an rclone config](rclone-config-mount/README.md) — use rclone-based repository backends by mounting your rclone config. - [Secret placeholders + Docker secrets](secrets-placeholders/README.md) — keep secrets out of the DB using `env://...` and `file://...` references. +- [Config file import (Infrastructure as Code)](config-file-import/README.md) — pre-configure volumes/repos/schedules/users on startup. 
### Advanced setups diff --git a/examples/config-file-import/.env.example b/examples/config-file-import/.env.example new file mode 100644 index 00000000..d59d0377 --- /dev/null +++ b/examples/config-file-import/.env.example @@ -0,0 +1,23 @@ +# Copy to .env and fill values + +# Used by examples/config-file-import/zerobyte.config.example.json +RECOVERY_KEY=your-64-char-hex-recovery-key +ADMIN_PASSWORD=change-me + +# Optional: referenced by some config examples +ACCESS_KEY_ID= +SECRET_ACCESS_KEY= +GCS_CREDENTIALS= +AZURE_KEY= +SMB_PASSWORD= +WEBDAV_PASSWORD= +SFTP_PRIVATE_KEY= +SLACK_WEBHOOK_URL= +EMAIL_PASSWORD= +DISCORD_WEBHOOK_URL= +GOTIFY_TOKEN= +NTFY_PASSWORD= +PUSHOVER_USER_KEY= +PUSHOVER_API_TOKEN= +TELEGRAM_BOT_TOKEN= +SHOUTRRR_URL= diff --git a/examples/config-file-import/.gitignore b/examples/config-file-import/.gitignore new file mode 100644 index 00000000..9961ad53 --- /dev/null +++ b/examples/config-file-import/.gitignore @@ -0,0 +1,2 @@ +.env +zerobyte.config.json diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md new file mode 100644 index 00000000..4bf1aa7d --- /dev/null +++ b/examples/config-file-import/README.md @@ -0,0 +1,388 @@ +# Config file import (Infrastructure as Code) + +Zerobyte supports **config file import** on startup. +This lets you pre-configure volumes, repositories, backup schedules, notification destinations, and an initial user. + +This example includes: + +- a runnable `docker-compose.yml` +- a comprehensive `zerobyte.config.example.json` template (trim it down to what you actually use) +- `.env.example` showing how to inject secrets via environment variables + +## Prerequisites + +- Docker + Docker Compose + +This example includes `SYS_ADMIN` and `/dev/fuse` because it’s compatible with remote volume mounts (SMB/NFS/WebDAV). + +## Setup + +1. Copy the env file: + +```bash +cp .env.example .env +``` + +2. Create a local directory to mount as a sample volume: + +```bash +mkdir -p mydata +``` + +3. Create a working config file (copy the example template): + +```bash +cp zerobyte.config.example.json zerobyte.config.json +``` + +This is the recommended workflow for quick testing: if you don't have your own JSON config yet, start from the template. + +4. Review/edit `zerobyte.config.json`. + + The example template is intentionally "kitchen-sink" (lots of volume/repository/notification types) so you can copy what you need. + Delete the entries you don't plan to use, and keep only the ones you have credentials/mounts for. + +5. Start Zerobyte: + +```bash +docker compose up -d +``` + +6. Access the UI at `http://localhost:4096`. + +## Notes + +### Enabling import + +Config import is opt-in and only runs when: + +- `ZEROBYTE_CONFIG_IMPORT=true` + +The config path defaults to `/app/zerobyte.config.json`, but you can override it via: + +- `ZEROBYTE_CONFIG_PATH=/app/your-config.json` + +### Secrets via env vars + +Zerobyte supports **two different mechanisms** that are easy to confuse: + +1. **Config import interpolation** (this example) +2. **Secret placeholders** (`env://...` and `file://...`) + +#### 1) Config import interpolation: `${VAR_NAME}` + +During config import, any string value in the JSON can reference an environment variable using `${VAR_NAME}`. 
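+
+Interpolation is recursive over strings, arrays, and nested objects. If a referenced variable is not set, the importer logs a warning and substitutes an empty string. A minimal standalone sketch of the substitution rule (illustrative only; the actual implementation is `interpolateEnvVars` in `config-import.ts`):
+
+```ts
+// Sketch: resolve ${VAR_NAME} references against process.env.
+function substitute(value: string, env = process.env): string {
+  return value.replace(/\$\{([^}]+)\}/g, (_match, name) => env[name] ?? "");
+}
+
+console.log(substitute("key=${ACCESS_KEY_ID}")); // "key=" when the variable is unset
+```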
+ +Example: + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "repositories": [ + { + "name": "s3-repo", + "config": { + "backend": "s3", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + } + } + ] +} +``` + +Important properties of `${...}` interpolation: + +- It runs **only during import**. +- Values are **resolved before** they are written to the database (meaning the actual secret ends up in the DB for fields that are stored as secrets). +- Because it reads `process.env`, Docker Compose must inject those variables into the container. + +This example uses: + +- `env_file: .env` + +So to make `${VAR_NAME}` work, put the variables in `.env` (or otherwise provide them in the container environment). + +#### 2) Secret placeholders: `env://...` and `file://...` + +Separately from config import, Zerobyte supports **secret placeholders** for *some sensitive fields*. +These placeholders are stored **as-is** in the database (the raw secret is not stored) and resolved at runtime. + +Supported formats: + +- `env://VAR_NAME` → reads `process.env.VAR_NAME` at runtime +- `file://secret_name` → reads `/run/secrets/secret_name` (Docker secrets) + +This is useful when you want to keep secrets out of the database and rotate them without editing Zerobyte’s stored config. + +See the runnable example: + +- [examples/secrets-placeholders/README.md](../secrets-placeholders/README.md) + +### Config file behavior (create-only) + +The config file is applied on startup using a **create-only** approach: + +- Resources defined in the config are only created if they don't already exist in the database +- Existing resources with the same name are **not overwritten** (a warning is logged and the config entry is skipped) +- Changes made via the UI are preserved across container restarts +- To update a resource from config, either modify it via the UI or delete it first + +This makes the config file better suited as "initial setup" than as a "desired state sync". + +--- + +## Config structure reference + +This example is intended to be the primary, copy/paste-friendly reference for config import. + +### `zerobyte.config.json` structure + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "volumes": [ + "..." + ], + "repositories": [ + "..." + ], + "backupSchedules": [ + "..." + ], + "notificationDestinations": [ + "..." + ], + "users": [ + "..." 
+ ] +} +``` + +### Volume types + +#### Local directory + +```json +{ + "name": "local-volume", + "config": { + "backend": "directory", + "path": "/mydata", + "readOnly": true + } +} +``` + +#### NFS + +```json +{ + "name": "nfs-volume", + "config": { + "backend": "nfs", + "server": "nfs.example.com", + "exportPath": "/data", + "port": 2049, + "version": "4", + "readOnly": false + } +} +``` + +#### SMB + +```json +{ + "name": "smb-volume", + "config": { + "backend": "smb", + "server": "smb.example.com", + "share": "shared", + "username": "user", + "password": "${SMB_PASSWORD}", + "vers": "3.0", + "domain": "WORKGROUP", + "port": 445, + "readOnly": false + } +} +``` + +#### WebDAV + +```json +{ + "name": "webdav-volume", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "readOnly": false, + "ssl": true + } +} +``` + +### Repository types + +#### Local + +```json +{ + "name": "local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories" + }, + "compressionMode": "auto" +} +``` + +Note for importing existing local repositories (migration): + +- include `config.name` and set `config.isExistingRepository: true` +- the actual restic repo is stored at `{path}/{name}` + +```json +{ + "name": "my-local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories", + "name": "abc123", + "isExistingRepository": true + } +} +``` + +#### S3-compatible + +```json +{ + "name": "backup-repo", + "config": { + "backend": "s3", + "bucket": "mybucket", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + }, + "compressionMode": "auto" +} +``` + +#### Google Cloud Storage + +```json +{ + "name": "gcs-repo", + "config": { + "backend": "gcs", + "bucket": "mybucket", + "projectId": "my-gcp-project", + "credentialsJson": "${GCS_CREDENTIALS}" + } +} +``` + +#### Azure Blob Storage + +```json +{ + "name": "azure-repo", + "config": { + "backend": "azure", + "container": "mycontainer", + "accountName": "myaccount", + "accountKey": "${AZURE_KEY}" + } +} +``` + +### Backup schedules + +```json +{ + "name": "local-volume-local-repo", + "volume": "local-volume", + "repository": "local-repo", + "cronExpression": "0 2 * * *", + "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, + "includePatterns": ["important-folder"], + "excludePatterns": ["*.tmp", "*.log"], + "enabled": true, + "notifications": ["slack-alerts", "email-admin"] +} +``` + +`notifications` can also be an array of objects: + +```json +[ + { + "name": "slack-alerts", + "notifyOnStart": false, + "notifyOnSuccess": true, + "notifyOnWarning": true, + "notifyOnFailure": true + } +] +``` + +### User setup (automated) + +Zerobyte currently supports a **single user**. +If multiple entries are provided in `users[]`, only the first one will be applied. + +New instance: + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "my-user", + "password": "${ADMIN_PASSWORD}" + } + ] +} +``` + +Migration: + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "my-user", + "passwordHash": "$argon2id$v=19$m=19456,t=2,p=1$..." + } + ] +} +``` + +Use either `password` OR `passwordHash`, not both. + +### Recovery key + +The recovery key is a 64-character hex string that serves two critical purposes: + +1. Restic repository password (encrypts your backup data) +2. 
Database encryption key (encrypts credentials stored in Zerobyte) + +Generating a recovery key ahead of time: + +```bash +# Using OpenSSL (Linux/macOS) +openssl rand -hex 32 + +# Using Python +python3 -c "import secrets; print(secrets.token_hex(32))" + +# Using Docker (prints the key, container is removed) +docker run --rm python:3.12-alpine sh -lc 'echo "Key is on the next line:"; python -c "import secrets; print(secrets.token_hex(32))"' +``` diff --git a/examples/config-file-import/docker-compose.yml b/examples/config-file-import/docker-compose.yml new file mode 100644 index 00000000..78792779 --- /dev/null +++ b/examples/config-file-import/docker-compose.yml @@ -0,0 +1,22 @@ +services: + zerobyte: + image: ghcr.io/nicotsx/zerobyte:latest + container_name: zerobyte + restart: unless-stopped + cap_add: + - SYS_ADMIN + devices: + - /dev/fuse:/dev/fuse + ports: + - "4096:4096" + env_file: + - .env + environment: + - TZ=${TZ:-UTC} + - ZEROBYTE_CONFIG_IMPORT=true + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/lib/zerobyte:/var/lib/zerobyte + - ./zerobyte.config.json:/app/zerobyte.config.json:ro + - ./mydata:/mydata:ro + - ~/.config/rclone:/root/.config/rclone diff --git a/zerobyte.config.json b/examples/config-file-import/zerobyte.config.example.json similarity index 100% rename from zerobyte.config.json rename to examples/config-file-import/zerobyte.config.example.json From 7476897f87629d3887feff62749427327d3a0d0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Tue, 23 Dec 2025 15:29:27 +0100 Subject: [PATCH 12/30] feat(config-import): support mirrors, oneFileSystem, and optional flags - Add mirrors support for backup schedules (copy to secondary repos) - Add oneFileSystem flag to prevent crossing filesystem boundaries - Support autoRemount=false for volumes (defaults to true) - Support enabled=false for notification destinations (defaults to true) - Optimize: move early return before dynamic imports in importBackupSchedules - Update example config and README with new fields documentation --- app/server/modules/lifecycle/config-import.ts | 69 ++++++++++++++++++- examples/config-file-import/README.md | 38 +++++++++- .../zerobyte.config.example.json | 7 +- 3 files changed, 109 insertions(+), 5 deletions(-) diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index d5e9ae09..7a458326 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -127,6 +127,12 @@ async function importVolumes(volumes: unknown[]): Promise { } await volumeService.createVolume(v.name, v.config as BackendConfig); logger.info(`Initialized volume from config: ${v.name}`); + + // If autoRemount is explicitly false, update the volume (default is true) + if (v.autoRemount === false) { + await volumeService.updateVolume(v.name, { autoRemount: false }); + logger.info(`Set autoRemount=false for volume: ${v.name}`); + } } catch (e) { const err = e instanceof Error ? 
e : new Error(String(e)); logger.warn(`Volume not created: ${err.message}`); @@ -165,8 +171,14 @@ async function importNotificationDestinations(notificationDestinations: unknown[ if (!isRecord(n) || typeof n.name !== "string" || !isRecord(n.config) || typeof n.config.type !== "string") { throw new Error("Invalid notification destination entry"); } - await notificationsServiceModule.notificationsService.createDestination(n.name, n.config as NotificationConfig); + const created = await notificationsServiceModule.notificationsService.createDestination(n.name, n.config as NotificationConfig); logger.info(`Initialized notification destination from config: ${n.name}`); + + // If enabled is explicitly false, update the destination (default is true) + if (n.enabled === false) { + await notificationsServiceModule.notificationsService.updateDestination(created.id, { enabled: false }); + logger.info(`Set enabled=false for notification destination: ${n.name}`); + } } catch (e) { const err = e instanceof Error ? e : new Error(String(e)); logger.warn(`Notification destination not created: ${err.message}`); @@ -243,9 +255,10 @@ async function attachScheduleNotifications( } async function importBackupSchedules(backupSchedules: unknown[]): Promise { + if (!Array.isArray(backupSchedules) || backupSchedules.length === 0) return; + const backupServiceModule = await import("../backups/backups.service"); const notificationsServiceModule = await import("../notifications/notifications.service"); - if (!Array.isArray(backupSchedules) || backupSchedules.length === 0) return; const volumes = await db.query.volumesTable.findMany(); const repositories = await db.query.repositoriesTable.findMany(); @@ -312,6 +325,7 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise excludePatterns: asStringArray(s.excludePatterns), excludeIfPresent: asStringArray(s.excludeIfPresent), includePatterns: asStringArray(s.includePatterns), + oneFileSystem: typeof s.oneFileSystem === "boolean" ? s.oneFileSystem : undefined, }); logger.info(`Initialized backup schedule from config: ${scheduleName}`); } catch (e) { @@ -323,6 +337,57 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise if (createdSchedule && Array.isArray(s.notifications) && s.notifications.length > 0) { await attachScheduleNotifications(createdSchedule.id, s.notifications, destinationBySlug, notificationsServiceModule); } + + if (createdSchedule && Array.isArray(s.mirrors) && s.mirrors.length > 0) { + await attachScheduleMirrors(createdSchedule.id, s.mirrors, repoByName, backupServiceModule); + } + } +} + +async function attachScheduleMirrors( + scheduleId: number, + mirrors: unknown[], + repoByName: Map, + backupServiceModule: typeof import("../backups/backups.service"), +): Promise { + try { + const mirrorConfigs: Array<{ repositoryId: string; enabled: boolean }> = []; + + for (const m of mirrors) { + if (!isRecord(m)) continue; + + // Support both repository name (string) and repository object with name + const repoName = + typeof m.repository === "string" + ? m.repository + : typeof m.repositoryName === "string" + ? m.repositoryName + : null; + + if (!repoName) { + logger.warn("Mirror missing repository name; skipping"); + continue; + } + + const repo = repoByName.get(repoName); + if (!repo) { + logger.warn(`Mirror repository '${repoName}' not found; skipping`); + continue; + } + + mirrorConfigs.push({ + repositoryId: repo.id, + enabled: typeof m.enabled === "boolean" ? 
m.enabled : true, + }); + } + + if (mirrorConfigs.length === 0) return; + + await backupServiceModule.backupsService.updateMirrors(scheduleId, { mirrors: mirrorConfigs }); + logger.info(`Assigned ${mirrorConfigs.length} mirror(s) to backup schedule`); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Failed to assign mirrors to schedule: ${err.message}`); } } diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md index 4bf1aa7d..a16e0f0a 100644 --- a/examples/config-file-import/README.md +++ b/examples/config-file-import/README.md @@ -313,12 +313,34 @@ Note for importing existing local repositories (migration): "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, "includePatterns": ["important-folder"], "excludePatterns": ["*.tmp", "*.log"], + "excludeIfPresent": [".nobackup"], + "oneFileSystem": true, "enabled": true, - "notifications": ["slack-alerts", "email-admin"] + "notifications": ["slack-alerts", "email-admin"], + "mirrors": [ + { "repository": "s3-repo" }, + { "repository": "lo2" } + ] } ``` -`notifications` can also be an array of objects: +**Fields:** + +- `name`: Unique schedule name +- `volume`: Name of the source volume +- `repository`: Name of the primary destination repository +- `cronExpression`: Cron string for schedule timing +- `retentionPolicy`: Object with retention rules (`keepLast`, `keepHourly`, `keepDaily`, `keepWeekly`, `keepMonthly`, `keepYearly`, `keepWithinDuration`) +- `includePatterns` / `excludePatterns`: Arrays of file patterns +- `excludeIfPresent`: Array of filenames; if any of these files exist in a directory, that directory is excluded (e.g., `[".nobackup"]`) +- `oneFileSystem`: Boolean; if `true`, restic won't cross filesystem boundaries (useful when backing up `/` to avoid traversing into mounted volumes) +- `enabled`: Boolean +- `notifications`: Array of notification destination names or detailed objects (see below) +- `mirrors`: Array of mirror repositories (see below) + +#### Notifications (detailed) + +`notifications` can be strings (destination names) or objects with fine-grained control: ```json [ @@ -332,6 +354,18 @@ Note for importing existing local repositories (migration): ] ``` +#### Mirrors + +Mirrors let you automatically copy snapshots to additional repositories after each backup. +Each mirror references a repository by name: + +```json +"mirrors": [ + { "repository": "s3-repo" }, + { "repository": "lo2", "enabled": false } +] +``` + ### User setup (automated) Zerobyte currently supports a **single user**. 
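A minimal sketch of the matching config block (mirroring the example config later in this series; both values are injected via the `${VAR_NAME}` env syntax described above):

```json
{
  "recoveryKey": "${RECOVERY_KEY}",
  "users": [
    { "username": "admin", "password": "${ADMIN_PASSWORD}" }
  ]
}
```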
diff --git a/examples/config-file-import/zerobyte.config.example.json b/examples/config-file-import/zerobyte.config.example.json index 6e30e340..33efda9a 100644 --- a/examples/config-file-import/zerobyte.config.example.json +++ b/examples/config-file-import/zerobyte.config.example.json @@ -125,8 +125,13 @@ "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, "includePatterns": ["important-folder"], "excludePatterns": ["*.tmp", "*.log"], + "excludeIfPresent": [".nobackup"], + "oneFileSystem": false, "enabled": true, - "notifications": ["slack-alerts"] + "notifications": ["slack-alerts"], + "mirrors": [ + { "repository": "s3-repo" } + ] } ], "notificationDestinations": [ From 45b5c0d752560898e1b619696e66717313584254 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Mon, 29 Dec 2025 11:04:12 +0100 Subject: [PATCH 13/30] feat(cli): add import-config command for manual config import Add CLI command to import configuration from file or stdin, providing an alternative to the env-var-based automatic import on startup. Features: - `import-config --config ` to import from a mounted file - `import-config --stdin` to import from piped input (no file mount needed) - `import-config --dry-run` to validate config without importing Changes: - Add app/server/cli/commands/import-config.ts with new command - Register importConfigCommand in CLI index - Refactor config-import.ts: extract runImport() and add applyConfigImport() for direct config object import (used by CLI) - Update docs with both import methods (env var and CLI examples) --- app/server/cli/commands/import-config.ts | 111 ++++++++++++++++++ app/server/cli/index.ts | 2 + app/server/modules/lifecycle/config-import.ts | 47 ++++++-- examples/config-file-import/README.md | 47 +++++++- 4 files changed, 192 insertions(+), 15 deletions(-) create mode 100644 app/server/cli/commands/import-config.ts diff --git a/app/server/cli/commands/import-config.ts b/app/server/cli/commands/import-config.ts new file mode 100644 index 00000000..ee94ef5e --- /dev/null +++ b/app/server/cli/commands/import-config.ts @@ -0,0 +1,111 @@ +import { Command } from "commander"; +import path from "node:path"; +import fs from "node:fs/promises"; + +async function readStdin(): Promise { + const chunks: Buffer[] = []; + for await (const chunk of process.stdin) { + chunks.push(chunk); + } + return Buffer.concat(chunks).toString("utf-8"); +} + +export const importConfigCommand = new Command("import-config") + .description("Import configuration from a JSON file or stdin") + .option("-c, --config ", "Path to the configuration file") + .option("--stdin", "Read configuration from stdin") + .option("--dry-run", "Validate the config without importing") + .action(async (options) => { + console.log("\n📦 Zerobyte Config Import\n"); + + if (!options.config && !options.stdin) { + console.error("❌ Either --config or --stdin is required"); + console.log("\nUsage:"); + console.log(" zerobyte import-config --config /path/to/config.json"); + console.log(" cat config.json | zerobyte import-config --stdin"); + process.exit(1); + } + + if (options.config && options.stdin) { + console.error("❌ Cannot use both --config and --stdin"); + process.exit(1); + } + + let configJson: string; + + if (options.stdin) { + console.log("📄 Reading config from stdin..."); + try { + configJson = await readStdin(); + if (!configJson.trim()) { + console.error("❌ No input received from stdin"); + process.exit(1); + } + } catch (e) { + const err = e instanceof Error ? 
e : new Error(String(e)); + console.error(`❌ Failed to read stdin: ${err.message}`); + process.exit(1); + } + } else { + const configPath = path.resolve(process.cwd(), options.config); + + // Check if file exists + try { + await fs.access(configPath); + } catch { + console.error(`❌ Config file not found: ${configPath}`); + process.exit(1); + } + + console.log(`📄 Config file: ${configPath}`); + configJson = await fs.readFile(configPath, "utf-8"); + } + + // Parse and validate JSON + let config: unknown; + try { + config = JSON.parse(configJson); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + console.error(`❌ Invalid JSON: ${err.message}`); + process.exit(1); + } + + if (options.dryRun) { + console.log("🔍 Dry run mode - validating config only\n"); + + const root = typeof config === "object" && config !== null ? config : {}; + const configObj = + "config" in root && typeof root.config === "object" && root.config !== null ? root.config : root; + + const sections = ["volumes", "repositories", "backupSchedules", "notificationDestinations", "users"]; + for (const section of sections) { + const items = (configObj as Record)[section] || []; + const count = Array.isArray(items) ? items.length : 0; + console.log(` ${section}: ${count} item(s)`); + } + + const hasRecoveryKey = !!(configObj as Record).recoveryKey; + console.log(` recoveryKey: ${hasRecoveryKey ? "provided" : "not provided"}`); + + console.log("\n✅ Config is valid JSON"); + return; + } + + console.log("🚀 Starting import...\n"); + + try { + // Ensure database is initialized with migrations + const { runDbMigrations } = await import("../../db/db"); + runDbMigrations(); + + const { applyConfigImport } = await import("../../modules/lifecycle/config-import"); + await applyConfigImport(config); + + console.log("\n✅ Import completed successfully"); + } catch (e) { + const err = e instanceof Error ? 
e : new Error(String(e)); + console.error(`\n❌ Import failed: ${err.message}`); + process.exit(1); + } + }); diff --git a/app/server/cli/index.ts b/app/server/cli/index.ts index 128631e6..5e974390 100644 --- a/app/server/cli/index.ts +++ b/app/server/cli/index.ts @@ -1,9 +1,11 @@ import { Command } from "commander"; +import { importConfigCommand } from "./commands/import-config"; import { resetPasswordCommand } from "./commands/reset-password"; const program = new Command(); program.name("zerobyte").description("Zerobyte CLI - Backup automation tool built on top of Restic").version("1.0.0"); +program.addCommand(importConfigCommand); program.addCommand(resetPasswordCommand); export async function runCLI(argv: string[]): Promise { diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 7a458326..1f3024ba 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -171,7 +171,10 @@ async function importNotificationDestinations(notificationDestinations: unknown[ if (!isRecord(n) || typeof n.name !== "string" || !isRecord(n.config) || typeof n.config.type !== "string") { throw new Error("Invalid notification destination entry"); } - const created = await notificationsServiceModule.notificationsService.createDestination(n.name, n.config as NotificationConfig); + const created = await notificationsServiceModule.notificationsService.createDestination( + n.name, + n.config as NotificationConfig, + ); logger.info(`Initialized notification destination from config: ${n.name}`); // If enabled is explicitly false, update the destination (default is true) @@ -187,7 +190,11 @@ async function importNotificationDestinations(notificationDestinations: unknown[ } function getScheduleVolumeName(schedule: Record): string | null { - return typeof schedule.volume === "string" ? schedule.volume : typeof schedule.volumeName === "string" ? schedule.volumeName : null; + return typeof schedule.volume === "string" + ? schedule.volume + : typeof schedule.volumeName === "string" + ? 
schedule.volumeName + : null; } function getScheduleRepositoryName(schedule: Record): string | null { @@ -335,7 +342,12 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise } if (createdSchedule && Array.isArray(s.notifications) && s.notifications.length > 0) { - await attachScheduleNotifications(createdSchedule.id, s.notifications, destinationBySlug, notificationsServiceModule); + await attachScheduleNotifications( + createdSchedule.id, + s.notifications, + destinationBySlug, + notificationsServiceModule, + ); } if (createdSchedule && Array.isArray(s.mirrors) && s.mirrors.length > 0) { @@ -450,18 +462,33 @@ async function setupInitialUser(users: unknown[], recoveryKey: string | null): P } } +async function runImport(config: ImportConfig): Promise { + await writeRecoveryKeyFromConfig(config.recoveryKey); + + await importVolumes(config.volumes); + await importRepositories(config.repositories); + await importNotificationDestinations(config.notificationDestinations); + await importBackupSchedules(config.backupSchedules); + await setupInitialUser(config.users, config.recoveryKey); +} + +/** + * Import configuration from a raw config object (used by CLI) + */ +export async function applyConfigImport(configRaw: unknown): Promise { + const config = parseImportConfig(configRaw); + await runImport(config); +} + +/** + * Import configuration from a file (used by env var startup) + */ export async function applyConfigImportFromFile(): Promise { const configRaw = await loadConfigFromFile(); const config = parseImportConfig(configRaw); - await writeRecoveryKeyFromConfig(config.recoveryKey); - try { - await importVolumes(config.volumes); - await importRepositories(config.repositories); - await importNotificationDestinations(config.notificationDestinations); - await importBackupSchedules(config.backupSchedules); - await setupInitialUser(config.users, config.recoveryKey); + await runImport(config); } catch (e) { const err = e instanceof Error ? e : new Error(String(e)); logger.error(`Failed to initialize from config: ${err.message}`); diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md index a16e0f0a..a36c1671 100644 --- a/examples/config-file-import/README.md +++ b/examples/config-file-import/README.md @@ -52,15 +52,52 @@ docker compose up -d ## Notes -### Enabling import +### Import methods -Config import is opt-in and only runs when: +Zerobyte supports two ways to import configuration: -- `ZEROBYTE_CONFIG_IMPORT=true` +#### Method 1: Environment variable (automatic on startup) -The config path defaults to `/app/zerobyte.config.json`, but you can override it via: +Set `ZEROBYTE_CONFIG_IMPORT=true` and the import runs automatically when the container starts: -- `ZEROBYTE_CONFIG_PATH=/app/your-config.json` +```yaml +services: + zerobyte: + environment: + - ZEROBYTE_CONFIG_IMPORT=true + - ZEROBYTE_CONFIG_PATH=/app/zerobyte.config.json # optional, this is the default +``` + +This is ideal for automated deployments where you want `docker compose up` to fully configure the instance. 
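To verify that the startup import actually ran, follow the container logs; the import logs one line per created resource (e.g. `Initialized volume from config: ...`, as emitted by config-import.ts):

```bash
# Bring the stack up, then watch the import output from the first boot
docker compose up -d
docker compose logs -f zerobyte
```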
+ +#### Method 2: CLI command (manual control) + +Run the import explicitly using the CLI: + +```bash +# Import from a mounted config file (starts a new temporary container) +docker compose run --rm zerobyte bun run cli import-config --config /app/zerobyte.config.json + +# Import from a mounted config file into an already-running container +docker compose exec zerobyte bun run cli import-config --config /app/zerobyte.config.json + +# Import from stdin (into running container) +cat zerobyte.config.json | docker compose exec -T zerobyte bun run cli import-config --stdin + +# Import from stdin in PowerShell (into running container) +Get-Content zerobyte.config.json | docker compose exec -T zerobyte bun run cli import-config --stdin + +# Validate config without importing (dry run) +docker compose run --rm zerobyte bun run cli import-config --config /app/zerobyte.config.json --dry-run +``` + +The `--stdin` option is useful when you don't want to mount the config file - just pipe it directly. + +This is useful when you want to: +- See import output directly in your terminal +- Re-run import after fixing issues +- Test config files before applying them +- Import without modifying your docker-compose.yml ### Secrets via env vars From e4f66de4471c21524db231b1782e825973bda310 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Mon, 29 Dec 2025 11:16:36 +0100 Subject: [PATCH 14/30] linting and SFTP volume support --- .../repositories/repositories.service.ts | 10 +- examples/config-file-import/README.md | 37 ++ .../zerobyte.config.example.json | 457 +++++++++--------- 3 files changed, 277 insertions(+), 227 deletions(-) diff --git a/app/server/modules/repositories/repositories.service.ts b/app/server/modules/repositories/repositories.service.ts index dd5088a2..8f8c414f 100644 --- a/app/server/modules/repositories/repositories.service.ts +++ b/app/server/modules/repositories/repositories.service.ts @@ -1,6 +1,6 @@ import crypto from "node:crypto"; import { eq, or } from "drizzle-orm"; -import { InternalServerError, NotFoundError } from "http-errors-enhanced"; +import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; import { db } from "../../db/db"; import { repositoriesTable } from "../../db/schema"; import { toMessage } from "../../utils/errors"; @@ -74,7 +74,9 @@ const createRepository = async (name: string, config: RepositoryConfig, compress where: eq(repositoriesTable.shortId, shortId), }); if (existingByShortId) { - throw new ConflictError(`A repository with shortId '${shortId}' already exists. The shortId is used as the subdirectory name for local repositories.`); + throw new ConflictError( + `A repository with shortId '${shortId}' already exists. The shortId is used as the subdirectory name for local repositories.`, + ); } } else { shortId = generateShortId(); @@ -95,13 +97,13 @@ const createRepository = async (name: string, config: RepositoryConfig, compress if (repoExists && !config.isExistingRepository) { throw new ConflictError( `A restic repository already exists at this location. ` + - `If you want to use the existing repository, set "isExistingRepository": true in the config.` + `If you want to use the existing repository, set "isExistingRepository": true in the config.`, ); } if (!repoExists && config.isExistingRepository) { throw new InternalServerError( - `Cannot access existing repository. Verify the path/credentials are correct and the repository exists.` + `Cannot access existing repository. 
Verify the path/credentials are correct and the repository exists.`, ); } diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md index a36c1671..3e37f353 100644 --- a/examples/config-file-import/README.md +++ b/examples/config-file-import/README.md @@ -264,6 +264,43 @@ This example is intended to be the primary, copy/paste-friendly reference for co } ``` +#### SFTP + +```json +{ + "name": "sftp-volume", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "password": "${SFTP_PASSWORD}", + "path": "/data", + "readOnly": false, + "skipHostKeyCheck": true + } +} +``` + +For key-based authentication: + +```json +{ + "name": "sftp-volume-key", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "privateKey": "${SFTP_PRIVATE_KEY}", + "path": "/data", + "readOnly": false, + "skipHostKeyCheck": false, + "knownHosts": "sftp.example.com ssh-ed25519 AAAA..." + } +} +``` + ### Repository types #### Local diff --git a/examples/config-file-import/zerobyte.config.example.json b/examples/config-file-import/zerobyte.config.example.json index 33efda9a..584fa7d1 100644 --- a/examples/config-file-import/zerobyte.config.example.json +++ b/examples/config-file-import/zerobyte.config.example.json @@ -1,225 +1,236 @@ { - "volumes": [ - { - "name": "local-volume", - "config": { - "backend": "directory", - "path": "/mydata", - "readOnly": true - } - }, - { - "name": "nfs-volume", - "config": { - "backend": "nfs", - "server": "nfs.example.com", - "exportPath": "/data", - "port": 2049, - "version": "4", - "readOnly": false - } - }, - { - "name": "smb-volume", - "config": { - "backend": "smb", - "server": "smb.example.com", - "share": "shared", - "username": "user", - "password": "${SMB_PASSWORD}", - "vers": "3.0", - "domain": "WORKGROUP", - "port": 445, - "readOnly": false - } - }, - { - "name": "webdav-volume", - "config": { - "backend": "webdav", - "server": "webdav.example.com", - "path": "/remote.php/webdav", - "username": "user", - "password": "${WEBDAV_PASSWORD}", - "port": 80, - "readOnly": false, - "ssl": true - } - } - ], - "repositories": [ - { - "name": "local-repo", - "config": { - "backend": "local", - "path": "/var/lib/zerobyte/repositories" - }, - "compressionMode": "auto" - }, - { - "name": "s3-repo", - "config": { - "backend": "s3", - "bucket": "mybucket", - "accessKeyId": "${ACCESS_KEY_ID}", - "secretAccessKey": "${SECRET_ACCESS_KEY}" - }, - "compressionMode": "auto" - }, - { - "name": "gcs-repo", - "config": { - "backend": "gcs", - "bucket": "mybucket", - "projectId": "my-gcp-project", - "credentialsJson": "${GCS_CREDENTIALS}" - } - }, - { - "name": "azure-repo", - "config": { - "backend": "azure", - "container": "mycontainer", - "accountName": "myaccount", - "accountKey": "${AZURE_KEY}" - } - }, - { - "name": "rclone-repo", - "config": { - "backend": "rclone", - "remote": "myremote", - "path": "backups/zerobyte" - } - }, - { - "name": "webdav-repo", - "config": { - "backend": "webdav", - "server": "webdav.example.com", - "path": "/remote.php/webdav", - "username": "user", - "password": "${WEBDAV_PASSWORD}", - "port": 80, - "ssl": true - } - }, - { - "name": "sftp-repo", - "config": { - "backend": "sftp", - "host": "sftp.example.com", - "port": 22, - "user": "sftpuser", - "privateKey": "${SFTP_PRIVATE_KEY}", - "path": "/backups" - } - } - ], - "backupSchedules": [ - { - "name": "local-volume-local-repo", - "volume": "local-volume", - "repository": "local-repo", - 
"cronExpression": "0 2 * * *", - "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, - "includePatterns": ["important-folder"], - "excludePatterns": ["*.tmp", "*.log"], - "excludeIfPresent": [".nobackup"], - "oneFileSystem": false, - "enabled": true, - "notifications": ["slack-alerts"], - "mirrors": [ - { "repository": "s3-repo" } - ] - } - ], - "notificationDestinations": [ - { - "name": "slack-alerts", - "config": { - "type": "slack", - "webhookUrl": "${SLACK_WEBHOOK_URL}", - "channel": "#backups", - "username": "zerobyte", - "iconEmoji": ":floppy_disk:" - } - }, - { - "name": "email-admin", - "config": { - "type": "email", - "smtpHost": "smtp.example.com", - "smtpPort": 587, - "username": "admin@example.com", - "password": "${EMAIL_PASSWORD}", - "from": "zerobyte@example.com", - "to": ["admin@example.com"], - "useTLS": true - } - }, - { - "name": "discord-backups", - "config": { - "type": "discord", - "webhookUrl": "${DISCORD_WEBHOOK_URL}", - "username": "zerobyte", - "avatarUrl": "https://example.com/avatar.png", - "threadId": "1234567890" - } - }, - { - "name": "gotify-notify", - "config": { - "type": "gotify", - "serverUrl": "https://gotify.example.com", - "token": "${GOTIFY_TOKEN}", - "path": "/message", - "priority": 5 - } - }, - { - "name": "ntfy-notify", - "config": { - "type": "ntfy", - "serverUrl": "https://ntfy.example.com", - "topic": "zerobyte-backups", - "priority": "high", - "username": "ntfyuser", - "password": "${NTFY_PASSWORD}" - } - }, - { - "name": "pushover-notify", - "config": { - "type": "pushover", - "userKey": "${PUSHOVER_USER_KEY}", - "apiToken": "${PUSHOVER_API_TOKEN}", - "devices": "phone,tablet", - "priority": 1 - } - }, - { - "name": "telegram-notify", - "config": { - "type": "telegram", - "botToken": "${TELEGRAM_BOT_TOKEN}", - "chatId": "123456789" - } - }, - { - "name": "custom-shoutrrr", - "config": { - "type": "custom", - "shoutrrrUrl": "${SHOUTRRR_URL}" - } - } - ], - "recoveryKey": "${RECOVERY_KEY}", - "users": [ - { - "username": "admin", - "password": "${ADMIN_PASSWORD}" - } - ] + "volumes": [ + { + "name": "local-volume", + "config": { + "backend": "directory", + "path": "/mydata", + "readOnly": true + } + }, + { + "name": "nfs-volume", + "config": { + "backend": "nfs", + "server": "nfs.example.com", + "exportPath": "/data", + "port": 2049, + "version": "4", + "readOnly": false + } + }, + { + "name": "smb-volume", + "config": { + "backend": "smb", + "server": "smb.example.com", + "share": "shared", + "username": "user", + "password": "${SMB_PASSWORD}", + "vers": "3.0", + "domain": "WORKGROUP", + "port": 445, + "readOnly": false + } + }, + { + "name": "webdav-volume", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "readOnly": false, + "ssl": true + } + }, + { + "name": "sftp-volume", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "password": "${SFTP_PASSWORD}", + "path": "/data", + "readOnly": false, + "skipHostKeyCheck": true + } + } + ], + "repositories": [ + { + "name": "local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories" + }, + "compressionMode": "auto" + }, + { + "name": "s3-repo", + "config": { + "backend": "s3", + "bucket": "mybucket", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + }, + "compressionMode": "auto" + }, + { + "name": "gcs-repo", + "config": { + "backend": "gcs", + 
"bucket": "mybucket", + "projectId": "my-gcp-project", + "credentialsJson": "${GCS_CREDENTIALS}" + } + }, + { + "name": "azure-repo", + "config": { + "backend": "azure", + "container": "mycontainer", + "accountName": "myaccount", + "accountKey": "${AZURE_KEY}" + } + }, + { + "name": "rclone-repo", + "config": { + "backend": "rclone", + "remote": "myremote", + "path": "backups/zerobyte" + } + }, + { + "name": "webdav-repo", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "ssl": true + } + }, + { + "name": "sftp-repo", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "user": "sftpuser", + "privateKey": "${SFTP_PRIVATE_KEY}", + "path": "/backups" + } + } + ], + "backupSchedules": [ + { + "name": "local-volume-local-repo", + "volume": "local-volume", + "repository": "local-repo", + "cronExpression": "0 2 * * *", + "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, + "includePatterns": ["important-folder"], + "excludePatterns": ["*.tmp", "*.log"], + "excludeIfPresent": [".nobackup"], + "oneFileSystem": false, + "enabled": true, + "notifications": ["slack-alerts"], + "mirrors": [{ "repository": "s3-repo" }] + } + ], + "notificationDestinations": [ + { + "name": "slack-alerts", + "config": { + "type": "slack", + "webhookUrl": "${SLACK_WEBHOOK_URL}", + "channel": "#backups", + "username": "zerobyte", + "iconEmoji": ":floppy_disk:" + } + }, + { + "name": "email-admin", + "config": { + "type": "email", + "smtpHost": "smtp.example.com", + "smtpPort": 587, + "username": "admin@example.com", + "password": "${EMAIL_PASSWORD}", + "from": "zerobyte@example.com", + "to": ["admin@example.com"], + "useTLS": true + } + }, + { + "name": "discord-backups", + "config": { + "type": "discord", + "webhookUrl": "${DISCORD_WEBHOOK_URL}", + "username": "zerobyte", + "avatarUrl": "https://example.com/avatar.png", + "threadId": "1234567890" + } + }, + { + "name": "gotify-notify", + "config": { + "type": "gotify", + "serverUrl": "https://gotify.example.com", + "token": "${GOTIFY_TOKEN}", + "path": "/message", + "priority": 5 + } + }, + { + "name": "ntfy-notify", + "config": { + "type": "ntfy", + "serverUrl": "https://ntfy.example.com", + "topic": "zerobyte-backups", + "priority": "high", + "username": "ntfyuser", + "password": "${NTFY_PASSWORD}" + } + }, + { + "name": "pushover-notify", + "config": { + "type": "pushover", + "userKey": "${PUSHOVER_USER_KEY}", + "apiToken": "${PUSHOVER_API_TOKEN}", + "devices": "phone,tablet", + "priority": 1 + } + }, + { + "name": "telegram-notify", + "config": { + "type": "telegram", + "botToken": "${TELEGRAM_BOT_TOKEN}", + "chatId": "123456789" + } + }, + { + "name": "custom-shoutrrr", + "config": { + "type": "custom", + "shoutrrrUrl": "${SHOUTRRR_URL}" + } + } + ], + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "admin", + "password": "${ADMIN_PASSWORD}" + } + ] } From 5a4f4641b440f5c7b806184b3ebe80739be4596f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Mon, 29 Dec 2025 18:34:02 +0100 Subject: [PATCH 15/30] feat: improve config import with result tracking and idempotent repository detection - Add ImportResult type to track succeeded/warnings/errors counts across all import functions - Implement multi-layer repository duplicate detection: - URL-based check (same path/bucket/endpoint already registered) - Restic repo check (path is already a restic repository) - Name-based 
fallback check - Standardize logging between CLI and env var import methods with logImportSummary() - CLI exits with code 1 when import has errors - Remove shortId override for local repo migrations (use full path with isExistingRepository instead) - Update example JSON and README: - Document that local repo path is optional (defaults to /var/lib/zerobyte/repositories) - Add existing-local-repo example with isExistingRepository: true - Add S3 endpoint field to example - Expand config behavior docs to explain all repository skip conditions - Improve .gitignore to exclude all JSON except example template --- app/server/cli/commands/import-config.ts | 11 +- app/server/modules/lifecycle/config-import.ts | 183 +++++++++++++++--- .../repositories/repositories.service.ts | 18 +- examples/config-file-import/.gitignore | 4 +- examples/config-file-import/README.md | 27 ++- .../zerobyte.config.example.json | 12 +- 6 files changed, 197 insertions(+), 58 deletions(-) diff --git a/app/server/cli/commands/import-config.ts b/app/server/cli/commands/import-config.ts index ee94ef5e..8462a3bd 100644 --- a/app/server/cli/commands/import-config.ts +++ b/app/server/cli/commands/import-config.ts @@ -92,20 +92,21 @@ export const importConfigCommand = new Command("import-config") return; } - console.log("🚀 Starting import...\n"); - try { // Ensure database is initialized with migrations const { runDbMigrations } = await import("../../db/db"); runDbMigrations(); const { applyConfigImport } = await import("../../modules/lifecycle/config-import"); - await applyConfigImport(config); + const result = await applyConfigImport(config); - console.log("\n✅ Import completed successfully"); + // Exit with error code if there were errors + if (result.errors > 0) { + process.exit(1); + } } catch (e) { const err = e instanceof Error ? 
e : new Error(String(e)); - console.error(`\n❌ Import failed: ${err.message}`); + console.error(`❌ Import failed: ${err.message}`); process.exit(1); } }); diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 1f3024ba..250f38f9 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -34,6 +34,12 @@ type ImportConfig = { recoveryKey: string | null; }; +export type ImportResult = { + succeeded: number; + warnings: number; + errors: number; +}; + function interpolateEnvVars(value: unknown): unknown { if (typeof value === "string") { return value.replace(/\$\{([^}]+)\}/g, (_, v) => { @@ -94,11 +100,13 @@ function parseImportConfig(configRaw: unknown): ImportConfig { }; } -async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise { +async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise { + const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + try { const fs = await import("node:fs/promises"); const { RESTIC_PASS_FILE } = await import("../../core/constants.js"); - if (!recoveryKey) return; + if (!recoveryKey) return result; if (typeof recoveryKey !== "string" || recoveryKey.length !== 64 || !/^[a-fA-F0-9]{64}$/.test(recoveryKey)) { throw new Error("Recovery key must be a 64-character hex string"); @@ -108,18 +116,25 @@ async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise false, ); if (passFileExists) { - logger.info(`Restic passfile already exists at ${RESTIC_PASS_FILE}; skipping config recovery key write`); - return; + logger.warn(`Restic passfile already exists at ${RESTIC_PASS_FILE}; skipping config recovery key write`); + result.warnings++; + return result; } await fs.writeFile(RESTIC_PASS_FILE, recoveryKey, { mode: 0o600 }); logger.info(`Recovery key written from config to ${RESTIC_PASS_FILE}`); + result.succeeded++; } catch (err) { const e = err instanceof Error ? err : new Error(String(err)); logger.error(`Failed to write recovery key from config: ${e.message}`); + result.errors++; } + + return result; } -async function importVolumes(volumes: unknown[]): Promise { +async function importVolumes(volumes: unknown[]): Promise { + const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + for (const v of volumes) { try { if (!isRecord(v) || typeof v.name !== "string" || !isRecord(v.config) || typeof v.config.backend !== "string") { @@ -127,6 +142,7 @@ async function importVolumes(volumes: unknown[]): Promise { } await volumeService.createVolume(v.name, v.config as BackendConfig); logger.info(`Initialized volume from config: ${v.name}`); + result.succeeded++; // If autoRemount is explicitly false, update the volume (default is true) if (v.autoRemount === false) { @@ -136,17 +152,78 @@ async function importVolumes(volumes: unknown[]): Promise { } catch (e) { const err = e instanceof Error ? 
e : new Error(String(e)); logger.warn(`Volume not created: ${err.message}`); + result.warnings++; } } + + return result; } -async function importRepositories(repositories: unknown[]): Promise { +async function importRepositories(repositories: unknown[]): Promise { + const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; const repoServiceModule = await import("../repositories/repositories.service"); + const { buildRepoUrl, restic } = await import("../../utils/restic"); + + // Get existing repositories and build sets for duplicate detection + const existingRepos = await repoServiceModule.repositoriesService.listRepositories(); + const existingNames = new Set(existingRepos.map((repo) => repo.name)); + const existingUrls = new Set(); + + for (const repo of existingRepos) { + try { + // Config fields used for URL (path, bucket, endpoint, etc.) are not encrypted + const url = buildRepoUrl(repo.config as RepositoryConfig); + existingUrls.add(url); + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Could not build URL for existing repository '${repo.name}': ${err.message}`); + } + } + for (const r of repositories) { try { if (!isRecord(r) || typeof r.name !== "string" || !isRecord(r.config) || typeof r.config.backend !== "string") { throw new Error("Invalid repository entry"); } + + // Skip if a repository pointing to the same location is already registered in DB + try { + const incomingUrl = buildRepoUrl(r.config as RepositoryConfig); + if (existingUrls.has(incomingUrl)) { + logger.warn(`Skipping '${r.name}': another repository is already registered for location ${incomingUrl}`); + result.warnings++; + continue; + } + } catch (e) { + const err = e instanceof Error ? e : new Error(String(e)); + logger.warn(`Could not build URL for '${r.name}' to check duplicates: ${err.message}`); + } + + // For local repos without isExistingRepository, check if the provided path is already a restic repo + // This catches the case where user forgot to set isExistingRepository: true + if (r.config.backend === "local" && !r.config.isExistingRepository) { + const isAlreadyRepo = await restic + .snapshots({ ...r.config, isExistingRepository: true } as RepositoryConfig) + .then(() => true) + .catch(() => false); + + if (isAlreadyRepo) { + logger.warn( + `Skipping '${r.name}': path '${r.config.path}' is already a restic repository. ` + + `Set "isExistingRepository": true to import it, or use a different path for a new repository.`, + ); + result.warnings++; + continue; + } + } + + // Skip if a repository with the same name already exists (fallback for repos without deterministic paths) + if (existingNames.has(r.name)) { + logger.warn(`Skipping '${r.name}': a repository with this name already exists`); + result.warnings++; + continue; + } + const compressionMode = r.compressionMode === "auto" || r.compressionMode === "off" || r.compressionMode === "max" ? r.compressionMode @@ -157,14 +234,19 @@ async function importRepositories(repositories: unknown[]): Promise { compressionMode, ); logger.info(`Initialized repository from config: ${r.name}`); + result.succeeded++; } catch (e) { const err = e instanceof Error ? 
e : new Error(String(e)); logger.warn(`Repository not created: ${err.message}`); + result.warnings++; } } + + return result; } -async function importNotificationDestinations(notificationDestinations: unknown[]): Promise { +async function importNotificationDestinations(notificationDestinations: unknown[]): Promise { + const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; const notificationsServiceModule = await import("../notifications/notifications.service"); for (const n of notificationDestinations) { try { @@ -176,6 +258,7 @@ async function importNotificationDestinations(notificationDestinations: unknown[ n.config as NotificationConfig, ); logger.info(`Initialized notification destination from config: ${n.name}`); + result.succeeded++; // If enabled is explicitly false, update the destination (default is true) if (n.enabled === false) { @@ -185,8 +268,11 @@ async function importNotificationDestinations(notificationDestinations: unknown[ } catch (e) { const err = e instanceof Error ? e : new Error(String(e)); logger.warn(`Notification destination not created: ${err.message}`); + result.warnings++; } } + + return result; } function getScheduleVolumeName(schedule: Record): string | null { @@ -261,8 +347,9 @@ async function attachScheduleNotifications( } } -async function importBackupSchedules(backupSchedules: unknown[]): Promise { - if (!Array.isArray(backupSchedules) || backupSchedules.length === 0) return; +async function importBackupSchedules(backupSchedules: unknown[]): Promise { + const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + if (!Array.isArray(backupSchedules) || backupSchedules.length === 0) return result; const backupServiceModule = await import("../backups/backups.service"); const notificationsServiceModule = await import("../notifications/notifications.service"); @@ -282,28 +369,33 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise const volumeName = getScheduleVolumeName(s); if (typeof volumeName !== "string" || volumeName.length === 0) { logger.warn("Backup schedule not created: Missing volume name"); + result.warnings++; continue; } const volume = volumeByName.get(volumeName); if (!volume) { logger.warn(`Backup schedule not created: Volume '${volumeName}' not found`); + result.warnings++; continue; } const repositoryName = getScheduleRepositoryName(s); if (typeof repositoryName !== "string" || repositoryName.length === 0) { logger.warn("Backup schedule not created: Missing repository name"); + result.warnings++; continue; } const repository = repoByName.get(repositoryName); if (!repository) { logger.warn(`Backup schedule not created: Repository '${repositoryName}' not found`); + result.warnings++; continue; } const scheduleName = typeof s.name === "string" && s.name.length > 0 ? s.name : `${volumeName}-${repositoryName}`; if (typeof s.cronExpression !== "string" || s.cronExpression.length === 0) { logger.warn(`Backup schedule not created: Missing cronExpression for '${scheduleName}'`); + result.warnings++; continue; } @@ -335,9 +427,11 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise oneFileSystem: typeof s.oneFileSystem === "boolean" ? s.oneFileSystem : undefined, }); logger.info(`Initialized backup schedule from config: ${scheduleName}`); + result.succeeded++; } catch (e) { const err = e instanceof Error ? 
e : new Error(String(e)); logger.warn(`Backup schedule not created: ${err.message}`); + result.warnings++; continue; } @@ -354,6 +448,8 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise await attachScheduleMirrors(createdSchedule.id, s.mirrors, repoByName, backupServiceModule); } } + + return result; } async function attachScheduleMirrors( @@ -403,17 +499,20 @@ async function attachScheduleMirrors( } } -async function setupInitialUser(users: unknown[], recoveryKey: string | null): Promise { +async function setupInitialUser(users: unknown[], recoveryKey: string | null): Promise { + const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + try { const { authService } = await import("../auth/auth.service"); const hasUsers = await authService.hasUsers(); - if (hasUsers) return; - if (!Array.isArray(users) || users.length === 0) return; + if (hasUsers) return result; + if (!Array.isArray(users) || users.length === 0) return result; if (users.length > 1) { logger.warn( "Multiple users provided in config. Zerobyte currently supports a single initial user; extra entries will be ignored.", ); + result.warnings++; } for (const u of users) { @@ -429,10 +528,12 @@ async function setupInitialUser(users: unknown[], recoveryKey: string | null): P typeof u.hasDownloadedResticPassword === "boolean" ? u.hasDownloadedResticPassword : Boolean(recoveryKey), }); logger.info(`User '${u.username}' imported with password hash from config.`); + result.succeeded++; break; } catch (error) { const err = error instanceof Error ? error : new Error(String(error)); logger.warn(`User '${u.username}' not imported: ${err.message}`); + result.warnings++; } continue; } @@ -446,38 +547,70 @@ async function setupInitialUser(users: unknown[], recoveryKey: string | null): P await db.update(usersTable).set({ hasDownloadedResticPassword }).where(eq(usersTable.id, user.id)); } logger.info(`User '${u.username}' created from config.`); + result.succeeded++; break; } catch (error) { const err = error instanceof Error ? error : new Error(String(error)); logger.warn(`User '${u.username}' not created: ${err.message}`); + result.warnings++; } continue; } logger.warn(`User '${u.username}' missing passwordHash/password; skipping`); + result.warnings++; } } catch (err) { const e = err instanceof Error ? 
err : new Error(String(err)); logger.error(`Automated user setup failed: ${e.message}`); + result.errors++; } + + return result; } -async function runImport(config: ImportConfig): Promise { - await writeRecoveryKeyFromConfig(config.recoveryKey); +async function runImport(config: ImportConfig): Promise { + const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + + const recoveryKeyResult = await writeRecoveryKeyFromConfig(config.recoveryKey); + const volumeResult = await importVolumes(config.volumes); + const repoResult = await importRepositories(config.repositories); + const notifResult = await importNotificationDestinations(config.notificationDestinations); + const scheduleResult = await importBackupSchedules(config.backupSchedules); + const userResult = await setupInitialUser(config.users, config.recoveryKey); + + for (const r of [recoveryKeyResult, volumeResult, repoResult, notifResult, scheduleResult, userResult]) { + result.succeeded += r.succeeded; + result.warnings += r.warnings; + result.errors += r.errors; + } + + return result; +} - await importVolumes(config.volumes); - await importRepositories(config.repositories); - await importNotificationDestinations(config.notificationDestinations); - await importBackupSchedules(config.backupSchedules); - await setupInitialUser(config.users, config.recoveryKey); +function logImportSummary(result: ImportResult): void { + if (result.errors > 0) { + logger.error( + `Config import completed with ${result.errors} error(s) and ${result.warnings} warning(s), ${result.succeeded} item(s) imported`, + ); + } else if (result.warnings > 0) { + logger.warn(`Config import completed with ${result.warnings} warning(s), ${result.succeeded} item(s) imported`); + } else if (result.succeeded > 0) { + logger.info(`Config import completed successfully: ${result.succeeded} item(s) imported`); + } else { + logger.info("Config import completed: no items to import"); + } } /** * Import configuration from a raw config object (used by CLI) */ -export async function applyConfigImport(configRaw: unknown): Promise { +export async function applyConfigImport(configRaw: unknown): Promise { + logger.info("Starting config import..."); const config = parseImportConfig(configRaw); - await runImport(config); + const result = await runImport(config); + logImportSummary(result); + return result; } /** @@ -485,12 +618,16 @@ export async function applyConfigImport(configRaw: unknown): Promise { */ export async function applyConfigImportFromFile(): Promise { const configRaw = await loadConfigFromFile(); + if (configRaw === null) return; // No config file, nothing to do + + logger.info("Starting config import from file..."); const config = parseImportConfig(configRaw); try { - await runImport(config); + const result = await runImport(config); + logImportSummary(result); } catch (e) { const err = e instanceof Error ? 
e : new Error(String(e)); - logger.error(`Failed to initialize from config: ${err.message}`); + logger.error(`Config import failed: ${err.message}`); } } diff --git a/app/server/modules/repositories/repositories.service.ts b/app/server/modules/repositories/repositories.service.ts index 8f8c414f..2656dbd4 100644 --- a/app/server/modules/repositories/repositories.service.ts +++ b/app/server/modules/repositories/repositories.service.ts @@ -64,23 +64,7 @@ const encryptConfig = async (config: RepositoryConfig): Promise { const id = crypto.randomUUID(); - - // Determine shortId: use provided config.name for local repo migrations, otherwise generate - let shortId: string; - if (config.backend === "local" && config.name?.length) { - // User provided a name (migration scenario) - check for conflicts - shortId = config.name; - const existingByShortId = await db.query.repositoriesTable.findFirst({ - where: eq(repositoriesTable.shortId, shortId), - }); - if (existingByShortId) { - throw new ConflictError( - `A repository with shortId '${shortId}' already exists. The shortId is used as the subdirectory name for local repositories.`, - ); - } - } else { - shortId = generateShortId(); - } + const shortId = generateShortId(); let processedConfig = config; if (config.backend === "local" && !config.isExistingRepository) { diff --git a/examples/config-file-import/.gitignore b/examples/config-file-import/.gitignore index 9961ad53..481fcd55 100644 --- a/examples/config-file-import/.gitignore +++ b/examples/config-file-import/.gitignore @@ -1,2 +1,4 @@ .env -zerobyte.config.json + +*.json +!zerobyte.config.example.json diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md index 3e37f353..16d3adbc 100644 --- a/examples/config-file-import/README.md +++ b/examples/config-file-import/README.md @@ -160,8 +160,11 @@ See the runnable example: The config file is applied on startup using a **create-only** approach: -- Resources defined in the config are only created if they don't already exist in the database -- Existing resources with the same name are **not overwritten** (a warning is logged and the config entry is skipped) +- **Volumes, notifications, schedules**: Skipped if a resource with the same name already exists +- **Repositories**: Skipped if any of these conditions are met: + - A repository pointing to the same location (path/bucket/endpoint) is already registered + - For local repos: the path is already a restic repository (set `isExistingRepository: true` to import it) + - A repository with the same name already exists - Changes made via the UI are preserved across container restarts - To update a resource from config, either modify it via the UI or delete it first @@ -303,36 +306,39 @@ For key-based authentication: ### Repository types -#### Local +#### Local (new repository) + +Creates a new restic repository. The `path` is optional and defaults to `/var/lib/zerobyte/repositories`: ```json { "name": "local-repo", "config": { - "backend": "local", - "path": "/var/lib/zerobyte/repositories" + "backend": "local" }, "compressionMode": "auto" } ``` -Note for importing existing local repositories (migration): +The actual repository will be created at `{path}/{auto-generated-id}`. 
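For illustration, assuming the default path and a hypothetical generated id of `a1b2c3`, the result is a standard restic repository layout:

```
/var/lib/zerobyte/repositories/a1b2c3/
├── config
├── data/
├── index/
├── keys/
├── locks/
└── snapshots/
```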
-- include `config.name` and set `config.isExistingRepository: true` -- the actual restic repo is stored at `{path}/{name}` +#### Local (existing repository) + +To import an existing restic repository, set `isExistingRepository: true` and provide the **full path to the repository root**: ```json { "name": "my-local-repo", "config": { "backend": "local", - "path": "/var/lib/zerobyte/repositories", - "name": "abc123", + "path": "/var/lib/zerobyte/repositories/abc123", "isExistingRepository": true } } ``` +Note: The `path` must point directly to the restic repository root (the directory containing `config`, `data/`, `keys/`, etc.). + #### S3-compatible ```json @@ -340,6 +346,7 @@ Note for importing existing local repositories (migration): "name": "backup-repo", "config": { "backend": "s3", + "endpoint": "s3.amazonaws.com", "bucket": "mybucket", "accessKeyId": "${ACCESS_KEY_ID}", "secretAccessKey": "${SECRET_ACCESS_KEY}" diff --git a/examples/config-file-import/zerobyte.config.example.json b/examples/config-file-import/zerobyte.config.example.json index 584fa7d1..9680f6d2 100644 --- a/examples/config-file-import/zerobyte.config.example.json +++ b/examples/config-file-import/zerobyte.config.example.json @@ -64,15 +64,23 @@ { "name": "local-repo", "config": { - "backend": "local", - "path": "/var/lib/zerobyte/repositories" + "backend": "local" }, "compressionMode": "auto" }, + { + "name": "existing-local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories/abc123", + "isExistingRepository": true + } + }, { "name": "s3-repo", "config": { "backend": "s3", + "endpoint": "s3.amazonaws.com", "bucket": "mybucket", "accessKeyId": "${ACCESS_KEY_ID}", "secretAccessKey": "${SECRET_ACCESS_KEY}" From a71009ff7842228b00b368148c8aa834b111cd3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Mon, 29 Dec 2025 20:41:01 +0100 Subject: [PATCH 16/30] Increment warning count on volume mount failure --- app/server/modules/lifecycle/config-import.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 250f38f9..b4547caa 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -407,6 +407,7 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise Date: Mon, 29 Dec 2025 20:46:21 +0100 Subject: [PATCH 17/30] Enhance error logging for repo existence check Improve error handling in repository existence check. --- app/server/modules/lifecycle/config-import.ts | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index b4547caa..ecfacd3a 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -205,7 +205,11 @@ async function importRepositories(repositories: unknown[]): Promise true) - .catch(() => false); + .catch((e) => { + const err = e instanceof Error ? 
e : new Error(String(e));
+          logger.debug(`Repo existence check for '${r.name}': ${err.message}`);
+          return false;
+        });
 
       if (isAlreadyRepo) {
         logger.warn(

From f908883784fdeb3105fb69fb0c30772c50f9236e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?=
Date: Mon, 29 Dec 2025 20:54:30 +0100
Subject: [PATCH 18/30] Fix error handling in repository existence check

---
 app/server/modules/lifecycle/config-import.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts
index ecfacd3a..3b20d9b7 100644
--- a/app/server/modules/lifecycle/config-import.ts
+++ b/app/server/modules/lifecycle/config-import.ts
@@ -206,7 +206,7 @@ async function importRepositories(repositories: unknown[]): Promise<ImportResult> {
         .then(() => true)
         .catch((e) => {
-          const err = e instanceof Error ?  e : new Error(String(e));
+          const err = e instanceof Error ? e : new Error(String(e));
           logger.debug(`Repo existence check for '${r.name}': ${err.message}`);
           return false;
         });

From 16ef4b4861afacbee40cc462974c9eb2ea123d89 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?=
Date: Tue, 30 Dec 2025 10:31:18 +0100
Subject: [PATCH 19/30] docs: add host-side environment variable interpolation
 examples for CI/CD

---
 examples/config-file-import/README.md | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md
index 16d3adbc..971b0a21 100644
--- a/examples/config-file-import/README.md
+++ b/examples/config-file-import/README.md
@@ -140,6 +140,26 @@ This example uses:
 
 So to make `${VAR_NAME}` work, put the variables in `.env` (or otherwise provide them in the container environment).
 
+##### Host-side interpolation (alternative)
+
+You can also interpolate environment variables **on the host** before piping the config to the container.
+This is useful in CI/CD pipelines where secrets are injected by the pipeline and you don't want them exposed to the container environment.
+
+**Linux/macOS** (using `envsubst`):
+
+```bash
+# Load .env and substitute variables before piping
+export $(grep -v '^#' .env | xargs) && envsubst < zerobyte.config.json | docker compose exec -T zerobyte bun run cli import-config --stdin
+```
+
+**PowerShell** (7 or later, for script-block `-replace`):
+
+```powershell
+# Load .env and substitute variables before piping
+Get-Content .env | ForEach-Object { if ($_ -match '^([^#][^=]+)=(.*)$') { [Environment]::SetEnvironmentVariable($matches[1], $matches[2]) } }
+(Get-Content zerobyte.config.json -Raw) -replace '\$\{(\w+)\}', { [Environment]::GetEnvironmentVariable($_.Groups[1].Value) } | docker compose exec -T zerobyte bun run cli import-config --stdin
+```
+
 #### 2) Secret placeholders: `env://...` and `file://...`
 
 Separately from config import, Zerobyte supports **secret placeholders** for *some sensitive fields*.
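One caveat on the host-side `envsubst` example above: by default it substitutes every `${...}` token it finds. GNU gettext's `envsubst` accepts an optional shell-format argument listing exactly which variables to replace, which keeps any other placeholders intact for the container to resolve later:

```bash
# Only the listed variables are substituted; all other ${...} tokens pass through unchanged
envsubst '${SMB_PASSWORD} ${ACCESS_KEY_ID} ${SECRET_ACCESS_KEY}' < zerobyte.config.json \
  | docker compose exec -T zerobyte bun run cli import-config --stdin
```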
From 60c0778b74d47cbd6848a03a7860e83c742da892 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Tue, 30 Dec 2025 12:39:02 +0100 Subject: [PATCH 20/30] refactor: improve config import logging, add CLI JSON output and idempotent re-runs - Add `--json` flag for machine-readable JSON output - Add `--log-level` flag to control logging verbosity - Add `skipped` counter to ImportResult for idempotent operations - Change "already exists" conditions from warnings to skipped (info logs) - Recovery key mismatch now errors and stops import early - Pre-check volumes and notification destinations before creation - Attachment functions merge missing items instead of overwriting - Add toError() and mergeResults() helpers to reduce code duplication - Extract readConfigJson() and createOutput() for cleaner CLI code - Move fs/path imports to top level in config-import.ts --- app/server/cli/commands/import-config.ts | 137 ++++--- app/server/modules/lifecycle/config-import.ts | 378 ++++++++++++------ 2 files changed, 349 insertions(+), 166 deletions(-) diff --git a/app/server/cli/commands/import-config.ts b/app/server/cli/commands/import-config.ts index 8462a3bd..93b15687 100644 --- a/app/server/cli/commands/import-config.ts +++ b/app/server/cli/commands/import-config.ts @@ -2,6 +2,10 @@ import { Command } from "commander"; import path from "node:path"; import fs from "node:fs/promises"; +const toError = (e: unknown): Error => (e instanceof Error ? e : new Error(String(e))); + +type Output = ReturnType; + async function readStdin(): Promise { const chunks: Buffer[] = []; for await (const chunk of process.stdin) { @@ -10,85 +14,118 @@ async function readStdin(): Promise { return Buffer.concat(chunks).toString("utf-8"); } +function createOutput(jsonOutput: boolean) { + return { + error: (message: string): never => { + if (jsonOutput) { + console.log(JSON.stringify({ error: message })); + } else { + console.error(`❌ ${message}`); + } + process.exit(1); + }, + info: (message: string): void => { + if (!jsonOutput) { + console.log(message); + } + }, + json: (data: object): void => { + if (jsonOutput) { + console.log(JSON.stringify(data)); + } + }, + }; +} + +async function readConfigJson(options: { stdin?: boolean; config?: string }, out: Output): Promise { + if (options.stdin) { + out.info("📄 Reading config from stdin..."); + try { + const configJson = await readStdin(); + if (!configJson.trim()) { + out.error("No input received from stdin"); + } + return configJson; + } catch (e) { + out.error(`Failed to read stdin: ${toError(e).message}`); + } + } + + const configPath = path.resolve(process.cwd(), options.config ?? 
""); + try { + await fs.access(configPath); + } catch { + out.error(`Config file not found: ${configPath}`); + } + + out.info(`📄 Config file: ${configPath}`); + return fs.readFile(configPath, "utf-8"); +} + export const importConfigCommand = new Command("import-config") .description("Import configuration from a JSON file or stdin") .option("-c, --config ", "Path to the configuration file") .option("--stdin", "Read configuration from stdin") .option("--dry-run", "Validate the config without importing") + .option("--json", "Output results in JSON format") + .option("--log-level ", "Set log level (debug, info, warn, error)") .action(async (options) => { - console.log("\n📦 Zerobyte Config Import\n"); + const jsonOutput = options.json; + const out = createOutput(jsonOutput); - if (!options.config && !options.stdin) { - console.error("❌ Either --config or --stdin is required"); - console.log("\nUsage:"); - console.log(" zerobyte import-config --config /path/to/config.json"); - console.log(" cat config.json | zerobyte import-config --stdin"); - process.exit(1); + // Set log level: explicit option takes precedence + if (options.logLevel) { + process.env.LOG_LEVEL = options.logLevel; } - if (options.config && options.stdin) { - console.error("❌ Cannot use both --config and --stdin"); - process.exit(1); - } + out.info("\n📦 Zerobyte Config Import\n"); - let configJson: string; - - if (options.stdin) { - console.log("📄 Reading config from stdin..."); - try { - configJson = await readStdin(); - if (!configJson.trim()) { - console.error("❌ No input received from stdin"); - process.exit(1); - } - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - console.error(`❌ Failed to read stdin: ${err.message}`); - process.exit(1); - } - } else { - const configPath = path.resolve(process.cwd(), options.config); - - // Check if file exists - try { - await fs.access(configPath); - } catch { - console.error(`❌ Config file not found: ${configPath}`); - process.exit(1); + if (!options.config && !options.stdin) { + if (!jsonOutput) { + console.log("\nUsage:"); + console.log(" zerobyte import-config --config /path/to/config.json"); + console.log(" cat config.json | zerobyte import-config --stdin"); } + out.error("Either --config or --stdin is required"); + } - console.log(`📄 Config file: ${configPath}`); - configJson = await fs.readFile(configPath, "utf-8"); + if (options.config && options.stdin) { + out.error("Cannot use both --config and --stdin"); } + const configJson = await readConfigJson(options, out); + // Parse and validate JSON let config: unknown; try { config = JSON.parse(configJson); } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - console.error(`❌ Invalid JSON: ${err.message}`); - process.exit(1); + out.error(`Invalid JSON: ${toError(e).message}`); } if (options.dryRun) { - console.log("🔍 Dry run mode - validating config only\n"); - const root = typeof config === "object" && config !== null ? config : {}; const configObj = "config" in root && typeof root.config === "object" && root.config !== null ? root.config : root; const sections = ["volumes", "repositories", "backupSchedules", "notificationDestinations", "users"]; + const counts: Record = {}; for (const section of sections) { const items = (configObj as Record)[section] || []; - const count = Array.isArray(items) ? items.length : 0; - console.log(` ${section}: ${count} item(s)`); + counts[section] = Array.isArray(items) ? 
items.length : 0; } - const hasRecoveryKey = !!(configObj as Record).recoveryKey; - console.log(` recoveryKey: ${hasRecoveryKey ? "provided" : "not provided"}`); - console.log("\n✅ Config is valid JSON"); + if (jsonOutput) { + out.json({ dryRun: true, valid: true, counts, hasRecoveryKey }); + } else { + console.log("🔍 Dry run mode - validating config only\n"); + for (const section of sections) { + console.log(` ${section}: ${counts[section]} item(s)`); + } + console.log(` recoveryKey: ${hasRecoveryKey ? "provided" : "not provided"}`); + console.log("\n✅ Config is valid JSON"); + } return; } @@ -100,13 +137,13 @@ export const importConfigCommand = new Command("import-config") const { applyConfigImport } = await import("../../modules/lifecycle/config-import"); const result = await applyConfigImport(config); + out.json({ ...result, success: result.errors === 0 }); + // Exit with error code if there were errors if (result.errors > 0) { process.exit(1); } } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - console.error(`❌ Import failed: ${err.message}`); - process.exit(1); + out.error(`Import failed: ${toError(e).message}`); } }); diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 3b20d9b7..41b44166 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -1,4 +1,6 @@ import { eq } from "drizzle-orm"; +import fs from "node:fs/promises"; +import path from "node:path"; import slugify from "slugify"; import { db } from "../../db/db"; import { usersTable } from "../../db/schema"; @@ -10,6 +12,8 @@ import type { BackendConfig } from "~/schemas/volumes"; const isRecord = (value: unknown): value is Record => typeof value === "object" && value !== null; +const toError = (e: unknown): Error => (e instanceof Error ? e : new Error(String(e))); + const asStringArray = (value: unknown): string[] => { if (!Array.isArray(value)) return []; return value.filter((item): item is string => typeof item === "string"); @@ -36,6 +40,7 @@ type ImportConfig = { export type ImportResult = { succeeded: number; + skipped: number; warnings: number; errors: number; }; @@ -62,8 +67,6 @@ function interpolateEnvVars(value: unknown): unknown { async function loadConfigFromFile(): Promise { try { const configPath = process.env.ZEROBYTE_CONFIG_PATH || "zerobyte.config.json"; - const fs = await import("node:fs/promises"); - const path = await import("node:path"); const configFullPath = path.resolve(process.cwd(), configPath); try { const raw = await fs.readFile(configFullPath, "utf-8"); @@ -73,8 +76,7 @@ async function loadConfigFromFile(): Promise { throw error; } } catch (error) { - const err = error instanceof Error ? 
error : new Error(String(error)); - logger.warn(`No config file loaded or error parsing config: ${err.message}`); + logger.warn(`No config file loaded or error parsing config: ${toError(error).message}`); return null; } } @@ -100,11 +102,17 @@ function parseImportConfig(configRaw: unknown): ImportConfig { }; } +function mergeResults(target: ImportResult, source: ImportResult): void { + target.succeeded += source.succeeded; + target.skipped += source.skipped; + target.warnings += source.warnings; + target.errors += source.errors; +} + async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise { - const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; try { - const fs = await import("node:fs/promises"); const { RESTIC_PASS_FILE } = await import("../../core/constants.js"); if (!recoveryKey) return result; @@ -116,16 +124,22 @@ async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise false, ); if (passFileExists) { - logger.warn(`Restic passfile already exists at ${RESTIC_PASS_FILE}; skipping config recovery key write`); - result.warnings++; + // Check if existing key matches the one being imported + const existingKey = await fs.readFile(RESTIC_PASS_FILE, "utf-8"); + if (existingKey.trim() === recoveryKey) { + logger.info("Recovery key already configured with matching value"); + result.skipped++; + } else { + logger.error("Recovery key already exists with different value; cannot overwrite"); + result.errors++; + } return result; } await fs.writeFile(RESTIC_PASS_FILE, recoveryKey, { mode: 0o600 }); logger.info(`Recovery key written from config to ${RESTIC_PASS_FILE}`); result.succeeded++; } catch (err) { - const e = err instanceof Error ? err : new Error(String(err)); - logger.error(`Failed to write recovery key from config: ${e.message}`); + logger.error(`Failed to write recovery key from config: ${toError(err).message}`); result.errors++; } @@ -133,13 +147,24 @@ async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise { - const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + + // Get existing volumes to check for duplicates + const existingVolumes = await volumeService.listVolumes(); + const existingNames = new Set(existingVolumes.map((v) => v.name)); for (const v of volumes) { try { if (!isRecord(v) || typeof v.name !== "string" || !isRecord(v.config) || typeof v.config.backend !== "string") { throw new Error("Invalid volume entry"); } + + if (existingNames.has(v.name)) { + logger.info(`Volume '${v.name}' already exists`); + result.skipped++; + continue; + } + await volumeService.createVolume(v.name, v.config as BackendConfig); logger.info(`Initialized volume from config: ${v.name}`); result.succeeded++; @@ -150,8 +175,8 @@ async function importVolumes(volumes: unknown[]): Promise { logger.info(`Set autoRemount=false for volume: ${v.name}`); } } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Volume not created: ${err.message}`); + const volumeName = isRecord(v) && typeof v.name === "string" ? 
v.name : "unknown"; + logger.warn(`Volume '${volumeName}' not created: ${toError(e).message}`); result.warnings++; } } @@ -160,7 +185,7 @@ async function importVolumes(volumes: unknown[]): Promise { } async function importRepositories(repositories: unknown[]): Promise { - const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; const repoServiceModule = await import("../repositories/repositories.service"); const { buildRepoUrl, restic } = await import("../../utils/restic"); @@ -175,8 +200,7 @@ async function importRepositories(repositories: unknown[]): Promise true) .catch((e) => { - const err = e instanceof Error ? e : new Error(String(e)); - logger.debug(`Repo existence check for '${r.name}': ${err.message}`); + logger.debug(`Repo existence check for '${r.name}': ${toError(e).message}`); return false; }); @@ -223,8 +245,8 @@ async function importRepositories(repositories: unknown[]): Promise { - const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; const notificationsServiceModule = await import("../notifications/notifications.service"); + + // Get existing destinations to check for duplicates + const existingDestinations = await notificationsServiceModule.notificationsService.listDestinations(); + const existingNames = new Set(existingDestinations.map((d) => d.name)); + for (const n of notificationDestinations) { try { if (!isRecord(n) || typeof n.name !== "string" || !isRecord(n.config) || typeof n.config.type !== "string") { throw new Error("Invalid notification destination entry"); } + + // The service uses slugify to normalize the name, so we check against stored names + if (existingNames.has(n.name)) { + logger.info(`Notification destination '${n.name}' already exists`); + result.skipped++; + continue; + } + const created = await notificationsServiceModule.notificationsService.createDestination( n.name, n.config as NotificationConfig, @@ -270,8 +305,8 @@ async function importNotificationDestinations(notificationDestinations: unknown[ logger.info(`Set enabled=false for notification destination: ${n.name}`); } } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Notification destination not created: ${err.message}`); + const destName = isRecord(n) && typeof n.name === "string" ? n.name : "unknown"; + logger.warn(`Notification destination '${destName}' not created: ${toError(e).message}`); result.warnings++; } } @@ -297,6 +332,7 @@ function getScheduleRepositoryName(schedule: Record): string | type ScheduleNotificationAssignment = { destinationId: number; + destinationName: string; notifyOnStart: boolean; notifyOnSuccess: boolean; notifyOnWarning: boolean; @@ -304,25 +340,30 @@ type ScheduleNotificationAssignment = { }; function buildScheduleNotificationAssignments( + scheduleName: string, notifications: unknown[], destinationBySlug: Map, -): ScheduleNotificationAssignment[] { +): { assignments: ScheduleNotificationAssignment[]; warnings: number } { const assignments: ScheduleNotificationAssignment[] = []; + let warnings = 0; for (const notif of notifications) { const destName = typeof notif === "string" ? notif : isRecord(notif) ? 
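    // entries may be plain destination names or objects carrying per-event notify flags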
notif.name : null; if (typeof destName !== "string" || destName.length === 0) { - logger.warn("Notification destination missing name for schedule"); + logger.warn(`Notification destination missing name for schedule '${scheduleName}'`); + warnings++; continue; } const destSlug = slugify(destName, { lower: true, strict: true }); const dest = destinationBySlug.get(destSlug); if (!dest) { - logger.warn(`Notification destination '${destName}' not found for schedule`); + logger.warn(`Notification destination '${destName}' not found for schedule '${scheduleName}'`); + warnings++; continue; } assignments.push({ destinationId: dest.id, + destinationName: dest.name, notifyOnStart: isRecord(notif) && typeof notif.notifyOnStart === "boolean" ? notif.notifyOnStart : true, notifyOnSuccess: isRecord(notif) && typeof notif.notifyOnSuccess === "boolean" ? notif.notifyOnSuccess : true, notifyOnWarning: isRecord(notif) && typeof notif.notifyOnWarning === "boolean" ? notif.notifyOnWarning : true, @@ -330,29 +371,66 @@ function buildScheduleNotificationAssignments( }); } - return assignments; + return { assignments, warnings }; } async function attachScheduleNotifications( scheduleId: number, + scheduleName: string, notifications: unknown[], destinationBySlug: Map, notificationsServiceModule: typeof import("../notifications/notifications.service"), -): Promise { +): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; try { - const assignments = buildScheduleNotificationAssignments(notifications, destinationBySlug); - if (assignments.length === 0) return; - - await notificationsServiceModule.notificationsService.updateScheduleNotifications(scheduleId, assignments); - logger.info(`Assigned ${assignments.length} notification(s) to backup schedule`); + const existingNotifications = + await notificationsServiceModule.notificationsService.getScheduleNotifications(scheduleId); + const existingDestIds = new Set(existingNotifications.map((n) => n.destinationId)); + + const { assignments, warnings } = buildScheduleNotificationAssignments( + scheduleName, + notifications, + destinationBySlug, + ); + result.warnings += warnings; + + // Filter out already attached notifications and track skipped + const newAssignments: typeof assignments = []; + for (const a of assignments) { + if (existingDestIds.has(a.destinationId)) { + logger.info(`Notification '${a.destinationName}' already attached to schedule '${scheduleName}'`); + result.skipped++; + } else { + newAssignments.push(a); + } + } + if (newAssignments.length === 0) return result; + + // Merge existing with new (strip destinationName for API call) + const mergedAssignments = [ + ...existingNotifications.map((n) => ({ + destinationId: n.destinationId, + notifyOnStart: n.notifyOnStart, + notifyOnSuccess: n.notifyOnSuccess, + notifyOnWarning: n.notifyOnWarning, + notifyOnFailure: n.notifyOnFailure, + })), + ...newAssignments.map(({ destinationName: _, ...rest }) => rest), + ]; + + await notificationsServiceModule.notificationsService.updateScheduleNotifications(scheduleId, mergedAssignments); + const notifNames = newAssignments.map((a) => a.destinationName).join(", "); + logger.info(`Assigned notification(s) [${notifNames}] to schedule '${scheduleName}'`); + result.succeeded += newAssignments.length; } catch (e) { - const err = e instanceof Error ? 
e : new Error(String(e)); - logger.warn(`Failed to assign notifications to schedule: ${err.message}`); + logger.warn(`Failed to assign notifications to schedule '${scheduleName}': ${toError(e).message}`); + result.warnings++; } + return result; } async function importBackupSchedules(backupSchedules: unknown[]): Promise { - const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; if (!Array.isArray(backupSchedules) || backupSchedules.length === 0) return result; const backupServiceModule = await import("../backups/backups.service"); @@ -361,10 +439,12 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise [v.name, v] as const)); const repoByName = new Map(repositories.map((r) => [r.name, r] as const)); const destinationBySlug = new Map(destinations.map((d) => [d.name, d] as const)); + const scheduleByName = new Map(existingSchedules.map((s) => [s.name, s] as const)); for (const s of backupSchedules) { if (!isRecord(s)) { @@ -372,85 +452,105 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise 0 ? s.name : `${volumeName}-${repositoryName}`; if (typeof s.cronExpression !== "string" || s.cronExpression.length === 0) { - logger.warn(`Backup schedule not created: Missing cronExpression for '${scheduleName}'`); + logger.warn(`Backup schedule not processed: Missing cronExpression for '${scheduleName}'`); result.warnings++; continue; } - if (volume.status !== "mounted") { + // Check if schedule already exists - if so, skip creation but still try attachments + const existingSchedule = scheduleByName.get(scheduleName); + let scheduleId: number; + + if (existingSchedule) { + logger.info(`Backup schedule '${scheduleName}' already exists`); + result.skipped++; + scheduleId = existingSchedule.id; + } else { + // Mount volume if needed for new schedule + if (volume.status !== "mounted") { + try { + await volumeService.mountVolume(volume.name); + volumeByName.set(volume.name, { ...volume, status: "mounted" }); + logger.info(`Mounted volume ${volume.name} for backup schedule`); + } catch (e) { + logger.warn(`Could not mount volume ${volume.name}: ${toError(e).message}`); + result.warnings++; + continue; + } + } + try { - await volumeService.mountVolume(volume.name); - volumeByName.set(volume.name, { ...volume, status: "mounted" }); - logger.info(`Mounted volume ${volume.name} for backup schedule`); + const retentionPolicy = isRecord(s.retentionPolicy) ? (s.retentionPolicy as RetentionPolicy) : undefined; + const createdSchedule = await backupServiceModule.backupsService.createSchedule({ + name: scheduleName, + volumeId: volume.id, + repositoryId: repository.id, + enabled: typeof s.enabled === "boolean" ? s.enabled : true, + cronExpression: s.cronExpression, + retentionPolicy, + excludePatterns: asStringArray(s.excludePatterns), + excludeIfPresent: asStringArray(s.excludeIfPresent), + includePatterns: asStringArray(s.includePatterns), + oneFileSystem: typeof s.oneFileSystem === "boolean" ? s.oneFileSystem : undefined, + }); + logger.info(`Initialized backup schedule from config: ${scheduleName}`); + result.succeeded++; + scheduleId = createdSchedule.id; } catch (e) { - const err = e instanceof Error ? 
e : new Error(String(e)); - logger.warn(`Could not mount volume ${volume.name}: ${err.message}`); + logger.warn(`Backup schedule '${scheduleName}' not created: ${toError(e).message}`); result.warnings++; continue; } } - let createdSchedule: { id: number } | null = null; - try { - const retentionPolicy = isRecord(s.retentionPolicy) ? (s.retentionPolicy as RetentionPolicy) : undefined; - createdSchedule = await backupServiceModule.backupsService.createSchedule({ - name: scheduleName, - volumeId: volume.id, - repositoryId: repository.id, - enabled: typeof s.enabled === "boolean" ? s.enabled : true, - cronExpression: s.cronExpression, - retentionPolicy, - excludePatterns: asStringArray(s.excludePatterns), - excludeIfPresent: asStringArray(s.excludeIfPresent), - includePatterns: asStringArray(s.includePatterns), - oneFileSystem: typeof s.oneFileSystem === "boolean" ? s.oneFileSystem : undefined, - }); - logger.info(`Initialized backup schedule from config: ${scheduleName}`); - result.succeeded++; - } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Backup schedule not created: ${err.message}`); - result.warnings++; - continue; - } - - if (createdSchedule && Array.isArray(s.notifications) && s.notifications.length > 0) { - await attachScheduleNotifications( - createdSchedule.id, + // Attach notifications (checks if already attached) + if (Array.isArray(s.notifications) && s.notifications.length > 0) { + const notifResult = await attachScheduleNotifications( + scheduleId, + scheduleName, s.notifications, destinationBySlug, notificationsServiceModule, ); + mergeResults(result, notifResult); } - if (createdSchedule && Array.isArray(s.mirrors) && s.mirrors.length > 0) { - await attachScheduleMirrors(createdSchedule.id, s.mirrors, repoByName, backupServiceModule); + // Attach mirrors (checks if already attached) + if (Array.isArray(s.mirrors) && s.mirrors.length > 0) { + const mirrorResult = await attachScheduleMirrors( + scheduleId, + scheduleName, + s.mirrors, + repoByName, + backupServiceModule, + ); + mergeResults(result, mirrorResult); } } @@ -459,12 +559,17 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise, backupServiceModule: typeof import("../backups/backups.service"), -): Promise { +): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; try { - const mirrorConfigs: Array<{ repositoryId: string; enabled: boolean }> = []; + const existingMirrors = await backupServiceModule.backupsService.getMirrors(scheduleId); + const existingRepoIds = new Set(existingMirrors.map((m) => m.repositoryId)); + + const mirrorConfigs: Array<{ repositoryId: string; repositoryName: string; enabled: boolean }> = []; for (const m of mirrors) { if (!isRecord(m)) continue; @@ -478,39 +583,70 @@ async function attachScheduleMirrors( : null; if (!repoName) { - logger.warn("Mirror missing repository name; skipping"); + logger.warn(`Mirror missing repository name for schedule '${scheduleName}'`); + result.warnings++; continue; } const repo = repoByName.get(repoName); if (!repo) { - logger.warn(`Mirror repository '${repoName}' not found; skipping`); + logger.warn(`Mirror repository '${repoName}' not found for schedule '${scheduleName}'`); + result.warnings++; continue; } mirrorConfigs.push({ repositoryId: repo.id, + repositoryName: repo.name, enabled: typeof m.enabled === "boolean" ? 
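        // a mirror defaults to enabled unless the config explicitly sets enabled: false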
m.enabled : true, }); } - if (mirrorConfigs.length === 0) return; - - await backupServiceModule.backupsService.updateMirrors(scheduleId, { mirrors: mirrorConfigs }); - logger.info(`Assigned ${mirrorConfigs.length} mirror(s) to backup schedule`); + // Filter out already attached mirrors and track skipped + const newMirrors: typeof mirrorConfigs = []; + for (const m of mirrorConfigs) { + if (existingRepoIds.has(m.repositoryId)) { + logger.info(`Mirror '${m.repositoryName}' already attached to schedule '${scheduleName}'`); + result.skipped++; + } else { + newMirrors.push(m); + } + } + if (newMirrors.length === 0) return result; + + // Merge existing with new (strip repositoryName for API call) + const mergedMirrors = [ + ...existingMirrors.map((m) => ({ + repositoryId: m.repositoryId, + enabled: m.enabled, + })), + ...newMirrors.map(({ repositoryName: _, ...rest }) => rest), + ]; + + await backupServiceModule.backupsService.updateMirrors(scheduleId, { mirrors: mergedMirrors }); + const mirrorNames = newMirrors.map((m) => m.repositoryName).join(", "); + logger.info(`Assigned mirror(s) [${mirrorNames}] to schedule '${scheduleName}'`); + result.succeeded += newMirrors.length; } catch (e) { - const err = e instanceof Error ? e : new Error(String(e)); - logger.warn(`Failed to assign mirrors to schedule: ${err.message}`); + logger.warn(`Failed to assign mirrors to schedule '${scheduleName}': ${toError(e).message}`); + result.warnings++; } + return result; } -async function setupInitialUser(users: unknown[], recoveryKey: string | null): Promise { - const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; +async function importUsers(users: unknown[], recoveryKey: string | null): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; try { const { authService } = await import("../auth/auth.service"); const hasUsers = await authService.hasUsers(); - if (hasUsers) return result; + if (hasUsers) { + if (Array.isArray(users) && users.length > 0) { + logger.info("Users already exist; skipping user import from config"); + result.skipped++; + } + return result; + } if (!Array.isArray(users) || users.length === 0) return result; if (users.length > 1) { @@ -521,8 +657,16 @@ async function setupInitialUser(users: unknown[], recoveryKey: string | null): P } for (const u of users) { - if (!isRecord(u)) continue; - if (typeof u.username !== "string" || u.username.length === 0) continue; + if (!isRecord(u)) { + logger.warn("Invalid user entry in config; skipping"); + result.warnings++; + continue; + } + if (typeof u.username !== "string" || u.username.length === 0) { + logger.warn("User entry missing username; skipping"); + result.warnings++; + continue; + } if (typeof u.passwordHash === "string" && u.passwordHash.length > 0) { try { @@ -575,33 +719,36 @@ async function setupInitialUser(users: unknown[], recoveryKey: string | null): P } async function runImport(config: ImportConfig): Promise { - const result: ImportResult = { succeeded: 0, warnings: 0, errors: 0 }; - - const recoveryKeyResult = await writeRecoveryKeyFromConfig(config.recoveryKey); - const volumeResult = await importVolumes(config.volumes); - const repoResult = await importRepositories(config.repositories); - const notifResult = await importNotificationDestinations(config.notificationDestinations); - const scheduleResult = await importBackupSchedules(config.backupSchedules); - const userResult = await setupInitialUser(config.users, config.recoveryKey); - - for (const r of [recoveryKeyResult, 
volumeResult, repoResult, notifResult, scheduleResult, userResult]) { - result.succeeded += r.succeeded; - result.warnings += r.warnings; - result.errors += r.errors; + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + + mergeResults(result, await writeRecoveryKeyFromConfig(config.recoveryKey)); + + // Stop immediately if recovery key has errors (e.g., mismatch with existing key) + if (result.errors > 0) { + return result; } + mergeResults(result, await importVolumes(config.volumes)); + mergeResults(result, await importRepositories(config.repositories)); + mergeResults(result, await importNotificationDestinations(config.notificationDestinations)); + mergeResults(result, await importBackupSchedules(config.backupSchedules)); + mergeResults(result, await importUsers(config.users, config.recoveryKey)); + return result; } function logImportSummary(result: ImportResult): void { + const skippedMsg = result.skipped > 0 ? `, ${result.skipped} skipped` : ""; if (result.errors > 0) { logger.error( - `Config import completed with ${result.errors} error(s) and ${result.warnings} warning(s), ${result.succeeded} item(s) imported`, + `Config import completed with ${result.errors} error(s) and ${result.warnings} warning(s), ${result.succeeded} imported${skippedMsg}`, ); } else if (result.warnings > 0) { - logger.warn(`Config import completed with ${result.warnings} warning(s), ${result.succeeded} item(s) imported`); - } else if (result.succeeded > 0) { - logger.info(`Config import completed successfully: ${result.succeeded} item(s) imported`); + logger.warn( + `Config import completed with ${result.warnings} warning(s), ${result.succeeded} imported${skippedMsg}`, + ); + } else if (result.succeeded > 0 || result.skipped > 0) { + logger.info(`Config import completed: ${result.succeeded} imported${skippedMsg}`); } else { logger.info("Config import completed: no items to import"); } @@ -632,7 +779,6 @@ export async function applyConfigImportFromFile(): Promise { const result = await runImport(config); logImportSummary(result); } catch (e) { - const err = e instanceof Error ? 
e : new Error(String(e)); - logger.error(`Config import failed: ${err.message}`); + logger.error(`Config import failed: ${toError(e).message}`); } } From 3c7419f401f0475f14250b47213cd97dbe84446b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Tue, 30 Dec 2025 14:33:22 +0100 Subject: [PATCH 21/30] make the isexisting repo in location check for all repos --- app/server/modules/lifecycle/config-import.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 41b44166..dd782738 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -222,9 +222,9 @@ async function importRepositories(repositories: unknown[]): Promise true) @@ -235,8 +235,8 @@ async function importRepositories(repositories: unknown[]): Promise Date: Tue, 30 Dec 2025 19:14:49 +0000 Subject: [PATCH 22/30] fix: normalize notification destination names before checking for existence --- app/server/modules/lifecycle/config-import.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index dd782738..5d357ce1 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -286,7 +286,8 @@ async function importNotificationDestinations(notificationDestinations: unknown[ } // The service uses slugify to normalize the name, so we check against stored names - if (existingNames.has(n.name)) { + const slugifiedName = slugify(n.name, { lower: true, strict: true }); + if (existingNames.has(slugifiedName)) { logger.info(`Notification destination '${n.name}' already exists`); result.skipped++; continue; From 37d04d3b61937853ec95e0d93ac63976c42bc7f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Wed, 31 Dec 2025 12:06:16 +0100 Subject: [PATCH 23/30] fix: normalize names for existence checks --- app/server/modules/lifecycle/config-import.ts | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 5d357ce1..537dfcff 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -159,7 +159,9 @@ async function importVolumes(volumes: unknown[]): Promise { throw new Error("Invalid volume entry"); } - if (existingNames.has(v.name)) { + // The service uses slugify to normalize the name, so we check against stored names + const slugifiedName = slugify(v.name, { lower: true, strict: true }); + if (existingNames.has(slugifiedName)) { logger.info(`Volume '${v.name}' already exists`); result.skipped++; continue; @@ -244,7 +246,8 @@ async function importRepositories(repositories: unknown[]): Promise Date: Fri, 2 Jan 2026 09:54:52 +0100 Subject: [PATCH 24/30] feat: add support for provided shortId in volume, repository and backup and pass shortIds to service creation from config import --- app/server/modules/backups/backups.service.ts | 25 +++++++++-- app/server/modules/lifecycle/config-import.ts | 44 ++++++++++++------- .../repositories/repositories.service.ts | 29 ++++++++++-- app/server/modules/volumes/volume.service.ts | 21 +++++++-- app/server/utils/id.ts | 9 +++- 5 files changed, 103 insertions(+), 25 deletions(-) diff --git a/app/server/modules/backups/backups.service.ts 
b/app/server/modules/backups/backups.service.ts index ea2b5fe5..2a066da5 100644 --- a/app/server/modules/backups/backups.service.ts +++ b/app/server/modules/backups/backups.service.ts @@ -14,7 +14,7 @@ import { notificationsService } from "../notifications/notifications.service"; import { repoMutex } from "../../core/repository-mutex"; import { checkMirrorCompatibility, getIncompatibleMirrorError } from "~/server/utils/backend-compatibility"; import path from "node:path"; -import { generateShortId } from "~/server/utils/id"; +import { generateShortId, isValidShortId } from "~/server/utils/id"; const runningBackups = new Map(); @@ -82,7 +82,7 @@ const getSchedule = async (scheduleId: number) => { return schedule; }; -const createSchedule = async (data: CreateBackupScheduleBody) => { +const createSchedule = async (data: CreateBackupScheduleBody, providedShortId?: string) => { if (!cron.validate(data.cronExpression)) { throw new BadRequestError("Invalid cron expression"); } @@ -95,6 +95,25 @@ const createSchedule = async (data: CreateBackupScheduleBody) => { throw new ConflictError("A backup schedule with this name already exists"); } + // Use provided shortId if valid, otherwise generate a new one + let shortId: string; + if (providedShortId) { + if (!isValidShortId(providedShortId)) { + throw new BadRequestError(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); + } + const shortIdInUse = await db.query.backupSchedulesTable.findFirst({ + where: eq(backupSchedulesTable.shortId, providedShortId), + }); + if (shortIdInUse) { + throw new ConflictError( + `Schedule shortId '${providedShortId}' is already in use by schedule '${shortIdInUse.name}'`, + ); + } + shortId = providedShortId; + } else { + shortId = generateShortId(); + } + const volume = await db.query.volumesTable.findFirst({ where: eq(volumesTable.id, data.volumeId), }); @@ -127,7 +146,7 @@ const createSchedule = async (data: CreateBackupScheduleBody) => { includePatterns: data.includePatterns ?? [], oneFileSystem: data.oneFileSystem, nextBackupAt: nextBackupAt, - shortId: generateShortId(), + shortId, }) .returning(); diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 537dfcff..587956c2 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -167,7 +167,9 @@ async function importVolumes(volumes: unknown[]): Promise { continue; } - await volumeService.createVolume(v.name, v.config as BackendConfig); + // Pass shortId from config if provided (for IaC reproducibility) + const shortId = typeof v.shortId === "string" ? 
v.shortId : undefined; + await volumeService.createVolume(v.name, v.config as BackendConfig, shortId); logger.info(`Initialized volume from config: ${v.name}`); result.succeeded++; @@ -257,10 +259,13 @@ async function importRepositories(repositories: unknown[]): Promise, + repoByName: Map, backupServiceModule: typeof import("../backups/backups.service"), ): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; @@ -576,7 +586,11 @@ async function attachScheduleMirrors( const existingMirrors = await backupServiceModule.backupsService.getMirrors(scheduleId); const existingRepoIds = new Set(existingMirrors.map((m) => m.repositoryId)); - const mirrorConfigs: Array<{ repositoryId: string; repositoryName: string; enabled: boolean }> = []; + const mirrorConfigs: Array<{ + repositoryId: string; + repositoryName: string; + enabled: boolean; + }> = []; for (const m of mirrors) { if (!isRecord(m)) continue; diff --git a/app/server/modules/repositories/repositories.service.ts b/app/server/modules/repositories/repositories.service.ts index 2656dbd4..1208b7ba 100644 --- a/app/server/modules/repositories/repositories.service.ts +++ b/app/server/modules/repositories/repositories.service.ts @@ -4,7 +4,7 @@ import { ConflictError, InternalServerError, NotFoundError } from "http-errors-e import { db } from "../../db/db"; import { repositoriesTable } from "../../db/schema"; import { toMessage } from "../../utils/errors"; -import { generateShortId } from "../../utils/id"; +import { generateShortId, isValidShortId } from "../../utils/id"; import { restic } from "../../utils/restic"; import { cryptoUtils } from "../../utils/crypto"; import { repoMutex } from "../../core/repository-mutex"; @@ -62,9 +62,32 @@ const encryptConfig = async (config: RepositoryConfig): Promise { +const createRepository = async ( + name: string, + config: RepositoryConfig, + compressionMode?: CompressionMode, + providedShortId?: string, +) => { const id = crypto.randomUUID(); - const shortId = generateShortId(); + + // Use provided shortId if valid, otherwise generate a new one + let shortId: string; + if (providedShortId) { + if (!isValidShortId(providedShortId)) { + throw new Error(`Invalid shortId format: '${providedShortId}'. 
Must be 8 base64url characters.`); + } + const shortIdInUse = await db.query.repositoriesTable.findFirst({ + where: eq(repositoriesTable.shortId, providedShortId), + }); + if (shortIdInUse) { + throw new ConflictError( + `Repository shortId '${providedShortId}' is already in use by repository '${shortIdInUse.name}'`, + ); + } + shortId = providedShortId; + } else { + shortId = generateShortId(); + } let processedConfig = config; if (config.backend === "local" && !config.isExistingRepository) { diff --git a/app/server/modules/volumes/volume.service.ts b/app/server/modules/volumes/volume.service.ts index bde3be3b..32533fc1 100644 --- a/app/server/modules/volumes/volume.service.ts +++ b/app/server/modules/volumes/volume.service.ts @@ -8,7 +8,7 @@ import { db } from "../../db/db"; import { volumesTable } from "../../db/schema"; import { cryptoUtils } from "../../utils/crypto"; import { toMessage } from "../../utils/errors"; -import { generateShortId } from "../../utils/id"; +import { generateShortId, isValidShortId } from "../../utils/id"; import { getStatFs, type StatFs } from "../../utils/mountinfo"; import { withTimeout } from "../../utils/timeout"; import { createVolumeBackend } from "../backends/backend"; @@ -48,7 +48,7 @@ const listVolumes = async () => { return volumes; }; -const createVolume = async (name: string, backendConfig: BackendConfig) => { +const createVolume = async (name: string, backendConfig: BackendConfig, providedShortId?: string) => { const slug = slugify(name, { lower: true, strict: true }); const existing = await db.query.volumesTable.findFirst({ @@ -59,7 +59,22 @@ const createVolume = async (name: string, backendConfig: BackendConfig) => { throw new ConflictError("Volume already exists"); } - const shortId = generateShortId(); + // Use provided shortId if valid, otherwise generate a new one + let shortId: string; + if (providedShortId) { + if (!isValidShortId(providedShortId)) { + throw new Error(`Invalid shortId format: '${providedShortId}'. 
Must be 8 base64url characters.`); + } + const shortIdInUse = await db.query.volumesTable.findFirst({ + where: eq(volumesTable.shortId, providedShortId), + }); + if (shortIdInUse) { + throw new ConflictError(`Volume shortId '${providedShortId}' is already in use by volume '${shortIdInUse.name}'`); + } + shortId = providedShortId; + } else { + shortId = generateShortId(); + } const encryptedConfig = await encryptSensitiveFields(backendConfig); const [created] = await db diff --git a/app/server/utils/id.ts b/app/server/utils/id.ts index 18bc2030..4a3143e0 100644 --- a/app/server/utils/id.ts +++ b/app/server/utils/id.ts @@ -1,6 +1,13 @@ import crypto from "node:crypto"; -export const generateShortId = (length = 8): string => { +const SHORT_ID_LENGTH = 8; + +export const generateShortId = (length = SHORT_ID_LENGTH): string => { const bytesNeeded = Math.ceil((length * 3) / 4); return crypto.randomBytes(bytesNeeded).toString("base64url").slice(0, length); }; + +export const isValidShortId = (value: string, length = SHORT_ID_LENGTH): boolean => { + const regex = new RegExp(`^[A-Za-z0-9_-]{${length}}$`); + return regex.test(value); +}; From 88bb174423254fbf948752b95a0d93a0c9707a5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Fri, 2 Jan 2026 10:19:20 +0100 Subject: [PATCH 25/30] feat: add option to overwrite recovery key during config import and prevent overwrite if database is not empty --- app/server/cli/commands/import-config.ts | 3 +- app/server/modules/lifecycle/config-import.ts | 70 ++++++++++++++++--- 2 files changed, 63 insertions(+), 10 deletions(-) diff --git a/app/server/cli/commands/import-config.ts b/app/server/cli/commands/import-config.ts index 93b15687..b3607f44 100644 --- a/app/server/cli/commands/import-config.ts +++ b/app/server/cli/commands/import-config.ts @@ -69,6 +69,7 @@ export const importConfigCommand = new Command("import-config") .option("--dry-run", "Validate the config without importing") .option("--json", "Output results in JSON format") .option("--log-level ", "Set log level (debug, info, warn, error)") + .option("--overwrite-recovery-key", "Overwrite existing recovery key (only allowed if database is empty)") .action(async (options) => { const jsonOutput = options.json; const out = createOutput(jsonOutput); @@ -135,7 +136,7 @@ export const importConfigCommand = new Command("import-config") runDbMigrations(); const { applyConfigImport } = await import("../../modules/lifecycle/config-import"); - const result = await applyConfigImport(config); + const result = await applyConfigImport(config, { overwriteRecoveryKey: options.overwriteRecoveryKey }); out.json({ ...result, success: result.errors === 0 }); diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 587956c2..728dc070 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -3,7 +3,13 @@ import fs from "node:fs/promises"; import path from "node:path"; import slugify from "slugify"; import { db } from "../../db/db"; -import { usersTable } from "../../db/schema"; +import { + usersTable, + volumesTable, + repositoriesTable, + backupSchedulesTable, + notificationDestinationsTable, +} from "../../db/schema"; import { logger } from "../../utils/logger"; import { volumeService } from "../volumes/volume.service"; import type { NotificationConfig } from "~/schemas/notifications"; @@ -109,7 +115,31 @@ function mergeResults(target: ImportResult, source: ImportResult): void { 
target.errors += source.errors; } -async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise { +/** + * Check if the database has any records in the main tables. + * Used to prevent recovery key overwrite when data already exists. + */ +async function isDatabaseEmpty(): Promise { + const [volumes, repositories, schedules, notifications, users] = await Promise.all([ + db.select({ id: volumesTable.id }).from(volumesTable).limit(1), + db.select({ id: repositoriesTable.id }).from(repositoriesTable).limit(1), + db.select({ id: backupSchedulesTable.id }).from(backupSchedulesTable).limit(1), + db.select({ id: notificationDestinationsTable.id }).from(notificationDestinationsTable).limit(1), + db.select({ id: usersTable.id }).from(usersTable).limit(1), + ]); + return ( + volumes.length === 0 && + repositories.length === 0 && + schedules.length === 0 && + notifications.length === 0 && + users.length === 0 + ); +} + +async function writeRecoveryKeyFromConfig( + recoveryKey: string | null, + overwriteRecoveryKey: boolean, +): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; try { @@ -129,11 +159,29 @@ async function writeRecoveryKeyFromConfig(recoveryKey: string | null): Promise { +type ImportOptions = { + overwriteRecoveryKey?: boolean; +}; + +async function runImport(config: ImportConfig, options: ImportOptions = {}): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; - mergeResults(result, await writeRecoveryKeyFromConfig(config.recoveryKey)); + mergeResults(result, await writeRecoveryKeyFromConfig(config.recoveryKey, options.overwriteRecoveryKey ?? false)); // Stop immediately if recovery key has errors (e.g., mismatch with existing key) if (result.errors > 0) { @@ -779,10 +831,10 @@ function logImportSummary(result: ImportResult): void { /** * Import configuration from a raw config object (used by CLI) */ -export async function applyConfigImport(configRaw: unknown): Promise { +export async function applyConfigImport(configRaw: unknown, options: ImportOptions = {}): Promise { logger.info("Starting config import..."); const config = parseImportConfig(configRaw); - const result = await runImport(config); + const result = await runImport(config, options); logImportSummary(result); return result; } From b25ed606b21e32508195e5b210740ea562a4b494 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Fri, 2 Jan 2026 10:23:48 +0100 Subject: [PATCH 26/30] fix: replace Error with BadRequestError for invalid shortId format in repository and volume creation --- app/server/modules/repositories/repositories.service.ts | 6 +++--- app/server/modules/volumes/volume.service.ts | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/app/server/modules/repositories/repositories.service.ts b/app/server/modules/repositories/repositories.service.ts index 1208b7ba..a59c34ba 100644 --- a/app/server/modules/repositories/repositories.service.ts +++ b/app/server/modules/repositories/repositories.service.ts @@ -1,6 +1,6 @@ import crypto from "node:crypto"; import { eq, or } from "drizzle-orm"; -import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; +import { BadRequestError, ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; import { db } from "../../db/db"; import { repositoriesTable } from "../../db/schema"; import { toMessage } from "../../utils/errors"; @@ -74,7 +74,7 @@ const createRepository = async ( let shortId: 
string; if (providedShortId) { if (!isValidShortId(providedShortId)) { - throw new Error(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); + throw new BadRequestError(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); } const shortIdInUse = await db.query.repositoriesTable.findFirst({ where: eq(repositoriesTable.shortId, providedShortId), @@ -109,7 +109,7 @@ const createRepository = async ( } if (!repoExists && config.isExistingRepository) { - throw new InternalServerError( + throw new BadRequestError( `Cannot access existing repository. Verify the path/credentials are correct and the repository exists.`, ); } diff --git a/app/server/modules/volumes/volume.service.ts b/app/server/modules/volumes/volume.service.ts index 32533fc1..e2a61ea8 100644 --- a/app/server/modules/volumes/volume.service.ts +++ b/app/server/modules/volumes/volume.service.ts @@ -2,7 +2,7 @@ import * as fs from "node:fs/promises"; import * as os from "node:os"; import * as path from "node:path"; import { and, eq, ne } from "drizzle-orm"; -import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; +import { BadRequestError, ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; import slugify from "slugify"; import { db } from "../../db/db"; import { volumesTable } from "../../db/schema"; @@ -63,7 +63,7 @@ const createVolume = async (name: string, backendConfig: BackendConfig, provided let shortId: string; if (providedShortId) { if (!isValidShortId(providedShortId)) { - throw new Error(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); + throw new BadRequestError(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); } const shortIdInUse = await db.query.volumesTable.findFirst({ where: eq(volumesTable.shortId, providedShortId), From 04cbb58559469fa51c7e573c106c4d26d3ed8d4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Fri, 2 Jan 2026 10:35:18 +0100 Subject: [PATCH 27/30] refactor: remove config import from startup and update README for CLI usage --- app/server/modules/lifecycle/config-import.ts | 36 ------------ app/server/modules/lifecycle/startup.ts | 8 --- examples/config-file-import/README.md | 56 +++++++++---------- .../config-file-import/docker-compose.yml | 1 - 4 files changed, 26 insertions(+), 75 deletions(-) diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 728dc070..76822b17 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -1,6 +1,5 @@ import { eq } from "drizzle-orm"; import fs from "node:fs/promises"; -import path from "node:path"; import slugify from "slugify"; import { db } from "../../db/db"; import { @@ -70,23 +69,6 @@ function interpolateEnvVars(value: unknown): unknown { return value; } -async function loadConfigFromFile(): Promise { - try { - const configPath = process.env.ZEROBYTE_CONFIG_PATH || "zerobyte.config.json"; - const configFullPath = path.resolve(process.cwd(), configPath); - try { - const raw = await fs.readFile(configFullPath, "utf-8"); - return JSON.parse(raw); - } catch (error) { - if (isRecord(error) && error.code === "ENOENT") return null; - throw error; - } - } catch (error) { - logger.warn(`No config file loaded or error parsing config: ${toError(error).message}`); - return null; - } -} - function parseImportConfig(configRaw: unknown): ImportConfig { 
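  // Accept either a bare config object or one nested under a top-level "config" key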
const root = isRecord(configRaw) ? configRaw : {}; const config = isRecord(root.config) ? (root.config as Record) : root; @@ -838,21 +820,3 @@ export async function applyConfigImport(configRaw: unknown, options: ImportOptio logImportSummary(result); return result; } - -/** - * Import configuration from a file (used by env var startup) - */ -export async function applyConfigImportFromFile(): Promise { - const configRaw = await loadConfigFromFile(); - if (configRaw === null) return; // No config file, nothing to do - - logger.info("Starting config import from file..."); - const config = parseImportConfig(configRaw); - - try { - const result = await runImport(config); - logImportSummary(result); - } catch (e) { - logger.error(`Config import failed: ${toError(e).message}`); - } -} diff --git a/app/server/modules/lifecycle/startup.ts b/app/server/modules/lifecycle/startup.ts index 52d3f7cc..aaf9475b 100644 --- a/app/server/modules/lifecycle/startup.ts +++ b/app/server/modules/lifecycle/startup.ts @@ -10,7 +10,6 @@ import { VolumeHealthCheckJob } from "../../jobs/healthchecks"; import { RepositoryHealthCheckJob } from "../../jobs/repository-healthchecks"; import { BackupExecutionJob } from "../../jobs/backup-execution"; import { CleanupSessionsJob } from "../../jobs/cleanup-sessions"; -import { applyConfigImportFromFile } from "./config-import"; import { repositoriesService } from "../repositories/repositories.service"; import { notificationsService } from "../notifications/notifications.service"; import { VolumeAutoRemountJob } from "~/server/jobs/auto-remount"; @@ -45,13 +44,6 @@ export const startup = async () => { await Scheduler.start(); await Scheduler.clear(); - if (process.env.ZEROBYTE_CONFIG_IMPORT === "true") { - logger.info("Config import enabled (ZEROBYTE_CONFIG_IMPORT=true)"); - await applyConfigImportFromFile(); - } else { - logger.info("Config import skipped (set ZEROBYTE_CONFIG_IMPORT=true to enable)"); - } - await restic.ensurePassfile().catch((err) => { logger.error(`Error ensuring restic passfile exists: ${err.message}`); }); diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md index 971b0a21..978b7a82 100644 --- a/examples/config-file-import/README.md +++ b/examples/config-file-import/README.md @@ -1,6 +1,6 @@ # Config file import (Infrastructure as Code) -Zerobyte supports **config file import** on startup. +Zerobyte supports **config file import** via the CLI. This lets you pre-configure volumes, repositories, backup schedules, notification destinations, and an initial user. This example includes: @@ -13,7 +13,7 @@ This example includes: - Docker + Docker Compose -This example includes `SYS_ADMIN` and `/dev/fuse` because it’s compatible with remote volume mounts (SMB/NFS/WebDAV). +This example includes `SYS_ADMIN` and `/dev/fuse` because it's compatible with remote volume mounts (SMB/NFS/WebDAV). ## Setup @@ -48,31 +48,19 @@ This is the recommended workflow for quick testing: if you don't have your own J docker compose up -d ``` -6. Access the UI at `http://localhost:4096`. +6. 
Run the config import: -## Notes - -### Import methods - -Zerobyte supports two ways to import configuration: - -#### Method 1: Environment variable (automatic on startup) - -Set `ZEROBYTE_CONFIG_IMPORT=true` and the import runs automatically when the container starts: - -```yaml -services: - zerobyte: - environment: - - ZEROBYTE_CONFIG_IMPORT=true - - ZEROBYTE_CONFIG_PATH=/app/zerobyte.config.json # optional, this is the default +```bash +docker compose exec zerobyte bun run cli import-config --config /app/zerobyte.config.json ``` -This is ideal for automated deployments where you want `docker compose up` to fully configure the instance. +7. Access the UI at `http://localhost:4096`. + +## Notes -#### Method 2: CLI command (manual control) +### CLI import command -Run the import explicitly using the CLI: +Import configuration using the CLI: ```bash # Import from a mounted config file (starts a new temporary container) @@ -89,15 +77,23 @@ Get-Content zerobyte.config.json | docker compose exec -T zerobyte bun run cli i # Validate config without importing (dry run) docker compose run --rm zerobyte bun run cli import-config --config /app/zerobyte.config.json --dry-run + +# Get JSON output for scripting +docker compose exec zerobyte bun run cli import-config --config /app/zerobyte.config.json --json ``` The `--stdin` option is useful when you don't want to mount the config file - just pipe it directly. -This is useful when you want to: -- See import output directly in your terminal -- Re-run import after fixing issues -- Test config files before applying them -- Import without modifying your docker-compose.yml +### CLI options + +| Option | Description | +|--------|-------------| +| `--config ` | Path to the configuration file inside the container | +| `--stdin` | Read configuration from stdin | +| `--dry-run` | Validate the config without importing | +| `--json` | Output results in JSON format | +| `--log-level ` | Set log level (debug, info, warn, error) | +| `--overwrite-recovery-key` | Overwrite existing recovery key (only allowed if database is empty) | ### Secrets via env vars @@ -170,7 +166,7 @@ Supported formats: - `env://VAR_NAME` → reads `process.env.VAR_NAME` at runtime - `file://secret_name` → reads `/run/secrets/secret_name` (Docker secrets) -This is useful when you want to keep secrets out of the database and rotate them without editing Zerobyte’s stored config. +This is useful when you want to keep secrets out of the database and rotate them without editing Zerobyte's stored config. See the runnable example: @@ -178,14 +174,14 @@ See the runnable example: ### Config file behavior (create-only) -The config file is applied on startup using a **create-only** approach: +The config file is applied using a **create-only** approach: - **Volumes, notifications, schedules**: Skipped if a resource with the same name already exists - **Repositories**: Skipped if any of these conditions are met: - A repository pointing to the same location (path/bucket/endpoint) is already registered - For local repos: the path is already a restic repository (set `isExistingRepository: true` to import it) - A repository with the same name already exists -- Changes made via the UI are preserved across container restarts +- Changes made via the UI are preserved across imports - To update a resource from config, either modify it via the UI or delete it first This makes the config file better suited as "initial setup" than as a "desired state sync". 
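
Because the import is create-only, re-running the same command against an already-configured instance is safe: existing resources are reported as `skipped` rather than re-created. Combined with `--json`, this makes the import easy to script; the output mirrors the import counters plus a `success` flag. A minimal check (the counts shown are illustrative and depend on your config):

```bash
# Second run of an already-applied config: nothing new to create
docker compose exec zerobyte bun run cli import-config \
  --config /app/zerobyte.config.json --json
# => {"succeeded":0,"skipped":12,"warnings":0,"errors":0,"success":true}
```
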
diff --git a/examples/config-file-import/docker-compose.yml b/examples/config-file-import/docker-compose.yml index 78792779..fadfaea2 100644 --- a/examples/config-file-import/docker-compose.yml +++ b/examples/config-file-import/docker-compose.yml @@ -13,7 +13,6 @@ services: - .env environment: - TZ=${TZ:-UTC} - - ZEROBYTE_CONFIG_IMPORT=true volumes: - /etc/localtime:/etc/localtime:ro - /var/lib/zerobyte:/var/lib/zerobyte From 4889b852be87396252b137b1b88968b49a3b5738 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Fri, 2 Jan 2026 10:38:25 +0100 Subject: [PATCH 28/30] fix: remove startup import type from README --- README.md | 2 +- examples/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1eb51ada..2a50def6 100644 --- a/README.md +++ b/README.md @@ -104,7 +104,7 @@ See [examples/README.md](examples/README.md) for runnable, copy/paste-friendly e ### Config file import (Infrastructure as Code) -If you want Zerobyte to create volumes, repositories, schedules, notification destinations, and an initial user from a JSON file on startup, check the following example: +If you want Zerobyte to create volumes, repositories, schedules, notification destinations, and an initial user from a JSON file, check the following example: - [examples/config-file-import/README.md](examples/config-file-import/README.md) diff --git a/examples/README.md b/examples/README.md index 0ae69c9a..97511a94 100644 --- a/examples/README.md +++ b/examples/README.md @@ -11,7 +11,7 @@ This folder contains runnable, copy/paste-friendly examples for running Zerobyte - [Bind-mount a local directory](directory-bind-mount/README.md) — back up a host folder by mounting it into the container. - [Mount an rclone config](rclone-config-mount/README.md) — use rclone-based repository backends by mounting your rclone config. - [Secret placeholders + Docker secrets](secrets-placeholders/README.md) — keep secrets out of the DB using `env://...` and `file://...` references. -- [Config file import (Infrastructure as Code)](config-file-import/README.md) — pre-configure volumes/repos/schedules/users on startup. +- [Config file import (Infrastructure as Code)](config-file-import/README.md) — pre-configure volumes/repos/schedules/users from json file. ### Advanced setups From 24f850b3b9ce944658e27802ca27f17d5c7039af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Fri, 2 Jan 2026 11:54:26 +0100 Subject: [PATCH 29/30] refactor: move toError function to utils/errors for reuse across modules --- app/server/cli/commands/import-config.ts | 3 +-- app/server/modules/lifecycle/config-import.ts | 3 +-- app/server/utils/errors.ts | 2 ++ 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/app/server/cli/commands/import-config.ts b/app/server/cli/commands/import-config.ts index b3607f44..b336f0e5 100644 --- a/app/server/cli/commands/import-config.ts +++ b/app/server/cli/commands/import-config.ts @@ -1,8 +1,7 @@ import { Command } from "commander"; import path from "node:path"; import fs from "node:fs/promises"; - -const toError = (e: unknown): Error => (e instanceof Error ? 
e : new Error(String(e))); +import { toError } from "../../utils/errors"; type Output = ReturnType; diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 76822b17..08e18073 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -10,6 +10,7 @@ import { notificationDestinationsTable, } from "../../db/schema"; import { logger } from "../../utils/logger"; +import { toError } from "../../utils/errors"; import { volumeService } from "../volumes/volume.service"; import type { NotificationConfig } from "~/schemas/notifications"; import type { RepositoryConfig } from "~/schemas/restic"; @@ -17,8 +18,6 @@ import type { BackendConfig } from "~/schemas/volumes"; const isRecord = (value: unknown): value is Record => typeof value === "object" && value !== null; -const toError = (e: unknown): Error => (e instanceof Error ? e : new Error(String(e))); - const asStringArray = (value: unknown): string[] => { if (!Array.isArray(value)) return []; return value.filter((item): item is string => typeof item === "string"); diff --git a/app/server/utils/errors.ts b/app/server/utils/errors.ts index 22379a36..3ff14939 100644 --- a/app/server/utils/errors.ts +++ b/app/server/utils/errors.ts @@ -18,6 +18,8 @@ export const toMessage = (err: unknown): string => { return sanitizeSensitiveData(message); }; +export const toError = (e: unknown): Error => (e instanceof Error ? e : new Error(String(e))); + const resticErrorCodes: Record = { 1: "Command failed: An error occurred while executing the command.", 2: "Go runtime error: A runtime error occurred in the Go program.", From 8591432c5953d12972ea5b65b7d49e9517baa219 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Tr=C3=A1vn=C3=ADk?= Date: Fri, 2 Jan 2026 14:02:06 +0100 Subject: [PATCH 30/30] refactor: add ArkType schema validation for config import - Add app/schemas/config-import.ts with ArkType schemas for upfront validation - Replace runtime type checks with schema-based validation in config-import.ts - Return validation errors early with detailed path and message info - Export retentionPolicySchema from backups.dto.ts for reuse - Update CLI to handle validation errors in both dry-run and import modes - Fix README mirrors examples to use existing repository names --- app/schemas/config-import.ts | 114 +++++++ app/server/cli/commands/import-config.ts | 57 +++- app/server/modules/backups/backups.dto.ts | 2 +- app/server/modules/lifecycle/config-import.ts | 315 +++++++----------- examples/config-file-import/README.md | 4 +- 5 files changed, 280 insertions(+), 212 deletions(-) create mode 100644 app/schemas/config-import.ts diff --git a/app/schemas/config-import.ts b/app/schemas/config-import.ts new file mode 100644 index 00000000..c162d6f5 --- /dev/null +++ b/app/schemas/config-import.ts @@ -0,0 +1,114 @@ +import { type } from "arktype"; +import { volumeConfigSchema } from "./volumes"; +import { repositoryConfigSchema } from "./restic"; +import { notificationConfigSchema } from "./notifications"; +import { retentionPolicySchema } from "../server/modules/backups/backups.dto"; + +/** + * ArkType schemas for validating config import JSON files. + * These provide runtime validation with detailed error messages. 
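+ *
+ * Illustrative usage: calling a schema such as `importConfigSchema(data)` returns the
+ * validated config, or an ArkType errors object (check `instanceof type.errors`) whose
+ * `summary` lists each failing path.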
+ */ + +// Short ID format: 8 character base64url string +const shortIdSchema = type(/^[A-Za-z0-9_-]{8}$/); + +// Volume entry schema for import +export const volumeImportSchema = type({ + name: "string>=1", + shortId: shortIdSchema.optional(), + autoRemount: "boolean?", + config: volumeConfigSchema, +}).onUndeclaredKey("delete"); + +// Repository entry schema for import +export const repositoryImportSchema = type({ + name: "string>=1", + shortId: shortIdSchema.optional(), + compressionMode: type("'auto' | 'off' | 'max'").optional(), + config: repositoryConfigSchema, +}).onUndeclaredKey("delete"); + +// Notification destination entry schema for import +export const notificationDestinationImportSchema = type({ + name: "string>=1", + enabled: "boolean?", + config: notificationConfigSchema, +}).onUndeclaredKey("delete"); + +// Schedule notification assignment (either string name or object with settings) +const scheduleNotificationObjectSchema = type({ + name: "string>=1", + notifyOnStart: "boolean?", + notifyOnSuccess: "boolean?", + notifyOnWarning: "boolean?", + notifyOnFailure: "boolean?", +}).onUndeclaredKey("delete"); + +export const scheduleNotificationAssignmentSchema = type("string>=1").or(scheduleNotificationObjectSchema); + +// Schedule mirror assignment +export const scheduleMirrorSchema = type({ + repository: "string>=1", + enabled: "boolean?", +}).onUndeclaredKey("delete"); + +// Array types for complex schemas +const scheduleNotificationsArray = scheduleNotificationAssignmentSchema.array(); +const scheduleMirrorsArray = scheduleMirrorSchema.array(); + +// Backup schedule entry schema for import +export const backupScheduleImportSchema = type({ + name: "string?", + shortId: shortIdSchema.optional(), + volume: "string>=1", + repository: "string>=1", + cronExpression: "string", + enabled: "boolean?", + retentionPolicy: retentionPolicySchema.or("null").optional(), + excludePatterns: "string[]?", + excludeIfPresent: "string[]?", + includePatterns: "string[]?", + oneFileSystem: "boolean?", + notifications: scheduleNotificationsArray.optional(), + mirrors: scheduleMirrorsArray.optional(), +}).onUndeclaredKey("delete"); + +// User entry schema for import +export const userImportSchema = type({ + username: "string>=1", + password: "(string>=1)?", + passwordHash: "(string>=1)?", + hasDownloadedResticPassword: "boolean?", +}).onUndeclaredKey("delete"); + +// Recovery key format: 64-character hex string +const recoveryKeySchema = type(/^[a-fA-F0-9]{64}$/); + +// Array types for root config +const volumesArray = volumeImportSchema.array(); +const repositoriesArray = repositoryImportSchema.array(); +const backupSchedulesArray = backupScheduleImportSchema.array(); +const notificationDestinationsArray = notificationDestinationImportSchema.array(); +const usersArray = userImportSchema.array(); + +// Root config schema +export const importConfigSchema = type({ + volumes: volumesArray.optional(), + repositories: repositoriesArray.optional(), + backupSchedules: backupSchedulesArray.optional(), + notificationDestinations: notificationDestinationsArray.optional(), + users: usersArray.optional(), + recoveryKey: recoveryKeySchema.optional(), +}).onUndeclaredKey("delete"); + +// Type exports +export type VolumeImport = typeof volumeImportSchema.infer; +export type RepositoryImport = typeof repositoryImportSchema.infer; +export type NotificationDestinationImport = typeof notificationDestinationImportSchema.infer; +export type BackupScheduleImport = typeof backupScheduleImportSchema.infer; +export type 
UserImport = typeof userImportSchema.infer; +export type ImportConfig = typeof importConfigSchema.infer; +export type ScheduleNotificationAssignment = typeof scheduleNotificationAssignmentSchema.infer; +export type ScheduleMirror = typeof scheduleMirrorSchema.infer; +// RetentionPolicy type is re-exported from backups.dto.ts +export type { RetentionPolicy } from "../server/modules/backups/backups.dto"; diff --git a/app/server/cli/commands/import-config.ts b/app/server/cli/commands/import-config.ts index b336f0e5..6e02f729 100644 --- a/app/server/cli/commands/import-config.ts +++ b/app/server/cli/commands/import-config.ts @@ -104,27 +104,41 @@ export const importConfigCommand = new Command("import-config") } if (options.dryRun) { - const root = typeof config === "object" && config !== null ? config : {}; - const configObj = - "config" in root && typeof root.config === "object" && root.config !== null ? root.config : root; - - const sections = ["volumes", "repositories", "backupSchedules", "notificationDestinations", "users"]; - const counts: Record = {}; - for (const section of sections) { - const items = (configObj as Record)[section] || []; - counts[section] = Array.isArray(items) ? items.length : 0; + const { validateConfig } = await import("../../modules/lifecycle/config-import"); + const validation = validateConfig(config); + + if (!validation.success) { + if (jsonOutput) { + out.json({ dryRun: true, valid: false, validationErrors: validation.errors }); + } else { + console.log("🔍 Dry run mode - validating config\n"); + console.log("❌ Validation errors:"); + for (const error of validation.errors) { + console.log(` • ${error.path}: ${error.message}`); + } + } + process.exit(1); } - const hasRecoveryKey = !!(configObj as Record).recoveryKey; + + const { config: validConfig } = validation; + const counts = { + volumes: validConfig.volumes?.length ?? 0, + repositories: validConfig.repositories?.length ?? 0, + backupSchedules: validConfig.backupSchedules?.length ?? 0, + notificationDestinations: validConfig.notificationDestinations?.length ?? 0, + users: validConfig.users?.length ?? 0, + }; + const hasRecoveryKey = !!validConfig.recoveryKey; if (jsonOutput) { out.json({ dryRun: true, valid: true, counts, hasRecoveryKey }); } else { - console.log("🔍 Dry run mode - validating config only\n"); - for (const section of sections) { - console.log(` ${section}: ${counts[section]} item(s)`); + console.log("🔍 Dry run mode - validating config\n"); + for (const [section, count] of Object.entries(counts)) { + console.log(` ${section}: ${count} item(s)`); } console.log(` recoveryKey: ${hasRecoveryKey ? 
"provided" : "not provided"}`); - console.log("\n✅ Config is valid JSON"); + console.log("\n✅ Config is valid"); } return; } @@ -135,8 +149,21 @@ export const importConfigCommand = new Command("import-config") runDbMigrations(); const { applyConfigImport } = await import("../../modules/lifecycle/config-import"); - const result = await applyConfigImport(config, { overwriteRecoveryKey: options.overwriteRecoveryKey }); + const importResult = await applyConfigImport(config, { overwriteRecoveryKey: options.overwriteRecoveryKey }); + + if (!importResult.success) { + if (jsonOutput) { + out.json({ success: false, validationErrors: importResult.validationErrors }); + } else { + console.log("❌ Validation errors:"); + for (const error of importResult.validationErrors) { + console.log(` • ${error.path}: ${error.message}`); + } + } + process.exit(1); + } + const { result } = importResult; out.json({ ...result, success: result.errors === 0 }); // Exit with error code if there were errors diff --git a/app/server/modules/backups/backups.dto.ts b/app/server/modules/backups/backups.dto.ts index fa21cfa5..6a7f9009 100644 --- a/app/server/modules/backups/backups.dto.ts +++ b/app/server/modules/backups/backups.dto.ts @@ -3,7 +3,7 @@ import { describeRoute, resolver } from "hono-openapi"; import { volumeSchema } from "../volumes/volume.dto"; import { repositorySchema } from "../repositories/repositories.dto"; -const retentionPolicySchema = type({ +export const retentionPolicySchema = type({ keepLast: "number?", keepHourly: "number?", keepDaily: "number?", diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts index 08e18073..0b450510 100644 --- a/app/server/modules/lifecycle/config-import.ts +++ b/app/server/modules/lifecycle/config-import.ts @@ -1,6 +1,7 @@ import { eq } from "drizzle-orm"; import fs from "node:fs/promises"; import slugify from "slugify"; +import { type } from "arktype"; import { db } from "../../db/db"; import { usersTable, @@ -15,33 +16,20 @@ import { volumeService } from "../volumes/volume.service"; import type { NotificationConfig } from "~/schemas/notifications"; import type { RepositoryConfig } from "~/schemas/restic"; import type { BackendConfig } from "~/schemas/volumes"; +import { + importConfigSchema, + type ImportConfig, + type VolumeImport, + type RepositoryImport, + type NotificationDestinationImport, + type BackupScheduleImport, + type UserImport, + type ScheduleNotificationAssignment as ScheduleNotificationImport, + type ScheduleMirror as ScheduleMirrorImport, +} from "~/schemas/config-import"; const isRecord = (value: unknown): value is Record => typeof value === "object" && value !== null; -const asStringArray = (value: unknown): string[] => { - if (!Array.isArray(value)) return []; - return value.filter((item): item is string => typeof item === "string"); -}; - -type RetentionPolicy = { - keepLast?: number; - keepHourly?: number; - keepDaily?: number; - keepWeekly?: number; - keepMonthly?: number; - keepYearly?: number; - keepWithinDuration?: string; -}; - -type ImportConfig = { - volumes: unknown[]; - repositories: unknown[]; - backupSchedules: unknown[]; - notificationDestinations: unknown[]; - users: unknown[]; - recoveryKey: string | null; -}; - export type ImportResult = { succeeded: number; skipped: number; @@ -68,25 +56,39 @@ function interpolateEnvVars(value: unknown): unknown { return value; } -function parseImportConfig(configRaw: unknown): ImportConfig { +export type ConfigValidationError = { + path: string; + 
message: string; +}; + +export type ParseConfigResult = + | { success: true; config: ImportConfig } + | { success: false; errors: ConfigValidationError[] }; + +/** + * Parse and validate import configuration using ArkType schema. + * Returns typed config on success or validation errors on failure. + */ +function parseImportConfig(configRaw: unknown): ParseConfigResult { + // Handle wrapped format: { config: { ... } } const root = isRecord(configRaw) ? configRaw : {}; - const config = isRecord(root.config) ? (root.config as Record) : root; - - const volumes = interpolateEnvVars(config.volumes || []); - const repositories = interpolateEnvVars(config.repositories || []); - const backupSchedules = interpolateEnvVars(config.backupSchedules || []); - const notificationDestinations = interpolateEnvVars(config.notificationDestinations || []); - const users = interpolateEnvVars(config.users || []); - const recoveryKeyRaw = interpolateEnvVars(config.recoveryKey || null); - - return { - volumes: Array.isArray(volumes) ? volumes : [], - repositories: Array.isArray(repositories) ? repositories : [], - backupSchedules: Array.isArray(backupSchedules) ? backupSchedules : [], - notificationDestinations: Array.isArray(notificationDestinations) ? notificationDestinations : [], - users: Array.isArray(users) ? users : [], - recoveryKey: typeof recoveryKeyRaw === "string" ? recoveryKeyRaw : null, - }; + const configData = isRecord(root.config) ? root.config : root; + + // Interpolate environment variables before validation + const interpolated = interpolateEnvVars(configData); + + // Validate against ArkType schema + const result = importConfigSchema(interpolated); + + if (result instanceof type.errors) { + const errors: ConfigValidationError[] = result.map((error) => ({ + path: error.path.join(".") || "(root)", + message: error.message, + })); + return { success: false, errors }; + } + + return { success: true, config: result }; } function mergeResults(target: ImportResult, source: ImportResult): void { @@ -118,7 +120,7 @@ async function isDatabaseEmpty(): Promise { } async function writeRecoveryKeyFromConfig( - recoveryKey: string | null, + recoveryKey: string | undefined, overwriteRecoveryKey: boolean, ): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; @@ -127,9 +129,6 @@ async function writeRecoveryKeyFromConfig( const { RESTIC_PASS_FILE } = await import("../../core/constants.js"); if (!recoveryKey) return result; - if (typeof recoveryKey !== "string" || recoveryKey.length !== 64 || !/^[a-fA-F0-9]{64}$/.test(recoveryKey)) { - throw new Error("Recovery key must be a 64-character hex string"); - } const passFileExists = await fs.stat(RESTIC_PASS_FILE).then( () => true, () => false, @@ -175,7 +174,7 @@ async function writeRecoveryKeyFromConfig( return result; } -async function importVolumes(volumes: unknown[]): Promise { +async function importVolumes(volumes: VolumeImport[]): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; // Get existing volumes to check for duplicates @@ -184,10 +183,6 @@ async function importVolumes(volumes: unknown[]): Promise { for (const v of volumes) { try { - if (!isRecord(v) || typeof v.name !== "string" || !isRecord(v.config) || typeof v.config.backend !== "string") { - throw new Error("Invalid volume entry"); - } - // The service uses slugify to normalize the name, so we check against stored names const slugifiedName = slugify(v.name, { lower: true, strict: true }); if 
(existingNames.has(slugifiedName)) { @@ -197,8 +192,7 @@ async function importVolumes(volumes: unknown[]): Promise { } // Pass shortId from config if provided (for IaC reproducibility) - const shortId = typeof v.shortId === "string" ? v.shortId : undefined; - await volumeService.createVolume(v.name, v.config as BackendConfig, shortId); + await volumeService.createVolume(v.name, v.config as BackendConfig, v.shortId); logger.info(`Initialized volume from config: ${v.name}`); result.succeeded++; @@ -208,8 +202,7 @@ async function importVolumes(volumes: unknown[]): Promise { logger.info(`Set autoRemount=false for volume: ${v.name}`); } } catch (e) { - const volumeName = isRecord(v) && typeof v.name === "string" ? v.name : "unknown"; - logger.warn(`Volume '${volumeName}' not created: ${toError(e).message}`); + logger.warn(`Volume '${v.name}' not created: ${toError(e).message}`); result.warnings++; } } @@ -217,7 +210,7 @@ async function importVolumes(volumes: unknown[]): Promise { return result; } -async function importRepositories(repositories: unknown[]): Promise { +async function importRepositories(repositories: RepositoryImport[]): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; const repoServiceModule = await import("../repositories/repositories.service"); const { buildRepoUrl, restic } = await import("../../utils/restic"); @@ -239,10 +232,6 @@ async function importRepositories(repositories: unknown[]): Promise { +async function importNotificationDestinations( + notificationDestinations: NotificationDestinationImport[], +): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; const notificationsServiceModule = await import("../notifications/notifications.service"); @@ -318,10 +302,6 @@ async function importNotificationDestinations(notificationDestinations: unknown[ for (const n of notificationDestinations) { try { - if (!isRecord(n) || typeof n.name !== "string" || !isRecord(n.config) || typeof n.config.type !== "string") { - throw new Error("Invalid notification destination entry"); - } - // The service uses slugify to normalize the name, so we check against stored names const slugifiedName = slugify(n.name, { lower: true, strict: true }); if (existingNames.has(slugifiedName)) { @@ -343,8 +323,7 @@ async function importNotificationDestinations(notificationDestinations: unknown[ logger.info(`Set enabled=false for notification destination: ${n.name}`); } } catch (e) { - const destName = isRecord(n) && typeof n.name === "string" ? n.name : "unknown"; - logger.warn(`Notification destination '${destName}' not created: ${toError(e).message}`); + logger.warn(`Notification destination '${n.name}' not created: ${toError(e).message}`); result.warnings++; } } @@ -352,22 +331,6 @@ async function importNotificationDestinations(notificationDestinations: unknown[ return result; } -function getScheduleVolumeName(schedule: Record): string | null { - return typeof schedule.volume === "string" - ? schedule.volume - : typeof schedule.volumeName === "string" - ? schedule.volumeName - : null; -} - -function getScheduleRepositoryName(schedule: Record): string | null { - return typeof schedule.repository === "string" - ? schedule.repository - : typeof schedule.repositoryName === "string" - ? 
schedule.repositoryName - : null; -} - type ScheduleNotificationAssignment = { destinationId: number; destinationName: string; @@ -379,19 +342,15 @@ type ScheduleNotificationAssignment = { function buildScheduleNotificationAssignments( scheduleName: string, - notifications: unknown[], + notifications: ScheduleNotificationImport[], destinationBySlug: Map, ): { assignments: ScheduleNotificationAssignment[]; warnings: number } { const assignments: ScheduleNotificationAssignment[] = []; let warnings = 0; for (const notif of notifications) { - const destName = typeof notif === "string" ? notif : isRecord(notif) ? notif.name : null; - if (typeof destName !== "string" || destName.length === 0) { - logger.warn(`Notification destination missing name for schedule '${scheduleName}'`); - warnings++; - continue; - } + // Handle both string (name only) and object (with settings) formats + const destName = typeof notif === "string" ? notif : notif.name; const destSlug = slugify(destName, { lower: true, strict: true }); const dest = destinationBySlug.get(destSlug); if (!dest) { @@ -402,10 +361,10 @@ function buildScheduleNotificationAssignments( assignments.push({ destinationId: dest.id, destinationName: dest.name, - notifyOnStart: isRecord(notif) && typeof notif.notifyOnStart === "boolean" ? notif.notifyOnStart : true, - notifyOnSuccess: isRecord(notif) && typeof notif.notifyOnSuccess === "boolean" ? notif.notifyOnSuccess : true, - notifyOnWarning: isRecord(notif) && typeof notif.notifyOnWarning === "boolean" ? notif.notifyOnWarning : true, - notifyOnFailure: isRecord(notif) && typeof notif.notifyOnFailure === "boolean" ? notif.notifyOnFailure : true, + notifyOnStart: typeof notif === "object" && notif.notifyOnStart !== undefined ? notif.notifyOnStart : true, + notifyOnSuccess: typeof notif === "object" && notif.notifyOnSuccess !== undefined ? notif.notifyOnSuccess : true, + notifyOnWarning: typeof notif === "object" && notif.notifyOnWarning !== undefined ? notif.notifyOnWarning : true, + notifyOnFailure: typeof notif === "object" && notif.notifyOnFailure !== undefined ? 
notif.notifyOnFailure : true, }); } @@ -415,7 +374,7 @@ function buildScheduleNotificationAssignments( async function attachScheduleNotifications( scheduleId: number, scheduleName: string, - notifications: unknown[], + notifications: ScheduleNotificationImport[], destinationBySlug: Map, notificationsServiceModule: typeof import("../notifications/notifications.service"), ): Promise { @@ -467,9 +426,9 @@ async function attachScheduleNotifications( return result; } -async function importBackupSchedules(backupSchedules: unknown[]): Promise { +async function importBackupSchedules(backupSchedules: BackupScheduleImport[]): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; - if (!Array.isArray(backupSchedules) || backupSchedules.length === 0) return result; + if (backupSchedules.length === 0) return result; const backupServiceModule = await import("../backups/backups.service"); const notificationsServiceModule = await import("../notifications/notifications.service"); @@ -485,44 +444,23 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise [s.name, s] as const)); for (const s of backupSchedules) { - if (!isRecord(s)) { - continue; - } - const volumeName = getScheduleVolumeName(s); - if (typeof volumeName !== "string" || volumeName.length === 0) { - logger.warn("Backup schedule not processed: Missing volume name"); - result.warnings++; - continue; - } - // Volume names are stored slugified - const volumeSlug = slugify(volumeName, { lower: true, strict: true }); + const volumeSlug = slugify(s.volume, { lower: true, strict: true }); const volume = volumeByName.get(volumeSlug); if (!volume) { - logger.warn(`Backup schedule not processed: Volume '${volumeName}' not found`); + logger.warn(`Backup schedule not processed: Volume '${s.volume}' not found`); result.warnings++; continue; } - const repositoryName = getScheduleRepositoryName(s); - if (typeof repositoryName !== "string" || repositoryName.length === 0) { - logger.warn("Backup schedule not processed: Missing repository name"); - result.warnings++; - continue; - } // Repository names are stored trimmed - const repository = repoByName.get(repositoryName.trim()); + const repository = repoByName.get(s.repository.trim()); if (!repository) { - logger.warn(`Backup schedule not processed: Repository '${repositoryName}' not found`); + logger.warn(`Backup schedule not processed: Repository '${s.repository}' not found`); result.warnings++; continue; } - const scheduleName = typeof s.name === "string" && s.name.length > 0 ? s.name : `${volumeName}-${repositoryName}`; - if (typeof s.cronExpression !== "string" || s.cronExpression.length === 0) { - logger.warn(`Backup schedule not processed: Missing cronExpression for '${scheduleName}'`); - result.warnings++; - continue; - } + const scheduleName = s.name && s.name.length > 0 ? s.name : `${s.volume}-${s.repository}`; // Check if schedule already exists - if so, skip creation but still try attachments const existingSchedule = scheduleByName.get(scheduleName); @@ -547,23 +485,20 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise undefined + excludePatterns: s.excludePatterns ?? [], + excludeIfPresent: s.excludeIfPresent ?? [], + includePatterns: s.includePatterns ?? 
[], + oneFileSystem: s.oneFileSystem, }, - providedShortId, + s.shortId, ); logger.info(`Initialized backup schedule from config: ${scheduleName}`); result.succeeded++; @@ -606,7 +541,7 @@ async function importBackupSchedules(backupSchedules: unknown[]): Promise, backupServiceModule: typeof import("../backups/backups.service"), ): Promise { @@ -622,34 +557,18 @@ async function attachScheduleMirrors( }> = []; for (const m of mirrors) { - if (!isRecord(m)) continue; - - // Support both repository name (string) and repository object with name - const repoName = - typeof m.repository === "string" - ? m.repository - : typeof m.repositoryName === "string" - ? m.repositoryName - : null; - - if (!repoName) { - logger.warn(`Mirror missing repository name for schedule '${scheduleName}'`); - result.warnings++; - continue; - } - - // Repository names are stored trimmed - const repo = repoByName.get(repoName.trim()); + // Schema ensures repository is a non-empty string + const repo = repoByName.get(m.repository.trim()); if (!repo) { - logger.warn(`Mirror repository '${repoName}' not found for schedule '${scheduleName}'`); + logger.warn(`Mirror repository '${m.repository}' not found for schedule '${scheduleName}'`); result.warnings++; continue; } mirrorConfigs.push({ repositoryId: repo.id, - repositoryName: repo.name, - enabled: typeof m.enabled === "boolean" ? m.enabled : true, + repositoryName: m.repository, + enabled: m.enabled ?? true, }); } @@ -685,20 +604,20 @@ async function attachScheduleMirrors( return result; } -async function importUsers(users: unknown[], recoveryKey: string | null): Promise { +async function importUsers(users: UserImport[], recoveryKey: string | undefined): Promise { const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; try { const { authService } = await import("../auth/auth.service"); const hasUsers = await authService.hasUsers(); if (hasUsers) { - if (Array.isArray(users) && users.length > 0) { + if (users.length > 0) { logger.info("Users already exist; skipping user import from config"); result.skipped++; } return result; } - if (!Array.isArray(users) || users.length === 0) return result; + if (users.length === 0) return result; if (users.length > 1) { logger.warn( @@ -708,24 +627,12 @@ async function importUsers(users: unknown[], recoveryKey: string | null): Promis } for (const u of users) { - if (!isRecord(u)) { - logger.warn("Invalid user entry in config; skipping"); - result.warnings++; - continue; - } - if (typeof u.username !== "string" || u.username.length === 0) { - logger.warn("User entry missing username; skipping"); - result.warnings++; - continue; - } - - if (typeof u.passwordHash === "string" && u.passwordHash.length > 0) { + if (u.passwordHash) { try { await db.insert(usersTable).values({ username: u.username, passwordHash: u.passwordHash, - hasDownloadedResticPassword: - typeof u.hasDownloadedResticPassword === "boolean" ? u.hasDownloadedResticPassword : Boolean(recoveryKey), + hasDownloadedResticPassword: u.hasDownloadedResticPassword ?? Boolean(recoveryKey), }); logger.info(`User '${u.username}' imported with password hash from config.`); result.succeeded++; @@ -738,11 +645,10 @@ async function importUsers(users: unknown[], recoveryKey: string | null): Promis continue; } - if (typeof u.password === "string" && u.password.length > 0) { + if (u.password) { try { const { user } = await authService.register(u.username, u.password); - const hasDownloadedResticPassword = - typeof u.hasDownloadedResticPassword === "boolean" ? 
u.hasDownloadedResticPassword : Boolean(recoveryKey); + const hasDownloadedResticPassword = u.hasDownloadedResticPassword ?? Boolean(recoveryKey); if (hasDownloadedResticPassword) { await db.update(usersTable).set({ hasDownloadedResticPassword }).where(eq(usersTable.id, user.id)); } @@ -783,11 +689,11 @@ async function runImport(config: ImportConfig, options: ImportOptions = {}): Pro return result; } - mergeResults(result, await importVolumes(config.volumes)); - mergeResults(result, await importRepositories(config.repositories)); - mergeResults(result, await importNotificationDestinations(config.notificationDestinations)); - mergeResults(result, await importBackupSchedules(config.backupSchedules)); - mergeResults(result, await importUsers(config.users, config.recoveryKey)); + mergeResults(result, await importVolumes(config.volumes ?? [])); + mergeResults(result, await importRepositories(config.repositories ?? [])); + mergeResults(result, await importNotificationDestinations(config.notificationDestinations ?? [])); + mergeResults(result, await importBackupSchedules(config.backupSchedules ?? [])); + mergeResults(result, await importUsers(config.users ?? [], config.recoveryKey)); return result; } @@ -809,13 +715,34 @@ function logImportSummary(result: ImportResult): void { } } +export type ApplyConfigResult = + | { success: true; result: ImportResult } + | { success: false; validationErrors: ConfigValidationError[] }; + /** * Import configuration from a raw config object (used by CLI) + * Returns validation errors upfront if the config doesn't match the schema. */ -export async function applyConfigImport(configRaw: unknown, options: ImportOptions = {}): Promise { +export async function applyConfigImport(configRaw: unknown, options: ImportOptions = {}): Promise { logger.info("Starting config import..."); - const config = parseImportConfig(configRaw); - const result = await runImport(config, options); + + const parseResult = parseImportConfig(configRaw); + if (!parseResult.success) { + for (const error of parseResult.errors) { + logger.error(`Validation error at ${error.path}: ${error.message}`); + } + return { success: false, validationErrors: parseResult.errors }; + } + + const result = await runImport(parseResult.config, options); logImportSummary(result); - return result; + return { success: true, result }; +} + +/** + * Validate configuration without importing (used by CLI --dry-run) + * Returns validation errors if the config doesn't match the schema. + */ +export function validateConfig(configRaw: unknown): ParseConfigResult { + return parseImportConfig(configRaw); } diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md index 978b7a82..497d9c5b 100644 --- a/examples/config-file-import/README.md +++ b/examples/config-file-import/README.md @@ -416,7 +416,7 @@ Note: The `path` must point directly to the restic repository root (the director "notifications": ["slack-alerts", "email-admin"], "mirrors": [ { "repository": "s3-repo" }, - { "repository": "lo2" } + { "repository": "azure-repo" } ] } ``` @@ -459,7 +459,7 @@ Each mirror references a repository by name: ```json "mirrors": [ { "repository": "s3-repo" }, - { "repository": "lo2", "enabled": false } + { "repository": "azure-repo", "enabled": false } ] ```
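For reference, here is a minimal `zerobyte.config.json` that passes the new ArkType validation end to end. This is a sketch, not a canonical example: all names, paths, and the cron expression are illustrative, the `${...}` placeholders assume the corresponding environment variables are set, and the `slack-alerts` / `email-admin` destinations are assumed to already exist (a schedule referencing a missing destination is skipped with a warning rather than failing the import):

```json
{
  "volumes": [
    { "name": "app-data", "config": { "backend": "directory", "path": "/data", "readOnly": true } }
  ],
  "repositories": [
    { "name": "local-repo", "compressionMode": "auto", "config": { "backend": "local", "path": "/var/lib/zerobyte/repositories" } },
    { "name": "s3-repo", "config": { "backend": "s3", "bucket": "mybucket", "accessKeyId": "${ACCESS_KEY_ID}", "secretAccessKey": "${SECRET_ACCESS_KEY}" } }
  ],
  "backupSchedules": [
    {
      // "name" is optional; it defaults to "<volume>-<repository>" ("app-data-local-repo" here).
      "volume": "app-data",
      "repository": "local-repo",
      "cronExpression": "0 3 * * *",
      "retentionPolicy": { "keepDaily": 7, "keepWeekly": 4 },
      // Notifications accept either a bare destination name or an object with per-event flags.
      "notifications": ["slack-alerts", { "name": "email-admin", "notifyOnStart": false }],
      // Mirrors reference other repositories by name; "enabled" defaults to true.
      "mirrors": [{ "repository": "s3-repo" }]
    }
  ],
  "users": [{ "username": "admin", "password": "${ADMIN_PASSWORD}" }]
}
```

When a file does not conform, both `applyConfigImport` and the CLI's `import-config --dry-run` now report the same `{ path, message }` records instead of attempting a partial import. In JSON output mode a failing dry run looks roughly like the sketch below; the field names are fixed by `ConfigValidationError`, while the message text is whatever ArkType produces for the offending key:

```json
{
  "dryRun": true,
  "valid": false,
  "validationErrors": [
    { "path": "backupSchedules.0.cronExpression", "message": "must be a string (was missing)" }
  ]
}
```

Because undeclared keys are stripped by `onUndeclaredKey("delete")` rather than rejected, a typo in a property name (say, `cronExpresion`) surfaces as the declared required key being missing, which is the case the sample error above illustrates.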