diff --git a/README.md b/README.md index 2b91dfb8..4eeaf55e 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,12 @@ If you need remote mount capabilities, keep the original configuration with `cap See [examples/README.md](examples/README.md) for runnable, copy/paste-friendly examples. +### Config file import (Infrastructure as Code) + +If you want Zerobyte to create volumes, repositories, schedules, notification destinations, and an initial user from a JSON file, check the following example: + +- [examples/config-file-import/README.md](examples/config-file-import/README.md) + ## Adding your first volume Zerobyte supports multiple volume backends including NFS, SMB, WebDAV, SFTP, and local directories. A volume represents the source data you want to back up and monitor. diff --git a/app/schemas/config-import.ts b/app/schemas/config-import.ts new file mode 100644 index 00000000..c162d6f5 --- /dev/null +++ b/app/schemas/config-import.ts @@ -0,0 +1,114 @@ +import { type } from "arktype"; +import { volumeConfigSchema } from "./volumes"; +import { repositoryConfigSchema } from "./restic"; +import { notificationConfigSchema } from "./notifications"; +import { retentionPolicySchema } from "../server/modules/backups/backups.dto"; + +/** + * ArkType schemas for validating config import JSON files. + * These provide runtime validation with detailed error messages. + */ + +// Short ID format: 8 character base64url string +const shortIdSchema = type(/^[A-Za-z0-9_-]{8}$/); + +// Volume entry schema for import +export const volumeImportSchema = type({ + name: "string>=1", + shortId: shortIdSchema.optional(), + autoRemount: "boolean?", + config: volumeConfigSchema, +}).onUndeclaredKey("delete"); + +// Repository entry schema for import +export const repositoryImportSchema = type({ + name: "string>=1", + shortId: shortIdSchema.optional(), + compressionMode: type("'auto' | 'off' | 'max'").optional(), + config: repositoryConfigSchema, +}).onUndeclaredKey("delete"); + +// Notification destination entry schema for import +export const notificationDestinationImportSchema = type({ + name: "string>=1", + enabled: "boolean?", + config: notificationConfigSchema, +}).onUndeclaredKey("delete"); + +// Schedule notification assignment (either string name or object with settings) +const scheduleNotificationObjectSchema = type({ + name: "string>=1", + notifyOnStart: "boolean?", + notifyOnSuccess: "boolean?", + notifyOnWarning: "boolean?", + notifyOnFailure: "boolean?", +}).onUndeclaredKey("delete"); + +export const scheduleNotificationAssignmentSchema = type("string>=1").or(scheduleNotificationObjectSchema); + +// Schedule mirror assignment +export const scheduleMirrorSchema = type({ + repository: "string>=1", + enabled: "boolean?", +}).onUndeclaredKey("delete"); + +// Array types for complex schemas +const scheduleNotificationsArray = scheduleNotificationAssignmentSchema.array(); +const scheduleMirrorsArray = scheduleMirrorSchema.array(); + +// Backup schedule entry schema for import +export const backupScheduleImportSchema = type({ + name: "string?", + shortId: shortIdSchema.optional(), + volume: "string>=1", + repository: "string>=1", + cronExpression: "string", + enabled: "boolean?", + retentionPolicy: retentionPolicySchema.or("null").optional(), + excludePatterns: "string[]?", + excludeIfPresent: "string[]?", + includePatterns: "string[]?", + oneFileSystem: "boolean?", + notifications: scheduleNotificationsArray.optional(), + mirrors: scheduleMirrorsArray.optional(), +}).onUndeclaredKey("delete"); + +// User 
entry schema for import +export const userImportSchema = type({ + username: "string>=1", + password: "(string>=1)?", + passwordHash: "(string>=1)?", + hasDownloadedResticPassword: "boolean?", +}).onUndeclaredKey("delete"); + +// Recovery key format: 64-character hex string +const recoveryKeySchema = type(/^[a-fA-F0-9]{64}$/); + +// Array types for root config +const volumesArray = volumeImportSchema.array(); +const repositoriesArray = repositoryImportSchema.array(); +const backupSchedulesArray = backupScheduleImportSchema.array(); +const notificationDestinationsArray = notificationDestinationImportSchema.array(); +const usersArray = userImportSchema.array(); + +// Root config schema +export const importConfigSchema = type({ + volumes: volumesArray.optional(), + repositories: repositoriesArray.optional(), + backupSchedules: backupSchedulesArray.optional(), + notificationDestinations: notificationDestinationsArray.optional(), + users: usersArray.optional(), + recoveryKey: recoveryKeySchema.optional(), +}).onUndeclaredKey("delete"); + +// Type exports +export type VolumeImport = typeof volumeImportSchema.infer; +export type RepositoryImport = typeof repositoryImportSchema.infer; +export type NotificationDestinationImport = typeof notificationDestinationImportSchema.infer; +export type BackupScheduleImport = typeof backupScheduleImportSchema.infer; +export type UserImport = typeof userImportSchema.infer; +export type ImportConfig = typeof importConfigSchema.infer; +export type ScheduleNotificationAssignment = typeof scheduleNotificationAssignmentSchema.infer; +export type ScheduleMirror = typeof scheduleMirrorSchema.infer; +// RetentionPolicy type is re-exported from backups.dto.ts +export type { RetentionPolicy } from "../server/modules/backups/backups.dto"; diff --git a/app/server/cli/commands/import-config.ts b/app/server/cli/commands/import-config.ts new file mode 100644 index 00000000..6e02f729 --- /dev/null +++ b/app/server/cli/commands/import-config.ts @@ -0,0 +1,176 @@ +import { Command } from "commander"; +import path from "node:path"; +import fs from "node:fs/promises"; +import { toError } from "../../utils/errors"; + +type Output = ReturnType; + +async function readStdin(): Promise { + const chunks: Buffer[] = []; + for await (const chunk of process.stdin) { + chunks.push(chunk); + } + return Buffer.concat(chunks).toString("utf-8"); +} + +function createOutput(jsonOutput: boolean) { + return { + error: (message: string): never => { + if (jsonOutput) { + console.log(JSON.stringify({ error: message })); + } else { + console.error(`āŒ ${message}`); + } + process.exit(1); + }, + info: (message: string): void => { + if (!jsonOutput) { + console.log(message); + } + }, + json: (data: object): void => { + if (jsonOutput) { + console.log(JSON.stringify(data)); + } + }, + }; +} + +async function readConfigJson(options: { stdin?: boolean; config?: string }, out: Output): Promise { + if (options.stdin) { + out.info("šŸ“„ Reading config from stdin..."); + try { + const configJson = await readStdin(); + if (!configJson.trim()) { + out.error("No input received from stdin"); + } + return configJson; + } catch (e) { + out.error(`Failed to read stdin: ${toError(e).message}`); + } + } + + const configPath = path.resolve(process.cwd(), options.config ?? 
""); + try { + await fs.access(configPath); + } catch { + out.error(`Config file not found: ${configPath}`); + } + + out.info(`šŸ“„ Config file: ${configPath}`); + return fs.readFile(configPath, "utf-8"); +} + +export const importConfigCommand = new Command("import-config") + .description("Import configuration from a JSON file or stdin") + .option("-c, --config ", "Path to the configuration file") + .option("--stdin", "Read configuration from stdin") + .option("--dry-run", "Validate the config without importing") + .option("--json", "Output results in JSON format") + .option("--log-level ", "Set log level (debug, info, warn, error)") + .option("--overwrite-recovery-key", "Overwrite existing recovery key (only allowed if database is empty)") + .action(async (options) => { + const jsonOutput = options.json; + const out = createOutput(jsonOutput); + + // Set log level: explicit option takes precedence + if (options.logLevel) { + process.env.LOG_LEVEL = options.logLevel; + } + + out.info("\nšŸ“¦ Zerobyte Config Import\n"); + + if (!options.config && !options.stdin) { + if (!jsonOutput) { + console.log("\nUsage:"); + console.log(" zerobyte import-config --config /path/to/config.json"); + console.log(" cat config.json | zerobyte import-config --stdin"); + } + out.error("Either --config or --stdin is required"); + } + + if (options.config && options.stdin) { + out.error("Cannot use both --config and --stdin"); + } + + const configJson = await readConfigJson(options, out); + + // Parse and validate JSON + let config: unknown; + try { + config = JSON.parse(configJson); + } catch (e) { + out.error(`Invalid JSON: ${toError(e).message}`); + } + + if (options.dryRun) { + const { validateConfig } = await import("../../modules/lifecycle/config-import"); + const validation = validateConfig(config); + + if (!validation.success) { + if (jsonOutput) { + out.json({ dryRun: true, valid: false, validationErrors: validation.errors }); + } else { + console.log("šŸ” Dry run mode - validating config\n"); + console.log("āŒ Validation errors:"); + for (const error of validation.errors) { + console.log(` • ${error.path}: ${error.message}`); + } + } + process.exit(1); + } + + const { config: validConfig } = validation; + const counts = { + volumes: validConfig.volumes?.length ?? 0, + repositories: validConfig.repositories?.length ?? 0, + backupSchedules: validConfig.backupSchedules?.length ?? 0, + notificationDestinations: validConfig.notificationDestinations?.length ?? 0, + users: validConfig.users?.length ?? 0, + }; + const hasRecoveryKey = !!validConfig.recoveryKey; + + if (jsonOutput) { + out.json({ dryRun: true, valid: true, counts, hasRecoveryKey }); + } else { + console.log("šŸ” Dry run mode - validating config\n"); + for (const [section, count] of Object.entries(counts)) { + console.log(` ${section}: ${count} item(s)`); + } + console.log(` recoveryKey: ${hasRecoveryKey ? 
"provided" : "not provided"}`); + console.log("\nāœ… Config is valid"); + } + return; + } + + try { + // Ensure database is initialized with migrations + const { runDbMigrations } = await import("../../db/db"); + runDbMigrations(); + + const { applyConfigImport } = await import("../../modules/lifecycle/config-import"); + const importResult = await applyConfigImport(config, { overwriteRecoveryKey: options.overwriteRecoveryKey }); + + if (!importResult.success) { + if (jsonOutput) { + out.json({ success: false, validationErrors: importResult.validationErrors }); + } else { + console.log("āŒ Validation errors:"); + for (const error of importResult.validationErrors) { + console.log(` • ${error.path}: ${error.message}`); + } + } + process.exit(1); + } + + const { result } = importResult; + out.json({ ...result, success: result.errors === 0 }); + + // Exit with error code if there were errors + if (result.errors > 0) { + process.exit(1); + } + } catch (e) { + out.error(`Import failed: ${toError(e).message}`); + } + }); diff --git a/app/server/cli/index.ts b/app/server/cli/index.ts index 9a4be586..6e05e107 100644 --- a/app/server/cli/index.ts +++ b/app/server/cli/index.ts @@ -1,9 +1,11 @@ import { Command } from "commander"; +import { importConfigCommand } from "./commands/import-config"; import { resetPasswordCommand } from "./commands/reset-password"; const program = new Command(); program.name("zerobyte").description("Zerobyte CLI - Backup automation tool built on top of Restic").version("1.0.0"); +program.addCommand(importConfigCommand); program.addCommand(resetPasswordCommand); export async function runCLI(argv: string[]): Promise { diff --git a/app/server/modules/auth/auth.service.ts b/app/server/modules/auth/auth.service.ts index 4a97c0df..19cadff3 100644 --- a/app/server/modules/auth/auth.service.ts +++ b/app/server/modules/auth/auth.service.ts @@ -13,7 +13,7 @@ export class AuthService { const [existingUser] = await db.select().from(usersTable); if (existingUser) { - throw new Error("Admin user already exists"); + throw new Error("A user already exists"); } const passwordHash = await Bun.password.hash(password, { diff --git a/app/server/modules/backups/backups.dto.ts b/app/server/modules/backups/backups.dto.ts index fa21cfa5..6a7f9009 100644 --- a/app/server/modules/backups/backups.dto.ts +++ b/app/server/modules/backups/backups.dto.ts @@ -3,7 +3,7 @@ import { describeRoute, resolver } from "hono-openapi"; import { volumeSchema } from "../volumes/volume.dto"; import { repositorySchema } from "../repositories/repositories.dto"; -const retentionPolicySchema = type({ +export const retentionPolicySchema = type({ keepLast: "number?", keepHourly: "number?", keepDaily: "number?", diff --git a/app/server/modules/backups/backups.service.ts b/app/server/modules/backups/backups.service.ts index 6c29f739..6f88ddf6 100644 --- a/app/server/modules/backups/backups.service.ts +++ b/app/server/modules/backups/backups.service.ts @@ -15,7 +15,7 @@ import { notificationsService } from "../notifications/notifications.service"; import { repoMutex } from "../../core/repository-mutex"; import { checkMirrorCompatibility, getIncompatibleMirrorError } from "~/server/utils/backend-compatibility"; import path from "node:path"; -import { generateShortId } from "~/server/utils/id"; +import { generateShortId, isValidShortId } from "~/server/utils/id"; const runningBackups = new Map(); @@ -83,7 +83,7 @@ const getSchedule = async (scheduleId: number) => { return schedule; }; -const createSchedule = async (data: 
CreateBackupScheduleBody) => { +const createSchedule = async (data: CreateBackupScheduleBody, providedShortId?: string) => { if (!cron.validate(data.cronExpression)) { throw new BadRequestError("Invalid cron expression"); } @@ -96,6 +96,25 @@ const createSchedule = async (data: CreateBackupScheduleBody) => { throw new ConflictError("A backup schedule with this name already exists"); } + // Use provided shortId if valid, otherwise generate a new one + let shortId: string; + if (providedShortId) { + if (!isValidShortId(providedShortId)) { + throw new BadRequestError(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); + } + const shortIdInUse = await db.query.backupSchedulesTable.findFirst({ + where: eq(backupSchedulesTable.shortId, providedShortId), + }); + if (shortIdInUse) { + throw new ConflictError( + `Schedule shortId '${providedShortId}' is already in use by schedule '${shortIdInUse.name}'`, + ); + } + shortId = providedShortId; + } else { + shortId = generateShortId(); + } + const volume = await db.query.volumesTable.findFirst({ where: eq(volumesTable.id, data.volumeId), }); @@ -128,7 +147,7 @@ const createSchedule = async (data: CreateBackupScheduleBody) => { includePatterns: data.includePatterns ?? [], oneFileSystem: data.oneFileSystem, nextBackupAt: nextBackupAt, - shortId: generateShortId(), + shortId, }) .returning(); diff --git a/app/server/modules/lifecycle/config-import.ts b/app/server/modules/lifecycle/config-import.ts new file mode 100644 index 00000000..0b450510 --- /dev/null +++ b/app/server/modules/lifecycle/config-import.ts @@ -0,0 +1,748 @@ +import { eq } from "drizzle-orm"; +import fs from "node:fs/promises"; +import slugify from "slugify"; +import { type } from "arktype"; +import { db } from "../../db/db"; +import { + usersTable, + volumesTable, + repositoriesTable, + backupSchedulesTable, + notificationDestinationsTable, +} from "../../db/schema"; +import { logger } from "../../utils/logger"; +import { toError } from "../../utils/errors"; +import { volumeService } from "../volumes/volume.service"; +import type { NotificationConfig } from "~/schemas/notifications"; +import type { RepositoryConfig } from "~/schemas/restic"; +import type { BackendConfig } from "~/schemas/volumes"; +import { + importConfigSchema, + type ImportConfig, + type VolumeImport, + type RepositoryImport, + type NotificationDestinationImport, + type BackupScheduleImport, + type UserImport, + type ScheduleNotificationAssignment as ScheduleNotificationImport, + type ScheduleMirror as ScheduleMirrorImport, +} from "~/schemas/config-import"; + +const isRecord = (value: unknown): value is Record => typeof value === "object" && value !== null; + +export type ImportResult = { + succeeded: number; + skipped: number; + warnings: number; + errors: number; +}; + +function interpolateEnvVars(value: unknown): unknown { + if (typeof value === "string") { + return value.replace(/\$\{([^}]+)\}/g, (_, v) => { + if (process.env[v] === undefined) { + logger.warn(`Environment variable '${v}' is not defined. 
Replacing with empty string.`); + return ""; + } + return process.env[v]; + }); + } + if (Array.isArray(value)) { + return value.map(interpolateEnvVars); + } + if (value && typeof value === "object") { + return Object.fromEntries(Object.entries(value).map(([k, v]) => [k, interpolateEnvVars(v)])); + } + return value; +} + +export type ConfigValidationError = { + path: string; + message: string; +}; + +export type ParseConfigResult = + | { success: true; config: ImportConfig } + | { success: false; errors: ConfigValidationError[] }; + +/** + * Parse and validate import configuration using ArkType schema. + * Returns typed config on success or validation errors on failure. + */ +function parseImportConfig(configRaw: unknown): ParseConfigResult { + // Handle wrapped format: { config: { ... } } + const root = isRecord(configRaw) ? configRaw : {}; + const configData = isRecord(root.config) ? root.config : root; + + // Interpolate environment variables before validation + const interpolated = interpolateEnvVars(configData); + + // Validate against ArkType schema + const result = importConfigSchema(interpolated); + + if (result instanceof type.errors) { + const errors: ConfigValidationError[] = result.map((error) => ({ + path: error.path.join(".") || "(root)", + message: error.message, + })); + return { success: false, errors }; + } + + return { success: true, config: result }; +} + +function mergeResults(target: ImportResult, source: ImportResult): void { + target.succeeded += source.succeeded; + target.skipped += source.skipped; + target.warnings += source.warnings; + target.errors += source.errors; +} + +/** + * Check if the database has any records in the main tables. + * Used to prevent recovery key overwrite when data already exists. + */ +async function isDatabaseEmpty(): Promise { + const [volumes, repositories, schedules, notifications, users] = await Promise.all([ + db.select({ id: volumesTable.id }).from(volumesTable).limit(1), + db.select({ id: repositoriesTable.id }).from(repositoriesTable).limit(1), + db.select({ id: backupSchedulesTable.id }).from(backupSchedulesTable).limit(1), + db.select({ id: notificationDestinationsTable.id }).from(notificationDestinationsTable).limit(1), + db.select({ id: usersTable.id }).from(usersTable).limit(1), + ]); + return ( + volumes.length === 0 && + repositories.length === 0 && + schedules.length === 0 && + notifications.length === 0 && + users.length === 0 + ); +} + +async function writeRecoveryKeyFromConfig( + recoveryKey: string | undefined, + overwriteRecoveryKey: boolean, +): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + + try { + const { RESTIC_PASS_FILE } = await import("../../core/constants.js"); + if (!recoveryKey) return result; + + const passFileExists = await fs.stat(RESTIC_PASS_FILE).then( + () => true, + () => false, + ); + if (passFileExists) { + // Check if existing key matches the one being imported + const existingKey = await fs.readFile(RESTIC_PASS_FILE, "utf-8"); + if (existingKey.trim() === recoveryKey) { + logger.info("Recovery key already configured with matching value"); + result.skipped++; + return result; + } + + // Key exists with different value - check if overwrite is allowed + if (!overwriteRecoveryKey) { + logger.error("Recovery key already exists with different value; use --overwrite-recovery-key to replace"); + result.errors++; + return result; + } + + // Overwrite requested - verify database is empty for safety + const dbEmpty = await isDatabaseEmpty(); + if 
(!dbEmpty) { + logger.error( + "Cannot overwrite recovery key: database contains existing records. " + + "Overwriting the recovery key would make existing backups unrecoverable.", + ); + result.errors++; + return result; + } + + // Safe to overwrite - database is empty + logger.warn("Overwriting existing recovery key (database is empty)"); + } + await fs.writeFile(RESTIC_PASS_FILE, recoveryKey, { mode: 0o600 }); + logger.info(`Recovery key written from config to ${RESTIC_PASS_FILE}`); + result.succeeded++; + } catch (err) { + logger.error(`Failed to write recovery key from config: ${toError(err).message}`); + result.errors++; + } + + return result; +} + +async function importVolumes(volumes: VolumeImport[]): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + + // Get existing volumes to check for duplicates + const existingVolumes = await volumeService.listVolumes(); + const existingNames = new Set(existingVolumes.map((v) => v.name)); + + for (const v of volumes) { + try { + // The service uses slugify to normalize the name, so we check against stored names + const slugifiedName = slugify(v.name, { lower: true, strict: true }); + if (existingNames.has(slugifiedName)) { + logger.info(`Volume '${v.name}' already exists`); + result.skipped++; + continue; + } + + // Pass shortId from config if provided (for IaC reproducibility) + await volumeService.createVolume(v.name, v.config as BackendConfig, v.shortId); + logger.info(`Initialized volume from config: ${v.name}`); + result.succeeded++; + + // If autoRemount is explicitly false, update the volume (default is true) + if (v.autoRemount === false) { + await volumeService.updateVolume(v.name, { autoRemount: false }); + logger.info(`Set autoRemount=false for volume: ${v.name}`); + } + } catch (e) { + logger.warn(`Volume '${v.name}' not created: ${toError(e).message}`); + result.warnings++; + } + } + + return result; +} + +async function importRepositories(repositories: RepositoryImport[]): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + const repoServiceModule = await import("../repositories/repositories.service"); + const { buildRepoUrl, restic } = await import("../../utils/restic"); + + // Get existing repositories and build sets for duplicate detection + const existingRepos = await repoServiceModule.repositoriesService.listRepositories(); + const existingNames = new Set(existingRepos.map((repo) => repo.name)); + const existingUrls = new Set(); + + for (const repo of existingRepos) { + try { + // Config fields used for URL (path, bucket, endpoint, etc.) 
are not encrypted + const url = buildRepoUrl(repo.config as RepositoryConfig); + existingUrls.add(url); + } catch (e) { + logger.warn(`Could not build URL for existing repository '${repo.name}': ${toError(e).message}`); + } + } + + for (const r of repositories) { + try { + // Skip if a repository pointing to the same location is already registered in DB + try { + const incomingUrl = buildRepoUrl(r.config as RepositoryConfig); + if (existingUrls.has(incomingUrl)) { + logger.info(`Repository '${r.name}': another repository already registered for location ${incomingUrl}`); + result.skipped++; + continue; + } + } catch (e) { + logger.warn(`Could not build URL for '${r.name}' to check duplicates: ${toError(e).message}`); + } + + // For repos without isExistingRepository, check if the location is already a restic repo + // This catches the case where user forgot to set isExistingRepository: true + if (!r.config.isExistingRepository) { + const isAlreadyRepo = await restic + .snapshots({ ...r.config, isExistingRepository: true } as RepositoryConfig) + .then(() => true) + .catch((e) => { + logger.debug(`Repo existence check for '${r.name}': ${toError(e).message}`); + return false; + }); + + if (isAlreadyRepo) { + logger.warn( + `Skipping '${r.name}': location is already a restic repository. ` + + `Set "isExistingRepository": true to import it, or use a different location for a new repository.`, + ); + result.warnings++; + continue; + } + } + + // Skip if a repository with the same name already exists (fallback for repos without deterministic paths) + // Repository names are stored trimmed + if (existingNames.has(r.name.trim())) { + logger.info(`Repository '${r.name}': a repository with this name already exists`); + result.skipped++; + continue; + } + + await repoServiceModule.repositoriesService.createRepository( + r.name, + r.config as RepositoryConfig, + r.compressionMode, + r.shortId, + ); + logger.info(`Initialized repository from config: ${r.name}`); + result.succeeded++; + } catch (e) { + logger.warn(`Repository '${r.name}' not created: ${toError(e).message}`); + result.warnings++; + } + } + + return result; +} + +async function importNotificationDestinations( + notificationDestinations: NotificationDestinationImport[], +): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + const notificationsServiceModule = await import("../notifications/notifications.service"); + + // Get existing destinations to check for duplicates + const existingDestinations = await notificationsServiceModule.notificationsService.listDestinations(); + const existingNames = new Set(existingDestinations.map((d) => d.name)); + + for (const n of notificationDestinations) { + try { + // The service uses slugify to normalize the name, so we check against stored names + const slugifiedName = slugify(n.name, { lower: true, strict: true }); + if (existingNames.has(slugifiedName)) { + logger.info(`Notification destination '${n.name}' already exists`); + result.skipped++; + continue; + } + + const created = await notificationsServiceModule.notificationsService.createDestination( + n.name, + n.config as NotificationConfig, + ); + logger.info(`Initialized notification destination from config: ${n.name}`); + result.succeeded++; + + // If enabled is explicitly false, update the destination (default is true) + if (n.enabled === false) { + await notificationsServiceModule.notificationsService.updateDestination(created.id, { enabled: false }); + logger.info(`Set enabled=false for notification 
destination: ${n.name}`); + } + } catch (e) { + logger.warn(`Notification destination '${n.name}' not created: ${toError(e).message}`); + result.warnings++; + } + } + + return result; +} + +type ScheduleNotificationAssignment = { + destinationId: number; + destinationName: string; + notifyOnStart: boolean; + notifyOnSuccess: boolean; + notifyOnWarning: boolean; + notifyOnFailure: boolean; +}; + +function buildScheduleNotificationAssignments( + scheduleName: string, + notifications: ScheduleNotificationImport[], + destinationBySlug: Map, +): { assignments: ScheduleNotificationAssignment[]; warnings: number } { + const assignments: ScheduleNotificationAssignment[] = []; + let warnings = 0; + + for (const notif of notifications) { + // Handle both string (name only) and object (with settings) formats + const destName = typeof notif === "string" ? notif : notif.name; + const destSlug = slugify(destName, { lower: true, strict: true }); + const dest = destinationBySlug.get(destSlug); + if (!dest) { + logger.warn(`Notification destination '${destName}' not found for schedule '${scheduleName}'`); + warnings++; + continue; + } + assignments.push({ + destinationId: dest.id, + destinationName: dest.name, + notifyOnStart: typeof notif === "object" && notif.notifyOnStart !== undefined ? notif.notifyOnStart : true, + notifyOnSuccess: typeof notif === "object" && notif.notifyOnSuccess !== undefined ? notif.notifyOnSuccess : true, + notifyOnWarning: typeof notif === "object" && notif.notifyOnWarning !== undefined ? notif.notifyOnWarning : true, + notifyOnFailure: typeof notif === "object" && notif.notifyOnFailure !== undefined ? notif.notifyOnFailure : true, + }); + } + + return { assignments, warnings }; +} + +async function attachScheduleNotifications( + scheduleId: number, + scheduleName: string, + notifications: ScheduleNotificationImport[], + destinationBySlug: Map, + notificationsServiceModule: typeof import("../notifications/notifications.service"), +): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + try { + const existingNotifications = + await notificationsServiceModule.notificationsService.getScheduleNotifications(scheduleId); + const existingDestIds = new Set(existingNotifications.map((n) => n.destinationId)); + + const { assignments, warnings } = buildScheduleNotificationAssignments( + scheduleName, + notifications, + destinationBySlug, + ); + result.warnings += warnings; + + // Filter out already attached notifications and track skipped + const newAssignments: typeof assignments = []; + for (const a of assignments) { + if (existingDestIds.has(a.destinationId)) { + logger.info(`Notification '${a.destinationName}' already attached to schedule '${scheduleName}'`); + result.skipped++; + } else { + newAssignments.push(a); + } + } + if (newAssignments.length === 0) return result; + + // Merge existing with new (strip destinationName for API call) + const mergedAssignments = [ + ...existingNotifications.map((n) => ({ + destinationId: n.destinationId, + notifyOnStart: n.notifyOnStart, + notifyOnSuccess: n.notifyOnSuccess, + notifyOnWarning: n.notifyOnWarning, + notifyOnFailure: n.notifyOnFailure, + })), + ...newAssignments.map(({ destinationName: _, ...rest }) => rest), + ]; + + await notificationsServiceModule.notificationsService.updateScheduleNotifications(scheduleId, mergedAssignments); + const notifNames = newAssignments.map((a) => a.destinationName).join(", "); + logger.info(`Assigned notification(s) [${notifNames}] to schedule 
'${scheduleName}'`); + result.succeeded += newAssignments.length; + } catch (e) { + logger.warn(`Failed to assign notifications to schedule '${scheduleName}': ${toError(e).message}`); + result.warnings++; + } + return result; +} + +async function importBackupSchedules(backupSchedules: BackupScheduleImport[]): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + if (backupSchedules.length === 0) return result; + + const backupServiceModule = await import("../backups/backups.service"); + const notificationsServiceModule = await import("../notifications/notifications.service"); + + const volumes = await db.query.volumesTable.findMany(); + const repositories = await db.query.repositoriesTable.findMany(); + const destinations = await db.query.notificationDestinationsTable.findMany(); + const existingSchedules = await db.query.backupSchedulesTable.findMany(); + + const volumeByName = new Map(volumes.map((v) => [v.name, v] as const)); + const repoByName = new Map(repositories.map((r) => [r.name, r] as const)); + const destinationBySlug = new Map(destinations.map((d) => [d.name, d] as const)); + const scheduleByName = new Map(existingSchedules.map((s) => [s.name, s] as const)); + + for (const s of backupSchedules) { + const volumeSlug = slugify(s.volume, { lower: true, strict: true }); + const volume = volumeByName.get(volumeSlug); + if (!volume) { + logger.warn(`Backup schedule not processed: Volume '${s.volume}' not found`); + result.warnings++; + continue; + } + + // Repository names are stored trimmed + const repository = repoByName.get(s.repository.trim()); + if (!repository) { + logger.warn(`Backup schedule not processed: Repository '${s.repository}' not found`); + result.warnings++; + continue; + } + + const scheduleName = s.name && s.name.length > 0 ? s.name : `${s.volume}-${s.repository}`; + + // Check if schedule already exists - if so, skip creation but still try attachments + const existingSchedule = scheduleByName.get(scheduleName); + let scheduleId: number; + + if (existingSchedule) { + logger.info(`Backup schedule '${scheduleName}' already exists`); + result.skipped++; + scheduleId = existingSchedule.id; + } else { + // Mount volume if needed for new schedule + if (volume.status !== "mounted") { + try { + await volumeService.mountVolume(volume.name); + volumeByName.set(volume.name, { ...volume, status: "mounted" }); + logger.info(`Mounted volume ${volume.name} for backup schedule`); + } catch (e) { + logger.warn(`Could not mount volume ${volume.name}: ${toError(e).message}`); + result.warnings++; + continue; + } + } + + try { + const createdSchedule = await backupServiceModule.backupsService.createSchedule( + { + name: scheduleName, + volumeId: volume.id, + repositoryId: repository.id, + enabled: s.enabled ?? true, + cronExpression: s.cronExpression, + retentionPolicy: s.retentionPolicy ?? undefined, // null -> undefined + excludePatterns: s.excludePatterns ?? [], + excludeIfPresent: s.excludeIfPresent ?? [], + includePatterns: s.includePatterns ?? 
[], + oneFileSystem: s.oneFileSystem, + }, + s.shortId, + ); + logger.info(`Initialized backup schedule from config: ${scheduleName}`); + result.succeeded++; + scheduleId = createdSchedule.id; + } catch (e) { + logger.warn(`Backup schedule '${scheduleName}' not created: ${toError(e).message}`); + result.warnings++; + continue; + } + } + + // Attach notifications (checks if already attached) + if (Array.isArray(s.notifications) && s.notifications.length > 0) { + const notifResult = await attachScheduleNotifications( + scheduleId, + scheduleName, + s.notifications, + destinationBySlug, + notificationsServiceModule, + ); + mergeResults(result, notifResult); + } + + // Attach mirrors (checks if already attached) + if (Array.isArray(s.mirrors) && s.mirrors.length > 0) { + const mirrorResult = await attachScheduleMirrors( + scheduleId, + scheduleName, + s.mirrors, + repoByName, + backupServiceModule, + ); + mergeResults(result, mirrorResult); + } + } + + return result; +} + +async function attachScheduleMirrors( + scheduleId: number, + scheduleName: string, + mirrors: ScheduleMirrorImport[], + repoByName: Map, + backupServiceModule: typeof import("../backups/backups.service"), +): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + try { + const existingMirrors = await backupServiceModule.backupsService.getMirrors(scheduleId); + const existingRepoIds = new Set(existingMirrors.map((m) => m.repositoryId)); + + const mirrorConfigs: Array<{ + repositoryId: string; + repositoryName: string; + enabled: boolean; + }> = []; + + for (const m of mirrors) { + // Schema ensures repository is a non-empty string + const repo = repoByName.get(m.repository.trim()); + if (!repo) { + logger.warn(`Mirror repository '${m.repository}' not found for schedule '${scheduleName}'`); + result.warnings++; + continue; + } + + mirrorConfigs.push({ + repositoryId: repo.id, + repositoryName: m.repository, + enabled: m.enabled ?? 
true, + }); + } + + // Filter out already attached mirrors and track skipped + const newMirrors: typeof mirrorConfigs = []; + for (const m of mirrorConfigs) { + if (existingRepoIds.has(m.repositoryId)) { + logger.info(`Mirror '${m.repositoryName}' already attached to schedule '${scheduleName}'`); + result.skipped++; + } else { + newMirrors.push(m); + } + } + if (newMirrors.length === 0) return result; + + // Merge existing with new (strip repositoryName for API call) + const mergedMirrors = [ + ...existingMirrors.map((m) => ({ + repositoryId: m.repositoryId, + enabled: m.enabled, + })), + ...newMirrors.map(({ repositoryName: _, ...rest }) => rest), + ]; + + await backupServiceModule.backupsService.updateMirrors(scheduleId, { mirrors: mergedMirrors }); + const mirrorNames = newMirrors.map((m) => m.repositoryName).join(", "); + logger.info(`Assigned mirror(s) [${mirrorNames}] to schedule '${scheduleName}'`); + result.succeeded += newMirrors.length; + } catch (e) { + logger.warn(`Failed to assign mirrors to schedule '${scheduleName}': ${toError(e).message}`); + result.warnings++; + } + return result; +} + +async function importUsers(users: UserImport[], recoveryKey: string | undefined): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + + try { + const { authService } = await import("../auth/auth.service"); + const hasUsers = await authService.hasUsers(); + if (hasUsers) { + if (users.length > 0) { + logger.info("Users already exist; skipping user import from config"); + result.skipped++; + } + return result; + } + if (users.length === 0) return result; + + if (users.length > 1) { + logger.warn( + "Multiple users provided in config. Zerobyte currently supports a single initial user; extra entries will be ignored.", + ); + result.warnings++; + } + + for (const u of users) { + if (u.passwordHash) { + try { + await db.insert(usersTable).values({ + username: u.username, + passwordHash: u.passwordHash, + hasDownloadedResticPassword: u.hasDownloadedResticPassword ?? Boolean(recoveryKey), + }); + logger.info(`User '${u.username}' imported with password hash from config.`); + result.succeeded++; + break; + } catch (error) { + const err = error instanceof Error ? error : new Error(String(error)); + logger.warn(`User '${u.username}' not imported: ${err.message}`); + result.warnings++; + } + continue; + } + + if (u.password) { + try { + const { user } = await authService.register(u.username, u.password); + const hasDownloadedResticPassword = u.hasDownloadedResticPassword ?? Boolean(recoveryKey); + if (hasDownloadedResticPassword) { + await db.update(usersTable).set({ hasDownloadedResticPassword }).where(eq(usersTable.id, user.id)); + } + logger.info(`User '${u.username}' created from config.`); + result.succeeded++; + break; + } catch (error) { + const err = error instanceof Error ? error : new Error(String(error)); + logger.warn(`User '${u.username}' not created: ${err.message}`); + result.warnings++; + } + continue; + } + + logger.warn(`User '${u.username}' missing passwordHash/password; skipping`); + result.warnings++; + } + } catch (err) { + const e = err instanceof Error ? 
err : new Error(String(err)); + logger.error(`Automated user setup failed: ${e.message}`); + result.errors++; + } + + return result; +} + +type ImportOptions = { + overwriteRecoveryKey?: boolean; +}; + +async function runImport(config: ImportConfig, options: ImportOptions = {}): Promise { + const result: ImportResult = { succeeded: 0, skipped: 0, warnings: 0, errors: 0 }; + + mergeResults(result, await writeRecoveryKeyFromConfig(config.recoveryKey, options.overwriteRecoveryKey ?? false)); + + // Stop immediately if recovery key has errors (e.g., mismatch with existing key) + if (result.errors > 0) { + return result; + } + + mergeResults(result, await importVolumes(config.volumes ?? [])); + mergeResults(result, await importRepositories(config.repositories ?? [])); + mergeResults(result, await importNotificationDestinations(config.notificationDestinations ?? [])); + mergeResults(result, await importBackupSchedules(config.backupSchedules ?? [])); + mergeResults(result, await importUsers(config.users ?? [], config.recoveryKey)); + + return result; +} + +function logImportSummary(result: ImportResult): void { + const skippedMsg = result.skipped > 0 ? `, ${result.skipped} skipped` : ""; + if (result.errors > 0) { + logger.error( + `Config import completed with ${result.errors} error(s) and ${result.warnings} warning(s), ${result.succeeded} imported${skippedMsg}`, + ); + } else if (result.warnings > 0) { + logger.warn( + `Config import completed with ${result.warnings} warning(s), ${result.succeeded} imported${skippedMsg}`, + ); + } else if (result.succeeded > 0 || result.skipped > 0) { + logger.info(`Config import completed: ${result.succeeded} imported${skippedMsg}`); + } else { + logger.info("Config import completed: no items to import"); + } +} + +export type ApplyConfigResult = + | { success: true; result: ImportResult } + | { success: false; validationErrors: ConfigValidationError[] }; + +/** + * Import configuration from a raw config object (used by CLI) + * Returns validation errors upfront if the config doesn't match the schema. + */ +export async function applyConfigImport(configRaw: unknown, options: ImportOptions = {}): Promise { + logger.info("Starting config import..."); + + const parseResult = parseImportConfig(configRaw); + if (!parseResult.success) { + for (const error of parseResult.errors) { + logger.error(`Validation error at ${error.path}: ${error.message}`); + } + return { success: false, validationErrors: parseResult.errors }; + } + + const result = await runImport(parseResult.config, options); + logImportSummary(result); + return { success: true, result }; +} + +/** + * Validate configuration without importing (used by CLI --dry-run) + * Returns validation errors if the config doesn't match the schema. 
+ */ +export function validateConfig(configRaw: unknown): ParseConfigResult { + return parseImportConfig(configRaw); +} diff --git a/app/server/modules/repositories/repositories.service.ts b/app/server/modules/repositories/repositories.service.ts index ea713699..9a6a1272 100644 --- a/app/server/modules/repositories/repositories.service.ts +++ b/app/server/modules/repositories/repositories.service.ts @@ -1,10 +1,10 @@ import crypto from "node:crypto"; import { eq, or } from "drizzle-orm"; -import { InternalServerError, NotFoundError } from "http-errors-enhanced"; +import { BadRequestError, ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; import { db } from "../../db/db"; import { repositoriesTable } from "../../db/schema"; import { toMessage } from "../../utils/errors"; -import { generateShortId } from "../../utils/id"; +import { generateShortId, isValidShortId } from "../../utils/id"; import { restic } from "../../utils/restic"; import { cryptoUtils } from "../../utils/crypto"; import { cache } from "../../utils/cache"; @@ -67,9 +67,32 @@ const encryptConfig = async (config: RepositoryConfig): Promise { +const createRepository = async ( + name: string, + config: RepositoryConfig, + compressionMode?: CompressionMode, + providedShortId?: string, +) => { const id = crypto.randomUUID(); - const shortId = generateShortId(); + + // Use provided shortId if valid, otherwise generate a new one + let shortId: string; + if (providedShortId) { + if (!isValidShortId(providedShortId)) { + throw new BadRequestError(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); + } + const shortIdInUse = await db.query.repositoriesTable.findFirst({ + where: eq(repositoriesTable.shortId, providedShortId), + }); + if (shortIdInUse) { + throw new ConflictError( + `Repository shortId '${providedShortId}' is already in use by repository '${shortIdInUse.name}'`, + ); + } + shortId = providedShortId; + } else { + shortId = generateShortId(); + } let processedConfig = config; if (config.backend === "local" && !config.isExistingRepository) { @@ -78,6 +101,24 @@ const createRepository = async (name: string, config: RepositoryConfig, compress const encryptedConfig = await encryptConfig(processedConfig); + const repoExists = await restic + .snapshots(encryptedConfig) + .then(() => true) + .catch(() => false); + + if (repoExists && !config.isExistingRepository) { + throw new ConflictError( + `A restic repository already exists at this location. ` + + `If you want to use the existing repository, set "isExistingRepository": true in the config.`, + ); + } + + if (!repoExists && config.isExistingRepository) { + throw new BadRequestError( + `Cannot access existing repository. 
Verify the path/credentials are correct and the repository exists.`, + ); + } + const [created] = await db .insert(repositoriesTable) .values({ @@ -97,14 +138,7 @@ const createRepository = async (name: string, config: RepositoryConfig, compress let error: string | null = null; - if (config.isExistingRepository) { - const result = await restic - .snapshots(encryptedConfig) - .then(() => ({ error: null })) - .catch((error) => ({ error })); - - error = result.error; - } else { + if (!repoExists) { const initResult = await restic.init(encryptedConfig); error = initResult.error; } diff --git a/app/server/modules/volumes/volume.service.ts b/app/server/modules/volumes/volume.service.ts index bde3be3b..e2a61ea8 100644 --- a/app/server/modules/volumes/volume.service.ts +++ b/app/server/modules/volumes/volume.service.ts @@ -2,13 +2,13 @@ import * as fs from "node:fs/promises"; import * as os from "node:os"; import * as path from "node:path"; import { and, eq, ne } from "drizzle-orm"; -import { ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; +import { BadRequestError, ConflictError, InternalServerError, NotFoundError } from "http-errors-enhanced"; import slugify from "slugify"; import { db } from "../../db/db"; import { volumesTable } from "../../db/schema"; import { cryptoUtils } from "../../utils/crypto"; import { toMessage } from "../../utils/errors"; -import { generateShortId } from "../../utils/id"; +import { generateShortId, isValidShortId } from "../../utils/id"; import { getStatFs, type StatFs } from "../../utils/mountinfo"; import { withTimeout } from "../../utils/timeout"; import { createVolumeBackend } from "../backends/backend"; @@ -48,7 +48,7 @@ const listVolumes = async () => { return volumes; }; -const createVolume = async (name: string, backendConfig: BackendConfig) => { +const createVolume = async (name: string, backendConfig: BackendConfig, providedShortId?: string) => { const slug = slugify(name, { lower: true, strict: true }); const existing = await db.query.volumesTable.findFirst({ @@ -59,7 +59,22 @@ const createVolume = async (name: string, backendConfig: BackendConfig) => { throw new ConflictError("Volume already exists"); } - const shortId = generateShortId(); + // Use provided shortId if valid, otherwise generate a new one + let shortId: string; + if (providedShortId) { + if (!isValidShortId(providedShortId)) { + throw new BadRequestError(`Invalid shortId format: '${providedShortId}'. Must be 8 base64url characters.`); + } + const shortIdInUse = await db.query.volumesTable.findFirst({ + where: eq(volumesTable.shortId, providedShortId), + }); + if (shortIdInUse) { + throw new ConflictError(`Volume shortId '${providedShortId}' is already in use by volume '${shortIdInUse.name}'`); + } + shortId = providedShortId; + } else { + shortId = generateShortId(); + } const encryptedConfig = await encryptSensitiveFields(backendConfig); const [created] = await db diff --git a/app/server/utils/errors.ts b/app/server/utils/errors.ts index 22379a36..3ff14939 100644 --- a/app/server/utils/errors.ts +++ b/app/server/utils/errors.ts @@ -18,6 +18,8 @@ export const toMessage = (err: unknown): string => { return sanitizeSensitiveData(message); }; +export const toError = (e: unknown): Error => (e instanceof Error ? 
e : new Error(String(e))); + const resticErrorCodes: Record = { 1: "Command failed: An error occurred while executing the command.", 2: "Go runtime error: A runtime error occurred in the Go program.", diff --git a/app/server/utils/id.ts b/app/server/utils/id.ts index 18bc2030..4a3143e0 100644 --- a/app/server/utils/id.ts +++ b/app/server/utils/id.ts @@ -1,6 +1,13 @@ import crypto from "node:crypto"; -export const generateShortId = (length = 8): string => { +const SHORT_ID_LENGTH = 8; + +export const generateShortId = (length = SHORT_ID_LENGTH): string => { const bytesNeeded = Math.ceil((length * 3) / 4); return crypto.randomBytes(bytesNeeded).toString("base64url").slice(0, length); }; + +export const isValidShortId = (value: string, length = SHORT_ID_LENGTH): boolean => { + const regex = new RegExp(`^[A-Za-z0-9_-]{${length}}$`); + return regex.test(value); +}; diff --git a/examples/README.md b/examples/README.md index 8d2e7657..97511a94 100644 --- a/examples/README.md +++ b/examples/README.md @@ -11,6 +11,7 @@ This folder contains runnable, copy/paste-friendly examples for running Zerobyte - [Bind-mount a local directory](directory-bind-mount/README.md) — back up a host folder by mounting it into the container. - [Mount an rclone config](rclone-config-mount/README.md) — use rclone-based repository backends by mounting your rclone config. - [Secret placeholders + Docker secrets](secrets-placeholders/README.md) — keep secrets out of the DB using `env://...` and `file://...` references. +- [Config file import (Infrastructure as Code)](config-file-import/README.md) — pre-configure volumes/repos/schedules/users from json file. ### Advanced setups diff --git a/examples/config-file-import/.env.example b/examples/config-file-import/.env.example new file mode 100644 index 00000000..d59d0377 --- /dev/null +++ b/examples/config-file-import/.env.example @@ -0,0 +1,23 @@ +# Copy to .env and fill values + +# Used by examples/config-file-import/zerobyte.config.example.json +RECOVERY_KEY=your-64-char-hex-recovery-key +ADMIN_PASSWORD=change-me + +# Optional: referenced by some config examples +ACCESS_KEY_ID= +SECRET_ACCESS_KEY= +GCS_CREDENTIALS= +AZURE_KEY= +SMB_PASSWORD= +WEBDAV_PASSWORD= +SFTP_PRIVATE_KEY= +SLACK_WEBHOOK_URL= +EMAIL_PASSWORD= +DISCORD_WEBHOOK_URL= +GOTIFY_TOKEN= +NTFY_PASSWORD= +PUSHOVER_USER_KEY= +PUSHOVER_API_TOKEN= +TELEGRAM_BOT_TOKEN= +SHOUTRRR_URL= diff --git a/examples/config-file-import/.gitignore b/examples/config-file-import/.gitignore new file mode 100644 index 00000000..481fcd55 --- /dev/null +++ b/examples/config-file-import/.gitignore @@ -0,0 +1,4 @@ +.env + +*.json +!zerobyte.config.example.json diff --git a/examples/config-file-import/README.md b/examples/config-file-import/README.md new file mode 100644 index 00000000..497d9c5b --- /dev/null +++ b/examples/config-file-import/README.md @@ -0,0 +1,519 @@ +# Config file import (Infrastructure as Code) + +Zerobyte supports **config file import** via the CLI. +This lets you pre-configure volumes, repositories, backup schedules, notification destinations, and an initial user. + +This example includes: + +- a runnable `docker-compose.yml` +- a comprehensive `zerobyte.config.example.json` template (trim it down to what you actually use) +- `.env.example` showing how to inject secrets via environment variables + +## Prerequisites + +- Docker + Docker Compose + +This example includes `SYS_ADMIN` and `/dev/fuse` because it's compatible with remote volume mounts (SMB/NFS/WebDAV). + +## Setup + +1. 
Copy the env file: + +```bash +cp .env.example .env +``` + +2. Create a local directory to mount as a sample volume: + +```bash +mkdir -p mydata +``` + +3. Create a working config file (copy the example template): + +```bash +cp zerobyte.config.example.json zerobyte.config.json +``` + +This is the recommended workflow for quick testing: if you don't have your own JSON config yet, start from the template. + +4. Review/edit `zerobyte.config.json`. + + The example template is intentionally "kitchen-sink" (lots of volume/repository/notification types) so you can copy what you need. + Delete the entries you don't plan to use, and keep only the ones you have credentials/mounts for. + +5. Start Zerobyte: + +```bash +docker compose up -d +``` + +6. Run the config import: + +```bash +docker compose exec zerobyte bun run cli import-config --config /app/zerobyte.config.json +``` + +7. Access the UI at `http://localhost:4096`. + +## Notes + +### CLI import command + +Import configuration using the CLI: + +```bash +# Import from a mounted config file (starts a new temporary container) +docker compose run --rm zerobyte bun run cli import-config --config /app/zerobyte.config.json + +# Import from a mounted config file into an already-running container +docker compose exec zerobyte bun run cli import-config --config /app/zerobyte.config.json + +# Import from stdin (into running container) +cat zerobyte.config.json | docker compose exec -T zerobyte bun run cli import-config --stdin + +# Import from stdin in PowerShell (into running container) +Get-Content zerobyte.config.json | docker compose exec -T zerobyte bun run cli import-config --stdin + +# Validate config without importing (dry run) +docker compose run --rm zerobyte bun run cli import-config --config /app/zerobyte.config.json --dry-run + +# Get JSON output for scripting +docker compose exec zerobyte bun run cli import-config --config /app/zerobyte.config.json --json +``` + +The `--stdin` option is useful when you don't want to mount the config file - just pipe it directly. + +### CLI options + +| Option | Description | +|--------|-------------| +| `--config ` | Path to the configuration file inside the container | +| `--stdin` | Read configuration from stdin | +| `--dry-run` | Validate the config without importing | +| `--json` | Output results in JSON format | +| `--log-level ` | Set log level (debug, info, warn, error) | +| `--overwrite-recovery-key` | Overwrite existing recovery key (only allowed if database is empty) | + +### Secrets via env vars + +Zerobyte supports **two different mechanisms** that are easy to confuse: + +1. **Config import interpolation** (this example) +2. **Secret placeholders** (`env://...` and `file://...`) + +#### 1) Config import interpolation: `${VAR_NAME}` + +During config import, any string value in the JSON can reference an environment variable using `${VAR_NAME}`. + +Example: + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "repositories": [ + { + "name": "s3-repo", + "config": { + "backend": "s3", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + } + } + ] +} +``` + +Important properties of `${...}` interpolation: + +- It runs **only during import**. +- Values are **resolved before** they are written to the database (meaning the actual secret ends up in the DB for fields that are stored as secrets). +- Because it reads `process.env`, Docker Compose must inject those variables into the container. 
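+Since the variables have to be visible **inside the container**, a quick sanity check before a real import can help; the sketch below assumes the `zerobyte` service and the variable names from this example's `.env.example`:
+
+```bash
+# Confirm the container can see the variables (this prints their values)
+docker compose exec zerobyte printenv RECOVERY_KEY ADMIN_PASSWORD
+
+# Validate the interpolated config without importing anything
+docker compose exec zerobyte bun run cli import-config --config /app/zerobyte.config.json --dry-run
+```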
+ +This example uses: + +- `env_file: .env` + +So to make `${VAR_NAME}` work, put the variables in `.env` (or otherwise provide them in the container environment). + +##### Host-side interpolation (alternative) + +You can also interpolate environment variables **on the host** before piping the config to the container. +This is useful in CI/CD pipelines where secrets are injected by the pipeline and you don't want them exposed to the container environment. + +**Linux/macOS** (using `envsubst`): + +```bash +# Load .env and substitute variables before piping +export $(grep -v '^#' .env | xargs) && envsubst < zerobyte.config.json | docker compose exec -T zerobyte bun run cli import-config --stdin +``` + +**PowerShell**: + +```powershell +# Load .env and substitute variables before piping +Get-Content .env | ForEach-Object { if ($_ -match '^([^#][^=]+)=(.*)$') { [Environment]::SetEnvironmentVariable($matches[1], $matches[2]) } } +(Get-Content zerobyte.config.json -Raw) -replace '\$\{(\w+)\}', { $env:($_.Groups[1].Value) } | docker compose exec -T zerobyte bun run cli import-config --stdin +``` + +#### 2) Secret placeholders: `env://...` and `file://...` + +Separately from config import, Zerobyte supports **secret placeholders** for *some sensitive fields*. +These placeholders are stored **as-is** in the database (the raw secret is not stored) and resolved at runtime. + +Supported formats: + +- `env://VAR_NAME` → reads `process.env.VAR_NAME` at runtime +- `file://secret_name` → reads `/run/secrets/secret_name` (Docker secrets) + +This is useful when you want to keep secrets out of the database and rotate them without editing Zerobyte's stored config. + +See the runnable example: + +- [examples/secrets-placeholders/README.md](../secrets-placeholders/README.md) + +### Config file behavior (create-only) + +The config file is applied using a **create-only** approach: + +- **Volumes, notifications, schedules**: Skipped if a resource with the same name already exists +- **Repositories**: Skipped if any of these conditions are met: + - A repository pointing to the same location (path/bucket/endpoint) is already registered + - For local repos: the path is already a restic repository (set `isExistingRepository: true` to import it) + - A repository with the same name already exists +- Changes made via the UI are preserved across imports +- To update a resource from config, either modify it via the UI or delete it first + +This makes the config file better suited as "initial setup" than as a "desired state sync". + +--- + +## Config structure reference + +This example is intended to be the primary, copy/paste-friendly reference for config import. + +### `zerobyte.config.json` structure + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "volumes": [ + "..." + ], + "repositories": [ + "..." + ], + "backupSchedules": [ + "..." + ], + "notificationDestinations": [ + "..." + ], + "users": [ + "..." 
+ ] +} +``` + +### Volume types + +#### Local directory + +```json +{ + "name": "local-volume", + "config": { + "backend": "directory", + "path": "/mydata", + "readOnly": true + } +} +``` + +#### NFS + +```json +{ + "name": "nfs-volume", + "config": { + "backend": "nfs", + "server": "nfs.example.com", + "exportPath": "/data", + "port": 2049, + "version": "4", + "readOnly": false + } +} +``` + +#### SMB + +```json +{ + "name": "smb-volume", + "config": { + "backend": "smb", + "server": "smb.example.com", + "share": "shared", + "username": "user", + "password": "${SMB_PASSWORD}", + "vers": "3.0", + "domain": "WORKGROUP", + "port": 445, + "readOnly": false + } +} +``` + +#### WebDAV + +```json +{ + "name": "webdav-volume", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "readOnly": false, + "ssl": true + } +} +``` + +#### SFTP + +```json +{ + "name": "sftp-volume", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "password": "${SFTP_PASSWORD}", + "path": "/data", + "readOnly": false, + "skipHostKeyCheck": true + } +} +``` + +For key-based authentication: + +```json +{ + "name": "sftp-volume-key", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "privateKey": "${SFTP_PRIVATE_KEY}", + "path": "/data", + "readOnly": false, + "skipHostKeyCheck": false, + "knownHosts": "sftp.example.com ssh-ed25519 AAAA..." + } +} +``` + +### Repository types + +#### Local (new repository) + +Creates a new restic repository. The `path` is optional and defaults to `/var/lib/zerobyte/repositories`: + +```json +{ + "name": "local-repo", + "config": { + "backend": "local" + }, + "compressionMode": "auto" +} +``` + +The actual repository will be created at `{path}/{auto-generated-id}`. + +#### Local (existing repository) + +To import an existing restic repository, set `isExistingRepository: true` and provide the **full path to the repository root**: + +```json +{ + "name": "my-local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories/abc123", + "isExistingRepository": true + } +} +``` + +Note: The `path` must point directly to the restic repository root (the directory containing `config`, `data/`, `keys/`, etc.). 
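+If you are unsure whether a directory really is a repository root, a quick look from inside the running container is usually enough; this sketch uses the placeholder path from the example above:
+
+```bash
+# A restic repository root contains config, data/, index/, keys/, locks/, snapshots/
+docker compose exec zerobyte ls /var/lib/zerobyte/repositories/abc123
+```
+
+The import itself also probes the location: an entry with `isExistingRepository: true` that cannot be opened is rejected, and an entry without the flag that points at an existing restic repository is skipped with a warning.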
+ +#### S3-compatible + +```json +{ + "name": "backup-repo", + "config": { + "backend": "s3", + "endpoint": "s3.amazonaws.com", + "bucket": "mybucket", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + }, + "compressionMode": "auto" +} +``` + +#### Google Cloud Storage + +```json +{ + "name": "gcs-repo", + "config": { + "backend": "gcs", + "bucket": "mybucket", + "projectId": "my-gcp-project", + "credentialsJson": "${GCS_CREDENTIALS}" + } +} +``` + +#### Azure Blob Storage + +```json +{ + "name": "azure-repo", + "config": { + "backend": "azure", + "container": "mycontainer", + "accountName": "myaccount", + "accountKey": "${AZURE_KEY}" + } +} +``` + +### Backup schedules + +```json +{ + "name": "local-volume-local-repo", + "volume": "local-volume", + "repository": "local-repo", + "cronExpression": "0 2 * * *", + "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, + "includePatterns": ["important-folder"], + "excludePatterns": ["*.tmp", "*.log"], + "excludeIfPresent": [".nobackup"], + "oneFileSystem": true, + "enabled": true, + "notifications": ["slack-alerts", "email-admin"], + "mirrors": [ + { "repository": "s3-repo" }, + { "repository": "azure-repo" } + ] +} +``` + +**Fields:** + +- `name`: Unique schedule name +- `volume`: Name of the source volume +- `repository`: Name of the primary destination repository +- `cronExpression`: Cron string for schedule timing +- `retentionPolicy`: Object with retention rules (`keepLast`, `keepHourly`, `keepDaily`, `keepWeekly`, `keepMonthly`, `keepYearly`, `keepWithinDuration`) +- `includePatterns` / `excludePatterns`: Arrays of file patterns +- `excludeIfPresent`: Array of filenames; if any of these files exist in a directory, that directory is excluded (e.g., `[".nobackup"]`) +- `oneFileSystem`: Boolean; if `true`, restic won't cross filesystem boundaries (useful when backing up `/` to avoid traversing into mounted volumes) +- `enabled`: Boolean +- `notifications`: Array of notification destination names or detailed objects (see below) +- `mirrors`: Array of mirror repositories (see below) + +#### Notifications (detailed) + +`notifications` can be strings (destination names) or objects with fine-grained control: + +```json +[ + { + "name": "slack-alerts", + "notifyOnStart": false, + "notifyOnSuccess": true, + "notifyOnWarning": true, + "notifyOnFailure": true + } +] +``` + +#### Mirrors + +Mirrors let you automatically copy snapshots to additional repositories after each backup. +Each mirror references a repository by name: + +```json +"mirrors": [ + { "repository": "s3-repo" }, + { "repository": "azure-repo", "enabled": false } +] +``` + +### User setup (automated) + +Zerobyte currently supports a **single user**. +If multiple entries are provided in `users[]`, only the first one will be applied. + +New instance: + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "my-user", + "password": "${ADMIN_PASSWORD}" + } + ] +} +``` + +Migration: + +```json +{ + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "my-user", + "passwordHash": "$argon2id$v=19$m=19456,t=2,p=1$..." + } + ] +} +``` + +Use either `password` OR `passwordHash`, not both. + +### Recovery key + +The recovery key is a 64-character hex string that serves two critical purposes: + +1. Restic repository password (encrypts your backup data) +2. 
Database encryption key (encrypts credentials stored in Zerobyte) + +Generating a recovery key ahead of time: + +```bash +# Using OpenSSL (Linux/macOS) +openssl rand -hex 32 + +# Using Python +python3 -c "import secrets; print(secrets.token_hex(32))" + +# Using Docker (prints the key, container is removed) +docker run --rm python:3.12-alpine sh -lc 'echo "Key is on the next line:"; python -c "import secrets; print(secrets.token_hex(32))"' +``` diff --git a/examples/config-file-import/docker-compose.yml b/examples/config-file-import/docker-compose.yml new file mode 100644 index 00000000..fadfaea2 --- /dev/null +++ b/examples/config-file-import/docker-compose.yml @@ -0,0 +1,21 @@ +services: + zerobyte: + image: ghcr.io/nicotsx/zerobyte:latest + container_name: zerobyte + restart: unless-stopped + cap_add: + - SYS_ADMIN + devices: + - /dev/fuse:/dev/fuse + ports: + - "4096:4096" + env_file: + - .env + environment: + - TZ=${TZ:-UTC} + volumes: + - /etc/localtime:/etc/localtime:ro + - /var/lib/zerobyte:/var/lib/zerobyte + - ./zerobyte.config.json:/app/zerobyte.config.json:ro + - ./mydata:/mydata:ro + - ~/.config/rclone:/root/.config/rclone diff --git a/examples/config-file-import/zerobyte.config.example.json b/examples/config-file-import/zerobyte.config.example.json new file mode 100644 index 00000000..9680f6d2 --- /dev/null +++ b/examples/config-file-import/zerobyte.config.example.json @@ -0,0 +1,244 @@ +{ + "volumes": [ + { + "name": "local-volume", + "config": { + "backend": "directory", + "path": "/mydata", + "readOnly": true + } + }, + { + "name": "nfs-volume", + "config": { + "backend": "nfs", + "server": "nfs.example.com", + "exportPath": "/data", + "port": 2049, + "version": "4", + "readOnly": false + } + }, + { + "name": "smb-volume", + "config": { + "backend": "smb", + "server": "smb.example.com", + "share": "shared", + "username": "user", + "password": "${SMB_PASSWORD}", + "vers": "3.0", + "domain": "WORKGROUP", + "port": 445, + "readOnly": false + } + }, + { + "name": "webdav-volume", + "config": { + "backend": "webdav", + "server": "webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "readOnly": false, + "ssl": true + } + }, + { + "name": "sftp-volume", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "username": "user", + "password": "${SFTP_PASSWORD}", + "path": "/data", + "readOnly": false, + "skipHostKeyCheck": true + } + } + ], + "repositories": [ + { + "name": "local-repo", + "config": { + "backend": "local" + }, + "compressionMode": "auto" + }, + { + "name": "existing-local-repo", + "config": { + "backend": "local", + "path": "/var/lib/zerobyte/repositories/abc123", + "isExistingRepository": true + } + }, + { + "name": "s3-repo", + "config": { + "backend": "s3", + "endpoint": "s3.amazonaws.com", + "bucket": "mybucket", + "accessKeyId": "${ACCESS_KEY_ID}", + "secretAccessKey": "${SECRET_ACCESS_KEY}" + }, + "compressionMode": "auto" + }, + { + "name": "gcs-repo", + "config": { + "backend": "gcs", + "bucket": "mybucket", + "projectId": "my-gcp-project", + "credentialsJson": "${GCS_CREDENTIALS}" + } + }, + { + "name": "azure-repo", + "config": { + "backend": "azure", + "container": "mycontainer", + "accountName": "myaccount", + "accountKey": "${AZURE_KEY}" + } + }, + { + "name": "rclone-repo", + "config": { + "backend": "rclone", + "remote": "myremote", + "path": "backups/zerobyte" + } + }, + { + "name": "webdav-repo", + "config": { + "backend": "webdav", + "server": 
"webdav.example.com", + "path": "/remote.php/webdav", + "username": "user", + "password": "${WEBDAV_PASSWORD}", + "port": 80, + "ssl": true + } + }, + { + "name": "sftp-repo", + "config": { + "backend": "sftp", + "host": "sftp.example.com", + "port": 22, + "user": "sftpuser", + "privateKey": "${SFTP_PRIVATE_KEY}", + "path": "/backups" + } + } + ], + "backupSchedules": [ + { + "name": "local-volume-local-repo", + "volume": "local-volume", + "repository": "local-repo", + "cronExpression": "0 2 * * *", + "retentionPolicy": { "keepLast": 7, "keepDaily": 7 }, + "includePatterns": ["important-folder"], + "excludePatterns": ["*.tmp", "*.log"], + "excludeIfPresent": [".nobackup"], + "oneFileSystem": false, + "enabled": true, + "notifications": ["slack-alerts"], + "mirrors": [{ "repository": "s3-repo" }] + } + ], + "notificationDestinations": [ + { + "name": "slack-alerts", + "config": { + "type": "slack", + "webhookUrl": "${SLACK_WEBHOOK_URL}", + "channel": "#backups", + "username": "zerobyte", + "iconEmoji": ":floppy_disk:" + } + }, + { + "name": "email-admin", + "config": { + "type": "email", + "smtpHost": "smtp.example.com", + "smtpPort": 587, + "username": "admin@example.com", + "password": "${EMAIL_PASSWORD}", + "from": "zerobyte@example.com", + "to": ["admin@example.com"], + "useTLS": true + } + }, + { + "name": "discord-backups", + "config": { + "type": "discord", + "webhookUrl": "${DISCORD_WEBHOOK_URL}", + "username": "zerobyte", + "avatarUrl": "https://example.com/avatar.png", + "threadId": "1234567890" + } + }, + { + "name": "gotify-notify", + "config": { + "type": "gotify", + "serverUrl": "https://gotify.example.com", + "token": "${GOTIFY_TOKEN}", + "path": "/message", + "priority": 5 + } + }, + { + "name": "ntfy-notify", + "config": { + "type": "ntfy", + "serverUrl": "https://ntfy.example.com", + "topic": "zerobyte-backups", + "priority": "high", + "username": "ntfyuser", + "password": "${NTFY_PASSWORD}" + } + }, + { + "name": "pushover-notify", + "config": { + "type": "pushover", + "userKey": "${PUSHOVER_USER_KEY}", + "apiToken": "${PUSHOVER_API_TOKEN}", + "devices": "phone,tablet", + "priority": 1 + } + }, + { + "name": "telegram-notify", + "config": { + "type": "telegram", + "botToken": "${TELEGRAM_BOT_TOKEN}", + "chatId": "123456789" + } + }, + { + "name": "custom-shoutrrr", + "config": { + "type": "custom", + "shoutrrrUrl": "${SHOUTRRR_URL}" + } + } + ], + "recoveryKey": "${RECOVERY_KEY}", + "users": [ + { + "username": "admin", + "password": "${ADMIN_PASSWORD}" + } + ] +}