diff --git a/core/database/foxx/api/repo.js b/core/database/foxx/api/repo.js index baea100b9..608984545 100644 --- a/core/database/foxx/api/repo.js +++ b/core/database/foxx/api/repo.js @@ -4,6 +4,8 @@ const g_db = require("@arangodb").db; const g_lib = require("./support"); const { errors } = require("@arangodb"); const pathModule = require("./posix_path"); +const { RepositoryOps } = require("./repository/operations"); +const { Result } = require("./repository/types"); /** * All DataFed repositories have the following path structure on a POSIX file system @@ -37,6 +39,10 @@ const PathType = { UNKNOWN: "UNKNOWN", }; +/** + * Legacy Repo class for backward compatibility + * Internally uses new repository patterns but maintains old API + */ class Repo { // ERROR code #error = null; @@ -47,6 +53,8 @@ class Repo { // The repo id simply the key prepended with 'repo/' #repo_id = null; #repo_key = null; + // New: store the repository object using new patterns + #repository = null; /** * Constructs a Repo object and checks if the key exists in the database. @@ -74,20 +82,17 @@ class Repo { this.#repo_key = a_key; } - // Check if the repo document exists - try { - if (collection.exists(this.#repo_key)) { - this.#exists = true; - } else { - this.#exists = false; - this.#error = g_lib.ERR_NOT_FOUND; - this.#err_msg = "Invalid repo: (" + a_key + "). No record found."; - } - } catch (e) { + // Use new repository operations to find the repo + const findResult = RepositoryOps.find(this.#repo_id); + + if (findResult.ok) { + this.#exists = true; + this.#repository = findResult.value; + } else { this.#exists = false; - this.#error = g_lib.ERR_INTERNAL_FAULT; - this.#err_msg = "Unknown error encountered."; - console.log(e); + this.#error = + findResult.error.code === 404 ? 
g_lib.ERR_NOT_FOUND : g_lib.ERR_INTERNAL_FAULT; + this.#err_msg = findResult.error.message; } } } @@ -126,6 +131,14 @@ class Repo { return this.#err_msg; } + /** + * Get the underlying repository object (new pattern) + * @returns {object|null} Repository object or null if not exists + */ + getRepository() { + return this.#repository; + } + /** * Detect what kind of POSIX path has been provided * @@ -138,13 +151,17 @@ class Repo { throw [g_lib.ERR_PERM_DENIED, "Repo does not exist " + this.#repo_id]; } - let repo = g_db._document(this.#repo_id); - if (!repo.path) { + const repoData = this.#repository.data; + if (!repoData.path) { + // Metadata-only repos don't have paths + if (repoData.type === "metadata_only") { + return PathType.UNKNOWN; + } throw [g_lib.ERR_INTERNAL_FAULT, "Repo document is missing path: " + this.#repo_id]; } // Get and sanitize the repo root path by removing the trailing slash if one exists - let repo_root_path = repo.path.replace(/\/$/, ""); + let repo_root_path = repoData.path.replace(/\/$/, ""); let sanitized_path = a_path.replace(/\/$/, ""); // Check if the sanitized path is exactly the repo root path diff --git a/core/database/foxx/api/repo_router_new.js b/core/database/foxx/api/repo_router_new.js new file mode 100644 index 000000000..d505f6b0d --- /dev/null +++ b/core/database/foxx/api/repo_router_new.js @@ -0,0 +1,283 @@ +"use strict"; + +const createRouter = require("@arangodb/foxx/router"); +const router = createRouter(); +const joi = require("joi"); +const { db: g_db } = require("@arangodb"); +const g_lib = require("./support"); +const { RepositoryType } = require("./repository/types"); +const { createRepositoryByType } = require("./repository/factory"); +const { RepositoryOps } = require("./repository/operations"); + +module.exports = router; + +// Helper to clean repository data +const cleanRepoData = (repo) => { + const { _id, _key, _rev, ...data } = repo.data; + return { ...data, id: _id }; +}; + +// List repositories +router + .get("/list", (req, res) => { + const client = req.queryParams.client + ? 
g_lib.getUserFromClientID(req.queryParams.client) + : null; + + if (req.queryParams.all && client && !client.is_admin) { + throw g_lib.ERR_PERM_DENIED; + } + + const filter = { + ...(req.queryParams.type && { type: req.queryParams.type }), + ...(!req.queryParams.all && client && { admin: client._id }), + }; + + const listResult = RepositoryOps.list(filter); + if (!listResult.ok) throw [listResult.error.code, listResult.error.message]; + + res.send(listResult.value.map(cleanRepoData)); + }) + .queryParam("client", joi.string().allow("").optional(), "Client ID") + .queryParam("details", joi.boolean().optional(), "Show additional record details") + .queryParam("all", joi.boolean().optional(), "List all repos (requires admin)") + .queryParam( + "type", + joi.string().valid("globus", "metadata_only").optional(), + "Filter by repository type", + ) + .summary("List repositories") + .description("List repositories administered by the client."); + +// View repository +router + .get("/view", (req, res) => { + try { + const findResult = RepositoryOps.find(req.queryParams.id); + if (!findResult.ok) throw [findResult.error.code, findResult.error.message]; + + const repo = cleanRepoData(findResult.value); + repo.admins = g_db.admin + .byExample({ _from: repo.id }) + .toArray() + .map((admin) => admin._to); + + res.send([repo]); + } catch (e) { + g_lib.handleException(e, res); + } + }) + .queryParam("id", joi.string().required(), "Repository ID") + .summary("View repository") + .description("View repository details."); + +// Create repository +router + .post("/create", (req, res) => { + try { + g_db._executeTransaction({ + collections: { read: ["u"], write: ["repo", "admin"] }, + action: () => { + const client = g_lib.getUserFromClientID(req.queryParams.client); + if (!client.is_admin) throw g_lib.ERR_PERM_DENIED; + + const config = { + ...req.body, + ...(req.body.type === RepositoryType.GLOBUS && { + pub_key: req.body.pub_key, + address: req.body.address, + endpoint: req.body.endpoint, + path: req.body.path, + exp_path: req.body.exp_path, + domain: req.body.domain, + }), + }; + + const createResult = createRepositoryByType(config); + if (!createResult.ok) + throw [createResult.error.code, createResult.error.message]; + + const repository = createResult.value; + + req.body.admins.forEach((admin) => { + if (!g_db._exists(admin)) + throw [g_lib.ERR_NOT_FOUND, `User ${admin} not found`]; + }); + + const saveResult = RepositoryOps.save(repository); + if (!saveResult.ok) throw [saveResult.error.code, saveResult.error.message]; + + const saved = saveResult.value; + req.body.admins.forEach((admin) => + g_db.admin.save({ _from: saved._id, _to: admin }), + ); + + // saveResult.value is the raw saved document; wrap it in the { type, data } shape cleanRepoData expects + res.send(cleanRepoData({ type: saved.type, data: saved })); + }, + }); + } catch (e) { + g_lib.handleException(e, res); + } + }) + .queryParam("client", joi.string().required(), "Client ID") + .body( + joi + .object({ + id: joi.string().required(), + type: joi.string().valid("globus", "metadata_only").required(), + title: joi.string().required(), + desc: joi.string().optional(), + capacity: joi.number().integer().min(1).required(), + admins: joi.array().items(joi.string()).required(), + // Globus-specific fields + pub_key: joi.string().when("type", { is: "globus", then: joi.required() }), + address: joi.string().when("type", { is: "globus", then: joi.required() }), + endpoint: joi.string().when("type", { is: "globus", then: joi.required() }), + path: joi.string().when("type", { is: "globus", then: joi.required() }), + exp_path: joi.string().optional(), + domain: 
joi.string().when("type", { is: "globus", then: joi.required() }), + }) + .required(), + "Repository fields", + ) + .summary("Create repository") + .description("Create a new repository."); + +// Update repository +router + .post("/update", (req, res) => { + try { + g_db._executeTransaction({ + collections: { read: ["u", "repo"], write: ["repo", "admin"] }, + action: () => { + const client = g_lib.getUserFromClientID(req.queryParams.client); + if (!client.is_admin) throw g_lib.ERR_PERM_DENIED; + + const findResult = RepositoryOps.find(req.body.id); + if (!findResult.ok) throw [findResult.error.code, findResult.error.message]; + + const repository = findResult.value; + const updates = {}; + + ["title", "desc", "capacity"].forEach((field) => { + g_lib.procInputParam(req.body, field, true, updates); + }); + + // Type-specific updates + if (repository.type === RepositoryType.GLOBUS) { + g_lib.procInputParam(req.body, "pub_key", true, updates); + g_lib.procInputParam(req.body, "address", true, updates); + g_lib.procInputParam(req.body, "exp_path", true, updates); + } + + const updateResult = RepositoryOps.update(repository, updates); + if (!updateResult.ok) + throw [updateResult.error.code, updateResult.error.message]; + + // Handle admin updates if provided + if (req.body.admins) { + // Remove old admins (repository is a { type, data } tagged union, so the document id lives on repository.data) + g_db.admin.removeByExample({ _from: repository.data._id }); + // Add new admins + req.body.admins.forEach((admin) => { + if (!g_db._exists(admin)) + throw [g_lib.ERR_NOT_FOUND, `User ${admin} not found`]; + g_db.admin.save({ _from: repository.data._id, _to: admin }); + }); + } + + // updateResult.value is the raw updated document; wrap it before passing to cleanRepoData + res.send([cleanRepoData({ type: repository.type, data: updateResult.value })]); + }, + }); + } catch (e) { + g_lib.handleException(e, res); + } + }) + .queryParam("client", joi.string().required(), "Client ID") + .body( + joi + .object({ + id: joi.string().required(), + title: joi.string().optional(), + desc: joi.string().optional(), + capacity: joi.number().integer().min(1).optional(), + pub_key: joi.string().optional(), + address: joi.string().optional(), + exp_path: joi.string().optional(), + admins: joi.array().items(joi.string()).optional(), + }) + .required(), + "Repository update fields", + ) + .summary("Update repository") + .description("Update an existing repository."); + +// Allocation endpoints +router + .post("/alloc/create", (req, res) => { + try { + const findResult = RepositoryOps.find(req.body.repo); + if (!findResult.ok) throw [findResult.error.code, findResult.error.message]; + + const repository = findResult.value; + const client = g_lib.getUserFromClientID(req.queryParams.client); + + const permResult = RepositoryOps.checkPermission(repository, client._id, "allocate"); + if (!permResult.ok || !permResult.value) throw g_lib.ERR_PERM_DENIED; + + const allocResult = RepositoryOps.createAllocation(repository, req.body); + if (!allocResult.ok) throw [allocResult.error.code, allocResult.error.message]; + + res.send(allocResult.value); + } catch (e) { + g_lib.handleException(e, res); + } + }) + .queryParam("client", joi.string().required(), "Client ID") + .body( + joi + .object({ + repo: joi.string().required(), + subject: joi.string().required(), + size: joi.number().integer().min(1).required(), + path: joi.string().optional(), + metadata: joi.object().optional(), + }) + .required(), + "Allocation parameters", + ) + .summary("Create allocation") + .description("Create an allocation in a repository."); + +router + .post("/alloc/delete", (req, res) => { + try { + const findResult = RepositoryOps.find(req.body.repo); + if (!findResult.ok) throw 
[findResult.error.code, findResult.error.message]; + + const repository = findResult.value; + const client = g_lib.getUserFromClientID(req.queryParams.client); + + const permResult = RepositoryOps.checkPermission(repository, client._id, "allocate"); + if (!permResult.ok || !permResult.value) throw g_lib.ERR_PERM_DENIED; + + const deleteResult = RepositoryOps.deleteAllocation(repository, req.body.subject); + if (!deleteResult.ok) throw [deleteResult.error.code, deleteResult.error.message]; + + res.send(deleteResult.value); + } catch (e) { + g_lib.handleException(e, res); + } + }) + .queryParam("client", joi.string().required(), "Client ID") + .body( + joi + .object({ + repo: joi.string().required(), + subject: joi.string().required(), + }) + .required(), + "Deletion parameters", + ) + .summary("Delete allocation") + .description("Delete an allocation from a repository."); diff --git a/core/database/foxx/api/repository/example.js b/core/database/foxx/api/repository/example.js new file mode 100644 index 000000000..851fc9df4 --- /dev/null +++ b/core/database/foxx/api/repository/example.js @@ -0,0 +1,231 @@ +"use strict"; + +/** + * Example usage of the new repository type system + * Demonstrates Rust-compatible patterns in JavaScript + */ + +const { RepositoryType, Result } = require("./types"); +const { createRepositoryByType } = require("./factory"); +const { RepositoryOps } = require("./operations"); + +/** + * Example 1: Creating repositories using factory pattern + * Factory pattern is common in Rust for complex object construction + * @returns {Promise<{globus: *, metadata: *}>} object containing created repositories + * @see https://doc.rust-lang.org/book/ch17-03-oo-design-patterns.html + */ +async function createRepositoryExample() { + // Create a Globus repository + const globusConfig = { + id: "science_data_repo", + type: RepositoryType.GLOBUS, + title: "Science Data Repository", + desc: "Repository for scientific datasets", + capacity: 10000000000, // 10GB + admins: ["u/scientist1", "u/scientist2"], + pub_key: "123ABC...", + address: "data.science.org", + endpoint: "endpoint-abc123", + path: "/mnt/storage/repos/science_data_repo", + domain: "science.org", + }; + + const globusResult = createRepositoryByType(globusConfig); + if (!globusResult.ok) { + console.error("Failed to create Globus repo:", globusResult.error); + return; + } + + // Create a metadata-only repository + const metadataConfig = { + id: "metadata_catalog", + type: RepositoryType.METADATA_ONLY, + title: "Metadata Catalog", + desc: "Repository for metadata records only", + capacity: 1000000, // Logical limit for records + admins: ["u/cataloger1"], + }; + + const metadataResult = createRepositoryByType(metadataConfig); + if (!metadataResult.ok) { + console.error("Failed to create metadata repo:", metadataResult.error); + return; + } + + return { globus: globusResult.value, metadata: metadataResult.value }; +} + +/** + * Example 2: Using trait-like operations + * Using traits allows polymorphic behavior without knowing concrete types + * @returns {Promise<*>} Allocation result + * @see https://doc.rust-lang.org/book/ch10-02-traits.html#traits-as-parameters + */ +async function useRepositoryOperations() { + // Find a repository + const findResult = RepositoryOps.find("repo/science_data_repo"); + if (!findResult.ok) { + console.error("Repository not found:", findResult.error); + return; + } + + const repository = findResult.value; + + // Check if it supports data operations + const supportsDataResult = 
RepositoryOps.supportsDataOperations(repository); + if (supportsDataResult.ok && supportsDataResult.value) { + console.log("Repository supports data operations"); + } + + // Create an allocation + const allocResult = RepositoryOps.createAllocation(repository, { + subject: "d/dataset_001", + size: 1000000000, // 1GB + path: "/datasets/2024/dataset_001", + metadata: { + project: "Climate Research", + created_by: "Dr. Smith", + }, + }); + + if (!allocResult.ok) { + console.error("Allocation failed:", allocResult.error); + return; + } + + // Handle result based on execution method + const allocation = allocResult.value; + if (allocation.execution_method === "task") { + console.log("Allocation queued as task:", allocation.task.task_id); + // Would monitor task progress... + } else { + console.log("Allocation completed directly:", allocation.result); + } + + return allocation; +} + +/** + * Example 3: Pattern matching on repository types + * Pattern matching is fundamental in Rust for handling enum variants + * @param {object} repository - Repository object with type and data fields + * @returns {{ok: boolean, error?: *, value?: *}} Result of handling repository + * @see https://doc.rust-lang.org/book/ch06-02-match.html + */ +function handleRepositoryByType(repository) { + // Similar to Rust match expression + switch (repository.type) { + case RepositoryType.GLOBUS: + return handleGlobusRepository(repository.data); + + case RepositoryType.METADATA_ONLY: + return handleMetadataRepository(repository.data); + + default: + return Result.err({ + code: 400, + message: `Unknown repository type: ${repository.type}`, + }); + } +} + +function handleGlobusRepository(repoData) { + console.log(`Globus repository at ${repoData.endpoint}`); + // Globus-specific logic... + return Result.ok({ + type: "globus", + endpoint: repoData.endpoint, + path: repoData.path, + }); +} + +function handleMetadataRepository(repoData) { + console.log(`Metadata-only repository: ${repoData.title}`); + // Metadata-specific logic... + return Result.ok({ + type: "metadata", + record_limit: repoData.capacity, + }); +} + +/** + * Example 4: Error handling with Result pattern + * Early returns emulate Rust's ? operator for error propagation + * @returns {Promise<{ok: boolean, value?: *, error?: *}>} Result of operations + * @see https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html#a-shortcut-for-propagating-errors-the--operator + */ +async function robustRepositoryOperation() { + // Chain operations with early return on error + const findResult = RepositoryOps.find("repo/test_repo"); + if (!findResult.ok) { + return findResult; // Propagate error - like ? 
in Rust + } + + const repository = findResult.value; + + const validateResult = RepositoryOps.validate(repository); + if (!validateResult.ok) { + return validateResult; // Propagate error + } + + const capacityResult = RepositoryOps.getCapacityInfo(repository); + if (!capacityResult.ok) { + return capacityResult; // Propagate error + } + + // All operations succeeded + return Result.ok({ + repository: repository, + capacity: capacityResult.value, + }); +} + +/** + * Example 5: Composition over inheritance + * Rust doesn't have inheritance - prefer composition of behaviors + * @type {object} + * @property {function(object, string): void} logAccess - Log repository access + * @property {function(object, number): {ok: boolean, error?: *, value?: *}} checkQuota - Check repository quota + * @property {function(object, object): Promise<{ok: boolean, error?: *, value?: *}>} allocateWithQuotaCheck - Allocate with quota check + * @see https://doc.rust-lang.org/book/ch17-03-oo-design-patterns.html + */ +const RepositoryBehaviors = { + // Shared behaviors as standalone functions + logAccess: (repository, userId) => { + console.log(`User ${userId} accessed repository ${repository.data._id}`); + }, + + checkQuota: (repository, requestedSize) => { + const capacityResult = RepositoryOps.getCapacityInfo(repository); + if (!capacityResult.ok) return capacityResult; + + const capacity = capacityResult.value; + if (capacity.available_capacity < requestedSize) { + return Result.err({ + code: 507, + message: "Insufficient storage capacity", + }); + } + return Result.ok(true); + }, + + // Type-specific behaviors composed from shared ones + allocateWithQuotaCheck: async (repository, params) => { + // Compose behaviors + const quotaResult = RepositoryBehaviors.checkQuota(repository, params.size); + if (!quotaResult.ok) return quotaResult; + + RepositoryBehaviors.logAccess(repository, params.requested_by); + + return RepositoryOps.createAllocation(repository, params); + }, +}; + +module.exports = { + createRepositoryExample, + useRepositoryOperations, + handleRepositoryByType, + robustRepositoryOperation, + RepositoryBehaviors, +}; diff --git a/core/database/foxx/api/repository/factory.js b/core/database/foxx/api/repository/factory.js new file mode 100644 index 000000000..4afb2c82b --- /dev/null +++ b/core/database/foxx/api/repository/factory.js @@ -0,0 +1,164 @@ +"use strict"; + +const { + RepositoryType, + Result, + createRepository, + createRepositoryData, + createGlobusConfig, +} = require("./types"); +const { validateGlobusConfig, validateMetadataConfig } = require("./validation"); +const globusRepo = require("./globus"); +const metadataRepo = require("./metadata"); +const g_lib = require("../support"); + +/** + * Repository factory using Rust-compatible patterns + * Uses switch/case for type-based polymorphism instead of inheritance + */ + +/** + * Create repository based on type (similar to Rust match expression) + * Rust's match expression provides exhaustive pattern matching + * JavaScript's switch is used here to emulate this pattern + * @param {object} config - Repository configuration object + * @param {string} config.id - Repository ID + * @param {string} config.type - Repository type (from RepositoryType enum) + * @param {string} config.title - Repository title + * @param {string} [config.desc] - Repository description + * @param {number} config.capacity - Storage capacity in bytes + * @param {string[]} config.admins - Array of admin user IDs + * @param {string} [config.endpoint] - Globus endpoint 
(required for GLOBUS type) + * @param {string} [config.path] - File path (required for GLOBUS type) + * @param {string} [config.pub_key] - Public SSH key (required for GLOBUS type) + * @param {string} [config.address] - Network address (required for GLOBUS type) + * @param {string} [config.exp_path] - Export path (optional for GLOBUS type) + * @param {string} [config.domain] - Domain name (required for GLOBUS type) + * @returns {{ok: boolean, error: *}|{ok: boolean, value: *}} Result object containing repository or error + * @see https://doc.rust-lang.org/book/ch06-02-match.html + */ +const createRepositoryByType = (config) => { + // Validate common fields + if (!config.id || !config.type || !config.title || !config.capacity || !config.admins) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: "Missing required repository fields", + }); + } + + /** + * Type-based creation using switch (Rust match pattern) + * Each case is like a match arm in Rust, handling a specific variant + * @see https://doc.rust-lang.org/book/ch18-03-pattern-syntax.html + */ + switch (config.type) { + case RepositoryType.GLOBUS: { + const validationResult = validateGlobusConfig(config); + if (!validationResult.ok) { + return validationResult; + } + + const globusConfig = createGlobusConfig({ + endpoint: config.endpoint, + path: config.path, + pub_key: config.pub_key, + address: config.address, + exp_path: config.exp_path, + domain: config.domain, + }); + + const repoData = createRepositoryData({ + id: config.id, + type: config.type, + title: config.title, + desc: config.desc, + capacity: config.capacity, + admins: config.admins, + typeSpecific: globusConfig, + }); + + return Result.ok(createRepository(RepositoryType.GLOBUS, repoData)); + } + + case RepositoryType.METADATA_ONLY: { + const validationResult = validateMetadataConfig(config); + if (!validationResult.ok) { + return validationResult; + } + + const repoData = createRepositoryData({ + id: config.id, + type: config.type, + title: config.title, + desc: config.desc, + capacity: config.capacity, + admins: config.admins, + }); + + return Result.ok(createRepository(RepositoryType.METADATA_ONLY, repoData)); + } + + default: + /** + * In Rust, match must be exhaustive - all cases must be handled + * The default case ensures we handle unknown variants + * @see https://doc.rust-lang.org/book/ch06-02-match.html#matching-with-option-t + */ + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `Unknown repository type: ${config.type}`, + }); + } +}; + +/** + * Get repository implementation based on type + * This emulates Rust's trait object dynamic dispatch + * @param {string} repositoryType - Repository type from RepositoryType enum + * @returns {object|null} Repository implementation object or null if not found + * @see https://doc.rust-lang.org/book/ch17-02-trait-objects.html + */ +const getRepositoryImplementation = (repositoryType) => { + switch (repositoryType) { + case RepositoryType.GLOBUS: + return globusRepo; + case RepositoryType.METADATA_ONLY: + return metadataRepo; + default: + return null; + } +}; + +/** + * Execute operation on repository using dynamic dispatch + * This pattern emulates Rust's trait method dispatch + * @param {object} repository - Repository object with type and data fields + * @param {string} operation - Operation name to execute + * @param {...*} args - Additional arguments to pass to the operation + * @returns {{ok: boolean, error: *}|*} Result of the operation + * @see 
https://doc.rust-lang.org/book/ch17-02-trait-objects.html#trait-objects-perform-dynamic-dispatch + */ +const executeRepositoryOperation = (repository, operation, ...args) => { + const impl = getRepositoryImplementation(repository.type); + if (!impl) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `No implementation for repository type: ${repository.type}`, + }); + } + + if (typeof impl[operation] !== "function") { + return Result.err({ + code: g_lib.ERR_NOT_IMPLEMENTED, + message: `Operation '${operation}' not implemented for type: ${repository.type}`, + }); + } + + return impl[operation](repository.data, ...args); +}; + +module.exports = { + createRepositoryByType, + getRepositoryImplementation, + executeRepositoryOperation, +}; diff --git a/core/database/foxx/api/repository/globus.js b/core/database/foxx/api/repository/globus.js new file mode 100644 index 000000000..9e716a930 --- /dev/null +++ b/core/database/foxx/api/repository/globus.js @@ -0,0 +1,131 @@ +"use strict"; + +const { Result, ExecutionMethod, createAllocationResult } = require("./types"); +const { validateAllocationParams } = require("./validation"); +const g_tasks = require("../tasks"); +const g_lib = require("../support"); + +/** + * @module globus + * Globus repository implementation + * Implements repository operations specific to Globus-backed repositories + */ + +/** + * This module acts like a trait implementation for the Globus repository type + * Each function implements a trait method for this specific type + * @see https://doc.rust-lang.org/book/ch10-02-traits.html#implementing-a-trait-on-a-type + */ + +// Validate Globus repository (already validated in factory) +const validate = (repoData) => { + return Result.ok(true); +}; + +// Create allocation in Globus repository (async via task) +const createAllocation = (repoData, params) => { + // Validate allocation parameters + const validationResult = validateAllocationParams(params); + if (!validationResult.ok) { + return validationResult; + } + + try { + // Create task for async Globus allocation + const task = g_tasks.repoAllocationCreateTask({ + repo_id: repoData._id, + subject: params.subject, + size: params.size, + path: params.path || null, + metadata: params.metadata || {}, + }); + + return Result.ok( + createAllocationResult(ExecutionMethod.TASK, { + task_id: task.task_id, + status: task.status, + queue_time: task.queue_time, + }), + ); + } catch (e) { + return Result.err({ + code: g_lib.ERR_INTERNAL_FAULT, + message: `Failed to create allocation task: ${e.message}`, + }); + } +}; + +// Delete allocation from Globus repository (async via task) +const deleteAllocation = (repoData, subjectId) => { + if (!subjectId || typeof subjectId !== "string") { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: "Subject ID is required for allocation deletion", + }); + } + + try { + // Create task for async Globus allocation deletion + const task = g_tasks.repoAllocationDeleteTask({ + repo_id: repoData._id, + subject: subjectId, + }); + + return Result.ok( + createAllocationResult(ExecutionMethod.TASK, { + task_id: task.task_id, + status: task.status, + queue_time: task.queue_time, + }), + ); + } catch (e) { + return Result.err({ + code: g_lib.ERR_INTERNAL_FAULT, + message: `Failed to create deletion task: ${e.message}`, + }); + } +}; + +// Globus repositories support data operations +const supportsDataOperations = (repoData) => { + return Result.ok(true); +}; + +// Get capacity information for Globus repository +const getCapacityInfo = 
(repoData) => { + try { + // For Globus repos, we'd typically query the actual filesystem + // For now, return the configured capacity + return Result.ok({ + total_capacity: repoData.capacity, + used_capacity: 0, // Would be populated from actual usage + available_capacity: repoData.capacity, + supports_quotas: true, + }); + } catch (e) { + return Result.err({ + code: g_lib.ERR_INTERNAL_FAULT, + message: `Failed to get capacity info: ${e.message}`, + }); + } +}; + +/** + * Export all operations (trait implementation) + * These exports define the trait implementation for Globus repository type + * allowing polymorphic behavior through dynamic dispatch + * @type {object} + * @property {function(object): {ok: boolean, value: boolean}} validate - Validate Globus repository + * @property {function(object, object): {ok: boolean, error?: *, value?: *}} createAllocation - Create allocation in Globus repository + * @property {function(object, string): {ok: boolean, error?: *, value?: *}} deleteAllocation - Delete allocation from Globus repository + * @property {function(object): {ok: boolean, value: boolean}} supportsDataOperations - Check if supports data operations + * @property {function(object): {ok: boolean, error?: *, value?: *}} getCapacityInfo - Get capacity information + * @see https://doc.rust-lang.org/book/ch17-02-trait-objects.html + */ +module.exports = { + validate, + createAllocation, + deleteAllocation, + supportsDataOperations, + getCapacityInfo, +}; diff --git a/core/database/foxx/api/repository/metadata.js b/core/database/foxx/api/repository/metadata.js new file mode 100644 index 000000000..2bf21c555 --- /dev/null +++ b/core/database/foxx/api/repository/metadata.js @@ -0,0 +1,136 @@ +"use strict"; + +const { Result, ExecutionMethod, createAllocationResult } = require("./types"); +const { validateAllocationParams } = require("./validation"); +const g_lib = require("../support"); + +/** + * @module metadata + * @description Metadata-only repository implementation + * Implements repository operations for repositories that only store metadata without actual data storage backend + */ + +/** + * This module provides a different trait implementation for metadata repositories + * demonstrating how the same trait can have different implementations per type + * @see https://doc.rust-lang.org/book/ch10-02-traits.html#implementing-a-trait-on-a-type + */ + +// Validate metadata repository (already validated in factory) +const validate = (repoData) => { + return Result.ok(true); +}; + +// Create allocation in metadata repository (direct/synchronous) +const createAllocation = (repoData, params) => { + // Validate allocation parameters + const validationResult = validateAllocationParams(params); + if (!validationResult.ok) { + return validationResult; + } + + try { + // For metadata-only repos, allocations are just database records + // No actual storage allocation happens + const allocation = { + _key: `alloc_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, + repo_id: repoData._id, + subject: params.subject, + size: params.size, + path: params.path || `/${params.subject}`, + metadata: params.metadata || {}, + created: new Date().toISOString(), + type: "metadata_only", + }; + + // Save to allocations collection (would need to be created) + // For now, return success with the allocation data + const result = { + allocation_id: allocation._key, + repo_id: allocation.repo_id, + subject: allocation.subject, + size: allocation.size, + path: allocation.path, + status: "completed", + }; + + 
return Result.ok(createAllocationResult(ExecutionMethod.DIRECT, result)); + } catch (e) { + return Result.err({ + code: g_lib.ERR_INTERNAL_FAULT, + message: `Failed to create metadata allocation: ${e.message}`, + }); + } +}; + +// Delete allocation from metadata repository (direct/synchronous) +const deleteAllocation = (repoData, subjectId) => { + if (!subjectId || typeof subjectId !== "string") { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: "Subject ID is required for allocation deletion", + }); + } + + try { + // For metadata-only repos, just remove the database record + // No actual storage deallocation needed + const result = { + repo_id: repoData._id, + subject: subjectId, + status: "completed", + message: "Metadata allocation removed", + }; + + return Result.ok(createAllocationResult(ExecutionMethod.DIRECT, result)); + } catch (e) { + return Result.err({ + code: g_lib.ERR_INTERNAL_FAULT, + message: `Failed to delete metadata allocation: ${e.message}`, + }); + } +}; + +// Metadata repositories do NOT support data operations +const supportsDataOperations = (repoData) => { + return Result.ok(false); +}; + +// Get capacity information for metadata repository +const getCapacityInfo = (repoData) => { + try { + // Metadata repos have logical capacity limits, not physical + return Result.ok({ + total_capacity: repoData.capacity, + used_capacity: 0, // Would track metadata record count/size + available_capacity: repoData.capacity, + supports_quotas: false, + is_metadata_only: true, + }); + } catch (e) { + return Result.err({ + code: g_lib.ERR_INTERNAL_FAULT, + message: `Failed to get capacity info: ${e.message}`, + }); + } +}; + +/** + * Export all operations (trait implementation) + * These exports define the trait implementation for metadata repository type + * Note how the same interface has different behavior than Globus implementation + * @type {object} + * @property {function(object): {ok: boolean, value: boolean}} validate - Validate metadata repository + * @property {function(object, object): {ok: boolean, error?: *, value?: *}} createAllocation - Create allocation in metadata repository + * @property {function(object, string): {ok: boolean, error?: *, value?: *}} deleteAllocation - Delete allocation from metadata repository + * @property {function(object): {ok: boolean, value: boolean}} supportsDataOperations - Check if supports data operations + * @property {function(object): {ok: boolean, error?: *, value?: *}} getCapacityInfo - Get capacity information + * @see https://doc.rust-lang.org/book/ch17-02-trait-objects.html + */ +module.exports = { + validate, + createAllocation, + deleteAllocation, + supportsDataOperations, + getCapacityInfo, +}; diff --git a/core/database/foxx/api/repository/operations.js b/core/database/foxx/api/repository/operations.js new file mode 100644 index 000000000..217a46914 --- /dev/null +++ b/core/database/foxx/api/repository/operations.js @@ -0,0 +1,158 @@ +"use strict"; + +const { Result } = require("./types"); +const { executeRepositoryOperation } = require("./factory"); +const g_db = require("@arangodb").db; + +/** + * Trait-like repository operations following Rust patterns + * All operations take repository as first parameter (like Rust &self) + * Operations return Result types for error handling + */ + +/** + * Repository operations following Rust trait patterns + * @type {object} + * @property {function(object): {ok: boolean, error?: *, value?: *}} validate - Validate repository configuration + * @property {function(object, 
object): {ok: boolean, error?: *, value?: *}} createAllocation - Create allocation for repository + * @property {function(object, string): {ok: boolean, error?: *, value?: *}} deleteAllocation - Delete allocation from repository + * @property {function(object): {ok: boolean, error?: *, value?: *}} supportsDataOperations - Check if repository supports data operations + * @property {function(object): {ok: boolean, error?: *, value?: *}} getCapacityInfo - Get repository capacity information + * @property {function(object): {ok: boolean, error?: *, value?: *}} save - Save repository to database + * @property {function(object, object): {ok: boolean, error?: *, value?: *}} update - Update repository in database + * @property {function(string): {ok: boolean, error?: *, value?: *}} find - Find repository by ID + * @property {function(object=): {ok: boolean, error?: *, value?: *}} list - List repositories with optional filter + * @property {function(object, string, string): {ok: boolean, value: boolean}} checkPermission - Check repository permissions + * @see https://doc.rust-lang.org/book/ch10-02-traits.html + * @description Traits define shared behavior in an abstract way + * @see https://doc.rust-lang.org/book/ch05-03-method-syntax.html + * @description The first parameter acts like &self in Rust methods + */ +const RepositoryOps = { + // Validate repository configuration + validate: (repository) => { + return executeRepositoryOperation(repository, "validate"); + }, + + // Create allocation for repository + createAllocation: (repository, allocationParams) => { + return executeRepositoryOperation(repository, "createAllocation", allocationParams); + }, + + // Delete allocation from repository + deleteAllocation: (repository, subjectId) => { + return executeRepositoryOperation(repository, "deleteAllocation", subjectId); + }, + + // Check if repository supports data operations + supportsDataOperations: (repository) => { + return executeRepositoryOperation(repository, "supportsDataOperations"); + }, + + // Get repository capacity information + getCapacityInfo: (repository) => { + return executeRepositoryOperation(repository, "getCapacityInfo"); + }, + + // Save repository to database + save: (repository) => { + try { + const saved = g_db.repo.save(repository.data, { returnNew: true }); + return Result.ok(saved.new); + } catch (e) { + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to save repository", + }); + } + }, + + // Update repository in database + update: (repository, updates) => { + try { + const updated = g_db.repo.update(repository.data._key, updates, { returnNew: true }); + return Result.ok(updated.new); + } catch (e) { + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to update repository", + }); + } + }, + + /** + * Find repository by ID + * This is an associated function (doesn't take self) + * @param {string} repoId - Repository ID (with or without "repo/" prefix) + * @returns {{ok: boolean, error?: *, value?: *}} Result containing repository or error + * @see https://doc.rust-lang.org/book/ch05-03-method-syntax.html#associated-functions + */ + find: (repoId) => { + try { + const key = repoId.startsWith("repo/") ? 
repoId.slice(5) : repoId; + const repo = g_db.repo.document(key); + + // Return as tagged union based on type + return Result.ok({ + type: repo.type, + data: repo, + }); + } catch (e) { + if (e.errorNum === 1202) { + // Document not found + return Result.err({ + code: 404, + message: `Repository not found: ${repoId}`, + }); + } + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to find repository", + }); + } + }, + + // List repositories with optional filter + list: (filter = {}) => { + try { + let query = "FOR r IN repo"; + const bindVars = {}; + + if (filter.type) { + query += " FILTER r.type == @type"; + bindVars.type = filter.type; + } + + if (filter.admin) { + query += " FILTER @admin IN r.admins"; + bindVars.admin = filter.admin; + } + + query += " RETURN r"; + + const results = g_db._query(query, bindVars).toArray(); + return Result.ok( + results.map((repo) => ({ + type: repo.type, + data: repo, + })), + ); + } catch (e) { + return Result.err({ + code: e.errorNum || 500, + message: e.errorMessage || "Failed to list repositories", + }); + } + }, + + // Check repository permissions + checkPermission: (repository, userId, permission) => { + // Simple admin check for now - can be extended + if (repository.data.admins && repository.data.admins.includes(userId)) { + return Result.ok(true); + } + return Result.ok(false); + }, +}; + +module.exports = { RepositoryOps }; diff --git a/core/database/foxx/api/repository/types.js b/core/database/foxx/api/repository/types.js new file mode 100644 index 000000000..ad42ba67f --- /dev/null +++ b/core/database/foxx/api/repository/types.js @@ -0,0 +1,129 @@ +"use strict"; + +/** + * Repository type system using Rust-compatible patterns + * This module defines types as enum-like constants and data structures + * following composition over inheritance principles + */ + +/** + * Repository type enum (similar to Rust enum) + * In Rust, enums are used to define a type that can be one of several variants + * @type {Readonly<{GLOBUS: string, METADATA_ONLY: string}>} + * @see https://doc.rust-lang.org/book/ch06-01-defining-an-enum.html + */ +const RepositoryType = Object.freeze({ + GLOBUS: "globus", + METADATA_ONLY: "metadata_only", +}); + +/** + * Result type for Rust-like error handling + * Rust's Result type is used for recoverable errors + * This pattern makes error handling explicit and composable + * @type {{ok: (function(*): {ok: boolean, value: *}), err: (function(*): {ok: boolean, error: *})}} + * @see https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html + */ +const Result = { + ok: (value) => ({ ok: true, value }), + err: (error) => ({ ok: false, error }), +}; + +/** + * Repository structure using composition + * Rust favors composition over inheritance - structs contain data, traits define behavior + * @param {object} config - Configuration object + * @param {string} config.id - Repository ID + * @param {string} config.type - Repository type (globus or metadata_only) + * @param {string} config.title - Repository title + * @param {string} [config.desc] - Repository description + * @param {number} config.capacity - Storage capacity in bytes + * @param {string[]} config.admins - Array of admin user IDs + * @param {object} [config.typeSpecific={}] - Type-specific configuration fields + * @returns {{_key: string, _id: string, type: string, title: string, desc: string, capacity: number, admins: string[]}} Repository data object with ArangoDB fields + * @see 
https://doc.rust-lang.org/book/ch05-01-defining-structs.html + */ +const createRepositoryData = ({ + id, + type, + title, + desc, + capacity, + admins, + // Type-specific fields handled through composition + typeSpecific = {}, +}) => ({ + _key: id, + _id: `repo/${id}`, + type, + title, + desc, + capacity, + admins, + ...typeSpecific, +}); + +/** + * Globus-specific configuration + * @param {object} config - Globus configuration object + * @param {string} config.endpoint - Globus endpoint identifier + * @param {string} config.path - Repository path on filesystem + * @param {string} config.pub_key - Public SSH key for authentication + * @param {string} config.address - Network address + * @param {string} [config.exp_path] - Export path + * @param {string} config.domain - Domain name + * @returns {{endpoint: string, path: string, pub_key: string, address: string, exp_path: string, domain: string}} Globus configuration object + */ +const createGlobusConfig = ({ endpoint, path, pub_key, address, exp_path, domain }) => ({ + endpoint, + path, + pub_key, + address, + exp_path, + domain, +}); + +/** + * Tagged union for repositories (type + data) + * Rust enums can contain data, creating tagged unions (also called algebraic data types) + * This pattern enables type-safe polymorphism without inheritance + * @param {string} type - Repository type (from RepositoryType enum) + * @param {object} data - Repository data object + * @returns {{type: string, data: object}} Tagged union with type and data fields + * @see https://doc.rust-lang.org/book/ch06-01-defining-an-enum.html#enum-values + */ +const createRepository = (type, data) => ({ + type, + data, +}); + +/** + * Allocation execution methods + * Another enum-like constant representing different execution strategies + * @type {Readonly<{TASK: string, DIRECT: string}>} + */ +const ExecutionMethod = Object.freeze({ + TASK: "task", + DIRECT: "direct", +}); + +/** + * Allocation result structure + * @param {string} method - Execution method (TASK or DIRECT) + * @param {object} payload - Result payload (task info or direct result) + * @returns {{execution_method: string, task?: object, result?: object}} Allocation result with execution method and appropriate payload + */ +const createAllocationResult = (method, payload) => ({ + execution_method: method, + ...(method === ExecutionMethod.TASK ? 
{ task: payload } : { result: payload }), +}); + +module.exports = { + RepositoryType, + Result, + ExecutionMethod, + createRepositoryData, + createGlobusConfig, + createRepository, + createAllocationResult, +}; diff --git a/core/database/foxx/api/repository/validation.js b/core/database/foxx/api/repository/validation.js new file mode 100644 index 000000000..69b85a5ad --- /dev/null +++ b/core/database/foxx/api/repository/validation.js @@ -0,0 +1,226 @@ +"use strict"; + +const { Result } = require("./types"); +const g_lib = require("../support"); + +/** + * Standalone validation functions following Rust patterns + * Pure functions that return Result types for error handling + * + * See: https://doc.rust-lang.org/book/ch03-03-how-functions-work.html + * Functions in Rust are expressions that can return values + * + * See: https://doc.rust-lang.org/book/ch09-00-error-handling.html + * Rust emphasizes explicit error handling through Result types + */ + +// Validate that a value is a non-empty string +// Reusable helper following DRY principle +const validateNonEmptyString = (value, fieldName) => { + if (!value || typeof value !== "string" || value.trim() === "") { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `${fieldName} is required and must be a non-empty string`, + }); + } + return Result.ok(true); +}; + +// Validate common repository fields +// Pure function - no side effects, deterministic output +const validateCommonFields = (config) => { + const errors = []; + + const idValidation = validateNonEmptyString(config.id, "Repository ID"); + if (!idValidation.ok) { + errors.push(idValidation.error.message); + } + + const titleValidation = validateNonEmptyString(config.title, "Repository title"); + if (!titleValidation.ok) { + errors.push(titleValidation.error.message); + } + + if (typeof config.capacity !== "number" || config.capacity <= 0) { + errors.push("Repository capacity must be a positive number"); + } + + if (!Array.isArray(config.admins) || config.admins.length === 0) { + errors.push("Repository must have at least one admin"); + } + + if (errors.length > 0) { + // See: https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html#propagating-errors + // Early return with error - similar to Rust's ? operator + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: errors.join("; "), + }); + } + + return Result.ok(true); +}; + +// Validate POSIX path format +const validatePOSIXPath = (path, fieldName) => { + if (!path || typeof path !== "string") { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `${fieldName} must be a non-empty string`, + }); + } + + if (!path.startsWith("/")) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `${fieldName} must be an absolute path (start with '/')`, + }); + } + + // Check for invalid characters in path + if (path.includes("..") || path.includes("//")) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `${fieldName} contains invalid path sequences`, + }); + } + + return Result.ok(true); +}; + +// Validate repository path ends with ID +const validateRepositoryPath = (path, repoId) => { + const pathResult = validatePOSIXPath(path, "Repository path"); + if (!pathResult.ok) { + return pathResult; + } + + // Ensure path ends with / + const normalizedPath = path.endsWith("/") ? 
path : path + "/"; + + // Extract last component + const idx = normalizedPath.lastIndexOf("/", normalizedPath.length - 2); + const lastComponent = normalizedPath.slice(idx + 1, normalizedPath.length - 1); + + if (lastComponent !== repoId) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `Repository path must end with repository ID (${repoId})`, + }); + } + + return Result.ok(true); +}; + +// Validate Globus-specific configuration +const validateGlobusConfig = (config) => { + const commonResult = validateCommonFields(config); + if (!commonResult.ok) { + return commonResult; + } + + const errors = []; + + // Validate required Globus fields + const pubKeyValidation = validateNonEmptyString(config.pub_key, "Public key"); + if (!pubKeyValidation.ok) { + errors.push(pubKeyValidation.error.message); + } + + const addressValidation = validateNonEmptyString(config.address, "Address"); + if (!addressValidation.ok) { + errors.push(addressValidation.error.message); + } + + const endpointValidation = validateNonEmptyString(config.endpoint, "Endpoint"); + if (!endpointValidation.ok) { + errors.push(endpointValidation.error.message); + } + + const domainValidation = validateNonEmptyString(config.domain, "Domain"); + if (!domainValidation.ok) { + errors.push(domainValidation.error.message); + } + + if (errors.length > 0) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: errors.join("; "), + }); + } + + // Validate repository path + const pathResult = validateRepositoryPath(config.path, config.id); + if (!pathResult.ok) { + return pathResult; + } + + // Validate export path if provided + if (config.exp_path) { + const expPathResult = validatePOSIXPath(config.exp_path, "Export path"); + if (!expPathResult.ok) { + return expPathResult; + } + } + + return Result.ok(true); +}; + +// Validate metadata-only repository configuration +const validateMetadataConfig = (config) => { + const commonResult = validateCommonFields(config); + if (!commonResult.ok) { + return commonResult; + } + + // Metadata repositories don't need Globus-specific fields + // But should not have them either + const invalidFields = ["pub_key", "address", "endpoint", "path", "exp_path", "domain"]; + const presentInvalidFields = invalidFields.filter((field) => config[field] !== undefined); + + if (presentInvalidFields.length > 0) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: `Metadata-only repositories should not have: ${presentInvalidFields.join(", ")}`, + }); + } + + return Result.ok(true); +}; + +// Validate allocation parameters +const validateAllocationParams = (params) => { + const errors = []; + + const subjectValidation = validateNonEmptyString(params.subject, "Allocation subject"); + if (!subjectValidation.ok) { + errors.push(subjectValidation.error.message); + } + + if (typeof params.size !== "number" || params.size <= 0) { + errors.push("Allocation size must be a positive number"); + } + + if (params.path && typeof params.path !== "string") { + errors.push("Allocation path must be a string if provided"); + } + + if (errors.length > 0) { + return Result.err({ + code: g_lib.ERR_INVALID_PARAM, + message: errors.join("; "), + }); + } + + return Result.ok(true); +}; + +module.exports = { + validateNonEmptyString, + validateCommonFields, + validatePOSIXPath, + validateRepositoryPath, + validateGlobusConfig, + validateMetadataConfig, + validateAllocationParams, +}; diff --git a/core/database/foxx/tests/repository_types.test.js b/core/database/foxx/tests/repository_types.test.js new 
file mode 100644 index 000000000..65c02016a --- /dev/null +++ b/core/database/foxx/tests/repository_types.test.js @@ -0,0 +1,824 @@ +"use strict"; + +const chai = require("chai"); +const { expect } = chai; +const g_db = require("@arangodb").db; +const g_lib = require("../api/support"); + +// Import all modules to test +const { + RepositoryType, + Result, + ExecutionMethod, + createRepositoryData, + createGlobusConfig, + createRepository, + createAllocationResult, +} = require("../api/repository/types"); + +const { + validateNonEmptyString, + validateCommonFields, + validatePOSIXPath, + validateRepositoryPath, + validateGlobusConfig, + validateMetadataConfig, + validateAllocationParams, +} = require("../api/repository/validation"); + +const { + createRepositoryByType, + getRepositoryImplementation, + executeRepositoryOperation, +} = require("../api/repository/factory"); + +const { RepositoryOps } = require("../api/repository/operations"); +const globusImpl = require("../api/repository/globus"); +const metadataImpl = require("../api/repository/metadata"); + +describe("Repository Type System Tests", () => { + // Clean up database before each test + beforeEach(() => { + if (g_db._collection("repo")) g_db.repo.truncate(); + if (g_db._collection("alloc")) g_db.alloc.truncate(); + if (g_db._collection("task")) g_db.task.truncate(); + if (g_db._collection("test_allocations")) { + g_db._drop("test_allocations"); + } + }); + + describe("Types Module", () => { + it("unit_types: RepositoryType enum should have correct values", () => { + expect(RepositoryType.GLOBUS).to.equal("globus"); + expect(RepositoryType.METADATA_ONLY).to.equal("metadata_only"); + expect(Object.keys(RepositoryType).length).to.equal(2); + }); + + it("unit_types: RepositoryType enum should be immutable", () => { + expect(() => { + RepositoryType.NEW_TYPE = "new"; + }).to.throw(); + expect(() => { + RepositoryType.GLOBUS = "modified"; + }).to.throw(); + }); + + it("unit_types: Result type should create ok results", () => { + const result = Result.ok("success"); + expect(result.ok).to.be.true; + expect(result.value).to.equal("success"); + expect(result.error).to.be.undefined; + }); + + it("unit_types: Result type should create error results", () => { + const error = { code: 404, message: "Not found" }; + const result = Result.err(error); + expect(result.ok).to.be.false; + expect(result.error).to.deep.equal(error); + expect(result.value).to.be.undefined; + }); + + it("unit_types: ExecutionMethod enum should have correct values", () => { + expect(ExecutionMethod.TASK).to.equal("task"); + expect(ExecutionMethod.DIRECT).to.equal("direct"); + }); + + it("unit_types: createRepositoryData should create proper structure", () => { + const data = createRepositoryData({ + id: "test_repo", + type: RepositoryType.GLOBUS, + title: "Test Repository", + desc: "Description", + capacity: 1000000, + admins: ["u/admin1"], + typeSpecific: { endpoint: "ep1" }, + }); + + expect(data._key).to.equal("test_repo"); + expect(data._id).to.equal("repo/test_repo"); + expect(data.type).to.equal("globus"); + expect(data.title).to.equal("Test Repository"); + expect(data.capacity).to.equal(1000000); + expect(data.admins).to.deep.equal(["u/admin1"]); + expect(data.endpoint).to.equal("ep1"); + }); + + it("unit_types: createGlobusConfig should create proper config", () => { + const config = createGlobusConfig({ + endpoint: "ep123", + path: "/data/repo", + pub_key: "ssh-rsa...", + address: "server.org", + exp_path: "/export", + domain: "org", + }); + + 
+            expect(config.endpoint).to.equal("ep123");
+            expect(config.path).to.equal("/data/repo");
+            expect(config.pub_key).to.equal("ssh-rsa...");
+            expect(config.address).to.equal("server.org");
+            expect(config.exp_path).to.equal("/export");
+            expect(config.domain).to.equal("org");
+        });
+
+        it("unit_types: createRepository should create tagged union", () => {
+            const data = { _id: "repo/test", title: "Test" };
+            const repo = createRepository(RepositoryType.GLOBUS, data);
+
+            expect(repo.type).to.equal("globus");
+            expect(repo.data).to.deep.equal(data);
+        });
+
+        it("unit_types: createAllocationResult should handle task method", () => {
+            const taskPayload = { task_id: "123", status: "pending" };
+            const result = createAllocationResult(ExecutionMethod.TASK, taskPayload);
+
+            expect(result.execution_method).to.equal("task");
+            expect(result.task).to.deep.equal(taskPayload);
+            expect(result.result).to.be.undefined;
+        });
+
+        it("unit_types: createAllocationResult should handle direct method", () => {
+            const directPayload = { allocation_id: "456", status: "completed" };
+            const result = createAllocationResult(ExecutionMethod.DIRECT, directPayload);
+
+            expect(result.execution_method).to.equal("direct");
+            expect(result.result).to.deep.equal(directPayload);
+            expect(result.task).to.be.undefined;
+        });
+    });
+
+    describe("Validation Module", () => {
+        it("unit_validation: validateNonEmptyString should accept valid strings", () => {
+            const result = validateNonEmptyString("valid string", "Test field");
+            expect(result.ok).to.be.true;
+            expect(result.value).to.be.true;
+        });
+
+        it("unit_validation: validateNonEmptyString should reject empty strings", () => {
+            const result = validateNonEmptyString("", "Test field");
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("Test field is required");
+        });
+
+        it("unit_validation: validateNonEmptyString should reject null/undefined", () => {
+            let result = validateNonEmptyString(null, "Test field");
+            expect(result.ok).to.be.false;
+
+            result = validateNonEmptyString(undefined, "Test field");
+            expect(result.ok).to.be.false;
+        });
+
+        it("unit_validation: validateNonEmptyString should reject whitespace-only strings", () => {
+            const result = validateNonEmptyString("   ", "Test field");
+            expect(result.ok).to.be.false;
+        });
+
+        it("unit_validation: validateCommonFields should accept valid config", () => {
+            const config = {
+                id: "test_repo",
+                title: "Test Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+            };
+            const result = validateCommonFields(config);
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_validation: validateCommonFields should reject missing fields", () => {
+            const config = {
+                id: "test_repo",
+                // missing title
+                capacity: 1000000,
+                admins: ["u/admin1"],
+            };
+            const result = validateCommonFields(config);
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("Repository title");
+        });
+
+        it("unit_validation: validateCommonFields should reject invalid capacity", () => {
+            const config = {
+                id: "test_repo",
+                title: "Test Repository",
+                capacity: -100, // negative
+                admins: ["u/admin1"],
+            };
+            const result = validateCommonFields(config);
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("positive number");
+        });
+
+        it("unit_validation: validateCommonFields should reject empty admins", () => {
+            const config = {
+                id: "test_repo",
+                title: "Test Repository",
+                capacity: 1000000,
+                admins: [], // empty array
+            };
+            const result = validateCommonFields(config);
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("at least one admin");
+        });
+
+        it("unit_validation: validatePOSIXPath should accept valid paths", () => {
+            const result = validatePOSIXPath("/data/repo", "Test path");
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_validation: validatePOSIXPath should reject relative paths", () => {
+            const result = validatePOSIXPath("data/repo", "Test path");
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("absolute path");
+        });
+
+        it("unit_validation: validatePOSIXPath should reject paths with ..", () => {
+            const result = validatePOSIXPath("/data/../repo", "Test path");
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("invalid path sequences");
+        });
+
+        it("unit_validation: validateRepositoryPath should validate path ends with ID", () => {
+            const result = validateRepositoryPath("/data/repos/myrepo", "myrepo");
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_validation: validateRepositoryPath should reject path not ending with ID", () => {
+            const result = validateRepositoryPath("/data/repos/other", "myrepo");
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("must end with repository ID");
+        });
+
+        it("unit_validation: validateGlobusConfig should accept complete config", () => {
+            const config = {
+                id: "test_repo",
+                title: "Test Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+                pub_key: "ssh-rsa...",
+                address: "server.org",
+                endpoint: "ep123",
+                path: "/data/test_repo",
+                domain: "org",
+            };
+            const result = validateGlobusConfig(config);
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_validation: validateGlobusConfig should reject missing Globus fields", () => {
+            const config = {
+                id: "test_repo",
+                title: "Test Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+                // missing Globus-specific fields
+            };
+            const result = validateGlobusConfig(config);
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("Public key");
+        });
+
+        it("unit_validation: validateMetadataConfig should accept metadata-only config", () => {
+            const config = {
+                id: "meta_repo",
+                title: "Metadata Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+            };
+            const result = validateMetadataConfig(config);
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_validation: validateMetadataConfig should reject Globus fields", () => {
+            const config = {
+                id: "meta_repo",
+                title: "Metadata Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+                pub_key: "should not be here",
+            };
+            const result = validateMetadataConfig(config);
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("should not have");
+        });
+
+        it("unit_validation: validateAllocationParams should accept valid params", () => {
+            const params = {
+                subject: "d/dataset1",
+                size: 1000000,
+                path: "/data/alloc1",
+            };
+            const result = validateAllocationParams(params);
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_validation: validateAllocationParams should reject invalid size", () => {
+            const params = {
+                subject: "d/dataset1",
+                size: 0, // invalid
+            };
+            const result = validateAllocationParams(params);
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("positive number");
+        });
+    });
+
+    describe("Factory Module", () => {
+        it("unit_factory: createRepositoryByType should create GLOBUS repository", () => {
+            const config = {
+                id: "globus_repo",
+                type: RepositoryType.GLOBUS,
+                title: "Globus Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+                pub_key: "ssh-rsa...",
+                address: "server.org",
+                endpoint: "ep123",
+                path: "/data/globus_repo",
+                domain: "org",
+            };
+            const result = createRepositoryByType(config);
+
+            expect(result.ok).to.be.true;
+            expect(result.value.type).to.equal(RepositoryType.GLOBUS);
+            expect(result.value.data.type).to.equal(RepositoryType.GLOBUS);
+            expect(result.value.data._id).to.equal("repo/globus_repo");
+        });
+
+        it("unit_factory: createRepositoryByType should create METADATA_ONLY repository", () => {
+            const config = {
+                id: "meta_repo",
+                type: RepositoryType.METADATA_ONLY,
+                title: "Metadata Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+            };
+            const result = createRepositoryByType(config);
+
+            expect(result.ok).to.be.true;
+            expect(result.value.type).to.equal(RepositoryType.METADATA_ONLY);
+            expect(result.value.data.type).to.equal(RepositoryType.METADATA_ONLY);
+        });
+
+        it("unit_factory: createRepositoryByType should reject unknown type", () => {
+            const config = {
+                id: "unknown_repo",
+                type: "unknown_type",
+                title: "Unknown Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+            };
+            const result = createRepositoryByType(config);
+
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("Unknown repository type");
+        });
+
+        it("unit_factory: createRepositoryByType should reject missing required fields", () => {
+            const config = {
+                // missing id
+                type: RepositoryType.GLOBUS,
+                title: "Test Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+            };
+            const result = createRepositoryByType(config);
+
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("Missing required");
+        });
+
+        it("unit_factory: getRepositoryImplementation should return correct implementation", () => {
+            const globusImpl = getRepositoryImplementation(RepositoryType.GLOBUS);
+            expect(globusImpl).to.not.be.null;
+            expect(typeof globusImpl.validate).to.equal("function");
+
+            const metadataImpl = getRepositoryImplementation(RepositoryType.METADATA_ONLY);
+            expect(metadataImpl).to.not.be.null;
+            expect(typeof metadataImpl.validate).to.equal("function");
+        });
+
+        it("unit_factory: getRepositoryImplementation should return null for unknown type", () => {
+            const impl = getRepositoryImplementation("unknown_type");
+            expect(impl).to.be.null;
+        });
+
+        it("unit_factory: executeRepositoryOperation should dispatch to correct implementation", () => {
+            const repository = {
+                type: RepositoryType.METADATA_ONLY,
+                data: { _id: "repo/test" },
+            };
+            const result = executeRepositoryOperation(repository, "supportsDataOperations");
+
+            expect(result.ok).to.be.true;
+            expect(result.value).to.be.false; // metadata repos don't support data ops
+        });
+
+        it("unit_factory: executeRepositoryOperation should handle unknown operation", () => {
+            const repository = {
+                type: RepositoryType.GLOBUS,
+                data: { _id: "repo/test" },
+            };
+            const result = executeRepositoryOperation(repository, "unknownOperation");
+
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("not implemented");
+        });
+    });
+
+    describe("Operations Module", () => {
+        beforeEach(() => {
+            // Create test repository in database
+            g_db.repo.save({
+                _key: "test_repo",
+                _id: "repo/test_repo",
+                type: RepositoryType.GLOBUS,
+                title: "Test Repository",
+                capacity: 1000000,
+                admins: ["u/admin1"],
+                endpoint: "ep123",
+                path: "/data/test_repo",
+            });
+        });
+
+        it("unit_operations: find should return existing repository", () => {
+            const result = RepositoryOps.find("repo/test_repo");
+
+            expect(result.ok).to.be.true;
+            expect(result.value.type).to.equal(RepositoryType.GLOBUS);
+            expect(result.value.data._id).to.equal("repo/test_repo");
+        });
+
+        it("unit_operations: find should handle non-existent repository", () => {
+            const result = RepositoryOps.find("repo/nonexistent");
+
+            expect(result.ok).to.be.false;
+            expect(result.error.code).to.equal(404);
+            expect(result.error.message).to.include("not found");
+        });
+
+        it("unit_operations: find should handle key without prefix", () => {
+            const result = RepositoryOps.find("test_repo");
+
+            expect(result.ok).to.be.true;
+            expect(result.value.data._id).to.equal("repo/test_repo");
+        });
+
+        it("unit_operations: list should return all repositories", () => {
+            // Add another repository
+            g_db.repo.save({
+                _key: "another_repo",
+                type: RepositoryType.METADATA_ONLY,
+                title: "Another Repository",
+                capacity: 1000,
+                admins: ["u/admin2"],
+            });
+
+            const result = RepositoryOps.list();
+
+            expect(result.ok).to.be.true;
+            expect(result.value).to.have.length(2);
+            expect(result.value[0].type).to.be.oneOf(["globus", "metadata_only"]);
+        });
+
+        it("unit_operations: list should filter by type", () => {
+            // Add metadata repository
+            g_db.repo.save({
+                _key: "meta_repo",
+                type: RepositoryType.METADATA_ONLY,
+                title: "Metadata Repository",
+                capacity: 1000,
+                admins: ["u/admin1"],
+            });
+
+            const result = RepositoryOps.list({ type: RepositoryType.METADATA_ONLY });
+
+            expect(result.ok).to.be.true;
+            expect(result.value).to.have.length(1);
+            expect(result.value[0].type).to.equal(RepositoryType.METADATA_ONLY);
+        });
+
+        it("unit_operations: list should filter by admin", () => {
+            // Add repository with different admin
+            g_db.repo.save({
+                _key: "other_admin_repo",
+                type: RepositoryType.GLOBUS,
+                title: "Other Admin Repository",
+                capacity: 1000,
+                admins: ["u/admin2"],
+            });
+
+            const result = RepositoryOps.list({ admin: "u/admin1" });
+
+            expect(result.ok).to.be.true;
+            expect(result.value).to.have.length(1);
+            expect(result.value[0].data.admins).to.include("u/admin1");
+        });
+
+        it("unit_operations: save should persist repository", () => {
+            const repository = {
+                type: RepositoryType.METADATA_ONLY,
+                data: {
+                    _key: "new_repo",
+                    _id: "repo/new_repo",
+                    type: RepositoryType.METADATA_ONLY,
+                    title: "New Repository",
+                    capacity: 5000,
+                    admins: ["u/admin3"],
+                },
+            };
+
+            const result = RepositoryOps.save(repository);
+
+            expect(result.ok).to.be.true;
+            expect(result.value._id).to.equal("repo/new_repo");
+
+            // Verify it was saved
+            const saved = g_db.repo.document("new_repo");
+            expect(saved.title).to.equal("New Repository");
+        });
+
+        it("unit_operations: update should modify repository", () => {
+            const repository = {
+                type: RepositoryType.GLOBUS,
+                data: g_db.repo.document("test_repo"),
+            };
+
+            const updates = {
+                title: "Updated Title",
+                capacity: 2000000,
+            };
+
+            const result = RepositoryOps.update(repository, updates);
+
+            expect(result.ok).to.be.true;
+            expect(result.value.title).to.equal("Updated Title");
+            expect(result.value.capacity).to.equal(2000000);
+
+            // Verify in database
+            const updated = g_db.repo.document("test_repo");
+            expect(updated.title).to.equal("Updated Title");
+        });
+
+        it("unit_operations: validate should use type-specific validation", () => {
+            const repository = {
+                type: RepositoryType.GLOBUS,
+                data: { _id: "repo/test" },
+            };
+
+            const result = RepositoryOps.validate(repository);
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_operations: checkPermission should verify admin access", () => {
+            const repository = {
+                type: RepositoryType.GLOBUS,
+                data: {
+                    _id: "repo/test",
+                    admins: ["u/admin1", "u/admin2"],
+                },
+            };
+
+            const result1 = RepositoryOps.checkPermission(repository, "u/admin1", "admin");
+            expect(result1.ok).to.be.true;
+            expect(result1.value).to.be.true;
+
+            const result2 = RepositoryOps.checkPermission(repository, "u/other", "admin");
+            expect(result2.ok).to.be.true;
+            expect(result2.value).to.be.false;
+        });
+    });
+
+    describe("Globus Implementation", () => {
+        it("unit_globus: validate should always return ok", () => {
+            const repoData = { _id: "repo/globus1" };
+            const result = globusImpl.validate(repoData);
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_globus: supportsDataOperations should return true", () => {
+            const repoData = { _id: "repo/globus1" };
+            const result = globusImpl.supportsDataOperations(repoData);
+            expect(result.ok).to.be.true;
+            expect(result.value).to.be.true;
+        });
+
+        it("unit_globus: getCapacityInfo should return capacity details", () => {
+            const repoData = {
+                _id: "repo/globus1",
+                capacity: 1000000000,
+            };
+            const result = globusImpl.getCapacityInfo(repoData);
+
+            expect(result.ok).to.be.true;
+            expect(result.value.total_capacity).to.equal(1000000000);
+            expect(result.value.supports_quotas).to.be.true;
+        });
+
+        it("unit_globus: createAllocation should return task result", () => {
+            const repoData = { _id: "repo/globus1" };
+            const params = {
+                subject: "d/dataset1",
+                size: 1000000,
+                path: "/data/alloc1",
+            };
+
+            // Mock task creation (in real tests, g_tasks would be mocked)
+            const result = globusImpl.createAllocation(repoData, params);
+
+            expect(result.ok).to.be.true;
+            expect(result.value.execution_method).to.equal(ExecutionMethod.TASK);
+            // Task creation would be tested with proper mocking
+        });
+
+        it("unit_globus: deleteAllocation should validate subject ID", () => {
+            const repoData = { _id: "repo/globus1" };
+            const result = globusImpl.deleteAllocation(repoData, null);
+
+            expect(result.ok).to.be.false;
+            expect(result.error.message).to.include("Subject ID is required");
+        });
+    });
+
+    describe("Metadata Implementation", () => {
+        it("unit_metadata: validate should always return ok", () => {
+            const repoData = { _id: "repo/meta1" };
+            const result = metadataImpl.validate(repoData);
+            expect(result.ok).to.be.true;
+        });
+
+        it("unit_metadata: supportsDataOperations should return false", () => {
+            const repoData = { _id: "repo/meta1" };
+            const result = metadataImpl.supportsDataOperations(repoData);
+            expect(result.ok).to.be.true;
+            expect(result.value).to.be.false;
+        });
+
+        it("unit_metadata: getCapacityInfo should indicate metadata-only", () => {
+            const repoData = {
+                _id: "repo/meta1",
+                capacity: 1000000,
+            };
+            const result = metadataImpl.getCapacityInfo(repoData);
+
+            expect(result.ok).to.be.true;
+            expect(result.value.total_capacity).to.equal(1000000);
+            expect(result.value.supports_quotas).to.be.false;
+            expect(result.value.is_metadata_only).to.be.true;
+        });
+
+        it("unit_metadata: createAllocation should return direct result", () => {
+            const repoData = { _id: "repo/meta1" };
+            const params = {
+                subject: "d/dataset1",
+                size: 1000,
+            };
+
+            const result = metadataImpl.createAllocation(repoData, params);
+
+            expect(result.ok).to.be.true;
+            expect(result.value.execution_method).to.equal(ExecutionMethod.DIRECT);
+            expect(result.value.result).to.exist;
+            expect(result.value.result.status).to.equal("completed");
+        });
+
+        it("unit_metadata: deleteAllocation should return direct result", () => {
+            const repoData = { _id: "repo/meta1" };
+            const result = metadataImpl.deleteAllocation(repoData, "d/dataset1");
+
+            expect(result.ok).to.be.true;
+            expect(result.value.execution_method).to.equal(ExecutionMethod.DIRECT);
+            expect(result.value.result.status).to.equal("completed");
+        });
+    });
+
+    describe("Integration Tests", () => {
+        it("unit_integration: full workflow - create, save, find, allocate", () => {
+            // Step 1: Create repository
+            const createResult = createRepositoryByType({
+                id: "integration_repo",
+                type: RepositoryType.GLOBUS,
+                title: "Integration Test Repository",
+                capacity: 5000000000,
+                admins: ["u/test_admin"],
+                pub_key: "ssh-rsa integration...",
+                address: "integration.test.org",
+                endpoint: "integration-ep",
+                path: "/data/integration_repo",
+                domain: "test.org",
+            });
+
+            expect(createResult.ok).to.be.true;
+            const repository = createResult.value;
+
+            // Step 2: Save repository
+            const saveResult = RepositoryOps.save(repository);
+            expect(saveResult.ok).to.be.true;
+
+            // Step 3: Find repository
+            const findResult = RepositoryOps.find("integration_repo");
+            expect(findResult.ok).to.be.true;
+            expect(findResult.value.type).to.equal(RepositoryType.GLOBUS);
+
+            // Step 4: Create allocation
+            const allocResult = RepositoryOps.createAllocation(findResult.value, {
+                subject: "d/integration_dataset",
+                size: 1000000000,
+                path: "/data/integration_repo/dataset1",
+            });
+
+            expect(allocResult.ok).to.be.true;
+            expect(allocResult.value.execution_method).to.equal(ExecutionMethod.TASK);
+        });
+
+        it("unit_integration: error propagation through the system", () => {
+            // Create invalid repository
+            const createResult = createRepositoryByType({
+                type: RepositoryType.GLOBUS,
+                // missing required fields
+            });
+
+            expect(createResult.ok).to.be.false;
+            expect(createResult.error.code).to.equal(g_lib.ERR_INVALID_PARAM);
+        });
+
+        it("unit_integration: different behavior for different repository types", () => {
+            // Create and save both types
+            const globusConfig = {
+                id: "globus_test",
+                type: RepositoryType.GLOBUS,
+                title: "Globus Test",
+                capacity: 1000000,
+                admins: ["u/admin"],
+                pub_key: "ssh-rsa...",
+                address: "server.org",
+                endpoint: "ep1",
+                path: "/data/globus_test",
+                domain: "org",
+            };
+
+            const metadataConfig = {
+                id: "metadata_test",
+                type: RepositoryType.METADATA_ONLY,
+                title: "Metadata Test",
+                capacity: 1000000,
+                admins: ["u/admin"],
+            };
+
+            const globusResult = createRepositoryByType(globusConfig);
+            const metadataResult = createRepositoryByType(metadataConfig);
+
+            expect(globusResult.ok).to.be.true;
+            expect(metadataResult.ok).to.be.true;
+
+            // Save both
+            RepositoryOps.save(globusResult.value);
+            RepositoryOps.save(metadataResult.value);
+
+            // Test different behaviors
+            const globusRepo = RepositoryOps.find("globus_test").value;
+            const metadataRepo = RepositoryOps.find("metadata_test").value;
+
+            // Data operations support
+            const globusDataOps = RepositoryOps.supportsDataOperations(globusRepo);
+            const metadataDataOps = RepositoryOps.supportsDataOperations(metadataRepo);
+
+            expect(globusDataOps.value).to.be.true;
+            expect(metadataDataOps.value).to.be.false;
+
+            // Allocation behavior
+            const allocParams = { subject: "d/test", size: 1000 };
+            const globusAlloc = RepositoryOps.createAllocation(globusRepo, allocParams);
+            const metadataAlloc = RepositoryOps.createAllocation(metadataRepo, allocParams);
+
+            expect(globusAlloc.value.execution_method).to.equal(ExecutionMethod.TASK);
+            expect(metadataAlloc.value.execution_method).to.equal(ExecutionMethod.DIRECT);
+        });
+
+        it("unit_integration: backward compatibility with legacy Repo class", () => {
+            // Create repository using new system
+            const createResult = createRepositoryByType({
+                id: "legacy_compat",
+                type: RepositoryType.METADATA_ONLY,
+                title: "Legacy Compatible",
+                capacity: 1000000,
+                admins: ["u/admin"],
+            });
+
+            RepositoryOps.save(createResult.value);
+
+            // Use legacy Repo class
+            const { Repo } = require("../api/repo");
+            const legacyRepo = new Repo("legacy_compat");
+
+            expect(legacyRepo.exists()).to.be.true;
+            expect(legacyRepo.key()).to.equal("legacy_compat");
+            expect(legacyRepo.id()).to.equal("repo/legacy_compat");
+
+            // Access new repository object
+            const newRepo = legacyRepo.getRepository();
+            expect(newRepo).to.not.be.null;
+            expect(newRepo.type).to.equal(RepositoryType.METADATA_ONLY);
+        });
+    });
+});