From 93a7c553b0336bbd78a62cccbd88238ff2ed3285 Mon Sep 17 00:00:00 2001
From: Johnathan Reale
Date: Wed, 8 Oct 2025 14:50:44 -0400
Subject: [PATCH 1/6] binding-cacher addition

---
 Cargo.toml                                    |    4 +-
 .../packages/hypermap-cacher/Cargo.toml       |    1 +
 .../api/hypermap-cacher:sys-v0.wit            |   83 -
 .../api/hypermap-cacher:sys-v1.wit            |  162 ++
 .../hypermap-cacher/binding-cacher/Cargo.toml |   33 +
 .../hypermap-cacher/binding-cacher/src/lib.rs | 1484 +++++++++++++++++
 .../hypermap-cacher/Cargo.toml                |    2 +-
 .../hypermap-cacher/src/lib.rs                |    6 +-
 .../hypermap-cacher/pkg/manifest.json         |   26 +
 .../packages/hypermap-cacher/pkg/scripts.json |    8 +
 .../hypermap-cacher/reset-cache/Cargo.toml    |    2 +-
 .../hypermap-cacher/reset-cache/src/lib.rs    |   34 +-
 .../hypermap-cacher/set-nodes/Cargo.toml      |    2 +-
 .../hypermap-cacher/set-nodes/src/lib.rs      |   32 +-
 .../start-providing/Cargo.toml                |    2 +-
 .../start-providing/src/lib.rs                |   31 +-
 .../hypermap-cacher/stop-providing/Cargo.toml |    2 +-
 .../hypermap-cacher/stop-providing/src/lib.rs |   31 +-
 .../register-ui/src/pages/CommitDotOsName.tsx |    2 +-
 19 files changed, 1843 insertions(+), 104 deletions(-)
 delete mode 100644 hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v0.wit
 create mode 100644 hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v1.wit
 create mode 100644 hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml
 create mode 100644 hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs

diff --git a/Cargo.toml b/Cargo.toml
index 4ef04985a..2572ba3ea 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -22,8 +22,8 @@ members = [
     "hyperdrive/packages/homepage/homepage", "hyperdrive/packages/hns-indexer/hns-indexer", "hyperdrive/packages/hns-indexer/get-block",
     "hyperdrive/packages/settings/settings", "hyperdrive/packages/hns-indexer/reset", "hyperdrive/packages/hns-indexer/node-info", "hyperdrive/packages/hns-indexer/state",
-    "hyperdrive/packages/hypermap-cacher/hypermap-cacher", "hyperdrive/packages/hypermap-cacher/reset-cache", "hyperdrive/packages/hypermap-cacher/set-nodes",
-    "hyperdrive/packages/hypermap-cacher/start-providing", "hyperdrive/packages/hypermap-cacher/stop-providing",
+    "hyperdrive/packages/hypermap-cacher/binding-cacher", "hyperdrive/packages/hypermap-cacher/hypermap-cacher", "hyperdrive/packages/hypermap-cacher/reset-cache",
+    "hyperdrive/packages/hypermap-cacher/set-nodes", "hyperdrive/packages/hypermap-cacher/start-providing", "hyperdrive/packages/hypermap-cacher/stop-providing",
     "hyperdrive/packages/sign/sign", "hyperdrive/packages/spider/spider", "hyperdrive/packages/terminal/terminal",
     "hyperdrive/packages/terminal/add-node-provider", "hyperdrive/packages/terminal/add-rpcurl-provider",
diff --git a/hyperdrive/packages/hypermap-cacher/Cargo.toml b/hyperdrive/packages/hypermap-cacher/Cargo.toml
index ff6b64639..ff4a19f1f 100644
--- a/hyperdrive/packages/hypermap-cacher/Cargo.toml
+++ b/hyperdrive/packages/hypermap-cacher/Cargo.toml
@@ -1,6 +1,7 @@
 [workspace]
 resolver = "2"
 members = [
+    "binding-cacher",
     "hypermap-cacher",
     "reset-cache",
     "set-nodes",
diff --git a/hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v0.wit b/hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v0.wit
deleted file mode 100644
index 21a3652ae..000000000
--- a/hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v0.wit
+++ /dev/null
@@ -1,83 +0,0 @@
-interface hypermap-cacher {
-    // Metadata associated with a batch of Ethereum logs.
-    record logs-metadata {
-        chain-id: string,
-        from-block: string,
-        to-block: string,
-        time-created: string,
-        created-by: string,
-        signature: string,
-    }
-
-    // Represents an item in the manifest, detailing a single log cache file.
-    record manifest-item {
-        metadata: logs-metadata,
-        is-empty: bool,
-        file-hash: string,
-        file-name: string,
-    }
-
-    // The main manifest structure, listing all available log cache files.
-    // WIT does not support direct map types, so a list of key-value tuples is used.
-    record manifest {
-        // The key is the filename of the log cache.
-        items: list<tuple<string, manifest-item>>,
-        manifest-filename: string,
-        chain-id: string,
-        protocol-version: string,
-    }
-
-    record get-logs-by-range-request {
-        from-block: u64,
-        to-block: option<u64>, // If None, signifies to the latest available/relevant cached block.
-    }
-
-    variant get-logs-by-range-ok-response {
-        logs(tuple<u64, string>),
-        latest(u64),
-    }
-
-    // Defines the types of requests that can be sent to the Hypermap Cacher process.
-    variant cacher-request {
-        get-manifest,
-        get-log-cache-content(string),
-        get-status,
-        get-logs-by-range(get-logs-by-range-request),
-        start-providing,
-        stop-providing,
-        set-nodes(list<string>),
-        reset(option<list<string>>),
-    }
-
-    // Represents the operational status of the cacher.
-    record cacher-status {
-        last-cached-block: u64,
-        chain-id: string,
-        protocol-version: string,
-        next-cache-attempt-in-seconds: option<u64>,
-        manifest-filename: string,
-        log-files-count: u32,
-        our-address: string,
-        is-providing: bool,
-    }
-
-    // Defines the types of responses the Hypermap Cacher process can send.
-    variant cacher-response {
-        get-manifest(option<manifest>),
-        get-log-cache-content(result<option<string>, string>),
-        get-status(cacher-status),
-        get-logs-by-range(result<get-logs-by-range-ok-response, string>),
-        start-providing(result<string, string>),
-        stop-providing(result<string, string>),
-        set-nodes(result<string, string>),
-        reset(result<string, string>),
-        rejected,
-        is-starting,
-    }
-}
-
-world hypermap-cacher-sys-v0 {
-    import sign;
-    import hypermap-cacher;
-    include process-v1;
-}
diff --git a/hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v1.wit b/hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v1.wit
new file mode 100644
index 000000000..472e9b6e2
--- /dev/null
+++ b/hyperdrive/packages/hypermap-cacher/api/hypermap-cacher:sys-v1.wit
@@ -0,0 +1,162 @@
+interface binding-cacher {
+    // Metadata associated with a batch of Ethereum logs.
+    record binding-logs-metadata {
+        chain-id: string,
+        from-block: string,
+        to-block: string,
+        time-created: string,
+        created-by: string,
+        signature: string,
+    }
+
+    // Represents an item in the manifest, detailing a single log cache file.
+    record binding-manifest-item {
+        metadata: binding-logs-metadata,
+        is-empty: bool,
+        file-hash: string,
+        file-name: string,
+    }
+
+    // The main manifest structure, listing all available log cache files.
+    // WIT does not support direct map types, so a list of key-value tuples is used.
+    record binding-manifest {
+        // The key is the filename of the log cache.
+        items: list<tuple<string, binding-manifest-item>>,
+        manifest-filename: string,
+        chain-id: string,
+        protocol-version: string,
+    }
+
+    record binding-get-logs-by-range-request {
+        from-block: u64,
+        to-block: option<u64>, // If None, signifies to the latest available/relevant cached block.
+    }
+
+    variant binding-get-logs-by-range-ok-response {
+        logs(tuple<u64, string>),
+        latest(u64),
+    }
+
+    // Defines the types of requests that can be sent to the Hypermap Cacher process.
+    variant binding-cacher-request {
+        get-manifest,
+        get-log-cache-content(string),
+        get-status,
+        get-logs-by-range(binding-get-logs-by-range-request),
+        start-providing,
+        stop-providing,
+        set-nodes(list<string>),
+        reset(option<list<string>>),
+    }
+
+    // Represents the operational status of the cacher.
+    record binding-cacher-status {
+        last-cached-block: u64,
+        chain-id: string,
+        protocol-version: string,
+        next-cache-attempt-in-seconds: option<u64>,
+        manifest-filename: string,
+        log-files-count: u32,
+        our-address: string,
+        is-providing: bool,
+    }
+
+    // Defines the types of responses the Hypermap Cacher process can send.
+    variant binding-cacher-response {
+        get-manifest(option<binding-manifest>),
+        get-log-cache-content(result<option<string>, string>),
+        get-status(binding-cacher-status),
+        get-logs-by-range(result<binding-get-logs-by-range-ok-response, string>),
+        start-providing(result<string, string>),
+        stop-providing(result<string, string>),
+        set-nodes(result<string, string>),
+        reset(result<string, string>),
+        rejected,
+        is-starting,
+    }
+}
+
+interface hypermap-cacher {
+    // Metadata associated with a batch of Ethereum logs.
+    record logs-metadata {
+        chain-id: string,
+        from-block: string,
+        to-block: string,
+        time-created: string,
+        created-by: string,
+        signature: string,
+    }
+
+    // Represents an item in the manifest, detailing a single log cache file.
+    record manifest-item {
+        metadata: logs-metadata,
+        is-empty: bool,
+        file-hash: string,
+        file-name: string,
+    }
+
+    // The main manifest structure, listing all available log cache files.
+    // WIT does not support direct map types, so a list of key-value tuples is used.
+    record manifest {
+        // The key is the filename of the log cache.
+        items: list<tuple<string, manifest-item>>,
+        manifest-filename: string,
+        chain-id: string,
+        protocol-version: string,
+    }
+
+    record get-logs-by-range-request {
+        from-block: u64,
+        to-block: option<u64>, // If None, signifies to the latest available/relevant cached block.
+    }
+
+    variant get-logs-by-range-ok-response {
+        logs(tuple<u64, string>),
+        latest(u64),
+    }
+
+    // Defines the types of requests that can be sent to the Hypermap Cacher process.
+    variant cacher-request {
+        get-manifest,
+        get-log-cache-content(string),
+        get-status,
+        get-logs-by-range(get-logs-by-range-request),
+        start-providing,
+        stop-providing,
+        set-nodes(list<string>),
+        reset(option<list<string>>),
+    }
+
+    // Represents the operational status of the cacher.
+    record cacher-status {
+        last-cached-block: u64,
+        chain-id: string,
+        protocol-version: string,
+        next-cache-attempt-in-seconds: option<u64>,
+        manifest-filename: string,
+        log-files-count: u32,
+        our-address: string,
+        is-providing: bool,
+    }
+
+    // Defines the types of responses the Hypermap Cacher process can send.
+    variant cacher-response {
+        get-manifest(option<manifest>),
+        get-log-cache-content(result<option<string>, string>),
+        get-status(cacher-status),
+        get-logs-by-range(result<get-logs-by-range-ok-response, string>),
+        start-providing(result<string, string>),
+        stop-providing(result<string, string>),
+        set-nodes(result<string, string>),
+        reset(result<string, string>),
+        rejected,
+        is-starting,
+    }
+}
+
+world hypermap-cacher-sys-v1 {
+    import sign;
+    import binding-cacher;
+    import hypermap-cacher;
+    include process-v1;
+}
diff --git a/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml b/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml
new file mode 100644
index 000000000..93530db0c
--- /dev/null
+++ b/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml
@@ -0,0 +1,33 @@
+[package]
+name = "binding-cacher"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[features]
+simulation-mode = ["hyperware_process_lib/simulation-mode"]
+
+[dependencies]
+anyhow = "1.0"
+alloy-primitives = "0.8.15"
+alloy-sol-types = "0.8.15"
+alloy = { version = "0.8.1", features = [
+    "json-rpc",
+    "rpc-client",
+    "rpc-types",
+] }
+chrono = "0.4.41"
+hex = "0.4.3"
+hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e", features = ["logging"] }
+process_macros = "0.1.0"
+rand = "0.8"
+rmp-serde = "1.1.2"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+wit-bindgen = "0.42.1"
+
+[lib]
+crate-type = ["cdylib"]
+
+[package.metadata.component]
+package = "hyperware:process"
diff --git a/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs b/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs
new file mode 100644
index 000000000..e7593bdfb
--- /dev/null
+++ b/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs
@@ -0,0 +1,1484 @@
+use std::{
+    cmp::{max, min},
+    collections::HashMap,
+    str::FromStr,
+};
+
+use alloy::hex;
+use alloy_primitives::keccak256;
+use rand::seq::SliceRandom;
+use rand::thread_rng;
+use serde::{Deserialize, Serialize};
+
+use crate::hyperware::process::binding_cacher::{
+    BindingCacherRequest as CacherRequest, BindingCacherResponse as CacherResponse,
+    BindingCacherStatus as CacherStatus,
+    BindingGetLogsByRangeOkResponse as GetLogsByRangeOkResponse,
+    BindingGetLogsByRangeRequest as GetLogsByRangeRequest, BindingLogsMetadata as WitLogsMetadata,
+    BindingManifest as WitManifest, BindingManifestItem as WitManifestItem,
+};
+
+use hyperware_process_lib::{
+    await_message, bindings, call_init, eth, get_state, http, hypermap,
+    logging::{debug, error, info, init_logging, warn, Level},
+    net::{NetAction, NetResponse},
+    our, set_state, sign, timer, vfs, Address, ProcessId, Request, Response,
+};
+
+wit_bindgen::generate!({
+    path: "../target/wit",
+    world: "hypermap-cacher-sys-v1",
+    generate_unused_types: true,
+    additional_derives: [serde::Deserialize, serde::Serialize, process_macros::SerdeJsonInto],
+});
+
+const PROTOCOL_VERSION: &str = "0";
+const DEFAULT_BLOCK_BATCH_SIZE: u64 = 10;
+const DEFAULT_CACHE_INTERVAL_S: u64 = 3_600; // 2s / block -> 1hr ~ 1800 blocks
+const MAX_LOG_RETRIES: u8 = 3;
+const RETRY_DELAY_S: u64 = 10;
+const LOG_ITERATION_DELAY_MS: u64 = 200;
+
+#[cfg(not(feature = "simulation-mode"))]
+const DEFAULT_NODES: &[&str] = &[
+    "us-cacher-1.hypr",
+    "eu-cacher-1.hypr",
+    "nick.hypr",
+    "nick1udwig.os",
+];
+#[cfg(feature = "simulation-mode")]
+const DEFAULT_NODES: &[&str] = &["fake.os"];
+
+// Internal representation of LogsMetadata, similar to WIT but for Rust logic.
+#[derive(Serialize, Deserialize, Debug, Clone)] +struct LogsMetadataInternal { + #[serde(rename = "chainId")] + chain_id: String, + #[serde(rename = "fromBlock")] + from_block: String, + #[serde(rename = "toBlock")] + to_block: String, + #[serde(rename = "timeCreated")] + time_created: String, + #[serde(rename = "createdBy")] + created_by: String, + signature: String, // Keccak256 hash of the log file content. +} + +// Internal representation of a LogCache, containing metadata and actual logs. +#[derive(Serialize, Deserialize, Debug, Clone)] +struct LogCacheInternal { + metadata: LogsMetadataInternal, + logs: Vec, // The actual Ethereum logs. +} + +// Internal representation of a ManifestItem. +#[derive(Serialize, Deserialize, Debug, Clone)] +struct ManifestItemInternal { + metadata: LogsMetadataInternal, + #[serde(rename = "isEmpty")] + is_empty: bool, + #[serde(rename = "fileHash")] + file_hash: String, + #[serde(rename = "fileName")] + file_name: String, +} + +// Internal representation of the Manifest. +#[derive(Serialize, Deserialize, Debug, Clone, Default)] +struct ManifestInternal { + items: HashMap, + manifest_filename: String, + chain_id: String, + protocol_version: String, +} + +// The main state structure for the Hypermap Binding Cacher process. +#[derive(Serialize, Deserialize, Debug)] +struct State { + hypermap_binding_address: eth::Address, + manifest: ManifestInternal, + last_cached_block: u64, + chain_id: String, + protocol_version: String, + cache_interval_s: u64, + block_batch_size: u64, + is_cache_timer_live: bool, + drive_path: String, + is_providing: bool, + nodes: Vec, + #[serde(skip)] + is_starting: bool, +} + +// Generates a timestamp string. +fn get_current_timestamp_str() -> String { + let datetime = chrono::Utc::now(); + datetime.format("%Y%m%dT%H%M%SZ").to_string() +} + +fn is_local_request(our: &Address, source: &Address) -> bool { + our.node == source.node +} + +impl State { + fn new(drive_path: &str) -> Self { + let chain_id = bindings::BINDINGS_CHAIN_ID.to_string(); + let hypermap_binding_address = eth::Address::from_str(bindings::BINDINGS_ADDRESS) + .expect("Failed to parse BINDINGS_ADDRESS"); + + let manifest_filename = format!( + "manifest-chain{}-protocol{}.json", + chain_id, PROTOCOL_VERSION + ); + let initial_manifest = ManifestInternal { + items: HashMap::new(), + manifest_filename: manifest_filename.clone(), + chain_id: chain_id.clone(), + protocol_version: PROTOCOL_VERSION.to_string(), + }; + + State { + hypermap_binding_address, + manifest: initial_manifest, + last_cached_block: bindings::BINDINGS_FIRST_BLOCK, + chain_id, + protocol_version: PROTOCOL_VERSION.to_string(), + cache_interval_s: DEFAULT_CACHE_INTERVAL_S, + block_batch_size: 0, // Will be determined dynamically + is_cache_timer_live: false, + drive_path: drive_path.to_string(), + is_providing: false, + nodes: DEFAULT_NODES.iter().map(|s| s.to_string()).collect(), + is_starting: true, + } + } + + fn load(drive_path: &str) -> Self { + match get_state() { + Some(state_bytes) => match serde_json::from_slice::(&state_bytes) { + Ok(mut loaded_state) => { + info!("Successfully loaded state from checkpoint."); + // Always start in starting mode to bootstrap from other nodes + // is_starting is not serialized, so it defaults to false and we set it to true + loaded_state.is_starting = true; + loaded_state.drive_path = drive_path.to_string(); + + // Validate state against manifest file on disk + if let Err(e) = loaded_state.validate_state_against_manifest() { + warn!("State validation failed: 
{:?}. Clearing drive and creating fresh state.", e); + if let Err(clear_err) = loaded_state.clear_drive() { + error!("Failed to clear drive: {:?}", clear_err); + } + return Self::new(drive_path); + } + + loaded_state + } + Err(e) => { + warn!( + "Failed to deserialize saved state: {:?}. Creating new state.", + e + ); + Self::new(drive_path) + } + }, + None => { + info!("No saved state found. Creating new state."); + Self::new(drive_path) + } + } + } + + fn save(&self) { + match serde_json::to_vec(self) { + Ok(state_bytes) => set_state(&state_bytes), + Err(e) => error!("Fatal: Failed to serialize state for saving: {:?}", e), + } + info!( + "State checkpoint saved. Last cached block: {}", + self.last_cached_block + ); + } + + // Core logic for fetching logs, creating cache files, and updating the manifest. + fn cache_logs_and_update_manifest( + &mut self, + hypermap: &hypermap::Hypermap, + ) -> anyhow::Result<()> { + // Ensure batch size is determined + if self.block_batch_size == 0 { + self.determine_batch_size(hypermap)?; + } + + let current_chain_head = match hypermap.provider.get_block_number() { + Ok(block_num) => block_num, + Err(e) => { + error!( + "Failed to get current block number: {:?}. Skipping cycle.", + e + ); + return Err(anyhow::anyhow!("Failed to get block number: {:?}", e)); + } + }; + + if self.last_cached_block >= current_chain_head { + info!( + "Already caught up to chain head ({}). Nothing to cache.", + current_chain_head + ); + return Ok(()); + } + + while self.last_cached_block != current_chain_head { + self.cache_logs_and_update_manifest_step(hypermap, Some(current_chain_head))?; + + std::thread::sleep(std::time::Duration::from_millis(LOG_ITERATION_DELAY_MS)); + } + + Ok(()) + } + + fn cache_logs_and_update_manifest_step( + &mut self, + hypermap: &hypermap::Hypermap, + to_block: Option, + ) -> anyhow::Result<()> { + info!( + "Starting caching cycle. From block: {}", + self.last_cached_block + 1 + ); + + let current_chain_head = match to_block { + Some(b) => b, + None => match hypermap.provider.get_block_number() { + Ok(block_num) => block_num, + Err(e) => { + error!( + "Failed to get current block number: {:?}. Skipping cycle.", + e + ); + return Err(anyhow::anyhow!("Failed to get block number: {:?}", e)); + } + }, + }; + + if self.last_cached_block >= current_chain_head { + info!( + "Already caught up to chain head ({}). Nothing to cache.", + current_chain_head + ); + return Ok(()); + } + + let from_block = self.last_cached_block + 1; + let mut to_block = from_block + self.block_batch_size - 1; + if to_block > current_chain_head { + to_block = current_chain_head; + } + + if from_block > to_block { + info!("From_block {} is greater than to_block {}. Chain might not have advanced enough. Skipping.", from_block, to_block); + return Ok(()); + } + + let filter = eth::Filter::new() + .address(self.hypermap_binding_address) + .from_block(from_block) + .to_block(eth::BlockNumberOrTag::Number(to_block)); + + let logs = { + let mut attempt = 0; + loop { + match hypermap.provider.get_logs(&filter) { + Ok(logs) => break logs, + Err(e) => { + attempt += 1; + if attempt >= MAX_LOG_RETRIES { + error!( + "Failed to get logs after {} retries: {:?}", + MAX_LOG_RETRIES, e + ); + return Err(anyhow::anyhow!("Failed to get logs: {:?}", e)); + } + warn!( + "Error getting logs (attempt {}/{}): {:?}. 
Retrying in {}s...", + attempt, MAX_LOG_RETRIES, e, RETRY_DELAY_S + ); + std::thread::sleep(std::time::Duration::from_secs(RETRY_DELAY_S)); + } + } + } + }; + + info!( + "Fetched {} logs from block {} to {}.", + logs.len(), + from_block, + to_block + ); + + let our = our(); + + let metadata = LogsMetadataInternal { + chain_id: self.chain_id.clone(), + from_block: from_block.to_string(), + to_block: to_block.to_string(), + time_created: get_current_timestamp_str(), + created_by: our.to_string(), + signature: "".to_string(), + }; + + let mut log_cache = LogCacheInternal { + metadata, + logs: logs.clone(), + }; + + let mut logs_bytes_for_sig = serde_json::to_vec(&log_cache.logs).unwrap_or_default(); + logs_bytes_for_sig.extend_from_slice(&from_block.to_be_bytes()); + logs_bytes_for_sig.extend_from_slice(&to_block.to_be_bytes()); + let logs_hash_for_sig = keccak256(&logs_bytes_for_sig); + + let signature = sign::net_key_sign(logs_hash_for_sig.to_vec())?; + + log_cache.metadata.signature = format!("0x{}", hex::encode(signature)); + + // Final serialization of LogCacheInternal with the signature. + let final_log_cache_bytes = match serde_json::to_vec(&log_cache) { + Ok(bytes) => bytes, + Err(e) => { + error!( + "Failed to re-serialize LogCacheInternal with signature: {:?}", + e + ); + return Err(e.into()); + } + }; + + let file_hash_for_manifest = + format!("0x{}", hex::encode(keccak256(&final_log_cache_bytes))); + + let log_cache_filename = format!( + "{}-chain{}-from{}-to{}-protocol{}.json", + log_cache + .metadata + .time_created + .replace(":", "") + .replace("-", ""), // Make timestamp filename-safe + self.chain_id, + from_block, + to_block, + self.protocol_version + ); + + if !logs.is_empty() { + let log_cache_path = format!("{}/{}", self.drive_path, log_cache_filename); + let mut log_cache_file = vfs::open_file(&log_cache_path, true, None)?; + + if let Err(e) = log_cache_file.write_all(&final_log_cache_bytes) { + error!("Failed to write log cache file {}: {:?}", log_cache_path, e); + return Err(e.into()); + } + info!("Successfully wrote log cache file: {}", log_cache_path); + } + + let manifest_item = ManifestItemInternal { + metadata: log_cache.metadata.clone(), + is_empty: logs.is_empty(), + file_hash: file_hash_for_manifest, + file_name: if logs.is_empty() { + "".to_string() + } else { + log_cache_filename.clone() + }, + }; + self.manifest + .items + .insert(log_cache_filename.clone(), manifest_item); + self.manifest.chain_id = self.chain_id.clone(); + self.manifest.protocol_version = self.protocol_version.clone(); + + let manifest_bytes = match serde_json::to_vec(&self.manifest) { + Ok(bytes) => bytes, + Err(e) => { + error!("Failed to serialize manifest: {:?}", e); + return Err(e.into()); + } + }; + + let manifest_path = format!("{}/{}", self.drive_path, self.manifest.manifest_filename); + let manifest_file = vfs::open_file(&manifest_path, true, None)?; + + if let Err(e) = manifest_file.write(&manifest_bytes) { + error!("Failed to write manifest file {}: {:?}", manifest_path, e); + return Err(e.into()); + } + info!( + "Successfully updated and wrote manifest file: {}", + manifest_path + ); + + self.last_cached_block = to_block; + self.save(); + + Ok(()) + } + + // Validate that the in-memory state matches the manifest file on disk + fn validate_state_against_manifest(&self) -> anyhow::Result<()> { + let manifest_path = format!("{}/{}", self.drive_path, self.manifest.manifest_filename); + + // Check if manifest file exists + match vfs::open_file(&manifest_path, false, None) { + 
Ok(manifest_file) => { + match manifest_file.read() { + Ok(disk_manifest_bytes) => { + match serde_json::from_slice::(&disk_manifest_bytes) { + Ok(disk_manifest) => { + // Compare key aspects of the manifests + if self.manifest.chain_id != disk_manifest.chain_id { + return Err(anyhow::anyhow!( + "Chain ID mismatch: state has {}, disk has {}", + self.manifest.chain_id, + disk_manifest.chain_id + )); + } + + if self.manifest.protocol_version != disk_manifest.protocol_version + { + return Err(anyhow::anyhow!( + "Protocol version mismatch: state has {}, disk has {}", + self.manifest.protocol_version, + disk_manifest.protocol_version + )); + } + + // Check if all files mentioned in state manifest exist on disk + for (_filename, item) in &self.manifest.items { + if !item.file_name.is_empty() { + let file_path = + format!("{}/{}", self.drive_path, item.file_name); + if vfs::metadata(&file_path, None).is_err() { + return Err(anyhow::anyhow!( + "File {} mentioned in state manifest does not exist on disk", + item.file_name + )); + } + } + } + + // Check if disk manifest has more recent data than our state + let disk_max_block = disk_manifest + .items + .values() + .filter_map(|item| item.metadata.to_block.parse::().ok()) + .max() + .unwrap_or(0); + + let state_max_block = self + .manifest + .items + .values() + .filter_map(|item| item.metadata.to_block.parse::().ok()) + .max() + .unwrap_or(0); + + if disk_max_block > state_max_block { + return Err(anyhow::anyhow!( + "Disk manifest has more recent data (block {}) than state (block {})", + disk_max_block, state_max_block + )); + } + + info!("State validation passed - state matches manifest file"); + Ok(()) + } + Err(e) => { + Err(anyhow::anyhow!("Failed to parse manifest file: {:?}", e)) + } + } + } + Err(e) => Err(anyhow::anyhow!("Failed to read manifest file: {:?}", e)), + } + } + Err(_) => { + // Manifest file doesn't exist - this is okay for new installs + if self.manifest.items.is_empty() { + info!("No manifest file found, but state is also empty - validation passed"); + Ok(()) + } else { + Err(anyhow::anyhow!( + "State has manifest items but no manifest file exists on disk" + )) + } + } + } + } + + // Clear all files from the drive + fn clear_drive(&self) -> anyhow::Result<()> { + info!("Clearing all files from drive: {}", self.drive_path); + + // Remove the manifest file + let manifest_path = format!("{}/{}", self.drive_path, self.manifest.manifest_filename); + match vfs::remove_file(&manifest_path, None) { + Ok(_) => info!("Removed manifest file: {}", manifest_path), + Err(e) => warn!("Failed to remove manifest file {}: {:?}", manifest_path, e), + } + + // Remove all files mentioned in the manifest + for (_, item) in &self.manifest.items { + if !item.file_name.is_empty() { + let file_path = format!("{}/{}", self.drive_path, item.file_name); + match vfs::remove_file(&file_path, None) { + Ok(_) => info!("Removed cache file: {}", file_path), + Err(e) => warn!("Failed to remove cache file {}: {:?}", file_path, e), + } + } + } + + info!("Drive clearing completed"); + Ok(()) + } + + // Bootstrap state from other nodes, then fallback to RPC + fn bootstrap_state(&mut self, hypermap: &hypermap::Hypermap) -> anyhow::Result<()> { + info!("Starting state bootstrap process..."); + + // Try to bootstrap from other nodes first + if let Ok(()) = self.try_bootstrap_from_nodes() { + info!("Successfully bootstrapped from other nodes"); + } + + self.try_bootstrap_from_rpc(hypermap)?; + + // Mark as no longer starting + self.is_starting = false; + self.save(); + 
info!("Bootstrap process completed, cacher is now ready"); + Ok(()) + } + + // Try to bootstrap from other binding-cacher nodes + fn try_bootstrap_from_nodes(&mut self) -> anyhow::Result<()> { + // Create alternate drive for initfiles and read the test data + let alt_drive_path = vfs::create_drive(our().package_id(), "initfiles", None).unwrap(); + + // Try to read the cache_sources file from the initfiles drive + match vfs::open_file(&format!("{}/cache_sources", alt_drive_path), false, None) { + Ok(file) => { + match file.read() { + Ok(contents) => { + let content_str = String::from_utf8_lossy(&contents); + info!("Contents of cache_sources: {}", content_str); + + // Parse the JSON to get the vector of node names + match serde_json::from_str::>(&content_str) { + Ok(custom_cache_nodes) => { + if !custom_cache_nodes.is_empty() { + info!( + "Loading custom cache source nodes: {:?}", + custom_cache_nodes + ); + // Clear existing nodes and add custom ones + self.nodes.clear(); + for node_name in custom_cache_nodes { + self.nodes.push(node_name.clone()); + } + } else { + info!("Custom cache nodes list is empty, keeping existing node configuration"); + } + } + Err(e) => { + info!("Failed to parse cache_sources as JSON: {}, keeping existing node configuration", e); + } + } + } + Err(e) => { + info!( + "Failed to read cache_sources: {}, keeping existing node configuration", + e + ); + } + } + } + Err(e) => { + info!( + "Failed to open cache_sources: {}, keeping existing node configuration", + e + ); + } + } + + if self.nodes.is_empty() { + info!("No nodes configured for bootstrap, will fallback to RPC"); + return Err(anyhow::anyhow!("No nodes configured for bootstrap")); + } + + info!("Attempting to bootstrap from {} nodes", self.nodes.len()); + + let mut nodes = self.nodes.clone(); + + // If using default nodes, shuffle them for random order + let default_nodes: Vec = DEFAULT_NODES.iter().map(|s| s.to_string()).collect(); + if nodes == default_nodes { + nodes.shuffle(&mut thread_rng()); + } + + let mut nodes_not_yet_in_net = nodes.clone(); + let num_retries = 10; + for _ in 0..num_retries { + nodes_not_yet_in_net.retain(|node| { + let Ok(Ok(response)) = Request::new() + .target(("our", "net", "distro", "sys")) + .body(rmp_serde::to_vec(&NetAction::GetPeer(node.clone())).unwrap()) + .send_and_await_response(1) + else { + return true; // keep the node + }; + + !matches!( + rmp_serde::from_slice::(response.body()), + Ok(NetResponse::Peer(Some(_))), + ) + }); + if nodes_not_yet_in_net.is_empty() { + break; + } + std::thread::sleep(std::time::Duration::from_secs(1)); + } + if !nodes_not_yet_in_net.is_empty() { + error!("failed to get peering info for {nodes_not_yet_in_net:?}"); + } + + for node in nodes { + info!("Requesting logs from node: {}", node); + + let cacher_process_address = + Address::new(&node, ("binding-cacher", "hypermap-cacher", "sys")); + + if cacher_process_address == our() { + continue; + } + + // ping node for quicker failure if not online/providing/... 
+ let Ok(Ok(response)) = Request::to(cacher_process_address.clone()) + .body(CacherRequest::GetStatus) + .send_and_await_response(3) + else { + warn!("Node {node} failed to respond to ping; trying next one..."); + continue; + }; + let Ok(CacherResponse::GetStatus(_)) = response.body().try_into() else { + warn!("Node {node} failed to respond to ping with expected GetStatus; trying next one..."); + continue; + }; + + // get the logs + let get_logs_request = GetLogsByRangeRequest { + from_block: self.last_cached_block + 1, + to_block: None, // Get all available logs + }; + + match Request::to(cacher_process_address.clone()) + .body(CacherRequest::GetLogsByRange(get_logs_request)) + .send_and_await_response(15) + { + Ok(Ok(response_msg)) => match response_msg.body().try_into() { + Ok(CacherResponse::GetLogsByRange(Ok(get_logs))) => { + match get_logs { + GetLogsByRangeOkResponse::Logs((block, json_string)) => { + if let Ok(log_caches) = + serde_json::from_str::>(&json_string) + { + self.process_received_log_caches(log_caches)?; + } + if block > self.last_cached_block { + self.last_cached_block = block; + } + } + GetLogsByRangeOkResponse::Latest(block) => { + if block > self.last_cached_block { + self.last_cached_block = block; + } + } + } + return Ok(()); + } + Ok(CacherResponse::GetLogsByRange(Err(e))) => { + warn!("Node {} returned error: {}", cacher_process_address, e); + } + Ok(CacherResponse::IsStarting) => { + info!( + "Node {} is still starting, trying next node", + cacher_process_address + ); + } + Ok(CacherResponse::Rejected) => { + warn!("Node {} rejected our request", cacher_process_address); + } + Ok(_) => { + warn!( + "Node {} returned unexpected response type", + cacher_process_address + ); + } + Err(e) => { + warn!( + "Failed to parse response from {}: {:?}", + cacher_process_address, e + ); + } + }, + Ok(Err(e)) => { + warn!("Error response from {}: {:?}", cacher_process_address, e); + } + Err(e) => { + warn!( + "Failed to send request to {}: {:?}", + cacher_process_address, e + ); + } + } + } + + Err(anyhow::anyhow!("Failed to bootstrap from any node")) + } + + // Helper function to write nodes to cache_sources file + fn write_nodes_to_file(&self) -> anyhow::Result<()> { + info!("Beginning of subroutine"); + let alt_drive_path = vfs::create_drive(our().package_id(), "initfiles", None)?; + info!("drive path defined"); + let nodes_json = serde_json::to_string(&self.nodes)?; + info!("nodes_json defined"); + let file_path = format!("{}/cache_sources", alt_drive_path); + info!("file_path defined"); + + // Open file in write mode which should truncate, but to be safe we'll write exact bytes + let mut file = vfs::open_file(&file_path, true, None)?; + + // Get the bytes to write + let bytes = nodes_json.as_bytes(); + + // Write all bytes + file.write_all(bytes)?; + + // Explicitly set the file length to the exact size of what we wrote + // This ensures any old content beyond this point is truncated + file.set_len(bytes.len() as u64)?; + + info!("Updated cache_sources with {} nodes", self.nodes.len()); + Ok(()) + } + + // Process received log caches and write them to VFS + fn process_received_log_caches( + &mut self, + log_caches: Vec, + ) -> anyhow::Result<()> { + info!("Processing {} received log caches", log_caches.len()); + + for log_cache in log_caches { + // Validate the log cache signature + if !self.validate_log_cache(&log_cache)? 
{ + warn!("Invalid log cache signature, skipping"); + continue; + } + + // Generate filename from metadata + let filename = format!( + "{}-chain{}-from{}-to{}-protocol{}.json", + log_cache + .metadata + .time_created + .replace(":", "") + .replace("-", ""), + log_cache.metadata.chain_id, + log_cache.metadata.from_block, + log_cache.metadata.to_block, + PROTOCOL_VERSION + ); + + // Write log cache to VFS + let file_path = format!("{}/{}", self.drive_path, filename); + let log_cache_bytes = serde_json::to_vec(&log_cache)?; + + let mut file = vfs::open_file(&file_path, true, None)?; + file.write_all(&log_cache_bytes)?; + + info!("Wrote log cache file: {}", file_path); + + // Update manifest + let file_hash = format!("0x{}", hex::encode(keccak256(&log_cache_bytes))); + let manifest_item = ManifestItemInternal { + metadata: log_cache.metadata.clone(), + is_empty: log_cache.logs.is_empty(), + file_hash, + file_name: filename.clone(), + }; + + self.manifest.items.insert(filename, manifest_item); + + // Update last cached block if this cache goes beyond it + if let Ok(to_block) = log_cache.metadata.to_block.parse::() { + if to_block > self.last_cached_block { + self.last_cached_block = to_block; + } + } + } + + // Write updated manifest + self.write_manifest()?; + + Ok(()) + } + + // Validate a log cache signature + fn validate_log_cache(&self, log_cache: &LogCacheInternal) -> anyhow::Result { + let from_block = log_cache.metadata.from_block.parse::()?; + let to_block = log_cache.metadata.to_block.parse::()?; + + let mut bytes_to_verify = serde_json::to_vec(&log_cache.logs)?; + bytes_to_verify.extend_from_slice(&from_block.to_be_bytes()); + bytes_to_verify.extend_from_slice(&to_block.to_be_bytes()); + let hashed_data = keccak256(&bytes_to_verify); + + let signature_hex = log_cache.metadata.signature.trim_start_matches("0x"); + let signature_bytes = hex::decode(signature_hex)?; + + let created_by_address = log_cache.metadata.created_by.parse::
()?; + + Ok(sign::net_key_verify( + hashed_data.to_vec(), + &created_by_address, + signature_bytes, + )?) + } + + // Write manifest to VFS + fn write_manifest(&self) -> anyhow::Result<()> { + let manifest_bytes = serde_json::to_vec(&self.manifest)?; + let manifest_path = format!("{}/{}", self.drive_path, self.manifest.manifest_filename); + let manifest_file = vfs::open_file(&manifest_path, true, None)?; + manifest_file.write(&manifest_bytes)?; + info!("Updated manifest file: {}", manifest_path); + Ok(()) + } + + // Determine optimal batch size dynamically + fn determine_batch_size(&mut self, hypermap: &hypermap::Hypermap) -> anyhow::Result<()> { + if self.block_batch_size > 0 { + // Already determined + return Ok(()); + } + + let current_block = match hypermap.provider.get_block_number() { + Ok(block_num) => block_num, + Err(e) => { + error!("Failed to get current block number: {:?}", e); + // Fall back to default if we can't get the current block + self.block_batch_size = DEFAULT_BLOCK_BATCH_SIZE; + return Ok(()); + } + }; + + // Start with the difference between current block and HYPERMAP_FIRST_BLOCK + let mut batch_size = current_block.saturating_sub(hypermap::HYPERMAP_FIRST_BLOCK); + + // Ensure we have at least a minimum batch size + if batch_size < 1 { + batch_size = DEFAULT_BLOCK_BATCH_SIZE; + self.block_batch_size = batch_size; + info!("Using default batch size: {batch_size}"); + return Ok(()); + } + + info!("Determining optimal batch size starting from {batch_size}"); + + // Try progressively smaller batch sizes until we find one that works + loop { + let from_block = hypermap::HYPERMAP_FIRST_BLOCK; + let to_block = from_block + batch_size; + + let filter = eth::Filter::new() + .address(self.hypermap_binding_address) + .from_block(from_block) + .to_block(eth::BlockNumberOrTag::Number(to_block)); + + match hypermap.provider.get_logs(&filter) { + Ok(_) => { + // Success! 
This batch size works + self.block_batch_size = batch_size; + info!("Successfully determined batch size: {}", batch_size); + return Ok(()); + } + Err(e) => { + // Request failed or timed out, try smaller batch + warn!("Batch size {} failed: {:?}, halving...", batch_size, e); + batch_size = batch_size / 2; + + // Don't go below a minimum threshold + if batch_size < 10 { + warn!("Could not determine optimal batch size, using minimum: {DEFAULT_BLOCK_BATCH_SIZE}"); + self.block_batch_size = DEFAULT_BLOCK_BATCH_SIZE; + return Ok(()); + } + } + } + } + } + + // Fallback to RPC bootstrap - catch up from where we left off + fn try_bootstrap_from_rpc(&mut self, hypermap: &hypermap::Hypermap) -> anyhow::Result<()> { + info!( + "Bootstrapping from RPC, starting from block {}", + self.last_cached_block + 1 + ); + + // Catch up remainder (or as fallback) using RPC + self.cache_logs_and_update_manifest(hypermap)?; + + // run it twice for fresh boot case: + // - initial bootstrap takes much time + // - in that time, the block you are updating to is no longer the head of the chain + // - so run again to get to the head of the chain + self.cache_logs_and_update_manifest(hypermap)?; + + Ok(()) + } + + fn to_wit_manifest(&self) -> WitManifest { + let items = self + .manifest + .items + .iter() + .map(|(k, v)| { + let wit_meta = WitLogsMetadata { + chain_id: v.metadata.chain_id.clone(), + from_block: v.metadata.from_block.clone(), + to_block: v.metadata.to_block.clone(), + time_created: v.metadata.time_created.clone(), + created_by: v.metadata.created_by.clone(), + signature: v.metadata.signature.clone(), + }; + let wit_item = WitManifestItem { + metadata: wit_meta, + is_empty: v.is_empty, + file_hash: v.file_hash.clone(), + file_name: v.file_name.clone(), + }; + (k.clone(), wit_item) + }) + .collect::>(); + + WitManifest { + items, + manifest_filename: self.manifest.manifest_filename.clone(), + chain_id: self.manifest.chain_id.clone(), + protocol_version: self.manifest.protocol_version.clone(), + } + } +} + +#[derive(Debug, Serialize, Deserialize)] +enum HttpApi { + GetManifest, + GetLogCacheFile(String), + GetStatus, +} + +fn http_handler( + state: &mut State, + path: &str, +) -> anyhow::Result<(http::server::HttpResponse, Vec)> { + let response = http::server::HttpResponse::new(http::StatusCode::OK) + .header("Content-Type", "application/json"); + + // Basic routing based on path + Ok(if path == "/manifest" || path == "/manifest.json" { + let manifest_path = format!("{}/{}", state.drive_path, state.manifest.manifest_filename); + let manifest_file = vfs::open_file(&manifest_path, true, None)?; + match manifest_file.read() { + Ok(content) => (response, content), + Err(e) => { + error!( + "HTTP: Failed to read manifest file {}: {:?}", + manifest_path, e + ); + ( + http::server::HttpResponse::new(http::StatusCode::NOT_FOUND), + b"Manifest not found".to_vec(), + ) + } + } + } else if path.starts_with("/log-cache/") { + let filename = path.trim_start_matches("/log-cache/"); + if filename.is_empty() || filename.contains("..") { + // Basic security check + return Ok(( + http::server::HttpResponse::new(http::StatusCode::BAD_REQUEST), + b"Invalid filename".to_vec(), + )); + } + let log_cache_path = format!("{}/{}", state.drive_path, filename); + let log_cache_file = vfs::open_file(&log_cache_path, true, None)?; + match log_cache_file.read() { + Ok(content) => (response, content), + Err(e) => { + error!( + "HTTP: Failed to read log cache file {}: {:?}", + log_cache_path, e + ); + ( + 
http::server::HttpResponse::new(http::StatusCode::NOT_FOUND), + b"Log cache file not found".to_vec(), + ) + } + } + } else if path == "/status" { + let status_info = CacherStatus { + last_cached_block: state.last_cached_block, + chain_id: state.chain_id.clone(), + protocol_version: state.protocol_version.clone(), + next_cache_attempt_in_seconds: if state.is_cache_timer_live { + Some(state.cache_interval_s) + } else { + None + }, + manifest_filename: state.manifest.manifest_filename.clone(), + log_files_count: state.manifest.items.len() as u32, + our_address: our().to_string(), + is_providing: state.is_providing, + }; + match serde_json::to_vec(&status_info) { + Ok(body) => (response, body), + Err(e) => { + error!("HTTP: Failed to serialize status: {:?}", e); + ( + http::server::HttpResponse::new(http::StatusCode::INTERNAL_SERVER_ERROR), + b"Error serializing status".to_vec(), + ) + } + } + } else { + ( + http::server::HttpResponse::new(http::StatusCode::NOT_FOUND), + b"Not Found".to_vec(), + ) + }) +} + +fn handle_request( + our: &Address, + source: &Address, + state: &mut State, + request: CacherRequest, +) -> anyhow::Result<()> { + let is_local = is_local_request(our, source); + + // If we're still starting, respond with IsStarting to all requests + if state.is_starting { + Response::new().body(CacherResponse::IsStarting).send()?; + return Ok(()); + } + + if !is_local && source.process.to_string() != "binding-cacher:hypermap-cacher:sys" { + warn!("Rejecting remote request from non-binding-cacher: {source}"); + Response::new().body(CacherResponse::Rejected).send()?; + return Ok(()); + } + + if !is_local + && !state.is_providing + && source.process.to_string() == "binding-cacher:hypermap-cacher:sys" + { + warn!("Rejecting remote request from {source} - not in provider mode"); + Response::new().body(CacherResponse::Rejected).send()?; + return Ok(()); + } + let response_body = match request { + CacherRequest::GetManifest => { + let manifest_path = + format!("{}/{}", state.drive_path, state.manifest.manifest_filename); + if state.manifest.items.is_empty() && vfs::metadata(&manifest_path, None).is_err() { + CacherResponse::GetManifest(None) + } else { + // Ensure manifest is loaded from VFS if state is fresh and manifest file exists + // This is usually handled by State::load, but as a fallback: + if state.manifest.items.is_empty() { + // If manifest in memory is empty, try to load it + let manifest_file = vfs::open_file(&manifest_path, true, None)?; + if let Ok(bytes) = manifest_file.read() { + if let Ok(disk_manifest) = + serde_json::from_slice::(&bytes) + { + state.manifest = disk_manifest; + } + } + } + CacherResponse::GetManifest(Some(state.to_wit_manifest())) + } + } + CacherRequest::GetLogCacheContent(filename) => { + let log_cache_path = format!("{}/{}", state.drive_path, filename); + let log_cache_file = vfs::open_file(&log_cache_path, true, None)?; + match log_cache_file.read() { + Ok(content_bytes) => { + // Content is raw JSON bytes of LogCacheInternal. + // The WIT expects a string. 
+ match String::from_utf8(content_bytes) { + Ok(content_str) => { + CacherResponse::GetLogCacheContent(Ok(Some(content_str))) + } + Err(e) => { + error!("Failed to convert log cache content to UTF-8 string: {}", e); + CacherResponse::GetLogCacheContent(Err(format!( + "File content not valid UTF-8: {}", + e + ))) + } + } + } + Err(_) => CacherResponse::GetLogCacheContent(Ok(None)), + } + } + CacherRequest::GetStatus => { + let status = CacherStatus { + last_cached_block: state.last_cached_block, + chain_id: state.chain_id.clone(), + protocol_version: state.protocol_version.clone(), + next_cache_attempt_in_seconds: if state.is_cache_timer_live { + Some(state.cache_interval_s) + } else { + None + }, + manifest_filename: state.manifest.manifest_filename.clone(), + log_files_count: state.manifest.items.len() as u32, + our_address: our.to_string(), + is_providing: state.is_providing, + }; + CacherResponse::GetStatus(status) + } + CacherRequest::GetLogsByRange(req_params) => { + let mut relevant_caches: Vec = Vec::new(); + let req_from_block = req_params.from_block; + // If req_params.to_block is None, we effectively want to go up to the highest block available in caches. + // For simplicity in overlap calculation, we can treat None as u64::MAX here. + let effective_req_to_block = req_params.to_block.unwrap_or(u64::MAX); + + for item in state.manifest.items.values() { + // Skip items that don't have an actual file (e.g., empty log ranges not written to disk). + if item.file_name.is_empty() { + continue; + } + + let cache_from = match item.metadata.from_block.parse::() { + Ok(b) => b, + Err(_) => { + warn!( + "Could not parse from_block for cache item {}: {}", + item.file_name, item.metadata.from_block + ); + continue; + } + }; + let cache_to = match item.metadata.to_block.parse::() { + Ok(b) => b, + Err(_) => { + warn!( + "Could not parse to_block for cache item {}: {}", + item.file_name, item.metadata.to_block + ); + continue; + } + }; + + // Check for overlap: max(start1, start2) <= min(end1, end2) + if max(req_from_block, cache_from) <= min(effective_req_to_block, cache_to) { + // This cache file overlaps with the requested range. + let file_vfs_path = format!("{}/{}", state.drive_path, item.file_name); + match vfs::open_file(&file_vfs_path, false, None) { + Ok(file) => match file.read() { + Ok(content_bytes) => { + match serde_json::from_slice::(&content_bytes) { + Ok(log_cache) => relevant_caches.push(log_cache), + Err(e) => { + error!( + "Failed to deserialize LogCacheInternal from {}: {:?}", + item.file_name, e + ); + // Decide: return error or skip this cache? For now, skip. + } + } + } + Err(e) => error!("Failed to read VFS file {}: {:?}", item.file_name, e), + }, + Err(e) => error!("Failed to open VFS file {}: {e:?}", item.file_name), + } + } + } + + // Sort caches by their from_block. 
+ relevant_caches + .sort_by_key(|cache| cache.metadata.from_block.parse::().unwrap_or(0)); + + if relevant_caches.is_empty() { + CacherResponse::GetLogsByRange(Ok(GetLogsByRangeOkResponse::Latest( + state.last_cached_block, + ))) + } else { + match serde_json::to_string(&relevant_caches) { + Ok(json_string) => CacherResponse::GetLogsByRange(Ok( + GetLogsByRangeOkResponse::Logs((state.last_cached_block, json_string)), + )), + Err(e) => CacherResponse::GetLogsByRange(Err(format!( + "Failed to serialize relevant caches: {e}" + ))), + } + } + } + CacherRequest::StartProviding => { + if !is_local { + // should never happen: should be caught in check above + Response::new().body(CacherResponse::Rejected).send()?; + return Ok(()); + } + state.is_providing = true; + state.save(); + info!("Provider mode enabled"); + CacherResponse::StartProviding(Ok("Provider mode enabled".to_string())) + } + CacherRequest::StopProviding => { + if !is_local { + Response::new().body(CacherResponse::Rejected).send()?; + warn!("Rejecting remote request from {source} to alter provider mode"); + return Ok(()); + } + state.is_providing = false; + state.save(); + info!("Provider mode disabled"); + CacherResponse::StopProviding(Ok("Provider mode disabled".to_string())) + } + CacherRequest::SetNodes(new_nodes) => { + if !is_local { + Response::new().body(CacherResponse::Rejected).send()?; + warn!("Rejecting remote request from {source} to set nodes"); + return Ok(()); + } + state.nodes = new_nodes; + state.save(); + if let Err(e) = state.write_nodes_to_file() { + error!("Failed to write nodes to cache_sources: {:?}", e); + } + info!("Nodes updated to: {:?}", state.nodes); + CacherResponse::SetNodes(Ok("Nodes updated successfully".to_string())) + } + CacherRequest::Reset(custom_nodes) => { + if !is_local { + Response::new().body(CacherResponse::Rejected).send()?; + warn!("Rejecting remote request from {source} to reset"); + return Ok(()); + } + + info!("Resetting binding-cacher state and clearing VFS..."); + + // Clear all files from the drive + if let Err(e) = state.clear_drive() { + error!("Failed to clear drive during reset: {:?}", e); + CacherResponse::Reset(Err(format!("Failed to clear drive: {:?}", e))) + } else { + // Create new state with custom nodes if provided, otherwise use defaults + let nodes = match custom_nodes { + Some(nodes) => nodes, + None => DEFAULT_NODES.iter().map(|s| s.to_string()).collect(), + }; + + *state = State::new(&state.drive_path); + state.nodes = nodes; + state.save(); + if let Err(e) = state.write_nodes_to_file() { + error!("Failed to write nodes to cache_sources: {:?}", e); + } + info!( + "binding-cacher reset complete. New nodes: {:?}", + state.nodes + ); + CacherResponse::Reset(Ok( + "Reset completed successfully. Binding Cacher will restart with new settings." + .to_string(), + )) + } + } + }; + + Response::new().body(response_body).send()?; + Ok(()) +} + +fn main_loop( + our: &Address, + state: &mut State, + hypermap: &hypermap::Hypermap, + server: &http::server::HttpServer, +) -> anyhow::Result<()> { + info!("Hypermap Binding Cacher main_loop started. 
Our address: {}", our); + info!( + "Monitoring Binding contract: {}", + state.hypermap_binding_address.to_string() + ); + info!( + "Chain ID: {}, Protocol Version: {}", + state.chain_id, state.protocol_version + ); + info!("Last cached block: {}", state.last_cached_block); + + // Always bootstrap on start to get latest state from other nodes or RPC + while state.is_starting { + match state.bootstrap_state(hypermap) { + Ok(_) => info!("Bootstrap process completed successfully."), + Err(e) => { + error!("Error during bootstrap process: {:?}", e); + std::thread::sleep(std::time::Duration::from_secs(RETRY_DELAY_S)); + } + } + } + + // Set up the main caching timer. + info!( + "Setting cache timer for {} seconds.", + state.cache_interval_s + ); + timer::set_timer(state.cache_interval_s * 1000, Some(b"cache_cycle".to_vec())); + state.is_cache_timer_live = true; + state.save(); + + loop { + let Ok(message) = await_message() else { + warn!("Failed to get message, continuing loop."); + continue; + }; + let source = message.source(); + + if message.is_request() { + if source.process == ProcessId::from_str("http-server:distro:sys").unwrap() { + // HTTP request from the system's HTTP server process + let Ok(http::server::HttpServerRequest::Http(http_request)) = + server.parse_request(message.body()) + else { + error!("Failed to parse HTTP request from http-server:distro:sys"); + // Potentially send an error response back if possible/expected + continue; + }; + let (http_response, body) = http_handler(state, &http_request.path()?)?; + Response::new() + .body(serde_json::to_vec(&http_response).unwrap()) + .blob_bytes(body) + .send()?; + } else { + // Standard process-to-process request + match serde_json::from_slice::(message.body()) { + Ok(request) => { + if let Err(e) = handle_request(our, &source, state, request) { + error!("Error handling request from {:?}: {:?}", source, e); + } + } + Err(e) => { + error!( + "Failed to deserialize CacherRequest from {:?}: {:?}", + source, e + ); + } + } + } + } else { + // It's a Response or other kind of message + if source.process == ProcessId::from_str("timer:distro:sys").unwrap() { + if message.context() == Some(b"cache_cycle") { + info!("Cache timer triggered."); + state.is_cache_timer_live = false; + match state.cache_logs_and_update_manifest(hypermap) { + Ok(_) => info!("Periodic cache cycle complete."), + Err(e) => error!("Error during periodic cache cycle: {:?}", e), + } + // Reset the timer for the next cycle + if !state.is_cache_timer_live { + timer::set_timer( + state.cache_interval_s * 1000, + Some(b"cache_cycle".to_vec()), + ); + state.is_cache_timer_live = true; + state.save(); + } + } + } else { + debug!( + "Received unhandled response or other message from {:?}.", + source + ); + } + } + } +} + +call_init!(init); +fn init(our: Address) { + init_logging(Level::INFO, Level::DEBUG, None, None, None).unwrap(); + info!("Hypermap Binding Cacher process starting..."); + + let drive_path = vfs::create_drive(our.package_id(), "binding-cache", None).unwrap(); + // Create alternate drive for initfiles and read the test data + let alt_drive_path = vfs::create_drive(our.package_id(), "initfiles", None).unwrap(); + + // Try to read the cache_sources file from the initfiles drive + match vfs::open_file(&format!("{}/cache_sources", alt_drive_path), false, None) { + Ok(file) => match file.read() { + Ok(contents) => { + let content_str = String::from_utf8_lossy(&contents); + info!("Contents of cache_sources: {}", content_str); + } + Err(e) => { + info!("Failed to 
read cache_sources: {}", e); + } + }, + Err(e) => { + info!("Failed to open cache_sources: {}", e); + } + } + + let bind_config = http::server::HttpBindingConfig::default().authenticated(false); + let mut server = http::server::HttpServer::new(5); + + let hypermap_provider = hypermap::Hypermap::default(60); + + server + .bind_http_path("/manifest", bind_config.clone()) + .expect("Failed to bind /manifest"); + server + .bind_http_path("/manifest.json", bind_config.clone()) + .expect("Failed to bind /manifest.json"); + server + .bind_http_path("/log-cache/*", bind_config.clone()) + .expect("Failed to bind /log-cache/*"); + server + .bind_http_path("/status", bind_config.clone()) + .expect("Failed to bind /status"); + info!("Bound HTTP paths: /manifest, /log-cache/*, /status"); + + let mut state = State::load(&drive_path); + + loop { + match main_loop(&our, &mut state, &hypermap_provider, &server) { + Ok(()) => { + // main_loop should not exit with Ok in normal operation as it's an infinite loop. + error!("main_loop exited unexpectedly with Ok. Restarting."); + } + Err(e) => { + error!("main_loop exited with error: {:?}. Restarting.", e); + std::thread::sleep(std::time::Duration::from_secs(5)); + } + } + // Reload state in case of restart, or re-initialize if necessary. + state = State::load(&drive_path); + } +} diff --git a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml index bb9b33d5d..7b2c28c2a 100644 --- a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml @@ -18,7 +18,7 @@ alloy = { version = "0.8.1", features = [ ] } chrono = "0.4.41" hex = "0.4.3" -hyperware_process_lib = { version = "2.1.0", features = ["logging"] } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e", features = ["logging"] } process_macros = "0.1.0" rand = "0.8" rmp-serde = "1.1.2" diff --git a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs index e64affc86..f42b74ba0 100644 --- a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs @@ -24,7 +24,7 @@ use hyperware_process_lib::{ wit_bindgen::generate!({ path: "../target/wit", - world: "hypermap-cacher-sys-v0", + world: "hypermap-cacher-sys-v1", generate_unused_types: true, additional_derives: [serde::Deserialize, serde::Serialize, process_macros::SerdeJsonInto], }); @@ -1298,11 +1298,11 @@ fn handle_request( error!("Failed to write nodes to cache_sources: {:?}", e); } info!( - "Hypermap-cacher reset complete. New nodes: {:?}", + "hypermap-cacher reset complete. New nodes: {:?}", state.nodes ); CacherResponse::Reset(Ok( - "Reset completed successfully. Cacher will restart with new settings." + "Reset completed successfully. Hypermap Cacher will restart with new settings." 
.to_string(), )) } diff --git a/hyperdrive/packages/hypermap-cacher/pkg/manifest.json b/hyperdrive/packages/hypermap-cacher/pkg/manifest.json index ec498453e..c4a734dcd 100644 --- a/hyperdrive/packages/hypermap-cacher/pkg/manifest.json +++ b/hyperdrive/packages/hypermap-cacher/pkg/manifest.json @@ -1,4 +1,30 @@ [ + { + "process_name": "binding-cacher", + "process_wasm_path": "/binding-cacher.wasm", + "on_exit": "Restart", + "request_networking": true, + "request_capabilities": [ + "eth:distro:sys", + "http-server:distro:sys", + "net:distro:sys", + "sign:sign:sys", + "terminal:terminal:sys", + "timer:distro:sys", + "vfs:distro:sys" + ], + "grant_capabilities": [ + "eth:distro:sys", + "http-server:distro:sys", + "net:distro:sys", + "sign:sign:sys", + "terminal:terminal:sys", + "timer:distro:sys", + "vfs:distro:sys" + ], + "public": false, + "wit_version": 1 + }, { "process_name": "hypermap-cacher", "process_wasm_path": "/hypermap-cacher.wasm", diff --git a/hyperdrive/packages/hypermap-cacher/pkg/scripts.json b/hyperdrive/packages/hypermap-cacher/pkg/scripts.json index aab9de842..5b1bfaefe 100644 --- a/hyperdrive/packages/hypermap-cacher/pkg/scripts.json +++ b/hyperdrive/packages/hypermap-cacher/pkg/scripts.json @@ -4,9 +4,11 @@ "public": false, "request_networking": false, "request_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "grant_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "wit_version": 1 @@ -16,9 +18,11 @@ "public": false, "request_networking": false, "request_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "grant_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "wit_version": 1 @@ -28,9 +32,11 @@ "public": false, "request_networking": false, "request_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "grant_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "wit_version": 1 @@ -40,9 +46,11 @@ "public": false, "request_networking": false, "request_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "grant_capabilities": [ + "binding-cacher:hypermap-cacher:sys", "hypermap-cacher:hypermap-cacher:sys" ], "wit_version": 1 diff --git a/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml b/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml index 64c780831..11f07278a 100644 --- a/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = "2.1.0" +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/reset-cache/src/lib.rs b/hyperdrive/packages/hypermap-cacher/reset-cache/src/lib.rs index 45adadd23..495847667 100644 --- a/hyperdrive/packages/hypermap-cacher/reset-cache/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/reset-cache/src/lib.rs @@ -12,12 +12,13 @@ //! reset:hypermap-cacher:sys # Reset with default nodes //! 
reset:hypermap-cacher:sys alice.os bob.os # Reset with custom nodes +use crate::hyperware::process::binding_cacher::{BindingCacherRequest, BindingCacherResponse}; use crate::hyperware::process::hypermap_cacher::{CacherRequest, CacherResponse}; use hyperware_process_lib::{await_next_message_body, call_init, println, Address, Request}; wit_bindgen::generate!({ path: "../target/wit", - world: "hypermap-cacher-sys-v0", + world: "hypermap-cacher-sys-v1", generate_unused_types: true, additional_derives: [serde::Deserialize, serde::Serialize, process_macros::SerdeJsonInto], }); @@ -33,13 +34,14 @@ fn init(_our: Address) { let parts: Vec<&str> = args.split_whitespace().collect(); let custom_nodes = if parts.is_empty() { - println!("Resetting hypermap-cacher with default nodes..."); + println!("Resetting cachers with default nodes..."); None } else { let nodes: Vec = parts.iter().map(|s| s.to_string()).collect(); - println!("Resetting hypermap-cacher with custom nodes: {:?}", nodes); + println!("Resetting cachers with custom nodes: {:?}", nodes); Some(nodes) }; + let binding_custom_nodes = custom_nodes.clone(); let response = Request::to(("our", "hypermap-cacher", "hypermap-cacher", "sys")) .body(CacherRequest::Reset(custom_nodes)) @@ -51,7 +53,7 @@ fn init(_our: Address) { println!("✓ {}", msg); } Ok(CacherResponse::Reset(Err(err))) => { - println!("✗ Failed to reset: {}", err); + println!("✗ Failed to reset hypermap-cacher: {}", err); } _ => { println!("✗ Unexpected response from hypermap-cacher"); @@ -64,4 +66,28 @@ fn init(_our: Address) { println!("✗ Communication error: {:?}", err); } } + + let response = Request::to(("our", "binding-cacher", "hypermap-cacher", "sys")) + .body(BindingCacherRequest::Reset(binding_custom_nodes)) + .send_and_await_response(10); // Give it more time for reset operations + + match response { + Ok(Ok(message)) => match message.body().try_into() { + Ok(BindingCacherResponse::Reset(Ok(msg))) => { + println!("✓ {}", msg); + } + Ok(BindingCacherResponse::Reset(Err(err))) => { + println!("✗ Failed to reset binding-cacher: {}", err); + } + _ => { + println!("✗ Unexpected response from binding-cacher"); + } + }, + Ok(Err(err)) => { + println!("✗ Request failed: {:?}", err); + } + Err(err) => { + println!("✗ Communication error: {:?}", err); + } + } } diff --git a/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml b/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml index 6a00070f2..13874437d 100644 --- a/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = "2.1.0" +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/set-nodes/src/lib.rs b/hyperdrive/packages/hypermap-cacher/set-nodes/src/lib.rs index 9236ca28e..c8479ef9e 100644 --- a/hyperdrive/packages/hypermap-cacher/set-nodes/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/set-nodes/src/lib.rs @@ -10,12 +10,13 @@ //! Example: //! 
set-nodes:hypermap-cacher:sys alice.os bob.os charlie.os +use crate::hyperware::process::binding_cacher::{BindingCacherRequest, BindingCacherResponse}; use crate::hyperware::process::hypermap_cacher::{CacherRequest, CacherResponse}; use hyperware_process_lib::{await_next_message_body, call_init, println, Address, Request}; wit_bindgen::generate!({ path: "../target/wit", - world: "hypermap-cacher-sys-v0", + world: "hypermap-cacher-sys-v1", generate_unused_types: true, additional_derives: [serde::Deserialize, serde::Serialize, process_macros::SerdeJsonInto], }); @@ -37,6 +38,7 @@ fn init(_our: Address) { } let nodes: Vec = parts.iter().map(|s| s.to_string()).collect(); + let binding_nodes = nodes.clone(); println!("Setting hypermap-cacher nodes to: {:?}", nodes); @@ -50,7 +52,7 @@ fn init(_our: Address) { println!("✓ {}", msg); } Ok(CacherResponse::SetNodes(Err(err))) => { - println!("✗ Failed to set nodes: {}", err); + println!("✗ Failed to set nodes for hypermap-cacher: {}", err); } _ => { println!("✗ Unexpected response from hypermap-cacher"); @@ -63,4 +65,30 @@ fn init(_our: Address) { println!("✗ Communication error: {:?}", err); } } + + println!("Setting binding-cacher nodes to: {:?}", binding_nodes); + + let response = Request::to(("our", "binding-cacher", "hypermap-cacher", "sys")) + .body(BindingCacherRequest::SetNodes(binding_nodes)) + .send_and_await_response(5); + + match response { + Ok(Ok(message)) => match message.body().try_into() { + Ok(BindingCacherResponse::SetNodes(Ok(msg))) => { + println!("✓ {}", msg); + } + Ok(BindingCacherResponse::SetNodes(Err(err))) => { + println!("✗ Failed to set nodes for binding-cacher: {}", err); + } + _ => { + println!("✗ Unexpected response from binding-cacher"); + } + }, + Ok(Err(err)) => { + println!("✗ Request failed: {:?}", err); + } + Err(err) => { + println!("✗ Communication error: {:?}", err); + } + } } diff --git a/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml b/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml index fd508eefc..198ebd7d4 100644 --- a/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = "2.1.0" +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/start-providing/src/lib.rs b/hyperdrive/packages/hypermap-cacher/start-providing/src/lib.rs index 8162c77ab..f74f7abe7 100644 --- a/hyperdrive/packages/hypermap-cacher/start-providing/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/start-providing/src/lib.rs @@ -1,9 +1,10 @@ +use crate::hyperware::process::binding_cacher::{BindingCacherRequest, BindingCacherResponse}; use crate::hyperware::process::hypermap_cacher::{CacherRequest, CacherResponse}; use hyperware_process_lib::{call_init, println, Address, Request}; wit_bindgen::generate!({ path: "../target/wit", - world: "hypermap-cacher-sys-v0", + world: "hypermap-cacher-sys-v1", generate_unused_types: true, additional_derives: [serde::Deserialize, serde::Serialize, process_macros::SerdeJsonInto], }); @@ -22,7 +23,7 @@ fn init(_our: Address) { println!("✓ {}", msg); } Ok(CacherResponse::StartProviding(Err(err))) => { - println!("✗ Failed to enable provider mode: {}", err); + println!("✗ Failed to enable hypermap-cacher provider mode: {}", 
err); } _ => { println!("✗ Unexpected response from hypermap-cacher"); @@ -35,4 +36,30 @@ fn init(_our: Address) { println!("✗ Communication error: {:?}", err); } } + + println!("Enabling binding-cacher provider mode..."); + + let response = Request::to(("our", "binding-cacher", "hypermap-cacher", "sys")) + .body(BindingCacherRequest::StartProviding) + .send_and_await_response(5); + + match response { + Ok(Ok(message)) => match message.body().try_into() { + Ok(BindingCacherResponse::StartProviding(Ok(msg))) => { + println!("✓ {}", msg); + } + Ok(BindingCacherResponse::StartProviding(Err(err))) => { + println!("✗ Failed to enable binding-cacher provider mode: {}", err); + } + _ => { + println!("✗ Unexpected response from binding-cacher"); + } + }, + Ok(Err(err)) => { + println!("✗ Request failed: {:?}", err); + } + Err(err) => { + println!("✗ Communication error: {:?}", err); + } + } } diff --git a/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml b/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml index dfbc1efcf..937d0fd2e 100644 --- a/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = "2.1.0" +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/stop-providing/src/lib.rs b/hyperdrive/packages/hypermap-cacher/stop-providing/src/lib.rs index bf4491ca7..e6d55b831 100644 --- a/hyperdrive/packages/hypermap-cacher/stop-providing/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/stop-providing/src/lib.rs @@ -1,9 +1,10 @@ +use crate::hyperware::process::binding_cacher::{BindingCacherRequest, BindingCacherResponse}; use crate::hyperware::process::hypermap_cacher::{CacherRequest, CacherResponse}; use hyperware_process_lib::{call_init, println, Address, Request}; wit_bindgen::generate!({ path: "../target/wit", - world: "hypermap-cacher-sys-v0", + world: "hypermap-cacher-sys-v1", generate_unused_types: true, additional_derives: [serde::Deserialize, serde::Serialize, process_macros::SerdeJsonInto], }); @@ -22,7 +23,7 @@ fn init(_our: Address) { println!("✓ {}", msg); } Ok(CacherResponse::StopProviding(Err(err))) => { - println!("✗ Failed to disable provider mode: {}", err); + println!("✗ Failed to disable hypermap-cacher provider mode: {}", err); } _ => { println!("✗ Unexpected response from hypermap-cacher"); @@ -35,4 +36,30 @@ fn init(_our: Address) { println!("✗ Communication error: {:?}", err); } } + + println!("Disabling binding-cacher provider mode..."); + + let response = Request::to(("our", "binding-cacher", "hypermap-cacher", "sys")) + .body(BindingCacherRequest::StopProviding) + .send_and_await_response(5); + + match response { + Ok(Ok(message)) => match message.body().try_into() { + Ok(BindingCacherResponse::StopProviding(Ok(msg))) => { + println!("✓ {}", msg); + } + Ok(BindingCacherResponse::StopProviding(Err(err))) => { + println!("✗ Failed to disable binding-cacher provider mode: {}", err); + } + _ => { + println!("✗ Unexpected response from binding-cacher"); + } + }, + Ok(Err(err)) => { + println!("✗ Request failed: {:?}", err); + } + Err(err) => { + println!("✗ Communication error: {:?}", err); + } + } } diff --git a/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx 
b/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx index 23f7c8bb3..471d50e4d 100644 --- a/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx +++ b/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx @@ -1,5 +1,5 @@ import { useState, useEffect, FormEvent, useCallback } from "react"; -import { Link, useNavigate } from "react-router-dom"; +import { useNavigate } from "react-router-dom"; import { toAscii } from "idna-uts46-hx"; import EnterHnsName from "../components/EnterHnsName"; import Loader from "../components/Loader"; From 6b3d1b1ae38d89352e1af87f795645518cabc0a8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 19:28:36 +0000 Subject: [PATCH 2/6] Format Rust code using rustfmt --- .../packages/hypermap-cacher/binding-cacher/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs b/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs index e7593bdfb..9ed47a9e2 100644 --- a/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs @@ -1322,7 +1322,10 @@ fn main_loop( hypermap: &hypermap::Hypermap, server: &http::server::HttpServer, ) -> anyhow::Result<()> { - info!("Hypermap Binding Cacher main_loop started. Our address: {}", our); + info!( + "Hypermap Binding Cacher main_loop started. Our address: {}", + our + ); info!( "Monitoring Binding contract: {}", state.hypermap_binding_address.to_string() From 570d6e4eec1713875dcc881ebcbe3d4307c662d0 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Wed, 8 Oct 2025 20:02:37 -0400 Subject: [PATCH 3/6] emphasize direct nodes --- .../src/register-ui/src/components/CacheSourceTooltip.tsx | 2 +- hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx | 2 +- hyperdrive/src/register-ui/src/pages/Login.tsx | 2 +- hyperdrive/src/register-ui/src/pages/MintCustom.tsx | 2 +- hyperdrive/src/register-ui/src/pages/ResetName.tsx | 2 +- hyperdrive/src/register-ui/src/pages/SetPassword.tsx | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hyperdrive/src/register-ui/src/components/CacheSourceTooltip.tsx b/hyperdrive/src/register-ui/src/components/CacheSourceTooltip.tsx index ec83b1ea1..bab688062 100644 --- a/hyperdrive/src/register-ui/src/components/CacheSourceTooltip.tsx +++ b/hyperdrive/src/register-ui/src/components/CacheSourceTooltip.tsx @@ -6,7 +6,7 @@ export const CacheSourceTooltip: React.FC = () => { diff --git a/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx b/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx index 471d50e4d..2950e21ed 100644 --- a/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx +++ b/hyperdrive/src/register-ui/src/pages/CommitDotOsName.tsx @@ -237,7 +237,7 @@ function CommitDotOsName({ id="custom-routers" value={customRouters} onChange={(e) => handleCustomRoutersChange(e.target.value)} - placeholder="Enter one router name per line, e.g.: router-node-1.hypr other-router.hypr myrouter.os" + placeholder="Enter one router name per line, e.g.: direct-router-1.hypr direct-other.hypr mydirectrouter.os" className={`input resize-vertical min-h-[80px] ${ specifyRouters && !isCustomRoutersValid() ? 
'border-red-500 focus:border-red-500' diff --git a/hyperdrive/src/register-ui/src/pages/Login.tsx b/hyperdrive/src/register-ui/src/pages/Login.tsx index fd6a08d13..84709693b 100644 --- a/hyperdrive/src/register-ui/src/pages/Login.tsx +++ b/hyperdrive/src/register-ui/src/pages/Login.tsx @@ -301,7 +301,7 @@ function Login({ id="custom-cache-sources-login" value={customCacheSources} onChange={(e) => handleCustomCacheSourcesChange(e.target.value)} - placeholder="Enter one cache source name per line, e.g.: cache-node-1.hypr other-cache.hypr mycache.os" + placeholder="Enter one cache source name per line, e.g.: direct-cache-1.hypr direct-other.hypr mydirectcache.os" className={`input resize-vertical min-h-[80px] ${ specifyCacheSources && !isCustomCacheSourcesValid() ? 'border-red-500 focus:border-red-500' diff --git a/hyperdrive/src/register-ui/src/pages/MintCustom.tsx b/hyperdrive/src/register-ui/src/pages/MintCustom.tsx index 543c01d4d..601fb1e40 100644 --- a/hyperdrive/src/register-ui/src/pages/MintCustom.tsx +++ b/hyperdrive/src/register-ui/src/pages/MintCustom.tsx @@ -226,7 +226,7 @@ function MintCustom({ id="custom-routers-mint" value={customRouters} onChange={(e) => handleCustomRoutersChange(e.target.value)} - placeholder="Enter one router name per line, e.g.: router-node-1.hypr other-router.hypr myrouter.os" + placeholder="Enter one router name per line, e.g.: direct-router-1.hypr direct-other.hypr mydirectrouter.os" className={`input resize-vertical min-h-[80px] ${ specifyRouters && !isCustomRoutersValid() ? 'border-red-500 focus:border-red-500' diff --git a/hyperdrive/src/register-ui/src/pages/ResetName.tsx b/hyperdrive/src/register-ui/src/pages/ResetName.tsx index ef447ba8e..7a03d17fd 100644 --- a/hyperdrive/src/register-ui/src/pages/ResetName.tsx +++ b/hyperdrive/src/register-ui/src/pages/ResetName.tsx @@ -270,7 +270,7 @@ function ResetHnsName({ id="custom-routers-reset" value={customRouters} onChange={(e) => handleCustomRoutersChange(e.target.value)} - placeholder="Enter one router name per line, e.g.: router-node-1.hypr other-router.hypr myrouter.os" + placeholder="Enter one router name per line, e.g.: direct-router-1.hypr direct-other.hypr mydirectrouter.os" className={`input resize-vertical min-h-[80px] ${ specifyRouters && !isCustomRoutersValid() ? 'border-red-500 focus:border-red-500' diff --git a/hyperdrive/src/register-ui/src/pages/SetPassword.tsx b/hyperdrive/src/register-ui/src/pages/SetPassword.tsx index ad1afbacf..1fc5348a8 100644 --- a/hyperdrive/src/register-ui/src/pages/SetPassword.tsx +++ b/hyperdrive/src/register-ui/src/pages/SetPassword.tsx @@ -350,7 +350,7 @@ function SetPassword({ id="custom-cache-sources-setpassword" value={customCacheSources} onChange={(e) => handleCustomCacheSourcesChange(e.target.value)} - placeholder="Enter one cache source name per line, e.g.: cache-node-1.hypr other-cache.hypr mycache.os" + placeholder="Enter one cache source name per line, e.g.: direct-cache-1.hypr direct-other.hypr mydirectcache.os" className={`input resize-vertical min-h-[80px] ${ specifyCacheSources && !isCustomCacheSourcesValid() ? 
'border-red-500 focus:border-red-500' From dda22b75a761e245c637ae63e5b7528c872b745a Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Thu, 9 Oct 2025 08:38:44 -0400 Subject: [PATCH 4/6] Updated process_lib version --- hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml b/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml index 93530db0c..9f894e9b5 100644 --- a/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml @@ -18,7 +18,7 @@ alloy = { version = "0.8.1", features = [ ] } chrono = "0.4.41" hex = "0.4.3" -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e", features = ["logging"] } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf", features = ["logging"] } process_macros = "0.1.0" rand = "0.8" rmp-serde = "1.1.2" diff --git a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml index 7b2c28c2a..64d703e01 100644 --- a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml @@ -18,7 +18,7 @@ alloy = { version = "0.8.1", features = [ ] } chrono = "0.4.41" hex = "0.4.3" -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e", features = ["logging"] } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf", features = ["logging"] } process_macros = "0.1.0" rand = "0.8" rmp-serde = "1.1.2" diff --git a/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml b/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml index 11f07278a..3c7343718 100644 --- a/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml b/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml index 13874437d..946c9e970 100644 --- a/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml b/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml index 
198ebd7d4..3626f1261 100644 --- a/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml b/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml index 937d0fd2e..57472f0a0 100644 --- a/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "78a6a7e" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" From 3e1329629fc37c91b683707c0cfc48a3324b7dd6 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 13 Oct 2025 12:46:15 -0400 Subject: [PATCH 5/6] Skip log cache validation (direct + process_lib bootstrap) --- hyperdrive/packages/app-store/chain/Cargo.toml | 2 +- hyperdrive/packages/hns-indexer/hns-indexer/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs | 4 ++++ .../packages/hypermap-cacher/hypermap-cacher/Cargo.toml | 2 +- .../packages/hypermap-cacher/hypermap-cacher/src/lib.rs | 4 ++++ hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml | 2 +- .../packages/hypermap-cacher/start-providing/Cargo.toml | 2 +- hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml | 2 +- 10 files changed, 16 insertions(+), 8 deletions(-) diff --git a/hyperdrive/packages/app-store/chain/Cargo.toml b/hyperdrive/packages/app-store/chain/Cargo.toml index 66e73fc05..f859d495e 100644 --- a/hyperdrive/packages/app-store/chain/Cargo.toml +++ b/hyperdrive/packages/app-store/chain/Cargo.toml @@ -11,7 +11,7 @@ alloy-primitives = "0.8.15" alloy-sol-types = "0.8.15" anyhow = "1.0" bincode = "1.3.3" -hyperware_process_lib = "2.1.0" +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d" } process_macros = "0.1" rand = "0.8" serde = { version = "1.0", features = ["derive"] } diff --git a/hyperdrive/packages/hns-indexer/hns-indexer/Cargo.toml b/hyperdrive/packages/hns-indexer/hns-indexer/Cargo.toml index 105d04b79..93d00cb3d 100644 --- a/hyperdrive/packages/hns-indexer/hns-indexer/Cargo.toml +++ b/hyperdrive/packages/hns-indexer/hns-indexer/Cargo.toml @@ -11,7 +11,7 @@ anyhow = "1.0" alloy-primitives = "0.8.15" alloy-sol-types = "0.8.15" hex = "0.4.3" -hyperware_process_lib = { version = "2.1.0", features = ["logging"] } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d" , features = ["logging"] } process_macros = "0.1" rmp-serde = "1.1.2" serde = { version = "1.0", features = ["derive"] } diff --git a/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml b/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml index 9f894e9b5..4341c9353 100644 --- 
a/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/binding-cacher/Cargo.toml @@ -18,7 +18,7 @@ alloy = { version = "0.8.1", features = [ ] } chrono = "0.4.41" hex = "0.4.3" -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf", features = ["logging"] } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d", features = ["logging"] } process_macros = "0.1.0" rand = "0.8" rmp-serde = "1.1.2" diff --git a/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs b/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs index 9ed47a9e2..f5643fbdb 100644 --- a/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/binding-cacher/src/lib.rs @@ -781,10 +781,14 @@ impl State { for log_cache in log_caches { // Validate the log cache signature + // TODO Remove or find other method + // Temporarily skip + /* if !self.validate_log_cache(&log_cache)? { warn!("Invalid log cache signature, skipping"); continue; } + */ // Generate filename from metadata let filename = format!( diff --git a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml index 64d703e01..730b54bd5 100644 --- a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/Cargo.toml @@ -18,7 +18,7 @@ alloy = { version = "0.8.1", features = [ ] } chrono = "0.4.41" hex = "0.4.3" -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf", features = ["logging"] } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d", features = ["logging"] } process_macros = "0.1.0" rand = "0.8" rmp-serde = "1.1.2" diff --git a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs index f42b74ba0..6a907b9e6 100644 --- a/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs +++ b/hyperdrive/packages/hypermap-cacher/hypermap-cacher/src/lib.rs @@ -778,10 +778,14 @@ impl State { for log_cache in log_caches { // Validate the log cache signature + // TODO Remove or find other method + // Temporarily skip + /* if !self.validate_log_cache(&log_cache)? 
{ warn!("Invalid log cache signature, skipping"); continue; } + */ // Generate filename from metadata let filename = format!( diff --git a/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml b/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml index 3c7343718..6e59bb18c 100644 --- a/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/reset-cache/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml b/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml index 946c9e970..d8a3cd911 100644 --- a/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/set-nodes/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml b/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml index 3626f1261..d94d592d5 100644 --- a/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/start-providing/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml b/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml index 57472f0a0..96c75941f 100644 --- a/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml +++ b/hyperdrive/packages/hypermap-cacher/stop-providing/Cargo.toml @@ -5,7 +5,7 @@ edition = "2021" publish = false [dependencies] -hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "ac36caf" } +hyperware_process_lib = { git = "https://github.com/hyperware-ai/process_lib", rev = "e95ff8d" } process_macros = "0.1.0" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" From e8b90bf592b1cdb3d6562439762da19b0b2ec655 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 5 Nov 2025 15:50:33 +0000 Subject: [PATCH 6/6] Format Rust code using rustfmt --- hyperdrive/src/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/hyperdrive/src/main.rs b/hyperdrive/src/main.rs index 956bb2f6b..ebb5318b8 100644 --- a/hyperdrive/src/main.rs +++ b/hyperdrive/src/main.rs @@ -489,7 +489,6 @@ async fn main() { // Create the cache_sources file with test content let data_file_path = initfiles_dir.join("cache_sources"); - #[cfg(not(feature = "simulation-mode"))] { // Write cache_source_vector to cache_sources as JSON