From 85333ffa122c82650d770a06cb7cbe2cd87d06a1 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 18 Aug 2025 09:47:11 -0400 Subject: [PATCH 01/31] JetBrains IDE .gitignore addition --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 9fbba68..1e10f22 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ target *.swp .vscode +/.idea .DS_Store From 088332f477b5e74fcecca4a46733136674e570a8 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Thu, 21 Aug 2025 09:07:07 -0400 Subject: [PATCH 02/31] first step of bypermap binding caching --- hyperware-wit/hypermap-cacher-sys-v0.wit | 83 --------- hyperware-wit/hypermap-cacher-sys-v1.wit | 162 ++++++++++++++++++ ...llet:sys-v0.wit => hyperwallet-sys-v0.wit} | 0 src/hypermap.rs | 2 + 4 files changed, 164 insertions(+), 83 deletions(-) delete mode 100644 hyperware-wit/hypermap-cacher-sys-v0.wit create mode 100644 hyperware-wit/hypermap-cacher-sys-v1.wit rename hyperware-wit/{hyperwallet:sys-v0.wit => hyperwallet-sys-v0.wit} (100%) diff --git a/hyperware-wit/hypermap-cacher-sys-v0.wit b/hyperware-wit/hypermap-cacher-sys-v0.wit deleted file mode 100644 index 21a3652..0000000 --- a/hyperware-wit/hypermap-cacher-sys-v0.wit +++ /dev/null @@ -1,83 +0,0 @@ -interface hypermap-cacher { - // Metadata associated with a batch of Ethereum logs. - record logs-metadata { - chain-id: string, - from-block: string, - to-block: string, - time-created: string, - created-by: string, - signature: string, - } - - // Represents an item in the manifest, detailing a single log cache file. - record manifest-item { - metadata: logs-metadata, - is-empty: bool, - file-hash: string, - file-name: string, - } - - // The main manifest structure, listing all available log cache files. - // WIT does not support direct map types, so a list of key-value tuples is used. - record manifest { - // The key is the filename of the log cache. - items: list>, - manifest-filename: string, - chain-id: string, - protocol-version: string, - } - - record get-logs-by-range-request { - from-block: u64, - to-block: option, // If None, signifies to the latest available/relevant cached block. - } - - variant get-logs-by-range-ok-response { - logs(tuple), - latest(u64), - } - - // Defines the types of requests that can be sent to the Hypermap Cacher process. - variant cacher-request { - get-manifest, - get-log-cache-content(string), - get-status, - get-logs-by-range(get-logs-by-range-request), - start-providing, - stop-providing, - set-nodes(list), - reset(option>), - } - - // Represents the operational status of the cacher. - record cacher-status { - last-cached-block: u64, - chain-id: string, - protocol-version: string, - next-cache-attempt-in-seconds: option, - manifest-filename: string, - log-files-count: u32, - our-address: string, - is-providing: bool, - } - - // Defines the types of responses the Hypermap Cacher process can send. 
- variant cacher-response { - get-manifest(option), - get-log-cache-content(result, string>), - get-status(cacher-status), - get-logs-by-range(result), - start-providing(result), - stop-providing(result), - set-nodes(result), - reset(result), - rejected, - is-starting, - } -} - -world hypermap-cacher-sys-v0 { - import sign; - import hypermap-cacher; - include process-v1; -} diff --git a/hyperware-wit/hypermap-cacher-sys-v1.wit b/hyperware-wit/hypermap-cacher-sys-v1.wit new file mode 100644 index 0000000..d2e2c8a --- /dev/null +++ b/hyperware-wit/hypermap-cacher-sys-v1.wit @@ -0,0 +1,162 @@ +interface hypermap-binding-cacher { + // Metadata associated with a batch of Ethereum logs. + record logs-metadata { + chain-id: string, + from-block: string, + to-block: string, + time-created: string, + created-by: string, + signature: string, + } + + // Represents an item in the manifest, detailing a single log cache file. + record manifest-item { + metadata: logs-metadata, + is-empty: bool, + file-hash: string, + file-name: string, + } + + // The main manifest structure, listing all available log cache files. + // WIT does not support direct map types, so a list of key-value tuples is used. + record manifest { + // The key is the filename of the log cache. + items: list>, + manifest-filename: string, + chain-id: string, + protocol-version: string, + } + + record get-logs-by-range-request { + from-block: u64, + to-block: option, // If None, signifies to the latest available/relevant cached block. + } + + variant get-logs-by-range-ok-response { + logs(tuple), + latest(u64), + } + + // Defines the types of requests that can be sent to the Hypermap Binding Cacher process. + variant cacher-request { + get-manifest, + get-log-cache-content(string), + get-status, + get-logs-by-range(get-logs-by-range-request), + reset(option>), + start-providing, + stop-providing, + set-nodes(list), + } + + // Represents the operational status of the cacher. + record cacher-status { + last-cached-block: u64, + chain-id: string, + protocol-version: string, + next-cache-attempt-in-seconds: option, + manifest-filename: string, + log-files-count: u32, + our-address: string, + is-providing: bool, + } + + // Defines the types of responses the Hypermap Binding Cacher process can send. + variant cacher-response { + get-manifest(option), + get-log-cache-content(result, string>), + get-status(cacher-status), + get-logs-by-range(result), + start-providing(result), + stop-providing(result), + set-nodes(result), + reset(result), + rejected, + is-starting, + } +} + +interface hypermap-cacher { + // Metadata associated with a batch of Ethereum logs. + record logs-metadata { + chain-id: string, + from-block: string, + to-block: string, + time-created: string, + created-by: string, + signature: string, + } + + // Represents an item in the manifest, detailing a single log cache file. + record manifest-item { + metadata: logs-metadata, + is-empty: bool, + file-hash: string, + file-name: string, + } + + // The main manifest structure, listing all available log cache files. + // WIT does not support direct map types, so a list of key-value tuples is used. + record manifest { + // The key is the filename of the log cache. + items: list>, + manifest-filename: string, + chain-id: string, + protocol-version: string, + } + + record get-logs-by-range-request { + from-block: u64, + to-block: option, // If None, signifies to the latest available/relevant cached block. 
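        // Illustrative only (a sketch drawn from the Rust handling later in this series):
        // a request of { from-block: 27270411, to-block: none } asks for everything the
        // cacher holds from that block onward; the reply is either logs(latest-block, json)
        // or latest(latest-block) when the cacher has nothing newer for that range.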
+ } + + variant get-logs-by-range-ok-response { + logs(tuple), + latest(u64), + } + + // Defines the types of requests that can be sent to the Hypermap Cacher process. + variant cacher-request { + get-manifest, + get-log-cache-content(string), + get-status, + get-logs-by-range(get-logs-by-range-request), + reset(option>), + start-providing, + stop-providing, + set-nodes(list), + } + + // Represents the operational status of the cacher. + record cacher-status { + last-cached-block: u64, + chain-id: string, + protocol-version: string, + next-cache-attempt-in-seconds: option, + manifest-filename: string, + log-files-count: u32, + our-address: string, + is-providing: bool, + } + + // Defines the types of responses the Hypermap Cacher process can send. + variant cacher-response { + get-manifest(option), + get-log-cache-content(result, string>), + get-status(cacher-status), + get-logs-by-range(result), + start-providing(result), + stop-providing(result), + set-nodes(result), + reset(result), + rejected, + is-starting, + } +} + +world hypermap-cacher-sys-v1 { + import sign; + import hypermap-binding-cacher; + import hypermap-cacher; + include process-v1; +} diff --git a/hyperware-wit/hyperwallet:sys-v0.wit b/hyperware-wit/hyperwallet-sys-v0.wit similarity index 100% rename from hyperware-wit/hyperwallet:sys-v0.wit rename to hyperware-wit/hyperwallet-sys-v0.wit diff --git a/src/hypermap.rs b/src/hypermap.rs index 5e7b0af..4ebf691 100644 --- a/src/hypermap.rs +++ b/src/hypermap.rs @@ -25,6 +25,8 @@ use std::str::FromStr; /// hypermap deployment address on base pub const HYPERMAP_ADDRESS: &'static str = "0x000000000044C6B8Cb4d8f0F889a3E47664EAeda"; +/// hypermap binding deployment address on base +pub const HYPERMAP_BINDING_ADDRESS: &'static str = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"; /// base chain id #[cfg(not(feature = "simulation-mode"))] pub const HYPERMAP_CHAIN_ID: u64 = 8453; // base From 8b6c2e5c32b543cc6358465ebd8cf51932b4b633 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 25 Aug 2025 10:02:48 -0400 Subject: [PATCH 03/31] new .wit adding binding-cacher interface --- hyperware-wit/hypermap-cacher-sys-v1.wit | 36 ++++++++++++------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/hyperware-wit/hypermap-cacher-sys-v1.wit b/hyperware-wit/hypermap-cacher-sys-v1.wit index d2e2c8a..c8e8874 100644 --- a/hyperware-wit/hypermap-cacher-sys-v1.wit +++ b/hyperware-wit/hypermap-cacher-sys-v1.wit @@ -1,6 +1,6 @@ -interface hypermap-binding-cacher { +interface binding-cacher { // Metadata associated with a batch of Ethereum logs. - record logs-metadata { + record binding-logs-metadata { chain-id: string, from-block: string, to-block: string, @@ -10,8 +10,8 @@ interface hypermap-binding-cacher { } // Represents an item in the manifest, detailing a single log cache file. - record manifest-item { - metadata: logs-metadata, + record binding-manifest-item { + metadata: binding-logs-metadata, is-empty: bool, file-hash: string, file-name: string, @@ -19,30 +19,30 @@ interface hypermap-binding-cacher { // The main manifest structure, listing all available log cache files. // WIT does not support direct map types, so a list of key-value tuples is used. - record manifest { + record binding-manifest { // The key is the filename of the log cache. 
- items: list>, + items: list>, manifest-filename: string, chain-id: string, protocol-version: string, } - record get-logs-by-range-request { + record binding-get-logs-by-range-request { from-block: u64, to-block: option, // If None, signifies to the latest available/relevant cached block. } - variant get-logs-by-range-ok-response { + variant binding-get-logs-by-range-ok-response { logs(tuple), latest(u64), } - // Defines the types of requests that can be sent to the Hypermap Binding Cacher process. - variant cacher-request { + // Defines the types of requests that can be sent to the Hypermap Cacher process. + variant binding-cacher-request { get-manifest, get-log-cache-content(string), get-status, - get-logs-by-range(get-logs-by-range-request), + get-logs-by-range(binding-get-logs-by-range-request), reset(option>), start-providing, stop-providing, @@ -50,7 +50,7 @@ interface hypermap-binding-cacher { } // Represents the operational status of the cacher. - record cacher-status { + record binding-cacher-status { last-cached-block: u64, chain-id: string, protocol-version: string, @@ -61,12 +61,12 @@ interface hypermap-binding-cacher { is-providing: bool, } - // Defines the types of responses the Hypermap Binding Cacher process can send. - variant cacher-response { - get-manifest(option), + // Defines the types of responses the Hypermap Cacher process can send. + variant binding-cacher-response { + get-manifest(option), get-log-cache-content(result, string>), - get-status(cacher-status), - get-logs-by-range(result), + get-status(binding-cacher-status), + get-logs-by-range(result), start-providing(result), stop-providing(result), set-nodes(result), @@ -156,7 +156,7 @@ interface hypermap-cacher { world hypermap-cacher-sys-v1 { import sign; - import hypermap-binding-cacher; + import binding-cacher; import hypermap-cacher; include process-v1; } From f18eb712b7cff47b90b5506e79e716a82d32ffa6 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 25 Aug 2025 11:52:28 -0400 Subject: [PATCH 04/31] First inclusion in the new bindings support --- src/bindings.rs | 2 ++ src/hypermap.rs | 4 +--- src/lib.rs | 5 +++++ 3 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 src/bindings.rs diff --git a/src/bindings.rs b/src/bindings.rs new file mode 100644 index 0000000..f422e42 --- /dev/null +++ b/src/bindings.rs @@ -0,0 +1,2 @@ +/// Hypermap binding deployment address on base +pub const HYPERMAP_BINDING_ADDRESS: &'static str = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"; diff --git a/src/hypermap.rs b/src/hypermap.rs index 4ebf691..4b8600b 100644 --- a/src/hypermap.rs +++ b/src/hypermap.rs @@ -25,9 +25,7 @@ use std::str::FromStr; /// hypermap deployment address on base pub const HYPERMAP_ADDRESS: &'static str = "0x000000000044C6B8Cb4d8f0F889a3E47664EAeda"; -/// hypermap binding deployment address on base -pub const HYPERMAP_BINDING_ADDRESS: &'static str = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"; -/// base chain id + #[cfg(not(feature = "simulation-mode"))] pub const HYPERMAP_CHAIN_ID: u64 = 8453; // base #[cfg(feature = "simulation-mode")] diff --git a/src/lib.rs b/src/lib.rs index 6cd68d9..32b8975 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -30,6 +30,10 @@ wit_bindgen::generate!({ generate_unused_types: true, }); +/// Interact with the tokenregistry contract data +pub mod bindings; +/// Currently nothing in here except for the contract address. + /// Interact with the eth provider module. 
pub mod eth; /// Your process must have the [`Capability`] to message @@ -91,6 +95,7 @@ pub mod scripting; pub mod hyperwallet_client; mod types; + pub use types::{ address::{Address, AddressParseError}, capability::Capability, From 6e1091e95288f4316c4be188a25aeaba30cdb04a Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 25 Aug 2025 16:27:31 -0400 Subject: [PATCH 05/31] binding-cacher supports bootstrapping --- hyperware-wit/process-lib.wit | 1 + src/bindings.rs | 2025 ++++++++++++++++++++++++++++++++- 2 files changed, 2024 insertions(+), 2 deletions(-) diff --git a/hyperware-wit/process-lib.wit b/hyperware-wit/process-lib.wit index c85f693..12113a4 100644 --- a/hyperware-wit/process-lib.wit +++ b/hyperware-wit/process-lib.wit @@ -1,6 +1,7 @@ world process-lib { import sign; import hypermap-cacher; + import binding-cacher; import hyperwallet; include lib; } diff --git a/src/bindings.rs b/src/bindings.rs index f422e42..293fd19 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1,2 +1,2023 @@ -/// Hypermap binding deployment address on base -pub const HYPERMAP_BINDING_ADDRESS: &'static str = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"; +use crate::eth::{ + BlockNumberOrTag, EthError, Filter as EthFilter, FilterBlockOption, Log as EthLog, Provider, +}; +use crate::bindings::contract::getCall; +use crate::hyperware::process::binding_cacher::{ + BindingCacherRequest as CacherRequest, BindingCacherResponse as CacherResponse, BindingCacherStatus as CacherStatus, BindingGetLogsByRangeOkResponse as GetLogsByRangeOkResponse, BindingGetLogsByRangeRequest as GetLogsByRangeRequest, + BindingLogsMetadata as LogsMetadata, BindingManifest as Manifest, BindingManifestItem as ManifestItem, +}; + +use crate::{net, sign}; +use crate::{print_to_terminal, Address as BindingAddress, Request}; +use alloy::hex; +use alloy::rpc::types::request::{TransactionInput, TransactionRequest}; +use alloy_primitives::{keccak256, Address, Bytes, FixedBytes, B256}; +use alloy_sol_types::{SolCall, SolEvent, SolValue}; +use contract::tokenCall; +use serde::{ + self, + de::{self, MapAccess, Visitor}, + ser::{SerializeMap, SerializeStruct}, + Deserialize, Deserializer, Serialize, Serializer, +}; +use std::error::Error; +use std::fmt; +use std::str::FromStr; + +/// bindings data deployment address on base +#[cfg(not(feature = "simulation-mode"))] +pub const BINDINGS_ADDRESS: &'static str = "0x0000000000000000000000000000000000000000"; +#[cfg(feature = "simulation-mode")] +pub const BINDINGS_ADDRESS: &'static str = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"; +#[cfg(not(feature = "simulation-mode"))] +pub const BINDINGS_CHAIN_ID: u64 = 8453; // base +#[cfg(feature = "simulation-mode")] +pub const BINDINGS_CHAIN_ID: u64 = 31337; // fakenet +/// first block (minus one) of tokenregistry deployment on base +#[cfg(not(feature = "simulation-mode"))] +pub const BINDINGS_FIRST_BLOCK: u64 = 27_270_411; +#[cfg(feature = "simulation-mode")] +pub const BINDINGS_FIRST_BLOCK: u64 = 0; +/// the root hash of tokenregistry, empty bytes32 +pub const BINDINGS_ROOT_HASH: &'static str = + "0x0000000000000000000000000000000000000000000000000000000000000000"; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct LogCache { + pub metadata: LogsMetadata, + pub logs: Vec, +} + +const CACHER_REQUEST_TIMEOUT_S: u64 = 15; + +// TODO replace with tokenregistry structures +/// Sol structures for Hypermap requests +pub mod contract { + use alloy_sol_macro::sol; + + sol! { + /// Emitted when a new namespace entry is minted. 
+ /// - parenthash: The hash of the parent namespace entry. + /// - childhash: The hash of the minted namespace entry's full path. + /// - labelhash: The hash of only the label (the final entry in the path). + /// - label: The label (the final entry in the path) of the new entry. + event Mint( + bytes32 indexed parenthash, + bytes32 indexed childhash, + bytes indexed labelhash, + bytes label + ); + + /// Emitted when a fact is created on an existing namespace entry. + /// Facts are immutable and may only be written once. A fact label is + /// prepended with an exclamation mark (!) to indicate that it is a fact. + /// - parenthash The hash of the parent namespace entry. + /// - facthash The hash of the newly created fact's full path. + /// - labelhash The hash of only the label (the final entry in the path). + /// - label The label of the fact. + /// - data The data stored at the fact. + event Fact( + bytes32 indexed parenthash, + bytes32 indexed facthash, + bytes indexed labelhash, + bytes label, + bytes data + ); + + /// Emitted when a new note is created on an existing namespace entry. + /// Notes are mutable. A note label is prepended with a tilde (~) to indicate + /// that it is a note. + /// - parenthash: The hash of the parent namespace entry. + /// - notehash: The hash of the newly created note's full path. + /// - labelhash: The hash of only the label (the final entry in the path). + /// - label: The label of the note. + /// - data: The data stored at the note. + event Note( + bytes32 indexed parenthash, + bytes32 indexed notehash, + bytes indexed labelhash, + bytes label, + bytes data + ); + + /// Emitted when a gene is set for an existing namespace entry. + /// A gene is a specific TBA implementation which will be applied to all + /// sub-entries of the namespace entry. + /// - entry: The namespace entry's namehash. + /// - gene: The address of the TBA implementation. + event Gene(bytes32 indexed entry, address indexed gene); + + /// Emitted when the zeroth namespace entry is minted. + /// Occurs exactly once at initialization. + /// - zeroTba: The address of the zeroth TBA + event Zero(address indexed zeroTba); + + /// Emitted when a namespace entry is transferred from one address + /// to another. + /// - from: The address of the sender. + /// - to: The address of the recipient. + /// - id: The namehash of the namespace entry (converted to uint256). + event Transfer( + address indexed from, + address indexed to, + uint256 indexed id + ); + + /// Emitted when a namespace entry is approved for transfer. + /// - owner: The address of the owner. + /// - spender: The address of the spender. + /// - id: The namehash of the namespace entry (converted to uint256). + event Approval( + address indexed owner, + address indexed spender, + uint256 indexed id + ); + + /// Emitted when an operator is approved for all of an owner's + /// namespace entries. + /// - owner: The address of the owner. + /// - operator: The address of the operator. + /// - approved: Whether the operator is approved. + event ApprovalForAll( + address indexed owner, + address indexed operator, + bool approved + ); + + /// Retrieves information about a specific namespace entry. + /// - namehash The namehash of the namespace entry to query. + /// + /// Returns: + /// - tba: The address of the token-bound account associated + /// with the entry. + /// - owner: The address of the entry owner. + /// - data: The note or fact bytes associated with the entry + /// (empty if not a note or fact). 
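        /// Illustrative read path (a sketch, mirroring Bindings::get further down in this
        /// patch): hash the full path with namehash(), ABI-encode getCall { namehash },
        /// submit it as an eth_call to the registry address, and ABI-decode the returned
        /// (tba, owner, data) triple; empty data means the entry is not a note or fact.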
+ function get( + bytes32 namehash + ) external view returns (address tba, address owner, bytes memory data); + + /// Mints a new namespace entry and creates a token-bound account for + /// it. Must be called by a parent namespace entry token-bound account. + /// - who: The address to own the new namespace entry. + /// - label: The label to mint beneath the calling parent entry. + /// - initialization: Initialization calldata applied to the new + /// minted entry's token-bound account. + /// - erc721Data: ERC-721 data -- passed to comply with + /// `ERC721TokenReceiver.onERC721Received()`. + /// - implementation: The address of the implementation contract for + /// the token-bound account: this will be overriden by the gene if the + /// parent entry has one set. + /// + /// Returns: + /// - tba: The address of the new entry's token-bound account. + function mint( + address who, + bytes calldata label, + bytes calldata initialization, + bytes calldata erc721Data, + address implementation + ) external returns (address tba); + + /// Sets the gene for the calling namespace entry. + /// - _gene: The address of the TBA implementation to set for all + /// children of the calling namespace entry. + function gene(address _gene) external; + + /// Creates a new fact beneath the calling namespace entry. + /// - fact: The fact label to create. Must be prepended with an + /// exclamation mark (!). + /// - data: The data to be stored at the fact. + /// + /// Returns: + /// - facthash: The namehash of the newly created fact. + function fact( + bytes calldata fact, + bytes calldata data + ) external returns (bytes32 facthash); + + /// Creates a new note beneath the calling namespace entry. + /// - note: The note label to create. Must be prepended with a tilde (~). + /// - data: The data to be stored at the note. + /// + /// Returns: + /// - notehash: The namehash of the newly created note. + function note( + bytes calldata note, + bytes calldata data + ) external returns (bytes32 notehash); + + /// Retrieves the token-bound account address of a namespace entry. + /// - entry: The entry namehash (as uint256) for which to get the + /// token-bound account. + /// + /// Returns: + /// - tba: The token-bound account address of the namespace entry. + function tbaOf(uint256 entry) external view returns (address tba); + + function balanceOf(address owner) external view returns (uint256); + + function getApproved(uint256 entry) external view returns (address); + + function isApprovedForAll( + address owner, + address operator + ) external view returns (bool); + + function ownerOf(uint256 entry) external view returns (address); + + function setApprovalForAll(address operator, bool approved) external; + + function approve(address spender, uint256 entry) external; + + function safeTransferFrom(address from, address to, uint256 id) external; + + function safeTransferFrom( + address from, + address to, + uint256 id, + bytes calldata data + ) external; + + function transferFrom(address from, address to, uint256 id) external; + + function supportsInterface(bytes4 interfaceId) external view returns (bool); + + /// Gets the token identifier that owns this token-bound account (TBA). + /// This is a core function of the ERC-6551 standard that returns the + /// identifying information about the NFT that owns this account. + /// The return values are constant and cannot change over time. 
+ /// + /// Returns: + /// - chainId: The EIP-155 chain ID where the owning NFT exists + /// - tokenContract: The contract address of the owning NFT + /// - tokenId: The token ID of the owning NFT + function token() + external + view + returns (uint256 chainId, address tokenContract, uint256 tokenId); + } +} + +// TODO remove +/// A mint log from the hypermap, converted to a 'resolved' format using +/// namespace data saved in the hns-indexer. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct Mint { + pub name: String, + pub parent_path: String, +} + +// TODO remove +/// A note log from the hypermap, converted to a 'resolved' format using +/// namespace data saved in the hns-indexer +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct Note { + pub note: String, + pub parent_path: String, + pub data: Bytes, +} + +// TODO remove +/// A fact log from the hypermap, converted to a 'resolved' format using +/// namespace data saved in the hns-indexer +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct Fact { + pub fact: String, + pub parent_path: String, + pub data: Bytes, +} + +// TODO remove +/// Errors that can occur when decoding a log from the hypermap using +/// [`decode_mint_log()`] or [`decode_note_log()`]. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum DecodeLogError { + /// The log's topic is not a mint or note event. + UnexpectedTopic(B256), + /// The name is not valid (according to [`valid_name`]). + InvalidName(String), + /// An error occurred while decoding the log. + DecodeError(String), + /// The parent name could not be resolved with `hns-indexer`. + UnresolvedParent(String), +} + +impl fmt::Display for DecodeLogError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + DecodeLogError::UnexpectedTopic(topic) => write!(f, "Unexpected topic: {:?}", topic), + DecodeLogError::InvalidName(name) => write!(f, "Invalid name: {}", name), + DecodeLogError::DecodeError(err) => write!(f, "Decode error: {}", err), + DecodeLogError::UnresolvedParent(parent) => { + write!(f, "Could not resolve parent: {}", parent) + } + } + } +} + +impl Error for DecodeLogError {} + +// TODO remove +/// Canonical function to determine if a hypermap entry is valid. +/// +/// This checks a **single name**, not the full path-name. A full path-name +/// is comprised of valid names separated by `.` +pub fn valid_entry(entry: &str, note: bool, fact: bool) -> bool { + if note && fact { + return false; + } + if note { + valid_note(entry) + } else if fact { + valid_fact(entry) + } else { + valid_name(entry) + } +} + +// TODO remove +pub fn valid_name(name: &str) -> bool { + name.is_ascii() + && name.len() >= 1 + && name + .chars() + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') +} + +// TODO remove +pub fn valid_note(note: &str) -> bool { + note.is_ascii() + && note.len() >= 2 + && note.chars().next() == Some('~') + && note + .chars() + .skip(1) + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') +} + +// TODO remove +pub fn valid_fact(fact: &str) -> bool { + fact.is_ascii() + && fact.len() >= 2 + && fact.chars().next() == Some('!') + && fact + .chars() + .skip(1) + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') +} + +// TODO remove +/// Produce a namehash from a hypermap name. 
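///
/// A worked sketch of the scheme implemented below: for the illustrative path
/// "sub.parent", the labels are reversed to ["parent", "sub"] and folded as
///   node = keccak256(abi_encode_packed(bytes32(0), keccak256("parent")))
///   node = keccak256(abi_encode_packed(node, keccak256("sub")))
/// with the final node returned as a 0x-prefixed hex string, so "parent" and
/// "sub.parent" yield distinct, deterministic 32-byte namehashes.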
+pub fn namehash(name: &str) -> String { + let mut node = B256::default(); + + let mut labels: Vec<&str> = name.split('.').collect(); + labels.reverse(); + + for label in labels.iter() { + let l = keccak256(label); + node = keccak256((node, l).abi_encode_packed()); + } + format!("0x{}", hex::encode(node)) +} + +// TODO remove +/// Decode a mint log from the hypermap into a 'resolved' format. +/// +/// Uses [`valid_name()`] to check if the name is valid. +pub fn decode_mint_log(log: &crate::eth::Log) -> Result { + let contract::Note::SIGNATURE_HASH = log.topics()[0] else { + return Err(DecodeLogError::UnexpectedTopic(log.topics()[0])); + }; + let decoded = contract::Mint::decode_log_data(log.data(), true) + .map_err(|e| DecodeLogError::DecodeError(e.to_string()))?; + let name = String::from_utf8_lossy(&decoded.label).to_string(); + if !valid_name(&name) { + return Err(DecodeLogError::InvalidName(name)); + } + match resolve_parent(log, None) { + Some(parent_path) => Ok(Mint { name, parent_path }), + None => Err(DecodeLogError::UnresolvedParent(name)), + } +} + +// TODO remove +/// Decode a note log from the hypermap into a 'resolved' format. +/// +/// Uses [`valid_name()`] to check if the name is valid. +pub fn decode_note_log(log: &crate::eth::Log) -> Result { + let contract::Note::SIGNATURE_HASH = log.topics()[0] else { + return Err(DecodeLogError::UnexpectedTopic(log.topics()[0])); + }; + let decoded = contract::Note::decode_log_data(log.data(), true) + .map_err(|e| DecodeLogError::DecodeError(e.to_string()))?; + let note = String::from_utf8_lossy(&decoded.label).to_string(); + if !valid_note(¬e) { + return Err(DecodeLogError::InvalidName(note)); + } + match resolve_parent(log, None) { + Some(parent_path) => Ok(Note { + note, + parent_path, + data: decoded.data, + }), + None => Err(DecodeLogError::UnresolvedParent(note)), + } +} + +// TODO remove +pub fn decode_fact_log(log: &crate::eth::Log) -> Result { + let contract::Fact::SIGNATURE_HASH = log.topics()[0] else { + return Err(DecodeLogError::UnexpectedTopic(log.topics()[0])); + }; + let decoded = contract::Fact::decode_log_data(log.data(), true) + .map_err(|e| DecodeLogError::DecodeError(e.to_string()))?; + let fact = String::from_utf8_lossy(&decoded.label).to_string(); + if !valid_fact(&fact) { + return Err(DecodeLogError::InvalidName(fact)); + } + match resolve_parent(log, None) { + Some(parent_path) => Ok(Fact { + fact, + parent_path, + data: decoded.data, + }), + None => Err(DecodeLogError::UnresolvedParent(fact)), + } +} + +// TODO remove +/// Given a [`crate::eth::Log`] (which must be a log from hypermap), resolve the parent name +/// of the new entry or note. +pub fn resolve_parent(log: &crate::eth::Log, timeout: Option) -> Option { + let parent_hash = log.topics()[1].to_string(); + net::get_name(&parent_hash, log.block_number, timeout) +} + +// TODO remove +/// Given a [`crate::eth::Log`] (which must be a log from hypermap), resolve the full name +/// of the new entry or note. +/// +/// Uses [`valid_name()`] to check if the name is valid. 
+pub fn resolve_full_name(log: &crate::eth::Log, timeout: Option) -> Option { + let parent_hash = log.topics()[1].to_string(); + let parent_name = net::get_name(&parent_hash, log.block_number, timeout)?; + let log_name = match log.topics()[0] { + contract::Mint::SIGNATURE_HASH => { + let decoded = contract::Mint::decode_log_data(log.data(), true).unwrap(); + decoded.label + } + contract::Note::SIGNATURE_HASH => { + let decoded = contract::Note::decode_log_data(log.data(), true).unwrap(); + decoded.label + } + contract::Fact::SIGNATURE_HASH => { + let decoded = contract::Fact::decode_log_data(log.data(), true).unwrap(); + decoded.label + } + _ => return None, + }; + let name = String::from_utf8_lossy(&log_name); + if !valid_entry( + &name, + log.topics()[0] == contract::Note::SIGNATURE_HASH, + log.topics()[0] == contract::Fact::SIGNATURE_HASH, + ) { + return None; + } + Some(format!("{name}.{parent_name}")) +} + +// TODO remove +pub fn eth_apply_filter(logs: &[EthLog], filter: &EthFilter) -> Vec { + let mut matched_logs = Vec::new(); + + let (filter_from_block, filter_to_block) = match filter.block_option { + FilterBlockOption::Range { + from_block, + to_block, + } => { + let parse_block_num = |bn: Option| -> Option { + match bn { + Some(BlockNumberOrTag::Number(n)) => Some(n), + _ => None, + } + }; + (parse_block_num(from_block), parse_block_num(to_block)) + } + _ => (None, None), + }; + + for log in logs.iter() { + let mut match_address = filter.address.is_empty(); + if !match_address { + if filter.address.matches(&log.address()) { + match_address = true; + } + } + if !match_address { + continue; + } + + if let Some(log_bn) = log.block_number { + if let Some(filter_from) = filter_from_block { + if log_bn < filter_from { + continue; + } + } + if let Some(filter_to) = filter_to_block { + if log_bn > filter_to { + continue; + } + } + } else { + if filter_from_block.is_some() || filter_to_block.is_some() { + continue; + } + } + + let mut match_topics = true; + for (i, filter_topic_alternatives) in filter.topics.iter().enumerate() { + if filter_topic_alternatives.is_empty() { + continue; + } + + let log_topic = log.topics().get(i); + let mut current_topic_matched = false; + for filter_topic in filter_topic_alternatives.iter() { + if log_topic == Some(filter_topic) { + current_topic_matched = true; + break; + } + } + if !current_topic_matched { + match_topics = false; + break; + } + } + + if match_topics { + matched_logs.push(log.clone()); + } + } + matched_logs +} + +/// Helper struct for reading binding data. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct Bindings { + pub provider: Provider, + address: Address, +} + +impl Bindings { + /// Creates a new Bindings instance with a specified address. + /// + /// # Arguments + /// * `provider` - A reference to the Provider. + /// * `address` - The address of the Bindings contract. + pub fn new(provider: Provider, address: Address) -> Self { + Self { provider, address } + } + + /// Creates a new Bindings instance with the default address and chain ID. + pub fn default(timeout: u64) -> Self { + let provider = Provider::new(BINDINGS_CHAIN_ID, timeout); + Self::new(provider, Address::from_str(BINDINGS_ADDRESS).unwrap()) + } + + /// Returns the in-use Bindings contract address. + pub fn address(&self) -> &Address { + &self.address + } + + /// Gets an entry from the Bindings by its string-formatted name. + /// + /// # Parameters + /// - `path`: The name-path to get from the Bindings. 
+ /// # Returns + /// A `Result<(Address, Address, Option), EthError>` representing the TBA, owner, + /// and value if the entry exists and is a note. + pub fn get(&self, path: &str) -> Result<(Address, Address, Option), EthError> { + let get_call = getCall { + namehash: FixedBytes::<32>::from_str(&namehash(path)) + .map_err(|_| EthError::InvalidParams)?, + } + .abi_encode(); + + let tx_req = TransactionRequest::default() + .input(TransactionInput::new(get_call.into())) + .to(self.address); + + let res_bytes = self.provider.call(tx_req, None)?; + + let res = getCall::abi_decode_returns(&res_bytes, false) + .map_err(|_| EthError::RpcMalformedResponse)?; + + let note_data = if res.data == Bytes::default() { + None + } else { + Some(res.data) + }; + + Ok((res.tba, res.owner, note_data)) + } + + /// Gets an entry from the Bindings by its hash. + /// + /// # Parameters + /// - `entryhash`: The entry to get from the Bindings. + /// # Returns + /// A `Result<(Address, Address, Option), EthError>` representing the TBA, owner, + /// and value if the entry exists and is a note. + pub fn get_hash(&self, entryhash: &str) -> Result<(Address, Address, Option), EthError> { + let get_call = getCall { + namehash: FixedBytes::<32>::from_str(entryhash).map_err(|_| EthError::InvalidParams)?, + } + .abi_encode(); + + let tx_req = TransactionRequest::default() + .input(TransactionInput::new(get_call.into())) + .to(self.address); + + let res_bytes = self.provider.call(tx_req, None)?; + + let res = getCall::abi_decode_returns(&res_bytes, false) + .map_err(|_| EthError::RpcMalformedResponse)?; + + let note_data = if res.data == Bytes::default() { + None + } else { + Some(res.data) + }; + + Ok((res.tba, res.owner, note_data)) + } + + /// Gets a namehash from an existing TBA address. + /// + /// # Parameters + /// - `tba`: The TBA to get the namehash of. + /// # Returns + /// A `Result` representing the namehash of the TBA. + pub fn get_namehash_from_tba(&self, tba: Address) -> Result { + let token_call = tokenCall {}.abi_encode(); + + let tx_req = TransactionRequest::default() + .input(TransactionInput::new(token_call.into())) + .to(tba); + + let res_bytes = self.provider.call(tx_req, None)?; + + let res = tokenCall::abi_decode_returns(&res_bytes, false) + .map_err(|_| EthError::RpcMalformedResponse)?; + + let namehash: FixedBytes<32> = res.tokenId.into(); + Ok(format!("0x{}", hex::encode(namehash))) + } + + /// Create a filter for all mint events. + pub fn mint_filter(&self) -> crate::eth::Filter { + crate::eth::Filter::new() + .address(self.address) + .event(contract::Mint::SIGNATURE) + } + + /// Create a filter for all note events. + pub fn note_filter(&self) -> crate::eth::Filter { + crate::eth::Filter::new() + .address(self.address) + .event(contract::Note::SIGNATURE) + } + + /// Create a filter for all fact events. + pub fn fact_filter(&self) -> crate::eth::Filter { + crate::eth::Filter::new() + .address(self.address) + .event(contract::Fact::SIGNATURE) + } + + /// Create a filter for a given set of specific notes. This function will + /// hash the note labels and use them as the topic3 filter. + /// + /// Example: + /// ```rust + /// let filter = hypermap.notes_filter(&["~note1", "~note2"]); + /// ``` + pub fn notes_filter(&self, notes: &[&str]) -> crate::eth::Filter { + self.note_filter().topic3( + notes + .into_iter() + .map(|note| keccak256(note)) + .collect::>(), + ) + } + + /// Create a filter for a given set of specific facts. 
This function will + /// hash the fact labels and use them as the topic3 filter. + /// + /// Example: + /// ```rust + /// let filter = hypermap.facts_filter(&["!fact1", "!fact2"]); + /// ``` + pub fn facts_filter(&self, facts: &[&str]) -> crate::eth::Filter { + self.fact_filter().topic3( + facts + .into_iter() + .map(|fact| keccak256(fact)) + .collect::>(), + ) + } + + fn get_bootstrap_log_cache_inner( + &self, + cacher_request: &CacherRequest, + cacher_process_address: &BindingAddress, + attempt: u64, + request_from_block_val: u64, + retry_delay_s: u64, + retry_count: Option, + chain: &Option, + ) -> anyhow::Result)>> { + let retry_count_str = retry_count + .map(|r| r.to_string()) + .unwrap_or_else(|| "inf".to_string()); + print_to_terminal( + 2, + &format!("Attempt {attempt}/{retry_count_str} to query local binding-cacher"), + ); + + let response_msg = match Request::to(cacher_process_address.clone()) + .body(serde_json::to_vec(cacher_request)?) + .send_and_await_response(CACHER_REQUEST_TIMEOUT_S) + { + Ok(Ok(msg)) => msg, + Ok(Err(e)) => { + print_to_terminal( + 1, + &format!( + "Error response from local cacher (attempt {}): {:?}", + attempt, e + ), + ); + if retry_count.is_none() || attempt < retry_count.unwrap() { + std::thread::sleep(std::time::Duration::from_secs(retry_delay_s)); + return Ok(None); + } else { + return Err(anyhow::anyhow!( + "Error response from local cacher after {retry_count_str} attempts: {e:?}" + )); + } + } + Err(e) => { + print_to_terminal( + 1, + &format!( + "Failed to send request to local cacher (attempt {}): {:?}", + attempt, e + ), + ); + if retry_count.is_none() || attempt < retry_count.unwrap() { + std::thread::sleep(std::time::Duration::from_secs(retry_delay_s)); + return Ok(None); + } else { + return Err(anyhow::anyhow!( + "Failed to send request to local cacher after {retry_count_str} attempts: {e:?}" + )); + } + } + }; + + match serde_json::from_slice::(response_msg.body())? { + CacherResponse::GetLogsByRange(res) => { + match res { + Ok(GetLogsByRangeOkResponse::Latest(block)) => { + return Ok(Some((block, vec![]))); + } + Ok(GetLogsByRangeOkResponse::Logs((block, json_string_of_vec_log_cache))) => { + if json_string_of_vec_log_cache.is_empty() + || json_string_of_vec_log_cache == "[]" + { + print_to_terminal( + 2, + &format!( + "Local cacher returned no log caches for the range from block {}.", + request_from_block_val, + ), + ); + return Ok(Some((block, vec![]))); + } + match serde_json::from_str::>(&json_string_of_vec_log_cache) { + Ok(retrieved_caches) => { + let target_chain_id = chain + .clone() + .unwrap_or_else(|| self.provider.get_chain_id().to_string()); + let mut filtered_caches = vec![]; + + for log_cache in retrieved_caches { + if log_cache.metadata.chain_id == target_chain_id { + // Further filter: ensure the cache's own from_block isn't completely after what we need, + // and to_block isn't completely before. + let cache_from = log_cache + .metadata + .from_block + .parse::() + .unwrap_or(u64::MAX); + let cache_to = + log_cache.metadata.to_block.parse::().unwrap_or(0); + + if cache_to >= request_from_block_val { + // Cache has some data at or after our request_from_block + filtered_caches.push(log_cache); + } else { + print_to_terminal(3, &format!("Cache from local cacher ({} to {}) does not meet request_from_block {}", + cache_from, cache_to, request_from_block_val)); + } + } else { + print_to_terminal(1,&format!("LogCache from local cacher has mismatched chain_id (expected {}, got {}). 
Skipping.", + target_chain_id, log_cache.metadata.chain_id)); + } + } + + print_to_terminal( + 2, + &format!( + "Retrieved {} log caches from local binding-cacher.", + filtered_caches.len(), + ), + ); + return Ok(Some((block, filtered_caches))); + } + Err(e) => { + return Err(anyhow::anyhow!( + "Failed to deserialize Vec from local cacher: {:?}. JSON: {:.100}", + e, json_string_of_vec_log_cache + )); + } + } + } + Err(e_str) => { + return Err(anyhow::anyhow!( + "Local cacher reported error for GetLogsByRange: {}", + e_str, + )); + } + } + } + CacherResponse::IsStarting => { + print_to_terminal( + 2, + &format!( + "Local binding-cacher is still starting (attempt {}/{}). Retrying in {}s...", + attempt, retry_count_str, retry_delay_s + ), + ); + if retry_count.is_none() || attempt < retry_count.unwrap() { + std::thread::sleep(std::time::Duration::from_secs(retry_delay_s)); + return Ok(None); + } else { + return Err(anyhow::anyhow!( + "Local binding-cacher is still starting after {retry_count_str} attempts" + )); + } + } + CacherResponse::Rejected => { + return Err(anyhow::anyhow!( + "Local binding-cacher rejected our request" + )); + } + _ => { + return Err(anyhow::anyhow!( + "Unexpected response type from local binding-cacher" + )); + } + } + } + + pub fn get_bootstrap_log_cache( + &self, + from_block: Option, + retry_params: Option<(u64, Option)>, + chain: Option, + ) -> anyhow::Result<(u64, Vec)> { + print_to_terminal(2, + &format!("get_bootstrap_log_cache (using local binding-cacher): from_block={:?}, retry_params={:?}, chain={:?}", + from_block, retry_params, chain) + ); + + let (retry_delay_s, retry_count) = retry_params.ok_or_else(|| { + anyhow::anyhow!("IsStarted check requires retry parameters (delay_s, max_tries)") + })?; + + let cacher_process_address = + BindingAddress::new("our", ("binding-cacher", "hypermap-cacher", "sys")); + + print_to_terminal( + 2, + &format!( + "Querying local cacher with GetLogsByRange: {}", + cacher_process_address.to_string(), + ), + ); + + let request_from_block_val = from_block.unwrap_or(0); + + let get_logs_by_range_payload = GetLogsByRangeRequest { + from_block: request_from_block_val, + to_block: None, // Request all logs from from_block onwards. Cacher will return what it has. + }; + let cacher_request = CacherRequest::GetLogsByRange(get_logs_by_range_payload); + + if let Some(retry_count) = retry_count { + for attempt in 1..=retry_count { + if let Some(return_vals) = self.get_bootstrap_log_cache_inner( + &cacher_request, + &cacher_process_address, + attempt, + request_from_block_val, + retry_delay_s, + Some(retry_count), + &chain, + )? { + return Ok(return_vals); + } + } + } else { + let mut attempt = 1; + loop { + if let Some(return_vals) = self.get_bootstrap_log_cache_inner( + &cacher_request, + &cacher_process_address, + attempt, + request_from_block_val, + retry_delay_s, + None, + &chain, + )? 
{ + return Ok(return_vals); + } + attempt += 1; + } + } + + Err(anyhow::anyhow!( + "Failed to get response from local binding-cacher after {retry_count:?} attempts" + )) + } + + pub fn validate_log_cache(&self, log_cache: &LogCache) -> anyhow::Result { + let from_block = log_cache.metadata.from_block.parse::().map_err(|_| { + anyhow::anyhow!( + "Invalid from_block in metadata: {}", + log_cache.metadata.from_block + ) + })?; + let to_block = log_cache.metadata.to_block.parse::().map_err(|_| { + anyhow::anyhow!( + "Invalid to_block in metadata: {}", + log_cache.metadata.to_block + ) + })?; + + let mut bytes_to_verify = serde_json::to_vec(&log_cache.logs) + .map_err(|e| anyhow::anyhow!("Failed to serialize logs for validation: {:?}", e))?; + bytes_to_verify.extend_from_slice(&from_block.to_be_bytes()); + bytes_to_verify.extend_from_slice(&to_block.to_be_bytes()); + let hashed_data = keccak256(&bytes_to_verify); + + let signature_hex = log_cache.metadata.signature.trim_start_matches("0x"); + let signature_bytes = hex::decode(signature_hex) + .map_err(|e| anyhow::anyhow!("Failed to decode hex signature: {:?}", e))?; + + Ok(sign::net_key_verify( + hashed_data.to_vec(), + &log_cache.metadata.created_by.parse::()?, + signature_bytes, + )?) + } + + pub fn get_bootstrap( + &self, + from_block: Option, + retry_params: Option<(u64, Option)>, + chain: Option, + ) -> anyhow::Result<(u64, Vec)> { + print_to_terminal( + 2, + &format!( + "get_bootstrap: from_block={:?}, retry_params={:?}, chain={:?}", + from_block, retry_params, chain, + ), + ); + let (block, log_caches) = self.get_bootstrap_log_cache(from_block, retry_params, chain)?; + + let mut all_valid_logs: Vec = Vec::new(); + let request_from_block_val = from_block.unwrap_or(0); + + for log_cache in log_caches { + match self.validate_log_cache(&log_cache) { + Ok(true) => { + for log in log_cache.logs { + if let Some(log_block_number) = log.block_number { + if log_block_number >= request_from_block_val { + all_valid_logs.push(log); + } + } else { + if from_block.is_none() { + all_valid_logs.push(log); + } + } + } + } + Ok(false) => { + print_to_terminal( + 1, + &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", + log_cache.metadata.created_by, + log_cache.logs.len()) + ); + } + Err(e) => { + print_to_terminal( + 1, + &format!( + "Error validating LogCache from {}: {:?}. Discarding.", + log_cache.metadata.created_by, e, + ), + ); + } + } + } + + all_valid_logs.sort_by(|a, b| { + let block_cmp = a.block_number.cmp(&b.block_number); + if block_cmp == std::cmp::Ordering::Equal { + std::cmp::Ordering::Equal + } else { + block_cmp + } + }); + + let mut unique_logs = Vec::new(); + for log in all_valid_logs { + if !unique_logs.contains(&log) { + unique_logs.push(log); + } + } + + print_to_terminal( + 2, + &format!( + "get_bootstrap: Consolidated {} unique logs.", + unique_logs.len(), + ), + ); + Ok((block, unique_logs)) + } + + pub fn bootstrap( + &self, + from_block: Option, + filters: Vec, + retry_params: Option<(u64, Option)>, + chain: Option, + ) -> anyhow::Result<(u64, Vec>)> { + print_to_terminal( + 2, + &format!( + "bootstrap: from_block={:?}, num_filters={}, retry_params={:?}, chain={:?}", + from_block, + filters.len(), + retry_params, + chain, + ), + ); + + let (block, consolidated_logs) = self.get_bootstrap(from_block, retry_params, chain)?; + + if consolidated_logs.is_empty() { + print_to_terminal(2,"bootstrap: No logs retrieved after consolidation. 
Returning empty results for filters."); + return Ok((block, filters.iter().map(|_| Vec::new()).collect())); + } + + let mut results_per_filter: Vec> = Vec::new(); + for filter in filters { + let filtered_logs = eth_apply_filter(&consolidated_logs, &filter); + results_per_filter.push(filtered_logs); + } + + print_to_terminal( + 2, + &format!( + "bootstrap: Applied {} filters to bootstrapped logs.", + results_per_filter.len(), + ), + ); + Ok((block, results_per_filter)) + } +} + +impl Serialize for ManifestItem { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_struct("ManifestItem", 4)?; + state.serialize_field("metadata", &self.metadata)?; + state.serialize_field("is_empty", &self.is_empty)?; + state.serialize_field("file_hash", &self.file_hash)?; + state.serialize_field("file_name", &self.file_name)?; + state.end() + } +} + +impl<'de> Deserialize<'de> for ManifestItem { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(field_identifier, rename_all = "snake_case")] + enum Field { + Metadata, + IsEmpty, + FileHash, + FileName, + } + + struct ManifestItemVisitor; + + impl<'de> Visitor<'de> for ManifestItemVisitor { + type Value = ManifestItem; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("struct ManifestItem") + } + + fn visit_map(self, mut map: V) -> Result + where + V: MapAccess<'de>, + { + let mut metadata = None; + let mut is_empty = None; + let mut file_hash = None; + let mut file_name = None; + + while let Some(key) = map.next_key()? { + match key { + Field::Metadata => { + if metadata.is_some() { + return Err(de::Error::duplicate_field("metadata")); + } + metadata = Some(map.next_value()?); + } + Field::IsEmpty => { + if is_empty.is_some() { + return Err(de::Error::duplicate_field("is_empty")); + } + is_empty = Some(map.next_value()?); + } + Field::FileHash => { + if file_hash.is_some() { + return Err(de::Error::duplicate_field("file_hash")); + } + file_hash = Some(map.next_value()?); + } + Field::FileName => { + if file_name.is_some() { + return Err(de::Error::duplicate_field("file_name")); + } + file_name = Some(map.next_value()?); + } + } + } + + let metadata = metadata.ok_or_else(|| de::Error::missing_field("metadata"))?; + let is_empty = is_empty.ok_or_else(|| de::Error::missing_field("is_empty"))?; + let file_hash = file_hash.ok_or_else(|| de::Error::missing_field("file_hash"))?; + let file_name = file_name.ok_or_else(|| de::Error::missing_field("file_name"))?; + + Ok(ManifestItem { + metadata, + is_empty, + file_hash, + file_name, + }) + } + } + + deserializer.deserialize_struct( + "ManifestItem", + &["metadata", "is_empty", "file_hash", "file_name"], + ManifestItemVisitor, + ) + } +} + +impl Serialize for Manifest { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_struct("Manifest", 4)?; + state.serialize_field("items", &self.items)?; + state.serialize_field("manifest_filename", &self.manifest_filename)?; + state.serialize_field("chain_id", &self.chain_id)?; + state.serialize_field("protocol_version", &self.protocol_version)?; + state.end() + } +} + +impl<'de> Deserialize<'de> for Manifest { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(field_identifier, rename_all = "snake_case")] + enum Field { + Items, + ManifestFilename, + ChainId, + ProtocolVersion, + } + + 
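        // A sketch of the wire shape this hand-written visitor accepts (keys are taken from
        // the serialize_field calls above; the concrete values are illustrative only):
        //   {"items": [["<log-cache-file>", { ... }]], "manifest_filename": "manifest.json",
        //    "chain_id": "8453", "protocol_version": "..."}
        // Duplicate keys and missing required keys are rejected with serde errors, matching
        // the other manual impls in this module.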
struct ManifestVisitor; + + impl<'de> Visitor<'de> for ManifestVisitor { + type Value = Manifest; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("struct Manifest") + } + + fn visit_map(self, mut map: V) -> Result + where + V: MapAccess<'de>, + { + let mut items = None; + let mut manifest_filename = None; + let mut chain_id = None; + let mut protocol_version = None; + + while let Some(key) = map.next_key()? { + match key { + Field::Items => { + if items.is_some() { + return Err(de::Error::duplicate_field("items")); + } + items = Some(map.next_value()?); + } + Field::ManifestFilename => { + if manifest_filename.is_some() { + return Err(de::Error::duplicate_field("manifest_filename")); + } + manifest_filename = Some(map.next_value()?); + } + Field::ChainId => { + if chain_id.is_some() { + return Err(de::Error::duplicate_field("chain_id")); + } + chain_id = Some(map.next_value()?); + } + Field::ProtocolVersion => { + if protocol_version.is_some() { + return Err(de::Error::duplicate_field("protocol_version")); + } + protocol_version = Some(map.next_value()?); + } + } + } + + let items = items.ok_or_else(|| de::Error::missing_field("items"))?; + let manifest_filename = manifest_filename + .ok_or_else(|| de::Error::missing_field("manifest_filename"))?; + let chain_id = chain_id.ok_or_else(|| de::Error::missing_field("chain_id"))?; + let protocol_version = + protocol_version.ok_or_else(|| de::Error::missing_field("protocol_version"))?; + + Ok(Manifest { + items, + manifest_filename, + chain_id, + protocol_version, + }) + } + } + + deserializer.deserialize_struct( + "Manifest", + &["items", "manifest_filename", "chain_id", "protocol_version"], + ManifestVisitor, + ) + } +} + +impl Serialize for GetLogsByRangeRequest { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_struct("GetLogsByRangeRequest", 2)?; + state.serialize_field("from_block", &self.from_block)?; + state.serialize_field("to_block", &self.to_block)?; + state.end() + } +} + +impl<'de> Deserialize<'de> for GetLogsByRangeRequest { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(field_identifier, rename_all = "snake_case")] + enum Field { + FromBlock, + ToBlock, + } + + struct GetLogsByRangeRequestVisitor; + + impl<'de> Visitor<'de> for GetLogsByRangeRequestVisitor { + type Value = GetLogsByRangeRequest; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("struct GetLogsByRangeRequest") + } + + fn visit_map(self, mut map: V) -> Result + where + V: MapAccess<'de>, + { + let mut from_block = None; + let mut to_block = None; + + while let Some(key) = map.next_key()? 
{ + match key { + Field::FromBlock => { + if from_block.is_some() { + return Err(de::Error::duplicate_field("from_block")); + } + from_block = Some(map.next_value()?); + } + Field::ToBlock => { + if to_block.is_some() { + return Err(de::Error::duplicate_field("to_block")); + } + to_block = Some(map.next_value()?); + } + } + } + + let from_block = + from_block.ok_or_else(|| de::Error::missing_field("from_block"))?; + + Ok(GetLogsByRangeRequest { + from_block, + to_block, + }) + } + } + + deserializer.deserialize_struct( + "GetLogsByRangeRequest", + &["from_block", "to_block"], + GetLogsByRangeRequestVisitor, + ) + } +} + +impl Serialize for CacherStatus { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_struct("CacherStatus", 8)?; + state.serialize_field("last_cached_block", &self.last_cached_block)?; + state.serialize_field("chain_id", &self.chain_id)?; + state.serialize_field("protocol_version", &self.protocol_version)?; + state.serialize_field( + "next_cache_attempt_in_seconds", + &self.next_cache_attempt_in_seconds, + )?; + state.serialize_field("manifest_filename", &self.manifest_filename)?; + state.serialize_field("log_files_count", &self.log_files_count)?; + state.serialize_field("our_address", &self.our_address)?; + state.serialize_field("is_providing", &self.is_providing)?; + state.end() + } +} + +impl<'de> Deserialize<'de> for CacherStatus { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(field_identifier, rename_all = "snake_case")] + enum Field { + LastCachedBlock, + ChainId, + ProtocolVersion, + NextCacheAttemptInSeconds, + ManifestFilename, + LogFilesCount, + OurAddress, + IsProviding, + } + + struct CacherStatusVisitor; + + impl<'de> Visitor<'de> for CacherStatusVisitor { + type Value = CacherStatus; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("struct CacherStatus") + } + + fn visit_map(self, mut map: V) -> Result + where + V: MapAccess<'de>, + { + let mut last_cached_block = None; + let mut chain_id = None; + let mut protocol_version = None; + let mut next_cache_attempt_in_seconds = None; + let mut manifest_filename = None; + let mut log_files_count = None; + let mut our_address = None; + let mut is_providing = None; + + while let Some(key) = map.next_key()? 
{ + match key { + Field::LastCachedBlock => { + if last_cached_block.is_some() { + return Err(de::Error::duplicate_field("last_cached_block")); + } + last_cached_block = Some(map.next_value()?); + } + Field::ChainId => { + if chain_id.is_some() { + return Err(de::Error::duplicate_field("chain_id")); + } + chain_id = Some(map.next_value()?); + } + Field::ProtocolVersion => { + if protocol_version.is_some() { + return Err(de::Error::duplicate_field("protocol_version")); + } + protocol_version = Some(map.next_value()?); + } + Field::NextCacheAttemptInSeconds => { + if next_cache_attempt_in_seconds.is_some() { + return Err(de::Error::duplicate_field( + "next_cache_attempt_in_seconds", + )); + } + next_cache_attempt_in_seconds = Some(map.next_value()?); + } + Field::ManifestFilename => { + if manifest_filename.is_some() { + return Err(de::Error::duplicate_field("manifest_filename")); + } + manifest_filename = Some(map.next_value()?); + } + Field::LogFilesCount => { + if log_files_count.is_some() { + return Err(de::Error::duplicate_field("log_files_count")); + } + log_files_count = Some(map.next_value()?); + } + Field::OurAddress => { + if our_address.is_some() { + return Err(de::Error::duplicate_field("our_address")); + } + our_address = Some(map.next_value()?); + } + Field::IsProviding => { + if is_providing.is_some() { + return Err(de::Error::duplicate_field("is_providing")); + } + is_providing = Some(map.next_value()?); + } + } + } + + let last_cached_block = last_cached_block + .ok_or_else(|| de::Error::missing_field("last_cached_block"))?; + let chain_id = chain_id.ok_or_else(|| de::Error::missing_field("chain_id"))?; + let protocol_version = + protocol_version.ok_or_else(|| de::Error::missing_field("protocol_version"))?; + let manifest_filename = manifest_filename + .ok_or_else(|| de::Error::missing_field("manifest_filename"))?; + let log_files_count = + log_files_count.ok_or_else(|| de::Error::missing_field("log_files_count"))?; + let our_address = + our_address.ok_or_else(|| de::Error::missing_field("our_address"))?; + let is_providing = + is_providing.ok_or_else(|| de::Error::missing_field("is_providing"))?; + + Ok(CacherStatus { + last_cached_block, + chain_id, + protocol_version, + next_cache_attempt_in_seconds, + manifest_filename, + log_files_count, + our_address, + is_providing, + }) + } + } + + deserializer.deserialize_struct( + "CacherStatus", + &[ + "last_cached_block", + "chain_id", + "protocol_version", + "next_cache_attempt_in_seconds", + "manifest_filename", + "log_files_count", + "our_address", + "is_providing", + ], + CacherStatusVisitor, + ) + } +} + +impl Serialize for CacherRequest { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + CacherRequest::GetManifest => serializer.serialize_str("GetManifest"), + CacherRequest::GetLogCacheContent(path) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("GetLogCacheContent", path)?; + map.end() + } + CacherRequest::GetStatus => serializer.serialize_str("GetStatus"), + CacherRequest::GetLogsByRange(request) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("GetLogsByRange", request)?; + map.end() + } + CacherRequest::StartProviding => serializer.serialize_str("StartProviding"), + CacherRequest::StopProviding => serializer.serialize_str("StopProviding"), + CacherRequest::SetNodes(nodes) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("SetNodes", nodes)?; + map.end() + } + CacherRequest::Reset(nodes) => 
{ + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("Reset", nodes)?; + map.end() + } + } + } +} + +impl<'de> Deserialize<'de> for CacherRequest { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct CacherRequestVisitor; + + impl<'de> Visitor<'de> for CacherRequestVisitor { + type Value = CacherRequest; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string for unit variants or a map for other variants") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + match value { + "GetManifest" => Ok(CacherRequest::GetManifest), + "GetStatus" => Ok(CacherRequest::GetStatus), + "StartProviding" => Ok(CacherRequest::StartProviding), + "StopProviding" => Ok(CacherRequest::StopProviding), + _ => Err(de::Error::unknown_variant( + value, + &[ + "GetManifest", + "GetLogCacheContent", + "GetStatus", + "GetLogsByRange", + "StartProviding", + "StopProviding", + "SetNodes", + "Reset", + ], + )), + } + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let (variant, value) = map + .next_entry::()? + .ok_or_else(|| de::Error::invalid_length(0, &self))?; + + // Ensure there are no extra entries + if map.next_entry::()?.is_some() { + return Err(de::Error::custom("unexpected extra entries in map")); + } + + match variant.as_str() { + "GetLogCacheContent" => { + let path = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherRequest::GetLogCacheContent(path)) + } + "GetLogsByRange" => { + let request = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherRequest::GetLogsByRange(request)) + } + "SetNodes" => { + let nodes = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherRequest::SetNodes(nodes)) + } + "Reset" => { + let nodes = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherRequest::Reset(nodes)) + } + _ => Err(de::Error::unknown_variant( + &variant, + &[ + "GetManifest", + "GetLogCacheContent", + "GetStatus", + "GetLogsByRange", + "StartProviding", + "StopProviding", + "SetNodes", + "Reset", + ], + )), + } + } + } + + deserializer.deserialize_any(CacherRequestVisitor) + } +} + +impl Serialize for CacherResponse { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + CacherResponse::GetManifest(manifest) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("GetManifest", manifest)?; + map.end() + } + CacherResponse::GetLogCacheContent(result) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("GetLogCacheContent", result)?; + map.end() + } + CacherResponse::GetStatus(status) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("GetStatus", status)?; + map.end() + } + CacherResponse::GetLogsByRange(result) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("GetLogsByRange", result)?; + map.end() + } + CacherResponse::StartProviding(result) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("StartProviding", result)?; + map.end() + } + CacherResponse::StopProviding(result) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("StopProviding", result)?; + map.end() + } + CacherResponse::Rejected => serializer.serialize_str("Rejected"), + CacherResponse::IsStarting => serializer.serialize_str("IsStarting"), + CacherResponse::SetNodes(result) => { + let mut map = 
serializer.serialize_map(Some(1))?; + map.serialize_entry("SetNodes", result)?; + map.end() + } + CacherResponse::Reset(result) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("Reset", result)?; + map.end() + } + } + } +} + +impl<'de> Deserialize<'de> for CacherResponse { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct CacherResponseVisitor; + + impl<'de> Visitor<'de> for CacherResponseVisitor { + type Value = CacherResponse; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a string for unit variants or a map for other variants") + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + match value { + "Rejected" => Ok(CacherResponse::Rejected), + "IsStarting" => Ok(CacherResponse::IsStarting), + _ => Err(de::Error::unknown_variant( + value, + &[ + "GetManifest", + "GetLogCacheContent", + "GetStatus", + "GetLogsByRange", + "StartProviding", + "StopProviding", + "Rejected", + "IsStarting", + "SetNodes", + "Reset", + ], + )), + } + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let (variant, value) = map + .next_entry::()? + .ok_or_else(|| de::Error::invalid_length(0, &self))?; + + // Ensure there are no extra entries + if map.next_entry::()?.is_some() { + return Err(de::Error::custom("unexpected extra entries in map")); + } + + match variant.as_str() { + "GetManifest" => { + let manifest = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::GetManifest(manifest)) + } + "GetLogCacheContent" => { + let result = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::GetLogCacheContent(result)) + } + "GetStatus" => { + let status = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::GetStatus(status)) + } + "GetLogsByRange" => { + let result = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::GetLogsByRange(result)) + } + "StartProviding" => { + let result = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::StartProviding(result)) + } + "StopProviding" => { + let result = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::StopProviding(result)) + } + "SetNodes" => { + let result = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::SetNodes(result)) + } + "Reset" => { + let result = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(CacherResponse::Reset(result)) + } + _ => Err(de::Error::unknown_variant( + &variant, + &[ + "GetManifest", + "GetLogCacheContent", + "GetStatus", + "GetLogsByRange", + "StartProviding", + "StopProviding", + "Rejected", + "IsStarting", + "SetNodes", + "Reset", + ], + )), + } + } + } + + deserializer.deserialize_any(CacherResponseVisitor) + } +} + +impl Serialize for LogsMetadata { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut state = serializer.serialize_struct("LogsMetadata", 6)?; + state.serialize_field("chainId", &self.chain_id)?; + state.serialize_field("fromBlock", &self.from_block)?; + state.serialize_field("toBlock", &self.to_block)?; + state.serialize_field("timeCreated", &self.time_created)?; + state.serialize_field("createdBy", &self.created_by)?; + state.serialize_field("signature", &self.signature)?; + state.end() + } +} + +impl<'de> Deserialize<'de> for LogsMetadata { + fn deserialize(deserializer: D) -> Result + where + D: 
Deserializer<'de>, + { + #[derive(Deserialize)] + #[serde(field_identifier, rename_all = "camelCase")] + enum Field { + ChainId, + FromBlock, + ToBlock, + TimeCreated, + CreatedBy, + Signature, + } + + struct LogsMetadataVisitor; + + impl<'de> Visitor<'de> for LogsMetadataVisitor { + type Value = LogsMetadata; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("struct LogsMetadata") + } + + fn visit_map(self, mut map: V) -> Result + where + V: MapAccess<'de>, + { + let mut chain_id = None; + let mut from_block = None; + let mut to_block = None; + let mut time_created = None; + let mut created_by = None; + let mut signature = None; + + while let Some(key) = map.next_key()? { + match key { + Field::ChainId => { + if chain_id.is_some() { + return Err(de::Error::duplicate_field("chainId")); + } + chain_id = Some(map.next_value()?); + } + Field::FromBlock => { + if from_block.is_some() { + return Err(de::Error::duplicate_field("fromBlock")); + } + from_block = Some(map.next_value()?); + } + Field::ToBlock => { + if to_block.is_some() { + return Err(de::Error::duplicate_field("toBlock")); + } + to_block = Some(map.next_value()?); + } + Field::TimeCreated => { + if time_created.is_some() { + return Err(de::Error::duplicate_field("timeCreated")); + } + time_created = Some(map.next_value()?); + } + Field::CreatedBy => { + if created_by.is_some() { + return Err(de::Error::duplicate_field("createdBy")); + } + created_by = Some(map.next_value()?); + } + Field::Signature => { + if signature.is_some() { + return Err(de::Error::duplicate_field("signature")); + } + signature = Some(map.next_value()?); + } + } + } + + let chain_id = chain_id.ok_or_else(|| de::Error::missing_field("chainId"))?; + let from_block = from_block.ok_or_else(|| de::Error::missing_field("fromBlock"))?; + let to_block = to_block.ok_or_else(|| de::Error::missing_field("toBlock"))?; + let time_created = + time_created.ok_or_else(|| de::Error::missing_field("timeCreated"))?; + let created_by = created_by.ok_or_else(|| de::Error::missing_field("createdBy"))?; + let signature = signature.ok_or_else(|| de::Error::missing_field("signature"))?; + + Ok(LogsMetadata { + chain_id, + from_block, + to_block, + time_created, + created_by, + signature, + }) + } + } + + deserializer.deserialize_struct( + "LogsMetadata", + &[ + "chainId", + "fromBlock", + "toBlock", + "timeCreated", + "createdBy", + "signature", + ], + LogsMetadataVisitor, + ) + } +} + +impl Serialize for GetLogsByRangeOkResponse { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + GetLogsByRangeOkResponse::Logs(tuple) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("Logs", tuple)?; + map.end() + } + GetLogsByRangeOkResponse::Latest(block) => { + let mut map = serializer.serialize_map(Some(1))?; + map.serialize_entry("Latest", block)?; + map.end() + } + } + } +} + +impl<'de> Deserialize<'de> for GetLogsByRangeOkResponse { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + struct GetLogsByRangeOkResponseVisitor; + + impl<'de> Visitor<'de> for GetLogsByRangeOkResponseVisitor { + type Value = GetLogsByRangeOkResponse; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str( + "a map with a single key representing the GetLogsByRangeOkResponse variant", + ) + } + + fn visit_map(self, mut map: A) -> Result + where + A: MapAccess<'de>, + { + let (variant, value) = map + .next_entry::()? 
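+                    // A single entry is expected here, keyed by the variant
+                    // name ("Logs" or "Latest"); its JSON value is converted
+                    // back into the variant's payload via serde_json below.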
+ .ok_or_else(|| de::Error::invalid_length(0, &self))?; + + match variant.as_str() { + "Logs" => { + let tuple = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(GetLogsByRangeOkResponse::Logs(tuple)) + } + "Latest" => { + let block = serde_json::from_value(value).map_err(de::Error::custom)?; + Ok(GetLogsByRangeOkResponse::Latest(block)) + } + _ => Err(de::Error::unknown_variant(&variant, &["Logs", "Latest"])), + } + } + } + + deserializer.deserialize_map(GetLogsByRangeOkResponseVisitor) + } +} From e11d17cad18618117f1ef83520e71d46275f0504 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 20:28:02 +0000 Subject: [PATCH 06/31] Format Rust code using rustfmt --- src/bindings.rs | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index 293fd19..dca3b93 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1,10 +1,13 @@ +use crate::bindings::contract::getCall; use crate::eth::{ BlockNumberOrTag, EthError, Filter as EthFilter, FilterBlockOption, Log as EthLog, Provider, }; -use crate::bindings::contract::getCall; use crate::hyperware::process::binding_cacher::{ - BindingCacherRequest as CacherRequest, BindingCacherResponse as CacherResponse, BindingCacherStatus as CacherStatus, BindingGetLogsByRangeOkResponse as GetLogsByRangeOkResponse, BindingGetLogsByRangeRequest as GetLogsByRangeRequest, - BindingLogsMetadata as LogsMetadata, BindingManifest as Manifest, BindingManifestItem as ManifestItem, + BindingCacherRequest as CacherRequest, BindingCacherResponse as CacherResponse, + BindingCacherStatus as CacherStatus, + BindingGetLogsByRangeOkResponse as GetLogsByRangeOkResponse, + BindingGetLogsByRangeRequest as GetLogsByRangeRequest, BindingLogsMetadata as LogsMetadata, + BindingManifest as Manifest, BindingManifestItem as ManifestItem, }; use crate::{net, sign}; @@ -341,8 +344,8 @@ pub fn valid_name(name: &str) -> bool { name.is_ascii() && name.len() >= 1 && name - .chars() - .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') + .chars() + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') } // TODO remove @@ -351,9 +354,9 @@ pub fn valid_note(note: &str) -> bool { && note.len() >= 2 && note.chars().next() == Some('~') && note - .chars() - .skip(1) - .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') + .chars() + .skip(1) + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') } // TODO remove @@ -362,9 +365,9 @@ pub fn valid_fact(fact: &str) -> bool { && fact.len() >= 2 && fact.chars().next() == Some('!') && fact - .chars() - .skip(1) - .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') + .chars() + .skip(1) + .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') } // TODO remove @@ -604,7 +607,7 @@ impl Bindings { namehash: FixedBytes::<32>::from_str(&namehash(path)) .map_err(|_| EthError::InvalidParams)?, } - .abi_encode(); + .abi_encode(); let tx_req = TransactionRequest::default() .input(TransactionInput::new(get_call.into())) @@ -635,7 +638,7 @@ impl Bindings { let get_call = getCall { namehash: FixedBytes::<32>::from_str(entryhash).map_err(|_| EthError::InvalidParams)?, } - .abi_encode(); + .abi_encode(); let tx_req = TransactionRequest::default() .input(TransactionInput::new(get_call.into())) @@ -883,9 +886,7 @@ impl Bindings { } } CacherResponse::Rejected => { - return Err(anyhow::anyhow!( - "Local binding-cacher rejected 
our request" - )); + return Err(anyhow::anyhow!("Local binding-cacher rejected our request")); } _ => { return Err(anyhow::anyhow!( From 78a6a7e2b5ef3a39da44f676c87be0239676c645 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Wed, 8 Oct 2025 08:32:23 -0400 Subject: [PATCH 07/31] Production addresses and starting blocks --- src/bindings.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index dca3b93..f656a4d 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -29,7 +29,7 @@ use std::str::FromStr; /// bindings data deployment address on base #[cfg(not(feature = "simulation-mode"))] -pub const BINDINGS_ADDRESS: &'static str = "0x0000000000000000000000000000000000000000"; +pub const BINDINGS_ADDRESS: &'static str = "0x0000000000e8d224B902632757d5dbc51a451456"; #[cfg(feature = "simulation-mode")] pub const BINDINGS_ADDRESS: &'static str = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"; #[cfg(not(feature = "simulation-mode"))] @@ -38,7 +38,7 @@ pub const BINDINGS_CHAIN_ID: u64 = 8453; // base pub const BINDINGS_CHAIN_ID: u64 = 31337; // fakenet /// first block (minus one) of tokenregistry deployment on base #[cfg(not(feature = "simulation-mode"))] -pub const BINDINGS_FIRST_BLOCK: u64 = 27_270_411; +pub const BINDINGS_FIRST_BLOCK: u64 = 36_283_831; #[cfg(feature = "simulation-mode")] pub const BINDINGS_FIRST_BLOCK: u64 = 0; /// the root hash of tokenregistry, empty bytes32 From 0d1ac8332e950124d4ea04ec82ca61248bae9fb7 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 13 Oct 2025 11:53:06 -0400 Subject: [PATCH 08/31] skipping validation of log_cache --- src/bindings.rs | 61 +++++++++++++------------- src/hypermap.rs | 111 +++++++++++++++++++++++++----------------------- 2 files changed, 90 insertions(+), 82 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index f656a4d..8186ab3 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1017,38 +1017,41 @@ impl Bindings { let request_from_block_val = from_block.unwrap_or(0); for log_cache in log_caches { - match self.validate_log_cache(&log_cache) { - Ok(true) => { - for log in log_cache.logs { - if let Some(log_block_number) = log.block_number { - if log_block_number >= request_from_block_val { - all_valid_logs.push(log); - } - } else { - if from_block.is_none() { - all_valid_logs.push(log); - } - } + // VALIDATION TEMPORARILY SKIPPED - For external reasons, validation is disabled + // and all logs are processed as if validation succeeded (Ok(true) case) + + // match self.validate_log_cache(&log_cache) { + // Ok(true) => { + for log in log_cache.logs { + if let Some(log_block_number) = log.block_number { + if log_block_number >= request_from_block_val { + all_valid_logs.push(log); + } + } else { + if from_block.is_none() { + all_valid_logs.push(log); } - } - Ok(false) => { - print_to_terminal( - 1, - &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", - log_cache.metadata.created_by, - log_cache.logs.len()) - ); - } - Err(e) => { - print_to_terminal( - 1, - &format!( - "Error validating LogCache from {}: {:?}. Discarding.", - log_cache.metadata.created_by, e, - ), - ); } } + // } + // Ok(false) => { + // print_to_terminal( + // 1, + // &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", + // log_cache.metadata.created_by, + // log_cache.logs.len()) + // ); + // } + // Err(e) => { + // print_to_terminal( + // 1, + // &format!( + // "Error validating LogCache from {}: {:?}. 
Discarding.", + // log_cache.metadata.created_by, e, + // ), + // ); + // } + // } } all_valid_logs.sort_by(|a, b| { diff --git a/src/hypermap.rs b/src/hypermap.rs index 0fe5273..fa6e4e2 100644 --- a/src/hypermap.rs +++ b/src/hypermap.rs @@ -1144,40 +1144,42 @@ impl Hypermap { let request_from_block_val = from_block.unwrap_or(0); for log_cache in log_caches { - match self.validate_log_cache(&log_cache) { - Ok(true) => { - for log in log_cache.logs { - if let Some(log_block_number) = log.block_number { - if log_block_number >= request_from_block_val { - all_valid_logs.push(log); - } - } else { - if from_block.is_none() { - all_valid_logs.push(log); - } - } + // VALIDATION TEMPORARILY SKIPPED - For external reasons, validation is disabled + // and all logs are processed as if validation succeeded (Ok(true) case) + + // match self.validate_log_cache(&log_cache) { + // Ok(true) => { + for log in log_cache.logs { + if let Some(log_block_number) = log.block_number { + if log_block_number >= request_from_block_val { + all_valid_logs.push(log); + } + } else { + if from_block.is_none() { + all_valid_logs.push(log); } - } - Ok(false) => { - print_to_terminal( - 1, - &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", - log_cache.metadata.created_by, - log_cache.logs.len()) - ); - } - Err(e) => { - print_to_terminal( - 1, - &format!( - "Error validating LogCache from {}: {:?}. Discarding.", - log_cache.metadata.created_by, e, - ), - ); } } + // } + // Ok(false) => { + // print_to_terminal( + // 1, + // &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", + // log_cache.metadata.created_by, + // log_cache.logs.len()) + // ); + // } + // Err(e) => { + // print_to_terminal( + // 1, + // &format!( + // "Error validating LogCache from {}: {:?}. Discarding.", + // log_cache.metadata.created_by, e, + // ), + // ); + // } + // } } - all_valid_logs.sort_by(|a, b| { let block_cmp = a.block_number.cmp(&b.block_number); if block_cmp == std::cmp::Ordering::Equal { @@ -1224,8 +1226,11 @@ impl Hypermap { let request_from_block_val = from_block.unwrap_or(0); for log_cache in log_caches { - match self.validate_log_cache(&log_cache).await { - Ok(true) => { + // VALIDATION TEMPORARILY SKIPPED - For external reasons, validation is disabled + // and all logs are processed as if validation succeeded (Ok(true) case) + + //match self.validate_log_cache(&log_cache).await { + //Ok(true) => { for log in log_cache.logs { if let Some(log_block_number) = log.block_number { if log_block_number >= request_from_block_val { @@ -1237,27 +1242,27 @@ impl Hypermap { } } } - } - Ok(false) => { - print_to_terminal( - 1, - &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", - log_cache.metadata.created_by, - log_cache.logs.len()) - ); - } - Err(e) => { - print_to_terminal( - 1, - &format!( - "Error validating LogCache from {}: {:?}. Discarding {} logs.", - log_cache.metadata.created_by, - e, - log_cache.logs.len() - ), - ); - } - } + //} + //Ok(false) => { + // print_to_terminal( + // 1, + // &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", + // log_cache.metadata.created_by, + // log_cache.logs.len()) + // ); + //} + //Err(e) => { + // print_to_terminal( + // 1, + // &format!( + // "Error validating LogCache from {}: {:?}. 
Discarding {} logs.", + // log_cache.metadata.created_by, + // e, + // log_cache.logs.len() + // ), + // ); + //} + //} } all_valid_logs.sort_by(|a, b| { From e95ff8d720d73cd4b26dbabceb75b1b1f24874d7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 15:53:37 +0000 Subject: [PATCH 09/31] Format Rust code using rustfmt --- src/hypermap.rs | 62 ++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/src/hypermap.rs b/src/hypermap.rs index fa6e4e2..8ecf8a9 100644 --- a/src/hypermap.rs +++ b/src/hypermap.rs @@ -1230,38 +1230,38 @@ impl Hypermap { // and all logs are processed as if validation succeeded (Ok(true) case) //match self.validate_log_cache(&log_cache).await { - //Ok(true) => { - for log in log_cache.logs { - if let Some(log_block_number) = log.block_number { - if log_block_number >= request_from_block_val { - all_valid_logs.push(log); - } - } else { - if from_block.is_none() { - all_valid_logs.push(log); - } - } + //Ok(true) => { + for log in log_cache.logs { + if let Some(log_block_number) = log.block_number { + if log_block_number >= request_from_block_val { + all_valid_logs.push(log); + } + } else { + if from_block.is_none() { + all_valid_logs.push(log); } - //} - //Ok(false) => { - // print_to_terminal( - // 1, - // &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", - // log_cache.metadata.created_by, - // log_cache.logs.len()) - // ); - //} - //Err(e) => { - // print_to_terminal( - // 1, - // &format!( - // "Error validating LogCache from {}: {:?}. Discarding {} logs.", - // log_cache.metadata.created_by, - // e, - // log_cache.logs.len() - // ), - // ); - //} + } + } + //} + //Ok(false) => { + // print_to_terminal( + // 1, + // &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", + // log_cache.metadata.created_by, + // log_cache.logs.len()) + // ); + //} + //Err(e) => { + // print_to_terminal( + // 1, + // &format!( + // "Error validating LogCache from {}: {:?}. 
Discarding {} logs.", + // log_cache.metadata.created_by, + // e, + // log_cache.logs.len() + // ), + // ); + //} //} } From 9e15803dd9e58b35f68d211efac36fb04a818ae5 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 10 Nov 2025 10:14:58 -0500 Subject: [PATCH 10/31] bindings.rs cleanup --- src/bindings.rs | 823 ++++++------------------------------------------ 1 file changed, 101 insertions(+), 722 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index 8186ab3..4d70b46 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1,6 +1,5 @@ -use crate::bindings::contract::getCall; use crate::eth::{ - BlockNumberOrTag, EthError, Filter as EthFilter, FilterBlockOption, Log as EthLog, Provider, + BlockNumberOrTag, Filter as EthFilter, FilterBlockOption, Log as EthLog, Provider, }; use crate::hyperware::process::binding_cacher::{ BindingCacherRequest as CacherRequest, BindingCacherResponse as CacherResponse, @@ -9,21 +8,15 @@ use crate::hyperware::process::binding_cacher::{ BindingGetLogsByRangeRequest as GetLogsByRangeRequest, BindingLogsMetadata as LogsMetadata, BindingManifest as Manifest, BindingManifestItem as ManifestItem, }; - -use crate::{net, sign}; use crate::{print_to_terminal, Address as BindingAddress, Request}; use alloy::hex; -use alloy::rpc::types::request::{TransactionInput, TransactionRequest}; -use alloy_primitives::{keccak256, Address, Bytes, FixedBytes, B256}; -use alloy_sol_types::{SolCall, SolEvent, SolValue}; -use contract::tokenCall; +use alloy_primitives::{keccak256, Address}; use serde::{ self, de::{self, MapAccess, Visitor}, ser::{SerializeMap, SerializeStruct}, Deserialize, Deserializer, Serialize, Serializer, }; -use std::error::Error; use std::fmt; use std::str::FromStr; @@ -53,446 +46,9 @@ pub struct LogCache { const CACHER_REQUEST_TIMEOUT_S: u64 = 15; -// TODO replace with tokenregistry structures -/// Sol structures for Hypermap requests -pub mod contract { - use alloy_sol_macro::sol; - - sol! { - /// Emitted when a new namespace entry is minted. - /// - parenthash: The hash of the parent namespace entry. - /// - childhash: The hash of the minted namespace entry's full path. - /// - labelhash: The hash of only the label (the final entry in the path). - /// - label: The label (the final entry in the path) of the new entry. - event Mint( - bytes32 indexed parenthash, - bytes32 indexed childhash, - bytes indexed labelhash, - bytes label - ); - - /// Emitted when a fact is created on an existing namespace entry. - /// Facts are immutable and may only be written once. A fact label is - /// prepended with an exclamation mark (!) to indicate that it is a fact. - /// - parenthash The hash of the parent namespace entry. - /// - facthash The hash of the newly created fact's full path. - /// - labelhash The hash of only the label (the final entry in the path). - /// - label The label of the fact. - /// - data The data stored at the fact. - event Fact( - bytes32 indexed parenthash, - bytes32 indexed facthash, - bytes indexed labelhash, - bytes label, - bytes data - ); - - /// Emitted when a new note is created on an existing namespace entry. - /// Notes are mutable. A note label is prepended with a tilde (~) to indicate - /// that it is a note. - /// - parenthash: The hash of the parent namespace entry. - /// - notehash: The hash of the newly created note's full path. - /// - labelhash: The hash of only the label (the final entry in the path). - /// - label: The label of the note. - /// - data: The data stored at the note. 
- event Note( - bytes32 indexed parenthash, - bytes32 indexed notehash, - bytes indexed labelhash, - bytes label, - bytes data - ); - - /// Emitted when a gene is set for an existing namespace entry. - /// A gene is a specific TBA implementation which will be applied to all - /// sub-entries of the namespace entry. - /// - entry: The namespace entry's namehash. - /// - gene: The address of the TBA implementation. - event Gene(bytes32 indexed entry, address indexed gene); - - /// Emitted when the zeroth namespace entry is minted. - /// Occurs exactly once at initialization. - /// - zeroTba: The address of the zeroth TBA - event Zero(address indexed zeroTba); - - /// Emitted when a namespace entry is transferred from one address - /// to another. - /// - from: The address of the sender. - /// - to: The address of the recipient. - /// - id: The namehash of the namespace entry (converted to uint256). - event Transfer( - address indexed from, - address indexed to, - uint256 indexed id - ); - - /// Emitted when a namespace entry is approved for transfer. - /// - owner: The address of the owner. - /// - spender: The address of the spender. - /// - id: The namehash of the namespace entry (converted to uint256). - event Approval( - address indexed owner, - address indexed spender, - uint256 indexed id - ); - - /// Emitted when an operator is approved for all of an owner's - /// namespace entries. - /// - owner: The address of the owner. - /// - operator: The address of the operator. - /// - approved: Whether the operator is approved. - event ApprovalForAll( - address indexed owner, - address indexed operator, - bool approved - ); - - /// Retrieves information about a specific namespace entry. - /// - namehash The namehash of the namespace entry to query. - /// - /// Returns: - /// - tba: The address of the token-bound account associated - /// with the entry. - /// - owner: The address of the entry owner. - /// - data: The note or fact bytes associated with the entry - /// (empty if not a note or fact). - function get( - bytes32 namehash - ) external view returns (address tba, address owner, bytes memory data); - - /// Mints a new namespace entry and creates a token-bound account for - /// it. Must be called by a parent namespace entry token-bound account. - /// - who: The address to own the new namespace entry. - /// - label: The label to mint beneath the calling parent entry. - /// - initialization: Initialization calldata applied to the new - /// minted entry's token-bound account. - /// - erc721Data: ERC-721 data -- passed to comply with - /// `ERC721TokenReceiver.onERC721Received()`. - /// - implementation: The address of the implementation contract for - /// the token-bound account: this will be overriden by the gene if the - /// parent entry has one set. - /// - /// Returns: - /// - tba: The address of the new entry's token-bound account. - function mint( - address who, - bytes calldata label, - bytes calldata initialization, - bytes calldata erc721Data, - address implementation - ) external returns (address tba); - - /// Sets the gene for the calling namespace entry. - /// - _gene: The address of the TBA implementation to set for all - /// children of the calling namespace entry. - function gene(address _gene) external; - - /// Creates a new fact beneath the calling namespace entry. - /// - fact: The fact label to create. Must be prepended with an - /// exclamation mark (!). - /// - data: The data to be stored at the fact. 
- /// - /// Returns: - /// - facthash: The namehash of the newly created fact. - function fact( - bytes calldata fact, - bytes calldata data - ) external returns (bytes32 facthash); - - /// Creates a new note beneath the calling namespace entry. - /// - note: The note label to create. Must be prepended with a tilde (~). - /// - data: The data to be stored at the note. - /// - /// Returns: - /// - notehash: The namehash of the newly created note. - function note( - bytes calldata note, - bytes calldata data - ) external returns (bytes32 notehash); - - /// Retrieves the token-bound account address of a namespace entry. - /// - entry: The entry namehash (as uint256) for which to get the - /// token-bound account. - /// - /// Returns: - /// - tba: The token-bound account address of the namespace entry. - function tbaOf(uint256 entry) external view returns (address tba); - - function balanceOf(address owner) external view returns (uint256); - - function getApproved(uint256 entry) external view returns (address); - - function isApprovedForAll( - address owner, - address operator - ) external view returns (bool); - - function ownerOf(uint256 entry) external view returns (address); - - function setApprovalForAll(address operator, bool approved) external; - - function approve(address spender, uint256 entry) external; - - function safeTransferFrom(address from, address to, uint256 id) external; - - function safeTransferFrom( - address from, - address to, - uint256 id, - bytes calldata data - ) external; - - function transferFrom(address from, address to, uint256 id) external; - - function supportsInterface(bytes4 interfaceId) external view returns (bool); - - /// Gets the token identifier that owns this token-bound account (TBA). - /// This is a core function of the ERC-6551 standard that returns the - /// identifying information about the NFT that owns this account. - /// The return values are constant and cannot change over time. - /// - /// Returns: - /// - chainId: The EIP-155 chain ID where the owning NFT exists - /// - tokenContract: The contract address of the owning NFT - /// - tokenId: The token ID of the owning NFT - function token() - external - view - returns (uint256 chainId, address tokenContract, uint256 tokenId); - } -} - -// TODO remove -/// A mint log from the hypermap, converted to a 'resolved' format using -/// namespace data saved in the hns-indexer. -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Mint { - pub name: String, - pub parent_path: String, -} - -// TODO remove -/// A note log from the hypermap, converted to a 'resolved' format using -/// namespace data saved in the hns-indexer -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Note { - pub note: String, - pub parent_path: String, - pub data: Bytes, -} - -// TODO remove -/// A fact log from the hypermap, converted to a 'resolved' format using -/// namespace data saved in the hns-indexer -#[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Fact { - pub fact: String, - pub parent_path: String, - pub data: Bytes, -} - -// TODO remove -/// Errors that can occur when decoding a log from the hypermap using -/// [`decode_mint_log()`] or [`decode_note_log()`]. -#[derive(Clone, Debug, Deserialize, Serialize)] -pub enum DecodeLogError { - /// The log's topic is not a mint or note event. - UnexpectedTopic(B256), - /// The name is not valid (according to [`valid_name`]). - InvalidName(String), - /// An error occurred while decoding the log. 
- DecodeError(String), - /// The parent name could not be resolved with `hns-indexer`. - UnresolvedParent(String), -} - -impl fmt::Display for DecodeLogError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - DecodeLogError::UnexpectedTopic(topic) => write!(f, "Unexpected topic: {:?}", topic), - DecodeLogError::InvalidName(name) => write!(f, "Invalid name: {}", name), - DecodeLogError::DecodeError(err) => write!(f, "Decode error: {}", err), - DecodeLogError::UnresolvedParent(parent) => { - write!(f, "Could not resolve parent: {}", parent) - } - } - } -} - -impl Error for DecodeLogError {} - -// TODO remove -/// Canonical function to determine if a hypermap entry is valid. -/// -/// This checks a **single name**, not the full path-name. A full path-name -/// is comprised of valid names separated by `.` -pub fn valid_entry(entry: &str, note: bool, fact: bool) -> bool { - if note && fact { - return false; - } - if note { - valid_note(entry) - } else if fact { - valid_fact(entry) - } else { - valid_name(entry) - } -} +// ... existing code ... -// TODO remove -pub fn valid_name(name: &str) -> bool { - name.is_ascii() - && name.len() >= 1 - && name - .chars() - .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') -} - -// TODO remove -pub fn valid_note(note: &str) -> bool { - note.is_ascii() - && note.len() >= 2 - && note.chars().next() == Some('~') - && note - .chars() - .skip(1) - .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') -} - -// TODO remove -pub fn valid_fact(fact: &str) -> bool { - fact.is_ascii() - && fact.len() >= 2 - && fact.chars().next() == Some('!') - && fact - .chars() - .skip(1) - .all(|c| c.is_ascii_lowercase() || c.is_ascii_digit() || c == '-') -} - -// TODO remove -/// Produce a namehash from a hypermap name. -pub fn namehash(name: &str) -> String { - let mut node = B256::default(); - - let mut labels: Vec<&str> = name.split('.').collect(); - labels.reverse(); - - for label in labels.iter() { - let l = keccak256(label); - node = keccak256((node, l).abi_encode_packed()); - } - format!("0x{}", hex::encode(node)) -} - -// TODO remove -/// Decode a mint log from the hypermap into a 'resolved' format. -/// -/// Uses [`valid_name()`] to check if the name is valid. -pub fn decode_mint_log(log: &crate::eth::Log) -> Result { - let contract::Note::SIGNATURE_HASH = log.topics()[0] else { - return Err(DecodeLogError::UnexpectedTopic(log.topics()[0])); - }; - let decoded = contract::Mint::decode_log_data(log.data(), true) - .map_err(|e| DecodeLogError::DecodeError(e.to_string()))?; - let name = String::from_utf8_lossy(&decoded.label).to_string(); - if !valid_name(&name) { - return Err(DecodeLogError::InvalidName(name)); - } - match resolve_parent(log, None) { - Some(parent_path) => Ok(Mint { name, parent_path }), - None => Err(DecodeLogError::UnresolvedParent(name)), - } -} - -// TODO remove -/// Decode a note log from the hypermap into a 'resolved' format. -/// -/// Uses [`valid_name()`] to check if the name is valid. 
-pub fn decode_note_log(log: &crate::eth::Log) -> Result { - let contract::Note::SIGNATURE_HASH = log.topics()[0] else { - return Err(DecodeLogError::UnexpectedTopic(log.topics()[0])); - }; - let decoded = contract::Note::decode_log_data(log.data(), true) - .map_err(|e| DecodeLogError::DecodeError(e.to_string()))?; - let note = String::from_utf8_lossy(&decoded.label).to_string(); - if !valid_note(¬e) { - return Err(DecodeLogError::InvalidName(note)); - } - match resolve_parent(log, None) { - Some(parent_path) => Ok(Note { - note, - parent_path, - data: decoded.data, - }), - None => Err(DecodeLogError::UnresolvedParent(note)), - } -} - -// TODO remove -pub fn decode_fact_log(log: &crate::eth::Log) -> Result { - let contract::Fact::SIGNATURE_HASH = log.topics()[0] else { - return Err(DecodeLogError::UnexpectedTopic(log.topics()[0])); - }; - let decoded = contract::Fact::decode_log_data(log.data(), true) - .map_err(|e| DecodeLogError::DecodeError(e.to_string()))?; - let fact = String::from_utf8_lossy(&decoded.label).to_string(); - if !valid_fact(&fact) { - return Err(DecodeLogError::InvalidName(fact)); - } - match resolve_parent(log, None) { - Some(parent_path) => Ok(Fact { - fact, - parent_path, - data: decoded.data, - }), - None => Err(DecodeLogError::UnresolvedParent(fact)), - } -} - -// TODO remove -/// Given a [`crate::eth::Log`] (which must be a log from hypermap), resolve the parent name -/// of the new entry or note. -pub fn resolve_parent(log: &crate::eth::Log, timeout: Option) -> Option { - let parent_hash = log.topics()[1].to_string(); - net::get_name(&parent_hash, log.block_number, timeout) -} - -// TODO remove -/// Given a [`crate::eth::Log`] (which must be a log from hypermap), resolve the full name -/// of the new entry or note. -/// -/// Uses [`valid_name()`] to check if the name is valid. -pub fn resolve_full_name(log: &crate::eth::Log, timeout: Option) -> Option { - let parent_hash = log.topics()[1].to_string(); - let parent_name = net::get_name(&parent_hash, log.block_number, timeout)?; - let log_name = match log.topics()[0] { - contract::Mint::SIGNATURE_HASH => { - let decoded = contract::Mint::decode_log_data(log.data(), true).unwrap(); - decoded.label - } - contract::Note::SIGNATURE_HASH => { - let decoded = contract::Note::decode_log_data(log.data(), true).unwrap(); - decoded.label - } - contract::Fact::SIGNATURE_HASH => { - let decoded = contract::Fact::decode_log_data(log.data(), true).unwrap(); - decoded.label - } - _ => return None, - }; - let name = String::from_utf8_lossy(&log_name); - if !valid_entry( - &name, - log.topics()[0] == contract::Note::SIGNATURE_HASH, - log.topics()[0] == contract::Fact::SIGNATURE_HASH, - ) { - return None; - } - Some(format!("{name}.{parent_name}")) -} - -// TODO remove +/// Apply an ETH log filter to a set of logs (topic/address/block-range only). 
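+///
+/// Minimal usage sketch (using this module's `EthFilter` alias; `cached_logs`
+/// and `address` are placeholders supplied by the caller):
+/// ```rust
+/// let filter = EthFilter::new().address(address).from_block(BINDINGS_FIRST_BLOCK);
+/// let matched = eth_apply_filter(&cached_logs, &filter);
+/// ```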
pub fn eth_apply_filter(logs: &[EthLog], filter: &EthFilter) -> Vec { let mut matched_logs = Vec::new(); @@ -514,10 +70,8 @@ pub fn eth_apply_filter(logs: &[EthLog], filter: &EthFilter) -> Vec { for log in logs.iter() { let mut match_address = filter.address.is_empty(); - if !match_address { - if filter.address.matches(&log.address()) { - match_address = true; - } + if !match_address && filter.address.matches(&log.address()) { + match_address = true; } if !match_address { continue; @@ -534,27 +88,17 @@ pub fn eth_apply_filter(logs: &[EthLog], filter: &EthFilter) -> Vec { continue; } } - } else { - if filter_from_block.is_some() || filter_to_block.is_some() { - continue; - } + } else if filter_from_block.is_some() || filter_to_block.is_some() { + continue; } let mut match_topics = true; - for (i, filter_topic_alternatives) in filter.topics.iter().enumerate() { - if filter_topic_alternatives.is_empty() { + for (i, alts) in filter.topics.iter().enumerate() { + if alts.is_empty() { continue; } - let log_topic = log.topics().get(i); - let mut current_topic_matched = false; - for filter_topic in filter_topic_alternatives.iter() { - if log_topic == Some(filter_topic) { - current_topic_matched = true; - break; - } - } - if !current_topic_matched { + if !alts.iter().any(|t| Some(t) == log_topic) { match_topics = false; break; } @@ -567,7 +111,7 @@ pub fn eth_apply_filter(logs: &[EthLog], filter: &EthFilter) -> Vec { matched_logs } -/// Helper struct for reading binding data. +/// Helper struct for reading binding data and local cacher bootstrap. #[derive(Clone, Debug, Deserialize, Serialize)] pub struct Bindings { pub provider: Provider, @@ -576,10 +120,6 @@ pub struct Bindings { impl Bindings { /// Creates a new Bindings instance with a specified address. - /// - /// # Arguments - /// * `provider` - A reference to the Provider. - /// * `address` - The address of the Bindings contract. pub fn new(provider: Provider, address: Address) -> Self { Self { provider, address } } @@ -595,144 +135,6 @@ impl Bindings { &self.address } - /// Gets an entry from the Bindings by its string-formatted name. - /// - /// # Parameters - /// - `path`: The name-path to get from the Bindings. - /// # Returns - /// A `Result<(Address, Address, Option), EthError>` representing the TBA, owner, - /// and value if the entry exists and is a note. - pub fn get(&self, path: &str) -> Result<(Address, Address, Option), EthError> { - let get_call = getCall { - namehash: FixedBytes::<32>::from_str(&namehash(path)) - .map_err(|_| EthError::InvalidParams)?, - } - .abi_encode(); - - let tx_req = TransactionRequest::default() - .input(TransactionInput::new(get_call.into())) - .to(self.address); - - let res_bytes = self.provider.call(tx_req, None)?; - - let res = getCall::abi_decode_returns(&res_bytes, false) - .map_err(|_| EthError::RpcMalformedResponse)?; - - let note_data = if res.data == Bytes::default() { - None - } else { - Some(res.data) - }; - - Ok((res.tba, res.owner, note_data)) - } - - /// Gets an entry from the Bindings by its hash. - /// - /// # Parameters - /// - `entryhash`: The entry to get from the Bindings. - /// # Returns - /// A `Result<(Address, Address, Option), EthError>` representing the TBA, owner, - /// and value if the entry exists and is a note. 
- pub fn get_hash(&self, entryhash: &str) -> Result<(Address, Address, Option), EthError> { - let get_call = getCall { - namehash: FixedBytes::<32>::from_str(entryhash).map_err(|_| EthError::InvalidParams)?, - } - .abi_encode(); - - let tx_req = TransactionRequest::default() - .input(TransactionInput::new(get_call.into())) - .to(self.address); - - let res_bytes = self.provider.call(tx_req, None)?; - - let res = getCall::abi_decode_returns(&res_bytes, false) - .map_err(|_| EthError::RpcMalformedResponse)?; - - let note_data = if res.data == Bytes::default() { - None - } else { - Some(res.data) - }; - - Ok((res.tba, res.owner, note_data)) - } - - /// Gets a namehash from an existing TBA address. - /// - /// # Parameters - /// - `tba`: The TBA to get the namehash of. - /// # Returns - /// A `Result` representing the namehash of the TBA. - pub fn get_namehash_from_tba(&self, tba: Address) -> Result { - let token_call = tokenCall {}.abi_encode(); - - let tx_req = TransactionRequest::default() - .input(TransactionInput::new(token_call.into())) - .to(tba); - - let res_bytes = self.provider.call(tx_req, None)?; - - let res = tokenCall::abi_decode_returns(&res_bytes, false) - .map_err(|_| EthError::RpcMalformedResponse)?; - - let namehash: FixedBytes<32> = res.tokenId.into(); - Ok(format!("0x{}", hex::encode(namehash))) - } - - /// Create a filter for all mint events. - pub fn mint_filter(&self) -> crate::eth::Filter { - crate::eth::Filter::new() - .address(self.address) - .event(contract::Mint::SIGNATURE) - } - - /// Create a filter for all note events. - pub fn note_filter(&self) -> crate::eth::Filter { - crate::eth::Filter::new() - .address(self.address) - .event(contract::Note::SIGNATURE) - } - - /// Create a filter for all fact events. - pub fn fact_filter(&self) -> crate::eth::Filter { - crate::eth::Filter::new() - .address(self.address) - .event(contract::Fact::SIGNATURE) - } - - /// Create a filter for a given set of specific notes. This function will - /// hash the note labels and use them as the topic3 filter. - /// - /// Example: - /// ```rust - /// let filter = hypermap.notes_filter(&["~note1", "~note2"]); - /// ``` - pub fn notes_filter(&self, notes: &[&str]) -> crate::eth::Filter { - self.note_filter().topic3( - notes - .into_iter() - .map(|note| keccak256(note)) - .collect::>(), - ) - } - - /// Create a filter for a given set of specific facts. This function will - /// hash the fact labels and use them as the topic3 filter. - /// - /// Example: - /// ```rust - /// let filter = hypermap.facts_filter(&["!fact1", "!fact2"]); - /// ``` - pub fn facts_filter(&self, facts: &[&str]) -> crate::eth::Filter { - self.fact_filter().topic3( - facts - .into_iter() - .map(|fact| keccak256(fact)) - .collect::>(), - ) - } - fn get_bootstrap_log_cache_inner( &self, cacher_request: &CacherRequest, @@ -793,81 +195,75 @@ impl Bindings { }; match serde_json::from_slice::(response_msg.body())? 
{ - CacherResponse::GetLogsByRange(res) => { - match res { - Ok(GetLogsByRangeOkResponse::Latest(block)) => { + CacherResponse::GetLogsByRange(res) => match res { + Ok(GetLogsByRangeOkResponse::Latest(block)) => Ok(Some((block, vec![]))), + Ok(GetLogsByRangeOkResponse::Logs((block, json))) => { + if json.is_empty() || json == "[]" { + print_to_terminal( + 2, + &format!( + "Local cacher returned no log caches for the range from block {}.", + request_from_block_val, + ), + ); return Ok(Some((block, vec![]))); } - Ok(GetLogsByRangeOkResponse::Logs((block, json_string_of_vec_log_cache))) => { - if json_string_of_vec_log_cache.is_empty() - || json_string_of_vec_log_cache == "[]" - { + match serde_json::from_str::>(&json) { + Ok(retrieved_caches) => { + let target_chain_id = chain + .clone() + .unwrap_or_else(|| self.provider.get_chain_id().to_string()); + let mut filtered_caches = vec![]; + + for log_cache in retrieved_caches { + if log_cache.metadata.chain_id == target_chain_id { + let cache_to = + log_cache.metadata.to_block.parse::().unwrap_or(0); + if cache_to >= request_from_block_val { + filtered_caches.push(log_cache); + } else { + print_to_terminal( + 3, + &format!( + "Cache from local cacher ({} to {}) does not meet request_from_block {}", + log_cache.metadata.from_block, + log_cache.metadata.to_block, + request_from_block_val + ), + ); + } + } else { + print_to_terminal( + 1, + &format!( + "LogCache from local cacher has mismatched chain_id (expected {}, got {}). Skipping.", + target_chain_id, log_cache.metadata.chain_id + ), + ); + } + } + print_to_terminal( 2, &format!( - "Local cacher returned no log caches for the range from block {}.", - request_from_block_val, + "Retrieved {} log caches from local binding-cacher.", + filtered_caches.len(), ), ); - return Ok(Some((block, vec![]))); + Ok(Some((block, filtered_caches))) } - match serde_json::from_str::>(&json_string_of_vec_log_cache) { - Ok(retrieved_caches) => { - let target_chain_id = chain - .clone() - .unwrap_or_else(|| self.provider.get_chain_id().to_string()); - let mut filtered_caches = vec![]; - - for log_cache in retrieved_caches { - if log_cache.metadata.chain_id == target_chain_id { - // Further filter: ensure the cache's own from_block isn't completely after what we need, - // and to_block isn't completely before. - let cache_from = log_cache - .metadata - .from_block - .parse::() - .unwrap_or(u64::MAX); - let cache_to = - log_cache.metadata.to_block.parse::().unwrap_or(0); - - if cache_to >= request_from_block_val { - // Cache has some data at or after our request_from_block - filtered_caches.push(log_cache); - } else { - print_to_terminal(3, &format!("Cache from local cacher ({} to {}) does not meet request_from_block {}", - cache_from, cache_to, request_from_block_val)); - } - } else { - print_to_terminal(1,&format!("LogCache from local cacher has mismatched chain_id (expected {}, got {}). Skipping.", - target_chain_id, log_cache.metadata.chain_id)); - } - } - - print_to_terminal( - 2, - &format!( - "Retrieved {} log caches from local binding-cacher.", - filtered_caches.len(), - ), - ); - return Ok(Some((block, filtered_caches))); - } - Err(e) => { - return Err(anyhow::anyhow!( - "Failed to deserialize Vec from local cacher: {:?}. JSON: {:.100}", - e, json_string_of_vec_log_cache - )); - } - } - } - Err(e_str) => { - return Err(anyhow::anyhow!( - "Local cacher reported error for GetLogsByRange: {}", - e_str, - )); + Err(e) => Err(anyhow::anyhow!( + "Failed to deserialize Vec from local cacher: {:?}. 
JSON: {:.100}", + e, + json + )), } } - } + Err(e_str) => Err(anyhow::anyhow!( + "Local cacher reported error for GetLogsByRange: {}", + e_str, + )), + }, CacherResponse::IsStarting => { print_to_terminal( 2, @@ -878,21 +274,19 @@ impl Bindings { ); if retry_count.is_none() || attempt < retry_count.unwrap() { std::thread::sleep(std::time::Duration::from_secs(retry_delay_s)); - return Ok(None); + Ok(None) } else { - return Err(anyhow::anyhow!( + Err(anyhow::anyhow!( "Local binding-cacher is still starting after {retry_count_str} attempts" - )); + )) } } - CacherResponse::Rejected => { - return Err(anyhow::anyhow!("Local binding-cacher rejected our request")); - } - _ => { - return Err(anyhow::anyhow!( - "Unexpected response type from local binding-cacher" - )); - } + CacherResponse::Rejected => Err(anyhow::anyhow!( + "Local binding-cacher rejected our request" + )), + _ => Err(anyhow::anyhow!( + "Unexpected response type from local binding-cacher" + )), } } @@ -902,9 +296,12 @@ impl Bindings { retry_params: Option<(u64, Option)>, chain: Option, ) -> anyhow::Result<(u64, Vec)> { - print_to_terminal(2, - &format!("get_bootstrap_log_cache (using local binding-cacher): from_block={:?}, retry_params={:?}, chain={:?}", - from_block, retry_params, chain) + print_to_terminal( + 2, + &format!( + "get_bootstrap_log_cache (using local binding-cacher): from_block={:?}, retry_params={:?}, chain={:?}", + from_block, retry_params, chain + ), ); let (retry_delay_s, retry_count) = retry_params.ok_or_else(|| { @@ -926,7 +323,7 @@ impl Bindings { let get_logs_by_range_payload = GetLogsByRangeRequest { from_block: request_from_block_val, - to_block: None, // Request all logs from from_block onwards. Cacher will return what it has. + to_block: None, }; let cacher_request = CacherRequest::GetLogsByRange(get_logs_by_range_payload); @@ -991,9 +388,12 @@ impl Bindings { let signature_bytes = hex::decode(signature_hex) .map_err(|e| anyhow::anyhow!("Failed to decode hex signature: {:?}", e))?; - Ok(sign::net_key_verify( + Ok(crate::sign::net_key_verify( hashed_data.to_vec(), - &log_cache.metadata.created_by.parse::()?, + &log_cache + .metadata + .created_by + .parse::()?, signature_bytes, )?) } @@ -1017,41 +417,15 @@ impl Bindings { let request_from_block_val = from_block.unwrap_or(0); for log_cache in log_caches { - // VALIDATION TEMPORARILY SKIPPED - For external reasons, validation is disabled - // and all logs are processed as if validation succeeded (Ok(true) case) - - // match self.validate_log_cache(&log_cache) { - // Ok(true) => { for log in log_cache.logs { if let Some(log_block_number) = log.block_number { if log_block_number >= request_from_block_val { all_valid_logs.push(log); } - } else { - if from_block.is_none() { - all_valid_logs.push(log); - } + } else if from_block.is_none() { + all_valid_logs.push(log); } } - // } - // Ok(false) => { - // print_to_terminal( - // 1, - // &format!("LogCache validation failed for cache created by {}. Discarding {} logs.", - // log_cache.metadata.created_by, - // log_cache.logs.len()) - // ); - // } - // Err(e) => { - // print_to_terminal( - // 1, - // &format!( - // "Error validating LogCache from {}: {:?}. Discarding.", - // log_cache.metadata.created_by, e, - // ), - // ); - // } - // } } all_valid_logs.sort_by(|a, b| { @@ -1101,7 +475,10 @@ impl Bindings { let (block, consolidated_logs) = self.get_bootstrap(from_block, retry_params, chain)?; if consolidated_logs.is_empty() { - print_to_terminal(2,"bootstrap: No logs retrieved after consolidation. 
Returning empty results for filters."); + print_to_terminal( + 2, + "bootstrap: No logs retrieved after consolidation. Returning empty results for filters.", + ); return Ok((block, filters.iter().map(|_| Vec::new()).collect())); } @@ -1122,6 +499,8 @@ impl Bindings { } } +// ... existing code ... + impl Serialize for ManifestItem { fn serialize(&self, serializer: S) -> Result where @@ -2024,4 +1403,4 @@ impl<'de> Deserialize<'de> for GetLogsByRangeOkResponse { deserializer.deserialize_map(GetLogsByRangeOkResponseVisitor) } -} +} \ No newline at end of file From 491ebbd454f067f1cf706f922a4d8c7ec1a5a430 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 10 Nov 2025 15:15:29 +0000 Subject: [PATCH 11/31] Format Rust code using rustfmt --- src/bindings.rs | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index 4d70b46..8931bdb 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -281,9 +281,9 @@ impl Bindings { )) } } - CacherResponse::Rejected => Err(anyhow::anyhow!( - "Local binding-cacher rejected our request" - )), + CacherResponse::Rejected => { + Err(anyhow::anyhow!("Local binding-cacher rejected our request")) + } _ => Err(anyhow::anyhow!( "Unexpected response type from local binding-cacher" )), @@ -390,10 +390,7 @@ impl Bindings { Ok(crate::sign::net_key_verify( hashed_data.to_vec(), - &log_cache - .metadata - .created_by - .parse::()?, + &log_cache.metadata.created_by.parse::()?, signature_bytes, )?) } @@ -1403,4 +1400,4 @@ impl<'de> Deserialize<'de> for GetLogsByRangeOkResponse { deserializer.deserialize_map(GetLogsByRangeOkResponseVisitor) } -} \ No newline at end of file +} From 9d373bdad787b6aefa28e6c24502b1b1912d02bc Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 10 Nov 2025 17:20:45 -0500 Subject: [PATCH 12/31] Initial support for tokenregistry calls --- src/bindings.rs | 386 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 383 insertions(+), 3 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index 8931bdb..40edb6a 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1,5 +1,5 @@ use crate::eth::{ - BlockNumberOrTag, Filter as EthFilter, FilterBlockOption, Log as EthLog, Provider, + BlockNumberOrTag, EthError, Filter as EthFilter, FilterBlockOption, Log as EthLog, Provider, }; use crate::hyperware::process::binding_cacher::{ BindingCacherRequest as CacherRequest, BindingCacherResponse as CacherResponse, @@ -10,7 +10,9 @@ use crate::hyperware::process::binding_cacher::{ }; use crate::{print_to_terminal, Address as BindingAddress, Request}; use alloy::hex; -use alloy_primitives::{keccak256, Address}; +use alloy::rpc::types::request::{TransactionInput, TransactionRequest}; +use alloy_primitives::{keccak256, Address, B256, Bytes, FixedBytes, U256}; +use alloy_sol_types::{SolCall, SolEvent, SolValue}; use serde::{ self, de::{self, MapAccess, Visitor}, @@ -46,7 +48,188 @@ pub struct LogCache { const CACHER_REQUEST_TIMEOUT_S: u64 = 15; -// ... existing code ... +/// Sol structures for TokenRegistry requests/events. +pub mod contract { + use alloy_sol_macro::sol; + + sol! 
{ + struct Bind { + uint256 amount; + uint256 endTime; + } + + error InvalidAdmin(); + error InvalidAmount(uint256 amount, uint256 minRequiredAmount, uint256 maxAmount); + error InvalidDuration(uint256 duration, uint256 minDuration, uint256 maxDuration); + error NoLockExists(); + error LockExpired(uint256 endTime); + error LockNotExpired(uint256 endTime); + error InvalidParam(uint256 param); + error UnsupportedToken(address token); + error ZeroAmount(); + error SourceNotExpired(bytes32 namehash, uint256 endTime); + error ZeroDurationForNewBind(); + error ZeroAmountForNewBind(); + error DefaultDestinationInvalidParams(uint256 amount, uint256 duration); + error InsufficientLockAmount(uint256 currentlyLocked, uint256 requested); + error OnlyGovernanceTokenCanCall(); + error GHyprAlreadySet(); + + event TokensLocked( + address indexed account, + uint256 amount, + uint256 duration, + uint256 balance, + uint256 endTime + ); + + event LockExtended( + address indexed account, + uint256 duration, + uint256 balance, + uint256 endTime + ); + + event TokensWithdrawn( + address indexed user, + uint256 amountWithdrawn, + uint256 remainingAmount, + uint256 endTime + ); + + event BindCreated( + address indexed user, + bytes32 indexed namehash, + uint256 amount, + uint256 endTime + ); + + event BindAmountIncreased( + address indexed user, + bytes32 indexed namehash, + uint256 amount, + uint256 endTime + ); + + event BindDurationExtended( + address indexed user, + bytes32 indexed namehash, + uint256 amount, + uint256 endTime + ); + + event TokensBound( + address indexed user, + bytes32 srcNamehash, + bytes32 dstNamehash, + uint256 amount + ); + + event ExpiredBindReclaimed( + address indexed user, + bytes32 indexed namehash, + uint256 amount + ); + + event Initialized(address indexed hypr, address indexed admin); + + event GHyprSet(address indexed gHypr); + + function initialize(address _hypr, address _admin) external; + + function manageLock(uint256 _amount, uint256 _duration) external; + + function isLockExpired(address _account) external view returns (bool); + + function withdraw() external returns (bool); + + function getLockDetails(address _user) + external + view + returns (uint256 amount, uint256 endTime, uint256 remainingTime); + + function getRegistrationDetails(bytes32 _namehash, address _user) + external + view + returns (uint256 amount, uint256 endTime, uint256 remainingTime); + + function transferRegistration( + bytes32 _srcNamehash, + bytes32 _dstNamehash, + uint256 _maxAmount, + uint256 _duration + ) external; + + function getUserBinds(address _user) external view returns (bytes32[] memory); + + function calculateVotingPower(uint256 _value, uint256 _lockDuration) + external + view + returns (uint256); + + function getMultiplier(address _account, uint256 _timepoint) + external + view + returns (uint256); + + function getUserUnlockStamp(address _account) external view returns (uint256); + + function getUserOrDelegatedUnlockStamp(address _account) external view returns (uint256); + + function updateDelegationMultipliers( + uint256 _unlockTime, + uint256 _movedVotes, + address _sender, + uint256 _senderVotesBefore, + address _dst, + uint256 _dstVotesBefore + ) external; + + function calculateWeightedUnlockStamp( + uint256 _remainingDuration, + uint256 _currentBalance, + uint256 _newLockDuration, + uint256 _newLockAmount + ) external view returns (uint256); + + function calculateNewLockDuration( + uint256 _unlockStamp, + uint256 _remainingDuration, + uint256 _currentBalance, + uint256 
_newLockAmount
+    ) external view returns (uint256);
+    }
+}
+
+/// Canonical helper used throughout to hash dotted Hypermap-name paths into bytes32.
+pub fn namehash(name: &str) -> FixedBytes<32> {
+    let mut node = B256::ZERO;
+    let mut labels: Vec<&str> = name.split('.').collect();
+    labels.reverse();
+
+    for label in labels.iter() {
+        let l = keccak256(label.as_bytes());
+        node = keccak256((node, l).abi_encode_packed());
+    }
+
+    FixedBytes::from(node)
+}
+
+/// Details returned from `getLockDetails`.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct LockDetails {
+    pub amount: U256,
+    pub end_time: U256,
+    pub remaining_time: U256,
+}
+
+/// Details returned from `getRegistrationDetails`.
+#[derive(Clone, Debug, Deserialize, Serialize)]
+pub struct RegistrationDetails {
+    pub amount: U256,
+    pub end_time: U256,
+    pub remaining_time: U256,
+}
 
 /// Apply an ETH log filter to a set of logs (topic/address/block-range only).
 pub fn eth_apply_filter(logs: &[EthLog], filter: &EthFilter) -> Vec<EthLog> {
@@ -135,6 +318,203 @@ impl Bindings {
         &self.address
     }
 
+    fn call_view<Call>(&self, call: Call) -> Result<Call::Return, EthError>
+    where
+        Call: SolCall,
+    {
+        let tx_req = TransactionRequest::default()
+            .to(self.address)
+            .input(TransactionInput::new(Bytes::from(call.abi_encode())));
+        let res_bytes = self.provider.call(tx_req, None)?;
+        Call::abi_decode_returns(&res_bytes, false).map_err(|_| EthError::RpcMalformedResponse)
+    }
+
+    /// Whether a user's lock is expired.
+    pub fn is_lock_expired(&self, account: Address) -> Result<bool, EthError> {
+        self.call_view(contract::isLockExpiredCall {
+            _account: account,
+        })
+    }
+
+    /// Get the lock details for a user.
+    pub fn get_lock_details(&self, user: Address) -> Result<LockDetails, EthError> {
+        let res = self.call_view(contract::getLockDetailsCall { _user: user })?;
+        Ok(LockDetails {
+            amount: res.amount,
+            end_time: res.endTime,
+            remaining_time: res.remainingTime,
+        })
+    }
+
+    /// Get registration details by an already hashed name.
+    pub fn get_registration_details_by_hash(
+        &self,
+        namehash: FixedBytes<32>,
+        user: Address,
+    ) -> Result<RegistrationDetails, EthError> {
+        let res = self.call_view(contract::getRegistrationDetailsCall {
+            _namehash: namehash,
+            _user: user,
+        })?;
+        Ok(RegistrationDetails {
+            amount: res.amount,
+            end_time: res.endTime,
+            remaining_time: res.remainingTime,
+        })
+    }
+
+    /// Get registration details using a dotted Hypermap label.
+    pub fn get_registration_details(
+        &self,
+        name: &str,
+        user: Address,
+    ) -> Result<RegistrationDetails, EthError> {
+        self.get_registration_details_by_hash(namehash(name), user)
+    }
+
+    /// Return all bind namehashes owned by a user.
+    pub fn get_user_binds(&self, user: Address) -> Result<Vec<FixedBytes<32>>, EthError> {
+        self.call_view(contract::getUserBindsCall { _user: user })
+    }
+
+    /// Calculate voting power for a balance/duration.
+    pub fn calculate_voting_power(&self, value: U256, lock_duration: U256) -> Result<U256, EthError> {
+        self.call_view(contract::calculateVotingPowerCall {
+            _value: value,
+            _lockDuration: lock_duration,
+        })
+    }
+
+    /// Retrieve the multiplier for an account (or supply if account == zero) at a timepoint.
+ pub fn get_multiplier(&self, account: Address, timepoint: U256) -> Result { + self.call_view(contract::getMultiplierCall { + _account: account, + _timepoint: timepoint, + }) + } + + pub fn get_user_unlock_stamp(&self, account: Address) -> Result { + self.call_view(contract::getUserUnlockStampCall { + _account: account, + }) + } + + pub fn get_user_or_delegated_unlock_stamp(&self, account: Address) -> Result { + self.call_view(contract::getUserOrDelegatedUnlockStampCall { + _account: account, + }) + } + + pub fn calculate_weighted_unlock_stamp( + &self, + remaining_duration: U256, + current_balance: U256, + new_lock_duration: U256, + new_lock_amount: U256, + ) -> Result { + self.call_view(contract::calculateWeightedUnlockStampCall { + _remainingDuration: remaining_duration, + _currentBalance: current_balance, + _newLockDuration: new_lock_duration, + _newLockAmount: new_lock_amount, + }) + } + + pub fn calculate_new_lock_duration( + &self, + unlock_stamp: U256, + remaining_duration: U256, + current_balance: U256, + new_lock_amount: U256, + ) -> Result { + self.call_view(contract::calculateNewLockDurationCall { + _unlockStamp: unlock_stamp, + _remainingDuration: remaining_duration, + _currentBalance: current_balance, + _newLockAmount: new_lock_amount, + }) + } + + /// Filter for `TokensLocked` events. + pub fn tokens_locked_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::TokensLocked::SIGNATURE) + } + + /// Filter for `LockExtended` events. + pub fn lock_extended_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::LockExtended::SIGNATURE) + } + + /// Filter for `TokensWithdrawn` events. + pub fn tokens_withdrawn_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::TokensWithdrawn::SIGNATURE) + } + + /// Filter for `BindCreated` events. + pub fn bind_created_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::BindCreated::SIGNATURE) + } + + /// Filter for `BindAmountIncreased` events. + pub fn bind_amount_increased_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::BindAmountIncreased::SIGNATURE) + } + + /// Filter for `BindDurationExtended` events. + pub fn bind_duration_extended_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::BindDurationExtended::SIGNATURE) + } + + /// Filter for `TokensBound` events. + pub fn tokens_bound_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::TokensBound::SIGNATURE) + } + + /// Filter for `ExpiredBindReclaimed` events. + pub fn expired_bind_reclaimed_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::ExpiredBindReclaimed::SIGNATURE) + } + + /// Filter for `GHyprSet` events. + pub fn ghypr_set_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::GHyprSet::SIGNATURE) + } + + /// Filter for `Initialized` events. + pub fn initialized_filter(&self) -> EthFilter { + EthFilter::new() + .address(self.address) + .event(contract::Initialized::SIGNATURE) + } + + /// Create a `BindCreated` filter scoped to specific namehashes. 
+ pub fn named_bind_filter(&self, namehashes: &[FixedBytes<32>]) -> EthFilter { + self.bind_created_filter().topic2( + namehashes + .iter() + .map(|h| B256::from(*h)) + .collect::>(), + ) + } + fn get_bootstrap_log_cache_inner( &self, cacher_request: &CacherRequest, From 5d53f9b2af99d81151ef54b95d870d522e6a6013 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 10 Nov 2025 17:35:42 -0500 Subject: [PATCH 13/31] Helper structs --- src/bindings.rs | 217 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 217 insertions(+) diff --git a/src/bindings.rs b/src/bindings.rs index 40edb6a..9efad85 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -231,6 +231,223 @@ pub struct RegistrationDetails { pub remaining_time: U256, } +#[derive(Clone, Debug, Deserialize, Serialize)] +pub enum DecodeBindingLogError { + UnexpectedTopic(B256), + MissingTopic(usize), + DecodeError(String), +} + +fn topic_as_address(topic: &B256) -> Address { + let bytes = topic.as_slice(); + Address::from_slice(&bytes[12..32]) +} + +fn expect_topic(log: &EthLog, expected: B256) -> Result<(), DecodeBindingLogError> { + match log.topics().first().copied() { + Some(topic) if topic == expected => Ok(()), + other => Err(DecodeBindingLogError::UnexpectedTopic( + other.unwrap_or_default(), + )), + } +} + +fn topic_at(log: &EthLog, idx: usize) -> Result { + log.topics() + .get(idx) + .copied() + .ok_or(DecodeBindingLogError::MissingTopic(idx)) +} + +fn decode_data(result: Result) -> Result { + result.map_err(|e| DecodeBindingLogError::DecodeError(e.to_string())) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct TokensLockedLog { + pub account: Address, + pub amount: U256, + pub duration: U256, + pub balance: U256, + pub end_time: U256, +} + +pub fn decode_tokens_locked_log(log: &EthLog) -> Result { + expect_topic(log, contract::TokensLocked::SIGNATURE_HASH)?; + let account = topic_as_address(&topic_at(log, 1)?); + let decoded = decode_data(contract::TokensLocked::decode_log_data(log.data(), true))?; + Ok(TokensLockedLog { + account, + amount: decoded.amount, + duration: decoded.duration, + balance: decoded.balance, + end_time: decoded.endTime, + }) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct LockExtendedLog { + pub account: Address, + pub duration: U256, + pub balance: U256, + pub end_time: U256, +} + +pub fn decode_lock_extended_log(log: &EthLog) -> Result { + expect_topic(log, contract::LockExtended::SIGNATURE_HASH)?; + let account = topic_as_address(&topic_at(log, 1)?); + let decoded = decode_data(contract::LockExtended::decode_log_data(log.data(), true))?; + Ok(LockExtendedLog { + account, + duration: decoded.duration, + balance: decoded.balance, + end_time: decoded.endTime, + }) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct TokensWithdrawnLog { + pub user: Address, + pub amount_withdrawn: U256, + pub remaining_amount: U256, + pub end_time: U256, +} + +pub fn decode_tokens_withdrawn_log( + log: &EthLog, +) -> Result { + expect_topic(log, contract::TokensWithdrawn::SIGNATURE_HASH)?; + let user = topic_as_address(&topic_at(log, 1)?); + let decoded = decode_data(contract::TokensWithdrawn::decode_log_data(log.data(), true))?; + Ok(TokensWithdrawnLog { + user, + amount_withdrawn: decoded.amountWithdrawn, + remaining_amount: decoded.remainingAmount, + end_time: decoded.endTime, + }) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct BindLog { + pub user: Address, + pub namehash: FixedBytes<32>, + pub amount: U256, + pub end_time: U256, +} + 
+pub fn decode_bind_created_log(log: &EthLog) -> Result { + expect_topic(log, contract::BindCreated::SIGNATURE_HASH)?; + let user = topic_as_address(&topic_at(log, 1)?); + let namehash = topic_at(log, 2)?; + let decoded = decode_data(contract::BindCreated::decode_log_data(log.data(), true))?; + Ok(BindLog { + user, + namehash, + amount: decoded.amount, + end_time: decoded.endTime, + }) +} + +pub fn decode_bind_amount_increased_log(log: &EthLog) -> Result { + expect_topic(log, contract::BindAmountIncreased::SIGNATURE_HASH)?; + let user = topic_as_address(&topic_at(log, 1)?); + let namehash = topic_at(log, 2)?; + let decoded = decode_data(contract::BindAmountIncreased::decode_log_data( + log.data(), + true, + ))?; + Ok(BindLog { + user, + namehash, + amount: decoded.amount, + end_time: decoded.endTime, + }) +} + +pub fn decode_bind_duration_extended_log(log: &EthLog) -> Result { + expect_topic(log, contract::BindDurationExtended::SIGNATURE_HASH)?; + let user = topic_as_address(&topic_at(log, 1)?); + let namehash = topic_at(log, 2)?; + let decoded = decode_data(contract::BindDurationExtended::decode_log_data( + log.data(), + true, + ))?; + Ok(BindLog { + user, + namehash, + amount: decoded.amount, + end_time: decoded.endTime, + }) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct TokensBoundLog { + pub user: Address, + pub src_namehash: FixedBytes<32>, + pub dst_namehash: FixedBytes<32>, + pub amount: U256, +} + +pub fn decode_tokens_bound_log(log: &EthLog) -> Result { + expect_topic(log, contract::TokensBound::SIGNATURE_HASH)?; + let user = topic_as_address(&topic_at(log, 1)?); + let decoded = decode_data(contract::TokensBound::decode_log_data(log.data(), true))?; + Ok(TokensBoundLog { + user, + src_namehash: decoded.srcNamehash, + dst_namehash: decoded.dstNamehash, + amount: decoded.amount, + }) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct ExpiredBindReclaimedLog { + pub user: Address, + pub namehash: FixedBytes<32>, + pub amount: U256, +} + +pub fn decode_expired_bind_reclaimed_log( + log: &EthLog, +) -> Result { + expect_topic(log, contract::ExpiredBindReclaimed::SIGNATURE_HASH)?; + let user = topic_as_address(&topic_at(log, 1)?); + let namehash = topic_at(log, 2)?; + let decoded = decode_data(contract::ExpiredBindReclaimed::decode_log_data( + log.data(), + true, + ))?; + Ok(ExpiredBindReclaimedLog { + user, + namehash, + amount: decoded.amount, + }) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct InitializedLog { + pub hypr: Address, + pub admin: Address, +} + +pub fn decode_initialized_log(log: &EthLog) -> Result { + expect_topic(log, contract::Initialized::SIGNATURE_HASH)?; + let hypr = topic_as_address(&topic_at(log, 1)?); + let admin = topic_as_address(&topic_at(log, 2)?); + Ok(InitializedLog { hypr, admin }) +} + +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct GHyprSetLog { + pub g_hypr: Address, +} + +pub fn decode_ghypr_set_log(log: &EthLog) -> Result { + expect_topic(log, contract::GHyprSet::SIGNATURE_HASH)?; + let g_hypr = topic_as_address(&topic_at(log, 1)?); + Ok(GHyprSetLog { g_hypr }) +} + /// Apply an ETH log filter to a set of logs (topic/address/block-range only). 
pub fn eth_apply_filter(logs: &[EthLog], filter: &EthFilter) -> Vec { let mut matched_logs = Vec::new(); From 0e41df06a9069d75183727e059289f116a5116c7 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 10 Nov 2025 17:56:56 -0500 Subject: [PATCH 14/31] Support for building transactions for writes --- src/bindings.rs | 83 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) diff --git a/src/bindings.rs b/src/bindings.rs index 9efad85..234b9ae 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -546,6 +546,15 @@ impl Bindings { Call::abi_decode_returns(&res_bytes, false).map_err(|_| EthError::RpcMalformedResponse) } + fn build_tx(&self, call: Call) -> TransactionRequest + where + Call: SolCall, + { + TransactionRequest::default() + .to(self.address) + .input(TransactionInput::new(Bytes::from(call.abi_encode()))) + } + /// Whether a user's lock is expired. pub fn is_lock_expired(&self, account: Address) -> Result { self.call_view(contract::isLockExpiredCall { @@ -652,6 +661,80 @@ impl Bindings { }) } + /// Build a transaction for `initialize`. + pub fn build_initialize_tx(&self, hypr: Address, admin: Address) -> TransactionRequest { + self.build_tx(contract::initializeCall { + _hypr: hypr, + _admin: admin, + }) + } + + /// Build a transaction for `manageLock`. + pub fn build_manage_lock_tx(&self, amount: U256, duration: U256) -> TransactionRequest { + self.build_tx(contract::manageLockCall { + _amount: amount, + _duration: duration, + }) + } + + /// Build a transaction for `withdraw`. + pub fn build_withdraw_tx(&self) -> TransactionRequest { + self.build_tx(contract::withdrawCall {}) + } + + /// Build a transaction for `transferRegistration` with pre-hashed names. + pub fn build_transfer_registration_tx( + &self, + src_namehash: FixedBytes<32>, + dst_namehash: FixedBytes<32>, + max_amount: U256, + duration: U256, + ) -> TransactionRequest { + self.build_tx(contract::transferRegistrationCall { + _srcNamehash: src_namehash, + _dstNamehash: dst_namehash, + _maxAmount: max_amount, + _duration: duration, + }) + } + + /// Build a transaction for `transferRegistration` using dotted names. + pub fn build_transfer_registration_by_name_tx( + &self, + src_name: &str, + dst_name: &str, + max_amount: U256, + duration: U256, + ) -> TransactionRequest { + self.build_transfer_registration_tx( + namehash(src_name), + namehash(dst_name), + max_amount, + duration, + ) + } + + /// Build a transaction for `updateDelegationMultipliers`. + #[allow(clippy::too_many_arguments)] + pub fn build_update_delegation_multipliers_tx( + &self, + unlock_time: U256, + moved_votes: U256, + sender: Address, + sender_votes_before: U256, + dst: Address, + dst_votes_before: U256, + ) -> TransactionRequest { + self.build_tx(contract::updateDelegationMultipliersCall { + _unlockTime: unlock_time, + _movedVotes: moved_votes, + _sender: sender, + _senderVotesBefore: sender_votes_before, + _dst: dst, + _dstVotesBefore: dst_votes_before, + }) + } + /// Filter for `TokensLocked` events. 
pub fn tokens_locked_filter(&self) -> EthFilter { EthFilter::new() From 2e3e47353c0897d2b12401b56e5ab2c459d77583 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 10 Nov 2025 18:03:38 -0500 Subject: [PATCH 15/31] new event filters --- src/bindings.rs | 51 +++++++++++++++---------------------------------- 1 file changed, 15 insertions(+), 36 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index 234b9ae..d68d529 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -735,84 +735,63 @@ impl Bindings { }) } + fn event_filter(signature: B256, address: Address) -> EthFilter { + EthFilter::new().address(address).event(signature) + } + /// Filter for `TokensLocked` events. pub fn tokens_locked_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::TokensLocked::SIGNATURE) + Self::event_filter(contract::TokensLocked::SIGNATURE_HASH, self.address) } /// Filter for `LockExtended` events. pub fn lock_extended_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::LockExtended::SIGNATURE) + Self::event_filter(contract::LockExtended::SIGNATURE_HASH, self.address) } /// Filter for `TokensWithdrawn` events. pub fn tokens_withdrawn_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::TokensWithdrawn::SIGNATURE) + Self::event_filter(contract::TokensWithdrawn::SIGNATURE_HASH, self.address) } /// Filter for `BindCreated` events. pub fn bind_created_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::BindCreated::SIGNATURE) + Self::event_filter(contract::BindCreated::SIGNATURE_HASH, self.address) } /// Filter for `BindAmountIncreased` events. pub fn bind_amount_increased_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::BindAmountIncreased::SIGNATURE) + Self::event_filter(contract::BindAmountIncreased::SIGNATURE_HASH, self.address) } /// Filter for `BindDurationExtended` events. pub fn bind_duration_extended_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::BindDurationExtended::SIGNATURE) + Self::event_filter(contract::BindDurationExtended::SIGNATURE_HASH, self.address) } /// Filter for `TokensBound` events. pub fn tokens_bound_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::TokensBound::SIGNATURE) + Self::event_filter(contract::TokensBound::SIGNATURE_HASH, self.address) } /// Filter for `ExpiredBindReclaimed` events. pub fn expired_bind_reclaimed_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::ExpiredBindReclaimed::SIGNATURE) + Self::event_filter(contract::ExpiredBindReclaimed::SIGNATURE_HASH, self.address) } /// Filter for `GHyprSet` events. pub fn ghypr_set_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::GHyprSet::SIGNATURE) + Self::event_filter(contract::GHyprSet::SIGNATURE_HASH, self.address) } /// Filter for `Initialized` events. pub fn initialized_filter(&self) -> EthFilter { - EthFilter::new() - .address(self.address) - .event(contract::Initialized::SIGNATURE) + Self::event_filter(contract::Initialized::SIGNATURE_HASH, self.address) } /// Create a `BindCreated` filter scoped to specific namehashes. 
pub fn named_bind_filter(&self, namehashes: &[FixedBytes<32>]) -> EthFilter { - self.bind_created_filter().topic2( - namehashes - .iter() - .map(|h| B256::from(*h)) - .collect::>(), - ) + self.bind_created_filter().topic2(namehashes.iter().map(B256::from).collect::>()) } fn get_bootstrap_log_cache_inner( From 5717ea750e38db0391adeff7f540a570cab943b1 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 10 Nov 2025 18:18:46 -0500 Subject: [PATCH 16/31] tiny change in doc --- src/bindings.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bindings.rs b/src/bindings.rs index d68d529..78911e9 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -201,7 +201,7 @@ pub mod contract { } } -/// Canonical helper used throughout to hash dotted Hypermap-name paths into bytes32. +/// Canonical helper used throughout to hash dotted Hypermap paths into bytes32. pub fn namehash(name: &str) -> FixedBytes<32> { let mut node = B256::ZERO; let mut labels: Vec<&str> = name.split('.').collect(); From 8c4a42795999ea2e69a2ccd666e580a99b2ec857 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Wed, 12 Nov 2025 16:32:01 -0500 Subject: [PATCH 17/31] additional support functions --- src/bindings.rs | 147 ++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 118 insertions(+), 29 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index 78911e9..d0abb1e 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -26,7 +26,7 @@ use std::str::FromStr; #[cfg(not(feature = "simulation-mode"))] pub const BINDINGS_ADDRESS: &'static str = "0x0000000000e8d224B902632757d5dbc51a451456"; #[cfg(feature = "simulation-mode")] -pub const BINDINGS_ADDRESS: &'static str = "0x8A791620dd6260079BF849Dc5567aDC3F2FdC318"; +pub const BINDINGS_ADDRESS: &'static str = "0x2279B7A0a67DB372996a5FaB50D91eAA73d2eBe6"; #[cfg(not(feature = "simulation-mode"))] pub const BINDINGS_CHAIN_ID: u64 = 8453; // base #[cfg(feature = "simulation-mode")] @@ -198,6 +198,19 @@ pub mod contract { uint256 _currentBalance, uint256 _newLockAmount ) external view returns (uint256); + + function hypr() external view returns (address); + } +} + +mod erc20 { + use alloy_sol_macro::sol; + + sol! { + interface IERC20 { + function balanceOf(address account) external view returns (uint256); + function allowance(address owner, address spender) external view returns (uint256); + } } } @@ -536,11 +549,18 @@ impl Bindings { } fn call_view(&self, call: Call) -> Result + where + Call: SolCall, + { + self.call_view_at(self.address, call) + } + + fn call_view_at(&self, target: Address, call: Call) -> Result where Call: SolCall, { let tx_req = TransactionRequest::default() - .to(self.address) + .to(target) .input(TransactionInput::new(Bytes::from(call.abi_encode()))); let res_bytes = self.provider.call(tx_req, None)?; Call::abi_decode_returns(&res_bytes, false).map_err(|_| EthError::RpcMalformedResponse) @@ -557,9 +577,10 @@ impl Bindings { /// Whether a user's lock is expired. pub fn is_lock_expired(&self, account: Address) -> Result { - self.call_view(contract::isLockExpiredCall { + let res = self.call_view(contract::isLockExpiredCall { _account: account, - }) + })?; + Ok(res._0) } /// Get the lock details for a user. @@ -600,35 +621,40 @@ impl Bindings { /// Return all bind namehashes owned by a user. 
pub fn get_user_binds(&self, user: Address) -> Result>, EthError> { - self.call_view(contract::getUserBindsCall { _user: user }) + let res = self.call_view(contract::getUserBindsCall { _user: user })?; + Ok(res._0) } /// Calculate voting power for a balance/duration. pub fn calculate_voting_power(&self, value: U256, lock_duration: U256) -> Result { - self.call_view(contract::calculateVotingPowerCall { + let res = self.call_view(contract::calculateVotingPowerCall { _value: value, _lockDuration: lock_duration, - }) + })?; + Ok(res._0) } /// Retrieve the multiplier for an account (or supply if account == zero) at a timepoint. pub fn get_multiplier(&self, account: Address, timepoint: U256) -> Result { - self.call_view(contract::getMultiplierCall { + let res = self.call_view(contract::getMultiplierCall { _account: account, _timepoint: timepoint, - }) + })?; + Ok(res._0) } pub fn get_user_unlock_stamp(&self, account: Address) -> Result { - self.call_view(contract::getUserUnlockStampCall { + let res = self.call_view(contract::getUserUnlockStampCall { _account: account, - }) + })?; + Ok(res._0) } pub fn get_user_or_delegated_unlock_stamp(&self, account: Address) -> Result { - self.call_view(contract::getUserOrDelegatedUnlockStampCall { + let res = self.call_view(contract::getUserOrDelegatedUnlockStampCall { _account: account, - }) + })?; + Ok(res._0) } pub fn calculate_weighted_unlock_stamp( @@ -638,12 +664,13 @@ impl Bindings { new_lock_duration: U256, new_lock_amount: U256, ) -> Result { - self.call_view(contract::calculateWeightedUnlockStampCall { + let res = self.call_view(contract::calculateWeightedUnlockStampCall { _remainingDuration: remaining_duration, _currentBalance: current_balance, _newLockDuration: new_lock_duration, _newLockAmount: new_lock_amount, - }) + })?; + Ok(res._0) } pub fn calculate_new_lock_duration( @@ -653,12 +680,39 @@ impl Bindings { current_balance: U256, new_lock_amount: U256, ) -> Result { - self.call_view(contract::calculateNewLockDurationCall { + let res = self.call_view(contract::calculateNewLockDurationCall { _unlockStamp: unlock_stamp, _remainingDuration: remaining_duration, _currentBalance: current_balance, _newLockAmount: new_lock_amount, - }) + })?; + Ok(res._0) + } + + /// Returns the HYPR token address backing the registry. + pub fn get_hypr_address(&self) -> Result { + let res = self.call_view(contract::hyprCall {})?; + Ok(res._0) + } + + /// Returns the HYPR ERC20 balance for a given account. + pub fn get_hypr_balance(&self, account: Address) -> Result { + let hypr_address = self.get_hypr_address()?; + let res = self.call_view_at(hypr_address, erc20::IERC20::balanceOfCall { account })?; + Ok(res._0) + } + + /// Returns the HYPR ERC20 allowance granted to the TokenRegistry for an account. + pub fn get_hypr_allowance(&self, owner: Address) -> Result { + let hypr_address = self.get_hypr_address()?; + let res = self.call_view_at( + hypr_address, + erc20::IERC20::allowanceCall { + owner, + spender: self.address, + }, + )?; + Ok(res._0) } /// Build a transaction for `initialize`. @@ -735,63 +789,64 @@ impl Bindings { }) } - fn event_filter(signature: B256, address: Address) -> EthFilter { + fn event_filter(signature: &str, address: Address) -> EthFilter { EthFilter::new().address(address).event(signature) } /// Filter for `TokensLocked` events. 
pub fn tokens_locked_filter(&self) -> EthFilter { - Self::event_filter(contract::TokensLocked::SIGNATURE_HASH, self.address) + Self::event_filter(contract::TokensLocked::SIGNATURE, self.address) } /// Filter for `LockExtended` events. pub fn lock_extended_filter(&self) -> EthFilter { - Self::event_filter(contract::LockExtended::SIGNATURE_HASH, self.address) + Self::event_filter(contract::LockExtended::SIGNATURE, self.address) } /// Filter for `TokensWithdrawn` events. pub fn tokens_withdrawn_filter(&self) -> EthFilter { - Self::event_filter(contract::TokensWithdrawn::SIGNATURE_HASH, self.address) + Self::event_filter(contract::TokensWithdrawn::SIGNATURE, self.address) } /// Filter for `BindCreated` events. pub fn bind_created_filter(&self) -> EthFilter { - Self::event_filter(contract::BindCreated::SIGNATURE_HASH, self.address) + Self::event_filter(contract::BindCreated::SIGNATURE, self.address) } /// Filter for `BindAmountIncreased` events. pub fn bind_amount_increased_filter(&self) -> EthFilter { - Self::event_filter(contract::BindAmountIncreased::SIGNATURE_HASH, self.address) + Self::event_filter(contract::BindAmountIncreased::SIGNATURE, self.address) } /// Filter for `BindDurationExtended` events. pub fn bind_duration_extended_filter(&self) -> EthFilter { - Self::event_filter(contract::BindDurationExtended::SIGNATURE_HASH, self.address) + Self::event_filter(contract::BindDurationExtended::SIGNATURE, self.address) } /// Filter for `TokensBound` events. pub fn tokens_bound_filter(&self) -> EthFilter { - Self::event_filter(contract::TokensBound::SIGNATURE_HASH, self.address) + Self::event_filter(contract::TokensBound::SIGNATURE, self.address) } /// Filter for `ExpiredBindReclaimed` events. pub fn expired_bind_reclaimed_filter(&self) -> EthFilter { - Self::event_filter(contract::ExpiredBindReclaimed::SIGNATURE_HASH, self.address) + Self::event_filter(contract::ExpiredBindReclaimed::SIGNATURE, self.address) } /// Filter for `GHyprSet` events. pub fn ghypr_set_filter(&self) -> EthFilter { - Self::event_filter(contract::GHyprSet::SIGNATURE_HASH, self.address) + Self::event_filter(contract::GHyprSet::SIGNATURE, self.address) } /// Filter for `Initialized` events. pub fn initialized_filter(&self) -> EthFilter { - Self::event_filter(contract::Initialized::SIGNATURE_HASH, self.address) + Self::event_filter(contract::Initialized::SIGNATURE, self.address) } /// Create a `BindCreated` filter scoped to specific namehashes. pub fn named_bind_filter(&self, namehashes: &[FixedBytes<32>]) -> EthFilter { - self.bind_created_filter().topic2(namehashes.iter().map(B256::from).collect::>()) + self.bind_created_filter() + .topic2(namehashes.iter().map(|h| B256::from(*h)).collect::>()) } fn get_bootstrap_log_cache_inner( @@ -1023,6 +1078,7 @@ impl Bindings { )) } + #[cfg(not(feature = "hyperapp"))] pub fn validate_log_cache(&self, log_cache: &LogCache) -> anyhow::Result { let from_block = log_cache.metadata.from_block.parse::().map_err(|_| { anyhow::anyhow!( @@ -1054,6 +1110,39 @@ impl Bindings { )?) 
} + #[cfg(feature = "hyperapp")] + pub async fn validate_log_cache(&self, log_cache: &LogCache) -> anyhow::Result { + let from_block = log_cache.metadata.from_block.parse::().map_err(|_| { + anyhow::anyhow!( + "Invalid from_block in metadata: {}", + log_cache.metadata.from_block + ) + })?; + let to_block = log_cache.metadata.to_block.parse::().map_err(|_| { + anyhow::anyhow!( + "Invalid to_block in metadata: {}", + log_cache.metadata.to_block + ) + })?; + + let mut bytes_to_verify = serde_json::to_vec(&log_cache.logs) + .map_err(|e| anyhow::anyhow!("Failed to serialize logs for validation: {:?}", e))?; + bytes_to_verify.extend_from_slice(&from_block.to_be_bytes()); + bytes_to_verify.extend_from_slice(&to_block.to_be_bytes()); + let hashed_data = keccak256(&bytes_to_verify); + + let signature_hex = log_cache.metadata.signature.trim_start_matches("0x"); + let signature_bytes = hex::decode(signature_hex) + .map_err(|e| anyhow::anyhow!("Failed to decode hex signature: {:?}", e))?; + + Ok(crate::sign::net_key_verify( + hashed_data.to_vec(), + &log_cache.metadata.created_by.parse::()?, + signature_bytes, + ) + .await?) + } + pub fn get_bootstrap( &self, from_block: Option, From 009fb7bee15f7c9708fe7ed3ee25bfa1f00e922c Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Fri, 14 Nov 2025 10:33:42 -0500 Subject: [PATCH 18/31] Questionable changes to process_lib? --- src/hyperapp.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/hyperapp.rs b/src/hyperapp.rs index c2059bd..04222e4 100644 --- a/src/hyperapp.rs +++ b/src/hyperapp.rs @@ -14,7 +14,7 @@ use crate::{ logging::{error, info}, set_state, timer, Address, BuildError, LazyLoadBlob, Message, Request, SendError, }; -use futures_channel::{mpsc, oneshot}; +use futures_channel::oneshot; use futures_util::task::{waker_ref, ArcWake}; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -495,14 +495,13 @@ where pub fn setup_server( ui_config: Option<&HttpBindingConfig>, - ui_path: Option, endpoints: &[Binding], ) -> http::server::HttpServer { let mut server = http::server::HttpServer::new(5); if let Some(ui) = ui_config { if let Err(e) = server.serve_ui( - &ui_path.unwrap_or_else(|| "ui".to_string()), + "ui", vec!["/"], ui.clone(), ) { From 77e5014ddd0baf53f69c0a7f21b1100f9d20e2c8 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Fri, 14 Nov 2025 15:34:09 +0000 Subject: [PATCH 19/31] Format Rust code using rustfmt --- src/bindings.rs | 29 ++++++++++++++++------------- src/hyperapp.rs | 6 +----- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index d0abb1e..a9bcb9e 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -11,7 +11,7 @@ use crate::hyperware::process::binding_cacher::{ use crate::{print_to_terminal, Address as BindingAddress, Request}; use alloy::hex; use alloy::rpc::types::request::{TransactionInput, TransactionRequest}; -use alloy_primitives::{keccak256, Address, B256, Bytes, FixedBytes, U256}; +use alloy_primitives::{keccak256, Address, Bytes, FixedBytes, B256, U256}; use alloy_sol_types::{SolCall, SolEvent, SolValue}; use serde::{ self, @@ -577,9 +577,7 @@ impl Bindings { /// Whether a user's lock is expired. 
pub fn is_lock_expired(&self, account: Address) -> Result { - let res = self.call_view(contract::isLockExpiredCall { - _account: account, - })?; + let res = self.call_view(contract::isLockExpiredCall { _account: account })?; Ok(res._0) } @@ -626,7 +624,11 @@ impl Bindings { } /// Calculate voting power for a balance/duration. - pub fn calculate_voting_power(&self, value: U256, lock_duration: U256) -> Result { + pub fn calculate_voting_power( + &self, + value: U256, + lock_duration: U256, + ) -> Result { let res = self.call_view(contract::calculateVotingPowerCall { _value: value, _lockDuration: lock_duration, @@ -644,16 +646,13 @@ impl Bindings { } pub fn get_user_unlock_stamp(&self, account: Address) -> Result { - let res = self.call_view(contract::getUserUnlockStampCall { - _account: account, - })?; + let res = self.call_view(contract::getUserUnlockStampCall { _account: account })?; Ok(res._0) } pub fn get_user_or_delegated_unlock_stamp(&self, account: Address) -> Result { - let res = self.call_view(contract::getUserOrDelegatedUnlockStampCall { - _account: account, - })?; + let res = + self.call_view(contract::getUserOrDelegatedUnlockStampCall { _account: account })?; Ok(res._0) } @@ -845,8 +844,12 @@ impl Bindings { /// Create a `BindCreated` filter scoped to specific namehashes. pub fn named_bind_filter(&self, namehashes: &[FixedBytes<32>]) -> EthFilter { - self.bind_created_filter() - .topic2(namehashes.iter().map(|h| B256::from(*h)).collect::>()) + self.bind_created_filter().topic2( + namehashes + .iter() + .map(|h| B256::from(*h)) + .collect::>(), + ) } fn get_bootstrap_log_cache_inner( diff --git a/src/hyperapp.rs b/src/hyperapp.rs index 04222e4..e70ec09 100644 --- a/src/hyperapp.rs +++ b/src/hyperapp.rs @@ -500,11 +500,7 @@ pub fn setup_server( let mut server = http::server::HttpServer::new(5); if let Some(ui) = ui_config { - if let Err(e) = server.serve_ui( - "ui", - vec!["/"], - ui.clone(), - ) { + if let Err(e) = server.serve_ui("ui", vec!["/"], ui.clone()) { panic!("failed to serve UI: {e}. Make sure that a ui folder is in /pkg"); } } From bd5b6b0a78429a2f36d9a546d168a9ae4222c617 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Tue, 18 Nov 2025 11:30:19 -0500 Subject: [PATCH 20/31] new duration preview function --- src/bindings.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/bindings.rs b/src/bindings.rs index a9bcb9e..db4ef16 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1247,6 +1247,29 @@ impl Bindings { } } +/// Preview the combined lock amount and weighted duration when additional HYPR is added to a lock. +/// +/// This uses the same weighted-average technique as the TokenRegistry: the resulting duration is +/// the sum of each lock's `amount * duration`, divided by the combined amount. If the total amount +/// is zero, the combined duration is also zero. +pub fn preview_combined_lock( + existing_amount: U256, + existing_duration: U256, + additional_amount: U256, + additional_duration: U256, +) -> (U256, U256) { + let total_amount = existing_amount + additional_amount; + if total_amount.is_zero() { + return (U256::ZERO, U256::ZERO); + } + + let existing_weighted = existing_amount.saturating_mul(existing_duration); + let additional_weighted = additional_amount.saturating_mul(additional_duration); + let combined_duration = (existing_weighted + additional_weighted) / total_amount; + + (total_amount, combined_duration) +} + // ... existing code ... 
impl Serialize for ManifestItem { From a46ad34d387cfb2250450aa427d0b9fce99fce71 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Tue, 18 Nov 2025 17:04:54 -0500 Subject: [PATCH 21/31] working our way backwards computation --- src/bindings.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/bindings.rs b/src/bindings.rs index db4ef16..ce30564 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1270,6 +1270,30 @@ pub fn preview_combined_lock( (total_amount, combined_duration) } +/// Given a desired weighted duration, compute the required additional lock duration. +/// +/// This inverts the weighted-average equation used by the TokenRegistry so callers can +/// determine which duration to supply to `manageLock` in order to reach a target lock end. +/// Returns `None` if the additional amount is zero or if the math underflows. +pub fn required_additional_duration( + existing_amount: U256, + existing_duration: U256, + additional_amount: U256, + desired_weighted_duration: U256, +) -> Option { + if additional_amount.is_zero() { + return None; + } + let total_amount = existing_amount + additional_amount; + let desired_total_weighted = desired_weighted_duration.saturating_mul(total_amount); + if desired_total_weighted < existing_amount.saturating_mul(existing_duration) { + return None; + } + let numerator = + desired_total_weighted - existing_amount.saturating_mul(existing_duration); + Some(numerator / additional_amount) +} + // ... existing code ... impl Serialize for ManifestItem { From 48b6715e6c663b74b58e56815fc74139e9c8c202 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 18 Nov 2025 22:05:20 +0000 Subject: [PATCH 22/31] Format Rust code using rustfmt --- src/bindings.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/bindings.rs b/src/bindings.rs index ce30564..6d0581e 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -1289,8 +1289,7 @@ pub fn required_additional_duration( if desired_total_weighted < existing_amount.saturating_mul(existing_duration) { return None; } - let numerator = - desired_total_weighted - existing_amount.saturating_mul(existing_duration); + let numerator = desired_total_weighted - existing_amount.saturating_mul(existing_duration); Some(numerator / additional_amount) } From 26565b9c7c53fd6b66845c15ca14caa9d801d821 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Fri, 21 Nov 2025 10:01:05 -0500 Subject: [PATCH 23/31] removing obsolete comment --- src/lib.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index fa4bdae..b62a036 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,7 +32,6 @@ wit_bindgen::generate!({ /// Interact with the tokenregistry contract data pub mod bindings; -/// Currently nothing in here except for the contract address. /// Interact with the eth provider module. pub mod eth; From 966445669bfb97b1c20fc9040e4ddca944af3699 Mon Sep 17 00:00:00 2001 From: hosted-fornet Date: Wed, 26 Nov 2025 21:49:17 -0800 Subject: [PATCH 24/31] bump version to 2.2.1 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index fac64fc..b8e183c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "hyperware_process_lib" authors = ["Sybil Technologies AG"] -version = "2.2.0" +version = "2.2.1" edition = "2021" description = "A library for writing Hyperware processes in Rust." 
homepage = "https://hyperware.ai" From b40a832637be01a8da056dafe76d891d51e65a41 Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Mon, 1 Dec 2025 09:23:18 -0500 Subject: [PATCH 25/31] Add DAO support --- src/dao.rs | 347 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 2 + 2 files changed, 349 insertions(+) create mode 100644 src/dao.rs diff --git a/src/dao.rs b/src/dao.rs new file mode 100644 index 0000000..75e465b --- /dev/null +++ b/src/dao.rs @@ -0,0 +1,347 @@ +use crate::eth::{BlockNumberOrTag, EthError, Filter as EthFilter, Provider}; +use alloy::rpc::types::request::{TransactionInput, TransactionRequest}; +use alloy_primitives::{Address, Bytes, FixedBytes, U256, B256, keccak256}; +use alloy_sol_macro::sol; +use alloy_sol_types::{SolCall, SolEvent}; + +sol! { + /// Minimal TimelockController interface. + #[allow(non_camel_case_types)] + contract TimelockController { + function getMinDelay() external view returns (uint256); + function hasRole(bytes32 role, address account) external view returns (bool); + function PROPOSER_ROLE() external view returns (bytes32); + function EXECUTOR_ROLE() external view returns (bytes32); + function CANCELLER_ROLE() external view returns (bytes32); + function schedule( + address target, + uint256 value, + bytes data, + bytes32 predecessor, + bytes32 salt, + uint256 delay + ) external; + function execute( + address target, + uint256 value, + bytes data, + bytes32 predecessor, + bytes32 salt + ) external payable; + function cancel(bytes32 id) external; + function hashOperation( + address target, + uint256 value, + bytes data, + bytes32 predecessor, + bytes32 salt + ) external view returns (bytes32); + } + + /// Minimal Governor interface. + #[allow(non_camel_case_types)] + contract HyperwareGovernor { + function propose( + address[] targets, + uint256[] values, + bytes[] calldatas, + string description + ) external returns (uint256); + function hashProposal( + address[] targets, + uint256[] values, + bytes[] calldatas, + bytes32 descriptionHash + ) external view returns (uint256); + function state(uint256 proposalId) external view returns (uint8); + function proposalSnapshot(uint256 proposalId) external view returns (uint256); + function proposalDeadline(uint256 proposalId) external view returns (uint256); + function castVoteWithReason(uint256 proposalId, uint8 support, string reason) external returns (uint256); + + /// Standard OZ ProposalCreated event layout + event ProposalCreated( + uint256 proposalId, + address proposer, + address[] targets, + uint256[] values, + string[] signatures, + bytes[] calldatas, + uint256 startBlock, + uint256 endBlock, + string description + ); + } +} + +/// Convenience wrapper for Timelock/Governor interactions. +#[derive(Clone, Debug)] +pub struct DaoContracts { + pub provider: Provider, + pub timelock: Address, + pub governor: Address, +} + +impl DaoContracts { + pub fn new(provider: Provider, timelock: Address, governor: Address) -> Self { + Self { + provider, + timelock, + governor, + } + } + + fn call_view(&self, target: Address, call: Call) -> Result + where + Call: SolCall, + { + let tx_req = TransactionRequest::default() + .to(target) + .input(TransactionInput::new(Bytes::from(call.abi_encode()))); + let res_bytes = self.provider.call(tx_req, None)?; + Call::abi_decode_returns(&res_bytes, false).map_err(|_| EthError::RpcMalformedResponse) + } + + /// Return the timelock's minimum delay. 
+    pub fn timelock_delay(&self) -> Result<U256, EthError> {
+        let res = self.call_view(self.timelock, TimelockController::getMinDelayCall {})?;
+        Ok(res._0)
+    }
+
+    /// Fetch role IDs from the timelock.
+    pub fn roles(&self) -> Result<(FixedBytes<32>, FixedBytes<32>, FixedBytes<32>), EthError> {
+        let proposer = self.call_view(self.timelock, TimelockController::PROPOSER_ROLECall {})?._0;
+        let executor = self.call_view(self.timelock, TimelockController::EXECUTOR_ROLECall {})?._0;
+        let canceller = self.call_view(self.timelock, TimelockController::CANCELLER_ROLECall {})?._0;
+        Ok((proposer, executor, canceller))
+    }
+
+    /// Check if an account has a specific timelock role.
+    pub fn has_role(&self, role: FixedBytes<32>, account: Address) -> Result<bool, EthError> {
+        let res = self.call_view(
+            self.timelock,
+            TimelockController::hasRoleCall {
+                role,
+                account,
+            },
+        )?;
+        Ok(res._0)
+    }
+
+    /// Build a schedule tx for a single operation.
+    pub fn build_schedule_tx(
+        &self,
+        target: Address,
+        value: U256,
+        data: Bytes,
+        predecessor: FixedBytes<32>,
+        salt: FixedBytes<32>,
+        delay: U256,
+    ) -> TransactionRequest {
+        let call = TimelockController::scheduleCall {
+            target,
+            value,
+            data,
+            predecessor,
+            salt,
+            delay,
+        };
+        TransactionRequest::default()
+            .to(self.timelock)
+            .input(TransactionInput::new(Bytes::from(call.abi_encode())))
+    }
+
+    /// Build an execute tx for a scheduled operation.
+    pub fn build_execute_tx(
+        &self,
+        target: Address,
+        value: U256,
+        data: Bytes,
+        predecessor: FixedBytes<32>,
+        salt: FixedBytes<32>,
+    ) -> TransactionRequest {
+        let call = TimelockController::executeCall {
+            target,
+            value,
+            data,
+            predecessor,
+            salt,
+        };
+        TransactionRequest::default()
+            .to(self.timelock)
+            .input(TransactionInput::new(Bytes::from(call.abi_encode())))
+    }
+
+    /// Build a cancel tx for an operation id (hashOperation output).
+    pub fn build_cancel_tx(&self, operation_id: FixedBytes<32>) -> TransactionRequest {
+        let call = TimelockController::cancelCall { id: operation_id };
+        TransactionRequest::default()
+            .to(self.timelock)
+            .input(TransactionInput::new(Bytes::from(call.abi_encode())))
+    }
+
+    /// Build a propose tx on the governor.
+    pub fn build_propose_tx(
+        &self,
+        targets: Vec<Address>,
+        values: Vec<U256>,
+        calldatas: Vec<Bytes>,
+        description: String,
+    ) -> TransactionRequest {
+        let call = HyperwareGovernor::proposeCall {
+            targets,
+            values,
+            calldatas,
+            description,
+        };
+        TransactionRequest::default()
+            .to(self.governor)
+            .input(TransactionInput::new(Bytes::from(call.abi_encode())))
+    }
+
+    /// Compute the proposal id off-chain using the governor's hashProposal view.
+    /// (OZ proposalId = keccak256(abi.encode(targets, values, calldatas, descriptionHash))).
+    pub fn hash_proposal(
+        &self,
+        targets: Vec<Address>,
+        values: Vec<U256>,
+        calldatas: Vec<Bytes>,
+        description: &str,
+    ) -> Result<U256, EthError> {
+        let description_hash = keccak256(description.as_bytes());
+        let res = self.call_view(
+            self.governor,
+            HyperwareGovernor::hashProposalCall {
+                targets,
+                values,
+                calldatas,
+                descriptionHash: description_hash,
+            },
+        )?;
+        Ok(res._0)
+    }
+
+    /// Build a castVoteWithReason tx (support: 0=Against,1=For,2=Abstain in OZ Governor).
+    pub fn build_vote_tx(
+        &self,
+        proposal_id: U256,
+        support: u8,
+        reason: String,
+    ) -> TransactionRequest {
+        let call = HyperwareGovernor::castVoteWithReasonCall {
+            proposalId: proposal_id,
+            support,
+            reason,
+        };
+        TransactionRequest::default()
+            .to(self.governor)
+            .input(TransactionInput::new(Bytes::from(call.abi_encode())))
+    }
+
+    /// Governor state (OZ enum: 0 Pending, 1 Active, 2 Canceled, 3 Defeated, 4 Succeeded, 5 Queued, 6 Expired, 7 Executed).
+    pub fn proposal_state(&self, proposal_id: U256) -> Result<u8, EthError> {
+        let res = self.call_view(
+            self.governor,
+            HyperwareGovernor::stateCall {
+                proposalId: proposal_id,
+            },
+        )?;
+        Ok(res._0)
+    }
+
+    /// Proposal snapshot block.
+    pub fn proposal_snapshot(&self, proposal_id: U256) -> Result<U256, EthError> {
+        let res = self.call_view(
+            self.governor,
+            HyperwareGovernor::proposalSnapshotCall {
+                proposalId: proposal_id,
+            },
+        )?;
+        Ok(res._0)
+    }
+
+    /// Proposal deadline block.
+    pub fn proposal_deadline(&self, proposal_id: U256) -> Result<U256, EthError> {
+        let res = self.call_view(
+            self.governor,
+            HyperwareGovernor::proposalDeadlineCall {
+                proposalId: proposal_id,
+            },
+        )?;
+        Ok(res._0)
+    }
+
+    /// Fetch ProposalCreated events within a block range.
+    pub fn fetch_proposals_created(
+        &self,
+        from_block: Option<BlockNumberOrTag>,
+        to_block: Option<BlockNumberOrTag>,
+    ) -> Result<Vec<ProposalCreatedEvent>, EthError> {
+        let topic0 = HyperwareGovernor::ProposalCreated::SIGNATURE_HASH;
+        let mut filter = EthFilter::new()
+            .address(self.governor)
+            .event_signature(B256::from(topic0));
+        if let Some(fb) = from_block {
+            filter = filter.from_block(fb);
+        }
+        if let Some(tb) = to_block {
+            filter = filter.to_block(tb);
+        }
+        let logs = self.provider.get_logs(&filter)?;
+        let mut out = Vec::new();
+        for log in logs {
+            let prim_log = log.inner.clone();
+            if let Ok(decoded) =
+                HyperwareGovernor::ProposalCreated::decode_log(&prim_log, true)
+            {
+                out.push(ProposalCreatedEvent {
+                    proposal_id: decoded.proposalId,
+                    proposer: decoded.proposer,
+                    targets: decoded.targets.clone(),
+                    values: decoded.values.clone(),
+                    signatures: decoded.signatures.clone(),
+                    calldatas: decoded.calldatas.clone(),
+                    start_block: decoded.startBlock,
+                    end_block: decoded.endBlock,
+                    description: decoded.description.clone(),
+                });
+            }
+        }
+        Ok(out)
+    }
+
+    /// Hash a timelock operation (matches timelock.hashOperation).
+    pub fn hash_operation(
+        &self,
+        target: Address,
+        value: U256,
+        data: Bytes,
+        predecessor: FixedBytes<32>,
+        salt: FixedBytes<32>,
+    ) -> Result<FixedBytes<32>, EthError> {
+        let res = self.call_view(
+            self.timelock,
+            TimelockController::hashOperationCall {
+                target,
+                value,
+                data,
+                predecessor,
+                salt,
+            },
+        )?;
+        Ok(res._0)
+    }
+}
+
+/// Parsed ProposalCreated event.
+#[derive(Clone, Debug)]
+pub struct ProposalCreatedEvent {
+    pub proposal_id: U256,
+    pub proposer: Address,
+    pub targets: Vec<Address>
, + pub values: Vec, + pub signatures: Vec, + pub calldatas: Vec, + pub start_block: U256, + pub end_block: U256, + pub description: String, +} diff --git a/src/lib.rs b/src/lib.rs index b62a036..f294f2c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,6 +32,8 @@ wit_bindgen::generate!({ /// Interact with the tokenregistry contract data pub mod bindings; +/// Interact with DAO (Timelock / Governor) contracts +pub mod dao; /// Interact with the eth provider module. pub mod eth; From 1c463afb75f478bcd6fb8706e73287f42151c106 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 14:24:53 +0000 Subject: [PATCH 26/31] Format Rust code using rustfmt --- src/dao.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/dao.rs b/src/dao.rs index 75e465b..daac360 100644 --- a/src/dao.rs +++ b/src/dao.rs @@ -1,6 +1,6 @@ use crate::eth::{BlockNumberOrTag, EthError, Filter as EthFilter, Provider}; use alloy::rpc::types::request::{TransactionInput, TransactionRequest}; -use alloy_primitives::{Address, Bytes, FixedBytes, U256, B256, keccak256}; +use alloy_primitives::{keccak256, Address, Bytes, FixedBytes, B256, U256}; use alloy_sol_macro::sol; use alloy_sol_types::{SolCall, SolEvent}; @@ -109,9 +109,15 @@ impl DaoContracts { /// Fetch role IDs from the timelock. pub fn roles(&self) -> Result<(FixedBytes<32>, FixedBytes<32>, FixedBytes<32>), EthError> { - let proposer = self.call_view(self.timelock, TimelockController::PROPOSER_ROLECall {})?._0; - let executor = self.call_view(self.timelock, TimelockController::EXECUTOR_ROLECall {})?._0; - let canceller = self.call_view(self.timelock, TimelockController::CANCELLER_ROLECall {})?._0; + let proposer = self + .call_view(self.timelock, TimelockController::PROPOSER_ROLECall {})? + ._0; + let executor = self + .call_view(self.timelock, TimelockController::EXECUTOR_ROLECall {})? + ._0; + let canceller = self + .call_view(self.timelock, TimelockController::CANCELLER_ROLECall {})? + ._0; Ok((proposer, executor, canceller)) } @@ -119,10 +125,7 @@ impl DaoContracts { pub fn has_role(&self, role: FixedBytes<32>, account: Address) -> Result { let res = self.call_view( self.timelock, - TimelockController::hasRoleCall { - role, - account, - }, + TimelockController::hasRoleCall { role, account }, )?; Ok(res._0) } @@ -290,9 +293,7 @@ impl DaoContracts { let mut out = Vec::new(); for log in logs { let prim_log = log.inner.clone(); - if let Ok(decoded) = - HyperwareGovernor::ProposalCreated::decode_log(&prim_log, true) - { + if let Ok(decoded) = HyperwareGovernor::ProposalCreated::decode_log(&prim_log, true) { out.push(ProposalCreatedEvent { proposal_id: decoded.proposalId, proposer: decoded.proposer, From edb778774171c0ef78d064f3080a450043fbd01a Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Tue, 2 Dec 2025 08:59:14 -0500 Subject: [PATCH 27/31] Commented the imported tokenregistry sol --- src/bindings.rs | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/bindings.rs b/src/bindings.rs index 6d0581e..14fcbcb 100644 --- a/src/bindings.rs +++ b/src/bindings.rs @@ -135,24 +135,39 @@ pub mod contract { event GHyprSet(address indexed gHypr); + /// Initializes the TokenRegistry with HYPR token and admin. + /// Reverts InvalidAdmin if admin is zero; UnsupportedToken if hypr is zero. function initialize(address _hypr, address _admin) external; + /// Locks tokens or modifies an existing lock. + /// Emits TokensLocked/LockExtended. 
Reverts on zero amount, expired lock, + /// invalid amount, or invalid duration. function manageLock(uint256 _amount, uint256 _duration) external; + /// Returns true if the user's lock has expired. function isLockExpired(address _account) external view returns (bool); + /// Withdraws unlocked tokens, consolidating bindings first. + /// Emits TokensBound/TokensWithdrawn. May require multiple calls if many bindings. function withdraw() external returns (bool); + /// Retrieves lock details for a user. function getLockDetails(address _user) external view returns (uint256 amount, uint256 endTime, uint256 remainingTime); + /// Retrieves registration details for a user/namehash. function getRegistrationDetails(bytes32 _namehash, address _user) external view returns (uint256 amount, uint256 endTime, uint256 remainingTime); + /// Transfers tokens between registrations for the caller. + /// Source must be expired or default. Emits TokensBound/BindCreated/ + /// BindAmountIncreased/BindDurationExtended. Reverts on invalid duration, + /// invalid params for default dest, expired lock, unexpired source, or zero + /// amount/duration for new binds. function transferRegistration( bytes32 _srcNamehash, bytes32 _dstNamehash, @@ -160,22 +175,29 @@ pub mod contract { uint256 _duration ) external; + /// Returns all binding namehashes for a user. function getUserBinds(address _user) external view returns (bytes32[] memory); + /// Calculates sublinear voting power for a balance/duration. function calculateVotingPower(uint256 _value, uint256 _lockDuration) external view returns (uint256); + /// Gets the multiplier for an account (or total supply if zero) at a timepoint. function getMultiplier(address _account, uint256 _timepoint) external view returns (uint256); + /// Gets the user's unlock timestamp. function getUserUnlockStamp(address _account) external view returns (uint256); + /// Gets user's unlock or delegated unlock timestamp, whichever is later. function getUserOrDelegatedUnlockStamp(address _account) external view returns (uint256); + /// Updates voting multipliers when delegation changes. + /// Only callable by governance token; reverts otherwise. function updateDelegationMultipliers( uint256 _unlockTime, uint256 _movedVotes, @@ -185,6 +207,7 @@ pub mod contract { uint256 _dstVotesBefore ) external; + /// Calculates weighted unlock timestamp for locks. function calculateWeightedUnlockStamp( uint256 _remainingDuration, uint256 _currentBalance, @@ -192,6 +215,8 @@ pub mod contract { uint256 _newLockAmount ) external view returns (uint256); + /// Calculates required new lock duration to hit a desired unlock stamp. + /// Reverts InvalidParam if unlockStamp is in the past or newLockAmount is zero. function calculateNewLockDuration( uint256 _unlockStamp, uint256 _remainingDuration, From 4e915218b382e4b3f7f93cddeec30415f02a504b Mon Sep 17 00:00:00 2001 From: Johnathan Reale Date: Tue, 2 Dec 2025 10:55:41 -0500 Subject: [PATCH 28/31] new wait_for_process_ready fn --- src/hyperapp.rs | 82 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) diff --git a/src/hyperapp.rs b/src/hyperapp.rs index 13a3693..d6f63d0 100644 --- a/src/hyperapp.rs +++ b/src/hyperapp.rs @@ -594,6 +594,88 @@ pub fn pretty_print_send_error(error: &SendError) { ); } +/// Classification for readiness polling of another process. +#[derive(Debug, PartialEq, Eq)] +pub enum WaitClassification { + /// The target responded but indicated it is still starting up. 
+ Starting, + /// The target is ready (or responded with a payload we consider ready). + Ready, + /// The target responded with an unknown payload. + Unknown, +} + +/// Poll a target process until it reports ready. +/// +/// - `target`: process address to poll (e.g., hypermap-cacher). +/// - `request_body`: request payload to send each attempt. +/// - `timeout_s`: per-request timeout in seconds. +/// - `retry_delay_s`: delay between attempts when not ready or on error. +/// - `classify`: function to classify the response body. +/// - `treat_unknown_as_ready`: if true, any non-starting response is treated as ready. +pub fn wait_for_process_ready( + target: Address, + request_body: Vec, + timeout_s: u64, + retry_delay_s: u64, + mut classify: F, + treat_unknown_as_ready: bool, +) where + F: FnMut(&[u8]) -> WaitClassification, +{ + let mut attempt = 1; + loop { + match Request::to(target.clone()) + .body(request_body.clone()) + .send_and_await_response(timeout_s) + { + Ok(Ok(response)) => { + let classification = classify(response.body()); + match classification { + WaitClassification::Starting => { + info!( + "Target {} still starting (attempt {}), retrying in {}s", + target, attempt, retry_delay_s + ); + } + WaitClassification::Ready => { + info!("Target {} ready after {} attempt(s)", target, attempt); + break; + } + WaitClassification::Unknown => { + if treat_unknown_as_ready { + info!( + "Target {} responded with unknown payload, proceeding as ready", + target + ); + break; + } else { + info!( + "Target {} responded with unknown payload, retrying in {}s", + target, retry_delay_s + ); + } + } + } + } + Ok(Err(e)) => { + info!( + "Error response from {} (attempt {}): {:?}, retrying in {}s", + target, attempt, e, retry_delay_s + ); + } + Err(e) => { + info!( + "Failed to contact {} (attempt {}): {:?}, retrying in {}s", + target, attempt, e, retry_delay_s + ); + } + } + attempt += 1; + std::thread::sleep(std::time::Duration::from_secs(retry_delay_s)); + } +} + // For demonstration, we'll define them all in one place. // Make sure the signatures match the real function signatures you require! pub fn no_init_fn(_state: &mut S) { From eb26ea5233ca9351ce948f6fb8d006346149159b Mon Sep 17 00:00:00 2001 From: hosted-fornet Date: Tue, 2 Dec 2025 15:05:19 -0800 Subject: [PATCH 29/31] move wait_for_process_ready --- src/hyperapp.rs | 82 ------------------------------------------------- src/lib.rs | 82 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 82 deletions(-) diff --git a/src/hyperapp.rs b/src/hyperapp.rs index d6f63d0..13a3693 100644 --- a/src/hyperapp.rs +++ b/src/hyperapp.rs @@ -594,88 +594,6 @@ pub fn pretty_print_send_error(error: &SendError) { ); } -/// Classification for readiness polling of another process. -#[derive(Debug, PartialEq, Eq)] -pub enum WaitClassification { - /// The target responded but indicated it is still starting up. - Starting, - /// The target is ready (or responded with a payload we consider ready). - Ready, - /// The target responded with an unknown payload. - Unknown, -} - -/// Poll a target process until it reports ready. -/// -/// - `target`: process address to poll (e.g., hypermap-cacher). -/// - `request_body`: request payload to send each attempt. -/// - `timeout_s`: per-request timeout in seconds. -/// - `retry_delay_s`: delay between attempts when not ready or on error. -/// - `classify`: function to classify the response body. -/// - `treat_unknown_as_ready`: if true, any non-starting response is treated as ready. 
-pub fn wait_for_process_ready( - target: Address, - request_body: Vec, - timeout_s: u64, - retry_delay_s: u64, - mut classify: F, - treat_unknown_as_ready: bool, -) where - F: FnMut(&[u8]) -> WaitClassification, -{ - let mut attempt = 1; - loop { - match Request::to(target.clone()) - .body(request_body.clone()) - .send_and_await_response(timeout_s) - { - Ok(Ok(response)) => { - let classification = classify(response.body()); - match classification { - WaitClassification::Starting => { - info!( - "Target {} still starting (attempt {}), retrying in {}s", - target, attempt, retry_delay_s - ); - } - WaitClassification::Ready => { - info!("Target {} ready after {} attempt(s)", target, attempt); - break; - } - WaitClassification::Unknown => { - if treat_unknown_as_ready { - info!( - "Target {} responded with unknown payload, proceeding as ready", - target - ); - break; - } else { - info!( - "Target {} responded with unknown payload, retrying in {}s", - target, retry_delay_s - ); - } - } - } - } - Ok(Err(e)) => { - info!( - "Error response from {} (attempt {}): {:?}, retrying in {}s", - target, attempt, e, retry_delay_s - ); - } - Err(e) => { - info!( - "Failed to contact {} (attempt {}): {:?}, retrying in {}s", - target, attempt, e, retry_delay_s - ); - } - } - attempt += 1; - std::thread::sleep(std::time::Duration::from_secs(retry_delay_s)); - } -} - // For demonstration, we'll define them all in one place. // Make sure the signatures match the real function signatures you require! pub fn no_init_fn(_state: &mut S) { diff --git a/src/lib.rs b/src/lib.rs index f294f2c..859c2cb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -364,6 +364,88 @@ pub fn eval_our(address: &Address) -> Address { address } +/// Classification for readiness polling of another process. +#[derive(Debug, PartialEq, Eq)] +pub enum WaitClassification { + /// The target responded but indicated it is still starting up. + Starting, + /// The target is ready (or responded with a payload we consider ready). + Ready, + /// The target responded with an unknown payload. + Unknown, +} + +/// Poll a target process until it reports ready. +/// +/// - `target`: process address to poll (e.g., hypermap-cacher). +/// - `request_body`: request payload to send each attempt. +/// - `timeout_s`: per-request timeout in seconds. +/// - `retry_delay_s`: delay between attempts when not ready or on error. +/// - `classify`: function to classify the response body. +/// - `treat_unknown_as_ready`: if true, any non-starting response is treated as ready. 
+pub fn wait_for_process_ready( + target: Address, + request_body: Vec, + timeout_s: u64, + retry_delay_s: u64, + mut classify: F, + treat_unknown_as_ready: bool, +) where + F: FnMut(&[u8]) -> WaitClassification, +{ + let mut attempt = 1; + loop { + match Request::to(target.clone()) + .body(request_body.clone()) + .send_and_await_response(timeout_s) + { + Ok(Ok(response)) => { + let classification = classify(response.body()); + match classification { + WaitClassification::Starting => { + info!( + "Target {} still starting (attempt {}), retrying in {}s", + target, attempt, retry_delay_s + ); + } + WaitClassification::Ready => { + info!("Target {} ready after {} attempt(s)", target, attempt); + break; + } + WaitClassification::Unknown => { + if treat_unknown_as_ready { + info!( + "Target {} responded with unknown payload, proceeding as ready", + target + ); + break; + } else { + info!( + "Target {} responded with unknown payload, retrying in {}s", + target, retry_delay_s + ); + } + } + } + } + Ok(Err(e)) => { + info!( + "Error response from {} (attempt {}): {:?}, retrying in {}s", + target, attempt, e, retry_delay_s + ); + } + Err(e) => { + info!( + "Failed to contact {} (attempt {}): {:?}, retrying in {}s", + target, attempt, e, retry_delay_s + ); + } + } + attempt += 1; + std::thread::sleep(std::time::Duration::from_secs(retry_delay_s)); + } +} + /// The `Spawn!()` macro is defined here as a no-op. /// However, in practice, `kit build` will rewrite it during pre-processing. /// From bbe9f579903512547a6b6a5c72005c5bebc40aab Mon Sep 17 00:00:00 2001 From: hosted-fornet Date: Tue, 2 Dec 2025 15:27:27 -0800 Subject: [PATCH 30/31] fix logging (not always available -> use print_to_terminal) and add `max_attempts` option --- Cargo.lock | 2 +- src/lib.rs | 59 +++++++++++++++++++++++++++++++++++++++--------------- 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ceba34b..2bde1f4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1617,7 +1617,7 @@ dependencies = [ [[package]] name = "hyperware_process_lib" -version = "2.2.0" +version = "2.2.1" dependencies = [ "alloy", "alloy-primitives", diff --git a/src/lib.rs b/src/lib.rs index 859c2cb..763d5a9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -383,6 +383,7 @@ pub enum WaitClassification { /// - `retry_delay_s`: delay between attempts when not ready or on error. /// - `classify`: function to classify the response body. /// - `treat_unknown_as_ready`: if true, any non-starting response is treated as ready. +/// - `max_attempts`: number of attempts before continuing without a ready response. 
pub fn wait_for_process_ready( target: Address, request_body: Vec, @@ -390,11 +391,19 @@ pub fn wait_for_process_ready( retry_delay_s: u64, mut classify: F, treat_unknown_as_ready: bool, + max_attempts: Option, ) where F: FnMut(&[u8]) -> WaitClassification, { let mut attempt = 1; loop { + let mut fail_message_suffix = format!(", retrying in {retry_delay_s}s"); + if let Some(ma) = max_attempts { + if attempt >= ma { + fail_message_suffix = ", abandoning waiting and proceeding as if ready".to_string() + } + } + match Request::to(target.clone()) .body(request_body.clone()) .send_and_await_response(timeout_s) @@ -403,41 +412,59 @@ pub fn wait_for_process_ready( let classification = classify(response.body()); match classification { WaitClassification::Starting => { - info!( - "Target {} still starting (attempt {}), retrying in {}s", - target, attempt, retry_delay_s + crate::print_to_terminal( + 2, + &format!( + "Target {} still starting (attempt {}){}", + target, attempt, fail_message_suffix + ), ); } WaitClassification::Ready => { - info!("Target {} ready after {} attempt(s)", target, attempt); + crate::print_to_terminal( + 2, + &format!("Target {} ready after {} attempt(s)", target, attempt), + ); break; } WaitClassification::Unknown => { if treat_unknown_as_ready { - info!( - "Target {} responded with unknown payload, proceeding as ready", - target + crate::print_to_terminal( + 2, + &format!( + "Target {} responded with unknown payload, proceeding as ready", + target + ), ); break; } else { - info!( - "Target {} responded with unknown payload, retrying in {}s", - target, retry_delay_s + crate::print_to_terminal( + 2, + &format!( + "Target {} responded with unknown payload{}", + target, fail_message_suffix + ), ); } } } } Ok(Err(e)) => { - info!( - "Error response from {} (attempt {}): {:?}, retrying in {}s", - target, attempt, e, retry_delay_s + crate::print_to_terminal( + 2, + &format!( + "Error response from {} (attempt {}): {:?}{}", + target, attempt, e, fail_message_suffix + ), ); } Err(e) => { - info!( - "Failed to contact {} (attempt {}): {:?}, retrying in {}s", - target, attempt, e, retry_delay_s + crate::print_to_terminal( + 2, + &format!( + "Failed to contact {} (attempt {}): {:?}{}", + target, attempt, e, fail_message_suffix + ), ); } } From f5a7adc56fb0407fab6d0ecd575552b2ec55046c Mon Sep 17 00:00:00 2001 From: hosted-fornet Date: Tue, 2 Dec 2025 15:34:48 -0800 Subject: [PATCH 31/31] update docs comment --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 763d5a9..d7102df 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -375,7 +375,7 @@ pub enum WaitClassification { Unknown, } -/// Poll a target process until it reports ready. +/// Poll a target process until it reports ready while blocking. /// /// - `target`: process address to poll (e.g., hypermap-cacher). /// - `request_body`: request payload to send each attempt.
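
For reference, a caller of the new helper might look roughly like the sketch below. It is illustrative only: the hypermap-cacher address string, the JSON body ("GetStatus") and the substrings matched in the response ("IsStarting"), and the element types elided above by formatting (Vec<u8> for the request body, Option<u64> for max_attempts) are assumptions, not taken from these patches.

use hyperware_process_lib::{wait_for_process_ready, Address, WaitClassification};

fn wait_for_cacher(our: &Address) {
    // Assumed address of the hypermap-cacher system process on this node.
    let cacher: Address = format!("{}@hypermap-cacher:hypermap-cacher:sys", our.node())
        .parse()
        .expect("valid process address");

    // Assumed request encoding: serde_json serializes a unit variant such as
    // CacherRequest::GetStatus as the bare JSON string "GetStatus".
    let body = br#""GetStatus""#.to_vec();

    wait_for_process_ready(
        cacher,
        body,
        5,  // per-request timeout, seconds
        5,  // delay between attempts, seconds
        |resp: &[u8]| {
            // Crude classification by substring match; a real caller would
            // deserialize the CacherResponse instead.
            let contains = |needle: &[u8]| resp.windows(needle.len()).any(|w| w == needle);
            if contains(b"IsStarting") {
                WaitClassification::Starting
            } else if contains(b"GetStatus") {
                WaitClassification::Ready
            } else {
                WaitClassification::Unknown
            }
        },
        true,     // treat unrecognized payloads as ready
        Some(12), // stop waiting after 12 attempts instead of blocking forever
    );
}

With max_attempts set, the helper logs that it is abandoning waiting and proceeding as if ready once the limit is reached, so callers that cannot block indefinitely still make progress; passing None appears to keep the earlier behavior of retrying until a ready response arrives.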