diff --git a/Cargo.toml b/Cargo.toml index 757a43a..ce52aad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,13 +1,16 @@ -[package] -name = "spdk" -version = "0.1.0" -edition = "2021" +[workspace] +members = [ + "spdk-core", + "backend-blindbit-native", + "backend-blindbit-wasm", +] +resolver = "2" -[lib] -name = "spdk" -crate-type = ["lib", "staticlib", "cdylib"] +[workspace.dependencies] +# Internal workspace crates +spdk-core = { path = "spdk-core", version = "0.1.0" } -[dependencies] +# Core dependencies - shared across crates silentpayments = "0.4" anyhow = "1.0" serde = { version = "1.0.188", features = ["derive"] } @@ -15,11 +18,13 @@ serde_json = "1.0.107" bitcoin = { version = "0.31.1", features = ["serde", "rand", "base64"] } rayon = "1.10.0" futures = "0.3" -log = "0.4" +futures-util = "0.3.31" async-trait = "0.1" -reqwest = { version = "0.12.4", features = ["rustls-tls", "gzip", "json"], default-features = false, optional = true } -hex = { version = "0.4.3", features = ["serde"], optional = true } +log = "0.4" +hex = { version = "0.4.3", features = ["serde"] } bdk_coin_select = "0.4.0" +tokio = { version = "1.48.0", features = ["rt"], default-features = false } +reqwest = { version = "0.12.26", features = ["json", "gzip"], default-features = false } -[features] -blindbit-backend = ["reqwest", "hex"] +[workspace.package] +repository = "https://github.com/cygnet3/spdk" diff --git a/backend-blindbit-native/Cargo.toml b/backend-blindbit-native/Cargo.toml new file mode 100644 index 0000000..3c13216 --- /dev/null +++ b/backend-blindbit-native/Cargo.toml @@ -0,0 +1,60 @@ +[package] +name = "backend-blindbit-native" +version = "0.1.0" +edition = "2021" +repository.workspace = true + +[dependencies] +# Core client dependency +spdk-core.workspace = true + +# URL parsing (minimal: ~20KB) +url = "2.5" + +# Minimal HTTP client (optional, ~200KB + TLS adds ~600KB) +# TLS support via rustls is required for HTTPS connections to blindbit +ureq = { version = "2.10", 
default-features = false, features = ["json", "tls"], optional = true } + +# Logging +log.workspace = true + +# Re-export some core types for convenience +bitcoin.workspace = true +anyhow.workspace = true +silentpayments.workspace = true +serde.workspace = true +serde_json.workspace = true +hex.workspace = true + +# The HttpClient trait is async, so async-trait is always required +# Similarly, futures is always required +async-trait.workspace = true +futures.workspace = true + +# Other async dependencies (optional) +futures-util = { workspace = true, optional = true } +tokio = { workspace = true, optional = true } +reqwest = { workspace = true, optional = true } + +[features] +# Default: both sync and async backends available, no bundled HTTP client +default = ["sync", "async"] + +# Enable sync backend (BlindbitBackend) - wraps async HTTP calls with block_on +# Note: Sequential processing only, no concurrency +sync = [] + +# Enable async backend (AsyncBlindbitBackend) - truly async with concurrent requests +# Recommended for production use with high throughput requirements +async = ["sync", "spdk-core/async", "dep:futures-util"] + +# Minimal HTTP client implementation using ureq (blocking, wrapped in async trait) +# Enables sync backend automatically since it provides the dependencies +# Good choice: ureq is blocking by nature, minimal overhead with sync backend +ureq-client = ["sync", "dep:ureq"] + +# Async HTTP client implementation using reqwest (truly async) +# Enables sync backend automatically since it provides the dependencies +# Warning: Using reqwest with sync backend is inefficient - pulls in tokio runtime +# but blocks on every request. Prefer ureq-client for sync or enable async feature. 
+reqwest-client = ["sync", "dep:tokio", "dep:reqwest"] diff --git a/backend-blindbit-native/README.md b/backend-blindbit-native/README.md new file mode 100644 index 0000000..0173bf4 --- /dev/null +++ b/backend-blindbit-native/README.md @@ -0,0 +1,241 @@ +# backend-blindbit-native + +Native Rust backend implementation for SPDK (Silent Payment Development Kit) that connects to Blindbit indexing servers. + +## Quick Start + +### For Sync/Blocking Applications +```toml +backend-blindbit-native = { version = "0.1.0", default-features = false, features = ["ureq-client"] } +``` + +```rust +use backend_blindbit_native::{BlindbitBackend, UreqClient}; + +let client = UreqClient::new(); +let backend = BlindbitBackend::new("https://blindbit.io".to_string(), client)?; +let height = backend.block_height()?; // Simple blocking call +``` + +**Why:** Minimal binary size (~800KB), truly blocking HTTP with no async overhead. + +### For Async Applications +```toml +backend-blindbit-native = { version = "0.1.0", default-features = false, features = ["async", "reqwest-client"] } +``` + +```rust +use backend_blindbit_native::{AsyncBlindbitBackend, ReqwestClient}; + +#[tokio::main] +async fn main() -> Result<()> { + let client = ReqwestClient::new(); + let backend = AsyncBlindbitBackend::new("https://blindbit.io".to_string(), client)?; + let height = backend.block_height().await?; // Async with concurrency + Ok(()) +} +``` + +**Why:** High performance with 200+ concurrent requests, full async/tokio benefits. + +### Bring Your Own HTTP Client +```toml +backend-blindbit-native = { version = "0.1.0", default-features = false, features = ["sync"] } +``` + +Then implement the `HttpClient` trait with your preferred HTTP library (see [Custom HTTP Client](#custom-http-client)). 
+ +## Feature Flags + +### Core Dependencies +- **`async-trait`**: Always included - the `HttpClient` trait is async by design + +### Backend Features + +#### `sync` - Synchronous Backend +- **What it does:** Enables `BlindbitBackend` - wraps async HTTP calls with `block_on` +- **When to use:** Simple blocking API, command-line tools, scripting +- **Performance:** Sequential processing, no concurrency +- **Dependencies:** `futures` (for `block_on`) + +#### `async` - Asynchronous Backend +- **What it does:** Enables `AsyncBlindbitBackend` - truly async with concurrent requests +- **When to use:** Web servers, high-throughput applications, need for concurrency +- **Performance:** 200+ concurrent block requests, optimal throughput +- **Dependencies:** `futures`, `futures-util`, `spdk-core/async` +- **Note:** Also enables `sync` as a dependency + +### HTTP Client Features + +#### `ureq-client` - Blocking HTTP Client +- **What it does:** Bundles `UreqClient` (ureq-based HTTP implementation) +- **Binary size:** ~800KB (minimal footprint) +- **Best with:** `sync` backend (or alone - automatically enables sync) +- **Architecture:** Truly blocking I/O wrapped in async trait +- **Use case:** CLI tools, simple applications, minimal dependencies + +#### `reqwest-client` - Async HTTP Client +- **What it does:** Bundles `ReqwestClient` (reqwest/hyper-based HTTP implementation) +- **Binary size:** ~2MB+ (full-featured) +- **Best with:** `async` backend +- **Architecture:** True async HTTP with connection pooling, built on tokio +- **Use case:** High-performance async applications, web services + +## Performance Considerations + +### ✅ Efficient Combinations + +| Features | Backend | HTTP Client | Binary Size | Use Case | +|----------|---------|-------------|-------------|----------| +| `ureq-client` | Sync | ureq (blocking) | ~800KB | CLI, scripts, simple apps | +| `async,ureq-client` | Both | ureq (blocking) | ~1MB | Need both APIs, minimal size | +| `async,reqwest-client` | 
Both | reqwest (async) | ~2.5MB | High-performance async | + +### ⚠️ Valid But Inefficient + +| Features | Why Inefficient | Better Alternative | +|----------|-----------------|-------------------| +| `sync,reqwest-client` or `reqwest-client` alone | Pulls in tokio runtime but blocks on every request. No concurrency. | Use `ureq-client` for sync, or add `async` feature | + +**The Problem:** +```rust +// With sync backend + reqwest +let backend = BlindbitBackend::new(url, ReqwestClient::new())?; + +// Internally does this for EVERY call: +block_on(async { + tokio_spawn(async { reqwest.get().await }).await // Start runtime, immediately block +}) +// Result: ~2MB binary, zero async benefit, wasted overhead +``` + +## Advanced Usage + +### Custom HTTP Client + +Implement the `HttpClient` trait with any HTTP library (hyper, isahc, surf, platform-specific APIs, etc.): + +```rust +use async_trait::async_trait; +use backend_blindbit_native::HttpClient; +use anyhow::Result; + +#[derive(Clone)] +struct MyCustomClient { + // Your HTTP client here +} + +#[async_trait] +impl HttpClient for MyCustomClient { + async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result { + // Implement with your preferred library + todo!() + } + + async fn post_json(&self, url: &str, json_body: &str) -> Result { + // Implement POST + todo!() + } +} +``` + +Then use with either backend: + +```rust +// Sync +let backend = BlindbitBackend::new(url, MyCustomClient::new())?; + +// Async +let backend = AsyncBlindbitBackend::new(url, MyCustomClient::new())?; +``` + +### Platform-Specific HTTP + +Use native HTTP on each platform: + +```rust +#[cfg(target_arch = "wasm32")] +type MyClient = FetchClient; // Browser fetch API + +#[cfg(target_os = "ios")] +type MyClient = NSURLSessionClient; // NSURLSession + +#[cfg(not(any(target_arch = "wasm32", target_os = "ios")))] +type MyClient = ReqwestClient; // Standard async +``` + +## Default Configuration + +```toml +backend-blindbit-native = "0.1.0" 
+``` + +Enables: `sync` + `async` (both backends available) + +**Use when:** You want both API styles available and will provide your own HTTP client. Good for libraries that want to expose both sync and async APIs to their users. + +## Testing Feature Flags + +This crate includes a comprehensive test script that validates all 13 feature flag combinations compile successfully. + +### Running Tests Locally + +```bash +cd backend-blindbit-native +./test-features.sh # With colored output +./test-features.sh --ci # CI-friendly (no colors) +``` + +### Tested Combinations + +The script tests **13 feature flag combinations**: + +**Minimal Builds (3 tests):** +1. No features - just the `HttpClient` trait +2. `sync` - Sync backend only (bring your own HTTP client) +3. `async` - Async backend only (bring your own HTTP client) + +**Single HTTP Client (2 tests):** +4. `ureq-client` - Sync backend + ureq +5. `reqwest-client` - Sync backend + reqwest + +**Backend + Client Combinations (4 tests):** +6. `sync,ureq-client` - Explicit sync + ureq +7. `sync,reqwest-client` - Explicit sync + reqwest +8. `async,ureq-client` - Async + ureq +9. `async,reqwest-client` - Async + reqwest + +**Multiple Clients (3 tests):** +10. `sync,ureq-client,reqwest-client` - Both clients, sync backend +11. `async,ureq-client,reqwest-client` - Both clients, async backend +12. `sync,async,ureq-client,reqwest-client` - Everything enabled +13. Default features (`sync,async`) + +### CI Integration + +**GitHub Actions:** +```yaml +- name: Test all feature combinations + run: | + cd backend-blindbit-native + ./test-features.sh --ci +``` + +**GitLab CI:** +```yaml +test-features: + script: + - cd backend-blindbit-native + - ./test-features.sh --ci +``` + +**Exit Codes:** +- `0` - All tests passed ✓ +- `1` - One or more tests failed ✗ + +**Note:** The test script validates compilation, not performance characteristics. See [Performance Considerations](#performance-considerations) for efficiency guidance. 
+ +## License + +See the workspace root for license information. + diff --git a/backend-blindbit-native/src/backend.rs b/backend-blindbit-native/src/backend.rs new file mode 100644 index 0000000..8786e42 --- /dev/null +++ b/backend-blindbit-native/src/backend.rs @@ -0,0 +1,96 @@ +use std::ops::RangeInclusive; + +use bitcoin::{absolute::Height, Amount}; +use futures::executor::block_on; + +use anyhow::Result; + +use crate::client::{BlindbitClient, HttpClient}; +use spdk_core::{BlockData, BlockDataIterator, ChainBackend, SpentIndexData, UtxoData}; + +/// Synchronous Blindbit backend implementation, generic over the HTTP client. +/// +/// This uses `block_on` to convert async HTTP calls to synchronous operations. +/// Requests are processed sequentially with no concurrency. +/// +/// # Performance Note +/// +/// For better performance, consider using `AsyncBlindbitBackend` with the `async` feature, +/// which supports 200+ concurrent requests. +/// +/// # HTTP Client Recommendation +/// +/// - **Efficient:** Use with `UreqClient` (blocking HTTP, minimal overhead) +/// - **Inefficient:** Using with `ReqwestClient` pulls in tokio runtime but blocks every request +/// +/// Consumers can also implement their own `HttpClient` trait. +pub struct BlindbitBackend { + client: BlindbitClient, +} + +impl BlindbitBackend { + /// Create a new synchronous Blindbit backend with a custom HTTP client. + /// + /// # Arguments + /// * `blindbit_url` - Base URL of the Blindbit server + /// * `http_client` - HTTP client implementation + pub fn new(blindbit_url: String, http_client: H) -> Result { + Ok(Self { + client: BlindbitClient::new(blindbit_url, http_client)?, + }) + } +} + +impl ChainBackend for BlindbitBackend { + /// High-level function to get block data for a range of blocks. + /// Block data includes all the information needed to determine if a block is relevant for scanning, + /// but does not include utxos, or spent index. 
+ /// These need to be fetched separately afterwards, if it is determined this block is relevant. + fn get_block_data_for_range( + &self, + range: RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> BlockDataIterator { + let client = self.client.clone(); + + // Convert range to iterator that fetches block data synchronously + let iter = range.map(move |n| { + let client = client.clone(); + block_on(async move { + let blkheight = Height::from_consensus(n)?; + let tweaks = match with_cutthrough { + true => client.tweaks(blkheight, dust_limit).await?, + false => client.tweak_index(blkheight, dust_limit).await?, + }; + let new_utxo_filter = client.filter_new_utxos(blkheight).await?; + let spent_filter = client.filter_spent(blkheight).await?; + let blkhash = new_utxo_filter.block_hash; + Ok(BlockData { + blkheight, + blkhash, + tweaks, + new_utxo_filter: new_utxo_filter.into(), + spent_filter: spent_filter.into(), + }) + }) + }); + + Box::new(iter) + } + + fn spent_index(&self, block_height: Height) -> Result { + block_on(self.client.spent_index(block_height)).map(Into::into) + } + + fn utxos(&self, block_height: Height) -> Result> { + Ok(block_on(self.client.utxos(block_height))? 
+ .into_iter() + .map(Into::into) + .collect()) + } + + fn block_height(&self) -> Result { + block_on(self.client.block_height()) + } +} diff --git a/backend-blindbit-native/src/backend_async.rs b/backend-blindbit-native/src/backend_async.rs new file mode 100644 index 0000000..8eb61ca --- /dev/null +++ b/backend-blindbit-native/src/backend_async.rs @@ -0,0 +1,128 @@ +use std::{ops::RangeInclusive, pin::Pin, sync::Arc}; + +use bitcoin::{absolute::Height, Amount}; +use futures::{stream, Stream, StreamExt}; + +use anyhow::Result; + +use crate::client::{BlindbitClient, HttpClient}; +use spdk_core::{AsyncChainBackend, BlockData, BlockDataStream, SpentIndexData, UtxoData}; + +const CONCURRENT_FILTER_REQUESTS: usize = 200; + +/// Asynchronous Blindbit backend implementation, generic over the HTTP client. +/// +/// This provides high-performance async methods with concurrent request handling. +/// Enable with the `async` feature flag. +/// +/// Consumers must provide their own HTTP client implementation by implementing the `HttpClient` trait. +pub struct AsyncBlindbitBackend { + client: BlindbitClient, +} + +impl AsyncBlindbitBackend { + /// Create a new async Blindbit backend with a custom HTTP client. + /// + /// # Arguments + /// * `blindbit_url` - Base URL of the Blindbit server + /// * `http_client` - HTTP client implementation + pub fn new(blindbit_url: String, http_client: H) -> Result { + Ok(Self { + client: BlindbitClient::new(blindbit_url, http_client)?, + }) + } + + /// Get block data for a range of blocks as a Stream (async iterator). + /// + /// This fetches blocks concurrently for better performance. 
+ /// + /// # Arguments + /// * `range` - Range of block heights to fetch + /// * `dust_limit` - Minimum amount to consider (dust outputs are ignored) + /// * `with_cutthrough` - Whether to use cutthrough optimization + /// + /// # Returns + /// A Stream of BlockData results + pub fn get_block_data_stream( + &self, + range: RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> Pin> + Send + 'static>> { + let client = Arc::new(self.client.clone()); + + let res = stream::iter(range) + .map(move |n| { + let client = client.clone(); + + async move { + let blkheight = Height::from_consensus(n)?; + let tweaks = match with_cutthrough { + true => client.tweaks(blkheight, dust_limit).await?, + false => client.tweak_index(blkheight, dust_limit).await?, + }; + let new_utxo_filter = client.filter_new_utxos(blkheight).await?; + let spent_filter = client.filter_spent(blkheight).await?; + let blkhash = new_utxo_filter.block_hash; + Ok(BlockData { + blkheight, + blkhash, + tweaks, + new_utxo_filter: new_utxo_filter.into(), + spent_filter: spent_filter.into(), + }) + } + }) + .buffered(CONCURRENT_FILTER_REQUESTS); + + Box::pin(res) + } + + /// Get spent index data for a block height + pub async fn spent_index(&self, block_height: Height) -> Result { + self.client.spent_index(block_height).await.map(Into::into) + } + + /// Get UTXO data for a block height + pub async fn utxos(&self, block_height: Height) -> Result> { + Ok(self + .client + .utxos(block_height) + .await? 
+ .into_iter() + .map(Into::into) + .collect()) + } + + /// Get the current block height from the server + pub async fn block_height(&self) -> Result { + self.client.block_height().await + } +} + +// Implement the AsyncChainBackend trait for AsyncBlindbitBackend +// Native backend only - not available for WASM +#[cfg(not(target_arch = "wasm32"))] +#[async_trait::async_trait] +impl AsyncChainBackend for AsyncBlindbitBackend { + fn get_block_data_stream( + &self, + range: RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> BlockDataStream { + self.get_block_data_stream(range, dust_limit, with_cutthrough) + } + + async fn spent_index(&self, block_height: Height) -> Result { + self.spent_index(block_height).await + } + + async fn utxos(&self, block_height: Height) -> Result> { + self.utxos(block_height).await + } + + async fn block_height(&self) -> Result { + self.block_height().await + } +} diff --git a/src/backend/blindbit/client/client.rs b/backend-blindbit-native/src/client/client.rs similarity index 51% rename from src/backend/blindbit/client/client.rs rename to backend-blindbit-native/src/client/client.rs index 8b1e730..7b3371b 100644 --- a/src/backend/blindbit/client/client.rs +++ b/backend-blindbit-native/src/client/client.rs @@ -1,58 +1,62 @@ -use std::time::Duration; - use bitcoin::{absolute::Height, secp256k1::PublicKey, Amount, Txid}; -use reqwest::{Client, Url}; +use url::Url; use anyhow::Result; -use crate::backend::blindbit::client::structs::InfoResponse; +use crate::client::structs::InfoResponse; +use super::http_trait::HttpClient; use super::structs::{ BlockHeightResponse, FilterResponse, ForwardTxRequest, SpentIndexResponse, UtxoResponse, }; -#[derive(Clone, Debug)] -pub struct BlindbitClient { - client: Client, +/// Client for interacting with a Blindbit server. +/// +/// Generic over the HTTP client implementation, allowing consumers to provide +/// their own HTTP client by implementing the `HttpClient` trait. 
+#[derive(Clone)] +pub struct BlindbitClient { + http_client: H, host_url: Url, } -impl BlindbitClient { - pub fn new(host_url: String) -> Result { +impl BlindbitClient { + /// Create a new Blindbit client with a custom HTTP client implementation. + /// + /// # Arguments + /// * `host_url` - Base URL of the Blindbit server + /// * `http_client` - HTTP client implementation + pub fn new(host_url: String, http_client: H) -> Result { let mut host_url = Url::parse(&host_url)?; - let client = reqwest::Client::new(); // we need a trailing slash, if not present we append it if !host_url.path().ends_with('/') { host_url.set_path(&format!("{}/", host_url.path())); } - Ok(BlindbitClient { client, host_url }) + Ok(BlindbitClient { + http_client, + host_url, + }) } pub async fn block_height(&self) -> Result { let url = self.host_url.join("block-height")?; - - let res = self - .client - .get(url) - .timeout(Duration::from_secs(5)) - .send() - .await?; - let blkheight: BlockHeightResponse = serde_json::from_str(&res.text().await?)?; + let body = self.http_client.get(url.as_str(), &[]).await?; + let blkheight: BlockHeightResponse = serde_json::from_str(&body)?; Ok(blkheight.block_height) } pub async fn tweaks(&self, block_height: Height, dust_limit: Amount) -> Result> { let url = self.host_url.join(&format!("tweaks/{}", block_height))?; - - let res = self - .client - .get(url) - .query(&[("dustLimit", format!("{}", dust_limit.to_sat()))]) - .send() + let body = self + .http_client + .get( + url.as_str(), + &[("dustLimit", dust_limit.to_sat().to_string())], + ) .await?; - Ok(serde_json::from_str(&res.text().await?)?) + Ok(serde_json::from_str(&body)?) 
} pub async fn tweak_index( @@ -63,66 +67,57 @@ impl BlindbitClient { let url = self .host_url .join(&format!("tweak-index/{}", block_height))?; - - let res = self - .client - .get(url) - .query(&[("dustLimit", format!("{}", dust_limit.to_sat()))]) - .send() + let body = self + .http_client + .get( + url.as_str(), + &[("dustLimit", dust_limit.to_sat().to_string())], + ) .await?; - Ok(serde_json::from_str(&res.text().await?)?) + Ok(serde_json::from_str(&body)?) } pub async fn utxos(&self, block_height: Height) -> Result> { let url = self.host_url.join(&format!("utxos/{}", block_height))?; - let res = self.client.get(url).send().await?; - - Ok(serde_json::from_str(&res.text().await?)?) + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) } pub async fn spent_index(&self, block_height: Height) -> Result { let url = self .host_url .join(&format!("spent-index/{}", block_height))?; - let res = self.client.get(url).send().await?; - - Ok(serde_json::from_str(&res.text().await?)?) + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) } pub async fn filter_new_utxos(&self, block_height: Height) -> Result { let url = self .host_url .join(&format!("filter/new-utxos/{}", block_height))?; - - let res = self.client.get(url).send().await?; - - Ok(serde_json::from_str(&res.text().await?)?) + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) } pub async fn filter_spent(&self, block_height: Height) -> Result { let url = self .host_url .join(&format!("filter/spent/{}", block_height))?; - - let res = self.client.get(url).send().await?; - - Ok(serde_json::from_str(&res.text().await?)?) + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) 
} pub async fn forward_tx(&self, tx_hex: String) -> Result { let url = self.host_url.join("forward-tx")?; - - let body = ForwardTxRequest::new(tx_hex); - - let res = self.client.post(url).json(&body).send().await?; - - Ok(serde_json::from_str(&res.text().await?)?) + let request = ForwardTxRequest::new(tx_hex); + let json_body = serde_json::to_string(&request)?; + let body = self.http_client.post_json(url.as_str(), &json_body).await?; + Ok(serde_json::from_str(&body)?) } pub async fn info(&self) -> Result { let url = self.host_url.join("info")?; - - let res = self.client.get(url).send().await?; - Ok(serde_json::from_str(&res.text().await?)?) + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) } } diff --git a/backend-blindbit-native/src/client/http_trait.rs b/backend-blindbit-native/src/client/http_trait.rs new file mode 100644 index 0000000..b3af50d --- /dev/null +++ b/backend-blindbit-native/src/client/http_trait.rs @@ -0,0 +1,63 @@ +use anyhow::Result; +use async_trait::async_trait; + +/// Minimal async HTTP client trait that can be implemented with any HTTP library. +/// +/// This allows consumers to bring their own HTTP client implementation. +/// You can use any HTTP library you prefer: hyper, isahc, surf, ureq, +/// platform-specific APIs (NSURLSession, fetch, etc.), or any other HTTP client. +/// +/// The trait is async because HTTP I/O is inherently async and the library +/// benefits from concurrent requests (e.g., 200 parallel block fetches). 
+/// +/// # Implementing the trait +/// +/// Simply implement the two methods with your preferred HTTP library: +/// +/// ```ignore +/// use anyhow::Result; +/// use async_trait::async_trait; +/// use backend_blindbit_native::HttpClient; +/// +/// #[derive(Clone)] +/// struct MyHttpClient { +/// // Your HTTP client here +/// } +/// +/// #[async_trait] +/// impl HttpClient for MyHttpClient { +/// async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result { +/// // Implement GET request with your HTTP library +/// // Build URL with query params and return response body +/// Ok("response".to_string()) +/// } +/// +/// async fn post_json(&self, url: &str, json_body: &str) -> Result { +/// // Implement POST request with your HTTP library +/// // Send JSON body and return response body +/// Ok("response".to_string()) +/// } +/// } +/// ``` +#[async_trait] +pub trait HttpClient: Send + Sync + Clone { + /// Perform a GET request with optional query parameters. + /// + /// # Arguments + /// * `url` - The full URL to request + /// * `query_params` - Optional query parameters as key-value pairs + /// + /// # Returns + /// The response body as a string + async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result; + + /// Perform a POST request with a JSON body. 
+ /// + /// # Arguments + /// * `url` - The full URL to request + /// * `json_body` - The JSON body as a string + /// + /// # Returns + /// The response body as a string + async fn post_json(&self, url: &str, json_body: &str) -> Result; +} diff --git a/backend-blindbit-native/src/client/mod.rs b/backend-blindbit-native/src/client/mod.rs new file mode 100644 index 0000000..1af74ea --- /dev/null +++ b/backend-blindbit-native/src/client/mod.rs @@ -0,0 +1,15 @@ +mod client; +mod http_trait; +#[cfg(feature = "reqwest-client")] +mod reqwest_impl; +pub mod structs; +#[cfg(feature = "ureq-client")] +mod ureq_impl; + +pub use client::BlindbitClient; +pub use http_trait::HttpClient; + +#[cfg(feature = "reqwest-client")] +pub use reqwest_impl::ReqwestClient; +#[cfg(feature = "ureq-client")] +pub use ureq_impl::UreqClient; diff --git a/backend-blindbit-native/src/client/reqwest_impl.rs b/backend-blindbit-native/src/client/reqwest_impl.rs new file mode 100644 index 0000000..70e2f99 --- /dev/null +++ b/backend-blindbit-native/src/client/reqwest_impl.rs @@ -0,0 +1,126 @@ +use anyhow::{anyhow, Result}; +use async_trait::async_trait; + +use super::http_trait::HttpClient; + +/// Async HTTP client implementation using reqwest. +/// +/// This is a fully async HTTP client built on top of tokio/hyper. +/// It's more feature-rich than ureq but requires an async runtime. 
+/// +/// # Performance Recommendation +/// +/// **Use with `AsyncBlindbitBackend` for best performance:** +/// - Enables 200+ concurrent requests +/// - Full connection pooling and async benefits +/// - Proper utilization of tokio runtime +/// +/// **Avoid with `BlindbitBackend` (sync):** +/// - Each request blocks the thread via `block_on` +/// - No concurrency (sequential processing) +/// - Pulls in ~2MB tokio runtime with zero async benefit +/// - Consider `UreqClient` instead for sync usage (~800KB, truly blocking) +/// +/// # Example (Recommended - Async) +/// +/// ```ignore +/// use backend_blindbit_native::{ReqwestClient, AsyncBlindbitBackend}; +/// +/// #[tokio::main] +/// async fn main() { +/// let http_client = ReqwestClient::new(); +/// let backend = AsyncBlindbitBackend::new("https://blindbit.io".to_string(), http_client)?; +/// // Benefits from concurrent requests +/// } +/// ``` +/// +/// # Example (Not Recommended - Sync) +/// +/// ```ignore +/// use backend_blindbit_native::{ReqwestClient, BlindbitBackend}; +/// +/// // This works but is inefficient - use UreqClient instead +/// let http_client = ReqwestClient::new(); +/// let backend = BlindbitBackend::new("https://blindbit.io".to_string(), http_client)?; +/// ``` +#[derive(Clone)] +pub struct ReqwestClient { + client: reqwest::Client, +} + +impl ReqwestClient { + /// Create a new reqwest HTTP client with default settings. + pub fn new() -> Self { + Self { + client: reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(30)) + .build() + .expect("Failed to build reqwest client"), + } + } + + /// Create a new reqwest HTTP client with a custom timeout. + pub fn with_timeout(timeout_secs: u64) -> Self { + Self { + client: reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(timeout_secs)) + .build() + .expect("Failed to build reqwest client"), + } + } + + /// Create a new reqwest HTTP client with a custom client configuration. 
+ pub fn with_client(client: reqwest::Client) -> Self { + Self { client } + } +} + +impl Default for ReqwestClient { + fn default() -> Self { + Self::new() + } +} + +#[async_trait] +impl HttpClient for ReqwestClient { + async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result { + // Build request with query parameters + let mut request = self.client.get(url); + + for (key, value) in query_params { + request = request.query(&[(key, value)]); + } + + // Perform async request + let response = request + .send() + .await + .map_err(|e| anyhow!("HTTP GET request failed: {}", e))? + .error_for_status() + .map_err(|e| anyhow!("HTTP GET request returned error status: {}", e))? + .text() + .await + .map_err(|e| anyhow!("Failed to read response body: {}", e))?; + + Ok(response) + } + + async fn post_json(&self, url: &str, json_body: &str) -> Result { + // Perform async request + let response = self + .client + .post(url) + .header("Content-Type", "application/json") + .body(json_body.to_string()) + .send() + .await + .map_err(|e| anyhow!("HTTP POST request failed: {}", e))? + .error_for_status() + .map_err(|e| anyhow!("HTTP POST request returned error status: {}", e))? 
+ .text() + .await + .map_err(|e| anyhow!("Failed to read response body: {}", e))?; + + Ok(response) + } +} diff --git a/src/backend/blindbit/client/structs.rs b/backend-blindbit-native/src/client/structs.rs similarity index 97% rename from src/backend/blindbit/client/structs.rs rename to backend-blindbit-native/src/client/structs.rs index e4c3f13..5bfaf58 100644 --- a/src/backend/blindbit/client/structs.rs +++ b/backend-blindbit-native/src/client/structs.rs @@ -2,7 +2,7 @@ use bitcoin::{absolute::Height, Amount, BlockHash, Network, ScriptBuf, Txid}; use serde::{Deserialize, Deserializer, Serialize}; -use crate::{FilterData, SpentIndexData, UtxoData}; +use spdk_core::{FilterData, SpentIndexData, UtxoData}; #[derive(Debug, Deserialize)] pub struct BlockHeightResponse { diff --git a/backend-blindbit-native/src/client/ureq_impl.rs b/backend-blindbit-native/src/client/ureq_impl.rs new file mode 100644 index 0000000..b238760 --- /dev/null +++ b/backend-blindbit-native/src/client/ureq_impl.rs @@ -0,0 +1,92 @@ +use anyhow::{anyhow, Result}; +use async_trait::async_trait; + +use super::http_trait::HttpClient; + +/// Minimal HTTP client implementation using ureq. +/// +/// This is a lightweight, blocking HTTP client that's perfect for basic needs. +/// It uses about ~200KB of binary size and has minimal dependencies. +/// +/// # Example +/// +/// ```ignore +/// use backend_blindbit_native::{UreqClient, BlindbitBackend}; +/// +/// let http_client = UreqClient::new(); +/// let backend = BlindbitBackend::new("https://blindbit.io".to_string(), http_client)?; +/// ``` +#[derive(Clone)] +pub struct UreqClient { + agent: ureq::Agent, +} + +impl UreqClient { + /// Create a new ureq HTTP client with default settings. + pub fn new() -> Self { + Self { + agent: ureq::AgentBuilder::new() + .timeout(std::time::Duration::from_secs(30)) + .build(), + } + } + + /// Create a new ureq HTTP client with a custom timeout. 
+ pub fn with_timeout(timeout_secs: u64) -> Self { + Self { + agent: ureq::AgentBuilder::new() + .timeout(std::time::Duration::from_secs(timeout_secs)) + .build(), + } + } +} + +impl Default for UreqClient { + fn default() -> Self { + Self::new() + } +} + +#[async_trait] +impl HttpClient for UreqClient { + async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result { + // Build URL with query parameters + let mut full_url = url.to_string(); + if !query_params.is_empty() { + full_url.push('?'); + for (i, (key, value)) in query_params.iter().enumerate() { + if i > 0 { + full_url.push('&'); + } + full_url.push_str(key); + full_url.push('='); + full_url.push_str(value); + } + } + + // Perform blocking request (wrapped in async for trait compatibility) + let response = self + .agent + .get(&full_url) + .call() + .map_err(|e| anyhow!("HTTP GET request failed: {}", e))? + .into_string() + .map_err(|e| anyhow!("Failed to read response body: {}", e))?; + + Ok(response) + } + + async fn post_json(&self, url: &str, json_body: &str) -> Result { + // Perform blocking request (wrapped in async for trait compatibility) + let response = self + .agent + .post(url) + .set("Content-Type", "application/json") + .send_string(json_body) + .map_err(|e| anyhow!("HTTP POST request failed: {}", e))? 
+ .into_string() + .map_err(|e| anyhow!("Failed to read response body: {}", e))?; + + Ok(response) + } +} diff --git a/backend-blindbit-native/src/lib.rs b/backend-blindbit-native/src/lib.rs new file mode 100644 index 0000000..4799938 --- /dev/null +++ b/backend-blindbit-native/src/lib.rs @@ -0,0 +1,31 @@ +// Sync backend - wraps async calls with block_on, requires futures and async-trait +#[cfg(feature = "sync")] +mod backend; + +// Async backend - available with "async" feature +#[cfg(feature = "async")] +mod backend_async; + +mod client; + +// Re-export backend functionality +#[cfg(feature = "sync")] +pub use backend::BlindbitBackend; + +#[cfg(feature = "async")] +pub use backend_async::AsyncBlindbitBackend; + +pub use client::{BlindbitClient, HttpClient}; + +#[cfg(feature = "ureq-client")] +pub use client::UreqClient; + +#[cfg(feature = "reqwest-client")] +pub use client::ReqwestClient; + +#[cfg(feature = "async")] +pub use async_trait; +#[cfg(feature = "async")] +pub use futures; +#[cfg(feature = "async")] +pub use futures_util; diff --git a/backend-blindbit-native/test-features.sh b/backend-blindbit-native/test-features.sh new file mode 100755 index 0000000..7fcc677 --- /dev/null +++ b/backend-blindbit-native/test-features.sh @@ -0,0 +1,103 @@ +#!/bin/bash +# Test script for all feature flag combinations +# Ensures that all valid combinations compile successfully +# +# Usage: +# ./test-features.sh # Normal output with colors +# ./test-features.sh --ci # CI-friendly output without colors + +# Check for CI mode +CI_MODE=false +if [ "$1" = "--ci" ] || [ "$CI" = "true" ]; then + CI_MODE=true + RED='' + GREEN='' + YELLOW='' + NC='' +else + RED='\033[0;31m' + GREEN='\033[0;32m' + YELLOW='\033[1;33m' + NC='\033[0m' # No Color +fi + +FAILED=0 +PASSED=0 + +test_features() { + local name="$1" + local flags="$2" + + echo -n "Testing $name... " + + # Run cargo check and capture output + local output + output=$(cargo check $flags 2>&1) + local exit_code=$? 
+ + if [ $exit_code -eq 0 ] && echo "$output" | grep -q "Finished"; then + echo -e "${GREEN}✓ PASS${NC}" + ((PASSED++)) + else + echo -e "${RED}✗ FAIL${NC}" + echo " Command: cargo check $flags" + echo " Exit code: $exit_code" + echo "$output" | tail -20 + ((FAILED++)) + fi +} + +echo "================================================" +echo "Testing backend-blindbit-native feature flags" +echo "================================================" +echo "" + +# Minimal builds +echo "=== Minimal Builds ===" +test_features "No features (trait only)" "--no-default-features" +test_features "Sync backend only" "--no-default-features --features sync" +test_features "Async backend only" "--no-default-features --features async" +echo "" + +# Single client builds +echo "=== Single HTTP Client Builds ===" +test_features "ureq-client only" "--no-default-features --features ureq-client" +test_features "reqwest-client only" "--no-default-features --features reqwest-client" +echo "" + +# Backend + Client combinations +echo "=== Backend + Client Combinations ===" +test_features "Sync + ureq" "--no-default-features --features sync,ureq-client" +test_features "Sync + reqwest" "--no-default-features --features sync,reqwest-client" +test_features "Async + ureq" "--no-default-features --features async,ureq-client" +test_features "Async + reqwest" "--no-default-features --features async,reqwest-client" +echo "" + +# Multiple clients +echo "=== Multiple Client Builds ===" +test_features "Both clients (sync)" "--no-default-features --features sync,ureq-client,reqwest-client" +test_features "Both clients (async)" "--no-default-features --features async,ureq-client,reqwest-client" +test_features "Both clients + backends" "--no-default-features --features sync,async,ureq-client,reqwest-client" +echo "" + +# Default +echo "=== Default Configuration ===" +test_features "Default features" "" +echo "" + +# Summary +echo "================================================" +echo "Summary" +echo 
"================================================" +echo -e "Passed: ${GREEN}$PASSED${NC}" +echo -e "Failed: ${RED}$FAILED${NC}" +echo "" + +if [ $FAILED -eq 0 ]; then + echo -e "${GREEN}All feature combinations passed!${NC}" + exit 0 +else + echo -e "${RED}Some feature combinations failed!${NC}" + exit 1 +fi + diff --git a/backend-blindbit-wasm/Cargo.toml b/backend-blindbit-wasm/Cargo.toml new file mode 100644 index 0000000..a6c586d --- /dev/null +++ b/backend-blindbit-wasm/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "backend-blindbit-wasm" +version = "0.1.0" +edition = "2021" +repository.workspace = true + +[lib] +crate-type = ["rlib", "cdylib"] + +[dependencies] +# Core client dependency +spdk-core.workspace = true + +# Async and futures +futures = "0.3" +futures-util = "0.3.31" +async-trait = "0.1" +log.workspace = true + +# URL parsing (minimal: ~20KB) +url = "2.5" + +# HTTP client - optional, heavy (~1.5MB with rustls + tokio) +# Consumers can opt out and provide their own HttpClient implementation +reqwest = { version = "0.12.4", features = ["gzip", "json"], default-features = false, optional = true } + +# WASM-specific dependencies +wasm-bindgen = "0.2.105" +wasm-bindgen-futures = "0.4.55" + +# Re-export some core types for convenience +bitcoin.workspace = true +anyhow.workspace = true +silentpayments.workspace = true +serde.workspace = true +serde_json.workspace = true +hex.workspace = true + +[features] +# Default includes the full-featured reqwest client +default = ["reqwest-client"] + +# Include the ReqwestClient implementation +# Disable this if you want to provide your own minimal HttpClient +reqwest-client = ["reqwest"] diff --git a/backend-blindbit-wasm/src/backend.rs b/backend-blindbit-wasm/src/backend.rs new file mode 100644 index 0000000..ed93e40 --- /dev/null +++ b/backend-blindbit-wasm/src/backend.rs @@ -0,0 +1,85 @@ +use std::ops::RangeInclusive; + +use bitcoin::{absolute::Height, Amount}; +use futures::executor::block_on; + +use 
anyhow::Result;
+
+use crate::client::{BlindbitClient, HttpClient};
+use spdk_core::{BlockData, BlockDataIterator, ChainBackend, SpentIndexData, UtxoData};
+
+/// Blindbit backend implementation, generic over the HTTP client.
+///
+/// This allows consumers to choose their HTTP implementation:
+/// - Use `ReqwestClient` for the default full-featured option
+/// - Implement `HttpClient` for custom/lightweight alternatives
+pub struct BlindbitBackend<H: HttpClient> {
+    client: BlindbitClient<H>,
+}
+
+impl<H: HttpClient> BlindbitBackend<H> {
+    /// Create a new Blindbit backend with a custom HTTP client.
+    ///
+    /// # Arguments
+    /// * `blindbit_url` - Base URL of the Blindbit server
+    /// * `http_client` - HTTP client implementation
+    pub fn new(blindbit_url: String, http_client: H) -> Result<Self> {
+        Ok(Self {
+            client: BlindbitClient::new(blindbit_url, http_client)?,
+        })
+    }
+}
+
+impl<H: HttpClient> ChainBackend for BlindbitBackend<H> {
+    /// High-level function to get block data for a range of blocks.
+    /// Block data includes all the information needed to determine if a block is relevant for scanning,
+    /// but does not include utxos, or spent index.
+    /// These need to be fetched separately afterwards, if it is determined this block is relevant.
+ fn get_block_data_for_range( + &self, + range: RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> BlockDataIterator { + let client = self.client.clone(); + + // Convert range to iterator that fetches block data synchronously + let iter = range.map(move |n| { + let client = client.clone(); + block_on(async move { + let blkheight = Height::from_consensus(n)?; + let tweaks = match with_cutthrough { + true => client.tweaks(blkheight, dust_limit).await?, + false => client.tweak_index(blkheight, dust_limit).await?, + }; + let new_utxo_filter = client.filter_new_utxos(blkheight).await?; + let spent_filter = client.filter_spent(blkheight).await?; + let blkhash = new_utxo_filter.block_hash; + Ok(BlockData { + blkheight, + blkhash, + tweaks, + new_utxo_filter: new_utxo_filter.into(), + spent_filter: spent_filter.into(), + }) + }) + }); + + Box::new(iter) + } + + fn spent_index(&self, block_height: Height) -> Result { + block_on(self.client.spent_index(block_height)).map(Into::into) + } + + fn utxos(&self, block_height: Height) -> Result> { + Ok(block_on(self.client.utxos(block_height))? + .into_iter() + .map(Into::into) + .collect()) + } + + fn block_height(&self) -> Result { + block_on(self.client.block_height()) + } +} diff --git a/backend-blindbit-wasm/src/client/client.rs b/backend-blindbit-wasm/src/client/client.rs new file mode 100644 index 0000000..aa0ba1f --- /dev/null +++ b/backend-blindbit-wasm/src/client/client.rs @@ -0,0 +1,123 @@ +use bitcoin::{absolute::Height, secp256k1::PublicKey, Amount, Txid}; +use url::Url; + +use anyhow::Result; + +use crate::client::structs::InfoResponse; + +use super::http_trait::HttpClient; +use super::structs::{ + BlockHeightResponse, FilterResponse, ForwardTxRequest, SpentIndexResponse, UtxoResponse, +}; + +/// Client for interacting with a Blindbit server. +/// +/// Generic over the HTTP client implementation, allowing consumers to choose +/// between full-featured (reqwest) or lightweight alternatives. 
+#[derive(Clone)] +pub struct BlindbitClient { + http_client: H, + host_url: Url, +} + +impl BlindbitClient { + /// Create a new Blindbit client with a custom HTTP client implementation. + /// + /// # Arguments + /// * `host_url` - Base URL of the Blindbit server + /// * `http_client` - HTTP client implementation (reqwest, custom, etc.) + pub fn new(host_url: String, http_client: H) -> Result { + let mut host_url = Url::parse(&host_url)?; + + // we need a trailing slash, if not present we append it + if !host_url.path().ends_with('/') { + host_url.set_path(&format!("{}/", host_url.path())); + } + + Ok(BlindbitClient { + http_client, + host_url, + }) + } + + pub async fn block_height(&self) -> Result { + let url = self.host_url.join("block-height")?; + let body = self.http_client.get(url.as_str(), &[]).await?; + let blkheight: BlockHeightResponse = serde_json::from_str(&body)?; + Ok(blkheight.block_height) + } + + pub async fn tweaks(&self, block_height: Height, dust_limit: Amount) -> Result> { + let url = self.host_url.join(&format!("tweaks/{}", block_height))?; + let body = self + .http_client + .get( + url.as_str(), + &[("dustLimit", dust_limit.to_sat().to_string())], + ) + .await?; + Ok(serde_json::from_str(&body)?) + } + + pub async fn tweak_index( + &self, + block_height: Height, + dust_limit: Amount, + ) -> Result> { + let url = self + .host_url + .join(&format!("tweak-index/{}", block_height))?; + let body = self + .http_client + .get( + url.as_str(), + &[("dustLimit", dust_limit.to_sat().to_string())], + ) + .await?; + Ok(serde_json::from_str(&body)?) + } + + pub async fn utxos(&self, block_height: Height) -> Result> { + let url = self.host_url.join(&format!("utxos/{}", block_height))?; + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) 
+ } + + pub async fn spent_index(&self, block_height: Height) -> Result { + let url = self + .host_url + .join(&format!("spent-index/{}", block_height))?; + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) + } + + pub async fn filter_new_utxos(&self, block_height: Height) -> Result { + let url = self + .host_url + .join(&format!("filter/new-utxos/{}", block_height))?; + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) + } + + pub async fn filter_spent(&self, block_height: Height) -> Result { + let url = self + .host_url + .join(&format!("filter/spent/{}", block_height))?; + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) + } + + pub async fn forward_tx(&self, tx_hex: String) -> Result { + let url = self.host_url.join("forward-tx")?; + let request = ForwardTxRequest::new(tx_hex); + let json_body = serde_json::to_string(&request)?; + let body = self.http_client.post_json(url.as_str(), &json_body).await?; + Ok(serde_json::from_str(&body)?) + } + + pub async fn info(&self) -> Result { + let url = self.host_url.join("info")?; + let body = self.http_client.get(url.as_str(), &[]).await?; + Ok(serde_json::from_str(&body)?) + } +} diff --git a/backend-blindbit-wasm/src/client/http_trait.rs b/backend-blindbit-wasm/src/client/http_trait.rs new file mode 100644 index 0000000..5705764 --- /dev/null +++ b/backend-blindbit-wasm/src/client/http_trait.rs @@ -0,0 +1,63 @@ +use anyhow::Result; +use async_trait::async_trait; + +/// Minimal async HTTP client trait that can be implemented with any HTTP library. +/// +/// This allows consumers to bring their own HTTP client implementation. +/// You can use any HTTP library you prefer: reqwest, hyper, isahc, surf, +/// platform-specific APIs (NSURLSession, fetch, etc.), or any other HTTP client. 
+///
+/// The trait is async because HTTP I/O is inherently async and the library
+/// benefits from concurrent requests (e.g., 200 parallel block fetches).
+///
+/// # Implementing the trait
+///
+/// Simply implement the two methods with your preferred HTTP library:
+///
+/// ```ignore
+/// use anyhow::Result;
+/// use async_trait::async_trait;
+/// use backend_blindbit_wasm::HttpClient;
+///
+/// #[derive(Clone)]
+/// struct MyHttpClient {
+///     // Your HTTP client here
+/// }
+///
+/// #[async_trait(?Send)]
+/// impl HttpClient for MyHttpClient {
+///     async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result<String> {
+///         // Implement GET request with your HTTP library
+///         // Build URL with query params and return response body
+///         Ok("response".to_string())
+///     }
+///
+///     async fn post_json(&self, url: &str, json_body: &str) -> Result<String> {
+///         // Implement POST request with your HTTP library
+///         // Send JSON body and return response body
+///         Ok("response".to_string())
+///     }
+/// }
+/// ```
+#[async_trait(?Send)]
+pub trait HttpClient: Send + Sync + Clone {
+    /// Perform a GET request with optional query parameters.
+    ///
+    /// # Arguments
+    /// * `url` - The full URL to request
+    /// * `query_params` - Optional query parameters as key-value pairs
+    ///
+    /// # Returns
+    /// The response body as a string
+    async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result<String>;
+
+    /// Perform a POST request with a JSON body.
+ /// + /// # Arguments + /// * `url` - The full URL to request + /// * `json_body` - The JSON body as a string + /// + /// # Returns + /// The response body as a string + async fn post_json(&self, url: &str, json_body: &str) -> Result; +} diff --git a/backend-blindbit-wasm/src/client/mod.rs b/backend-blindbit-wasm/src/client/mod.rs new file mode 100644 index 0000000..31b3509 --- /dev/null +++ b/backend-blindbit-wasm/src/client/mod.rs @@ -0,0 +1,11 @@ +mod client; +mod http_trait; +#[cfg(feature = "reqwest-client")] +mod reqwest_impl; +pub mod structs; + +pub use client::BlindbitClient; +pub use http_trait::HttpClient; + +#[cfg(feature = "reqwest-client")] +pub use reqwest_impl::ReqwestClient; diff --git a/backend-blindbit-wasm/src/client/reqwest_impl.rs b/backend-blindbit-wasm/src/client/reqwest_impl.rs new file mode 100644 index 0000000..e9bfef0 --- /dev/null +++ b/backend-blindbit-wasm/src/client/reqwest_impl.rs @@ -0,0 +1,64 @@ +use anyhow::Result; +use async_trait::async_trait; +use reqwest::Client; +use std::time::Duration; + +use super::http_trait::HttpClient; + +/// Default HTTP client implementation using reqwest. +/// +/// This is the full-featured but heavier option (~1.5MB with rustls + tokio). +/// +/// **Binary size impact:** +/// - reqwest + hyper: ~200KB +/// - rustls + ring: ~1.1MB +/// - tokio runtime: ~500KB +/// +/// If you need smaller binaries, implement `HttpClient` with a lighter alternative. 
+#[derive(Clone, Debug)] +pub struct ReqwestClient { + client: Client, +} + +impl ReqwestClient { + pub fn new() -> Self { + Self { + client: Client::new(), + } + } + + pub fn with_client(client: Client) -> Self { + Self { client } + } +} + +impl Default for ReqwestClient { + fn default() -> Self { + Self::new() + } +} + +#[async_trait(?Send)] +impl HttpClient for ReqwestClient { + async fn get(&self, url: &str, query_params: &[(&str, String)]) -> Result { + let mut req = self.client.get(url).timeout(Duration::from_secs(5)); + + for (key, val) in query_params { + req = req.query(&[(key, val)]); + } + + let res = req.send().await?; + Ok(res.text().await?) + } + + async fn post_json(&self, url: &str, json_body: &str) -> Result { + let res = self + .client + .post(url) + .header("Content-Type", "application/json") + .body(json_body.to_string()) + .send() + .await?; + Ok(res.text().await?) + } +} diff --git a/backend-blindbit-wasm/src/client/structs.rs b/backend-blindbit-wasm/src/client/structs.rs new file mode 100644 index 0000000..5bfaf58 --- /dev/null +++ b/backend-blindbit-wasm/src/client/structs.rs @@ -0,0 +1,103 @@ +#![allow(dead_code)] +use bitcoin::{absolute::Height, Amount, BlockHash, Network, ScriptBuf, Txid}; +use serde::{Deserialize, Deserializer, Serialize}; + +use spdk_core::{FilterData, SpentIndexData, UtxoData}; + +#[derive(Debug, Deserialize)] +pub struct BlockHeightResponse { + pub block_height: Height, +} + +#[derive(Debug, Deserialize)] +pub struct UtxoResponse { + pub txid: Txid, + pub vout: u32, + pub value: Amount, + pub scriptpubkey: ScriptBuf, + pub block_height: Height, + pub block_hash: BlockHash, + pub timestamp: i32, + pub spent: bool, +} + +impl From for UtxoData { + fn from(value: UtxoResponse) -> Self { + Self { + txid: value.txid, + vout: value.vout, + value: value.value, + scriptpubkey: value.scriptpubkey, + spent: value.spent, + } + } +} + +#[derive(Debug, Deserialize)] +pub struct SpentIndexResponse { + pub block_hash: BlockHash, + 
pub data: Vec, +} + +impl From for SpentIndexData { + fn from(value: SpentIndexResponse) -> Self { + Self { + data: value.data.into_iter().map(|x| x.hex).collect(), + } + } +} + +#[derive(Deserialize, Debug)] +#[serde(transparent)] +pub struct MyHex { + #[serde(with = "hex::serde")] + pub hex: Vec, +} + +#[derive(Debug, Deserialize)] +pub struct FilterResponse { + pub block_hash: BlockHash, + pub block_height: Height, + pub data: MyHex, + pub filter_type: i32, +} + +impl From for FilterData { + fn from(value: FilterResponse) -> Self { + Self { + block_hash: value.block_hash, + data: value.data.hex, + } + } +} + +#[derive(Debug, Serialize)] +pub struct ForwardTxRequest { + data: String, +} + +impl ForwardTxRequest { + pub fn new(tx_hex: String) -> Self { + Self { data: tx_hex } + } +} + +#[derive(Debug, Deserialize)] +pub struct InfoResponse { + #[serde(deserialize_with = "deserialize_network")] + pub network: Network, + pub height: Height, + pub tweaks_only: bool, + pub tweaks_full_basic: bool, + pub tweaks_full_with_dust_filter: bool, + pub tweaks_cut_through_with_dust_filter: bool, +} + +fn deserialize_network<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let buf = String::deserialize(deserializer)?; + + Network::from_core_arg(&buf).map_err(serde::de::Error::custom) +} diff --git a/backend-blindbit-wasm/src/lib.rs b/backend-blindbit-wasm/src/lib.rs new file mode 100644 index 0000000..c25cde0 --- /dev/null +++ b/backend-blindbit-wasm/src/lib.rs @@ -0,0 +1,41 @@ +mod backend; +mod client; + +// Re-export backend functionality +pub use backend::BlindbitBackend; +pub use client::{BlindbitClient, HttpClient}; + +#[cfg(feature = "reqwest-client")] +pub use client::ReqwestClient; + +pub use async_trait; +pub use futures; +pub use futures_util; + +// Re-export core types and traits (avoiding module name conflicts) +pub use spdk_core::{ + BlockData, + BlockDataIterator, + ChainBackend, + // Re-exported external types + FeeRate, + FilterData, + 
OutputSpendStatus, + OwnedOutput, + Recipient, + RecipientAddress, + SilentPaymentUnsignedTransaction, + SpClient, + SpendKey, + SpentIndexData, + Updater, + UtxoData, + // Constants + DATA_CARRIER_SIZE, + DUST_THRESHOLD, + NUMS, + PSBT_SP_ADDRESS_KEY, + PSBT_SP_PREFIX, + PSBT_SP_SUBTYPE, + PSBT_SP_TWEAK_KEY, +}; diff --git a/spdk-core/Cargo.toml b/spdk-core/Cargo.toml new file mode 100644 index 0000000..8118a7e --- /dev/null +++ b/spdk-core/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "spdk-core" +version = "0.1.0" +edition = "2021" +repository.workspace = true + +[lib] +crate-type = ["rlib"] + +[features] +# Default: both sync and async APIs available, with parallelization +default = ["parallel", "async"] + +# Sync-only mode - excludes async code and async dependencies +sync = [] + +# Enable async APIs (included by default, excluded with sync) +async = ["dep:futures", "dep:async-trait"] + +# Enable CPU parallelization with rayon (native only, recommended) +parallel = ["dep:rayon"] + +[dependencies] +# Core dependencies - always available +silentpayments.workspace = true +anyhow.workspace = true +serde.workspace = true +serde_json.workspace = true +bitcoin.workspace = true +hex.workspace = true +bdk_coin_select.workspace = true + +# Async dependencies - optional +futures = { workspace = true, optional = true } +async-trait = { workspace = true, optional = true } + +# Parallelization (not available on WASM) +[target.'cfg(not(target_arch = "wasm32"))'.dependencies] +rayon = { workspace = true, optional = true } diff --git a/spdk-core/src/backend/backend.rs b/spdk-core/src/backend/backend.rs new file mode 100644 index 0000000..f23b55c --- /dev/null +++ b/spdk-core/src/backend/backend.rs @@ -0,0 +1,33 @@ +use std::ops::RangeInclusive; + +use anyhow::Result; +use bitcoin::{absolute::Height, Amount}; + +use crate::{BlockData, SpentIndexData, UtxoData}; + +/// Iterator type for block data that conditionally includes `Send` bound. 
+///
+/// - For native targets: includes `Send` bound for thread safety
+/// - For WASM targets: omits `Send` since WASM is single-threaded
+// For native targets, we require Send
+#[cfg(not(target_arch = "wasm32"))]
+pub type BlockDataIterator = Box<dyn Iterator<Item = Result<BlockData>> + Send>;
+
+// For WASM targets, we don't require Send
+#[cfg(target_arch = "wasm32")]
+pub type BlockDataIterator = Box<dyn Iterator<Item = Result<BlockData>>>;
+
+pub trait ChainBackend {
+    fn get_block_data_for_range(
+        &self,
+        range: RangeInclusive<u32>,
+        dust_limit: Amount,
+        with_cutthrough: bool,
+    ) -> BlockDataIterator;
+
+    fn spent_index(&self, block_height: Height) -> Result<SpentIndexData>;
+
+    fn utxos(&self, block_height: Height) -> Result<Vec<UtxoData>>;
+
+    fn block_height(&self) -> Result<Height>;
+}
diff --git a/spdk-core/src/backend/backend_async.rs b/spdk-core/src/backend/backend_async.rs
new file mode 100644
index 0000000..de22c3f
--- /dev/null
+++ b/spdk-core/src/backend/backend_async.rs
@@ -0,0 +1,65 @@
+use std::ops::RangeInclusive;
+use std::pin::Pin;
+
+use anyhow::Result;
+use bitcoin::{absolute::Height, Amount};
+use futures::Stream;
+
+use crate::{BlockData, SpentIndexData, UtxoData};
+
+/// Async stream type for block data that conditionally includes `Send` bound.
+///
+/// - For native targets: includes `Send` bound for thread safety
+/// - For WASM targets: omits `Send` since WASM is single-threaded
+// For native targets, we require Send
+#[cfg(not(target_arch = "wasm32"))]
+pub type BlockDataStream = Pin<Box<dyn Stream<Item = Result<BlockData>> + Send>>;
+
+// For WASM targets, we don't require Send
+#[cfg(target_arch = "wasm32")]
+pub type BlockDataStream = Pin<Box<dyn Stream<Item = Result<BlockData>>>>;
+
+/// Async version of ChainBackend for non-blocking I/O operations
+#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)]
+#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?Send))]
+pub trait AsyncChainBackend: Send + Sync {
+    /// Get a stream of block data for a range of blocks
+    ///
+    /// # Arguments
+    /// * `range` - Range of block heights to fetch
+    /// * `dust_limit` - Minimum amount to consider (dust outputs are ignored)
+    /// * `with_cutthrough` - Whether to use cutthrough optimization
+    ///
+    /// # Returns
+    /// * Stream of block data results
+    fn get_block_data_stream(
+        &self,
+        range: RangeInclusive<u32>,
+        dust_limit: Amount,
+        with_cutthrough: bool,
+    ) -> BlockDataStream;
+
+    /// Get spent index data for a specific block height
+    ///
+    /// # Arguments
+    /// * `block_height` - Block height to query
+    ///
+    /// # Returns
+    /// * Spent index data for the block
+    async fn spent_index(&self, block_height: Height) -> Result<SpentIndexData>;
+
+    /// Get UTXOs for a specific block height
+    ///
+    /// # Arguments
+    /// * `block_height` - Block height to query
+    ///
+    /// # Returns
+    /// * Vector of UTXO data
+    async fn utxos(&self, block_height: Height) -> Result<Vec<UtxoData>>;
+
+    /// Get the current blockchain tip height
+    ///
+    /// # Returns
+    /// * Current block height
+    async fn block_height(&self) -> Result<Height>;
+}
diff --git a/spdk-core/src/backend/mod.rs b/spdk-core/src/backend/mod.rs
new file mode 100644
index 0000000..35616f7
--- /dev/null
+++ b/spdk-core/src/backend/mod.rs
@@ -0,0 +1,10 @@
+mod backend;
+
+// Async backend - available by default, excluded when "sync" feature is enabled
+#[cfg(feature = "async")] +mod backend_async; + +pub use backend::{BlockDataIterator, ChainBackend}; + +#[cfg(feature = "async")] +pub use backend_async::{AsyncChainBackend, BlockDataStream}; diff --git a/src/client/client.rs b/spdk-core/src/client/client.rs similarity index 75% rename from src/client/client.rs rename to spdk-core/src/client/client.rs index 0a4c1d0..21ca242 100644 --- a/src/client/client.rs +++ b/spdk-core/src/client/client.rs @@ -1,14 +1,17 @@ -use std::{collections::HashMap, io::Write, str::FromStr}; +use std::{io::Write, str::FromStr}; use bitcoin::hashes::Hash; use bitcoin::{ key::constants::ONE, - secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey}, + secp256k1::{Scalar, Secp256k1, SecretKey}, Network, XOnlyPublicKey, }; use serde::{Deserialize, Serialize}; +use bitcoin::secp256k1::PublicKey; use silentpayments::utils as sp_utils; +use std::collections::HashMap; + use silentpayments::Network as SpNetwork; use silentpayments::{ bitcoin_hashes::sha256, @@ -108,16 +111,42 @@ impl SpClient { &self, tweak_data_vec: Vec, ) -> Result> { - use rayon::prelude::*; let b_scan = &self.get_scan_key(); + // Use parallel iteration for CPU-intensive ECDH calculations + #[cfg(all(not(target_arch = "wasm32"), feature = "parallel"))] + let shared_secrets: Vec = { + use rayon::prelude::*; + tweak_data_vec + .into_par_iter() + .map(|tweak| sp_utils::receiving::calculate_ecdh_shared_secret(&tweak, b_scan)) + .collect() + }; + + // Sequential fallback (WASM or no parallel feature) + #[cfg(not(all(not(target_arch = "wasm32"), feature = "parallel")))] let shared_secrets: Vec = tweak_data_vec - .into_par_iter() + .into_iter() .map(|tweak| sp_utils::receiving::calculate_ecdh_shared_secret(&tweak, b_scan)) .collect(); + // Use parallel iteration for CPU-intensive SPK derivation + #[cfg(all(not(target_arch = "wasm32"), feature = "parallel"))] + let items: Result> = { + use rayon::prelude::*; + shared_secrets + .into_par_iter() + .map(|secret| { + let spks = 
self.sp_receiver.get_spks_from_shared_secret(&secret)?; + Ok((secret, spks.into_values())) + }) + .collect() + }; + + // Sequential fallback (WASM or no parallel feature) + #[cfg(not(all(not(target_arch = "wasm32"), feature = "parallel")))] let items: Result> = shared_secrets - .into_par_iter() + .into_iter() .map(|secret| { let spks = self.sp_receiver.get_spks_from_shared_secret(&secret)?; diff --git a/src/client/mod.rs b/spdk-core/src/client/mod.rs similarity index 100% rename from src/client/mod.rs rename to spdk-core/src/client/mod.rs diff --git a/src/client/spend.rs b/spdk-core/src/client/spend.rs similarity index 100% rename from src/client/spend.rs rename to spdk-core/src/client/spend.rs diff --git a/src/client/structs.rs b/spdk-core/src/client/structs.rs similarity index 100% rename from src/client/structs.rs rename to spdk-core/src/client/structs.rs diff --git a/src/constants.rs b/spdk-core/src/constants.rs similarity index 100% rename from src/constants.rs rename to spdk-core/src/constants.rs diff --git a/spdk-core/src/lib.rs b/spdk-core/src/lib.rs new file mode 100644 index 0000000..9818219 --- /dev/null +++ b/spdk-core/src/lib.rs @@ -0,0 +1,25 @@ +pub mod backend; +pub mod client; +pub mod constants; +pub mod scanner; +pub mod types; +pub mod updater; + +// Re-export core functionality +pub use backend::{BlockDataIterator, ChainBackend}; +pub use client::*; +pub use constants::*; +// SpScanner is the concrete implementation - consumers don't implement traits anymore +pub use scanner::SpScanner; +pub use types::*; +pub use updater::Updater; +// Re-export commonly used external types +pub use bdk_coin_select::FeeRate; +pub use bitcoin; +pub use silentpayments; + +// Async types available when "async" feature is enabled +#[cfg(feature = "async")] +pub use backend::{AsyncChainBackend, BlockDataStream}; +#[cfg(feature = "async")] +pub use updater::AsyncUpdater; diff --git a/spdk-core/src/scanner/mod.rs b/spdk-core/src/scanner/mod.rs new file mode 100644 
index 0000000..88e1be0 --- /dev/null +++ b/spdk-core/src/scanner/mod.rs @@ -0,0 +1,1380 @@ +use std::collections::{HashMap, HashSet}; +use std::sync::atomic::AtomicBool; + +#[cfg(not(all(not(target_arch = "wasm32"), feature = "parallel")))] +use anyhow::Error; +use anyhow::Result; +use bitcoin::{ + absolute::Height, bip158::BlockFilter, hashes::Hash as _, Amount, BlockHash, OutPoint, Txid, + XOnlyPublicKey, +}; +use silentpayments::receiving::Label; + +#[cfg(all(not(target_arch = "wasm32"), feature = "parallel"))] +use rayon::prelude::*; + +use crate::{ + BlockData, ChainBackend, FilterData, OutputSpendStatus, OwnedOutput, SpClient, Updater, + UtxoData, +}; + +/// Internal trait for synchronous scanning of silent payment blocks +/// +/// This trait abstracts the core scanning functionality. +/// Consumers should use the concrete `SpScanner` type instead of implementing this trait. +pub(crate) trait SyncSpScannerTrait { + /// Scan a range of blocks for silent payment outputs and inputs + /// + /// # Arguments + /// * `start` - Starting block height (inclusive) + /// * `end` - Ending block height (inclusive) + /// * `dust_limit` - Minimum amount to consider (dust outputs are ignored) + /// * `with_cutthrough` - Whether to use cutthrough optimization + fn scan_blocks( + &mut self, + start: Height, + end: Height, + dust_limit: Amount, + with_cutthrough: bool, + ) -> Result<()>; + + /// Process a single block's data + /// + /// # Arguments + /// * `blockdata` - Block data containing tweaks and filters + /// + /// # Returns + /// * `(found_outputs, found_inputs)` - Tuple of found outputs and spent inputs + fn process_block( + &mut self, + blockdata: BlockData, + ) -> Result<(HashMap, HashSet)>; + + /// Process block outputs to find owned silent payment outputs + /// + /// # Arguments + /// * `blkheight` - Block height + /// * `tweaks` - List of tweak public keys + /// * `new_utxo_filter` - Filter data for new UTXOs + /// + /// # Returns + /// * Map of outpoints to owned 
outputs + fn process_block_outputs( + &self, + blkheight: Height, + tweaks: Vec, + new_utxo_filter: FilterData, + ) -> Result>; + + /// Process block inputs to find spent outputs + /// + /// # Arguments + /// * `blkheight` - Block height + /// * `spent_filter` - Filter data for spent outputs + /// + /// # Returns + /// * Set of spent outpoints + fn process_block_inputs( + &self, + blkheight: Height, + spent_filter: FilterData, + ) -> Result>; + + /// Get the block data iterator for a range of blocks + /// + /// # Arguments + /// * `range` - Range of block heights + /// * `dust_limit` - Minimum amount to consider + /// * `with_cutthrough` - Whether to use cutthrough optimization + /// + /// # Returns + /// * Iterator of block data results + fn get_block_data_iterator( + &self, + range: std::ops::RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> crate::BlockDataIterator; + + /// Check if scanning should be interrupted + /// + /// # Returns + /// * `true` if scanning should stop, `false` otherwise + fn should_interrupt(&self) -> bool; + + /// Save current state to persistent storage + fn save_state(&mut self) -> Result<()>; + + /// Record found outputs for a block + /// + /// # Arguments + /// * `height` - Block height + /// * `block_hash` - Block hash + /// * `outputs` - Found outputs + fn record_outputs( + &mut self, + height: Height, + block_hash: BlockHash, + outputs: HashMap, + ) -> Result<()>; + + /// Record spent inputs for a block + /// + /// # Arguments + /// * `height` - Block height + /// * `block_hash` - Block hash + /// * `inputs` - Spent inputs + fn record_inputs( + &mut self, + height: Height, + block_hash: BlockHash, + inputs: HashSet, + ) -> Result<()>; + + /// Record scan progress + /// + /// # Arguments + /// * `start` - Start height + /// * `current` - Current height + /// * `end` - End height + fn record_progress(&mut self, start: Height, current: Height, end: Height) -> Result<()>; + + /// Get the silent payment client + fn 
client(&self) -> &SpClient; + + /// Get the chain backend + fn backend(&self) -> &dyn ChainBackend; + + /// Get the updater + fn updater(&mut self) -> &mut dyn Updater; + + // Helper methods with default implementations + + /// Process multiple blocks from an iterator + /// + /// This is a default implementation that can be overridden if needed + fn process_blocks(&mut self, start: Height, end: Height, block_data_iter: I) -> Result<()> + where + I: Iterator>, + { + use std::time::{Duration, Instant}; + + let mut update_time = Instant::now(); + + for blockdata_result in block_data_iter { + let blockdata = blockdata_result?; + let blkheight = blockdata.blkheight; + let blkhash = blockdata.blkhash; + + // stop scanning and return if interrupted + if self.should_interrupt() { + self.save_state()?; + return Ok(()); + } + + let mut save_to_storage = false; + + // always save on last block or after 30 seconds since last save + if blkheight == end || update_time.elapsed() > Duration::from_secs(30) { + save_to_storage = true; + } + + let (found_outputs, found_inputs) = self.process_block(blockdata)?; + + if !found_outputs.is_empty() { + save_to_storage = true; + self.record_outputs(blkheight, blkhash, found_outputs)?; + } + + if !found_inputs.is_empty() { + save_to_storage = true; + self.record_inputs(blkheight, blkhash, found_inputs)?; + } + + // tell the updater we scanned this block + self.record_progress(start, blkheight, end)?; + + if save_to_storage { + self.save_state()?; + update_time = Instant::now(); + } + } + + Ok(()) + } + + /// Helper method to process blocks sequentially + /// + /// # Arguments + /// * `start` - Start height + /// * `end` - End height + /// * `block_data_iter` - Iterator of block data + /// * `with_cutthrough` - Whether cutthrough is enabled (unused, kept for API compatibility) + /// + /// # Returns + /// * Result indicating success or failure + fn process_blocks_auto( + &mut self, + start: Height, + end: Height, + block_data_iter: I, + 
_with_cutthrough: bool, + ) -> Result<()> + where + I: Iterator>, + { + // Always use sequential processing + self.process_blocks(start, end, block_data_iter) + } + + /// Scan UTXOs for a given block and secrets map + /// + /// This is a default implementation that can be overridden if needed + fn scan_utxos( + &self, + blkheight: Height, + secrets_map: HashMap<[u8; 34], bitcoin::secp256k1::PublicKey>, + ) -> Result, UtxoData, bitcoin::secp256k1::Scalar)>> { + let utxos = self.backend().utxos(blkheight)?; + + // group utxos by the txid + let mut txmap: HashMap> = HashMap::new(); + for utxo in utxos { + txmap.entry(utxo.txid).or_default().push(utxo); + } + + let client = self.client(); + + // Parallel transaction scanning on native platforms with parallel feature + #[cfg(all(not(target_arch = "wasm32"), feature = "parallel"))] + let res: Vec<_> = txmap + .into_par_iter() + .filter_map(|(_, utxos)| { + // check if we know the secret to any of the spks + let secret = utxos.iter().find_map(|utxo| { + let spk = utxo.scriptpubkey.as_bytes(); + secrets_map.get(spk) + })?; + + let output_keys: Vec = utxos + .iter() + .filter_map(|x| { + if x.scriptpubkey.is_p2tr() { + XOnlyPublicKey::from_slice(&x.scriptpubkey.as_bytes()[2..]).ok() + } else { + None + } + }) + .collect(); + + // CPU-intensive cryptographic operation + let ours = client + .sp_receiver + .scan_transaction(secret, output_keys) + .ok()?; + + // Match UTXOs against our keys + let matched: Vec<_> = utxos + .into_iter() + .filter(|utxo| utxo.scriptpubkey.is_p2tr() && !utxo.spent) + .filter_map(|utxo| { + let xonly = + XOnlyPublicKey::from_slice(&utxo.scriptpubkey.as_bytes()[2..]).ok()?; + ours.iter().find_map(|(label, map)| { + map.get(&xonly) + .map(|scalar| (label.clone(), utxo.clone(), *scalar)) + }) + }) + .collect(); + + if matched.is_empty() { + None + } else { + Some(matched) + } + }) + .flatten() + .collect(); + + // Sequential fallback (WASM or no parallel feature) + #[cfg(not(all(not(target_arch = 
"wasm32"), feature = "parallel")))] + let res: Vec<_> = { + let mut result = Vec::new(); + for utxos in txmap.into_values() { + // check if we know the secret to any of the spks + let mut secret = None; + for utxo in utxos.iter() { + let spk = utxo.scriptpubkey.as_bytes(); + if let Some(s) = secrets_map.get(spk) { + secret = Some(s); + break; + } + } + + // skip this tx if no secret is found + let secret = match secret { + Some(secret) => secret, + None => continue, + }; + + let output_keys: Result> = utxos + .iter() + .filter_map(|x| { + if x.scriptpubkey.is_p2tr() { + Some( + XOnlyPublicKey::from_slice(&x.scriptpubkey.as_bytes()[2..]) + .map_err(Error::new), + ) + } else { + None + } + }) + .collect(); + + let ours = client.sp_receiver.scan_transaction(secret, output_keys?)?; + + for utxo in utxos { + if !utxo.scriptpubkey.is_p2tr() || utxo.spent { + continue; + } + + match XOnlyPublicKey::from_slice(&utxo.scriptpubkey.as_bytes()[2..]) { + Ok(xonly) => { + for (label, map) in ours.iter() { + if let Some(scalar) = map.get(&xonly) { + result.push((label.clone(), utxo, *scalar)); + break; + } + } + } + Err(_) => todo!(), + } + } + } + result + }; + + Ok(res) + } + + /// Check if block contains relevant output transactions + /// + /// This is a default implementation that can be overridden if needed + fn check_block_outputs( + created_utxo_filter: BlockFilter, + blkhash: BlockHash, + candidate_spks: Vec<&[u8; 34]>, + ) -> Result { + // check output scripts + let output_keys: Vec<_> = candidate_spks + .into_iter() + .map(|spk| spk[2..].as_ref()) + .collect(); + + // note: match will always return true for an empty query! + if !output_keys.is_empty() { + Ok(created_utxo_filter.match_any(&blkhash, &mut output_keys.into_iter())?) 
+ } else { + Ok(false) + } + } + + /// Get input hashes for owned outpoints + fn get_input_hashes(&self, blkhash: BlockHash) -> Result>; + + /// Check if block contains relevant input transactions + /// + /// This is a default implementation that can be overridden if needed + fn check_block_inputs( + &self, + spent_filter: BlockFilter, + blkhash: BlockHash, + input_hashes: Vec<[u8; 8]>, + ) -> Result { + // note: match will always return true for an empty query! + if !input_hashes.is_empty() { + Ok(spent_filter.match_any(&blkhash, &mut input_hashes.into_iter())?) + } else { + Ok(false) + } + } +} + +/// Internal trait for async scanning of silent payment blocks +/// +/// This trait provides async methods for scanning silent payment blocks. +/// Consumers should use the concrete `SpScanner` type instead of implementing this trait. +#[cfg(feature = "async")] +#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)] +#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?Send))] +pub(crate) trait AsyncSpScannerTrait: Send + Sync { + /// Scan a range of blocks for silent payment outputs and inputs + /// + /// # Arguments + /// * `start` - Starting block height (inclusive) + /// * `end` - Ending block height (inclusive) + /// * `dust_limit` - Minimum amount to consider (dust outputs are ignored) + /// * `with_cutthrough` - Whether to use cutthrough optimization + async fn scan_blocks( + &mut self, + start: Height, + end: Height, + dust_limit: Amount, + with_cutthrough: bool, + ) -> Result<()>; + + /// Process a single block's data + /// + /// # Arguments + /// * `blockdata` - Block data containing tweaks and filters + /// + /// # Returns + /// * `(found_outputs, found_inputs)` - Tuple of found outputs and spent inputs + async fn process_block( + &mut self, + blockdata: BlockData, + ) -> Result<(HashMap, HashSet)>; + + /// Process block outputs to find owned silent payment outputs + /// + /// # Arguments + /// * `blkheight` - Block height + /// * `tweaks` 
- List of tweak public keys + /// * `new_utxo_filter` - Filter data for new UTXOs + /// + /// # Returns + /// * Map of outpoints to owned outputs + async fn process_block_outputs( + &self, + blkheight: Height, + tweaks: Vec, + new_utxo_filter: FilterData, + ) -> Result>; + + /// Process block inputs to find spent outputs + /// + /// # Arguments + /// * `blkheight` - Block height + /// * `spent_filter` - Filter data for spent outputs + /// + /// # Returns + /// * Set of spent outpoints + async fn process_block_inputs( + &self, + blkheight: Height, + spent_filter: FilterData, + ) -> Result>; + + /// Get the block data stream for a range of blocks + /// + /// # Arguments + /// * `range` - Range of block heights + /// * `dust_limit` - Minimum amount to consider + /// * `with_cutthrough` - Whether to use cutthrough optimization + /// + /// # Returns + /// * Stream of block data results + fn get_block_data_stream( + &self, + range: std::ops::RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> crate::backend::BlockDataStream; + + /// Check if scanning should be interrupted + /// + /// # Returns + /// * `true` if scanning should stop, `false` otherwise + fn should_interrupt(&self) -> bool; + + /// Save current state to persistent storage + async fn save_state(&mut self) -> Result<()>; + + /// Record found outputs for a block + /// + /// # Arguments + /// * `height` - Block height + /// * `block_hash` - Block hash + /// * `outputs` - Found outputs + async fn record_outputs( + &mut self, + height: Height, + block_hash: BlockHash, + outputs: HashMap, + ) -> Result<()>; + + /// Record spent inputs for a block + /// + /// # Arguments + /// * `height` - Block height + /// * `block_hash` - Block hash + /// * `inputs` - Spent inputs + async fn record_inputs( + &mut self, + height: Height, + block_hash: BlockHash, + inputs: HashSet, + ) -> Result<()>; + + /// Record scan progress + /// + /// # Arguments + /// * `start` - Start height + /// * `current` - Current 
height + /// * `end` - End height + async fn record_progress(&mut self, start: Height, current: Height, end: Height) -> Result<()>; + + /// Get the silent payment client + fn client(&self) -> &SpClient; + + /// Get the async chain backend + fn backend(&self) -> &dyn crate::backend::AsyncChainBackend; + + /// Get the async updater + fn updater(&mut self) -> &mut dyn crate::updater::AsyncUpdater; + + // Helper methods with default implementations + + /// Process multiple blocks from a stream + /// + /// This is a default implementation that can be overridden if needed + #[cfg(not(target_arch = "wasm32"))] + async fn process_blocks( + &mut self, + start: Height, + end: Height, + mut block_data_stream: crate::backend::BlockDataStream, + ) -> Result<()> { + use futures::StreamExt; + use std::time::{Duration, Instant}; + + let mut update_time = Instant::now(); + + while let Some(blockdata_result) = block_data_stream.next().await { + let blockdata = blockdata_result?; + let blkheight = blockdata.blkheight; + let blkhash = blockdata.blkhash; + + // stop scanning and return if interrupted + if self.should_interrupt() { + self.save_state().await?; + return Ok(()); + } + + let mut save_to_storage = false; + + // always save on last block or after 30 seconds since last save + if blkheight == end || update_time.elapsed() > Duration::from_secs(30) { + save_to_storage = true; + } + + let (found_outputs, found_inputs) = self.process_block(blockdata).await?; + + if !found_outputs.is_empty() { + save_to_storage = true; + self.record_outputs(blkheight, blkhash, found_outputs) + .await?; + } + + if !found_inputs.is_empty() { + save_to_storage = true; + self.record_inputs(blkheight, blkhash, found_inputs).await?; + } + + // tell the updater we scanned this block + self.record_progress(start, blkheight, end).await?; + + if save_to_storage { + self.save_state().await?; + update_time = Instant::now(); + } + } + + Ok(()) + } + + /// Process multiple blocks from a stream (WASM version) + 
/// + /// This is a default implementation that can be overridden if needed + #[cfg(target_arch = "wasm32")] + async fn process_blocks( + &mut self, + start: Height, + end: Height, + mut block_data_stream: crate::backend::BlockDataStream, + ) -> Result<()> { + use futures::StreamExt; + use std::time::{Duration, Instant}; + + let mut update_time = Instant::now(); + + while let Some(blockdata_result) = block_data_stream.next().await { + let blockdata = blockdata_result?; + let blkheight = blockdata.blkheight; + let blkhash = blockdata.blkhash; + + // stop scanning and return if interrupted + if self.should_interrupt() { + self.save_state().await?; + return Ok(()); + } + + let mut save_to_storage = false; + + // always save on last block or after 30 seconds since last save + if blkheight == end || update_time.elapsed() > Duration::from_secs(30) { + save_to_storage = true; + } + + let (found_outputs, found_inputs) = self.process_block(blockdata).await?; + + if !found_outputs.is_empty() { + save_to_storage = true; + self.record_outputs(blkheight, blkhash, found_outputs) + .await?; + } + + if !found_inputs.is_empty() { + save_to_storage = true; + self.record_inputs(blkheight, blkhash, found_inputs).await?; + } + + // tell the updater we scanned this block + self.record_progress(start, blkheight, end).await?; + + if save_to_storage { + self.save_state().await?; + update_time = Instant::now(); + } + } + + Ok(()) + } + + /// Scan UTXOs for a given block and secrets map + /// + /// This is a default implementation that can be overridden if needed + async fn scan_utxos( + &self, + blkheight: Height, + secrets_map: HashMap<[u8; 34], bitcoin::secp256k1::PublicKey>, + ) -> Result, UtxoData, bitcoin::secp256k1::Scalar)>> { + let utxos = self.backend().utxos(blkheight).await?; + + // Group utxos by the txid + let mut txmap: HashMap> = HashMap::new(); + for utxo in utxos { + txmap.entry(utxo.txid).or_default().push(utxo); + } + + let client = self.client(); + + // Parallel 
transaction scanning on native platforms with parallel feature + // This uses Rayon for CPU parallelism. Rayon uses its own thread pool internally, + // so while this blocks the current async task, it doesn't block the entire runtime + // on multi-threaded executors. The CPU work benefits significantly from parallelism. + #[cfg(all(not(target_arch = "wasm32"), feature = "parallel"))] + let res = { + use rayon::prelude::*; + use std::sync::Arc; + + // Clone data needed for parallel processing + let secrets_map = Arc::new(secrets_map); + let client = Arc::new(client.clone()); + + // Run CPU-intensive Rayon work + // Rayon uses its own thread pool, so this parallelizes across CPU cores + txmap + .into_par_iter() + .filter_map(|(_, utxos)| { + // check if we know the secret to any of the spks + let secret = utxos.iter().find_map(|utxo| { + let spk = utxo.scriptpubkey.as_bytes(); + secrets_map.get(spk) + })?; + + let output_keys: Vec = utxos + .iter() + .filter_map(|x| { + if x.scriptpubkey.is_p2tr() { + XOnlyPublicKey::from_slice(&x.scriptpubkey.as_bytes()[2..]).ok() + } else { + None + } + }) + .collect(); + + // CPU-intensive cryptographic operation + let ours = client + .sp_receiver + .scan_transaction(secret, output_keys) + .ok()?; + + // Match UTXOs against our keys + let matched: Vec<_> = utxos + .into_iter() + .filter(|utxo| utxo.scriptpubkey.is_p2tr() && !utxo.spent) + .filter_map(|utxo| { + let xonly = + XOnlyPublicKey::from_slice(&utxo.scriptpubkey.as_bytes()[2..]) + .ok()?; + ours.iter().find_map(|(label, map)| { + map.get(&xonly) + .map(|scalar| (label.clone(), utxo.clone(), *scalar)) + }) + }) + .collect(); + + if matched.is_empty() { + None + } else { + Some(matched) + } + }) + .flatten() + .collect() + }; + + // Sequential fallback (WASM or no parallel feature) + #[cfg(not(all(not(target_arch = "wasm32"), feature = "parallel")))] + let res: Vec<_> = { + let mut result = Vec::new(); + for utxos in txmap.into_values() { + // check if we know the secret to 
any of the spks + let mut secret = None; + for utxo in utxos.iter() { + let spk = utxo.scriptpubkey.as_bytes(); + if let Some(s) = secrets_map.get(spk) { + secret = Some(s); + break; + } + } + + // skip this tx if no secret is found + let secret = match secret { + Some(secret) => secret, + None => continue, + }; + + let output_keys: Result> = utxos + .iter() + .filter_map(|x| { + if x.scriptpubkey.is_p2tr() { + Some( + XOnlyPublicKey::from_slice(&x.scriptpubkey.as_bytes()[2..]) + .map_err(|e| anyhow::Error::new(e)), + ) + } else { + None + } + }) + .collect(); + + let ours = client.sp_receiver.scan_transaction(secret, output_keys?)?; + + for utxo in utxos { + if !utxo.scriptpubkey.is_p2tr() || utxo.spent { + continue; + } + + match XOnlyPublicKey::from_slice(&utxo.scriptpubkey.as_bytes()[2..]) { + Ok(xonly) => { + for (label, map) in ours.iter() { + if let Some(scalar) = map.get(&xonly) { + result.push((label.clone(), utxo.clone(), *scalar)); + break; + } + } + } + Err(_) => todo!(), + } + } + } + result + }; + + Ok(res) + } + + /// Check if block contains relevant output transactions + /// + /// This is a default implementation that can be overridden if needed + fn check_block_outputs( + created_utxo_filter: BlockFilter, + blkhash: BlockHash, + candidate_spks: Vec<&[u8; 34]>, + ) -> Result { + // check output scripts + let output_keys: Vec<_> = candidate_spks + .into_iter() + .map(|spk| spk[2..].as_ref()) + .collect(); + + // note: match will always return true for an empty query! + if !output_keys.is_empty() { + Ok(created_utxo_filter.match_any(&blkhash, &mut output_keys.into_iter())?) 
+ } else { + Ok(false) + } + } + + /// Get input hashes for owned outpoints + async fn get_input_hashes(&self, blkhash: BlockHash) -> Result>; + + /// Check if block contains relevant input transactions + /// + /// This is a default implementation that can be overridden if needed + fn check_block_inputs( + &self, + spent_filter: BlockFilter, + blkhash: BlockHash, + input_hashes: Vec<[u8; 8]>, + ) -> Result { + // note: match will always return true for an empty query! + if !input_hashes.is_empty() { + Ok(spent_filter.match_any(&blkhash, &mut input_hashes.into_iter())?) + } else { + Ok(false) + } + } +} + +// ============================================================================= +// Concrete SpScanner Implementation +// ============================================================================= + +/// Public scanner implementation for silent payments +/// +/// This type conditionally implements either synchronous or asynchronous scanning +/// based on the `async` feature flag. Consumers should use this type instead of +/// implementing the scanner traits directly. +/// +/// # Type Parameters +/// * `B` - The chain backend type (sync or async depending on feature flags) +/// * `U` - The updater type (sync or async depending on feature flags) +#[cfg(not(feature = "async"))] +pub struct SpScanner<'a, B: ChainBackend, U: Updater> { + client: SpClient, + backend: B, + updater: U, + owned_outpoints: HashSet, + keep_scanning: &'a AtomicBool, +} + +/// Public scanner implementation for silent payments (async variant) +/// +/// This type conditionally implements either synchronous or asynchronous scanning +/// based on the `async` feature flag. Consumers should use this type instead of +/// implementing the scanner traits directly. 
+/// +/// # Type Parameters +/// * `B` - The async chain backend type +/// * `U` - The async updater type +#[cfg(feature = "async")] +pub struct SpScanner<'a, B: crate::backend::AsyncChainBackend, U: crate::updater::AsyncUpdater> { + client: SpClient, + backend: B, + updater: U, + owned_outpoints: HashSet, + keep_scanning: &'a AtomicBool, +} + +// Synchronous implementation +#[cfg(not(feature = "async"))] +impl<'a, B: ChainBackend, U: Updater> SpScanner<'a, B, U> { + /// Create a new SpScanner instance + /// + /// # Arguments + /// * `client` - The silent payment client + /// * `backend` - The chain backend for blockchain data access + /// * `updater` - The updater for tracking scan progress + /// * `owned_outpoints` - Set of outpoints to track for spent detection + /// * `keep_scanning` - Atomic bool reference for interrupting the scan + pub fn new( + client: SpClient, + backend: B, + updater: U, + owned_outpoints: HashSet, + keep_scanning: &'a AtomicBool, + ) -> Self { + Self { + client, + backend, + updater, + owned_outpoints, + keep_scanning, + } + } + + /// Scan a range of blocks for silent payment outputs and inputs + /// + /// # Arguments + /// * `start` - Starting block height (inclusive) + /// * `end` - Ending block height (inclusive) + /// * `dust_limit` - Minimum amount to consider (dust outputs are ignored) + /// * `with_cutthrough` - Whether to use cutthrough optimization + pub fn scan_blocks( + &mut self, + start: Height, + end: Height, + dust_limit: Amount, + with_cutthrough: bool, + ) -> Result<()> { + SyncSpScannerTrait::scan_blocks(self, start, end, dust_limit, with_cutthrough) + } + + fn interrupt_requested(&self) -> bool { + !self + .keep_scanning + .load(std::sync::atomic::Ordering::Relaxed) + } +} + +#[cfg(not(feature = "async"))] +impl<'a, B: ChainBackend, U: Updater> SyncSpScannerTrait for SpScanner<'a, B, U> { + fn scan_blocks( + &mut self, + start: Height, + end: Height, + dust_limit: Amount, + with_cutthrough: bool, + ) -> Result<()> { + 
let range = start.to_consensus_u32()..=end.to_consensus_u32(); + let block_data_iter = self.get_block_data_iterator(range, dust_limit, with_cutthrough); + self.process_blocks_auto(start, end, block_data_iter, with_cutthrough) + } + + fn process_block( + &mut self, + blockdata: BlockData, + ) -> Result<(HashMap, HashSet)> { + let blkheight = blockdata.blkheight; + let tweaks = blockdata.tweaks; + let new_utxo_filter = blockdata.new_utxo_filter; + let spent_filter = blockdata.spent_filter; + + let found_outputs = self.process_block_outputs(blkheight, tweaks, new_utxo_filter)?; + + // Update owned outpoints with newly found outputs + for outpoint in found_outputs.keys() { + self.owned_outpoints.insert(*outpoint); + } + + let found_inputs = self.process_block_inputs(blkheight, spent_filter)?; + + Ok((found_outputs, found_inputs)) + } + + fn process_block_outputs( + &self, + blkheight: Height, + tweaks: Vec, + new_utxo_filter: FilterData, + ) -> Result> { + if tweaks.is_empty() { + return Ok(HashMap::new()); + } + + let secrets_map = self.client.get_script_to_secret_map(tweaks)?; + if secrets_map.is_empty() { + return Ok(HashMap::new()); + } + + let candidate_spks: Vec<_> = secrets_map.keys().collect(); + let filter = BlockFilter::new(&new_utxo_filter.data); + let matched = + Self::check_block_outputs(filter, new_utxo_filter.block_hash, candidate_spks)?; + + if !matched { + return Ok(HashMap::new()); + } + + let found_utxos = self.scan_utxos(blkheight, secrets_map)?; + + let mut outputs = HashMap::new(); + for (label, utxo, tweak) in found_utxos { + let outpoint = OutPoint::new(utxo.txid, utxo.vout); + let owned_output = OwnedOutput { + blockheight: blkheight, + tweak: tweak.to_be_bytes(), + amount: utxo.value, + script: utxo.scriptpubkey, + label, + spend_status: OutputSpendStatus::Unspent, + }; + outputs.insert(outpoint, owned_output); + } + + Ok(outputs) + } + + fn process_block_inputs( + &self, + blkheight: Height, + spent_filter: FilterData, + ) -> Result> { + if 
self.owned_outpoints.is_empty() { + return Ok(HashSet::new()); + } + + let input_hashes = self.get_input_hashes(spent_filter.block_hash)?; + if input_hashes.is_empty() { + return Ok(HashSet::new()); + } + + let filter = BlockFilter::new(&spent_filter.data); + let matched = self.check_block_inputs( + filter, + spent_filter.block_hash, + input_hashes.keys().copied().collect(), + )?; + + if !matched { + return Ok(HashSet::new()); + } + + // Get the actual spent inputs from spent index + let spent_index = self.backend.spent_index(blkheight)?; + let spent_inputs: HashSet = spent_index + .data + .into_iter() + .filter_map(|bytes| { + if bytes.len() >= 36 { + let mut txid_bytes = [0u8; 32]; + txid_bytes.copy_from_slice(&bytes[0..32]); + let hash = bitcoin::hashes::sha256d::Hash::from_byte_array(txid_bytes); + let txid = Txid::from_raw_hash(hash); + let vout = u32::from_le_bytes([bytes[32], bytes[33], bytes[34], bytes[35]]); + let outpoint = OutPoint::new(txid, vout); + if self.owned_outpoints.contains(&outpoint) { + Some(outpoint) + } else { + None + } + } else { + None + } + }) + .collect(); + + Ok(spent_inputs) + } + + fn get_block_data_iterator( + &self, + range: std::ops::RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> crate::BlockDataIterator { + self.backend + .get_block_data_for_range(range, dust_limit, with_cutthrough) + } + + fn should_interrupt(&self) -> bool { + self.interrupt_requested() + } + + fn save_state(&mut self) -> Result<()> { + self.updater.save_to_persistent_storage() + } + + fn record_outputs( + &mut self, + height: Height, + block_hash: BlockHash, + outputs: HashMap, + ) -> Result<()> { + self.updater + .record_block_outputs(height, block_hash, outputs) + } + + fn record_inputs( + &mut self, + height: Height, + block_hash: BlockHash, + inputs: HashSet, + ) -> Result<()> { + self.updater.record_block_inputs(height, block_hash, inputs) + } + + fn record_progress(&mut self, start: Height, current: Height, end: Height) -> 
Result<()> { + self.updater.record_scan_progress(start, current, end) + } + + fn client(&self) -> &SpClient { + &self.client + } + + fn backend(&self) -> &dyn ChainBackend { + &self.backend + } + + fn updater(&mut self) -> &mut dyn Updater { + &mut self.updater + } + + fn get_input_hashes(&self, _blkhash: BlockHash) -> Result> { + let mut input_hashes = HashMap::new(); + for outpoint in &self.owned_outpoints { + let mut hash = [0u8; 8]; + hash.copy_from_slice(&outpoint.txid[..8]); + input_hashes.insert(hash, *outpoint); + } + Ok(input_hashes) + } +} + +// Async implementation +#[cfg(feature = "async")] +impl<'a, B: crate::backend::AsyncChainBackend, U: crate::updater::AsyncUpdater> + SpScanner<'a, B, U> +{ + /// Create a new SpScanner instance + /// + /// # Arguments + /// * `client` - The silent payment client + /// * `backend` - The async chain backend for blockchain data access + /// * `updater` - The async updater for tracking scan progress + /// * `owned_outpoints` - Set of outpoints to track for spent detection + /// * `keep_scanning` - Atomic bool reference for interrupting the scan + pub fn new( + client: SpClient, + backend: B, + updater: U, + owned_outpoints: HashSet, + keep_scanning: &'a AtomicBool, + ) -> Self { + Self { + client, + backend, + updater, + owned_outpoints, + keep_scanning, + } + } + + /// Scan a range of blocks for silent payment outputs and inputs + /// + /// # Arguments + /// * `start` - Starting block height (inclusive) + /// * `end` - Ending block height (inclusive) + /// * `dust_limit` - Minimum amount to consider (dust outputs are ignored) + /// * `with_cutthrough` - Whether to use cutthrough optimization + pub async fn scan_blocks( + &mut self, + start: Height, + end: Height, + dust_limit: Amount, + with_cutthrough: bool, + ) -> Result<()> { + AsyncSpScannerTrait::scan_blocks(self, start, end, dust_limit, with_cutthrough).await + } + + fn interrupt_requested(&self) -> bool { + !self + .keep_scanning + 
.load(std::sync::atomic::Ordering::Relaxed) + } +} + +#[cfg(feature = "async")] +#[async_trait::async_trait] +impl<'a, B: crate::backend::AsyncChainBackend, U: crate::updater::AsyncUpdater> AsyncSpScannerTrait + for SpScanner<'a, B, U> +{ + async fn scan_blocks( + &mut self, + start: Height, + end: Height, + dust_limit: Amount, + with_cutthrough: bool, + ) -> Result<()> { + let range = start.to_consensus_u32()..=end.to_consensus_u32(); + let block_data_stream = self.get_block_data_stream(range, dust_limit, with_cutthrough); + self.process_blocks(start, end, block_data_stream).await + } + + async fn process_block( + &mut self, + blockdata: BlockData, + ) -> Result<(HashMap, HashSet)> { + let blkheight = blockdata.blkheight; + let tweaks = blockdata.tweaks; + let new_utxo_filter = blockdata.new_utxo_filter; + let spent_filter = blockdata.spent_filter; + + let found_outputs = self + .process_block_outputs(blkheight, tweaks, new_utxo_filter) + .await?; + + // Update owned outpoints with newly found outputs + for outpoint in found_outputs.keys() { + self.owned_outpoints.insert(*outpoint); + } + + let found_inputs = self.process_block_inputs(blkheight, spent_filter).await?; + + Ok((found_outputs, found_inputs)) + } + + async fn process_block_outputs( + &self, + blkheight: Height, + tweaks: Vec, + new_utxo_filter: FilterData, + ) -> Result> { + if tweaks.is_empty() { + return Ok(HashMap::new()); + } + + let secrets_map = self.client.get_script_to_secret_map(tweaks)?; + if secrets_map.is_empty() { + return Ok(HashMap::new()); + } + + let candidate_spks: Vec<_> = secrets_map.keys().collect(); + let filter = BlockFilter::new(&new_utxo_filter.data); + let matched = + Self::check_block_outputs(filter, new_utxo_filter.block_hash, candidate_spks)?; + + if !matched { + return Ok(HashMap::new()); + } + + let found_utxos = self.scan_utxos(blkheight, secrets_map).await?; + + let mut outputs = HashMap::new(); + for (label, utxo, tweak) in found_utxos { + let outpoint = 
OutPoint::new(utxo.txid, utxo.vout); + let owned_output = OwnedOutput { + blockheight: blkheight, + tweak: tweak.to_be_bytes(), + amount: utxo.value, + script: utxo.scriptpubkey, + label, + spend_status: OutputSpendStatus::Unspent, + }; + outputs.insert(outpoint, owned_output); + } + + Ok(outputs) + } + + async fn process_block_inputs( + &self, + blkheight: Height, + spent_filter: FilterData, + ) -> Result> { + if self.owned_outpoints.is_empty() { + return Ok(HashSet::new()); + } + + let input_hashes = self.get_input_hashes(spent_filter.block_hash).await?; + if input_hashes.is_empty() { + return Ok(HashSet::new()); + } + + let filter = BlockFilter::new(&spent_filter.data); + let matched = self.check_block_inputs( + filter, + spent_filter.block_hash, + input_hashes.keys().copied().collect(), + )?; + + if !matched { + return Ok(HashSet::new()); + } + + // Get the actual spent inputs from spent index + let spent_index = self.backend.spent_index(blkheight).await?; + let spent_inputs: HashSet = spent_index + .data + .into_iter() + .filter_map(|bytes| { + if bytes.len() >= 36 { + let mut txid_bytes = [0u8; 32]; + txid_bytes.copy_from_slice(&bytes[0..32]); + let hash = bitcoin::hashes::sha256d::Hash::from_byte_array(txid_bytes); + let txid = Txid::from_raw_hash(hash); + let vout = u32::from_le_bytes([bytes[32], bytes[33], bytes[34], bytes[35]]); + let outpoint = OutPoint::new(txid, vout); + if self.owned_outpoints.contains(&outpoint) { + Some(outpoint) + } else { + None + } + } else { + None + } + }) + .collect(); + + Ok(spent_inputs) + } + + fn get_block_data_stream( + &self, + range: std::ops::RangeInclusive, + dust_limit: Amount, + with_cutthrough: bool, + ) -> crate::backend::BlockDataStream { + self.backend + .get_block_data_stream(range, dust_limit, with_cutthrough) + } + + fn should_interrupt(&self) -> bool { + self.interrupt_requested() + } + + async fn save_state(&mut self) -> Result<()> { + self.updater.save_to_persistent_storage().await + } + + async fn 
record_outputs( + &mut self, + height: Height, + block_hash: BlockHash, + outputs: HashMap, + ) -> Result<()> { + self.updater + .record_block_outputs(height, block_hash, outputs) + .await + } + + async fn record_inputs( + &mut self, + height: Height, + block_hash: BlockHash, + inputs: HashSet, + ) -> Result<()> { + self.updater + .record_block_inputs(height, block_hash, inputs) + .await + } + + async fn record_progress(&mut self, start: Height, current: Height, end: Height) -> Result<()> { + self.updater.record_scan_progress(start, current, end).await + } + + fn client(&self) -> &SpClient { + &self.client + } + + fn backend(&self) -> &dyn crate::backend::AsyncChainBackend { + &self.backend + } + + fn updater(&mut self) -> &mut dyn crate::updater::AsyncUpdater { + &mut self.updater + } + + async fn get_input_hashes(&self, _blkhash: BlockHash) -> Result> { + let mut input_hashes = HashMap::new(); + for outpoint in &self.owned_outpoints { + let mut hash = [0u8; 8]; + hash.copy_from_slice(&outpoint.txid[..8]); + input_hashes.insert(hash, *outpoint); + } + Ok(input_hashes) + } +} diff --git a/src/backend/structs.rs b/spdk-core/src/types.rs similarity index 97% rename from src/backend/structs.rs rename to spdk-core/src/types.rs index b5ee039..3cd9ad4 100644 --- a/src/backend/structs.rs +++ b/spdk-core/src/types.rs @@ -8,6 +8,7 @@ pub struct BlockData { pub spent_filter: FilterData, } +#[derive(Clone)] pub struct UtxoData { pub txid: Txid, pub vout: u32, diff --git a/spdk-core/src/updater/mod.rs b/spdk-core/src/updater/mod.rs new file mode 100644 index 0000000..c82b597 --- /dev/null +++ b/spdk-core/src/updater/mod.rs @@ -0,0 +1,5 @@ +mod updater; + +#[cfg(feature = "async")] +pub use updater::AsyncUpdater; +pub use updater::Updater; diff --git a/spdk-core/src/updater/updater.rs b/spdk-core/src/updater/updater.rs new file mode 100644 index 0000000..bdfd2c4 --- /dev/null +++ b/spdk-core/src/updater/updater.rs @@ -0,0 +1,87 @@ +use std::collections::{HashMap, HashSet}; + 
+use bitcoin::{absolute::Height, BlockHash, OutPoint};
+
+use anyhow::Result;
+
+use crate::OwnedOutput;
+
+/// Trait for persisting scan results and progress
+///
+/// This trait provides synchronous methods for recording scanning progress,
+/// found outputs, and spent inputs. Implementations should handle persistence
+/// to storage (database, file system, etc.).
+///
+/// For async operations, see `AsyncUpdater`.
+pub trait Updater {
+    /// Ask the updater to record the scanning progress.
+    fn record_scan_progress(&mut self, start: Height, current: Height, end: Height) -> Result<()>;
+
+    /// Ask the updater to record the outputs found in a block.
+    fn record_block_outputs(
+        &mut self,
+        height: Height,
+        blkhash: BlockHash,
+        found_outputs: HashMap<OutPoint, OwnedOutput>,
+    ) -> Result<()>;
+
+    /// Ask the updater to record the inputs found in a block.
+    fn record_block_inputs(
+        &mut self,
+        blkheight: Height,
+        blkhash: BlockHash,
+        found_inputs: HashSet<OutPoint>,
+    ) -> Result<()>;
+
+    /// Ask the updater to save all recorded changes to persistent storage.
+    fn save_to_persistent_storage(&mut self) -> Result<()>;
+}
+
+/// Async version of Updater for non-blocking I/O operations
+/// Only available when the "async" feature is enabled
+#[cfg(feature = "async")]
+#[cfg_attr(not(target_arch = "wasm32"), async_trait::async_trait)]
+#[cfg_attr(target_arch = "wasm32", async_trait::async_trait(?Send))]
+pub trait AsyncUpdater: Send + Sync {
+    /// Ask the updater to record the scanning progress.
+    ///
+    /// # Arguments
+    /// * `start` - Starting block height
+    /// * `current` - Current block height being processed
+    /// * `end` - Ending block height
+    async fn record_scan_progress(
+        &mut self,
+        start: Height,
+        current: Height,
+        end: Height,
+    ) -> Result<()>;
+
+    /// Ask the updater to record the outputs found in a block.
+    ///
+    /// # Arguments
+    /// * `height` - Block height
+    /// * `blkhash` - Block hash
+    /// * `found_outputs` - Map of found outputs
+    async fn record_block_outputs(
+        &mut self,
+        height: Height,
+        blkhash: BlockHash,
+        found_outputs: HashMap<OutPoint, OwnedOutput>,
+    ) -> Result<()>;
+
+    /// Ask the updater to record the inputs found in a block.
+    ///
+    /// # Arguments
+    /// * `blkheight` - Block height
+    /// * `blkhash` - Block hash
+    /// * `found_inputs` - Set of spent inputs
+    async fn record_block_inputs(
+        &mut self,
+        blkheight: Height,
+        blkhash: BlockHash,
+        found_inputs: HashSet<OutPoint>,
+    ) -> Result<()>;
+
+    /// Ask the updater to save all recorded changes to persistent storage.
+    async fn save_to_persistent_storage(&mut self) -> Result<()>;
+}
diff --git a/src/backend/backend.rs b/src/backend/backend.rs
deleted file mode 100644
index d1892f0..0000000
--- a/src/backend/backend.rs
+++ /dev/null
@@ -1,24 +0,0 @@
-use std::{ops::RangeInclusive, pin::Pin};
-
-use anyhow::Result;
-use async_trait::async_trait;
-use bitcoin::{absolute::Height, Amount};
-use futures::Stream;
-
-use super::structs::{BlockData, SpentIndexData, UtxoData};
-
-#[async_trait]
-pub trait ChainBackend {
-    fn get_block_data_for_range(
-        &self,
-        range: RangeInclusive<u32>,
-        dust_limit: Amount,
-        with_cutthrough: bool,
-    ) -> Pin<Box<dyn Stream<Item = Result<BlockData>> + Send>>;
-
-    async fn spent_index(&self, block_height: Height) -> Result<SpentIndexData>;
-
-    async fn utxos(&self, block_height: Height) -> Result<Vec<UtxoData>>;
-
-    async fn block_height(&self) -> Result<Height>;
-}
diff --git a/src/backend/blindbit/backend/backend.rs b/src/backend/blindbit/backend/backend.rs
deleted file mode 100644
index ba47e70..0000000
--- a/src/backend/blindbit/backend/backend.rs
+++ /dev/null
@@ -1,84 +0,0 @@
-use std::{ops::RangeInclusive, pin::Pin, sync::Arc};
-
-use async_trait::async_trait;
-use bitcoin::{absolute::Height, Amount};
-use futures::{stream, Stream, StreamExt};
-
-use anyhow::Result;
-
-use crate::{backend::blindbit::BlindbitClient, BlockData, ChainBackend, SpentIndexData, UtxoData};
-
-const CONCURRENT_FILTER_REQUESTS: usize = 200; - -#[derive(Debug)] -pub struct BlindbitBackend { - client: BlindbitClient, -} - -impl BlindbitBackend { - pub fn new(blindbit_url: String) -> Result { - Ok(Self { - client: BlindbitClient::new(blindbit_url)?, - }) - } -} - -#[async_trait] -impl ChainBackend for BlindbitBackend { - /// High-level function to get block data for a range of blocks. - /// Block data includes all the information needed to determine if a block is relevant for scanning, - /// but does not include utxos, or spent index. - /// These need to be fetched separately afterwards, if it is determined this block is relevant. - fn get_block_data_for_range( - &self, - range: RangeInclusive, - dust_limit: Amount, - with_cutthrough: bool, - ) -> Pin> + Send>> { - let client = Arc::new(self.client.clone()); - - let res = stream::iter(range) - .map(move |n| { - let client = client.clone(); - - async move { - let blkheight = Height::from_consensus(n)?; - let tweaks = match with_cutthrough { - true => client.tweaks(blkheight, dust_limit).await?, - false => client.tweak_index(blkheight, dust_limit).await?, - }; - let new_utxo_filter = client.filter_new_utxos(blkheight).await?; - let spent_filter = client.filter_spent(blkheight).await?; - let blkhash = new_utxo_filter.block_hash; - Ok(BlockData { - blkheight, - blkhash, - tweaks, - new_utxo_filter: new_utxo_filter.into(), - spent_filter: spent_filter.into(), - }) - } - }) - .buffered(CONCURRENT_FILTER_REQUESTS); - - Box::pin(res) - } - - async fn spent_index(&self, block_height: Height) -> Result { - self.client.spent_index(block_height).await.map(Into::into) - } - - async fn utxos(&self, block_height: Height) -> Result> { - Ok(self - .client - .utxos(block_height) - .await? 
- .into_iter() - .map(Into::into) - .collect()) - } - - async fn block_height(&self) -> Result { - self.client.block_height().await - } -} diff --git a/src/backend/blindbit/backend/mod.rs b/src/backend/blindbit/backend/mod.rs deleted file mode 100644 index e48b365..0000000 --- a/src/backend/blindbit/backend/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod backend; - -pub use backend::BlindbitBackend; diff --git a/src/backend/blindbit/client/mod.rs b/src/backend/blindbit/client/mod.rs deleted file mode 100644 index 82f00d4..0000000 --- a/src/backend/blindbit/client/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -mod client; -pub mod structs; - -pub use client::BlindbitClient; diff --git a/src/backend/blindbit/mod.rs b/src/backend/blindbit/mod.rs deleted file mode 100644 index 4dc245c..0000000 --- a/src/backend/blindbit/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod backend; -mod client; - -pub use backend::BlindbitBackend; -pub use client::BlindbitClient; diff --git a/src/backend/mod.rs b/src/backend/mod.rs deleted file mode 100644 index d81e60c..0000000 --- a/src/backend/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod backend; -#[cfg(feature = "blindbit-backend")] -mod blindbit; -mod structs; - -pub use backend::ChainBackend; -pub use structs::*; - -#[cfg(feature = "blindbit-backend")] -pub use blindbit::BlindbitBackend; - -#[cfg(feature = "blindbit-backend")] -pub use blindbit::BlindbitClient; diff --git a/src/lib.rs b/src/lib.rs deleted file mode 100644 index c055981..0000000 --- a/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -mod backend; -mod client; -pub mod constants; -mod scanner; -mod updater; - -pub use bdk_coin_select::FeeRate; -pub use bitcoin; -pub use silentpayments; - -pub use backend::*; -pub use client::*; -pub use scanner::SpScanner; -pub use updater::Updater; diff --git a/src/scanner/mod.rs b/src/scanner/mod.rs deleted file mode 100644 index 2d6bea2..0000000 --- a/src/scanner/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod scanner; - -pub use scanner::SpScanner; diff --git 
a/src/scanner/scanner.rs b/src/scanner/scanner.rs deleted file mode 100644 index 5ecc765..0000000 --- a/src/scanner/scanner.rs +++ /dev/null @@ -1,380 +0,0 @@ -use std::{ - collections::{HashMap, HashSet}, - sync::atomic::AtomicBool, - time::{Duration, Instant}, -}; - -use anyhow::{bail, Error, Result}; -use bitcoin::{ - absolute::Height, - bip158::BlockFilter, - hashes::{sha256, Hash}, - secp256k1::{PublicKey, Scalar}, - Amount, BlockHash, OutPoint, Txid, XOnlyPublicKey, -}; -use futures::{pin_mut, Stream, StreamExt}; -use log::info; -use silentpayments::receiving::Label; - -use crate::{ - backend::{BlockData, ChainBackend, FilterData, UtxoData}, - client::{OutputSpendStatus, OwnedOutput, SpClient}, - updater::Updater, -}; - -pub struct SpScanner<'a> { - updater: Box, - backend: Box, - client: SpClient, - keep_scanning: &'a AtomicBool, // used to interrupt scanning - owned_outpoints: HashSet, // used to scan block inputs -} - -impl<'a> SpScanner<'a> { - pub fn new( - client: SpClient, - updater: Box, - backend: Box, - owned_outpoints: HashSet, - keep_scanning: &'a AtomicBool, - ) -> Self { - Self { - client, - updater, - backend, - owned_outpoints, - keep_scanning, - } - } - - pub async fn scan_blocks( - &mut self, - start: Height, - end: Height, - dust_limit: Amount, - with_cutthrough: bool, - ) -> Result<()> { - if start > end { - bail!("bigger start than end: {} > {}", start, end); - } - - info!("start: {} end: {}", start, end); - let start_time: Instant = Instant::now(); - - // get block data stream - let range = start.to_consensus_u32()..=end.to_consensus_u32(); - let block_data_stream = - self.backend - .get_block_data_for_range(range, dust_limit, with_cutthrough); - - // process blocks using block data stream - self.process_blocks(start, end, block_data_stream).await?; - - // time elapsed for the scan - info!( - "Blindbit scan complete in {} seconds", - start_time.elapsed().as_secs() - ); - - Ok(()) - } - - async fn process_blocks( - &mut self, - start: 
Height, - end: Height, - block_data_stream: impl Stream>, - ) -> Result<()> { - pin_mut!(block_data_stream); - - let mut update_time: Instant = Instant::now(); - - while let Some(blockdata) = block_data_stream.next().await { - let blockdata = blockdata?; - let blkheight = blockdata.blkheight; - let blkhash = blockdata.blkhash; - - // stop scanning and return if interrupted - if self.interrupt_requested() { - self.updater.save_to_persistent_storage()?; - return Ok(()); - } - - let mut save_to_storage = false; - - // always save on last block or after 30 seconds since last save - if blkheight == end || update_time.elapsed() > Duration::from_secs(30) { - save_to_storage = true; - } - - let (found_outputs, found_inputs) = self.process_block(blockdata).await?; - - if !found_outputs.is_empty() { - save_to_storage = true; - self.updater - .record_block_outputs(blkheight, blkhash, found_outputs)?; - } - - if !found_inputs.is_empty() { - save_to_storage = true; - self.updater - .record_block_inputs(blkheight, blkhash, found_inputs)?; - } - - // tell the updater we scanned this block - self.updater.record_scan_progress(start, blkheight, end)?; - - if save_to_storage { - self.updater.save_to_persistent_storage()?; - update_time = Instant::now(); - } - } - - Ok(()) - } - - async fn process_block( - &mut self, - blockdata: BlockData, - ) -> Result<(HashMap, HashSet)> { - let BlockData { - blkheight, - tweaks, - new_utxo_filter, - spent_filter, - .. 
- } = blockdata; - - let outs = self - .process_block_outputs(blkheight, tweaks, new_utxo_filter) - .await?; - - // after processing outputs, we add the found outputs to our list - self.owned_outpoints.extend(outs.keys()); - - let ins = self.process_block_inputs(blkheight, spent_filter).await?; - - // after processing inputs, we remove the found inputs - self.owned_outpoints.retain(|item| !ins.contains(item)); - - Ok((outs, ins)) - } - - async fn process_block_outputs( - &self, - blkheight: Height, - tweaks: Vec, - new_utxo_filter: FilterData, - ) -> Result> { - let mut res = HashMap::new(); - - if !tweaks.is_empty() { - let secrets_map = self.client.get_script_to_secret_map(tweaks)?; - - //last_scan = last_scan.max(n as u32); - let candidate_spks: Vec<&[u8; 34]> = secrets_map.keys().collect(); - - //get block gcs & check match - let blkfilter = BlockFilter::new(&new_utxo_filter.data); - let blkhash = new_utxo_filter.block_hash; - - let matched_outputs = Self::check_block_outputs(blkfilter, blkhash, candidate_spks)?; - - //if match: fetch and scan utxos - if matched_outputs { - info!("matched outputs on: {}", blkheight); - let found = self.scan_utxos(blkheight, secrets_map).await?; - - if !found.is_empty() { - for (label, utxo, tweak) in found { - let outpoint = OutPoint { - txid: utxo.txid, - vout: utxo.vout, - }; - - let out = OwnedOutput { - blockheight: blkheight, - tweak: tweak.to_be_bytes(), - amount: utxo.value, - script: utxo.scriptpubkey, - label, - spend_status: OutputSpendStatus::Unspent, - }; - - res.insert(outpoint, out); - } - } - } - } - Ok(res) - } - - async fn process_block_inputs( - &self, - blkheight: Height, - spent_filter: FilterData, - ) -> Result> { - let mut res = HashSet::new(); - - let blkhash = spent_filter.block_hash; - - // first get the 8-byte hashes used to construct the input filter - let input_hashes_map = self.get_input_hashes(blkhash)?; - - // check against filter - let blkfilter = BlockFilter::new(&spent_filter.data); - let 
matched_inputs = self.check_block_inputs( - blkfilter, - blkhash, - input_hashes_map.keys().cloned().collect(), - )?; - - // if match: download spent data, collect the outpoints that are spent - if matched_inputs { - info!("matched inputs on: {}", blkheight); - let spent = self.backend.spent_index(blkheight).await?.data; - - for spent in spent { - let hex: &[u8] = spent.as_ref(); - - if let Some(outpoint) = input_hashes_map.get(hex) { - res.insert(*outpoint); - } - } - } - Ok(res) - } - - async fn scan_utxos( - &self, - blkheight: Height, - secrets_map: HashMap<[u8; 34], PublicKey>, - ) -> Result, UtxoData, Scalar)>> { - let utxos = self.backend.utxos(blkheight).await?; - - let mut res: Vec<(Option