From 332f7269b85e80f599f1972ce7b54afc21ca0032 Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Mon, 9 Jun 2025 03:47:23 +0530 Subject: [PATCH 1/7] setup utoipa foundations --- Cargo.lock | 55 +++ core/Cargo.toml | 1 + core/src/defs.rs | 8 +- core/src/error.rs | 9 +- core/src/execution_options/aa.rs | 45 +- core/src/execution_options/auto.rs | 19 + core/src/execution_options/mod.rs | 86 ++-- core/src/transaction.rs | 7 +- executors/src/external_bundler/send.rs | 2 +- server/Cargo.toml | 3 + server/Dockerfile | 6 + server/bacon.toml | 115 +++++ server/res/scalar.html | 52 +++ server/src/execution_router/mod.rs | 8 +- server/src/http/dyn_contract.rs | 4 +- server/src/http/error.rs | 2 +- server/src/http/routes/contract_encode.rs | 4 +- server/src/http/routes/contract_write.rs | 81 ++-- server/src/http/routes/mod.rs | 5 - .../src/http/routes/working_read_contract.rs | 406 ------------------ server/src/http/server.rs | 123 +++--- thirdweb-core/Cargo.toml | 1 + thirdweb-core/src/error.rs | 6 +- 23 files changed, 436 insertions(+), 612 deletions(-) create mode 100644 core/src/execution_options/auto.rs create mode 100644 server/bacon.toml create mode 100644 server/res/scalar.html delete mode 100644 server/src/http/routes/working_read_contract.rs diff --git a/Cargo.lock b/Cargo.lock index 406ac70..e161ad7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2386,6 +2386,7 @@ dependencies = [ "tower", "tracing", "twmq", + "utoipa", "uuid", "vault-sdk", "vault-types", @@ -5343,6 +5344,7 @@ dependencies = [ "thiserror 2.0.12", "tracing", "url", + "utoipa", ] [[package]] @@ -5370,6 +5372,9 @@ dependencies = [ "tracing", "tracing-subscriber", "twmq", + "utoipa", + "utoipa-axum", + "utoipa-scalar", "vault-sdk", "vault-types", ] @@ -5943,6 +5948,56 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" +[[package]] +name = "utoipa" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2fcc29c80c21c31608227e0912b2d7fddba57ad76b606890627ba8ee7964e993" +dependencies = [ + "indexmap 2.9.0", + "serde", + "serde_json", + "utoipa-gen", +] + +[[package]] +name = "utoipa-axum" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c25bae5bccc842449ec0c5ddc5cbb6a3a1eaeac4503895dc105a1138f8234a0" +dependencies = [ + "axum", + "paste", + "tower-layer", + "tower-service", + "utoipa", +] + +[[package]] +name = "utoipa-gen" +version = "5.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d79d08d92ab8af4c5e8a6da20c47ae3f61a0f1dabc1997cdf2d082b757ca08b" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.101", + "uuid", +] + +[[package]] +name = "utoipa-scalar" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59559e1509172f6b26c1cdbc7247c4ddd1ac6560fe94b584f81ee489b141f719" +dependencies = [ + "axum", + "serde", + "serde_json", + "utoipa", +] + [[package]] name = "uuid" version = "1.17.0" diff --git a/core/Cargo.toml b/core/Cargo.toml index 94a4efa..506d31f 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -17,3 +17,4 @@ async-nats = "0.40.0" twmq = { version = "0.1.0", path = "../twmq" } thirdweb-core = { version = "0.1.0", path = "../thirdweb-core" } uuid = { version = "1.17.0", features = ["v4"] } +utoipa = "5.4.0" diff --git a/core/src/defs.rs b/core/src/defs.rs index d74d111..5209204 100644 --- a/core/src/defs.rs +++ 
b/core/src/defs.rs
@@ -2,19 +2,19 @@ use alloy::primitives::{Address, Bytes};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 
-#[derive(JsonSchema, Serialize, Deserialize, Clone)]
+#[derive(JsonSchema, Serialize, Deserialize, Clone, utoipa::ToSchema)]
 #[serde(remote = "Address", transparent)]
-/// # Address
+/// ### Address
 /// Used to represent an EVM address. This is a string of length 42 with a `0x` prefix. Non-checksummed addresses are also supported, but will be converted to checksummed.
 pub struct AddressDef(pub String);
 
-#[derive(JsonSchema, Serialize, Deserialize, Clone)]
+#[derive(JsonSchema, Serialize, Deserialize, Clone, utoipa::ToSchema)]
 #[serde(remote = "Bytes", transparent)]
 /// # Bytes
 /// Used to represent "bytes". This is a 0x prefixed hex string.
 pub struct BytesDef(pub String);
 
-#[derive(JsonSchema, Serialize, Deserialize, Clone)]
+#[derive(JsonSchema, Serialize, Deserialize, Clone, utoipa::ToSchema)]
 /// # U256
 /// Used to represent a 256-bit unsigned integer. Engine can parse these from any valid encoding of the Ethereum "quantity" format.
 pub struct U256Def(pub String);
diff --git a/core/src/error.rs b/core/src/error.rs
index 13f0d2a..1a20f86 100644
--- a/core/src/error.rs
+++ b/core/src/error.rs
@@ -13,7 +13,7 @@ use twmq::error::TwmqError;
 
 use crate::chain::Chain;
 
-#[derive(Debug, Error, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Error, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 pub enum RpcErrorKind {
     /// Server returned an error response.
     #[error("server returned an error response: {0}")]
@@ -58,7 +58,7 @@ pub enum RpcErrorKind {
     OtherTransportError(String),
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema)]
+#[derive(Debug, Serialize, Deserialize, Clone, JsonSchema, utoipa::ToSchema)]
 pub struct RpcErrorResponse {
     /// The error code.
     pub code: i64,
@@ -99,7 +99,7 @@ pub struct RpcErrorInfo {
 }
 
 /// A serializable contract interaction error type
-#[derive(Debug, Error, Serialize, Deserialize, Clone, JsonSchema)]
+#[derive(Debug, Error, Serialize, Deserialize, Clone, JsonSchema, utoipa::ToSchema)]
 pub enum ContractInteractionErrorKind {
     /// Unknown function referenced.
     #[error("unknown function: function {0} does not exist")]
@@ -159,7 +159,7 @@ pub enum ContractInteractionErrorKind {
     FunctionResolutionFailed(String),
 }
 
-#[derive(Error, Debug, Serialize, Clone, Deserialize, JsonSchema)]
+#[derive(Error, Debug, Serialize, Clone, Deserialize, JsonSchema, utoipa::ToSchema)]
 pub enum EngineError {
     #[error("RPC error on chain {chain_id} at {rpc_url}: {message}")]
     RpcError {
@@ -198,6 +198,7 @@ pub enum EngineError {
     ContractInteractionError {
         /// Contract address
         #[schemars(with = "Option<AddressDef>")]
+        #[schema(value_type = Option<AddressDef>)]
        contract_address: Option<Address>,
        /// Chain ID
        chain_id: u64,
diff --git a/core/src/execution_options/aa.rs b/core/src/execution_options/aa.rs
index 9c958f1..57746a8 100644
--- a/core/src/execution_options/aa.rs
+++ b/core/src/execution_options/aa.rs
@@ -8,7 +8,7 @@ use serde::{Deserialize, Deserializer, Serialize};
 
 use crate::constants::{DEFAULT_FACTORY_ADDRESS_V0_7, ENTRYPOINT_ADDRESS_V0_7};
 
-#[derive(Deserialize, Serialize, Debug, JsonSchema, Clone, Copy)]
+#[derive(Deserialize, Serialize, Debug, JsonSchema, Clone, Copy, utoipa::ToSchema)]
 pub enum EntrypointVersion {
     #[serde(rename = "0.6")]
     V0_6,
@@ -28,48 +28,50 @@ pub struct EntrypointAndFactoryDetails {
     pub factory_address: Address,
 }
 
-/// # ERC-4337 Execution Options
+/// ### ERC-4337 Execution Options
 /// This struct allows flexible configuration of ERC-4337 execution options,
 /// with intelligent defaults and inferences based on provided values.
 ///
-/// ## Field Inference
+/// ### Field Inference
 /// When fields are omitted, the system uses the following inference rules:
 ///
-/// 1. **Version Inference**:
-///    - If `entrypointVersion` is provided, it's used directly
-///    - Otherwise, tries to infer from `entrypointAddress` (if provided)
-///    - If that fails, tries to infer from `factoryAddress` (if provided)
-///    - Defaults to version 0.7 if no inference is possible
+/// 1. Version Inference:
+///    - If `entrypointVersion` is provided, it's used directly
+///    - Otherwise, tries to infer from `entrypointAddress` (if provided)
+///    - If that fails, tries to infer from `factoryAddress` (if provided)
+///    - Defaults to version 0.7 if no inference is possible
 ///
-/// 2. **Entrypoint Address Inference**:
+/// 2. Entrypoint Address Inference:
 ///    - If provided explicitly, it's used as-is
 ///    - Otherwise, uses the default address corresponding to the inferred version:
 ///      - V0.6: 0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789
 ///      - V0.7: 0x0000000071727De22E5E9d8BAf0edAc6f37da032
 ///
-/// 3. **Factory Address Inference**:
+/// 3. Factory Address Inference:
 ///    - If provided explicitly, it's used as-is
 ///    - Otherwise, uses the default factory corresponding to the inferred version:
-///      - V0.6: [DEFAULT_FACTORY_ADDRESS_V0_6]
-///      - V0.7: [DEFAULT_FACTORY_ADDRESS_V0_7]
+///      - V0.6: 0x85e23b94e7F5E9cC1fF78BCe78cfb15B81f0DF00 [DEFAULT_FACTORY_ADDRESS_V0_6]
+///      - V0.7: 0x4bE0ddfebcA9A5A4a617dee4DeCe99E7c862dceb [DEFAULT_FACTORY_ADDRESS_V0_7]
 ///
-/// 4. **Account Salt**:
+/// 4. Account Salt:
 ///    - If provided explicitly, it's used as-is
 ///    - Otherwise, defaults to "0x" (commonly used as the default "null" salt for smart accounts)
 ///
-/// 5. **Smart Account Address**:
+/// 5. Smart Account Address:
 ///    - If provided explicitly, it's used as-is
 ///    - Otherwise, it's read from the smart account factory
 ///
 /// All optional fields can be omitted for a minimal configuration using version 0.7 defaults.
-#[derive(Deserialize, Serialize, Debug, Clone, JsonSchema)]
+#[derive(Deserialize, Serialize, Debug, Clone, JsonSchema, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct Erc4337ExecutionOptions {
     #[schemars(with = "AddressDef")]
+    #[schema(value_type = AddressDef)]
     pub signer_address: Address,
 
     #[serde(flatten)]
     #[schemars(with = "EntrypointAndFactoryDetailsDeserHelper")]
+    #[schema(value_type = EntrypointAndFactoryDetailsDeserHelper)]
     pub entrypoint_details: EntrypointAndFactoryDetails,
 
     #[serde(default = "default_account_salt")]
@@ -77,6 +79,7 @@ pub struct Erc4337ExecutionOptions {
 
     #[serde(skip_serializing_if = "Option::is_none")]
     #[schemars(with = "Option::<AddressDef>")]
+    #[schema(value_type = Option<AddressDef>)]
    pub smart_account_address: Option<Address>,
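To make the inference rules above concrete, here is a hedged sketch of two request payloads (illustrative addresses only; the `accountSalt` field name is assumed from the `default_account_salt` default and the struct's camelCase renaming):

```rust
use serde_json::json;

fn main() {
    // Minimal payload: only the signer is supplied, so version 0.7, the default
    // entrypoint, the default factory, and the "0x" salt are all inferred.
    let minimal = json!({
        "signerAddress": "0x0000000000000000000000000000000000000001"
    });

    // Fully explicit payload: pins every field the inference rules would otherwise
    // fill, using the default v0.7 addresses listed in the doc comment above.
    let explicit = json!({
        "signerAddress": "0x0000000000000000000000000000000000000001",
        "entrypointVersion": "0.7",
        "entrypointAddress": "0x0000000071727De22E5E9d8BAf0edAc6f37da032",
        "factoryAddress": "0x4bE0ddfebcA9A5A4a617dee4DeCe99E7c862dceb",
        "accountSalt": "0x" // assumed field name; defaults to "0x" when omitted
    });

    println!("{minimal}\n{explicit}");
}
```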
}
 
@@ -91,9 +94,9 @@ pub fn default_entrypoint_address() -> Address {
 pub fn default_account_salt() -> String {
     "0x".to_string()
 }
-#[derive(Deserialize, JsonSchema)]
+#[derive(Deserialize, JsonSchema, utoipa::ToSchema)]
 struct EntrypointAndFactoryDetailsDeserHelper {
-    /// # Entrypoint Contract Address
+    /// ### Entrypoint Contract Address
     /// The address of the ERC-4337 entrypoint contract.
     ///
     /// If omitted, defaults to the standard address for the specified/inferred version.
     ///
     /// Known addresses:
     ///
     /// - V0.6: 0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789
-    ///
     /// - V0.7: 0x0000000071727De22E5E9d8BAf0edAc6f37da032
     #[serde(rename = "entrypointAddress")]
     #[schemars(with = "Option<AddressDef>")]
+    #[schema(value_type = Option<AddressDef>)]
    entrypoint_address: Option<Address>,
-    /// # Entrypoint Version
+    /// ### Entrypoint Version
     /// The version of the ERC-4337 standard to use.
     ///
     /// If omitted, the version will be inferred from the entrypoint address,
@@ -115,17 +118,17 @@ struct EntrypointAndFactoryDetailsDeserHelper {
     #[serde(rename = "entrypointVersion")]
     version: Option<EntrypointVersion>,
 
-    /// # Account Factory Address
+    /// ### Account Factory Address
     /// The address of the smart account factory contract.
     /// If omitted, defaults to the thirdweb default account factory for the specified/inferred version.
     ///
     /// Known addresses:
     ///
     /// - V0.6: 0x85e23b94e7F5E9cC1fF78BCe78cfb15B81f0DF00
-    ///
     /// - V0.7: 0x4bE0ddfebcA9A5A4a617dee4DeCe99E7c862dceb
     #[serde(rename = "factoryAddress")]
     #[schemars(with = "Option<AddressDef>")]
+    #[schema(value_type = Option<AddressDef>)]
    factory_address: Option<Address>
, } diff --git a/core/src/execution_options/auto.rs b/core/src/execution_options/auto.rs new file mode 100644 index 0000000..6404332 --- /dev/null +++ b/core/src/execution_options/auto.rs @@ -0,0 +1,19 @@ +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[schema(title = "Auto Determine Execution")] +#[serde(rename_all = "camelCase")] +/// This is the default execution option. +/// If you do not specify an execution type, and only specify a "from" string, +/// engine will automatically determine the most optimal options for you. +/// If you would like to specify granular options about execution strategy +/// choose one of the other executionOptions type and provide them. +pub struct AutoExecutionOptions { + /// The identifier of the entity to send the transaction from. + /// Automatically picks best execution strategy based on the identifier. + /// - If EOA address, execution uses EIP7702 based smart-wallet execution + /// - If 7702 not supported on chain, falls back to smart-contract wallet (ERC4337) with default smart account for this EOA (v0.7) + /// - UNLESS this is a zk-chain, in which case, zk-sync native-aa is used + pub from: String, +} diff --git a/core/src/execution_options/mod.rs b/core/src/execution_options/mod.rs index eb21543..58be87b 100644 --- a/core/src/execution_options/mod.rs +++ b/core/src/execution_options/mod.rs @@ -1,12 +1,15 @@ use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; +use serde::de::{Error, IntoDeserializer}; +use serde::{Deserialize, Deserializer, Serialize}; +use std::collections::HashMap; use crate::transaction::InnerTransaction; pub mod aa; +pub mod auto; -/// Base execution options for all transactions -/// All specific execution options share this -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +// Base execution options for all transactions +// All specific execution options share this +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(rename_all = "camelCase")] pub struct BaseExecutionOptions { pub chain_id: u64, @@ -19,57 +22,49 @@ fn default_idempotency_key() -> String { } /// All supported specific execution options are contained here -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(tag = "type")] +#[schema(title = "Execution Option Variants")] pub enum SpecificExecutionOptions { - /// # ERC-4337 Execution Options - /// This struct allows flexible configuration of ERC-4337 execution options, - /// with intelligent defaults and inferences based on provided values. - /// - /// ## Field Inference - /// When fields are omitted, the system uses the following inference rules: - /// - /// 1. **Version Inference**: - /// - If `entrypointVersion` is provided, it's used directly - /// - Otherwise, tries to infer from `entrypointAddress` (if provided) - /// - If that fails, tries to infer from `factoryAddress` (if provided) - /// - Defaults to version 0.7 if no inference is possible - /// - /// 2. **Entrypoint Address Inference**: - /// - If provided explicitly, it's used as-is - /// - Otherwise, uses the default address corresponding to the inferred version: - /// - V0.6: 0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789 - /// - V0.7: 0x0576a174D229E3cFA37253523E645A78A0C91B57 - /// - /// 3. 
**Factory Address Inference**:
-    ///    - If provided explicitly, it's used as-is
-    ///    - Otherwise, uses the default factory corresponding to the inferred version:
-    ///      - V0.6: [DEFAULT_FACTORY_ADDRESS_V0_6]
-    ///      - V0.7: [DEFAULT_FACTORY_ADDRESS_V0_7]
-    ///
-    /// 4. **Account Salt**:
-    ///    - If provided explicitly, it's used as-is
-    ///    - Otherwise, defaults to "0x" (commonly used as the defauult "null" salt for smart accounts)
-    ///
-    /// 5. **Smart Account Address**:
-    ///    - If provided explicitly, it's used as-is
-    ///    - Otherwise, it's read from the smart account factory
-    ///
-    /// All optional fields can be omitted for a minimal configuration using version 0.7 defaults.
+    #[serde(rename = "auto")]
+    #[schema(title = "Auto Determine Execution")]
+    Auto(auto::AutoExecutionOptions),
+
+    #[schema(title = "ERC-4337 Execution Options")]
     ERC4337(aa::Erc4337ExecutionOptions),
 }
 
+fn deserialize_with_default_auto<'de, D>(
+    deserializer: D,
+) -> Result<SpecificExecutionOptions, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let mut map: HashMap<String, serde_json::Value> = HashMap::deserialize(deserializer)?;
+
+    // If no "type" field exists, add it with "auto" (the tag the Auto variant is renamed to)
+    if !map.contains_key("type") {
+        map.insert(
+            "type".to_string(),
+            serde_json::Value::String("auto".to_string()),
+        );
+    }
+
+    // Convert HashMap back to deserializer and deserialize normally
+    SpecificExecutionOptions::deserialize(map.into_deserializer()).map_err(D::Error::custom)
+}
+
 /// This is the exposed API for execution options
 /// Base and specific execution options are both flattened together
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 pub struct ExecutionOptions {
     #[serde(flatten)]
     pub base: BaseExecutionOptions,
-    #[serde(flatten)]
+    #[serde(flatten, deserialize_with = "deserialize_with_default_auto")]
     pub specific: SpecificExecutionOptions,
 }
 
-#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema)]
+#[derive(Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)]
 pub struct WebhookOptions {
     pub url: String,
     pub secret: Option<String>,
@@ -77,7 +72,7 @@ pub struct WebhookOptions {
 
 /// Incoming transaction request, parsed into InnerTransaction
 /// Exposed API will have varying `params` but will all parse into InnerTransaction before execution
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct SendTransactionRequest {
     pub execution_options: ExecutionOptions,
@@ -87,7 +82,7 @@ pub struct SendTransactionRequest {
 
 /// # QueuedTransaction
 /// Response for any request that queues one or more transactions
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct QueuedTransaction {
     /// The idempotency key this transaction was queued with
@@ -111,7 +106,7 @@ pub struct QueuedTransaction {
     pub transaction_params: Vec<InnerTransaction>,
 }
 
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 pub struct QueuedTransactionsResponse {
     pub transactions: Vec<QueuedTransaction>,
 }
@@ -126,6 +121,7 @@ impl ExecutionOptions {
     pub fn executor_type(&self) -> ExecutorType {
         match &self.specific {
             SpecificExecutionOptions::ERC4337(_) => ExecutorType::Erc4337,
+            SpecificExecutionOptions::Auto(_) => ExecutorType::Erc4337,
         }
     }
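A brief sketch of what the defaulting deserializer buys at the API surface (hypothetical payloads; assumes the lowercase `auto` tag used by the variant rename, and the flattened layout of `ExecutionOptions` over `BaseExecutionOptions`):

```rust
use serde_json::json;

fn main() {
    // No "type" key: deserialize_with_default_auto injects `"type": "auto"`,
    // so this parses as SpecificExecutionOptions::Auto.
    let implicit_auto = json!({
        "chainId": 1,
        "from": "0x0000000000000000000000000000000000000001"
    });

    // Explicit tag: selects the ERC-4337 variant directly; no defaulting involved.
    let explicit_erc4337 = json!({
        "chainId": 1,
        "type": "ERC4337",
        "signerAddress": "0x0000000000000000000000000000000000000001"
    });

    println!("{implicit_auto}\n{explicit_erc4337}");
}
```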
diff --git a/core/src/transaction.rs b/core/src/transaction.rs
index 4184b9b..b8f5587 100644
--- a/core/src/transaction.rs
+++ b/core/src/transaction.rs
@@ -5,12 +5,17 @@ use serde::{Deserialize, Serialize};
 
 /// # InnerTransaction
 /// This is the actual encoded inner transaction data that will be sent to the blockchain.
-#[derive(Deserialize, Serialize, Debug, Clone, JsonSchema)]
+#[derive(Deserialize, Serialize, Debug, Clone, JsonSchema, utoipa::ToSchema)]
 pub struct InnerTransaction {
     #[schemars(with = "AddressDef")]
+    #[schema(value_type = AddressDef)]
     pub to: Address,
+    #[schemars(with = "BytesDef")]
+    #[schema(value_type = BytesDef)]
     pub data: Bytes,
+    #[schemars(with = "U256Def")]
+    #[schema(value_type = U256Def)]
     pub value: U256,
 }
diff --git a/executors/src/external_bundler/send.rs b/executors/src/external_bundler/send.rs
index 96e83a3..142ec80 100644
--- a/executors/src/external_bundler/send.rs
+++ b/executors/src/external_bundler/send.rs
@@ -124,7 +124,7 @@ pub enum ExternalBundlerSendError {
 impl From<TwmqError> for ExternalBundlerSendError {
     fn from(error: TwmqError) -> Self {
         ExternalBundlerSendError::InternalError {
-            message: format!("Deserialization error for job data: {}", error.to_string()),
+            message: format!("Deserialization error for job data: {}", error),
         }
     }
 }
diff --git a/server/Cargo.toml b/server/Cargo.toml
index 60b5910..328b1a3 100644
--- a/server/Cargo.toml
+++ b/server/Cargo.toml
@@ -32,3 +32,6 @@ aide = { version = "0.14.2", features = [
   "scalar",
 ] }
 schemars = "0.8.22"
+utoipa = { version = "5.4.0", features = ["macros", "chrono", "uuid", "axum_extras"] }
+utoipa-axum = "0.2.0"
+utoipa-scalar = { version = "0.3.0", features = ["axum"] }
diff --git a/server/Dockerfile b/server/Dockerfile
index 1429aef..f6e89ca 100644
--- a/server/Dockerfile
+++ b/server/Dockerfile
@@ -2,6 +2,9 @@ FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
 
 WORKDIR /app
 
+# Install ssh and git for GitHub Actions environment
+RUN apt-get update && apt-get install -y ssh git && rm -rf /var/lib/apt/lists/*
+
 FROM chef AS planner
 COPY . .
RUN cargo chef prepare --recipe-path recipe.json @@ -9,6 +12,9 @@ RUN cargo chef prepare --recipe-path recipe.json FROM chef AS builder COPY --from=planner /app/recipe.json recipe.json +# Setup SSH for GitHub in clean CI environment +RUN mkdir -p ~/.ssh && ssh-keyscan github.com >> ~/.ssh/known_hosts + # Force cargo to use git CLI for SSH ENV CARGO_NET_GIT_FETCH_WITH_CLI=true diff --git a/server/bacon.toml b/server/bacon.toml new file mode 100644 index 0000000..824b971 --- /dev/null +++ b/server/bacon.toml @@ -0,0 +1,115 @@ +# This is a configuration file for the bacon tool +# +# Complete help on configuration: https://dystroy.org/bacon/config/ +# +# You may check the current default at +# https://github.com/Canop/bacon/blob/main/defaults/default-bacon.toml + +default_job = "check" +env.CARGO_TERM_COLOR = "always" + +[jobs.check] +command = ["cargo", "check"] +need_stdout = false + +[jobs.check-all] +command = ["cargo", "check", "--all-targets"] +need_stdout = false + +# Run clippy on the default target +[jobs.clippy] +command = ["cargo", "clippy"] +need_stdout = false + +# Run clippy on all targets +# To disable some lints, you may change the job this way: +# [jobs.clippy-all] +# command = [ +# "cargo", "clippy", +# "--all-targets", +# "--", +# "-A", "clippy::bool_to_int_with_if", +# "-A", "clippy::collapsible_if", +# "-A", "clippy::derive_partial_eq_without_eq", +# ] +# need_stdout = false +[jobs.clippy-all] +command = ["cargo", "clippy", "--all-targets"] +need_stdout = false + +# This job lets you run +# - all tests: bacon test +# - a specific test: bacon test -- config::test_default_files +# - the tests of a package: bacon test -- -- -p config +[jobs.test] +command = ["cargo", "test"] +need_stdout = true + +[jobs.nextest] +command = [ + "cargo", "nextest", "run", + "--hide-progress-bar", "--failure-output", "final" +] +need_stdout = true +analyzer = "nextest" + +[jobs.doc] +command = ["cargo", "doc", "--no-deps"] +need_stdout = false + +# If the doc compiles, then it opens in your browser and bacon switches +# to the previous job +[jobs.doc-open] +command = ["cargo", "doc", "--no-deps", "--open"] +need_stdout = false +on_success = "back" # so that we don't open the browser at each change + +# You can run your application and have the result displayed in bacon, +# if it makes sense for this crate. +[jobs.run] +command = [ + "cargo", "run", + # put launch parameters for your program behind a `--` separator +] +need_stdout = true +allow_warnings = true +background = true + +# Run your long-running application (eg server) and have the result displayed in bacon. +# For programs that never stop (eg a server), `background` is set to false +# to have the cargo run output immediately displayed instead of waiting for +# program's end. +# 'on_change_strategy' is set to `kill_then_restart` to have your program restart +# on every change (an alternative would be to use the 'F5' key manually in bacon). +# If you often use this job, it makes sense to override the 'r' key by adding +# a binding `r = job:run-long` at the end of this file . 
+# A custom kill command such as the one suggested below is frequently needed to kill
+# long running programs (uncomment it if you need it)
+[jobs.run-long]
+command = [
+    "cargo", "run",
+    # put launch parameters for your program behind a `--` separator
+]
+need_stdout = true
+allow_warnings = true
+background = false
+on_change_strategy = "kill_then_restart"
+# kill = ["pkill", "-TERM", "-P"]'
+
+# This parameterized job runs the example of your choice, as soon
+# as the code compiles.
+# Call it as
+#    bacon ex -- my-example
+[jobs.ex]
+command = ["cargo", "run", "--example"]
+need_stdout = true
+allow_warnings = true
+
+# You may define here keybindings that would be specific to
+# a project, for example a shortcut to launch a specific job.
+# Shortcuts to internal functions (scrolling, toggling, etc.)
+# should go in your personal global prefs.toml file instead.
+[keybindings]
+# alt-m = "job:my-job"
+c = "job:clippy-all" # comment this to have 'c' run clippy on only the default target
+r = "job:run-long"
diff --git a/server/res/scalar.html b/server/res/scalar.html
new file mode 100644
index 0000000..c7d4763
--- /dev/null
+++ b/server/res/scalar.html
@@ -0,0 +1,52 @@
[52 lines of new HTML: a custom Scalar API reference page titled "Engine API Reference"; the markup was lost in extraction]
\ No newline at end of file
diff --git a/server/src/execution_router/mod.rs b/server/src/execution_router/mod.rs
index 69de708..c2a0308 100644
--- a/server/src/execution_router/mod.rs
+++ b/server/src/execution_router/mod.rs
@@ -59,6 +59,10 @@ impl ExecutionRouter {
 
                 Ok(vec![queued_transaction])
             }
+
+            SpecificExecutionOptions::Auto(auto_execution_options) => {
+                todo!()
+            }
         }
     }
 
@@ -67,14 +71,14 @@ impl ExecutionRouter {
         base_execution_options: &BaseExecutionOptions,
         erc4337_execution_options: &Erc4337ExecutionOptions,
         webhook_options: &Option<Vec<WebhookOptions>>,
-        transactions: &Vec<InnerTransaction>,
+        transactions: &[InnerTransaction],
         rpc_credentials: RpcCredentials,
         signing_credential: SigningCredential,
     ) -> Result<(), TwmqError> {
         let job_data = ExternalBundlerSendJobData {
             transaction_id: base_execution_options.idempotency_key.clone(),
             chain_id: base_execution_options.chain_id,
-            transactions: transactions.clone(),
+            transactions: transactions.to_vec(),
             execution_options: erc4337_execution_options.clone(),
             signing_credential,
             webhook_options: webhook_options.clone(),
diff --git a/server/src/http/dyn_contract.rs b/server/src/http/dyn_contract.rs
index 3459f83..5b278c7 100644
--- a/server/src/http/dyn_contract.rs
+++ b/server/src/http/dyn_contract.rs
@@ -18,11 +18,12 @@ use thirdweb_core::{
 /// This is the base type used by all contract interaction endpoints.
 /// It supports both function names and full function signatures, with
 /// automatic ABI resolution when needed.
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct ContractCall {
     /// The address of the smart contract to call
     #[schemars(with = "AddressDef")]
+    #[schema(value_type = AddressDef)]
     pub contract_address: Address,
     /// The function to call - can be a name like "transfer" or full signature like "transfer(address,uint256)"
     pub method: String,
@@ -30,6 +31,7 @@
     pub params: Vec<JsonValue>,
     /// Optional ABI to use instead of fetching from contract verification services
     #[schemars(with = "Option")]
+    #[schema(value_type = Option)]
    pub abi: Option,
 }
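As a rough illustration of the two `method` formats this type accepts (example values only; with a bare name, the ABI service resolves the full signature before encoding):

```rust
use serde_json::json;

fn main() {
    // Bare function name: requires ABI resolution to find the exact overload.
    let by_name = json!({
        "contractAddress": "0x0000000000000000000000000000000000000001",
        "method": "transfer",
        "params": ["0x0000000000000000000000000000000000000002", "1000000"]
    });

    // Full signature: unambiguous, so the call can be encoded without an ABI lookup.
    let by_signature = json!({
        "contractAddress": "0x0000000000000000000000000000000000000001",
        "method": "transfer(address,uint256)",
        "params": ["0x0000000000000000000000000000000000000002", "1000000"]
    });

    println!("{by_name}\n{by_signature}");
}
```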
diff --git a/server/src/http/error.rs b/server/src/http/error.rs
index f371efa..bd06ed2 100644
--- a/server/src/http/error.rs
+++ b/server/src/http/error.rs
@@ -7,7 +7,7 @@ use serde_json::json;
 
 // Extension trait that lets you pair an error with a status code
 /// Extension trait for EngineError to add HTTP response conversion
-#[derive(OperationIo, JsonSchema)]
+#[derive(OperationIo, JsonSchema, utoipa::ToSchema)]
 #[schemars(transparent)]
 pub struct ApiEngineError(#[schemars(with = "ErrorResponse")] pub EngineError);
diff --git a/server/src/http/routes/contract_encode.rs b/server/src/http/routes/contract_encode.rs
index d358521..132de67 100644
--- a/server/src/http/routes/contract_encode.rs
+++ b/server/src/http/routes/contract_encode.rs
@@ -156,8 +156,8 @@ pub async fn encode_contract(
     OptionalRpcCredentialsExtractor(rpc_credentials): OptionalRpcCredentialsExtractor,
     EngineJson(request): EngineJson<EncodeRequest>,
 ) -> Result<impl IntoApiResponse, ApiEngineError> {
-    let auth: Option<ThirdwebAuth> = rpc_credentials.and_then(|creds| match creds {
-        engine_core::chain::RpcCredentials::Thirdweb(auth) => Some(auth),
+    let auth: Option<ThirdwebAuth> = rpc_credentials.map(|creds| match creds {
+        engine_core::chain::RpcCredentials::Thirdweb(auth) => auth,
     });
 
     let chain_id: ChainId = request.encode_options.chain_id.parse().map_err(|_| {
diff --git a/server/src/http/routes/contract_write.rs b/server/src/http/routes/contract_write.rs
index 0567366..4e44b02 100644
--- a/server/src/http/routes/contract_write.rs
+++ b/server/src/http/routes/contract_write.rs
@@ -1,9 +1,11 @@
-// 8:12 PM - COLOCATION: Contract Write Operations
-
-use aide::{axum::IntoApiResponse, transform::TransformOperation};
 use alloy::primitives::{ChainId, U256};
-use axum::{extract::State, http::StatusCode, response::Json};
+use axum::{
+    extract::State,
+    http::StatusCode,
+    response::{IntoResponse, Json},
+};
 use engine_core::{
+    defs::U256Def,
     error::EngineError,
     execution_options::{
         ExecutionOptions, QueuedTransactionsResponse, SendTransactionRequest, WebhookOptions,
@@ -26,7 +28,7 @@ use crate::http::{
 
 // ===== REQUEST/RESPONSE TYPES =====
 
 /// A contract function call with optional ETH value to send
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct ContractWrite {
     /// The contract function call details
@@ -37,15 +39,17 @@ pub struct ContractWrite {
     /// If omitted, no ETH will be sent (value = 0).
     /// Uses U256 to handle large numbers precisely.
    #[schemars(with = "Option<U256Def>")]
+    #[schema(value_type = Option<U256Def>)]
     pub value: Option<U256>,
 }
 
 /// Request to execute write transactions to smart contracts
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct WriteContractRequest {
     /// Execution configuration including chain, account, and transaction options
     pub execution_options: ExecutionOptions,
+
     /// List of contract function calls to execute
     ///
     /// All calls will be executed in a single transaction if possible,
@@ -71,14 +75,34 @@ impl ContractWrite {
 }
 
 // ===== ROUTE HANDLER =====
-
-/// Execute write transactions to smart contracts
+#[utoipa::path(
+    post,
+    operation_id = "writeContract",
+    path = "/write/contract",
+    tag = "Write",
+    request_body(content = WriteContractRequest, description = "Write contract request", content_type = "application/json"),
+    responses(
+        (status = 202, description = "Transaction(s) queued successfully", body = QueuedTransactionsResponse, content_type = "application/json",
+            example = json!({"transactions": [{"id": "1", "batchIndex": 0, "executionParams": {"chainId": 1, "idempotencyKey": "123", "executorType": "ERC4337"}, "transactionParams": [{"to": "0x123", "data": "0x123", "value": "0x123"}]}]})
+        ),
+    ),
+    params(
+        ("x-thirdweb-client-id" = Option<String>, Header, description = "Thirdweb client ID, passed along with the service key"),
+        ("x-thirdweb-service-key" = Option<String>, Header, description = "Thirdweb service key, passed when using the client ID"),
+        ("x-thirdweb-secret-key" = Option<String>, Header, description = "Thirdweb secret key, passed standalone"),
+
+        ("x-vault-access-token" = Option<String>, Header, description = "Vault access token"),
+    )
+)]
+/// Write Contract
+///
+/// Call a contract function with a transaction
 pub async fn write_contract(
     State(state): State<EngineServerState>,
     RpcCredentialsExtractor(rpc_credentials): RpcCredentialsExtractor,
     SigningCredentialsExtractor(signing_credential): SigningCredentialsExtractor,
     Json(request): Json<WriteContractRequest>,
-) -> Result<impl IntoApiResponse, ApiEngineError> {
+) -> Result<impl IntoResponse, ApiEngineError> {
     let auth: Option<ThirdwebAuth> = match &rpc_credentials {
         engine_core::chain::RpcCredentials::Thirdweb(auth) => Some(auth.clone()),
     };
@@ -158,7 +182,7 @@ pub async fn write_contract(
         .execution_router
         .execute(transaction_request, rpc_credentials, signing_credential)
         .await
-        .map_err(|e| ApiEngineError(e))?;
+        .map_err(ApiEngineError)?;
 
     tracing::info!(
         transaction_id = %transaction_id,
@@ -173,40 +197,3 @@ pub async fn write_contract(
         })),
     )
 }
-
-// ===== DOCUMENTATION =====
-
-pub fn write_contract_docs(op: TransformOperation) -> TransformOperation {
-    op.id("writeContract")
-        .description(
-            "Execute write transactions to smart contracts.\n\n\
-             This endpoint executes state-changing contract function calls as blockchain \
-             transactions.
The transactions are queued for execution using the specified \ - execution options (e.g., ERC-4337 account abstraction).\n\n\ - ## Features\n\ - - Execute multiple contract calls in one request\n\ - - Support for sending ETH value with calls\n\ - - Automatic ABI resolution and parameter encoding\n\ - - Integration with account abstraction and signing systems\n\ - - Transaction queuing with status tracking\n\n\ - ## Authentication\n\ - - Required: RPC credentials for transaction submission\n\ - - Required: Vault access token for transaction signing\n\n\ - ## Execution Options\n\ - - Chain ID and transaction configuration\n\ - - Account abstraction settings (ERC-4337)\n\ - - Gas and fee configurations\n\ - - Idempotency key for duplicate prevention", - ) - .summary("Execute contract write functions") - .response_with::<202, Json>, _>(|res| { - res.description("Transaction queued successfully") - }) - .response_with::<400, Json, _>(|res| { - res.description("Bad request - invalid parameters or missing credentials") - }) - .response_with::<500, Json, _>(|res| { - res.description("Internal server error") - }) - .tag("Contract Operations") -} diff --git a/server/src/http/routes/mod.rs b/server/src/http/routes/mod.rs index 2585a09..15d5d2e 100644 --- a/server/src/http/routes/mod.rs +++ b/server/src/http/routes/mod.rs @@ -1,8 +1,3 @@ -// pub mod create_userop; -// pub mod smart_account; - -// pub mod working_read_contract; - pub mod contract_encode; pub mod contract_read; pub mod contract_write; diff --git a/server/src/http/routes/working_read_contract.rs b/server/src/http/routes/working_read_contract.rs deleted file mode 100644 index 113fc77..0000000 --- a/server/src/http/routes/working_read_contract.rs +++ /dev/null @@ -1,406 +0,0 @@ -use aide::{ - OperationIo, - axum::{ApiRouter, IntoApiResponse, routing::post_with}, -}; -use alloy::providers::RootProvider; -use alloy::{ - dyn_abi::FunctionExt, - primitives::{Address, ChainId, address}, -}; -use alloy::{ - providers::Provider, rpc::types::eth::TransactionRequest as AlloyTransactionRequest, sol, - sol_types::SolCall, -}; -use axum::{debug_handler, extract::State, http::StatusCode, response::Json}; -use engine_core::{ - chain::{Chain, ChainService}, - defs::AddressDef, - error::EngineError, -}; -use futures::future::join_all; -use schemars::JsonSchema; -use serde::{Deserialize, Serialize}; -use serde_json::Value as JsonValue; -use thirdweb_core::auth::ThirdwebAuth; - -use crate::http::{ - dyn_contract::{ - ContractCall, ContractOperationResult, PreparedContractCall, dyn_sol_value_to_json, - }, - error::ApiEngineError, - extractors::OptionalRpcCredentialsExtractor, - server::EngineServerState, - types::ErrorResponse, -}; - -// Multicall3 contract ABI for aggregate3 function -sol! 
{ - struct Call3 { - address target; - bool allowFailure; - bytes callData; - } - - struct Result3 { - bool success; - bytes returnData; - } - - function aggregate3(Call3[] calls) external payable returns (Result3[] returnData); -} - -const MULTICALL3_DEFAULT_ADDRESS: Address = address!("0xcA11bde05977b3631167028862bE2a173976CA11"); - -// ===== REQUEST/RESPONSE TYPES ===== - -/// Options for reading from smart contracts -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -#[serde(rename_all = "camelCase")] -pub struct ReadOptions { - /// The blockchain network ID to read from - pub chain_id: String, - /// Address of the Multicall3 contract to use for batching calls - /// - /// Defaults to the standard Multicall3 address: 0xcA11bde05977b3631167028862bE2a173976CA11 - /// which is deployed on most networks - #[serde(default = "default_multicall_address")] - #[schemars(with = "AddressDef")] - pub multicall_address: Address, - /// Optional address to use as the caller for view functions - /// - /// This can be useful for functions that return different values - /// based on the caller's address or permissions - #[serde(skip_serializing_if = "Option::is_none")] - #[schemars(with = "Option")] - pub from: Option
, -} - -fn default_multicall_address() -> Address { - MULTICALL3_DEFAULT_ADDRESS -} - -/// Request to read from multiple smart contracts -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -#[serde(rename_all = "camelCase")] -pub struct ReadRequest { - /// Configuration options for the read operation - pub read_options: ReadOptions, - /// List of contract function calls to execute - /// - /// All calls will be batched together using Multicall3 for efficiency - pub params: Vec, -} - -/// Result of a single contract read operation -/// -/// Each result can either be successful (containing the function return value) -/// or failed (containing detailed error information). The `success` field -/// indicates which case applies. -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -#[serde(untagged)] -pub enum ReadResultItem { - Success(ReadResultSuccessItem), - Failure(ReadResultFailureItem), -} - -/// Successful result from a contract read operation -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct ReadResultSuccessItem { - /// Always true for successful operations - #[schemars(with = "bool")] - pub success: serde_bool::True, - /// The decoded return value from the contract function - /// - /// For functions returning a single value, this will be that value. - /// For functions returning multiple values, this will be an array. - pub result: JsonValue, -} - -/// Failed result from a contract read operation -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct ReadResultFailureItem { - /// Always false for failed operations - #[schemars(with = "bool")] - pub success: serde_bool::False, - /// Detailed error information describing what went wrong - /// - /// This includes the error type, chain information, and specific - /// failure details to help with debugging - pub error: EngineError, -} - -/// Collection of results from multiple contract read operations -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct ReadResults { - /// Array of results, one for each input contract call - /// - /// Results are returned in the same order as the input parameters - pub results: Vec, -} - -/// Response from the contract read endpoint -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] -pub struct ReadResponse { - /// Container for all read operation results - pub result: ReadResults, -} - -// ===== CONVENIENCE CONSTRUCTORS ===== - -impl ReadResultSuccessItem { - /// Create a new successful read result - pub fn new(result: JsonValue) -> Self { - Self { - success: serde_bool::True, - result, - } - } -} - -impl ReadResultFailureItem { - /// Create a new failed read result - pub fn new(error: EngineError) -> Self { - Self { - success: serde_bool::False, - error, - } - } -} - -impl ReadResultItem { - /// Create a successful read result item - pub fn success(result: JsonValue) -> Self { - ReadResultItem::Success(ReadResultSuccessItem::new(result)) - } - - /// Create a failed read result item - pub fn failure(error: EngineError) -> Self { - ReadResultItem::Failure(ReadResultFailureItem::new(error)) - } -} - -// ===== ROUTE HANDLER ===== - -/// Read from multiple smart contracts using multicall -/// -/// This endpoint allows you to efficiently call multiple read-only contract functions -/// in a single request. All calls are batched using the Multicall3 contract for -/// optimal gas usage and performance. 
-/// -/// ## Features -/// - Batch multiple contract calls in one request -/// - Automatic ABI resolution or use provided ABIs -/// - Support for function names, signatures, or full function declarations -/// - Detailed error information for each failed call -/// - Preserves original parameter order in results -/// -/// ## Authentication -/// - Optional: Provide `x-thirdweb-secret-key` or `x-thirdweb-client-id` + `x-thirdweb-service-key` -/// - If provided, will be used for ABI resolution from verified contracts -/// -/// ## Error Handling -/// - Individual call failures don't affect other calls -/// - Each result includes success status and detailed error information -/// - Preparation errors (ABI resolution, parameter encoding) are preserved -/// - Execution errors (multicall failures) are clearly identified -#[debug_handler] -async fn read_contract_handler( - State(state): State, - OptionalRpcCredentialsExtractor(rpc_credentials): OptionalRpcCredentialsExtractor, - Json(request): Json, -) -> Result { - let auth: Option = rpc_credentials.and_then(|creds| match creds { - engine_core::chain::RpcCredentials::Thirdweb(auth) => Some(auth), - }); - - let chain_id: ChainId = request.read_options.chain_id.parse().map_err(|_| { - ApiEngineError(EngineError::ValidationError { - message: "Invalid chain ID".to_string(), - }) - })?; - - // Prepare all contract calls in parallel - let prepare_futures = request.params.iter().map(|contract_read| { - contract_read.prepare_call_with_error_tracking(&state.abi_service, chain_id, auth.clone()) - }); - - let preparation_results: Vec> = - join_all(prepare_futures).await; - - // Separate successful calls for multicall while preserving original order and errors - let (multicall_calls, call_indices): (Vec, Vec) = preparation_results - .iter() - .enumerate() - .filter_map(|(index, result)| match result { - ContractOperationResult::Success(prepared) => { - let call = Call3 { - target: prepared.target, - allowFailure: true, - callData: prepared.call_data.clone(), - }; - Some((call, index)) - } - ContractOperationResult::Failure(_) => None, - }) - .unzip(); - - // Execute multicall if we have any valid calls - let multicall_results = if !multicall_calls.is_empty() { - let chain = state.chains.get_chain(chain_id).map_err(ApiEngineError)?; - match execute_multicall( - &request.read_options.multicall_address, - multicall_calls, - chain.provider(), - chain_id, - ) - .await - { - Ok(results) => Some(results), - Err(e) => { - tracing::error!("Multicall failed: {}", e); - None - } - } - } else { - None - }; - - // Map results back to original order, preserving all errors - let results = map_results_to_original_order( - &preparation_results, - multicall_results.as_deref(), - &call_indices, - ); - - Ok(( - StatusCode::OK, - Json(ReadResponse { - result: ReadResults { results }, - }), - )) -} - -// ===== HELPER FUNCTIONS ===== - -/// Execute the multicall and return results -async fn execute_multicall( - multicall_address: &Address, - multicall_calls: Vec, - provider: &RootProvider, - chain_id: ChainId, -) -> Result, EngineError> { - let multicall_call = aggregate3Call { - calls: multicall_calls, - }; - - let call_request = AlloyTransactionRequest::default() - .to(*multicall_address) - .input(multicall_call.abi_encode().into()); - - let result = provider.call(call_request).await.map_err(|e| { - EngineError::contract_multicall_error(chain_id, format!("Multicall failed: {}", e)) - })?; - - let decoded = aggregate3Call::abi_decode_returns(&result).map_err(|e| { - 
EngineError::contract_multicall_error( - chain_id, - format!("Failed to decode multicall result: {}", e), - ) - })?; - - Ok(decoded) -} - -/// Map multicall results back to the original parameter order, preserving all errors -fn map_results_to_original_order( - preparation_results: &[ContractOperationResult], - multicall_results: Option<&[Result3]>, - call_indices: &[usize], -) -> Vec { - let mut multicall_iter = multicall_results.unwrap_or(&[]).iter(); - - preparation_results - .iter() - .enumerate() - .map(|(original_index, prep_result)| { - match prep_result { - ContractOperationResult::Success(prepared_call) => { - if call_indices.contains(&original_index) { - if let Some(multicall_result) = multicall_iter.next() { - process_multicall_result(multicall_result, prepared_call) - } else { - ReadResultItem::failure(EngineError::contract_multicall_error( - 0, // Chain ID not available here - "Multicall execution failed".to_string(), - )) - } - } else { - ReadResultItem::failure(EngineError::InternalError( - "Internal error: prepared call not found in multicall".to_string(), - )) - } - } - ContractOperationResult::Failure(error) => ReadResultItem::failure(error.clone()), - } - }) - .collect() -} - -/// Process a single multicall result with the prepared call -fn process_multicall_result( - multicall_result: &Result3, - prepared_call: &PreparedContractCall, -) -> ReadResultItem { - if !multicall_result.success { - return ReadResultItem::failure(EngineError::contract_multicall_error( - 0, // Chain ID not available here - "Multicall execution failed".to_string(), - )); - } - - match prepared_call - .function - .abi_decode_output(&multicall_result.returnData) - { - Ok(decoded_values) => { - let result_json = match decoded_values.len() { - 1 => dyn_sol_value_to_json(&decoded_values[0]), - _ => JsonValue::Array(decoded_values.iter().map(dyn_sol_value_to_json).collect()), - }; - ReadResultItem::success(result_json) - } - Err(e) => { - ReadResultItem::failure(EngineError::contract_decoding_error( - Some(prepared_call.target), - 0, // Chain ID not available here - format!("Failed to decode result: {}", e), - )) - } - } -} - -// ===== ROUTER ===== - -pub fn read_routes() -> ApiRouter { - ApiRouter::new().api_route( - "/read/contract", - post_with(read_contract_handler, |op| { - op.id("readContract") - .description("Read from multiple smart contracts using multicall") - .summary("Batch read contract functions") - .response_with::<200, Json, _>(|res| { - res.description("Successfully read contract data") - .example(ReadResponse { - result: ReadResults { results: vec![] }, - }) - }) - .response_with::<400, Json, _>(|res| { - res.description("Bad request - invalid parameters or chain ID") - }) - .response_with::<500, Json, _>(|res| { - res.description("Internal server error") - }) - .tag("Contract Operations") - }), - ) -} diff --git a/server/src/http/server.rs b/server/src/http/server.rs index 1c2905c..7f26a01 100644 --- a/server/src/http/server.rs +++ b/server/src/http/server.rs @@ -1,25 +1,14 @@ use std::sync::Arc; -use aide::{ - axum::{ApiRouter, IntoApiResponse, routing::post_with}, - openapi::{Info, OpenApi}, - scalar::Scalar, -}; -use axum::{Extension, Json, Router, routing::get}; +use axum::{Json, Router, routing::get}; use engine_core::userop::UserOpSigner; use thirdweb_core::abi::ThirdwebAbiService; use tokio::{sync::watch, task::JoinHandle}; +use utoipa::OpenApi; +use utoipa_axum::{router::OpenApiRouter, routes}; +use utoipa_scalar::{Scalar, Servable}; -use crate::{ - 
chains::ThirdwebChainService, - execution_router::ExecutionRouter, - http::routes::{ - contract_encode::{encode_contract, encode_contract_docs}, - contract_read::{read_contract, read_contract_docs}, - contract_write::{write_contract, write_contract_docs}, - transaction_write::{write_transaction, write_transaction_docs}, - }, -}; +use crate::{chains::ThirdwebChainService, execution_router::ExecutionRouter}; use tower_http::{ cors::{Any, CorsLayer}, trace::TraceLayer, @@ -40,31 +29,12 @@ pub struct EngineServer { app: Router, } -// Note that this clones the document on each request. -// To be more efficient, we could wrap it into an Arc, -// or even store it as a serialized string. -async fn serve_api(Extension(api): Extension>) -> impl IntoApiResponse { - Json(api) -} +const SCALAR_HTML: &str = include_str!("../../res/scalar.html"); impl EngineServer { pub async fn new(state: EngineServerState) -> Self { - aide::generate::on_error(|error| { - println!("{error}"); - }); - - aide::generate::extract_schemas(true); - - let mut api = OpenApi { - info: Info { - description: Some("Engine Core API".to_string()), - title: "Engine Core API".to_string(), - ..Info::default() - }, - components: None, - - ..OpenApi::default() - }; + #[derive(OpenApi)] + struct ApiDoc; let cors = CorsLayer::new() .allow_origin(Any) @@ -72,41 +42,56 @@ impl EngineServer { .allow_headers(Any) .allow_credentials(false); - let v1_router = ApiRouter::new() - // generate Scalar API References using the openapi spec route - .api_route( - "/read/contract", - post_with(read_contract, read_contract_docs), - ) - .api_route( - "/encode/contract", - post_with(encode_contract, encode_contract_docs), - ) - .api_route( - "/write/contract", - post_with(write_contract, write_contract_docs), - ) - .api_route( - "/write/transaction", - post_with(write_transaction, write_transaction_docs), - ) - // We'll serve our generated document here. - .route("/api.json", get(serve_api)) - // .route("/smart-account/status", post(smart_account_status)) - // .route("/userop/create", post(create_user_op)) - // .route("/test", post(read_contract)) + let v1_router = OpenApiRouter::new() + .routes(routes!(crate::http::routes::contract_write::write_contract)) .layer(cors) .layer(TraceLayer::new_for_http()) - // Generate the documentation. - .route("/reference", Scalar::new("/v1/api.json").axum_route()) .with_state(state); - let router = ApiRouter::new() - .nest_api_service("/v1", v1_router) - .finish_api(&mut api) - // Expose the documentation to the handlers. - .layer(Extension(Arc::new(api))); - // Add more routes here + let (router, api) = OpenApiRouter::with_openapi(ApiDoc::openapi()) + .nest("/v1", v1_router) + .split_for_parts(); + + let api_clone = api.clone(); + let router = router + .merge(Scalar::with_url("/reference", api).custom_html(SCALAR_HTML)) + .route("/api.json", get(|| async { Json(api_clone) })); + + // let v1_router = ApiRouter::new() + // // generate Scalar API References using the openapi spec route + // .api_route( + // "/read/contract", + // post_with(read_contract, read_contract_docs), + // ) + // .api_route( + // "/encode/contract", + // post_with(encode_contract, encode_contract_docs), + // ) + // .api_route( + // "/write/contract", + // post_with(write_contract, write_contract_docs), + // ) + // .api_route( + // "/write/transaction", + // post_with(write_transaction, write_transaction_docs), + // ) + // // We'll serve our generated document here. 
+ // .route("/api.json", get(serve_api)) + // // .route("/smart-account/status", post(smart_account_status)) + // // .route("/userop/create", post(create_user_op)) + // // .route("/test", post(read_contract)) + // .layer(cors) + // .layer(TraceLayer::new_for_http()) + // // Generate the documentation. + // .route("/reference", Scalar::new("/v1/api.json").axum_route()) + // .with_state(state); + + // let router = ApiRouter::new() + // .nest_api_service("/v1", v1_router) + // .finish_api(&mut api) + // // Expose the documentation to the handlers. + // .layer(Extension(Arc::new(api))); + // // Add more routes here Self { handle: None, diff --git a/thirdweb-core/Cargo.toml b/thirdweb-core/Cargo.toml index 795e644..f47cd7b 100644 --- a/thirdweb-core/Cargo.toml +++ b/thirdweb-core/Cargo.toml @@ -13,3 +13,4 @@ serde_json = "1.0.140" thiserror = "2.0.12" tracing = "0.1.41" url = "2.5.4" +utoipa = "5.4.0" diff --git a/thirdweb-core/src/error.rs b/thirdweb-core/src/error.rs index d59826e..728fe21 100644 --- a/thirdweb-core/src/error.rs +++ b/thirdweb-core/src/error.rs @@ -2,7 +2,7 @@ use schemars::JsonSchema; use serde::{Deserialize, Serialize}; use thiserror::Error; -#[derive(Error, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[derive(Error, Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] pub enum ThirdwebError { #[error("SerializationError: {0}")] SerializationError(ThirdwebSerializationError), @@ -17,7 +17,7 @@ pub enum ThirdwebError { HttpError(#[from] SerializableReqwestError), } -#[derive(Error, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[derive(Error, Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] pub enum ThirdwebSerializationError { #[error("InvalidHeaderValue: {value:?}")] HeaderValue { value: String }, @@ -46,7 +46,7 @@ impl From for ThirdwebError { } } -#[derive(Error, Serialize, Deserialize, Debug, Clone, JsonSchema)] +#[derive(Error, Serialize, Deserialize, Debug, Clone, JsonSchema, utoipa::ToSchema)] pub enum SerializableReqwestError { #[error("builder error")] Builder { From f8b13bb32e978b3fcd5a0c6472ccfee25dbab011 Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Tue, 10 Jun 2025 01:23:41 +0530 Subject: [PATCH 2/7] move all routes --- server/src/http/routes/contract_encode.rs | 72 ++++++++--------- server/src/http/routes/contract_read.rs | 85 +++++++++------------ server/src/http/routes/transaction_write.rs | 59 ++++++-------- server/src/http/server.rs | 9 ++- 4 files changed, 97 insertions(+), 128 deletions(-) diff --git a/server/src/http/routes/contract_encode.rs b/server/src/http/routes/contract_encode.rs index 132de67..adbc04c 100644 --- a/server/src/http/routes/contract_encode.rs +++ b/server/src/http/routes/contract_encode.rs @@ -1,8 +1,11 @@ // 8:12 PM - COLOCATION: Contract Encode Operations -use aide::{axum::IntoApiResponse, transform::TransformOperation}; use alloy::{hex, primitives::ChainId}; -use axum::{extract::State, http::StatusCode, response::Json}; +use axum::{ + extract::State, + http::StatusCode, + response::{IntoResponse, Json}, +}; use engine_core::error::EngineError; use futures::future::join_all; use schemars::JsonSchema; @@ -20,7 +23,7 @@ use crate::http::{ // ===== REQUEST/RESPONSE TYPES ===== /// Options for encoding contract function calls -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(rename_all = "camelCase")] pub struct EncodeOptions { /// The blockchain network ID to encode for @@ -30,7 
+33,7 @@ pub struct EncodeOptions { } /// Request to encode contract function calls -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(rename_all = "camelCase")] pub struct EncodeRequest { /// Configuration options for encoding @@ -45,7 +48,7 @@ pub struct EncodeRequest { /// /// Each result can either be successful (containing the encoded transaction data) /// or failed (containing detailed error information). -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(untagged)] pub enum EncodeResultItem { Success(EncodeResultSuccessItem), @@ -53,11 +56,12 @@ pub enum EncodeResultItem { } /// Successful result from a contract encode operation -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(rename_all = "camelCase")] pub struct EncodeResultSuccessItem { /// Always true for successful operations #[schemars(with = "bool")] + #[schema(value_type = bool)] pub success: serde_bool::True, /// The contract address that would be called pub target: String, @@ -73,24 +77,25 @@ pub struct EncodeResultSuccessItem { } /// Failed result from a contract encode operation -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] pub struct EncodeResultFailureItem { /// Always false for failed operations #[schemars(with = "bool")] + #[schema(value_type = bool)] pub success: serde_bool::False, /// Detailed error information describing what went wrong pub error: EngineError, } /// Collection of results from multiple contract encode operations -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] pub struct EncodeResults { /// Array of results, one for each input contract call pub results: Vec, } /// Response from the contract encode endpoint -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] pub struct EncodeResponse { /// Container for all encode operation results pub result: EncodeResults, @@ -150,12 +155,29 @@ impl EncodeResultItem { // ===== ROUTE HANDLER ===== +#[utoipa::path( + post, + operation_id = "encodeContract", + path = "/encode/contract", + tag = "Read", + request_body(content = EncodeRequest, description = "Encode contract request", content_type = "application/json"), + responses( + (status = 200, description = "Successfully encoded contract calls", body = EncodeResponse, content_type = "application/json"), + ), + params( + ("x-thirdweb-client-id" = Option, Header, description = "Thirdweb client ID, passed along with the service key"), + ("x-thirdweb-service-key" = Option, Header, description = "Thirdweb service key, passed when using the client ID"), + ("x-thirdweb-secret-key" = Option, Header, description = "Thirdweb secret key, passed standalone"), + ) +)] +/// Encode Contract +/// /// Encode contract function calls without execution pub async fn encode_contract( State(state): State, OptionalRpcCredentialsExtractor(rpc_credentials): OptionalRpcCredentialsExtractor, EngineJson(request): EngineJson, -) -> Result { +) -> Result { let auth: Option = rpc_credentials.map(|creds| match creds { engine_core::chain::RpcCredentials::Thirdweb(auth) => auth, }); @@ -203,33 
+225,3 @@ pub async fn encode_contract( }), )) } - -// ===== DOCUMENTATION ===== - -pub fn encode_contract_docs(op: TransformOperation) -> TransformOperation { - op.id("encodeContract") - .description( - "Encode contract function calls without execution.\n\n\ - This endpoint prepares contract function calls and returns the encoded \ - transaction data without executing them. This is useful for:\n\ - - Preparing transaction data for later execution\n\ - - Debugging contract interactions\n\ - - Building complex transaction batches\n\ - - Integration with external signing systems\n\n\ - ## Features\n\ - - Encode multiple contract calls in one request\n\ - - Automatic ABI resolution or use provided ABIs\n\ - - Returns function selector, call data, and metadata\n\ - - Detailed error information for each failed encoding\n\n\ - ## Authentication\n\ - - Optional: Same as read endpoint for ABI resolution", - ) - .summary("Encode contract call data") - .response_with::<200, Json, _>(|res| { - res.description("Successfully encoded contract calls") - }) - .response_with::<400, Json, _>(|res| { - res.description("Bad request - invalid parameters") - }) - .tag("Contract Operations") -} diff --git a/server/src/http/routes/contract_read.rs b/server/src/http/routes/contract_read.rs index 29a4853..fef36e2 100644 --- a/server/src/http/routes/contract_read.rs +++ b/server/src/http/routes/contract_read.rs @@ -1,6 +1,5 @@ // 8:12 PM - COLOCATION: Contract Read Operations -use aide::{axum::IntoApiResponse, transform::TransformOperation}; use alloy::dyn_abi::FunctionExt; use alloy::primitives::{Address, ChainId, address}; use alloy::providers::RootProvider; @@ -8,7 +7,11 @@ use alloy::{ providers::Provider, rpc::types::eth::TransactionRequest as AlloyTransactionRequest, sol, sol_types::SolCall, }; -use axum::{extract::State, http::StatusCode, response::Json}; +use axum::{ + extract::State, + http::StatusCode, + response::{IntoResponse, Json}, +}; use engine_core::{ chain::{Chain, ChainService}, defs::AddressDef, @@ -52,7 +55,7 @@ const MULTICALL3_DEFAULT_ADDRESS: Address = address!("0xcA11bde05977b36311670288 // ===== REQUEST/RESPONSE TYPES ===== /// Options for reading from smart contracts -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(rename_all = "camelCase")] pub struct ReadOptions { /// The blockchain network ID to read from @@ -63,6 +66,7 @@ pub struct ReadOptions { /// which is deployed on most networks #[serde(default = "default_multicall_address")] #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] pub multicall_address: Address, /// Optional address to use as the caller for view functions /// @@ -70,6 +74,7 @@ pub struct ReadOptions { /// based on the caller's address or permissions #[serde(skip_serializing_if = "Option::is_none")] #[schemars(with = "Option")] + #[schema(value_type = Option)] pub from: Option
, } @@ -78,7 +83,7 @@ fn default_multicall_address() -> Address { } /// Request to read from multiple smart contracts -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(rename_all = "camelCase")] pub struct ReadRequest { /// Configuration options for the read operation @@ -94,7 +99,7 @@ pub struct ReadRequest { /// Each result can either be successful (containing the function return value) /// or failed (containing detailed error information). The `success` field /// indicates which case applies. -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] #[serde(untagged)] pub enum ReadResultItem { Success(ReadResultSuccessItem), @@ -102,10 +107,11 @@ pub enum ReadResultItem { } /// Successful result from a contract read operation -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] pub struct ReadResultSuccessItem { /// Always true for successful operations #[schemars(with = "bool")] + #[schema(value_type = bool)] pub success: serde_bool::True, /// The decoded return value from the contract function /// @@ -115,10 +121,11 @@ pub struct ReadResultSuccessItem { } /// Failed result from a contract read operation -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] pub struct ReadResultFailureItem { /// Always false for failed operations #[schemars(with = "bool")] + #[schema(value_type = bool)] pub success: serde_bool::False, /// Detailed error information describing what went wrong /// @@ -128,7 +135,7 @@ pub struct ReadResultFailureItem { } /// Collection of results from multiple contract read operations -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] pub struct ReadResults { /// Array of results, one for each input contract call /// @@ -137,7 +144,7 @@ pub struct ReadResults { } /// Response from the contract read endpoint -#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema)] +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] pub struct ReadResponse { /// Container for all read operation results pub result: ReadResults, @@ -179,12 +186,29 @@ impl ReadResultItem { // ===== ROUTE HANDLER ===== +#[utoipa::path( + post, + operation_id = "readContract", + path = "/read/contract", + tag = "Read", + request_body(content = ReadRequest, description = "Read contract request", content_type = "application/json"), + responses( + (status = 200, description = "Successfully read contract data", body = ReadResponse, content_type = "application/json"), + ), + params( + ("x-thirdweb-client-id" = Option, Header, description = "Thirdweb client ID, passed along with the service key"), + ("x-thirdweb-service-key" = Option, Header, description = "Thirdweb service key, passed when using the client ID"), + ("x-thirdweb-secret-key" = Option, Header, description = "Thirdweb secret key, passed standalone"), + ) +)] +/// Read Contract +/// /// Read from multiple smart contracts using multicall pub async fn read_contract( State(state): State, OptionalRpcCredentialsExtractor(rpc_credentials): OptionalRpcCredentialsExtractor, EngineJson(request): EngineJson, -) -> Result { +) -> Result { let auth: Option = rpc_credentials.and_then(|creds| match creds { 
engine_core::chain::RpcCredentials::Thirdweb(auth) => Some(auth), }); @@ -355,44 +379,3 @@ fn process_multicall_result( } } -// ===== DOCUMENTATION ===== - -pub fn read_contract_docs(op: TransformOperation) -> TransformOperation { - op.id("readContract") - .description( - "Read from multiple smart contracts using multicall.\n\n\ - This endpoint allows you to efficiently call multiple read-only contract functions \ - in a single request. All calls are batched using the Multicall3 contract for \ - optimal gas usage and performance.\n\n\ - ## Features\n\ - - Batch multiple contract calls in one request\n\ - - Automatic ABI resolution or use provided ABIs\n\ - - Support for function names, signatures, or full function declarations\n\ - - Detailed error information for each failed call\n\ - - Preserves original parameter order in results\n\n\ - ## Authentication\n\ - - Optional: Provide `x-thirdweb-secret-key` or `x-thirdweb-client-id` + `x-thirdweb-service-key`\n\ - - If provided, will be used for ABI resolution from verified contracts\n\n\ - ## Error Handling\n\ - - Individual call failures don't affect other calls\n\ - - Each result includes success status and detailed error information\n\ - - Preparation errors (ABI resolution, parameter encoding) are preserved\n\ - - Execution errors (multicall failures) are clearly identified" - ) - .summary("Batch read contract functions") - .response_with::<200, Json, _>(|res| { - res.description("Successfully read contract data") - .example(ReadResponse { - result: ReadResults { - results: vec![], - }, - }) - }) - .response_with::<400, Json, _>(|res| { - res.description("Bad request - invalid parameters or chain ID") - }) - .response_with::<500, Json, _>(|res| { - res.description("Internal server error") - }) - .tag("Contract Operations") -} diff --git a/server/src/http/routes/transaction_write.rs b/server/src/http/routes/transaction_write.rs index 82b7924..ba289b0 100644 --- a/server/src/http/routes/transaction_write.rs +++ b/server/src/http/routes/transaction_write.rs @@ -1,7 +1,9 @@ // 8:12 PM - COLOCATION: Transaction Write Operations -use aide::{axum::IntoApiResponse, transform::TransformOperation}; -use axum::{debug_handler, extract::State, http::StatusCode, response::Json}; +use axum::{ + debug_handler, extract::State, http::StatusCode, + response::{IntoResponse, Json}, +}; use engine_core::execution_options::{QueuedTransactionsResponse, SendTransactionRequest}; use crate::http::{ @@ -13,6 +15,24 @@ use crate::http::{ // ===== ROUTE HANDLER ===== +#[utoipa::path( + post, + operation_id = "writeTransaction", + path = "/write/transaction", + tag = "Write", + request_body(content = SendTransactionRequest, description = "Transaction request", content_type = "application/json"), + responses( + (status = 202, description = "Transaction queued successfully", body = QueuedTransactionsResponse, content_type = "application/json"), + ), + params( + ("x-thirdweb-client-id" = Option, Header, description = "Thirdweb client ID, passed along with the service key"), + ("x-thirdweb-service-key" = Option, Header, description = "Thirdweb service key, passed when using the client ID"), + ("x-thirdweb-secret-key" = Option, Header, description = "Thirdweb secret key, passed standalone"), + ("x-vault-access-token" = Option, Header, description = "Vault access token"), + ) +)] +/// Write Transaction +/// /// Execute raw transactions #[debug_handler] pub async fn write_transaction( @@ -20,7 +40,7 @@ pub async fn write_transaction( 
    RpcCredentialsExtractor(rpc_credentials): RpcCredentialsExtractor,
     SigningCredentialsExtractor(signing_credential): SigningCredentialsExtractor,
     EngineJson(request): EngineJson<SendTransactionRequest>,
-) -> Result<impl IntoApiResponse, ApiEngineError> {
+) -> Result<impl IntoResponse, ApiEngineError> {
     let transaction_id = request.execution_options.transaction_id().to_string();
     let executor_type = request.execution_options.executor_type();
@@ -51,36 +71,3 @@ pub async fn write_transaction(
     ))
 }
-
-// ===== DOCUMENTATION =====
-
-pub fn write_transaction_docs(op: TransformOperation) -> TransformOperation {
-    op.id("writeTransaction")
-        .description(
-            "Execute raw transactions.\n\n\
-            This endpoint executes pre-prepared transactions without additional processing. \
-            Use this for raw transaction data that has already been encoded.\n\n\
-            ## Features\n\
-            - Execute multiple raw transactions\n\
-            - Support for any transaction type\n\
-            - Integration with account abstraction\n\
-            - Transaction queuing and status tracking\n\n\
-            ## Authentication\n\
-            - Required: RPC credentials and vault access token\n\n\
-            ## Request Format\n\
-            Uses the standard `SendTransactionRequest` from engine_core which includes:\n\
-            - Execution options (chain, account abstraction, etc.)\n\
-            - Array of `InnerTransaction` objects with `to`, `data`, and `value`\n\
-            - Optional webhook configuration",
-        )
-        .summary("Execute prepared transactions")
-        .response_with::<202, Json>, _>(|res| {
-            res.description("Transaction queued successfully")
-        })
-        .response_with::<400, Json, _>(|res| {
-            res.description("Bad request - invalid parameters or missing credentials")
-        })
-        .response_with::<500, Json, _>(|res| {
-            res.description("Internal server error")
-        })
-        .tag("Transaction Operations")
-}
diff --git a/server/src/http/server.rs b/server/src/http/server.rs
index 7f26a01..0865143 100644
--- a/server/src/http/server.rs
+++ b/server/src/http/server.rs
@@ -43,7 +43,14 @@ impl EngineServer {
             .allow_credentials(false);
 
         let v1_router = OpenApiRouter::new()
-            .routes(routes!(crate::http::routes::contract_write::write_contract))
+            .routes(routes!(crate::http::routes::contract_write::write_contract,))
+            .routes(routes!(
+                crate::http::routes::contract_encode::encode_contract
+            ))
+            .routes(routes!(crate::http::routes::contract_read::read_contract,))
+            .routes(routes!(
+                crate::http::routes::transaction_write::write_transaction
+            ))
             .layer(cors)
             .layer(TraceLayer::new_for_http())
             .with_state(state);

From 1ced39d397f3859ef43396e88081b6fd99a1ee32 Mon Sep 17 00:00:00 2001
From: Prithvish Baidya
Date: Tue, 10 Jun 2025 05:52:07 +0530
Subject: [PATCH 3/7] twmq improvements, transaction cancellation

---
 executors/src/external_bundler/confirm.rs |  87 ++-
 executors/src/external_bundler/send.rs    |  74 ++-
 executors/src/lib.rs                      |   1 +
 executors/src/transaction_registry.rs     |  68 +++
 executors/src/webhook/envelope.rs         |  30 +-
 executors/src/webhook/mod.rs              |  83 +--
 server/src/execution_router/mod.rs        |  11 +
 server/src/http/routes/mod.rs             |   1 +
 server/src/http/routes/transaction.rs     | 155 +++++
 server/src/http/server.rs                 |   6 +-
 server/src/main.rs                        |   2 +
 server/src/queue/manager.rs               |  11 +
 twmq/src/job.rs                           |  26 +
 twmq/src/lib.rs                           | 700 +++++++++++++++-------
 twmq/tests/basic_hook.rs                  |  25 +-
 twmq/tests/delay.rs                       |  20 +-
 twmq/tests/fixtures.rs                    |  22 +-
 twmq/tests/lease_expiry.rs                |  18 +-
 twmq/tests/nack.rs                        |  33 +-
 19 files changed, 1006 insertions(+), 367 deletions(-)
 create mode 100644 executors/src/transaction_registry.rs
 create mode 100644 server/src/http/routes/transaction.rs

diff --git a/executors/src/external_bundler/confirm.rs
b/executors/src/external_bundler/confirm.rs index 5e56c67..cb4c628 100644 --- a/executors/src/external_bundler/confirm.rs +++ b/executors/src/external_bundler/confirm.rs @@ -8,15 +8,18 @@ use engine_core::{ use serde::{Deserialize, Serialize}; use std::{sync::Arc, time::Duration}; use twmq::{ - DurableExecution, FailHookData, NackHookData, Queue, SuccessHookData, + DurableExecution, FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, error::TwmqError, hooks::TransactionContext, - job::{Job, JobResult, RequeuePosition, ToJobResult}, + job::{BorrowedJob, JobResult, RequeuePosition, ToJobResult}, }; -use crate::webhook::{ - WebhookJobHandler, - envelope::{ExecutorStage, HasWebhookOptions, WebhookCapable}, +use crate::{ + webhook::{ + WebhookJobHandler, + envelope::{ExecutorStage, HasWebhookOptions, WebhookCapable}, + }, + transaction_registry::TransactionRegistry, }; use super::deployment::RedisDeploymentLock; @@ -66,6 +69,9 @@ pub enum UserOpConfirmationError { #[error("Internal error: {message}")] InternalError { message: String }, + + #[error("Transaction cancelled by user")] + UserCancelled, } impl From for UserOpConfirmationError { @@ -76,6 +82,12 @@ impl From for UserOpConfirmationError { } } +impl UserCancellable for UserOpConfirmationError { + fn user_cancelled() -> Self { + UserOpConfirmationError::UserCancelled + } +} + // --- Handler --- pub struct UserOpConfirmationHandler where @@ -84,6 +96,7 @@ where pub chain_service: Arc, pub deployment_lock: RedisDeploymentLock, pub webhook_queue: Arc>, + pub transaction_registry: Arc, pub max_confirmation_attempts: u32, pub confirmation_retry_delay: Duration, } @@ -96,11 +109,13 @@ where chain_service: Arc, deployment_lock: RedisDeploymentLock, webhook_queue: Arc>, + transaction_registry: Arc, ) -> Self { Self { chain_service, deployment_lock, webhook_queue, + transaction_registry, max_confirmation_attempts: 20, // ~5 minutes with 15 second delays confirmation_retry_delay: Duration::from_secs(15), } @@ -121,9 +136,9 @@ where type ErrorData = UserOpConfirmationError; type JobData = UserOpConfirmationJobData; - #[tracing::instrument(skip(self, job), fields(transaction_id = job.id, stage = Self::stage_name(), executor = Self::executor_name()))] - async fn process(&self, job: &Job) -> JobResult { - let job_data = &job.data; + #[tracing::instrument(skip(self, job), fields(transaction_id = job.job.id, stage = Self::stage_name(), executor = Self::executor_name()))] + async fn process(&self, job: &BorrowedJob) -> JobResult { + let job_data = &job.job.data; // 1. 
Get Chain let chain = self @@ -136,7 +151,7 @@ where .map_err_fail()?; let chain = chain.with_new_default_headers( - job.data + job.job.data .rpc_credentials .to_header_map() .map_err(|e| UserOpConfirmationError::InternalError { @@ -161,17 +176,17 @@ where Some(receipt) => receipt, None => { // Receipt not available and max attempts reached - permanent failure - if job.attempts >= self.max_confirmation_attempts { + if job.job.attempts >= self.max_confirmation_attempts { return Err(UserOpConfirmationError::ReceiptNotAvailable { user_op_hash: job_data.user_op_hash.clone(), - attempt_number: job.attempts, + attempt_number: job.job.attempts, }) .map_err_fail(); // FAIL - triggers on_fail hook which will release lock } return Err(UserOpConfirmationError::ReceiptNotAvailable { user_op_hash: job_data.user_op_hash.clone(), - attempt_number: job.attempts, + attempt_number: job.job.attempts, }) .map_err_nack(Some(self.confirmation_retry_delay), RequeuePosition::Last); // NACK - triggers on_nack hook which keeps lock for retry @@ -197,23 +212,29 @@ where async fn on_success( &self, - job: &Job, + job: &BorrowedJob, success_data: SuccessHookData<'_, Self::Output>, tx: &mut TransactionContext<'_>, ) { + // Remove transaction from registry since confirmation is complete + self.transaction_registry.add_remove_command( + tx.pipeline(), + &job.job.data.transaction_id, + ); + // Atomic cleanup: release lock + update cache if lock was acquired - if job.data.deployment_lock_acquired { + if job.job.data.deployment_lock_acquired { self.deployment_lock .release_lock_and_update_cache_with_pipeline( tx.pipeline(), - job.data.chain_id, - &job.data.account_address, + job.job.data.chain_id, + &job.job.data.account_address, true, // is_deployed = true ); tracing::info!( - transaction_id = %job.data.transaction_id, - account_address = ?job.data.account_address, + transaction_id = %job.job.data.transaction_id, + account_address = ?job.job.data.account_address, "Added atomic lock release and cache update to transaction pipeline" ); } @@ -221,7 +242,7 @@ where // Queue success webhook if let Err(e) = self.queue_success_webhook(job, success_data, tx) { tracing::error!( - transaction_id = %job.data.transaction_id, + transaction_id = %job.job.data.transaction_id, error = %e, "Failed to queue success webhook" ); @@ -230,7 +251,7 @@ where async fn on_nack( &self, - job: &Job, + job: &BorrowedJob, nack_data: NackHookData<'_, Self::ErrorData>, tx: &mut TransactionContext<'_>, ) { @@ -238,31 +259,37 @@ where // Just queue webhook with current status if let Err(e) = self.queue_nack_webhook(job, nack_data, tx) { tracing::error!( - transaction_id = %job.data.transaction_id, + transaction_id = %job.job.data.transaction_id, error = %e, "Failed to queue nack webhook" ); } tracing::debug!( - transaction_id = %job.data.transaction_id, - attempt = %job.attempts, + transaction_id = %job.job.data.transaction_id, + attempt = %job.job.attempts, "Confirmation job NACKed, retaining lock for retry" ); } async fn on_fail( &self, - job: &Job, + job: &BorrowedJob, fail_data: FailHookData<'_, Self::ErrorData>, tx: &mut TransactionContext<'_>, ) { + // Remove transaction from registry since it failed permanently + self.transaction_registry.add_remove_command( + tx.pipeline(), + &job.job.data.transaction_id, + ); + // Always release lock on permanent failure - if job.data.deployment_lock_acquired { + if job.job.data.deployment_lock_acquired { self.deployment_lock.release_lock_with_pipeline( tx.pipeline(), - job.data.chain_id, - 
&job.data.account_address, + job.job.data.chain_id, + &job.job.data.account_address, ); let failure_reason = match fail_data.error { @@ -273,8 +300,8 @@ where }; tracing::error!( - transaction_id = %job.data.transaction_id, - account_address = ?job.data.account_address, + transaction_id = %job.job.data.transaction_id, + account_address = ?job.job.data.account_address, reason = %failure_reason, "Added lock release to transaction pipeline due to permanent failure" ); @@ -283,7 +310,7 @@ where // Queue failure webhook if let Err(e) = self.queue_fail_webhook(job, fail_data, tx) { tracing::error!( - transaction_id = %job.data.transaction_id, + transaction_id = %job.job.data.transaction_id, error = %e, "Failed to queue fail webhook" ); diff --git a/executors/src/external_bundler/send.rs b/executors/src/external_bundler/send.rs index 142ec80..90f8de7 100644 --- a/executors/src/external_bundler/send.rs +++ b/executors/src/external_bundler/send.rs @@ -21,15 +21,18 @@ use engine_core::{ use serde::{Deserialize, Serialize}; use std::{sync::Arc, time::Duration}; use twmq::{ - FailHookData, NackHookData, Queue, SuccessHookData, + FailHookData, NackHookData, Queue, SuccessHookData, UserCancellable, error::TwmqError, hooks::TransactionContext, - job::{DelayOptions, Job, JobResult, RequeuePosition, ToJobError, ToJobResult}, + job::{BorrowedJob, DelayOptions, JobResult, RequeuePosition, ToJobError, ToJobResult}, }; -use crate::webhook::{ - WebhookJobHandler, - envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, +use crate::{ + webhook::{ + WebhookJobHandler, + envelope::{ExecutorStage, HasTransactionMetadata, HasWebhookOptions, WebhookCapable}, + }, + transaction_registry::TransactionRegistry, }; use super::{ @@ -119,6 +122,9 @@ pub enum ExternalBundlerSendError { #[error("Internal error: {message}")] InternalError { message: String }, + + #[error("Transaction cancelled by user")] + UserCancelled, } impl From for ExternalBundlerSendError { @@ -129,6 +135,12 @@ impl From for ExternalBundlerSendError { } } +impl UserCancellable for ExternalBundlerSendError { + fn user_cancelled() -> Self { + ExternalBundlerSendError::UserCancelled + } +} + impl ExternalBundlerSendError { /// Returns the account address if a lock was acquired while processing /// Otherwise returns None @@ -172,6 +184,7 @@ where pub deployment_lock: RedisDeploymentLock, pub webhook_queue: Arc>, pub confirm_queue: Arc>>, + pub transaction_registry: Arc, } impl ExternalBundlerSendHandler @@ -219,12 +232,12 @@ where type ErrorData = ExternalBundlerSendError; type JobData = ExternalBundlerSendJobData; - #[tracing::instrument(skip(self, job), fields(transaction_id = job.id, stage = Self::stage_name(), executor = Self::executor_name()))] + #[tracing::instrument(skip(self, job), fields(transaction_id = job.job.id, stage = Self::stage_name(), executor = Self::executor_name()))] async fn process( &self, - job: &twmq::job::Job, + job: &BorrowedJob, ) -> JobResult { - let job_data = &job.data; + let job_data = &job.job.data; // 1. 
Get Chain let chain = self @@ -237,7 +250,7 @@ where .map_err_fail()?; let chain_auth_headers = job - .data + .job.data .rpc_credentials .to_header_map() .map_err(|e| ExternalBundlerSendError::InvalidRpcCredentials { @@ -427,7 +440,7 @@ where ); // if is_bundler_error_retryable(&e) { - if job.attempts < 100 { + if job.job.attempts < 100 { mapped_error.nack(Some(Duration::from_secs(10)), RequeuePosition::Last) } else { mapped_error.fail() @@ -451,24 +464,31 @@ where async fn on_success( &self, - job: &Job, + job: &BorrowedJob, success_data: SuccessHookData<'_, ExternalBundlerSendResult>, tx: &mut TransactionContext<'_>, ) { + // Update transaction registry: move from send queue to confirm queue + self.transaction_registry.add_set_command( + tx.pipeline(), + &job.job.data.transaction_id, + "userop_confirm", + ); + let confirmation_job = self .confirm_queue .clone() .job(UserOpConfirmationJobData { account_address: success_data.result.account_address, - chain_id: job.data.chain_id, + chain_id: job.job.data.chain_id, nonce: success_data.result.nonce, user_op_hash: success_data.result.user_op_hash.clone(), - transaction_id: job.data.transaction_id.clone(), - webhook_options: job.data.webhook_options().clone(), - rpc_credentials: job.data.rpc_credentials.clone(), + transaction_id: job.job.data.transaction_id.clone(), + webhook_options: job.job.data.webhook_options().clone(), + rpc_credentials: job.job.data.rpc_credentials.clone(), deployment_lock_acquired: success_data.result.deployment_lock_acquired, }) - .with_id(job.transaction_id()) + .with_id(job.job.transaction_id()) .with_delay(DelayOptions { delay: Duration::from_secs(3), position: RequeuePosition::Last, @@ -476,7 +496,7 @@ where if let Err(e) = tx.queue_job(confirmation_job) { tracing::error!( - transaction_id = %job.data.transaction_id, + transaction_id = %job.job.data.transaction_id, error = %e, "Failed to queue confirmation job" ); @@ -484,7 +504,7 @@ where if let Err(e) = self.queue_success_webhook(job, success_data, tx) { tracing::error!( - transaction_id = %job.data.transaction_id, + transaction_id = %job.job.data.transaction_id, error = %e, "Failed to queue success webhook" ); @@ -493,21 +513,21 @@ where async fn on_nack( &self, - job: &Job, + job: &BorrowedJob, nack_data: NackHookData<'_, ExternalBundlerSendError>, tx: &mut TransactionContext<'_>, ) { if let Some(account_address) = nack_data.error.did_acquire_lock() { self.deployment_lock.release_lock_with_pipeline( tx.pipeline(), - job.data.chain_id, + job.job.data.chain_id, &account_address, ); } if let Err(e) = self.queue_nack_webhook(job, nack_data, tx) { tracing::error!( - transaction_id = %job.data.transaction_id, + transaction_id = %job.job.data.transaction_id, error = %e, "Failed to queue nack webhook" ); @@ -516,21 +536,27 @@ where async fn on_fail( &self, - job: &Job, + job: &BorrowedJob, fail_data: FailHookData<'_, ExternalBundlerSendError>, tx: &mut TransactionContext<'_>, ) { + // Remove transaction from registry since it failed permanently + self.transaction_registry.add_remove_command( + tx.pipeline(), + &job.job.data.transaction_id, + ); + if let Some(account_address) = fail_data.error.did_acquire_lock() { self.deployment_lock.release_lock_with_pipeline( tx.pipeline(), - job.data.chain_id, + job.job.data.chain_id, &account_address, ); } if let Err(e) = self.queue_fail_webhook(job, fail_data, tx) { tracing::error!( - transaction_id = %job.data.transaction_id, + transaction_id = %job.job.data.transaction_id, error = %e, "Failed to queue fail webhook" ); diff --git 
a/executors/src/lib.rs b/executors/src/lib.rs index 4157145..0e9dd3e 100644 --- a/executors/src/lib.rs +++ b/executors/src/lib.rs @@ -1,2 +1,3 @@ pub mod external_bundler; pub mod webhook; +pub mod transaction_registry; diff --git a/executors/src/transaction_registry.rs b/executors/src/transaction_registry.rs new file mode 100644 index 0000000..dbee92a --- /dev/null +++ b/executors/src/transaction_registry.rs @@ -0,0 +1,68 @@ +use twmq::redis::{AsyncCommands, aio::ConnectionManager, Pipeline}; +use engine_core::error::EngineError; +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum TransactionRegistryError { + #[error("Redis error: {0}")] + RedisError(#[from] twmq::redis::RedisError), + + #[error("Transaction not found: {transaction_id}")] + TransactionNotFound { transaction_id: String }, +} + +impl From for EngineError { + fn from(err: TransactionRegistryError) -> Self { + EngineError::InternalError(err.to_string()) + } +} + +pub struct TransactionRegistry { + redis: ConnectionManager, + namespace: Option, +} + +impl TransactionRegistry { + pub fn new(redis: ConnectionManager, namespace: Option) -> Self { + Self { redis, namespace } + } + + fn registry_key(&self) -> String { + match &self.namespace { + Some(ns) => format!("{}:tx_registry", ns), + None => "tx_registry".to_string(), + } + } + + pub async fn get_transaction_queue(&self, transaction_id: &str) -> Result, TransactionRegistryError> { + let mut conn = self.redis.clone(); + let queue_name: Option = conn.hget(self.registry_key(), transaction_id).await?; + Ok(queue_name) + } + + pub async fn set_transaction_queue( + &self, + transaction_id: &str, + queue_name: &str, + ) -> Result<(), TransactionRegistryError> { + let mut conn = self.redis.clone(); + let _: () = conn.hset(self.registry_key(), transaction_id, queue_name).await?; + Ok(()) + } + + pub async fn remove_transaction(&self, transaction_id: &str) -> Result<(), TransactionRegistryError> { + let mut conn = self.redis.clone(); + let _: u32 = conn.hdel(self.registry_key(), transaction_id).await?; + Ok(()) + } + + /// Add registry update commands to a Redis pipeline for atomic execution + pub fn add_set_command(&self, pipeline: &mut Pipeline, transaction_id: &str, queue_name: &str) { + pipeline.hset(self.registry_key(), transaction_id, queue_name); + } + + /// Add registry removal commands to a Redis pipeline for atomic execution + pub fn add_remove_command(&self, pipeline: &mut Pipeline, transaction_id: &str) { + pipeline.hdel(self.registry_key(), transaction_id); + } +} \ No newline at end of file diff --git a/executors/src/webhook/envelope.rs b/executors/src/webhook/envelope.rs index 9fc18d8..1887615 100644 --- a/executors/src/webhook/envelope.rs +++ b/executors/src/webhook/envelope.rs @@ -6,7 +6,7 @@ use twmq::{ DurableExecution, FailHookData, NackHookData, Queue, SuccessHookData, error::TwmqError, hooks::TransactionContext, - job::{Job, RequeuePosition}, + job::{BorrowedJob, Job, RequeuePosition}, }; use uuid::Uuid; @@ -94,7 +94,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { fn queue_success_webhook( &self, - job: &Job, + job: &BorrowedJob, success_data: SuccessHookData<'_, Self::Output>, tx: &mut TransactionContext<'_>, ) -> Result<(), TwmqError> @@ -102,7 +102,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { Self::JobData: HasWebhookOptions, Self::Output: Serialize + Clone, { - let webhook_options = match job.data.webhook_options() { + let webhook_options = match job.job.data.webhook_options() { Some(w) => w, None => return 
Ok(()), // No webhook configured, skip silently }; @@ -110,7 +110,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { for w in webhook_options { let envelope = WebhookNotificationEnvelope { notification_id: Uuid::new_v4().to_string(), - transaction_id: job.transaction_id(), + transaction_id: job.job.transaction_id(), timestamp: chrono::Utc::now().timestamp().try_into().unwrap(), executor_name: Self::executor_name().to_string(), stage_name: Self::stage_name().to_string(), @@ -129,7 +129,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { fn queue_nack_webhook( &self, - job: &Job, + job: &BorrowedJob, nack_data: NackHookData<'_, Self::ErrorData>, tx: &mut TransactionContext<'_>, ) -> Result<(), TwmqError> @@ -137,7 +137,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { Self::JobData: HasWebhookOptions, Self::ErrorData: Serialize + Clone, { - let webhook_options = match job.data.webhook_options() { + let webhook_options = match job.job.data.webhook_options() { Some(w) => w, None => return Ok(()), // No webhook configured, skip silently }; @@ -147,7 +147,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { let envelope = WebhookNotificationEnvelope { notification_id: Uuid::new_v4().to_string(), - transaction_id: job.transaction_id(), + transaction_id: job.job.transaction_id(), timestamp: chrono::Utc::now().timestamp().try_into().unwrap(), executor_name: Self::executor_name().to_string(), stage_name: Self::stage_name().to_string(), @@ -156,7 +156,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { error: nack_data.error.clone(), delay_ms: nack_data.delay.map(|d| d.as_millis() as u64), position: nack_data.position, - attempt_number: job.attempts, + attempt_number: job.job.attempts, max_attempts: None, // TODO: Get from job config if available next_retry_at, }, @@ -170,7 +170,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { fn queue_fail_webhook( &self, - job: &Job, + job: &BorrowedJob, fail_data: FailHookData<'_, Self::ErrorData>, tx: &mut TransactionContext<'_>, ) -> Result<(), TwmqError> @@ -178,21 +178,21 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { Self::JobData: HasWebhookOptions, Self::ErrorData: Serialize + Clone, { - let webhook_options = match job.data.webhook_options() { + let webhook_options = match job.job.data.webhook_options() { Some(w) => w, None => return Ok(()), // No webhook configured, skip silently }; for w in webhook_options { let envelope = WebhookNotificationEnvelope { notification_id: Uuid::new_v4().to_string(), - transaction_id: job.transaction_id(), + transaction_id: job.job.transaction_id(), timestamp: chrono::Utc::now().timestamp().try_into().unwrap(), executor_name: Self::executor_name().to_string(), stage_name: Self::stage_name().to_string(), event_type: StageEvent::Failure, payload: SerializableFailData { error: fail_data.error.clone(), - final_attempt_number: job.attempts, + final_attempt_number: job.job.attempts, }, delivery_target_url: Some(w.url.clone()), }; @@ -207,7 +207,7 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { &self, envelope: WebhookNotificationEnvelope, webhook_options: WebhookOptions, - job: &Job, + job: &BorrowedJob, tx: &mut TransactionContext<'_>, ) -> Result<(), TwmqError> where @@ -234,14 +234,14 @@ pub trait WebhookCapable: DurableExecution + ExecutorStage { let mut webhook_job = self.webhook_queue().clone().job(webhook_payload); webhook_job.options.id = format!( "{}_{}_webhook", - job.transaction_id(), + 
job.job.transaction_id(), envelope.notification_id ); tx.queue_job(webhook_job)?; tracing::info!( - transaction_id = %job.transaction_id(), + transaction_id = %job.job.transaction_id(), executor = %Self::executor_name(), stage = %Self::stage_name(), event = ?envelope.event_type, diff --git a/executors/src/webhook/mod.rs b/executors/src/webhook/mod.rs index a8ae03e..3408d26 100644 --- a/executors/src/webhook/mod.rs +++ b/executors/src/webhook/mod.rs @@ -8,8 +8,8 @@ use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; use serde::{Deserialize, Serialize}; use twmq::error::TwmqError; use twmq::hooks::TransactionContext; -use twmq::job::{Job, JobError, JobResult, RequeuePosition, ToJobResult}; -use twmq::{DurableExecution, FailHookData, NackHookData, SuccessHookData}; +use twmq::job::{BorrowedJob, JobError, JobResult, RequeuePosition, ToJobResult}; +use twmq::{DurableExecution, FailHookData, NackHookData, SuccessHookData, UserCancellable}; pub mod envelope; @@ -89,6 +89,9 @@ pub enum WebhookError { #[error("Internal queue error: {0}")] InternalQueueError(String), + + #[error("Transaction cancelled by user")] + UserCancelled, } impl From for WebhookError { @@ -97,15 +100,21 @@ impl From for WebhookError { } } +impl UserCancellable for WebhookError { + fn user_cancelled() -> Self { + WebhookError::UserCancelled + } +} + // --- DurableExecution Implementation --- impl DurableExecution for WebhookJobHandler { type Output = WebhookJobOutput; type ErrorData = WebhookError; type JobData = WebhookJobPayload; - #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.id))] - async fn process(&self, job: &Job) -> JobResult { - let payload = &job.data; + #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.job.id))] + async fn process(&self, job: &BorrowedJob) -> JobResult { + let payload = &job.job.data; let mut request_headers = HeaderMap::new(); // Set default Content-Type if body is present, can be overridden by payload.headers @@ -221,10 +230,10 @@ impl DurableExecution for WebhookJobHandler { .body(payload.body.clone()); tracing::debug!( - job_id = %job.id, + job_id = %job.job.id, url = %payload.url, method = %http_method_str, - attempt = %job.attempts, + attempt = %job.job.attempts, "Sending webhook request" ); @@ -243,13 +252,13 @@ impl DurableExecution for WebhookJobHandler { )); return Err(err).map_err_fail(); } - tracing::warn!(job_id = %job.id, "Failed to read response body for error status {}: {}", status, e); + tracing::warn!(job_id = %job.job.id, "Failed to read response body for error status {}: {}", status, e); None } }; if status.is_success() { - tracing::info!(job_id = %job.id, status = %status, "Webhook delivered successfully"); + tracing::info!(job_id = %job.job.id, status = %status, "Webhook delivered successfully"); Ok(WebhookJobOutput { status_code: status.as_u16(), response_body: response_body_text, @@ -271,20 +280,20 @@ impl DurableExecution for WebhookJobHandler { }; if status.is_server_error() || status.as_u16() == 429 { - if job.attempts < self.retry_config.max_attempts { + if job.job.attempts < self.retry_config.max_attempts { let delay_ms = self.retry_config.initial_delay_ms as f64 * self .retry_config .backoff_factor - .powi(job.attempts as i32 - 1); // Use current attempts for backoff + .powi(job.job.attempts as i32 - 1); // Use current attempts for backoff let delay_ms = (delay_ms.min(self.retry_config.max_delay_ms as f64)) as u64; let delay = Duration::from_millis(delay_ms); tracing::warn!( - job_id = %job.id, + job_id = %job.job.id, 
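+                // Worked example of the backoff computed above, with hypothetical
+                // retry_config values (initial_delay_ms = 1_000, backoff_factor = 2.0,
+                // max_delay_ms = 60_000): attempt 1 -> 1s, attempt 3 -> 4s,
+                // attempt 7 -> 64s, capped to 60s by max_delay_ms.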
status = %status, - attempt = %job.attempts, + attempt = %job.job.attempts, max_attempts = %self.retry_config.max_attempts, delay_ms = %delay.as_millis(), "Webhook failed with retryable status, NACKing." @@ -296,16 +305,16 @@ impl DurableExecution for WebhookJobHandler { }) } else { tracing::error!( - job_id = %job.id, + job_id = %job.job.id, status = %status, - attempt = %job.attempts, + attempt = %job.job.attempts, "Webhook failed after max attempts, FAILING." ); Err(JobError::Fail(webhook_error)) } } else { tracing::error!( - job_id = %job.id, + job_id = %job.job.id, status = %status, "Webhook failed with non-retryable client error, FAILING." ); @@ -326,23 +335,23 @@ impl DurableExecution for WebhookJobHandler { && !e.is_connect() && !e.is_timeout() { - tracing::error!(job_id = %job.id, error = %webhook_error, "Webhook construction error, FAILING."); + tracing::error!(job_id = %job.job.id, error = %webhook_error, "Webhook construction error, FAILING."); return Err(JobError::Fail(webhook_error)); } - if job.attempts < self.retry_config.max_attempts { + if job.job.attempts < self.retry_config.max_attempts { let delay_ms = self.retry_config.initial_delay_ms as f64 * self .retry_config .backoff_factor - .powi(job.attempts as i32 - 1); // Use current attempts for backoff + .powi(job.job.attempts as i32 - 1); // Use current attempts for backoff let delay_ms = (delay_ms.min(self.retry_config.max_delay_ms as f64)) as u64; let delay = Duration::from_millis(delay_ms); tracing::warn!( - job_id = %job.id, + job_id = %job.job.id, error = %webhook_error, - attempt = %job.attempts, + attempt = %job.job.attempts, max_attempts = %self.retry_config.max_attempts, delay_ms = %delay.as_millis(), "Webhook request failed, NACKing." @@ -355,9 +364,9 @@ impl DurableExecution for WebhookJobHandler { }) } else { tracing::error!( - job_id = %job.id, + job_id = %job.job.id, error = %webhook_error, - attempt = %job.attempts, + attempt = %job.job.attempts, "Webhook request failed after max attempts, FAILING." ); Err(JobError::Fail(webhook_error)) @@ -366,49 +375,49 @@ impl DurableExecution for WebhookJobHandler { } } - #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.id))] + #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.job.id))] async fn on_success( &self, - job: &Job, + job: &BorrowedJob, d: SuccessHookData<'_, Self::Output>, _tx: &mut TransactionContext<'_>, ) { tracing::info!( - job_id = %job.id, - url = %job.data.url, + job_id = %job.job.id, + url = %job.job.data.url, status = %d.result.status_code, "Webhook successfully processed (on_success hook)." ); } - #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.id))] + #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.job.id))] async fn on_nack( &self, - job: &Job, + job: &BorrowedJob, d: NackHookData<'_, Self::ErrorData>, _tx: &mut TransactionContext<'_>, ) { tracing::warn!( - job_id = %job.id, - url = %job.data.url, - attempt = %job.attempts, + job_id = %job.job.id, + url = %job.job.data.url, + attempt = %job.job.attempts, error = ?d.error, delay_ms = %d.delay.map_or(0, |dur| dur.as_millis()), "Webhook NACKed (on_nack hook)." 
); } - #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.id))] + #[tracing::instrument(skip_all, fields(queue = "webhook", job_id = job.job.id))] async fn on_fail( &self, - job: &Job, + job: &BorrowedJob, d: FailHookData<'_, Self::ErrorData>, _tx: &mut TransactionContext<'_>, ) { tracing::error!( - job_id = %job.id, - url = %job.data.url, - attempt = %job.attempts, + job_id = %job.job.id, + url = %job.job.data.url, + attempt = %job.job.attempts, error = ?d.error, "Webhook FAILED permanently (on_fail hook)." ); diff --git a/server/src/execution_router/mod.rs b/server/src/execution_router/mod.rs index c2a0308..78492e8 100644 --- a/server/src/execution_router/mod.rs +++ b/server/src/execution_router/mod.rs @@ -16,6 +16,7 @@ use engine_executors::{ send::{ExternalBundlerSendHandler, ExternalBundlerSendJobData}, }, webhook::WebhookJobHandler, + transaction_registry::TransactionRegistry, }; use twmq::{Queue, error::TwmqError}; @@ -25,6 +26,7 @@ pub struct ExecutionRouter { pub webhook_queue: Arc>, pub external_bundler_send_queue: Arc>>, pub userop_confirm_queue: Arc>>, + pub transaction_registry: Arc, } impl ExecutionRouter { @@ -85,6 +87,15 @@ impl ExecutionRouter { rpc_credentials, }; + // Register transaction in registry first + self.transaction_registry + .set_transaction_queue( + &base_execution_options.idempotency_key, + "external_bundler_send", + ) + .await + .map_err(|e| TwmqError::Runtime(format!("Failed to register transaction: {}", e)))?; + // Create job with transaction ID as the job ID for idempotency self.external_bundler_send_queue .clone() diff --git a/server/src/http/routes/mod.rs b/server/src/http/routes/mod.rs index 15d5d2e..a3efece 100644 --- a/server/src/http/routes/mod.rs +++ b/server/src/http/routes/mod.rs @@ -2,4 +2,5 @@ pub mod contract_encode; pub mod contract_read; pub mod contract_write; +pub mod transaction; pub mod transaction_write; diff --git a/server/src/http/routes/transaction.rs b/server/src/http/routes/transaction.rs new file mode 100644 index 0000000..9f8d42c --- /dev/null +++ b/server/src/http/routes/transaction.rs @@ -0,0 +1,155 @@ +// Transaction Management Operations + +use axum::{ + debug_handler, extract::{Path, State}, http::StatusCode, + response::{IntoResponse, Json}, +}; +use serde::{Deserialize, Serialize}; +use twmq::{error::TwmqError, CancelResult as TwmqCancelResult}; +use utoipa::ToSchema; + +use crate::http::{ + error::ApiEngineError, + server::EngineServerState, + types::{ErrorResponse, SuccessResponse}, +}; + +// ===== TYPES ===== + +#[derive(Debug, Serialize, ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct TransactionCancelResponse { + pub transaction_id: String, + pub result: CancelResult, +} + +#[derive(Debug, Serialize, ToSchema)] +#[serde(rename_all = "SCREAMING_SNAKE_CASE")] +pub enum CancelResult { + CancelledImmediately, + CancellationPending, + CannotCancel { reason: String }, + NotFound, +} + +// ===== ROUTE HANDLER ===== + +#[utoipa::path( + post, + operation_id = "cancelTransaction", + path = "/transaction/{id}/cancel", + tag = "Transaction", + responses( + (status = 200, description = "Transaction cancellation result", body = TransactionCancelResponse, content_type = "application/json"), + (status = 404, description = "Transaction not found", body = ApiEngineError, content_type = "application/json"), + ), + params( + ("id" = String, Path, description = "Transaction ID to cancel"), + ) +)] +/// Cancel Transaction +/// +/// Attempt to cancel a queued transaction. 
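+/// Cancellation is two-phase, as sketched below: jobs still in the pending or
+/// delayed queues are failed immediately, while jobs currently leased by a worker
+/// are added to a pending-cancellation set and reaped on the next worker pass.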
+/// Transactions that have been sent and are waiting to be mined cannot be cancelled.
+#[debug_handler]
+pub async fn cancel_transaction(
+    State(state): State<EngineServerState>,
+    Path(transaction_id): Path<String>,
+) -> Result<impl IntoResponse, ApiEngineError> {
+    tracing::info!(
+        transaction_id = %transaction_id,
+        "Processing transaction cancellation request"
+    );
+
+    // Check which queue the transaction is in
+    let queue_name = state
+        .queue_manager
+        .transaction_registry
+        .get_transaction_queue(&transaction_id)
+        .await
+        .map_err(|e| ApiEngineError(e.into()))?;
+
+    let result = match queue_name.as_deref() {
+        Some("external_bundler_send") => {
+            // Transaction is in send queue - try to cancel
+            match state
+                .queue_manager
+                .external_bundler_send_queue
+                .cancel_job(&transaction_id)
+                .await
+                .map_err(|e| ApiEngineError(e.into()))?
+            {
+                TwmqCancelResult::CancelledImmediately => {
+                    // Remove from registry since it's cancelled
+                    state
+                        .queue_manager
+                        .transaction_registry
+                        .remove_transaction(&transaction_id)
+                        .await
+                        .map_err(|e| ApiEngineError(e.into()))?;
+
+                    tracing::info!(
+                        transaction_id = %transaction_id,
+                        "Transaction cancelled immediately"
+                    );
+
+                    CancelResult::CancelledImmediately
+                }
+                TwmqCancelResult::CancellationPending => {
+                    tracing::info!(
+                        transaction_id = %transaction_id,
+                        "Transaction cancellation pending"
+                    );
+
+                    CancelResult::CancellationPending
+                }
+                TwmqCancelResult::NotFound => {
+                    tracing::warn!(
+                        transaction_id = %transaction_id,
+                        "Transaction not found in send queue"
+                    );
+
+                    CancelResult::NotFound
+                }
+            }
+        }
+        Some("userop_confirm") => {
+            tracing::info!(
+                transaction_id = %transaction_id,
+                "Cannot cancel transaction - already sent and waiting to be mined"
+            );
+
+            CancelResult::CannotCancel {
+                reason: "Transaction has been sent and is waiting to be mined".to_string(),
+            }
+        }
+        Some(other_queue) => {
+            tracing::warn!(
+                transaction_id = %transaction_id,
+                queue = %other_queue,
+                "Transaction in unsupported queue for cancellation"
+            );
+
+            CancelResult::CannotCancel {
+                reason: format!("Transaction in unsupported queue: {}", other_queue),
+            }
+        }
+        None => {
+            tracing::warn!(
+                transaction_id = %transaction_id,
+                "Transaction not found in registry"
+            );
+
+            CancelResult::NotFound
+        }
+    };
+
+    let response = TransactionCancelResponse {
+        transaction_id,
+        result,
+    };
+
+    Ok((
+        StatusCode::OK,
+        Json(SuccessResponse::new(response)),
+    ))
+}
\ No newline at end of file
diff --git a/server/src/http/server.rs b/server/src/http/server.rs
index 0865143..bc12cd1 100644
--- a/server/src/http/server.rs
+++ b/server/src/http/server.rs
@@ -8,7 +8,7 @@ use utoipa::OpenApi;
 use utoipa_axum::{router::OpenApiRouter, routes};
 use utoipa_scalar::{Scalar, Servable};
 
-use crate::{chains::ThirdwebChainService, execution_router::ExecutionRouter};
+use crate::{chains::ThirdwebChainService, execution_router::ExecutionRouter, queue::manager::QueueManager};
 use tower_http::{
     cors::{Any, CorsLayer},
     trace::TraceLayer,
@@ -21,6 +21,7 @@ pub struct EngineServerState {
     pub abi_service: Arc,
     pub execution_router: Arc,
+    pub queue_manager: Arc<QueueManager>,
 }
 
 pub struct EngineServer {
@@ -51,6 +52,9 @@ impl EngineServer {
             .routes(routes!(
                 crate::http::routes::transaction_write::write_transaction
             ))
+            .routes(routes!(
+                crate::http::routes::transaction::cancel_transaction
+            ))
             .layer(cors)
             .layer(TraceLayer::new_for_http())
             .with_state(state);
diff --git a/server/src/main.rs b/server/src/main.rs
index 7dfa8c3..3438f41 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -59,6 +59,7 @@ async fn main() -> anyhow::Result<()> {
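A minimal sketch of how the registry and cancellation pieces above fit together,
assuming a `QueueManager` wired as in `server/src/queue/manager.rs`; the helper
name `try_cancel`, the `&'static str` outcomes, and the use of `anyhow` are
illustrative, not part of this patch:

use twmq::CancelResult;

use crate::queue::manager::QueueManager;

// Illustrative helper mirroring the cancel_transaction handler above.
async fn try_cancel(
    queue_manager: &QueueManager,
    transaction_id: &str,
) -> anyhow::Result<&'static str> {
    // The registry maps transaction_id -> name of the queue currently holding it.
    let queue = queue_manager
        .transaction_registry
        .get_transaction_queue(transaction_id)
        .await?;

    Ok(match queue.as_deref() {
        // Still in the send queue: twmq can try to cancel it.
        Some("external_bundler_send") => {
            match queue_manager
                .external_bundler_send_queue
                .cancel_job(transaction_id)
                .await?
            {
                // Job was still pending or delayed: moved straight to the failed list.
                CancelResult::CancelledImmediately => "cancelled",
                // Job is actively leased: flagged in the pending-cancellation set and
                // reaped by pop_batch_jobs once the worker releases it.
                CancelResult::CancellationPending => "pending",
                CancelResult::NotFound => "not found",
            }
        }
        // Already sent on-chain and awaiting confirmation: too late to cancel.
        Some("userop_confirm") => "cannot cancel",
        _ => "not found",
    })
}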
webhook_queue: queue_manager.webhook_queue.clone(), external_bundler_send_queue: queue_manager.external_bundler_send_queue.clone(), userop_confirm_queue: queue_manager.userop_confirm_queue.clone(), + transaction_registry: queue_manager.transaction_registry.clone(), }; let mut server = EngineServer::new(EngineServerState { @@ -66,6 +67,7 @@ async fn main() -> anyhow::Result<()> { abi_service: Arc::new(abi_service), chains, execution_router: Arc::new(execution_router), + queue_manager: Arc::new(queue_manager), }) .await; diff --git a/server/src/queue/manager.rs b/server/src/queue/manager.rs index 05df218..def61a9 100644 --- a/server/src/queue/manager.rs +++ b/server/src/queue/manager.rs @@ -10,6 +10,7 @@ use engine_executors::{ send::ExternalBundlerSendHandler, }, webhook::{WebhookJobHandler, WebhookRetryConfig}, + transaction_registry::TransactionRegistry, }; use twmq::{Queue, queue::QueueOptions, shutdown::ShutdownHandle}; @@ -22,6 +23,7 @@ pub struct QueueManager { pub webhook_queue: Arc>, pub external_bundler_send_queue: Arc>>, pub userop_confirm_queue: Arc>>, + pub transaction_registry: Arc, } fn get_queue_name_for_namespace(namespace: &Option, name: &str) -> String { @@ -44,6 +46,12 @@ impl QueueManager { ) -> Result { // Create Redis clients let redis_client = twmq::redis::Client::open(redis_config.url.as_str())?; + + // Create transaction registry + let transaction_registry = Arc::new(TransactionRegistry::new( + redis_client.get_connection_manager().await?, + queue_config.execution_namespace.clone(), + )); // Create deployment cache and lock let deployment_cache = RedisDeploymentCache::new(redis_client.clone()).await?; @@ -102,6 +110,7 @@ impl QueueManager { chain_service.clone(), deployment_lock.clone(), webhook_queue.clone(), + transaction_registry.clone(), ); let userop_confirm_queue = Queue::builder() @@ -121,6 +130,7 @@ impl QueueManager { deployment_lock, webhook_queue: webhook_queue.clone(), confirm_queue: userop_confirm_queue.clone(), + transaction_registry: transaction_registry.clone(), }; let external_bundler_send_queue = Queue::builder() @@ -136,6 +146,7 @@ impl QueueManager { webhook_queue, external_bundler_send_queue, userop_confirm_queue, + transaction_registry, }) } diff --git a/twmq/src/job.rs b/twmq/src/job.rs index 2f56c5d..43b37d3 100644 --- a/twmq/src/job.rs +++ b/twmq/src/job.rs @@ -145,6 +145,32 @@ impl Job { } } +// A job that has been borrowed from the queue with a lease token +#[derive(Debug, Clone)] +pub struct BorrowedJob { + pub job: Job, + pub lease_token: String, +} + +impl BorrowedJob { + pub fn new(job: Job, lease_token: String) -> Self { + Self { job, lease_token } + } + + // Convenience methods to access job fields + pub fn id(&self) -> &str { + &self.job.id + } + + pub fn data(&self) -> &T { + &self.job.data + } + + pub fn attempts(&self) -> u32 { + self.job.attempts + } +} + // Job status enum #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum JobStatus { diff --git a/twmq/src/lib.rs b/twmq/src/lib.rs index 0d3c114..e047dec 100644 --- a/twmq/src/lib.rs +++ b/twmq/src/lib.rs @@ -13,6 +13,7 @@ use job::{ DelayOptions, Job, JobError, JobErrorRecord, JobErrorType, JobOptions, JobResult, JobStatus, PushableJob, RequeuePosition, }; +pub use job::BorrowedJob; use queue::QueueOptions; use redis::Pipeline; use redis::{AsyncCommands, RedisResult, aio::ConnectionManager}; @@ -24,6 +25,18 @@ use tokio::time::sleep; pub use redis; use tracing::Instrument; +// Trait for error types to implement user cancellation +pub trait UserCancellable { + fn 
user_cancelled() -> Self;
+}
+
+#[derive(Debug)]
+pub enum CancelResult {
+    CancelledImmediately,
+    CancellationPending,
+    NotFound,
+}
+
 pub struct SuccessHookData<'a, O> {
     pub result: &'a O,
 }
@@ -45,18 +58,18 @@ pub struct QueueInternalErrorHookData<'a> {
 
 // Main DurableExecution trait
 pub trait DurableExecution: Sized + Send + Sync + 'static {
     type Output: Serialize + DeserializeOwned + Send + Sync;
-    type ErrorData: Serialize + DeserializeOwned + From<TwmqError> + Send + Sync;
+    type ErrorData: Serialize + DeserializeOwned + From<TwmqError> + UserCancellable + Send + Sync;
     type JobData: Serialize + DeserializeOwned + Clone + Send + Sync + 'static;
 
     // Required method to process a job
     fn process(
         &self,
-        job: &Job<Self::JobData>,
+        job: &BorrowedJob<Self::JobData>,
     ) -> impl Future<Output = JobResult<Self::Output, Self::ErrorData>> + Send;
 
     fn on_success(
         &self,
-        _job: &Job<Self::JobData>,
+        _job: &BorrowedJob<Self::JobData>,
         _d: SuccessHookData<Self::Output>,
         _tx: &mut TransactionContext<'_>,
     ) -> impl Future<Output = ()> + Send + Sync {
@@ -65,7 +78,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static {
 
     fn on_nack(
         &self,
-        _job: &Job<Self::JobData>,
+        _job: &BorrowedJob<Self::JobData>,
         _d: NackHookData<Self::ErrorData>,
         _tx: &mut TransactionContext<'_>,
     ) -> impl Future<Output = ()> + Send + Sync {
@@ -74,7 +87,7 @@ pub trait DurableExecution: Sized + Send + Sync + 'static {
 
     fn on_fail(
         &self,
-        _job: &Job<Self::JobData>,
+        _job: &BorrowedJob<Self::JobData>,
         _d: FailHookData<Self::ErrorData>,
         _tx: &mut TransactionContext<'_>,
     ) -> impl Future<Output = ()> + Send + Sync {
@@ -190,6 +203,14 @@ impl Queue {
         format!("twmq:{}:dedup", self.name)
     }
 
+    pub fn pending_cancellation_set_name(&self) -> String {
+        format!("twmq:{}:pending_cancellations", self.name)
+    }
+
+    pub fn lease_key_name(&self, job_id: &str, lease_token: &str) -> String {
+        format!("twmq:{}:job:{}:lease:{}", self.name, job_id, lease_token)
+    }
+
     pub async fn push(
         &self,
         job_options: JobOptions,
@@ -352,6 +373,80 @@ impl Queue {
         Ok(count)
     }
 
+    pub async fn cancel_job(&self, job_id: &str) -> Result<CancelResult, TwmqError> {
+        let script = redis::Script::new(
+            r#"
+            local job_id = ARGV[1]
+
+            local pending_list = KEYS[1]
+            local delayed_zset = KEYS[2]
+            local active_hash = KEYS[3]
+            local failed_list = KEYS[4]
+            local pending_cancellation_set = KEYS[5]
+            local job_meta_hash = KEYS[6]
+
+            -- Try to remove from pending queue
+            if redis.call('LREM', pending_list, 0, job_id) > 0 then
+                -- Move to failed state with cancellation
+                redis.call('LPUSH', failed_list, job_id)
+                redis.call('HSET', job_meta_hash, 'finished_at', ARGV[2])
+                return "cancelled_immediately"
+            end
+
+            -- Try to remove from delayed queue
+            if redis.call('ZREM', delayed_zset, job_id) > 0 then
+                -- Move to failed state with cancellation
+                redis.call('LPUSH', failed_list, job_id)
+                redis.call('HSET', job_meta_hash, 'finished_at', ARGV[2])
+                return "cancelled_immediately"
+            end
+
+            -- Check if job is active
+            if redis.call('HEXISTS', active_hash, job_id) == 1 then
+                -- Add to pending cancellations set
+                redis.call('SADD', pending_cancellation_set, job_id)
+                return "cancellation_pending"
+            end
+
+            return "not_found"
+            "#,
+        );
+
+        let now = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_secs();
+
+        let result: String = script
+            .key(self.pending_list_name())
+            .key(self.delayed_zset_name())
+            .key(self.active_hash_name())
+            .key(self.failed_list_name())
+            .key(self.pending_cancellation_set_name())
+            .key(self.job_meta_hash_name(job_id))
+            .arg(job_id)
+            .arg(now)
+            .invoke_async(&mut self.redis.clone())
+            .await?;
+
+        match result.as_str() {
+            "cancelled_immediately" => {
+                // Process the cancellation through hook system
+                if let Err(e) = self.process_cancelled_job(job_id).await {
+                    tracing::error!(
+                        job_id = %job_id,
+                        error = ?e,
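+                        // NOTE: process_cancelled_job is not shown in this hunk; it is
+                        // assumed to run the job's fail hooks with
+                        // Self::ErrorData::user_cancelled(), which is why the ErrorData
+                        // associated type now carries the UserCancellable bound.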
"Failed to process immediately cancelled job" + ); + } + Ok(CancelResult::CancelledImmediately) + }, + "cancellation_pending" => Ok(CancelResult::CancellationPending), + "not_found" => Ok(CancelResult::NotFound), + _ => Err(TwmqError::Runtime(format!("Unexpected cancel result: {}", result))), + } + } + pub fn work(self: &Arc) -> WorkerHandle { let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel::<()>(); // Local semaphore to limit concurrency per instance @@ -392,124 +487,20 @@ impl Queue { for job in jobs { let permit = semaphore.clone().acquire_owned().await.unwrap(); let queue_clone = queue_clone.clone(); - let job_id = job.id.clone(); + let job_id = job.id().to_string(); let handler_clone = handler_clone.clone(); tokio::spawn(async move { // Process job - note we don't pass a context here let result = handler_clone.process(&job).await; - // Mark job as complete (automatically happens in completion handlers) - - match result { - Ok(output) => { - // Create transaction pipeline for atomicity - let mut pipeline = redis::pipe(); - pipeline.atomic(); // Use MULTI/EXEC - - // Create transaction context with mutable access to pipeline - let mut tx_context = TransactionContext::new( - &mut pipeline, - queue_clone.name().to_string(), - ); - - let success_hook_data = SuccessHookData { - result: &output, - }; - - // Call success hook to populate transaction context - handler_clone.on_success(&job, success_hook_data, &mut tx_context).await; - - // Complete job with success and execute transaction - if let Err(e) = queue_clone - .complete_job_success( - &job, - &output, - tx_context.pipeline(), - ) - .await - { - tracing::error!( - "Failed to complete job {} complete handling successfully: {:?}", - job.id, - e - ); - } - } - Err(JobError::Nack { - error, - delay, - position, - }) => { - // Create transaction pipeline for atomicity - let mut pipeline = redis::pipe(); - pipeline.atomic(); // Use MULTI/EXEC - - // Create transaction context with mutable access to pipeline - let mut tx_context = TransactionContext::new( - &mut pipeline, - queue_clone.name().to_string(), - ); - - let nack_hook_data = NackHookData { - error: &error, - delay, - position, - }; - - // Call nack hook to populate transaction context - handler_clone - .on_nack(&job, nack_hook_data, &mut tx_context) - .await; - - // Complete job with nack and execute transaction - if let Err(e) = queue_clone - .complete_job_nack( - &job, - &error, - delay, - position, - tx_context.pipeline(), - ) - .await - { - tracing::error!( - "Failed to complete job {} complete nack handling successfully: {:?}", - job.id, - e - ); - } - } - Err(JobError::Fail(error)) => { - // Create transaction pipeline for atomicity - let mut pipeline = redis::pipe(); - pipeline.atomic(); // Use MULTI/EXEC - - // Create transaction context with mutable access to pipeline - let mut tx_context = TransactionContext::new( - &mut pipeline, - queue_clone.name.clone(), - ); - - let fail_hook_data = FailHookData { - error: &error - }; - - // Call fail hook to populate transaction context - handler_clone.on_fail(&job, fail_hook_data, &mut tx_context).await; - - // Complete job with fail and execute transaction - if let Err(e) = queue_clone - .complete_job_fail(&job.to_option_data(), &error, tx_context.pipeline()) - .await - { - tracing::error!( - "Failed to complete job {} complete fail handling successfully: {:?}", - job.id, - e - ); - } - } + // Complete job using unified method with hooks and retry logic + if let Err(e) = queue_clone.complete_job(&job, 
result).await {
+                        tracing::error!(
+                            "Failed to complete job {} handling: {:?}",
+                            job.id(),
+                            e
+                        );
+                    }
 
                     // Release permit when done
@@ -566,11 +557,12 @@ impl<H: DurableExecution> Queue<H> {
     async fn pop_batch_jobs(
         self: &Arc<Self>,
         batch_size: usize,
-    ) -> RedisResult<Vec<Job<H::JobData>>> {
+    ) -> RedisResult<Vec<BorrowedJob<H::JobData>>> {
         // Lua script that does:
-        // 1. Process expired delayed jobs
-        // 2. Check for timed out active jobs
-        // 3. Pop up to batch_size jobs from pending
+        // 1. Clean up expired leases (with lease token validation)
+        // 2. Process pending cancellations
+        // 3. Process expired delayed jobs
+        // 4. Pop up to batch_size jobs from pending (with new lease tokens)
         let script = redis::Script::new(
             r#"
             local now = tonumber(ARGV[1])
@@ -582,40 +574,82 @@ impl<H: DurableExecution> Queue<H> {
             local pending_list_name = KEYS[3]
             local active_hash_name = KEYS[4]
             local job_data_hash_name = KEYS[5]
-
+            local pending_cancellation_set = KEYS[6]
+            local failed_list_name = KEYS[7]
+            local success_list_name = KEYS[8]
 
             local result_jobs = {}
             local timed_out_jobs = {}
+            local cancelled_jobs = {}
+            local completed_jobs = {}
 
-            -- Step 1: Clean up all expired leases
-            -- Get all active jobs
+            -- Step 1: Clean up expired leases by checking lease keys stored in job meta
+            -- Get all active jobs (now just contains job_id -> attempts)
             local active_jobs = redis.call('HGETALL', active_hash_name)
 
-            -- Process in pairs (job_id, lease_expiry)
+            -- Process in pairs (job_id, attempts)
             for i = 1, #active_jobs, 2 do
                 local job_id = active_jobs[i]
-                local lease_expiry = tonumber(active_jobs[i + 1])
+                local attempts = active_jobs[i + 1]
                 local job_meta_hash_name = 'twmq:' .. queue_id .. ':job:' .. job_id .. ':meta'
-
-                -- Check if lease has expired
-                if lease_expiry < now then
+
+                -- Get the current lease token from job metadata
+                local current_lease_token = redis.call('HGET', job_meta_hash_name, 'lease_token')
+
+                if current_lease_token then
+                    -- Build the lease key and check if it exists (Redis auto-expires)
+                    local lease_key = 'twmq:' .. queue_id .. ':job:' .. job_id .. ':lease:' .. 
current_lease_token + local lease_exists = redis.call('EXISTS', lease_key) + + -- If lease doesn't exist (expired), move job back to pending + if lease_exists == 0 then + redis.call('HINCRBY', job_meta_hash_name, 'attempts', 1) + redis.call('HDEL', job_meta_hash_name, 'lease_token') + + -- Move job back to pending + redis.call('HDEL', active_hash_name, job_id) + redis.call('LPUSH', pending_list_name, job_id) + + -- Add to list of timed out jobs + table.insert(timed_out_jobs, job_id) + end + else + -- No lease token in meta, something's wrong - move back to pending redis.call('HINCRBY', job_meta_hash_name, 'attempts', 1) - - -- Move job back to pending redis.call('HDEL', active_hash_name, job_id) redis.call('LPUSH', pending_list_name, job_id) - - -- Add to list of timed out jobs table.insert(timed_out_jobs, job_id) end end - -- Step 2: Move expired delayed jobs to pending + -- Step 2: Process pending cancellations AFTER lease cleanup + local cancel_requests = redis.call('SMEMBERS', pending_cancellation_set) + + for i, job_id in ipairs(cancel_requests) do + -- Check if job is still active + if redis.call('HEXISTS', active_hash_name, job_id) == 1 then + -- Still processing, keep in cancellation set + else + -- Job finished processing, check outcome + if redis.call('LPOS', success_list_name, job_id) then + -- Job succeeded, just remove from cancellation set + table.insert(completed_jobs, job_id) + else + -- Job not successful, cancel it now + redis.call('LPUSH', failed_list_name, job_id) + -- Add cancellation timestamp + local job_meta_hash_name = 'twmq:' .. queue_id .. ':job:' .. job_id .. ':meta' + redis.call('HSET', job_meta_hash_name, 'finished_at', now) + table.insert(cancelled_jobs, job_id) + end + -- Remove from pending cancellations + redis.call('SREM', pending_cancellation_set, job_id) + end + end + + -- Step 3: Move expired delayed jobs to pending local delayed_jobs = redis.call('ZRANGEBYSCORE', delayed_zset_name, 0, now) for i, job_id in ipairs(delayed_jobs) do - -- Check position information - local job_meta_hash_name = 'twmq:' .. queue_id .. ':job:' .. job_id .. ':meta' local reentry_position = redis.call('HGET', job_meta_hash_name, 'reentry_position') or 'last' @@ -631,15 +665,13 @@ impl Queue { end end - -- Finally Step 3: Try to pop jobs from pending (up to batch_size) - -- Try to pop jobs from pending (up to batch_size) + -- Step 4: Pop jobs from pending and create lease keys (up to batch_size) local popped_job_ids = {} for i = 1, batch_size do local job_id = redis.call('LPOP', pending_list_name) if not job_id then break end - table.insert(popped_job_ids, job_id) end @@ -654,24 +686,30 @@ impl Queue { if job_data then -- Update metadata local job_meta_hash_name = 'twmq:' .. queue_id .. ':job:' .. job_id .. ':meta' - - redis.call('HSET', job_meta_hash_name, 'processed_at', now) local created_at = redis.call('HGET', job_meta_hash_name, 'created_at') or now local attempts = redis.call('HINCRBY', job_meta_hash_name, 'attempts', 1) - -- Set lease expiration - local lease_expiry = now + lease_seconds + -- Generate unique lease token + local lease_token = now .. '_' .. job_id .. '_' .. attempts - -- Add to active set with lease expiry as score - redis.call('HSET', active_hash_name, job_id, lease_expiry) + -- Create separate lease key with TTL + local lease_key = 'twmq:' .. queue_id .. ':job:' .. job_id .. ':lease:' .. 
lease_token
+                redis.call('SET', lease_key, '1')
+                redis.call('EXPIRE', lease_key, lease_seconds)
 
-                    -- Add to result with both id and data
-                    table.insert(result_jobs, {job_id, job_data, tostring(attempts), tostring(created_at), tostring(now)})
+                -- Store lease token in job metadata
+                redis.call('HSET', job_meta_hash_name, 'lease_token', lease_token)
+
+                -- Add to active hash (just job_id -> attempts, no lease info)
+                redis.call('HSET', active_hash_name, job_id, attempts)
+
+                -- Add to result with job data and lease token
+                table.insert(result_jobs, {job_id, job_data, tostring(attempts), tostring(created_at), tostring(now), lease_token})
                 end
             end
 
-            return result_jobs
+            return {result_jobs, cancelled_jobs, timed_out_jobs}
             "#,
         );
 
         let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs();
 
-        let results_from_lua: Vec<(String, String, String, String, String)> = script
+        let results_from_lua: (Vec<(String, String, String, String, String, String)>, Vec<String>, Vec<String>) = script
             .key(self.name())
             .key(self.delayed_zset_name())
             .key(self.pending_list_name())
             .key(self.active_hash_name())
             .key(self.job_data_hash_name())
+            .key(self.pending_cancellation_set_name())
+            .key(self.failed_list_name())
+            .key(self.success_list_name())
             .arg(now)
             .arg(batch_size)
             .arg(self.options.lease_duration.as_secs())
             .invoke_async(&mut self.redis.clone())
             .await?;
 
+        let (job_results, cancelled_jobs, timed_out_jobs) = results_from_lua;
+
+        // Log individual lease timeouts and cancellations
+        for job_id in &timed_out_jobs {
+            tracing::warn!(job_id = %job_id, "Job lease expired, moved back to pending");
+        }
+        for job_id in &cancelled_jobs {
+            tracing::info!(job_id = %job_id, "Job cancelled by user request");
+        }
+
         let mut jobs = Vec::new();
-        for (job_id_str, job_data_t_json, attempts_str, created_at_str, processed_at_str) in
-            results_from_lua
+        for (job_id_str, job_data_t_json, attempts_str, created_at_str, processed_at_str, lease_token) in
+            job_results
         {
             match serde_json::from_str::<H::JobData>(&job_data_t_json) {
                 Ok(data_t) => {
@@ -702,14 +753,16 @@ impl<H: DurableExecution> Queue<H> {
                     let created_at: u64 = created_at_str.parse().unwrap_or(now); // Default or handle error
                     let processed_at: u64 = processed_at_str.parse().unwrap_or(now); // Default or handle error
 
-                    jobs.push(Job {
+                    let job = Job {
                         id: job_id_str,
                         data: data_t,
                         attempts,
                         created_at,
                         processed_at: Some(processed_at),
                         finished_at: None, // Not finished yet
-                    });
+                    };
+
+                    jobs.push(BorrowedJob::new(job, lease_token));
                 }
                 Err(e) => {
                     // Log error: failed to deserialize job data T for job_id_str
@@ -726,7 +779,7 @@ impl<H: DurableExecution> Queue<H> {
                     let mut pipeline = redis::pipe();
                     pipeline.atomic(); // Use MULTI/EXEC
 
-                    let mut tx_context =
+                    let mut _tx_context =
                         TransactionContext::new(&mut pipeline, queue_clone.name().to_string());
 
                     let job: Job<Option<H::JobData>> = Job {
@@ -739,17 +792,10 @@ impl<H: DurableExecution> Queue<H> {
                     };
 
                     let twmq_error: TwmqError = e.into();
-                    let fail_hook_data = QueueInternalErrorHookData { error: &twmq_error };
-
-                    // Call fail hook to populate transaction context
-                    queue_clone
-                        .handler
-                        .on_queue_error(&job, fail_hook_data, &mut tx_context)
-                        .await;
-
-                    // Complete job with fail and execute transaction
+
+                    // Complete job using queue error method with lease token
                     if let Err(e) = queue_clone
-                        .complete_job_fail(&job, &twmq_error.into(), tx_context.pipeline())
+                        .complete_job_queue_error(&job, &lease_token, &twmq_error.into())
                         .await
                     {
                         tracing::error!(
@@ -763,13 +809,74 @@ impl<H: DurableExecution> Queue<H> {
             }
         }
 
+        // Process cancelled jobs through hook system
+        for job_id in cancelled_jobs {
+            let queue_clone = self.clone();
+            tokio::spawn(async move {
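+                // The Lua script above has already moved this job to the failed
+                // list; this spawned task only runs the user-defined cancellation
+                // hooks (see process_cancelled_job), so a hook error here is
+                // logged rather than retried.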
+ if let Err(e) = queue_clone.process_cancelled_job(&job_id).await { + tracing::error!( + job_id = %job_id, + error = ?e, + "Failed to process cancelled job" + ); + } + }); + } + Ok(jobs) } - #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id, queue = self.name()))] - async fn complete_job_success( + /// Process a cancelled job through the hook system with user cancellation error + async fn process_cancelled_job(&self, job_id: &str) -> Result<(), TwmqError> { + // Get job data for the cancelled job + match self.get_job(job_id).await? { + Some(job) => { + // Create cancellation error using the trait + let cancellation_error = H::ErrorData::user_cancelled(); + + // Create transaction pipeline for atomicity + let mut pipeline = redis::pipe(); + pipeline.atomic(); + + // Create transaction context with mutable access to pipeline + let mut tx_context = TransactionContext::new( + &mut pipeline, + self.name().to_string(), + ); + + let fail_hook_data = FailHookData { + error: &cancellation_error, + }; + + // Create a BorrowedJob with a dummy lease token since cancelled jobs don't have active leases + let borrowed_job = BorrowedJob::new(job, "cancelled".to_string()); + + // Call fail hook for user cancellation + self.handler.on_fail(&borrowed_job, fail_hook_data, &mut tx_context).await; + + // Execute the pipeline (just hook commands, job already moved to failed) + pipeline.query_async::<()>(&mut self.redis.clone()).await?; + + tracing::info!( + job_id = %job_id, + "Successfully processed job cancellation hooks" + ); + + Ok(()) + }, + None => { + tracing::warn!( + job_id = %job_id, + "Cancelled job not found when trying to process hooks" + ); + Ok(()) + } + } + } + + fn add_success_operations( &self, - job: &Job, + job: &BorrowedJob, result: &H::Output, pipeline: &mut Pipeline, ) -> Result<(), TwmqError> { @@ -778,19 +885,25 @@ impl Queue { .unwrap() .as_secs(); - // Add basic job completion operations to pipeline + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + // Delete the lease key to consume it + pipeline.del(&lease_key); + + // Add job completion operations pipeline - .hdel(self.active_hash_name(), &job.id) - .lpush(self.success_list_name(), &job.id) - // Set finished_at in the job's metadata hash - .hset(self.job_meta_hash_name(&job.id), "finished_at", now); + .hdel(self.active_hash_name(), &job.job.id) + .lpush(self.success_list_name(), &job.job.id) + .hset(self.job_meta_hash_name(&job.job.id), "finished_at", now) + .hdel(self.job_meta_hash_name(&job.job.id), "lease_token"); let result_json = serde_json::to_string(result)?; - pipeline.hset(self.job_result_hash_name(), &job.id, result_json); + pipeline.hset(self.job_result_hash_name(), &job.job.id, result_json); - // Execute main pipeline first - pipeline.query_async::<()>(&mut self.redis.clone()).await?; + Ok(()) + } + async fn post_success_completion(&self) -> Result<(), TwmqError> { // Separate call for pruning with data deletion using Lua let trim_script = redis::Script::new( r#" @@ -838,10 +951,9 @@ impl Queue { Ok(()) } - #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id, queue = self.name()))] - async fn complete_job_nack( + fn add_nack_operations( &self, - job: &Job, + job: &BorrowedJob, error: &H::ErrorData, delay: Option, position: RequeuePosition, @@ -852,57 +964,60 @@ impl Queue { .unwrap() .as_secs(); - // Remove from active - pipeline.hdel(self.active_hash_name(), &job.id); + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + // Delete the 
lease key to consume it + pipeline.del(&lease_key); + + // Remove from active and clear lease token + pipeline + .hdel(self.active_hash_name(), &job.job.id) + .hdel(self.job_meta_hash_name(&job.job.id), "lease_token"); let error_record = JobErrorRecord { - attempt: job.attempts, + attempt: job.job.attempts, error, details: JobErrorType::nack(delay, position), created_at: now, }; let error_json = serde_json::to_string(&error_record)?; - - pipeline.lpush(self.job_errors_list_name(&job.id), error_json); + pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); // Add to proper queue based on delay and position if let Some(delay_duration) = delay { let delay_until = now + delay_duration.as_secs(); - - // Store position for when delay expires let pos_str = position.to_string(); pipeline .hset( - self.job_meta_hash_name(&job.id), + self.job_meta_hash_name(&job.job.id), "reentry_position", pos_str, ) - .zadd(self.delayed_zset_name(), &job.id, delay_until); + .zadd(self.delayed_zset_name(), &job.job.id, delay_until); } else { match position { RequeuePosition::First => { - pipeline.lpush(self.pending_list_name(), &job.id); + pipeline.lpush(self.pending_list_name(), &job.job.id); } RequeuePosition::Last => { - pipeline.rpush(self.pending_list_name(), &job.id); + pipeline.rpush(self.pending_list_name(), &job.job.id); } } } - // Execute pipeline - pipeline.query_async::<()>(&mut self.redis.clone()).await?; - - tracing::debug!("Completed job nack handling"); + Ok(()) + } + async fn post_nack_completion(&self) -> Result<(), TwmqError> { + // No pruning needed for nack Ok(()) } - #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id, queue = self.name()))] - async fn complete_job_fail( + fn add_fail_operations( &self, - job: &Job>, + job: &BorrowedJob, error: &H::ErrorData, pipeline: &mut Pipeline, ) -> Result<(), TwmqError> { @@ -911,25 +1026,32 @@ impl Queue { .unwrap() .as_secs(); - // Remove from active, add to failed + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + // Delete the lease key to consume it + pipeline.del(&lease_key); + + // Remove from active, add to failed, clear lease token pipeline - .hdel(self.active_hash_name(), &job.id) - .lpush(self.failed_list_name(), &job.id) - // Set finished_at in the job's metadata hash - .hset(self.job_meta_hash_name(&job.id), "finished_at", now); + .hdel(self.active_hash_name(), &job.job.id) + .lpush(self.failed_list_name(), &job.job.id) + .hset(self.job_meta_hash_name(&job.job.id), "finished_at", now) + .hdel(self.job_meta_hash_name(&job.job.id), "lease_token"); // Store error let error_record = JobErrorRecord { - attempt: job.attempts, + attempt: job.job.attempts, error, details: JobErrorType::fail(), created_at: now, }; let error_json = serde_json::to_string(&error_record)?; + pipeline.lpush(self.job_errors_list_name(&job.job.id), error_json); - pipeline.lpush(self.job_errors_list_name(&job.id), error_json); - pipeline.query_async::<()>(&mut self.redis.clone()).await?; + Ok(()) + } + async fn post_fail_completion(&self) -> Result<(), TwmqError> { // Separate call for pruning with data deletion using Lua let trim_script = redis::Script::new( r#" @@ -963,15 +1085,179 @@ impl Queue { .key(self.failed_list_name()) .key(self.job_data_hash_name()) .key(self.dedupe_set_name()) - .arg(self.options.max_failed) // max_len (LTRIM is 0 to max_failed-1) + .arg(self.options.max_failed) .invoke_async(&mut self.redis.clone()) .await?; - tracing::debug!("completed job fail handling"); if trimmed_count > 0 { 
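         // trimmed_count is returned by the Lua trim script above: the number of
         // failed jobs pruned from the list along with their stored data.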
tracing::info!("Pruned {} failed jobs", trimmed_count); } Ok(()) } + + #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id(), queue = self.name()))] + async fn complete_job( + &self, + job: &BorrowedJob, + result: JobResult, + ) -> Result<(), TwmqError> { + // 1. Run hook once and build pipeline with all operations + let mut hook_pipeline = redis::pipe(); + let mut tx_context = TransactionContext::new(&mut hook_pipeline, self.name().to_string()); + + match &result { + Ok(output) => { + let success_hook_data = SuccessHookData { result: output }; + self.handler.on_success(job, success_hook_data, &mut tx_context).await; + self.add_success_operations(job, output, &mut hook_pipeline)?; + } + Err(JobError::Nack { error, delay, position }) => { + let nack_hook_data = NackHookData { + error, + delay: *delay, + position: *position, + }; + self.handler.on_nack(job, nack_hook_data, &mut tx_context).await; + self.add_nack_operations(job, error, *delay, *position, &mut hook_pipeline)?; + } + Err(JobError::Fail(error)) => { + let fail_hook_data = FailHookData { error }; + self.handler.on_fail(job, fail_hook_data, &mut tx_context).await; + self.add_fail_operations(job, error, &mut hook_pipeline)?; + } + } + + // 2. Now use this pipeline in unlimited retry loop with lease check + let lease_key = self.lease_key_name(&job.job.id, &job.lease_token); + + loop { + let mut conn = self.redis.clone(); + + // WATCH the lease key + redis::cmd("WATCH") + .arg(&lease_key) + .query_async::<()>(&mut conn) + .await?; + + // Check if lease exists - if not, job was cancelled or timed out + let lease_exists: bool = conn.exists(&lease_key).await?; + if !lease_exists { + redis::cmd("UNWATCH") + .query_async::<()>(&mut conn) + .await?; + tracing::warn!(job_id = %job.job.id, "Lease no longer exists, job was cancelled or timed out"); + return Ok(()); + } + + // Clone the pipeline and make it atomic for this attempt + let mut atomic_pipeline = hook_pipeline.clone(); + atomic_pipeline.atomic(); + + // Execute atomically with WATCH/MULTI/EXEC + match atomic_pipeline.query_async::>(&mut conn).await { + Ok(_) => { + // Success! Now run post-completion methods + match &result { + Ok(_) => self.post_success_completion().await?, + Err(JobError::Nack { .. }) => self.post_nack_completion().await?, + Err(JobError::Fail(_)) => self.post_fail_completion().await?, + } + + tracing::debug!(job_id = %job.job.id, "Job completion successful"); + return Ok(()); + } + Err(_) => { + // WATCH failed (lease key changed), retry + tracing::debug!(job_id = %job.job.id, "WATCH failed during completion, retrying"); + continue; + } + } + } + } + + // Special completion method for queue errors (deserialization failures) with lease token + #[tracing::instrument(level = "debug", skip_all, fields(job_id = job.id, queue = self.name()))] + async fn complete_job_queue_error( + &self, + job: &Job>, + lease_token: &str, + error: &H::ErrorData, + ) -> Result<(), TwmqError> { + // 1. 
Run queue error hook once and build pipeline + let mut hook_pipeline = redis::pipe(); + let mut tx_context = TransactionContext::new(&mut hook_pipeline, self.name().to_string()); + + let twmq_error = TwmqError::Runtime("Job processing failed with user error".to_string()); + let queue_error_hook_data = QueueInternalErrorHookData { error: &twmq_error }; + self.handler.on_queue_error(job, queue_error_hook_data, &mut tx_context).await; + + // Add fail operations to pipeline + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); + + let lease_key = self.lease_key_name(&job.id, lease_token); + + // Delete the lease key to consume it + hook_pipeline.del(&lease_key); + + // Remove from active, add to failed, clear lease token + hook_pipeline + .hdel(self.active_hash_name(), &job.id) + .lpush(self.failed_list_name(), &job.id) + .hset(self.job_meta_hash_name(&job.id), "finished_at", now) + .hdel(self.job_meta_hash_name(&job.id), "lease_token"); + + // Store error + let error_record = JobErrorRecord { + attempt: job.attempts, + error, + details: JobErrorType::fail(), + created_at: now, + }; + let error_json = serde_json::to_string(&error_record)?; + hook_pipeline.lpush(self.job_errors_list_name(&job.id), error_json); + + // 2. Use pipeline in unlimited retry loop with lease check + loop { + let mut conn = self.redis.clone(); + + // WATCH the lease key + redis::cmd("WATCH") + .arg(&lease_key) + .query_async::<()>(&mut conn) + .await?; + + // Check if lease exists - if not, job was cancelled or timed out + let lease_exists: bool = conn.exists(&lease_key).await?; + if !lease_exists { + redis::cmd("UNWATCH") + .query_async::<()>(&mut conn) + .await?; + tracing::warn!(job_id = %job.id, "Lease no longer exists, job was cancelled or timed out"); + return Ok(()); + } + + // Clone the pipeline and make it atomic for this attempt + let mut atomic_pipeline = hook_pipeline.clone(); + atomic_pipeline.atomic(); + + // Execute atomically with WATCH/MULTI/EXEC + match atomic_pipeline.query_async::>(&mut conn).await { + Ok(_) => { + // Success! 
Run post-completion + self.post_fail_completion().await?; + tracing::debug!(job_id = %job.id, "Queue error job completion successful"); + return Ok(()); + } + Err(_) => { + // WATCH failed (lease key changed), retry + tracing::debug!(job_id = %job.id, "WATCH failed during queue error completion, retrying"); + continue; + } + } + } + } } diff --git a/twmq/tests/basic_hook.rs b/twmq/tests/basic_hook.rs index 0366a31..48a49c7 100644 --- a/twmq/tests/basic_hook.rs +++ b/twmq/tests/basic_hook.rs @@ -16,7 +16,7 @@ use serde::{Deserialize, Serialize}; use twmq::{ DurableExecution, Queue, SuccessHookData, hooks::TransactionContext, - job::{Job, JobResult, JobStatus}, + job::{BorrowedJob, JobResult, JobStatus}, queue::QueueOptions, }; @@ -54,6 +54,7 @@ pub struct WebhookJobOutput { pub response: String, } + // Main job that queues webhook jobs #[derive(Serialize, Deserialize, Clone)] pub struct MainJobPayload { @@ -81,20 +82,20 @@ impl DurableExecution for MainJobHandler { type ErrorData = TestJobErrorData; type JobData = MainJobPayload; - async fn process(&self, job: &Job) -> JobResult { - println!("MAIN_JOB: Processing job with id: {}", job.id); + async fn process(&self, job: &BorrowedJob) -> JobResult { + println!("MAIN_JOB: Processing job with id: {}", job.job.id); tokio::time::sleep(Duration::from_millis(50)).await; MAIN_JOB_PROCESSED.store(true, Ordering::SeqCst); Ok(TestJobOutput { - reply: format!("Main job processed: {}", job.data.message), + reply: format!("Main job processed: {}", job.job.data.message), }) } async fn on_success( &self, - job: &Job, + job: &BorrowedJob, d: SuccessHookData<'_, Self::Output>, tx: &mut TransactionContext<'_>, ) { @@ -103,13 +104,13 @@ impl DurableExecution for MainJobHandler { let webhook_job = WebhookJobPayload { url: "https://api.example.com/webhook".to_string(), payload: serde_json::to_string(d.result).unwrap(), - parent_job_id: job.data.id_to_check.clone(), + parent_job_id: job.job.data.id_to_check.clone(), }; // Use the type-safe API! 
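 // queue_job below only stages commands on the success hook's transaction
 // pipeline, so the webhook job becomes visible atomically with the parent
 // job's completion.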
let mut webhook_builder = self.webhook_queue.clone().job(webhook_job); - webhook_builder.options.id = format!("{}_webhook", job.data.id_to_check); + webhook_builder.options.id = format!("{}_webhook", job.job.data.id_to_check); if let Err(e) = tx.queue_job(webhook_builder) { tracing::error!("Failed to queue webhook job: {:?}", e); @@ -124,9 +125,9 @@ impl DurableExecution for WebhookJobHandler { type ErrorData = TestJobErrorData; type JobData = WebhookJobPayload; - async fn process(&self, job: &Job) -> JobResult { - println!("WEBHOOK_JOB: Sending webhook to: {}", job.data.url); - println!("WEBHOOK_JOB: Payload: {}", job.data.payload); + async fn process(&self, job: &BorrowedJob) -> JobResult { + println!("WEBHOOK_JOB: Sending webhook to: {}", job.job.data.url); + println!("WEBHOOK_JOB: Payload: {}", job.job.data.payload); tokio::time::sleep(Duration::from_millis(25)).await; WEBHOOK_JOB_PROCESSED.store(true, Ordering::SeqCst); @@ -140,13 +141,13 @@ impl DurableExecution for WebhookJobHandler { async fn on_success( &self, - job: &Job, + job: &BorrowedJob, _d: SuccessHookData<'_, Self::Output>, _tx: &mut TransactionContext<'_>, ) { tracing::info!( "WEBHOOK_JOB: Webhook delivered successfully for parent: {}", - job.data.parent_job_id + job.job.data.parent_job_id ); } } diff --git a/twmq/tests/delay.rs b/twmq/tests/delay.rs index 51ee130..c8d12f4 100644 --- a/twmq/tests/delay.rs +++ b/twmq/tests/delay.rs @@ -13,7 +13,7 @@ use fixtures::TestJobErrorData; use twmq::{ DurableExecution, Queue, SuccessHookData, hooks::TransactionContext, - job::{DelayOptions, Job, JobResult, JobStatus, RequeuePosition}, + job::{BorrowedJob, DelayOptions, JobResult, JobStatus, RequeuePosition}, queue::QueueOptions, redis::aio::ConnectionManager, }; @@ -60,41 +60,41 @@ impl DurableExecution for DelayTestJobHandler { type ErrorData = TestJobErrorData; type JobData = DelayTestJobData; - async fn process(&self, job: &Job) -> JobResult { + async fn process(&self, job: &BorrowedJob) -> JobResult { let now = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_secs(); - let actual_delay = now - job.created_at; + let actual_delay = now - job.job.created_at; tracing::info!( "DELAY_JOB: Processing job {}, expected delay: {}s, actual delay: {}s", - job.id, - job.data.expected_delay_seconds, + job.job.id, + job.job.data.expected_delay_seconds, actual_delay ); Ok(DelayTestOutput { actual_delay_seconds: actual_delay, - message: format!("Job {} processed after {}s delay", job.id, actual_delay), - test_id: job.data.test_id.clone(), + message: format!("Job {} processed after {}s delay", job.job.id, actual_delay), + test_id: job.job.data.test_id.clone(), }) } async fn on_success( &self, - job: &Job, + job: &BorrowedJob, d: SuccessHookData<'_, Self::Output>, tx: &mut TransactionContext<'_>, ) { tracing::info!("DELAY_JOB: on_success hook - {}", d.result.message); // Store processing order in Redis using test-specific key - let order_key = format!("test:{}:processing_order", job.data.test_id); + let order_key = format!("test:{}:processing_order", job.job.data.test_id); // Use pipeline to add to processing order list - tx.pipeline().rpush(&order_key, &job.id); + tx.pipeline().rpush(&order_key, &job.job.id); tx.pipeline().expire(&order_key, 300); // Expire after 5 minutes } } diff --git a/twmq/tests/fixtures.rs b/twmq/tests/fixtures.rs index 8ff94b5..5e26c8a 100644 --- a/twmq/tests/fixtures.rs +++ b/twmq/tests/fixtures.rs @@ -7,8 +7,8 @@ use serde::{Deserialize, Serialize}; use twmq::error::TwmqError; use 
twmq::hooks::TransactionContext; -use twmq::job::{Job, JobResult}; -use twmq::{DurableExecution, SuccessHookData}; +use twmq::job::{BorrowedJob, JobResult}; +use twmq::{DurableExecution, SuccessHookData, UserCancellable}; // --- Test Job Definition --- @@ -36,6 +36,14 @@ impl From for TestJobErrorData { } } +impl UserCancellable for TestJobErrorData { + fn user_cancelled() -> Self { + TestJobErrorData { + reason: "Transaction cancelled by user".to_string(), + } + } +} + // Use a static AtomicBool to signal from the job process to the test // In a real scenario, you'd check queue state or results in Redis. pub static TEST_JOB_PROCESSED_SUCCESSFULLY: AtomicBool = AtomicBool::new(false); @@ -49,28 +57,28 @@ impl DurableExecution for TestJobHandler { // If not using async_trait, the signature is: // fn process(&self) -> impl std::future::Future> + Send + Sync { - async fn process(&self, job: &Job) -> JobResult { + async fn process(&self, job: &BorrowedJob) -> JobResult { println!( "TEST_JOB: Processing job with id_to_check: {}", - job.data.id_to_check + job.job.data.id_to_check ); // Simulate some work tokio::time::sleep(Duration::from_millis(50)).await; TEST_JOB_PROCESSED_SUCCESSFULLY.store(true, Ordering::SeqCst); Ok(TestJobOutput { - reply: format!("Successfully processed '{}'", job.data.message), + reply: format!("Successfully processed '{}'", job.job.data.message), }) } async fn on_success( &self, - job: &Job, + job: &BorrowedJob, _d: SuccessHookData<'_, Self::Output>, _tx: &mut TransactionContext<'_>, ) { tracing::info!( "TEST_JOB: on_success hook for id_to_check: {}", - job.data.id_to_check + job.job.data.id_to_check ); } } diff --git a/twmq/tests/lease_expiry.rs b/twmq/tests/lease_expiry.rs index 7167431..b2ece8a 100644 --- a/twmq/tests/lease_expiry.rs +++ b/twmq/tests/lease_expiry.rs @@ -10,7 +10,7 @@ use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitEx use twmq::{ DurableExecution, FailHookData, NackHookData, Queue, SuccessHookData, hooks::TransactionContext, - job::{Job, JobResult, JobStatus}, + job::{BorrowedJob, JobResult, JobStatus}, queue::QueueOptions, redis::aio::ConnectionManager, }; @@ -65,11 +65,11 @@ impl DurableExecution for SleepForeverHandler { type ErrorData = TestJobErrorData; type JobData = SleepForeverJobData; - async fn process(&self, job: &Job) -> JobResult { + async fn process(&self, job: &BorrowedJob) -> JobResult { tracing::info!( "SLEEP_JOB: Starting to process job {}, attempt {}", - job.id, - job.attempts + job.job.id, + job.job.attempts ); // Signal that we started processing @@ -80,16 +80,16 @@ impl DurableExecution for SleepForeverHandler { tokio::time::sleep(Duration::from_millis(100)).await; } - tracing::info!("SLEEP_JOB: Job {} woke up, finishing", job.id); + tracing::info!("SLEEP_JOB: Job {} woke up, finishing", job.job.id); Ok(SleepJobOutput { - message: format!("Job {} completed after sleeping", job.id), + message: format!("Job {} completed after sleeping", job.job.id), }) } async fn on_success( &self, - _job: &Job, + _job: &BorrowedJob, d: SuccessHookData<'_, Self::Output>, _tx: &mut TransactionContext<'_>, ) { @@ -98,7 +98,7 @@ impl DurableExecution for SleepForeverHandler { async fn on_nack( &self, - _job: &Job, + _job: &BorrowedJob, d: NackHookData<'_, Self::ErrorData>, _tx: &mut TransactionContext<'_>, ) { @@ -110,7 +110,7 @@ impl DurableExecution for SleepForeverHandler { async fn on_fail( &self, - _job: &Job, + _job: &BorrowedJob, d: FailHookData<'_, Self::ErrorData>, _tx: &mut TransactionContext<'_>, ) { diff --git 
a/twmq/tests/nack.rs b/twmq/tests/nack.rs index 1148462..f75dc72 100644 --- a/twmq/tests/nack.rs +++ b/twmq/tests/nack.rs @@ -13,7 +13,7 @@ use tracing_subscriber::{EnvFilter, layer::SubscriberExt, util::SubscriberInitEx use twmq::{ DurableExecution, FailHookData, NackHookData, Queue, SuccessHookData, hooks::TransactionContext, - job::{Job, JobError, JobResult, JobStatus, RequeuePosition}, + job::{BorrowedJob, JobError, JobResult, JobStatus, RequeuePosition}, queue::QueueOptions, redis::aio::ConnectionManager, }; @@ -63,30 +63,33 @@ impl DurableExecution for RetryJobHandler { type ErrorData = TestJobErrorData; type JobData = RetryJobPayload; - async fn process(&self, job: &Job) -> JobResult { - let current_attempt = job.attempts; + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { + let current_attempt = job.job.attempts; tracing::info!( "RETRY_JOB: Processing job {}, attempt {}/{}", - job.id, + job.job.id, current_attempt, - job.data.desired_attempts + job.job.data.desired_attempts ); - if current_attempt < job.data.desired_attempts { + if current_attempt < job.job.data.desired_attempts { // Not enough attempts yet, nack it tracing::info!( "RETRY_JOB: Nacking job {} (attempt {}/{})", - job.id, + job.job.id, current_attempt, - job.data.desired_attempts + job.job.data.desired_attempts ); Err(JobError::Nack { error: TestJobErrorData { reason: format!( "Need {} attempts, only at {}", - job.data.desired_attempts, current_attempt + job.job.data.desired_attempts, current_attempt ), }, delay: None, @@ -97,7 +100,7 @@ impl DurableExecution for RetryJobHandler { tracing::info!( "RETRY_JOB: Success on attempt {}/{}", current_attempt, - job.data.desired_attempts + job.job.data.desired_attempts ); RETRY_JOB_FINAL_SUCCESS.store(true, Ordering::SeqCst); @@ -111,7 +114,7 @@ impl DurableExecution for RetryJobHandler { async fn on_success( &self, - _job: &Job, + _job: &BorrowedJob, d: SuccessHookData<'_, Self::Output>, _tx: &mut TransactionContext<'_>, ) { @@ -123,13 +126,13 @@ impl DurableExecution for RetryJobHandler { async fn on_nack( &self, - job: &Job, + job: &BorrowedJob, d: NackHookData<'_, Self::ErrorData>, _tx: &mut TransactionContext<'_>, ) { tracing::info!( "RETRY_JOB: on_nack hook - attempt {} failed: {}", - job.attempts, + job.job.attempts, d.error.reason ); if let Some(delay_duration) = d.delay { @@ -139,13 +142,13 @@ impl DurableExecution for RetryJobHandler { async fn on_fail( &self, - job: &Job, + job: &BorrowedJob, _d: FailHookData<'_, Self::ErrorData>, _tx: &mut TransactionContext<'_>, ) { tracing::error!( "RETRY_JOB: on_fail hook - permanently failed at attempt {}", - job.attempts + job.job.attempts ); } From ab83a2234d70eca04d40efc2bdb20b432e6653e0 Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Wed, 11 Jun 2025 04:18:56 +0530 Subject: [PATCH 4/7] fix throughput benchmark --- twmq/benches/throughput.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/twmq/benches/throughput.rs b/twmq/benches/throughput.rs index 67cf19f..67fd4d0 100644 --- a/twmq/benches/throughput.rs +++ b/twmq/benches/throughput.rs @@ -10,6 +10,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::runtime::Runtime; use twmq::error::TwmqError; use twmq::job::JobError; +use twmq::{BorrowedJob, UserCancellable}; use twmq::{ DurableExecution, Queue, @@ -44,6 +45,14 @@ impl From for BenchmarkErrorData { } } +impl UserCancellable for BenchmarkErrorData { + fn user_cancelled() -> Self { + BenchmarkErrorData { + reason: "Transaction cancelled by 
user".to_string(), + } + } +} + // Shared metrics across all benchmark jobs #[derive(Clone)] pub struct BenchmarkMetrics { @@ -100,7 +109,10 @@ impl DurableExecution for BenchmarkJobHandler { type ErrorData = BenchmarkErrorData; type JobData = BenchmarkJobData; - async fn process(&self, job: &Job) -> JobResult { + async fn process( + &self, + job: &BorrowedJob, + ) -> JobResult { let start_time = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() @@ -119,7 +131,7 @@ impl DurableExecution for BenchmarkJobHandler { .fetch_add(processing_time, Ordering::SeqCst); // Fresh random decision each processing attempt - if rand::thread_rng().gen_bool(job.data.nack_probability) { + if rand::thread_rng().gen_bool(job.job.data.nack_probability) { self.metrics.jobs_nacked.fetch_add(1, Ordering::SeqCst); // Random position for nacks as requested @@ -140,7 +152,7 @@ impl DurableExecution for BenchmarkJobHandler { self.metrics.jobs_succeeded.fetch_add(1, Ordering::SeqCst); Ok(BenchmarkOutput { - job_id: job.id.clone(), + job_id: job.id().to_string(), processed_at: end_time, }) } From ed8f5490300704fc96e1290b904fcf184c8dacc3 Mon Sep 17 00:00:00 2001 From: Prithvish Baidya Date: Mon, 16 Jun 2025 04:38:26 +0530 Subject: [PATCH 5/7] wip smart account signatures --- aa-core/src/account_factory/chained.rs | 14 + aa-core/src/account_factory/default.rs | 5 + aa-core/src/account_factory/mod.rs | 9 + aa-core/src/lib.rs | 1 + aa-core/src/signer.rs | 386 ++++++++++++++++++++++ aa-core/src/smart_account/mod.rs | 1 + core/src/execution_options/aa.rs | 2 +- core/src/lib.rs | 1 + core/src/signer.rs | 184 +++++++++++ server/src/http/routes/mod.rs | 2 + server/src/http/routes/sign_message.rs | 249 ++++++++++++++ server/src/http/routes/sign_typed_data.rs | 206 ++++++++++++ server/src/http/server.rs | 9 +- server/src/main.rs | 6 +- 14 files changed, 1069 insertions(+), 6 deletions(-) create mode 100644 aa-core/src/signer.rs create mode 100644 core/src/signer.rs create mode 100644 server/src/http/routes/sign_message.rs create mode 100644 server/src/http/routes/sign_typed_data.rs diff --git a/aa-core/src/account_factory/chained.rs b/aa-core/src/account_factory/chained.rs index 07edc1b..749658b 100644 --- a/aa-core/src/account_factory/chained.rs +++ b/aa-core/src/account_factory/chained.rs @@ -18,6 +18,7 @@ sol! 
{ #[sol(rpc)] contract AccountFactoryContract { function getAddress(address _adminSigner, bytes _data) view returns (address); + function accountImplementation() external view returns (address); } } @@ -42,4 +43,17 @@ impl AccountFactory for ChainedAccountFactory<'_, C> { Ok(predicted_address) } + + async fn implementation_address(&self) -> Result { + let account_factory_contract = + AccountFactoryContract::new(self.factory_address, self.chain.provider().clone()); + + let predicted_address = account_factory_contract + .accountImplementation() + .call() + .await + .map_err(|e| e.to_engine_error(self.chain.chain_id(), Some(self.factory_address)))?; + + Ok(predicted_address) + } } diff --git a/aa-core/src/account_factory/default.rs b/aa-core/src/account_factory/default.rs index f100dcd..98e7c8c 100644 --- a/aa-core/src/account_factory/default.rs +++ b/aa-core/src/account_factory/default.rs @@ -66,6 +66,11 @@ impl AccountFactory for DefaultAccountFactory { let address = SyncAccountFactory::predict_address_sync(self, signer, salt_data); std::future::ready(Ok(address)) } + + fn implementation_address(&self) -> impl Future> { + // Use the sync implementation but return as a ready future + std::future::ready(Ok(self.implementation_address)) + } } #[cfg(test)] diff --git a/aa-core/src/account_factory/mod.rs b/aa-core/src/account_factory/mod.rs index 1709f7e..153978f 100644 --- a/aa-core/src/account_factory/mod.rs +++ b/aa-core/src/account_factory/mod.rs @@ -42,6 +42,8 @@ pub trait AccountFactory { } .abi_encode() } + + fn implementation_address(&self) -> impl Future> + Send; } /// A factory that can use either implementation based on the provided addresses @@ -69,6 +71,13 @@ impl<'a, C: Chain> AccountFactory for SmartAccountFactory<'a, C> { Self::Chained(factory) => factory.predict_address(signer, salt_data).await, } } + + async fn implementation_address(&self) -> Result { + match self { + Self::Default(factory) => factory.implementation_address().await, + Self::Chained(factory) => factory.implementation_address().await, + } + } } /// Get the appropriate account factory based on the factory address diff --git a/aa-core/src/lib.rs b/aa-core/src/lib.rs index 547df5c..1523de2 100644 --- a/aa-core/src/lib.rs +++ b/aa-core/src/lib.rs @@ -1,3 +1,4 @@ pub mod account_factory; +pub mod signer; pub mod smart_account; pub mod userop; diff --git a/aa-core/src/signer.rs b/aa-core/src/signer.rs new file mode 100644 index 0000000..55db094 --- /dev/null +++ b/aa-core/src/signer.rs @@ -0,0 +1,386 @@ +use alloy::{ + dyn_abi::TypedData, + hex::FromHex, + primitives::{Address, B256, Bytes, hex, keccak256}, + sol, + sol_types::{SolCall, SolStruct, SolValue, eip712_domain}, +}; +use engine_core::{ + chain::Chain, + credentials::SigningCredential, + error::EngineError, + signer::{AccountSigner, EoaSigner, EoaSigningOptions, SmartAccountSigningOptions}, +}; +use vault_types::enclave::encrypted::eoa::MessageFormat; + +use crate::{ + account_factory::{AccountFactory, get_account_factory}, + smart_account::{DeterminedSmartAccount, SmartAccount, SmartAccountFromSalt}, +}; + +sol! { + #[sol(rpc)] + contract ERC1271Contract { + function isValidSignature(bytes32 hash, bytes signature) external view returns (bytes4 magicValue); + } +} + +sol! { + #[sol(rpc)] + contract AccountImplementationContract { + function getMessageHash(bytes32 _hash) external view returns (bytes32); + } +} + +sol! 
{ + struct AccountMessage { + bytes message; + } +} + +/// ERC-6492 magic suffix +const ERC6492_MAGIC_SUFFIX: [u8; 32] = + hex!("6492649264926492649264926492649264926492649264926492649264926492"); + +/// Builder for creating SmartAccountSigner with computed address and factory pattern detection +pub struct SmartAccountSignerBuilder { + eoa_signer: EoaSigner, + credentials: SigningCredential, + options: SmartAccountSigningOptions, + chain: C, +} + +impl SmartAccountSignerBuilder { + pub fn new( + eoa_signer: EoaSigner, + credentials: SigningCredential, + options: SmartAccountSigningOptions, + chain: C, + ) -> Self { + Self { + eoa_signer, + credentials, + options, + chain, + } + } + + /// Build the signer with computed address and factory pattern detection + pub async fn build(self) -> Result, EngineError> { + // 1. Parse Account Salt + let salt_data = if self.options.account_salt.starts_with("0x") { + Bytes::from_hex(self.options.account_salt.clone()).map_err(|e| { + EngineError::ValidationError { + message: format!("Failed to parse hex salt: {}", e), + } + })? + } else { + let hex_string = hex::encode(self.options.account_salt.clone()); + Bytes::from_hex(hex_string).map_err(|e| EngineError::ValidationError { + message: format!("Failed to encode salt as hex: {}", e), + })? + }; + + // 2. Determine Smart Account + let smart_account = match self.options.smart_account_address { + Some(address) => DeterminedSmartAccount { address }, + None => SmartAccountFromSalt { + admin_address: self.options.signer_address, + chain: &self.chain, + factory_address: self.options.entrypoint_details.factory_address, + salt_data: &salt_data, + } + .to_determined_smart_account() + .await + .map_err(|e| EngineError::ValidationError { + message: format!("Failed to determine smart account: {}", e), + })?, + }; + + let factory = get_account_factory( + &self.chain, + self.options.entrypoint_details.factory_address, + None, + ); + let init_calldata = factory.init_calldata(self.options.signer_address, salt_data); + let impl_address = factory.implementation_address().await?; + + // Check if factory supports 712 pattern + let supports_712_factory = self + .check_712_factory_support(impl_address) + .await + .unwrap_or(false); + + Ok(SmartAccountSigner { + options: self.options, + chain: self.chain, + eoa_signer: self.eoa_signer, + credentials: self.credentials, + smart_account, + supports_712_factory, + init_calldata, + }) + } + + async fn check_712_factory_support(&self, impl_address: Address) -> Result { + let impl_contract = + AccountImplementationContract::new(impl_address, self.chain.provider().clone()); + + // Test with a dummy hash + let dummy_hash = B256::ZERO; + + match impl_contract.getMessageHash(dummy_hash).call().await { + Ok(response) => Ok(response != B256::ZERO), + Err(_) => Ok(false), + } + } +} + +/// Smart Account signer with pre-computed address and factory pattern support +#[derive(Clone)] +pub struct SmartAccountSigner { + options: SmartAccountSigningOptions, + credentials: SigningCredential, + chain: C, + eoa_signer: EoaSigner, + smart_account: DeterminedSmartAccount, + init_calldata: Vec, + supports_712_factory: bool, +} + +impl SmartAccountSigner { + /// Sign message with 712 factory wrapping if supported + async fn sign_message_with_factory_pattern( + &self, + message: &str, + format: MessageFormat, + ) -> Result { + if self.supports_712_factory { + // Wrap message in EIP-712 domain for 712 factory pattern + let message_hash = self.hash_message(message, format); + 
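+            // Accounts built with the 712 factory pattern verify signatures over
+            // an EIP-712 "AccountMessage" wrapper of this hash (see
+            // sign_712_wrapped_hash below) instead of the bare EIP-191 digest.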
self.sign_712_wrapped_hash(message_hash).await + } else { + // Direct EOA signing + self.eoa_signer + .sign_message( + EoaSigningOptions { + chain_id: Some(self.chain.chain_id()), + from: self.options.signer_address, + }, + message, + format, + self.credentials.clone(), + ) + .await + } + } + + /// Sign typed data with 712 factory wrapping if supported + async fn sign_typed_data_with_factory_pattern( + &self, + typed_data: &TypedData, + ) -> Result { + // Check if self-verifying contract (e.g., session key operations) + let is_self_verifying = typed_data + .domain + .verifying_contract + .map(|addr| addr == self.smart_account.address) + .unwrap_or(false); + + if is_self_verifying { + // Direct EOA signing for self-verifying contracts + return self + .eoa_signer + .sign_typed_data( + EoaSigningOptions { + chain_id: Some(self.chain.chain_id()), + from: self.options.signer_address, + }, + typed_data, + self.credentials.clone(), + ) + .await; + } + + if self.supports_712_factory { + // Wrap typed data hash in EIP-712 domain for 712 factory pattern + let typed_data_hash = + typed_data + .eip712_signing_hash() + .map_err(|_e| EngineError::ValidationError { + message: "Failed to compute typed data hash".to_string(), + })?; + self.sign_712_wrapped_hash(typed_data_hash).await + } else { + // Direct EOA signing + self.eoa_signer + .sign_typed_data( + EoaSigningOptions { + chain_id: Some(self.chain.chain_id()), + from: self.options.signer_address, + }, + typed_data, + self.credentials.clone(), + ) + .await + } + } + + /// Sign hash wrapped in AccountMessage EIP-712 structure for 712 factory pattern + async fn sign_712_wrapped_hash(&self, hash: B256) -> Result { + let domain = eip712_domain! { + name: "Account", + version: "1", + chain_id: self.options.chain_id, + verifying_contract: self.smart_account.address, + }; + + let account_message = AccountMessage { + message: hash.abi_encode().into(), + }; + + // Get the EIP712 signing hash using alloy's native functionality + let signing_hash = account_message.eip712_signing_hash(&domain); + + // Sign the hash directly with EOA + self.eoa_signer + .sign_message( + EoaSigningOptions { + chain_id: Some(self.chain.chain_id()), + from: self.options.signer_address, + }, + &format!("0x{}", hex::encode(signing_hash)), + MessageFormat::Hex, + self.credentials.clone(), + ) + .await + } + + /// Verify ERC-1271 signature + async fn verify_erc1271(&self, hash: B256, signature: &str) -> Result { + let signature_bytes = hex::decode(signature.strip_prefix("0x").unwrap_or(signature)) + .map_err(|_| EngineError::ValidationError { + message: "Invalid signature hex".to_string(), + })?; + + let contract = + ERC1271Contract::new(self.smart_account.address, self.chain.provider().clone()); + + match contract + .isValidSignature(hash, signature_bytes.into()) + .call() + .await + { + Ok(response) => { + let expected_magic = ERC1271Contract::isValidSignatureCall::SELECTOR; + Ok(response.as_slice() == expected_magic) + } + Err(_) => Ok(false), + } + } + + /// Create ERC-6492 signature for undeployed accounts + async fn create_erc6492_signature(&self, signature: &str) -> Result { + let signature_bytes = hex::decode(signature.strip_prefix("0x").unwrap_or(signature)) + .map_err(|_| EngineError::ValidationError { + message: "Invalid signature hex".to_string(), + })?; + + let mut output_buffer = Vec::new(); + + // Factory address (20 bytes) + output_buffer.extend_from_slice(self.options.entrypoint_details.factory_address.as_slice()); + + // Factory calldata length (32 bytes) + calldata + 
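+        // The wrapper lets a verifier counterfactually deploy the account with
+        // this factory calldata before running the ERC-1271 check; the magic
+        // suffix appended at the end marks the signature as ERC-6492-wrapped.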
let init_code_len = alloy::primitives::U256::from(self.init_calldata.len()); + output_buffer.extend_from_slice(&init_code_len.to_be_bytes::<32>()); + output_buffer.extend_from_slice(&self.init_calldata); + + // Signature length (32 bytes) + signature + let sig_len = alloy::primitives::U256::from(signature_bytes.len()); + output_buffer.extend_from_slice(&sig_len.to_be_bytes::<32>()); + output_buffer.extend_from_slice(&signature_bytes); + + // Magic suffix + output_buffer.extend_from_slice(&ERC6492_MAGIC_SUFFIX); + + Ok(format!("0x{}", hex::encode(output_buffer))) + } + + /// Hash message according to format + fn hash_message(&self, message: &str, format: MessageFormat) -> B256 { + match format { + MessageFormat::Text => { + let prefixed = + format!("\x19Ethereum Signed Message:\n{}{}", message.len(), message); + keccak256(prefixed.as_bytes()) + } + MessageFormat::Hex => { + let bytes = hex::decode(message.strip_prefix("0x").unwrap_or(message)) + .unwrap_or_else(|_| message.as_bytes().to_vec()); + keccak256(bytes) + } + } + } + + pub async fn sign_message( + &self, + message: &str, + format: MessageFormat, + ) -> Result { + let is_deployed = self.smart_account.is_deployed(&self.chain).await?; + + // Get signature with appropriate factory pattern handling + let signature = self + .sign_message_with_factory_pattern(message, format) + .await?; + + if is_deployed { + // Verify ERC-1271 signature for deployed accounts + let message_hash = self.hash_message(message, format); + let is_valid = self.verify_erc1271(message_hash, &signature).await?; + + if is_valid { + Ok(signature) + } else { + Err(EngineError::ValidationError { + message: "ERC-1271 signature validation failed".to_string(), + }) + } + } else { + // Create ERC-6492 signature for undeployed accounts + self.create_erc6492_signature(&signature).await + } + } + + pub async fn sign_typed_data(&self, typed_data: &TypedData) -> Result { + let is_deployed = self.smart_account.is_deployed(&self.chain).await?; + + // Get signature with appropriate factory pattern handling + let signature = self + .sign_typed_data_with_factory_pattern(typed_data) + .await?; + + if is_deployed { + // Verify ERC-1271 signature for deployed accounts + let typed_data_hash = + typed_data + .eip712_signing_hash() + .map_err(|_e| EngineError::ValidationError { + message: "Failed to compute typed data hash".to_string(), + })?; + let is_valid = self.verify_erc1271(typed_data_hash, &signature).await?; + + if is_valid { + Ok(signature) + } else { + Err(EngineError::ValidationError { + message: "ERC-1271 signature validation failed".to_string(), + }) + } + } else { + // Create ERC-6492 signature for undeployed accounts + self.create_erc6492_signature(&signature).await + } + } +} diff --git a/aa-core/src/smart_account/mod.rs b/aa-core/src/smart_account/mod.rs index 7392a9f..941f42b 100644 --- a/aa-core/src/smart_account/mod.rs +++ b/aa-core/src/smart_account/mod.rs @@ -65,6 +65,7 @@ pub struct SmartAccountFromSalt<'a, C: Chain> { pub chain: &'a C, } +#[derive(Clone, Debug)] pub struct DeterminedSmartAccount { pub address: Address, } diff --git a/core/src/execution_options/aa.rs b/core/src/execution_options/aa.rs index 57746a8..56a8558 100644 --- a/core/src/execution_options/aa.rs +++ b/core/src/execution_options/aa.rs @@ -95,7 +95,7 @@ pub fn default_account_salt() -> String { "0x".to_string() } #[derive(Deserialize, JsonSchema, utoipa::ToSchema)] -struct EntrypointAndFactoryDetailsDeserHelper { +pub struct EntrypointAndFactoryDetailsDeserHelper { /// ### Entrypoint Contract 
Address /// The address of the ERC-4337 entrypoint contract. /// diff --git a/core/src/lib.rs b/core/src/lib.rs index 75d809d..043844b 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -5,5 +5,6 @@ pub mod defs; pub mod error; pub mod execution_options; pub mod rpc_clients; +pub mod signer; pub mod transaction; pub mod userop; diff --git a/core/src/signer.rs b/core/src/signer.rs new file mode 100644 index 0000000..19ce85c --- /dev/null +++ b/core/src/signer.rs @@ -0,0 +1,184 @@ +use std::option; + +use alloy::{ + dyn_abi::TypedData, + primitives::{Address, ChainId}, +}; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use vault_sdk::VaultClient; +use vault_types::enclave::encrypted::eoa::MessageFormat; + +use crate::{ + chain::Chain, + credentials::{self, SigningCredential}, + defs::AddressDef, + error::EngineError, + execution_options::aa::{EntrypointAndFactoryDetails, EntrypointAndFactoryDetailsDeserHelper}, +}; + +/// EOA signing options +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct EoaSigningOptions { + /// The EOA address to sign with + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub from: Address, + /// Optional chain ID for the signature + #[serde(skip_serializing_if = "Option::is_none")] + pub chain_id: Option, +} + +/// Smart Account signing options +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SmartAccountSigningOptions { + /// The smart account address (if deployed) + #[schemars(with = "Option")] + #[schema(value_type = Option)] + #[serde(skip_serializing_if = "Option::is_none")] + pub smart_account_address: Option
, + + /// The EOA that controls the smart account + #[schemars(with = "AddressDef")] + #[schema(value_type = AddressDef)] + pub signer_address: Address, + + /// Entrypoint and factory configuration + #[serde(flatten)] + #[schemars(with = "EntrypointAndFactoryDetailsDeserHelper")] + #[schema(value_type = EntrypointAndFactoryDetailsDeserHelper)] + pub entrypoint_details: EntrypointAndFactoryDetails, + + /// Account salt for deterministic addresses + #[serde(default = "default_account_salt")] + pub account_salt: String, + + /// Chain ID for smart account operations + pub chain_id: ChainId, +} + +/// Configuration options for signing operations +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(tag = "type", rename_all = "camelCase")] +pub enum SigningOptions { + /// Standard EOA (Externally Owned Account) signing + #[serde(rename = "eoa")] + Eoa(EoaSigningOptions), + /// Smart Account signing with advanced signature patterns + #[serde(rename = "smart_account")] + SmartAccount(SmartAccountSigningOptions), +} + +/// Account signer trait using impl Future pattern like TWMQ +pub trait AccountSigner { + type SigningOptions; + /// Sign a message + fn sign_message( + &self, + options: Self::SigningOptions, + message: &str, + format: MessageFormat, + credentials: SigningCredential, + ) -> impl std::future::Future> + Send; + + /// Sign typed data + fn sign_typed_data( + &self, + options: Self::SigningOptions, + typed_data: &TypedData, + credentials: SigningCredential, + ) -> impl std::future::Future> + Send; +} + +/// EOA signer implementation +#[derive(Clone)] +pub struct EoaSigner { + pub vault_client: VaultClient, +} + +impl EoaSigner { + /// Create a new EOA signer + pub fn new(vault_client: VaultClient) -> Self { + Self { vault_client } + } +} + +impl AccountSigner for EoaSigner { + type SigningOptions = EoaSigningOptions; + + async fn sign_message( + &self, + options: EoaSigningOptions, + message: &str, + format: MessageFormat, + credentials: SigningCredential, + ) -> Result { + match credentials { + SigningCredential::Vault(auth_method) => { + let vault_result = self + .vault_client + .sign_message( + auth_method.clone(), + message.to_string(), + options.from, + options.chain_id, + Some(format), + ) + .await + .map_err(|e| { + tracing::error!("Error signing message with EOA: {:?}", e); + EngineError::VaultError { + message: e.to_string(), + } + })?; + + Ok(vault_result.signature) + } + } + } + + async fn sign_typed_data( + &self, + options: EoaSigningOptions, + typed_data: &TypedData, + credentials: SigningCredential, + ) -> Result { + match &credentials { + SigningCredential::Vault(auth_method) => { + let vault_result = self + .vault_client + .sign_typed_data(auth_method.clone(), typed_data.clone(), options.from) + .await + .map_err(|e| { + tracing::error!("Error signing typed data with EOA: {:?}", e); + EngineError::VaultError { + message: e.to_string(), + } + })?; + + Ok(vault_result.signature) + } + } + } +} + +/// Parameters for signing a message (used in routes) +pub struct MessageSignerParams { + pub credentials: SigningCredential, + pub message: String, + pub format: MessageFormat, + pub signing_options: SigningOptions, +} + +/// Parameters for signing typed data (used in routes) +pub struct TypedDataSignerParams { + pub credentials: SigningCredential, + pub typed_data: TypedData, + pub signing_options: SigningOptions, +} + +fn default_account_salt() -> String { + "0x".to_string() +} diff --git a/server/src/http/routes/mod.rs 
b/server/src/http/routes/mod.rs index a3efece..6c674db 100644 --- a/server/src/http/routes/mod.rs +++ b/server/src/http/routes/mod.rs @@ -2,5 +2,7 @@ pub mod contract_encode; pub mod contract_read; pub mod contract_write; +pub mod sign_message; +pub mod sign_typed_data; pub mod transaction; pub mod transaction_write; diff --git a/server/src/http/routes/sign_message.rs b/server/src/http/routes/sign_message.rs new file mode 100644 index 0000000..8adf7bc --- /dev/null +++ b/server/src/http/routes/sign_message.rs @@ -0,0 +1,249 @@ +// Sign Message Operations + +use alloy::primitives::{Address, ChainId}; +use axum::{ + extract::State, + http::StatusCode, + response::{IntoResponse, Json}, +}; +use engine_core::{ + error::EngineError, + signer::{EoaSigner, SigningOptions, SmartAccountSigningOptions}, + credentials::SigningCredential, +}; +use engine_aa_core::signer::{SmartAccountSigner, SmartAccountSignerBuilder}; +use futures::future::join_all; +use schemars::JsonSchema; +use serde::{Deserialize, Serialize}; +use thirdweb_core::auth::ThirdwebAuth; +use vault_types::enclave::encrypted::eoa::MessageFormat; + +use crate::http::{ + error::ApiEngineError, + extractors::{EngineJson, SigningCredentialsExtractor}, + server::EngineServerState, + types::ErrorResponse, +}; + +// ===== REQUEST/RESPONSE TYPES ===== + +/// Options for signing messages +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SignOptions { + /// Configuration options for signing + #[serde(flatten)] + pub signing_options: SigningOptions, +} + +/// Individual message to sign +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct MessageInput { + /// The message to sign + pub message: String, + /// Message format (text or hex) + #[serde(default = "default_message_format")] + pub format: MessageFormat, +} + +/// Request to sign messages +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SignMessageRequest { + /// Configuration options for signing + pub sign_options: SignOptions, + /// List of messages to sign + pub params: Vec, +} + +/// Result of a single message signing operation +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(untagged)] +pub enum SignResultItem { + Success(SignResultSuccessItem), + Failure(SignResultFailureItem), +} + +/// Successful result from a message signing operation +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SignResultSuccessItem { + /// Always true for successful operations + #[schemars(with = "bool")] + #[schema(value_type = bool)] + pub success: serde_bool::True, + /// The signing result data + pub result: SignResultData, +} + +/// Data returned from successful signing +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +#[serde(rename_all = "camelCase")] +pub struct SignResultData { + /// The resulting signature + pub signature: String, + /// The data that was signed (original message) + pub signed_data: String, +} + +/// Failed result from a message signing operation +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)] +pub struct SignResultFailureItem { + /// Always false for failed operations + #[schemars(with = "bool")] + #[schema(value_type = bool)] + pub success: serde_bool::False, + /// Detailed error 
+    pub error: EngineError,
+}
+
+/// Collection of results from multiple message signing operations
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+pub struct SignResults {
+    /// Array of results, one for each input message
+    pub results: Vec<SignResultItem>,
+}
+
+/// Response from the sign message endpoint
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+pub struct SignMessageResponse {
+    /// Container for all message signing results
+    pub result: SignResults,
+}
+
+// ===== CONVENIENCE CONSTRUCTORS =====
+
+impl SignResultSuccessItem {
+    /// Create a new successful sign result
+    pub fn new(signature: String, signed_data: String) -> Self {
+        Self {
+            success: serde_bool::True,
+            result: SignResultData {
+                signature,
+                signed_data,
+            },
+        }
+    }
+}
+
+impl SignResultFailureItem {
+    /// Create a new failed sign result
+    pub fn new(error: EngineError) -> Self {
+        Self {
+            success: serde_bool::False,
+            error,
+        }
+    }
+}
+
+impl SignResultItem {
+    /// Create a successful sign result item
+    pub fn success(signature: String, signed_data: String) -> Self {
+        SignResultItem::Success(SignResultSuccessItem::new(signature, signed_data))
+    }
+
+    /// Create a failed sign result item
+    pub fn failure(error: EngineError) -> Self {
+        SignResultItem::Failure(SignResultFailureItem::new(error))
+    }
+}
+
+// ===== ROUTE HANDLER =====
+
+#[utoipa::path(
+    post,
+    operation_id = "signMessage",
+    path = "/sign/message",
+    tag = "Signature",
+    request_body(content = SignMessageRequest, description = "Sign message request", content_type = "application/json"),
+    responses(
+        (status = 200, description = "Successfully signed messages", body = SignMessageResponse, content_type = "application/json"),
+    ),
+    params(
+        ("x-thirdweb-client-id" = Option<String>, Header, description = "Thirdweb client ID, passed along with the service key"),
+        ("x-thirdweb-service-key" = Option<String>, Header, description = "Thirdweb service key, passed when using the client ID"),
+        ("x-thirdweb-secret-key" = Option<String>, Header, description = "Thirdweb secret key, passed standalone"),
+        ("x-vault-access-token" = Option<String>, Header, description = "Vault access token"),
+    )
+)]
+/// Sign Message
+///
+/// Sign messages using either EOA or Smart Account
+pub async fn sign_message(
+    State(state): State<EngineServerState>,
+    SigningCredentialsExtractor(signing_credential): SigningCredentialsExtractor,
+    EngineJson(request): EngineJson<SignMessageRequest>,
+) -> Result<impl IntoResponse, ApiEngineError> {
+    // Process all messages in parallel
+    let sign_futures = request.params.iter().map(|message_input| {
+        sign_single_message(&state, &signing_credential, &request.sign_options.signing_options, message_input)
+    });
+
+    let results: Vec<SignResultItem> = join_all(sign_futures).await;
+
+    Ok((
+        StatusCode::OK,
+        Json(SignMessageResponse {
+            result: SignResults { results },
+        }),
+    ))
+}
+
+// ===== HELPER FUNCTIONS =====
+
+async fn sign_single_message(
+    state: &EngineServerState,
+    signing_credential: &SigningCredential,
+    signing_options: &SigningOptions,
+    message_input: &MessageInput,
+) -> SignResultItem {
+    let result = match signing_options {
+        SigningOptions::Eoa(eoa_options) => {
+            // Direct EOA signing
+            state.eoa_signer
+                .sign_message(
+                    eoa_options.clone(),
+                    &message_input.message,
+                    message_input.format,
+                    signing_credential.clone(),
+                )
+                .await
+        }
+        SigningOptions::SmartAccount(smart_account_options) => {
+            // Smart account signing via builder
+            match state.chains.get_chain(smart_account_options.chain_id) {
+                Ok(chain) => {
+                    match SmartAccountSignerBuilder::new(
+                        state.eoa_signer.clone(),
+                        signing_credential.clone(),
+                        smart_account_options.clone(),
+                        chain,
+                    )
+                    .build()
+                    .await
+                    {
+                        Ok(smart_signer) => {
+                            smart_signer
+                                .sign_message(&message_input.message, message_input.format)
+                                .await
+                        }
+                        Err(e) => Err(e),
+                    }
+                }
+                Err(e) => Err(EngineError::ValidationError {
+                    message: format!("Failed to get chain {}: {}", smart_account_options.chain_id, e),
+                }),
+            }
+        }
+    };
+
+    match result {
+        Ok(signature) => SignResultItem::success(signature, message_input.message.clone()),
+        Err(e) => SignResultItem::failure(e),
+    }
+}
+
+fn default_message_format() -> MessageFormat {
+    MessageFormat::Text
+}
\ No newline at end of file
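For illustration only, a request body for POST /sign/message as the patch-5 structs above would deserialize it: "signOptions" flattens the tagged SigningOptions enum, and each params entry is a MessageInput (a later patch in this series flattens the wrapper to "signingOptions"). The address, chain id, and the camelCase keys of EoaSigningOptions are assumptions, not confirmed by this diff.

use serde_json::json;

// Hypothetical POST /sign/message body (placeholder values throughout).
let body = json!({
    "signOptions": {
        "type": "eoa",
        "from": "0x000000000000000000000000000000000000dEaD",
        "chainId": 1
    },
    "params": [
        { "message": "hello thirdweb", "format": "text" }
    ]
});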
diff --git a/server/src/http/routes/sign_typed_data.rs b/server/src/http/routes/sign_typed_data.rs
new file mode 100644
index 0000000..28c3b78
--- /dev/null
+++ b/server/src/http/routes/sign_typed_data.rs
@@ -0,0 +1,206 @@
+// Sign Typed Data Operations
+
+use alloy::{dyn_abi::TypedData, primitives::Address};
+use axum::{
+    extract::State,
+    http::StatusCode,
+    response::{IntoResponse, Json},
+};
+use engine_core::{
+    error::EngineError,
+    signer::{EoaSigner, SigningOptions, SmartAccountSigningOptions},
+    credentials::SigningCredential,
+};
+use engine_aa_core::signer::{SmartAccountSigner, SmartAccountSignerBuilder};
+use futures::future::join_all;
+use schemars::JsonSchema;
+use serde::{Deserialize, Serialize};
+use thirdweb_core::auth::ThirdwebAuth;
+
+use crate::http::{
+    error::ApiEngineError,
+    extractors::{EngineJson, SigningCredentialsExtractor},
+    server::EngineServerState,
+    types::ErrorResponse,
+};
+
+// ===== REQUEST/RESPONSE TYPES =====
+
+/// Options for signing typed data
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct SignOptions {
+    /// Configuration options for signing
+    #[serde(flatten)]
+    pub signing_options: SigningOptions,
+}
+
+/// Request to sign typed data
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct SignTypedDataRequest {
+    /// Configuration options for signing
+    pub sign_options: SignOptions,
+    /// List of typed data to sign
+    pub params: Vec<TypedData>,
+}
+
+/// Result of a single typed data signing operation
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[serde(untagged)]
+pub enum SignResultItem {
+    Success(SignResultSuccessItem),
+    Failure(SignResultFailureItem),
+}
+
+/// Successful result from a typed data signing operation
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct SignResultSuccessItem {
+    /// Always true for successful operations
+    #[schemars(with = "bool")]
+    #[schema(value_type = bool)]
+    pub success: serde_bool::True,
+    /// The signing result data
+    pub result: SignResultData,
+}
+
+/// Data returned from successful signing
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct SignResultData {
+    /// The resulting signature
+    pub signature: String,
+    /// The data that was signed (stringified typed data)
+    pub signed_data: String,
+}
+
+/// Failed result from a typed data signing operation
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+pub struct SignResultFailureItem {
+    /// Always false for failed operations
+    #[schemars(with = "bool")]
+    #[schema(value_type = bool)]
+    pub success: serde_bool::False,
+    /// Detailed error information describing what went wrong
+    pub error: EngineError,
+}
+
+/// Collection of results from multiple typed data signing operations
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+pub struct SignResults {
+    /// Array of results, one for each input typed data
+    pub results: Vec<SignResultItem>,
+}
+
+/// Response from the sign typed data endpoint
+#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+pub struct SignTypedDataResponse {
+    /// Container for all typed data signing results
+    pub result: SignResults,
+}
+
+// ===== CONVENIENCE CONSTRUCTORS =====
+
+impl SignResultSuccessItem {
+    /// Create a new successful sign result
+    pub fn new(signature: String, signed_data: String) -> Self {
+        Self {
+            success: serde_bool::True,
+            result: SignResultData {
+                signature,
+                signed_data,
+            },
+        }
+    }
+}
+
+impl SignResultFailureItem {
+    /// Create a new failed sign result
+    pub fn new(error: EngineError) -> Self {
+        Self {
+            success: serde_bool::False,
+            error,
+        }
+    }
+}
+
+impl SignResultItem {
+    /// Create a successful sign result item
+    pub fn success(signature: String, signed_data: String) -> Self {
+        SignResultItem::Success(SignResultSuccessItem::new(signature, signed_data))
+    }
+
+    /// Create a failed sign result item
+    pub fn failure(error: EngineError) -> Self {
+        SignResultItem::Failure(SignResultFailureItem::new(error))
+    }
+}
+
+// ===== ROUTE HANDLER =====
+
+#[utoipa::path(
+    post,
+    operation_id = "signTypedData",
+    path = "/sign/typed-data",
+    tag = "Signature",
+    request_body(content = SignTypedDataRequest, description = "Sign typed data request", content_type = "application/json"),
+    responses(
+        (status = 200, description = "Successfully signed typed data", body = SignTypedDataResponse, content_type = "application/json"),
+    ),
+    params(
+        ("x-thirdweb-client-id" = Option<String>, Header, description = "Thirdweb client ID, passed along with the service key"),
+        ("x-thirdweb-service-key" = Option<String>, Header, description = "Thirdweb service key, passed when using the client ID"),
+        ("x-thirdweb-secret-key" = Option<String>, Header, description = "Thirdweb secret key, passed standalone"),
+        ("x-vault-access-token" = Option<String>, Header, description = "Vault access token"),
+    )
+)]
+/// Sign Typed Data
+///
+/// Sign EIP-712 typed data using either EOA or Smart Account
+pub async fn sign_typed_data(
+    State(state): State<EngineServerState>,
+    SigningCredentialsExtractor(signing_credential): SigningCredentialsExtractor,
+    EngineJson(request): EngineJson<SignTypedDataRequest>,
+) -> Result<impl IntoResponse, ApiEngineError> {
+    // Process all typed data in parallel
+    let sign_futures = request.params.iter().map(|typed_data| {
+        sign_single_typed_data(&state.userop_signer, &signing_credential, &request.sign_options.signing_options, typed_data)
+    });
+
+    let results: Vec<SignResultItem> = join_all(sign_futures).await;
+
+    Ok((
+        StatusCode::OK,
+        Json(SignTypedDataResponse {
+            result: SignResults { results },
+        }),
+    ))
+}
+
+// ===== HELPER FUNCTIONS =====
+
+async fn sign_single_typed_data(
+    signer: &Signer,
+    signing_credential: &SigningCredential,
+    signing_options: &SigningOptions,
+    typed_data: &TypedData,
+) -> SignResultItem {
+    let params = TypedDataSignerParams {
+        credentials: signing_credential.clone(),
+        typed_data: typed_data.clone(),
+        signing_options: signing_options.clone(),
+    };
+
+    let result = signer.sign_typed_data(params).await;
+
+    match result {
+        Ok(signature) => {
+            // Convert typed data to JSON string for signed_data field
+            let signed_data = serde_json::to_string(typed_data)
+                .unwrap_or_else(|_| "Failed to serialize typed data".to_string());
+            SignResultItem::success(signature, signed_data)
+        },
+        Err(e) => SignResultItem::failure(e),
+    }
+}
\ No newline at end of file
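Because SignResultItem is #[serde(untagged)], clients discriminate the two variants purely on the `success` literal that serde_bool pins to true or false. A sketch of the two wire shapes, with placeholder values:

// Success: serde_bool::True serializes as the JSON literal `true`.
let ok = serde_json::json!({
    "success": true,
    "result": { "signature": "0x...", "signedData": "{...}" }
});

// Failure: `error` carries a serialized EngineError; its exact shape
// depends on the error variant, so it is left empty in this sketch.
let err = serde_json::json!({
    "success": false,
    "error": {}
});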
diff --git a/server/src/http/server.rs b/server/src/http/server.rs
index bc12cd1..617b132 100644
--- a/server/src/http/server.rs
+++ b/server/src/http/server.rs
@@ -1,14 +1,16 @@
 use std::sync::Arc;
 
 use axum::{Json, Router, routing::get};
-use engine_core::userop::UserOpSigner;
+use engine_core::{signer::EoaSigner, userop::UserOpSigner};
 use thirdweb_core::abi::ThirdwebAbiService;
 use tokio::{sync::watch, task::JoinHandle};
 use utoipa::OpenApi;
 use utoipa_axum::{router::OpenApiRouter, routes};
 use utoipa_scalar::{Scalar, Servable};
 
-use crate::{chains::ThirdwebChainService, execution_router::ExecutionRouter, queue::manager::QueueManager};
+use crate::{
+    chains::ThirdwebChainService, execution_router::ExecutionRouter, queue::manager::QueueManager,
+};
 use tower_http::{
     cors::{Any, CorsLayer},
     trace::TraceLayer,
@@ -17,7 +19,8 @@ use tower_http::{
 #[derive(Clone)]
 pub struct EngineServerState {
     pub chains: Arc<ThirdwebChainService>,
-    pub signer: Arc<UserOpSigner>,
+    pub userop_signer: Arc<UserOpSigner>,
+    pub eoa_signer: Arc<EoaSigner>,
     pub abi_service: Arc<ThirdwebAbiService>,
 
     pub execution_router: Arc<ExecutionRouter>,
diff --git a/server/src/main.rs b/server/src/main.rs
index 3438f41..c1bc715 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -1,6 +1,6 @@
 use std::sync::Arc;
 
-use engine_core::userop::UserOpSigner;
+use engine_core::{signer::EoaSigner, userop::UserOpSigner};
 use thirdweb_core::{abi::ThirdwebAbiServiceBuilder, auth::ThirdwebAuth};
 use thirdweb_engine::{
     chains::ThirdwebChainService,
@@ -39,6 +39,7 @@ async fn main() -> anyhow::Result<()> {
     });
 
     let signer = Arc::new(UserOpSigner { vault_client });
+    let eoa_signer = Arc::new(EoaSigner { vault_client });
 
     let queue_manager =
         QueueManager::new(&config.redis, &config.queue, chains.clone(), signer.clone()).await?;
@@ -63,7 +64,8 @@ async fn main() -> anyhow::Result<()> {
     };
 
     let mut server = EngineServer::new(EngineServerState {
-        signer: signer.clone(),
+        userop_signer: signer.clone(),
+        eoa_signer: eoa_signer.clone(),
         abi_service: Arc::new(abi_service),
         chains,
         execution_router: Arc::new(execution_router),
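Patch 6 below reworks the smart-account builder to hold Arc<EoaSigner>, so the server's single signer is shared by bumping a reference count rather than cloning the signer per request. For orientation, the smart-account variant of SigningOptions (fields from core/src/signer.rs above) might appear on the wire as in this sketch; the address, salt, and chain are placeholders, and the flattened EntrypointAndFactoryDetails keys are elided:

let smart_account = serde_json::json!({
    "type": "smart_account",
    "signerAddress": "0x000000000000000000000000000000000000dEaD",
    // EntrypointAndFactoryDetails is #[serde(flatten)]ed in here; its
    // keys live in core/src/execution_options/aa.rs and are elided.
    "accountSalt": "0x",
    "chainId": 1
});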
From 70dd5e2df5274f9e9bc9df79b45d02b843e55b06 Mon Sep 17 00:00:00 2001
From: Prithvish Baidya
Date: Mon, 23 Jun 2025 23:02:54 +0530
Subject: [PATCH 6/7] signing routes compiling

---
 aa-core/src/signer.rs                     |   8 +-
 core/src/signer.rs                        |   5 +-
 server/src/http/routes/sign_message.rs    |  53 ++++++-----
 server/src/http/routes/sign_typed_data.rs | 107 ++++++++++++++++++----
 server/src/http/server.rs                 |   4 +
 server/src/main.rs                        |   4 +-
 6 files changed, 133 insertions(+), 48 deletions(-)

diff --git a/aa-core/src/signer.rs b/aa-core/src/signer.rs
index 55db094..a8e7650 100644
--- a/aa-core/src/signer.rs
+++ b/aa-core/src/signer.rs
@@ -1,3 +1,5 @@
+use std::sync::Arc;
+
 use alloy::{
     dyn_abi::TypedData,
     hex::FromHex,
@@ -44,7 +46,7 @@ const ERC6492_MAGIC_SUFFIX: [u8; 32] =
 
 /// Builder for creating SmartAccountSigner with computed address and factory pattern detection
 pub struct SmartAccountSignerBuilder<C: Chain> {
-    eoa_signer: EoaSigner,
+    eoa_signer: Arc<EoaSigner>,
     credentials: SigningCredential,
     options: SmartAccountSigningOptions,
     chain: C,
@@ -52,7 +54,7 @@ pub struct SmartAccountSignerBuilder<C: Chain> {
 
 impl<C: Chain> SmartAccountSignerBuilder<C> {
     pub fn new(
-        eoa_signer: EoaSigner,
+        eoa_signer: Arc<EoaSigner>,
         credentials: SigningCredential,
         options: SmartAccountSigningOptions,
         chain: C,
@@ -142,7 +144,7 @@ pub struct SmartAccountSigner<C: Chain> {
     options: SmartAccountSigningOptions,
     credentials: SigningCredential,
     chain: C,
-    eoa_signer: EoaSigner,
+    eoa_signer: Arc<EoaSigner>,
     smart_account: DeterminedSmartAccount,
     init_calldata: Vec<u8>,
     supports_712_factory: bool,
diff --git a/core/src/signer.rs b/core/src/signer.rs
index 19ce85c..962e6b7 100644
--- a/core/src/signer.rs
+++ b/core/src/signer.rs
@@ -10,8 +10,7 @@ use vault_sdk::VaultClient;
 use vault_types::enclave::encrypted::eoa::MessageFormat;
 
 use crate::{
-    chain::Chain,
-    credentials::{self, SigningCredential},
+    credentials::SigningCredential,
     defs::AddressDef,
     error::EngineError,
     execution_options::aa::{EntrypointAndFactoryDetails, EntrypointAndFactoryDetailsDeserHelper},
@@ -65,9 +64,11 @@ pub struct SmartAccountSigningOptions {
 pub enum SigningOptions {
     /// Standard EOA (Externally Owned Account) signing
     #[serde(rename = "eoa")]
+    #[schema(title = "EOA Signing Options")]
     Eoa(EoaSigningOptions),
     /// Smart Account signing with advanced signature patterns
     #[serde(rename = "smart_account")]
+    #[schema(title = "Smart Account Signing Options")]
     SmartAccount(SmartAccountSigningOptions),
 }
 
diff --git a/server/src/http/routes/sign_message.rs b/server/src/http/routes/sign_message.rs
index 8adf7bc..75277d2 100644
--- a/server/src/http/routes/sign_message.rs
+++ b/server/src/http/routes/sign_message.rs
@@ -1,58 +1,52 @@
-// Sign Message Operations
-
-use alloy::primitives::{Address, ChainId};
 use axum::{
     extract::State,
     http::StatusCode,
     response::{IntoResponse, Json},
 };
+use engine_aa_core::signer::SmartAccountSignerBuilder;
 use engine_core::{
-    error::EngineError,
-    signer::{EoaSigner, SigningOptions, SmartAccountSigningOptions},
+    chain::ChainService,
     credentials::SigningCredential,
+    error::EngineError,
+    signer::{AccountSigner, SigningOptions},
 };
-use engine_aa_core::signer::{SmartAccountSigner, SmartAccountSignerBuilder};
 use futures::future::join_all;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use thirdweb_core::auth::ThirdwebAuth;
 use vault_types::enclave::encrypted::eoa::MessageFormat;
 
 use crate::http::{
     error::ApiEngineError,
     extractors::{EngineJson, SigningCredentialsExtractor},
     server::EngineServerState,
-    types::ErrorResponse,
 };
 
 // ===== REQUEST/RESPONSE TYPES =====
-
-/// Options for signing messages
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
-#[serde(rename_all = "camelCase")]
-pub struct SignOptions {
-    /// Configuration options for signing
-    #[serde(flatten)]
-    pub signing_options: SigningOptions,
-}
-
 /// Individual message to sign
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct MessageInput {
     /// The message to sign
     pub message: String,
     /// Message format (text or hex)
     #[serde(default = "default_message_format")]
+    #[schema(value_type = MessageFormatDef)]
     pub format: MessageFormat,
 }
 
+#[derive(Serialize, Deserialize, Debug, Clone, Copy, utoipa::ToSchema)]
+#[serde(rename_all = "lowercase")]
+pub enum MessageFormatDef {
+    Text,
+    Hex,
+}
+
 /// Request to sign messages
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct SignMessageRequest {
     /// Configuration options for signing
-    pub sign_options: SignOptions,
+    pub signing_options: SigningOptions,
     /// List of messages to sign
     pub params: Vec<MessageInput>,
 }
@@ -177,7 +171,12 @@ pub async fn sign_message(
 ) -> Result<impl IntoResponse, ApiEngineError> {
     // Process all messages in parallel
     let sign_futures = request.params.iter().map(|message_input| {
-        sign_single_message(&state, &signing_credential, &request.sign_options.signing_options, message_input)
+        sign_single_message(
+            &state,
+            &signing_credential,
+            &request.signing_options,
+            message_input,
+        )
     });
 
     let results: Vec<SignResultItem> = join_all(sign_futures).await;
@@ -201,7 +200,8 @@ async fn sign_single_message(
     let result = match signing_options {
         SigningOptions::Eoa(eoa_options) => {
             // Direct EOA signing
-            state.eoa_signer
+            state
+                .eoa_signer
                 .sign_message(
                     eoa_options.clone(),
                     &message_input.message,
@@ -232,7 +232,10 @@ async fn sign_single_message(
                 }
             }
             Err(e) => Err(EngineError::ValidationError {
-                message: format!("Failed to get chain {}: {}", smart_account_options.chain_id, e),
+                message: format!(
+                    "Failed to get chain {}: {}",
+                    smart_account_options.chain_id, e
+                ),
             }),
         }
     }
@@ -246,4 +249,4 @@ async fn sign_single_message(
 
 fn default_message_format() -> MessageFormat {
     MessageFormat::Text
-}
\ No newline at end of file
+}
diff --git a/server/src/http/routes/sign_typed_data.rs b/server/src/http/routes/sign_typed_data.rs
index 28c3b78..4cf353a 100644
--- a/server/src/http/routes/sign_typed_data.rs
+++ b/server/src/http/routes/sign_typed_data.rs
@@ -1,27 +1,28 @@
 // Sign Typed Data Operations
 
-use alloy::{dyn_abi::TypedData, primitives::Address};
+use alloy::dyn_abi::TypedData;
 use axum::{
     extract::State,
     http::StatusCode,
     response::{IntoResponse, Json},
 };
+use engine_aa_core::signer::SmartAccountSignerBuilder;
 use engine_core::{
-    error::EngineError,
-    signer::{EoaSigner, SigningOptions, SmartAccountSigningOptions},
+    chain::ChainService,
     credentials::SigningCredential,
+    defs::{AddressDef, U256Def},
+    error::EngineError,
+    signer::{AccountSigner, SigningOptions},
 };
-use engine_aa_core::signer::{SmartAccountSigner, SmartAccountSignerBuilder};
 use futures::future::join_all;
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
-use thirdweb_core::auth::ThirdwebAuth;
+use serde_json::Value;
 
 use crate::http::{
     error::ApiEngineError,
     extractors::{EngineJson, SigningCredentialsExtractor},
     server::EngineServerState,
-    types::ErrorResponse,
 };
 
 // ===== REQUEST/RESPONSE TYPES =====
@@ -36,15 +37,55 @@ pub struct SignOptions {
 }
 
 /// Request to sign typed data
-#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
+#[derive(Debug, Clone, Serialize, Deserialize, utoipa::ToSchema)]
 #[serde(rename_all = "camelCase")]
 pub struct SignTypedDataRequest {
     /// Configuration options for signing
     pub sign_options: SignOptions,
     /// List of typed data to sign
+    #[schema(value_type = Vec<TypedDataDef>)]
     pub params: Vec<TypedData>,
 }
 
+#[derive(utoipa::ToSchema)]
+pub struct TypedDataDef {
+    /// Signing domain metadata. The signing domain is the intended context for
+    /// the signature (e.g. the dapp, protocol, etc. that it's intended for).
+    /// This data is used to construct the domain separator of the message.
+    pub domain: TypedDataDomainDef,
+
+    /// The custom types used by this message.
+    #[schema(rename = "types")]
+    pub resolver: Value,
+
+    /// The type of the message.
+    #[schema(rename = "primaryType")]
+    pub primary_type: String,
+
+    /// The message to be signed.
+    pub message: serde_json::Value,
+}
+
+#[derive(utoipa::ToSchema)]
+pub struct TypedDataDomainDef {
+    pub name: Option<String>,
+
+    /// The current major version of the signing domain. Signatures from
+    /// different versions are not compatible.
+    pub version: Option<String>,
+
+    /// The EIP-155 chain ID. The user-agent should refuse signing if it does
+    /// not match the currently active chain.
+    pub chain_id: Option<U256Def>,
+
+    /// The address of the contract that will verify the signature.
+    pub verifying_contract: Option<AddressDef>,
+
+    /// A disambiguating salt for the protocol. This can be used as a domain
+    /// separator of last resort.
+    pub salt: Option<String>,
+}
+
 /// Result of a single typed data signing operation
 #[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, utoipa::ToSchema)]
 #[serde(untagged)]
 pub enum SignResultItem {
     Success(SignResultSuccessItem),
     Failure(SignResultFailureItem),
 }
@@ -165,7 +206,12 @@ pub async fn sign_typed_data(
 ) -> Result<impl IntoResponse, ApiEngineError> {
     // Process all typed data in parallel
     let sign_futures = request.params.iter().map(|typed_data| {
-        sign_single_typed_data(&state.userop_signer, &signing_credential, &request.sign_options.signing_options, typed_data)
+        sign_single_typed_data(
+            &state,
+            &signing_credential,
+            &request.sign_options.signing_options,
+            typed_data,
+        )
     });
 
     let results: Vec<SignResultItem> = join_all(sign_futures).await;
@@ -181,26 +227,53 @@ pub async fn sign_typed_data(
 
 // ===== HELPER FUNCTIONS =====
 
 async fn sign_single_typed_data(
-    signer: &Signer,
+    state: &EngineServerState,
     signing_credential: &SigningCredential,
     signing_options: &SigningOptions,
     typed_data: &TypedData,
 ) -> SignResultItem {
-    let params = TypedDataSignerParams {
-        credentials: signing_credential.clone(),
-        typed_data: typed_data.clone(),
-        signing_options: signing_options.clone(),
+    let result = match signing_options {
+        SigningOptions::Eoa(eoa_options) => {
+            // Direct EOA signing
+            state
+                .eoa_signer
+                .sign_typed_data(eoa_options.clone(), typed_data, signing_credential.clone())
+                .await
+        }
+        SigningOptions::SmartAccount(smart_account_options) => {
+            // Smart account signing via builder
+            match state.chains.get_chain(smart_account_options.chain_id) {
+                Ok(chain) => {
+                    match SmartAccountSignerBuilder::new(
+                        state.eoa_signer.clone(),
+                        signing_credential.clone(),
+                        smart_account_options.clone(),
+                        chain,
+                    )
+                    .build()
+                    .await
+                    {
+                        Ok(smart_signer) => smart_signer.sign_typed_data(typed_data).await,
+                        Err(e) => Err(e),
+                    }
+                }
+                Err(e) => Err(EngineError::ValidationError {
+                    message: format!(
+                        "Failed to get chain {}: {}",
+                        smart_account_options.chain_id, e
+                    ),
+                }),
+            }
+        }
     };
 
-    let result = signer.sign_typed_data(params).await;
-
     match result {
         Ok(signature) => {
             // Convert typed data to JSON string for signed_data field
             let signed_data = serde_json::to_string(typed_data)
                 .unwrap_or_else(|_| "Failed to serialize typed data".to_string());
             SignResultItem::success(signature, signed_data)
-        },
+        }
         Err(e) => SignResultItem::failure(e),
     }
-}
\ No newline at end of file
+}
diff --git a/server/src/http/server.rs b/server/src/http/server.rs
index 617b132..492e3f1 100644
--- a/server/src/http/server.rs
+++ b/server/src/http/server.rs
@@ -58,6 +58,10 @@ impl EngineServer {
             .routes(routes!(
                 crate::http::routes::transaction::cancel_transaction
             ))
+            .routes(routes!(crate::http::routes::sign_message::sign_message))
+            .routes(routes!(
+                crate::http::routes::sign_typed_data::sign_typed_data
+            ))
             .layer(cors)
             .layer(TraceLayer::new_for_http())
             .with_state(state);
diff --git a/server/src/main.rs b/server/src/main.rs
index c1bc715..b549646 100644
--- a/server/src/main.rs
+++ b/server/src/main.rs
@@ -38,7 +38,9 @@ async fn main() -> anyhow::Result<()> {
         rpc_base_url: config.thirdweb.urls.rpc,
     });
 
-    let signer = Arc::new(UserOpSigner { vault_client });
+    let signer = Arc::new(UserOpSigner {
+        vault_client: vault_client.clone(),
+    });
     let eoa_signer = Arc::new(EoaSigner { vault_client });
 
     let queue_manager =
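Before the final patch, a sketch of one params entry for POST /sign/typed-data, shaped per the TypedDataDef and TypedDataDomainDef schema types above. All concrete values (name, chain id, addresses, types) are placeholders:

let typed_data = serde_json::json!({
    "domain": {
        "name": "ExampleDapp",
        "version": "1",
        "chainId": "1",
        "verifyingContract": "0x000000000000000000000000000000000000dEaD"
    },
    // "types" maps to TypedDataDef's renamed `resolver` field.
    "types": {
        "Person": [
            { "name": "wallet", "type": "address" }
        ]
    },
    "primaryType": "Person",
    "message": { "wallet": "0x000000000000000000000000000000000000dEaD" }
});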
From db2d5d8cab8a75a6850011cc18d14f0cf8d45c8b Mon Sep 17 00:00:00 2001
From: Prithvish Baidya
Date: Mon, 23 Jun 2025 23:17:08 +0530
Subject: [PATCH 7/7] address review feedback

---
 core/src/execution_options/mod.rs         | 2 +-
 core/src/signer.rs                        | 2 --
 core/src/userop.rs                        | 4 ++--
 server/src/http/routes/sign_typed_data.rs | 9 ++++++---
 4 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/core/src/execution_options/mod.rs b/core/src/execution_options/mod.rs
index 58be87b..bca4e49 100644
--- a/core/src/execution_options/mod.rs
+++ b/core/src/execution_options/mod.rs
@@ -46,7 +46,7 @@ where
     if !map.contains_key("type") {
         map.insert(
             "type".to_string(),
-            serde_json::Value::String("Auto".to_string()),
+            serde_json::Value::String("auto".to_string()),
         );
     }
 
diff --git a/core/src/signer.rs b/core/src/signer.rs
index 962e6b7..dc5a2da 100644
--- a/core/src/signer.rs
+++ b/core/src/signer.rs
@@ -1,5 +1,3 @@
-use std::option;
-
 use alloy::{
     dyn_abi::TypedData,
     primitives::{Address, ChainId},
diff --git a/core/src/userop.rs b/core/src/userop.rs
index 75c220f..e537c38 100644
--- a/core/src/userop.rs
+++ b/core/src/userop.rs
@@ -43,7 +43,7 @@ impl UserOpVersion {
             pre_verification_gas: userop.pre_verification_gas,
             max_fee_per_gas: userop.max_fee_per_gas,
             verification_gas_limit: userop.verification_gas_limit,
-            sender: userop.sender.clone(),
+            sender: userop.sender,
             paymaster_and_data: userop.paymaster_and_data.clone(),
             signature: userop.signature.clone(),
             call_gas_limit: userop.call_gas_limit,
@@ -58,7 +58,7 @@ impl UserOpVersion {
             pre_verification_gas: userop.pre_verification_gas,
             max_fee_per_gas: userop.max_fee_per_gas,
             verification_gas_limit: userop.verification_gas_limit,
-            sender: userop.sender.clone(),
+            sender: userop.sender,
             paymaster_data: userop.paymaster_data.clone().unwrap_or_default(),
             factory: userop.factory.unwrap_or_default(),
             factory_data: userop.factory_data.clone().unwrap_or_default(),
diff --git a/server/src/http/routes/sign_typed_data.rs b/server/src/http/routes/sign_typed_data.rs
index 4cf353a..3667a2e 100644
--- a/server/src/http/routes/sign_typed_data.rs
+++ b/server/src/http/routes/sign_typed_data.rs
@@ -270,9 +270,12 @@ async fn sign_single_typed_data(
     match result {
         Ok(signature) => {
             // Convert typed data to JSON string for signed_data field
-            let signed_data = serde_json::to_string(typed_data)
-                .unwrap_or_else(|_| "Failed to serialize typed data".to_string());
-            SignResultItem::success(signature, signed_data)
+            match serde_json::to_string(typed_data) {
+                Ok(signed_data) => SignResultItem::success(signature, signed_data),
+                Err(e) => SignResultItem::failure(EngineError::ValidationError {
+                    message: format!("Failed to serialize typed data: {}", e),
+                }),
+            }
         }
         Err(e) => SignResultItem::failure(e),
     }
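A closing note on the first hunk of this patch: the custom deserializer in core/src/execution_options/mod.rs injects a default "type" tag when the caller omits one, and the fix lowercases it to match the serde rename convention used elsewhere in the series. Under that assumption, the two inputs sketched here should now select the same variant (the remaining execution-option fields are elided placeholders):

// With the fix, an options object without "type"...
let implicit = serde_json::json!({ /* other execution options */ });
// ...is handled as if the caller had written:
let explicit = serde_json::json!({ "type": "auto", /* same options */ });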