Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@
- Fixed incorrectly classifying private notes with the network attachment as network notes ([#1738](https://github.com/0xMiden/node/pull/1738)).
- Fixed accept header version negotiation rejecting all pre-release versions; pre-release label matching is now lenient, accepting any numeric suffix within the same label (e.g. `alpha.3` accepts `alpha.1`) ([#1755](https://github.com/0xMiden/node/pull/1755)).
- Fixed `GetAccount` returning an internal error for `AllEntries` requests on storage maps where all entries are in a single block (e.g. genesis accounts) ([#1816](https://github.com/0xMiden/node/pull/1816)).
- Fixed `GetAccount` returning empty storage map entries instead of `too_many_entries` when a genesis account's map exceeds the pagination limit ([#1816](https://github.com/0xMiden/node/pull/1816)).

## v0.13.8 (2026-03-12)

Expand Down
6 changes: 6 additions & 0 deletions crates/store/src/db/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -649,6 +649,12 @@ impl Db {
values.extend(page.values);
let mut last_block_included = page.last_block_included;

// If the first page returned no values, the block at block_range_start has more
// entries than the limit allows (e.g. genesis accounts with large storage maps).
if values.is_empty() && last_block_included == block_range_start {
return Ok(AccountStorageMapDetails::limit_exceeded(slot_name));
}

loop {
if page.last_block_included == block_num || page.last_block_included < block_range_start
{
Expand Down
25 changes: 17 additions & 8 deletions crates/store/src/db/models/queries/accounts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -468,21 +468,20 @@ pub(crate) fn select_account_vault_assets(
.limit(i64::try_from(MAX_ROWS + 1).expect("should fit within i64"))
.load::<(i64, Vec<u8>, Option<Vec<u8>>)>(conn)?;

// Discard the last block in the response (assumes more than one block may be present)
// If we got more rows than the limit, the last block may be incomplete so we
// drop it entirely and derive last_block_included from the remaining rows.
let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last()
&& raw.len() > MAX_ROWS
{
// NOTE: If the query contains at least one more row than the amount of storage map updates
// allowed in a single block for an account, then the response is guaranteed to have at
// least two blocks

let values = raw
.into_iter()
.take_while(|(bn, ..)| *bn != last_block_num)
.map(AccountVaultValue::from_raw_row)
.collect::<Result<Vec<_>, DatabaseError>>()?;

(BlockNumber::from_raw_sql(last_block_num.saturating_sub(1))?, values)
let last_block_included = values.last().map_or(*block_range.start(), |v| v.block_num);

(last_block_included, values)
} else {
(
*block_range.end(),
Expand Down Expand Up @@ -592,6 +591,14 @@ pub(crate) fn select_all_network_account_ids(
// SAFETY: We just checked that len > MAX_ROWS, so the vec is not empty.
let last_created_at_block = account_ids_raw.last().expect("vec is not empty").1;

// Find the last kept block by scanning backward to the first row that won't be
// dropped. This avoids an extra allocation (AccountId has no block_num field).
let last_kept_block = account_ids_raw
.iter()
.rev()
.find(|(_, created_at_block)| *created_at_block != last_created_at_block)
.map(|(_, bn)| *bn);

let account_ids = account_ids_raw
.into_iter()
.take_while(|(_, created_at_block)| *created_at_block != last_created_at_block)
Expand All @@ -600,8 +607,10 @@ pub(crate) fn select_all_network_account_ids(
})
.collect::<Result<Vec<AccountId>, DatabaseError>>()?;

let last_block_included =
BlockNumber::from_raw_sql(last_created_at_block.saturating_sub(1))?;
let last_block_included = match last_kept_block {
Some(bn) => BlockNumber::from_raw_sql(bn)?,
None => *block_range.start(),
};

Ok((account_ids, last_block_included))
} else {
Expand Down
16 changes: 10 additions & 6 deletions crates/store/src/db/models/queries/transactions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -296,13 +296,17 @@ pub fn select_transactions_records(
let last_block_num = last_block_num.expect(
"guaranteed to have processed at least one transaction when size limit is reached",
);
let filtered_transactions = vec_raw_try_into(
all_transactions.into_iter().take_while(|row| row.block_num != last_block_num),
)?;
let filtered: Vec<_> = all_transactions
.into_iter()
.take_while(|row| row.block_num != last_block_num)
.collect();

// SAFETY: block_num came from the database and was previously validated
let last_included_block = BlockNumber::from_raw_sql(last_block_num.saturating_sub(1))?;
Ok((last_included_block, filtered_transactions))
let last_included_block = match filtered.last() {
Some(row) => BlockNumber::from_raw_sql(row.block_num)?,
None => *block_range.start(),
};

Ok((last_included_block, vec_raw_try_into(filtered)?))
} else {
Ok((*block_range.end(), vec_raw_try_into(all_transactions)?))
}
Expand Down
51 changes: 51 additions & 0 deletions crates/store/src/db/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1434,6 +1434,57 @@ async fn reconstruct_storage_map_from_db_pages_until_latest() {
});
}

/// Verifies that `reconstruct_storage_map_from_db` reports `LimitExceeded` when a single
/// block holds more storage map entries than the limit allows. The previous behavior
/// returned `AllEntries([])`: the pagination loop exited on its first iteration because
/// `last_block_included` already equaled the target block, without ever checking that no
/// values had actually been fetched.
#[tokio::test]
#[miden_node_test_macro::enable_logging]
async fn reconstruct_storage_map_from_db_returns_limit_exceeded_for_single_block_overflow() {
    let temp_dir = tempdir().unwrap();
    let db_path = temp_dir.path().join("store.sqlite");

    let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap();
    let slot_name = StorageSlotName::mock(12);

    let target_block = BlockNumber::from(5);

    let db = crate::db::Db::load(db_path).await.unwrap();
    let slot_for_insert = slot_name.clone();
    db.query("insert entries in single block", move |db_conn| {
        db_conn.transaction(|db_conn| {
            apply_migrations(db_conn)?;
            create_block(db_conn, target_block);

            queries::upsert_accounts(db_conn, &[mock_block_account_update(account_id, 0)], target_block)?;

            // Write three map entries that all land in the same block.
            for n in 1..=3 {
                queries::insert_account_storage_map_value(
                    db_conn,
                    account_id,
                    target_block,
                    slot_for_insert.clone(),
                    num_to_storage_map_key(n),
                    num_to_word(n * 10),
                )?;
            }
            Ok::<_, DatabaseError>(())
        })
    })
    .await
    .unwrap();

    // With limit=1, the three same-block entries overflow the page. The range starts at
    // target_block (the first block with data), which is also the reconstruction target,
    // so the reconstruction must surface `LimitExceeded` instead of an empty entry set.
    let details = db
        .reconstruct_storage_map_from_db(account_id, slot_name.clone(), target_block, Some(1))
        .await
        .unwrap();

    assert_matches!(details.entries, StorageMapEntries::LimitExceeded);
}

// UTILITIES
// -------------------------------------------------------------------------------------------
fn num_to_word(n: u64) -> Word {
Expand Down
Loading