Changes from all commits — 88 commits
37c03e8
add timescale db hypertable
aditya1702 Feb 2, 2026
a963d23
update docker and test files to use timescaledb
aditya1702 Feb 2, 2026
ce9d7d0
Fix tests
aditya1702 Feb 2, 2026
8badbef
convert junction tables to hypertables
aditya1702 Feb 2, 2026
f2bff57
fix failing tests
aditya1702 Feb 2, 2026
2bc2180
remove comments
aditya1702 Feb 2, 2026
34124ee
changes to indexes
aditya1702 Feb 2, 2026
63de994
Merge branch 'db-indexes' into timescale
aditya1702 Feb 2, 2026
e51e697
Update 2025-06-10.2-transactions.sql
aditya1702 Feb 2, 2026
3fb3a6c
Update 2025-06-10.2-transactions.sql
aditya1702 Feb 2, 2026
66c39be
compress chunks at the end
aditya1702 Feb 3, 2026
1bf6580
compress chunks in parallel
aditya1702 Feb 3, 2026
2fffe5d
Update ingest_backfill.go
aditya1702 Feb 3, 2026
fc165bf
Update ingest_backfill.go
aditya1702 Feb 3, 2026
38d08c1
Update ingest_backfill.go
aditya1702 Feb 3, 2026
369e79a
Cherry-pick opxdr-bytea-2 files (types, indexer, graphql, processors,…
aditya1702 Feb 10, 2026
1224c17
Migrate TEXT columns to BYTEA in transaction/operation/statechange sc…
aditya1702 Feb 10, 2026
9ef4af8
Apply BYTEA type conversions to data layer (transactions, operations,…
aditya1702 Feb 10, 2026
864d23b
Update test files for BYTEA types while preserving TimescaleDB patterns
aditya1702 Feb 10, 2026
ec91fe7
Fix shadow variable warnings in BatchCopy address conversions
aditya1702 Feb 10, 2026
d013efa
Update go.yaml
aditya1702 Feb 10, 2026
235aa88
merge branch opxr-bytea-2
aditya1702 Feb 10, 2026
3abf801
Update go.yaml
aditya1702 Feb 10, 2026
2b9e98e
Update go.yaml
aditya1702 Feb 10, 2026
6c55228
Update docker-compose.yaml
aditya1702 Feb 10, 2026
9f41d08
Update query_utils.go
aditya1702 Feb 10, 2026
d34dc37
remove flaky RPC test
aditya1702 Feb 10, 2026
516251b
Update helpers.go
aditya1702 Feb 10, 2026
bb35d75
Update helpers.go
aditya1702 Feb 10, 2026
71869d7
add bloom filter on account_id
aditya1702 Feb 10, 2026
147a67f
remove BatchInsert
aditya1702 Feb 11, 2026
1561630
update hypertable config - 1
aditya1702 Feb 11, 2026
ff75bf7
remove Duplicate failure tests
aditya1702 Feb 11, 2026
e85fb63
Add extra orderby columns and other hypertable configuration
aditya1702 Feb 11, 2026
7a6334b
Update normal B-tree indexes
aditya1702 Feb 11, 2026
6768f8d
enable chunk skipping
aditya1702 Feb 11, 2026
a415099
Update dbtest.go
aditya1702 Feb 11, 2026
d0e76df
Update containers.go
aditya1702 Feb 11, 2026
ed71815
Compress backfill chunks in parallel using goroutines
aditya1702 Feb 11, 2026
b9305e5
remove schemas for mainnet and testnet
aditya1702 Feb 12, 2026
5a5ac6e
Enable direct compress and recompression for backfilling
aditya1702 Feb 12, 2026
77dbbc7
Add command line configuration to set retention policy and chunk size
aditya1702 Feb 12, 2026
7ef1866
Update ingest.go
aditya1702 Feb 12, 2026
6488155
add batch index tracking to logs
aditya1702 Feb 12, 2026
3db78c8
Update ingest_backfill.go
aditya1702 Feb 13, 2026
7f578ea
update auto-vacuum settings for balances tables
aditya1702 Feb 13, 2026
cefebf5
add explanations for the values
aditya1702 Feb 13, 2026
22d6f33
Disable FK checks during checkpoint population
aditya1702 Feb 13, 2026
4c7afba
Update ingest_live.go
aditya1702 Feb 13, 2026
88586d1
Update ingest_live.go
aditya1702 Feb 13, 2026
5896323
Update ingest_live.go
aditya1702 Feb 13, 2026
b4e4abe
Update ingest_backfill.go
aditya1702 Feb 13, 2026
0183370
fix ledger_created_at bug
aditya1702 Feb 13, 2026
5ccac7f
update oldest ledger using timescaledb job
aditya1702 Feb 13, 2026
89b11ae
Use ledger_created_at in the cursor
aditya1702 Feb 15, 2026
2d614a1
Decompose cursor comparison into individual OR clauses
aditya1702 Feb 15, 2026
9892960
Add since/until time range params to Account GraphQL schema
aditya1702 Feb 15, 2026
52f06d3
Regenerate gqlgen code with since/until resolver params
aditya1702 Feb 15, 2026
0dac379
Add TimeRange struct and appendTimeRangeConditions helper
aditya1702 Feb 15, 2026
8414f54
Add timeRange parameter to BatchGetByAccountAddress methods
aditya1702 Feb 15, 2026
2be7686
Wire since/until through resolvers to data layer
aditya1702 Feb 15, 2026
e75d828
Add since/until time range params to wbclient methods
aditya1702 Feb 15, 2026
7c29982
Add tests for time range filtering feature
aditya1702 Feb 15, 2026
dd65f24
fix make check
aditya1702 Feb 15, 2026
a8a8057
Update query_utils.go
aditya1702 Feb 15, 2026
7f83fe8
Add config option for tweaking schedule_interval for compression
aditya1702 Feb 15, 2026
2743365
Merge branch 'timescale' into query-optimize
aditya1702 Feb 15, 2026
b1af680
Add CLI variable for compress_after setting
aditya1702 Feb 15, 2026
f456cae
Merge branch 'timescale' into query-optimize
aditya1702 Feb 15, 2026
2a4d469
remove accountsByStateChange data loader
aditya1702 Feb 16, 2026
18891c9
remove account {address} when getting an account's state changes
aditya1702 Feb 16, 2026
b30ce33
Update statechanges.go
aditya1702 Feb 16, 2026
6ebcca2
Always set endCursor to edges[len-1]
aditya1702 Feb 16, 2026
eb21d11
Set client_sorted = True since we will rebuild_columnstore at the end
aditya1702 Feb 16, 2026
8d2c8ff
Update ingest_backfill.go
aditya1702 Feb 16, 2026
1f66469
Merge branch 'timescale' into query-optimize
aditya1702 Feb 16, 2026
1f7fe71
Update ingest.go
aditya1702 Feb 16, 2026
4b644d3
Merge branch 'timescale' into query-optimize
aditya1702 Feb 16, 2026
01bc06a
Update ingest_backfill.go
aditya1702 Feb 17, 2026
407fc57
Merge branch 'timescale' into query-optimize
aditya1702 Feb 17, 2026
e78ede0
Add parallel recompression logic
aditya1702 Feb 20, 2026
f052c40
Merge branch 'timescale' into query-optimize
aditya1702 Feb 20, 2026
64df87b
Merge branch 'timescale' into query-optimize
aditya1702 Feb 21, 2026
c801128
remove account id validation
aditya1702 Feb 21, 2026
b54bbed
Merge query-optimize into remove-filtering base
aditya1702 Feb 22, 2026
8cd6492
Regenerate GraphQL code after merge
aditya1702 Feb 22, 2026
cfe9581
Remove unused AccountModel.BatchGetByStateChangeIDs
aditya1702 Feb 22, 2026
dbd119a
Merge branch 'remove-filtering' into query-optimize-2
aditya1702 Feb 22, 2026
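
Taken together, the commits above migrate the core tables (transactions, operations, state_changes, and the junction tables) to TimescaleDB hypertables, add columnstore compression with configurable compress_after and schedule_interval, expose retention policy and chunk size as CLI flags, and enable chunk skipping. A minimal sketch of that setup in Go, with illustrative table names and intervals (the PR makes these values configurable, so the literals here are assumptions; enable_chunk_skipping also requires a recent TimescaleDB with the feature turned on):

package main

import (
	"context"
	"log"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()
	conn, err := pgx.Connect(ctx, "postgres://localhost:5432/wallet?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close(ctx)

	stmts := []string{
		// Convert the table into a hypertable partitioned on ledger time.
		`SELECT create_hypertable('transactions', 'ledger_created_at',
		    chunk_time_interval => INTERVAL '1 day', migrate_data => true)`,
		// Configure columnstore compression; the orderby choice is an assumption.
		`ALTER TABLE transactions SET (
		    timescaledb.compress,
		    timescaledb.compress_orderby = 'ledger_created_at DESC, to_id DESC')`,
		// Compress chunks once they age past the configured window.
		`SELECT add_compression_policy('transactions', compress_after => INTERVAL '7 days')`,
		// Drop chunks past the retention window.
		`SELECT add_retention_policy('transactions', INTERVAL '90 days')`,
		// Track per-chunk min/max for a correlated column so non-time filters
		// can also prune chunks (commit "enable chunk skipping").
		`SELECT enable_chunk_skipping('transactions', 'to_id')`,
	}
	for _, s := range stmts {
		if _, err := conn.Exec(ctx, s); err != nil {
			log.Fatal(err)
		}
	}
}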
31 changes: 0 additions & 31 deletions internal/data/accounts.go
@@ -5,7 +5,6 @@ package data
import (
"context"
"fmt"
"strings"
"time"

"github.com/lib/pq"
@@ -77,33 +76,3 @@ func (m *AccountModel) BatchGetByOperationIDs(ctx context.Context, operationIDs
m.MetricsService.IncDBQuery("BatchGetByOperationIDs", "operations_accounts")
return accounts, nil
}

// BatchGetByStateChangeIDs gets the accounts that are associated with the given state change IDs.
func (m *AccountModel) BatchGetByStateChangeIDs(ctx context.Context, scToIDs []int64, scOpIDs []int64, scOrders []int64, columns string) ([]*types.AccountWithStateChangeID, error) {
// Build tuples for the IN clause. Since (to_id, operation_id, state_change_order) is the primary key of state_changes,
// it will be faster to search on this tuple.
tuples := make([]string, len(scOrders))
for i := range scOrders {
tuples[i] = fmt.Sprintf("(%d, %d, %d)", scToIDs[i], scOpIDs[i], scOrders[i])
}

query := fmt.Sprintf(`
SELECT account_id AS stellar_address, CONCAT(to_id, '-', operation_id, '-', state_change_order) AS state_change_id
FROM state_changes
WHERE (to_id, operation_id, state_change_order) IN (%s)
ORDER BY ledger_created_at DESC
`, strings.Join(tuples, ", "))

var accountsWithStateChanges []*types.AccountWithStateChangeID
start := time.Now()
err := m.DB.SelectContext(ctx, &accountsWithStateChanges, query)
duration := time.Since(start).Seconds()
m.MetricsService.ObserveDBQueryDuration("BatchGetByStateChangeIDs", "state_changes", duration)
m.MetricsService.ObserveDBBatchSize("BatchGetByStateChangeIDs", "state_changes", len(scOrders))
if err != nil {
m.MetricsService.IncDBQueryError("BatchGetByStateChangeIDs", "state_changes", utils.GetDBErrorType(err))
return nil, fmt.Errorf("getting accounts by state change IDs: %w", err)
}
m.MetricsService.IncDBQuery("BatchGetByStateChangeIDs", "state_changes")
return accountsWithStateChanges, nil
}
66 changes: 0 additions & 66 deletions internal/data/accounts_test.go
@@ -151,69 +151,3 @@ func TestAccountModel_IsAccountFeeBumpEligible(t *testing.T) {
require.NoError(t, err)
assert.True(t, isFeeBumpEligible)
}

func TestAccountModelBatchGetByStateChangeIDs(t *testing.T) {
dbt := dbtest.Open(t)
defer dbt.Close()
dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN)
require.NoError(t, err)
defer dbConnectionPool.Close()

mockMetricsService := metrics.NewMockMetricsService()
mockMetricsService.On("ObserveDBQueryDuration", "BatchGetByStateChangeIDs", "state_changes", mock.Anything).Return()
mockMetricsService.On("IncDBQuery", "BatchGetByStateChangeIDs", "state_changes").Return()
mockMetricsService.On("ObserveDBBatchSize", "BatchGetByStateChangeIDs", "state_changes", mock.Anything).Return().Maybe()
defer mockMetricsService.AssertExpectations(t)

m := &AccountModel{
DB: dbConnectionPool,
MetricsService: mockMetricsService,
}

ctx := context.Background()
address1 := keypair.MustRandom().Address()
address2 := keypair.MustRandom().Address()
toID1 := int64(4096)
toID2 := int64(8192)
stateChangeOrder1 := int64(1)
stateChangeOrder2 := int64(1)

// Insert test transactions first (hash is BYTEA, using valid 64-char hex strings)
testHash1 := types.HashBytea("0000000000000000000000000000000000000000000000000000000000000001")
testHash2 := types.HashBytea("0000000000000000000000000000000000000000000000000000000000000002")
_, err = m.DB.ExecContext(ctx, "INSERT INTO transactions (hash, to_id, envelope_xdr, fee_charged, result_code, meta_xdr, ledger_number, ledger_created_at) VALUES ($1, 4096, 'env1', 100, 'TransactionResultCodeTxSuccess', 'meta1', 1, NOW()), ($2, 8192, 'env2', 200, 'TransactionResultCodeTxSuccess', 'meta2', 2, NOW())", testHash1, testHash2)
require.NoError(t, err)

// Insert test operations (IDs must be in TOID range for each transaction)
xdr1 := types.XDRBytea([]byte("xdr1"))
xdr2 := types.XDRBytea([]byte("xdr2"))
_, err = m.DB.ExecContext(ctx, "INSERT INTO operations (id, operation_type, operation_xdr, result_code, successful, ledger_number, ledger_created_at) VALUES (4097, 'PAYMENT', $1, 'op_success', true, 1, NOW()), (8193, 'PAYMENT', $2, 'op_success', true, 2, NOW())", xdr1, xdr2)
require.NoError(t, err)

// Insert test state changes that reference the accounts (state_changes.account_id is TEXT)
_, err = m.DB.ExecContext(ctx, `
INSERT INTO state_changes (
to_id, state_change_order, state_change_category, ledger_created_at,
ledger_number, account_id, operation_id
) VALUES
($1, $2, 'BALANCE', NOW(), 1, $3, 4097),
($4, $5, 'BALANCE', NOW(), 2, $6, 8193)
`, toID1, stateChangeOrder1, types.AddressBytea(address1), toID2, stateChangeOrder2, types.AddressBytea(address2))
require.NoError(t, err)

// Test BatchGetByStateChangeIDs function
scToIDs := []int64{toID1, toID2}
scOpIDs := []int64{4097, 8193}
scOrders := []int64{stateChangeOrder1, stateChangeOrder2}
accounts, err := m.BatchGetByStateChangeIDs(ctx, scToIDs, scOpIDs, scOrders, "")
require.NoError(t, err)
assert.Len(t, accounts, 2)

// Verify accounts are returned with correct state_change_id (format: to_id-operation_id-state_change_order)
addressSet := make(map[string]string)
for _, acc := range accounts {
addressSet[string(acc.StellarAddress)] = acc.StateChangeID
}
assert.Equal(t, "4096-4097-1", addressSet[address1])
assert.Equal(t, "8192-8193-1", addressSet[address2])
}
119 changes: 89 additions & 30 deletions internal/data/operations.go
@@ -38,36 +38,44 @@ func (m *OperationModel) GetByID(ctx context.Context, id int64, columns string)
return &operation, nil
}

func (m *OperationModel) GetAll(ctx context.Context, columns string, limit *int32, cursor *int64, sortOrder SortOrder) ([]*types.OperationWithCursor, error) {
func (m *OperationModel) GetAll(ctx context.Context, columns string, limit *int32, cursor *types.CompositeCursor, sortOrder SortOrder) ([]*types.OperationWithCursor, error) {
columns = prepareColumnsWithID(columns, types.Operation{}, "", "id")
queryBuilder := strings.Builder{}
queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, id as cursor FROM operations`, columns))
var args []interface{}
argIndex := 1

queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", id as "cursor.cursor_id" FROM operations`, columns))

// Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so
// TimescaleDB ColumnarScan can push filters into vectorized batch processing.
if cursor != nil {
if sortOrder == DESC {
queryBuilder.WriteString(fmt.Sprintf(" WHERE id < %d", *cursor))
} else {
queryBuilder.WriteString(fmt.Sprintf(" WHERE id > %d", *cursor))
}
clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{
{Name: "ledger_created_at", Value: cursor.LedgerCreatedAt},
{Name: "id", Value: cursor.ID},
}, sortOrder, argIndex)
queryBuilder.WriteString(" WHERE " + clause)
args = append(args, cursorArgs...)
argIndex = nextIdx
}

if sortOrder == DESC {
queryBuilder.WriteString(" ORDER BY id DESC")
queryBuilder.WriteString(" ORDER BY ledger_created_at DESC, id DESC")
} else {
queryBuilder.WriteString(" ORDER BY id ASC")
queryBuilder.WriteString(" ORDER BY ledger_created_at ASC, id ASC")
}

if limit != nil {
queryBuilder.WriteString(fmt.Sprintf(" LIMIT %d", *limit))
queryBuilder.WriteString(fmt.Sprintf(" LIMIT $%d", argIndex))
args = append(args, *limit)
}
query := queryBuilder.String()
if sortOrder == DESC {
query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY cursor ASC`, query)
query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query)
}

var operations []*types.OperationWithCursor
start := time.Now()
err := m.DB.SelectContext(ctx, &operations, query)
err := m.DB.SelectContext(ctx, &operations, query, args...)
duration := time.Since(start).Seconds()
m.MetricsService.ObserveDBQueryDuration("GetAll", "operations", duration)
if err != nil {
@@ -131,15 +139,15 @@ func (m *OperationModel) BatchGetByToIDs(ctx context.Context, toIDs []int64, col
JOIN
inputs i ON o.id > i.to_id AND o.id < i.to_id + 4096
)
SELECT %s, id as cursor FROM ranked_operations_per_to_id
SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", id as "cursor.cursor_id" FROM ranked_operations_per_to_id
`
queryBuilder.WriteString(fmt.Sprintf(query, sortOrder, columns))
if limit != nil {
queryBuilder.WriteString(fmt.Sprintf(" WHERE rn <= %d", *limit))
}
query = queryBuilder.String()
if sortOrder == DESC {
query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY cursor ASC`, query)
query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query)
}

var operations []*types.OperationWithCursor
@@ -162,7 +170,7 @@ func (m *OperationModel) BatchGetByToID(ctx context.Context, toID int64, columns
columns = prepareColumnsWithID(columns, types.Operation{}, "", "id")
queryBuilder := strings.Builder{}
// Operations for a tx_to_id are in range (tx_to_id, tx_to_id + 4096) based on TOID encoding.
queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, id as cursor FROM operations WHERE id > $1 AND id < $1 + 4096`, columns))
queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", id as "cursor.cursor_id" FROM operations WHERE id > $1 AND id < $1 + 4096`, columns))

args := []interface{}{toID}
argIndex := 2
@@ -190,7 +198,7 @@ func (m *OperationModel) BatchGetByToID(ctx context.Context, toID int64, columns

query := queryBuilder.String()
if sortOrder == DESC {
query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY cursor ASC`, query)
query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query)
}

var operations []*types.OperationWithCursor
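
The (to_id, to_id + 4096) window used by BatchGetByToIDs and BatchGetByToID above comes from Stellar's TOID scheme (SEP-35), which packs ledger sequence, transaction application order, and operation index into a single int64; the operation index occupies the low 12 bits, so every operation of a transaction lies within 4096 of its to_id. A small illustration of the packing (a sketch of the convention, not code from this PR):

// TOID layout per SEP-35: [ledger:32][txOrder:20][opIndex:12] in an int64.
func toid(ledger, txOrder, opIndex int64) int64 {
	return ledger<<32 | txOrder<<12 | opIndex
}

// A transaction's TOID has opIndex 0; its operations occupy the next 4095 IDs:
//   txTOID  := toid(2, 1, 0)    // 8589938688
//   firstOp := toid(2, 1, 1)    // txTOID + 1
//   maxOp   := toid(2, 1, 4095) // txTOID + 4095, still < txTOID + 4096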
Expand All @@ -207,21 +215,72 @@ func (m *OperationModel) BatchGetByToID(ctx context.Context, toID int64, columns
}

// BatchGetByAccountAddress gets the operations that are associated with a single account address.
func (m *OperationModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, columns string, limit *int32, cursor *int64, orderBy SortOrder) ([]*types.OperationWithCursor, error) {
columns = prepareColumnsWithID(columns, types.Operation{}, "operations", "id")
// Uses a MATERIALIZED CTE + LATERAL join pattern to allow TimescaleDB ChunkAppend optimization
// on the operations_accounts hypertable by ordering on ledger_created_at first.
func (m *OperationModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, columns string, limit *int32, cursor *types.CompositeCursor, orderBy SortOrder, timeRange *TimeRange) ([]*types.OperationWithCursor, error) {
columns = prepareColumnsWithID(columns, types.Operation{}, "o", "id")

var queryBuilder strings.Builder
args := []interface{}{types.AddressBytea(accountAddress)}
argIndex := 2

// MATERIALIZED CTE scans operations_accounts with ledger_created_at leading the ORDER BY,
// enabling TimescaleDB ChunkAppend on the hypertable.
queryBuilder.WriteString(`
WITH account_ops AS MATERIALIZED (
SELECT operation_id, ledger_created_at
FROM operations_accounts
WHERE account_id = $1`)

// Time range filter: enables TimescaleDB chunk pruning at the earliest query stage
args, argIndex = appendTimeRangeConditions(&queryBuilder, "ledger_created_at", timeRange, args, argIndex)

// Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so
// TimescaleDB ColumnarScan can push filters into vectorized batch processing.
if cursor != nil {
clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{
{Name: "ledger_created_at", Value: cursor.LedgerCreatedAt},
{Name: "operation_id", Value: cursor.ID},
}, orderBy, argIndex)
queryBuilder.WriteString("\n\t\t\tAND " + clause)
args = append(args, cursorArgs...)
argIndex = nextIdx
}

if orderBy == DESC {
queryBuilder.WriteString(`
ORDER BY ledger_created_at DESC, operation_id DESC`)
} else {
queryBuilder.WriteString(`
ORDER BY ledger_created_at ASC, operation_id ASC`)
}

if limit != nil {
queryBuilder.WriteString(fmt.Sprintf(` LIMIT $%d`, argIndex))
args = append(args, *limit)
}

// Build paginated query using shared utility
query, args := buildGetByAccountAddressQuery(paginatedQueryConfig{
TableName: "operations",
CursorColumn: "id",
JoinTable: "operations_accounts",
JoinCondition: "operations_accounts.operation_id = operations.id",
Columns: columns,
AccountAddress: accountAddress,
Limit: limit,
Cursor: cursor,
OrderBy: orderBy,
})
// Close CTE and LATERAL join to fetch full operation rows
queryBuilder.WriteString(fmt.Sprintf(`
)
SELECT %s, o.ledger_created_at as "cursor.cursor_ledger_created_at", o.id as "cursor.cursor_id"
FROM account_ops ao,
LATERAL (SELECT * FROM operations o WHERE o.id = ao.operation_id AND o.ledger_created_at = ao.ledger_created_at LIMIT 1) o`, columns))

if orderBy == DESC {
queryBuilder.WriteString(`
ORDER BY o.ledger_created_at DESC, o.id DESC`)
} else {
queryBuilder.WriteString(`
ORDER BY o.ledger_created_at ASC, o.id ASC`)
}

query := queryBuilder.String()

// For backward pagination, wrap query to reverse the final order
if orderBy == DESC {
query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query)
}

var operations []*types.OperationWithCursor
start := time.Now()
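
Both GetAll and BatchGetByAccountAddress above call buildDecomposedCursorCondition, whose body is not part of this diff. A plausible sketch that matches the (clause, args, nextIndex) contract visible at the call sites — an assumption about the helper's shape, not the PR's actual implementation:

package data

import (
	"fmt"
	"strings"
)

// CursorColumn, SortOrder, and DESC mirror definitions the package already has.
type CursorColumn struct {
	Name  string
	Value interface{}
}

type SortOrder string

const DESC SortOrder = "DESC"

// buildDecomposedCursorCondition expands the tuple comparison
// (c1, c2) < ($1, $2) into (c1 < $1 OR (c1 = $1 AND c2 < $2)),
// which a columnar scan can vectorize, unlike a ROW() comparison.
func buildDecomposedCursorCondition(cols []CursorColumn, sortOrder SortOrder, argIndex int) (string, []interface{}, int) {
	op := ">"
	if sortOrder == DESC {
		op = "<"
	}
	branches := make([]string, 0, len(cols))
	for i := range cols {
		parts := make([]string, 0, i+1)
		// Equality on every higher-priority column...
		for j := 0; j < i; j++ {
			parts = append(parts, fmt.Sprintf("%s = $%d", cols[j].Name, argIndex+j))
		}
		// ...then a strict comparison on this column.
		parts = append(parts, fmt.Sprintf("%s %s $%d", cols[i].Name, op, argIndex+i))
		branches = append(branches, "("+strings.Join(parts, " AND ")+")")
	}
	args := make([]interface{}, len(cols))
	for i, c := range cols {
		args[i] = c.Value
	}
	return "(" + strings.Join(branches, " OR ") + ")", args, argIndex + len(cols)
}

For the two-column cursor this yields ((ledger_created_at < $1) OR (ledger_created_at = $1 AND id < $2)), the OR-decomposed equivalent of (ledger_created_at, id) < ($1, $2).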
12 changes: 6 additions & 6 deletions internal/data/operations_test.go
@@ -263,17 +263,17 @@ func TestOperationModel_GetAll(t *testing.T) {
operations, err := m.GetAll(ctx, "", nil, nil, ASC)
require.NoError(t, err)
assert.Len(t, operations, 3)
assert.Equal(t, int64(2), operations[0].Cursor)
assert.Equal(t, int64(4098), operations[1].Cursor)
assert.Equal(t, int64(8194), operations[2].Cursor)
assert.Equal(t, int64(2), operations[0].Cursor.ID)
assert.Equal(t, int64(4098), operations[1].Cursor.ID)
assert.Equal(t, int64(8194), operations[2].Cursor.ID)

// Test GetAll with smaller limit
limit := int32(2)
operations, err = m.GetAll(ctx, "", &limit, nil, ASC)
require.NoError(t, err)
assert.Len(t, operations, 2)
assert.Equal(t, int64(2), operations[0].Cursor)
assert.Equal(t, int64(4098), operations[1].Cursor)
assert.Equal(t, int64(2), operations[0].Cursor.ID)
assert.Equal(t, int64(4098), operations[1].Cursor.ID)
}

func TestOperationModel_BatchGetByToIDs(t *testing.T) {
@@ -582,7 +582,7 @@ func TestOperationModel_BatchGetByAccountAddresses(t *testing.T) {
require.NoError(t, err)

// Test BatchGetByAccount
operations, err := m.BatchGetByAccountAddress(ctx, address1, "", nil, nil, "ASC")
operations, err := m.BatchGetByAccountAddress(ctx, address1, "", nil, nil, ASC, nil)
require.NoError(t, err)
assert.Len(t, operations, 2)
assert.Equal(t, int64(4097), operations[0].Operation.ID)
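
For reference, the query the new BatchGetByAccountAddress assembles for a DESC page looks roughly like the following (reconstructed from the builder code above; the column list is abbreviated to o.* and placeholder numbers assume no time-range filter):

package data

// exampleAccountOpsQuery: $1 = account, $2/$3 = cursor, $4 = limit. The
// MATERIALIZED CTE pins the ordered scan of operations_accounts so TimescaleDB
// can use ChunkAppend, and the LATERAL fetch pulls full operation rows only
// for the page that survives the cursor filter and limit.
const exampleAccountOpsQuery = `
WITH account_ops AS MATERIALIZED (
    SELECT operation_id, ledger_created_at
    FROM operations_accounts
    WHERE account_id = $1
      AND (ledger_created_at < $2
           OR (ledger_created_at = $2 AND operation_id < $3))
    ORDER BY ledger_created_at DESC, operation_id DESC
    LIMIT $4
)
SELECT o.*, o.ledger_created_at AS "cursor.cursor_ledger_created_at", o.id AS "cursor.cursor_id"
FROM account_ops ao,
     LATERAL (SELECT * FROM operations o
              WHERE o.id = ao.operation_id
                AND o.ledger_created_at = ao.ledger_created_at
              LIMIT 1) o
ORDER BY o.ledger_created_at DESC, o.id DESC`

In the DESC case the builder then wraps this in an outer SELECT that re-sorts ascending, so pages are always returned in ascending cursor order regardless of pagination direction.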