diff --git a/internal/data/accounts.go b/internal/data/accounts.go index 1b0ff6a8..6e722076 100644 --- a/internal/data/accounts.go +++ b/internal/data/accounts.go @@ -5,7 +5,6 @@ package data import ( "context" "fmt" - "strings" "time" "github.com/lib/pq" @@ -77,33 +76,3 @@ func (m *AccountModel) BatchGetByOperationIDs(ctx context.Context, operationIDs m.MetricsService.IncDBQuery("BatchGetByOperationIDs", "operations_accounts") return accounts, nil } - -// BatchGetByStateChangeIDs gets the accounts that are associated with the given state change IDs. -func (m *AccountModel) BatchGetByStateChangeIDs(ctx context.Context, scToIDs []int64, scOpIDs []int64, scOrders []int64, columns string) ([]*types.AccountWithStateChangeID, error) { - // Build tuples for the IN clause. Since (to_id, operation_id, state_change_order) is the primary key of state_changes, - // it will be faster to search on this tuple. - tuples := make([]string, len(scOrders)) - for i := range scOrders { - tuples[i] = fmt.Sprintf("(%d, %d, %d)", scToIDs[i], scOpIDs[i], scOrders[i]) - } - - query := fmt.Sprintf(` - SELECT account_id AS stellar_address, CONCAT(to_id, '-', operation_id, '-', state_change_order) AS state_change_id - FROM state_changes - WHERE (to_id, operation_id, state_change_order) IN (%s) - ORDER BY ledger_created_at DESC - `, strings.Join(tuples, ", ")) - - var accountsWithStateChanges []*types.AccountWithStateChangeID - start := time.Now() - err := m.DB.SelectContext(ctx, &accountsWithStateChanges, query) - duration := time.Since(start).Seconds() - m.MetricsService.ObserveDBQueryDuration("BatchGetByStateChangeIDs", "state_changes", duration) - m.MetricsService.ObserveDBBatchSize("BatchGetByStateChangeIDs", "state_changes", len(scOrders)) - if err != nil { - m.MetricsService.IncDBQueryError("BatchGetByStateChangeIDs", "state_changes", utils.GetDBErrorType(err)) - return nil, fmt.Errorf("getting accounts by state change IDs: %w", err) - } - 
m.MetricsService.IncDBQuery("BatchGetByStateChangeIDs", "state_changes") - return accountsWithStateChanges, nil -} diff --git a/internal/data/accounts_test.go b/internal/data/accounts_test.go index 623db9d8..118bee18 100644 --- a/internal/data/accounts_test.go +++ b/internal/data/accounts_test.go @@ -151,69 +151,3 @@ func TestAccountModel_IsAccountFeeBumpEligible(t *testing.T) { require.NoError(t, err) assert.True(t, isFeeBumpEligible) } - -func TestAccountModelBatchGetByStateChangeIDs(t *testing.T) { - dbt := dbtest.Open(t) - defer dbt.Close() - dbConnectionPool, err := db.OpenDBConnectionPool(dbt.DSN) - require.NoError(t, err) - defer dbConnectionPool.Close() - - mockMetricsService := metrics.NewMockMetricsService() - mockMetricsService.On("ObserveDBQueryDuration", "BatchGetByStateChangeIDs", "state_changes", mock.Anything).Return() - mockMetricsService.On("IncDBQuery", "BatchGetByStateChangeIDs", "state_changes").Return() - mockMetricsService.On("ObserveDBBatchSize", "BatchGetByStateChangeIDs", "state_changes", mock.Anything).Return().Maybe() - defer mockMetricsService.AssertExpectations(t) - - m := &AccountModel{ - DB: dbConnectionPool, - MetricsService: mockMetricsService, - } - - ctx := context.Background() - address1 := keypair.MustRandom().Address() - address2 := keypair.MustRandom().Address() - toID1 := int64(4096) - toID2 := int64(8192) - stateChangeOrder1 := int64(1) - stateChangeOrder2 := int64(1) - - // Insert test transactions first (hash is BYTEA, using valid 64-char hex strings) - testHash1 := types.HashBytea("0000000000000000000000000000000000000000000000000000000000000001") - testHash2 := types.HashBytea("0000000000000000000000000000000000000000000000000000000000000002") - _, err = m.DB.ExecContext(ctx, "INSERT INTO transactions (hash, to_id, envelope_xdr, fee_charged, result_code, meta_xdr, ledger_number, ledger_created_at) VALUES ($1, 4096, 'env1', 100, 'TransactionResultCodeTxSuccess', 'meta1', 1, NOW()), ($2, 8192, 'env2', 200, 
'TransactionResultCodeTxSuccess', 'meta2', 2, NOW())", testHash1, testHash2) - require.NoError(t, err) - - // Insert test operations (IDs must be in TOID range for each transaction) - xdr1 := types.XDRBytea([]byte("xdr1")) - xdr2 := types.XDRBytea([]byte("xdr2")) - _, err = m.DB.ExecContext(ctx, "INSERT INTO operations (id, operation_type, operation_xdr, result_code, successful, ledger_number, ledger_created_at) VALUES (4097, 'PAYMENT', $1, 'op_success', true, 1, NOW()), (8193, 'PAYMENT', $2, 'op_success', true, 2, NOW())", xdr1, xdr2) - require.NoError(t, err) - - // Insert test state changes that reference the accounts (state_changes.account_id is TEXT) - _, err = m.DB.ExecContext(ctx, ` - INSERT INTO state_changes ( - to_id, state_change_order, state_change_category, ledger_created_at, - ledger_number, account_id, operation_id - ) VALUES - ($1, $2, 'BALANCE', NOW(), 1, $3, 4097), - ($4, $5, 'BALANCE', NOW(), 2, $6, 8193) - `, toID1, stateChangeOrder1, types.AddressBytea(address1), toID2, stateChangeOrder2, types.AddressBytea(address2)) - require.NoError(t, err) - - // Test BatchGetByStateChangeIDs function - scToIDs := []int64{toID1, toID2} - scOpIDs := []int64{4097, 8193} - scOrders := []int64{stateChangeOrder1, stateChangeOrder2} - accounts, err := m.BatchGetByStateChangeIDs(ctx, scToIDs, scOpIDs, scOrders, "") - require.NoError(t, err) - assert.Len(t, accounts, 2) - - // Verify accounts are returned with correct state_change_id (format: to_id-operation_id-state_change_order) - addressSet := make(map[string]string) - for _, acc := range accounts { - addressSet[string(acc.StellarAddress)] = acc.StateChangeID - } - assert.Equal(t, "4096-4097-1", addressSet[address1]) - assert.Equal(t, "8192-8193-1", addressSet[address2]) -} diff --git a/internal/data/operations.go b/internal/data/operations.go index ad4ddda3..4af74f5e 100644 --- a/internal/data/operations.go +++ b/internal/data/operations.go @@ -38,36 +38,44 @@ func (m *OperationModel) GetByID(ctx 
context.Context, id int64, columns string) return &operation, nil } -func (m *OperationModel) GetAll(ctx context.Context, columns string, limit *int32, cursor *int64, sortOrder SortOrder) ([]*types.OperationWithCursor, error) { +func (m *OperationModel) GetAll(ctx context.Context, columns string, limit *int32, cursor *types.CompositeCursor, sortOrder SortOrder) ([]*types.OperationWithCursor, error) { columns = prepareColumnsWithID(columns, types.Operation{}, "", "id") queryBuilder := strings.Builder{} - queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, id as cursor FROM operations`, columns)) + var args []interface{} + argIndex := 1 + queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", id as "cursor.cursor_id" FROM operations`, columns)) + + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. if cursor != nil { - if sortOrder == DESC { - queryBuilder.WriteString(fmt.Sprintf(" WHERE id < %d", *cursor)) - } else { - queryBuilder.WriteString(fmt.Sprintf(" WHERE id > %d", *cursor)) - } + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "ledger_created_at", Value: cursor.LedgerCreatedAt}, + {Name: "id", Value: cursor.ID}, + }, sortOrder, argIndex) + queryBuilder.WriteString(" WHERE " + clause) + args = append(args, cursorArgs...) 
+ argIndex = nextIdx } if sortOrder == DESC { - queryBuilder.WriteString(" ORDER BY id DESC") + queryBuilder.WriteString(" ORDER BY ledger_created_at DESC, id DESC") } else { - queryBuilder.WriteString(" ORDER BY id ASC") + queryBuilder.WriteString(" ORDER BY ledger_created_at ASC, id ASC") } if limit != nil { - queryBuilder.WriteString(fmt.Sprintf(" LIMIT %d", *limit)) + queryBuilder.WriteString(fmt.Sprintf(" LIMIT $%d", argIndex)) + args = append(args, *limit) } query := queryBuilder.String() if sortOrder == DESC { - query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY cursor ASC`, query) + query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query) } var operations []*types.OperationWithCursor start := time.Now() - err := m.DB.SelectContext(ctx, &operations, query) + err := m.DB.SelectContext(ctx, &operations, query, args...) duration := time.Since(start).Seconds() m.MetricsService.ObserveDBQueryDuration("GetAll", "operations", duration) if err != nil { @@ -131,7 +139,7 @@ func (m *OperationModel) BatchGetByToIDs(ctx context.Context, toIDs []int64, col JOIN inputs i ON o.id > i.to_id AND o.id < i.to_id + 4096 ) - SELECT %s, id as cursor FROM ranked_operations_per_to_id + SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", id as "cursor.cursor_id" FROM ranked_operations_per_to_id ` queryBuilder.WriteString(fmt.Sprintf(query, sortOrder, columns)) if limit != nil { @@ -139,7 +147,7 @@ func (m *OperationModel) BatchGetByToIDs(ctx context.Context, toIDs []int64, col } query = queryBuilder.String() if sortOrder == DESC { - query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY cursor ASC`, query) + query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query) } var operations []*types.OperationWithCursor @@ -162,7 +170,7 @@ func (m *OperationModel) 
BatchGetByToID(ctx context.Context, toID int64, columns columns = prepareColumnsWithID(columns, types.Operation{}, "", "id") queryBuilder := strings.Builder{} // Operations for a tx_to_id are in range (tx_to_id, tx_to_id + 4096) based on TOID encoding. - queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, id as cursor FROM operations WHERE id > $1 AND id < $1 + 4096`, columns)) + queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", id as "cursor.cursor_id" FROM operations WHERE id > $1 AND id < $1 + 4096`, columns)) args := []interface{}{toID} argIndex := 2 @@ -190,7 +198,7 @@ func (m *OperationModel) BatchGetByToID(ctx context.Context, toID int64, columns query := queryBuilder.String() if sortOrder == DESC { - query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY cursor ASC`, query) + query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query) } var operations []*types.OperationWithCursor @@ -207,21 +215,72 @@ func (m *OperationModel) BatchGetByToID(ctx context.Context, toID int64, columns } // BatchGetByAccountAddress gets the operations that are associated with a single account address. -func (m *OperationModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, columns string, limit *int32, cursor *int64, orderBy SortOrder) ([]*types.OperationWithCursor, error) { - columns = prepareColumnsWithID(columns, types.Operation{}, "operations", "id") +// Uses a MATERIALIZED CTE + LATERAL join pattern to allow TimescaleDB ChunkAppend optimization +// on the operations_accounts hypertable by ordering on ledger_created_at first. 
+func (m *OperationModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, columns string, limit *int32, cursor *types.CompositeCursor, orderBy SortOrder, timeRange *TimeRange) ([]*types.OperationWithCursor, error) { + columns = prepareColumnsWithID(columns, types.Operation{}, "o", "id") + + var queryBuilder strings.Builder + args := []interface{}{types.AddressBytea(accountAddress)} + argIndex := 2 + + // MATERIALIZED CTE scans operations_accounts with ledger_created_at leading the ORDER BY, + // enabling TimescaleDB ChunkAppend on the hypertable. + queryBuilder.WriteString(` + WITH account_ops AS MATERIALIZED ( + SELECT operation_id, ledger_created_at + FROM operations_accounts + WHERE account_id = $1`) + + // Time range filter: enables TimescaleDB chunk pruning at the earliest query stage + args, argIndex = appendTimeRangeConditions(&queryBuilder, "ledger_created_at", timeRange, args, argIndex) + + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. + if cursor != nil { + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "ledger_created_at", Value: cursor.LedgerCreatedAt}, + {Name: "operation_id", Value: cursor.ID}, + }, orderBy, argIndex) + queryBuilder.WriteString("\n\t\t\tAND " + clause) + args = append(args, cursorArgs...) 
+ argIndex = nextIdx + } + + if orderBy == DESC { + queryBuilder.WriteString(` + ORDER BY ledger_created_at DESC, operation_id DESC`) + } else { + queryBuilder.WriteString(` + ORDER BY ledger_created_at ASC, operation_id ASC`) + } + + if limit != nil { + queryBuilder.WriteString(fmt.Sprintf(` LIMIT $%d`, argIndex)) + args = append(args, *limit) + } - // Build paginated query using shared utility - query, args := buildGetByAccountAddressQuery(paginatedQueryConfig{ - TableName: "operations", - CursorColumn: "id", - JoinTable: "operations_accounts", - JoinCondition: "operations_accounts.operation_id = operations.id", - Columns: columns, - AccountAddress: accountAddress, - Limit: limit, - Cursor: cursor, - OrderBy: orderBy, - }) + // Close CTE and LATERAL join to fetch full operation rows + queryBuilder.WriteString(fmt.Sprintf(` + ) + SELECT %s, o.ledger_created_at as "cursor.cursor_ledger_created_at", o.id as "cursor.cursor_id" + FROM account_ops ao, + LATERAL (SELECT * FROM operations o WHERE o.id = ao.operation_id AND o.ledger_created_at = ao.ledger_created_at LIMIT 1) o`, columns)) + + if orderBy == DESC { + queryBuilder.WriteString(` + ORDER BY o.ledger_created_at DESC, o.id DESC`) + } else { + queryBuilder.WriteString(` + ORDER BY o.ledger_created_at ASC, o.id ASC`) + } + + query := queryBuilder.String() + + // For backward pagination, wrap query to reverse the final order + if orderBy == DESC { + query = fmt.Sprintf(`SELECT * FROM (%s) AS operations ORDER BY operations."cursor.cursor_ledger_created_at" ASC, operations."cursor.cursor_id" ASC`, query) + } var operations []*types.OperationWithCursor start := time.Now() diff --git a/internal/data/operations_test.go b/internal/data/operations_test.go index 4b3fbf1d..86b33af3 100644 --- a/internal/data/operations_test.go +++ b/internal/data/operations_test.go @@ -263,17 +263,17 @@ func TestOperationModel_GetAll(t *testing.T) { operations, err := m.GetAll(ctx, "", nil, nil, ASC) require.NoError(t, err) assert.Len(t, 
operations, 3) - assert.Equal(t, int64(2), operations[0].Cursor) - assert.Equal(t, int64(4098), operations[1].Cursor) - assert.Equal(t, int64(8194), operations[2].Cursor) + assert.Equal(t, int64(2), operations[0].Cursor.ID) + assert.Equal(t, int64(4098), operations[1].Cursor.ID) + assert.Equal(t, int64(8194), operations[2].Cursor.ID) // Test GetAll with smaller limit limit := int32(2) operations, err = m.GetAll(ctx, "", &limit, nil, ASC) require.NoError(t, err) assert.Len(t, operations, 2) - assert.Equal(t, int64(2), operations[0].Cursor) - assert.Equal(t, int64(4098), operations[1].Cursor) + assert.Equal(t, int64(2), operations[0].Cursor.ID) + assert.Equal(t, int64(4098), operations[1].Cursor.ID) } func TestOperationModel_BatchGetByToIDs(t *testing.T) { @@ -582,7 +582,7 @@ func TestOperationModel_BatchGetByAccountAddresses(t *testing.T) { require.NoError(t, err) // Test BatchGetByAccount - operations, err := m.BatchGetByAccountAddress(ctx, address1, "", nil, nil, "ASC") + operations, err := m.BatchGetByAccountAddress(ctx, address1, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, operations, 2) assert.Equal(t, int64(4097), operations[0].Operation.ID) diff --git a/internal/data/query_utils.go b/internal/data/query_utils.go index 35fbd70d..388dbc62 100644 --- a/internal/data/query_utils.go +++ b/internal/data/query_utils.go @@ -5,6 +5,7 @@ import ( "fmt" "reflect" "strings" + "time" set "github.com/deckarep/golang-set/v2" "github.com/jackc/pgx/v5/pgtype" @@ -12,6 +13,34 @@ import ( "github.com/stellar/wallet-backend/internal/indexer/types" ) +// TimeRange represents an optional time window for filtering queries by ledger_created_at. +// Both fields are optional: omit both for all data, use Since alone for "from this point", +// use Until alone for "up to this point", or both for a bounded window. 
+type TimeRange struct { + Since *time.Time + Until *time.Time +} + +// appendTimeRangeConditions appends ledger_created_at >= and/or <= conditions to the query builder. +// Returns the updated args slice and next arg index. The column parameter allows specifying +// a table-qualified column name (e.g., "ta.ledger_created_at" for CTEs). +func appendTimeRangeConditions(qb *strings.Builder, column string, timeRange *TimeRange, args []interface{}, argIndex int) ([]interface{}, int) { + if timeRange == nil { + return args, argIndex + } + if timeRange.Since != nil { + fmt.Fprintf(qb, " AND %s >= $%d", column, argIndex) + args = append(args, *timeRange.Since) + argIndex++ + } + if timeRange.Until != nil { + fmt.Fprintf(qb, " AND %s <= $%d", column, argIndex) + args = append(args, *timeRange.Until) + argIndex++ + } + return args, argIndex +} + type SortOrder string const ( @@ -19,24 +48,6 @@ const ( DESC SortOrder = "DESC" ) -// PaginatedQueryConfig contains configuration for building paginated queries -type paginatedQueryConfig struct { - // Base table configuration - TableName string // e.g., "operations" or "transactions" - CursorColumn string // e.g., "id" or "to_id" - - // Join configuration - JoinTable string // e.g., "operations_accounts" or "transactions_accounts" - JoinCondition string // e.g., "operations_accounts.operation_id = operations.id" - - // Query parameters - Columns string - AccountAddress string - Limit *int32 - Cursor *int64 - OrderBy SortOrder -} - // pgtypeTextFromNullString converts sql.NullString to pgtype.Text for efficient binary COPY. 
func pgtypeTextFromNullString(ns sql.NullString) pgtype.Text { return pgtype.Text{String: ns.String, Valid: ns.Valid} @@ -80,65 +91,59 @@ func pgtypeBytesFromNullAddressBytea(na types.NullAddressBytea) ([]byte, error) return val.([]byte), nil } -// BuildPaginatedQuery constructs a paginated SQL query with cursor-based pagination -func buildGetByAccountAddressQuery(config paginatedQueryConfig) (string, []any) { - var queryBuilder strings.Builder - var args []any - argIndex := 1 - - // Base query with join - queryBuilder.WriteString(fmt.Sprintf(` - SELECT %s, %s.%s as cursor - FROM %s - INNER JOIN %s - ON %s - WHERE %s.account_id = $%d`, - config.Columns, - config.TableName, - config.CursorColumn, - config.TableName, - config.JoinTable, - config.JoinCondition, - config.JoinTable, - argIndex)) - args = append(args, types.AddressBytea(config.AccountAddress)) - argIndex++ - - // Add cursor condition if provided - if config.Cursor != nil { - // When paginating in descending order, we are going from greater cursor id to smaller cursor id - if config.OrderBy == DESC { - queryBuilder.WriteString(fmt.Sprintf(` AND %s.%s < $%d`, config.TableName, config.CursorColumn, argIndex)) - } else { - queryBuilder.WriteString(fmt.Sprintf(` AND %s.%s > $%d`, config.TableName, config.CursorColumn, argIndex)) - } - args = append(args, *config.Cursor) - argIndex++ - } +// CursorColumn represents a column name and its cursor value for decomposed pagination. +type CursorColumn struct { + Name string + Value interface{} +} - // Add ordering - if config.OrderBy == DESC { - queryBuilder.WriteString(fmt.Sprintf(" ORDER BY %s.%s DESC", config.TableName, config.CursorColumn)) - } else { - queryBuilder.WriteString(fmt.Sprintf(" ORDER BY %s.%s ASC", config.TableName, config.CursorColumn)) - } +// buildDecomposedCursorCondition decomposes a ROW() tuple comparison into an equivalent +// OR clause that TimescaleDB's ColumnarScan can push into vectorized filters. 
+// +// For example, (a, b, c) < (x, y, z) with startArgIndex 1 becomes: +// +// (a < $1 OR (a = $2 AND b < $3) OR (a = $4 AND b = $5 AND c < $6)) +// +// DESC uses "<", ASC uses ">". Each OR branch binds fresh placeholders, so cursor values repeat in the returned args. Returns the clause string, args slice, and next arg index. +func buildDecomposedCursorCondition(columns []CursorColumn, sortOrder SortOrder, startArgIndex int) (string, []interface{}, int) { + if len(columns) == 0 { + return "", nil, startArgIndex + } + + op := "<" + if sortOrder == ASC { + op = ">" + } + + argIdx := startArgIndex + var args []interface{} + var orParts []string + + for i := range columns { + var parts []string + // Add equality conditions for all preceding columns + for j := 0; j < i; j++ { + parts = append(parts, fmt.Sprintf("%s = $%d", columns[j].Name, argIdx)) + args = append(args, columns[j].Value) + argIdx++ + } + // Add the comparison condition for the current column + parts = append(parts, fmt.Sprintf("%s %s $%d", columns[i].Name, op, argIdx)) + args = append(args, columns[i].Value) + argIdx++ - // Add limit if provided - if config.Limit != nil { - queryBuilder.WriteString(fmt.Sprintf(` LIMIT $%d`, argIndex)) - args = append(args, *config.Limit) + orParts = append(orParts, strings.Join(parts, " AND ")) } - query := queryBuilder.String() - - // For backward pagination, wrap query to reverse the final order - // This ensures we always display the oldest items first in the output - if config.OrderBy == DESC { - query = fmt.Sprintf(`SELECT * FROM (%s) AS %s ORDER BY %s.cursor ASC`, - query, config.TableName, config.TableName) + // Wrap each OR branch in parens if it has multiple conditions + for i, part := range orParts { + if i > 0 { + orParts[i] = "(" + part + ")" + } } - return query, args + clause := "(" + strings.Join(orParts, " OR ") + ")" + return clause, args, argIdx } func getDBColumns(model any) set.Set[string] { diff --git a/internal/data/query_utils_test.go b/internal/data/query_utils_test.go new file mode 100644 index 00000000..4134ac6c --- /dev/null +++ 
b/internal/data/query_utils_test.go @@ -0,0 +1,207 @@ +// Tests for query utility functions: time range conditions and decomposed cursor conditions. +package data + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_buildDecomposedCursorCondition(t *testing.T) { + testTime := time.Date(2024, 1, 15, 10, 30, 0, 0, time.UTC) + + tests := []struct { + name string + columns []CursorColumn + sortOrder SortOrder + startArgIndex int + wantClause string + wantArgs []interface{} + wantNextIndex int + }{ + { + name: "empty columns returns empty", + columns: []CursorColumn{}, + sortOrder: DESC, + startArgIndex: 1, + wantClause: "", + wantArgs: nil, + wantNextIndex: 1, + }, + { + name: "single column DESC", + columns: []CursorColumn{ + {Name: "ledger_created_at", Value: testTime}, + }, + sortOrder: DESC, + startArgIndex: 1, + wantClause: "(ledger_created_at < $1)", + wantArgs: []interface{}{testTime}, + wantNextIndex: 2, + }, + { + name: "single column ASC", + columns: []CursorColumn{ + {Name: "id", Value: int64(42)}, + }, + sortOrder: ASC, + startArgIndex: 3, + wantClause: "(id > $3)", + wantArgs: []interface{}{int64(42)}, + wantNextIndex: 4, + }, + { + name: "two columns DESC", + columns: []CursorColumn{ + {Name: "ledger_created_at", Value: testTime}, + {Name: "to_id", Value: int64(100)}, + }, + sortOrder: DESC, + startArgIndex: 1, + wantClause: "(ledger_created_at < $1 OR (ledger_created_at = $2 AND to_id < $3))", + wantArgs: []interface{}{testTime, testTime, int64(100)}, + wantNextIndex: 4, + }, + { + name: "two columns ASC", + columns: []CursorColumn{ + {Name: "ledger_created_at", Value: testTime}, + {Name: "id", Value: int64(50)}, + }, + sortOrder: ASC, + startArgIndex: 2, + wantClause: "(ledger_created_at > $2 OR (ledger_created_at = $3 AND id > $4))", + wantArgs: []interface{}{testTime, testTime, int64(50)}, + wantNextIndex: 5, + }, + { + name: "three columns DESC", + columns: []CursorColumn{ + {Name: "to_id", Value: 
int64(10)}, + {Name: "operation_id", Value: int64(20)}, + {Name: "state_change_order", Value: int64(30)}, + }, + sortOrder: DESC, + startArgIndex: 2, + wantClause: "(to_id < $2 OR (to_id = $3 AND operation_id < $4) OR (to_id = $5 AND operation_id = $6 AND state_change_order < $7))", + wantArgs: []interface{}{int64(10), int64(10), int64(20), int64(10), int64(20), int64(30)}, + wantNextIndex: 8, + }, + { + name: "four columns DESC", + columns: []CursorColumn{ + {Name: "ledger_created_at", Value: testTime}, + {Name: "to_id", Value: int64(10)}, + {Name: "operation_id", Value: int64(20)}, + {Name: "state_change_order", Value: int64(30)}, + }, + sortOrder: DESC, + startArgIndex: 1, + wantClause: "(ledger_created_at < $1 OR (ledger_created_at = $2 AND to_id < $3) OR (ledger_created_at = $4 AND to_id = $5 AND operation_id < $6) OR (ledger_created_at = $7 AND to_id = $8 AND operation_id = $9 AND state_change_order < $10))", + wantArgs: []interface{}{testTime, testTime, int64(10), testTime, int64(10), int64(20), testTime, int64(10), int64(20), int64(30)}, + wantNextIndex: 11, + }, + { + name: "four columns ASC", + columns: []CursorColumn{ + {Name: "ledger_created_at", Value: testTime}, + {Name: "to_id", Value: int64(10)}, + {Name: "operation_id", Value: int64(20)}, + {Name: "state_change_order", Value: int64(30)}, + }, + sortOrder: ASC, + startArgIndex: 5, + wantClause: "(ledger_created_at > $5 OR (ledger_created_at = $6 AND to_id > $7) OR (ledger_created_at = $8 AND to_id = $9 AND operation_id > $10) OR (ledger_created_at = $11 AND to_id = $12 AND operation_id = $13 AND state_change_order > $14))", + wantArgs: []interface{}{testTime, testTime, int64(10), testTime, int64(10), int64(20), testTime, int64(10), int64(20), int64(30)}, + wantNextIndex: 15, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotClause, gotArgs, gotNextIndex := buildDecomposedCursorCondition(tt.columns, tt.sortOrder, tt.startArgIndex) + assert.Equal(t, tt.wantClause, 
gotClause) + assert.Equal(t, tt.wantArgs, gotArgs) + assert.Equal(t, tt.wantNextIndex, gotNextIndex) + }) + } +} + +func Test_appendTimeRangeConditions(t *testing.T) { + since := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + until := time.Date(2024, 6, 1, 0, 0, 0, 0, time.UTC) + + tests := []struct { + name string + timeRange *TimeRange + column string + startArgs []interface{} + startArgIndex int + wantSQL string + wantArgs []interface{} + wantNextIndex int + }{ + { + name: "nil time range appends nothing", + timeRange: nil, + column: "ledger_created_at", + startArgs: []interface{}{"existing"}, + startArgIndex: 2, + wantSQL: "", + wantArgs: []interface{}{"existing"}, + wantNextIndex: 2, + }, + { + name: "since only", + timeRange: &TimeRange{Since: &since}, + column: "ledger_created_at", + startArgs: []interface{}{"existing"}, + startArgIndex: 2, + wantSQL: " AND ledger_created_at >= $2", + wantArgs: []interface{}{"existing", since}, + wantNextIndex: 3, + }, + { + name: "until only", + timeRange: &TimeRange{Until: &until}, + column: "ledger_created_at", + startArgs: []interface{}{"existing"}, + startArgIndex: 2, + wantSQL: " AND ledger_created_at <= $2", + wantArgs: []interface{}{"existing", until}, + wantNextIndex: 3, + }, + { + name: "both since and until", + timeRange: &TimeRange{Since: &since, Until: &until}, + column: "ledger_created_at", + startArgs: []interface{}{"existing"}, + startArgIndex: 2, + wantSQL: " AND ledger_created_at >= $2 AND ledger_created_at <= $3", + wantArgs: []interface{}{"existing", since, until}, + wantNextIndex: 4, + }, + { + name: "table-qualified column name", + timeRange: &TimeRange{Since: &since}, + column: "ta.ledger_created_at", + startArgs: nil, + startArgIndex: 1, + wantSQL: " AND ta.ledger_created_at >= $1", + wantArgs: []interface{}{since}, + wantNextIndex: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var qb strings.Builder + gotArgs, gotNextIndex := appendTimeRangeConditions(&qb, 
tt.column, tt.timeRange, tt.startArgs, tt.startArgIndex) + assert.Equal(t, tt.wantSQL, qb.String()) + assert.Equal(t, tt.wantArgs, gotArgs) + assert.Equal(t, tt.wantNextIndex, gotNextIndex) + }) + } +} diff --git a/internal/data/statechanges.go b/internal/data/statechanges.go index cb5ab3d6..548d4562 100644 --- a/internal/data/statechanges.go +++ b/internal/data/statechanges.go @@ -23,18 +23,21 @@ type StateChangeModel struct { // BatchGetByAccountAddress gets the state changes that are associated with the given account address. // Optional filters: txHash, operationID, category, and reason can be used to further filter results. -func (m *StateChangeModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, txHash *string, operationID *int64, category *string, reason *string, columns string, limit *int32, cursor *types.StateChangeCursor, sortOrder SortOrder) ([]*types.StateChangeWithCursor, error) { - columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order") +func (m *StateChangeModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, txHash *string, operationID *int64, category *string, reason *string, columns string, limit *int32, cursor *types.StateChangeCursor, sortOrder SortOrder, timeRange *TimeRange) ([]*types.StateChangeWithCursor, error) { + columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order", "account_id") var queryBuilder strings.Builder args := []interface{}{types.AddressBytea(accountAddress)} argIndex := 2 queryBuilder.WriteString(fmt.Sprintf(` - SELECT %s, to_id as "cursor.cursor_to_id", operation_id as "cursor.cursor_operation_id", state_change_order as "cursor.cursor_state_change_order" + SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", to_id as "cursor.cursor_to_id", operation_id as "cursor.cursor_operation_id", state_change_order as "cursor.cursor_state_change_order" FROM 
state_changes WHERE account_id = $1 `, columns)) + // Time range filter: enables TimescaleDB chunk pruning on the state_changes hypertable + args, argIndex = appendTimeRangeConditions(&queryBuilder, "ledger_created_at", timeRange, args, argIndex) + // Add transaction hash filter if provided (uses subquery to find to_id by hash) if txHash != nil { queryBuilder.WriteString(fmt.Sprintf(" AND to_id = (SELECT to_id FROM transactions WHERE hash = $%d)", argIndex)) @@ -63,29 +66,25 @@ func (m *StateChangeModel) BatchGetByAccountAddress(ctx context.Context, account argIndex++ } - // Add cursor-based pagination using 3-column comparison (to_id, operation_id, state_change_order) + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. if cursor != nil { - if sortOrder == DESC { - queryBuilder.WriteString(fmt.Sprintf(` - AND (to_id, operation_id, state_change_order) < ($%d, $%d, $%d) - `, argIndex, argIndex+1, argIndex+2)) - args = append(args, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder) - argIndex += 3 - } else { - queryBuilder.WriteString(fmt.Sprintf(` - AND (to_id, operation_id, state_change_order) > ($%d, $%d, $%d) - `, argIndex, argIndex+1, argIndex+2)) - args = append(args, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder) - argIndex += 3 - } - } - - // TODO: Extract the ordering code to separate function in utils and use everywhere - // Add ordering + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "ledger_created_at", Value: cursor.LedgerCreatedAt}, + {Name: "to_id", Value: cursor.ToID}, + {Name: "operation_id", Value: cursor.OperationID}, + {Name: "state_change_order", Value: cursor.StateChangeOrder}, + }, sortOrder, argIndex) + queryBuilder.WriteString(" AND " + clause) + args = append(args, cursorArgs...) 
+ argIndex = nextIdx + } + + // Add ordering with ledger_created_at as leading column for TimescaleDB ChunkAppend if sortOrder == DESC { - queryBuilder.WriteString(" ORDER BY to_id DESC, operation_id DESC, state_change_order DESC") + queryBuilder.WriteString(" ORDER BY ledger_created_at DESC, to_id DESC, operation_id DESC, state_change_order DESC") } else { - queryBuilder.WriteString(" ORDER BY to_id ASC, operation_id ASC, state_change_order ASC") + queryBuilder.WriteString(" ORDER BY ledger_created_at ASC, to_id ASC, operation_id ASC, state_change_order ASC") } // Add limit using parameterized query @@ -97,10 +96,10 @@ func (m *StateChangeModel) BatchGetByAccountAddress(ctx context.Context, account query := queryBuilder.String() // For backward pagination, wrap query to reverse the final order. - // We use cursor alias columns (e.g., "cursor.cursor_to_id") in ORDER BY to avoid + // We use cursor alias columns (e.g., "cursor.cursor_ledger_created_at") in ORDER BY to avoid // ambiguity since the inner SELECT includes both original columns and cursor aliases. 
if sortOrder == DESC { - query = fmt.Sprintf(`SELECT * FROM (%s) AS statechanges ORDER BY statechanges."cursor.cursor_to_id" ASC, statechanges."cursor.cursor_operation_id" ASC, statechanges."cursor.cursor_state_change_order" ASC`, query) + query = fmt.Sprintf(`SELECT * FROM (%s) AS statechanges ORDER BY statechanges."cursor.cursor_ledger_created_at" ASC, statechanges."cursor.cursor_to_id" ASC, statechanges."cursor.cursor_operation_id" ASC, statechanges."cursor.cursor_state_change_order" ASC`, query) } var stateChanges []*types.StateChangeWithCursor @@ -117,47 +116,54 @@ func (m *StateChangeModel) BatchGetByAccountAddress(ctx context.Context, account } func (m *StateChangeModel) GetAll(ctx context.Context, columns string, limit *int32, cursor *types.StateChangeCursor, sortOrder SortOrder) ([]*types.StateChangeWithCursor, error) { - columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order") + columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order", "account_id") var queryBuilder strings.Builder + var args []interface{} + argIndex := 1 + queryBuilder.WriteString(fmt.Sprintf(` - SELECT %s, to_id as "cursor.cursor_to_id", operation_id as "cursor.cursor_operation_id", state_change_order as "cursor.cursor_state_change_order" + SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", to_id as "cursor.cursor_to_id", operation_id as "cursor.cursor_operation_id", state_change_order as "cursor.cursor_state_change_order" FROM state_changes `, columns)) + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. 
if cursor != nil { - if sortOrder == DESC { - queryBuilder.WriteString(fmt.Sprintf(` - WHERE (to_id, operation_id, state_change_order) < (%d, %d, %d) - `, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder)) - } else { - queryBuilder.WriteString(fmt.Sprintf(` - WHERE (to_id, operation_id, state_change_order) > (%d, %d, %d) - `, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder)) - } - } - + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "ledger_created_at", Value: cursor.LedgerCreatedAt}, + {Name: "to_id", Value: cursor.ToID}, + {Name: "operation_id", Value: cursor.OperationID}, + {Name: "state_change_order", Value: cursor.StateChangeOrder}, + }, sortOrder, argIndex) + queryBuilder.WriteString(" WHERE " + clause) + args = append(args, cursorArgs...) + argIndex = nextIdx + } + + // Order with ledger_created_at as leading column for TimescaleDB ChunkAppend if sortOrder == DESC { - queryBuilder.WriteString(" ORDER BY to_id DESC, operation_id DESC, state_change_order DESC") + queryBuilder.WriteString(" ORDER BY ledger_created_at DESC, to_id DESC, operation_id DESC, state_change_order DESC") } else { - queryBuilder.WriteString(" ORDER BY to_id ASC, operation_id ASC, state_change_order ASC") + queryBuilder.WriteString(" ORDER BY ledger_created_at ASC, to_id ASC, operation_id ASC, state_change_order ASC") } if limit != nil && *limit > 0 { - queryBuilder.WriteString(fmt.Sprintf(" LIMIT %d", *limit)) + queryBuilder.WriteString(fmt.Sprintf(" LIMIT $%d", argIndex)) + args = append(args, *limit) } query := queryBuilder.String() // For backward pagination, wrap query to reverse the final order. - // We use cursor alias columns (e.g., "cursor.cursor_to_id") in ORDER BY to avoid + // We use cursor alias columns (e.g., "cursor.cursor_ledger_created_at") in ORDER BY to avoid // ambiguity since the inner SELECT includes both original columns and cursor aliases. 
if sortOrder == DESC { - query = fmt.Sprintf(`SELECT * FROM (%s) AS statechanges ORDER BY statechanges."cursor.cursor_to_id" ASC, statechanges."cursor.cursor_operation_id" ASC, statechanges."cursor.cursor_state_change_order" ASC`, query) + query = fmt.Sprintf(`SELECT * FROM (%s) AS statechanges ORDER BY statechanges."cursor.cursor_ledger_created_at" ASC, statechanges."cursor.cursor_to_id" ASC, statechanges."cursor.cursor_operation_id" ASC, statechanges."cursor.cursor_state_change_order" ASC`, query) } var stateChanges []*types.StateChangeWithCursor start := time.Now() - err := m.DB.SelectContext(ctx, &stateChanges, query) + err := m.DB.SelectContext(ctx, &stateChanges, query, args...) duration := time.Since(start).Seconds() m.MetricsService.ObserveDBQueryDuration("GetAll", "state_changes", duration) if err != nil { @@ -287,7 +293,7 @@ func (m *StateChangeModel) BatchCopy( // BatchGetByToID gets state changes for a single transaction with pagination support. func (m *StateChangeModel) BatchGetByToID(ctx context.Context, toID int64, columns string, limit *int32, cursor *types.StateChangeCursor, sortOrder SortOrder) ([]*types.StateChangeWithCursor, error) { - columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order") + columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order", "account_id") var queryBuilder strings.Builder queryBuilder.WriteString(fmt.Sprintf(` SELECT %s, to_id as "cursor.cursor_to_id", operation_id as "cursor.cursor_operation_id", state_change_order as "cursor.cursor_state_change_order" @@ -298,16 +304,17 @@ func (m *StateChangeModel) BatchGetByToID(ctx context.Context, toID int64, colum args := []interface{}{toID} argIndex := 2 + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. 
if cursor != nil { - if sortOrder == DESC { - queryBuilder.WriteString(fmt.Sprintf(` - AND (to_id, operation_id, state_change_order) < (%d, %d, %d) - `, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder)) - } else { - queryBuilder.WriteString(fmt.Sprintf(` - AND (to_id, operation_id, state_change_order) > (%d, %d, %d) - `, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder)) - } + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "to_id", Value: cursor.ToID}, + {Name: "operation_id", Value: cursor.OperationID}, + {Name: "state_change_order", Value: cursor.StateChangeOrder}, + }, sortOrder, argIndex) + queryBuilder.WriteString(" AND " + clause) + args = append(args, cursorArgs...) + argIndex = nextIdx } if sortOrder == DESC { @@ -345,7 +352,7 @@ func (m *StateChangeModel) BatchGetByToID(ctx context.Context, toID int64, colum // BatchGetByToIDs gets the state changes that are associated with the given to_ids. func (m *StateChangeModel) BatchGetByToIDs(ctx context.Context, toIDs []int64, columns string, limit *int32, sortOrder SortOrder) ([]*types.StateChangeWithCursor, error) { - columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order") + columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order", "account_id") var queryBuilder strings.Builder // This CTE query implements per-transaction pagination to ensure balanced results. // Instead of applying a global LIMIT that could return all state changes from just a few @@ -397,7 +404,7 @@ func (m *StateChangeModel) BatchGetByToIDs(ctx context.Context, toIDs []int64, c // BatchGetByOperationID gets state changes for a single operation with pagination support. 
func (m *StateChangeModel) BatchGetByOperationID(ctx context.Context, operationID int64, columns string, limit *int32, cursor *types.StateChangeCursor, sortOrder SortOrder) ([]*types.StateChangeWithCursor, error) { - columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order") + columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order", "account_id") var queryBuilder strings.Builder queryBuilder.WriteString(fmt.Sprintf(` SELECT %s, to_id as "cursor.cursor_to_id", operation_id as "cursor.cursor_operation_id", state_change_order as "cursor.cursor_state_change_order" @@ -408,16 +415,17 @@ func (m *StateChangeModel) BatchGetByOperationID(ctx context.Context, operationI args := []interface{}{operationID} argIndex := 2 + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. if cursor != nil { - if sortOrder == DESC { - queryBuilder.WriteString(fmt.Sprintf(` - AND (to_id, operation_id, state_change_order) < (%d, %d, %d) - `, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder)) - } else { - queryBuilder.WriteString(fmt.Sprintf(` - AND (to_id, operation_id, state_change_order) > (%d, %d, %d) - `, cursor.ToID, cursor.OperationID, cursor.StateChangeOrder)) - } + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "to_id", Value: cursor.ToID}, + {Name: "operation_id", Value: cursor.OperationID}, + {Name: "state_change_order", Value: cursor.StateChangeOrder}, + }, sortOrder, argIndex) + queryBuilder.WriteString(" AND " + clause) + args = append(args, cursorArgs...) + argIndex = nextIdx } if sortOrder == DESC { @@ -455,7 +463,7 @@ func (m *StateChangeModel) BatchGetByOperationID(ctx context.Context, operationI // BatchGetByOperationIDs gets the state changes that are associated with the given operation IDs. 
func (m *StateChangeModel) BatchGetByOperationIDs(ctx context.Context, operationIDs []int64, columns string, limit *int32, sortOrder SortOrder) ([]*types.StateChangeWithCursor, error) { - columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order") + columns = prepareColumnsWithID(columns, types.StateChange{}, "", "to_id", "operation_id", "state_change_order", "account_id") var queryBuilder strings.Builder // This CTE query implements per-operation pagination to ensure balanced results. // Instead of applying a global LIMIT that could return all state changes from just a few diff --git a/internal/data/statechanges_test.go b/internal/data/statechanges_test.go index 7710cdbc..383d0295 100644 --- a/internal/data/statechanges_test.go +++ b/internal/data/statechanges_test.go @@ -279,7 +279,7 @@ func TestStateChangeModel_BatchGetByAccountAddress(t *testing.T) { } // Test BatchGetByAccount for address1 - stateChanges, err := m.BatchGetByAccountAddress(ctx, address1, nil, nil, nil, nil, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address1, nil, nil, nil, nil, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, stateChanges, 2) for _, sc := range stateChanges { @@ -287,7 +287,7 @@ func TestStateChangeModel_BatchGetByAccountAddress(t *testing.T) { } // Test BatchGetByAccount for address2 - stateChanges, err = m.BatchGetByAccountAddress(ctx, address2, nil, nil, nil, nil, "", nil, nil, ASC) + stateChanges, err = m.BatchGetByAccountAddress(ctx, address2, nil, nil, nil, nil, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, stateChanges, 1) for _, sc := range stateChanges { @@ -346,7 +346,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { } txHash := testHash1 - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, nil, nil, nil, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, nil, 
nil, nil, "", nil, nil, ASC, nil) require.NoError(t, err) // tx1 has to_id=1, so we get state changes where to_id=1 (2 state changes now) assert.Len(t, stateChanges, 2) @@ -368,7 +368,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { } operationID := int64(123) - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, &operationID, nil, nil, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, &operationID, nil, nil, "", nil, nil, ASC, nil) require.NoError(t, err) // Only 1 state change has operation_id=123 (the first one with to_id=1) assert.Len(t, stateChanges, 1) @@ -391,7 +391,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { txHash := testHash1 operationID := int64(123) - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, &operationID, nil, nil, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, &operationID, nil, nil, "", nil, nil, ASC, nil) require.NoError(t, err) // Should get only state changes that match BOTH filters (to_id=1 from tx1 hash, operation_id=123) assert.Len(t, stateChanges, 1) @@ -414,7 +414,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { } category := "BALANCE" - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, nil, &category, nil, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, nil, &category, nil, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, stateChanges, 3) for _, sc := range stateChanges { @@ -435,7 +435,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { } reason := "ADD" - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, nil, nil, &reason, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, nil, nil, &reason, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, 
stateChanges, 2) for _, sc := range stateChanges { @@ -457,7 +457,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { category := "SIGNER" reason := "ADD" - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, nil, &category, &reason, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, nil, nil, &category, &reason, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, stateChanges, 2) for _, sc := range stateChanges { @@ -482,7 +482,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { operationID := int64(123) category := "BALANCE" reason := "CREDIT" - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, &operationID, &category, &reason, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, &operationID, &category, &reason, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, stateChanges, 1) for _, sc := range stateChanges { @@ -506,7 +506,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { } txHash := testHashNonExistent - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, nil, nil, nil, "", nil, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, nil, nil, nil, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Empty(t, stateChanges) }) @@ -524,7 +524,7 @@ func TestStateChangeModel_BatchGetByAccountAddress_WithFilters(t *testing.T) { txHash := testHash1 limit := int32(1) - stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, nil, nil, nil, "", &limit, nil, ASC) + stateChanges, err := m.BatchGetByAccountAddress(ctx, address, &txHash, nil, nil, nil, "", &limit, nil, ASC, nil) require.NoError(t, err) assert.Len(t, stateChanges, 1) assert.Equal(t, int64(1), stateChanges[0].ToID) diff --git a/internal/data/transactions.go b/internal/data/transactions.go index 5209ffcf..0546e71c 100644 
--- a/internal/data/transactions.go +++ b/internal/data/transactions.go @@ -39,37 +39,45 @@ func (m *TransactionModel) GetByHash(ctx context.Context, hash string, columns s return &transaction, nil } -func (m *TransactionModel) GetAll(ctx context.Context, columns string, limit *int32, cursor *int64, sortOrder SortOrder) ([]*types.TransactionWithCursor, error) { +func (m *TransactionModel) GetAll(ctx context.Context, columns string, limit *int32, cursor *types.CompositeCursor, sortOrder SortOrder) ([]*types.TransactionWithCursor, error) { columns = prepareColumnsWithID(columns, types.Transaction{}, "", "to_id") queryBuilder := strings.Builder{} - queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, to_id as cursor FROM transactions`, columns)) + var args []interface{} + argIndex := 1 + queryBuilder.WriteString(fmt.Sprintf(`SELECT %s, ledger_created_at as "cursor.cursor_ledger_created_at", to_id as "cursor.cursor_id" FROM transactions`, columns)) + + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. if cursor != nil { - if sortOrder == DESC { - queryBuilder.WriteString(fmt.Sprintf(" WHERE to_id < %d", *cursor)) - } else { - queryBuilder.WriteString(fmt.Sprintf(" WHERE to_id > %d", *cursor)) - } + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "ledger_created_at", Value: cursor.LedgerCreatedAt}, + {Name: "to_id", Value: cursor.ID}, + }, sortOrder, argIndex) + queryBuilder.WriteString(" WHERE " + clause) + args = append(args, cursorArgs...) 
+ argIndex = nextIdx } if sortOrder == DESC { - queryBuilder.WriteString(" ORDER BY to_id DESC") + queryBuilder.WriteString(" ORDER BY ledger_created_at DESC, to_id DESC") } else { - queryBuilder.WriteString(" ORDER BY to_id ASC") + queryBuilder.WriteString(" ORDER BY ledger_created_at ASC, to_id ASC") } if limit != nil { - queryBuilder.WriteString(fmt.Sprintf(" LIMIT %d", *limit)) + queryBuilder.WriteString(fmt.Sprintf(" LIMIT $%d", argIndex)) + args = append(args, *limit) } query := queryBuilder.String() if sortOrder == DESC { - query = fmt.Sprintf(`SELECT * FROM (%s) AS transactions ORDER BY cursor ASC`, query) + query = fmt.Sprintf(`SELECT * FROM (%s) AS transactions ORDER BY transactions."cursor.cursor_ledger_created_at" ASC, transactions."cursor.cursor_id" ASC`, query) } var transactions []*types.TransactionWithCursor start := time.Now() - err := m.DB.SelectContext(ctx, &transactions, query) + err := m.DB.SelectContext(ctx, &transactions, query, args...) duration := time.Since(start).Seconds() m.MetricsService.ObserveDBQueryDuration("GetAll", "transactions", duration) if err != nil { @@ -81,21 +89,72 @@ func (m *TransactionModel) GetAll(ctx context.Context, columns string, limit *in } // BatchGetByAccountAddress gets the transactions that are associated with a single account address. -func (m *TransactionModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, columns string, limit *int32, cursor *int64, orderBy SortOrder) ([]*types.TransactionWithCursor, error) { - columns = prepareColumnsWithID(columns, types.Transaction{}, "transactions", "to_id") +// Uses a MATERIALIZED CTE + LATERAL join pattern to allow TimescaleDB ChunkAppend optimization +// on the transactions_accounts hypertable by ordering on ledger_created_at first. 
+func (m *TransactionModel) BatchGetByAccountAddress(ctx context.Context, accountAddress string, columns string, limit *int32, cursor *types.CompositeCursor, orderBy SortOrder, timeRange *TimeRange) ([]*types.TransactionWithCursor, error) { + columns = prepareColumnsWithID(columns, types.Transaction{}, "t", "to_id") + + var queryBuilder strings.Builder + args := []interface{}{types.AddressBytea(accountAddress)} + argIndex := 2 + + // MATERIALIZED CTE scans transactions_accounts with ledger_created_at leading the ORDER BY, + // enabling TimescaleDB ChunkAppend on the hypertable. + queryBuilder.WriteString(` + WITH account_txns AS MATERIALIZED ( + SELECT tx_to_id, ledger_created_at + FROM transactions_accounts + WHERE account_id = $1`) - // Build paginated query using shared utility - query, args := buildGetByAccountAddressQuery(paginatedQueryConfig{ - TableName: "transactions", - CursorColumn: "to_id", - JoinTable: "transactions_accounts", - JoinCondition: "transactions_accounts.tx_to_id = transactions.to_id", - Columns: columns, - AccountAddress: accountAddress, - Limit: limit, - Cursor: cursor, - OrderBy: orderBy, - }) + // Time range filter: enables TimescaleDB chunk pruning at the earliest query stage + args, argIndex = appendTimeRangeConditions(&queryBuilder, "ledger_created_at", timeRange, args, argIndex) + + // Decomposed cursor pagination: expands ROW() tuple comparison into OR clauses so + // TimescaleDB ColumnarScan can push filters into vectorized batch processing. + if cursor != nil { + clause, cursorArgs, nextIdx := buildDecomposedCursorCondition([]CursorColumn{ + {Name: "ledger_created_at", Value: cursor.LedgerCreatedAt}, + {Name: "tx_to_id", Value: cursor.ID}, + }, orderBy, argIndex) + queryBuilder.WriteString("\n\t\t\tAND " + clause) + args = append(args, cursorArgs...) 
+ argIndex = nextIdx + } + + if orderBy == DESC { + queryBuilder.WriteString(` + ORDER BY ledger_created_at DESC, tx_to_id DESC`) + } else { + queryBuilder.WriteString(` + ORDER BY ledger_created_at ASC, tx_to_id ASC`) + } + + if limit != nil { + queryBuilder.WriteString(fmt.Sprintf(` LIMIT $%d`, argIndex)) + args = append(args, *limit) + } + + // Close CTE and LATERAL join to fetch full transaction rows + queryBuilder.WriteString(fmt.Sprintf(` + ) + SELECT %s, t.ledger_created_at as "cursor.cursor_ledger_created_at", t.to_id as "cursor.cursor_id" + FROM account_txns ta, + LATERAL (SELECT * FROM transactions t WHERE t.to_id = ta.tx_to_id AND t.ledger_created_at = ta.ledger_created_at LIMIT 1) t`, columns)) + + if orderBy == DESC { + queryBuilder.WriteString(` + ORDER BY t.ledger_created_at DESC, t.to_id DESC`) + } else { + queryBuilder.WriteString(` + ORDER BY t.ledger_created_at ASC, t.to_id ASC`) + } + + query := queryBuilder.String() + + // For backward pagination, wrap query to reverse the final order + if orderBy == DESC { + query = fmt.Sprintf(`SELECT * FROM (%s) AS transactions ORDER BY transactions."cursor.cursor_ledger_created_at" ASC, transactions."cursor.cursor_id" ASC`, query) + } var transactions []*types.TransactionWithCursor start := time.Now() diff --git a/internal/data/transactions_test.go b/internal/data/transactions_test.go index 14a12a0b..076d0572 100644 --- a/internal/data/transactions_test.go +++ b/internal/data/transactions_test.go @@ -291,17 +291,17 @@ func TestTransactionModel_GetAll(t *testing.T) { transactions, err := m.GetAll(ctx, "", nil, nil, ASC) require.NoError(t, err) assert.Len(t, transactions, 3) - assert.Equal(t, int64(1), transactions[0].Cursor) - assert.Equal(t, int64(2), transactions[1].Cursor) - assert.Equal(t, int64(3), transactions[2].Cursor) + assert.Equal(t, int64(1), transactions[0].Cursor.ID) + assert.Equal(t, int64(2), transactions[1].Cursor.ID) + assert.Equal(t, int64(3), transactions[2].Cursor.ID) // Test GetAll with 
smaller limit limit := int32(2) transactions, err = m.GetAll(ctx, "", &limit, nil, ASC) require.NoError(t, err) assert.Len(t, transactions, 2) - assert.Equal(t, int64(1), transactions[0].Cursor) - assert.Equal(t, int64(2), transactions[1].Cursor) + assert.Equal(t, int64(1), transactions[0].Cursor.ID) + assert.Equal(t, int64(2), transactions[1].Cursor.ID) } func TestTransactionModel_BatchGetByAccountAddress(t *testing.T) { @@ -351,12 +351,12 @@ func TestTransactionModel_BatchGetByAccountAddress(t *testing.T) { require.NoError(t, err) // Test BatchGetByAccount - transactions, err := m.BatchGetByAccountAddress(ctx, address1, "", nil, nil, "ASC") + transactions, err := m.BatchGetByAccountAddress(ctx, address1, "", nil, nil, ASC, nil) require.NoError(t, err) assert.Len(t, transactions, 2) - assert.Equal(t, int64(1), transactions[0].Cursor) - assert.Equal(t, int64(2), transactions[1].Cursor) + assert.Equal(t, int64(1), transactions[0].Cursor.ID) + assert.Equal(t, int64(2), transactions[1].Cursor.ID) } func TestTransactionModel_BatchGetByOperationIDs(t *testing.T) { diff --git a/internal/indexer/types/types.go b/internal/indexer/types/types.go index 07ec54c6..b7c3b294 100644 --- a/internal/indexer/types/types.go +++ b/internal/indexer/types/types.go @@ -312,9 +312,17 @@ type Transaction struct { InnerTransactionHash string `json:"innerTransactionHash,omitempty" db:"-"` } +// CompositeCursor encodes both ledger_created_at and an entity ID for TimescaleDB-friendly +// cursor-based pagination. Using ledger_created_at as the leading sort column allows +// TimescaleDB to use ChunkAppend optimization on hypertables. 
+type CompositeCursor struct { + LedgerCreatedAt time.Time `db:"cursor_ledger_created_at"` + ID int64 `db:"cursor_id"` +} + type TransactionWithCursor struct { Transaction - Cursor int64 `json:"cursor,omitempty" db:"cursor"` + Cursor CompositeCursor `json:"cursor,omitempty" db:"cursor"` } type TransactionWithStateChangeID struct { @@ -420,7 +428,7 @@ type Operation struct { type OperationWithCursor struct { Operation - Cursor int64 `json:"cursor,omitempty" db:"cursor"` + Cursor CompositeCursor `json:"cursor,omitempty" db:"cursor"` } type OperationWithStateChangeID struct { @@ -428,11 +436,6 @@ type OperationWithStateChangeID struct { StateChangeID string `db:"state_change_id"` } -type AccountWithStateChangeID struct { - Account - StateChangeID string `db:"state_change_id"` -} - type StateChangeCategory string const ( @@ -617,9 +620,10 @@ type StateChangeWithCursor struct { } type StateChangeCursor struct { - ToID int64 `db:"cursor_to_id"` - OperationID int64 `db:"cursor_operation_id"` - StateChangeOrder int64 `db:"cursor_state_change_order"` + LedgerCreatedAt time.Time `db:"cursor_ledger_created_at"` + ToID int64 `db:"cursor_to_id"` + OperationID int64 `db:"cursor_operation_id"` + StateChangeOrder int64 `db:"cursor_state_change_order"` } type StateChangeCursorGetter interface { diff --git a/internal/integrationtests/data_validation_test.go b/internal/integrationtests/data_validation_test.go index 2ebc2c9f..cd1089d5 100644 --- a/internal/integrationtests/data_validation_test.go +++ b/internal/integrationtests/data_validation_test.go @@ -71,7 +71,7 @@ func (suite *DataValidationTestSuite) fetchStateChangesInParallel( query := q // capture variable group.Submit(func() { sc, err := suite.testEnv.WBClient.GetAccountStateChanges( - ctx, query.account, query.txHash, nil, query.category, query.reason, first, nil, nil, nil) + ctx, query.account, query.txHash, nil, query.category, query.reason, nil, nil, first, nil, nil, nil) if err != nil { errMu.Lock() errs = append(errs, 
fmt.Errorf("%s: %w", query.name, err)) @@ -137,30 +137,27 @@ func validateStateChangeBase(suite *DataValidationTestSuite, sc types.StateChang } // validateBalanceChange validates a balance state change -func validateBalanceChange(suite *DataValidationTestSuite, bc *types.StandardBalanceChange, expectedTokenID, expectedAmount, expectedAccount string, expectedReason types.StateChangeReason) { +func validateBalanceChange(suite *DataValidationTestSuite, bc *types.StandardBalanceChange, expectedTokenID, expectedAmount string, expectedReason types.StateChangeReason) { suite.Require().NotNil(bc, "balance change should not be nil") suite.Require().Equal(types.StateChangeCategoryBalance, bc.GetType(), "should be BALANCE type") suite.Require().Equal(expectedReason, bc.GetReason(), "reason mismatch") suite.Require().Equal(expectedTokenID, bc.TokenID, "token ID mismatch") suite.Require().Equal(expectedAmount, bc.Amount, "amount mismatch") - suite.Require().Equal(expectedAccount, bc.GetAccountID(), "account ID mismatch") } // validateAccountChange validates an account state change -func validateAccountChange(suite *DataValidationTestSuite, ac *types.AccountChange, expectedAccount, expectedFunderAddress string, expectedReason types.StateChangeReason) { +func validateAccountChange(suite *DataValidationTestSuite, ac *types.AccountChange, expectedFunderAddress string, expectedReason types.StateChangeReason) { suite.Require().NotNil(ac, "account change should not be nil") suite.Require().Equal(types.StateChangeCategoryAccount, ac.GetType(), "should be ACCOUNT type") suite.Require().Equal(expectedReason, ac.GetReason(), "reason mismatch") - suite.Require().Equal(expectedAccount, ac.GetAccountID(), "account ID mismatch") suite.Require().Equal(expectedFunderAddress, *ac.FunderAddress, "funder address mismatch") } // validateSignerChange validates a signer state change -func validateSignerChange(suite *DataValidationTestSuite, sc *types.SignerChange, expectedAccount string, 
expectedSignerAddress string, expectedSignerWeights int32, expectedReason types.StateChangeReason) { +func validateSignerChange(suite *DataValidationTestSuite, sc *types.SignerChange, expectedSignerAddress string, expectedSignerWeights int32, expectedReason types.StateChangeReason) { suite.Require().NotNil(sc, "signer change should not be nil") suite.Require().Equal(types.StateChangeCategorySigner, sc.GetType(), "should be SIGNER type") suite.Require().Equal(expectedReason, sc.GetReason(), "reason mismatch") - suite.Require().Equal(expectedAccount, sc.GetAccountID(), "account ID mismatch") suite.Require().NotNil(sc.SignerAddress, "signer address should not be nil") suite.Require().Equal(expectedSignerAddress, *sc.SignerAddress, "signer address mismatch") @@ -175,12 +172,11 @@ func validateSignerChange(suite *DataValidationTestSuite, sc *types.SignerChange } // validateMetadataChange validates a metadata state change -func validateMetadataChange(suite *DataValidationTestSuite, mc *types.MetadataChange, expectedAccount string, expectedReason types.StateChangeReason, expectedKey, expectedInnerKey, expectedValue string) { +func validateMetadataChange(suite *DataValidationTestSuite, mc *types.MetadataChange, expectedReason types.StateChangeReason, expectedKey, expectedInnerKey, expectedValue string) { suite.Require().NotNil(mc, "metadata change should not be nil") suite.Require().Equal(types.StateChangeCategoryMetadata, mc.GetType(), "should be METADATA type") suite.Require().Equal(expectedReason, mc.GetReason(), "reason mismatch") suite.Require().NotEmpty(mc.KeyValue, "key value should not be empty") - suite.Require().Equal(expectedAccount, mc.GetAccountID(), "account ID mismatch") // Decode the key value var result map[string]map[string]string @@ -194,23 +190,21 @@ func validateMetadataChange(suite *DataValidationTestSuite, mc *types.MetadataCh } // validateReservesChange validates a reserves state change -func 
validateReservesSponsorshipChangeForSponsoredAccount(suite *DataValidationTestSuite, rc *types.ReservesChange, expectedAccount string, +func validateReservesSponsorshipChangeForSponsoredAccount(suite *DataValidationTestSuite, rc *types.ReservesChange, expectedReason types.StateChangeReason, expectedSponsorAddress string, ) { suite.Require().NotNil(rc, "reserves sponsorship change should not be nil") suite.Require().Equal(types.StateChangeCategoryReserves, rc.GetType(), "should be RESERVES type") suite.Require().Equal(expectedReason, rc.GetReason(), "reason mismatch") - suite.Require().Equal(expectedAccount, rc.GetAccountID(), "account ID mismatch") suite.Require().Equal(expectedSponsorAddress, *rc.SponsorAddress, "sponsor address mismatch") } -func validateReservesSponsorshipChangeForSponsoringAccount(suite *DataValidationTestSuite, rc *types.ReservesChange, expectedAccount string, +func validateReservesSponsorshipChangeForSponsoringAccount(suite *DataValidationTestSuite, rc *types.ReservesChange, expectedReason types.StateChangeReason, expectedSponsoredAddress string, ) { suite.Require().NotNil(rc, "reserves sponsorship change should not be nil") suite.Require().Equal(types.StateChangeCategoryReserves, rc.GetType(), "should be RESERVES type") suite.Require().Equal(expectedReason, rc.GetReason(), "reason mismatch") - suite.Require().Equal(expectedAccount, rc.GetAccountID(), "account ID mismatch") if expectedSponsoredAddress != "" { suite.Require().Equal(expectedSponsoredAddress, *rc.SponsoredAddress, "sponsored address mismatch") } @@ -238,11 +232,10 @@ func sumAmounts(suite *DataValidationTestSuite, sc *types.StateChangeConnection, } // validateTrustlineChangeDetailed validates a trustline state change with detailed checks -func validateTrustlineChange(suite *DataValidationTestSuite, tc *types.TrustlineChange, expectedAccount string, expectedTokenID string, expectedLiquidityPoolID string, expectedReason types.StateChangeReason) { +func 
validateTrustlineChange(suite *DataValidationTestSuite, tc *types.TrustlineChange, expectedTokenID string, expectedLiquidityPoolID string, expectedReason types.StateChangeReason) { suite.Require().NotNil(tc, "trustline change should not be nil") suite.Require().Equal(types.StateChangeCategoryTrustline, tc.GetType(), "should be TRUSTLINE type") suite.Require().Equal(expectedReason, tc.GetReason(), "reason mismatch") - suite.Require().Equal(expectedAccount, tc.GetAccountID(), "account ID mismatch") if expectedTokenID != "" { suite.Require().NotNil(tc.TokenID, "token ID should not be nil") suite.Require().Equal(expectedTokenID, *tc.TokenID, "token ID mismatch") @@ -258,13 +251,12 @@ func validateTrustlineChange(suite *DataValidationTestSuite, tc *types.Trustline } // validateBalanceAuthorizationChange validates a balance authorization state change -func validateBalanceAuthorizationChange(suite *DataValidationTestSuite, bac *types.BalanceAuthorizationChange, expectedAccount string, +func validateBalanceAuthorizationChange(suite *DataValidationTestSuite, bac *types.BalanceAuthorizationChange, expectedReason types.StateChangeReason, expectedFlags []string, expectedTokenID string, ) { suite.Require().NotNil(bac, "balance authorization change should not be nil") suite.Require().Equal(types.StateChangeCategoryBalanceAuthorization, bac.GetType(), "should be BALANCE_AUTHORIZATION type") suite.Require().Equal(expectedReason, bac.GetReason(), "reason mismatch") - suite.Require().Equal(expectedAccount, bac.GetAccountID(), "account ID mismatch") suite.Require().Equal(len(expectedFlags), len(bac.Flags), "flags count mismatch") for _, expectedFlag := range expectedFlags { suite.Require().Contains(bac.Flags, expectedFlag, "expected flag not found: %s", expectedFlag) @@ -275,11 +267,10 @@ func validateBalanceAuthorizationChange(suite *DataValidationTestSuite, bac *typ } // validateFlagsChange validates a flags state change -func validateFlagsChange(suite *DataValidationTestSuite, fc 
*types.FlagsChange, expectedAccount string, expectedReason types.StateChangeReason, expectedFlags []string) { +func validateFlagsChange(suite *DataValidationTestSuite, fc *types.FlagsChange, expectedReason types.StateChangeReason, expectedFlags []string) { suite.Require().NotNil(fc, "flags change should not be nil") suite.Require().Equal(types.StateChangeCategoryFlags, fc.GetType(), "should be FLAGS type") suite.Require().Equal(expectedReason, fc.GetReason(), "reason mismatch") - suite.Require().Equal(expectedAccount, fc.GetAccountID(), "account ID mismatch") suite.Require().Equal(len(expectedFlags), len(fc.Flags), "flags count mismatch") for _, expectedFlag := range expectedFlags { suite.Require().Contains(fc.Flags, expectedFlag, "expected flag not found: %s", expectedFlag) @@ -355,12 +346,12 @@ func (suite *DataValidationTestSuite) validatePaymentStateChanges(ctx context.Co // 1 DEBIT change for primary account suite.Require().Len(primaryStateChanges.Edges, 1, "should have exactly 1 state change for primary account") sc := primaryStateChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, sc, xlmContractAddress, "100000000", primaryAccount, types.StateChangeReasonDebit) + validateBalanceChange(suite, sc, xlmContractAddress, "100000000", types.StateChangeReasonDebit) // 1 CREDIT change for secondary account suite.Require().Len(secondaryStateChanges.Edges, 1, "should have exactly 1 state change for secondary account") sc = secondaryStateChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, sc, xlmContractAddress, "100000000", secondaryAccount, types.StateChangeReasonCredit) + validateBalanceChange(suite, sc, xlmContractAddress, "100000000", types.StateChangeReasonCredit) } func (suite *DataValidationTestSuite) TestSponsoredAccountCreationDataValidation() { @@ -461,37 +452,37 @@ func (suite *DataValidationTestSuite) validateSponsoredAccountCreationStateChang // 1 BALANCE/DEBIT change for primary account 
(sending starting balance) suite.Require().Len(primaryBalanceChanges.Edges, 1, "should have exactly 1 BALANCE/DEBIT balance change for primary account") balanceChange := primaryBalanceChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceChange, xlmContractAddress, "50000000", primaryAccount, types.StateChangeReasonDebit) + validateBalanceChange(suite, balanceChange, xlmContractAddress, "50000000", types.StateChangeReasonDebit) // 1 BALANCE/CREDIT change for sponsored account (receiving starting balance) suite.Require().Len(sponsoredBalanceChanges.Edges, 1, "should have exactly 1 BALANCE/CREDIT balance change for sponsored account") balanceChange = sponsoredBalanceChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceChange, xlmContractAddress, "50000000", sponsoredNewAccount, types.StateChangeReasonCredit) + validateBalanceChange(suite, balanceChange, xlmContractAddress, "50000000", types.StateChangeReasonCredit) // 1 ACCOUNT/CREATE account change for sponsored account suite.Require().Len(sponsoredAccountChanges.Edges, 1, "should have exactly 1 ACCOUNT/CREATE account change") accountChange := sponsoredAccountChanges.Edges[0].Node.(*types.AccountChange) - validateAccountChange(suite, accountChange, sponsoredNewAccount, primaryAccount, types.StateChangeReasonCreate) + validateAccountChange(suite, accountChange, primaryAccount, types.StateChangeReasonCreate) // 1 METADATA/DATA_ENTRY metadata change for primary account suite.Require().Len(primaryMetadataChanges.Edges, 1, "should have exactly 1 METADATA/DATA_ENTRY metadata change for primary account") metadataChange := primaryMetadataChanges.Edges[0].Node.(*types.MetadataChange) - validateMetadataChange(suite, metadataChange, primaryAccount, types.StateChangeReasonDataEntry, "foo", "new", "bar") + validateMetadataChange(suite, metadataChange, types.StateChangeReasonDataEntry, "foo", "new", "bar") // 1 RESERVES/SPONSOR change for sponsored account 
- sponsorship begin suite.Require().Len(sponsoredReservesChanges.Edges, 1, "should have exactly 1 RESERVES/SPONSOR reserves change for sponsored account") reserveChange := sponsoredReservesChanges.Edges[0].Node.(*types.ReservesChange) - validateReservesSponsorshipChangeForSponsoredAccount(suite, reserveChange, sponsoredNewAccount, types.StateChangeReasonSponsor, primaryAccount) + validateReservesSponsorshipChangeForSponsoredAccount(suite, reserveChange, types.StateChangeReasonSponsor, primaryAccount) // 1 RESERVES/SPONSOR change for sponsoring account - sponsorship begin suite.Require().Len(primaryReservesChanges.Edges, 1, "should have exactly 1 RESERVES/SPONSOR reserves change for sponsoring account") reserveChange = primaryReservesChanges.Edges[0].Node.(*types.ReservesChange) - validateReservesSponsorshipChangeForSponsoringAccount(suite, reserveChange, primaryAccount, types.StateChangeReasonSponsor, sponsoredNewAccount) + validateReservesSponsorshipChangeForSponsoringAccount(suite, reserveChange, types.StateChangeReasonSponsor, sponsoredNewAccount) // 1 SIGNER/ADD change for sponsored account with default signer weight = 1 suite.Require().Len(sponsoredSignerChanges.Edges, 1, "should have exactly 1 SIGNER/CREATE signer change for sponsored account") signerChange := sponsoredSignerChanges.Edges[0].Node.(*types.SignerChange) - validateSignerChange(suite, signerChange, sponsoredNewAccount, sponsoredNewAccount, 1, types.StateChangeReasonAdd) + validateSignerChange(suite, signerChange, sponsoredNewAccount, 1, types.StateChangeReasonAdd) } func (suite *DataValidationTestSuite) TestCustomAssetsOpsDataValidation() { @@ -629,10 +620,10 @@ func (suite *DataValidationTestSuite) validateCustomAssetsStateChanges(ctx conte tc := edge.Node.(*types.TrustlineChange) if tc.GetReason() == types.StateChangeReasonAdd { - validateTrustlineChange(suite, tc, secondaryAccount, test2ContractAddress, "", types.StateChangeReasonAdd) + validateTrustlineChange(suite, tc, test2ContractAddress, 
"", types.StateChangeReasonAdd) foundAdd = true } else if tc.GetReason() == types.StateChangeReasonRemove { - validateTrustlineChange(suite, tc, secondaryAccount, test2ContractAddress, "", types.StateChangeReasonRemove) + validateTrustlineChange(suite, tc, test2ContractAddress, "", types.StateChangeReasonRemove) foundRemove = true } } @@ -642,14 +633,13 @@ func (suite *DataValidationTestSuite) validateCustomAssetsStateChanges(ctx conte // 3b. BALANCE_AUTHORIZATION Changes: Secondary should have exactly 1 (SET with authorized flag) suite.Require().Len(authChanges.Edges, 1, "should have exactly 1 BALANCE_AUTHORIZATION/SET change") authChange := authChanges.Edges[0].Node.(*types.BalanceAuthorizationChange) - validateBalanceAuthorizationChange(suite, authChange, secondaryAccount, types.StateChangeReasonSet, []string{"authorized"}, test2ContractAddress) + validateBalanceAuthorizationChange(suite, authChange, types.StateChangeReasonSet, []string{"authorized"}, test2ContractAddress) // 4. SPECIFIC BALANCE CHANGE VALIDATIONS // 4a. 
Validate MINT changes have correct token ID and account for _, edge := range mintChanges.Edges { bc := edge.Node.(*types.StandardBalanceChange) suite.Require().Equal(test2ContractAddress, bc.TokenID, "MINT token should be TEST2") - suite.Require().Equal(primaryAccount, bc.GetAccountID(), "MINT account should be Primary") suite.Require().NotEmpty(bc.Amount, "MINT amount should not be empty") } @@ -657,7 +647,6 @@ func (suite *DataValidationTestSuite) validateCustomAssetsStateChanges(ctx conte for _, edge := range burnChanges.Edges { bc := edge.Node.(*types.StandardBalanceChange) suite.Require().Equal(test2ContractAddress, bc.TokenID, "BURN token should be TEST2") - suite.Require().Equal(primaryAccount, bc.GetAccountID(), "BURN account should be Primary") suite.Require().NotEmpty(bc.Amount, "BURN amount should not be empty") } @@ -666,7 +655,6 @@ func (suite *DataValidationTestSuite) validateCustomAssetsStateChanges(ctx conte for _, edge := range creditChanges.Edges { bc := edge.Node.(*types.StandardBalanceChange) suite.Require().True(tokenSet.Contains(bc.TokenID), "CREDIT token should be TEST2 or XLM") - suite.Require().Equal(secondaryAccount, bc.GetAccountID(), "CREDIT account should be Secondary") suite.Require().NotEmpty(bc.Amount, "CREDIT amount should not be empty") } @@ -674,7 +662,6 @@ func (suite *DataValidationTestSuite) validateCustomAssetsStateChanges(ctx conte for _, edge := range debitChanges.Edges { bc := edge.Node.(*types.StandardBalanceChange) suite.Require().True(tokenSet.Contains(bc.TokenID), "DEBIT token should be TEST2 or XLM") - suite.Require().Equal(secondaryAccount, bc.GetAccountID(), "DEBIT account should be Secondary") suite.Require().NotEmpty(bc.Amount, "DEBIT amount should not be empty") } } @@ -787,7 +774,7 @@ func (suite *DataValidationTestSuite) validateAuthRequiredIssuerSetupStateChange expectedFlags := []string{"auth_required", "auth_revocable", "auth_clawback_enabled"} flagsSetChange := 
flagsSetPrimary.Edges[0].Node.(*types.FlagsChange) validateStateChangeBase(suite, flagsSetChange, ledgerNumber) - validateFlagsChange(suite, flagsSetChange, primaryAccount, types.StateChangeReasonSet, expectedFlags) + validateFlagsChange(suite, flagsSetChange, types.StateChangeReasonSet, expectedFlags) } func (suite *DataValidationTestSuite) validateAuthRequiredAssetStateChanges(ctx context.Context, txHash string, ledgerNumber int64) { @@ -870,26 +857,26 @@ func (suite *DataValidationTestSuite) validateAuthRequiredAssetStateChanges(ctx // First SET change: clawback_enabled flag from trustline creation authSetSecondaryClawback := balanceAuthSetSecondary.Edges[0].Node.(*types.BalanceAuthorizationChange) - validateBalanceAuthorizationChange(suite, authSetSecondaryClawback, secondaryAccount, types.StateChangeReasonSet, []string{"clawback_enabled"}, test1ContractAddress) + validateBalanceAuthorizationChange(suite, authSetSecondaryClawback, types.StateChangeReasonSet, []string{"clawback_enabled"}, test1ContractAddress) // Second SET change: authorized flag from SetTrustLineFlags authSetSecondaryAuthorized := balanceAuthSetSecondary.Edges[1].Node.(*types.BalanceAuthorizationChange) - validateBalanceAuthorizationChange(suite, authSetSecondaryAuthorized, secondaryAccount, types.StateChangeReasonSet, []string{"authorized"}, test1ContractAddress) + validateBalanceAuthorizationChange(suite, authSetSecondaryAuthorized, types.StateChangeReasonSet, []string{"authorized"}, test1ContractAddress) // Secondary account: BALANCE_AUTHORIZATION/CLEAR with "authorized" flag suite.Require().Len(balanceAuthClearSecondary.Edges, 1, "should have exactly 1 BALANCE_AUTHORIZATION/CLEAR for secondary") authClearSecondary := balanceAuthClearSecondary.Edges[0].Node.(*types.BalanceAuthorizationChange) - validateBalanceAuthorizationChange(suite, authClearSecondary, secondaryAccount, types.StateChangeReasonClear, []string{"authorized"}, test1ContractAddress) + validateBalanceAuthorizationChange(suite, 
authClearSecondary, types.StateChangeReasonClear, []string{"authorized"}, test1ContractAddress) // 5. TRUSTLINE STATE CHANGES VALIDATION FOR SECONDARY ACCOUNT suite.Require().Len(trustlineAdd.Edges, 1, "should have exactly 1 TRUSTLINE/ADD") suite.Require().Len(trustlineRemove.Edges, 1, "should have exactly 1 TRUSTLINE/REMOVE") trustlineAddChange := trustlineAdd.Edges[0].Node.(*types.TrustlineChange) - validateTrustlineChange(suite, trustlineAddChange, secondaryAccount, test1ContractAddress, "", types.StateChangeReasonAdd) + validateTrustlineChange(suite, trustlineAddChange, test1ContractAddress, "", types.StateChangeReasonAdd) trustlineRemoveChange := trustlineRemove.Edges[0].Node.(*types.TrustlineChange) - validateTrustlineChange(suite, trustlineRemoveChange, secondaryAccount, test1ContractAddress, "", types.StateChangeReasonRemove) + validateTrustlineChange(suite, trustlineRemoveChange, test1ContractAddress, "", types.StateChangeReasonRemove) // 6. BALANCE STATE CHANGES VALIDATION // Validate counts @@ -900,19 +887,19 @@ func (suite *DataValidationTestSuite) validateAuthRequiredAssetStateChanges(ctx // Validate MINT mintChange := balanceMint.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, mintChange, test1ContractAddress, "10000000000", primaryAccount, types.StateChangeReasonMint) + validateBalanceChange(suite, mintChange, test1ContractAddress, "10000000000", types.StateChangeReasonMint) // Validate CREDIT creditChange := balanceCredit.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, creditChange, test1ContractAddress, "10000000000", secondaryAccount, types.StateChangeReasonCredit) + validateBalanceChange(suite, creditChange, test1ContractAddress, "10000000000", types.StateChangeReasonCredit) // Validate BURN burnChange := balanceBurn.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, burnChange, test1ContractAddress, "10000000000", primaryAccount, types.StateChangeReasonBurn) + 
validateBalanceChange(suite, burnChange, test1ContractAddress, "10000000000", types.StateChangeReasonBurn) // Validate DEBIT (from clawback) debitChange := balanceDebit.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, debitChange, test1ContractAddress, "10000000000", secondaryAccount, types.StateChangeReasonDebit) + validateBalanceChange(suite, debitChange, test1ContractAddress, "10000000000", types.StateChangeReasonDebit) // 7. CONSERVATION LAW VALIDATIONS totalMint := sumAmounts(suite, balanceMint, test1ContractAddress) @@ -1011,32 +998,31 @@ func (suite *DataValidationTestSuite) validateAccountMergeStateChanges(ctx conte accountChange := accountMergeChanges.Edges[0].Node.(*types.AccountChange) suite.Require().Equal(types.StateChangeCategoryAccount, accountChange.GetType(), "should be ACCOUNT type") suite.Require().Equal(types.StateChangeReasonMerge, accountChange.GetReason(), "reason should be MERGE") - suite.Require().Equal(primaryAccount, accountChange.GetAccountID(), "account ID should be the destination account (receiving the merge)") // Validate BALANCE/CREDIT change suite.Require().Len(balanceCreditChanges.Edges, 1, "should have exactly 1 BALANCE/CREDIT change") balanceCreditChange := balanceCreditChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceCreditChange, xlmContractAddress, "50000000", primaryAccount, types.StateChangeReasonCredit) + validateBalanceChange(suite, balanceCreditChange, xlmContractAddress, "50000000", types.StateChangeReasonCredit) // 5. 
RESERVES/UNSPONSOR STATE CHANGES VALIDATION FOR SPONSORED ACCOUNT suite.Require().Len(sponsoredReservesUnsponsorChanges.Edges, 1, "should have exactly 1 RESERVES/UNSPONSOR for sponsored account") sponsoredReservesChange := sponsoredReservesUnsponsorChanges.Edges[0].Node.(*types.ReservesChange) - validateReservesSponsorshipChangeForSponsoredAccount(suite, sponsoredReservesChange, sponsoredNewAccount, types.StateChangeReasonUnsponsor, primaryAccount) + validateReservesSponsorshipChangeForSponsoredAccount(suite, sponsoredReservesChange, types.StateChangeReasonUnsponsor, primaryAccount) // Validate BALANCE/DEBIT change suite.Require().Len(balanceDebitChanges.Edges, 1, "should have exactly 1 BALANCE/DEBIT change") balanceDebitChange := balanceDebitChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceDebitChange, xlmContractAddress, "50000000", sponsoredNewAccount, types.StateChangeReasonDebit) + validateBalanceChange(suite, balanceDebitChange, xlmContractAddress, "50000000", types.StateChangeReasonDebit) // Validate RESERVES/UNSPONSOR for sponsored account suite.Require().Len(sponsoredReservesUnsponsorChanges.Edges, 1, "should have exactly 1 RESERVES/UNSPONSOR for sponsored account") sponsoredReservesChange = sponsoredReservesUnsponsorChanges.Edges[0].Node.(*types.ReservesChange) - validateReservesSponsorshipChangeForSponsoredAccount(suite, sponsoredReservesChange, sponsoredNewAccount, types.StateChangeReasonUnsponsor, primaryAccount) + validateReservesSponsorshipChangeForSponsoredAccount(suite, sponsoredReservesChange, types.StateChangeReasonUnsponsor, primaryAccount) // Validate RESERVES/UNSPONSOR for sponsor account suite.Require().Len(sponsorReservesUnsponsorChanges.Edges, 1, "should have exactly 1 RESERVES/UNSPONSOR for sponsor account") sponsorReservesChange := sponsorReservesUnsponsorChanges.Edges[0].Node.(*types.ReservesChange) - validateReservesSponsorshipChangeForSponsoringAccount(suite, sponsorReservesChange, 
primaryAccount, types.StateChangeReasonUnsponsor, sponsoredNewAccount) + validateReservesSponsorshipChangeForSponsoringAccount(suite, sponsorReservesChange, types.StateChangeReasonUnsponsor, sponsoredNewAccount) } func (suite *DataValidationTestSuite) TestInvokeContractOpsDataValidation() { @@ -1118,12 +1104,12 @@ func (suite *DataValidationTestSuite) validateInvokeContractStateChanges(ctx con // Validate BALANCE/CREDIT change suite.Require().Len(balanceCreditChanges.Edges, 1, "should have exactly 1 BALANCE/CREDIT change") balanceCreditChange := balanceCreditChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceCreditChange, xlmContractAddress, "100000000", primaryAccount, types.StateChangeReasonCredit) + validateBalanceChange(suite, balanceCreditChange, xlmContractAddress, "100000000", types.StateChangeReasonCredit) // Validate BALANCE/DEBIT change suite.Require().Len(balanceDebitChanges.Edges, 1, "should have exactly 1 BALANCE/DEBIT change") balanceDebitChange := balanceDebitChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceDebitChange, xlmContractAddress, "100000000", primaryAccount, types.StateChangeReasonDebit) + validateBalanceChange(suite, balanceDebitChange, xlmContractAddress, "100000000", types.StateChangeReasonDebit) } func (suite *DataValidationTestSuite) TestCreateClaimableBalanceOpsDataValidation() { @@ -1191,16 +1177,6 @@ func (suite *DataValidationTestSuite) validateCreateClaimableBalanceStateChanges suite.Require().NoError(err, "failed to marshal state change") fmt.Printf("%s\n", string(jsonBytes)) validateStateChangeBase(suite, edge.Node, ledgerNumber) - - // Validate that no state changes have claimable balance IDs as accounts - accountID := edge.Node.GetAccountID() - suite.Require().NotEmpty(accountID, "account ID should not be empty") - - // Decode the account ID to check its version byte - versionByte, _, err := strkey.DecodeAny(accountID) - 
suite.Require().NoError(err, "account ID should be a valid strkey: %s", accountID) - suite.Require().NotEqual(strkey.VersionByteClaimableBalance, versionByte, - "state change should not have claimable balance ID as account: %s", accountID) } fmt.Printf("primary account: %s\n", primaryAccount) fmt.Printf("secondary account: %s\n", secondaryAccount) @@ -1230,7 +1206,7 @@ func (suite *DataValidationTestSuite) validateCreateClaimableBalanceStateChanges // 3. TRUSTLINE STATE CHANGES VALIDATION FOR SECONDARY ACCOUNT suite.Require().Len(trustlineAdd.Edges, 1, "should have exactly 1 TRUSTLINE/ADD") trustlineAddChange := trustlineAdd.Edges[0].Node.(*types.TrustlineChange) - validateTrustlineChange(suite, trustlineAddChange, secondaryAccount, test3ContractAddress, "", types.StateChangeReasonAdd) + validateTrustlineChange(suite, trustlineAddChange, test3ContractAddress, "", types.StateChangeReasonAdd) // 4. BALANCE_AUTHORIZATION STATE CHANGES VALIDATION // Secondary account should have 2 BALANCE_AUTHORIZATION/SET changes: @@ -1238,28 +1214,28 @@ func (suite *DataValidationTestSuite) validateCreateClaimableBalanceStateChanges // - One with authorized flag (from SetTrustLineFlags operation) suite.Require().Len(balanceAuthSet.Edges, 2, "should have exactly 2 BALANCE_AUTHORIZATION/SET for secondary") authSetSecondary := balanceAuthSet.Edges[0].Node.(*types.BalanceAuthorizationChange) - validateBalanceAuthorizationChange(suite, authSetSecondary, secondaryAccount, types.StateChangeReasonSet, []string{"clawback_enabled"}, test3ContractAddress) + validateBalanceAuthorizationChange(suite, authSetSecondary, types.StateChangeReasonSet, []string{"clawback_enabled"}, test3ContractAddress) // Second SET change: authorized flag from SetTrustLineFlags authSetSecondaryAuthorized := balanceAuthSet.Edges[1].Node.(*types.BalanceAuthorizationChange) - validateBalanceAuthorizationChange(suite, authSetSecondaryAuthorized, secondaryAccount, types.StateChangeReasonSet, []string{"authorized"}, 
test3ContractAddress) + validateBalanceAuthorizationChange(suite, authSetSecondaryAuthorized, types.StateChangeReasonSet, []string{"authorized"}, test3ContractAddress) // 5. BALANCE STATE CHANGES VALIDATION - 2 claimable balances are created suite.Require().Len(balanceMint.Edges, 2, "should have exactly 2 BALANCE/MINT") for _, edge := range balanceMint.Edges { mintChange := edge.Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, mintChange, test3ContractAddress, "10000000", primaryAccount, types.StateChangeReasonMint) + validateBalanceChange(suite, mintChange, test3ContractAddress, "10000000", types.StateChangeReasonMint) } // 6. 2 RESERVES/SPONSOR STATE CHANGES VALIDATION FOR SPONSORING ACCOUNT for 2 claimable balances suite.Require().Len(reservesSponsorForSponsor.Edges, 2, "should have exactly 2 RESERVES/SPONSOR for sponsor") change := reservesSponsorForSponsor.Edges[0].Node.(*types.ReservesChange) suite.Require().Equal(suite.testEnv.ClaimBalanceID, *change.ClaimableBalanceID, "claimable balance ID does not match") - validateReservesSponsorshipChangeForSponsoringAccount(suite, change, primaryAccount, types.StateChangeReasonSponsor, "") + validateReservesSponsorshipChangeForSponsoringAccount(suite, change, types.StateChangeReasonSponsor, "") change = reservesSponsorForSponsor.Edges[1].Node.(*types.ReservesChange) suite.Require().Equal(suite.testEnv.ClawbackBalanceID, *change.ClaimableBalanceID, "claimable balance ID for clawback does not match") - validateReservesSponsorshipChangeForSponsoringAccount(suite, change, primaryAccount, types.StateChangeReasonSponsor, "") + validateReservesSponsorshipChangeForSponsoringAccount(suite, change, types.StateChangeReasonSponsor, "") } func (suite *DataValidationTestSuite) TestClaimClaimableBalanceDataValidation() { @@ -1336,13 +1312,13 @@ func (suite *DataValidationTestSuite) validateClaimClaimableBalanceStateChanges( // 3. 
VALIDATE BALANCE/CREDIT CHANGE suite.Require().Len(balanceCreditChanges.Edges, 1, "should have exactly 1 BALANCE/CREDIT change") balanceCreditChange := balanceCreditChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceCreditChange, test3ContractAddress, "10000000", secondaryAccount, types.StateChangeReasonCredit) + validateBalanceChange(suite, balanceCreditChange, test3ContractAddress, "10000000", types.StateChangeReasonCredit) // 4. RESERVES/UNSPONSOR STATE CHANGES VALIDATION FOR SPONSORING ACCOUNT suite.Require().Len(reservesUnsponsorForSponsor.Edges, 1, "should have exactly 1 RESERVES/UNSPONSOR for sponsor") reservesUnsponsorForSponsorChange := reservesUnsponsorForSponsor.Edges[0].Node.(*types.ReservesChange) suite.Require().Equal(suite.testEnv.ClaimBalanceID, *reservesUnsponsorForSponsorChange.ClaimableBalanceID, "claimable balance ID does not match") - validateReservesSponsorshipChangeForSponsoringAccount(suite, reservesUnsponsorForSponsorChange, primaryAccount, types.StateChangeReasonUnsponsor, "") + validateReservesSponsorshipChangeForSponsoringAccount(suite, reservesUnsponsorForSponsorChange, types.StateChangeReasonUnsponsor, "") } func (suite *DataValidationTestSuite) TestClawbackClaimableBalanceDataValidation() { @@ -1418,13 +1394,13 @@ func (suite *DataValidationTestSuite) validateClawbackClaimableBalanceStateChang // 3. VALIDATE BALANCE/BURN CHANGE suite.Require().Len(balanceBurnChanges.Edges, 1, "should have exactly 1 BALANCE/BURN change") balanceBurnChange := balanceBurnChanges.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, balanceBurnChange, test3ContractAddress, "10000000", primaryAccount, types.StateChangeReasonBurn) + validateBalanceChange(suite, balanceBurnChange, test3ContractAddress, "10000000", types.StateChangeReasonBurn) // 4. 
RESERVES/UNSPONSOR STATE CHANGES VALIDATION FOR SPONSORING ACCOUNT FOR CLAWBACK BALANCE suite.Require().Len(reservesUnsponsorForSponsor.Edges, 1, "should have exactly 1 RESERVES/UNSPONSOR for sponsor") reservesUnsponsorForSponsorChange := reservesUnsponsorForSponsor.Edges[0].Node.(*types.ReservesChange) suite.Require().Equal(suite.testEnv.ClawbackBalanceID, *reservesUnsponsorForSponsorChange.ClaimableBalanceID, "claimable balance ID for clawback does not match") - validateReservesSponsorshipChangeForSponsoringAccount(suite, reservesUnsponsorForSponsorChange, primaryAccount, types.StateChangeReasonUnsponsor, "") + validateReservesSponsorshipChangeForSponsoringAccount(suite, reservesUnsponsorForSponsorChange, types.StateChangeReasonUnsponsor, "") } func (suite *DataValidationTestSuite) TestClearAuthFlagsOpsDataValidation() { @@ -1498,7 +1474,7 @@ func (suite *DataValidationTestSuite) validateClearAuthFlagsStateChanges(ctx con suite.Require().Len(flagsClearPrimary.Edges, 1, "should have exactly 1 FLAGS/CLEAR change for primary") expectedFlags := []string{"auth_required", "auth_revocable", "auth_clawback_enabled"} flagsClearChange := flagsClearPrimary.Edges[0].Node.(*types.FlagsChange) - validateFlagsChange(suite, flagsClearChange, primaryAccount, types.StateChangeReasonClear, expectedFlags) + validateFlagsChange(suite, flagsClearChange, types.StateChangeReasonClear, expectedFlags) } func (suite *DataValidationTestSuite) TestLiquidityPoolOpsDataValidation() { @@ -1609,38 +1585,38 @@ func (suite *DataValidationTestSuite) validateLiquidityPoolStateChanges(ctx cont suite.Require().Len(balanceAuthSet.Edges, 1, "should have exactly 1 BALANCE_AUTHORIZATION/SET for liquidity pool") balanceAuth := balanceAuthSet.Edges[0].Node.(*types.BalanceAuthorizationChange) suite.Require().Equal(suite.testEnv.LiquidityPoolID, *balanceAuth.LiquidityPoolID, "balance auth change liquidity pool ID does not match") - validateBalanceAuthorizationChange(suite, balanceAuth, primaryAccount, 
types.StateChangeReasonSet, []string{}, "") + validateBalanceAuthorizationChange(suite, balanceAuth, types.StateChangeReasonSet, []string{}, "") // 4. TRUSTLINE VALIDATION // LP trustlines should have null tokenId and pool ID in liquidityPoolId suite.Require().Len(trustlineAdd.Edges, 1, "should have exactly 1 TRUSTLINE/ADD for liquidity pool") trustlineAddChange := trustlineAdd.Edges[0].Node.(*types.TrustlineChange) - validateTrustlineChange(suite, trustlineAddChange, primaryAccount, "", suite.testEnv.LiquidityPoolID, types.StateChangeReasonAdd) + validateTrustlineChange(suite, trustlineAddChange, "", suite.testEnv.LiquidityPoolID, types.StateChangeReasonAdd) suite.Require().Len(trustlineRemove.Edges, 1, "should have exactly 1 TRUSTLINE/REMOVE for liquidity pool") trustlineRemoveChange := trustlineRemove.Edges[0].Node.(*types.TrustlineChange) - validateTrustlineChange(suite, trustlineRemoveChange, primaryAccount, "", suite.testEnv.LiquidityPoolID, types.StateChangeReasonRemove) + validateTrustlineChange(suite, trustlineRemoveChange, "", suite.testEnv.LiquidityPoolID, types.StateChangeReasonRemove) // 5. 
BALANCE CHANGES VALIDATION // DEBIT: XLM deposited into pool (amount = 1000000000) suite.Require().Len(balanceDebit.Edges, 1, "should have exactly 1 BALANCE/DEBIT") debitChange := balanceDebit.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, debitChange, xlmContractAddress, "1000000000", primaryAccount, types.StateChangeReasonDebit) + validateBalanceChange(suite, debitChange, xlmContractAddress, "1000000000", types.StateChangeReasonDebit) // CREDIT: XLM withdrawn from pool (amount = 1000000000) suite.Require().Len(balanceCredit.Edges, 1, "should have exactly 1 BALANCE/CREDIT") creditChange := balanceCredit.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, creditChange, xlmContractAddress, "1000000000", primaryAccount, types.StateChangeReasonCredit) + validateBalanceChange(suite, creditChange, xlmContractAddress, "1000000000", types.StateChangeReasonCredit) // MINT: TEST2 minted to LP (amount = 1000000000) suite.Require().Len(balanceMint.Edges, 1, "should have exactly 1 BALANCE/MINT") mintChange := balanceMint.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, mintChange, test2ContractAddress, "1000000000", primaryAccount, types.StateChangeReasonMint) + validateBalanceChange(suite, mintChange, test2ContractAddress, "1000000000", types.StateChangeReasonMint) // BURN: TEST2 burned from LP back to issuer (amount = 1000000000) suite.Require().Len(balanceBurn.Edges, 1, "should have exactly 1 BALANCE/BURN") burnChange := balanceBurn.Edges[0].Node.(*types.StandardBalanceChange) - validateBalanceChange(suite, burnChange, test2ContractAddress, "1000000000", primaryAccount, types.StateChangeReasonBurn) + validateBalanceChange(suite, burnChange, test2ContractAddress, "1000000000", types.StateChangeReasonBurn) } func (suite *DataValidationTestSuite) TestRevokeSponsorshipOpsDataValidation() { @@ -1730,15 +1706,15 @@ func (suite *DataValidationTestSuite) validateRevokeSponsorshipStateChanges(ctx // 
Validate sponsorship revocation mc := metadataDataEntry.Edges[0].Node.(*types.MetadataChange) - validateMetadataChange(suite, mc, secondaryAccount, types.StateChangeReasonDataEntry, "sponsored_data", "new", "test_value") + validateMetadataChange(suite, mc, types.StateChangeReasonDataEntry, "sponsored_data", "new", "test_value") mc = metadataDataEntry.Edges[1].Node.(*types.MetadataChange) - validateMetadataChange(suite, mc, secondaryAccount, types.StateChangeReasonDataEntry, "sponsored_data", "old", "test_value") + validateMetadataChange(suite, mc, types.StateChangeReasonDataEntry, "sponsored_data", "old", "test_value") // 5. RESERVES STATE CHANGES VALIDATION rc := primaryReservesSponsor.Edges[0].Node.(*types.ReservesChange) suite.Require().Equal("sponsored_data", *rc.SponsoredData, "sponsored data value does not match") - validateReservesSponsorshipChangeForSponsoringAccount(suite, rc, primaryAccount, types.StateChangeReasonSponsor, "") + validateReservesSponsorshipChangeForSponsoringAccount(suite, rc, types.StateChangeReasonSponsor, "") rc = primaryReservesUnsponsor.Edges[0].Node.(*types.ReservesChange) suite.Require().Equal("sponsored_data", *rc.SponsoredData, "sponsored data value does not match") - validateReservesSponsorshipChangeForSponsoringAccount(suite, rc, primaryAccount, types.StateChangeReasonUnsponsor, "") + validateReservesSponsorshipChangeForSponsoringAccount(suite, rc, types.StateChangeReasonUnsponsor, "") } diff --git a/internal/serve/graphql/dataloaders/account_loaders.go b/internal/serve/graphql/dataloaders/account_loaders.go index ed2c1a29..5116c12d 100644 --- a/internal/serve/graphql/dataloaders/account_loaders.go +++ b/internal/serve/graphql/dataloaders/account_loaders.go @@ -2,7 +2,6 @@ package dataloaders import ( "context" - "fmt" "github.com/vikstrous/dataloadgen" @@ -11,10 +10,9 @@ import ( ) type AccountColumnsKey struct { - ToID int64 - OperationID int64 - StateChangeID string - Columns string + ToID int64 + OperationID int64 + Columns 
string } // accountsByToIDLoader creates a dataloader for fetching accounts by transaction ToID @@ -66,32 +64,3 @@ func accountsByOperationIDLoader(models *data.Models) *dataloadgen.Loader[Accoun }, ) } - -// accountByStateChangeIDLoader creates a dataloader for fetching accounts by state change ID -// This prevents N+1 queries when multiple state changes request their accounts -// The loader batches multiple state change IDs into a single database query -func accountByStateChangeIDLoader(models *data.Models) *dataloadgen.Loader[AccountColumnsKey, *types.Account] { - return newOneToOneLoader( - func(ctx context.Context, keys []AccountColumnsKey) ([]*types.AccountWithStateChangeID, error) { - columns := keys[0].Columns - scIDs := make([]string, len(keys)) - for i, key := range keys { - scIDs[i] = key.StateChangeID - } - scToIDs, scOpIDs, scOrders, err := parseStateChangeIDs(scIDs) - if err != nil { - return nil, fmt.Errorf("parsing state change IDs: %w", err) - } - return models.Account.BatchGetByStateChangeIDs(ctx, scToIDs, scOpIDs, scOrders, columns) - }, - func(item *types.AccountWithStateChangeID) string { - return item.StateChangeID - }, - func(key AccountColumnsKey) string { - return key.StateChangeID - }, - func(item *types.AccountWithStateChangeID) types.Account { - return item.Account - }, - ) -} diff --git a/internal/serve/graphql/dataloaders/loaders.go b/internal/serve/graphql/dataloaders/loaders.go index f144c2b7..e7b6c689 100644 --- a/internal/serve/graphql/dataloaders/loaders.go +++ b/internal/serve/graphql/dataloaders/loaders.go @@ -48,10 +48,6 @@ type Dataloaders struct { // TransactionByStateChangeIDLoader batches requests for transactions by state change ID // Used by StateChange.transaction field resolver to prevent N+1 queries TransactionByStateChangeIDLoader *dataloadgen.Loader[TransactionColumnsKey, *types.Transaction] - - // AccountByStateChangeIDLoader batches requests for accounts by state change ID - // Used by StateChange.account field 
resolver to prevent N+1 queries - AccountByStateChangeIDLoader *dataloadgen.Loader[AccountColumnsKey, *types.Account] } // NewDataloaders creates a new instance of all dataloaders @@ -68,7 +64,6 @@ func NewDataloaders(models *data.Models) *Dataloaders { StateChangesByOperationIDLoader: stateChangesByOperationIDLoader(models), AccountsByToIDLoader: accountsByToIDLoader(models), AccountsByOperationIDLoader: accountsByOperationIDLoader(models), - AccountByStateChangeIDLoader: accountByStateChangeIDLoader(models), } } diff --git a/internal/serve/graphql/generated/generated.go b/internal/serve/graphql/generated/generated.go index a61ca301..9fd4244a 100644 --- a/internal/serve/graphql/generated/generated.go +++ b/internal/serve/graphql/generated/generated.go @@ -63,9 +63,9 @@ type DirectiveRoot struct { type ComplexityRoot struct { Account struct { Address func(childComplexity int) int - Operations func(childComplexity int, first *int32, after *string, last *int32, before *string) int - StateChanges func(childComplexity int, filter *AccountStateChangeFilterInput, first *int32, after *string, last *int32, before *string) int - Transactions func(childComplexity int, first *int32, after *string, last *int32, before *string) int + Operations func(childComplexity int, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) int + StateChanges func(childComplexity int, filter *AccountStateChangeFilterInput, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) int + Transactions func(childComplexity int, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) int } AccountBalances struct { @@ -334,9 +334,9 @@ type ComplexityRoot struct { type AccountResolver interface { Address(ctx context.Context, obj *types.Account) (string, error) - Transactions(ctx context.Context, obj *types.Account, first *int32, after *string, last *int32, before *string) 
(*TransactionConnection, error) - Operations(ctx context.Context, obj *types.Account, first *int32, after *string, last *int32, before *string) (*OperationConnection, error) - StateChanges(ctx context.Context, obj *types.Account, filter *AccountStateChangeFilterInput, first *int32, after *string, last *int32, before *string) (*StateChangeConnection, error) + Transactions(ctx context.Context, obj *types.Account, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) (*TransactionConnection, error) + Operations(ctx context.Context, obj *types.Account, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) (*OperationConnection, error) + StateChanges(ctx context.Context, obj *types.Account, filter *AccountStateChangeFilterInput, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) (*StateChangeConnection, error) } type AccountChangeResolver interface { Type(ctx context.Context, obj *types.AccountStateChangeModel) (types.StateChangeCategory, error) @@ -495,7 +495,7 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return 0, false } - return e.complexity.Account.Operations(childComplexity, args["first"].(*int32), args["after"].(*string), args["last"].(*int32), args["before"].(*string)), true + return e.complexity.Account.Operations(childComplexity, args["since"].(*time.Time), args["until"].(*time.Time), args["first"].(*int32), args["after"].(*string), args["last"].(*int32), args["before"].(*string)), true case "Account.stateChanges": if e.complexity.Account.StateChanges == nil { @@ -507,7 +507,7 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return 0, false } - return e.complexity.Account.StateChanges(childComplexity, args["filter"].(*AccountStateChangeFilterInput), args["first"].(*int32), args["after"].(*string), args["last"].(*int32), args["before"].(*string)), true + return 
e.complexity.Account.StateChanges(childComplexity, args["filter"].(*AccountStateChangeFilterInput), args["since"].(*time.Time), args["until"].(*time.Time), args["first"].(*int32), args["after"].(*string), args["last"].(*int32), args["before"].(*string)), true case "Account.transactions": if e.complexity.Account.Transactions == nil { @@ -519,7 +519,7 @@ func (e *executableSchema) Complexity(ctx context.Context, typeName, field strin return 0, false } - return e.complexity.Account.Transactions(childComplexity, args["first"].(*int32), args["after"].(*string), args["last"].(*int32), args["before"].(*string)), true + return e.complexity.Account.Transactions(childComplexity, args["since"].(*time.Time), args["until"].(*time.Time), args["first"].(*int32), args["after"].(*string), args["last"].(*int32), args["before"].(*string)), true case "AccountBalances.address": if e.complexity.AccountBalances.Address == nil { @@ -1976,18 +1976,22 @@ type Account{ # GraphQL Relationships - these fields use resolvers for data fetching # Each relationship resolver will be called when the field is requested - + # All transactions associated with this account - transactions(first: Int, after: String, last: Int, before: String): TransactionConnection - + # Optional since/until params enable TimescaleDB chunk pruning on ledger_created_at + transactions(since: Time, until: Time, first: Int, after: String, last: Int, before: String): TransactionConnection + # All operations associated with this account - operations(first: Int, after: String, last: Int, before: String): OperationConnection - + # Optional since/until params enable TimescaleDB chunk pruning on ledger_created_at + operations(since: Time, until: Time, first: Int, after: String, last: Int, before: String): OperationConnection + # All state changes associated with this account # Uses resolver to fetch related state changes # Optional filter parameter allows filtering by transaction hash and/or operation ID + # Optional since/until 
params enable TimescaleDB chunk pruning on ledger_created_at stateChanges( filter: AccountStateChangeFilterInput + since: Time, until: Time first: Int, after: String, last: Int, before: String ): StateChangeConnection } @@ -2495,28 +2499,64 @@ var parsedSchema = gqlparser.MustLoadSchema(sources...) func (ec *executionContext) field_Account_operations_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field_Account_operations_argsFirst(ctx, rawArgs) + arg0, err := ec.field_Account_operations_argsSince(ctx, rawArgs) if err != nil { return nil, err } - args["first"] = arg0 - arg1, err := ec.field_Account_operations_argsAfter(ctx, rawArgs) + args["since"] = arg0 + arg1, err := ec.field_Account_operations_argsUntil(ctx, rawArgs) if err != nil { return nil, err } - args["after"] = arg1 - arg2, err := ec.field_Account_operations_argsLast(ctx, rawArgs) + args["until"] = arg1 + arg2, err := ec.field_Account_operations_argsFirst(ctx, rawArgs) if err != nil { return nil, err } - args["last"] = arg2 - arg3, err := ec.field_Account_operations_argsBefore(ctx, rawArgs) + args["first"] = arg2 + arg3, err := ec.field_Account_operations_argsAfter(ctx, rawArgs) if err != nil { return nil, err } - args["before"] = arg3 + args["after"] = arg3 + arg4, err := ec.field_Account_operations_argsLast(ctx, rawArgs) + if err != nil { + return nil, err + } + args["last"] = arg4 + arg5, err := ec.field_Account_operations_argsBefore(ctx, rawArgs) + if err != nil { + return nil, err + } + args["before"] = arg5 return args, nil } +func (ec *executionContext) field_Account_operations_argsSince( + ctx context.Context, + rawArgs map[string]any, +) (*time.Time, error) { + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("since")) + if tmp, ok := rawArgs["since"]; ok { + return ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp) + } + + var zeroVal *time.Time + return zeroVal, nil +} + +func (ec *executionContext) 
field_Account_operations_argsUntil( + ctx context.Context, + rawArgs map[string]any, +) (*time.Time, error) { + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("until")) + if tmp, ok := rawArgs["until"]; ok { + return ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp) + } + + var zeroVal *time.Time + return zeroVal, nil +} + func (ec *executionContext) field_Account_operations_argsFirst( ctx context.Context, rawArgs map[string]any, @@ -2577,26 +2617,36 @@ func (ec *executionContext) field_Account_stateChanges_args(ctx context.Context, return nil, err } args["filter"] = arg0 - arg1, err := ec.field_Account_stateChanges_argsFirst(ctx, rawArgs) + arg1, err := ec.field_Account_stateChanges_argsSince(ctx, rawArgs) + if err != nil { + return nil, err + } + args["since"] = arg1 + arg2, err := ec.field_Account_stateChanges_argsUntil(ctx, rawArgs) if err != nil { return nil, err } - args["first"] = arg1 - arg2, err := ec.field_Account_stateChanges_argsAfter(ctx, rawArgs) + args["until"] = arg2 + arg3, err := ec.field_Account_stateChanges_argsFirst(ctx, rawArgs) if err != nil { return nil, err } - args["after"] = arg2 - arg3, err := ec.field_Account_stateChanges_argsLast(ctx, rawArgs) + args["first"] = arg3 + arg4, err := ec.field_Account_stateChanges_argsAfter(ctx, rawArgs) if err != nil { return nil, err } - args["last"] = arg3 - arg4, err := ec.field_Account_stateChanges_argsBefore(ctx, rawArgs) + args["after"] = arg4 + arg5, err := ec.field_Account_stateChanges_argsLast(ctx, rawArgs) if err != nil { return nil, err } - args["before"] = arg4 + args["last"] = arg5 + arg6, err := ec.field_Account_stateChanges_argsBefore(ctx, rawArgs) + if err != nil { + return nil, err + } + args["before"] = arg6 return args, nil } func (ec *executionContext) field_Account_stateChanges_argsFilter( @@ -2612,6 +2662,32 @@ func (ec *executionContext) field_Account_stateChanges_argsFilter( return zeroVal, nil } +func (ec *executionContext) field_Account_stateChanges_argsSince( + ctx 
context.Context, + rawArgs map[string]any, +) (*time.Time, error) { + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("since")) + if tmp, ok := rawArgs["since"]; ok { + return ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp) + } + + var zeroVal *time.Time + return zeroVal, nil +} + +func (ec *executionContext) field_Account_stateChanges_argsUntil( + ctx context.Context, + rawArgs map[string]any, +) (*time.Time, error) { + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("until")) + if tmp, ok := rawArgs["until"]; ok { + return ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp) + } + + var zeroVal *time.Time + return zeroVal, nil +} + func (ec *executionContext) field_Account_stateChanges_argsFirst( ctx context.Context, rawArgs map[string]any, @@ -2667,28 +2743,64 @@ func (ec *executionContext) field_Account_stateChanges_argsBefore( func (ec *executionContext) field_Account_transactions_args(ctx context.Context, rawArgs map[string]any) (map[string]any, error) { var err error args := map[string]any{} - arg0, err := ec.field_Account_transactions_argsFirst(ctx, rawArgs) + arg0, err := ec.field_Account_transactions_argsSince(ctx, rawArgs) if err != nil { return nil, err } - args["first"] = arg0 - arg1, err := ec.field_Account_transactions_argsAfter(ctx, rawArgs) + args["since"] = arg0 + arg1, err := ec.field_Account_transactions_argsUntil(ctx, rawArgs) if err != nil { return nil, err } - args["after"] = arg1 - arg2, err := ec.field_Account_transactions_argsLast(ctx, rawArgs) + args["until"] = arg1 + arg2, err := ec.field_Account_transactions_argsFirst(ctx, rawArgs) if err != nil { return nil, err } - args["last"] = arg2 - arg3, err := ec.field_Account_transactions_argsBefore(ctx, rawArgs) + args["first"] = arg2 + arg3, err := ec.field_Account_transactions_argsAfter(ctx, rawArgs) if err != nil { return nil, err } - args["before"] = arg3 + args["after"] = arg3 + arg4, err := ec.field_Account_transactions_argsLast(ctx, rawArgs) + if err != nil { + return nil, err + } + 
args["last"] = arg4 + arg5, err := ec.field_Account_transactions_argsBefore(ctx, rawArgs) + if err != nil { + return nil, err + } + args["before"] = arg5 return args, nil } +func (ec *executionContext) field_Account_transactions_argsSince( + ctx context.Context, + rawArgs map[string]any, +) (*time.Time, error) { + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("since")) + if tmp, ok := rawArgs["since"]; ok { + return ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp) + } + + var zeroVal *time.Time + return zeroVal, nil +} + +func (ec *executionContext) field_Account_transactions_argsUntil( + ctx context.Context, + rawArgs map[string]any, +) (*time.Time, error) { + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField("until")) + if tmp, ok := rawArgs["until"]; ok { + return ec.unmarshalOTime2ᚖtimeᚐTime(ctx, tmp) + } + + var zeroVal *time.Time + return zeroVal, nil +} + func (ec *executionContext) field_Account_transactions_argsFirst( ctx context.Context, rawArgs map[string]any, @@ -3545,7 +3657,7 @@ func (ec *executionContext) _Account_transactions(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Account().Transactions(rctx, obj, fc.Args["first"].(*int32), fc.Args["after"].(*string), fc.Args["last"].(*int32), fc.Args["before"].(*string)) + return ec.resolvers.Account().Transactions(rctx, obj, fc.Args["since"].(*time.Time), fc.Args["until"].(*time.Time), fc.Args["first"].(*int32), fc.Args["after"].(*string), fc.Args["last"].(*int32), fc.Args["before"].(*string)) }) if err != nil { ec.Error(ctx, err) @@ -3603,7 +3715,7 @@ func (ec *executionContext) _Account_operations(ctx context.Context, field graph }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Account().Operations(rctx, obj, 
fc.Args["first"].(*int32), fc.Args["after"].(*string), fc.Args["last"].(*int32), fc.Args["before"].(*string)) + return ec.resolvers.Account().Operations(rctx, obj, fc.Args["since"].(*time.Time), fc.Args["until"].(*time.Time), fc.Args["first"].(*int32), fc.Args["after"].(*string), fc.Args["last"].(*int32), fc.Args["before"].(*string)) }) if err != nil { ec.Error(ctx, err) @@ -3661,7 +3773,7 @@ func (ec *executionContext) _Account_stateChanges(ctx context.Context, field gra }() resTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (any, error) { ctx = rctx // use context from middleware stack in children - return ec.resolvers.Account().StateChanges(rctx, obj, fc.Args["filter"].(*AccountStateChangeFilterInput), fc.Args["first"].(*int32), fc.Args["after"].(*string), fc.Args["last"].(*int32), fc.Args["before"].(*string)) + return ec.resolvers.Account().StateChanges(rctx, obj, fc.Args["filter"].(*AccountStateChangeFilterInput), fc.Args["since"].(*time.Time), fc.Args["until"].(*time.Time), fc.Args["first"].(*int32), fc.Args["after"].(*string), fc.Args["last"].(*int32), fc.Args["before"].(*string)) }) if err != nil { ec.Error(ctx, err) @@ -20492,6 +20604,24 @@ func (ec *executionContext) marshalOString2ᚖstring(ctx context.Context, sel as return res } +func (ec *executionContext) unmarshalOTime2ᚖtimeᚐTime(ctx context.Context, v any) (*time.Time, error) { + if v == nil { + return nil, nil + } + res, err := graphql.UnmarshalTime(v) + return &res, graphql.ErrorOnPath(ctx, err) +} + +func (ec *executionContext) marshalOTime2ᚖtimeᚐTime(ctx context.Context, sel ast.SelectionSet, v *time.Time) graphql.Marshaler { + if v == nil { + return graphql.Null + } + _ = sel + _ = ctx + res := graphql.MarshalTime(*v) + return res +} + func (ec *executionContext) marshalOTransaction2ᚖgithubᚗcomᚋstellarᚋwalletᚑbackendᚋinternalᚋindexerᚋtypesᚐTransaction(ctx context.Context, sel ast.SelectionSet, v *types.Transaction) graphql.Marshaler { if v == nil { return graphql.Null diff 
--git a/internal/serve/graphql/resolvers/account.resolvers.go b/internal/serve/graphql/resolvers/account.resolvers.go index 42cce7eb..f185bd6d 100644 --- a/internal/serve/graphql/resolvers/account.resolvers.go +++ b/internal/serve/graphql/resolvers/account.resolvers.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "strings" + "time" "github.com/stellar/wallet-backend/internal/indexer/types" graphql1 "github.com/stellar/wallet-backend/internal/serve/graphql/generated" @@ -22,21 +23,26 @@ func (r *accountResolver) Address(ctx context.Context, obj *types.Account) (stri // This is a field resolver - it resolves the "transactions" field on an Account object // gqlgen calls this when a GraphQL query requests the transactions field on an Account // Field resolvers receive the parent object (Account) and return the field value -func (r *accountResolver) Transactions(ctx context.Context, obj *types.Account, first *int32, after *string, last *int32, before *string) (*graphql1.TransactionConnection, error) { - params, err := parsePaginationParams(first, after, last, before, false) +func (r *accountResolver) Transactions(ctx context.Context, obj *types.Account, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) (*graphql1.TransactionConnection, error) { + params, err := parsePaginationParams(first, after, last, before, CursorTypeComposite) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } queryLimit := *params.Limit + 1 // +1 to check if there is a next page + timeRange, err := buildTimeRange(since, until) + if err != nil { + return nil, err + } + dbColumns := GetDBColumnsForFields(ctx, types.Transaction{}) - transactions, err := r.models.Transactions.BatchGetByAccountAddress(ctx, string(obj.StellarAddress), strings.Join(dbColumns, ", "), &queryLimit, params.Cursor, params.SortOrder) + transactions, err := r.models.Transactions.BatchGetByAccountAddress(ctx, string(obj.StellarAddress), strings.Join(dbColumns, ", 
"), &queryLimit, params.CompositeCursor, params.SortOrder, timeRange) if err != nil { return nil, fmt.Errorf("getting transactions from db for account %s: %w", obj.StellarAddress, err) } - conn := NewConnectionWithRelayPagination(transactions, params, func(tx *types.TransactionWithCursor) int64 { - return tx.Cursor + conn := NewConnectionWithRelayPagination(transactions, params, func(tx *types.TransactionWithCursor) string { + return fmt.Sprintf("%d:%d", tx.Cursor.LedgerCreatedAt.UnixNano(), tx.Cursor.ID) }) edges := make([]*graphql1.TransactionEdge, len(conn.Edges)) @@ -55,21 +61,26 @@ func (r *accountResolver) Transactions(ctx context.Context, obj *types.Account, // Operations is the resolver for the operations field. // This field resolver handles the "operations" field on an Account object -func (r *accountResolver) Operations(ctx context.Context, obj *types.Account, first *int32, after *string, last *int32, before *string) (*graphql1.OperationConnection, error) { - params, err := parsePaginationParams(first, after, last, before, false) +func (r *accountResolver) Operations(ctx context.Context, obj *types.Account, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) (*graphql1.OperationConnection, error) { + params, err := parsePaginationParams(first, after, last, before, CursorTypeComposite) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } queryLimit := *params.Limit + 1 // +1 to check if there is a next page + timeRange, err := buildTimeRange(since, until) + if err != nil { + return nil, err + } + dbColumns := GetDBColumnsForFields(ctx, types.Operation{}) - operations, err := r.models.Operations.BatchGetByAccountAddress(ctx, string(obj.StellarAddress), strings.Join(dbColumns, ", "), &queryLimit, params.Cursor, params.SortOrder) + operations, err := r.models.Operations.BatchGetByAccountAddress(ctx, string(obj.StellarAddress), strings.Join(dbColumns, ", "), &queryLimit, 
params.CompositeCursor, params.SortOrder, timeRange) if err != nil { return nil, fmt.Errorf("getting operations from db for account %s: %w", obj.StellarAddress, err) } - conn := NewConnectionWithRelayPagination(operations, params, func(op *types.OperationWithCursor) int64 { - return op.Cursor + conn := NewConnectionWithRelayPagination(operations, params, func(op *types.OperationWithCursor) string { + return fmt.Sprintf("%d:%d", op.Cursor.LedgerCreatedAt.UnixNano(), op.Cursor.ID) }) edges := make([]*graphql1.OperationEdge, len(conn.Edges)) @@ -87,8 +98,8 @@ func (r *accountResolver) Operations(ctx context.Context, obj *types.Account, fi } // StateChanges is the resolver for the stateChanges field. -func (r *accountResolver) StateChanges(ctx context.Context, obj *types.Account, filter *graphql1.AccountStateChangeFilterInput, first *int32, after *string, last *int32, before *string) (*graphql1.StateChangeConnection, error) { - params, err := parsePaginationParams(first, after, last, before, true) +func (r *accountResolver) StateChanges(ctx context.Context, obj *types.Account, filter *graphql1.AccountStateChangeFilterInput, since *time.Time, until *time.Time, first *int32, after *string, last *int32, before *string) (*graphql1.StateChangeConnection, error) { + params, err := parsePaginationParams(first, after, last, before, CursorTypeStateChange) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } @@ -114,15 +125,20 @@ func (r *accountResolver) StateChanges(ctx context.Context, obj *types.Account, } } + timeRange, err := buildTimeRange(since, until) + if err != nil { + return nil, err + } + dbColumns := GetDBColumnsForFields(ctx, types.StateChange{}) - stateChanges, err := r.models.StateChanges.BatchGetByAccountAddress(ctx, string(obj.StellarAddress), txHash, operationID, category, reason, strings.Join(dbColumns, ", "), &queryLimit, params.StateChangeCursor, params.SortOrder) + stateChanges, err := 
r.models.StateChanges.BatchGetByAccountAddress(ctx, string(obj.StellarAddress), txHash, operationID, category, reason, strings.Join(dbColumns, ", "), &queryLimit, params.StateChangeCursor, params.SortOrder, timeRange) if err != nil { return nil, fmt.Errorf("getting state changes from db for account %s: %w", obj.StellarAddress, err) } convertedStateChanges := convertStateChangeToBaseStateChange(stateChanges) conn := NewConnectionWithRelayPagination(convertedStateChanges, params, func(sc *baseStateChangeWithCursor) string { - return fmt.Sprintf("%d:%d:%d", sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) + return fmt.Sprintf("%d:%d:%d:%d", sc.cursor.LedgerCreatedAt.UnixNano(), sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) }) edges := make([]*graphql1.StateChangeEdge, len(conn.Edges)) diff --git a/internal/serve/graphql/resolvers/account_resolvers_test.go b/internal/serve/graphql/resolvers/account_resolvers_test.go index 609786c5..90b9b4bf 100644 --- a/internal/serve/graphql/resolvers/account_resolvers_test.go +++ b/internal/serve/graphql/resolvers/account_resolvers_test.go @@ -4,6 +4,7 @@ import ( "encoding/base64" "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -42,7 +43,7 @@ func TestAccountResolver_Transactions(t *testing.T) { t.Run("get all transactions", func(t *testing.T) { ctx := getTestCtx("transactions", []string{"hash"}) - transactions, err := resolver.Transactions(ctx, parentAccount, nil, nil, nil, nil) + transactions, err := resolver.Transactions(ctx, parentAccount, nil, nil, nil, nil, nil, nil) require.NoError(t, err) require.Len(t, transactions.Edges, 4) @@ -56,7 +57,7 @@ func TestAccountResolver_Transactions(t *testing.T) { t.Run("get transactions with first/after limit and cursor", func(t *testing.T) { ctx := getTestCtx("transactions", []string{"hash"}) first := int32(2) - txs, err := resolver.Transactions(ctx, parentAccount, &first, nil, nil, nil) + txs, err := 
resolver.Transactions(ctx, parentAccount, nil, nil, &first, nil, nil, nil) require.NoError(t, err) assert.Len(t, txs.Edges, 2) assert.Equal(t, testTxHash1, txs.Edges[0].Node.Hash.String()) @@ -67,7 +68,7 @@ func TestAccountResolver_Transactions(t *testing.T) { // Get the next cursor nextCursor := txs.PageInfo.EndCursor assert.NotNil(t, nextCursor) - txs, err = resolver.Transactions(ctx, parentAccount, &first, nextCursor, nil, nil) + txs, err = resolver.Transactions(ctx, parentAccount, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) assert.Len(t, txs.Edges, 2) assert.Equal(t, testTxHash3, txs.Edges[0].Node.Hash.String()) @@ -80,7 +81,7 @@ func TestAccountResolver_Transactions(t *testing.T) { t.Run("get transactions with last/before limit and cursor", func(t *testing.T) { ctx := getTestCtx("transactions", []string{"hash"}) last := int32(2) - txs, err := resolver.Transactions(ctx, parentAccount, nil, nil, &last, nil) + txs, err := resolver.Transactions(ctx, parentAccount, nil, nil, nil, nil, &last, nil) require.NoError(t, err) assert.Len(t, txs.Edges, 2) assert.Equal(t, testTxHash3, txs.Edges[0].Node.Hash.String()) @@ -88,21 +89,21 @@ func TestAccountResolver_Transactions(t *testing.T) { assert.False(t, txs.PageInfo.HasNextPage) assert.True(t, txs.PageInfo.HasPreviousPage) - // Get the next cursor + // Get the next cursor (going backward, use StartCursor per Relay spec) last = int32(1) - nextCursor := txs.PageInfo.EndCursor + nextCursor := txs.PageInfo.StartCursor assert.NotNil(t, nextCursor) - txs, err = resolver.Transactions(ctx, parentAccount, nil, nil, &last, nextCursor) + txs, err = resolver.Transactions(ctx, parentAccount, nil, nil, nil, nil, &last, nextCursor) require.NoError(t, err) assert.Len(t, txs.Edges, 1) assert.Equal(t, testTxHash2, txs.Edges[0].Node.Hash.String()) assert.True(t, txs.PageInfo.HasNextPage) assert.True(t, txs.PageInfo.HasPreviousPage) - nextCursor = txs.PageInfo.EndCursor + nextCursor = txs.PageInfo.StartCursor 
assert.NotNil(t, nextCursor) last = int32(10) - txs, err = resolver.Transactions(ctx, parentAccount, nil, nil, &last, nextCursor) + txs, err = resolver.Transactions(ctx, parentAccount, nil, nil, nil, nil, &last, nextCursor) require.NoError(t, err) assert.Len(t, txs.Edges, 1) assert.Equal(t, testTxHash1, txs.Edges[0].Node.Hash.String()) @@ -114,7 +115,7 @@ func TestAccountResolver_Transactions(t *testing.T) { t.Run("account with no transactions", func(t *testing.T) { nonExistentAccount := &types.Account{StellarAddress: types.AddressBytea(sharedNonExistentAccountAddress)} ctx := getTestCtx("transactions", []string{"hash"}) - transactions, err := resolver.Transactions(ctx, nonExistentAccount, nil, nil, nil, nil) + transactions, err := resolver.Transactions(ctx, nonExistentAccount, nil, nil, nil, nil, nil, nil) require.NoError(t, err) assert.Empty(t, transactions.Edges) @@ -127,24 +128,24 @@ func TestAccountResolver_Transactions(t *testing.T) { last := int32(1) after := encodeCursor(int64(4)) before := encodeCursor(int64(1)) - _, err := resolver.Transactions(ctx, parentAccount, &first, &after, nil, nil) + _, err := resolver.Transactions(ctx, parentAccount, nil, nil, &first, &after, nil, nil) require.Error(t, err) assert.Contains(t, err.Error(), "validating pagination params: first must be greater than 0") first = int32(1) - _, err = resolver.Transactions(ctx, parentAccount, &first, nil, &last, nil) + _, err = resolver.Transactions(ctx, parentAccount, nil, nil, &first, nil, &last, nil) require.Error(t, err) assert.Contains(t, err.Error(), "validating pagination params: first and last cannot be used together") - _, err = resolver.Transactions(ctx, parentAccount, nil, &after, nil, &before) + _, err = resolver.Transactions(ctx, parentAccount, nil, nil, nil, &after, nil, &before) require.Error(t, err) assert.Contains(t, err.Error(), "validating pagination params: after and before cannot be used together") - _, err = resolver.Transactions(ctx, parentAccount, &first, nil, 
nil, &before) + _, err = resolver.Transactions(ctx, parentAccount, nil, nil, &first, nil, nil, &before) require.Error(t, err) assert.Contains(t, err.Error(), "validating pagination params: first and before cannot be used together") - _, err = resolver.Transactions(ctx, parentAccount, nil, &after, &last, nil) + _, err = resolver.Transactions(ctx, parentAccount, nil, nil, nil, &after, &last, nil) require.Error(t, err) assert.Contains(t, err.Error(), "validating pagination params: last and after cannot be used together") }) @@ -169,7 +170,7 @@ func TestAccountResolver_Operations(t *testing.T) { t.Run("get all operations", func(t *testing.T) { ctx := getTestCtx("operations", []string{"operation_xdr"}) - operations, err := resolver.Operations(ctx, parentAccount, nil, nil, nil, nil) + operations, err := resolver.Operations(ctx, parentAccount, nil, nil, nil, nil, nil, nil) require.NoError(t, err) require.Len(t, operations.Edges, 8) @@ -182,7 +183,7 @@ func TestAccountResolver_Operations(t *testing.T) { t.Run("get operations with first/after limit and cursor", func(t *testing.T) { ctx := getTestCtx("operations", []string{"operation_xdr"}) first := int32(2) - ops, err := resolver.Operations(ctx, parentAccount, &first, nil, nil, nil) + ops, err := resolver.Operations(ctx, parentAccount, nil, nil, &first, nil, nil, nil) require.NoError(t, err) assert.Len(t, ops.Edges, 2) assert.Equal(t, testOpXDRAcc(1), ops.Edges[0].Node.OperationXDR.String()) @@ -193,7 +194,7 @@ func TestAccountResolver_Operations(t *testing.T) { // Get the next cursor nextCursor := ops.PageInfo.EndCursor assert.NotNil(t, nextCursor) - ops, err = resolver.Operations(ctx, parentAccount, &first, nextCursor, nil, nil) + ops, err = resolver.Operations(ctx, parentAccount, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) assert.Len(t, ops.Edges, 2) assert.Equal(t, testOpXDRAcc(3), ops.Edges[0].Node.OperationXDR.String()) @@ -204,7 +205,7 @@ func TestAccountResolver_Operations(t *testing.T) { first = 
int32(10) nextCursor = ops.PageInfo.EndCursor assert.NotNil(t, nextCursor) - ops, err = resolver.Operations(ctx, parentAccount, &first, nextCursor, nil, nil) + ops, err = resolver.Operations(ctx, parentAccount, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) assert.Len(t, ops.Edges, 4) assert.Equal(t, testOpXDRAcc(5), ops.Edges[0].Node.OperationXDR.String()) @@ -218,7 +219,7 @@ func TestAccountResolver_Operations(t *testing.T) { t.Run("get operations with last/before limit and cursor", func(t *testing.T) { ctx := getTestCtx("operations", []string{"operation_xdr"}) last := int32(2) - ops, err := resolver.Operations(ctx, parentAccount, nil, nil, &last, nil) + ops, err := resolver.Operations(ctx, parentAccount, nil, nil, nil, nil, &last, nil) require.NoError(t, err) assert.Len(t, ops.Edges, 2) assert.Equal(t, testOpXDRAcc(7), ops.Edges[0].Node.OperationXDR.String()) @@ -226,10 +227,10 @@ func TestAccountResolver_Operations(t *testing.T) { assert.True(t, ops.PageInfo.HasPreviousPage) assert.False(t, ops.PageInfo.HasNextPage) - // Get the next cursor - nextCursor := ops.PageInfo.EndCursor + // Get the next cursor (going backward, use StartCursor per Relay spec) + nextCursor := ops.PageInfo.StartCursor assert.NotNil(t, nextCursor) - ops, err = resolver.Operations(ctx, parentAccount, nil, nil, &last, nextCursor) + ops, err = resolver.Operations(ctx, parentAccount, nil, nil, nil, nil, &last, nextCursor) require.NoError(t, err) assert.Len(t, ops.Edges, 2) assert.Equal(t, testOpXDRAcc(5), ops.Edges[0].Node.OperationXDR.String()) @@ -237,10 +238,10 @@ func TestAccountResolver_Operations(t *testing.T) { assert.True(t, ops.PageInfo.HasNextPage) assert.True(t, ops.PageInfo.HasPreviousPage) - nextCursor = ops.PageInfo.EndCursor + nextCursor = ops.PageInfo.StartCursor assert.NotNil(t, nextCursor) last = int32(10) - ops, err = resolver.Operations(ctx, parentAccount, nil, nil, &last, nextCursor) + ops, err = resolver.Operations(ctx, parentAccount, nil, nil, nil, nil, 
&last, nextCursor) require.NoError(t, err) assert.Len(t, ops.Edges, 4) assert.Equal(t, testOpXDRAcc(1), ops.Edges[0].Node.OperationXDR.String()) @@ -254,7 +255,7 @@ func TestAccountResolver_Operations(t *testing.T) { t.Run("account with no operations", func(t *testing.T) { nonExistentAccount := &types.Account{StellarAddress: types.AddressBytea(sharedNonExistentAccountAddress)} ctx := getTestCtx("operations", []string{"id"}) - operations, err := resolver.Operations(ctx, nonExistentAccount, nil, nil, nil, nil) + operations, err := resolver.Operations(ctx, nonExistentAccount, nil, nil, nil, nil, nil, nil) require.NoError(t, err) assert.Empty(t, operations.Edges) @@ -280,7 +281,7 @@ func TestAccountResolver_StateChanges(t *testing.T) { t.Run("get all state changes", func(t *testing.T) { ctx := getTestCtx("state_changes", []string{""}) - stateChanges, err := resolver.StateChanges(ctx, parentAccount, nil, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, nil, nil, nil, nil, nil, nil, nil) require.NoError(t, err) require.Len(t, stateChanges.Edges, 20) @@ -326,7 +327,7 @@ func TestAccountResolver_StateChanges(t *testing.T) { tx2Op2ID := toid.New(1000, 2, 2).ToInt64() first := int32(3) - stateChanges, err := resolver.StateChanges(ctx, parentAccount, nil, &first, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, nil, nil, nil, &first, nil, nil, nil) require.NoError(t, err) assert.Len(t, stateChanges.Edges, 3) @@ -352,7 +353,7 @@ func TestAccountResolver_StateChanges(t *testing.T) { // Get the next cursor nextCursor := stateChanges.PageInfo.EndCursor assert.NotNil(t, nextCursor) - stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, &first, nextCursor, nil, nil) + stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) assert.Len(t, stateChanges.Edges, 3) @@ -379,7 +380,7 @@ func TestAccountResolver_StateChanges(t 
*testing.T) { first = int32(100) nextCursor = stateChanges.PageInfo.EndCursor assert.NotNil(t, nextCursor) - stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, &first, nextCursor, nil, nil) + stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) assert.Len(t, stateChanges.Edges, 14) @@ -420,7 +421,7 @@ func TestAccountResolver_StateChanges(t *testing.T) { tx4Op2ID := toid.New(1000, 4, 2).ToInt64() last := int32(3) - stateChanges, err := resolver.StateChanges(ctx, parentAccount, nil, nil, nil, &last, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, nil, nil, nil, nil, nil, &last, nil) require.NoError(t, err) assert.Len(t, stateChanges.Edges, 3) @@ -443,10 +444,10 @@ func TestAccountResolver_StateChanges(t *testing.T) { assert.True(t, stateChanges.PageInfo.HasPreviousPage) assert.False(t, stateChanges.PageInfo.HasNextPage) - // Get the next cursor (going backward) - nextCursor := stateChanges.PageInfo.EndCursor + // Get the next cursor (going backward, use StartCursor per Relay spec) + nextCursor := stateChanges.PageInfo.StartCursor assert.NotNil(t, nextCursor) - stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, nil, nil, &last, nextCursor) + stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, nil, nil, nil, nil, &last, nextCursor) require.NoError(t, err) assert.Len(t, stateChanges.Edges, 3) @@ -469,10 +470,10 @@ func TestAccountResolver_StateChanges(t *testing.T) { assert.True(t, stateChanges.PageInfo.HasNextPage) assert.True(t, stateChanges.PageInfo.HasPreviousPage) - nextCursor = stateChanges.PageInfo.EndCursor + nextCursor = stateChanges.PageInfo.StartCursor assert.NotNil(t, nextCursor) last = int32(100) - stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, nil, nil, &last, nextCursor) + stateChanges, err = resolver.StateChanges(ctx, parentAccount, nil, nil, nil, nil, nil, &last, nextCursor) 
require.NoError(t, err) assert.Len(t, stateChanges.Edges, 14) @@ -499,7 +500,7 @@ func TestAccountResolver_StateChanges(t *testing.T) { t.Run("account with no state changes", func(t *testing.T) { nonExistentAccount := &types.Account{StellarAddress: types.AddressBytea(sharedNonExistentAccountAddress)} ctx := getTestCtx("state_changes", []string{"to_id", "state_change_order"}) - stateChanges, err := resolver.StateChanges(ctx, nonExistentAccount, nil, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, nonExistentAccount, nil, nil, nil, nil, nil, nil, nil) require.NoError(t, err) assert.Empty(t, stateChanges.Edges) @@ -529,7 +530,7 @@ func TestAccountResolver_StateChanges_WithFilters(t *testing.T) { filter := &graphql1.AccountStateChangeFilterInput{ TransactionHash: &txHash, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) // tx1 has 3 operations (0, 1, 2), each operation has 2 state changes except op 0 (1 state change) @@ -578,7 +579,7 @@ func TestAccountResolver_StateChanges_WithFilters(t *testing.T) { filter := &graphql1.AccountStateChangeFilterInput{ OperationID: &opID, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) // Operation 1 has 2 state changes @@ -605,7 +606,7 @@ func TestAccountResolver_StateChanges_WithFilters(t *testing.T) { TransactionHash: &txHash, OperationID: &opID, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) // Only state changes that match both filters @@ -630,7 +631,7 @@ func 
TestAccountResolver_StateChanges_WithFilters(t *testing.T) { filter := &graphql1.AccountStateChangeFilterInput{ TransactionHash: &txHash, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) require.Empty(t, stateChanges.Edges) @@ -651,7 +652,7 @@ func TestAccountResolver_StateChanges_WithFilters(t *testing.T) { // Get first 2 state changes first := int32(2) - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, &first, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, &first, nil, nil, nil) require.NoError(t, err) require.Len(t, stateChanges.Edges, 2) @@ -672,7 +673,7 @@ func TestAccountResolver_StateChanges_WithFilters(t *testing.T) { // Get next page nextCursor := stateChanges.PageInfo.EndCursor assert.NotNil(t, nextCursor) - stateChanges, err = resolver.StateChanges(ctx, parentAccount, filter, &first, nextCursor, nil, nil) + stateChanges, err = resolver.StateChanges(ctx, parentAccount, filter, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) require.Len(t, stateChanges.Edges, 2) @@ -693,7 +694,7 @@ func TestAccountResolver_StateChanges_WithFilters(t *testing.T) { // Get final page nextCursor = stateChanges.PageInfo.EndCursor assert.NotNil(t, nextCursor) - stateChanges, err = resolver.StateChanges(ctx, parentAccount, filter, &first, nextCursor, nil, nil) + stateChanges, err = resolver.StateChanges(ctx, parentAccount, filter, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) require.Len(t, stateChanges.Edges, 1) @@ -731,7 +732,7 @@ func TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { filter := &graphql1.AccountStateChangeFilterInput{ Category: &category, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := 
resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) // Verify all returned state changes are BALANCE category for _, sc := range stateChanges.Edges { @@ -745,7 +746,7 @@ func TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { filter := &graphql1.AccountStateChangeFilterInput{ Reason: &reason, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) // Verify all returned state changes are CREDIT reason @@ -762,7 +763,7 @@ func TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { Category: &category, Reason: &reason, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) // Verify all returned state changes are SIGNER category and ADD reason @@ -785,7 +786,7 @@ func TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { Category: &category, Reason: &reason, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) // Verify all returned state changes have correct IDs, category and reason @@ -807,7 +808,7 @@ func TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { // Get first 2 state changes first := int32(2) - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, &first, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, &first, nil, nil, nil) require.NoError(t, err) require.Len(t, stateChanges.Edges, 2) assert.True(t, stateChanges.PageInfo.HasNextPage) @@ -819,7 +820,7 @@ func 
TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { // Get next page nextCursor := stateChanges.PageInfo.EndCursor assert.NotNil(t, nextCursor) - stateChanges, err = resolver.StateChanges(ctx, parentAccount, filter, &first, nextCursor, nil, nil) + stateChanges, err = resolver.StateChanges(ctx, parentAccount, filter, nil, nil, &first, nextCursor, nil, nil) require.NoError(t, err) assert.LessOrEqual(t, len(stateChanges.Edges), 2) for _, sc := range stateChanges.Edges { @@ -833,7 +834,7 @@ func TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { filter := &graphql1.AccountStateChangeFilterInput{ Category: &category, } - stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil) + stateChanges, err := resolver.StateChanges(ctx, parentAccount, filter, nil, nil, nil, nil, nil, nil) require.NoError(t, err) require.Empty(t, stateChanges.Edges) @@ -841,3 +842,170 @@ func TestAccountResolver_StateChanges_WithCategoryReasonFilters(t *testing.T) { assert.False(t, stateChanges.PageInfo.HasPreviousPage) }) } + +func TestAccountResolver_Transactions_WithTimeRange(t *testing.T) { + parentAccount := &types.Account{StellarAddress: types.AddressBytea(sharedTestAccountAddress)} + + mockMetricsService := &metrics.MockMetricsService{} + mockMetricsService.On("IncDBQuery", "BatchGetByAccountAddress", "transactions").Return() + mockMetricsService.On("ObserveDBQueryDuration", "BatchGetByAccountAddress", "transactions", mock.Anything).Return() + defer mockMetricsService.AssertExpectations(t) + + resolver := &accountResolver{&Resolver{ + models: &data.Models{ + Transactions: &data.TransactionModel{ + DB: testDBConnectionPool, + MetricsService: mockMetricsService, + }, + }, + }} + + t.Run("since in the past returns all transactions", func(t *testing.T) { + ctx := getTestCtx("transactions", []string{"hash"}) + pastTime := time.Now().Add(-24 * time.Hour) + txs, err := resolver.Transactions(ctx, parentAccount, 
&pastTime, nil, nil, nil, nil, nil) + require.NoError(t, err) + assert.Len(t, txs.Edges, 4) + }) + + t.Run("since in the future returns no transactions", func(t *testing.T) { + ctx := getTestCtx("transactions", []string{"hash"}) + futureTime := time.Now().Add(24 * time.Hour) + txs, err := resolver.Transactions(ctx, parentAccount, &futureTime, nil, nil, nil, nil, nil) + require.NoError(t, err) + assert.Empty(t, txs.Edges) + }) + + t.Run("until in the past returns no transactions", func(t *testing.T) { + ctx := getTestCtx("transactions", []string{"hash"}) + pastTime := time.Now().Add(-24 * time.Hour) + txs, err := resolver.Transactions(ctx, parentAccount, nil, &pastTime, nil, nil, nil, nil) + require.NoError(t, err) + assert.Empty(t, txs.Edges) + }) + + t.Run("until in the future returns all transactions", func(t *testing.T) { + ctx := getTestCtx("transactions", []string{"hash"}) + futureTime := time.Now().Add(24 * time.Hour) + txs, err := resolver.Transactions(ctx, parentAccount, nil, &futureTime, nil, nil, nil, nil) + require.NoError(t, err) + assert.Len(t, txs.Edges, 4) + }) + + t.Run("until before since returns error", func(t *testing.T) { + ctx := getTestCtx("transactions", []string{"hash"}) + since := time.Now() + until := since.Add(-1 * time.Hour) + _, err := resolver.Transactions(ctx, parentAccount, &since, &until, nil, nil, nil, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "until must not be before since") + }) + + t.Run("time range combined with pagination", func(t *testing.T) { + ctx := getTestCtx("transactions", []string{"hash"}) + pastTime := time.Now().Add(-24 * time.Hour) + first := int32(2) + txs, err := resolver.Transactions(ctx, parentAccount, &pastTime, nil, &first, nil, nil, nil) + require.NoError(t, err) + assert.Len(t, txs.Edges, 2) + assert.True(t, txs.PageInfo.HasNextPage) + }) +} + +func TestAccountResolver_Operations_WithTimeRange(t *testing.T) { + parentAccount := &types.Account{StellarAddress: 
types.AddressBytea(sharedTestAccountAddress)} + + mockMetricsService := &metrics.MockMetricsService{} + mockMetricsService.On("IncDBQuery", "BatchGetByAccountAddress", "operations").Return() + mockMetricsService.On("ObserveDBQueryDuration", "BatchGetByAccountAddress", "operations", mock.Anything).Return() + defer mockMetricsService.AssertExpectations(t) + + resolver := &accountResolver{&Resolver{ + models: &data.Models{ + Operations: &data.OperationModel{ + DB: testDBConnectionPool, + MetricsService: mockMetricsService, + }, + }, + }} + + t.Run("since in the past returns all operations", func(t *testing.T) { + ctx := getTestCtx("operations", []string{"operation_xdr"}) + pastTime := time.Now().Add(-24 * time.Hour) + ops, err := resolver.Operations(ctx, parentAccount, &pastTime, nil, nil, nil, nil, nil) + require.NoError(t, err) + assert.Len(t, ops.Edges, 8) + }) + + t.Run("since in the future returns no operations", func(t *testing.T) { + ctx := getTestCtx("operations", []string{"operation_xdr"}) + futureTime := time.Now().Add(24 * time.Hour) + ops, err := resolver.Operations(ctx, parentAccount, &futureTime, nil, nil, nil, nil, nil) + require.NoError(t, err) + assert.Empty(t, ops.Edges) + }) + + t.Run("until before since returns error", func(t *testing.T) { + ctx := getTestCtx("operations", []string{"operation_xdr"}) + since := time.Now() + until := since.Add(-1 * time.Hour) + _, err := resolver.Operations(ctx, parentAccount, &since, &until, nil, nil, nil, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "until must not be before since") + }) +} + +func TestAccountResolver_StateChanges_WithTimeRange(t *testing.T) { + parentAccount := &types.Account{StellarAddress: types.AddressBytea(sharedTestAccountAddress)} + + mockMetricsService := &metrics.MockMetricsService{} + mockMetricsService.On("IncDBQuery", "BatchGetByAccountAddress", "state_changes").Return() + mockMetricsService.On("ObserveDBQueryDuration", "BatchGetByAccountAddress", "state_changes", 
mock.Anything).Return() + defer mockMetricsService.AssertExpectations(t) + + resolver := &accountResolver{&Resolver{ + models: &data.Models{ + StateChanges: &data.StateChangeModel{ + DB: testDBConnectionPool, + MetricsService: mockMetricsService, + }, + }, + }} + + t.Run("since in the past returns all state changes", func(t *testing.T) { + ctx := getTestCtx("state_changes", []string{""}) + pastTime := time.Now().Add(-24 * time.Hour) + sc, err := resolver.StateChanges(ctx, parentAccount, nil, &pastTime, nil, nil, nil, nil, nil) + require.NoError(t, err) + assert.Len(t, sc.Edges, 20) + }) + + t.Run("since in the future returns no state changes", func(t *testing.T) { + ctx := getTestCtx("state_changes", []string{""}) + futureTime := time.Now().Add(24 * time.Hour) + sc, err := resolver.StateChanges(ctx, parentAccount, nil, &futureTime, nil, nil, nil, nil, nil) + require.NoError(t, err) + assert.Empty(t, sc.Edges) + }) + + t.Run("until before since returns error", func(t *testing.T) { + ctx := getTestCtx("state_changes", []string{""}) + since := time.Now() + until := since.Add(-1 * time.Hour) + _, err := resolver.StateChanges(ctx, parentAccount, nil, &since, &until, nil, nil, nil, nil) + require.Error(t, err) + assert.Contains(t, err.Error(), "until must not be before since") + }) + + t.Run("time range combined with filter", func(t *testing.T) { + ctx := getTestCtx("state_changes", []string{""}) + pastTime := time.Now().Add(-24 * time.Hour) + txHash := testTxHash1 + filter := &graphql1.AccountStateChangeFilterInput{ + TransactionHash: &txHash, + } + sc, err := resolver.StateChanges(ctx, parentAccount, filter, &pastTime, nil, nil, nil, nil, nil) + require.NoError(t, err) + assert.Len(t, sc.Edges, 5) + }) +} diff --git a/internal/serve/graphql/resolvers/operation.resolvers.go b/internal/serve/graphql/resolvers/operation.resolvers.go index 4097f59c..ca994d78 100644 --- a/internal/serve/graphql/resolvers/operation.resolvers.go +++ 
b/internal/serve/graphql/resolvers/operation.resolvers.go @@ -73,7 +73,7 @@ func (r *operationResolver) Accounts(ctx context.Context, obj *types.Operation) // Field resolvers receive the parent object (Operation) and return the field value func (r *operationResolver) StateChanges(ctx context.Context, obj *types.Operation, first *int32, after *string, last *int32, before *string) (*graphql1.StateChangeConnection, error) { dbColumns := GetDBColumnsForFields(ctx, types.StateChange{}) - params, err := parsePaginationParams(first, after, last, before, true) + params, err := parsePaginationParams(first, after, last, before, CursorTypeStateChange) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } @@ -95,7 +95,7 @@ func (r *operationResolver) StateChanges(ctx context.Context, obj *types.Operati convertedStateChanges := convertStateChangeToBaseStateChange(stateChanges) conn := NewConnectionWithRelayPagination(convertedStateChanges, params, func(sc *baseStateChangeWithCursor) string { - return fmt.Sprintf("%d:%d:%d", sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) + return fmt.Sprintf("%d:%d:%d:%d", sc.cursor.LedgerCreatedAt.UnixNano(), sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) }) edges := make([]*graphql1.StateChangeEdge, len(conn.Edges)) diff --git a/internal/serve/graphql/resolvers/queries.resolvers.go b/internal/serve/graphql/resolvers/queries.resolvers.go index d6596ea2..de99037a 100644 --- a/internal/serve/graphql/resolvers/queries.resolvers.go +++ b/internal/serve/graphql/resolvers/queries.resolvers.go @@ -32,20 +32,20 @@ func (r *queryResolver) TransactionByHash(ctx context.Context, hash string) (*ty // This resolver handles the "transactions" query. 
// It demonstrates handling optional arguments (limit can be nil) func (r *queryResolver) Transactions(ctx context.Context, first *int32, after *string, last *int32, before *string) (*graphql1.TransactionConnection, error) { - params, err := parsePaginationParams(first, after, last, before, false) + params, err := parsePaginationParams(first, after, last, before, CursorTypeComposite) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } queryLimit := *params.Limit + 1 // +1 to check if there is a next page dbColumns := GetDBColumnsForFields(ctx, types.Transaction{}) - transactions, err := r.models.Transactions.GetAll(ctx, strings.Join(dbColumns, ", "), &queryLimit, params.Cursor, params.SortOrder) + transactions, err := r.models.Transactions.GetAll(ctx, strings.Join(dbColumns, ", "), &queryLimit, params.CompositeCursor, params.SortOrder) if err != nil { return nil, fmt.Errorf("getting transactions from db: %w", err) } - conn := NewConnectionWithRelayPagination(transactions, params, func(t *types.TransactionWithCursor) int64 { - return t.Cursor + conn := NewConnectionWithRelayPagination(transactions, params, func(t *types.TransactionWithCursor) string { + return fmt.Sprintf("%d:%d", t.Cursor.LedgerCreatedAt.UnixNano(), t.Cursor.ID) }) edges := make([]*graphql1.TransactionEdge, len(conn.Edges)) @@ -73,20 +73,20 @@ func (r *queryResolver) AccountByAddress(ctx context.Context, address string) (* // Operations is the resolver for the operations field. // This resolver handles the "operations" query. 
func (r *queryResolver) Operations(ctx context.Context, first *int32, after *string, last *int32, before *string) (*graphql1.OperationConnection, error) { - params, err := parsePaginationParams(first, after, last, before, false) + params, err := parsePaginationParams(first, after, last, before, CursorTypeComposite) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } queryLimit := *params.Limit + 1 // +1 to check if there is a next page dbColumns := GetDBColumnsForFields(ctx, types.Operation{}) - operations, err := r.models.Operations.GetAll(ctx, strings.Join(dbColumns, ", "), &queryLimit, params.Cursor, params.SortOrder) + operations, err := r.models.Operations.GetAll(ctx, strings.Join(dbColumns, ", "), &queryLimit, params.CompositeCursor, params.SortOrder) if err != nil { return nil, fmt.Errorf("getting operations from db: %w", err) } - conn := NewConnectionWithRelayPagination(operations, params, func(o *types.OperationWithCursor) int64 { - return o.Cursor + conn := NewConnectionWithRelayPagination(operations, params, func(o *types.OperationWithCursor) string { + return fmt.Sprintf("%d:%d", o.Cursor.LedgerCreatedAt.UnixNano(), o.Cursor.ID) }) edges := make([]*graphql1.OperationEdge, len(conn.Edges)) @@ -111,7 +111,7 @@ func (r *queryResolver) OperationByID(ctx context.Context, id int64) (*types.Ope // StateChanges is the resolver for the stateChanges field. 
func (r *queryResolver) StateChanges(ctx context.Context, first *int32, after *string, last *int32, before *string) (*graphql1.StateChangeConnection, error) { - params, err := parsePaginationParams(first, after, last, before, true) + params, err := parsePaginationParams(first, after, last, before, CursorTypeStateChange) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } @@ -125,7 +125,7 @@ func (r *queryResolver) StateChanges(ctx context.Context, first *int32, after *s convertedStateChanges := convertStateChangeToBaseStateChange(stateChanges) conn := NewConnectionWithRelayPagination(convertedStateChanges, params, func(sc *baseStateChangeWithCursor) string { - return fmt.Sprintf("%d:%d:%d", sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) + return fmt.Sprintf("%d:%d:%d:%d", sc.cursor.LedgerCreatedAt.UnixNano(), sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) }) edges := make([]*graphql1.StateChangeEdge, len(conn.Edges)) diff --git a/internal/serve/graphql/resolvers/queries_resolvers_test.go b/internal/serve/graphql/resolvers/queries_resolvers_test.go index d41db20e..c4ef1073 100644 --- a/internal/serve/graphql/resolvers/queries_resolvers_test.go +++ b/internal/serve/graphql/resolvers/queries_resolvers_test.go @@ -144,9 +144,9 @@ func TestQueryResolver_Transactions(t *testing.T) { assert.False(t, txs.PageInfo.HasNextPage) assert.True(t, txs.PageInfo.HasPreviousPage) - // Get the next cursor + // Get the next cursor (going backward, use StartCursor per Relay spec) last = int32(1) - nextCursor := txs.PageInfo.EndCursor + nextCursor := txs.PageInfo.StartCursor assert.NotNil(t, nextCursor) txs, err = resolver.Transactions(ctx, nil, nil, &last, nextCursor) require.NoError(t, err) @@ -155,7 +155,7 @@ func TestQueryResolver_Transactions(t *testing.T) { assert.True(t, txs.PageInfo.HasNextPage) assert.True(t, txs.PageInfo.HasPreviousPage) - nextCursor = txs.PageInfo.EndCursor + nextCursor = 
txs.PageInfo.StartCursor assert.NotNil(t, nextCursor) last = int32(10) txs, err = resolver.Transactions(ctx, nil, nil, &last, nextCursor) @@ -326,9 +326,9 @@ func TestQueryResolver_Operations(t *testing.T) { assert.False(t, ops.PageInfo.HasNextPage) assert.True(t, ops.PageInfo.HasPreviousPage) - // Get the previous page + // Get the previous page (use StartCursor per Relay spec) last = int32(1) - prevCursor := ops.PageInfo.EndCursor + prevCursor := ops.PageInfo.StartCursor assert.NotNil(t, prevCursor) ops, err = resolver.Operations(ctx, nil, nil, &last, prevCursor) require.NoError(t, err) @@ -337,7 +337,7 @@ func TestQueryResolver_Operations(t *testing.T) { assert.True(t, ops.PageInfo.HasNextPage) assert.True(t, ops.PageInfo.HasPreviousPage) - prevCursor = ops.PageInfo.EndCursor + prevCursor = ops.PageInfo.StartCursor assert.NotNil(t, prevCursor) last = int32(10) ops, err = resolver.Operations(ctx, nil, nil, &last, prevCursor) @@ -607,8 +607,8 @@ func TestQueryResolver_StateChanges(t *testing.T) { assert.False(t, stateChanges.PageInfo.HasNextPage) assert.True(t, stateChanges.PageInfo.HasPreviousPage) - // Get the previous page - prevCursor := stateChanges.PageInfo.EndCursor + // Get the previous page (use StartCursor per Relay spec) + prevCursor := stateChanges.PageInfo.StartCursor assert.NotNil(t, prevCursor) stateChanges, err = resolver.StateChanges(ctx, nil, nil, &last, prevCursor) require.NoError(t, err) @@ -628,8 +628,8 @@ func TestQueryResolver_StateChanges(t *testing.T) { assert.True(t, stateChanges.PageInfo.HasNextPage) assert.True(t, stateChanges.PageInfo.HasPreviousPage) - // Get more previous items - prevCursor = stateChanges.PageInfo.EndCursor + // Get more previous items (use StartCursor per Relay spec) + prevCursor = stateChanges.PageInfo.StartCursor assert.NotNil(t, prevCursor) last = int32(20) stateChanges, err = resolver.StateChanges(ctx, nil, nil, &last, prevCursor) diff --git a/internal/serve/graphql/resolvers/resolver.go 
b/internal/serve/graphql/resolvers/resolver.go index af66197b..35472ab2 100644 --- a/internal/serve/graphql/resolvers/resolver.go +++ b/internal/serve/graphql/resolvers/resolver.go @@ -140,22 +140,15 @@ func (r *Resolver) resolveRequiredJSONBField(field interface{}) (string, error) // Shared resolver functions for BaseStateChange interface // These functions provide common logic that all state change types can use -// resolveStateChangeAccount resolves the account field for any state change type -// Since state changes have a direct account_id reference, we can fetch the account directly -func (r *Resolver) resolveStateChangeAccount(ctx context.Context, toID int64, operationID int64, stateChangeOrder int64) (*types.Account, error) { - loaders := ctx.Value(middleware.LoadersKey).(*dataloaders.Dataloaders) - dbColumns := GetDBColumnsForFields(ctx, types.Account{}) - - stateChangeID := fmt.Sprintf("%d-%d-%d", toID, operationID, stateChangeOrder) - loaderKey := dataloaders.AccountColumnsKey{ - StateChangeID: stateChangeID, - Columns: strings.Join(dbColumns, ", "), - } - account, err := loaders.AccountByStateChangeIDLoader.Load(ctx, loaderKey) - if err != nil { - return nil, fmt.Errorf("loading account for state change %s: %w", stateChangeID, err) - } - return account, nil +// resolveStateChangeAccount resolves the account field for any state change type. +// Uses the already-populated AccountID from the state change row to avoid a re-query. 
+func (r *Resolver) resolveStateChangeAccount(accountID types.AddressBytea) (*types.Account, error) { + if accountID == "" { + return nil, fmt.Errorf("state change has no account_id") + } + return &types.Account{ + StellarAddress: accountID, + }, nil } // resolveStateChangeOperation resolves the operation field for any state change type diff --git a/internal/serve/graphql/resolvers/statechange.resolvers.go b/internal/serve/graphql/resolvers/statechange.resolvers.go index c32d8125..15db70ed 100644 --- a/internal/serve/graphql/resolvers/statechange.resolvers.go +++ b/internal/serve/graphql/resolvers/statechange.resolvers.go @@ -24,7 +24,7 @@ func (r *accountChangeResolver) Reason(ctx context.Context, obj *types.AccountSt // Account is the resolver for the account field. func (r *accountChangeResolver) Account(ctx context.Context, obj *types.AccountStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -54,7 +54,7 @@ func (r *balanceAuthorizationChangeResolver) Reason(ctx context.Context, obj *ty // Account is the resolver for the account field. func (r *balanceAuthorizationChangeResolver) Account(ctx context.Context, obj *types.BalanceAuthorizationStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -98,7 +98,7 @@ func (r *flagsChangeResolver) Reason(ctx context.Context, obj *types.FlagsStateC // Account is the resolver for the account field. 
func (r *flagsChangeResolver) Account(ctx context.Context, obj *types.FlagsStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -132,7 +132,7 @@ func (r *metadataChangeResolver) Reason(ctx context.Context, obj *types.Metadata // Account is the resolver for the account field. func (r *metadataChangeResolver) Account(ctx context.Context, obj *types.MetadataStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -162,7 +162,7 @@ func (r *reservesChangeResolver) Reason(ctx context.Context, obj *types.Reserves // Account is the resolver for the account field. func (r *reservesChangeResolver) Account(ctx context.Context, obj *types.ReservesStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -217,7 +217,7 @@ func (r *signerChangeResolver) Reason(ctx context.Context, obj *types.SignerStat // Account is the resolver for the account field. func (r *signerChangeResolver) Account(ctx context.Context, obj *types.SignerStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -258,7 +258,7 @@ func (r *signerThresholdsChangeResolver) Reason(ctx context.Context, obj *types. // Account is the resolver for the account field. 
func (r *signerThresholdsChangeResolver) Account(ctx context.Context, obj *types.SignerThresholdsStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -291,7 +291,7 @@ func (r *standardBalanceChangeResolver) Reason(ctx context.Context, obj *types.S // Account is the resolver for the account field. func (r *standardBalanceChangeResolver) Account(ctx context.Context, obj *types.StandardBalanceStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. @@ -326,7 +326,7 @@ func (r *trustlineChangeResolver) Reason(ctx context.Context, obj *types.Trustli // Account is the resolver for the account field. func (r *trustlineChangeResolver) Account(ctx context.Context, obj *types.TrustlineStateChangeModel) (*types.Account, error) { - return r.resolveStateChangeAccount(ctx, obj.ToID, obj.OperationID, obj.StateChangeOrder) + return r.resolveStateChangeAccount(obj.AccountID) } // Operation is the resolver for the operation field. 
diff --git a/internal/serve/graphql/resolvers/statechange_resolvers_test.go b/internal/serve/graphql/resolvers/statechange_resolvers_test.go index 65c1e19e..a918593b 100644 --- a/internal/serve/graphql/resolvers/statechange_resolvers_test.go +++ b/internal/serve/graphql/resolvers/statechange_resolvers_test.go @@ -224,34 +224,16 @@ func TestStateChangeResolver_TypedFields(t *testing.T) { } func TestStateChangeResolver_Account(t *testing.T) { - mockMetricsService := &metrics.MockMetricsService{} - mockMetricsService.On("IncDBQuery", "BatchGetByStateChangeIDs", "state_changes").Return() - mockMetricsService.On("ObserveDBQueryDuration", "BatchGetByStateChangeIDs", "state_changes", mock.Anything).Return() - mockMetricsService.On("ObserveDBBatchSize", "BatchGetByStateChangeIDs", "state_changes", mock.Anything).Return() - defer mockMetricsService.AssertExpectations(t) - - resolver := &standardBalanceChangeResolver{&Resolver{ - models: &data.Models{ - Account: &data.AccountModel{ - DB: testDBConnectionPool, - MetricsService: mockMetricsService, - }, - }, - }} - opID := toid.New(1000, 1, 1).ToInt64() - txToID := opID &^ 0xFFF // Derive transaction to_id from operation_id using TOID bitmask - parentSC := types.StandardBalanceStateChangeModel{ - StateChange: types.StateChange{ - ToID: txToID, - OperationID: opID, - StateChangeOrder: 1, - StateChangeCategory: types.StateChangeCategoryBalance, - }, - } + resolver := &standardBalanceChangeResolver{&Resolver{}} t.Run("success", func(t *testing.T) { - loaders := dataloaders.NewDataloaders(resolver.models) - ctx := context.WithValue(getTestCtx("accounts", []string{""}), middleware.LoadersKey, loaders) + parentSC := types.StandardBalanceStateChangeModel{ + StateChange: types.StateChange{ + AccountID: types.AddressBytea(sharedTestAccountAddress), + StateChangeCategory: types.StateChangeCategoryBalance, + }, + } + ctx := context.Background() account, err := resolver.Account(ctx, &parentSC) require.NoError(t, err) @@ -259,29 +241,26 @@ 
func TestStateChangeResolver_Account(t *testing.T) { }) t.Run("nil state change panics", func(t *testing.T) { - loaders := dataloaders.NewDataloaders(resolver.models) - ctx := context.WithValue(getTestCtx("accounts", []string{""}), middleware.LoadersKey, loaders) + ctx := context.Background() assert.Panics(t, func() { _, _ = resolver.Account(ctx, nil) //nolint:errcheck }) }) - t.Run("state change with non-existent account", func(t *testing.T) { - nonExistentSC := types.StandardBalanceStateChangeModel{ + t.Run("state change with empty account_id returns error", func(t *testing.T) { + emptySC := types.StandardBalanceStateChangeModel{ StateChange: types.StateChange{ - ToID: 9999, - OperationID: 0, - StateChangeOrder: 1, + AccountID: "", StateChangeCategory: types.StateChangeCategoryBalance, }, } - loaders := dataloaders.NewDataloaders(resolver.models) - ctx := context.WithValue(getTestCtx("accounts", []string{""}), middleware.LoadersKey, loaders) + ctx := context.Background() - account, err := resolver.Account(ctx, &nonExistentSC) - require.NoError(t, err) // Dataloader returns nil, not error for missing data + account, err := resolver.Account(ctx, &emptySC) + require.Error(t, err) assert.Nil(t, account) + assert.Contains(t, err.Error(), "state change has no account_id") }) } diff --git a/internal/serve/graphql/resolvers/test_utils.go b/internal/serve/graphql/resolvers/test_utils.go index b4357a49..f6e6b70e 100644 --- a/internal/serve/graphql/resolvers/test_utils.go +++ b/internal/serve/graphql/resolvers/test_utils.go @@ -65,6 +65,7 @@ var ( func setupDB(ctx context.Context, t *testing.T, dbConnectionPool db.ConnectionPool) { testLedger := int32(1000) + now := time.Now() parentAccount := &types.Account{StellarAddress: types.AddressBytea(sharedTestAccountAddress)} txns := make([]*types.Transaction, 0, 4) ops := make([]*types.Operation, 0, 8) @@ -78,7 +79,7 @@ func setupDB(ctx context.Context, t *testing.T, dbConnectionPool db.ConnectionPo ResultCode: 
"TransactionResultCodeTxSuccess", MetaXDR: ptr(fmt.Sprintf("meta%d", i+1)), LedgerNumber: 1, - LedgerCreatedAt: time.Now(), + LedgerCreatedAt: now, IsFeeBump: false, } txns = append(txns, txn) @@ -92,7 +93,7 @@ func setupDB(ctx context.Context, t *testing.T, dbConnectionPool db.ConnectionPo ResultCode: "op_success", Successful: true, LedgerNumber: 1, - LedgerCreatedAt: time.Now(), + LedgerCreatedAt: now, }) opIdx++ } @@ -121,7 +122,7 @@ func setupDB(ctx context.Context, t *testing.T, dbConnectionPool db.ConnectionPo StateChangeReason: reason, OperationID: op.ID, AccountID: parentAccount.StellarAddress, - LedgerCreatedAt: time.Now(), + LedgerCreatedAt: now, LedgerNumber: 1, }) } @@ -135,7 +136,7 @@ func setupDB(ctx context.Context, t *testing.T, dbConnectionPool db.ConnectionPo StateChangeCategory: types.StateChangeCategoryBalance, StateChangeReason: &debitReason, AccountID: parentAccount.StellarAddress, - LedgerCreatedAt: time.Now(), + LedgerCreatedAt: now, LedgerNumber: 1000, }) } diff --git a/internal/serve/graphql/resolvers/transaction.resolvers.go b/internal/serve/graphql/resolvers/transaction.resolvers.go index eeb7dec2..8542283b 100644 --- a/internal/serve/graphql/resolvers/transaction.resolvers.go +++ b/internal/serve/graphql/resolvers/transaction.resolvers.go @@ -25,7 +25,7 @@ func (r *transactionResolver) Hash(ctx context.Context, obj *types.Transaction) // It's called when a GraphQL query requests the operations within a transaction func (r *transactionResolver) Operations(ctx context.Context, obj *types.Transaction, first *int32, after *string, last *int32, before *string) (*graphql1.OperationConnection, error) { dbColumns := GetDBColumnsForFields(ctx, types.Operation{}) - params, err := parsePaginationParams(first, after, last, before, false) + params, err := parsePaginationParams(first, after, last, before, CursorTypeInt64) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } @@ -46,7 +46,7 @@ func (r *transactionResolver) 
Operations(ctx context.Context, obj *types.Transac } conn := NewConnectionWithRelayPagination(operations, params, func(o *types.OperationWithCursor) int64 { - return o.Cursor + return o.Cursor.ID }) edges := make([]*graphql1.OperationEdge, len(conn.Edges)) @@ -89,7 +89,7 @@ func (r *transactionResolver) Accounts(ctx context.Context, obj *types.Transacti // It's called when a GraphQL query requests the state changes within a transaction func (r *transactionResolver) StateChanges(ctx context.Context, obj *types.Transaction, first *int32, after *string, last *int32, before *string) (*graphql1.StateChangeConnection, error) { dbColumns := GetDBColumnsForFields(ctx, types.StateChange{}) - params, err := parsePaginationParams(first, after, last, before, true) + params, err := parsePaginationParams(first, after, last, before, CursorTypeStateChange) if err != nil { return nil, fmt.Errorf("parsing pagination params: %w", err) } @@ -111,7 +111,7 @@ func (r *transactionResolver) StateChanges(ctx context.Context, obj *types.Trans convertedStateChanges := convertStateChangeToBaseStateChange(stateChanges) conn := NewConnectionWithRelayPagination(convertedStateChanges, params, func(sc *baseStateChangeWithCursor) string { - return fmt.Sprintf("%d:%d:%d", sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) + return fmt.Sprintf("%d:%d:%d:%d", sc.cursor.LedgerCreatedAt.UnixNano(), sc.cursor.ToID, sc.cursor.OperationID, sc.cursor.StateChangeOrder) }) edges := make([]*graphql1.StateChangeEdge, len(conn.Edges)) diff --git a/internal/serve/graphql/resolvers/transaction_resolvers_test.go b/internal/serve/graphql/resolvers/transaction_resolvers_test.go index 1624bd77..0d271834 100644 --- a/internal/serve/graphql/resolvers/transaction_resolvers_test.go +++ b/internal/serve/graphql/resolvers/transaction_resolvers_test.go @@ -106,8 +106,8 @@ func TestTransactionResolver_Operations(t *testing.T) { assert.False(t, ops.PageInfo.HasNextPage) assert.True(t, 
ops.PageInfo.HasPreviousPage) - // Get the previous page using cursor - prevCursor := ops.PageInfo.EndCursor + // Get the previous page using cursor (use StartCursor per Relay spec) + prevCursor := ops.PageInfo.StartCursor assert.NotNil(t, prevCursor) ops, err = resolver.Operations(ctx, parentTx, nil, nil, &last, prevCursor) require.NoError(t, err) diff --git a/internal/serve/graphql/resolvers/utils.go b/internal/serve/graphql/resolvers/utils.go index c7b1d407..2402a3ea 100644 --- a/internal/serve/graphql/resolvers/utils.go +++ b/internal/serve/graphql/resolvers/utils.go @@ -7,6 +7,7 @@ import ( "reflect" "strconv" "strings" + "time" "github.com/99designs/gqlgen/graphql" @@ -31,9 +32,22 @@ type GenericConnection[T any] struct { PageInfo *generated.PageInfo } +// CursorType determines how pagination cursors are parsed and interpreted. +type CursorType int + +const ( + // CursorTypeInt64 is used for within-transaction nested resolvers (e.g., operations by ToID) + CursorTypeInt64 CursorType = iota + // CursorTypeComposite is used for account-level and root tx/ops queries (ledger_created_at:id format) + CursorTypeComposite + // CursorTypeStateChange is used for state change queries (ledger_created_at:to_id:op_id:sc_order format) + CursorTypeStateChange +) + type PaginationParams struct { Limit *int32 Cursor *int64 + CompositeCursor *types.CompositeCursor StateChangeCursor *types.StateChangeCursor ForwardPagination bool SortOrder data.SortOrder @@ -49,19 +63,21 @@ func NewConnectionWithRelayPagination[T any, C int64 | string](nodes []T, params hasNextPage := false hasPreviousPage := false + hasCursor := params.Cursor != nil || params.CompositeCursor != nil || params.StateChangeCursor != nil + if params.ForwardPagination { if int32(len(nodes)) > *params.Limit { hasNextPage = true nodes = nodes[:*params.Limit] } - hasPreviousPage = (params.Cursor != nil || params.StateChangeCursor != nil) + hasPreviousPage = hasCursor } else { if int32(len(nodes)) > *params.Limit { 
hasPreviousPage = true nodes = nodes[1:] } // In backward pagination, presence of a before-cursor implies there may be newer items (a "next page") - hasNextPage = (params.Cursor != nil || params.StateChangeCursor != nil) + hasNextPage = hasCursor } edges := make([]*GenericEdge[T], len(nodes)) @@ -75,11 +91,7 @@ func NewConnectionWithRelayPagination[T any, C int64 | string](nodes []T, params var startCursor, endCursor *string if len(edges) > 0 { startCursor = &edges[0].Cursor - if params.ForwardPagination { - endCursor = &edges[len(edges)-1].Cursor - } else { - endCursor = &edges[0].Cursor - } + endCursor = &edges[len(edges)-1].Cursor } pageInfo := &generated.PageInfo{ @@ -273,7 +285,7 @@ func getColumnMap(model any) map[string]string { return fieldToColumnMap } -func parsePaginationParams(first *int32, after *string, last *int32, before *string, isStateChange bool) (PaginationParams, error) { +func parsePaginationParams(first *int32, after *string, last *int32, before *string, cursorType CursorType) (PaginationParams, error) { err := validatePaginationParams(first, after, last, before) if err != nil { return PaginationParams{}, fmt.Errorf("validating pagination params: %w", err) @@ -299,13 +311,20 @@ func parsePaginationParams(first *int32, after *string, last *int32, before *str ForwardPagination: forwardPagination, } - if isStateChange { + switch cursorType { + case CursorTypeStateChange: stateChangeCursor, err := parseStateChangeCursor(cursor) if err != nil { return PaginationParams{}, fmt.Errorf("parsing state change cursor: %w", err) } paginationParams.StateChangeCursor = stateChangeCursor - } else { + case CursorTypeComposite: + compositeCursor, err := parseCompositeCursor(cursor) + if err != nil { + return PaginationParams{}, fmt.Errorf("parsing composite cursor: %w", err) + } + paginationParams.CompositeCursor = compositeCursor + default: decodedCursor, err := decodeInt64Cursor(cursor) if err != nil { return PaginationParams{}, fmt.Errorf("decoding cursor: 
%w", err) @@ -327,32 +346,82 @@ func parseStateChangeCursor(s *string) (*types.StateChangeCursor, error) { } parts := strings.Split(*decodedCursor, ":") - if len(parts) != 3 { - return nil, fmt.Errorf("invalid cursor format: %s (expected format: to_id:operation_id:state_change_order)", *s) + if len(parts) != 4 { + return nil, fmt.Errorf("invalid cursor format: %s (expected format: ledger_created_at_nano:to_id:operation_id:state_change_order)", *s) } - toID, err := strconv.ParseInt(parts[0], 10, 64) + nanos, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing ledger_created_at: %w", err) + } + + toID, err := strconv.ParseInt(parts[1], 10, 64) if err != nil { return nil, fmt.Errorf("parsing to_id: %w", err) } - operationID, err := strconv.ParseInt(parts[1], 10, 64) + operationID, err := strconv.ParseInt(parts[2], 10, 64) if err != nil { return nil, fmt.Errorf("parsing operation_id: %w", err) } - stateChangeOrder, err := strconv.ParseInt(parts[2], 10, 64) + stateChangeOrder, err := strconv.ParseInt(parts[3], 10, 64) if err != nil { return nil, fmt.Errorf("parsing state_change_order: %w", err) } return &types.StateChangeCursor{ + LedgerCreatedAt: time.Unix(0, nanos), ToID: toID, OperationID: operationID, StateChangeOrder: stateChangeOrder, }, nil } +func parseCompositeCursor(s *string) (*types.CompositeCursor, error) { + if s == nil { + return nil, nil + } + + decodedCursor, err := decodeStringCursor(s) + if err != nil { + return nil, fmt.Errorf("decoding cursor: %w", err) + } + + parts := strings.Split(*decodedCursor, ":") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid cursor format: %s (expected format: ledger_created_at_nano:id)", *s) + } + + nanos, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing ledger_created_at: %w", err) + } + + id, err := strconv.ParseInt(parts[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("parsing id: %w", err) + } + + return 
&types.CompositeCursor{ + LedgerCreatedAt: time.Unix(0, nanos), + ID: id, + }, nil +} + +// buildTimeRange constructs a *data.TimeRange from optional since/until params. +// Returns an error if both are provided and until is before since. +// Returns nil if both are nil (no time range filtering). +func buildTimeRange(since *time.Time, until *time.Time) (*data.TimeRange, error) { + if since == nil && until == nil { + return nil, nil + } + if since != nil && until != nil && until.Before(*since) { + return nil, fmt.Errorf("until must not be before since") + } + return &data.TimeRange{Since: since, Until: until}, nil +} + func validatePaginationParams(first *int32, after *string, last *int32, before *string) error { if first != nil && last != nil { return fmt.Errorf("first and last cannot be used together") diff --git a/internal/serve/graphql/schema/account.graphqls b/internal/serve/graphql/schema/account.graphqls index cc2eba2a..818eba2d 100644 --- a/internal/serve/graphql/schema/account.graphqls +++ b/internal/serve/graphql/schema/account.graphqls @@ -5,18 +5,22 @@ type Account{ # GraphQL Relationships - these fields use resolvers for data fetching # Each relationship resolver will be called when the field is requested - + # All transactions associated with this account - transactions(first: Int, after: String, last: Int, before: String): TransactionConnection - + # Optional since/until params enable TimescaleDB chunk pruning on ledger_created_at + transactions(since: Time, until: Time, first: Int, after: String, last: Int, before: String): TransactionConnection + # All operations associated with this account - operations(first: Int, after: String, last: Int, before: String): OperationConnection - + # Optional since/until params enable TimescaleDB chunk pruning on ledger_created_at + operations(since: Time, until: Time, first: Int, after: String, last: Int, before: String): OperationConnection + # All state changes associated with this account # Uses resolver to 
fetch related state changes # Optional filter parameter allows filtering by transaction hash and/or operation ID + # Optional since/until params enable TimescaleDB chunk pruning on ledger_created_at stateChanges( filter: AccountStateChangeFilterInput + since: Time, until: Time first: Int, after: String, last: Int, before: String ): StateChangeConnection } diff --git a/pkg/wbclient/client.go b/pkg/wbclient/client.go index 2b2d1423..53197a5d 100644 --- a/pkg/wbclient/client.go +++ b/pkg/wbclient/client.go @@ -449,7 +449,7 @@ func (c *Client) GetStateChanges(ctx context.Context, first, last *int32, after, return data.StateChanges, nil } -func (c *Client) GetAccountTransactions(ctx context.Context, address string, first, last *int32, after, before *string, opts ...*QueryOptions) (*types.TransactionConnection, error) { +func (c *Client) GetAccountTransactions(ctx context.Context, address string, since, until *time.Time, first, last *int32, after, before *string, opts ...*QueryOptions) (*types.TransactionConnection, error) { var fields []string if len(opts) > 0 && opts[0] != nil { fields = opts[0].TransactionFields @@ -464,6 +464,12 @@ func (c *Client) GetAccountTransactions(ctx context.Context, address string, fir map[string]interface{}{"address": address}, paginationVars, ) + if since != nil { + variables["since"] = *since + } + if until != nil { + variables["until"] = *until + } data, err := executeGraphQL[AccountTransactionsData](c, ctx, buildAccountTransactionsQuery(fields), variables) if err != nil { @@ -473,7 +479,7 @@ func (c *Client) GetAccountTransactions(ctx context.Context, address string, fir return data.AccountByAddress.Transactions, nil } -func (c *Client) GetAccountOperations(ctx context.Context, address string, first, last *int32, after, before *string, opts ...*QueryOptions) (*types.OperationConnection, error) { +func (c *Client) GetAccountOperations(ctx context.Context, address string, since, until *time.Time, first, last *int32, after, before *string, 
opts ...*QueryOptions) (*types.OperationConnection, error) { var fields []string if len(opts) > 0 && opts[0] != nil { fields = opts[0].OperationFields @@ -488,6 +494,12 @@ func (c *Client) GetAccountOperations(ctx context.Context, address string, first map[string]interface{}{"address": address}, paginationVars, ) + if since != nil { + variables["since"] = *since + } + if until != nil { + variables["until"] = *until + } data, err := executeGraphQL[AccountOperationsData](c, ctx, buildAccountOperationsQuery(fields), variables) if err != nil { @@ -497,7 +509,7 @@ func (c *Client) GetAccountOperations(ctx context.Context, address string, first return data.AccountByAddress.Operations, nil } -func (c *Client) GetAccountStateChanges(ctx context.Context, address string, transactionHash *string, operationID *int64, category *string, reason *string, first, last *int32, after, before *string) (*types.StateChangeConnection, error) { +func (c *Client) GetAccountStateChanges(ctx context.Context, address string, transactionHash *string, operationID *int64, category *string, reason *string, since, until *time.Time, first, last *int32, after, before *string) (*types.StateChangeConnection, error) { paginationVars, err := buildPaginationVars(first, last, after, before) if err != nil { return nil, fmt.Errorf("building pagination variables: %w", err) @@ -525,6 +537,13 @@ func (c *Client) GetAccountStateChanges(ctx context.Context, address string, tra variables["filter"] = filter } + if since != nil { + variables["since"] = *since + } + if until != nil { + variables["until"] = *until + } + variables = mergeVariables(variables, paginationVars) data, err := executeGraphQL[AccountStateChangesData](c, ctx, buildAccountStateChangesQuery(), variables) diff --git a/pkg/wbclient/queries.go b/pkg/wbclient/queries.go index fd54e942..652f15b9 100644 --- a/pkg/wbclient/queries.go +++ b/pkg/wbclient/queries.go @@ -45,9 +45,7 @@ const ( ingestedAt ledgerCreatedAt ledgerNumber - account { - address - } 
+ ... on StandardBalanceChange { standardBalanceTokenId: tokenId amount @@ -224,9 +222,9 @@ func buildStateChangesQuery() string { func buildAccountTransactionsQuery(fields []string) string { fieldList := buildFieldList(fields, defaultTransactionFields) return fmt.Sprintf(` - query AccountTransactions($address: String!, $first: Int, $after: String, $last: Int, $before: String) { + query AccountTransactions($address: String!, $since: Time, $until: Time, $first: Int, $after: String, $last: Int, $before: String) { accountByAddress(address: $address) { - transactions(first: $first, after: $after, last: $last, before: $before) { + transactions(since: $since, until: $until, first: $first, after: $after, last: $last, before: $before) { edges { node { %s @@ -249,9 +247,9 @@ func buildAccountTransactionsQuery(fields []string) string { func buildAccountOperationsQuery(fields []string) string { fieldList := buildFieldList(fields, defaultOperationFields) return fmt.Sprintf(` - query AccountOperations($address: String!, $first: Int, $after: String, $last: Int, $before: String) { + query AccountOperations($address: String!, $since: Time, $until: Time, $first: Int, $after: String, $last: Int, $before: String) { accountByAddress(address: $address) { - operations(first: $first, after: $after, last: $last, before: $before) { + operations(since: $since, until: $until, first: $first, after: $after, last: $last, before: $before) { edges { node { %s @@ -274,9 +272,9 @@ func buildAccountOperationsQuery(fields []string) string { // Supports optional filtering by transaction hash and/or operation ID func buildAccountStateChangesQuery() string { return fmt.Sprintf(` - query AccountStateChanges($address: String!, $filter: AccountStateChangeFilterInput, $first: Int, $after: String, $last: Int, $before: String) { + query AccountStateChanges($address: String!, $filter: AccountStateChangeFilterInput, $since: Time, $until: Time, $first: Int, $after: String, $last: Int, $before: String) { 
accountByAddress(address: $address) { - stateChanges(filter: $filter, first: $first, after: $after, last: $last, before: $before) { + stateChanges(filter: $filter, since: $since, until: $until, first: $first, after: $after, last: $last, before: $before) { edges { node { %s diff --git a/pkg/wbclient/types/statechange.go b/pkg/wbclient/types/statechange.go index e07923d8..94e414d7 100644 --- a/pkg/wbclient/types/statechange.go +++ b/pkg/wbclient/types/statechange.go @@ -15,7 +15,6 @@ type StateChangeNode interface { GetIngestedAt() time.Time GetLedgerCreatedAt() time.Time GetLedgerNumber() uint32 - GetAccountID() string } // BaseStateChangeFields contains the common fields shared by all state change types @@ -25,7 +24,6 @@ type BaseStateChangeFields struct { IngestedAt time.Time `json:"ingestedAt"` LedgerCreatedAt time.Time `json:"ledgerCreatedAt"` LedgerNumber uint32 `json:"ledgerNumber"` - Account Account `json:"account"` } // GetType returns the state change category @@ -53,11 +51,6 @@ func (b BaseStateChangeFields) GetLedgerNumber() uint32 { return b.LedgerNumber } -// GetAccountID returns the account address -func (b BaseStateChangeFields) GetAccountID() string { - return b.Account.Address -} - // StandardBalanceChange represents a standard balance state change type StandardBalanceChange struct { BaseStateChangeFields