Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -22,3 +22,4 @@
[submodule "lib/FlowALP"]
path = lib/FlowALP
url = git@github.com:onflow/FlowALP.git
branch = v0
38 changes: 31 additions & 7 deletions cadence/contracts/FlowYieldVaultsAutoBalancers.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,16 @@ access(all) contract FlowYieldVaultsAutoBalancers {
/// The path prefix used for StoragePath & PublicPath derivations
access(all) let pathPrefix: String

/// Storage path for the shared execution callback that reports to the registry (one per account)
access(self) let registryReportCallbackStoragePath: StoragePath

/// Callback resource invoked by each AutoBalancer after execution; calls Registry.reportExecutionFromCallback with its id
access(all) resource RegistryReportCallback: DeFiActions.AutoBalancerExecutionCallback {
access(all) fun onExecuted(balancerUUID: UInt64) {
FlowYieldVaultsSchedulerRegistry.reportExecution(yieldVaultID: balancerUUID)
}
}

/* --- PUBLIC METHODS --- */

/// Returns the path (StoragePath or PublicPath) at which an AutoBalancer is stored with the associated
Expand All @@ -55,7 +65,7 @@ access(all) contract FlowYieldVaultsAutoBalancers {
if autoBalancer == nil {
return false
}

let txnIDs = autoBalancer!.getScheduledTransactionIDs()
for txnID in txnIDs {
if autoBalancer!.borrowScheduledTransaction(id: txnID)?.status() == FlowTransactionScheduler.Status.Scheduled {
Expand All @@ -79,24 +89,24 @@ access(all) contract FlowYieldVaultsAutoBalancers {
if autoBalancer == nil {
return false
}

// Check if yield vault has recurring config (should be executing periodically)
let config = autoBalancer!.getRecurringConfig()
if config == nil {
return false // Not configured for recurring, can't be "stuck"
}

// Check if there's an active schedule
if self.hasActiveSchedule(id: id) {
return false // Has active schedule, not stuck
}

// Check if yield vault is overdue
let nextExpected = autoBalancer!.calculateNextExecutionTimestampAsConfigured()
if nextExpected == nil {
return true // Can't calculate next time, likely stuck
}

// If next expected time has passed and no active schedule, yield vault is stuck
return nextExpected! < getCurrentBlock().timestamp
}
Expand Down Expand Up @@ -136,6 +146,8 @@ access(all) contract FlowYieldVaultsAutoBalancers {
assert(!publishedCap,
message: "Published Capability collision found when publishing AutoBalancer for UniqueIdentifier.id \(uniqueID.id) at path \(publicPath)")

let reportCap = self.account.capabilities.storage.issue<&{DeFiActions.AutoBalancerExecutionCallback}>(self.registryReportCallbackStoragePath)

// create & save AutoBalancer with optional recurring config
let autoBalancer <- DeFiActions.createAutoBalancer(
oracle: oracle,
Expand All @@ -147,6 +159,7 @@ access(all) contract FlowYieldVaultsAutoBalancers {
recurringConfig: recurringConfig,
uniqueID: uniqueID
)
autoBalancer.setExecutionCallback(reportCap)
self.account.storage.save(<-autoBalancer, to: storagePath)
let autoBalancerRef = self._borrowAutoBalancer(uniqueID.id)

Expand Down Expand Up @@ -210,7 +223,7 @@ access(all) contract FlowYieldVaultsAutoBalancers {
let publicPath = self.deriveAutoBalancerPath(id: id, storage: false) as! PublicPath
// unpublish the public AutoBalancer Capability
let _ = self.account.capabilities.unpublish(publicPath)

// Collect controller IDs first (can't modify during iteration)
var controllersToDelete: [UInt64] = []
self.account.capabilities.storage.forEachController(forPath: storagePath, fun(_ controller: &StorageCapabilityController): Bool {
Expand All @@ -223,13 +236,24 @@ access(all) contract FlowYieldVaultsAutoBalancers {
controller.delete()
}
}

// load & burn the AutoBalancer (this also handles any pending scheduled transactions via burnCallback)
let autoBalancer <-self.account.storage.load<@DeFiActions.AutoBalancer>(from: storagePath)
Burner.burn(<-autoBalancer)
}

access(self) fun createRegistryReportCallbackImpl(): @RegistryReportCallback {
return <-create RegistryReportCallback()
}

init() {
self.pathPrefix = "FlowYieldVaultsAutoBalancer_"
self.registryReportCallbackStoragePath = StoragePath(identifier: "FlowYieldVaultsRegistryReportCallback")!

// Ensure shared execution callback exists (reports this account's executions to Registry)
if self.account.storage.type(at: self.registryReportCallbackStoragePath) == nil {
self.account.storage.save(<-self.createRegistryReportCallbackImpl(), to: self.registryReportCallbackStoragePath)
}

}
}
60 changes: 50 additions & 10 deletions cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,11 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
/// Stored as a dictionary for O(1) add/remove; iteration gives the pending set
access(self) var pendingQueue: {UInt64: Bool}

/// Order for stuck scanning: least recently reported (executed) first.
/// Vaults call reportExecution() on each run (remove id from array, append to end).
/// Supervisor scans only the first MAX_BATCH_SIZE entries for stuck detection.
access(self) var stuckScanOrder: [UInt64]

/* --- ACCOUNT-LEVEL FUNCTIONS --- */

/// Register a YieldVault and store its handler and schedule capabilities (idempotent)
Expand All @@ -73,9 +78,27 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
self.yieldVaultRegistry[yieldVaultID] = true
self.handlerCaps[yieldVaultID] = handlerCap
self.scheduleCaps[yieldVaultID] = scheduleCap
self.stuckScanOrder.append(yieldVaultID)
emit YieldVaultRegistered(yieldVaultID: yieldVaultID)
}

/// Called by the account that holds this contract (e.g. from the wrapper) on every execution. Removes yieldVaultID from stuckScanOrder (if present)
/// and appends it to the end so the Supervisor only scans the first N (least recently executed) for stuck.
access(account) fun reportExecution(yieldVaultID: UInt64) {
if !(self.yieldVaultRegistry[yieldVaultID] ?? false) {
return
}
var i = 0
while i < self.stuckScanOrder.length {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can use firstIndex(of: T): Int? to find the element here
https://cadence-lang.org/docs/language/values-and-types/arrays#array-fields-and-functions

if self.stuckScanOrder[i] == yieldVaultID {
self.stuckScanOrder.remove(at: i)
break
}
i = i + 1
}
self.stuckScanOrder.append(yieldVaultID)
}

/// Adds a yield vault to the pending queue for seeding by the Supervisor
access(account) fun enqueuePending(yieldVaultID: UInt64) {
if self.yieldVaultRegistry[yieldVaultID] == true {
Expand All @@ -92,12 +115,20 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
}
}

/// Unregister a YieldVault (idempotent) - removes from registry, capabilities, and pending queue
/// Unregister a YieldVault (idempotent) - removes from registry, capabilities, pending queue, and stuckScanOrder
access(account) fun unregister(yieldVaultID: UInt64) {
self.yieldVaultRegistry.remove(key: yieldVaultID)
self.handlerCaps.remove(key: yieldVaultID)
self.scheduleCaps.remove(key: yieldVaultID)
let pending = self.pendingQueue.remove(key: yieldVaultID)
var i = 0
while i < self.stuckScanOrder.length {
if self.stuckScanOrder[i] == yieldVaultID {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same here

self.stuckScanOrder.remove(at: i)
break
}
i = i + 1
}
emit YieldVaultUnregistered(yieldVaultID: yieldVaultID, wasInPendingQueue: pending != nil)
}

Expand Down Expand Up @@ -156,19 +187,19 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
/// Get paginated pending yield vault IDs
/// @param page: The page number (0-indexed)
/// @param size: The page size (defaults to MAX_BATCH_SIZE if nil)
access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: Int?): [UInt64] {
let pageSize = size ?? self.MAX_BATCH_SIZE
access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt?): [UInt64] {
let pageSize = size ?? Int(self.MAX_BATCH_SIZE)
Comment on lines +190 to +191

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt?): [UInt64] {
let pageSize = size ?? Int(self.MAX_BATCH_SIZE)
access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt): [UInt64] {
let pageSize = size == 0 ? Int(self.MAX_BATCH_SIZE) : Int(size)

main no longer replaces 0 with nil, so the behaviour of defaulting to MAX_BATCH_SIZE on 0 should either be moved here (and its other occurrence updated, and the doc comment above) or added back to main

let allPending = self.pendingQueue.keys
let startIndex = page * pageSize
let startIndex = page * Int(pageSize)

if startIndex >= allPending.length {
return []
}
let endIndex = startIndex + pageSize > allPending.length
? allPending.length
: startIndex + pageSize

let endIndex = startIndex + Int(pageSize) > allPending.length
? allPending.length
: startIndex + Int(pageSize)

return allPending.slice(from: startIndex, upTo: endIndex)
}

Expand All @@ -177,6 +208,14 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
return self.pendingQueue.length
}

/// Returns the first n yield vault IDs from the stuck-scan order (least recently executed first).
/// Supervisor should only scan these for stuck detection instead of all registered vaults.
/// @param limit: Maximum number of IDs to return (caller typically passes MAX_BATCH_SIZE)
access(all) view fun getStuckScanCandidates(limit: UInt): [UInt64] {
let end = limit > UInt(self.stuckScanOrder.length) ? self.stuckScanOrder.length : limit
return self.stuckScanOrder.slice(from: 0, upTo: Int(end))
}

/// Get global Supervisor capability, if set
/// NOTE: Access restricted - only used internally by the scheduler
access(account)
Expand All @@ -193,6 +232,7 @@ access(all) contract FlowYieldVaultsSchedulerRegistry {
self.handlerCaps = {}
self.scheduleCaps = {}
self.pendingQueue = {}
self.stuckScanOrder = []
}
}

Expand Down
24 changes: 4 additions & 20 deletions cadence/contracts/FlowYieldVaultsSchedulerV1.cdc
Original file line number Diff line number Diff line change
Expand Up @@ -186,24 +186,8 @@ access(all) contract FlowYieldVaultsSchedulerV1 {

// STEP 1: State-based detection - scan for stuck yield vaults
if scanForStuck {
// TODO: add pagination - this will inevitably fails and at minimum creates inconsistent execution
// effort between runs
let registeredYieldVaults = FlowYieldVaultsSchedulerRegistry.getRegisteredYieldVaultIDs()
var scanned = 0
for yieldVaultID in registeredYieldVaults {
if scanned >= FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE {
break
}
scanned = scanned + 1

// Skip if already in pending queue
// TODO: This is extremely inefficient - accessing from mapping is preferrable to iterating over
// an array
if FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDs().contains(yieldVaultID) {
continue
}

// Check if yield vault is stuck (has recurring config, no active schedule, overdue)
let candidates = FlowYieldVaultsSchedulerRegistry.getStuckScanCandidates(limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE))
for yieldVaultID in candidates {
if FlowYieldVaultsAutoBalancers.isStuckYieldVault(id: yieldVaultID) {
FlowYieldVaultsSchedulerRegistry.enqueuePending(yieldVaultID: yieldVaultID)
emit StuckYieldVaultDetected(yieldVaultID: yieldVaultID)
Expand All @@ -213,7 +197,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 {

// STEP 2: Process pending yield vaults - recover them via Schedule capability
let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: nil)

for yieldVaultID in pendingYieldVaults {
// Get Schedule capability for this yield vault
let scheduleCap = FlowYieldVaultsSchedulerRegistry.getScheduleCap(yieldVaultID: yieldVaultID)
Expand Down Expand Up @@ -457,7 +441,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 {

// Initialize paths
self.SupervisorStoragePath = /storage/FlowYieldVaultsSupervisor

// Configure Supervisor at deploy time
self.ensureSupervisorConfigured()
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,7 @@ import "FlowYieldVaultsSchedulerRegistry"
/// Returns a paginated list of yield vault IDs in the pending queue.
/// @param page: The page number (0-indexed)
/// @param size: The number of yield vaults per page (defaults to MAX_BATCH_SIZE if 0)
access(all) fun main(page: Int, size: Int): [UInt64] {
let pageSize: Int? = size > 0 ? size : nil
return FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: page, size: pageSize)
access(all) fun main(page: Int, size: UInt): [UInt64] {
return FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: page, size: size)

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Either update getPendingYieldVaultIDsPaginated or add back the default check here

}

Loading
Loading