From e1ffc9f71c88e0c9bf1f8cfca120310c91c6da76 Mon Sep 17 00:00:00 2001 From: Patrick Fuchs Date: Wed, 25 Feb 2026 21:14:46 +0100 Subject: [PATCH 1/6] Fix supervisor unstuck vaults added autobalancer callback to find potentially stuck vaults --- .../FlowYieldVaultsAutoBalancers.cdc | 38 +++++- .../FlowYieldVaultsSchedulerRegistry.cdc | 60 +++++++-- .../contracts/FlowYieldVaultsSchedulerV1.cdc | 24 +--- cadence/tests/scheduled_supervisor_test.cdc | 127 ++++++++++++++++++ 4 files changed, 212 insertions(+), 37 deletions(-) diff --git a/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc b/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc index 8748dd8e..b46da441 100644 --- a/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc +++ b/cadence/contracts/FlowYieldVaultsAutoBalancers.cdc @@ -29,6 +29,16 @@ access(all) contract FlowYieldVaultsAutoBalancers { /// The path prefix used for StoragePath & PublicPath derivations access(all) let pathPrefix: String + /// Storage path for the shared execution callback that reports to the registry (one per account) + access(self) let registryReportCallbackStoragePath: StoragePath + + /// Callback resource invoked by each AutoBalancer after execution; calls Registry.reportExecutionFromCallback with its id + access(all) resource RegistryReportCallback: DeFiActions.AutoBalancerExecutionCallback { + access(all) fun onExecuted(balancerUUID: UInt64) { + FlowYieldVaultsSchedulerRegistry.reportExecution(yieldVaultID: balancerUUID) + } + } + /* --- PUBLIC METHODS --- */ /// Returns the path (StoragePath or PublicPath) at which an AutoBalancer is stored with the associated @@ -69,7 +79,7 @@ access(all) contract FlowYieldVaultsAutoBalancers { if autoBalancer == nil { return false } - + let txnIDs = autoBalancer!.getScheduledTransactionIDs() for txnID in txnIDs { if autoBalancer!.borrowScheduledTransaction(id: txnID)?.status() == FlowTransactionScheduler.Status.Scheduled { @@ -93,24 +103,24 @@ access(all) contract FlowYieldVaultsAutoBalancers { if 
autoBalancer == nil { return false } - + // Check if yield vault has recurring config (should be executing periodically) let config = autoBalancer!.getRecurringConfig() if config == nil { return false // Not configured for recurring, can't be "stuck" } - + // Check if there's an active schedule if self.hasActiveSchedule(id: id) { return false // Has active schedule, not stuck } - + // Check if yield vault is overdue let nextExpected = autoBalancer!.calculateNextExecutionTimestampAsConfigured() if nextExpected == nil { return true // Can't calculate next time, likely stuck } - + // If next expected time has passed and no active schedule, yield vault is stuck return nextExpected! < getCurrentBlock().timestamp } @@ -150,6 +160,8 @@ access(all) contract FlowYieldVaultsAutoBalancers { assert(!publishedCap, message: "Published Capability collision found when publishing AutoBalancer for UniqueIdentifier.id \(uniqueID.id) at path \(publicPath)") + let reportCap = self.account.capabilities.storage.issue<&{DeFiActions.AutoBalancerExecutionCallback}>(self.registryReportCallbackStoragePath) + // create & save AutoBalancer with optional recurring config let autoBalancer <- DeFiActions.createAutoBalancer( oracle: oracle, @@ -161,6 +173,7 @@ access(all) contract FlowYieldVaultsAutoBalancers { recurringConfig: recurringConfig, uniqueID: uniqueID ) + autoBalancer.setExecutionCallback(reportCap) self.account.storage.save(<-autoBalancer, to: storagePath) let autoBalancerRef = self._borrowAutoBalancer(uniqueID.id) @@ -224,7 +237,7 @@ access(all) contract FlowYieldVaultsAutoBalancers { let publicPath = self.deriveAutoBalancerPath(id: id, storage: false) as! 
PublicPath // unpublish the public AutoBalancer Capability let _ = self.account.capabilities.unpublish(publicPath) - + // Collect controller IDs first (can't modify during iteration) var controllersToDelete: [UInt64] = [] self.account.capabilities.storage.forEachController(forPath: storagePath, fun(_ controller: &StorageCapabilityController): Bool { @@ -237,13 +250,24 @@ access(all) contract FlowYieldVaultsAutoBalancers { controller.delete() } } - + // load & burn the AutoBalancer (this also handles any pending scheduled transactions via burnCallback) let autoBalancer <-self.account.storage.load<@DeFiActions.AutoBalancer>(from: storagePath) Burner.burn(<-autoBalancer) } + access(self) fun createRegistryReportCallbackImpl(): @RegistryReportCallback { + return <-create RegistryReportCallback() + } + init() { self.pathPrefix = "FlowYieldVaultsAutoBalancer_" + self.registryReportCallbackStoragePath = StoragePath(identifier: "FlowYieldVaultsRegistryReportCallback")! + + // Ensure shared execution callback exists (reports this account's executions to Registry) + if self.account.storage.type(at: self.registryReportCallbackStoragePath) == nil { + self.account.storage.save(<-self.createRegistryReportCallbackImpl(), to: self.registryReportCallbackStoragePath) + } + } } diff --git a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc index 645d7e0a..d0f275b6 100644 --- a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc +++ b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc @@ -58,6 +58,11 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { /// Stored as a dictionary for O(1) add/remove; iteration gives the pending set access(self) var pendingQueue: {UInt64: Bool} + /// Order for stuck scanning: least recently reported (executed) first. + /// Vaults call reportExecution() on each run (remove id from array, append to end). 
+ /// Supervisor scans only the first MAX_BATCH_SIZE entries for stuck detection. + access(self) var stuckScanOrder: [UInt64] + /* --- ACCOUNT-LEVEL FUNCTIONS --- */ /// Register a YieldVault and store its handler and schedule capabilities (idempotent) @@ -73,9 +78,27 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { self.yieldVaultRegistry[yieldVaultID] = true self.handlerCaps[yieldVaultID] = handlerCap self.scheduleCaps[yieldVaultID] = scheduleCap + self.stuckScanOrder.append(yieldVaultID) emit YieldVaultRegistered(yieldVaultID: yieldVaultID) } + /// Called by the account that holds this contract (e.g. from the wrapper) on every execution. Removes yieldVaultID from stuckScanOrder (if present) + /// and appends it to the end so the Supervisor only scans the first N (least recently executed) for stuck. + access(account) fun reportExecution(yieldVaultID: UInt64) { + if !(self.yieldVaultRegistry[yieldVaultID] ?? false) { + return + } + var i = 0 + while i < self.stuckScanOrder.length { + if self.stuckScanOrder[i] == yieldVaultID { + self.stuckScanOrder.remove(at: i) + break + } + i = i + 1 + } + self.stuckScanOrder.append(yieldVaultID) + } + /// Adds a yield vault to the pending queue for seeding by the Supervisor access(account) fun enqueuePending(yieldVaultID: UInt64) { if self.yieldVaultRegistry[yieldVaultID] == true { @@ -92,12 +115,20 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { } } - /// Unregister a YieldVault (idempotent) - removes from registry, capabilities, and pending queue + /// Unregister a YieldVault (idempotent) - removes from registry, capabilities, pending queue, and stuckScanOrder access(account) fun unregister(yieldVaultID: UInt64) { self.yieldVaultRegistry.remove(key: yieldVaultID) self.handlerCaps.remove(key: yieldVaultID) self.scheduleCaps.remove(key: yieldVaultID) let pending = self.pendingQueue.remove(key: yieldVaultID) + var i = 0 + while i < self.stuckScanOrder.length { + if self.stuckScanOrder[i] == yieldVaultID { + 
self.stuckScanOrder.remove(at: i) + break + } + i = i + 1 + } emit YieldVaultUnregistered(yieldVaultID: yieldVaultID, wasInPendingQueue: pending != nil) } @@ -156,19 +187,19 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { /// Get paginated pending yield vault IDs /// @param page: The page number (0-indexed) /// @param size: The page size (defaults to MAX_BATCH_SIZE if nil) - access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: Int?): [UInt64] { - let pageSize = size ?? self.MAX_BATCH_SIZE + access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt?): [UInt64] { + let pageSize = size ?? Int(self.MAX_BATCH_SIZE) let allPending = self.pendingQueue.keys - let startIndex = page * pageSize - + let startIndex = page * Int(pageSize) + if startIndex >= allPending.length { return [] } - - let endIndex = startIndex + pageSize > allPending.length - ? allPending.length - : startIndex + pageSize - + + let endIndex = startIndex + Int(pageSize) > allPending.length + ? allPending.length + : startIndex + Int(pageSize) + return allPending.slice(from: startIndex, upTo: endIndex) } @@ -177,6 +208,14 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { return self.pendingQueue.length } + /// Returns the first n yield vault IDs from the stuck-scan order (least recently executed first). + /// Supervisor should only scan these for stuck detection instead of all registered vaults. + /// @param limit: Maximum number of IDs to return (caller typically passes MAX_BATCH_SIZE) + access(all) view fun getStuckScanCandidates(limit: UInt): [UInt64] { + let end = limit > UInt(self.stuckScanOrder.length) ? 
self.stuckScanOrder.length : limit + return self.stuckScanOrder.slice(from: 0, upTo: Int(end)) + } + /// Get global Supervisor capability, if set /// NOTE: Access restricted - only used internally by the scheduler access(account) @@ -193,6 +232,7 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { self.handlerCaps = {} self.scheduleCaps = {} self.pendingQueue = {} + self.stuckScanOrder = [] } } diff --git a/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc b/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc index be81a875..4af44109 100644 --- a/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc +++ b/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc @@ -186,24 +186,8 @@ access(all) contract FlowYieldVaultsSchedulerV1 { // STEP 1: State-based detection - scan for stuck yield vaults if scanForStuck { - // TODO: add pagination - this will inevitably fails and at minimum creates inconsistent execution - // effort between runs - let registeredYieldVaults = FlowYieldVaultsSchedulerRegistry.getRegisteredYieldVaultIDs() - var scanned = 0 - for yieldVaultID in registeredYieldVaults { - if scanned >= FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE { - break - } - scanned = scanned + 1 - - // Skip if already in pending queue - // TODO: This is extremely inefficient - accessing from mapping is preferrable to iterating over - // an array - if FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDs().contains(yieldVaultID) { - continue - } - - // Check if yield vault is stuck (has recurring config, no active schedule, overdue) + let candidates = FlowYieldVaultsSchedulerRegistry.getStuckScanCandidates(limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE)) + for yieldVaultID in candidates { if FlowYieldVaultsAutoBalancers.isStuckYieldVault(id: yieldVaultID) { FlowYieldVaultsSchedulerRegistry.enqueuePending(yieldVaultID: yieldVaultID) emit StuckYieldVaultDetected(yieldVaultID: yieldVaultID) @@ -213,7 +197,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 { // STEP 2: 
Process pending yield vaults - recover them via Schedule capability let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: nil) - + for yieldVaultID in pendingYieldVaults { // Get Schedule capability for this yield vault let scheduleCap = FlowYieldVaultsSchedulerRegistry.getScheduleCap(yieldVaultID: yieldVaultID) @@ -457,7 +441,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 { // Initialize paths self.SupervisorStoragePath = /storage/FlowYieldVaultsSupervisor - + // Configure Supervisor at deploy time self.ensureSupervisorConfigured() } diff --git a/cadence/tests/scheduled_supervisor_test.cdc b/cadence/tests/scheduled_supervisor_test.cdc index eada6f15..dff90dc6 100644 --- a/cadence/tests/scheduled_supervisor_test.cdc +++ b/cadence/tests/scheduled_supervisor_test.cdc @@ -913,3 +913,130 @@ fun testInsufficientFundsAndRecovery() { log("- All ".concat(activeScheduleCount.toString()).concat(" yield vaults have active schedules")) log("========================================") } + +/// Supervisor batch recovery: 200 stuck vaults, no capacity-probe loop. +/// +/// Flow: create 200 yield vaults, run 2 scheduling rounds, drain FLOW so executions fail, +/// wait for vaults to be marked stuck, refund FLOW, schedule the supervisor, then advance +/// time for ceil(200/MAX_BATCH_SIZE)+10 supervisor ticks. Asserts all 200 vaults are +/// recovered (YieldVaultRecovered events), none still stuck, and all have active schedules. +/// The +10 extra ticks are a buffer so every vault is processed despite scheduler timing. +access(all) +fun testSupervisorHandlesManyStuckVaults() { + let n = 200 + let maxBatchSize = FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE + + if snapshot != getCurrentBlockHeight() { + Test.reset(to: snapshot) + } + + // 1. 
Setup: user, FLOW, and grant
+    let user = Test.createAccount()
+    mintFlow(to: user, amount: 100000.0)
+    grantBeta(flowYieldVaultsAccount, user)
+    mintFlow(to: flowYieldVaultsAccount, amount: 10000.0)
+
+    // 2. Create n yield vaults in batch (Test.executeTransactions)
+    var i = 0
+    let tx = Test.Transaction(
+        code: Test.readFile("../transactions/flow-yield-vaults/create_yield_vault.cdc"),
+        authorizers: [user.address],
+        signers: [user],
+        arguments: [strategyIdentifier, flowTokenIdentifier, 5.0]
+    )
+    var txs: [Test.Transaction] = []
+    while i < n {
+        txs.append(tx)
+        i = i + 1
+    }
+    let results = Test.executeTransactions(txs)
+    for result in results {
+        Test.expect(result, Test.beSucceeded())
+    }
+    log("testSupervisorHandlesManyStuckVaults: created \(n.toString()) yield vaults")
+
+    let yieldVaultIDs = getYieldVaultIDs(address: user.address)!
+    Test.assert(yieldVaultIDs.length == n, message: "expected \(n.toString()) vaults, got \(yieldVaultIDs.length.toString())")
+
+    // 3. Two scheduling rounds so vaults run once
+    setMockOraclePrice(signer: flowYieldVaultsAccount, forTokenIdentifier: flowTokenIdentifier, price: 1.5)
+    setMockOraclePrice(signer: flowYieldVaultsAccount, forTokenIdentifier: yieldTokenIdentifier, price: 1.2)
+    Test.moveTime(by: 60.0 * 10.0 + 10.0)
+    Test.commitBlock()
+    Test.moveTime(by: 60.0 * 10.0 + 10.0)
+    Test.commitBlock()
+
+    // 4. Drain FLOW so subsequent executions fail and vaults become stuck
+    let balanceBeforeDrain = (executeScript(
+        "../scripts/flow-yield-vaults/get_flow_balance.cdc",
+        [flowYieldVaultsAccount.address]
+    ).returnValue! as! UFix64)
+    if balanceBeforeDrain > 0.01 {
+        let drainRes = _executeTransaction(
+            "../transactions/flow-yield-vaults/drain_flow.cdc",
+            [balanceBeforeDrain - 0.001],
+            flowYieldVaultsAccount
+        )
+        Test.expect(drainRes, Test.beSucceeded())
+    }
+    log("testSupervisorHandlesManyStuckVaults: drained FLOW, waiting for vaults to be marked stuck")
+
+    // 5. 
Wait rounds until vaults are marked stuck
+    var waitRound = 0
+    while waitRound < 6 {
+        Test.moveTime(by: 60.0 * 10.0 + 10.0)
+        Test.commitBlock()
+        waitRound = waitRound + 1
+    }
+
+    // 6. Refund FLOW and schedule supervisor
+    mintFlow(to: flowYieldVaultsAccount, amount: 500.0)
+    Test.commitBlock()
+    Test.moveTime(by: 1.0)
+    Test.commitBlock()
+
+    let interval = 60.0 * 10.0
+    let schedSupRes = _executeTransaction(
+        "../transactions/flow-yield-vaults/admin/schedule_supervisor.cdc",
+        [interval, UInt8(1), UInt64(5000), true],
+        flowYieldVaultsAccount
+    )
+    Test.expect(schedSupRes, Test.beSucceeded())
+
+    // 7. Advance time for supervisor ticks (ceil(n/MAX_BATCH_SIZE)+10); each tick processes a batch
+    let supervisorRunsNeeded = (UInt(n) + UInt(maxBatchSize) - 1) / UInt(maxBatchSize)
+    var run = 0 as UInt
+    while run < supervisorRunsNeeded + 10 {
+        Test.moveTime(by: 60.0 * 10.0 + 10.0)
+        Test.commitBlock()
+        run = run + 1
+    }
+    log("testSupervisorHandlesManyStuckVaults: ran \((supervisorRunsNeeded + 10).toString()) supervisor ticks")
+
+    let recoveredEvents = Test.eventsOfType(Type<FlowYieldVaultsSchedulerV1.YieldVaultRecovered>())
+    Test.assert(recoveredEvents.length >= n, message: "expected at least \(n.toString()) recovered, got \(recoveredEvents.length.toString())")
+    log("testSupervisorHandlesManyStuckVaults: recovered \(recoveredEvents.length.toString()) vaults")
+
+    // 8. Health check: none stuck, all have active schedules
+    var stillStuck = 0
+    var activeCount = 0
+    for yieldVaultID in yieldVaultIDs {
+        let isStuckRes = executeScript(
+            "../scripts/flow-yield-vaults/is_stuck_yield_vault.cdc",
+            [yieldVaultID]
+        )
+        if isStuckRes.returnValue != nil && (isStuckRes.returnValue! as! Bool) {
+            stillStuck = stillStuck + 1
+        }
+        let hasActiveRes = executeScript(
+            "../scripts/flow-yield-vaults/has_active_schedule.cdc",
+            [yieldVaultID]
+        )
+        if (hasActiveRes.returnValue != nil && (hasActiveRes.returnValue! as! 
Bool) { + activeCount = activeCount + 1 + } + } + Test.assert(stillStuck == 0, message: "expected 0 stuck, got \(stillStuck.toString())") + Test.assert(activeCount == n, message: "expected \(n.toString()) active, got \(activeCount.toString())") + log("testSupervisorHandlesManyStuckVaults: all \(n.toString()) vaults healthy, active schedules: \(activeCount.toString())") +} \ No newline at end of file From 05148e960c2b361f48f8079b820034813eed1223 Mon Sep 17 00:00:00 2001 From: Patrick Fuchs Date: Tue, 3 Mar 2026 20:23:27 +0100 Subject: [PATCH 2/6] update submodule --- .gitmodules | 1 + .../get_pending_yield_vaults_paginated.cdc | 5 ++--- cadence/tests/scheduled_supervisor_test.cdc | 7 +++++-- lib/FlowALP | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/.gitmodules b/.gitmodules index 2a6d948d..ae805f4f 100644 --- a/.gitmodules +++ b/.gitmodules @@ -22,3 +22,4 @@ [submodule "lib/FlowALP"] path = lib/FlowALP url = git@github.com:onflow/FlowALP.git + branch = nialexsan/pre-refactor diff --git a/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc b/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc index d9be5b50..c84abc43 100644 --- a/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc +++ b/cadence/scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc @@ -3,8 +3,7 @@ import "FlowYieldVaultsSchedulerRegistry" /// Returns a paginated list of yield vault IDs in the pending queue. /// @param page: The page number (0-indexed) /// @param size: The number of yield vaults per page (defaults to MAX_BATCH_SIZE if 0) -access(all) fun main(page: Int, size: Int): [UInt64] { - let pageSize: Int? = size > 0 ? 
size : nil - return FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: page, size: pageSize) +access(all) fun main(page: Int, size: UInt): [UInt64] { + return FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: page, size: size) } diff --git a/cadence/tests/scheduled_supervisor_test.cdc b/cadence/tests/scheduled_supervisor_test.cdc index dff90dc6..9c6b98d0 100644 --- a/cadence/tests/scheduled_supervisor_test.cdc +++ b/cadence/tests/scheduled_supervisor_test.cdc @@ -279,7 +279,9 @@ fun testMultiYieldVaultIndependentExecution() { /// access(all) fun testPaginationStress() { - Test.reset(to: snapshot) + if snapshot != getCurrentBlockHeight() { + Test.reset(to: snapshot) + } // Calculate number of yield vaults: 3 * MAX_BATCH_SIZE + partial batch // MAX_BATCH_SIZE is 5 in FlowYieldVaultsSchedulerRegistry let maxBatchSize = 5 @@ -333,7 +335,8 @@ fun testPaginationStress() { // Test paginated access - request each page up to MAX_BATCH_SIZE var page = 0 while page <= fullBatches { - let pageRes = executeScript("../scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc", [page, maxBatchSize]) + let pageRes = executeScript("../scripts/flow-yield-vaults/get_pending_yield_vaults_paginated.cdc", [page, UInt(maxBatchSize)]) + Test.expect(pageRes, Test.beSucceeded()) let pageData = pageRes.returnValue! as! 
[UInt64] log("Page ".concat(page.toString()).concat(" of pending queue: ").concat(pageData.length.toString()).concat(" yield vaults")) page = page + 1 diff --git a/lib/FlowALP b/lib/FlowALP index ee6fb772..169fdef4 160000 --- a/lib/FlowALP +++ b/lib/FlowALP @@ -1 +1 @@ -Subproject commit ee6fb772aa24ab9867e0e28bacd8e9e1a0f1fe58 +Subproject commit 169fdef40a1d68a3fa6e2120861dcb8ce8b9f1db From 53cad2999c0039525513b4976011c0c528c6aac8 Mon Sep 17 00:00:00 2001 From: Patrick Fuchs Date: Wed, 4 Mar 2026 18:54:03 +0100 Subject: [PATCH 3/6] update submodule branch to v0 --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index ae805f4f..b12ebf1b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -22,4 +22,4 @@ [submodule "lib/FlowALP"] path = lib/FlowALP url = git@github.com:onflow/FlowALP.git - branch = nialexsan/pre-refactor + branch = v0 From c21d5b4433f4077a2d395e480086d9ee9fdc0699 Mon Sep 17 00:00:00 2001 From: Patrick Fuchs Date: Mon, 9 Mar 2026 18:11:23 +0100 Subject: [PATCH 4/6] address pr comments --- .../FlowYieldVaultsSchedulerRegistry.cdc | 26 ++++------ .../contracts/FlowYieldVaultsSchedulerV1.cdc | 2 +- .../contracts/FlowYieldVaultsStrategiesV2.cdc | 4 +- docs/SCHEDULED_REBALANCING_GUIDE.md | 13 ++--- docs/rebalancing_architecture.md | 48 +++++++++++-------- 5 files changed, 47 insertions(+), 46 deletions(-) diff --git a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc index d0f275b6..35c5a3de 100644 --- a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc +++ b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc @@ -88,13 +88,8 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { if !(self.yieldVaultRegistry[yieldVaultID] ?? 
false) { return } - var i = 0 - while i < self.stuckScanOrder.length { - if self.stuckScanOrder[i] == yieldVaultID { - self.stuckScanOrder.remove(at: i) - break - } - i = i + 1 + if let index = self.stuckScanOrder.firstIndex(of: yieldVaultID) { + let _ = self.stuckScanOrder.remove(at: index) } self.stuckScanOrder.append(yieldVaultID) } @@ -121,13 +116,8 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { self.handlerCaps.remove(key: yieldVaultID) self.scheduleCaps.remove(key: yieldVaultID) let pending = self.pendingQueue.remove(key: yieldVaultID) - var i = 0 - while i < self.stuckScanOrder.length { - if self.stuckScanOrder[i] == yieldVaultID { - self.stuckScanOrder.remove(at: i) - break - } - i = i + 1 + if let index = self.stuckScanOrder.firstIndex(of: yieldVaultID) { + let _ = self.stuckScanOrder.remove(at: index) } emit YieldVaultUnregistered(yieldVaultID: yieldVaultID, wasInPendingQueue: pending != nil) } @@ -186,11 +176,11 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { /// Get paginated pending yield vault IDs /// @param page: The page number (0-indexed) - /// @param size: The page size (defaults to MAX_BATCH_SIZE if nil) - access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt?): [UInt64] { - let pageSize = size ?? Int(self.MAX_BATCH_SIZE) + /// @param size: The page size (defaults to MAX_BATCH_SIZE if 0) + access(all) view fun getPendingYieldVaultIDsPaginated(page: Int, size: UInt): [UInt64] { + let pageSize = size == 0 ? 
self.MAX_BATCH_SIZE : Int(size) let allPending = self.pendingQueue.keys - let startIndex = page * Int(pageSize) + let startIndex = page * pageSize if startIndex >= allPending.length { return [] diff --git a/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc b/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc index 4af44109..a1a3c0e6 100644 --- a/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc +++ b/cadence/contracts/FlowYieldVaultsSchedulerV1.cdc @@ -196,7 +196,7 @@ access(all) contract FlowYieldVaultsSchedulerV1 { } // STEP 2: Process pending yield vaults - recover them via Schedule capability - let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: nil) + let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE)) for yieldVaultID in pendingYieldVaults { // Get Schedule capability for this yield vault diff --git a/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc b/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc index 7ee81669..99b6ea34 100644 --- a/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc +++ b/cadence/contracts/FlowYieldVaultsStrategiesV2.cdc @@ -857,7 +857,7 @@ access(all) contract FlowYieldVaultsStrategiesV2 { } access(all) view fun getSupportedComposers(): {Type: Bool} { - return { + return { Type<@MorphoERC4626StrategyComposer>(): true } } @@ -994,7 +994,7 @@ access(all) contract FlowYieldVaultsStrategiesV2 { fun _createRecurringConfig(withID: DeFiActions.UniqueIdentifier?): DeFiActions.AutoBalancerRecurringConfig { // Create txnFunder that can provide/accept FLOW for scheduling fees let txnFunder = self._createTxnFunder(withID: withID) - + return DeFiActions.AutoBalancerRecurringConfig( interval: 60 * 10, // Rebalance every 10 minutes priority: FlowTransactionScheduler.Priority.Medium, diff --git a/docs/SCHEDULED_REBALANCING_GUIDE.md b/docs/SCHEDULED_REBALANCING_GUIDE.md index 824f176e..61519057 
100644 --- a/docs/SCHEDULED_REBALANCING_GUIDE.md +++ b/docs/SCHEDULED_REBALANCING_GUIDE.md @@ -161,10 +161,11 @@ flow scripts execute cadence/scripts/flow-yield-vaults/get_pending_count.cdc ### What It Does -The Supervisor handles yield vaults that failed to self-schedule: -- Processes bounded `pendingQueue` (MAX 50 yield vaults per run) -- Schedules failed yield vaults -- Self-reschedules if more work remains +The Supervisor handles two recovery scenarios per run: +1. **Stuck detection**: Scans up to `MAX_BATCH_SIZE` vault candidates using `getStuckScanCandidates()`, which returns vaults ordered least-recently-executed first (LRU). Stuck vaults are enqueued in `pendingQueue`. +2. **Pending processing**: Seeds vaults from `pendingQueue` (up to `MAX_BATCH_SIZE` per run). Self-reschedules if more work remains. + +Each AutoBalancer reports back to the registry after every execution via `RegistryReportCallback`, which calls `reportExecution()` to move the vault to the end of the stuck-scan order. This ensures the Supervisor always prioritises the longest-idle vaults. ### When It's Needed @@ -311,5 +312,5 @@ A: No, one schedule per yield vault. Cancel to reschedule. 
--- -**Last Updated**: November 26, 2025 -**Version**: 2.0.0 +**Last Updated**: March 9, 2026 +**Version**: 2.1.0 diff --git a/docs/rebalancing_architecture.md b/docs/rebalancing_architecture.md index a0f5c9e8..bab0ef91 100644 --- a/docs/rebalancing_architecture.md +++ b/docs/rebalancing_architecture.md @@ -45,6 +45,7 @@ - `yieldVaultRegistry`: registered yield vault IDs - `handlerCaps`: direct capabilities to AutoBalancers (no wrapper) - `pendingQueue`: yield vaults needing (re)seeding (bounded by MAX_BATCH_SIZE=50) + - `stuckScanOrder`: LRU-ordered list of vault IDs for stuck detection; vaults call `reportExecution()` on each run to move themselves to the end, so the Supervisor always scans the longest-idle vaults first - `supervisorCap`: capability for Supervisor self-scheduling - **FlowYieldVaultsScheduler** provides: - `registerYieldVault()`: atomic registration + initial scheduling @@ -154,32 +155,41 @@ fun executeTransaction(id: UInt64, data: AnyStruct?) { ### Supervisor Recovery (Bounded) -The Supervisor handles failed schedules via a bounded pending queue: +The Supervisor runs two steps per execution: + +**Step 1 – Stuck detection** (when `scanForStuck == true`): +Fetches up to `MAX_BATCH_SIZE` candidates from `getStuckScanCandidates(limit:)`, which returns the LRU head of `stuckScanOrder`. Vaults that are stuck (recurring config set, no active schedule, overdue) are enqueued into `pendingQueue`. + +**Step 2 – Pending processing**: +Seeds vaults from `pendingQueue` (up to `MAX_BATCH_SIZE` per run via `getPendingYieldVaultIDsPaginated(page: 0, size: nil)`). ```cadence access(FlowTransactionScheduler.Execute) fun executeTransaction(id: UInt64, data: AnyStruct?) 
{ - // Process only pending yield vaults (MAX 50 per run) - let pendingYieldVaultIDs = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDs() - - for yieldVaultID in pendingYieldVaults { - if manager.hasScheduled(yieldVaultID: yieldVaultID) { - FlowYieldVaultsSchedulerRegistry.dequeuePending(yieldVaultID: yieldVaultID) - continue + // STEP 1: scan least-recently-executed vaults for stuck detection + let candidates = FlowYieldVaultsSchedulerRegistry.getStuckScanCandidates( + limit: UInt(FlowYieldVaultsSchedulerRegistry.MAX_BATCH_SIZE)) + for yieldVaultID in candidates { + if FlowYieldVaultsAutoBalancers.isStuckYieldVault(id: yieldVaultID) { + FlowYieldVaultsSchedulerRegistry.enqueuePending(yieldVaultID: yieldVaultID) } - - // Schedule and dequeue - let handlerCap = FlowYieldVaultsSchedulerRegistry.getHandlerCap(yieldVaultID: yieldVaultID) - // ... estimate fees, schedule, dequeue ... } - - // Self-reschedule if more pending work + + // STEP 2: process pending queue (MAX_BATCH_SIZE per run) + let pendingYieldVaults = FlowYieldVaultsSchedulerRegistry.getPendingYieldVaultIDsPaginated(page: 0, size: nil) + for yieldVaultID in pendingYieldVaults { + // ... estimate fees, schedule via scheduleCap, dequeue ... + } + + // Self-reschedule if more pending work remains if FlowYieldVaultsSchedulerRegistry.getPendingCount() > 0 { // Schedule next Supervisor run - } - } + } +} ``` +Each AutoBalancer sets a `RegistryReportCallback` at creation time. On every execution it calls `FlowYieldVaultsSchedulerRegistry.reportExecution(yieldVaultID:)`, which moves the vault to the tail of `stuckScanOrder`. + --- ## 4. Behavior in Different Price Scenarios @@ -233,7 +243,7 @@ fun executeTransaction(id: UInt64, data: AnyStruct?) 
{ | AutoBalancer | Manages Yield exposure, executes rebalance | | FlowALP Position | Manages collateral/debt health | | FlowYieldVaultsScheduler | Registration, atomic initial scheduling | -| FlowYieldVaultsSchedulerRegistry | Stores registry, pending queue | -| Supervisor | Recovery for failed schedules (bounded) | +| FlowYieldVaultsSchedulerRegistry | Stores registry, pending queue, stuck-scan order | +| Supervisor | Stuck detection (LRU scan) + pending queue recovery (bounded) | -**Last Updated**: November 26, 2025 +**Last Updated**: March 9, 2026 From 3878bc4b2a2420e02b370746567c6cf1a9fdef13 Mon Sep 17 00:00:00 2001 From: Patrick Fuchs Date: Tue, 10 Mar 2026 05:32:51 +0100 Subject: [PATCH 5/6] update submodule --- lib/FlowALP | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/FlowALP b/lib/FlowALP index 169fdef4..ee6fb772 160000 --- a/lib/FlowALP +++ b/lib/FlowALP @@ -1 +1 @@ -Subproject commit 169fdef40a1d68a3fa6e2120861dcb8ce8b9f1db +Subproject commit ee6fb772aa24ab9867e0e28bacd8e9e1a0f1fe58 From c7fa0e62aec26ee93d4fcf957f1486835121f9ed Mon Sep 17 00:00:00 2001 From: Patrick Fuchs Date: Tue, 10 Mar 2026 05:52:56 +0100 Subject: [PATCH 6/6] use linked list for stuck-scan --- .../FlowYieldVaultsSchedulerRegistry.cdc | 127 ++++++++++++++---- 1 file changed, 102 insertions(+), 25 deletions(-) diff --git a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc index 35c5a3de..5cdb5df3 100644 --- a/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc +++ b/cadence/contracts/FlowYieldVaultsSchedulerRegistry.cdc @@ -13,6 +13,27 @@ import "DeFiActions" /// access(all) contract FlowYieldVaultsSchedulerRegistry { + /* --- TYPES --- */ + + /// Node in the simulated doubly-linked list used for O(1) stuck-scan ordering. + /// `prev` points toward the head (most recently executed); `next` points toward the tail (oldest/least recently executed). 
+ access(all) struct ListNode { + access(all) var prev: UInt64? + access(all) var next: UInt64? + init(prev: UInt64?, next: UInt64?) { + self.prev = prev + self.next = next + } + + access(all) fun setPrev(prev: UInt64?) { + self.prev = prev + } + + access(all) fun setNext(next: UInt64?) { + self.next = next + } + } + /* --- EVENTS --- */ /// Emitted when a yield vault is registered with its handler capability @@ -58,10 +79,54 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { /// Stored as a dictionary for O(1) add/remove; iteration gives the pending set access(self) var pendingQueue: {UInt64: Bool} - /// Order for stuck scanning: least recently reported (executed) first. - /// Vaults call reportExecution() on each run (remove id from array, append to end). - /// Supervisor scans only the first MAX_BATCH_SIZE entries for stuck detection. - access(self) var stuckScanOrder: [UInt64] + /// Simulated doubly-linked list for O(1) stuck-scan ordering. + /// listHead = most recently executed vault ID (or nil if empty). + /// listTail = least recently executed vault ID — getStuckScanCandidates walks from here. + /// On reportExecution a vault is snipped from its current position and moved to head in O(1). + access(self) var listNodes: {UInt64: ListNode} + access(self) var listHead: UInt64? + access(self) var listTail: UInt64? + + /* --- PRIVATE LIST HELPERS --- */ + + /// Insert `id` at the head of the list (most-recently-executed end). + /// Caller must ensure `id` is not already in the list. + access(self) fun _listInsertAtHead(id: UInt64) { + let node = ListNode(prev: nil, next: self.listHead) + if let oldHeadID = self.listHead { + var oldHead = self.listNodes[oldHeadID]! + oldHead.setPrev(prev: id) + self.listNodes[oldHeadID] = oldHead + } else { + // List was empty — id is also the tail + self.listTail = id + } + self.listNodes[id] = node + self.listHead = id + } + + /// Remove `id` from wherever it sits in the list in O(1). 
+ access(self) fun _listRemove(id: UInt64) { + let node = self.listNodes.remove(key: id) ?? panic("Node not found") + + if let prevID = node.prev { + var prevNode = self.listNodes[prevID]! + prevNode.setNext(next: node.next) + self.listNodes[prevID] = prevNode + } else { + // id was the head + self.listHead = node.next + } + + if let nextID = node.next { + var nextNode = self.listNodes[nextID]! + nextNode.setPrev(prev: node.prev) + self.listNodes[nextID] = nextNode + } else { + // id was the tail + self.listTail = node.prev + } + } /* --- ACCOUNT-LEVEL FUNCTIONS --- */ @@ -78,20 +143,23 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { self.yieldVaultRegistry[yieldVaultID] = true self.handlerCaps[yieldVaultID] = handlerCap self.scheduleCaps[yieldVaultID] = scheduleCap - self.stuckScanOrder.append(yieldVaultID) + // New vaults go to the head; they haven't executed yet but are freshly registered. + // If already in the list (idempotent re-register), remove first to avoid duplicates. + if self.listNodes[yieldVaultID] != nil { + self._listRemove(id: yieldVaultID) + } + self._listInsertAtHead(id: yieldVaultID) emit YieldVaultRegistered(yieldVaultID: yieldVaultID) } - /// Called by the account that holds this contract (e.g. from the wrapper) on every execution. Removes yieldVaultID from stuckScanOrder (if present) - /// and appends it to the end so the Supervisor only scans the first N (least recently executed) for stuck. + /// Called on every execution. Moves yieldVaultID to the head (most recently executed) + /// so the Supervisor scans from the tail (least recently executed) for stuck detection — O(1). access(account) fun reportExecution(yieldVaultID: UInt64) { if !(self.yieldVaultRegistry[yieldVaultID] ?? 
false) { return } - if let index = self.stuckScanOrder.firstIndex(of: yieldVaultID) { - let _ = self.stuckScanOrder.remove(at: index) - } - self.stuckScanOrder.append(yieldVaultID) + self._listRemove(id: yieldVaultID) + self._listInsertAtHead(id: yieldVaultID) } /// Adds a yield vault to the pending queue for seeding by the Supervisor @@ -110,15 +178,13 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { } } - /// Unregister a YieldVault (idempotent) - removes from registry, capabilities, pending queue, and stuckScanOrder + /// Unregister a YieldVault (idempotent) - removes from registry, capabilities, pending queue, and linked list (if present) access(account) fun unregister(yieldVaultID: UInt64) { - self.yieldVaultRegistry.remove(key: yieldVaultID) - self.handlerCaps.remove(key: yieldVaultID) - self.scheduleCaps.remove(key: yieldVaultID) + let _r = self.yieldVaultRegistry.remove(key: yieldVaultID) + let _h = self.handlerCaps.remove(key: yieldVaultID) + let _s = self.scheduleCaps.remove(key: yieldVaultID) let pending = self.pendingQueue.remove(key: yieldVaultID) - if let index = self.stuckScanOrder.firstIndex(of: yieldVaultID) { - let _ = self.stuckScanOrder.remove(at: index) - } + if self.listNodes[yieldVaultID] != nil { self._listRemove(id: yieldVaultID) } emit YieldVaultUnregistered(yieldVaultID: yieldVaultID, wasInPendingQueue: pending != nil) } @@ -198,12 +264,23 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { return self.pendingQueue.length } - /// Returns the first n yield vault IDs from the stuck-scan order (least recently executed first). + /// Returns up to `limit` vault IDs starting from the tail (least recently executed). /// Supervisor should only scan these for stuck detection instead of all registered vaults. /// @param limit: Maximum number of IDs to return (caller typically passes MAX_BATCH_SIZE) - access(all) view fun getStuckScanCandidates(limit: UInt): [UInt64] { - let end = limit > UInt(self.stuckScanOrder.length) ? 
self.stuckScanOrder.length : limit - return self.stuckScanOrder.slice(from: 0, upTo: Int(end)) + access(all) view fun getStuckScanCandidates(limit: UInt): [UInt64] { + var result: [UInt64] = [] + var current = self.listTail + var count: UInt = 0 + while count < limit { + if let id = current { + result.append(id) + current = self.listNodes[id]?.prev + count = count + 1 + } else { + break + } + } + return result + } /// Get global Supervisor capability, if set @@ -222,8 +299,8 @@ access(all) contract FlowYieldVaultsSchedulerRegistry { self.handlerCaps = {} self.scheduleCaps = {} self.pendingQueue = {} - self.stuckScanOrder = [] + self.listNodes = {} + self.listHead = nil + self.listTail = nil } } - -