diff --git a/docs/CompilerUpgrade0833.md b/docs/archive/CompilerUpgrade0833.md similarity index 100% rename from docs/CompilerUpgrade0833.md rename to docs/archive/CompilerUpgrade0833.md diff --git a/packages/contracts-test/tests/unit/disputes/poi.test.ts b/packages/contracts-test/tests/unit/disputes/poi.test.ts index b465f5986..b391dd0d4 100644 --- a/packages/contracts-test/tests/unit/disputes/poi.test.ts +++ b/packages/contracts-test/tests/unit/disputes/poi.test.ts @@ -1,4 +1,4 @@ -import { DisputeManager } from '@graphprotocol/contracts' +import { DisputeManager, IRewardsManager } from '@graphprotocol/contracts' import { EpochManager } from '@graphprotocol/contracts' import { GraphToken } from '@graphprotocol/contracts' import { IStaking } from '@graphprotocol/contracts' @@ -30,6 +30,7 @@ describe('DisputeManager:POI', () => { let epochManager: EpochManager let grt: GraphToken let staking: IStaking + let rewardsManager: IRewardsManager // Derive some channel keys for each indexer used to sign attestations const indexerChannelKey = deriveChannelKey() @@ -92,10 +93,15 @@ describe('DisputeManager:POI', () => { epochManager = contracts.EpochManager as EpochManager grt = contracts.GraphToken as GraphToken staking = contracts.Staking as IStaking + rewardsManager = contracts.RewardsManager as IRewardsManager // Give some funds to the fisherman await grt.connect(governor).mint(fisherman.address, fishermanTokens) await grt.connect(fisherman).approve(disputeManager.address, fishermanTokens) + + // HACK: we set the staking contract as the subgraph service to make tests pass. + // This is due to the test suite being outdated. 
+ await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts-test/tests/unit/disputes/query.test.ts b/packages/contracts-test/tests/unit/disputes/query.test.ts index 73238b4e0..e411bd028 100644 --- a/packages/contracts-test/tests/unit/disputes/query.test.ts +++ b/packages/contracts-test/tests/unit/disputes/query.test.ts @@ -1,5 +1,5 @@ import { createAttestation, Receipt } from '@graphprotocol/common-ts' -import { DisputeManager } from '@graphprotocol/contracts' +import { DisputeManager, IRewardsManager } from '@graphprotocol/contracts' import { EpochManager } from '@graphprotocol/contracts' import { GraphToken } from '@graphprotocol/contracts' import { IStaking } from '@graphprotocol/contracts' @@ -35,6 +35,7 @@ describe('DisputeManager:Query', () => { let epochManager: EpochManager let grt: GraphToken let staking: IStaking + let rewardsManager: IRewardsManager // Derive some channel keys for each indexer used to sign attestations const indexer1ChannelKey = deriveChannelKey() @@ -121,6 +122,7 @@ describe('DisputeManager:Query', () => { epochManager = contracts.EpochManager as EpochManager grt = contracts.GraphToken as GraphToken staking = contracts.Staking as IStaking + rewardsManager = contracts.RewardsManager as IRewardsManager // Give some funds to the fisherman for (const dst of [fisherman, fisherman2]) { @@ -139,6 +141,10 @@ describe('DisputeManager:Query', () => { indexerAddress: indexer.address, receipt, } + + // HACK: we set the staking contract as the subgraph service to make tests pass. + // This is due to the test suite being outdated. 
+ await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts-test/tests/unit/l2/l2Curation.test.ts b/packages/contracts-test/tests/unit/l2/l2Curation.test.ts index 6ee8a5cd3..a680ec28c 100644 --- a/packages/contracts-test/tests/unit/l2/l2Curation.test.ts +++ b/packages/contracts-test/tests/unit/l2/l2Curation.test.ts @@ -154,7 +154,7 @@ describe('L2Curation', () => { let me: SignerWithAddress let governor: SignerWithAddress let curator: SignerWithAddress - let stakingMock: SignerWithAddress + let subgraphServiceMock: SignerWithAddress let gnsImpersonator: Signer let fixture: NetworkFixture @@ -310,8 +310,8 @@ describe('L2Curation', () => { const beforeTotalBalance = await grt.balanceOf(curation.address) // Source of tokens must be the staking for this to work - await grt.connect(stakingMock).transfer(curation.address, tokensToCollect) - const tx = curation.connect(stakingMock).collect(subgraphDeploymentID, tokensToCollect) + await grt.connect(subgraphServiceMock).transfer(curation.address, tokensToCollect) + const tx = curation.connect(subgraphServiceMock).collect(subgraphDeploymentID, tokensToCollect) await expect(tx).emit(curation, 'Collected').withArgs(subgraphDeploymentID, tokensToCollect) // After state @@ -325,7 +325,7 @@ describe('L2Curation', () => { before(async function () { // Use stakingMock so we can call collect - ;[me, curator, stakingMock] = await graph.getTestAccounts() + ;[me, curator, subgraphServiceMock] = await graph.getTestAccounts() ;({ governor } = await graph.getNamedAccounts()) fixture = new NetworkFixture(graph.provider) contracts = await fixture.load(governor, true) @@ -343,8 +343,11 @@ describe('L2Curation', () => { await grt.connect(gnsImpersonator).approve(curation.address, curatorTokens) // Give some funds to the staking contract and approve the curation contract - await grt.connect(governor).mint(stakingMock.address, tokensToCollect) - await 
grt.connect(stakingMock).approve(curation.address, tokensToCollect) + await grt.connect(governor).mint(subgraphServiceMock.address, tokensToCollect) + await grt.connect(subgraphServiceMock).approve(curation.address, tokensToCollect) + + // Set the subgraph service + await curation.connect(governor).setSubgraphService(subgraphServiceMock.address) }) beforeEach(async function () { @@ -514,10 +517,10 @@ describe('L2Curation', () => { context('> not curated', function () { it('reject collect tokens distributed to the curation pool', async function () { // Source of tokens must be the staking for this to work - await controller.connect(governor).setContractProxy(utils.id('Staking'), stakingMock.address) + await controller.connect(governor).setContractProxy(utils.id('Staking'), subgraphServiceMock.address) await curation.connect(governor).syncAllContracts() // call sync because we change the proxy for staking - const tx = curation.connect(stakingMock).collect(subgraphDeploymentID, tokensToCollect) + const tx = curation.connect(subgraphServiceMock).collect(subgraphDeploymentID, tokensToCollect) await expect(tx).revertedWith('Subgraph deployment must be curated to collect fees') }) }) @@ -529,11 +532,11 @@ describe('L2Curation', () => { it('reject collect tokens distributed from invalid address', async function () { const tx = curation.connect(me).collect(subgraphDeploymentID, tokensToCollect) - await expect(tx).revertedWith('Caller must be the subgraph service or staking contract') + await expect(tx).revertedWith('Caller must be the subgraph service') }) it('should collect tokens distributed to the curation pool', async function () { - await controller.connect(governor).setContractProxy(utils.id('Staking'), stakingMock.address) + await controller.connect(governor).setContractProxy(utils.id('Staking'), subgraphServiceMock.address) await curation.connect(governor).syncAllContracts() // call sync because we change the proxy for staking await shouldCollect(toGRT('1')) @@ 
-544,7 +547,7 @@ describe('L2Curation', () => { }) it('should collect tokens and then unsignal all', async function () { - await controller.connect(governor).setContractProxy(utils.id('Staking'), stakingMock.address) + await controller.connect(governor).setContractProxy(utils.id('Staking'), subgraphServiceMock.address) await curation.connect(governor).syncAllContracts() // call sync because we change the proxy for staking // Collect increase the pool reserves @@ -556,7 +559,7 @@ describe('L2Curation', () => { }) it('should collect tokens and then unsignal multiple times', async function () { - await controller.connect(governor).setContractProxy(utils.id('Staking'), stakingMock.address) + await controller.connect(governor).setContractProxy(utils.id('Staking'), subgraphServiceMock.address) await curation.connect(governor).syncAllContracts() // call sync because we change the proxy for staking // Collect increase the pool reserves diff --git a/packages/contracts-test/tests/unit/l2/l2GNS.test.ts b/packages/contracts-test/tests/unit/l2/l2GNS.test.ts index 5b8f1d028..0fd691939 100644 --- a/packages/contracts-test/tests/unit/l2/l2GNS.test.ts +++ b/packages/contracts-test/tests/unit/l2/l2GNS.test.ts @@ -2,12 +2,10 @@ import { L2GNS } from '@graphprotocol/contracts' import { L2GraphTokenGateway } from '@graphprotocol/contracts' import { L2Curation } from '@graphprotocol/contracts' import { GraphToken } from '@graphprotocol/contracts' -import { IL2Staking } from '@graphprotocol/contracts' import { L1GNS, L1GraphTokenGateway } from '@graphprotocol/contracts' import { buildSubgraph, buildSubgraphId, - deriveChannelKey, GraphNetworkContracts, helpers, PublishSubgraph, @@ -44,7 +42,6 @@ interface L1SubgraphParams { describe('L2GNS', () => { const graph = hre.graph() let me: SignerWithAddress - let attacker: SignerWithAddress let other: SignerWithAddress let governor: SignerWithAddress let fixture: NetworkFixture @@ -58,7 +55,6 @@ describe('L2GNS', () => { let gns: L2GNS let 
curation: L2Curation let grt: GraphToken - let staking: IL2Staking let newSubgraph0: PublishSubgraph let newSubgraph1: PublishSubgraph @@ -109,7 +105,7 @@ describe('L2GNS', () => { before(async function () { newSubgraph0 = buildSubgraph() - ;[me, attacker, other] = await graph.getTestAccounts() + ;[me, other] = await graph.getTestAccounts() ;({ governor } = await graph.getNamedAccounts()) fixture = new NetworkFixture(graph.provider) @@ -118,7 +114,6 @@ describe('L2GNS', () => { fixtureContracts = await fixture.load(governor, true) l2GraphTokenGateway = fixtureContracts.L2GraphTokenGateway as L2GraphTokenGateway gns = fixtureContracts.L2GNS as L2GNS - staking = fixtureContracts.L2Staking as unknown as IL2Staking curation = fixtureContracts.L2Curation as L2Curation grt = fixtureContracts.GraphToken as GraphToken @@ -354,61 +349,6 @@ describe('L2GNS', () => { .emit(gns, 'SignalMinted') .withArgs(l2SubgraphId, me.address, expectedNSignal, expectedSignal, curatedTokens) }) - it('protects the owner against a rounding attack', async function () { - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() - const collectTokens = curatedTokens.mul(20) - - await staking.connect(governor).setCurationPercentage(100000) - - // Set up an indexer account with some stake - await grt.connect(governor).mint(attacker.address, toGRT('1000000')) - // Curate 1 wei GRT by minting 1 GRT and burning most of it - await grt.connect(attacker).approve(curation.address, toBN(1)) - await curation.connect(attacker).mint(newSubgraph0.subgraphDeploymentID, toBN(1), 0) - - // Check this actually gave us 1 wei signal - expect(await curation.getCurationPoolTokens(newSubgraph0.subgraphDeploymentID)).eq(1) - await grt.connect(attacker).approve(staking.address, toGRT('1000000')) - await staking.connect(attacker).stake(toGRT('100000')) - const channelKey = deriveChannelKey() - // Allocate to the same deployment ID - await staking - .connect(attacker) - 
.allocateFrom( - attacker.address, - newSubgraph0.subgraphDeploymentID, - toGRT('100000'), - channelKey.address, - randomHexBytes(32), - await channelKey.generateProof(attacker.address), - ) - // Spoof some query fees, 10% of which will go to the Curation pool - await staking.connect(attacker).collect(collectTokens, channelKey.address) - // The curation pool now has 1 wei shares and a lot of tokens, so the rounding attack is prepared - // But L2GNS will protect the owner by sending the tokens - const callhookData = defaultAbiCoder.encode(['uint8', 'uint256', 'address'], [toBN(0), l1SubgraphId, me.address]) - await gatewayFinalizeTransfer(l1GNSMock.address, gns.address, curatedTokens, callhookData) - - const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) - const tx = gns - .connect(me) - .finishSubgraphTransferFromL1( - l2SubgraphId, - newSubgraph0.subgraphDeploymentID, - subgraphMetadata, - versionMetadata, - ) - await expect(tx) - .emit(gns, 'SubgraphPublished') - .withArgs(l2SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) - await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l2SubgraphId, subgraphMetadata) - await expect(tx).emit(gns, 'CuratorBalanceReturnedToBeneficiary') - await expect(tx).emit(gns, 'SubgraphUpgraded').withArgs(l2SubgraphId, 0, 0, newSubgraph0.subgraphDeploymentID) - await expect(tx) - .emit(gns, 'SubgraphVersionUpdated') - .withArgs(l2SubgraphId, newSubgraph0.subgraphDeploymentID, versionMetadata) - await expect(tx).emit(gns, 'SubgraphL2TransferFinalized').withArgs(l2SubgraphId) - }) it('cannot be called by someone other than the subgraph owner', async function () { const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode(['uint8', 'uint256', 'address'], [toBN(0), l1SubgraphId, me.address]) @@ -654,50 +594,6 @@ describe('L2GNS', () => { expect(gnsBalanceAfter).eq(gnsBalanceBefore) }) - it('protects the 
curator against a rounding attack', async function () { - // Transfer a subgraph from L1 with only 1 wei GRT of curated signal - const { l1SubgraphId, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() - const curatedTokens = toBN('1') - await transferMockSubgraphFromL1(l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata) - // Prepare the rounding attack by setting up an indexer and collecting a lot of query fees - const curatorTokens = toGRT('10000') - const collectTokens = curatorTokens.mul(20) - await staking.connect(governor).setCurationPercentage(100000) - // Set up an indexer account with some stake - await grt.connect(governor).mint(attacker.address, toGRT('1000000')) - - await grt.connect(attacker).approve(staking.address, toGRT('1000000')) - await staking.connect(attacker).stake(toGRT('100000')) - const channelKey = deriveChannelKey() - // Allocate to the same deployment ID - await staking - .connect(attacker) - .allocateFrom( - attacker.address, - newSubgraph0.subgraphDeploymentID, - toGRT('100000'), - channelKey.address, - randomHexBytes(32), - await channelKey.generateProof(attacker.address), - ) - // Spoof some query fees, 10% of which will go to the Curation pool - await staking.connect(attacker).collect(collectTokens, channelKey.address) - - const callhookData = defaultAbiCoder.encode(['uint8', 'uint256', 'address'], [toBN(1), l1SubgraphId, me.address]) - const curatorTokensBefore = await grt.balanceOf(me.address) - const gnsBalanceBefore = await grt.balanceOf(gns.address) - const tx = gatewayFinalizeTransfer(l1GNSMock.address, gns.address, curatorTokens, callhookData) - await expect(tx) - .emit(gns, 'CuratorBalanceReturnedToBeneficiary') - .withArgs(l1SubgraphId, me.address, curatorTokens) - const curatorTokensAfter = await grt.balanceOf(me.address) - expect(curatorTokensAfter).eq(curatorTokensBefore.add(curatorTokens)) - const gnsBalanceAfter = await grt.balanceOf(gns.address) - // gatewayFinalizeTransfer will mint the 
tokens that are sent to the curator, - // so the GNS balance should be the same - expect(gnsBalanceAfter).eq(gnsBalanceBefore) - }) - it('if a subgraph was deprecated after transfer, it returns the tokens to the beneficiary', async function () { const l1GNSMockL2Alias = await helpers.getL2SignerFromL1(l1GNSMock.address) // Eth for gas: diff --git a/packages/contracts-test/tests/unit/l2/l2Staking.test.ts b/packages/contracts-test/tests/unit/l2/l2Staking.test.ts index 39dc75e7a..cf22eaba0 100644 --- a/packages/contracts-test/tests/unit/l2/l2Staking.test.ts +++ b/packages/contracts-test/tests/unit/l2/l2Staking.test.ts @@ -1,4 +1,4 @@ -import { IL2Staking } from '@graphprotocol/contracts' +import { IL2Staking, IRewardsManager } from '@graphprotocol/contracts' import { L2GraphTokenGateway } from '@graphprotocol/contracts' import { GraphToken } from '@graphprotocol/contracts' import { EpochManager, L1GNS, L1GraphTokenGateway, L1Staking } from '@graphprotocol/contracts' @@ -35,6 +35,7 @@ describe('L2Staking', () => { let l2GraphTokenGateway: L2GraphTokenGateway let staking: IL2Staking let grt: GraphToken + let rewardsManager: IRewardsManager const tokens10k = toGRT('10000') const tokens100k = toGRT('100000') @@ -88,6 +89,7 @@ describe('L2Staking', () => { l1StakingMock = l1MockContracts.L1Staking as L1Staking l1GNSMock = l1MockContracts.L1GNS as L1GNS l1GRTGatewayMock = l1MockContracts.L1GraphTokenGateway as L1GraphTokenGateway + rewardsManager = fixtureContracts.RewardsManager as IRewardsManager // Deploy L2 arbitrum bridge await fixture.loadL2ArbitrumBridge(governor) @@ -99,6 +101,10 @@ describe('L2Staking', () => { await grt.connect(me).approve(staking.address, tokens1m) await grt.connect(governor).mint(other.address, tokens1m) await grt.connect(other).approve(staking.address, tokens1m) + + // HACK: we set the staking contract as the subgraph service to make tests pass. + // This is due to the test suite being outdated. 
+ await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts-test/tests/unit/rewards/rewards-calculations.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-calculations.test.ts index e07717805..168166745 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-calculations.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-calculations.test.ts @@ -146,6 +146,9 @@ describe('Rewards - Calculations', () => { await grt.connect(wallet).approve(staking.address, toGRT('1000000')) await grt.connect(wallet).approve(curation.address, toGRT('1000000')) } + + // Set the staking contract as the subgraph service so it can call takeRewards + await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts-test/tests/unit/rewards/rewards-distribution.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-distribution.test.ts index d4a55c1b9..e34ace2fd 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-distribution.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-distribution.test.ts @@ -85,6 +85,9 @@ describe('Rewards - Distribution', () => { await grt.connect(wallet).approve(staking.address, toGRT('1000000')) await grt.connect(wallet).approve(curation.address, toGRT('1000000')) } + + // Set the staking contract as the subgraph service so it can call takeRewards + await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts index ee60c3dd2..4db522378 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts @@ -97,6 +97,9 @@ 
describe('Rewards - Eligibility Oracle', () => { await grt.connect(wallet).approve(staking.address, toGRT('1000000')) await grt.connect(wallet).approve(curation.address, toGRT('1000000')) } + + // Set the staking contract as the subgraph service so it can call takeRewards + await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { @@ -108,13 +111,13 @@ describe('Rewards - Eligibility Oracle', () => { }) describe('rewards eligibility oracle', function () { - it('should reject setRewardsEligibilityOracle if unauthorized', async function () { + it('should reject setProviderEligibilityOracle if unauthorized', async function () { const MockRewardsEligibilityOracleFactory = await hre.ethers.getContractFactory( 'contracts/tests/MockRewardsEligibilityOracle.sol:MockRewardsEligibilityOracle', ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(true) await mockOracle.deployed() - const tx = rewardsManager.connect(indexer1).setRewardsEligibilityOracle(mockOracle.address) + const tx = rewardsManager.connect(indexer1).setProviderEligibilityOracle(mockOracle.address) await expect(tx).revertedWith('Only Controller governor') }) @@ -125,12 +128,12 @@ describe('Rewards - Eligibility Oracle', () => { const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(true) await mockOracle.deployed() - const tx = rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + const tx = rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) await expect(tx) - .emit(rewardsManager, 'RewardsEligibilityOracleSet') + .emit(rewardsManager, 'ProviderEligibilityOracleSet') .withArgs(constants.AddressZero, mockOracle.address) - expect(await rewardsManager.getRewardsEligibilityOracle()).eq(mockOracle.address) + expect(await rewardsManager.getProviderEligibilityOracle()).eq(mockOracle.address) }) it('should allow setting rewards eligibility oracle to zero address', async function 
() { @@ -140,32 +143,32 @@ describe('Rewards - Eligibility Oracle', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(true) await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Then set to zero address to disable - const tx = rewardsManager.connect(governor).setRewardsEligibilityOracle(constants.AddressZero) + const tx = rewardsManager.connect(governor).setProviderEligibilityOracle(constants.AddressZero) await expect(tx) - .emit(rewardsManager, 'RewardsEligibilityOracleSet') + .emit(rewardsManager, 'ProviderEligibilityOracleSet') .withArgs(mockOracle.address, constants.AddressZero) - expect(await rewardsManager.getRewardsEligibilityOracle()).eq(constants.AddressZero) + expect(await rewardsManager.getProviderEligibilityOracle()).eq(constants.AddressZero) }) it('should reject setting oracle that does not support interface', async function () { // Try to set an EOA (externally owned account) as the rewards eligibility oracle - const tx = rewardsManager.connect(governor).setRewardsEligibilityOracle(indexer1.address) + const tx = rewardsManager.connect(governor).setProviderEligibilityOracle(indexer1.address) // EOA doesn't have code, so the call will revert (error message may vary by ethers version) await expect(tx).to.be.reverted }) - it('should reject setting oracle that does not support IRewardsEligibility interface', async function () { - // Deploy a contract that supports ERC165 but not IRewardsEligibility + it('should reject setting oracle that does not support IProviderEligibility interface', async function () { + // Deploy a contract that supports ERC165 but not IProviderEligibility const MockERC165Factory = await hre.ethers.getContractFactory('contracts/tests/MockERC165.sol:MockERC165') const mockERC165 = await MockERC165Factory.deploy() await mockERC165.deployed() - const tx = 
rewardsManager.connect(governor).setRewardsEligibilityOracle(mockERC165.address) - await expect(tx).revertedWith('Contract does not support IRewardsEligibility interface') + const tx = rewardsManager.connect(governor).setProviderEligibilityOracle(mockERC165.address) + await expect(tx).revertedWith('Contract does not support IProviderEligibility interface') }) it('should not emit event when setting same oracle address', async function () { @@ -174,11 +177,11 @@ describe('Rewards - Eligibility Oracle', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(true) await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Setting the same oracle again should not emit an event - const tx = rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) - await expect(tx).to.not.emit(rewardsManager, 'RewardsEligibilityOracleSet') + const tx = rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) + await expect(tx).to.not.emit(rewardsManager, 'ProviderEligibilityOracleSet') }) }) @@ -192,7 +195,7 @@ describe('Rewards - Eligibility Oracle', () => { await mockOracle.deployed() // Set the rewards eligibility oracle - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -237,7 +240,7 @@ describe('Rewards - Eligibility Oracle', () => { await mockOracle.deployed() // Set the rewards eligibility oracle - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -292,7 +295,7 @@ 
describe('Rewards - Eligibility Oracle', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -320,7 +323,7 @@ describe('Rewards - Eligibility Oracle', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny indexer await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -362,7 +365,7 @@ describe('Rewards - Eligibility Oracle', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(true) // Start eligible await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -407,7 +410,7 @@ describe('Rewards - Eligibility Oracle', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Start ineligible await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -495,7 +498,7 @@ describe('Rewards - Eligibility Oracle', () => { it('should allow rewards when REO is zero address (disabled)', async function () { // Ensure REO is not set (zero address = disabled) - expect(await 
rewardsManager.getRewardsEligibilityOracle()).eq(constants.AddressZero) + expect(await rewardsManager.getProviderEligibilityOracle()).eq(constants.AddressZero) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -574,7 +577,7 @@ describe('Rewards - Eligibility Oracle', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) await helpers.mineEpoch(epochManager) await setupIndexerAllocation() diff --git a/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts index 132790e51..3fdd15ee6 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts @@ -58,7 +58,7 @@ describe('RewardsManager interfaces', () => { }) it('IRewardsManager should have stable interface ID', () => { - expect(IRewardsManager__factory.interfaceId).to.equal('0x36b70adb') + expect(IRewardsManager__factory.interfaceId).to.equal('0x7e0447a1') }) }) @@ -93,7 +93,7 @@ describe('RewardsManager interfaces', () => { }) it('should return zero address for rewards eligibility oracle when not set', async function () { - const oracle = await rewardsManager.getRewardsEligibilityOracle() + const oracle = await rewardsManager.getProviderEligibilityOracle() expect(oracle).to.equal(constants.AddressZero) }) diff --git a/packages/contracts-test/tests/unit/rewards/rewards-reclaim.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-reclaim.test.ts index ece4b213d..a1a17269a 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-reclaim.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-reclaim.test.ts @@ -109,6 +109,9 @@ describe('Rewards - Reclaim 
Addresses', () => { await grt.connect(wallet).approve(staking.address, toGRT('1000000')) await grt.connect(wallet).approve(curation.address, toGRT('1000000')) } + + // Set the staking contract as the subgraph service so it can call takeRewards + await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { @@ -303,7 +306,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -367,7 +370,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -428,7 +431,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -479,7 +482,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await 
rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -521,7 +524,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -570,7 +573,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -601,7 +604,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(true) // Allow await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) @@ -729,6 +732,39 @@ describe('Rewards - Reclaim Addresses', () => { await expect(tx).to.not.emit(rewardsManager, 'RewardsReclaimed') }) + it('should return 0 when reason is NONE', async function () { + // Setup allocation in real staking contract + await setupIndexerAllocation() + + // Also set allocation data in mock so RewardsManager can query it + const tokensAllocated = toGRT('12500') + await mockSubgraphService.setAllocation( + allocationID1, + true, + indexer1.address, + subgraphDeploymentID1, + tokensAllocated, + 0, + 0, + ) + await 
mockSubgraphService.setSubgraphAllocatedTokens(subgraphDeploymentID1, tokensAllocated) + + // Jump to next epoch to accrue rewards + await helpers.mineEpoch(epochManager) + + // Call reclaimRewards with NONE (HashZero) - should return 0 + const result = await mockSubgraphService.callStatic.callReclaimRewards( + rewardsManager.address, + HashZero, + allocationID1, + ) + expect(result).eq(0) + + // Verify no RewardsReclaimed event emitted + const tx = await mockSubgraphService.callReclaimRewards(rewardsManager.address, HashZero, allocationID1) + await expect(tx).to.not.emit(rewardsManager, 'RewardsReclaimed') + }) + it('should reject when called by unauthorized address', async function () { // Try to call reclaimRewards directly from indexer1 (not the subgraph service) const abiCoder = hre.ethers.utils.defaultAbiCoder @@ -1039,7 +1075,7 @@ describe('Rewards - Reclaim Addresses', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) diff --git a/packages/contracts-test/tests/unit/rewards/rewards-signal-allocation-update.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-signal-allocation-update.test.ts index accf1ea60..62097acbb 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-signal-allocation-update.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-signal-allocation-update.test.ts @@ -58,6 +58,9 @@ describe('Rewards: Signal and Allocation Update Accounting', () => { curation = contracts.Curation as Curation staking = contracts.Staking as IStaking rewardsManager = contracts.RewardsManager as RewardsManager + + // Set the staking contract as the subgraph service so it can call takeRewards + await 
rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts-test/tests/unit/rewards/rewards-subgraph-service.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-subgraph-service.test.ts index d92b20298..58338cac8 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-subgraph-service.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-subgraph-service.test.ts @@ -436,7 +436,7 @@ describe('Rewards - SubgraphService', () => { ) const mockREO = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny by default await mockREO.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockREO.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockREO.address) // Setup: Create signal const signalled1 = toGRT('1500') diff --git a/packages/contracts-test/tests/unit/rewards/rewards.test.ts b/packages/contracts-test/tests/unit/rewards/rewards.test.ts index 09e5e39a1..240d78178 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards.test.ts @@ -159,6 +159,10 @@ describe('Rewards', () => { await grt.connect(wallet).approve(staking.address, toGRT('1000000')) await grt.connect(wallet).approve(curation.address, toGRT('1000000')) } + + // HACK: we set the staking contract as the subgraph service to make tests pass. + // This is due to the test suite being outdated. 
+ await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { @@ -1031,7 +1035,7 @@ describe('Rewards', () => { ) const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny await mockOracle.deployed() - await rewardsManager.connect(governor).setRewardsEligibilityOracle(mockOracle.address) + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) // Align with the epoch boundary await helpers.mineEpoch(epochManager) diff --git a/packages/contracts-test/tests/unit/staking/allocation.test.ts b/packages/contracts-test/tests/unit/staking/allocation.test.ts index dd28aa73d..76de77a35 100644 --- a/packages/contracts-test/tests/unit/staking/allocation.test.ts +++ b/packages/contracts-test/tests/unit/staking/allocation.test.ts @@ -379,6 +379,10 @@ describe('Staking:Allocation', () => { // Give some funds to the delegator and approve staking contract to use funds on delegator behalf await grt.connect(governor).mint(delegator.address, tokensToDelegate) await grt.connect(delegator).approve(staking.address, tokensToDelegate) + + // HACK: we set the staking contract as the subgraph service to make tests pass. + // This is due to the test suite being outdated. 
+ await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts-test/tests/unit/staking/delegation.test.ts b/packages/contracts-test/tests/unit/staking/delegation.test.ts index 71f911006..3542e817e 100644 --- a/packages/contracts-test/tests/unit/staking/delegation.test.ts +++ b/packages/contracts-test/tests/unit/staking/delegation.test.ts @@ -1,4 +1,4 @@ -import { EpochManager } from '@graphprotocol/contracts' +import { EpochManager, IRewardsManager } from '@graphprotocol/contracts' import { GraphToken } from '@graphprotocol/contracts' import { IStaking } from '@graphprotocol/contracts' import { deriveChannelKey, GraphNetworkContracts, helpers, randomHexBytes, toBN, toGRT } from '@graphprotocol/sdk' @@ -29,6 +29,7 @@ describe('Staking::Delegation', () => { let epochManager: EpochManager let grt: GraphToken let staking: IStaking + let rewardsManager: IRewardsManager // Test values const poi = randomHexBytes() @@ -159,6 +160,7 @@ describe('Staking::Delegation', () => { epochManager = contracts.EpochManager as EpochManager grt = contracts.GraphToken as GraphToken staking = contracts.Staking as IStaking + rewardsManager = contracts.RewardsManager as IRewardsManager // Distribute test funds for (const wallet of [delegator, delegator2]) { @@ -173,6 +175,10 @@ describe('Staking::Delegation', () => { } await grt.connect(governor).mint(assetHolder.address, tokensToCollect) await grt.connect(assetHolder).approve(staking.address, tokensToCollect) + + // HACK: we set the staking contract as the subgraph service to make tests pass. + // This is due to the test suite being outdated. 
+ await rewardsManager.connect(governor).setSubgraphService(staking.address) }) beforeEach(async function () { diff --git a/packages/contracts/contracts/governance/Controller.sol b/packages/contracts/contracts/governance/Controller.sol index 3f289ca7d..af9c78bd8 100644 --- a/packages/contracts/contracts/governance/Controller.sol +++ b/packages/contracts/contracts/governance/Controller.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-indexed-events, gas-small-strings diff --git a/packages/contracts/contracts/governance/Governed.sol b/packages/contracts/contracts/governance/Governed.sol index d20df43a2..6a31cffea 100644 --- a/packages/contracts/contracts/governance/Governed.sol +++ b/packages/contracts/contracts/governance/Governed.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; /* solhint-disable gas-custom-errors */ // Cannot use custom errors with 0.7.6 diff --git a/packages/contracts/contracts/governance/Pausable.sol b/packages/contracts/contracts/governance/Pausable.sol index d7a1824f2..8f5614231 100644 --- a/packages/contracts/contracts/governance/Pausable.sol +++ b/packages/contracts/contracts/governance/Pausable.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-indexed-events diff --git a/packages/contracts/contracts/l2/curation/L2Curation.sol b/packages/contracts/contracts/l2/curation/L2Curation.sol index 56e83c13a..fd26bd2ac 100644 --- a/packages/contracts/contracts/l2/curation/L2Curation.sol +++ b/packages/contracts/contracts/l2/curation/L2Curation.sol @@ -171,11 +171,8 @@ contract L2Curation is 
CurationV3Storage, GraphUpgradeable, IL2Curation { * @param _tokens Amount of Graph Tokens to add to reserves */ function collect(bytes32 _subgraphDeploymentID, uint256 _tokens) external override { - // Only SubgraphService and Staking contract are authorized as callers - require( - msg.sender == subgraphService || msg.sender == address(staking()), - "Caller must be the subgraph service or staking contract" - ); + // Only SubgraphService is authorized as caller + require(msg.sender == subgraphService, "Caller must be the subgraph service"); // Must be curated to accept tokens require(isCurated(_subgraphDeploymentID), "Subgraph deployment must be curated to collect fees"); diff --git a/packages/contracts/contracts/rewards/RewardsManager.sol b/packages/contracts/contracts/rewards/RewardsManager.sol index 0b223429c..846767799 100644 --- a/packages/contracts/contracts/rewards/RewardsManager.sol +++ b/packages/contracts/contracts/rewards/RewardsManager.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.7.6; +pragma solidity ^0.7.6; pragma abicoder v2; import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; @@ -16,24 +16,36 @@ import { IRewardsManager } from "@graphprotocol/interfaces/contracts/contracts/r import { IRewardsManagerDeprecated } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsManagerDeprecated.sol"; import { IIssuanceAllocationDistribution } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceAllocationDistribution.sol"; import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; -import { IRewardsEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; +import { IProviderEligibilityManagement } from 
"@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol"; import { RewardsCondition } from "@graphprotocol/interfaces/contracts/contracts/rewards/RewardsCondition.sol"; /** * @title Rewards Manager Contract * @author Edge & Node - * @notice Manages indexing rewards distribution using a two-level accumulation model: - * signal → subgraph → allocation. See docs/RewardAccountingSafety.md for details. + * @notice Manages rewards distribution for indexers and delegators in the Graph Protocol + * @dev Tracks how inflationary GRT rewards should be handed out. Relies on the Curation contract + * and the Staking contract. Signaled GRT in Curation determine what percentage of the tokens go + * towards each subgraph. Then each Subgraph can have multiple Indexers Staked on it. Thus, the + * total rewards for the Subgraph are split up for each Indexer based on how much they have Staked on + * that Subgraph. * - * @dev Issuance source: `issuanceAllocator` if set, otherwise `issuancePerBlock` storage. - * Getter functions (getAccRewardsPerSignal, getRewards, etc.) may overestimate until - * takeRewards is called due to pending state updates. + * Note: + * The contract provides getter functions to query the state of accrued rewards: + * - getAccRewardsPerSignal + * - getAccRewardsForSubgraph + * - getAccRewardsPerAllocatedToken + * - getRewards + * These functions may overestimate the actual rewards due to changes in the total supply + * until the actual takeRewards function is called. + * @custom:security-contact Please email security+contracts@ thegraph.com (remove space) if you find any bugs. We might have an active bug bounty program. 
*/ contract RewardsManager is GraphUpgradeable, IERC165, IRewardsManager, IIssuanceTarget, + IProviderEligibilityManagement, IRewardsManagerDeprecated, RewardsManagerV6Storage { @@ -71,7 +83,8 @@ contract RewardsManager is return interfaceId == type(IERC165).interfaceId || interfaceId == type(IIssuanceTarget).interfaceId || - interfaceId == type(IRewardsManager).interfaceId; + interfaceId == type(IRewardsManager).interfaceId || + interfaceId == type(IProviderEligibilityManagement).interfaceId; } // -- Config -- @@ -197,26 +210,26 @@ contract RewardsManager is } /** - * @inheritdoc IRewardsManager - * @dev Note that the rewards eligibility oracle can be set to the zero address to disable use of an oracle, in + * @inheritdoc IProviderEligibilityManagement + * @dev Note that the eligibility oracle can be set to the zero address to disable use of an oracle, in * which case no indexers will be denied rewards due to eligibility. */ - function setRewardsEligibilityOracle(address newRewardsEligibilityOracle) external override onlyGovernor { - if (address(rewardsEligibilityOracle) != newRewardsEligibilityOracle) { - // Check that the contract supports the IRewardsEligibility interface - // Allow zero address to disable the oracle - if (newRewardsEligibilityOracle != address(0)) { - // solhint-disable-next-line gas-small-strings - require( - IERC165(newRewardsEligibilityOracle).supportsInterface(type(IRewardsEligibility).interfaceId), - "Contract does not support IRewardsEligibility interface" - ); - } - - address oldRewardsEligibilityOracle = address(rewardsEligibilityOracle); - rewardsEligibilityOracle = IRewardsEligibility(newRewardsEligibilityOracle); - emit RewardsEligibilityOracleSet(oldRewardsEligibilityOracle, newRewardsEligibilityOracle); + function setProviderEligibilityOracle(IProviderEligibility oracle) external override onlyGovernor { + IProviderEligibility oldOracle = rewardsEligibilityOracle; + if (address(oldOracle) == address(oracle)) return; + + // Check 
that the contract supports the IProviderEligibility interface + // Allow zero address to disable the oracle + if (address(oracle) != address(0)) { + // solhint-disable-next-line gas-small-strings + require( + IERC165(address(oracle)).supportsInterface(type(IProviderEligibility).interfaceId), + "Contract does not support IProviderEligibility interface" + ); } + + rewardsEligibilityOracle = oracle; + emit ProviderEligibilityOracleSet(oldOracle, oracle); } /** @@ -325,9 +338,9 @@ contract RewardsManager is } /** - * @inheritdoc IRewardsManager + * @inheritdoc IProviderEligibilityManagement */ - function getRewardsEligibilityOracle() external view override returns (IRewardsEligibility) { + function getProviderEligibilityOracle() external view override returns (IProviderEligibility) { return rewardsEligibilityOracle; } @@ -446,19 +459,13 @@ contract RewardsManager is /** * @notice Get total allocated tokens for a subgraph across all issuers * @param _subgraphDeploymentID Subgraph deployment - * @return Total tokens allocated to this subgraph - */ - function _getSubgraphAllocatedTokens(bytes32 _subgraphDeploymentID) private view returns (uint256) { - uint256 subgraphAllocatedTokens = 0; - address[2] memory rewardsIssuers = [address(staking()), address(subgraphService)]; - for (uint256 i = 0; i < rewardsIssuers.length; ++i) { - if (rewardsIssuers[i] != address(0)) { - subgraphAllocatedTokens += IRewardsIssuer(rewardsIssuers[i]).getSubgraphAllocatedTokens( - _subgraphDeploymentID - ); - } - } - return subgraphAllocatedTokens; + * @return subgraphAllocatedTokens Total tokens allocated to this subgraph + */ + function _getSubgraphAllocatedTokens( + bytes32 _subgraphDeploymentID + ) private view returns (uint256 subgraphAllocatedTokens) { + if (address(subgraphService) != address(0)) + subgraphAllocatedTokens += subgraphService.getSubgraphAllocatedTokens(_subgraphDeploymentID); } // -- Updates -- @@ -578,7 +585,7 @@ contract RewardsManager is /** * @inheritdoc IRewardsManager 
- * @dev Hook called from the Staking contract on allocate() and close() + * @dev Hook called from the IRewardsIssuer contract on allocate() and close() * * ## Claimability Behavior * @@ -626,10 +633,7 @@ contract RewardsManager is * takeRewards(). */ function getRewards(address _rewardsIssuer, address _allocationID) external view override returns (uint256) { - require( - _rewardsIssuer == address(staking()) || _rewardsIssuer == address(subgraphService), - "Not a rewards issuer" - ); + require(_rewardsIssuer == address(subgraphService), "Not a rewards issuer"); ( bool isActive, @@ -783,7 +787,7 @@ contract RewardsManager is /** * @inheritdoc IRewardsManager * @dev This function can only be called by an authorized rewards issuer which are - * the staking contract (for legacy allocations), and the subgraph service (for new allocations). + * - the subgraph service (for allocations). * Mints 0 tokens if the allocation is not active. * @dev First successful reclaim wins - short-circuits on reclaim: * - If subgraph denied with reclaim address → reclaim to SUBGRAPH_DENIED address (eligibility NOT checked) @@ -793,10 +797,7 @@ contract RewardsManager is */ function takeRewards(address _allocationID) external override returns (uint256) { address rewardsIssuer = msg.sender; - require( - rewardsIssuer == address(staking()) || rewardsIssuer == address(subgraphService), - "Caller must be a rewards issuer" - ); + require(rewardsIssuer == address(subgraphService), "Caller must be a rewards issuer"); (uint256 rewards, address indexer, bytes32 subgraphDeploymentID) = _calcAllocationRewards( rewardsIssuer, diff --git a/packages/contracts/contracts/rewards/RewardsManagerStorage.sol b/packages/contracts/contracts/rewards/RewardsManagerStorage.sol index 14a8061b0..5969d11c6 100644 --- a/packages/contracts/contracts/rewards/RewardsManagerStorage.sol +++ b/packages/contracts/contracts/rewards/RewardsManagerStorage.sol @@ -5,10 +5,10 @@ // TODO: Re-enable and fix issues when publishing a 
new version // solhint-disable named-parameters-mapping -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; import { IIssuanceAllocationDistribution } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceAllocationDistribution.sol"; -import { IRewardsEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; import { IRewardsIssuer } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsIssuer.sol"; import { IRewardsManager } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsManager.sol"; import { IRewardsManagerDeprecated } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsManagerDeprecated.sol"; @@ -102,7 +102,7 @@ abstract contract RewardsManagerV6Storage is RewardsManagerV5Storage { /// @dev Address of the rewards eligibility oracle contract /// When set, indexers must pass eligibility check to claim rewards. /// Zero address disables eligibility checks. - IRewardsEligibility internal rewardsEligibilityOracle; + IProviderEligibility internal rewardsEligibilityOracle; /// @dev Address of the issuance allocator /// When set, determines GRT issued per block. Zero address uses issuancePerBlock storage value. 
diff --git a/packages/contracts/contracts/tests/MockERC165.sol b/packages/contracts/contracts/tests/MockERC165.sol index 056493fd3..446c752a7 100644 --- a/packages/contracts/contracts/tests/MockERC165.sol +++ b/packages/contracts/contracts/tests/MockERC165.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.7.6; +pragma solidity ^0.7.6; import { IERC165 } from "@openzeppelin/contracts/introspection/IERC165.sol"; diff --git a/packages/contracts/contracts/tests/MockIssuanceAllocator.sol b/packages/contracts/contracts/tests/MockIssuanceAllocator.sol index 6113b8bc0..24e482a55 100644 --- a/packages/contracts/contracts/tests/MockIssuanceAllocator.sol +++ b/packages/contracts/contracts/tests/MockIssuanceAllocator.sol @@ -2,7 +2,7 @@ // solhint-disable gas-increment-by-one, gas-indexed-events, named-parameters-mapping, use-natspec -pragma solidity 0.7.6; +pragma solidity ^0.7.6; pragma abicoder v2; import { IERC165 } from "@openzeppelin/contracts/introspection/IERC165.sol"; diff --git a/packages/contracts/contracts/tests/MockRewardsEligibilityOracle.sol b/packages/contracts/contracts/tests/MockRewardsEligibilityOracle.sol index 6b13d4d76..b0ac05a19 100644 --- a/packages/contracts/contracts/tests/MockRewardsEligibilityOracle.sol +++ b/packages/contracts/contracts/tests/MockRewardsEligibilityOracle.sol @@ -2,9 +2,9 @@ // solhint-disable named-parameters-mapping -pragma solidity 0.7.6; +pragma solidity ^0.7.6; -import { IRewardsEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; import { IERC165 } from "@openzeppelin/contracts/introspection/IERC165.sol"; /** @@ -13,7 +13,7 @@ import { IERC165 } from "@openzeppelin/contracts/introspection/IERC165.sol"; * @notice A simple mock contract for the RewardsEligibilityOracle interface * @dev A simple mock contract for the 
RewardsEligibilityOracle interface */ -contract MockRewardsEligibilityOracle is IRewardsEligibility, IERC165 { +contract MockRewardsEligibilityOracle is IProviderEligibility, IERC165 { /// @dev Mapping to store eligibility status for each indexer mapping(address => bool) private eligible; @@ -50,7 +50,7 @@ contract MockRewardsEligibilityOracle is IRewardsEligibility, IERC165 { } /** - * @inheritdoc IRewardsEligibility + * @inheritdoc IProviderEligibility */ function isEligible(address indexer) external view override returns (bool) { // If the indexer has been explicitly set, return that value @@ -66,6 +66,6 @@ contract MockRewardsEligibilityOracle is IRewardsEligibility, IERC165 { * @inheritdoc IERC165 */ function supportsInterface(bytes4 interfaceId) public pure override returns (bool) { - return interfaceId == type(IRewardsEligibility).interfaceId || interfaceId == type(IERC165).interfaceId; + return interfaceId == type(IProviderEligibility).interfaceId || interfaceId == type(IERC165).interfaceId; } } diff --git a/packages/contracts/contracts/tests/MockSubgraphService.sol b/packages/contracts/contracts/tests/MockSubgraphService.sol index cdee9ab6a..1e355923b 100644 --- a/packages/contracts/contracts/tests/MockSubgraphService.sol +++ b/packages/contracts/contracts/tests/MockSubgraphService.sol @@ -2,7 +2,7 @@ // solhint-disable named-parameters-mapping -pragma solidity 0.7.6; +pragma solidity ^0.7.6; import { IRewardsIssuer } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsIssuer.sol"; diff --git a/packages/contracts/contracts/upgrades/GraphProxy.sol b/packages/contracts/contracts/upgrades/GraphProxy.sol index 65216a4d7..624c3a650 100644 --- a/packages/contracts/contracts/upgrades/GraphProxy.sol +++ b/packages/contracts/contracts/upgrades/GraphProxy.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; // TODO: Re-enable and fix issues when publishing a 
new version // solhint-disable gas-small-strings diff --git a/packages/contracts/contracts/upgrades/GraphProxyAdmin.sol b/packages/contracts/contracts/upgrades/GraphProxyAdmin.sol index e72bf3626..e603a6a50 100644 --- a/packages/contracts/contracts/upgrades/GraphProxyAdmin.sol +++ b/packages/contracts/contracts/upgrades/GraphProxyAdmin.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; /* solhint-disable gas-custom-errors */ // Cannot use custom errors with 0.7.6 diff --git a/packages/contracts/contracts/upgrades/GraphProxyStorage.sol b/packages/contracts/contracts/upgrades/GraphProxyStorage.sol index 4c3d2e4de..d550d18f0 100644 --- a/packages/contracts/contracts/upgrades/GraphProxyStorage.sol +++ b/packages/contracts/contracts/upgrades/GraphProxyStorage.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; /* solhint-disable gas-custom-errors */ // Cannot use custom errors with 0.7.6 diff --git a/packages/contracts/contracts/upgrades/GraphUpgradeable.sol b/packages/contracts/contracts/upgrades/GraphUpgradeable.sol index 466084fba..a6cc7b8c6 100644 --- a/packages/contracts/contracts/upgrades/GraphUpgradeable.sol +++ b/packages/contracts/contracts/upgrades/GraphUpgradeable.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || ^0.8.27; /* solhint-disable gas-custom-errors */ // Cannot use custom errors with 0.7.6 diff --git a/packages/contracts/contracts/utils/TokenUtils.sol b/packages/contracts/contracts/utils/TokenUtils.sol index 10c244e26..f4c0f58f5 100644 --- a/packages/contracts/contracts/utils/TokenUtils.sol +++ b/packages/contracts/contracts/utils/TokenUtils.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27 || 0.8.33; +pragma solidity ^0.7.6 || 
^0.8.27; /* solhint-disable gas-custom-errors */ // Cannot use custom errors with 0.7.6 diff --git a/packages/deployment/deploy/rewards/eligibility/06_integrate.ts b/packages/deployment/deploy/rewards/eligibility/06_integrate.ts index b7670f7e3..3773c6982 100644 --- a/packages/deployment/deploy/rewards/eligibility/06_integrate.ts +++ b/packages/deployment/deploy/rewards/eligibility/06_integrate.ts @@ -19,7 +19,7 @@ const func: DeployScriptModule = async (env) => { ]) const client = graph.getPublicClient(env) as PublicClient - // Apply: RM.rewardsEligibilityOracle = REO (always governance TX) + // Apply: RM.providerEligibilityOracle = REO (always governance TX) await applyConfiguration(env, client, [createRMIntegrationCondition(reo.address)], { contractName: `${Contracts.horizon.RewardsManager.name}-REO`, contractAddress: rm.address, diff --git a/packages/deployment/docs/Design.md b/packages/deployment/docs/Design.md index c6f972507..d53d22125 100644 --- a/packages/deployment/docs/Design.md +++ b/packages/deployment/docs/Design.md @@ -110,7 +110,7 @@ graph LR RM -->|check eligibility| REO ``` -**Integration:** `RewardsManager.setRewardsEligibilityOracle(REO)` via governance +**Integration:** `RewardsManager.setProviderEligibilityOracle(REO)` via governance ### IssuanceAllocator Integration diff --git a/packages/deployment/docs/deploy/RewardsEligibilityOracleDeployment.md b/packages/deployment/docs/deploy/RewardsEligibilityOracleDeployment.md index 6d05be2e4..9a5c1bfde 100644 --- a/packages/deployment/docs/deploy/RewardsEligibilityOracleDeployment.md +++ b/packages/deployment/docs/deploy/RewardsEligibilityOracleDeployment.md @@ -60,7 +60,7 @@ pnpm hardhat deploy --tags rewards-eligibility-integrate --network ### Integration -- [ ] `RewardsManager.getRewardsEligibilityOracle()` returns REO address +- [ ] `RewardsManager.getProviderEligibilityOracle()` returns REO address ## Configuration Parameters diff --git a/packages/deployment/lib/contract-checks.ts 
b/packages/deployment/lib/contract-checks.ts index c12b324cd..412b5243e 100644 --- a/packages/deployment/lib/contract-checks.ts +++ b/packages/deployment/lib/contract-checks.ts @@ -746,15 +746,15 @@ export function formatAddress(address: string): string { /** * Create RewardsManager integration condition for REO * - * Checks that RewardsManager.getRewardsEligibilityOracle() == reoAddress + * Checks that RewardsManager.getProviderEligibilityOracle() == reoAddress */ export function createRMIntegrationCondition(reoAddress: string): ParamCondition { return { - name: 'rewardsEligibilityOracle', + name: 'providerEligibilityOracle', description: 'RewardsEligibilityOracle', abi: REWARDS_MANAGER_ABI, - getter: 'getRewardsEligibilityOracle', - setter: 'setRewardsEligibilityOracle', + getter: 'getProviderEligibilityOracle', + setter: 'setProviderEligibilityOracle', target: reoAddress, compare: addressEquals, format: formatAddress, diff --git a/packages/deployment/lib/issuance-deploy-utils.ts b/packages/deployment/lib/issuance-deploy-utils.ts index 4cf41496b..bd1b5f486 100644 --- a/packages/deployment/lib/issuance-deploy-utils.ts +++ b/packages/deployment/lib/issuance-deploy-utils.ts @@ -358,7 +358,7 @@ async function deployProxyWithOwnImpl( // Deploy OZ v5 TransparentUpgradeableProxy // Constructor: (address _logic, address initialOwner, bytes memory _data) // The proxy creates its own ProxyAdmin owned by initialOwner (governor) - // Use issuance-compiled proxy artifact (0.8.33) for consistent verification + // Use issuance-compiled proxy artifact (0.8.34) for consistent verification const proxyArtifact = loadTransparentProxyArtifact() const proxyResult = await deployFn( `${contract.name}_Proxy`, @@ -447,7 +447,7 @@ async function deployProxyWithSharedImpl( // Deploy OZ v5 TransparentUpgradeableProxy // Constructor: (address _logic, address initialOwner, bytes memory _data) - // Use issuance-compiled proxy artifact (0.8.33) for consistent verification + // Use 
issuance-compiled proxy artifact (0.8.34) for consistent verification const proxyArtifact = loadTransparentProxyArtifact() const proxyResult = await deployFn( `${contract.name}_Proxy`, diff --git a/packages/deployment/tasks/deployment-status.ts b/packages/deployment/tasks/deployment-status.ts index 7bf9061c0..8b5994f0d 100644 --- a/packages/deployment/tasks/deployment-status.ts +++ b/packages/deployment/tasks/deployment-status.ts @@ -340,10 +340,10 @@ async function getRewardsEligibilityOracleChecks( const currentREO = (await client.readContract({ address: rmAddress as `0x${string}`, abi: REWARDS_MANAGER_ABI, - functionName: 'getRewardsEligibilityOracle', + functionName: 'getProviderEligibilityOracle', })) as string const configured = currentREO.toLowerCase() === reoAddress.toLowerCase() - checks.push({ ok: configured, label: 'RM.rewardsEligibilityOracle == this' }) + checks.push({ ok: configured, label: 'RM.providerEligibilityOracle == this' }) } catch { // Function not available on old RM } diff --git a/packages/horizon/audits/2025-06-Indexing-Payments.pdf b/packages/horizon/audits/2025-06-Indexing-Payments.pdf new file mode 100644 index 000000000..bd5325dca Binary files /dev/null and b/packages/horizon/audits/2025-06-Indexing-Payments.pdf differ diff --git a/packages/horizon/contracts/data-service/DataService.sol b/packages/horizon/contracts/data-service/DataService.sol index 8206f4924..ccdec7151 100644 --- a/packages/horizon/contracts/data-service/DataService.sol +++ b/packages/horizon/contracts/data-service/DataService.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; import { IDataService } from "@graphprotocol/interfaces/contracts/data-service/IDataService.sol"; diff --git a/packages/horizon/contracts/data-service/DataServiceStorage.sol b/packages/horizon/contracts/data-service/DataServiceStorage.sol index 3ce552a7f..4ce5a7f20 100644 --- 
a/packages/horizon/contracts/data-service/DataServiceStorage.sol +++ b/packages/horizon/contracts/data-service/DataServiceStorage.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; /** * @title DataServiceStorage diff --git a/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol b/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol index 0f8cf3653..f68852513 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServiceFees.sol @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; import { IDataServiceFees } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceFees.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; import { ProvisionTracker } from "../libraries/ProvisionTracker.sol"; import { LinkedList } from "../../libraries/LinkedList.sol"; +import { StakeClaims } from "../libraries/StakeClaims.sol"; import { DataService } from "../DataService.sol"; import { DataServiceFeesV1Storage } from "./DataServiceFeesStorage.sol"; @@ -43,23 +44,17 @@ abstract contract DataServiceFees is DataService, DataServiceFeesV1Storage, IDat * @param _unlockTimestamp The timestamp when the tokens can be released */ function _lockStake(address _serviceProvider, uint256 _tokens, uint256 _unlockTimestamp) internal { - require(_tokens != 0, DataServiceFeesZeroTokens()); - feesProvisionTracker.lock(_graphStaking(), _serviceProvider, _tokens, _delegationRatio); - - ILinkedList.List storage claimsList = claimsLists[_serviceProvider]; - - // Save item and add to list - bytes32 claimId = _buildStakeClaimId(_serviceProvider, claimsList.nonce); - claims[claimId] = StakeClaim({ - tokens: _tokens, - createdAt: block.timestamp, - releasableAt: _unlockTimestamp, - 
nextClaim: bytes32(0) - }); - if (claimsList.count != 0) claims[claimsList.tail].nextClaim = claimId; - claimsList.addTail(claimId); - - emit StakeClaimLocked(_serviceProvider, claimId, _tokens, _unlockTimestamp); + StakeClaims.lockStake( + feesProvisionTracker, + claims, + claimsLists, + _graphStaking(), + address(this), + _delegationRatio, + _serviceProvider, + _tokens, + _unlockTimestamp + ); } /** @@ -82,7 +77,7 @@ abstract contract DataServiceFees is DataService, DataServiceFeesV1Storage, IDat _numClaimsToRelease ); - emit StakeClaimsReleased(_serviceProvider, claimsReleased, abi.decode(data, (uint256))); + emit StakeClaims.StakeClaimsReleased(_serviceProvider, claimsReleased, abi.decode(data, (uint256))); } /** @@ -94,23 +89,7 @@ abstract contract DataServiceFees is DataService, DataServiceFeesV1Storage, IDat * @return The updated accumulator data */ function _processStakeClaim(bytes32 _claimId, bytes memory _acc) private returns (bool, bytes memory) { - StakeClaim memory claim = _getStakeClaim(_claimId); - - // early exit - if (claim.releasableAt > block.timestamp) { - return (true, LinkedList.NULL_BYTES); - } - - // decode - (uint256 tokensClaimed, address serviceProvider) = abi.decode(_acc, (uint256, address)); - - // process - feesProvisionTracker.release(serviceProvider, claim.tokens); - emit StakeClaimReleased(serviceProvider, _claimId, claim.tokens, claim.releasableAt); - - // encode - _acc = abi.encode(tokensClaimed + claim.tokens, serviceProvider); - return (false, _acc); + return StakeClaims.processStakeClaim(feesProvisionTracker, claims, _claimId, _acc); } /** @@ -119,18 +98,7 @@ abstract contract DataServiceFees is DataService, DataServiceFeesV1Storage, IDat * @param _claimId The ID of the stake claim to delete */ function _deleteStakeClaim(bytes32 _claimId) private { - delete claims[_claimId]; - } - - /** - * @notice Gets the details of a stake claim - * @param _claimId The ID of the stake claim - * @return The stake claim details - */ - function 
_getStakeClaim(bytes32 _claimId) private view returns (StakeClaim memory) { - StakeClaim memory claim = claims[_claimId]; - require(claim.createdAt != 0, DataServiceFeesClaimNotFound(_claimId)); - return claim; + StakeClaims.deleteStakeClaim(claims, _claimId); } /** @@ -140,17 +108,6 @@ abstract contract DataServiceFees is DataService, DataServiceFeesV1Storage, IDat * @return The next stake claim ID */ function _getNextStakeClaim(bytes32 _claimId) private view returns (bytes32) { - return claims[_claimId].nextClaim; - } - - // forge-lint: disable-next-item(asm-keccak256) - /** - * @notice Builds a stake claim ID - * @param _serviceProvider The address of the service provider - * @param _nonce A nonce of the stake claim - * @return The stake claim ID - */ - function _buildStakeClaimId(address _serviceProvider, uint256 _nonce) private view returns (bytes32) { - return keccak256(abi.encodePacked(address(this), _serviceProvider, _nonce)); + return StakeClaims.getNextStakeClaim(claims, _claimId); } } diff --git a/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol b/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol index 384149201..4c5b89709 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServiceFeesStorage.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; -import { IDataServiceFees } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceFees.sol"; +import { StakeClaims } from "../libraries/StakeClaims.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; @@ -17,7 +17,7 @@ abstract contract DataServiceFeesV1Storage { mapping(address serviceProvider => uint256 tokens) public feesProvisionTracker; /// @notice List of all locked stake claims to be released to service providers - 
mapping(bytes32 claimId => IDataServiceFees.StakeClaim claim) public claims; + mapping(bytes32 claimId => StakeClaims.StakeClaim claim) public claims; /// @notice Service providers registered in the data service mapping(address serviceProvider => ILinkedList.List list) public claimsLists; diff --git a/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol b/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol index 7d0c8c522..8eed40165 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServicePausable.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IDataServicePausable } from "@graphprotocol/interfaces/contracts/data-service/IDataServicePausable.sol"; diff --git a/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol b/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol index 6dc2433ce..4770a9375 100644 --- a/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol +++ b/packages/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; import { IDataServicePausable } from "@graphprotocol/interfaces/contracts/data-service/IDataServicePausable.sol"; diff --git a/packages/horizon/contracts/data-service/libraries/ProvisionTracker.sol b/packages/horizon/contracts/data-service/libraries/ProvisionTracker.sol index 8f7ddff8d..d52bf13ad 100644 --- a/packages/horizon/contracts/data-service/libraries/ProvisionTracker.sol +++ b/packages/horizon/contracts/data-service/libraries/ProvisionTracker.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: 
Re-enable and fix issues when publishing a new version // solhint-disable gas-strict-inequalities diff --git a/packages/horizon/contracts/data-service/libraries/StakeClaims.sol b/packages/horizon/contracts/data-service/libraries/StakeClaims.sol new file mode 100644 index 000000000..7590b709c --- /dev/null +++ b/packages/horizon/contracts/data-service/libraries/StakeClaims.sol @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.27; + +import { ProvisionTracker } from "./ProvisionTracker.sol"; +import { IHorizonStaking } from "@graphprotocol/interfaces/contracts/horizon/IHorizonStaking.sol"; +import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; +import { LinkedList } from "../../libraries/LinkedList.sol"; + +/** + * @title StakeClaims library + * @author Edge & Node + * @notice Manages stake claims — provisioned stake locked for release to service providers. + */ +library StakeClaims { + using ProvisionTracker for mapping(address => uint256); + using LinkedList for ILinkedList.List; + + /** + * @notice A stake claim, representing provisioned stake that gets locked + * to be released to a service provider. + * @dev StakeClaims are stored in linked lists by service provider, ordered by + * creation timestamp. + * @param tokens The amount of tokens to be locked in the claim + * @param createdAt The timestamp when the claim was created + * @param releasableAt The timestamp when the tokens can be released + * @param nextClaim The next claim in the linked list + */ + struct StakeClaim { + uint256 tokens; + uint256 createdAt; + uint256 releasableAt; + bytes32 nextClaim; + } + + /* solhint-disable gas-indexed-events */ + /** + * @notice Emitted when a stake claim is created and stake is locked. 
+ * @param serviceProvider The address of the service provider + * @param claimId The id of the stake claim + * @param tokens The amount of tokens to lock in the claim + * @param unlockTimestamp The timestamp when the tokens can be released + */ + event StakeClaimLocked( + address indexed serviceProvider, + bytes32 indexed claimId, + uint256 tokens, + uint256 unlockTimestamp + ); + + /** + * @notice Emitted when a stake claim is released and stake is unlocked. + * @param serviceProvider The address of the service provider + * @param claimId The id of the stake claim + * @param tokens The amount of tokens released + * @param releasableAt The timestamp when the tokens were released + */ + event StakeClaimReleased( + address indexed serviceProvider, + bytes32 indexed claimId, + uint256 tokens, + uint256 releasableAt + ); + + /** + * @notice Emitted when a series of stake claims are released. + * @param serviceProvider The address of the service provider + * @param claimsCount The number of stake claims being released + * @param tokensReleased The total amount of tokens being released + */ + event StakeClaimsReleased(address indexed serviceProvider, uint256 claimsCount, uint256 tokensReleased); + /* solhint-enable gas-indexed-events */ + + /** + * @notice Thrown when attempting to get a stake claim that does not exist. + * @param claimId The id of the stake claim + */ + error StakeClaimsClaimNotFound(bytes32 claimId); + + /** + * @notice Emitted when trying to lock zero tokens in a stake claim + */ + error StakeClaimsZeroTokens(); + + /** + * @notice Locks stake for a service provider to back a payment. + * Creates a stake claim, which is stored in a linked list by service provider. + * @dev Requirements: + * - The associated provision must have enough available tokens to lock the stake. + * + * Emits a {StakeClaimLocked} event. 
+ * + * @param feesProvisionTracker The mapping that tracks the provision tokens for each service provider + * @param claims The mapping that stores stake claims by their ID + * @param claimsLists The mapping that stores linked lists of stake claims by service provider + * @param graphStaking The Horizon staking contract used to lock the tokens + * @param _dataService The address of the data service + * @param _delegationRatio The delegation ratio to use for the stake claim + * @param _serviceProvider The address of the service provider + * @param _tokens The amount of tokens to lock in the claim + * @param _unlockTimestamp The timestamp when the tokens can be released + */ + function lockStake( + mapping(address => uint256) storage feesProvisionTracker, + mapping(bytes32 => StakeClaim) storage claims, + mapping(address serviceProvider => ILinkedList.List list) storage claimsLists, + IHorizonStaking graphStaking, + address _dataService, + uint32 _delegationRatio, + address _serviceProvider, + uint256 _tokens, + uint256 _unlockTimestamp + ) external { + require(_tokens != 0, StakeClaimsZeroTokens()); + feesProvisionTracker.lock(graphStaking, _serviceProvider, _tokens, _delegationRatio); + + ILinkedList.List storage claimsList = claimsLists[_serviceProvider]; + + // Save item and add to list + bytes32 claimId = _buildStakeClaimId(_dataService, _serviceProvider, claimsList.nonce); + claims[claimId] = StakeClaim({ + tokens: _tokens, + createdAt: block.timestamp, + releasableAt: _unlockTimestamp, + nextClaim: bytes32(0) + }); + if (claimsList.count != 0) claims[claimsList.tail].nextClaim = claimId; + claimsList.addTail(claimId); + + emit StakeClaimLocked(_serviceProvider, claimId, _tokens, _unlockTimestamp); + } + + /** + * @notice Processes a stake claim, releasing the tokens if the claim has expired. + * @dev This function is used as a callback in the stake claims linked list traversal. 
+ * @param feesProvisionTracker The mapping that tracks the provision tokens for each service provider. + * @param claims The mapping that stores stake claims by their ID. + * @param _claimId The ID of the stake claim to process. + * @param _acc The accumulator data, which contains the total tokens claimed and the service provider address. + * @return Whether the stake claim is still locked, indicating that the traversal should continue or stop. + * @return The updated accumulator data + */ + function processStakeClaim( + mapping(address serviceProvider => uint256 tokens) storage feesProvisionTracker, + mapping(bytes32 claimId => StakeClaim claim) storage claims, + bytes32 _claimId, + bytes memory _acc + ) external returns (bool, bytes memory) { + StakeClaim memory claim = claims[_claimId]; + require(claim.createdAt != 0, StakeClaimsClaimNotFound(_claimId)); + + // early exit + if (claim.releasableAt > block.timestamp) { + return (true, LinkedList.NULL_BYTES); + } + + // decode + (uint256 tokensClaimed, address serviceProvider) = abi.decode(_acc, (uint256, address)); + + // process + feesProvisionTracker.release(serviceProvider, claim.tokens); + emit StakeClaimReleased(serviceProvider, _claimId, claim.tokens, claim.releasableAt); + + // encode + _acc = abi.encode(tokensClaimed + claim.tokens, serviceProvider); + return (false, _acc); + } + + /** + * @notice Deletes a stake claim. + * @dev This function is used as a callback in the stake claims linked list traversal. + * @param claims The mapping that stores stake claims by their ID + * @param claimId The ID of the stake claim to delete + */ + function deleteStakeClaim(mapping(bytes32 claimId => StakeClaim claim) storage claims, bytes32 claimId) external { + delete claims[claimId]; + } + + /** + * @notice Gets the next stake claim in the linked list + * @dev This function is used as a callback in the stake claims linked list traversal. 
+ * @param claims The mapping that stores stake claims by their ID + * @param claimId The ID of the stake claim + * @return The next stake claim ID + */ + function getNextStakeClaim( + mapping(bytes32 claimId => StakeClaim claim) storage claims, + bytes32 claimId + ) external view returns (bytes32) { + return claims[claimId].nextClaim; + } + + /** + * @notice Builds a stake claim ID + * @param dataService The address of the data service + * @param serviceProvider The address of the service provider + * @param nonce A nonce of the stake claim + * @return The stake claim ID + */ + function buildStakeClaimId( + address dataService, + address serviceProvider, + uint256 nonce + ) public pure returns (bytes32) { + return _buildStakeClaimId(dataService, serviceProvider, nonce); + } + + /** + * @notice Builds a stake claim ID + * @param _dataService The address of the data service + * @param _serviceProvider The address of the service provider + * @param _nonce A nonce of the stake claim + * @return The stake claim ID + */ + function _buildStakeClaimId( + address _dataService, + address _serviceProvider, + uint256 _nonce + ) internal pure returns (bytes32) { + // forge-lint: disable-next-line(asm-keccak256) + return keccak256(abi.encodePacked(_dataService, _serviceProvider, _nonce)); + } +} diff --git a/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol b/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol index ec0be49c3..202f4693c 100644 --- a/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol +++ b/packages/horizon/contracts/data-service/utilities/ProvisionManager.sol @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; -// TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-indexed-events // solhint-disable gas-strict-inequalities @@ -111,31 +110,15 @@ abstract contract ProvisionManager is Initializable, 
GraphDirectory, ProvisionMa */ error ProvisionManagerProvisionNotFound(address serviceProvider); - // forge-lint: disable-next-item(unwrapped-modifier-logic) /** * @notice Checks if the caller is authorized to manage the provision of a service provider. - * @param serviceProvider The address of the service provider. + * @param _serviceProvider The address of the service provider. */ - modifier onlyAuthorizedForProvision(address serviceProvider) { + function _requireAuthorizedForProvision(address _serviceProvider) internal view { require( - _graphStaking().isAuthorized(serviceProvider, address(this), msg.sender), - ProvisionManagerNotAuthorized(serviceProvider, msg.sender) + _graphStaking().isAuthorized(_serviceProvider, address(this), msg.sender), + ProvisionManagerNotAuthorized(_serviceProvider, msg.sender) ); - _; - } - - // Warning: Virtual modifiers are deprecated and scheduled for removal. - // forge-lint: disable-next-item(unwrapped-modifier-logic) - /** - * @notice Checks if a provision of a service provider is valid according - * to the parameter ranges established. - * @param serviceProvider The address of the service provider. - */ - modifier onlyValidProvision(address serviceProvider) virtual { - IHorizonStaking.Provision memory provision = _getProvision(serviceProvider); - _checkProvisionTokens(provision); - _checkProvisionParameters(provision, false); - _; } // forge-lint: disable-next-item(mixed-case-function) @@ -186,7 +169,7 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa * @param _max The maximum allowed value for the provision tokens. 
*/ function _setProvisionTokensRange(uint256 _min, uint256 _max) internal { - require(_min <= _max, ProvisionManagerInvalidRange(_min, _max)); + _requireLTE(_min, _max); _minimumProvisionTokens = _min; _maximumProvisionTokens = _max; emit ProvisionTokensRangeSet(_min, _max); @@ -198,7 +181,7 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa * @param _max The maximum allowed value for the max verifier cut. */ function _setVerifierCutRange(uint32 _min, uint32 _max) internal { - require(_min <= _max, ProvisionManagerInvalidRange(_min, _max)); + _requireLTE(_min, _max); require(PPMMath.isValidPPM(_max), ProvisionManagerInvalidRange(_min, _max)); _minimumVerifierCut = _min; _maximumVerifierCut = _max; @@ -211,12 +194,23 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa * @param _max The maximum allowed value for the thawing period. */ function _setThawingPeriodRange(uint64 _min, uint64 _max) internal { - require(_min <= _max, ProvisionManagerInvalidRange(_min, _max)); + _requireLTE(_min, _max); _minimumThawingPeriod = _min; _maximumThawingPeriod = _max; emit ThawingPeriodRangeSet(_min, _max); } + /** + * @notice Checks if a provision of a service provider is valid according + * to the parameter ranges established. + * @param _serviceProvider The address of the service provider. + */ + function _requireValidProvision(address _serviceProvider) internal view { + IHorizonStaking.Provision memory provision = _getProvision(_serviceProvider); + _checkProvisionTokens(provision); + _checkProvisionParameters(provision, false); + } + // -- checks -- /** @@ -224,8 +218,7 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa * @param _serviceProvider The address of the service provider. 
*/ function _checkProvisionTokens(address _serviceProvider) internal view virtual { - IHorizonStaking.Provision memory provision = _getProvision(_serviceProvider); - _checkProvisionTokens(provision); + _checkProvisionTokens(_getProvision(_serviceProvider)); } /** @@ -248,8 +241,7 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa * @param _checkPending If true, checks the pending provision parameters. */ function _checkProvisionParameters(address _serviceProvider, bool _checkPending) internal view virtual { - IHorizonStaking.Provision memory provision = _getProvision(_serviceProvider); - _checkProvisionParameters(provision, _checkPending); + _checkProvisionParameters(_getProvision(_serviceProvider), _checkPending); } /** @@ -330,4 +322,13 @@ abstract contract ProvisionManager is Initializable, GraphDirectory, ProvisionMa function _checkValueInRange(uint256 _value, uint256 _min, uint256 _max, bytes memory _revertMessage) private pure { require(_value.isInRange(_min, _max), ProvisionManagerInvalidValue(_revertMessage, _value, _min, _max)); } + + /** + * @notice Requires that a value is less than or equal to another value. + * @param _a The value to check. + * @param _b The value to compare against. + */ + function _requireLTE(uint256 _a, uint256 _b) private pure { + require(_a <= _b, ProvisionManagerInvalidRange(_a, _b)); + } } diff --git a/packages/horizon/contracts/data-service/utilities/ProvisionManagerStorage.sol b/packages/horizon/contracts/data-service/utilities/ProvisionManagerStorage.sol index 02631d866..dbfe94cc8 100644 --- a/packages/horizon/contracts/data-service/utilities/ProvisionManagerStorage.sol +++ b/packages/horizon/contracts/data-service/utilities/ProvisionManagerStorage.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; /** * @title Storage layout for the {ProvisionManager} helper contract. 
diff --git a/packages/horizon/contracts/libraries/LibFixedMath.sol b/packages/horizon/contracts/libraries/LibFixedMath.sol deleted file mode 100644 index f248a513d..000000000 --- a/packages/horizon/contracts/libraries/LibFixedMath.sol +++ /dev/null @@ -1,299 +0,0 @@ -/* - - Copyright 2017 Bprotocol Foundation, 2019 ZeroEx Intl. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -*/ - -// SPDX-License-Identifier: Apache-2.0 - -pragma solidity 0.8.27 || 0.8.33; - -// TODO: Re-enable and fix issues when publishing a new version -// solhint-disable function-max-lines, gas-strict-inequalities -// forge-lint: disable-start(unsafe-typecast) - -/** - * @title LibFixedMath - * @author Edge & Node - * @notice This library provides fixed-point arithmetic operations. - * @custom:security-contact Please email security+contracts@thegraph.com if you find any - * bugs. We may have an active bug bounty program. 
- */ -library LibFixedMath { - // 1 - int256 private constant FIXED_1 = int256(0x0000000000000000000000000000000080000000000000000000000000000000); - // 2**255 - int256 private constant MIN_FIXED_VAL = type(int256).min; - // 0 - int256 private constant EXP_MAX_VAL = 0; - // -63.875 - int256 private constant EXP_MIN_VAL = -int256(0x0000000000000000000000000000001ff0000000000000000000000000000000); - - /** - * @notice Get one as a fixed-point number - * @return f The fixed-point representation of one - */ - function one() internal pure returns (int256 f) { - f = FIXED_1; - } - - /** - * @notice Returns the subtraction of two fixed point numbers, reverting on overflow - * @param a The first fixed point number - * @param b The second fixed point number to subtract - * @return c The result of a - b - */ - function sub(int256 a, int256 b) internal pure returns (int256 c) { - if (b == MIN_FIXED_VAL) { - revert("out-of-bounds"); - } - c = _add(a, -b); - } - - /** - * @notice Returns the multiplication of two fixed point numbers, reverting on overflow - * @param a The first fixed point number - * @param b The second fixed point number - * @return c The result of a * b - */ - function mul(int256 a, int256 b) internal pure returns (int256 c) { - c = _mul(a, b) / FIXED_1; - } - - /** - * @notice Performs (a * n) / d, without scaling for precision - * @param a The first fixed point number - * @param n The numerator - * @param d The denominator - * @return c The result of (a * n) / d - */ - function mulDiv(int256 a, int256 n, int256 d) internal pure returns (int256 c) { - c = _div(_mul(a, n), d); - } - - /** - * @notice Returns the unsigned integer result of multiplying a fixed-point number with an integer - * @dev Negative results are clamped to zero. Reverts if the multiplication overflows. 
- * @param f Fixed-point number - * @param u Unsigned integer - * @return Unsigned integer result, clamped to zero if negative - */ - function uintMul(int256 f, uint256 u) internal pure returns (uint256) { - if (int256(u) < int256(0)) { - revert("out-of-bounds"); - } - int256 c = _mul(f, int256(u)); - if (c <= 0) { - return 0; - } - return uint256(uint256(c) >> 127); - } - - /** - * @notice Convert signed `n` / `d` to a fixed-point number - * @param n Numerator - * @param d Denominator - * @return f Fixed-point representation of n/d - */ - function toFixed(int256 n, int256 d) internal pure returns (int256 f) { - f = _div(_mul(n, FIXED_1), d); - } - - /** - * @notice Convert a fixed-point number to an integer - * @param f Fixed-point number - * @return n Integer representation - */ - function toInteger(int256 f) internal pure returns (int256 n) { - return f / FIXED_1; - } - - /** - * @notice Compute the natural exponent for a fixed-point number EXP_MIN_VAL <= `x` <= 1 - * @param x Fixed-point number to compute exponent for - * @return r The natural exponent of x - */ - function exp(int256 x) internal pure returns (int256 r) { - if (x < EXP_MIN_VAL) { - // Saturate to zero below EXP_MIN_VAL. - return 0; - } - if (x == 0) { - return FIXED_1; - } - if (x > EXP_MAX_VAL) { - revert("out-of-bounds"); - } - - // Rewrite the input as a product of natural exponents and a - // single residual q, where q is a number of small magnitude. - // For example: e^-34.419 = e^(-32 - 2 - 0.25 - 0.125 - 0.044) - // = e^-32 * e^-2 * e^-0.25 * e^-0.125 * e^-0.044 - // -> q = -0.044 - - // Multiply with the taylor series for e^q - int256 y; - int256 z; - // q = x % 0.125 (the residual) - z = y = x % 0x0000000000000000000000000000000010000000000000000000000000000000; - z = (z * y) / FIXED_1; - r += z * 0x10e1b3be415a0000; // add y^02 * (20! / 02!) - z = (z * y) / FIXED_1; - r += z * 0x05a0913f6b1e0000; // add y^03 * (20! / 03!) 
- z = (z * y) / FIXED_1; - r += z * 0x0168244fdac78000; // add y^04 * (20! / 04!) - z = (z * y) / FIXED_1; - r += z * 0x004807432bc18000; // add y^05 * (20! / 05!) - z = (z * y) / FIXED_1; - r += z * 0x000c0135dca04000; // add y^06 * (20! / 06!) - z = (z * y) / FIXED_1; - r += z * 0x0001b707b1cdc000; // add y^07 * (20! / 07!) - z = (z * y) / FIXED_1; - r += z * 0x000036e0f639b800; // add y^08 * (20! / 08!) - z = (z * y) / FIXED_1; - r += z * 0x00000618fee9f800; // add y^09 * (20! / 09!) - z = (z * y) / FIXED_1; - r += z * 0x0000009c197dcc00; // add y^10 * (20! / 10!) - z = (z * y) / FIXED_1; - r += z * 0x0000000e30dce400; // add y^11 * (20! / 11!) - z = (z * y) / FIXED_1; - r += z * 0x000000012ebd1300; // add y^12 * (20! / 12!) - z = (z * y) / FIXED_1; - r += z * 0x0000000017499f00; // add y^13 * (20! / 13!) - z = (z * y) / FIXED_1; - r += z * 0x0000000001a9d480; // add y^14 * (20! / 14!) - z = (z * y) / FIXED_1; - r += z * 0x00000000001c6380; // add y^15 * (20! / 15!) - z = (z * y) / FIXED_1; - r += z * 0x000000000001c638; // add y^16 * (20! / 16!) - z = (z * y) / FIXED_1; - r += z * 0x0000000000001ab8; // add y^17 * (20! / 17!) - z = (z * y) / FIXED_1; - r += z * 0x000000000000017c; // add y^18 * (20! / 18!) - z = (z * y) / FIXED_1; - r += z * 0x0000000000000014; // add y^19 * (20! / 19!) - z = (z * y) / FIXED_1; - r += z * 0x0000000000000001; // add y^20 * (20! / 20!) - r = r / 0x21c3677c82b40000 + y + FIXED_1; // divide by 20! and then add y^1 / 1! + y^0 / 0! - - // Multiply with the non-residual terms. 
- x = -x; - // e ^ -32 - if ((x & int256(0x0000000000000000000000000000001000000000000000000000000000000000)) != 0) { - r = - (r * int256(0x00000000000000000000000000000000000000f1aaddd7742e56d32fb9f99744)) / - int256(0x0000000000000000000000000043cbaf42a000812488fc5c220ad7b97bf6e99e); // * e ^ -32 - } - // e ^ -16 - if ((x & int256(0x0000000000000000000000000000000800000000000000000000000000000000)) != 0) { - r = - (r * int256(0x00000000000000000000000000000000000afe10820813d65dfe6a33c07f738f)) / - int256(0x000000000000000000000000000005d27a9f51c31b7c2f8038212a0574779991); // * e ^ -16 - } - // e ^ -8 - if ((x & int256(0x0000000000000000000000000000000400000000000000000000000000000000)) != 0) { - r = - (r * int256(0x0000000000000000000000000000000002582ab704279e8efd15e0265855c47a)) / - int256(0x0000000000000000000000000000001b4c902e273a58678d6d3bfdb93db96d02); // * e ^ -8 - } - // e ^ -4 - if ((x & int256(0x0000000000000000000000000000000200000000000000000000000000000000)) != 0) { - r = - (r * int256(0x000000000000000000000000000000001152aaa3bf81cb9fdb76eae12d029571)) / - int256(0x00000000000000000000000000000003b1cc971a9bb5b9867477440d6d157750); // * e ^ -4 - } - // e ^ -2 - if ((x & int256(0x0000000000000000000000000000000100000000000000000000000000000000)) != 0) { - r = - (r * int256(0x000000000000000000000000000000002f16ac6c59de6f8d5d6f63c1482a7c86)) / - int256(0x000000000000000000000000000000015bf0a8b1457695355fb8ac404e7a79e3); // * e ^ -2 - } - // e ^ -1 - if ((x & int256(0x0000000000000000000000000000000080000000000000000000000000000000)) != 0) { - r = - (r * int256(0x000000000000000000000000000000004da2cbf1be5827f9eb3ad1aa9866ebb3)) / - int256(0x00000000000000000000000000000000d3094c70f034de4b96ff7d5b6f99fcd8); // * e ^ -1 - } - // e ^ -0.5 - if ((x & int256(0x0000000000000000000000000000000040000000000000000000000000000000)) != 0) { - r = - (r * int256(0x0000000000000000000000000000000063afbe7ab2082ba1a0ae5e4eb1b479dc)) / - 
int256(0x00000000000000000000000000000000a45af1e1f40c333b3de1db4dd55f29a7); // * e ^ -0.5 - } - // e ^ -0.25 - if ((x & int256(0x0000000000000000000000000000000020000000000000000000000000000000)) != 0) { - r = - (r * int256(0x0000000000000000000000000000000070f5a893b608861e1f58934f97aea57d)) / - int256(0x00000000000000000000000000000000910b022db7ae67ce76b441c27035c6a1); // * e ^ -0.25 - } - // e ^ -0.125 - if ((x & int256(0x0000000000000000000000000000000010000000000000000000000000000000)) != 0) { - r = - (r * int256(0x00000000000000000000000000000000783eafef1c0a8f3978c7f81824d62ebf)) / - int256(0x0000000000000000000000000000000088415abbe9a76bead8d00cf112e4d4a8); // * e ^ -0.125 - } - } - - /** - * @notice Returns the multiplication of two numbers, reverting on overflow - * @param a First number - * @param b Second number - * @return c The result of a * b - */ - function _mul(int256 a, int256 b) private pure returns (int256 c) { - if (a == 0 || b == 0) { - return 0; - } - unchecked { - c = a * b; - if (c / a != b || c / b != a) { - revert("overflow"); - } - } - } - - /** - * @notice Returns the division of two numbers, reverting on division by zero - * @param a Dividend - * @param b Divisor - * @return c The result of a / b - */ - function _div(int256 a, int256 b) private pure returns (int256 c) { - if (b == 0) { - revert("overflow"); - } - if (a == MIN_FIXED_VAL && b == -1) { - revert("overflow"); - } - unchecked { - c = a / b; - } - } - - /** - * @notice Adds two numbers, reverting on overflow - * @param a First number - * @param b Second number - * @return c The result of a + b - */ - function _add(int256 a, int256 b) private pure returns (int256 c) { - unchecked { - c = a + b; - if ((a < 0 && b < 0 && c > a) || (a > 0 && b > 0 && c < a)) { - revert("overflow"); - } - } - } -} diff --git a/packages/horizon/contracts/libraries/LinkedList.sol b/packages/horizon/contracts/libraries/LinkedList.sol index 24e5610a0..893ea4a24 100644 --- 
a/packages/horizon/contracts/libraries/LinkedList.sol +++ b/packages/horizon/contracts/libraries/LinkedList.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-increment-by-one, gas-strict-inequalities diff --git a/packages/horizon/contracts/libraries/MathUtils.sol b/packages/horizon/contracts/libraries/MathUtils.sol deleted file mode 100644 index ec8cc8161..000000000 --- a/packages/horizon/contracts/libraries/MathUtils.sol +++ /dev/null @@ -1,56 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -// TODO: Re-enable and fix issues when publishing a new version -// solhint-disable gas-strict-inequalities - -pragma solidity 0.8.27 || 0.8.33; - -/** - * @title MathUtils Library - * @author Edge & Node - * @notice A collection of functions to perform math operations - * @custom:security-contact Please email security+contracts@thegraph.com if you find any - * bugs. We may have an active bug bounty program. - */ -library MathUtils { - /** - * @notice Calculates the weighted average of two values pondering each of these - * values based on configured weights - * @dev The contribution of each value N is - * weightN/(weightA + weightB). The calculation rounds up to ensure the result - * is always equal or greater than the smallest of the two values. 
- * @param valueA The amount for value A - * @param weightA The weight to use for value A - * @param valueB The amount for value B - * @param weightB The weight to use for value B - * @return The weighted average result - */ - function weightedAverageRoundingUp( - uint256 valueA, - uint256 weightA, - uint256 valueB, - uint256 weightB - ) internal pure returns (uint256) { - return ((valueA * weightA) + (valueB * weightB) + (weightA + weightB - 1)) / (weightA + weightB); - } - - /** - * @notice Returns the minimum of two numbers - * @param x The first number - * @param y The second number - * @return The minimum of the two numbers - */ - function min(uint256 x, uint256 y) internal pure returns (uint256) { - return x <= y ? x : y; - } - - /** - * @notice Returns the difference between two numbers or zero if negative - * @param x The first number - * @param y The second number - * @return The difference between the two numbers or zero if negative - */ - function diffOrZero(uint256 x, uint256 y) internal pure returns (uint256) { - return (x > y) ? 
x - y : 0; - } -} diff --git a/packages/horizon/contracts/libraries/PPMMath.sol b/packages/horizon/contracts/libraries/PPMMath.sol index a3108d88b..75448a6d0 100644 --- a/packages/horizon/contracts/libraries/PPMMath.sol +++ b/packages/horizon/contracts/libraries/PPMMath.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-strict-inequalities diff --git a/packages/horizon/contracts/libraries/UintRange.sol b/packages/horizon/contracts/libraries/UintRange.sol index c96222464..3783b95ea 100644 --- a/packages/horizon/contracts/libraries/UintRange.sol +++ b/packages/horizon/contracts/libraries/UintRange.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-strict-inequalities diff --git a/packages/horizon/contracts/mocks/imports.sol b/packages/horizon/contracts/mocks/imports.sol index 3a05b2b4d..f153a9320 100644 --- a/packages/horizon/contracts/mocks/imports.sol +++ b/packages/horizon/contracts/mocks/imports.sol @@ -1,7 +1,7 @@ // solhint-disable no-global-import // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity ^0.7.6 || 0.8.27; +pragma solidity ^0.7.6 || ^0.8.27; // We import these here to force Hardhat to compile them. // This ensures that their artifacts are available for Hardhat Ignition to use. 
diff --git a/packages/horizon/contracts/payments/GraphPayments.sol b/packages/horizon/contracts/payments/GraphPayments.sol index 276ce2100..ed83d4b3c 100644 --- a/packages/horizon/contracts/payments/GraphPayments.sol +++ b/packages/horizon/contracts/payments/GraphPayments.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable function-max-lines diff --git a/packages/horizon/contracts/payments/PaymentsEscrow.sol b/packages/horizon/contracts/payments/PaymentsEscrow.sol index 6af296e42..59c3f771f 100644 --- a/packages/horizon/contracts/payments/PaymentsEscrow.sol +++ b/packages/horizon/contracts/payments/PaymentsEscrow.sol @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; -// TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-strict-inequalities import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; @@ -36,7 +35,8 @@ contract PaymentsEscrow is Initializable, MulticallUpgradeable, GraphDirectory, /// @notice Escrow account details for payer-collector-receiver tuples mapping(address payer => mapping(address collector => mapping(address receiver => IPaymentsEscrow.EscrowAccount escrowAccount))) - public escrowAccounts; + public + override escrowAccounts; // forge-lint: disable-next-item(unwrapped-modifier-logic) /** @@ -91,6 +91,42 @@ contract PaymentsEscrow is Initializable, MulticallUpgradeable, GraphDirectory, emit Thaw(msg.sender, collector, receiver, tokens, account.thawEndTimestamp); } + /// @inheritdoc IPaymentsEscrow + function adjustThaw( + address collector, + address receiver, + uint256 tokensToThaw, + bool evenIfTimerReset + ) external override notPaused returns (uint256 tokensThawing) { + EscrowAccount storage account = 
escrowAccounts[msg.sender][collector][receiver]; + uint256 currentThawing = account.tokensThawing; + + tokensThawing = tokensToThaw < account.balance ? tokensToThaw : account.balance; + + if (tokensThawing == currentThawing) return tokensThawing; + + uint256 thawEndTimestamp; + uint256 previousThawEnd = account.thawEndTimestamp; + if (tokensThawing < currentThawing) { + // Decreasing (or canceling): preserve timer, clear if fully canceled + account.tokensThawing = tokensThawing; + if (tokensThawing == 0) account.thawEndTimestamp = 0; + else thawEndTimestamp = previousThawEnd; + } else { + thawEndTimestamp = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + // Increasing: reset timer (skip if evenIfTimerReset=false and timer would change) + if (!evenIfTimerReset && previousThawEnd != 0 && previousThawEnd != thawEndTimestamp) return currentThawing; + account.tokensThawing = tokensThawing; + account.thawEndTimestamp = thawEndTimestamp; + } + + if (tokensThawing == 0) { + emit CancelThaw(msg.sender, collector, receiver, currentThawing, previousThawEnd); + } else { + emit Thaw(msg.sender, collector, receiver, tokensThawing, thawEndTimestamp); + } + } + /// @inheritdoc IPaymentsEscrow function cancelThaw(address collector, address receiver) external override notPaused { EscrowAccount storage account = escrowAccounts[msg.sender][collector][receiver]; diff --git a/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol b/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol index 9040219fc..8b8a161ee 100644 --- a/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol +++ b/packages/horizon/contracts/payments/collectors/GraphTallyCollector.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-small-strings diff --git 
a/packages/horizon/contracts/payments/collectors/MaxSecondsPerCollectionCap.md b/packages/horizon/contracts/payments/collectors/MaxSecondsPerCollectionCap.md new file mode 100644 index 000000000..c3926b31c --- /dev/null +++ b/packages/horizon/contracts/payments/collectors/MaxSecondsPerCollectionCap.md @@ -0,0 +1,56 @@ +# maxSecondsPerCollection: Cap, Not Deadline + +## Problem + +`_requireValidCollect` treats `maxSecondsPerCollection` as a hard deadline: + +```solidity +require( + _collectionSeconds <= _agreement.maxSecondsPerCollection, + RecurringCollectorCollectionTooLate(...) +); +uint256 maxTokens = _agreement.maxOngoingTokensPerSecond * _collectionSeconds; +``` + +If the indexer collects even 1 second past `maxSecondsPerCollection`, the transaction reverts and the agreement becomes permanently stuck. The only recovery is a zero-token collect that bypasses temporal validation entirely (since `_requireValidCollect` is inside `if (tokens != 0)`), which works but is an unnatural mechanism. + +## Fix + +Cap `collectionSeconds` at `maxSecondsPerCollection` in `_getCollectionInfo`, so all callers (RC's `_collect` and SS's `IndexingAgreement.collect`) receive consistent capped seconds: + +```solidity +uint256 elapsed = collectionEnd - collectionStart; +return (true, Math.min(elapsed, uint256(_agreement.maxSecondsPerCollection)), ...); +``` + +The payer's per-collection exposure is still bounded by `maxOngoingTokensPerSecond * maxSecondsPerCollection`. The indexer can collect after the window closes, but receives no more tokens than if they had collected exactly at the deadline. + +## Why this is correct + +1. **`_getMaxNextClaim` already caps.** The view function (used by escrow to compute worst-case exposure) clamps `windowSeconds` at `maxSecondsPerCollection` rather than returning 0. The mutation function should be consistent. + +2. **`collectionSeconds` is derived from on-chain state**, not caller-supplied. The indexer's only leverage is _when_ they call. 
Capping means they can't extract more by waiting longer. + +3. **No stuck agreements.** A missed window no longer requires cancellation or a zero-token hack to recover. + +4. **`minSecondsPerCollection` is unaffected.** If elapsed time exceeds `maxSecondsPerCollection`, it trivially exceeds `minSecondsPerCollection` (since `max > min` is enforced at accept time). + +5. **Initial tokens preserved.** `maxInitialTokens` is added on top of the capped ongoing amount on first collection. With a hard deadline, a late first collection reverts and the indexer loses both the initial bonus and the ongoing amount — misaligning incentives. With a cap, the initial bonus is always available. + +6. **Late collection loses unclaimed seconds, not ability to collect.** After a capped collection, `lastCollectionAt` resets to `block.timestamp`, not `lastCollectionAt + maxSecondsPerCollection`. The indexer permanently loses tokens for the gap beyond the cap. This incentivizes timely collection without the cliff-edge of a hard revert. + +## Zero-token temporal validation enforced + +`_requireValidCollect` was previously inside `if (tokens != 0)`, allowing zero-token collections to update `lastCollectionAt` without temporal checks. With the cap in place there is no legitimate bypass scenario, so temporal validation now runs unconditionally. + +This also makes `lastCollectionAt` (publicly readable via `getAgreement`) trustworthy as a liveness signal. Previously it could be advanced to `block.timestamp` without any real collection. Now it can only be updated through a validated collection, making it reliable for external consumers (e.g. payers or SAM operators checking indexer activity to decide whether to cancel). + +## Zero-POI special case removed + +The old code special-cased `entities == 0 && poi == bytes32(0)` to force `tokens = 0`, bypassing `_tokensToCollect` and RC temporal validation. This existed as a reset mechanism for stuck agreements. 
With the cap, there are no stuck agreements, so the special case is removed. Every collection now goes through `_tokensToCollect` and RC validation uniformly, and every POI is disputable. + +## Contrast with indexing rewards + +Indexing rewards require a zero-POI "heartbeat" to keep allocations alive because reward rates change per epoch and snapshots are influenced by other participants' activity. That reset mechanism exists because the system is inherently snapshot-driven. + +RCA indexing fees have no snapshots. The rate (`tokensPerSecond`, `tokensPerEntityPerSecond`) is fixed at agreement accept/update time. No external state changes the per-second rate between collections. The amount owed for N seconds of service is deterministic regardless of when collection happens, so capping is strictly correct — there is no reason to penalize a late collection beyond limiting it to `maxSecondsPerCollection` worth of tokens. diff --git a/packages/horizon/contracts/payments/collectors/RecurringCollector.sol b/packages/horizon/contracts/payments/collectors/RecurringCollector.sol new file mode 100644 index 000000000..452822a05 --- /dev/null +++ b/packages/horizon/contracts/payments/collectors/RecurringCollector.sol @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.27; + +import { EIP712 } from "@openzeppelin/contracts/utils/cryptography/EIP712.sol"; +import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; +import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; + +import { Authorizable } from "../../utilities/Authorizable.sol"; +import { GraphDirectory } from "../../utilities/GraphDirectory.sol"; +// solhint-disable-next-line no-unused-import +import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; // for @inheritdoc +import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; +import { IAgreementOwner } from 
"@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; +import { PPMMath } from "../../libraries/PPMMath.sol"; + +/** + * @title RecurringCollector contract + * @author Edge & Node + * @dev Implements the {IRecurringCollector} interface. + * @notice A payments collector contract that can be used to collect payments using a RCA (Recurring Collection Agreement). + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringCollector { + using PPMMath for uint256; + + /// @notice The minimum number of seconds that must be between two collections + uint32 public constant MIN_SECONDS_COLLECTION_WINDOW = 600; + + /* solhint-disable gas-small-strings */ + /// @notice The EIP712 typehash for the RecurringCollectionAgreement struct + bytes32 public constant EIP712_RCA_TYPEHASH = + keccak256( + "RecurringCollectionAgreement(uint64 deadline,uint64 endsAt,address payer,address dataService,address serviceProvider,uint256 maxInitialTokens,uint256 maxOngoingTokensPerSecond,uint32 minSecondsPerCollection,uint32 maxSecondsPerCollection,uint256 nonce,bytes metadata)" + ); + + /// @notice The EIP712 typehash for the RecurringCollectionAgreementUpdate struct + bytes32 public constant EIP712_RCAU_TYPEHASH = + keccak256( + "RecurringCollectionAgreementUpdate(bytes16 agreementId,uint64 deadline,uint64 endsAt,uint256 maxInitialTokens,uint256 maxOngoingTokensPerSecond,uint32 minSecondsPerCollection,uint32 maxSecondsPerCollection,uint32 nonce,bytes metadata)" + ); + /* solhint-enable 
gas-small-strings */ + + /// @notice Tracks agreements + mapping(bytes16 agreementId => AgreementData data) internal agreements; + + /** + * @notice Constructs a new instance of the RecurringCollector contract. + * @param eip712Name The name of the EIP712 domain. + * @param eip712Version The version of the EIP712 domain. + * @param controller The address of the Graph controller. + * @param revokeSignerThawingPeriod The duration (in seconds) in which a signer is thawing before they can be revoked. + */ + constructor( + string memory eip712Name, + string memory eip712Version, + address controller, + uint256 revokeSignerThawingPeriod + ) EIP712(eip712Name, eip712Version) GraphDirectory(controller) Authorizable(revokeSignerThawingPeriod) {} + + /** + * @inheritdoc IPaymentsCollector + * @notice Initiate a payment collection through the payments protocol. + * See {IPaymentsCollector.collect}. + * @dev Caller must be the data service the RCA was issued to. + */ + function collect(IGraphPayments.PaymentTypes paymentType, bytes calldata data) external returns (uint256) { + try this.decodeCollectData(data) returns (CollectParams memory collectParams) { + return _collect(paymentType, collectParams); + } catch { + revert RecurringCollectorInvalidCollectData(data); + } + } + + /** + * @inheritdoc IRecurringCollector + * @notice Accept a Recurring Collection Agreement. + * @dev Caller must be the data service the RCA was issued to. 
+ */ + function accept(RecurringCollectionAgreement calldata rca, bytes calldata signature) external returns (bytes16) { + /* solhint-disable gas-strict-inequalities */ + require( + rca.deadline >= block.timestamp, + RecurringCollectorAgreementDeadlineElapsed(block.timestamp, rca.deadline) + ); + /* solhint-enable gas-strict-inequalities */ + + if (0 < signature.length) { + // ECDSA-signed path: verify signature + _requireAuthorizedRCASigner(rca, signature); + } else { + // Contract-approved path: verify payer is a contract and confirms the agreement + require(0 < rca.payer.code.length, RecurringCollectorApproverNotContract(rca.payer)); + bytes32 agreementHash = _hashRCA(rca); + require( + IAgreementOwner(rca.payer).approveAgreement(agreementHash) == IAgreementOwner.approveAgreement.selector, + RecurringCollectorInvalidSigner() + ); + } + return _validateAndStoreAgreement(rca); + } + + /** + * @notice Validates RCA fields and stores the agreement. + * @param _rca The Recurring Collection Agreement to validate and store + * @return agreementId The deterministically generated agreement ID + */ + /* solhint-disable function-max-lines */ + function _validateAndStoreAgreement(RecurringCollectionAgreement memory _rca) private returns (bytes16) { + bytes16 agreementId = _generateAgreementId( + _rca.payer, + _rca.dataService, + _rca.serviceProvider, + _rca.deadline, + _rca.nonce + ); + + require(agreementId != bytes16(0), RecurringCollectorAgreementIdZero()); + require(msg.sender == _rca.dataService, RecurringCollectorUnauthorizedCaller(msg.sender, _rca.dataService)); + + require( + _rca.dataService != address(0) && _rca.payer != address(0) && _rca.serviceProvider != address(0), + RecurringCollectorAgreementAddressNotSet() + ); + + _requireValidCollectionWindowParams(_rca.endsAt, _rca.minSecondsPerCollection, _rca.maxSecondsPerCollection); + + AgreementData storage agreement = _getAgreementStorage(agreementId); + // check that the agreement is not already accepted + 
require( + agreement.state == AgreementState.NotAccepted, + RecurringCollectorAgreementIncorrectState(agreementId, agreement.state) + ); + + // accept the agreement + agreement.acceptedAt = uint64(block.timestamp); + agreement.state = AgreementState.Accepted; + agreement.dataService = _rca.dataService; + agreement.payer = _rca.payer; + agreement.serviceProvider = _rca.serviceProvider; + agreement.endsAt = _rca.endsAt; + agreement.maxInitialTokens = _rca.maxInitialTokens; + agreement.maxOngoingTokensPerSecond = _rca.maxOngoingTokensPerSecond; + agreement.minSecondsPerCollection = _rca.minSecondsPerCollection; + agreement.maxSecondsPerCollection = _rca.maxSecondsPerCollection; + agreement.updateNonce = 0; + + emit AgreementAccepted( + agreement.dataService, + agreement.payer, + agreement.serviceProvider, + agreementId, + agreement.acceptedAt, + agreement.endsAt, + agreement.maxInitialTokens, + agreement.maxOngoingTokensPerSecond, + agreement.minSecondsPerCollection, + agreement.maxSecondsPerCollection + ); + + return agreementId; + } + /* solhint-enable function-max-lines */ + + /** + * @inheritdoc IRecurringCollector + * @notice Cancel a Recurring Collection Agreement. + * See {IRecurringCollector.cancel}. + * @dev Caller must be the data service for the agreement. 
+ */ + function cancel(bytes16 agreementId, CancelAgreementBy by) external { + AgreementData storage agreement = _getAgreementStorage(agreementId); + require( + agreement.state == AgreementState.Accepted, + RecurringCollectorAgreementIncorrectState(agreementId, agreement.state) + ); + require( + agreement.dataService == msg.sender, + RecurringCollectorDataServiceNotAuthorized(agreementId, msg.sender) + ); + agreement.canceledAt = uint64(block.timestamp); + if (by == CancelAgreementBy.Payer) { + agreement.state = AgreementState.CanceledByPayer; + } else { + agreement.state = AgreementState.CanceledByServiceProvider; + } + + emit AgreementCanceled( + agreement.dataService, + agreement.payer, + agreement.serviceProvider, + agreementId, + agreement.canceledAt, + by + ); + } + + /** + * @inheritdoc IRecurringCollector + * @notice Update a Recurring Collection Agreement. + * @dev Caller must be the data service for the agreement. + * @dev Note: Updated pricing terms apply immediately and will affect the next collection + * for the entire period since lastCollectionAt. 
+ */ + function update(RecurringCollectionAgreementUpdate calldata rcau, bytes calldata signature) external { + AgreementData storage agreement = _requireValidUpdateTarget(rcau.agreementId); + + /* solhint-disable gas-strict-inequalities */ + require( + rcau.deadline >= block.timestamp, + RecurringCollectorAgreementDeadlineElapsed(block.timestamp, rcau.deadline) + ); + /* solhint-enable gas-strict-inequalities */ + + if (0 < signature.length) { + // ECDSA-signed path: verify signature + _requireAuthorizedRCAUSigner(rcau, signature, agreement.payer); + } else { + // Contract-approved path: verify payer is a contract and confirms the update + require(0 < agreement.payer.code.length, RecurringCollectorApproverNotContract(agreement.payer)); + bytes32 updateHash = _hashRCAU(rcau); + require( + IAgreementOwner(agreement.payer).approveAgreement(updateHash) == + IAgreementOwner.approveAgreement.selector, + RecurringCollectorInvalidSigner() + ); + } + + _validateAndStoreUpdate(agreement, rcau); + } + + /// @inheritdoc IRecurringCollector + function recoverRCASigner( + RecurringCollectionAgreement calldata rca, + bytes calldata signature + ) external view returns (address) { + return _recoverRCASigner(rca, signature); + } + + /// @inheritdoc IRecurringCollector + function recoverRCAUSigner( + RecurringCollectionAgreementUpdate calldata rcau, + bytes calldata signature + ) external view returns (address) { + return _recoverRCAUSigner(rcau, signature); + } + + /// @inheritdoc IRecurringCollector + function hashRCA(RecurringCollectionAgreement calldata rca) external view returns (bytes32) { + return _hashRCA(rca); + } + + /// @inheritdoc IRecurringCollector + function hashRCAU(RecurringCollectionAgreementUpdate calldata rcau) external view returns (bytes32) { + return _hashRCAU(rcau); + } + + /// @inheritdoc IRecurringCollector + function getAgreement(bytes16 agreementId) external view returns (AgreementData memory) { + return _getAgreement(agreementId); + } + + /// @inheritdoc 
IRecurringCollector + function getCollectionInfo( + AgreementData calldata agreement + ) external view returns (bool isCollectable, uint256 collectionSeconds, AgreementNotCollectableReason reason) { + return _getCollectionInfo(agreement); + } + + /// @inheritdoc IRecurringCollector + function getMaxNextClaim(bytes16 agreementId) external view returns (uint256) { + return _getMaxNextClaim(agreements[agreementId]); + } + + /// @inheritdoc IRecurringCollector + function generateAgreementId( + address payer, + address dataService, + address serviceProvider, + uint64 deadline, + uint256 nonce + ) external pure returns (bytes16) { + return _generateAgreementId(payer, dataService, serviceProvider, deadline, nonce); + } + + /** + * @notice Decodes the collect data. + * @param data The encoded collect parameters. + * @return The decoded collect parameters. + */ + function decodeCollectData(bytes calldata data) public pure returns (CollectParams memory) { + return abi.decode(data, (CollectParams)); + } + + /* solhint-disable function-max-lines */ + /** + * @notice Collect payment through the payments protocol. + * @dev Caller must be the data service the RCA was issued to. + * + * Emits {PaymentCollected} and {RCACollected} events. 
+ * + * @param _paymentType The type of payment to collect + * @param _params The decoded parameters for the collection + * @return The amount of tokens collected + */ + function _collect( + IGraphPayments.PaymentTypes _paymentType, + CollectParams memory _params + ) private returns (uint256) { + AgreementData storage agreement = _getAgreementStorage(_params.agreementId); + + // Check if agreement is collectable first + (bool isCollectable, uint256 collectionSeconds, AgreementNotCollectableReason reason) = _getCollectionInfo( + agreement + ); + require(isCollectable, RecurringCollectorAgreementNotCollectable(_params.agreementId, reason)); + + require( + msg.sender == agreement.dataService, + RecurringCollectorDataServiceNotAuthorized(_params.agreementId, msg.sender) + ); + + // Check the service provider has an active provision with the data service + // This prevents an attack where the payer can deny the service provider from collecting payments + // by using a signer as data service to syphon off the tokens in the escrow to an account they control + { + uint256 tokensAvailable = _graphStaking().getProviderTokensAvailable( + agreement.serviceProvider, + agreement.dataService + ); + require(tokensAvailable > 0, RecurringCollectorUnauthorizedDataService(agreement.dataService)); + } + + // Always validate temporal constraints (min/maxSecondsPerCollection) even for + // zero-token collections, to prevent bypassing temporal windows while updating + // lastCollectionAt. 
+ uint256 tokensToCollect = _requireValidCollect( + agreement, + _params.agreementId, + _params.tokens, + collectionSeconds + ); + + if (_params.tokens != 0) { + uint256 slippage = _params.tokens - tokensToCollect; + /* solhint-disable gas-strict-inequalities */ + require( + slippage <= _params.maxSlippage, + RecurringCollectorExcessiveSlippage(_params.tokens, tokensToCollect, _params.maxSlippage) + ); + /* solhint-enable gas-strict-inequalities */ + } + agreement.lastCollectionAt = uint64(block.timestamp); + + // Hard eligibility gate for contract payers that opt in via ERC165 + if (0 < tokensToCollect && 0 < agreement.payer.code.length) { + try IERC165(agreement.payer).supportsInterface(type(IProviderEligibility).interfaceId) returns ( + bool supported + ) { + if (supported) { + require( + IProviderEligibility(agreement.payer).isEligible(agreement.serviceProvider), + RecurringCollectorCollectionNotEligible(_params.agreementId, agreement.serviceProvider) + ); + } + } catch {} + // Let contract payers top up escrow if short + try IAgreementOwner(agreement.payer).beforeCollection(_params.agreementId, tokensToCollect) {} catch {} + } + + if (0 < tokensToCollect) { + _graphPaymentsEscrow().collect( + _paymentType, + agreement.payer, + agreement.serviceProvider, + tokensToCollect, + agreement.dataService, + _params.dataServiceCut, + _params.receiverDestination + ); + } + + emit PaymentCollected( + _paymentType, + _params.collectionId, + agreement.payer, + agreement.serviceProvider, + agreement.dataService, + tokensToCollect + ); + + emit RCACollected( + agreement.dataService, + agreement.payer, + agreement.serviceProvider, + _params.agreementId, + _params.collectionId, + tokensToCollect, + _params.dataServiceCut + ); + + // Notify contract payers so they can reconcile escrow in the same transaction + if (0 < agreement.payer.code.length) { + try IAgreementOwner(agreement.payer).afterCollection(_params.agreementId, tokensToCollect) {} catch {} + } + + return 
tokensToCollect; + } + /* solhint-enable function-max-lines */ + + /** + * @notice Requires that the collection window parameters are valid. + * + * @param _endsAt The end time of the agreement + * @param _minSecondsPerCollection The minimum seconds per collection + * @param _maxSecondsPerCollection The maximum seconds per collection + */ + function _requireValidCollectionWindowParams( + uint64 _endsAt, + uint32 _minSecondsPerCollection, + uint32 _maxSecondsPerCollection + ) private view { + // Agreement needs to end in the future + require(_endsAt > block.timestamp, RecurringCollectorAgreementElapsedEndsAt(block.timestamp, _endsAt)); + + // Collection window needs to be at least MIN_SECONDS_COLLECTION_WINDOW + require( + _maxSecondsPerCollection > _minSecondsPerCollection && + // solhint-disable-next-line gas-strict-inequalities + (_maxSecondsPerCollection - _minSecondsPerCollection >= MIN_SECONDS_COLLECTION_WINDOW), + RecurringCollectorAgreementInvalidCollectionWindow( + MIN_SECONDS_COLLECTION_WINDOW, + _minSecondsPerCollection, + _maxSecondsPerCollection + ) + ); + + // Agreement needs to last at least one min collection window + require( + // solhint-disable-next-line gas-strict-inequalities + _endsAt - block.timestamp >= _minSecondsPerCollection + MIN_SECONDS_COLLECTION_WINDOW, + RecurringCollectorAgreementInvalidDuration( + _minSecondsPerCollection + MIN_SECONDS_COLLECTION_WINDOW, + _endsAt - block.timestamp + ) + ); + } + + /** + * @notice Requires that the collection params are valid. 
+ * @param _agreement The agreement data + * @param _agreementId The ID of the agreement + * @param _tokens The number of tokens to collect + * @param _collectionSeconds Collection duration from _getCollectionInfo() + * @return The number of tokens that can be collected + */ + function _requireValidCollect( + AgreementData memory _agreement, + bytes16 _agreementId, + uint256 _tokens, + uint256 _collectionSeconds + ) private view returns (uint256) { + bool canceledOrElapsed = _agreement.state == AgreementState.CanceledByPayer || + block.timestamp > _agreement.endsAt; + if (!canceledOrElapsed) { + require( + // solhint-disable-next-line gas-strict-inequalities + _collectionSeconds >= _agreement.minSecondsPerCollection, + RecurringCollectorCollectionTooSoon( + _agreementId, + // casting to uint32 is safe because _collectionSeconds < minSecondsPerCollection (uint32) + // forge-lint: disable-next-line(unsafe-typecast) + uint32(_collectionSeconds), + _agreement.minSecondsPerCollection + ) + ); + } + // _collectionSeconds is already capped at maxSecondsPerCollection by _getCollectionInfo + uint256 maxTokens = _agreement.maxOngoingTokensPerSecond * _collectionSeconds; + maxTokens += _agreement.lastCollectionAt == 0 ? 
_agreement.maxInitialTokens : 0; + + return Math.min(_tokens, maxTokens); + } + + /** + * @notice See {recoverRCASigner} + * @param _rca The RCA whose hash was signed + * @param _signature The ECDSA signature bytes + * @return The address of the signer + */ + function _recoverRCASigner( + RecurringCollectionAgreement memory _rca, + bytes memory _signature + ) private view returns (address) { + bytes32 messageHash = _hashRCA(_rca); + return ECDSA.recover(messageHash, _signature); + } + + /** + * @notice See {recoverRCAUSigner} + * @param _rcau The RCAU whose hash was signed + * @param _signature The ECDSA signature bytes + * @return The address of the signer + */ + function _recoverRCAUSigner( + RecurringCollectionAgreementUpdate memory _rcau, + bytes memory _signature + ) private view returns (address) { + bytes32 messageHash = _hashRCAU(_rcau); + return ECDSA.recover(messageHash, _signature); + } + + /** + * @notice See {hashRCA} + * @param _rca The RCA to hash + * @return The EIP712 hash of the RCA + */ + function _hashRCA(RecurringCollectionAgreement memory _rca) private view returns (bytes32) { + return + _hashTypedDataV4( + keccak256( + abi.encode( + EIP712_RCA_TYPEHASH, + _rca.deadline, + _rca.endsAt, + _rca.payer, + _rca.dataService, + _rca.serviceProvider, + _rca.maxInitialTokens, + _rca.maxOngoingTokensPerSecond, + _rca.minSecondsPerCollection, + _rca.maxSecondsPerCollection, + _rca.nonce, + keccak256(_rca.metadata) + ) + ) + ); + } + + /** + * @notice See {hashRCAU} + * @param _rcau The RCAU to hash + * @return The EIP712 hash of the RCAU + */ + function _hashRCAU(RecurringCollectionAgreementUpdate memory _rcau) private view returns (bytes32) { + return + _hashTypedDataV4( + keccak256( + abi.encode( + EIP712_RCAU_TYPEHASH, + _rcau.agreementId, + _rcau.deadline, + _rcau.endsAt, + _rcau.maxInitialTokens, + _rcau.maxOngoingTokensPerSecond, + _rcau.minSecondsPerCollection, + _rcau.maxSecondsPerCollection, + _rcau.nonce, + keccak256(_rcau.metadata) + ) + ) + 
); + } + + /** + * @notice Requires that the signer for the RCA is authorized + * by the payer of the RCA. + * @param _rca The RCA whose hash was signed + * @param _signature The ECDSA signature bytes + * @return The address of the authorized signer + */ + function _requireAuthorizedRCASigner( + RecurringCollectionAgreement memory _rca, + bytes memory _signature + ) private view returns (address) { + address signer = _recoverRCASigner(_rca, _signature); + require(_isAuthorized(_rca.payer, signer), RecurringCollectorInvalidSigner()); + + return signer; + } + + /** + * @notice Requires that the signer for the RCAU is authorized + * by the payer. + * @param _rcau The RCAU whose hash was signed + * @param _signature The ECDSA signature bytes + * @param _payer The address of the payer + * @return The address of the authorized signer + */ + function _requireAuthorizedRCAUSigner( + RecurringCollectionAgreementUpdate memory _rcau, + bytes memory _signature, + address _payer + ) private view returns (address) { + address signer = _recoverRCAUSigner(_rcau, _signature); + require(_isAuthorized(_payer, signer), RecurringCollectorInvalidSigner()); + + return signer; + } + + /** + * @notice Validates that an agreement is in a valid state for updating and that the caller is authorized. + * @param _agreementId The ID of the agreement to validate + * @return The storage reference to the agreement data + */ + function _requireValidUpdateTarget(bytes16 _agreementId) private view returns (AgreementData storage) { + AgreementData storage agreement = _getAgreementStorage(_agreementId); + require( + agreement.state == AgreementState.Accepted, + RecurringCollectorAgreementIncorrectState(_agreementId, agreement.state) + ); + require( + agreement.dataService == msg.sender, + RecurringCollectorDataServiceNotAuthorized(_agreementId, msg.sender) + ); + return agreement; + } + + /** + * @notice Validates and stores an update to a Recurring Collection Agreement. 
+ * Shared validation/storage/emit logic for the update function. + * @param _agreement The storage reference to the agreement data + * @param _rcau The Recurring Collection Agreement Update to apply + */ + function _validateAndStoreUpdate( + AgreementData storage _agreement, + RecurringCollectionAgreementUpdate calldata _rcau + ) private { + // validate nonce to prevent replay attacks + uint32 expectedNonce = _agreement.updateNonce + 1; + require( + _rcau.nonce == expectedNonce, + RecurringCollectorInvalidUpdateNonce(_rcau.agreementId, expectedNonce, _rcau.nonce) + ); + + _requireValidCollectionWindowParams(_rcau.endsAt, _rcau.minSecondsPerCollection, _rcau.maxSecondsPerCollection); + + // update the agreement + _agreement.endsAt = _rcau.endsAt; + _agreement.maxInitialTokens = _rcau.maxInitialTokens; + _agreement.maxOngoingTokensPerSecond = _rcau.maxOngoingTokensPerSecond; + _agreement.minSecondsPerCollection = _rcau.minSecondsPerCollection; + _agreement.maxSecondsPerCollection = _rcau.maxSecondsPerCollection; + _agreement.updateNonce = _rcau.nonce; + + emit AgreementUpdated( + _agreement.dataService, + _agreement.payer, + _agreement.serviceProvider, + _rcau.agreementId, + uint64(block.timestamp), + _agreement.endsAt, + _agreement.maxInitialTokens, + _agreement.maxOngoingTokensPerSecond, + _agreement.minSecondsPerCollection, + _agreement.maxSecondsPerCollection + ); + } + + /** + * @notice Gets an agreement to be updated. 
+ * @param _agreementId The ID of the agreement to get + * @return The storage reference to the agreement data + */ + function _getAgreementStorage(bytes16 _agreementId) private view returns (AgreementData storage) { + return agreements[_agreementId]; + } + + /** + * @notice See {getAgreement} + * @param _agreementId The ID of the agreement to get + * @return The agreement data + */ + function _getAgreement(bytes16 _agreementId) private view returns (AgreementData memory) { + return agreements[_agreementId]; + } + + /** + * @notice Internal function to get collection info for an agreement + * @dev This is the single source of truth for collection window logic + * @param _agreement The agreement data + * @return isCollectable Whether the agreement can be collected from + * @return collectionSeconds The valid collection duration in seconds (0 if not collectable) + * @return reason The reason why the agreement is not collectable (None if collectable) + */ + function _getCollectionInfo( + AgreementData memory _agreement + ) private view returns (bool, uint256, AgreementNotCollectableReason) { + // Check if agreement is in collectable state + bool hasValidState = _agreement.state == AgreementState.Accepted || + _agreement.state == AgreementState.CanceledByPayer; + + if (!hasValidState) { + return (false, 0, AgreementNotCollectableReason.InvalidAgreementState); + } + + bool canceledOrElapsed = _agreement.state == AgreementState.CanceledByPayer || + block.timestamp > _agreement.endsAt; + uint256 canceledOrNow = _agreement.state == AgreementState.CanceledByPayer + ? _agreement.canceledAt + : block.timestamp; + + uint256 collectionEnd = canceledOrElapsed ? 
Math.min(canceledOrNow, _agreement.endsAt) : block.timestamp; + uint256 collectionStart = _agreementCollectionStartAt(_agreement); + + if (collectionEnd < collectionStart) { + return (false, 0, AgreementNotCollectableReason.InvalidTemporalWindow); + } + + if (collectionStart == collectionEnd) { + return (false, 0, AgreementNotCollectableReason.ZeroCollectionSeconds); + } + + uint256 elapsed = collectionEnd - collectionStart; + return ( + true, + Math.min(elapsed, uint256(_agreement.maxSecondsPerCollection)), + AgreementNotCollectableReason.None + ); + } + + /** + * @notice Gets the start time for the collection of an agreement. + * @param _agreement The agreement data + * @return The start time for the collection of the agreement + */ + function _agreementCollectionStartAt(AgreementData memory _agreement) private pure returns (uint256) { + return _agreement.lastCollectionAt > 0 ? _agreement.lastCollectionAt : _agreement.acceptedAt; + } + + /** + * @notice Compute the maximum tokens collectable in the next collection (worst case). + * @dev For active agreements uses endsAt as the collection end (worst case), + * not block.timestamp (current). Returns 0 for non-collectable states. + * @param _a The agreement data + * @return The maximum tokens that could be collected + */ + function _getMaxNextClaim(AgreementData memory _a) private pure returns (uint256) { + // CanceledByServiceProvider = immediately non-collectable + if (_a.state == AgreementState.CanceledByServiceProvider) return 0; + // Only Accepted and CanceledByPayer are collectable + if (_a.state != AgreementState.Accepted && _a.state != AgreementState.CanceledByPayer) return 0; + + // Collection starts from last collection (or acceptance if never collected) + uint256 collectionStart = 0 < _a.lastCollectionAt ? 
_a.lastCollectionAt : _a.acceptedAt; + + // Determine the latest possible collection end + uint256 collectionEnd; + if (_a.state == AgreementState.CanceledByPayer) { + // Payer cancel freezes the window at min(canceledAt, endsAt) + collectionEnd = _a.canceledAt < _a.endsAt ? _a.canceledAt : _a.endsAt; + } else { + // Active: collection window capped at endsAt + collectionEnd = _a.endsAt; + } + + // No collection possible if window is empty + // solhint-disable-next-line gas-strict-inequalities + if (collectionEnd <= collectionStart) return 0; + + // Max seconds is capped by maxSecondsPerCollection (enforced by _requireValidCollect) + uint256 windowSeconds = collectionEnd - collectionStart; + uint256 maxSeconds = windowSeconds < _a.maxSecondsPerCollection ? windowSeconds : _a.maxSecondsPerCollection; + + uint256 maxClaim = _a.maxOngoingTokensPerSecond * maxSeconds; + if (_a.lastCollectionAt == 0) maxClaim += _a.maxInitialTokens; + return maxClaim; + } + + /** + * @notice Internal function to generate deterministic agreement ID + * @param _payer The address of the payer + * @param _dataService The address of the data service + * @param _serviceProvider The address of the service provider + * @param _deadline The deadline for accepting the agreement + * @param _nonce A unique nonce for preventing collisions + * @return agreementId The deterministically generated agreement ID + */ + function _generateAgreementId( + address _payer, + address _dataService, + address _serviceProvider, + uint64 _deadline, + uint256 _nonce + ) private pure returns (bytes16) { + return bytes16(keccak256(abi.encode(_payer, _dataService, _serviceProvider, _deadline, _nonce))); + } +} diff --git a/packages/horizon/contracts/staking/HorizonStaking.sol b/packages/horizon/contracts/staking/HorizonStaking.sol index 7040ac343..bd6ccef70 100644 --- a/packages/horizon/contracts/staking/HorizonStaking.sol +++ b/packages/horizon/contracts/staking/HorizonStaking.sol @@ -5,16 +5,15 @@ // solhint-disable 
gas-increment-by-one // solhint-disable function-max-lines -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; -import { IHorizonStakingExtension } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; import { TokenUtils } from "@graphprotocol/contracts/contracts/utils/TokenUtils.sol"; -import { MathUtils } from "../libraries/MathUtils.sol"; +import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; import { PPMMath } from "../libraries/PPMMath.sol"; import { LinkedList } from "../libraries/LinkedList.sol"; @@ -28,9 +27,6 @@ import { HorizonStakingBase } from "./HorizonStakingBase.sol"; * @dev Implements the {IHorizonStakingMain} interface. * @dev This is the main Staking contract in The Graph protocol after the Horizon upgrade. * It is designed to be deployed as an upgrade to the L2Staking contract from the legacy contracts package. - * @dev It uses a {HorizonStakingExtension} contract to implement the full {IHorizonStaking} interface through delegatecalls. - * This is due to the contract size limit on Arbitrum (24kB). The extension contract implements functionality to support - * the legacy staking functions. It can be eventually removed without affecting the main staking contract. * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. 
*/ @@ -42,9 +38,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { /// @dev Maximum number of simultaneous stake thaw requests (per provision) or undelegations (per delegation) uint256 private constant MAX_THAW_REQUESTS = 1_000; - /// @dev Address of the staking extension contract - address private immutable STAKING_EXTENSION_ADDRESS; - /// @dev Minimum amount of delegation. uint256 private constant MIN_DELEGATION = 1e18; @@ -79,50 +72,12 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { /** * @notice The staking contract is upgradeable however we still use the constructor to set a few immutable variables * @param controller The address of the Graph controller contract - * @param stakingExtensionAddress The address of the staking extension contract * @param subgraphDataServiceAddress The address of the subgraph data service */ constructor( address controller, - address stakingExtensionAddress, address subgraphDataServiceAddress - ) HorizonStakingBase(controller, subgraphDataServiceAddress) { - STAKING_EXTENSION_ADDRESS = stakingExtensionAddress; - } - - /** - * @notice Delegates the current call to the StakingExtension implementation. - * @dev This function does not return to its internal call site, it will return directly to the - * external caller. 
- */ - fallback() external { - // solhint-disable-previous-line payable-fallback, no-complex-fallback - address extensionImpl = STAKING_EXTENSION_ADDRESS; - // solhint-disable-next-line no-inline-assembly - assembly { - // (a) get free memory pointer - let ptr := mload(0x40) - - // (1) copy incoming call data - calldatacopy(ptr, 0, calldatasize()) - - // (2) forward call to logic contract - let result := delegatecall(gas(), extensionImpl, ptr, calldatasize(), 0, 0) - let size := returndatasize() - - // (3) retrieve return data - returndatacopy(ptr, 0, size) - - // (4) forward return data back to caller - switch result - case 0 { - revert(ptr, size) - } - default { - return(ptr, size) - } - } - } + ) HorizonStakingBase(controller, subgraphDataServiceAddress) {} /* * STAKING @@ -158,6 +113,11 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { _withdraw(msg.sender); } + /// @inheritdoc IHorizonStakingMain + function forceWithdraw(address serviceProvider) external override notPaused { + _withdraw(serviceProvider); + } + /* * PROVISIONS */ @@ -258,6 +218,11 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { require(prov.createdAt != 0, HorizonStakingInvalidProvision(serviceProvider, verifier)); if ((prov.maxVerifierCutPending != prov.maxVerifierCut) || (prov.thawingPeriodPending != prov.thawingPeriod)) { + // Re-validate thawing period in case governor reduced _maxThawingPeriod after staging + require( + prov.thawingPeriodPending <= _maxThawingPeriod, + HorizonStakingInvalidThawingPeriod(prov.thawingPeriodPending, _maxThawingPeriod) + ); prov.maxVerifierCut = prov.maxVerifierCutPending; prov.thawingPeriod = prov.thawingPeriodPending; emit ProvisionParametersSet(serviceProvider, verifier, prov.maxVerifierCut, prov.thawingPeriod); @@ -369,33 +334,15 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { address serviceProvider, address // deprecated - kept for backwards compatibility ) external override notPaused 
returns (uint256) { - // Get the delegation pool of the indexer - address delegator = msg.sender; - DelegationPoolInternal storage pool = _legacyDelegationPools[serviceProvider]; - DelegationInternal storage delegation = pool.delegators[delegator]; - - // Validation - uint256 tokensToWithdraw = 0; - uint256 currentEpoch = _graphEpochManager().currentEpoch(); - if ( - delegation.__DEPRECATED_tokensLockedUntil > 0 && currentEpoch >= delegation.__DEPRECATED_tokensLockedUntil - ) { - tokensToWithdraw = delegation.__DEPRECATED_tokensLocked; - } - require(tokensToWithdraw > 0, HorizonStakingNothingToWithdraw()); - - // Reset lock - delegation.__DEPRECATED_tokensLocked = 0; - delegation.__DEPRECATED_tokensLockedUntil = 0; - - emit StakeDelegatedWithdrawn(serviceProvider, delegator, tokensToWithdraw); - - // -- Interactions -- - - // Return tokens to the delegator - _graphToken().pushTokens(delegator, tokensToWithdraw); + return _withdrawDelegatedLegacy(serviceProvider, msg.sender); + } - return tokensToWithdraw; + /// @inheritdoc IHorizonStakingMain + function forceWithdrawDelegated( + address serviceProvider, + address delegator + ) external override notPaused returns (uint256) { + return _withdrawDelegatedLegacy(serviceProvider, delegator); } /* @@ -409,33 +356,18 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { uint256 tokensVerifier, address verifierDestination ) external override notPaused { - // TRANSITION PERIOD: remove after the transition period - // Check if sender is authorized to slash on the deprecated list - if (__DEPRECATED_slashers[msg.sender]) { - // Forward call to staking extension - // solhint-disable-next-line avoid-low-level-calls - (bool success, ) = STAKING_EXTENSION_ADDRESS.delegatecall( - abi.encodeCall( - IHorizonStakingExtension.legacySlash, - (serviceProvider, tokens, tokensVerifier, verifierDestination) - ) - ); - require(success, HorizonStakingLegacySlashFailed()); - return; - } - address verifier = msg.sender; 
Provision storage prov = _provisions[serviceProvider][verifier]; DelegationPoolInternal storage pool = _getDelegationPool(serviceProvider, verifier); uint256 tokensProvisionTotal = prov.tokens + pool.tokens; require(tokensProvisionTotal != 0, HorizonStakingNoTokensToSlash()); - uint256 tokensToSlash = MathUtils.min(tokens, tokensProvisionTotal); + uint256 tokensToSlash = Math.min(tokens, tokensProvisionTotal); // Slash service provider first // - A portion goes to verifier as reward // - A portion gets burned - uint256 providerTokensSlashed = MathUtils.min(prov.tokens, tokensToSlash); + uint256 providerTokensSlashed = Math.min(prov.tokens, tokensToSlash); if (providerTokensSlashed > 0) { // Pay verifier reward - must be within the maxVerifierCut percentage uint256 maxVerifierTokens = providerTokensSlashed.mulPPM(prov.maxVerifierCut); @@ -540,12 +472,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { emit DelegationSlashingEnabled(); } - /// @inheritdoc IHorizonStakingMain - function clearThawingPeriod() external override onlyGovernor { - __DEPRECATED_thawingPeriod = 0; - emit ThawingPeriodCleared(); - } - /// @inheritdoc IHorizonStakingMain function setMaxThawingPeriod(uint64 maxThawingPeriod) external override onlyGovernor { _maxThawingPeriod = maxThawingPeriod; @@ -571,17 +497,19 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { } /* - * GETTERS + * PRIVATE FUNCTIONS */ - /// @inheritdoc IHorizonStakingMain - function getStakingExtension() external view override returns (address) { - return STAKING_EXTENSION_ADDRESS; - } - - /* - * PRIVATE FUNCTIONS + /** + * @notice Deposit tokens into the service provider stake. + * Emits a {HorizonStakeDeposited} event. + * @param _serviceProvider The address of the service provider. + * @param _tokens The amount of tokens to deposit. 
*/ + function _stake(address _serviceProvider, uint256 _tokens) internal { + _serviceProviders[_serviceProvider].tokensStaked = _serviceProviders[_serviceProvider].tokensStaked + _tokens; + emit HorizonStakeDeposited(_serviceProvider, _tokens); + } /** * @notice Deposit tokens on the service provider stake, on behalf of the service provider. @@ -601,12 +529,7 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { /** * @notice Move idle stake back to the owner's account. - * Stake is removed from the protocol: - * - During the transition period it's locked for a period of time before it can be withdrawn - * by calling {withdraw}. - * - After the transition period it's immediately withdrawn. - * Note that after the transition period if there are tokens still locked they will have to be - * withdrawn by calling {withdraw}. + * Stake is immediately removed from the protocol. * @param _tokens Amount of tokens to unstake */ function _unstake(uint256 _tokens) private { @@ -616,45 +539,19 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { require(_tokens <= tokensIdle, HorizonStakingInsufficientIdleStake(_tokens, tokensIdle)); ServiceProviderInternal storage sp = _serviceProviders[serviceProvider]; - uint256 stakedTokens = sp.tokensStaked; - - // This is also only during the transition period: we need - // to ensure tokens stay locked after closing legacy allocations. - // After sufficient time (56 days?) we should remove the closeAllocation function - // and set the thawing period to 0. 
- uint256 lockingPeriod = __DEPRECATED_thawingPeriod; - if (lockingPeriod == 0) { - sp.tokensStaked = stakedTokens - _tokens; - _graphToken().pushTokens(serviceProvider, _tokens); - emit HorizonStakeWithdrawn(serviceProvider, _tokens); - } else { - // Before locking more tokens, withdraw any unlocked ones if possible - if (sp.__DEPRECATED_tokensLocked != 0 && block.number >= sp.__DEPRECATED_tokensLockedUntil) { - _withdraw(serviceProvider); - } - // TRANSITION PERIOD: remove after the transition period - // Take into account period averaging for multiple unstake requests - if (sp.__DEPRECATED_tokensLocked > 0) { - lockingPeriod = MathUtils.weightedAverageRoundingUp( - MathUtils.diffOrZero(sp.__DEPRECATED_tokensLockedUntil, block.number), // Remaining thawing period - sp.__DEPRECATED_tokensLocked, // Weighted by remaining unstaked tokens - lockingPeriod, // Thawing period - _tokens // Weighted by new tokens to unstake - ); - } + sp.tokensStaked -= _tokens; - // Update balances - sp.__DEPRECATED_tokensLocked = sp.__DEPRECATED_tokensLocked + _tokens; - sp.__DEPRECATED_tokensLockedUntil = block.number + lockingPeriod; - emit HorizonStakeLocked(serviceProvider, sp.__DEPRECATED_tokensLocked, sp.__DEPRECATED_tokensLockedUntil); - } + _graphToken().pushTokens(serviceProvider, _tokens); + emit HorizonStakeWithdrawn(serviceProvider, _tokens); } /** * @notice Withdraw service provider tokens once the thawing period (initiated by {unstake}) has passed. * All thawed tokens are withdrawn. - * @dev TRANSITION PERIOD: This is only needed during the transition period while we still have - * a global lock. After that, unstake() will automatically withdraw. + * This function is for backwards compatibility with the legacy staking contract. + * It only allows withdrawing tokens unstaked before horizon upgrade. + * @dev This function can't be removed in case there are still pre-horizon unstakes. + * Note that it's assumed unstakes have already passed their thawing period. 
* @param _serviceProvider Address of service provider to withdraw funds from */ function _withdraw(address _serviceProvider) private { @@ -662,10 +559,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { ServiceProviderInternal storage sp = _serviceProviders[_serviceProvider]; uint256 tokensToWithdraw = sp.__DEPRECATED_tokensLocked; require(tokensToWithdraw != 0, HorizonStakingInvalidZeroTokens()); - require( - block.number >= sp.__DEPRECATED_tokensLockedUntil, - HorizonStakingStillThawing(sp.__DEPRECATED_tokensLockedUntil) - ); // Reset locked tokens sp.__DEPRECATED_tokensLocked = 0; @@ -685,8 +578,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { * service, where the data service is the verifier. * This function can be called by the service provider or by an operator authorized by the provider * for this specific verifier. - * @dev TRANSITION PERIOD: During the transition period, only the subgraph data service can be used as a verifier. This - * prevents an escape hatch for legacy allocation stake. 
* @param _serviceProvider The service provider address * @param _tokens The amount of tokens that will be locked and slashable * @param _verifier The verifier address for which the tokens are provisioned (who will be able to slash the tokens) @@ -701,11 +592,6 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { uint64 _thawingPeriod ) private { require(_tokens > 0, HorizonStakingInvalidZeroTokens()); - // TRANSITION PERIOD: Remove this after the transition period - it prevents an early escape hatch for legacy allocations - require( - _verifier == SUBGRAPH_DATA_SERVICE_ADDRESS || __DEPRECATED_thawingPeriod == 0, - HorizonStakingInvalidVerifier(_verifier) - ); require(PPMMath.isValidPPM(_maxVerifierCut), HorizonStakingInvalidMaxVerifierCut(_maxVerifierCut)); require( _thawingPeriod <= _maxThawingPeriod, @@ -958,8 +844,8 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { * @dev The parameter `nThawRequests` can be set to a non zero value to fulfill a specific number of thaw * requests in the event that fulfilling all of them results in a gas limit error. Otherwise, the function * will attempt to fulfill all thaw requests until the first one that is not yet expired is found. - * @dev If the delegation pool was completely slashed before withdrawing, calling this function will fulfill - * the thaw requests with an amount equal to zero. + * @dev If the delegation pool was completely slashed before withdrawing, calling this function will revert + * until the pool state is repaired with {IHorizonStakingMain-addToDelegationPool}. * @param _serviceProvider The service provider address * @param _verifier The verifier address * @param _newServiceProvider The new service provider address @@ -1231,6 +1117,39 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { emit OperatorSet(msg.sender, _verifier, _operator, _allowed); } + /** + * @notice Withdraw legacy undelegated tokens for a delegator. 
+ * @dev This function handles pre-Horizon undelegations where tokens are locked + * in the legacy delegation pool. + * @param _serviceProvider The service provider address + * @param _delegator The delegator address + * @return The amount of tokens withdrawn + */ + function _withdrawDelegatedLegacy(address _serviceProvider, address _delegator) private returns (uint256) { + DelegationPoolInternal storage pool = _legacyDelegationPools[_serviceProvider]; + DelegationInternal storage delegation = pool.delegators[_delegator]; + + // Validation + uint256 tokensToWithdraw = 0; + if (delegation.__DEPRECATED_tokensLockedUntil > 0) { + tokensToWithdraw = delegation.__DEPRECATED_tokensLocked; + } + require(tokensToWithdraw > 0, HorizonStakingNothingToWithdraw()); + + // Reset lock + delegation.__DEPRECATED_tokensLocked = 0; + delegation.__DEPRECATED_tokensLockedUntil = 0; + + emit StakeDelegatedWithdrawn(_serviceProvider, _delegator, tokensToWithdraw); + + // -- Interactions -- + + // Return tokens to the delegator + _graphToken().pushTokens(_delegator, tokensToWithdraw); + + return tokensToWithdraw; + } + /** * @notice Check if an operator is authorized for the caller on a specific verifier / data service. 
* @dev Note that this function handles the special case where the verifier is the subgraph data service, @@ -1251,6 +1170,30 @@ contract HorizonStaking is HorizonStakingBase, IHorizonStakingMain { } } + /// @inheritdoc IHorizonStakingMain + function isAllocation(address allocationID) external view override returns (bool) { + return _getLegacyAllocationState(allocationID) != LegacyAllocationState.Null; + } + + /** + * @notice Return the current state of a legacy allocation + * @param _allocationID Allocation identifier + * @return LegacyAllocationState enum with the state of the allocation + */ + function _getLegacyAllocationState(address _allocationID) private view returns (LegacyAllocationState) { + LegacyAllocation storage alloc = __DEPRECATED_allocations[_allocationID]; + + if (alloc.indexer == address(0)) { + return LegacyAllocationState.Null; + } + + if (alloc.createdAtEpoch != 0 && alloc.closedAtEpoch == 0) { + return LegacyAllocationState.Active; + } + + return LegacyAllocationState.Closed; + } + /** * @notice Determines the correct callback function for `deleteItem` based on the request type. * @param _requestType The type of thaw request (Provision or Delegation). 
diff --git a/packages/horizon/contracts/staking/HorizonStakingBase.sol b/packages/horizon/contracts/staking/HorizonStakingBase.sol index 615de4994..199e894d3 100644 --- a/packages/horizon/contracts/staking/HorizonStakingBase.sol +++ b/packages/horizon/contracts/staking/HorizonStakingBase.sol @@ -3,14 +3,14 @@ // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-strict-inequalities -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { IHorizonStakingBase } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingBase.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; -import { MathUtils } from "../libraries/MathUtils.sol"; +import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; import { LinkedList } from "../libraries/LinkedList.sol"; import { Multicall } from "@openzeppelin/contracts/utils/Multicall.sol"; @@ -23,9 +23,7 @@ import { HorizonStakingV1Storage } from "./HorizonStakingStorage.sol"; * @author Edge & Node * @notice This contract is the base staking contract implementing storage getters for both internal * and external use. - * @dev Implementation of the {IHorizonStakingBase} interface. - * @dev It's meant to be inherited by the {HorizonStaking} and {HorizonStakingExtension} - * contracts so some internal functions are also included here. + * @dev Implementation of the {IHorizonStakingBase} interface, meant to be inherited by {HorizonStaking}. * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. 
*/ @@ -54,6 +52,11 @@ abstract contract HorizonStakingBase is SUBGRAPH_DATA_SERVICE_ADDRESS = subgraphDataServiceAddress; } + /// @inheritdoc IHorizonStakingBase + function getSubgraphService() external view override returns (address) { + return SUBGRAPH_DATA_SERVICE_ADDRESS; + } + /// @inheritdoc IHorizonStakingBase /// @dev Removes deprecated fields from the return value. function getServiceProvider(address serviceProvider) external view override returns (ServiceProvider memory) { @@ -127,7 +130,7 @@ abstract contract HorizonStakingBase is uint256 tokensAvailableDelegated = _getDelegatedTokensAvailable(serviceProvider, verifier); uint256 tokensDelegatedMax = tokensAvailableProvider * (uint256(delegationRatio)); - uint256 tokensDelegatedCapacity = MathUtils.min(tokensAvailableDelegated, tokensDelegatedMax); + uint256 tokensDelegatedCapacity = Math.min(tokensAvailableDelegated, tokensDelegatedMax); return tokensAvailableProvider + tokensDelegatedCapacity; } @@ -179,14 +182,26 @@ abstract contract HorizonStakingBase is } uint256 thawedTokens = 0; - Provision storage prov = _provisions[serviceProvider][verifier]; - uint256 tokensThawing = prov.tokensThawing; - uint256 sharesThawing = prov.sharesThawing; + uint256 tokensThawing; + uint256 sharesThawing; + uint256 thawingNonce; + + if (requestType == ThawRequestType.Provision) { + Provision storage prov = _provisions[serviceProvider][verifier]; + tokensThawing = prov.tokensThawing; + sharesThawing = prov.sharesThawing; + thawingNonce = prov.thawingNonce; + } else { + DelegationPoolInternal storage pool = _getDelegationPool(serviceProvider, verifier); + tokensThawing = pool.tokensThawing; + sharesThawing = pool.sharesThawing; + thawingNonce = pool.thawingNonce; + } bytes32 thawRequestId = thawRequestList.head; while (thawRequestId != bytes32(0)) { ThawRequest storage thawRequest = _getThawRequest(requestType, thawRequestId); - if (thawRequest.thawingNonce == prov.thawingNonce) { + if (thawRequest.thawingNonce == 
thawingNonce) { if (thawRequest.thawingUntil <= block.timestamp) { // sharesThawing cannot be zero if there is a valid thaw request so the next division is safe uint256 tokens = (thawRequest.shares * tokensThawing) / sharesThawing; @@ -218,31 +233,18 @@ abstract contract HorizonStakingBase is return _delegationSlashingEnabled; } - /** - * @notice Deposit tokens into the service provider stake. - * @dev TRANSITION PERIOD: After transition period move to IHorizonStakingMain. Temporarily it - * needs to be here since it's used by both {HorizonStaking} and {HorizonStakingExtension}. - * - * Emits a {HorizonStakeDeposited} event. - * @param _serviceProvider The address of the service provider. - * @param _tokens The amount of tokens to deposit. - */ - function _stake(address _serviceProvider, uint256 _tokens) internal { - _serviceProviders[_serviceProvider].tokensStaked = _serviceProviders[_serviceProvider].tokensStaked + _tokens; - emit HorizonStakeDeposited(_serviceProvider, _tokens); - } - /** * @notice Gets the service provider's idle stake which is the stake that is not being * used for any provision. Note that this only includes service provider's self stake. - * @dev Note that the calculation considers tokens that were locked in the legacy staking contract. - * @dev TRANSITION PERIOD: update the calculation after the transition period. + * @dev Note that the calculation: + * - assumes tokens that were allocated to a subgraph deployment pre-horizon were all unallocated. + * - considers tokens that were locked in the legacy staking contract and never withdrawn. + * * @param _serviceProvider The address of the service provider. * @return The amount of tokens that are idle. 
*/ function _getIdleStake(address _serviceProvider) internal view returns (uint256) { uint256 tokensUsed = _serviceProviders[_serviceProvider].tokensProvisioned + - _serviceProviders[_serviceProvider].__DEPRECATED_tokensAllocated + _serviceProviders[_serviceProvider].__DEPRECATED_tokensLocked; uint256 tokensStaked = _serviceProviders[_serviceProvider].tokensStaked; return tokensStaked > tokensUsed ? tokensStaked - tokensUsed : 0; diff --git a/packages/horizon/contracts/staking/HorizonStakingExtension.sol b/packages/horizon/contracts/staking/HorizonStakingExtension.sol deleted file mode 100644 index 3258381b2..000000000 --- a/packages/horizon/contracts/staking/HorizonStakingExtension.sol +++ /dev/null @@ -1,485 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -pragma solidity 0.8.27 || 0.8.33; - -// TODO: Re-enable and fix issues when publishing a new version -// solhint-disable function-max-lines, gas-strict-inequalities -// forge-lint: disable-start(mixed-case-variable, mixed-case-function, unwrapped-modifier-logic) - -import { ICuration } from "@graphprotocol/interfaces/contracts/contracts/curation/ICuration.sol"; -import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; -import { IHorizonStakingExtension } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol"; -import { IRewardsIssuer } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsIssuer.sol"; - -import { TokenUtils } from "@graphprotocol/contracts/contracts/utils/TokenUtils.sol"; -import { MathUtils } from "../libraries/MathUtils.sol"; -import { ExponentialRebates } from "./libraries/ExponentialRebates.sol"; -import { PPMMath } from "../libraries/PPMMath.sol"; - -import { HorizonStakingBase } from "./HorizonStakingBase.sol"; - -/** - * @title Horizon Staking extension contract - * @author Edge & Node - * @notice The {HorizonStakingExtension} contract implements the legacy functionality required to support 
the transition - * to the Horizon Staking contract. It allows indexers to close allocations and collect pending query fees, but it - * does not allow for the creation of new allocations. This should allow indexers to migrate to a subgraph data service - * without losing rewards or having service interruptions. - * @dev TRANSITION PERIOD: Once the transition period passes this contract can be removed (note that an upgrade to the - * RewardsManager will also be required). It's expected the transition period to last for at least a full allocation cycle - * (28 epochs). - * @custom:security-contact Please email security+contracts@thegraph.com if you find any - * bugs. We may have an active bug bounty program. - */ -contract HorizonStakingExtension is HorizonStakingBase, IHorizonStakingExtension { - using TokenUtils for IGraphToken; - using PPMMath for uint256; - - /** - * @dev Check if the caller is the slasher. - */ - modifier onlySlasher() { - require(__DEPRECATED_slashers[msg.sender], "!slasher"); - _; - } - - /** - * @notice The staking contract is upgradeable however we still use the constructor to set a few immutable variables - * @param controller The address of the Graph controller contract - * @param subgraphDataServiceAddress The address of the subgraph data service - */ - constructor( - address controller, - address subgraphDataServiceAddress - ) HorizonStakingBase(controller, subgraphDataServiceAddress) {} - - /// @inheritdoc IHorizonStakingExtension - function closeAllocation(address allocationID, bytes32 poi) external override notPaused { - _closeAllocation(allocationID, poi); - } - - /// @inheritdoc IHorizonStakingExtension - function collect(uint256 tokens, address allocationID) external override notPaused { - // Allocation identifier validation - require(allocationID != address(0), "!alloc"); - - // Allocation must exist - AllocationState allocState = _getAllocationState(allocationID); - require(allocState != AllocationState.Null, "!collect"); - - // 
If the query fees are zero, we don't want to revert - // but we also don't need to do anything, so just return - if (tokens == 0) { - return; - } - - Allocation storage alloc = __DEPRECATED_allocations[allocationID]; - bytes32 subgraphDeploymentID = alloc.subgraphDeploymentID; - - uint256 queryFees = tokens; // Tokens collected from the channel - uint256 protocolTax = 0; // Tokens burnt as protocol tax - uint256 curationFees = 0; // Tokens distributed to curators as curation fees - uint256 queryRebates = 0; // Tokens to distribute to indexer - uint256 delegationRewards = 0; // Tokens to distribute to delegators - - { - // -- Pull tokens from the sender -- - _graphToken().pullTokens(msg.sender, queryFees); - - // -- Collect protocol tax -- - protocolTax = _collectTax(queryFees, __DEPRECATED_protocolPercentage); - queryFees = queryFees - protocolTax; - - // -- Collect curation fees -- - // Only if the subgraph deployment is curated - curationFees = _collectCurationFees(subgraphDeploymentID, queryFees, __DEPRECATED_curationPercentage); - queryFees = queryFees - curationFees; - - // -- Process rebate reward -- - // Using accumulated fees and subtracting previously distributed rebates - // allows for multiple vouchers to be collected while following the rebate formula - alloc.collectedFees = alloc.collectedFees + queryFees; - - // No rebates if indexer has no stake or if lambda is zero - uint256 newRebates = (alloc.tokens == 0 || __DEPRECATED_lambdaNumerator == 0) - ? 
0 - : ExponentialRebates.exponentialRebates( - alloc.collectedFees, - alloc.tokens, - __DEPRECATED_alphaNumerator, - __DEPRECATED_alphaDenominator, - __DEPRECATED_lambdaNumerator, - __DEPRECATED_lambdaDenominator - ); - - // -- Ensure rebates to distribute are within bounds -- - // Indexers can become under or over rebated if rebate parameters (alpha, lambda) - // change between successive collect calls for the same allocation - - // Ensure rebates to distribute are not negative (indexer is over-rebated) - queryRebates = MathUtils.diffOrZero(newRebates, alloc.distributedRebates); - - // Ensure rebates to distribute are not greater than available (indexer is under-rebated) - queryRebates = MathUtils.min(queryRebates, queryFees); - - // -- Burn rebates remanent -- - _graphToken().burnTokens(queryFees - queryRebates); - - // -- Distribute rebates -- - if (queryRebates > 0) { - alloc.distributedRebates = alloc.distributedRebates + queryRebates; - - // -- Collect delegation rewards into the delegation pool -- - delegationRewards = _collectDelegationQueryRewards(alloc.indexer, queryRebates); - queryRebates = queryRebates - delegationRewards; - - // -- Transfer or restake rebates -- - _sendRewards(queryRebates, alloc.indexer, __DEPRECATED_rewardsDestination[alloc.indexer] == address(0)); - } - } - - emit RebateCollected( - msg.sender, - alloc.indexer, - subgraphDeploymentID, - allocationID, - _graphEpochManager().currentEpoch(), - tokens, - protocolTax, - curationFees, - queryFees, - queryRebates, - delegationRewards - ); - } - - /// @inheritdoc IHorizonStakingExtension - function legacySlash( - address indexer, - uint256 tokens, - uint256 reward, - address beneficiary - ) external override onlySlasher notPaused { - ServiceProviderInternal storage indexerStake = _serviceProviders[indexer]; - - // Only able to slash a non-zero number of tokens - require(tokens > 0, "!tokens"); - - // Rewards comes from tokens slashed balance - require(tokens >= reward, "rewards>slash"); - 
- // Cannot slash stake of an indexer without any or enough stake - require(indexerStake.tokensStaked > 0, "!stake"); - require(tokens <= indexerStake.tokensStaked, "slash>stake"); - - // Validate beneficiary of slashed tokens - require(beneficiary != address(0), "!beneficiary"); - - // Slashing tokens that are already provisioned would break provision accounting, we need to limit - // the slash amount. This can be compensated for, by slashing with the main slash function if needed. - uint256 slashableStake = indexerStake.tokensStaked - indexerStake.tokensProvisioned; - if (slashableStake == 0) { - emit StakeSlashed(indexer, 0, 0, beneficiary); - return; - } - if (tokens > slashableStake) { - reward = (reward * slashableStake) / tokens; - tokens = slashableStake; - } - - // Slashing more tokens than freely available (over allocation condition) - // Unlock locked tokens to avoid the indexer to withdraw them - uint256 tokensUsed = indexerStake.__DEPRECATED_tokensAllocated + indexerStake.__DEPRECATED_tokensLocked; - uint256 tokensAvailable = tokensUsed > indexerStake.tokensStaked ? 
0 : indexerStake.tokensStaked - tokensUsed; - if (tokens > tokensAvailable && indexerStake.__DEPRECATED_tokensLocked > 0) { - uint256 tokensOverAllocated = tokens - tokensAvailable; - uint256 tokensToUnlock = MathUtils.min(tokensOverAllocated, indexerStake.__DEPRECATED_tokensLocked); - indexerStake.__DEPRECATED_tokensLocked = indexerStake.__DEPRECATED_tokensLocked - tokensToUnlock; - if (indexerStake.__DEPRECATED_tokensLocked == 0) { - indexerStake.__DEPRECATED_tokensLockedUntil = 0; - } - } - - // Remove tokens to slash from the stake - indexerStake.tokensStaked = indexerStake.tokensStaked - tokens; - - // -- Interactions -- - - // Set apart the reward for the beneficiary and burn remaining slashed stake - _graphToken().burnTokens(tokens - reward); - - // Give the beneficiary a reward for slashing - _graphToken().pushTokens(beneficiary, reward); - - emit StakeSlashed(indexer, tokens, reward, beneficiary); - } - - /// @inheritdoc IHorizonStakingExtension - function isAllocation(address allocationID) external view override returns (bool) { - return _getAllocationState(allocationID) != AllocationState.Null; - } - - /// @inheritdoc IHorizonStakingExtension - function getAllocation(address allocationID) external view override returns (Allocation memory) { - return __DEPRECATED_allocations[allocationID]; - } - - /// @inheritdoc IRewardsIssuer - function getAllocationData( - address allocationID - ) external view override returns (bool, address, bytes32, uint256, uint256, uint256) { - Allocation memory allo = __DEPRECATED_allocations[allocationID]; - bool isActive = _getAllocationState(allocationID) == AllocationState.Active; - return (isActive, allo.indexer, allo.subgraphDeploymentID, allo.tokens, allo.accRewardsPerAllocatedToken, 0); - } - - /// @inheritdoc IHorizonStakingExtension - function getAllocationState(address allocationID) external view override returns (AllocationState) { - return _getAllocationState(allocationID); - } - - /// @inheritdoc IRewardsIssuer - 
function getSubgraphAllocatedTokens(bytes32 subgraphDeploymentID) external view override returns (uint256) { - return __DEPRECATED_subgraphAllocations[subgraphDeploymentID]; - } - - /// @inheritdoc IHorizonStakingExtension - function getIndexerStakedTokens(address indexer) external view override returns (uint256) { - return _serviceProviders[indexer].tokensStaked; - } - - /// @inheritdoc IHorizonStakingExtension - function getSubgraphService() external view override returns (address) { - return SUBGRAPH_DATA_SERVICE_ADDRESS; - } - - /// @inheritdoc IHorizonStakingExtension - function hasStake(address indexer) external view override returns (bool) { - return _serviceProviders[indexer].tokensStaked > 0; - } - - /// @inheritdoc IHorizonStakingExtension - function __DEPRECATED_getThawingPeriod() external view returns (uint64) { - return __DEPRECATED_thawingPeriod; - } - - /// @inheritdoc IHorizonStakingExtension - function isOperator(address operator, address serviceProvider) public view override returns (bool) { - return _legacyOperatorAuth[serviceProvider][operator]; - } - - /** - * @notice Collect tax to burn for an amount of tokens - * @param _tokens Total tokens received used to calculate the amount of tax to collect - * @param _percentage Percentage of tokens to burn as tax - * @return Amount of tax charged - */ - function _collectTax(uint256 _tokens, uint256 _percentage) private returns (uint256) { - uint256 tax = _tokens.mulPPMRoundUp(_percentage); - _graphToken().burnTokens(tax); // Burn tax if any - return tax; - } - - /** - * @notice Triggers an update of rewards due to a change in allocations - * @param _subgraphDeploymentID Subgraph deployment updated - */ - function _updateRewards(bytes32 _subgraphDeploymentID) private { - _graphRewardsManager().onSubgraphAllocationUpdate(_subgraphDeploymentID); - } - - /** - * @notice Assign rewards for the closed allocation to indexer and delegators - * @param _allocationID Allocation - * @param _indexer Address of the 
indexer that did the allocation - */ - function _distributeRewards(address _allocationID, address _indexer) private { - // Automatically triggers update of rewards snapshot as allocation will change - // after this call. Take rewards mint tokens for the Staking contract to distribute - // between indexer and delegators - uint256 totalRewards = _graphRewardsManager().takeRewards(_allocationID); - if (totalRewards == 0) { - return; - } - - // Calculate delegation rewards and add them to the delegation pool - uint256 delegationRewards = _collectDelegationIndexingRewards(_indexer, totalRewards); - uint256 indexerRewards = totalRewards - delegationRewards; - - // Send the indexer rewards - _sendRewards(indexerRewards, _indexer, __DEPRECATED_rewardsDestination[_indexer] == address(0)); - } - - /** - * @notice Send rewards to the appropriate destination - * @param _tokens Number of rewards tokens - * @param _beneficiary Address of the beneficiary of rewards - * @param _restake Whether to restake or not - */ - function _sendRewards(uint256 _tokens, address _beneficiary, bool _restake) private { - if (_tokens == 0) return; - - if (_restake) { - // Restake to place fees into the indexer stake - _stake(_beneficiary, _tokens); - } else { - // Transfer funds to the beneficiary's designated rewards destination if set - address destination = __DEPRECATED_rewardsDestination[_beneficiary]; - _graphToken().pushTokens(destination == address(0) ? 
_beneficiary : destination, _tokens); - } - } - - /** - * @notice Close an allocation and free the staked tokens - * @param _allocationID The allocation identifier - * @param _poi Proof of indexing submitted for the allocated period - */ - function _closeAllocation(address _allocationID, bytes32 _poi) private { - // Allocation must exist and be active - AllocationState allocState = _getAllocationState(_allocationID); - require(allocState == AllocationState.Active, "!active"); - - // Get allocation - Allocation memory alloc = __DEPRECATED_allocations[_allocationID]; - - // Validate that an allocation cannot be closed before one epoch - alloc.closedAtEpoch = _graphEpochManager().currentEpoch(); - uint256 epochs = MathUtils.diffOrZero(alloc.closedAtEpoch, alloc.createdAtEpoch); - - // Indexer or operator can close an allocation - // Anyone is allowed to close ONLY under two concurrent conditions - // - After maxAllocationEpochs passed - // - When the allocation is for non-zero amount of tokens - bool isIndexerOrOperator = msg.sender == alloc.indexer || isOperator(msg.sender, alloc.indexer); - if (epochs <= __DEPRECATED_maxAllocationEpochs || alloc.tokens == 0) { - require(isIndexerOrOperator, "!auth"); - } - - // -- Rewards Distribution -- - - // Process non-zero-allocation rewards tracking - if (alloc.tokens > 0) { - // Distribute rewards if proof of indexing was presented by the indexer or operator - if (isIndexerOrOperator && _poi != 0 && epochs > 0) { - _distributeRewards(_allocationID, alloc.indexer); - } else { - _updateRewards(alloc.subgraphDeploymentID); - } - - // Free allocated tokens from use - _serviceProviders[alloc.indexer].__DEPRECATED_tokensAllocated = - _serviceProviders[alloc.indexer].__DEPRECATED_tokensAllocated - alloc.tokens; - - // Track total allocations per subgraph - // Used for rewards calculations - __DEPRECATED_subgraphAllocations[alloc.subgraphDeploymentID] = - __DEPRECATED_subgraphAllocations[alloc.subgraphDeploymentID] - alloc.tokens; - 
} - - // Close the allocation - // Note that this breaks CEI pattern. We update after the rewards distribution logic as it expects the allocation - // to still be active. There shouldn't be reentrancy risk here as all internal calls are to trusted contracts. - __DEPRECATED_allocations[_allocationID].closedAtEpoch = alloc.closedAtEpoch; - - emit AllocationClosed( - alloc.indexer, - alloc.subgraphDeploymentID, - alloc.closedAtEpoch, - alloc.tokens, - _allocationID, - msg.sender, - _poi, - !isIndexerOrOperator - ); - } - - /** - * @notice Collect the delegation rewards for query fees - * @dev This function will assign the collected fees to the delegation pool - * @param _indexer Indexer to which the tokens to distribute are related - * @param _tokens Total tokens received used to calculate the amount of fees to collect - * @return Amount of delegation rewards - */ - function _collectDelegationQueryRewards(address _indexer, uint256 _tokens) private returns (uint256) { - uint256 delegationRewards = 0; - DelegationPoolInternal storage pool = _legacyDelegationPools[_indexer]; - if (pool.tokens > 0 && uint256(pool.__DEPRECATED_queryFeeCut).isValidPPM()) { - uint256 indexerCut = uint256(pool.__DEPRECATED_queryFeeCut).mulPPM(_tokens); - delegationRewards = _tokens - indexerCut; - pool.tokens = pool.tokens + delegationRewards; - } - return delegationRewards; - } - - /** - * @notice Collect the delegation rewards for indexing - * @dev This function will assign the collected fees to the delegation pool - * @param _indexer Indexer to which the tokens to distribute are related - * @param _tokens Total tokens received used to calculate the amount of fees to collect - * @return Amount of delegation rewards - */ - function _collectDelegationIndexingRewards(address _indexer, uint256 _tokens) private returns (uint256) { - uint256 delegationRewards = 0; - DelegationPoolInternal storage pool = _legacyDelegationPools[_indexer]; - if (pool.tokens > 0 && 
uint256(pool.__DEPRECATED_indexingRewardCut).isValidPPM()) { - uint256 indexerCut = uint256(pool.__DEPRECATED_indexingRewardCut).mulPPM(_tokens); - delegationRewards = _tokens - indexerCut; - pool.tokens = pool.tokens + delegationRewards; - } - return delegationRewards; - } - - /** - * @notice Collect the curation fees for a subgraph deployment from an amount of tokens - * @dev This function transfer curation fees to the Curation contract by calling Curation.collect - * @param _subgraphDeploymentID Subgraph deployment to which the curation fees are related - * @param _tokens Total tokens received used to calculate the amount of fees to collect - * @param _curationCut Percentage of tokens to collect as fees - * @return Amount of curation fees - */ - function _collectCurationFees( - bytes32 _subgraphDeploymentID, - uint256 _tokens, - uint256 _curationCut - ) private returns (uint256) { - if (_tokens == 0) { - return 0; - } - - ICuration curation = _graphCuration(); - bool isCurationEnabled = _curationCut > 0 && address(curation) != address(0); - - if (isCurationEnabled && curation.isCurated(_subgraphDeploymentID)) { - uint256 curationFees = _tokens.mulPPMRoundUp(_curationCut); - if (curationFees > 0) { - // Transfer and call collect() - // This function transfer tokens to a trusted protocol contracts - // Then we call collect() to do the transfer Bookkeeping - _graphRewardsManager().onSubgraphSignalUpdate(_subgraphDeploymentID); - _graphToken().pushTokens(address(curation), curationFees); - curation.collect(_subgraphDeploymentID, curationFees); - } - return curationFees; - } - return 0; - } - - /** - * @notice Return the current state of an allocation - * @param _allocationID Allocation identifier - * @return AllocationState enum with the state of the allocation - */ - function _getAllocationState(address _allocationID) private view returns (AllocationState) { - Allocation storage alloc = __DEPRECATED_allocations[_allocationID]; - - if (alloc.indexer == address(0)) { 
- return AllocationState.Null; - } - - if (alloc.createdAtEpoch != 0 && alloc.closedAtEpoch == 0) { - return AllocationState.Active; - } - - return AllocationState.Closed; - } -} diff --git a/packages/horizon/contracts/staking/HorizonStakingStorage.sol b/packages/horizon/contracts/staking/HorizonStakingStorage.sol index 1469d27a2..c10ac5d29 100644 --- a/packages/horizon/contracts/staking/HorizonStakingStorage.sol +++ b/packages/horizon/contracts/staking/HorizonStakingStorage.sol @@ -1,11 +1,10 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // forge-lint: disable-start(mixed-case-variable) -import { IHorizonStakingExtension } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol"; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; @@ -64,8 +63,9 @@ abstract contract HorizonStakingV1Storage { mapping(address serviceProvider => IHorizonStakingTypes.ServiceProviderInternal details) internal _serviceProviders; /// @dev Allocation details. - /// Deprecated, now applied on the subgraph data service - mapping(address allocationId => IHorizonStakingExtension.Allocation allocation) internal __DEPRECATED_allocations; + /// Deprecated, now applied on the subgraph data service. + /// Kept for storage compatibility and to check for allocation id collisions. 
+ mapping(address allocationId => IHorizonStakingTypes.LegacyAllocation allocation) internal __DEPRECATED_allocations; /// @dev Subgraph allocations, tracks the tokens allocated to a subgraph deployment /// Deprecated, now applied on the SubgraphService @@ -92,7 +92,7 @@ abstract contract HorizonStakingV1Storage { uint32 internal __DEPRECATED_delegationParametersCooldown; /// @dev Time in epochs a delegator needs to wait to withdraw delegated stake - /// Deprecated, now only enforced during a transition period + /// Deprecated, enforced by each data service as needed. uint32 internal __DEPRECATED_delegationUnbondingPeriod; /// @dev Percentage of tokens to tax a delegation deposit diff --git a/packages/horizon/contracts/staking/libraries/ExponentialRebates.sol b/packages/horizon/contracts/staking/libraries/ExponentialRebates.sol deleted file mode 100644 index 9e2544533..000000000 --- a/packages/horizon/contracts/staking/libraries/ExponentialRebates.sol +++ /dev/null @@ -1,71 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -pragma solidity 0.8.27 || 0.8.33; - -// TODO: Re-enable and fix issues when publishing a new version -// forge-lint: disable-start(unsafe-typecast) - -import { LibFixedMath } from "../../libraries/LibFixedMath.sol"; - -/** - * @title ExponentialRebates library - * @author Edge & Node - * @notice A library to compute query fee rebates using an exponential formula - * @dev This is only used for backwards compatibility in HorizonStaking, and should - * be removed after the transition period. - * @custom:security-contact Please email security+contracts@thegraph.com if you find any - * bugs. We may have an active bug bounty program. - */ -library ExponentialRebates { - /// @dev Maximum value of the exponent for which to compute the exponential before clamping to zero. 
- uint32 private constant MAX_EXPONENT = 15; - - /** - * @notice The exponential formula used to compute fee-based rewards for staking pools in a given epoch - * @dev This function does not perform bounds checking on the inputs, but the following conditions - * need to be true: - * 0 <= alphaNumerator / alphaDenominator <= 1 - * 0 < lambdaNumerator / lambdaDenominator - * The exponential rebates function has the form: - * `(1 - alpha * exp ^ (-lambda * stake / fees)) * fees` - * @param fees Fees generated by indexer in the staking pool - * @param stake Stake attributed to the indexer in the staking pool - * @param alphaNumerator Numerator of `alpha` in the rebates function - * @param alphaDenominator Denominator of `alpha` in the rebates function - * @param lambdaNumerator Numerator of `lambda` in the rebates function - * @param lambdaDenominator Denominator of `lambda` in the rebates function - * @return rewards Rewards owed to the staking pool - */ - function exponentialRebates( - uint256 fees, - uint256 stake, - uint32 alphaNumerator, - uint32 alphaDenominator, - uint32 lambdaNumerator, - uint32 lambdaDenominator - ) external pure returns (uint256) { - // If alpha is zero indexer gets 100% fees rebate - int256 alpha = LibFixedMath.toFixed(int32(alphaNumerator), int32(alphaDenominator)); - if (alpha == 0) { - return fees; - } - - // No rebates if no fees... 
- if (fees == 0) { - return 0; - } - - // Award all fees as rebate if the exponent is too large - int256 lambda = LibFixedMath.toFixed(int32(lambdaNumerator), int32(lambdaDenominator)); - int256 exponent = LibFixedMath.mulDiv(lambda, int256(stake), int256(fees)); - if (LibFixedMath.toInteger(exponent) > int256(uint256(MAX_EXPONENT))) { - return fees; - } - - // Compute `1 - alpha * exp ^(-exponent)` - int256 factor = LibFixedMath.sub(LibFixedMath.one(), LibFixedMath.mul(alpha, LibFixedMath.exp(-exponent))); - - // Weight the fees by the factor - return LibFixedMath.uintMul(factor, fees); - } -} diff --git a/packages/horizon/contracts/staking/utilities/Managed.sol b/packages/horizon/contracts/staking/utilities/Managed.sol index 8839912f5..8efec4711 100644 --- a/packages/horizon/contracts/staking/utilities/Managed.sol +++ b/packages/horizon/contracts/staking/utilities/Managed.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; import { GraphDirectory } from "../../utilities/GraphDirectory.sol"; diff --git a/packages/horizon/contracts/utilities/Authorizable.sol b/packages/horizon/contracts/utilities/Authorizable.sol index 9cbd41672..d48d2e1a3 100644 --- a/packages/horizon/contracts/utilities/Authorizable.sol +++ b/packages/horizon/contracts/utilities/Authorizable.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-strict-inequalities diff --git a/packages/horizon/contracts/utilities/GraphDirectory.sol b/packages/horizon/contracts/utilities/GraphDirectory.sol index 0534ca3c7..1eb7aba61 100644 --- a/packages/horizon/contracts/utilities/GraphDirectory.sol +++ b/packages/horizon/contracts/utilities/GraphDirectory.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.27 || 0.8.33; +pragma solidity ^0.8.27; 
import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; import { IHorizonStaking } from "@graphprotocol/interfaces/contracts/horizon/IHorizonStaking.sol"; @@ -13,8 +13,6 @@ import { IRewardsManager } from "@graphprotocol/interfaces/contracts/contracts/r import { ITokenGateway } from "@graphprotocol/interfaces/contracts/contracts/arbitrum/ITokenGateway.sol"; import { IGraphProxyAdmin } from "@graphprotocol/interfaces/contracts/contracts/upgrades/IGraphProxyAdmin.sol"; -import { ICuration } from "@graphprotocol/interfaces/contracts/contracts/curation/ICuration.sol"; - /** * @title GraphDirectory contract * @author Edge & Node @@ -55,13 +53,6 @@ abstract contract GraphDirectory { /// @notice The Graph Proxy Admin contract address IGraphProxyAdmin private immutable GRAPH_PROXY_ADMIN; - // -- Legacy Graph contracts -- - // These are required for backwards compatibility on HorizonStakingExtension - // TRANSITION PERIOD: remove these once HorizonStakingExtension is removed - - /// @notice The Curation contract address - ICuration private immutable GRAPH_CURATION; - /** * @notice Emitted when the GraphDirectory is initialized * @param graphToken The Graph Token contract address @@ -73,7 +64,6 @@ abstract contract GraphDirectory { * @param graphRewardsManager The Rewards Manager contract address * @param graphTokenGateway The Token Gateway contract address * @param graphProxyAdmin The Graph Proxy Admin contract address - * @param graphCuration The Curation contract address */ event GraphDirectoryInitialized( address indexed graphToken, @@ -84,8 +74,7 @@ abstract contract GraphDirectory { address graphEpochManager, address graphRewardsManager, address graphTokenGateway, - address graphProxyAdmin, - address graphCuration + address graphProxyAdmin ); /** @@ -116,7 +105,6 @@ abstract contract GraphDirectory { GRAPH_REWARDS_MANAGER = IRewardsManager(_getContractFromController("RewardsManager")); GRAPH_TOKEN_GATEWAY = 
ITokenGateway(_getContractFromController("GraphTokenGateway")); GRAPH_PROXY_ADMIN = IGraphProxyAdmin(_getContractFromController("GraphProxyAdmin")); - GRAPH_CURATION = ICuration(_getContractFromController("Curation")); emit GraphDirectoryInitialized( address(GRAPH_TOKEN), @@ -127,8 +115,7 @@ abstract contract GraphDirectory { address(GRAPH_EPOCH_MANAGER), address(GRAPH_REWARDS_MANAGER), address(GRAPH_TOKEN_GATEWAY), - address(GRAPH_PROXY_ADMIN), - address(GRAPH_CURATION) + address(GRAPH_PROXY_ADMIN) ); } @@ -204,14 +191,6 @@ abstract contract GraphDirectory { return GRAPH_PROXY_ADMIN; } - /** - * @notice Get the Curation contract - * @return The Curation contract - */ - function _graphCuration() internal view returns (ICuration) { - return GRAPH_CURATION; - } - /** * @notice Get a contract address from the controller * @dev Requirements: diff --git a/packages/horizon/ignition/modules/core/HorizonStaking.ts b/packages/horizon/ignition/modules/core/HorizonStaking.ts index c4044b0af..a7bec9076 100644 --- a/packages/horizon/ignition/modules/core/HorizonStaking.ts +++ b/packages/horizon/ignition/modules/core/HorizonStaking.ts @@ -3,8 +3,6 @@ import GraphProxyAdminArtifact from '@graphprotocol/contracts/artifacts/contract import { buildModule } from '@nomicfoundation/hardhat-ignition/modules' import HorizonStakingArtifact from '../../../build/contracts/contracts/staking/HorizonStaking.sol/HorizonStaking.json' -import HorizonStakingExtensionArtifact from '../../../build/contracts/contracts/staking/HorizonStakingExtension.sol/HorizonStakingExtension.json' -import ExponentialRebatesArtifact from '../../../build/contracts/contracts/staking/libraries/ExponentialRebates.sol/ExponentialRebates.json' import GraphPeripheryModule, { MigratePeripheryModule } from '../periphery/periphery' import { upgradeGraphProxy } from '../proxy/GraphProxy' import { deployImplementation } from '../proxy/implementation' @@ -17,25 +15,11 @@ export default buildModule('HorizonStaking', (m) => { 
const subgraphServiceAddress = m.getParameter('subgraphServiceAddress') const maxThawingPeriod = m.getParameter('maxThawingPeriod') - // Deploy HorizonStakingExtension - requires periphery and proxies to be registered in the controller - const ExponentialRebates = m.library('ExponentialRebates', ExponentialRebatesArtifact) - const HorizonStakingExtension = m.contract( - 'HorizonStakingExtension', - HorizonStakingExtensionArtifact, - [Controller, subgraphServiceAddress], - { - libraries: { - ExponentialRebates: ExponentialRebates, - }, - after: [GraphPeripheryModule, HorizonProxiesModule], - }, - ) - // Deploy HorizonStaking implementation const HorizonStakingImplementation = deployImplementation(m, { name: 'HorizonStaking', artifact: HorizonStakingArtifact, - constructorArgs: [Controller, HorizonStakingExtension, subgraphServiceAddress], + constructorArgs: [Controller, subgraphServiceAddress], }) // Upgrade proxy to implementation contract @@ -61,24 +45,11 @@ export const MigrateHorizonStakingDeployerModule = buildModule('HorizonStakingDe const HorizonStakingProxy = m.contractAt('HorizonStakingProxy', GraphProxyArtifact, horizonStakingAddress) - // Deploy HorizonStakingExtension - requires periphery and proxies to be registered in the controller - const ExponentialRebates = m.library('ExponentialRebates', ExponentialRebatesArtifact) - const HorizonStakingExtension = m.contract( - 'HorizonStakingExtension', - HorizonStakingExtensionArtifact, - [Controller, subgraphServiceAddress], - { - libraries: { - ExponentialRebates: ExponentialRebates, - }, - }, - ) - // Deploy HorizonStaking implementation const HorizonStakingImplementation = deployImplementation(m, { name: 'HorizonStaking', artifact: HorizonStakingArtifact, - constructorArgs: [Controller, HorizonStakingExtension, subgraphServiceAddress], + constructorArgs: [Controller, subgraphServiceAddress], }) return { HorizonStakingProxy, HorizonStakingImplementation } diff --git a/packages/horizon/package.json 
b/packages/horizon/package.json index 1e712eb99..09eb7eaaf 100644 --- a/packages/horizon/package.json +++ b/packages/horizon/package.json @@ -23,7 +23,7 @@ "lint": "pnpm lint:ts; pnpm lint:sol; pnpm lint:forge; pnpm lint:md; pnpm lint:json", "lint:ts": "eslint --fix --cache '**/*.{js,ts,cjs,mjs,jsx,tsx}'; prettier -w --cache --log-level warn '**/*.{js,ts,cjs,mjs,jsx,tsx}'", "lint:sol": "solhint --fix --noPrompt --noPoster 'contracts/**/*.sol'; prettier -w --cache --log-level warn '**/*.sol'", - "lint:forge": "forge lint", + "lint:forge": "forge lint contracts/", "lint:md": "markdownlint --fix --ignore-path ../../.gitignore '**/*.md'; prettier -w --cache --log-level warn '**/*.md'", "lint:json": "prettier -w --cache --log-level warn '**/*.json'", "clean": "rm -rf build dist cache cache_forge typechain-types", diff --git a/packages/horizon/scripts/integration b/packages/horizon/scripts/integration index baf48cf5e..c92a85ee8 100755 --- a/packages/horizon/scripts/integration +++ b/packages/horizon/scripts/integration @@ -100,12 +100,6 @@ npx hardhat deploy:migrate --network localhost --horizon-config integration --st # Step 4 - Governor npx hardhat deploy:migrate --network localhost --horizon-config integration --step 4 --patch-config --account-index 1 --hide-banner --standalone -# Run integration tests - During transition period -npx hardhat test:integration --phase during-transition-period --network localhost - -# Clear thawing period -npx hardhat transition:clear-thawing --network localhost - # Run integration tests - After transition period npx hardhat test:integration --phase after-transition-period --network localhost diff --git a/packages/horizon/tasks/test/integration.ts b/packages/horizon/tasks/test/integration.ts index 95b2ea230..bba9fa1c2 100644 --- a/packages/horizon/tasks/test/integration.ts +++ b/packages/horizon/tasks/test/integration.ts @@ -4,13 +4,9 @@ import { TASK_TEST } from 'hardhat/builtin-tasks/task-names' import { task } from 'hardhat/config' 
task('test:integration', 'Runs all integration tests') - .addParam( - 'phase', - 'Test phase to run: "during-transition-period", "after-transition-period", "after-delegation-slashing-enabled"', - ) + .addParam('phase', 'Test phase to run: "after-transition-period", "after-delegation-slashing-enabled"') .setAction(async (taskArgs, hre) => { // Get test files for each phase - const duringTransitionPeriodFiles = await glob('test/integration/during-transition-period/**/*.{js,ts}') const afterTransitionPeriodFiles = await glob('test/integration/after-transition-period/**/*.{js,ts}') const afterDelegationSlashingEnabledFiles = await glob( 'test/integration/after-delegation-slashing-enabled/**/*.{js,ts}', @@ -20,9 +16,6 @@ task('test:integration', 'Runs all integration tests') printBanner(taskArgs.phase, 'INTEGRATION TESTS: ') switch (taskArgs.phase) { - case 'during-transition-period': - await hre.run(TASK_TEST, { testFiles: duringTransitionPeriodFiles }) - break case 'after-transition-period': await hre.run(TASK_TEST, { testFiles: afterTransitionPeriodFiles }) break @@ -31,7 +24,7 @@ task('test:integration', 'Runs all integration tests') break default: throw new Error( - 'Invalid phase. Must be "during-transition-period", "after-transition-period", "after-delegation-slashing-enabled", or "all"', + 'Invalid phase. 
Must be "after-transition-period", "after-delegation-slashing-enabled", or "all"', ) } }) diff --git a/packages/horizon/tasks/transitions/thawing-period.ts b/packages/horizon/tasks/transitions/thawing-period.ts deleted file mode 100644 index e21e2bad2..000000000 --- a/packages/horizon/tasks/transitions/thawing-period.ts +++ /dev/null @@ -1,22 +0,0 @@ -import { requireLocalNetwork } from '@graphprotocol/toolshed/hardhat' -import { printBanner } from '@graphprotocol/toolshed/utils' -import { task, types } from 'hardhat/config' - -task('transition:clear-thawing', 'Clears the thawing period in HorizonStaking') - .addOptionalParam('governorIndex', 'Derivation path index for the governor account', 1, types.int) - .addFlag('skipNetworkCheck', 'Skip the network check (use with caution)') - .setAction(async (taskArgs, hre) => { - printBanner('CLEARING THAWING PERIOD') - - if (!taskArgs.skipNetworkCheck) { - requireLocalNetwork(hre) - } - - const graph = hre.graph() - const governor = await graph.accounts.getGovernor(taskArgs.governorIndex) - const horizonStaking = graph.horizon.contracts.HorizonStaking - - console.log('Clearing thawing period...') - await horizonStaking.connect(governor).clearThawingPeriod() - console.log('Thawing period cleared') - }) diff --git a/packages/horizon/test/deployment/HorizonStaking.test.ts b/packages/horizon/test/deployment/HorizonStaking.test.ts index fed2af75f..f60d92b52 100644 --- a/packages/horizon/test/deployment/HorizonStaking.test.ts +++ b/packages/horizon/test/deployment/HorizonStaking.test.ts @@ -1,5 +1,5 @@ import { loadConfig } from '@graphprotocol/toolshed/hardhat' -import { assert, expect } from 'chai' +import { expect } from 'chai' import hre from 'hardhat' import { graphProxyTests } from './lib/GraphProxy.test' @@ -27,16 +27,6 @@ describe('HorizonStaking', function () { expect(delegationSlashingEnabled).to.equal(false) }) - testIf(4)('should set a non zero thawing period', async function () { - if 
(process.env.IGNITION_DEPLOYMENT_TYPE === 'protocol') { - assert.fail('Deployment type "protocol": no historical state available') - } - const thawingPeriod = await HorizonStaking.__DEPRECATED_getThawingPeriod() - expect(thawingPeriod).to.not.equal(0) - }) - - it.skip('should set the right staking extension address') - testIf(4)('should set the right subgraph data service address', async function () { const subgraphDataServiceAddress = await HorizonStaking.getSubgraphService() expect(subgraphDataServiceAddress).to.equal(config.$global.subgraphServiceAddress) diff --git a/packages/horizon/test/integration/during-transition-period/delegator.test.ts b/packages/horizon/test/integration/during-transition-period/delegator.test.ts deleted file mode 100644 index 352599f18..000000000 --- a/packages/horizon/test/integration/during-transition-period/delegator.test.ts +++ /dev/null @@ -1,143 +0,0 @@ -import { ZERO_ADDRESS } from '@graphprotocol/toolshed' -import { delegators } from '@graphprotocol/toolshed/fixtures' -import type { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import hre from 'hardhat' -import { ethers } from 'hardhat' - -describe('Delegator', () => { - let snapshotId: string - - const thawingPeriod = 2419200n // 28 days - - // Subgraph service address is not set for integration tests - const subgraphServiceAddress = '0x0000000000000000000000000000000000000000' - - const graph = hre.graph() - const horizonStaking = graph.horizon.contracts.HorizonStaking - const graphToken = graph.horizon.contracts.L2GraphToken - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('Existing Protocol Users', () => { - describe('User undelegated before horizon was deployed', () => { - let indexer: 
HardhatEthersSigner - let delegator: HardhatEthersSigner - let tokens: bigint - - before(async () => { - const delegatorFixture = delegators[2] - const delegationFixture = delegatorFixture.delegations[0] - - // Verify delegator is undelegated - expect(delegatorFixture.undelegate).to.be.true - - // Get signers - indexer = await ethers.getSigner(delegationFixture.indexerAddress) - delegator = await ethers.getSigner(delegatorFixture.address) - - // Get tokens - tokens = delegationFixture.tokens - }) - - it('should be able to withdraw their tokens after the thawing period', async () => { - // Get the thawing period - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Mine remaining blocks to complete thawing period - for (let i = 0; i < Number(thawingPeriod) + 1; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Get delegator balance before withdrawing - const balanceBefore = await graphToken.balanceOf(delegator.address) - - // Withdraw tokens - await horizonStaking.connect(delegator)['withdrawDelegated(address,address)'](indexer.address, ZERO_ADDRESS) - - // Get delegator balance after withdrawing - const balanceAfter = await graphToken.balanceOf(delegator.address) - - // Expected balance after is the balance before plus the tokens minus the 0.5% delegation tax - const expectedBalanceAfter = balanceBefore + tokens - (tokens * 5000n) / 1000000n - - // Verify tokens are withdrawn - expect(balanceAfter).to.equal(expectedBalanceAfter) - }) - - it('should revert if the thawing period has not passed', async () => { - // Withdraw tokens - await expect( - horizonStaking.connect(delegator)['withdrawDelegated(address,address)'](indexer.address, ZERO_ADDRESS), - ).to.be.revertedWithCustomError(horizonStaking, 'HorizonStakingNothingToWithdraw') - }) - }) - - describe('Transition period is over', () => { - let governor: HardhatEthersSigner - let indexer: HardhatEthersSigner - let delegator: HardhatEthersSigner - let tokens: bigint - - 
before(async () => { - const delegatorFixture = delegators[0] - const delegationFixture = delegatorFixture.delegations[0] - - // Get signers - governor = await graph.accounts.getGovernor() - indexer = await ethers.getSigner(delegationFixture.indexerAddress) - delegator = await ethers.getSigner(delegatorFixture.address) - - // Get tokens - tokens = delegationFixture.tokens - }) - - it('should be able to undelegate during transition period and withdraw after transition period', async () => { - // Get delegator's delegation - const delegation = await horizonStaking.getDelegation( - indexer.address, - subgraphServiceAddress, - delegator.address, - ) - - // Undelegate tokens - await horizonStaking - .connect(delegator) - ['undelegate(address,address,uint256)'](indexer.address, subgraphServiceAddress, delegation.shares) - - // Wait for thawing period - await ethers.provider.send('evm_increaseTime', [Number(thawingPeriod) + 1]) - await ethers.provider.send('evm_mine', []) - - // Clear thawing period - await horizonStaking.connect(governor).clearThawingPeriod() - - // Get delegator balance before withdrawing - const balanceBefore = await graphToken.balanceOf(delegator.address) - - // Withdraw tokens - await horizonStaking - .connect(delegator) - ['withdrawDelegated(address,address,uint256)'](indexer.address, ZERO_ADDRESS, BigInt(1)) - - // Get delegator balance after withdrawing - const balanceAfter = await graphToken.balanceOf(delegator.address) - - // Expected balance after is the balance before plus the tokens minus the 0.5% delegation tax - // because the delegation was before the horizon upgrade, after the upgrade there is no tax - const expectedBalanceAfter = balanceBefore + tokens - (tokens * 5000n) / 1000000n - - // Verify tokens are withdrawn - expect(balanceAfter).to.equal(expectedBalanceAfter) - }) - }) - }) -}) diff --git a/packages/horizon/test/integration/during-transition-period/multicall.test.ts 
b/packages/horizon/test/integration/during-transition-period/multicall.test.ts deleted file mode 100644 index 948cd8f5f..000000000 --- a/packages/horizon/test/integration/during-transition-period/multicall.test.ts +++ /dev/null @@ -1,114 +0,0 @@ -import { ONE_MILLION, PaymentTypes } from '@graphprotocol/toolshed' -import { setGRTBalance } from '@graphprotocol/toolshed/hardhat' -import type { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import hre from 'hardhat' -import { ethers } from 'hardhat' - -describe('Service Provider', () => { - let snapshotId: string - - const maxVerifierCut = 50_000n - const thawingPeriod = 2419200n - - const graph = hre.graph() - const horizonStaking = graph.horizon.contracts.HorizonStaking - const graphToken = graph.horizon.contracts.L2GraphToken - - const subgraphServiceAddress = '0x0000000000000000000000000000000000000000' - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('New Protocol Users', () => { - let serviceProvider: HardhatEthersSigner - - before(async () => { - ;[, , serviceProvider] = await graph.accounts.getTestAccounts() - await setGRTBalance(graph.provider, graphToken.target, serviceProvider.address, ONE_MILLION) - }) - - it('should allow multicalling stake+provision calls', async () => { - const tokensToStake = ethers.parseEther('1000') - const tokensToProvision = ethers.parseEther('100') - - // check state before - const beforeProvision = await horizonStaking.getProvision(serviceProvider.address, subgraphServiceAddress) - expect(beforeProvision.tokens).to.equal(0) - expect(beforeProvision.maxVerifierCut).to.equal(0) - expect(beforeProvision.thawingPeriod).to.equal(0) - expect(beforeProvision.createdAt).to.equal(0) - - // multicall - await 
graphToken.connect(serviceProvider).approve(horizonStaking.target, tokensToStake) - const stakeCalldata = horizonStaking.interface.encodeFunctionData('stake', [tokensToStake]) - const provisionCalldata = horizonStaking.interface.encodeFunctionData('provision', [ - serviceProvider.address, - subgraphServiceAddress, - tokensToProvision, - maxVerifierCut, - thawingPeriod, - ]) - await horizonStaking.connect(serviceProvider).multicall([stakeCalldata, provisionCalldata]) - - // check state after - const block = await graph.provider.getBlock('latest') - const afterProvision = await horizonStaking.getProvision(serviceProvider.address, subgraphServiceAddress) - expect(afterProvision.tokens).to.equal(tokensToProvision) - expect(afterProvision.maxVerifierCut).to.equal(maxVerifierCut) - expect(afterProvision.thawingPeriod).to.equal(thawingPeriod) - expect(afterProvision.createdAt).to.equal(block?.timestamp) - }) - - it('should allow multicalling delegation parameter set calls', async () => { - // check state before - const beforeIndexingRewards = await horizonStaking.getDelegationFeeCut( - serviceProvider.address, - subgraphServiceAddress, - PaymentTypes.IndexingRewards, - ) - const beforeQueryFee = await horizonStaking.getDelegationFeeCut( - serviceProvider.address, - subgraphServiceAddress, - PaymentTypes.QueryFee, - ) - expect(beforeIndexingRewards).to.equal(0) - expect(beforeQueryFee).to.equal(0) - - // multicall - const indexingRewardsCalldata = horizonStaking.interface.encodeFunctionData('setDelegationFeeCut', [ - serviceProvider.address, - subgraphServiceAddress, - PaymentTypes.IndexingRewards, - 10_000n, - ]) - const queryFeeCalldata = horizonStaking.interface.encodeFunctionData('setDelegationFeeCut', [ - serviceProvider.address, - subgraphServiceAddress, - PaymentTypes.QueryFee, - 12_345n, - ]) - await horizonStaking.connect(serviceProvider).multicall([indexingRewardsCalldata, queryFeeCalldata]) - - // check state after - const afterIndexingRewards = await 
horizonStaking.getDelegationFeeCut( - serviceProvider.address, - subgraphServiceAddress, - PaymentTypes.IndexingRewards, - ) - const afterQueryFee = await horizonStaking.getDelegationFeeCut( - serviceProvider.address, - subgraphServiceAddress, - PaymentTypes.QueryFee, - ) - expect(afterIndexingRewards).to.equal(10_000n) - expect(afterQueryFee).to.equal(12_345n) - }) - }) -}) diff --git a/packages/horizon/test/integration/during-transition-period/operator.test.ts b/packages/horizon/test/integration/during-transition-period/operator.test.ts deleted file mode 100644 index ab5b26ebf..000000000 --- a/packages/horizon/test/integration/during-transition-period/operator.test.ts +++ /dev/null @@ -1,99 +0,0 @@ -import { generatePOI } from '@graphprotocol/toolshed' -import { indexers } from '@graphprotocol/toolshed/fixtures' -import { getEventData } from '@graphprotocol/toolshed/hardhat' -import type { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import hre from 'hardhat' -import { ethers } from 'hardhat' - -describe('Operator', () => { - let snapshotId: string - - // Subgraph service address is not set for integration tests - const subgraphServiceAddress = '0x0000000000000000000000000000000000000000' - - const graph = hre.graph() - const horizonStaking = graph.horizon.contracts.HorizonStaking - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('Existing Protocol Users', () => { - let indexer: HardhatEthersSigner - let operator: HardhatEthersSigner - let allocationID: string - let allocationTokens: bigint - let delegationIndexingCut: number - - before(async () => { - const indexerFixture = indexers[0] - const allocationFixture = indexerFixture.allocations[0] - - // Get signers - indexer = await 
ethers.getSigner(indexerFixture.address) - ;[operator] = await graph.accounts.getTestAccounts() - - // Get allocation details - allocationID = allocationFixture.allocationID - allocationTokens = allocationFixture.tokens - delegationIndexingCut = indexerFixture.indexingRewardCut - - // Set the operator - await horizonStaking.connect(indexer).setOperator(subgraphServiceAddress, operator.address, true) - }) - - it('should allow the operator to close an open legacy allocation and collect rewards', async () => { - // Use a non-zero POI - const poi = generatePOI('poi') - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Get delegation pool before closing allocation - const delegationPoolBefore = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensBefore = delegationPoolBefore.tokens - - // Mine blocks to simulate time passing - const halfThawingPeriod = Number(thawingPeriod) / 2 - for (let i = 0; i < halfThawingPeriod; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Get idle stake before closing allocation - const idleStakeBefore = await horizonStaking.getIdleStake(indexer.address) - - // Close allocation - const tx = await horizonStaking.connect(operator).closeAllocation(allocationID, poi) - const eventData = await getEventData( - tx, - 'event HorizonRewardsAssigned(address indexed indexer, address indexed allocationID, uint256 amount)', - ) - const rewards = eventData[2] - - // Verify rewards are not zero - expect(rewards).to.not.equal(0, 'Rewards were not transferred to service provider') - - // Verify rewards minus delegation cut are restaked - const idleStakeAfter = await horizonStaking.getIdleStake(indexer.address) - const idleStakeRewardsTokens = (rewards * BigInt(delegationIndexingCut)) / 1000000n - expect(idleStakeAfter).to.equal( - idleStakeBefore + allocationTokens + idleStakeRewardsTokens, - 'Rewards were not restaked', - ) - - // Verify delegators cut is 
added to delegation pool - const delegationPool = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensAfter = delegationPool.tokens - const delegationRewardsTokens = rewards - idleStakeRewardsTokens - expect(delegationPoolTokensAfter).to.equal( - delegationPoolTokensBefore + delegationRewardsTokens, - 'Delegators cut was not added to delegation pool', - ) - }) - }) -}) diff --git a/packages/horizon/test/integration/during-transition-period/permissionless.test.ts b/packages/horizon/test/integration/during-transition-period/permissionless.test.ts deleted file mode 100644 index a7d13e302..000000000 --- a/packages/horizon/test/integration/during-transition-period/permissionless.test.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { generatePOI } from '@graphprotocol/toolshed' -import { indexers } from '@graphprotocol/toolshed/fixtures' -import type { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import hre from 'hardhat' -import { ethers } from 'hardhat' - -describe('Permissionless', () => { - let snapshotId: string - - const graph = hre.graph() - const horizonStaking = graph.horizon.contracts.HorizonStaking - const epochManager = graph.horizon.contracts.EpochManager - const subgraphServiceAddress = '0x0000000000000000000000000000000000000000' - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('After max allocation epochs', () => { - let indexer: HardhatEthersSigner - let anySigner: HardhatEthersSigner - let allocationID: string - let allocationTokens: bigint - - before(async () => { - // Get signers - indexer = await ethers.getSigner(indexers[0].address) - ;[anySigner] = await graph.accounts.getTestAccounts() - - // ensure anySigner is not 
operator for the indexer - await horizonStaking.connect(indexer).setOperator(subgraphServiceAddress, anySigner.address, false) - - // Get allocation details - allocationID = indexers[0].allocations[0].allocationID - allocationTokens = indexers[0].allocations[0].tokens - }) - - it('should allow any user to close an allocation after 28 epochs', async () => { - // Get indexer's idle stake before closing allocation - const idleStakeBefore = await horizonStaking.getIdleStake(indexer.address) - - // Mine blocks to simulate 28 epochs passing - const startingEpoch = await epochManager.currentEpoch() - while ((await epochManager.currentEpoch()) - startingEpoch < 28) { - await ethers.provider.send('evm_mine', []) - } - - // Close allocation - const poi = generatePOI('poi') - await horizonStaking.connect(anySigner).closeAllocation(allocationID, poi) - - // Get indexer's idle stake after closing allocation - const idleStakeAfter = await horizonStaking.getIdleStake(indexer.address) - - // Verify allocation tokens were added to indexer's idle stake but no rewards were collected - expect(idleStakeAfter).to.be.equal(idleStakeBefore + allocationTokens) - }) - }) -}) diff --git a/packages/horizon/test/integration/during-transition-period/service-provider.test.ts b/packages/horizon/test/integration/during-transition-period/service-provider.test.ts deleted file mode 100644 index 0be3c6112..000000000 --- a/packages/horizon/test/integration/during-transition-period/service-provider.test.ts +++ /dev/null @@ -1,521 +0,0 @@ -import { generatePOI, ONE_MILLION } from '@graphprotocol/toolshed' -import { indexers } from '@graphprotocol/toolshed/fixtures' -import { getEventData, setGRTBalance } from '@graphprotocol/toolshed/hardhat' -import type { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import hre from 'hardhat' -import { ethers } from 'hardhat' - -describe('Service Provider', () => { - let snapshotId: string - - const graph = 
hre.graph() - const { stake, collect } = graph.horizon.actions - const horizonStaking = graph.horizon.contracts.HorizonStaking - const graphToken = graph.horizon.contracts.L2GraphToken - - // Subgraph service address is not set for integration tests - const subgraphServiceAddress = '0x0000000000000000000000000000000000000000' - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('New Protocol Users', () => { - let serviceProvider: HardhatEthersSigner - let tokensToStake = ethers.parseEther('1000') - - before(async () => { - ;[, , serviceProvider] = await graph.accounts.getTestAccounts() - await setGRTBalance(graph.provider, graphToken.target, serviceProvider.address, ONE_MILLION) - - // Stake tokens to service provider - await stake(serviceProvider, [tokensToStake]) - }) - - it('should allow service provider to unstake and withdraw after thawing period', async () => { - const tokensToUnstake = ethers.parseEther('100') - const balanceBefore = await graphToken.balanceOf(serviceProvider.address) - - // First unstake request - await horizonStaking.connect(serviceProvider).unstake(tokensToUnstake) - - // During transition period, tokens are locked by thawing period - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Mine remaining blocks to complete thawing period - for (let i = 0; i < Number(thawingPeriod) + 1; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Now we can withdraw - await horizonStaking.connect(serviceProvider).withdraw() - const balanceAfter = await graphToken.balanceOf(serviceProvider.address) - - expect(balanceAfter).to.equal( - balanceBefore + tokensToUnstake, - 'Tokens were not transferred back to service provider', - ) - }) - - it('should handle multiple unstake requests 
correctly', async () => { - // Make multiple unstake requests - const request1 = ethers.parseEther('50') - const request2 = ethers.parseEther('75') - - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // First unstake request - await horizonStaking.connect(serviceProvider).unstake(request1) - - // Mine half of thawing period blocks - const halfThawingPeriod = Number(thawingPeriod) / 2 - for (let i = 0; i < halfThawingPeriod; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Second unstake request - await horizonStaking.connect(serviceProvider).unstake(request2) - - // Mine remaining blocks to complete first unstake thawing period - for (let i = 0; i < halfThawingPeriod; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Check that withdraw reverts since thawing period is not complete - await expect(horizonStaking.connect(serviceProvider).withdraw()).to.be.revertedWithCustomError( - horizonStaking, - 'HorizonStakingStillThawing', - ) - - // Mine remaining blocks to complete thawing period - for (let i = 0; i < halfThawingPeriod + 1; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Get balance before withdrawing - const balanceBefore = await graphToken.balanceOf(serviceProvider.address) - - // Withdraw all thawed tokens - await horizonStaking.connect(serviceProvider).withdraw() - - // Verify all tokens are withdrawn and transferred back to service provider - const balanceAfter = await graphToken.balanceOf(serviceProvider.address) - expect(balanceAfter).to.equal( - balanceBefore + request1 + request2, - 'Tokens were not transferred back to service provider', - ) - }) - - describe('Transition period is over', () => { - let governor: HardhatEthersSigner - let tokensToUnstake: bigint - - before(async () => { - // Get governor - governor = await graph.accounts.getGovernor() - - // Set tokens - tokensToStake = ethers.parseEther('100000') - tokensToUnstake = ethers.parseEther('10000') - }) - - it('should 
be able to withdraw tokens that were unstaked during transition period', async () => { - // Stake tokens - await stake(serviceProvider, [tokensToStake]) - - // Unstake tokens - await horizonStaking.connect(serviceProvider).unstake(tokensToUnstake) - - // Get balance before withdrawing - const balanceBefore = await graphToken.balanceOf(serviceProvider.address) - - // Get thawing period - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Clear thawing period - await horizonStaking.connect(governor).clearThawingPeriod() - - // Mine blocks to complete thawing period - for (let i = 0; i < Number(thawingPeriod) + 1; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Withdraw tokens - await horizonStaking.connect(serviceProvider).withdraw() - - // Get balance after withdrawing - const balanceAfter = await graphToken.balanceOf(serviceProvider.address) - expect(balanceAfter).to.equal( - balanceBefore + tokensToUnstake, - 'Tokens were not transferred back to service provider', - ) - }) - - it('should be able to unstake tokens without a thawing period', async () => { - // Stake tokens - await stake(serviceProvider, [tokensToStake]) - - // Clear thawing period - await horizonStaking.connect(governor).clearThawingPeriod() - - // Get balance before withdrawing - const balanceBefore = await graphToken.balanceOf(serviceProvider.address) - - // Unstake tokens - await horizonStaking.connect(serviceProvider).unstake(tokensToUnstake) - - // Get balance after withdrawing - const balanceAfter = await graphToken.balanceOf(serviceProvider.address) - expect(balanceAfter).to.equal( - balanceBefore + tokensToUnstake, - 'Tokens were not transferred back to service provider', - ) - }) - }) - }) - - describe('Existing Protocol Users', () => { - let indexer: HardhatEthersSigner - let tokensUnstaked: bigint - - before(async () => { - const indexerFixture = indexers[0] - indexer = await ethers.getSigner(indexerFixture.address) - tokensUnstaked = 
indexerFixture.tokensToUnstake || 0n - - await setGRTBalance(graph.provider, graphToken.target, indexer.address, ONE_MILLION) - }) - - it('should allow service provider to withdraw their locked tokens after thawing period passes', async () => { - // Get balance before withdrawing - const balanceBefore = await graphToken.balanceOf(indexer.address) - - // Get thawing period - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Mine blocks to complete thawing period - for (let i = 0; i < Number(thawingPeriod) + 1; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Withdraw tokens - await horizonStaking.connect(indexer).withdraw() - - // Verify tokens are transferred back to service provider - const balanceAfter = await graphToken.balanceOf(indexer.address) - expect(balanceAfter).to.equal( - balanceBefore + tokensUnstaked, - 'Tokens were not transferred back to service provider', - ) - }) - - describe('Legacy allocations', () => { - describe('Restaking', () => { - let delegationIndexingCut: number - let delegationQueryFeeCut: number - let allocationID: string - let allocationTokens: bigint - let gateway: HardhatEthersSigner - - beforeEach(async () => { - const indexerFixture = indexers[0] - indexer = await ethers.getSigner(indexerFixture.address) - delegationIndexingCut = indexerFixture.indexingRewardCut - delegationQueryFeeCut = indexerFixture.queryFeeCut - allocationID = indexerFixture.allocations[0].allocationID - allocationTokens = indexerFixture.allocations[0].tokens - gateway = await graph.accounts.getGateway() - await setGRTBalance(graph.provider, graphToken.target, gateway.address, ONE_MILLION) - }) - - it('should be able to close an open legacy allocation and collect rewards', async () => { - // Use a non-zero POI - const poi = generatePOI('poi') - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Get delegation pool before closing allocation - const delegationPoolBefore = await 
horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensBefore = delegationPoolBefore.tokens - - // Mine blocks to simulate time passing - const halfThawingPeriod = Number(thawingPeriod) / 2 - for (let i = 0; i < halfThawingPeriod; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Get idle stake before closing allocation - const idleStakeBefore = await horizonStaking.getIdleStake(indexer.address) - - // Close allocation - const tx = await horizonStaking.connect(indexer).closeAllocation(allocationID, poi) - const eventData = await getEventData( - tx, - 'event HorizonRewardsAssigned(address indexed indexer, address indexed allocationID, uint256 amount)', - ) - const rewards = eventData[2] - - // Verify rewards are not zero - expect(rewards).to.not.equal(0, 'Rewards were not transferred to service provider') - - // Verify rewards minus delegation cut are restaked - const idleStakeAfter = await horizonStaking.getIdleStake(indexer.address) - const idleStakeRewardsTokens = (rewards * BigInt(delegationIndexingCut)) / 1000000n - expect(idleStakeAfter).to.equal( - idleStakeBefore + allocationTokens + idleStakeRewardsTokens, - 'Rewards were not restaked', - ) - - // Verify delegators cut is added to delegation pool - const delegationPool = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensAfter = delegationPool.tokens - const delegationRewardsTokens = rewards - idleStakeRewardsTokens - expect(delegationPoolTokensAfter).to.equal( - delegationPoolTokensBefore + delegationRewardsTokens, - 'Delegators cut was not added to delegation pool', - ) - }) - - it('should be able to collect query fees', async () => { - const tokensToCollect = ethers.parseEther('1000') - - // Get idle stake before collecting - const idleStakeBefore = await horizonStaking.getIdleStake(indexer.address) - - // Get delegation pool before collecting - const delegationPoolBefore = await 
horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensBefore = delegationPoolBefore.tokens - - // Collect query fees - await collect(gateway, [tokensToCollect, allocationID]) - - // Get idle stake after collecting - const idleStakeAfter = await horizonStaking.getIdleStake(indexer.address) - - // Subtract protocol tax (1%) and curation fees (10% after the protocol tax deduction) - const protocolTax = (tokensToCollect * 1n) / 100n - const curationFees = (tokensToCollect * 99n) / 1000n - const remainingTokens = tokensToCollect - protocolTax - curationFees - - // Verify tokens minus delegators cut are restaked - const indexerCutTokens = (remainingTokens * BigInt(delegationQueryFeeCut)) / 1000000n - expect(idleStakeAfter).to.equal(idleStakeBefore + indexerCutTokens, 'Indexer cut was not restaked') - - // Verify delegators cut is added to delegation pool - const delegationPool = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensAfter = delegationPool.tokens - const delegationCutTokens = remainingTokens - indexerCutTokens - expect(delegationPoolTokensAfter).to.equal( - delegationPoolTokensBefore + delegationCutTokens, - 'Delegators cut was not added to delegation pool', - ) - }) - - it('should be able to close an allocation and collect query fees for the closed allocation', async () => { - // Use a non-zero POI - const poi = generatePOI('poi') - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Mine blocks to simulate time passing - const halfThawingPeriod = Number(thawingPeriod) / 2 - for (let i = 0; i < halfThawingPeriod; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Close allocation - await horizonStaking.connect(indexer).closeAllocation(allocationID, poi) - - // Tokens to collect - const tokensToCollect = ethers.parseEther('1000') - - // Get idle stake before collecting - const idleStakeBefore = await 
horizonStaking.getIdleStake(indexer.address) - - // Get delegation pool before collecting - const delegationPoolBefore = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensBefore = delegationPoolBefore.tokens - - // Collect query fees - await collect(gateway, [tokensToCollect, allocationID]) - - // Get idle stake after collecting - const idleStakeAfter = await horizonStaking.getIdleStake(indexer.address) - - // Subtract protocol tax (1%) and curation fees (10% after the protocol tax deduction) - const protocolTax = (tokensToCollect * 1n) / 100n - const curationFees = (tokensToCollect * 99n) / 1000n - const remainingTokens = tokensToCollect - protocolTax - curationFees - - // Verify tokens minus delegators cut are restaked - const indexerCutTokens = (remainingTokens * BigInt(delegationQueryFeeCut)) / 1000000n - expect(idleStakeAfter).to.equal(idleStakeBefore + indexerCutTokens, 'Indexer cut was not restaked') - - // Verify delegators cut is added to delegation pool - const delegationPool = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensAfter = delegationPool.tokens - const delegationCutTokens = remainingTokens - indexerCutTokens - expect(delegationPoolTokensAfter).to.equal( - delegationPoolTokensBefore + delegationCutTokens, - 'Delegators cut was not added to delegation pool', - ) - }) - }) - - describe('With rewardsDestination set', () => { - let delegationIndexingCut: number - let delegationQueryFeeCut: number - let rewardsDestination: string - let allocationID: string - let gateway: HardhatEthersSigner - - beforeEach(async () => { - const indexerFixture = indexers[1] - indexer = await ethers.getSigner(indexerFixture.address) - delegationIndexingCut = indexerFixture.indexingRewardCut - delegationQueryFeeCut = indexerFixture.queryFeeCut - rewardsDestination = indexerFixture.rewardsDestination! 
- allocationID = indexerFixture.allocations[0].allocationID - gateway = await graph.accounts.getGateway() - await setGRTBalance(graph.provider, graphToken.target, gateway.address, ONE_MILLION) - }) - - it('should be able to close an open allocation and collect rewards', async () => { - // Use a non-zero POI - const poi = generatePOI('poi') - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Get delegation tokens before - const delegationPoolBefore = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensBefore = delegationPoolBefore.tokens - - // Mine blocks to simulate time passing - const halfThawingPeriod = Number(thawingPeriod) / 2 - for (let i = 0; i < halfThawingPeriod; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Get rewards destination balance before closing allocation - const balanceBefore = await graphToken.balanceOf(rewardsDestination) - - // Close allocation - const tx = await horizonStaking.connect(indexer).closeAllocation(allocationID, poi) - const eventData = await getEventData( - tx, - 'event HorizonRewardsAssigned(address indexed indexer, address indexed allocationID, uint256 amount)', - ) - const rewards = eventData[2] - - // Verify rewards are not zero - expect(rewards).to.not.equal(0, 'Rewards were not transferred to rewards destination') - - // Verify indexer rewards cut is transferred to rewards destination - const balanceAfter = await graphToken.balanceOf(rewardsDestination) - const indexerCutTokens = (rewards * BigInt(delegationIndexingCut)) / 1000000n - expect(balanceAfter).to.equal( - balanceBefore + indexerCutTokens, - 'Indexer cut was not transferred to rewards destination', - ) - - // Verify delegators cut is added to delegation pool - const delegationPoolAfter = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensAfter = delegationPoolAfter.tokens - const delegationCutTokens = 
rewards - indexerCutTokens - expect(delegationPoolTokensAfter).to.equal( - delegationPoolTokensBefore + delegationCutTokens, - 'Delegators cut was not added to delegation pool', - ) - }) - - it('should be able to collect query fees', async () => { - const tokensToCollect = ethers.parseEther('1000') - - // Get rewards destination balance before collecting - const balanceBefore = await graphToken.balanceOf(rewardsDestination) - - // Get delegation tokens before - const delegationPoolBefore = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensBefore = delegationPoolBefore.tokens - - // Collect query fees - await collect(gateway, [tokensToCollect, allocationID]) - - // Get rewards destination balance after collecting - const balanceAfter = await graphToken.balanceOf(rewardsDestination) - - // Subtract protocol tax (1%) and curation fees (10% after the protocol tax deduction) - const protocolTax = (tokensToCollect * 1n) / 100n - const curationFees = (tokensToCollect * 99n) / 1000n - const remainingTokens = tokensToCollect - protocolTax - curationFees - - // Verify indexer cut is transferred to rewards destination - const indexerCutTokens = (remainingTokens * BigInt(delegationQueryFeeCut)) / 1000000n - expect(balanceAfter).to.equal( - balanceBefore + indexerCutTokens, - 'Indexer cut was not transferred to rewards destination', - ) - - // Verify delegators cut is added to delegation pool - const delegationPoolAfter = await horizonStaking.getDelegationPool(indexer.address, subgraphServiceAddress) - const delegationPoolTokensAfter = delegationPoolAfter.tokens - const delegationCutTokens = remainingTokens - indexerCutTokens - expect(delegationPoolTokensAfter).to.equal( - delegationPoolTokensBefore + delegationCutTokens, - 'Delegators cut was not added to delegation pool', - ) - }) - }) - }) - - describe('Transition period is over', () => { - let governor: HardhatEthersSigner - let tokensToUnstake: bigint - - 
before(async () => { - // Get governor - governor = await graph.accounts.getGovernor() - - // Get indexer - const indexerFixture = indexers[2] - indexer = await ethers.getSigner(indexerFixture.address) - - // Set tokens - tokensToUnstake = ethers.parseEther('10000') - }) - - it('should be able to withdraw tokens that were unstaked during transition period', async () => { - // Unstake tokens during transition period - await horizonStaking.connect(indexer).unstake(tokensToUnstake) - - // Get thawing period - const thawingPeriod = await horizonStaking.__DEPRECATED_getThawingPeriod() - - // Clear thawing period - await horizonStaking.connect(governor).clearThawingPeriod() - - // Mine blocks to complete thawing period - for (let i = 0; i < Number(thawingPeriod) + 1; i++) { - await ethers.provider.send('evm_mine', []) - } - - // Get balance before withdrawing - const balanceBefore = await graphToken.balanceOf(indexer.address) - - // Withdraw tokens - await horizonStaking.connect(indexer).withdraw() - - // Get balance after withdrawing - const balanceAfter = await graphToken.balanceOf(indexer.address) - expect(balanceAfter).to.equal( - balanceBefore + tokensToUnstake, - 'Tokens were not transferred back to service provider', - ) - }) - }) - }) -}) diff --git a/packages/horizon/test/integration/during-transition-period/slasher.test.ts b/packages/horizon/test/integration/during-transition-period/slasher.test.ts deleted file mode 100644 index 47ced0883..000000000 --- a/packages/horizon/test/integration/during-transition-period/slasher.test.ts +++ /dev/null @@ -1,88 +0,0 @@ -import { indexers } from '@graphprotocol/toolshed/fixtures' -import type { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import hre from 'hardhat' -import { ethers } from 'hardhat' - -describe('Slasher', () => { - let snapshotId: string - - let indexer: string - let slasher: HardhatEthersSigner - let tokensToSlash: bigint - - const graph = hre.graph() 
- const horizonStaking = graph.horizon.contracts.HorizonStaking - const graphToken = graph.horizon.contracts.L2GraphToken - - before(async () => { - slasher = await graph.accounts.getArbitrator() - }) - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('Available tokens', () => { - before(() => { - const indexerFixture = indexers[0] - indexer = indexerFixture.address - tokensToSlash = ethers.parseEther('10000') - }) - - it('should be able to slash indexer stake', async () => { - // Before slash state - const idleStakeBeforeSlash = await horizonStaking.getIdleStake(indexer) - const tokensVerifier = tokensToSlash / 2n - const slasherBeforeBalance = await graphToken.balanceOf(slasher.address) - - // Slash tokens - await horizonStaking.connect(slasher).slash(indexer, tokensToSlash, tokensVerifier, slasher.address) - - // Indexer's stake should have decreased - const idleStakeAfterSlash = await horizonStaking.getIdleStake(indexer) - expect(idleStakeAfterSlash).to.equal(idleStakeBeforeSlash - tokensToSlash, 'Indexer stake should have decreased') - - // Slasher should have received the tokens - const slasherAfterBalance = await graphToken.balanceOf(slasher.address) - expect(slasherAfterBalance).to.equal( - slasherBeforeBalance + tokensVerifier, - 'Slasher should have received the tokens', - ) - }) - }) - - describe('Locked tokens', () => { - before(() => { - const indexerFixture = indexers[1] - indexer = indexerFixture.address - tokensToSlash = indexerFixture.stake - }) - - it('should be able to slash locked tokens', async () => { - // Before slash state - const tokensVerifier = tokensToSlash / 2n - const slasherBeforeBalance = await graphToken.balanceOf(slasher.address) - - // Slash tokens - await horizonStaking.connect(slasher).slash(indexer, 
tokensToSlash, tokensVerifier, slasher.address) - - // Indexer's entire stake should have been slashed - const indexerStakeAfterSlash = await horizonStaking.getServiceProvider(indexer) - expect(indexerStakeAfterSlash.tokensStaked).to.equal(0n, 'Indexer stake should have been slashed') - - // Slasher should have received the tokens - const slasherAfterBalance = await graphToken.balanceOf(slasher.address) - expect(slasherAfterBalance).to.equal( - slasherBeforeBalance + tokensVerifier, - 'Slasher should have received the tokens', - ) - }) - }) -}) diff --git a/packages/horizon/test/unit/GraphBase.t.sol b/packages/horizon/test/unit/GraphBase.t.sol index 7fa450295..14ffb2ccb 100644 --- a/packages/horizon/test/unit/GraphBase.t.sol +++ b/packages/horizon/test/unit/GraphBase.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { Create2 } from "@openzeppelin/contracts/utils/Create2.sol"; import { GraphProxyAdmin } from "@graphprotocol/contracts/contracts/upgrades/GraphProxyAdmin.sol"; @@ -12,7 +12,6 @@ import { GraphPayments } from "contracts/payments/GraphPayments.sol"; import { GraphTallyCollector } from "contracts/payments/collectors/GraphTallyCollector.sol"; import { IHorizonStaking } from "@graphprotocol/interfaces/contracts/horizon/IHorizonStaking.sol"; import { HorizonStaking } from "contracts/staking/HorizonStaking.sol"; -import { HorizonStakingExtension } from "contracts/staking/HorizonStakingExtension.sol"; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { MockGRTToken } from "../../contracts/mocks/MockGRTToken.sol"; import { EpochManagerMock } from "contracts/mocks/EpochManagerMock.sol"; @@ -41,7 +40,6 @@ abstract contract GraphBaseTest is IHorizonStakingTypes, Utils, Constants { GraphTallyCollector graphTallyCollector; HorizonStaking private stakingBase; - HorizonStakingExtension private stakingExtension; address 
subgraphDataServiceLegacyAddress = makeAddr("subgraphDataServiceLegacyAddress"); address subgraphDataServiceAddress = makeAddr("subgraphDataServiceAddress"); @@ -69,8 +67,7 @@ abstract contract GraphBaseTest is IHorizonStakingTypes, Utils, Constants { operator: createUser("operator"), gateway: createUser("gateway"), verifier: createUser("verifier"), - delegator: createUser("delegator"), - legacySlasher: createUser("legacySlasher") + delegator: createUser("delegator") }); // Deploy protocol contracts @@ -84,7 +81,6 @@ abstract contract GraphBaseTest is IHorizonStakingTypes, Utils, Constants { vm.label({ account: address(payments), newLabel: "GraphPayments" }); vm.label({ account: address(escrow), newLabel: "PaymentsEscrow" }); vm.label({ account: address(staking), newLabel: "HorizonStaking" }); - vm.label({ account: address(stakingExtension), newLabel: "HorizonStakingExtension" }); vm.label({ account: address(graphTallyCollector), newLabel: "GraphTallyCollector" }); // Ensure caller is back to the original msg.sender @@ -192,12 +188,7 @@ abstract contract GraphBaseTest is IHorizonStakingTypes, Utils, Constants { escrow = PaymentsEscrow(escrowProxyAddress); } - stakingExtension = new HorizonStakingExtension(address(controller), subgraphDataServiceLegacyAddress); - stakingBase = new HorizonStaking( - address(controller), - address(stakingExtension), - subgraphDataServiceLegacyAddress - ); + stakingBase = new HorizonStaking(address(controller), subgraphDataServiceLegacyAddress); graphTallyCollector = new GraphTallyCollector( "GraphTallyCollector", diff --git a/packages/horizon/test/unit/data-service/DataService.t.sol b/packages/horizon/test/unit/data-service/DataService.t.sol index 209362767..a7fb52d58 100644 --- a/packages/horizon/test/unit/data-service/DataService.t.sol +++ b/packages/horizon/test/unit/data-service/DataService.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } 
from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; import { HorizonStakingSharedTest } from "../shared/horizon-staking/HorizonStakingShared.t.sol"; diff --git a/packages/horizon/test/unit/data-service/DataServiceUpgradeable.t.sol b/packages/horizon/test/unit/data-service/DataServiceUpgradeable.t.sol index a4501242b..ac2be13ea 100644 --- a/packages/horizon/test/unit/data-service/DataServiceUpgradeable.t.sol +++ b/packages/horizon/test/unit/data-service/DataServiceUpgradeable.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { GraphBaseTest } from "../GraphBase.t.sol"; import { DataServiceBaseUpgradeable } from "./implementations/DataServiceBaseUpgradeable.sol"; diff --git a/packages/horizon/test/unit/data-service/extensions/DataServiceFees.t.sol b/packages/horizon/test/unit/data-service/extensions/DataServiceFees.t.sol index a2ae10653..5692dd952 100644 --- a/packages/horizon/test/unit/data-service/extensions/DataServiceFees.t.sol +++ b/packages/horizon/test/unit/data-service/extensions/DataServiceFees.t.sol @@ -1,9 +1,9 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingSharedTest } from "../../shared/horizon-staking/HorizonStakingShared.t.sol"; import { DataServiceImpFees } from "../implementations/DataServiceImpFees.sol"; -import { IDataServiceFees } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceFees.sol"; +import { StakeClaims } from "../../../../contracts/data-service/libraries/StakeClaims.sol"; import { ProvisionTracker } from "../../../../contracts/data-service/libraries/ProvisionTracker.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; @@ -13,7 +13,7 @@ contract DataServiceFeesTest is HorizonStakingSharedTest { useIndexer useProvisionDataService(address(dataService), PROVISION_TOKENS, 0, 0) { - 
vm.expectRevert(abi.encodeWithSignature("DataServiceFeesZeroTokens()")); + vm.expectRevert(abi.encodeWithSignature("StakeClaimsZeroTokens()")); dataService.lockStake(users.indexer, 0); } @@ -145,7 +145,7 @@ contract DataServiceFeesTest is HorizonStakingSharedTest { // it should emit a an event vm.expectEmit(); - emit IDataServiceFees.StakeClaimLocked( + emit StakeClaims.StakeClaimLocked( serviceProvider, calcValues.predictedClaimId, calcValues.stakeToLock, @@ -207,14 +207,14 @@ contract DataServiceFeesTest is HorizonStakingSharedTest { break; } - emit IDataServiceFees.StakeClaimReleased(serviceProvider, calcValues.head, claimTokens, releasableAt); + emit StakeClaims.StakeClaimReleased(serviceProvider, calcValues.head, claimTokens, releasableAt); calcValues.head = nextClaim; calcValues.tokensReleased += claimTokens; calcValues.claimsCount++; } // it should emit a an event - emit IDataServiceFees.StakeClaimsReleased(serviceProvider, calcValues.claimsCount, calcValues.tokensReleased); + emit StakeClaims.StakeClaimsReleased(serviceProvider, calcValues.claimsCount, calcValues.tokensReleased); dataService.releaseStake(numClaimsToRelease); // after state diff --git a/packages/horizon/test/unit/data-service/extensions/DataServicePausable.t.sol b/packages/horizon/test/unit/data-service/extensions/DataServicePausable.t.sol index 47912797b..97c6bb100 100644 --- a/packages/horizon/test/unit/data-service/extensions/DataServicePausable.t.sol +++ b/packages/horizon/test/unit/data-service/extensions/DataServicePausable.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingSharedTest } from "../../shared/horizon-staking/HorizonStakingShared.t.sol"; import { DataServiceImpPausable } from "../implementations/DataServiceImpPausable.sol"; diff --git a/packages/horizon/test/unit/data-service/extensions/DataServicePausableUpgradeable.t.sol 
b/packages/horizon/test/unit/data-service/extensions/DataServicePausableUpgradeable.t.sol index d5413ed5b..520676ec0 100644 --- a/packages/horizon/test/unit/data-service/extensions/DataServicePausableUpgradeable.t.sol +++ b/packages/horizon/test/unit/data-service/extensions/DataServicePausableUpgradeable.t.sol @@ -1,19 +1,22 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { GraphBaseTest } from "../../GraphBase.t.sol"; import { DataServiceImpPausableUpgradeable } from "../implementations/DataServiceImpPausableUpgradeable.sol"; +import { IDataServicePausable } from "@graphprotocol/interfaces/contracts/data-service/IDataServicePausable.sol"; import { UnsafeUpgrades } from "@openzeppelin/foundry-upgrades/src/Upgrades.sol"; import { PPMMath } from "./../../../../contracts/libraries/PPMMath.sol"; contract DataServicePausableUpgradeableTest is GraphBaseTest { - function test_WhenTheContractIsDeployed() external { - ( - DataServiceImpPausableUpgradeable dataService, - DataServiceImpPausableUpgradeable implementation - ) = _deployDataService(); + DataServiceImpPausableUpgradeable private dataService; + function setUp() public override { + super.setUp(); + (dataService, ) = _deployDataService(); + } + + function test_WhenTheContractIsDeployed() external view { // via proxy - ensure that the proxy was initialized correctly // these calls validate proxy storage was correctly initialized uint32 delegationRatio = dataService.getDelegationRatio(); @@ -30,13 +33,113 @@ contract DataServicePausableUpgradeableTest is GraphBaseTest { (uint64 minThawingPeriod, uint64 maxThawingPeriod) = dataService.getThawingPeriodRange(); assertEq(minThawingPeriod, type(uint64).min); assertEq(maxThawingPeriod, type(uint64).max); + } + + // -- setPauseGuardian -- + + function test_SetPauseGuardian() external { + address guardian = makeAddr("guardian"); + + vm.expectEmit(address(dataService)); + emit IDataServicePausable.PauseGuardianSet(guardian, 
true); + dataService.setPauseGuardian(guardian, true); + + assertTrue(dataService.pauseGuardians(guardian)); + } + + function test_SetPauseGuardian_Remove() external { + address guardian = makeAddr("guardian"); + dataService.setPauseGuardian(guardian, true); + + vm.expectEmit(address(dataService)); + emit IDataServicePausable.PauseGuardianSet(guardian, false); + dataService.setPauseGuardian(guardian, false); + + assertFalse(dataService.pauseGuardians(guardian)); + } + + function test_RevertWhen_SetPauseGuardian_NoChange_AlreadyFalse() external { + address guardian = makeAddr("guardian"); + + // guardian defaults to false, setting to false should revert + vm.expectRevert( + abi.encodeWithSelector( + IDataServicePausable.DataServicePausablePauseGuardianNoChange.selector, + guardian, + false + ) + ); + dataService.setPauseGuardian(guardian, false); + } + + function test_RevertWhen_SetPauseGuardian_NoChange_AlreadyTrue() external { + address guardian = makeAddr("guardian"); + dataService.setPauseGuardian(guardian, true); + + // guardian is already true, setting to true should revert + vm.expectRevert( + abi.encodeWithSelector( + IDataServicePausable.DataServicePausablePauseGuardianNoChange.selector, + guardian, + true + ) + ); + dataService.setPauseGuardian(guardian, true); + } + + // -- pause -- + + function test_Pause() external { + address guardian = makeAddr("guardian"); + dataService.setPauseGuardian(guardian, true); + + vm.prank(guardian); + dataService.pause(); + + assertTrue(dataService.paused()); + } + + function test_RevertWhen_Pause_NotGuardian() external { + address notGuardian = makeAddr("notGuardian"); - // this ensures that implementation immutables were correctly initialized - // and they can be read via the proxy - assertEq(implementation.controller(), address(controller)); - assertEq(dataService.controller(), address(controller)); + vm.expectRevert( + abi.encodeWithSelector(IDataServicePausable.DataServicePausableNotPauseGuardian.selector, 
notGuardian) + ); + vm.prank(notGuardian); + dataService.pause(); } + // -- unpause -- + + function test_Unpause() external { + address guardian = makeAddr("guardian"); + dataService.setPauseGuardian(guardian, true); + + vm.startPrank(guardian); + dataService.pause(); + dataService.unpause(); + vm.stopPrank(); + + assertFalse(dataService.paused()); + } + + function test_RevertWhen_Unpause_NotGuardian() external { + address guardian = makeAddr("guardian"); + dataService.setPauseGuardian(guardian, true); + + vm.prank(guardian); + dataService.pause(); + + address notGuardian = makeAddr("notGuardian"); + vm.expectRevert( + abi.encodeWithSelector(IDataServicePausable.DataServicePausableNotPauseGuardian.selector, notGuardian) + ); + vm.prank(notGuardian); + dataService.unpause(); + } + + // -- helpers -- + function _deployDataService() internal returns (DataServiceImpPausableUpgradeable, DataServiceImpPausableUpgradeable) diff --git a/packages/horizon/test/unit/data-service/implementations/DataServiceBase.sol b/packages/horizon/test/unit/data-service/implementations/DataServiceBase.sol index b58bbc5e0..d5286be57 100644 --- a/packages/horizon/test/unit/data-service/implementations/DataServiceBase.sol +++ b/packages/horizon/test/unit/data-service/implementations/DataServiceBase.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { DataService } from "../../../../contracts/data-service/DataService.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; diff --git a/packages/horizon/test/unit/data-service/implementations/DataServiceBaseUpgradeable.sol b/packages/horizon/test/unit/data-service/implementations/DataServiceBaseUpgradeable.sol index d328089f9..b0057e941 100644 --- a/packages/horizon/test/unit/data-service/implementations/DataServiceBaseUpgradeable.sol +++ b/packages/horizon/test/unit/data-service/implementations/DataServiceBaseUpgradeable.sol @@ 
-1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { DataService } from "../../../../contracts/data-service/DataService.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; diff --git a/packages/horizon/test/unit/data-service/implementations/DataServiceImpFees.sol b/packages/horizon/test/unit/data-service/implementations/DataServiceImpFees.sol index 85c51465f..85fc23b25 100644 --- a/packages/horizon/test/unit/data-service/implementations/DataServiceImpFees.sol +++ b/packages/horizon/test/unit/data-service/implementations/DataServiceImpFees.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { DataService } from "../../../../contracts/data-service/DataService.sol"; import { DataServiceFees } from "../../../../contracts/data-service/extensions/DataServiceFees.sol"; diff --git a/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausable.sol b/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausable.sol index bba7de566..9f15584d5 100644 --- a/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausable.sol +++ b/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausable.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { DataService } from "../../../../contracts/data-service/DataService.sol"; import { DataServicePausable } from "../../../../contracts/data-service/extensions/DataServicePausable.sol"; diff --git a/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausableUpgradeable.sol b/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausableUpgradeable.sol index 71453fd19..2eccd5899 100644 --- a/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausableUpgradeable.sol +++ 
b/packages/horizon/test/unit/data-service/implementations/DataServiceImpPausableUpgradeable.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { DataService } from "../../../../contracts/data-service/DataService.sol"; import { DataServicePausableUpgradeable } from "../../../../contracts/data-service/extensions/DataServicePausableUpgradeable.sol"; @@ -31,6 +31,10 @@ contract DataServiceImpPausableUpgradeable is DataServicePausableUpgradeable { function slash(address serviceProvider, bytes calldata data) external {} + function setPauseGuardian(address _pauseGuardian, bool _allowed) external { + _setPauseGuardian(_pauseGuardian, _allowed); + } + function controller() external view returns (address) { return address(_graphController()); } diff --git a/packages/horizon/test/unit/data-service/implementations/DataServiceOverride.sol b/packages/horizon/test/unit/data-service/implementations/DataServiceOverride.sol index c5d50ca74..6af527271 100644 --- a/packages/horizon/test/unit/data-service/implementations/DataServiceOverride.sol +++ b/packages/horizon/test/unit/data-service/implementations/DataServiceOverride.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { DataServiceBase } from "./DataServiceBase.sol"; diff --git a/packages/horizon/test/unit/data-service/libraries/ProvisionTracker.t.sol b/packages/horizon/test/unit/data-service/libraries/ProvisionTracker.t.sol index d3424dfc5..d56d770b0 100644 --- a/packages/horizon/test/unit/data-service/libraries/ProvisionTracker.t.sol +++ b/packages/horizon/test/unit/data-service/libraries/ProvisionTracker.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingSharedTest } from "../../shared/horizon-staking/HorizonStakingShared.t.sol"; import { ProvisionTrackerImplementation } from 
"./ProvisionTrackerImplementation.sol"; diff --git a/packages/horizon/test/unit/data-service/libraries/ProvisionTrackerImplementation.sol b/packages/horizon/test/unit/data-service/libraries/ProvisionTrackerImplementation.sol index abb525b91..7722df836 100644 --- a/packages/horizon/test/unit/data-service/libraries/ProvisionTrackerImplementation.sol +++ b/packages/horizon/test/unit/data-service/libraries/ProvisionTrackerImplementation.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; contract ProvisionTrackerImplementation { mapping(address => uint256) public provisionTracker; diff --git a/packages/horizon/test/unit/data-service/utilities/ProvisionManager.t.sol b/packages/horizon/test/unit/data-service/utilities/ProvisionManager.t.sol new file mode 100644 index 000000000..4993b7f57 --- /dev/null +++ b/packages/horizon/test/unit/data-service/utilities/ProvisionManager.t.sol @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +import { ProvisionManager } from "../../../../contracts/data-service/utilities/ProvisionManager.sol"; +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; +import { PartialControllerMock } from "../../mocks/PartialControllerMock.t.sol"; +import { HorizonStakingMock } from "../../mocks/HorizonStakingMock.t.sol"; +import { ProvisionManagerImpl } from "./ProvisionManagerImpl.t.sol"; + +contract ProvisionManagerTest is Test { + ProvisionManagerImpl internal _provisionManager; + HorizonStakingMock internal _horizonStakingMock; + + function setUp() public { + _horizonStakingMock = new HorizonStakingMock(); + + PartialControllerMock.Entry[] memory entries = new PartialControllerMock.Entry[](1); + entries[0] = PartialControllerMock.Entry({ name: "Staking", addr: address(_horizonStakingMock) }); + _provisionManager = new ProvisionManagerImpl(address(new 
PartialControllerMock(entries))); + } + + /* solhint-disable graph/func-name-mixedcase */ + + function test_OnlyValidProvision(address serviceProvider) public { + vm.expectRevert( + abi.encodeWithSelector(ProvisionManager.ProvisionManagerProvisionNotFound.selector, serviceProvider) + ); + _provisionManager.requireValidProvision_(serviceProvider); + + IHorizonStakingTypes.Provision memory provision; + provision.createdAt = 1; + + _horizonStakingMock.setProvision(serviceProvider, address(_provisionManager), provision); + + _provisionManager.requireValidProvision_(serviceProvider); + } + + function test_OnlyAuthorizedForProvision(address serviceProvider, address sender) public { + vm.expectRevert( + abi.encodeWithSelector(ProvisionManager.ProvisionManagerNotAuthorized.selector, serviceProvider, sender) + ); + vm.prank(sender); + _provisionManager.requireAuthorizedForProvision_(serviceProvider); + + _horizonStakingMock.setIsAuthorized(serviceProvider, address(_provisionManager), sender, true); + vm.prank(sender); + _provisionManager.requireAuthorizedForProvision_(serviceProvider); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/data-service/utilities/ProvisionManagerImpl.t.sol b/packages/horizon/test/unit/data-service/utilities/ProvisionManagerImpl.t.sol new file mode 100644 index 000000000..1cbfe2cd2 --- /dev/null +++ b/packages/horizon/test/unit/data-service/utilities/ProvisionManagerImpl.t.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { ProvisionManager } from "../../../../contracts/data-service/utilities/ProvisionManager.sol"; +import { GraphDirectory } from "../../../../contracts/utilities/GraphDirectory.sol"; + +contract ProvisionManagerImpl is GraphDirectory, ProvisionManager { + constructor(address controller) GraphDirectory(controller) {} + + function requireValidProvision_(address serviceProvider) public view { + _requireValidProvision(serviceProvider); + } + + 
function requireAuthorizedForProvision_(address serviceProvider) public view { + _requireAuthorizedForProvision(serviceProvider); + } +} diff --git a/packages/horizon/test/unit/escrow/GraphEscrow.t.sol b/packages/horizon/test/unit/escrow/GraphEscrow.t.sol index a0c3fbad1..3f88b468c 100644 --- a/packages/horizon/test/unit/escrow/GraphEscrow.t.sol +++ b/packages/horizon/test/unit/escrow/GraphEscrow.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; diff --git a/packages/horizon/test/unit/escrow/collect.t.sol b/packages/horizon/test/unit/escrow/collect.t.sol index bbd35922c..9d229e1ab 100644 --- a/packages/horizon/test/unit/escrow/collect.t.sol +++ b/packages/horizon/test/unit/escrow/collect.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; diff --git a/packages/horizon/test/unit/escrow/constructor.t.sol b/packages/horizon/test/unit/escrow/constructor.t.sol index c1b097010..430d9926d 100644 --- a/packages/horizon/test/unit/escrow/constructor.t.sol +++ b/packages/horizon/test/unit/escrow/constructor.t.sol @@ -21,7 +21,6 @@ contract GraphEscrowConstructorTest is Test { controller.setContractProxy(keccak256("RewardsManager"), makeAddr("RewardsManager")); controller.setContractProxy(keccak256("GraphTokenGateway"), makeAddr("GraphTokenGateway")); controller.setContractProxy(keccak256("GraphProxyAdmin"), makeAddr("GraphProxyAdmin")); - controller.setContractProxy(keccak256("Curation"), makeAddr("Curation")); } function testConstructor_MaxWaitPeriodBoundary() public { diff --git 
a/packages/horizon/test/unit/escrow/deposit.t.sol b/packages/horizon/test/unit/escrow/deposit.t.sol index 3f7c254c0..0f1fe450e 100644 --- a/packages/horizon/test/unit/escrow/deposit.t.sol +++ b/packages/horizon/test/unit/escrow/deposit.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { GraphEscrowTest } from "./GraphEscrow.t.sol"; diff --git a/packages/horizon/test/unit/escrow/getters.t.sol b/packages/horizon/test/unit/escrow/getters.t.sol index 23f700036..01a215f06 100644 --- a/packages/horizon/test/unit/escrow/getters.t.sol +++ b/packages/horizon/test/unit/escrow/getters.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; @@ -15,6 +15,16 @@ contract GraphEscrowGettersTest is GraphEscrowTest { assertEq(balance, amount); } + function testEscrowAccounts(uint256 amount) public useGateway useDeposit(amount) { + (uint256 balance, uint256 tokensThawing, ) = escrow.escrowAccounts( + users.gateway, + users.verifier, + users.indexer + ); + assertEq(balance, amount); + assertEq(tokensThawing, 0); + } + function testGetBalance_WhenThawing( uint256 amountDeposit, uint256 amountThawing diff --git a/packages/horizon/test/unit/escrow/paused.t.sol b/packages/horizon/test/unit/escrow/paused.t.sol index ea3fce631..2787f5f56 100644 --- a/packages/horizon/test/unit/escrow/paused.t.sol +++ b/packages/horizon/test/unit/escrow/paused.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; @@ -50,6 +50,11 @@ contract GraphEscrowPausedTest is GraphEscrowTest { escrow.cancelThaw(users.verifier, users.indexer); } + function 
testPaused_RevertWhen_AdjustThaw(uint256 tokens) public useGateway useDeposit(tokens) usePaused(true) { + vm.expectRevert(abi.encodeWithSelector(IPaymentsEscrow.PaymentsEscrowIsPaused.selector)); + escrow.adjustThaw(users.verifier, users.indexer, tokens, false); + } + function testPaused_RevertWhen_WithdrawTokens( uint256 tokens, uint256 thawAmount diff --git a/packages/horizon/test/unit/escrow/thaw.t.sol b/packages/horizon/test/unit/escrow/thaw.t.sol index 0b71e6d1b..a8284f8b2 100644 --- a/packages/horizon/test/unit/escrow/thaw.t.sol +++ b/packages/horizon/test/unit/escrow/thaw.t.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { GraphEscrowTest } from "./GraphEscrow.t.sol"; contract GraphEscrowThawTest is GraphEscrowTest { @@ -74,4 +75,265 @@ contract GraphEscrowThawTest is GraphEscrowTest { vm.expectRevert(expectedError); escrow.cancelThaw(users.verifier, users.indexer); } + + function testThaw_AlwaysResetsTimerOnSuccessiveCalls(uint256 amount) public useGateway { + amount = bound(amount, 3, type(uint256).max - 10); + _depositTokens(users.verifier, users.indexer, amount); + + uint256 firstAmountToThaw = (amount + 2 - 1) / 2; + uint256 secondAmountToThaw = (amount + 10 - 1) / 10; + + escrow.thaw(users.verifier, users.indexer, firstAmountToThaw); + + // Advance time — simple thaw always resets the timer, even on decrease + vm.warp(block.timestamp + 1 hours); + + uint256 expectedThawEnd = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + + (, address msgSender, ) = vm.readCallers(); + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.Thaw(msgSender, users.verifier, users.indexer, secondAmountToThaw, expectedThawEnd); + escrow.thaw(users.verifier, users.indexer, secondAmountToThaw); + + (, uint256 amountThawing, uint256 thawEndTimestamp) = escrow.escrowAccounts( + msgSender, + users.verifier, + 
users.indexer + ); + assertEq(amountThawing, secondAmountToThaw); + assertEq(thawEndTimestamp, expectedThawEnd, "Timer should always reset on simple thaw"); + } + + function testThaw_ResetsTimerOnIncrease(uint256 amount) public useGateway { + amount = bound(amount, 10, type(uint256).max - 10); + _depositTokens(users.verifier, users.indexer, amount); + + uint256 firstAmountToThaw = (amount + 10 - 1) / 10; + uint256 secondAmountToThaw = (amount + 2 - 1) / 2; + + (, address msgSender, ) = vm.readCallers(); + + escrow.thaw(users.verifier, users.indexer, firstAmountToThaw); + + // Advance time — second thaw with larger amount should reset the timer + vm.warp(block.timestamp + 1 hours); + + uint256 expectedThawEnd = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.Thaw(msgSender, users.verifier, users.indexer, secondAmountToThaw, expectedThawEnd); + escrow.thaw(users.verifier, users.indexer, secondAmountToThaw); + + (, uint256 amountThawing, uint256 thawEndTimestamp) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + assertEq(amountThawing, secondAmountToThaw); + assertEq(thawEndTimestamp, expectedThawEnd, "Timer should reset on increase"); + } + + /* + * adjustThaw tests + */ + + function testAdjustThaw_CapsAtBalance(uint256 amount, uint256 overAmount) public useGateway useDeposit(amount) { + overAmount = bound(overAmount, amount + 1, type(uint256).max); + + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, overAmount, true); + assertEq(amountThawing, amount, "Should cap at balance"); + + (, address msgSender, ) = vm.readCallers(); + (, uint256 storedThawing, ) = escrow.escrowAccounts(msgSender, users.verifier, users.indexer); + assertEq(storedThawing, amount); + } + + function testAdjustThaw_ZeroAmountCancelsAll(uint256 amount) public useGateway useDeposit(amount) { + escrow.thaw(users.verifier, users.indexer, amount); + + (, address msgSender, ) = 
vm.readCallers(); + (, uint256 amountThawingBefore, uint256 thawEndTimestampBefore) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + assertEq(amountThawingBefore, amount); + + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.CancelThaw( + msgSender, + users.verifier, + users.indexer, + amountThawingBefore, + thawEndTimestampBefore + ); + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, 0, true); + assertEq(amountThawing, 0); + + (, uint256 amountThawingAfter, uint256 thawEndTimestampAfter) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + assertEq(amountThawingAfter, 0); + assertEq(thawEndTimestampAfter, 0); + } + + function testAdjustThaw_NoopWhenRequestedEqualsCurrentThawing(uint256 amount) public useGateway useDeposit(amount) { + escrow.thaw(users.verifier, users.indexer, amount); + + (, address msgSender, ) = vm.readCallers(); + (, uint256 amountThawingBefore, uint256 thawEndTimestampBefore) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, amount, true); + assertEq(amountThawing, amount); + + (, uint256 amountThawingAfter, uint256 thawEndTimestampAfter) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + assertEq(amountThawingAfter, amountThawingBefore); + assertEq(thawEndTimestampAfter, thawEndTimestampBefore); + } + + function testAdjustThaw_PreservesTimerOnDecrease(uint256 amount) public useGateway { + amount = bound(amount, 3, type(uint256).max - 10); + _depositTokens(users.verifier, users.indexer, amount); + + uint256 firstAmountToThaw = (amount + 2 - 1) / 2; + uint256 secondAmountToThaw = (amount + 10 - 1) / 10; + + (, address msgSender, ) = vm.readCallers(); + + escrow.thaw(users.verifier, users.indexer, firstAmountToThaw); + uint256 expectedThawEnd = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + + vm.warp(block.timestamp + 1 
hours); + + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.Thaw(msgSender, users.verifier, users.indexer, secondAmountToThaw, expectedThawEnd); + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, secondAmountToThaw, true); + assertEq(amountThawing, secondAmountToThaw); + + (, uint256 storedThawing, uint256 thawEndTimestamp) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + assertEq(storedThawing, secondAmountToThaw); + assertEq(thawEndTimestamp, expectedThawEnd, "Timer should be preserved on decrease"); + } + + /* + * adjustThaw evenIfTimerReset = false tests + */ + + function testAdjustThaw_EvenIfTimerResetFalse_ProceedsWithNewThaw( + uint256 amount + ) public useGateway useDeposit(amount) { + (, address msgSender, ) = vm.readCallers(); + uint256 expectedThawEnd = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.Thaw(msgSender, users.verifier, users.indexer, amount, expectedThawEnd); + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, amount, false); + assertEq(amountThawing, amount); + } + + function testAdjustThaw_EvenIfTimerResetFalse_ProceedsWithDecrease(uint256 amount) public useGateway { + amount = bound(amount, 10, MAX_STAKING_TOKENS); + _depositTokens(users.verifier, users.indexer, amount); + + uint256 firstAmountToThaw = (amount + 2 - 1) / 2; + uint256 secondAmountToThaw = (amount + 10 - 1) / 10; + + escrow.thaw(users.verifier, users.indexer, firstAmountToThaw); + uint256 expectedThawEnd = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + + vm.warp(block.timestamp + 1 hours); + + (, address msgSender, ) = vm.readCallers(); + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.Thaw(msgSender, users.verifier, users.indexer, secondAmountToThaw, expectedThawEnd); + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, secondAmountToThaw, false); + assertEq(amountThawing, secondAmountToThaw); + + 
(, , uint256 thawEndTimestamp) = escrow.escrowAccounts(msgSender, users.verifier, users.indexer); + assertEq(thawEndTimestamp, expectedThawEnd, "Timer should be preserved on decrease"); + } + + function testAdjustThaw_EvenIfTimerResetFalse_SkipsIncreaseWhenTimerWouldReset(uint256 amount) public useGateway { + amount = bound(amount, 10, MAX_STAKING_TOKENS); + _depositTokens(users.verifier, users.indexer, amount); + + uint256 firstAmountToThaw = (amount + 10 - 1) / 10; + uint256 secondAmountToThaw = (amount + 2 - 1) / 2; + + escrow.thaw(users.verifier, users.indexer, firstAmountToThaw); + uint256 originalThawEnd = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + + vm.warp(block.timestamp + 1 hours); + + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, secondAmountToThaw, false); + assertEq(amountThawing, firstAmountToThaw, "Should return current thawing, not new amount"); + + (, address msgSender, ) = vm.readCallers(); + (, uint256 storedThawing, uint256 thawEndTimestamp) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + assertEq(storedThawing, firstAmountToThaw); + assertEq(thawEndTimestamp, originalThawEnd, "Timer should remain unchanged"); + } + + function testAdjustThaw_EvenIfTimerResetFalse_ProceedsWhenTimerUnchanged(uint256 amount) public useGateway { + amount = bound(amount, 10, MAX_STAKING_TOKENS); + _depositTokens(users.verifier, users.indexer, amount); + + uint256 firstAmountToThaw = (amount + 10 - 1) / 10; + uint256 secondAmountToThaw = (amount + 2 - 1) / 2; + + escrow.thaw(users.verifier, users.indexer, firstAmountToThaw); + + (, address msgSender, ) = vm.readCallers(); + uint256 expectedThawEnd = block.timestamp + WITHDRAW_ESCROW_THAWING_PERIOD; + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.Thaw(msgSender, users.verifier, users.indexer, secondAmountToThaw, expectedThawEnd); + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, secondAmountToThaw, false); + 
assertEq(amountThawing, secondAmountToThaw, "Should proceed when timer unchanged"); + } + + function testAdjustThaw_EvenIfTimerResetFalse_CancelsThawing(uint256 amount) public useGateway useDeposit(amount) { + escrow.thaw(users.verifier, users.indexer, amount); + + (, address msgSender, ) = vm.readCallers(); + (, uint256 amountThawingBefore, uint256 thawEndTimestampBefore) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + vm.expectEmit(address(escrow)); + emit IPaymentsEscrow.CancelThaw( + msgSender, + users.verifier, + users.indexer, + amountThawingBefore, + thawEndTimestampBefore + ); + uint256 amountThawing = escrow.adjustThaw(users.verifier, users.indexer, 0, false); + assertEq(amountThawing, 0); + + (, uint256 amountThawingAfter, uint256 thawEndTimestampAfter) = escrow.escrowAccounts( + msgSender, + users.verifier, + users.indexer + ); + assertEq(amountThawingAfter, 0); + assertEq(thawEndTimestampAfter, 0); + } } diff --git a/packages/horizon/test/unit/escrow/withdraw.t.sol b/packages/horizon/test/unit/escrow/withdraw.t.sol index bcc116fd1..5f33c11f6 100644 --- a/packages/horizon/test/unit/escrow/withdraw.t.sol +++ b/packages/horizon/test/unit/escrow/withdraw.t.sol @@ -1,7 +1,8 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { GraphEscrowTest } from "./GraphEscrow.t.sol"; contract GraphEscrowWithdrawTest is GraphEscrowTest { @@ -39,6 +40,23 @@ contract GraphEscrowWithdrawTest is GraphEscrowTest { escrow.withdraw(users.verifier, users.indexer); } + function testWithdraw_RevertWhen_AtExactThawEndTimestamp( + uint256 amount, + uint256 thawAmount + ) public useGateway depositAndThawTokens(amount, thawAmount) { + // Advance time to exactly the thaw end timestamp (boundary: thawEndTimestamp < 
block.timestamp required) + skip(WITHDRAW_ESCROW_THAWING_PERIOD); + + (, , uint256 thawEndTimestamp) = escrow.escrowAccounts(users.gateway, users.verifier, users.indexer); + bytes memory expectedError = abi.encodeWithSignature( + "PaymentsEscrowStillThawing(uint256,uint256)", + block.timestamp, + thawEndTimestamp + ); + vm.expectRevert(expectedError); + escrow.withdraw(users.verifier, users.indexer); + } + function testWithdraw_SucceedsOneSecondAfterThawEnd( uint256 amount, uint256 thawAmount @@ -55,7 +73,7 @@ contract GraphEscrowWithdrawTest is GraphEscrowTest { uint256 amountCollected ) public useGateway depositAndThawTokens(amountDeposited, amountThawed) { vm.assume(amountCollected > 0); - vm.assume(amountCollected < amountDeposited); + vm.assume(amountCollected <= amountDeposited); // burn some tokens to prevent overflow resetPrank(users.indexer); @@ -76,8 +94,15 @@ contract GraphEscrowWithdrawTest is GraphEscrowTest { // Advance time to simulate the thawing period skip(WITHDRAW_ESCROW_THAWING_PERIOD + 1); - // withdraw the remaining thawed balance + // After collect, tokensThawing is capped at remaining balance. + // Withdraw succeeds if tokens remain, otherwise reverts. 
resetPrank(users.gateway); - _withdrawEscrow(users.verifier, users.indexer); + (, uint256 tokensThawing, ) = escrow.escrowAccounts(users.gateway, users.verifier, users.indexer); + if (tokensThawing != 0) { + _withdrawEscrow(users.verifier, users.indexer); + } else { + vm.expectRevert(abi.encodeWithSelector(IPaymentsEscrow.PaymentsEscrowNotThawing.selector)); + escrow.withdraw(users.verifier, users.indexer); + } } } diff --git a/packages/horizon/test/unit/libraries/LinkedList.t.sol b/packages/horizon/test/unit/libraries/LinkedList.t.sol index bdf902edf..e55469d25 100644 --- a/packages/horizon/test/unit/libraries/LinkedList.t.sol +++ b/packages/horizon/test/unit/libraries/LinkedList.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; import { LinkedList } from "../../../contracts/libraries/LinkedList.sol"; diff --git a/packages/horizon/test/unit/libraries/ListImplementation.sol b/packages/horizon/test/unit/libraries/ListImplementation.sol index dad859f59..72577a4d7 100644 --- a/packages/horizon/test/unit/libraries/ListImplementation.sol +++ b/packages/horizon/test/unit/libraries/ListImplementation.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; import { LinkedList } from "../../../contracts/libraries/LinkedList.sol"; diff --git a/packages/horizon/test/unit/libraries/PPMMath.t.sol b/packages/horizon/test/unit/libraries/PPMMath.t.sol index c760cab06..bed8438a1 100644 --- a/packages/horizon/test/unit/libraries/PPMMath.t.sol +++ b/packages/horizon/test/unit/libraries/PPMMath.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; import { PPMMath } from "../../../contracts/libraries/PPMMath.sol"; diff --git 
a/packages/horizon/test/unit/libraries/StakeClaims.t.sol b/packages/horizon/test/unit/libraries/StakeClaims.t.sol new file mode 100644 index 000000000..90d65e567 --- /dev/null +++ b/packages/horizon/test/unit/libraries/StakeClaims.t.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +import { StakeClaims } from "../../../contracts/data-service/libraries/StakeClaims.sol"; + +contract StakeClaimsTest is Test { + /* solhint-disable graph/func-name-mixedcase */ + + function test_BuildStakeClaimId(address dataService, address serviceProvider, uint256 nonce) public pure { + bytes32 id = StakeClaims.buildStakeClaimId(dataService, serviceProvider, nonce); + bytes32 expectedId = keccak256(abi.encodePacked(dataService, serviceProvider, nonce)); + assertEq(id, expectedId, "StakeClaim ID does not match expected value"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/mocks/HorizonStakingMock.t.sol b/packages/horizon/test/unit/mocks/HorizonStakingMock.t.sol new file mode 100644 index 000000000..995442388 --- /dev/null +++ b/packages/horizon/test/unit/mocks/HorizonStakingMock.t.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; + +contract HorizonStakingMock { + mapping(address => mapping(address => IHorizonStakingTypes.Provision)) public provisions; + mapping(address => mapping(address => mapping(address => bool))) public authorizations; + + function setProvision( + address serviceProvider, + address verifier, + IHorizonStakingTypes.Provision memory provision + ) external { + provisions[serviceProvider][verifier] = provision; + } + + function getProvision( + address serviceProvider, + address verifier + ) external view returns (IHorizonStakingTypes.Provision memory) { + return 
provisions[serviceProvider][verifier]; + } + + function isAuthorized(address serviceProvider, address verifier, address operator) external view returns (bool) { + return authorizations[serviceProvider][verifier][operator]; + } + + function setIsAuthorized(address serviceProvider, address verifier, address operator, bool authorized) external { + authorizations[serviceProvider][verifier][operator] = authorized; + } + + function getProviderTokensAvailable(address serviceProvider, address verifier) external view returns (uint256) { + IHorizonStakingTypes.Provision memory provision = provisions[serviceProvider][verifier]; + return provision.tokens - provision.tokensThawing; + } +} diff --git a/packages/horizon/test/unit/mocks/InvalidControllerMock.t.sol b/packages/horizon/test/unit/mocks/InvalidControllerMock.t.sol new file mode 100644 index 000000000..8005c7a01 --- /dev/null +++ b/packages/horizon/test/unit/mocks/InvalidControllerMock.t.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { PartialControllerMock } from "./PartialControllerMock.t.sol"; + +contract InvalidControllerMock is PartialControllerMock { + constructor() PartialControllerMock(new PartialControllerMock.Entry[](0)) {} +} diff --git a/packages/horizon/test/unit/mocks/PartialControllerMock.t.sol b/packages/horizon/test/unit/mocks/PartialControllerMock.t.sol new file mode 100644 index 000000000..946ec46a2 --- /dev/null +++ b/packages/horizon/test/unit/mocks/PartialControllerMock.t.sol @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +import { ControllerMock } from "../../../contracts/mocks/ControllerMock.sol"; + +contract PartialControllerMock is ControllerMock, Test { + struct Entry { + string name; + address addr; + } + + address private _invalidContractAddress; + + Entry[] private _contracts; + + constructor(Entry[] memory contracts) ControllerMock(address(0)) { + for (uint256 i = 0; i < 
contracts.length; i++) { + _contracts.push(Entry({ name: contracts[i].name, addr: contracts[i].addr })); + } + _invalidContractAddress = makeAddr("invalidContractAddress"); + } + + function getContractProxy(bytes32 data) external view override returns (address) { + for (uint256 i = 0; i < _contracts.length; i++) { + if (keccak256(abi.encodePacked(_contracts[i].name)) == data) { + return _contracts[i].addr; + } + } + return _invalidContractAddress; + } +} diff --git a/packages/horizon/test/unit/payments/GraphPayments.t.sol b/packages/horizon/test/unit/payments/GraphPayments.t.sol index 62d739ba3..d4bf17153 100644 --- a/packages/horizon/test/unit/payments/GraphPayments.t.sol +++ b/packages/horizon/test/unit/payments/GraphPayments.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; diff --git a/packages/horizon/test/unit/payments/graph-tally-collector/GraphTallyCollector.t.sol b/packages/horizon/test/unit/payments/graph-tally-collector/GraphTallyCollector.t.sol index b8e569574..4b05992f3 100644 --- a/packages/horizon/test/unit/payments/graph-tally-collector/GraphTallyCollector.t.sol +++ b/packages/horizon/test/unit/payments/graph-tally-collector/GraphTallyCollector.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { MessageHashUtils } from "@openzeppelin/contracts/utils/cryptography/MessageHashUtils.sol"; import { IGraphTallyCollector } from "@graphprotocol/interfaces/contracts/horizon/IGraphTallyCollector.sol"; @@ -42,7 +42,7 @@ contract GraphTallyTest is HorizonStakingSharedTest, PaymentsEscrowSharedTest { * HELPERS */ - function _getSignerProof(uint256 _proofDeadline, uint256 _signer) internal view returns (bytes memory) { + function 
_getSignerProof(uint256 _proofDeadline, uint256 _signer) internal returns (bytes memory) { (, address msgSender, ) = vm.readCallers(); bytes32 messageHash = keccak256( abi.encodePacked( diff --git a/packages/horizon/test/unit/payments/graph-tally-collector/collect/collect.t.sol b/packages/horizon/test/unit/payments/graph-tally-collector/collect/collect.t.sol index 2c15a930d..e9c25d6cc 100644 --- a/packages/horizon/test/unit/payments/graph-tally-collector/collect/collect.t.sol +++ b/packages/horizon/test/unit/payments/graph-tally-collector/collect/collect.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IGraphTallyCollector } from "@graphprotocol/interfaces/contracts/horizon/IGraphTallyCollector.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; diff --git a/packages/horizon/test/unit/payments/graph-tally-collector/signer/authorizeSigner.t.sol b/packages/horizon/test/unit/payments/graph-tally-collector/signer/authorizeSigner.t.sol index cbc3f2960..948a9a1c2 100644 --- a/packages/horizon/test/unit/payments/graph-tally-collector/signer/authorizeSigner.t.sol +++ b/packages/horizon/test/unit/payments/graph-tally-collector/signer/authorizeSigner.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IAuthorizable } from "@graphprotocol/interfaces/contracts/horizon/IAuthorizable.sol"; diff --git a/packages/horizon/test/unit/payments/graph-tally-collector/signer/cancelThawSigner.t.sol b/packages/horizon/test/unit/payments/graph-tally-collector/signer/cancelThawSigner.t.sol index d117cfb95..b3b1cbeb6 100644 --- a/packages/horizon/test/unit/payments/graph-tally-collector/signer/cancelThawSigner.t.sol +++ b/packages/horizon/test/unit/payments/graph-tally-collector/signer/cancelThawSigner.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { 
IAuthorizable } from "@graphprotocol/interfaces/contracts/horizon/IAuthorizable.sol"; diff --git a/packages/horizon/test/unit/payments/graph-tally-collector/signer/revokeSigner.t.sol b/packages/horizon/test/unit/payments/graph-tally-collector/signer/revokeSigner.t.sol index 5d987cb9c..6e6b92dfb 100644 --- a/packages/horizon/test/unit/payments/graph-tally-collector/signer/revokeSigner.t.sol +++ b/packages/horizon/test/unit/payments/graph-tally-collector/signer/revokeSigner.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IAuthorizable } from "@graphprotocol/interfaces/contracts/horizon/IAuthorizable.sol"; diff --git a/packages/horizon/test/unit/payments/graph-tally-collector/signer/thawSigner.t.sol b/packages/horizon/test/unit/payments/graph-tally-collector/signer/thawSigner.t.sol index 781551f61..bf6269ee6 100644 --- a/packages/horizon/test/unit/payments/graph-tally-collector/signer/thawSigner.t.sol +++ b/packages/horizon/test/unit/payments/graph-tally-collector/signer/thawSigner.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IAuthorizable } from "@graphprotocol/interfaces/contracts/horizon/IAuthorizable.sol"; diff --git a/packages/horizon/test/unit/payments/recurring-collector/BareAgreementOwner.t.sol b/packages/horizon/test/unit/payments/recurring-collector/BareAgreementOwner.t.sol new file mode 100644 index 000000000..2f6324957 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/BareAgreementOwner.t.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; + +/// @notice Minimal contract payer that implements IAgreementOwner but NOT IERC165. 
+/// Calling supportsInterface on this contract will revert (no such function), +/// exercising the catch {} fallthrough in RecurringCollector's eligibility gate. +contract BareAgreementOwner is IAgreementOwner { + mapping(bytes32 => bool) public authorizedHashes; + + function authorize(bytes32 agreementHash) external { + authorizedHashes[agreementHash] = true; + } + + function approveAgreement(bytes32 agreementHash) external view override returns (bytes4) { + if (!authorizedHashes[agreementHash]) return bytes4(0); + return IAgreementOwner.approveAgreement.selector; + } + + function beforeCollection(bytes16, uint256) external override {} + + function afterCollection(bytes16, uint256) external override {} +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/MockAgreementOwner.t.sol b/packages/horizon/test/unit/payments/recurring-collector/MockAgreementOwner.t.sol new file mode 100644 index 000000000..614dab81a --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/MockAgreementOwner.t.sol @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; +import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; + +/// @notice Mock contract approver for testing acceptUnsigned and updateUnsigned. +/// Can be configured to return valid selector, wrong value, or revert. +/// Optionally supports IERC165 + IProviderEligibility for eligibility gate testing. 
+contract MockAgreementOwner is IAgreementOwner, IERC165, IProviderEligibility { + mapping(bytes32 => bool) public authorizedHashes; + bool public shouldRevert; + bytes4 public overrideReturnValue; + bool public useOverride; + + // -- Eligibility configuration -- + bool public eligibilityEnabled; + mapping(address => bool) public eligibleProviders; + bool public defaultEligible; + + function authorize(bytes32 agreementHash) external { + authorizedHashes[agreementHash] = true; + } + + function setShouldRevert(bool _shouldRevert) external { + shouldRevert = _shouldRevert; + } + + function setOverrideReturnValue(bytes4 _value) external { + overrideReturnValue = _value; + useOverride = true; + } + + function approveAgreement(bytes32 agreementHash) external view override returns (bytes4) { + if (shouldRevert) { + revert("MockAgreementOwner: forced revert"); + } + if (useOverride) { + return overrideReturnValue; + } + if (!authorizedHashes[agreementHash]) { + return bytes4(0); + } + return IAgreementOwner.approveAgreement.selector; + } + + bytes16 public lastBeforeCollectionAgreementId; + uint256 public lastBeforeCollectionTokens; + bool public shouldRevertOnBeforeCollection; + + function setShouldRevertOnBeforeCollection(bool _shouldRevert) external { + shouldRevertOnBeforeCollection = _shouldRevert; + } + + function beforeCollection(bytes16 agreementId, uint256 tokensToCollect) external override { + if (shouldRevertOnBeforeCollection) { + revert("MockAgreementOwner: forced revert on beforeCollection"); + } + lastBeforeCollectionAgreementId = agreementId; + lastBeforeCollectionTokens = tokensToCollect; + } + + bytes16 public lastCollectedAgreementId; + uint256 public lastCollectedTokens; + bool public shouldRevertOnCollected; + + function setShouldRevertOnCollected(bool _shouldRevert) external { + shouldRevertOnCollected = _shouldRevert; + } + + function afterCollection(bytes16 agreementId, uint256 tokensCollected) external override { + if (shouldRevertOnCollected) { + 
revert("MockAgreementOwner: forced revert on afterCollection"); + } + lastCollectedAgreementId = agreementId; + lastCollectedTokens = tokensCollected; + } + + // -- ERC165 + IProviderEligibility -- + + /// @notice Enable ERC165 reporting of IProviderEligibility support + function setEligibilityEnabled(bool _enabled) external { + eligibilityEnabled = _enabled; + } + + /// @notice Set whether a specific provider is eligible + function setProviderEligible(address provider, bool _eligible) external { + eligibleProviders[provider] = _eligible; + } + + /// @notice Set default eligibility for providers not explicitly configured + function setDefaultEligible(bool _eligible) external { + defaultEligible = _eligible; + } + + function supportsInterface(bytes4 interfaceId) external view override returns (bool) { + if (interfaceId == type(IERC165).interfaceId) return true; + if (interfaceId == type(IProviderEligibility).interfaceId) return eligibilityEnabled; + return false; + } + + function isEligible(address indexer) external view override returns (bool) { + if (eligibleProviders[indexer]) return true; + return defaultEligible; + } +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/PaymentsEscrowMock.t.sol b/packages/horizon/test/unit/payments/recurring-collector/PaymentsEscrowMock.t.sol new file mode 100644 index 000000000..96a1f217f --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/PaymentsEscrowMock.t.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; + +contract PaymentsEscrowMock is IPaymentsEscrow { + function initialize() external {} + + function collect(IGraphPayments.PaymentTypes, address, address, uint256, address, uint256, address) external {} + + function deposit(address, address, uint256) 
external {} + + function depositTo(address, address, address, uint256) external {} + + function thaw(address, address, uint256) external {} + + function adjustThaw(address, address, uint256, bool /* evenIfTimerReset */) external pure returns (uint256) { + return 0; + } + + function cancelThaw(address, address) external {} + + function withdraw(address, address) external {} + + function getBalance(address, address, address) external pure returns (uint256) { + return 0; + } + + function escrowAccounts(address, address, address) external pure returns (uint256, uint256, uint256) { + return (0, 0, 0); + } + + function MAX_WAIT_PERIOD() external pure returns (uint256) { + return 0; + } + + function WITHDRAW_ESCROW_THAWING_PERIOD() external pure returns (uint256) { + return 0; + } +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorAuthorizableTest.t.sol b/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorAuthorizableTest.t.sol new file mode 100644 index 000000000..b4d109678 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorAuthorizableTest.t.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IAuthorizable } from "@graphprotocol/interfaces/contracts/horizon/IAuthorizable.sol"; +import { RecurringCollector } from "../../../../contracts/payments/collectors/RecurringCollector.sol"; + +import { AuthorizableTest } from "../../../unit/utilities/Authorizable.t.sol"; +import { InvalidControllerMock } from "../../mocks/InvalidControllerMock.t.sol"; + +contract RecurringCollectorAuthorizableTest is AuthorizableTest { + function newAuthorizable(uint256 thawPeriod) public override returns (IAuthorizable) { + return new RecurringCollector("RecurringCollector", "1", address(new InvalidControllerMock()), thawPeriod); + } +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol 
b/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol new file mode 100644 index 000000000..9a01754aa --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol @@ -0,0 +1,188 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { RecurringCollector } from "../../../../contracts/payments/collectors/RecurringCollector.sol"; +import { AuthorizableHelper } from "../../../unit/utilities/Authorizable.t.sol"; +import { Bounder } from "../../../unit/utils/Bounder.t.sol"; + +contract RecurringCollectorHelper is AuthorizableHelper, Bounder { + RecurringCollector public collector; + + constructor( + RecurringCollector collector_ + ) AuthorizableHelper(collector_, collector_.REVOKE_AUTHORIZATION_THAWING_PERIOD()) { + collector = collector_; + } + + function generateSignedRCA( + IRecurringCollector.RecurringCollectionAgreement memory rca, + uint256 signerPrivateKey + ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory) { + bytes32 messageHash = collector.hashRCA(rca); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(signerPrivateKey, messageHash); + bytes memory signature = abi.encodePacked(r, s, v); + + return (rca, signature); + } + + function generateSignedRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + uint256 signerPrivateKey + ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { + bytes32 messageHash = collector.hashRCAU(rcau); + (uint8 v, bytes32 r, bytes32 s) = vm.sign(signerPrivateKey, messageHash); + bytes memory signature = abi.encodePacked(r, s, v); + + return (rcau, signature); + } + + function generateSignedRCAUForAgreement( + bytes16 agreementId, + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + uint256 signerPrivateKey + 
) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { + // Automatically set the correct nonce based on current agreement state + IRecurringCollector.AgreementData memory agreement = collector.getAgreement(agreementId); + rcau.nonce = agreement.updateNonce + 1; + + return generateSignedRCAU(rcau, signerPrivateKey); + } + + function generateSignedRCAUWithCorrectNonce( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + uint256 signerPrivateKey + ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { + // This is kept for backwards compatibility but should not be used with new interface + // since we can't determine agreementId without it being passed separately + return generateSignedRCAU(rcau, signerPrivateKey); + } + + function generateSignedRCAWithCalculatedId( + IRecurringCollector.RecurringCollectionAgreement memory rca, + uint256 signerPrivateKey + ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory, bytes16) { + // Ensure we have sensible values + rca = sensibleRCA(rca); + + // Calculate the agreement ID + bytes16 agreementId = collector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + + (IRecurringCollector.RecurringCollectionAgreement memory signedRca, bytes memory signature) = generateSignedRCA( + rca, + signerPrivateKey + ); + return (signedRca, signature, agreementId); + } + + function withElapsedAcceptDeadline( + IRecurringCollector.RecurringCollectionAgreement memory rca + ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + require(block.timestamp > 0, "block.timestamp can't be zero"); + require(block.timestamp <= type(uint64).max, "block.timestamp can't be huge"); + rca.deadline = uint64(bound(rca.deadline, 0, block.timestamp - 1)); + return rca; + } + + function withOKAcceptDeadline( + 
IRecurringCollector.RecurringCollectionAgreement memory rca + ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + require(block.timestamp <= type(uint64).max, "block.timestamp can't be huge"); + rca.deadline = uint64(boundTimestampMin(rca.deadline, block.timestamp)); + return rca; + } + + function sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement memory rca + ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + vm.assume(rca.dataService != address(0)); + vm.assume(rca.payer != address(0)); + vm.assume(rca.serviceProvider != address(0)); + + // Ensure we have a nonce if it's zero + if (rca.nonce == 0) { + rca.nonce = 1; + } + + rca.minSecondsPerCollection = _sensibleMinSecondsPerCollection(rca.minSecondsPerCollection); + rca.maxSecondsPerCollection = _sensibleMaxSecondsPerCollection( + rca.maxSecondsPerCollection, + rca.minSecondsPerCollection + ); + + rca.deadline = _sensibleDeadline(rca.deadline); + rca.endsAt = _sensibleEndsAt(rca.endsAt, rca.maxSecondsPerCollection); + + rca.maxInitialTokens = _sensibleMaxInitialTokens(rca.maxInitialTokens); + rca.maxOngoingTokensPerSecond = _sensibleMaxOngoingTokensPerSecond(rca.maxOngoingTokensPerSecond); + + return rca; + } + + function sensibleRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau + ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory) { + rcau.minSecondsPerCollection = _sensibleMinSecondsPerCollection(rcau.minSecondsPerCollection); + rcau.maxSecondsPerCollection = _sensibleMaxSecondsPerCollection( + rcau.maxSecondsPerCollection, + rcau.minSecondsPerCollection + ); + + rcau.deadline = _sensibleDeadline(rcau.deadline); + rcau.endsAt = _sensibleEndsAt(rcau.endsAt, rcau.maxSecondsPerCollection); + rcau.maxInitialTokens = _sensibleMaxInitialTokens(rcau.maxInitialTokens); + rcau.maxOngoingTokensPerSecond = _sensibleMaxOngoingTokensPerSecond(rcau.maxOngoingTokensPerSecond); + + return 
rcau; + } + + function _sensibleDeadline(uint256 _seed) internal view returns (uint64) { + return + uint64( + bound(_seed, block.timestamp + 1, block.timestamp + uint256(collector.MIN_SECONDS_COLLECTION_WINDOW())) + ); // between now and +MIN_SECONDS_COLLECTION_WINDOW + } + + function _sensibleEndsAt(uint256 _seed, uint32 _maxSecondsPerCollection) internal view returns (uint64) { + return + uint64( + bound( + _seed, + block.timestamp + (10 * uint256(_maxSecondsPerCollection)), + block.timestamp + (1_000_000 * uint256(_maxSecondsPerCollection)) + ) + ); // between 10 and 1M max collections + } + + function _sensibleMaxSecondsPerCollection( + uint32 _seed, + uint32 _minSecondsPerCollection + ) internal view returns (uint32) { + return + uint32( + bound( + _seed, + _minSecondsPerCollection + uint256(collector.MIN_SECONDS_COLLECTION_WINDOW()), + 60 * 60 * 24 * 30 + ) // between minSecondsPerCollection + MIN_SECONDS_COLLECTION_WINDOW and 30 days + ); + } + + function _sensibleMaxInitialTokens(uint256 _seed) internal pure returns (uint256) { + return bound(_seed, 0, 1e18 * 100_000_000); // between 0 and 100M tokens + } + + function _sensibleMaxOngoingTokensPerSecond(uint256 _seed) internal pure returns (uint256) { + return bound(_seed, 1, 1e18); // between 1 wei and 1 token (1e18 wei) per second + } + + function _sensibleMinSecondsPerCollection(uint32 _seed) internal pure returns (uint32) { + return uint32(bound(_seed, 10 * 60, 24 * 60 * 60)); // between 10 min and 24h + } +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/accept.t.sol b/packages/horizon/test/unit/payments/recurring-collector/accept.t.sol new file mode 100644 index 000000000..8404db85e --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/accept.t.sol @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol";
+ +contract RecurringCollectorAcceptTest is RecurringCollectorSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + + function test_Accept(FuzzyTestAccept calldata fuzzyTestAccept) public { + _sensibleAuthorizeAndAccept(fuzzyTestAccept); + } + + function test_Accept_Revert_WhenAcceptanceDeadlineElapsed( + IRecurringCollector.RecurringCollectionAgreement memory fuzzyRCA, + bytes memory fuzzySignature, + uint256 unboundedSkip + ) public { + // Ensure non-empty signature so the signed path is taken (which checks deadline first) + vm.assume(fuzzySignature.length > 0); + // Generate deterministic agreement ID for validation + bytes16 agreementId = _recurringCollector.generateAgreementId( + fuzzyRCA.payer, + fuzzyRCA.dataService, + fuzzyRCA.serviceProvider, + fuzzyRCA.deadline, + fuzzyRCA.nonce + ); + vm.assume(agreementId != bytes16(0)); + skip(boundSkip(unboundedSkip, 1, type(uint64).max - block.timestamp)); + fuzzyRCA = _recurringCollectorHelper.withElapsedAcceptDeadline(fuzzyRCA); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, + block.timestamp, + fuzzyRCA.deadline + ); + vm.expectRevert(expectedErr); + vm.prank(fuzzyRCA.dataService); + _recurringCollector.accept(fuzzyRCA, fuzzySignature); + } + + function test_Accept_Revert_WhenAlreadyAccepted(FuzzyTestAccept calldata fuzzyTestAccept) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes memory signature, + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestAccept); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, + agreementId, + IRecurringCollector.AgreementState.Accepted + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.accept(acceptedRca, signature); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git 
a/packages/horizon/test/unit/payments/recurring-collector/acceptUnsigned.t.sol b/packages/horizon/test/unit/payments/recurring-collector/acceptUnsigned.t.sol new file mode 100644 index 000000000..153b69141 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/acceptUnsigned.t.sol @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; + +contract RecurringCollectorAcceptUnsignedTest is RecurringCollectorSharedTest { + function _newApprover() internal returns (MockAgreementOwner) { + return new MockAgreementOwner(); + } + + function _makeSimpleRCA(address payer) internal returns (IRecurringCollector.RecurringCollectionAgreement memory) { + return + _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: payer, + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + } + + /* solhint-disable graph/func-name-mixedcase */ + + function test_AcceptUnsigned(FuzzyTestAccept calldata fuzzyTestAccept) public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + fuzzyTestAccept.rca + ); + rca.payer = address(approver); + + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + bytes16 expectedId = _recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + 
rca.serviceProvider, + rca.deadline, + rca.nonce + ); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementAccepted( + rca.dataService, + rca.payer, + rca.serviceProvider, + expectedId, + uint64(block.timestamp), + rca.endsAt, + rca.maxInitialTokens, + rca.maxOngoingTokensPerSecond, + rca.minSecondsPerCollection, + rca.maxSecondsPerCollection + ); + + vm.prank(rca.dataService); + bytes16 agreementId = _recurringCollector.accept(rca, ""); + + assertEq(agreementId, expectedId); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); + assertEq(uint8(agreement.state), uint8(IRecurringCollector.AgreementState.Accepted)); + assertEq(agreement.payer, address(approver)); + assertEq(agreement.serviceProvider, rca.serviceProvider); + assertEq(agreement.dataService, rca.dataService); + } + + function test_AcceptUnsigned_Revert_WhenPayerNotContract() public { + address eoa = makeAddr("eoa"); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(eoa); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.RecurringCollectorApproverNotContract.selector, eoa) + ); + vm.prank(rca.dataService); + _recurringCollector.accept(rca, ""); + } + + function test_AcceptUnsigned_Revert_WhenHashNotAuthorized() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + // Don't authorize the hash + vm.expectRevert(); + vm.prank(rca.dataService); + _recurringCollector.accept(rca, ""); + } + + function test_AcceptUnsigned_Revert_WhenWrongMagicValue() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + approver.setOverrideReturnValue(bytes4(0xdeadbeef)); + + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.RecurringCollectorInvalidSigner.selector)); + 
vm.prank(rca.dataService); + _recurringCollector.accept(rca, ""); + } + + function test_AcceptUnsigned_Revert_WhenNotDataService() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + + address notDataService = makeAddr("notDataService"); + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorUnauthorizedCaller.selector, + notDataService, + rca.dataService + ) + ); + vm.prank(notDataService); + _recurringCollector.accept(rca, ""); + } + + function test_AcceptUnsigned_Revert_WhenAlreadyAccepted(FuzzyTestAccept calldata fuzzyTestAccept) public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + fuzzyTestAccept.rca + ); + rca.payer = address(approver); + + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.prank(rca.dataService); + bytes16 agreementId = _recurringCollector.accept(rca, ""); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, + agreementId, + IRecurringCollector.AgreementState.Accepted + ); + vm.expectRevert(expectedErr); + vm.prank(rca.dataService); + _recurringCollector.accept(rca, ""); + } + + function test_AcceptUnsigned_Revert_WhenApproverReverts() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + approver.setShouldRevert(true); + + vm.expectRevert("MockAgreementOwner: forced revert"); + vm.prank(rca.dataService); + _recurringCollector.accept(rca, ""); + } + + function test_AcceptUnsigned_Revert_WhenDeadlineElapsed() 
public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + + // Advance time past the deadline + vm.warp(rca.deadline + 1); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, + block.timestamp, + rca.deadline + ); + vm.expectRevert(expectedErr); + vm.prank(rca.dataService); + _recurringCollector.accept(rca, ""); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/afterCollection.t.sol b/packages/horizon/test/unit/payments/recurring-collector/afterCollection.t.sol new file mode 100644 index 000000000..c84958daf --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/afterCollection.t.sol @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; + +/// @notice Tests for IAgreementOwner.beforeCollection and .afterCollection in RecurringCollector._collect() +contract RecurringCollectorAfterCollectionTest is RecurringCollectorSharedTest { + function _newApprover() internal returns (MockAgreementOwner) { + return new MockAgreementOwner(); + } + + function _acceptUnsignedAgreement( + MockAgreementOwner approver + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 
hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(approver), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.prank(rca.dataService); + agreementId = _recurringCollector.accept(rca, ""); + } + + /* solhint-disable graph/func-name-mixedcase */ + + function test_BeforeCollection_CallbackInvoked() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + + // beforeCollection should have been called with the tokens about to be collected + assertEq(approver.lastBeforeCollectionAgreementId(), agreementId); + assertEq(approver.lastBeforeCollectionTokens(), tokens); + } + + function test_BeforeCollection_CollectionSucceedsWhenCallbackReverts() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + approver.setShouldRevertOnBeforeCollection(true); + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + + // Collection should still succeed despite beforeCollection reverting + vm.prank(rca.dataService); + uint256 collected = 
_recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, tokens); + + // beforeCollection state not updated (it reverted), but afterCollection still runs + assertEq(approver.lastBeforeCollectionAgreementId(), bytes16(0)); + assertEq(approver.lastCollectedAgreementId(), agreementId); + } + + function test_AfterCollection_CallbackInvoked() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + // Skip past minSecondsPerCollection and collect + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + + // Verify callback was invoked with correct parameters + assertEq(approver.lastCollectedAgreementId(), agreementId); + assertEq(approver.lastCollectedTokens(), tokens); + } + + function test_AfterCollection_CollectionSucceedsWhenCallbackReverts() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + // Configure callback to revert + approver.setShouldRevertOnCollected(true); + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + + // Collection should still succeed despite callback reverting + vm.prank(rca.dataService); + uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, tokens); + + // Callback state should not have been updated (it reverted) + assertEq(approver.lastCollectedAgreementId(), bytes16(0)); + assertEq(approver.lastCollectedTokens(), 
0); + } + + function test_AfterCollection_NotCalledForEOAPayer(FuzzyTestCollect calldata fuzzy) public { + // Use standard ECDSA-signed path (EOA payer, no contract) + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, , , ) = _sensibleAuthorizeAndAccept( + fuzzy.fuzzyTestAccept + ); + + (bytes memory data, uint256 collectionSeconds, uint256 tokens) = _generateValidCollection( + acceptedRca, + fuzzy.collectParams, + fuzzy.collectParams.tokens, // reuse as skip seed + fuzzy.collectParams.tokens + ); + + skip(collectionSeconds); + // Should succeed without any callback issues (EOA has no code) + vm.prank(acceptedRca.dataService); + uint256 collected = _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + assertEq(collected, tokens); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/base.t.sol b/packages/horizon/test/unit/payments/recurring-collector/base.t.sol new file mode 100644 index 000000000..c37ced83f --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/base.t.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +contract RecurringCollectorBaseTest is RecurringCollectorSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + + function test_RecoverRCASigner(FuzzyTestAccept memory fuzzyTestAccept) public view { + uint256 signerKey = boundKey(fuzzyTestAccept.unboundedSignerKey); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(fuzzyTestAccept.rca, signerKey); + + assertEq( + _recurringCollector.recoverRCASigner(rca, signature), + vm.addr(signerKey), + "Recovered RCA signer does not match" + ); + } + 
+ function test_RecoverRCAUSigner(FuzzyTestUpdate memory fuzzyTestUpdate) public view { + uint256 signerKey = boundKey(fuzzyTestUpdate.fuzzyTestAccept.unboundedSignerKey); + ( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCAU(fuzzyTestUpdate.rcau, signerKey); + + assertEq( + _recurringCollector.recoverRCAUSigner(rcau, signature), + vm.addr(signerKey), + "Recovered RCAU signer does not match" + ); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/cancel.t.sol b/packages/horizon/test/unit/payments/recurring-collector/cancel.t.sol new file mode 100644 index 000000000..1ccb0ccc1 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/cancel.t.sol @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +contract RecurringCollectorCancelTest is RecurringCollectorSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + + function test_Cancel(FuzzyTestAccept calldata fuzzyTestAccept, uint8 unboundedCanceler) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestAccept); + + _cancel(acceptedRca, agreementId, _fuzzyCancelAgreementBy(unboundedCanceler)); + } + + function test_Cancel_Revert_WhenNotAccepted( + IRecurringCollector.RecurringCollectionAgreement memory fuzzyRCA, + uint8 unboundedCanceler + ) public { + // Generate deterministic agreement ID + bytes16 agreementId = _recurringCollector.generateAgreementId( + fuzzyRCA.payer, + fuzzyRCA.dataService, + fuzzyRCA.serviceProvider, + fuzzyRCA.deadline, + fuzzyRCA.nonce + ); + + bytes memory expectedErr = 
abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, + agreementId, + IRecurringCollector.AgreementState.NotAccepted + ); + vm.expectRevert(expectedErr); + vm.prank(fuzzyRCA.dataService); + _recurringCollector.cancel(agreementId, _fuzzyCancelAgreementBy(unboundedCanceler)); + } + + function test_Cancel_Revert_WhenNotDataService( + FuzzyTestAccept calldata fuzzyTestAccept, + uint8 unboundedCanceler, + address notDataService + ) public { + vm.assume(fuzzyTestAccept.rca.dataService != notDataService); + + (, , , bytes16 agreementId) = _sensibleAuthorizeAndAccept(fuzzyTestAccept); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, + agreementId, + notDataService + ); + vm.expectRevert(expectedErr); + vm.prank(notDataService); + _recurringCollector.cancel(agreementId, _fuzzyCancelAgreementBy(unboundedCanceler)); + } + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/collect.t.sol b/packages/horizon/test/unit/payments/recurring-collector/collect.t.sol new file mode 100644 index 000000000..d19f5caed --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/collect.t.sol @@ -0,0 +1,516 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + + function test_Collect_Revert_WhenInvalidData(address caller, uint8 
unboundedPaymentType, bytes memory data) public { + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorInvalidCollectData.selector, + data + ); + vm.expectRevert(expectedErr); + vm.prank(caller); + _recurringCollector.collect(_paymentType(unboundedPaymentType), data); + } + + function test_Collect_Revert_WhenCallerNotDataService( + FuzzyTestCollect calldata fuzzy, + address notDataService + ) public { + vm.assume(fuzzy.fuzzyTestAccept.rca.dataService != notDataService); + + (, , , bytes16 agreementId) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + IRecurringCollector.CollectParams memory collectParams = fuzzy.collectParams; + + skip(1); + collectParams.agreementId = agreementId; + bytes memory data = _generateCollectData(collectParams); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, + collectParams.agreementId, + notDataService + ); + vm.expectRevert(expectedErr); + vm.prank(notDataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + } + + function test_Collect_Revert_WhenUnauthorizedDataService(FuzzyTestCollect calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + IRecurringCollector.CollectParams memory collectParams = fuzzy.collectParams; + collectParams.agreementId = agreementId; + collectParams.tokens = bound(collectParams.tokens, 1, type(uint256).max); + bytes memory data = _generateCollectData(collectParams); + + skip(1); + + // Set up the scenario where service provider has no tokens staked with data service + // This simulates an unauthorized data service attack + _horizonStaking.setProvision( + acceptedRca.serviceProvider, + acceptedRca.dataService, + IHorizonStakingTypes.Provision({ + tokens: 0, // No tokens staked - this triggers the vulnerability + 
tokensThawing: 0, + sharesThawing: 0, + maxVerifierCut: 100000, + thawingPeriod: 604800, + createdAt: uint64(block.timestamp), + maxVerifierCutPending: 100000, + thawingPeriodPending: 604800, + lastParametersStagedAt: 0, + thawingNonce: 0 + }) + ); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorUnauthorizedDataService.selector, + acceptedRca.dataService + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + } + + function test_Collect_Revert_WhenUnknownAgreement(FuzzyTestCollect memory fuzzy, address dataService) public { + bytes memory data = _generateCollectData(fuzzy.collectParams); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementNotCollectable.selector, + fuzzy.collectParams.agreementId, + IRecurringCollector.AgreementNotCollectableReason.InvalidAgreementState + ); + vm.expectRevert(expectedErr); + vm.prank(dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + } + + function test_Collect_Revert_WhenCanceledAgreementByServiceProvider(FuzzyTestCollect calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + _cancel(acceptedRca, agreementId, IRecurringCollector.CancelAgreementBy.ServiceProvider); + IRecurringCollector.CollectParams memory collectData = fuzzy.collectParams; + collectData.tokens = bound(collectData.tokens, 1, type(uint256).max); + IRecurringCollector.CollectParams memory collectParams = _generateCollectParams( + acceptedRca, + agreementId, + collectData.collectionId, + collectData.tokens, + collectData.dataServiceCut + ); + bytes memory data = _generateCollectData(collectParams); + + bytes memory expectedErr = abi.encodeWithSelector( + 
IRecurringCollector.RecurringCollectorAgreementNotCollectable.selector, + collectParams.agreementId, + IRecurringCollector.AgreementNotCollectableReason.InvalidAgreementState + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + } + + function test_Collect_Revert_WhenCollectingTooSoon( + FuzzyTestCollect calldata fuzzy, + uint256 unboundedCollectionSeconds + ) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + + skip(acceptedRca.minSecondsPerCollection); + bytes memory data = _generateCollectData( + _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + 1, + fuzzy.collectParams.dataServiceCut + ) + ); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + + uint256 collectionSeconds = boundSkip(unboundedCollectionSeconds, 1, acceptedRca.minSecondsPerCollection - 1); + skip(collectionSeconds); + + IRecurringCollector.CollectParams memory collectParams = _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + bound(fuzzy.collectParams.tokens, 1, type(uint256).max), + fuzzy.collectParams.dataServiceCut + ); + data = _generateCollectData(collectParams); + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorCollectionTooSoon.selector, + collectParams.agreementId, + collectionSeconds, + acceptedRca.minSecondsPerCollection + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + } + + function test_Collect_OK_WhenCollectingPastMaxSeconds( + FuzzyTestCollect calldata fuzzy, + uint256 unboundedFirstCollectionSeconds, + uint256 unboundedSecondCollectionSeconds + ) public { + ( + 
IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + + // First valid collection to establish lastCollectionAt + skip( + boundSkip( + unboundedFirstCollectionSeconds, + acceptedRca.minSecondsPerCollection, + acceptedRca.maxSecondsPerCollection + ) + ); + bytes memory firstData = _generateCollectData( + _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + 1, + fuzzy.collectParams.dataServiceCut + ) + ); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), firstData); + + // Skip PAST maxSecondsPerCollection (but still within agreement endsAt) + uint256 collectionSeconds = boundSkip( + unboundedSecondCollectionSeconds, + acceptedRca.maxSecondsPerCollection + 1, + acceptedRca.endsAt - block.timestamp + ); + skip(collectionSeconds); + + // Request more tokens than the cap allows + uint256 cappedMaxTokens = acceptedRca.maxOngoingTokensPerSecond * acceptedRca.maxSecondsPerCollection; + uint256 requestedTokens = cappedMaxTokens + 1; + + IRecurringCollector.CollectParams memory collectParams = _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + requestedTokens, + fuzzy.collectParams.dataServiceCut + ); + bytes memory data = _generateCollectData(collectParams); + + // Collection should SUCCEED with tokens capped at maxSecondsPerCollection worth + _expectCollectCallAndEmit( + acceptedRca, + agreementId, + _paymentType(fuzzy.unboundedPaymentType), + collectParams, + cappedMaxTokens + ); + vm.prank(acceptedRca.dataService); + uint256 collected = _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + assertEq(collected, cappedMaxTokens, "Tokens should be capped at maxSecondsPerCollection worth"); + } + + function test_Collect_OK_WhenCollectingTooMuch( + FuzzyTestCollect calldata fuzzy, + uint256 
unboundedInitialCollectionSeconds, + uint256 unboundedCollectionSeconds, + uint256 unboundedTokens, + bool testInitialCollection + ) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + + if (!testInitialCollection) { + // skip to collectable time + skip( + boundSkip( + unboundedInitialCollectionSeconds, + acceptedRca.minSecondsPerCollection, + acceptedRca.maxSecondsPerCollection + ) + ); + bytes memory initialData = _generateCollectData( + _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + 1, + fuzzy.collectParams.dataServiceCut + ) + ); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), initialData); + } + + // skip to collectable time + uint256 collectionSeconds = boundSkip( + unboundedCollectionSeconds, + acceptedRca.minSecondsPerCollection, + acceptedRca.maxSecondsPerCollection + ); + skip(collectionSeconds); + uint256 maxTokens = acceptedRca.maxOngoingTokensPerSecond * collectionSeconds; + maxTokens += testInitialCollection ? 
acceptedRca.maxInitialTokens : 0; + uint256 tokens = bound(unboundedTokens, maxTokens + 1, type(uint256).max); + IRecurringCollector.CollectParams memory collectParams = _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + tokens, + fuzzy.collectParams.dataServiceCut + ); + bytes memory data = _generateCollectData(collectParams); + vm.prank(acceptedRca.dataService); + uint256 collected = _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + assertEq(collected, maxTokens); + } + + function test_Collect_OK( + FuzzyTestCollect calldata fuzzy, + uint256 unboundedCollectionSeconds, + uint256 unboundedTokens + ) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + + (bytes memory data, uint256 collectionSeconds, uint256 tokens) = _generateValidCollection( + acceptedRca, + fuzzy.collectParams, + unboundedCollectionSeconds, + unboundedTokens + ); + + skip(collectionSeconds); + _expectCollectCallAndEmit( + acceptedRca, + agreementId, + _paymentType(fuzzy.unboundedPaymentType), + fuzzy.collectParams, + tokens + ); + vm.prank(acceptedRca.dataService); + uint256 collected = _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + assertEq(collected, tokens); + } + + function test_Collect_RevertWhen_ExceedsMaxSlippage() public { + // Setup: Create agreement with known parameters + IRecurringCollector.RecurringCollectionAgreement memory rca; + rca.deadline = uint64(block.timestamp + 1000); + rca.endsAt = uint64(block.timestamp + 2000); + rca.payer = address(0x123); + rca.dataService = address(0x456); + rca.serviceProvider = address(0x789); + rca.maxInitialTokens = 0; // No initial tokens to keep calculation simple + rca.maxOngoingTokensPerSecond = 1 ether; // 1 token per second + rca.minSecondsPerCollection = 60; // 1 minute + rca.maxSecondsPerCollection = 3600; // 1 hour + 
rca.nonce = 1; + rca.metadata = ""; + + // Accept the agreement + _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, 1); + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, 1); + bytes16 agreementId = _accept(rca, signature); + + // Do a first collection to use up initial tokens allowance + skip(rca.minSecondsPerCollection); + IRecurringCollector.CollectParams memory firstCollection = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: keccak256("first"), + tokens: 1 ether, // Small amount + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, _generateCollectData(firstCollection)); + + // Wait minimum collection time again for second collection + skip(rca.minSecondsPerCollection); + + // Calculate expected narrowing: max allowed is 60 tokens (60 seconds * 1 token/second) + uint256 maxAllowed = rca.maxOngoingTokensPerSecond * rca.minSecondsPerCollection; // 60 tokens + uint256 requested = maxAllowed + 50 ether; // Request 110 tokens + uint256 expectedSlippage = requested - maxAllowed; // 50 tokens + uint256 maxSlippage = expectedSlippage - 1; // Allow up to 49 tokens slippage + + // Create collect params with slippage protection + IRecurringCollector.CollectParams memory collectParams = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: keccak256("test"), + tokens: requested, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: maxSlippage + }); + + bytes memory data = _generateCollectData(collectParams); + + // Expect revert due to excessive slippage (50 > 49) + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorExcessiveSlippage.selector, + requested, + maxAllowed, + maxSlippage + ) + ); + vm.prank(rca.dataService); + 
_recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + } + + function test_Collect_OK_WithMaxSlippageDisabled() public { + // Setup: Create agreement with known parameters + IRecurringCollector.RecurringCollectionAgreement memory rca; + rca.deadline = uint64(block.timestamp + 1000); + rca.endsAt = uint64(block.timestamp + 2000); + rca.payer = address(0x123); + rca.dataService = address(0x456); + rca.serviceProvider = address(0x789); + rca.maxInitialTokens = 0; // No initial tokens to keep calculation simple + rca.maxOngoingTokensPerSecond = 1 ether; // 1 token per second + rca.minSecondsPerCollection = 60; // 1 minute + rca.maxSecondsPerCollection = 3600; // 1 hour + rca.nonce = 1; + rca.metadata = ""; + + // Accept the agreement + _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, 1); + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, 1); + bytes16 agreementId = _accept(rca, signature); + + // Do a first collection to use up initial tokens allowance + skip(rca.minSecondsPerCollection); + IRecurringCollector.CollectParams memory firstCollection = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: keccak256("first"), + tokens: 1 ether, // Small amount + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, _generateCollectData(firstCollection)); + + // Wait minimum collection time again for second collection + skip(rca.minSecondsPerCollection); + + // Calculate expected narrowing: max allowed is 60 tokens (60 seconds * 1 token/second) + uint256 maxAllowed = rca.maxOngoingTokensPerSecond * rca.minSecondsPerCollection; // 60 tokens + uint256 requested = maxAllowed + 50 ether; // Request 110 tokens (will be narrowed to 60) + + // Create collect params with slippage disabled (type(uint256).max) + IRecurringCollector.CollectParams 
memory collectParams = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: keccak256("test"), + tokens: requested, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + bytes memory data = _generateCollectData(collectParams); + + // Should succeed despite slippage when maxSlippage is disabled + _expectCollectCallAndEmit( + rca, + agreementId, + IGraphPayments.PaymentTypes.IndexingFee, + collectParams, + maxAllowed // Will collect the narrowed amount + ); + + vm.prank(rca.dataService); + uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, maxAllowed); + } + function test_Collect_Revert_WhenZeroTokensBypassesTemporalValidation(FuzzyTestCollect calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + + // First valid collection to establish lastCollectionAt + skip(acceptedRca.minSecondsPerCollection); + bytes memory firstData = _generateCollectData( + _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + 1, + fuzzy.collectParams.dataServiceCut + ) + ); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), firstData); + + // Attempt zero-token collection immediately (before minSecondsPerCollection). + // This MUST revert with CollectionTooSoon — zero tokens should NOT bypass + // the temporal validation that guards minSecondsPerCollection. 
+ skip(1); + IRecurringCollector.CollectParams memory zeroParams = _generateCollectParams( + acceptedRca, + agreementId, + fuzzy.collectParams.collectionId, + 0, // zero tokens + fuzzy.collectParams.dataServiceCut + ); + bytes memory zeroData = _generateCollectData(zeroParams); + + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorCollectionTooSoon.selector, + agreementId, + uint32(1), // only 1 second elapsed + acceptedRca.minSecondsPerCollection + ) + ); + vm.prank(acceptedRca.dataService); + _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), zeroData); + } + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/eligibility.t.sol b/packages/horizon/test/unit/payments/recurring-collector/eligibility.t.sol new file mode 100644 index 000000000..310e1a88f --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/eligibility.t.sol @@ -0,0 +1,190 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; +import { BareAgreementOwner } from "./BareAgreementOwner.t.sol"; + +/// @notice Tests for the IProviderEligibility gate in RecurringCollector._collect() +contract RecurringCollectorEligibilityTest is RecurringCollectorSharedTest { + function _newApprover() internal returns (MockAgreementOwner) { + return new MockAgreementOwner(); + } + + function _acceptUnsignedAgreement( + MockAgreementOwner approver + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + rca = _recurringCollectorHelper.sensibleRCA( + 
IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(approver), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.prank(rca.dataService); + agreementId = _recurringCollector.accept(rca, ""); + } + + /* solhint-disable graph/func-name-mixedcase */ + + function test_Collect_OK_WhenEligible() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + // Enable eligibility check and mark provider as eligible + approver.setEligibilityEnabled(true); + approver.setProviderEligible(rca.serviceProvider, true); + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + + vm.prank(rca.dataService); + uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, tokens); + } + + function test_Collect_Revert_WhenNotEligible() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + // Enable eligibility check but provider is NOT eligible + approver.setEligibilityEnabled(true); + // defaultEligible is false, and provider not explicitly set + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, 
bytes32("col1"), tokens, 0)); + + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorCollectionNotEligible.selector, + agreementId, + rca.serviceProvider + ) + ); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + } + + function test_Collect_OK_WhenPayerDoesNotSupportInterface() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + // eligibilityEnabled is false by default — supportsInterface returns false for IProviderEligibility + // Collection should proceed normally (backward compatible) + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + + vm.prank(rca.dataService); + uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, tokens); + } + + function test_Collect_OK_WhenEOAPayer(FuzzyTestCollect calldata fuzzy) public { + // Use standard ECDSA-signed path (EOA payer) + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + + (bytes memory data, uint256 collectionSeconds, uint256 tokens) = _generateValidCollection( + acceptedRca, + fuzzy.collectParams, + fuzzy.collectParams.tokens, + fuzzy.collectParams.tokens + ); + + skip(collectionSeconds); + // EOA payer has no code — eligibility check is skipped entirely + vm.prank(acceptedRca.dataService); + uint256 collected = _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); + assertEq(collected, tokens); + } + + function test_Collect_OK_WhenPayerHasNoERC165() public { + // BareAgreementOwner implements IAgreementOwner but NOT IERC165. 
+ // The supportsInterface call will revert, hitting the catch {} branch. + BareAgreementOwner bare = new BareAgreementOwner(); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(bare), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + bare.authorize(agreementHash); + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.prank(rca.dataService); + bytes16 agreementId = _recurringCollector.accept(rca, ""); + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + + // Collection succeeds — the catch {} swallows the revert from supportsInterface + vm.prank(rca.dataService); + uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, tokens); + } + + function test_Collect_OK_ZeroTokensSkipsEligibilityCheck() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + // Enable eligibility check, provider is NOT eligible + approver.setEligibilityEnabled(true); + // defaultEligible = false + + // Zero-token collection should NOT trigger the eligibility gate + // (the guard is inside `if (0 < tokensToCollect && ...)`) + skip(rca.minSecondsPerCollection); + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), 0, 0)); + + vm.prank(rca.dataService); 
+ uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/getMaxNextClaim.t.sol b/packages/horizon/test/unit/payments/recurring-collector/getMaxNextClaim.t.sol new file mode 100644 index 000000000..801beef6d --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/getMaxNextClaim.t.sol @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // -- Test 1: NotAccepted agreement returns 0 -- + + function test_GetMaxNextClaim_NotAccepted() public view { + bytes16 fakeId = bytes16(keccak256("nonexistent")); + assertEq(_recurringCollector.getMaxNextClaim(fakeId), 0, "NotAccepted agreement should return 0"); + } + + // -- Test 2: CanceledByServiceProvider agreement returns 0 -- + + function test_GetMaxNextClaim_CanceledByServiceProvider(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.ServiceProvider); + + assertEq(_recurringCollector.getMaxNextClaim(agreementId), 0, "CanceledByServiceProvider should return 0"); + } + + // -- Test 3: Active agreement, never collected -- + // Returns maxOngoingTokensPerSecond * min(windowSeconds, maxSecondsPerCollection) + maxInitialTokens + + function test_GetMaxNextClaim_Accepted_NeverCollected(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + 
, + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // Never collected: window = endsAt - acceptedAt, capped at maxSecondsPerCollection + // Also includes maxInitialTokens + uint256 windowSeconds = rca.endsAt - block.timestamp; + uint256 maxSeconds = windowSeconds < rca.maxSecondsPerCollection ? windowSeconds : rca.maxSecondsPerCollection; + uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds + rca.maxInitialTokens; + assertEq(maxClaim, expected, "Never-collected active agreement mismatch"); + } + + // -- Test 4: Active agreement, already collected once -- + // Returns maxOngoingTokensPerSecond * min(windowSeconds, maxSecondsPerCollection) (no initial bonus) + + function test_GetMaxNextClaim_Accepted_AfterCollection(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + // Perform a first collection so lastCollectionAt is set + skip(rca.minSecondsPerCollection); + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, keccak256("col"), 1, 0)); + vm.prank(rca.dataService); + _recurringCollector.collect(_paymentType(0), data); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // After collection: no initial tokens, window from lastCollectionAt to endsAt + uint256 windowSeconds = rca.endsAt - block.timestamp; + uint256 maxSeconds = windowSeconds < rca.maxSecondsPerCollection ? 
windowSeconds : rca.maxSecondsPerCollection; + uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds; + assertEq(maxClaim, expected, "Post-collection active agreement should exclude initial tokens"); + } + + // -- Test 5: CanceledByPayer agreement -- + + // 5a: Canceled in the same block as accepted (window = 0) + function test_GetMaxNextClaim_CanceledByPayer_SameBlock(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.Payer); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // canceledAt == acceptedAt (same block), so window = 0, maxClaim = 0 + assertEq(maxClaim, 0, "CanceledByPayer in same block should return 0"); + } + + // 5b: Canceled after time has elapsed (canceledAt < endsAt) + function test_GetMaxNextClaim_CanceledByPayer_WithWindow(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + // Advance time, then cancel (still before endsAt due to sensible bounds) + skip(rca.minSecondsPerCollection + 100); + + _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.Payer); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // collectionEnd = min(canceledAt, endsAt) = canceledAt (since canceledAt < endsAt) + // collectionStart = acceptedAt (never collected) + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); + uint256 windowSeconds = agreement.canceledAt - agreement.acceptedAt; + uint256 maxSeconds = windowSeconds < rca.maxSecondsPerCollection ? 
windowSeconds : rca.maxSecondsPerCollection; + uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds + rca.maxInitialTokens; + assertEq(maxClaim, expected, "CanceledByPayer with elapsed time mismatch"); + } + + // 5c: CanceledByPayer after a collection (no initial tokens) + function test_GetMaxNextClaim_CanceledByPayer_AfterCollection(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + // Perform a first collection + skip(rca.minSecondsPerCollection); + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, keccak256("col"), 1, 0)); + vm.prank(rca.dataService); + _recurringCollector.collect(_paymentType(0), data); + + // Advance more time, then cancel + skip(rca.minSecondsPerCollection + 100); + _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.Payer); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // lastCollectionAt is set, so no initial bonus + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); + uint256 windowSeconds = agreement.canceledAt - agreement.lastCollectionAt; + uint256 maxSeconds = windowSeconds < rca.maxSecondsPerCollection ? 
windowSeconds : rca.maxSecondsPerCollection; + uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds; + assertEq(maxClaim, expected, "CanceledByPayer post-collection should exclude initial tokens"); + } + + // -- Test 6: Agreement past endsAt -- + // For an active (Accepted) agreement that has gone past endsAt, the window + // is capped at endsAt, so returns maxOngoingTokensPerSecond * min(remaining, maxSecondsPerCollection) + + function test_GetMaxNextClaim_Accepted_PastEndsAt(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + // Perform a first collection so we have a lastCollectionAt + skip(rca.minSecondsPerCollection); + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, keccak256("col"), 1, 0)); + vm.prank(rca.dataService); + _recurringCollector.collect(_paymentType(0), data); + + uint256 lastCollectionAt = block.timestamp; + + // Warp past endsAt + vm.warp(rca.endsAt + 1000); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // collectionEnd = endsAt (active, capped), collectionStart = lastCollectionAt + // remaining = endsAt - lastCollectionAt, capped by maxSecondsPerCollection + uint256 remaining = rca.endsAt - lastCollectionAt; + uint256 maxSeconds = remaining < rca.maxSecondsPerCollection ? 
remaining : rca.maxSecondsPerCollection; + uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds; + assertEq(maxClaim, expected, "Past-endsAt active agreement should cap at endsAt"); + } + + // Also test past endsAt when never collected (includes initial tokens) + function test_GetMaxNextClaim_Accepted_PastEndsAt_NeverCollected(FuzzyTestAccept calldata fuzzy) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzy); + + uint256 acceptedAt = block.timestamp; + + // Warp past endsAt without ever collecting + vm.warp(rca.endsAt + 1000); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // collectionEnd = endsAt, collectionStart = acceptedAt + // window = endsAt - acceptedAt, capped by maxSecondsPerCollection + // Never collected so includes maxInitialTokens + uint256 windowSeconds = rca.endsAt - acceptedAt; + uint256 maxSeconds = windowSeconds < rca.maxSecondsPerCollection ? 
windowSeconds : rca.maxSecondsPerCollection; + uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds + rca.maxInitialTokens; + assertEq(maxClaim, expected, "Past-endsAt never-collected should include initial tokens"); + } + + // -- Test 7: maxSecondsPerCollection caps the window -- + + function test_GetMaxNextClaim_MaxSecondsPerCollectionCaps() public { + // Use deterministic values to precisely verify the cap behavior + uint256 signerKey = 0xBEEF; + address payer = address(0x1111); + address dataService = address(0x2222); + address serviceProvider = address(0x3333); + + uint32 minSecondsPerCollection = 1000; + uint32 maxSecondsPerCollection = 3600; // 1 hour cap + uint256 maxOngoingTokensPerSecond = 100; + uint256 maxInitialTokens = 5000; + + // Accept the agreement + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1000), + endsAt: uint64(block.timestamp + 100_000), // much larger than maxSecondsPerCollection + payer: payer, + dataService: dataService, + serviceProvider: serviceProvider, + maxInitialTokens: maxInitialTokens, + maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, + minSecondsPerCollection: minSecondsPerCollection, + maxSecondsPerCollection: maxSecondsPerCollection, + nonce: 1, + metadata: "" + }); + + // Authorize signer and accept + _recurringCollectorHelper.authorizeSignerWithChecks(payer, signerKey); + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, signerKey); + _setupValidProvision(serviceProvider, dataService); + vm.prank(dataService); + bytes16 agreementId = _recurringCollector.accept(rca, signature); + + // Window = endsAt - acceptedAt = 100_000 seconds, which is > maxSecondsPerCollection (3600) + // So the window should be capped at maxSecondsPerCollection + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // maxSeconds = min(100_000, 3600) = 3600 + uint256 expectedCapped = 
maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens; + assertEq(maxClaim, expectedCapped, "Window should be capped at maxSecondsPerCollection"); + + // Verify the cap actually applies by checking it is less than the uncapped value + uint256 uncappedWindow = rca.endsAt - block.timestamp; + uint256 expectedUncapped = maxOngoingTokensPerSecond * uncappedWindow + maxInitialTokens; + assertLt(expectedCapped, expectedUncapped, "Capped value should be less than uncapped value"); + } + + function test_GetMaxNextClaim_WindowSmallerThanMaxSecondsPerCollection() public { + // Test the case where the window is smaller than maxSecondsPerCollection (no cap) + uint256 signerKey = 0xBEEF; + address payer = address(0x1111); + address dataService = address(0x2222); + address serviceProvider = address(0x3333); + + uint32 minSecondsPerCollection = 1000; + uint32 maxSecondsPerCollection = 100_000; // very large cap + uint256 maxOngoingTokensPerSecond = 100; + uint256 maxInitialTokens = 5000; + + // endsAt is set so window (endsAt - acceptedAt) < maxSecondsPerCollection + uint64 endsAt = uint64(block.timestamp + 10_000); + + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1000), + endsAt: endsAt, + payer: payer, + dataService: dataService, + serviceProvider: serviceProvider, + maxInitialTokens: maxInitialTokens, + maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, + minSecondsPerCollection: minSecondsPerCollection, + maxSecondsPerCollection: maxSecondsPerCollection, + nonce: 1, + metadata: "" + }); + + _recurringCollectorHelper.authorizeSignerWithChecks(payer, signerKey); + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, signerKey); + _setupValidProvision(serviceProvider, dataService); + vm.prank(dataService); + bytes16 agreementId = _recurringCollector.accept(rca, signature); + + uint256 maxClaim = 
_recurringCollector.getMaxNextClaim(agreementId); + + // Window = 10_000, maxSecondsPerCollection = 100_000 + // min(10_000, 100_000) = 10_000 (window is the limiting factor, not the cap) + uint256 windowSeconds = endsAt - block.timestamp; + uint256 expected = maxOngoingTokensPerSecond * windowSeconds + maxInitialTokens; + assertEq(maxClaim, expected, "When window < maxSecondsPerCollection, window should be used directly"); + // Confirm that the window was indeed smaller + assertLt(windowSeconds, maxSecondsPerCollection, "Window should be smaller than maxSecondsPerCollection"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/mixedPath.t.sol b/packages/horizon/test/unit/payments/recurring-collector/mixedPath.t.sol new file mode 100644 index 000000000..10d6ee5e0 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/mixedPath.t.sol @@ -0,0 +1,179 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; + +/// @notice Tests that ECDSA and contract-approved paths can be mixed for accept and update. 
+contract RecurringCollectorMixedPathTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + /// @notice ECDSA accept, then contract-approved update should fail (payer is EOA) + function test_MixedPath_ECDSAAccept_UnsignedUpdate_RevertsForEOA() public { + uint256 signerKey = 0xA11CE; + address payer = vm.addr(signerKey); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: payer, + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + + // Accept via ECDSA + (, , bytes16 agreementId) = _authorizeAndAccept(rca, signerKey); + + // Try unsigned update — should revert because payer is an EOA + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 7200, + nonce: 1, + metadata: "" + }) + ); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.RecurringCollectorApproverNotContract.selector, payer) + ); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + /// @notice Contract-approved accept, then ECDSA update should fail (no authorized signer) + function test_MixedPath_UnsignedAccept_ECDSAUpdate_RevertsForUnauthorizedSigner() public { + MockAgreementOwner approver = new MockAgreementOwner(); + + IRecurringCollector.RecurringCollectionAgreement memory rca = 
_recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(approver), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + + // Accept via contract-approved path + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + _setupValidProvision(rca.serviceProvider, rca.dataService); + vm.prank(rca.dataService); + bytes16 agreementId = _recurringCollector.accept(rca, ""); + + // Try ECDSA update with an unauthorized signer + uint256 wrongKey = 0xDEAD; + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 7200, + nonce: 1, + metadata: "" + }) + ); + + (, bytes memory sig) = _recurringCollectorHelper.generateSignedRCAU(rcau, wrongKey); + + vm.expectRevert(IRecurringCollector.RecurringCollectorInvalidSigner.selector); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, sig); + } + + /// @notice Contract-approved accept, then contract-approved update works + function test_MixedPath_UnsignedAccept_UnsignedUpdate_OK() public { + MockAgreementOwner approver = new MockAgreementOwner(); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: 
address(approver), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + + // Accept via contract-approved path + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + approver.authorize(agreementHash); + _setupValidProvision(rca.serviceProvider, rca.dataService); + vm.prank(rca.dataService); + bytes16 agreementId = _recurringCollector.accept(rca, ""); + + // Update via contract-approved path (use sensibleRCAU to stay in valid ranges) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 50 ether, + maxOngoingTokensPerSecond: 0.5 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 7200, + nonce: 1, + metadata: "" + }) + ); + + bytes32 updateHash = _recurringCollector.hashRCAU(rcau); + approver.authorize(updateHash); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated( + rca.dataService, + address(approver), + rca.serviceProvider, + agreementId, + uint64(block.timestamp), + rcau.endsAt, + rcau.maxInitialTokens, + rcau.maxOngoingTokensPerSecond, + rcau.minSecondsPerCollection, + rcau.maxSecondsPerCollection + ); + + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + + // Verify updated terms + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); + assertEq(agreement.maxOngoingTokensPerSecond, rcau.maxOngoingTokensPerSecond); + assertEq(agreement.maxSecondsPerCollection, rcau.maxSecondsPerCollection); + assertEq(agreement.updateNonce, 1); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git 
a/packages/horizon/test/unit/payments/recurring-collector/shared.t.sol b/packages/horizon/test/unit/payments/recurring-collector/shared.t.sol new file mode 100644 index 000000000..0c20ccf7f --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/shared.t.sol @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; +import { RecurringCollector } from "../../../../contracts/payments/collectors/RecurringCollector.sol"; + +import { Bounder } from "../../../unit/utils/Bounder.t.sol"; +import { PartialControllerMock } from "../../mocks/PartialControllerMock.t.sol"; +import { HorizonStakingMock } from "../../mocks/HorizonStakingMock.t.sol"; +import { PaymentsEscrowMock } from "./PaymentsEscrowMock.t.sol"; +import { RecurringCollectorHelper } from "./RecurringCollectorHelper.t.sol"; + +contract RecurringCollectorSharedTest is Test, Bounder { + struct FuzzyTestCollect { + FuzzyTestAccept fuzzyTestAccept; + uint8 unboundedPaymentType; + IRecurringCollector.CollectParams collectParams; + } + + struct FuzzyTestAccept { + IRecurringCollector.RecurringCollectionAgreement rca; + uint256 unboundedSignerKey; + } + + struct FuzzyTestUpdate { + FuzzyTestAccept fuzzyTestAccept; + IRecurringCollector.RecurringCollectionAgreementUpdate rcau; + } + + RecurringCollector internal _recurringCollector; + PaymentsEscrowMock internal _paymentsEscrow; + HorizonStakingMock internal _horizonStaking; + RecurringCollectorHelper internal _recurringCollectorHelper; + + function setUp() 
public { + _paymentsEscrow = new PaymentsEscrowMock(); + _horizonStaking = new HorizonStakingMock(); + PartialControllerMock.Entry[] memory entries = new PartialControllerMock.Entry[](2); + entries[0] = PartialControllerMock.Entry({ name: "PaymentsEscrow", addr: address(_paymentsEscrow) }); + entries[1] = PartialControllerMock.Entry({ name: "Staking", addr: address(_horizonStaking) }); + _recurringCollector = new RecurringCollector( + "RecurringCollector", + "1", + address(new PartialControllerMock(entries)), + 1 + ); + _recurringCollectorHelper = new RecurringCollectorHelper(_recurringCollector); + } + + function _sensibleAuthorizeAndAccept( + FuzzyTestAccept calldata _fuzzyTestAccept + ) + internal + returns ( + IRecurringCollector.RecurringCollectionAgreement memory, + bytes memory signature, + uint256 key, + bytes16 agreementId + ) + { + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + _fuzzyTestAccept.rca + ); + key = boundKey(_fuzzyTestAccept.unboundedSignerKey); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes memory sig, + bytes16 id + ) = _authorizeAndAccept(rca, key); + return (acceptedRca, sig, key, id); + } + + // authorizes signer, signs the RCA, and accepts it + function _authorizeAndAccept( + IRecurringCollector.RecurringCollectionAgreement memory _rca, + uint256 _signerKey + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory, bytes16 agreementId) { + _recurringCollectorHelper.authorizeSignerWithChecks(_rca.payer, _signerKey); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(_rca, _signerKey); + + agreementId = _accept(rca, signature); + return (rca, signature, agreementId); + } + + function _accept( + IRecurringCollector.RecurringCollectionAgreement memory _rca, + bytes memory _signature + ) internal returns (bytes16) { + // Set up 
valid staking provision by default to allow collections to succeed + _setupValidProvision(_rca.serviceProvider, _rca.dataService); + + // Calculate the expected agreement ID for verification + bytes16 expectedAgreementId = _recurringCollector.generateAgreementId( + _rca.payer, + _rca.dataService, + _rca.serviceProvider, + _rca.deadline, + _rca.nonce + ); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementAccepted( + _rca.dataService, + _rca.payer, + _rca.serviceProvider, + expectedAgreementId, + uint64(block.timestamp), + _rca.endsAt, + _rca.maxInitialTokens, + _rca.maxOngoingTokensPerSecond, + _rca.minSecondsPerCollection, + _rca.maxSecondsPerCollection + ); + vm.prank(_rca.dataService); + bytes16 actualAgreementId = _recurringCollector.accept(_rca, _signature); + + // Verify the agreement ID matches expectation + assertEq(actualAgreementId, expectedAgreementId); + return actualAgreementId; + } + + function _setupValidProvision(address _serviceProvider, address _dataService) internal { + _horizonStaking.setProvision( + _serviceProvider, + _dataService, + IHorizonStakingTypes.Provision({ + tokens: 1000 ether, + tokensThawing: 0, + sharesThawing: 0, + maxVerifierCut: 100000, // 10% + thawingPeriod: 604800, // 7 days + createdAt: uint64(block.timestamp), + maxVerifierCutPending: 100000, + thawingPeriodPending: 604800, + lastParametersStagedAt: 0, + thawingNonce: 0 + }) + ); + } + + function _cancel( + IRecurringCollector.RecurringCollectionAgreement memory _rca, + bytes16 _agreementId, + IRecurringCollector.CancelAgreementBy _by + ) internal { + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementCanceled( + _rca.dataService, + _rca.payer, + _rca.serviceProvider, + _agreementId, + uint64(block.timestamp), + _by + ); + vm.prank(_rca.dataService); + _recurringCollector.cancel(_agreementId, _by); + } + + function _expectCollectCallAndEmit( + IRecurringCollector.RecurringCollectionAgreement memory _rca, + 
bytes16 _agreementId, + IGraphPayments.PaymentTypes __paymentType, + IRecurringCollector.CollectParams memory _fuzzyParams, + uint256 _tokens + ) internal { + vm.expectCall( + address(_paymentsEscrow), + abi.encodeCall( + _paymentsEscrow.collect, + ( + __paymentType, + _rca.payer, + _rca.serviceProvider, + _tokens, + _rca.dataService, + _fuzzyParams.dataServiceCut, + _rca.serviceProvider + ) + ) + ); + vm.expectEmit(address(_recurringCollector)); + emit IPaymentsCollector.PaymentCollected( + __paymentType, + _fuzzyParams.collectionId, + _rca.payer, + _rca.serviceProvider, + _rca.dataService, + _tokens + ); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.RCACollected( + _rca.dataService, + _rca.payer, + _rca.serviceProvider, + _agreementId, + _fuzzyParams.collectionId, + _tokens, + _fuzzyParams.dataServiceCut + ); + } + + function _generateValidCollection( + IRecurringCollector.RecurringCollectionAgreement memory _rca, + IRecurringCollector.CollectParams memory _fuzzyParams, + uint256 _unboundedCollectionSkip, + uint256 _unboundedTokens + ) internal view returns (bytes memory, uint256, uint256) { + uint256 collectionSeconds = boundSkip( + _unboundedCollectionSkip, + _rca.minSecondsPerCollection, + _rca.maxSecondsPerCollection + ); + uint256 tokens = bound(_unboundedTokens, 1, _rca.maxOngoingTokensPerSecond * collectionSeconds); + + // Generate the agreement ID deterministically + bytes16 agreementId = _recurringCollector.generateAgreementId( + _rca.payer, + _rca.dataService, + _rca.serviceProvider, + _rca.deadline, + _rca.nonce + ); + + bytes memory data = _generateCollectData( + _generateCollectParams(_rca, agreementId, _fuzzyParams.collectionId, tokens, _fuzzyParams.dataServiceCut) + ); + + return (data, collectionSeconds, tokens); + } + + function _generateCollectParams( + IRecurringCollector.RecurringCollectionAgreement memory _rca, + bytes16 _agreementId, + bytes32 _collectionId, + uint256 _tokens, + uint256 _dataServiceCut + ) 
internal pure returns (IRecurringCollector.CollectParams memory) { + return + IRecurringCollector.CollectParams({ + agreementId: _agreementId, + collectionId: _collectionId, + tokens: _tokens, + dataServiceCut: _dataServiceCut, + receiverDestination: _rca.serviceProvider, + maxSlippage: type(uint256).max + }); + } + + function _generateCollectData( + IRecurringCollector.CollectParams memory _params + ) internal pure returns (bytes memory) { + return abi.encode(_params); + } + + function _fuzzyCancelAgreementBy(uint8 _seed) internal pure returns (IRecurringCollector.CancelAgreementBy) { + return + IRecurringCollector.CancelAgreementBy( + bound(_seed, 0, uint256(IRecurringCollector.CancelAgreementBy.Payer)) + ); + } + + function _paymentType(uint8 _unboundedPaymentType) internal pure returns (IGraphPayments.PaymentTypes) { + return + IGraphPayments.PaymentTypes( + bound( + _unboundedPaymentType, + uint256(type(IGraphPayments.PaymentTypes).min), + uint256(type(IGraphPayments.PaymentTypes).max) + ) + ); + } +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/update.t.sol b/packages/horizon/test/unit/payments/recurring-collector/update.t.sol new file mode 100644 index 000000000..d466f3c49 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/update.t.sol @@ -0,0 +1,315 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +contract RecurringCollectorUpdateTest is RecurringCollectorSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + + function test_Update_Revert_WhenUpdateElapsed( + FuzzyTestUpdate calldata fuzzyTestUpdate, + uint256 unboundedUpdateSkip + ) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + uint256 signerKey, + bytes16 agreementId + ) = 
_sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau.agreementId = agreementId; + + boundSkipCeil(unboundedUpdateSkip, type(uint64).max); + rcau.deadline = uint64(bound(rcau.deadline, 0, block.timestamp - 1)); + + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, + block.timestamp, + rcau.deadline + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau, signature); + } + + function test_Update_Revert_WhenNeverAccepted( + IRecurringCollector.RecurringCollectionAgreement memory rca, + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau + ) public { + rca = _recurringCollectorHelper.sensibleRCA(rca); + rcau = _recurringCollectorHelper.sensibleRCAU(rcau); + // Generate deterministic agreement ID + bytes16 agreementId = _recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + rcau.agreementId = agreementId; + + rcau.deadline = uint64(block.timestamp); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, + rcau.agreementId, + IRecurringCollector.AgreementState.NotAccepted + ); + vm.expectRevert(expectedErr); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + function test_Update_Revert_WhenDataServiceNotAuthorized( + FuzzyTestUpdate calldata fuzzyTestUpdate, + address notDataService + ) public { + vm.assume(fuzzyTestUpdate.fuzzyTestAccept.rca.dataService != notDataService); + (, , uint256 signerKey, bytes16 agreementId) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + + 
IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau.agreementId = agreementId; + + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAUWithCorrectNonce(rcau, signerKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, + rcau.agreementId, + notDataService + ); + vm.expectRevert(expectedErr); + vm.prank(notDataService); + _recurringCollector.update(rcau, signature); + } + + function test_Update_Revert_WhenInvalidSigner( + FuzzyTestUpdate calldata fuzzyTestUpdate, + uint256 unboundedInvalidSignerKey + ) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + , + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + uint256 signerKey = boundKey(fuzzyTestUpdate.fuzzyTestAccept.unboundedSignerKey); + uint256 invalidSignerKey = boundKey(unboundedInvalidSignerKey); + vm.assume(signerKey != invalidSignerKey); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau.agreementId = agreementId; + + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, invalidSignerKey); + + vm.expectRevert(IRecurringCollector.RecurringCollectorInvalidSigner.selector); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau, signature); + } + + function test_Update_OK(FuzzyTestUpdate calldata fuzzyTestUpdate) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + uint256 signerKey, + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau.agreementId = agreementId; + // Don't use fuzzed nonce - 
use correct nonce for first update + rcau.nonce = 1; + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated( + acceptedRca.dataService, + acceptedRca.payer, + acceptedRca.serviceProvider, + rcau.agreementId, + uint64(block.timestamp), + rcau.endsAt, + rcau.maxInitialTokens, + rcau.maxOngoingTokensPerSecond, + rcau.minSecondsPerCollection, + rcau.maxSecondsPerCollection + ); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau, signature); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); + assertEq(rcau.endsAt, agreement.endsAt); + assertEq(rcau.maxInitialTokens, agreement.maxInitialTokens); + assertEq(rcau.maxOngoingTokensPerSecond, agreement.maxOngoingTokensPerSecond); + assertEq(rcau.minSecondsPerCollection, agreement.minSecondsPerCollection); + assertEq(rcau.maxSecondsPerCollection, agreement.maxSecondsPerCollection); + assertEq(rcau.nonce, agreement.updateNonce); + } + + function test_Update_Revert_WhenInvalidNonce_TooLow(FuzzyTestUpdate calldata fuzzyTestUpdate) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + uint256 signerKey, + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau.agreementId = agreementId; + rcau.nonce = 0; // Invalid: should be 1 for first update + + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + rcau.agreementId, + 1, // expected + 0 // provided + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau, 
signature); + } + + function test_Update_Revert_WhenInvalidNonce_TooHigh(FuzzyTestUpdate calldata fuzzyTestUpdate) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + uint256 signerKey, + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau.agreementId = agreementId; + rcau.nonce = 5; // Invalid: should be 1 for first update + + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + rcau.agreementId, + 1, // expected + 5 // provided + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau, signature); + } + + function test_Update_Revert_WhenReplayAttack(FuzzyTestUpdate calldata fuzzyTestUpdate) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + uint256 signerKey, + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau1.agreementId = agreementId; + rcau1.nonce = 1; + + // First update succeeds + (, bytes memory signature1) = _recurringCollectorHelper.generateSignedRCAU(rcau1, signerKey); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau1, signature1); + + // Second update with different terms and nonce 2 succeeds + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: rcau1.agreementId, + deadline: rcau1.deadline, + endsAt: rcau1.endsAt, + maxInitialTokens: rcau1.maxInitialTokens, + maxOngoingTokensPerSecond: 
rcau1.maxOngoingTokensPerSecond * 2, // Different terms + minSecondsPerCollection: rcau1.minSecondsPerCollection, + maxSecondsPerCollection: rcau1.maxSecondsPerCollection, + nonce: 2, + metadata: rcau1.metadata + }); + + (, bytes memory signature2) = _recurringCollectorHelper.generateSignedRCAU(rcau2, signerKey); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau2, signature2); + + // Attempting to replay first update should fail + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + rcau1.agreementId, + 3, // expected (current nonce + 1) + 1 // provided (old nonce) + ); + vm.expectRevert(expectedErr); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau1, signature1); + } + + function test_Update_OK_NonceIncrementsCorrectly(FuzzyTestUpdate calldata fuzzyTestUpdate) public { + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + , + uint256 signerKey, + bytes16 agreementId + ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + + // Initial nonce should be 0 + IRecurringCollector.AgreementData memory initialAgreement = _recurringCollector.getAgreement(agreementId); + assertEq(initialAgreement.updateNonce, 0); + + // First update with nonce 1 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau + ); + rcau1.agreementId = agreementId; + rcau1.nonce = 1; + + (, bytes memory signature1) = _recurringCollectorHelper.generateSignedRCAU(rcau1, signerKey); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau1, signature1); + + // Verify nonce incremented to 1 + IRecurringCollector.AgreementData memory updatedAgreement1 = _recurringCollector.getAgreement(agreementId); + assertEq(updatedAgreement1.updateNonce, 1); + + // Second update with nonce 2 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = IRecurringCollector 
+ .RecurringCollectionAgreementUpdate({ + agreementId: rcau1.agreementId, + deadline: rcau1.deadline, + endsAt: rcau1.endsAt, + maxInitialTokens: rcau1.maxInitialTokens, + maxOngoingTokensPerSecond: rcau1.maxOngoingTokensPerSecond * 2, // Different terms + minSecondsPerCollection: rcau1.minSecondsPerCollection, + maxSecondsPerCollection: rcau1.maxSecondsPerCollection, + nonce: 2, + metadata: rcau1.metadata + }); + + (, bytes memory signature2) = _recurringCollectorHelper.generateSignedRCAU(rcau2, signerKey); + vm.prank(acceptedRca.dataService); + _recurringCollector.update(rcau2, signature2); + + // Verify nonce incremented to 2 + IRecurringCollector.AgreementData memory updatedAgreement2 = _recurringCollector.getAgreement(agreementId); + assertEq(updatedAgreement2.updateNonce, 2); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/updateUnsigned.t.sol b/packages/horizon/test/unit/payments/recurring-collector/updateUnsigned.t.sol new file mode 100644 index 000000000..22016075a --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/updateUnsigned.t.sol @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; + +contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { + function _newApprover() internal returns (MockAgreementOwner) { + return new MockAgreementOwner(); + } + + /// @notice Helper to accept an agreement via the unsigned path and return the ID + function _acceptUnsigned( + MockAgreementOwner approver, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal returns (bytes16) { + bytes32 agreementHash = _recurringCollector.hashRCA(rca); + 
approver.authorize(agreementHash); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.prank(rca.dataService); + return _recurringCollector.accept(rca, ""); + } + + function _makeSimpleRCA(address payer) internal returns (IRecurringCollector.RecurringCollectionAgreement memory) { + return + _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: payer, + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + } + + function _makeSimpleRCAU( + bytes16 agreementId, + uint32 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory) { + return + _recurringCollectorHelper.sensibleRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 7200, + nonce: nonce, + metadata: "" + }) + ); + } + + /* solhint-disable graph/func-name-mixedcase */ + + function test_UpdateUnsigned() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes16 agreementId = _acceptUnsigned(approver, rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); + + // Authorize the update hash + bytes32 updateHash = _recurringCollector.hashRCAU(rcau); + approver.authorize(updateHash); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated( + rca.dataService, + rca.payer, + rca.serviceProvider, + agreementId, + 
uint64(block.timestamp), + rcau.endsAt, + rcau.maxInitialTokens, + rcau.maxOngoingTokensPerSecond, + rcau.minSecondsPerCollection, + rcau.maxSecondsPerCollection + ); + + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); + assertEq(rcau.endsAt, agreement.endsAt); + assertEq(rcau.maxInitialTokens, agreement.maxInitialTokens); + assertEq(rcau.maxOngoingTokensPerSecond, agreement.maxOngoingTokensPerSecond); + assertEq(rcau.minSecondsPerCollection, agreement.minSecondsPerCollection); + assertEq(rcau.maxSecondsPerCollection, agreement.maxSecondsPerCollection); + assertEq(rcau.nonce, agreement.updateNonce); + } + + function test_UpdateUnsigned_Revert_WhenPayerNotContract() public { + // Use the signed accept path to create an agreement with an EOA payer, + // then attempt updateUnsigned which should fail because payer isn't a contract + uint256 signerKey = 0xA11CE; + address payer = vm.addr(signerKey); + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: payer, + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + nonce: 1, + metadata: "" + }) + ); + + // Accept via signed path + _recurringCollectorHelper.authorizeSignerWithChecks(payer, signerKey); + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, signerKey); + _setupValidProvision(rca.serviceProvider, rca.dataService); + vm.prank(rca.dataService); + bytes16 agreementId = _recurringCollector.accept(rca, signature); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); + + 
vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.RecurringCollectorApproverNotContract.selector, payer) + ); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + function test_UpdateUnsigned_Revert_WhenHashNotAuthorized() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes16 agreementId = _acceptUnsigned(approver, rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); + + // Don't authorize the update hash — approver returns bytes4(0), caller rejects + vm.expectRevert(IRecurringCollector.RecurringCollectorInvalidSigner.selector); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + function test_UpdateUnsigned_Revert_WhenWrongMagicValue() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes16 agreementId = _acceptUnsigned(approver, rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); + + approver.setOverrideReturnValue(bytes4(0xdeadbeef)); + + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.RecurringCollectorInvalidSigner.selector)); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + function test_UpdateUnsigned_Revert_WhenNotDataService() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes16 agreementId = _acceptUnsigned(approver, rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); + + bytes32 updateHash = _recurringCollector.hashRCAU(rcau); + approver.authorize(updateHash); + + address notDataService = makeAddr("notDataService"); + vm.expectRevert( + 
abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, + agreementId, + notDataService + ) + ); + vm.prank(notDataService); + _recurringCollector.update(rcau, ""); + } + + function test_UpdateUnsigned_Revert_WhenNotAccepted() public { + // Don't accept — just try to update a non-existent agreement + bytes16 fakeId = bytes16(keccak256("fake")); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(fakeId, 1); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, + fakeId, + IRecurringCollector.AgreementState.NotAccepted + ); + vm.expectRevert(expectedErr); + vm.prank(makeAddr("ds")); + _recurringCollector.update(rcau, ""); + } + + function test_UpdateUnsigned_Revert_WhenInvalidNonce() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes16 agreementId = _acceptUnsigned(approver, rca); + + // Use wrong nonce (0 instead of 1) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 0); + + bytes32 updateHash = _recurringCollector.hashRCAU(rcau); + approver.authorize(updateHash); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + agreementId, + 1, // expected + 0 // provided + ); + vm.expectRevert(expectedErr); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + function test_UpdateUnsigned_Revert_WhenApproverReverts() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes16 agreementId = _acceptUnsigned(approver, rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); + + approver.setShouldRevert(true); + 
+ vm.expectRevert("MockAgreementOwner: forced revert"); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + function test_UpdateUnsigned_Revert_WhenDeadlineElapsed() public { + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + bytes16 agreementId = _acceptUnsigned(approver, rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); + + // Set the update deadline in the past + rcau.deadline = uint64(block.timestamp - 1); + + bytes32 updateHash = _recurringCollector.hashRCAU(rcau); + approver.authorize(updateHash); + + bytes memory expectedErr = abi.encodeWithSelector( + IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, + block.timestamp, + rcau.deadline + ); + vm.expectRevert(expectedErr); + vm.prank(rca.dataService); + _recurringCollector.update(rcau, ""); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/shared/horizon-staking/HorizonStakingShared.t.sol b/packages/horizon/test/unit/shared/horizon-staking/HorizonStakingShared.t.sol index 27b4aeca9..1c15ce738 100644 --- a/packages/horizon/test/unit/shared/horizon-staking/HorizonStakingShared.t.sol +++ b/packages/horizon/test/unit/shared/horizon-staking/HorizonStakingShared.t.sol @@ -1,18 +1,15 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { GraphBaseTest } from "../../GraphBase.t.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; -import { IHorizonStakingBase } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingBase.sol"; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; -import { IHorizonStakingExtension } from 
"@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol"; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; import { LinkedList } from "../../../../contracts/libraries/LinkedList.sol"; -import { MathUtils } from "../../../../contracts/libraries/MathUtils.sol"; +import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; import { PPMMath } from "../../../../contracts/libraries/PPMMath.sol"; -import { ExponentialRebates } from "../../../../contracts/staking/libraries/ExponentialRebates.sol"; abstract contract HorizonStakingSharedTest is GraphBaseTest { using LinkedList for ILinkedList.List; @@ -21,13 +18,6 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { event Transfer(address indexed from, address indexed to, uint tokens); address internal _allocationId = makeAddr("allocationId"); - bytes32 internal constant _SUBGRAPH_DEPLOYMENT_ID = keccak256("subgraphDeploymentID"); - uint256 internal constant MAX_ALLOCATION_EPOCHS = 28; - - uint32 internal alphaNumerator = 100; - uint32 internal alphaDenominator = 100; - uint32 internal lambdaNumerator = 60; - uint32 internal lambdaDenominator = 100; /* * MODIFIERS @@ -78,17 +68,6 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { _createProvision(users.indexer, dataService, tokens, maxVerifierCut, thawingPeriod); } - modifier useAllocation(uint256 tokens) { - vm.assume(tokens <= MAX_STAKING_TOKENS); - _createAllocation(users.indexer, _allocationId, _SUBGRAPH_DEPLOYMENT_ID, tokens); - _; - } - - modifier useRebateParameters() { - _setStorageRebateParameters(alphaNumerator, alphaDenominator, lambdaNumerator, lambdaDenominator); - _; - } - /* * HELPERS: these are shortcuts to perform common actions that often involve multiple contract calls */ @@ -103,34 +82,6 @@ abstract contract 
HorizonStakingSharedTest is GraphBaseTest { _provision(serviceProvider, verifier, tokens, maxVerifierCut, thawingPeriod); } - // This allows setting up contract state with legacy allocations - function _createAllocation( - address serviceProvider, - address allocationId, - bytes32 subgraphDeploymentId, - uint256 tokens - ) internal { - _setStorageMaxAllocationEpochs(MAX_ALLOCATION_EPOCHS); - - IHorizonStakingExtension.Allocation memory _allocation = IHorizonStakingExtension.Allocation({ - indexer: serviceProvider, - subgraphDeploymentID: subgraphDeploymentId, - tokens: tokens, - createdAtEpoch: block.timestamp, - closedAtEpoch: 0, - collectedFees: 0, - __DEPRECATED_effectiveAllocation: 0, - accRewardsPerAllocatedToken: 0, - distributedRebates: 0 - }); - _setStorageAllocation(_allocation, allocationId, tokens); - - // delegation pool initialized - _setStorageDelegationPool(serviceProvider, 0, uint32(PPMMath.MAX_PPM), uint32(PPMMath.MAX_PPM)); - - require(token.transfer(address(staking), tokens), "Transfer failed"); - } - /* * ACTIONS: these are individual contract calls wrapped in assertion blocks to ensure they work as expected */ @@ -150,7 +101,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { // stakeTo token.approve(address(staking), tokens); vm.expectEmit(); - emit IHorizonStakingBase.HorizonStakeDeposited(serviceProvider, tokens); + emit IHorizonStakingMain.HorizonStakeDeposited(serviceProvider, tokens); staking.stakeTo(serviceProvider, tokens); // after @@ -183,7 +134,7 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { // stakeTo token.approve(address(staking), tokens); vm.expectEmit(); - emit IHorizonStakingBase.HorizonStakeDeposited(serviceProvider, tokens); + emit IHorizonStakingMain.HorizonStakeDeposited(serviceProvider, tokens); vm.expectEmit(); emit IHorizonStakingMain.ProvisionIncreased(serviceProvider, verifier, tokens); staking.stakeToProvision(serviceProvider, verifier, tokens); @@ -230,48 +181,15 @@ abstract contract 
HorizonStakingSharedTest is GraphBaseTest { function _unstake(uint256 _tokens) internal { (, address msgSender, ) = vm.readCallers(); - uint256 deprecatedThawingPeriod = staking.__DEPRECATED_getThawingPeriod(); - // before uint256 beforeSenderBalance = token.balanceOf(msgSender); uint256 beforeStakingBalance = token.balanceOf(address(staking)); ServiceProviderInternal memory beforeServiceProvider = _getStorageServiceProviderInternal(msgSender); - bool withdrawCalled = beforeServiceProvider.__DEPRECATED_tokensLocked != 0 && - block.number >= beforeServiceProvider.__DEPRECATED_tokensLockedUntil; - - if (deprecatedThawingPeriod != 0 && beforeServiceProvider.__DEPRECATED_tokensLocked > 0) { - deprecatedThawingPeriod = MathUtils.weightedAverageRoundingUp( - MathUtils.diffOrZero( - withdrawCalled ? 0 : beforeServiceProvider.__DEPRECATED_tokensLockedUntil, - block.number - ), - withdrawCalled ? 0 : beforeServiceProvider.__DEPRECATED_tokensLocked, - deprecatedThawingPeriod, - _tokens - ); - } - // unstake - if (deprecatedThawingPeriod == 0) { - vm.expectEmit(address(staking)); - emit IHorizonStakingMain.HorizonStakeWithdrawn(msgSender, _tokens); - } else { - if (withdrawCalled) { - vm.expectEmit(address(staking)); - emit IHorizonStakingMain.HorizonStakeWithdrawn( - msgSender, - beforeServiceProvider.__DEPRECATED_tokensLocked - ); - } + vm.expectEmit(address(staking)); + emit IHorizonStakingMain.HorizonStakeWithdrawn(msgSender, _tokens); - vm.expectEmit(address(staking)); - emit IHorizonStakingMain.HorizonStakeLocked( - msgSender, - withdrawCalled ? 
_tokens : beforeServiceProvider.__DEPRECATED_tokensLocked + _tokens, - block.number + deprecatedThawingPeriod - ); - } staking.unstake(_tokens); // after @@ -280,41 +198,16 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { ServiceProviderInternal memory afterServiceProvider = _getStorageServiceProviderInternal(msgSender); // assert - if (deprecatedThawingPeriod == 0) { - assertEq(afterSenderBalance, _tokens + beforeSenderBalance); - assertEq(afterStakingBalance, beforeStakingBalance - _tokens); - assertEq(afterServiceProvider.tokensStaked, beforeServiceProvider.tokensStaked - _tokens); - assertEq(afterServiceProvider.tokensProvisioned, beforeServiceProvider.tokensProvisioned); - assertEq( - afterServiceProvider.__DEPRECATED_tokensAllocated, - beforeServiceProvider.__DEPRECATED_tokensAllocated - ); - assertEq(afterServiceProvider.__DEPRECATED_tokensLocked, beforeServiceProvider.__DEPRECATED_tokensLocked); - assertEq( - afterServiceProvider.__DEPRECATED_tokensLockedUntil, - beforeServiceProvider.__DEPRECATED_tokensLockedUntil - ); - } else { - assertEq( - afterServiceProvider.tokensStaked, - withdrawCalled - ? beforeServiceProvider.tokensStaked - beforeServiceProvider.__DEPRECATED_tokensLocked - : beforeServiceProvider.tokensStaked - ); - assertEq( - afterServiceProvider.__DEPRECATED_tokensLocked, - _tokens + (withdrawCalled ? 0 : beforeServiceProvider.__DEPRECATED_tokensLocked) - ); - assertEq(afterServiceProvider.__DEPRECATED_tokensLockedUntil, block.number + deprecatedThawingPeriod); - assertEq(afterServiceProvider.tokensProvisioned, beforeServiceProvider.tokensProvisioned); - assertEq( - afterServiceProvider.__DEPRECATED_tokensAllocated, - beforeServiceProvider.__DEPRECATED_tokensAllocated - ); - uint256 tokensTransferred = (withdrawCalled ? 
beforeServiceProvider.__DEPRECATED_tokensLocked : 0); - assertEq(afterSenderBalance, beforeSenderBalance + tokensTransferred); - assertEq(afterStakingBalance, beforeStakingBalance - tokensTransferred); - } + assertEq(afterSenderBalance, _tokens + beforeSenderBalance); + assertEq(afterStakingBalance, beforeStakingBalance - _tokens); + assertEq(afterServiceProvider.tokensStaked, beforeServiceProvider.tokensStaked - _tokens); + assertEq(afterServiceProvider.tokensProvisioned, beforeServiceProvider.tokensProvisioned); + assertEq(afterServiceProvider.__DEPRECATED_tokensAllocated, beforeServiceProvider.__DEPRECATED_tokensAllocated); + assertEq(afterServiceProvider.__DEPRECATED_tokensLocked, beforeServiceProvider.__DEPRECATED_tokensLocked); + assertEq( + afterServiceProvider.__DEPRECATED_tokensLockedUntil, + beforeServiceProvider.__DEPRECATED_tokensLockedUntil + ); } function _withdraw() internal { @@ -1453,19 +1346,6 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { assertEq(afterEnabled, true); } - function _clearThawingPeriod() internal { - // clearThawingPeriod - vm.expectEmit(address(staking)); - emit IHorizonStakingMain.ThawingPeriodCleared(); - staking.clearThawingPeriod(); - - // after - uint64 afterThawingPeriod = staking.__DEPRECATED_getThawingPeriod(); - - // assert - assertEq(afterThawingPeriod, 0); - } - function _setMaxThawingPeriod(uint64 maxThawingPeriod) internal { // setMaxThawingPeriod vm.expectEmit(address(staking)); @@ -1509,8 +1389,8 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { // Calculate expected tokens after slashing CalcValuesSlash memory calcValues; - calcValues.tokensToSlash = MathUtils.min(tokens, before.provision.tokens + before.pool.tokens); - calcValues.providerTokensSlashed = MathUtils.min(before.provision.tokens, calcValues.tokensToSlash); + calcValues.tokensToSlash = Math.min(tokens, before.provision.tokens + before.pool.tokens); + calcValues.providerTokensSlashed = Math.min(before.provision.tokens, 
calcValues.tokensToSlash); calcValues.delegationTokensSlashed = calcValues.tokensToSlash - calcValues.providerTokensSlashed; if (calcValues.tokensToSlash > 0) { @@ -1612,314 +1492,6 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { } } - // use struct to avoid 'stack too deep' error - struct CalcValuesCloseAllocation { - uint256 rewards; - uint256 delegatorRewards; - uint256 indexerRewards; - } - struct BeforeValuesCloseAllocation { - IHorizonStakingExtension.Allocation allocation; - DelegationPoolInternalTest pool; - ServiceProviderInternal serviceProvider; - uint256 subgraphAllocations; - uint256 stakingBalance; - uint256 indexerBalance; - uint256 beneficiaryBalance; - } - - // Current rewards manager is mocked and assumed to mint fixed rewards - function _closeAllocation(address allocationId, bytes32 poi) internal { - (, address msgSender, ) = vm.readCallers(); - - // before - BeforeValuesCloseAllocation memory beforeValues; - beforeValues.allocation = staking.getAllocation(allocationId); - beforeValues.pool = _getStorageDelegationPoolInternal( - beforeValues.allocation.indexer, - subgraphDataServiceLegacyAddress, - true - ); - beforeValues.serviceProvider = _getStorageServiceProviderInternal(beforeValues.allocation.indexer); - beforeValues.subgraphAllocations = _getStorageSubgraphAllocations(beforeValues.allocation.subgraphDeploymentID); - beforeValues.stakingBalance = token.balanceOf(address(staking)); - beforeValues.indexerBalance = token.balanceOf(beforeValues.allocation.indexer); - beforeValues.beneficiaryBalance = token.balanceOf( - _getStorageRewardsDestination(beforeValues.allocation.indexer) - ); - - bool isAuth = staking.isAuthorized( - beforeValues.allocation.indexer, - subgraphDataServiceLegacyAddress, - msgSender - ); - address rewardsDestination = _getStorageRewardsDestination(beforeValues.allocation.indexer); - - CalcValuesCloseAllocation memory calcValues = CalcValuesCloseAllocation({ - rewards: ALLOCATIONS_REWARD_CUT, - 
delegatorRewards: ALLOCATIONS_REWARD_CUT - - uint256(beforeValues.pool.__DEPRECATED_indexingRewardCut).mulPPM(ALLOCATIONS_REWARD_CUT), - indexerRewards: 0 - }); - calcValues.indexerRewards = - ALLOCATIONS_REWARD_CUT - (beforeValues.pool.tokens > 0 ? calcValues.delegatorRewards : 0); - - // closeAllocation - vm.expectEmit(address(staking)); - emit IHorizonStakingExtension.AllocationClosed( - beforeValues.allocation.indexer, - beforeValues.allocation.subgraphDeploymentID, - epochManager.currentEpoch(), - beforeValues.allocation.tokens, - allocationId, - msgSender, - poi, - !isAuth - ); - staking.closeAllocation(allocationId, poi); - - // after - IHorizonStakingExtension.Allocation memory afterAllocation = staking.getAllocation(allocationId); - DelegationPoolInternalTest memory afterPool = _getStorageDelegationPoolInternal( - beforeValues.allocation.indexer, - subgraphDataServiceLegacyAddress, - true - ); - ServiceProviderInternal memory afterServiceProvider = _getStorageServiceProviderInternal( - beforeValues.allocation.indexer - ); - uint256 afterSubgraphAllocations = _getStorageSubgraphAllocations(beforeValues.allocation.subgraphDeploymentID); - uint256 afterStakingBalance = token.balanceOf(address(staking)); - uint256 afterIndexerBalance = token.balanceOf(beforeValues.allocation.indexer); - uint256 afterBeneficiaryBalance = token.balanceOf(rewardsDestination); - - if (beforeValues.allocation.tokens > 0) { - if (isAuth && poi != 0) { - if (rewardsDestination != address(0)) { - assertEq( - beforeValues.stakingBalance + calcValues.rewards - calcValues.indexerRewards, - afterStakingBalance - ); - assertEq(beforeValues.indexerBalance, afterIndexerBalance); - assertEq(beforeValues.beneficiaryBalance + calcValues.indexerRewards, afterBeneficiaryBalance); - } else { - assertEq(beforeValues.stakingBalance + calcValues.rewards, afterStakingBalance); - assertEq(beforeValues.indexerBalance, afterIndexerBalance); - assertEq(beforeValues.beneficiaryBalance, 
afterBeneficiaryBalance); - } - } else { - assertEq(beforeValues.stakingBalance, afterStakingBalance); - assertEq(beforeValues.indexerBalance, afterIndexerBalance); - assertEq(beforeValues.beneficiaryBalance, afterBeneficiaryBalance); - } - } else { - assertEq(beforeValues.stakingBalance, afterStakingBalance); - assertEq(beforeValues.indexerBalance, afterIndexerBalance); - assertEq(beforeValues.beneficiaryBalance, afterBeneficiaryBalance); - } - - assertEq(afterAllocation.indexer, beforeValues.allocation.indexer); - assertEq(afterAllocation.subgraphDeploymentID, beforeValues.allocation.subgraphDeploymentID); - assertEq(afterAllocation.tokens, beforeValues.allocation.tokens); - assertEq(afterAllocation.createdAtEpoch, beforeValues.allocation.createdAtEpoch); - assertEq(afterAllocation.closedAtEpoch, epochManager.currentEpoch()); - assertEq(afterAllocation.collectedFees, beforeValues.allocation.collectedFees); - assertEq( - afterAllocation.__DEPRECATED_effectiveAllocation, - beforeValues.allocation.__DEPRECATED_effectiveAllocation - ); - assertEq(afterAllocation.accRewardsPerAllocatedToken, beforeValues.allocation.accRewardsPerAllocatedToken); - assertEq(afterAllocation.distributedRebates, beforeValues.allocation.distributedRebates); - - if (beforeValues.allocation.tokens > 0 && isAuth && poi != 0 && rewardsDestination == address(0)) { - assertEq( - afterServiceProvider.tokensStaked, - beforeValues.serviceProvider.tokensStaked + calcValues.indexerRewards - ); - } else { - assertEq(afterServiceProvider.tokensStaked, beforeValues.serviceProvider.tokensStaked); - } - assertEq(afterServiceProvider.tokensProvisioned, beforeValues.serviceProvider.tokensProvisioned); - assertEq( - afterServiceProvider.__DEPRECATED_tokensAllocated + beforeValues.allocation.tokens, - beforeValues.serviceProvider.__DEPRECATED_tokensAllocated - ); - assertEq( - afterServiceProvider.__DEPRECATED_tokensLocked, - beforeValues.serviceProvider.__DEPRECATED_tokensLocked - ); - assertEq( - 
afterServiceProvider.__DEPRECATED_tokensLockedUntil, - beforeValues.serviceProvider.__DEPRECATED_tokensLockedUntil - ); - - assertEq(afterSubgraphAllocations + beforeValues.allocation.tokens, beforeValues.subgraphAllocations); - - if (beforeValues.allocation.tokens > 0 && isAuth && poi != 0 && beforeValues.pool.tokens > 0) { - assertEq(afterPool.tokens, beforeValues.pool.tokens + calcValues.delegatorRewards); - } else { - assertEq(afterPool.tokens, beforeValues.pool.tokens); - } - } - - // use struct to avoid 'stack too deep' error - struct BeforeValuesCollect { - IHorizonStakingExtension.Allocation allocation; - DelegationPoolInternalTest pool; - ServiceProviderInternal serviceProvider; - uint256 stakingBalance; - uint256 senderBalance; - uint256 curationBalance; - uint256 beneficiaryBalance; - } - struct CalcValuesCollect { - uint256 protocolTaxTokens; - uint256 queryFees; - uint256 curationCutTokens; - uint256 newRebates; - uint256 payment; - uint256 delegationFeeCut; - } - struct AfterValuesCollect { - IHorizonStakingExtension.Allocation allocation; - DelegationPoolInternalTest pool; - ServiceProviderInternal serviceProvider; - uint256 stakingBalance; - uint256 senderBalance; - uint256 curationBalance; - uint256 beneficiaryBalance; - } - - function _collect(uint256 tokens, address allocationId) internal { - (, address msgSender, ) = vm.readCallers(); - - // before - BeforeValuesCollect memory beforeValues; - beforeValues.allocation = staking.getAllocation(allocationId); - beforeValues.pool = _getStorageDelegationPoolInternal( - beforeValues.allocation.indexer, - subgraphDataServiceLegacyAddress, - true - ); - beforeValues.serviceProvider = _getStorageServiceProviderInternal(beforeValues.allocation.indexer); - - (uint32 curationPercentage, uint32 protocolPercentage) = _getStorageProtocolTaxAndCuration(); - address rewardsDestination = _getStorageRewardsDestination(beforeValues.allocation.indexer); - - beforeValues.stakingBalance = 
token.balanceOf(address(staking)); - beforeValues.senderBalance = token.balanceOf(msgSender); - beforeValues.curationBalance = token.balanceOf(address(curation)); - beforeValues.beneficiaryBalance = token.balanceOf(rewardsDestination); - - // calc some stuff - CalcValuesCollect memory calcValues; - calcValues.protocolTaxTokens = tokens.mulPPMRoundUp(protocolPercentage); - calcValues.queryFees = tokens - calcValues.protocolTaxTokens; - calcValues.curationCutTokens = 0; - if (curation.isCurated(beforeValues.allocation.subgraphDeploymentID)) { - calcValues.curationCutTokens = calcValues.queryFees.mulPPMRoundUp(curationPercentage); - calcValues.queryFees -= calcValues.curationCutTokens; - } - calcValues.newRebates = ExponentialRebates.exponentialRebates( - calcValues.queryFees + beforeValues.allocation.collectedFees, - beforeValues.allocation.tokens, - alphaNumerator, - alphaDenominator, - lambdaNumerator, - lambdaDenominator - ); - calcValues.payment = calcValues.newRebates > calcValues.queryFees - ? 
calcValues.queryFees - : calcValues.newRebates; - calcValues.delegationFeeCut = 0; - if (beforeValues.pool.tokens > 0) { - calcValues.delegationFeeCut = - calcValues.payment - calcValues.payment.mulPPM(beforeValues.pool.__DEPRECATED_queryFeeCut); - calcValues.payment -= calcValues.delegationFeeCut; - } - - // staking.collect() - if (tokens > 0) { - vm.expectEmit(address(staking)); - emit IHorizonStakingExtension.RebateCollected( - msgSender, - beforeValues.allocation.indexer, - beforeValues.allocation.subgraphDeploymentID, - allocationId, - epochManager.currentEpoch(), - tokens, - calcValues.protocolTaxTokens, - calcValues.curationCutTokens, - calcValues.queryFees, - calcValues.payment, - calcValues.delegationFeeCut - ); - } - staking.collect(tokens, allocationId); - - // after - AfterValuesCollect memory afterValues; - afterValues.allocation = staking.getAllocation(allocationId); - afterValues.pool = _getStorageDelegationPoolInternal( - beforeValues.allocation.indexer, - subgraphDataServiceLegacyAddress, - true - ); - afterValues.serviceProvider = _getStorageServiceProviderInternal(beforeValues.allocation.indexer); - afterValues.stakingBalance = token.balanceOf(address(staking)); - afterValues.senderBalance = token.balanceOf(msgSender); - afterValues.curationBalance = token.balanceOf(address(curation)); - afterValues.beneficiaryBalance = token.balanceOf(rewardsDestination); - - // assert - assertEq(afterValues.senderBalance + tokens, beforeValues.senderBalance); - assertEq(afterValues.curationBalance, beforeValues.curationBalance + calcValues.curationCutTokens); - if (rewardsDestination != address(0)) { - assertEq(afterValues.beneficiaryBalance, beforeValues.beneficiaryBalance + calcValues.payment); - assertEq(afterValues.stakingBalance, beforeValues.stakingBalance + calcValues.delegationFeeCut); - } else { - assertEq(afterValues.beneficiaryBalance, beforeValues.beneficiaryBalance); - assertEq( - afterValues.stakingBalance, - beforeValues.stakingBalance + 
calcValues.delegationFeeCut + calcValues.payment - ); - } - - assertEq( - afterValues.allocation.collectedFees, - beforeValues.allocation.collectedFees + tokens - calcValues.protocolTaxTokens - calcValues.curationCutTokens - ); - assertEq(afterValues.allocation.indexer, beforeValues.allocation.indexer); - assertEq(afterValues.allocation.subgraphDeploymentID, beforeValues.allocation.subgraphDeploymentID); - assertEq(afterValues.allocation.tokens, beforeValues.allocation.tokens); - assertEq(afterValues.allocation.createdAtEpoch, beforeValues.allocation.createdAtEpoch); - assertEq(afterValues.allocation.closedAtEpoch, beforeValues.allocation.closedAtEpoch); - assertEq( - afterValues.allocation.accRewardsPerAllocatedToken, - beforeValues.allocation.accRewardsPerAllocatedToken - ); - assertEq( - afterValues.allocation.distributedRebates, - beforeValues.allocation.distributedRebates + calcValues.newRebates - ); - - assertEq(afterValues.pool.tokens, beforeValues.pool.tokens + calcValues.delegationFeeCut); - assertEq(afterValues.pool.shares, beforeValues.pool.shares); - assertEq(afterValues.pool.tokensThawing, beforeValues.pool.tokensThawing); - assertEq(afterValues.pool.sharesThawing, beforeValues.pool.sharesThawing); - assertEq(afterValues.pool.thawingNonce, beforeValues.pool.thawingNonce); - - assertEq(afterValues.serviceProvider.tokensProvisioned, beforeValues.serviceProvider.tokensProvisioned); - if (rewardsDestination != address(0)) { - assertEq(afterValues.serviceProvider.tokensStaked, beforeValues.serviceProvider.tokensStaked); - } else { - assertEq( - afterValues.serviceProvider.tokensStaked, - beforeValues.serviceProvider.tokensStaked + calcValues.payment - ); - } - } - /* * STORAGE HELPERS */ @@ -1964,22 +1536,6 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { return vm.load(address(staking), bytes32(slot)) == bytes32(uint256(1)); } - function _setStorageDeprecatedThawingPeriod(uint32 _thawingPeriod) internal { - uint256 slot = 13; - - // Read 
the current value of the slot - uint256 currentSlotValue = uint256(vm.load(address(staking), bytes32(slot))); - - // Create a mask to clear the bits for __DEPRECATED_thawingPeriod (bits 0-31) - uint256 mask = ~(uint256(0xFFFFFFFF)); // Mask to clear the first 32 bits - - // Clear the bits for __DEPRECATED_thawingPeriod and set the new value - uint256 newSlotValue = (currentSlotValue & mask) | uint256(_thawingPeriod); - - // Store the updated value back into the slot - vm.store(address(staking), bytes32(slot), bytes32(newSlotValue)); - } - function _setStorageServiceProvider( address _indexer, uint256 _tokensStaked, @@ -2091,62 +1647,9 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { return delegation; } - function _setStorageAllocation( - IHorizonStakingExtension.Allocation memory allocation, - address allocationId, - uint256 tokens - ) internal { - // __DEPRECATED_allocations - uint256 allocationsSlot = 15; - bytes32 allocationBaseSlot = keccak256(abi.encode(allocationId, allocationsSlot)); - vm.store(address(staking), allocationBaseSlot, bytes32(uint256(uint160(allocation.indexer)))); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 1), allocation.subgraphDeploymentID); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 2), bytes32(tokens)); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 3), bytes32(allocation.createdAtEpoch)); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 4), bytes32(allocation.closedAtEpoch)); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 5), bytes32(allocation.collectedFees)); - vm.store( - address(staking), - bytes32(uint256(allocationBaseSlot) + 6), - bytes32(allocation.__DEPRECATED_effectiveAllocation) - ); - vm.store( - address(staking), - bytes32(uint256(allocationBaseSlot) + 7), - bytes32(allocation.accRewardsPerAllocatedToken) - ); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 8), 
bytes32(allocation.distributedRebates)); - - // _serviceProviders - uint256 serviceProviderSlot = 14; - bytes32 serviceProviderBaseSlot = keccak256(abi.encode(allocation.indexer, serviceProviderSlot)); - uint256 currentTokensStaked = uint256(vm.load(address(staking), serviceProviderBaseSlot)); - uint256 currentTokensProvisioned = uint256( - vm.load(address(staking), bytes32(uint256(serviceProviderBaseSlot) + 1)) - ); - vm.store( - address(staking), - bytes32(uint256(serviceProviderBaseSlot) + 0), - bytes32(currentTokensStaked + tokens) - ); - vm.store( - address(staking), - bytes32(uint256(serviceProviderBaseSlot) + 1), - bytes32(currentTokensProvisioned + tokens) - ); - - // __DEPRECATED_subgraphAllocations - uint256 subgraphsAllocationsSlot = 16; - bytes32 subgraphAllocationsBaseSlot = keccak256( - abi.encode(allocation.subgraphDeploymentID, subgraphsAllocationsSlot) - ); - uint256 currentAllocatedTokens = uint256(vm.load(address(staking), subgraphAllocationsBaseSlot)); - vm.store(address(staking), subgraphAllocationsBaseSlot, bytes32(currentAllocatedTokens + tokens)); - } - - function _getStorageSubgraphAllocations(bytes32 subgraphDeploymentId) internal view returns (uint256) { + function _getStorageSubgraphAllocations(bytes32 subgraphDeploymentID) internal view returns (uint256) { uint256 subgraphsAllocationsSlot = 16; - bytes32 subgraphAllocationsBaseSlot = keccak256(abi.encode(subgraphDeploymentId, subgraphsAllocationsSlot)); + bytes32 subgraphAllocationsBaseSlot = keccak256(abi.encode(subgraphDeploymentID, subgraphsAllocationsSlot)); return uint256(vm.load(address(staking), subgraphAllocationsBaseSlot)); } @@ -2162,40 +1665,6 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { return address(uint160(uint256(vm.load(address(staking), rewardsDestinationSlotBaseSlot)))); } - function _setStorageMaxAllocationEpochs(uint256 maxAllocationEpochs) internal { - uint256 slot = 13; - - // Read the current value of the storage slot - uint256 
currentSlotValue = uint256(vm.load(address(staking), bytes32(slot))); - - // Mask to clear the specific bits for __DEPRECATED_maxAllocationEpochs (bits 128-159) - uint256 mask = ~(uint256(0xFFFFFFFF) << 128); - - // Clear the bits and set the new maxAllocationEpochs value - uint256 newSlotValue = (currentSlotValue & mask) | (uint256(maxAllocationEpochs) << 128); - - // Store the updated value back into the slot - vm.store(address(staking), bytes32(slot), bytes32(newSlotValue)); - - uint256 readMaxAllocationEpochs = _getStorageMaxAllocationEpochs(); - assertEq(readMaxAllocationEpochs, maxAllocationEpochs); - } - - function _getStorageMaxAllocationEpochs() internal view returns (uint256) { - uint256 slot = 13; - - // Read the current value of the storage slot - uint256 currentSlotValue = uint256(vm.load(address(staking), bytes32(slot))); - - // Mask to isolate bits 128-159 - uint256 mask = uint256(0xFFFFFFFF) << 128; - - // Extract the maxAllocationEpochs by masking and shifting - uint256 maxAllocationEpochs = (currentSlotValue & mask) >> 128; - - return maxAllocationEpochs; - } - function _setStorageDelegationPool( address serviceProvider, uint256 tokens, @@ -2211,148 +1680,6 @@ abstract contract HorizonStakingSharedTest is GraphBaseTest { vm.store(address(staking), tokensSlot, bytes32(tokens)); } - function _setStorageRebateParameters( - uint32 alphaNumerator_, - uint32 alphaDenominator_, - uint32 lambdaNumerator_, - uint32 lambdaDenominator_ - ) internal { - // Store alpha numerator and denominator in slot 13 - uint256 alphaSlot = 13; - - uint256 newAlphaSlotValue; - { - uint256 alphaNumeratorOffset = 160; // Offset for __DEPRECATED_alphaNumerator (20th byte) - uint256 alphaDenominatorOffset = 192; // Offset for __DEPRECATED_alphaDenominator (24th byte) - - // Read current value of the slot - uint256 currentAlphaSlotValue = uint256(vm.load(address(staking), bytes32(alphaSlot))); - - // Create a mask to clear the bits for alphaNumerator and alphaDenominator - 
uint256 alphaMask = ~(uint256(0xFFFFFFFF) << alphaNumeratorOffset) & - ~(uint256(0xFFFFFFFF) << alphaDenominatorOffset); - - // Clear and set new values - newAlphaSlotValue = - (currentAlphaSlotValue & alphaMask) | - (uint256(alphaNumerator_) << alphaNumeratorOffset) | - (uint256(alphaDenominator_) << alphaDenominatorOffset); - } - - // Store the updated value back into the slot - vm.store(address(staking), bytes32(alphaSlot), bytes32(newAlphaSlotValue)); - - // Store lambda numerator and denominator in slot 25 - uint256 lambdaSlot = 25; - - uint256 newLambdaSlotValue; - { - uint256 lambdaNumeratorOffset = 160; // Offset for lambdaNumerator (20th byte) - uint256 lambdaDenominatorOffset = 192; // Offset for lambdaDenominator (24th byte) - - // Read current value of the slot - uint256 currentLambdaSlotValue = uint256(vm.load(address(staking), bytes32(lambdaSlot))); - - // Create a mask to clear the bits for lambdaNumerator and lambdaDenominator - uint256 lambdaMask = ~(uint256(0xFFFFFFFF) << lambdaNumeratorOffset) & - ~(uint256(0xFFFFFFFF) << lambdaDenominatorOffset); - - // Clear and set new values - newLambdaSlotValue = - (currentLambdaSlotValue & lambdaMask) | - (uint256(lambdaNumerator_) << lambdaNumeratorOffset) | - (uint256(lambdaDenominator_) << lambdaDenominatorOffset); - } - - // Store the updated value back into the slot - vm.store(address(staking), bytes32(lambdaSlot), bytes32(newLambdaSlotValue)); - - // Verify the storage - ( - uint32 readAlphaNumerator, - uint32 readAlphaDenominator, - uint32 readLambdaNumerator, - uint32 readLambdaDenominator - ) = _getStorageRebateParameters(); - assertEq(readAlphaNumerator, alphaNumerator_); - assertEq(readAlphaDenominator, alphaDenominator_); - assertEq(readLambdaNumerator, lambdaNumerator_); - assertEq(readLambdaDenominator, lambdaDenominator_); - } - - function _getStorageRebateParameters() internal view returns (uint32, uint32, uint32, uint32) { - // Read alpha numerator and denominator - uint256 alphaSlot = 13; 
- uint256 alphaValues = uint256(vm.load(address(staking), bytes32(alphaSlot))); - // forge-lint: disable-next-line(unsafe-typecast) - uint32 alphaNumerator_ = uint32(alphaValues >> 160); - // forge-lint: disable-next-line(unsafe-typecast) - uint32 alphaDenominator_ = uint32(alphaValues >> 192); - - // Read lambda numerator and denominator - uint256 lambdaSlot = 25; - uint256 lambdaValues = uint256(vm.load(address(staking), bytes32(lambdaSlot))); - // forge-lint: disable-next-line(unsafe-typecast) - uint32 lambdaNumerator_ = uint32(lambdaValues >> 160); - // forge-lint: disable-next-line(unsafe-typecast) - uint32 lambdaDenominator_ = uint32(lambdaValues >> 192); - - return (alphaNumerator_, alphaDenominator_, lambdaNumerator_, lambdaDenominator_); - } - - // function _setStorageProtocolTaxAndCuration(uint32 curationPercentage, uint32 taxPercentage) private { - // bytes32 slot = bytes32(uint256(13)); - // uint256 curationOffset = 4; - // uint256 protocolTaxOffset = 8; - // bytes32 originalValue = vm.load(address(staking), slot); - - // bytes32 newProtocolTaxValue = bytes32( - // ((uint256(originalValue) & - // ~((0xFFFFFFFF << (8 * curationOffset)) | (0xFFFFFFFF << (8 * protocolTaxOffset)))) | - // (uint256(curationPercentage) << (8 * curationOffset))) | - // (uint256(taxPercentage) << (8 * protocolTaxOffset)) - // ); - // vm.store(address(staking), slot, newProtocolTaxValue); - - // (uint32 readCurationPercentage, uint32 readTaxPercentage) = _getStorageProtocolTaxAndCuration(); - // assertEq(readCurationPercentage, curationPercentage); - // } - - function _setStorageProtocolTaxAndCuration(uint32 curationPercentage, uint32 taxPercentage) internal { - bytes32 slot = bytes32(uint256(13)); - - // Offsets for the percentages - uint256 curationOffset = 32; // __DEPRECATED_curationPercentage (2nd uint32, bits 32-63) - uint256 protocolTaxOffset = 64; // __DEPRECATED_protocolPercentage (3rd uint32, bits 64-95) - - // Read the current slot value - uint256 originalValue = 
uint256(vm.load(address(staking), slot)); - - // Create masks to clear the specific bits for the two percentages - uint256 mask = ~(uint256(0xFFFFFFFF) << curationOffset) & ~(uint256(0xFFFFFFFF) << protocolTaxOffset); // Mask for curationPercentage // Mask for protocolTax - - // Clear the existing bits and set the new values - uint256 newSlotValue = (originalValue & mask) | - (uint256(curationPercentage) << curationOffset) | - (uint256(taxPercentage) << protocolTaxOffset); - - // Store the updated slot value - vm.store(address(staking), slot, bytes32(newSlotValue)); - - // Verify the values were set correctly - (uint32 readCurationPercentage, uint32 readTaxPercentage) = _getStorageProtocolTaxAndCuration(); - assertEq(readCurationPercentage, curationPercentage); - assertEq(readTaxPercentage, taxPercentage); - } - - function _getStorageProtocolTaxAndCuration() internal view returns (uint32, uint32) { - bytes32 slot = bytes32(uint256(13)); - bytes32 value = vm.load(address(staking), slot); - uint32 curationPercentage = uint32(uint256(value) >> 32); - uint32 taxPercentage = uint32(uint256(value) >> 64); - return (curationPercentage, taxPercentage); - } - /* * MISC: private functions to help with testing */ diff --git a/packages/horizon/test/unit/shared/payments-escrow/PaymentsEscrowShared.t.sol b/packages/horizon/test/unit/shared/payments-escrow/PaymentsEscrowShared.t.sol index ca62aa02b..8e51aed9f 100644 --- a/packages/horizon/test/unit/shared/payments-escrow/PaymentsEscrowShared.t.sol +++ b/packages/horizon/test/unit/shared/payments-escrow/PaymentsEscrowShared.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { GraphBaseTest } from "../../GraphBase.t.sol"; diff --git a/packages/horizon/test/unit/staking/HorizonStaking.t.sol b/packages/horizon/test/unit/staking/HorizonStaking.t.sol index 8046723f7..256fce859 
100644 --- a/packages/horizon/test/unit/staking/HorizonStaking.t.sol +++ b/packages/horizon/test/unit/staking/HorizonStaking.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { stdStorage, StdStorage } from "forge-std/Test.sol"; diff --git a/packages/horizon/test/unit/staking/allocation/allocation.t.sol b/packages/horizon/test/unit/staking/allocation/allocation.t.sol deleted file mode 100644 index 2b7349817..000000000 --- a/packages/horizon/test/unit/staking/allocation/allocation.t.sol +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.27; - -import { HorizonStakingTest } from "../HorizonStaking.t.sol"; -import { IHorizonStakingExtension } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol"; - -contract HorizonStakingAllocationTest is HorizonStakingTest { - /* - * TESTS - */ - - function testAllocation_GetAllocationState_Active(uint256 tokens) public useIndexer useAllocation(tokens) { - IHorizonStakingExtension.AllocationState state = staking.getAllocationState(_allocationId); - assertEq(uint16(state), uint16(IHorizonStakingExtension.AllocationState.Active)); - } - - function testAllocation_GetAllocationState_Null() public view { - IHorizonStakingExtension.AllocationState state = staking.getAllocationState(_allocationId); - assertEq(uint16(state), uint16(IHorizonStakingExtension.AllocationState.Null)); - } - - function testAllocation_IsAllocation(uint256 tokens) public useIndexer useAllocation(tokens) { - bool isAllocation = staking.isAllocation(_allocationId); - assertTrue(isAllocation); - } - - function testAllocation_IsNotAllocation() public view { - bool isAllocation = staking.isAllocation(_allocationId); - assertFalse(isAllocation); - } -} diff --git a/packages/horizon/test/unit/staking/allocation/close.t.sol b/packages/horizon/test/unit/staking/allocation/close.t.sol deleted file mode 100644 index 41eddfe0f..000000000 --- 
a/packages/horizon/test/unit/staking/allocation/close.t.sol +++ /dev/null @@ -1,113 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.27; - -import { HorizonStakingTest } from "../HorizonStaking.t.sol"; -import { PPMMath } from "../../../../contracts/libraries/PPMMath.sol"; - -contract HorizonStakingCloseAllocationTest is HorizonStakingTest { - using PPMMath for uint256; - - bytes32 internal constant _POI = keccak256("poi"); - - /* - * MODIFIERS - */ - - modifier useLegacyOperator() { - resetPrank(users.indexer); - _setOperator(subgraphDataServiceLegacyAddress, users.operator, true); - vm.startPrank(users.operator); - _; - vm.stopPrank(); - } - - /* - * TESTS - */ - - function testCloseAllocation(uint256 tokens) public useIndexer useAllocation(1 ether) { - tokens = bound(tokens, 1, MAX_STAKING_TOKENS); - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - // Skip 15 epochs - vm.roll(15); - - _closeAllocation(_allocationId, _POI); - } - - function testCloseAllocation_Operator(uint256 tokens) public useLegacyOperator useAllocation(1 ether) { - tokens = bound(tokens, 1, MAX_STAKING_TOKENS); - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - // Skip 15 epochs - vm.roll(15); - - _closeAllocation(_allocationId, _POI); - } - - function testCloseAllocation_WithBeneficiaryAddress(uint256 tokens) public useIndexer useAllocation(1 ether) { - tokens = bound(tokens, 1, MAX_STAKING_TOKENS); - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - address beneficiary = makeAddr("beneficiary"); - _setStorageRewardsDestination(users.indexer, beneficiary); - - // Skip 15 epochs - vm.roll(15); - - _closeAllocation(_allocationId, _POI); - } - - function testCloseAllocation_RevertWhen_NotActive() public { - vm.expectRevert("!active"); - staking.closeAllocation(_allocationId, _POI); - } - - function testCloseAllocation_RevertWhen_NotIndexer() public useIndexer useAllocation(1 
ether) { - resetPrank(users.delegator); - vm.expectRevert("!auth"); - staking.closeAllocation(_allocationId, _POI); - } - - function testCloseAllocation_AfterMaxEpochs_AnyoneCanClose( - uint256 tokens - ) public useIndexer useAllocation(1 ether) { - tokens = bound(tokens, 1, MAX_STAKING_TOKENS); - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - // Skip to over the max allocation epochs - vm.roll((MAX_ALLOCATION_EPOCHS + 1) * EPOCH_LENGTH + 1); - - resetPrank(users.delegator); - _closeAllocation(_allocationId, 0x0); - } - - function testCloseAllocation_RevertWhen_ZeroTokensNotAuthorized() public useIndexer useAllocation(1 ether) { - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, 100 ether, 0, 0); - - resetPrank(users.delegator); - vm.expectRevert("!auth"); - staking.closeAllocation(_allocationId, 0x0); - } - - function testCloseAllocation_WithDelegation( - uint256 tokens, - uint256 delegationTokens, - uint32 indexingRewardCut - ) public useIndexer useAllocation(1 ether) { - tokens = bound(tokens, 2, MAX_STAKING_TOKENS); - delegationTokens = bound(delegationTokens, 0, MAX_STAKING_TOKENS); - vm.assume(indexingRewardCut <= MAX_PPM); - - uint256 legacyAllocationTokens = tokens / 2; - uint256 provisionTokens = tokens - legacyAllocationTokens; - - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, provisionTokens, 0, 0); - _setStorageDelegationPool(users.indexer, delegationTokens, indexingRewardCut, 0); - - // Skip 15 epochs - vm.roll(15); - - _closeAllocation(_allocationId, _POI); - } -} diff --git a/packages/horizon/test/unit/staking/allocation/collect.t.sol b/packages/horizon/test/unit/staking/allocation/collect.t.sol deleted file mode 100644 index a05c55220..000000000 --- a/packages/horizon/test/unit/staking/allocation/collect.t.sol +++ /dev/null @@ -1,81 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.27; - -import { console } from "forge-std/console.sol"; - -import { 
HorizonStakingTest } from "../HorizonStaking.t.sol"; -import { ExponentialRebates } from "../../../../contracts/staking/libraries/ExponentialRebates.sol"; -import { PPMMath } from "../../../../contracts/libraries/PPMMath.sol"; - -contract HorizonStakingCollectAllocationTest is HorizonStakingTest { - using PPMMath for uint256; - - /* - * TESTS - */ - - function testCollectAllocation_RevertWhen_InvalidAllocationId( - uint256 tokens - ) public useIndexer useAllocation(1 ether) { - vm.expectRevert("!alloc"); - staking.collect(tokens, address(0)); - } - - function testCollectAllocation_RevertWhen_Null(uint256 tokens) public { - vm.expectRevert("!collect"); - staking.collect(tokens, _allocationId); - } - - function testCollect_Tokens( - uint256 allocationTokens, - uint256 collectTokens, - uint256 curationTokens, - uint32 curationPercentage, - uint32 protocolTaxPercentage, - uint256 delegationTokens, - uint32 queryFeeCut - ) public useIndexer useRebateParameters useAllocation(allocationTokens) { - collectTokens = bound(collectTokens, 0, MAX_STAKING_TOKENS); - curationTokens = bound(curationTokens, 0, MAX_STAKING_TOKENS); - delegationTokens = bound(delegationTokens, 0, MAX_STAKING_TOKENS); - vm.assume(curationPercentage <= MAX_PPM); - vm.assume(protocolTaxPercentage <= MAX_PPM); - vm.assume(queryFeeCut <= MAX_PPM); - - resetPrank(users.indexer); - _setStorageProtocolTaxAndCuration(curationPercentage, protocolTaxPercentage); - console.log("queryFeeCut", queryFeeCut); - _setStorageDelegationPool(users.indexer, delegationTokens, 0, queryFeeCut); - curation.signal(_SUBGRAPH_DEPLOYMENT_ID, curationTokens); - - resetPrank(users.gateway); - approve(address(staking), collectTokens); - _collect(collectTokens, _allocationId); - } - - function testCollect_WithBeneficiaryAddress( - uint256 allocationTokens, - uint256 collectTokens - ) public useIndexer useRebateParameters useAllocation(allocationTokens) { - collectTokens = bound(collectTokens, 0, MAX_STAKING_TOKENS); - - address 
beneficiary = makeAddr("beneficiary"); - _setStorageRewardsDestination(users.indexer, beneficiary); - - resetPrank(users.gateway); - approve(address(staking), collectTokens); - _collect(collectTokens, _allocationId); - - uint256 newRebates = ExponentialRebates.exponentialRebates( - collectTokens, - allocationTokens, - alphaNumerator, - alphaDenominator, - lambdaNumerator, - lambdaDenominator - ); - uint256 payment = newRebates > collectTokens ? collectTokens : newRebates; - - assertEq(token.balanceOf(beneficiary), payment); - } -} diff --git a/packages/horizon/test/unit/staking/delegation/addToPool.t.sol b/packages/horizon/test/unit/staking/delegation/addToPool.t.sol index 5c61b1ffc..46a86b096 100644 --- a/packages/horizon/test/unit/staking/delegation/addToPool.t.sol +++ b/packages/horizon/test/unit/staking/delegation/addToPool.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/delegation/delegate.t.sol b/packages/horizon/test/unit/staking/delegation/delegate.t.sol index 5395a8464..2209b2dff 100644 --- a/packages/horizon/test/unit/staking/delegation/delegate.t.sol +++ b/packages/horizon/test/unit/staking/delegation/delegate.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; diff --git a/packages/horizon/test/unit/staking/delegation/forceWithdrawDelegated.t.sol b/packages/horizon/test/unit/staking/delegation/forceWithdrawDelegated.t.sol new file mode 100644 index 000000000..5331fd9ea --- /dev/null +++ b/packages/horizon/test/unit/staking/delegation/forceWithdrawDelegated.t.sol @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: 
MIT +pragma solidity ^0.8.27; + +import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; + +import { HorizonStakingTest } from "../HorizonStaking.t.sol"; + +contract HorizonStakingForceWithdrawDelegatedTest is HorizonStakingTest { + /* + * MODIFIERS + */ + + modifier useDelegator() { + resetPrank(users.delegator); + _; + } + + /* + * HELPERS + */ + + function _setLegacyDelegation( + address _indexer, + address _delegator, + uint256 _shares, + uint256 __DEPRECATED_tokensLocked, + uint256 __DEPRECATED_tokensLockedUntil + ) public { + // Calculate the base storage slot for the serviceProvider in the mapping + bytes32 baseSlot = keccak256(abi.encode(_indexer, uint256(20))); + + // Calculate the slot for the delegator's DelegationInternal struct + bytes32 delegatorSlot = keccak256(abi.encode(_delegator, bytes32(uint256(baseSlot) + 4))); + + // Use vm.store to set each field of the struct + vm.store(address(staking), bytes32(uint256(delegatorSlot)), bytes32(_shares)); + vm.store(address(staking), bytes32(uint256(delegatorSlot) + 1), bytes32(__DEPRECATED_tokensLocked)); + vm.store(address(staking), bytes32(uint256(delegatorSlot) + 2), bytes32(__DEPRECATED_tokensLockedUntil)); + } + + /* + * ACTIONS + */ + + function _forceWithdrawDelegated(address _indexer, address _delegator) internal { + IHorizonStakingTypes.DelegationPool memory pool = staking.getDelegationPool( + _indexer, + subgraphDataServiceLegacyAddress + ); + uint256 beforeStakingBalance = token.balanceOf(address(staking)); + uint256 beforeDelegatorBalance = token.balanceOf(_delegator); + + vm.expectEmit(address(staking)); + emit IHorizonStakingMain.StakeDelegatedWithdrawn(_indexer, _delegator, pool.tokens); + staking.forceWithdrawDelegated(_indexer, _delegator); + + uint256 afterStakingBalance = token.balanceOf(address(staking)); + 
uint256 afterDelegatorBalance = token.balanceOf(_delegator); + + assertEq(afterStakingBalance, beforeStakingBalance - pool.tokens); + assertEq(afterDelegatorBalance - pool.tokens, beforeDelegatorBalance); + + DelegationInternal memory delegation = _getStorageDelegation( + _indexer, + subgraphDataServiceLegacyAddress, + _delegator, + true + ); + assertEq(delegation.shares, 0); + assertEq(delegation.__DEPRECATED_tokensLocked, 0); + assertEq(delegation.__DEPRECATED_tokensLockedUntil, 0); + } + + /* + * TESTS + */ + + function testForceWithdrawDelegated_Tokens(uint256 tokensLocked) public useDelegator { + vm.assume(tokensLocked > 0); + + _setStorageDelegationPool(users.indexer, tokensLocked, 0, 0); + _setLegacyDelegation(users.indexer, users.delegator, 0, tokensLocked, 1); + require(token.transfer(address(staking), tokensLocked), "transfer failed"); + + // switch to a third party (not the delegator) + resetPrank(users.operator); + + _forceWithdrawDelegated(users.indexer, users.delegator); + } + + function testForceWithdrawDelegated_CalledByDelegator(uint256 tokensLocked) public useDelegator { + vm.assume(tokensLocked > 0); + + _setStorageDelegationPool(users.indexer, tokensLocked, 0, 0); + _setLegacyDelegation(users.indexer, users.delegator, 0, tokensLocked, 1); + require(token.transfer(address(staking), tokensLocked), "transfer failed"); + + // delegator can also call forceWithdrawDelegated on themselves + _forceWithdrawDelegated(users.indexer, users.delegator); + } + + function testForceWithdrawDelegated_RevertWhen_NoTokens() public useDelegator { + _setStorageDelegationPool(users.indexer, 0, 0, 0); + _setLegacyDelegation(users.indexer, users.delegator, 0, 0, 0); + + // switch to a third party + resetPrank(users.operator); + + bytes memory expectedError = abi.encodeWithSignature("HorizonStakingNothingToWithdraw()"); + vm.expectRevert(expectedError); + staking.forceWithdrawDelegated(users.indexer, users.delegator); + } +} diff --git 
a/packages/horizon/test/unit/staking/delegation/legacyWithdraw.t.sol b/packages/horizon/test/unit/staking/delegation/legacyWithdraw.t.sol index 59acde904..0c5db17f5 100644 --- a/packages/horizon/test/unit/staking/delegation/legacyWithdraw.t.sol +++ b/packages/horizon/test/unit/staking/delegation/legacyWithdraw.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; diff --git a/packages/horizon/test/unit/staking/delegation/redelegate.t.sol b/packages/horizon/test/unit/staking/delegation/redelegate.t.sol index 710586785..a8cd04a59 100644 --- a/packages/horizon/test/unit/staking/delegation/redelegate.t.sol +++ b/packages/horizon/test/unit/staking/delegation/redelegate.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; diff --git a/packages/horizon/test/unit/staking/delegation/undelegate.t.sol b/packages/horizon/test/unit/staking/delegation/undelegate.t.sol index 15fa5c4c1..faa8d4f30 100644 --- a/packages/horizon/test/unit/staking/delegation/undelegate.t.sol +++ b/packages/horizon/test/unit/staking/delegation/undelegate.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; diff --git a/packages/horizon/test/unit/staking/delegation/withdraw.t.sol b/packages/horizon/test/unit/staking/delegation/withdraw.t.sol index 31155cec2..bdc811c56 100644 --- a/packages/horizon/test/unit/staking/delegation/withdraw.t.sol +++ 
b/packages/horizon/test/unit/staking/delegation/withdraw.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; @@ -160,4 +160,56 @@ contract HorizonStakingWithdrawDelegationTest is HorizonStakingTest { resetPrank(users.delegator); _withdrawDelegated(users.indexer, subgraphDataServiceAddress, 0); } + + function testWithdrawDelegation_GetThawedTokens( + uint256 delegationAmount, + uint256 withdrawShares + ) + public + useIndexer + useProvision(10_000_000 ether, 0, MAX_THAWING_PERIOD) + useDelegation(delegationAmount) + useUndelegate(withdrawShares) + { + ILinkedList.List memory thawingRequests = staking.getThawRequestList( + IHorizonStakingTypes.ThawRequestType.Delegation, + users.indexer, + subgraphDataServiceAddress, + users.delegator + ); + ThawRequest memory thawRequest = staking.getThawRequest( + IHorizonStakingTypes.ThawRequestType.Delegation, + thawingRequests.tail + ); + + // Before thawing period passes, thawed tokens should be 0 + uint256 thawedTokensBefore = staking.getThawedTokens( + IHorizonStakingTypes.ThawRequestType.Delegation, + users.indexer, + subgraphDataServiceAddress, + users.delegator + ); + assertEq(thawedTokensBefore, 0); + + // Skip past thawing period + skip(thawRequest.thawingUntil + 1); + + // After thawing period, thawed tokens should match expected amount + uint256 thawedTokensAfter = staking.getThawedTokens( + IHorizonStakingTypes.ThawRequestType.Delegation, + users.indexer, + subgraphDataServiceAddress, + users.delegator + ); + + // Thawed tokens should be greater than 0 and should match what we can withdraw + assertGt(thawedTokensAfter, 0); + + // Withdraw and verify the amount matches + uint256 balanceBefore = token.balanceOf(users.delegator); + 
_withdrawDelegated(users.indexer, subgraphDataServiceAddress, 0); + uint256 balanceAfter = token.balanceOf(users.delegator); + + assertEq(balanceAfter - balanceBefore, thawedTokensAfter); + } } diff --git a/packages/horizon/test/unit/staking/governance/governance.t.sol b/packages/horizon/test/unit/staking/governance/governance.t.sol index cc2a54465..7d6c90461 100644 --- a/packages/horizon/test/unit/staking/governance/governance.t.sol +++ b/packages/horizon/test/unit/staking/governance/governance.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; @@ -37,19 +37,6 @@ contract HorizonStakingGovernanceTest is HorizonStakingTest { staking.setDelegationSlashingEnabled(); } - function testGovernance_ClearThawingPeriod(uint32 thawingPeriod) public useGovernor { - // simulate previous thawing period - _setStorageDeprecatedThawingPeriod(thawingPeriod); - - _clearThawingPeriod(); - } - - function testGovernance_ClearThawingPeriod_NotGovernor() public useIndexer { - bytes memory expectedError = abi.encodeWithSignature("ManagedOnlyGovernor()"); - vm.expectRevert(expectedError); - staking.clearThawingPeriod(); - } - function testGovernance__SetMaxThawingPeriod(uint64 maxThawingPeriod) public useGovernor { _setMaxThawingPeriod(maxThawingPeriod); } diff --git a/packages/horizon/test/unit/staking/legacy/isAllocation.t.sol b/packages/horizon/test/unit/staking/legacy/isAllocation.t.sol new file mode 100644 index 000000000..4e74e29c9 --- /dev/null +++ b/packages/horizon/test/unit/staking/legacy/isAllocation.t.sol @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; +import { HorizonStakingSharedTest } from "../../shared/horizon-staking/HorizonStakingShared.t.sol"; + +contract HorizonStakingIsAllocationTest is 
HorizonStakingSharedTest { + /* + * TESTS + */ + + function test_IsAllocation_ReturnsFalse_WhenAllocationDoesNotExist() public { + address nonExistentAllocationId = makeAddr("nonExistentAllocation"); + assertFalse(staking.isAllocation(nonExistentAllocationId)); + } + + function test_IsAllocation_ReturnsTrue_WhenActiveAllocationExists() public { + address allocationId = makeAddr("activeAllocation"); + + // Set up an active legacy allocation in storage + _setLegacyAllocationInStaking( + allocationId, + users.indexer, + bytes32("subgraphDeploymentId"), + 1000 ether, // tokens + 1, // createdAtEpoch + 0 // closedAtEpoch (0 = still active) + ); + + assertTrue(staking.isAllocation(allocationId)); + } + + function test_IsAllocation_ReturnsTrue_WhenClosedAllocationExists() public { + address allocationId = makeAddr("closedAllocation"); + + // Set up a closed legacy allocation in storage + _setLegacyAllocationInStaking( + allocationId, + users.indexer, + bytes32("subgraphDeploymentId"), + 1000 ether, // tokens + 1, // createdAtEpoch + 10 // closedAtEpoch (non-zero = closed) + ); + + assertTrue(staking.isAllocation(allocationId)); + } + + function test_IsAllocation_ReturnsFalse_WhenIndexerIsZeroAddress() public { + address allocationId = makeAddr("zeroIndexerAllocation"); + + // Set up an allocation with zero indexer (should be considered Null) + _setLegacyAllocationInStaking( + allocationId, + address(0), // indexer is zero + bytes32("subgraphDeploymentId"), + 1000 ether, + 1, + 0 + ); + + assertFalse(staking.isAllocation(allocationId)); + } + + /* + * HELPERS + */ + + /** + * @notice Sets a legacy allocation directly in HorizonStaking storage + * @dev The __DEPRECATED_allocations mapping is at storage slot 10 in HorizonStakingStorage + * The LegacyAllocation struct has the following layout: + * - slot 0: indexer (address) + * - slot 1: subgraphDeploymentID (bytes32) + * - slot 2: tokens (uint256) + * - slot 3: createdAtEpoch (uint256) + * - slot 4: closedAtEpoch (uint256) 
+ * - slot 5: collectedFees (uint256) + * - slot 6: __DEPRECATED_effectiveAllocation (uint256) + * - slot 7: accRewardsPerAllocatedToken (uint256) + * - slot 8: distributedRebates (uint256) + */ + function _setLegacyAllocationInStaking( + address _allocationId, + address _indexer, + bytes32 _subgraphDeploymentId, + uint256 _tokens, + uint256 _createdAtEpoch, + uint256 _closedAtEpoch + ) internal { + // Storage slot for __DEPRECATED_allocations mapping in HorizonStaking + // Use `forge inspect HorizonStaking storage-layout` to verify + uint256 allocationsSlot = 15; + bytes32 allocationBaseSlot = keccak256(abi.encode(_allocationId, allocationsSlot)); + + // Set indexer (slot 0) + vm.store(address(staking), allocationBaseSlot, bytes32(uint256(uint160(_indexer)))); + // Set subgraphDeploymentID (slot 1) + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 1), _subgraphDeploymentId); + // Set tokens (slot 2) + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 2), bytes32(_tokens)); + // Set createdAtEpoch (slot 3) + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 3), bytes32(_createdAtEpoch)); + // Set closedAtEpoch (slot 4) + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 4), bytes32(_closedAtEpoch)); + } +} diff --git a/packages/horizon/test/unit/staking/operator/locked.t.sol b/packages/horizon/test/unit/staking/operator/locked.t.sol index 474407692..83f753348 100644 --- a/packages/horizon/test/unit/staking/operator/locked.t.sol +++ b/packages/horizon/test/unit/staking/operator/locked.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/operator/operator.t.sol b/packages/horizon/test/unit/staking/operator/operator.t.sol index 672269aab..b52b9c6a3 100644 --- a/packages/horizon/test/unit/staking/operator/operator.t.sol +++ 
b/packages/horizon/test/unit/staking/operator/operator.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/provision/deprovision.t.sol b/packages/horizon/test/unit/staking/provision/deprovision.t.sol index 51725b111..c37410b8c 100644 --- a/packages/horizon/test/unit/staking/provision/deprovision.t.sol +++ b/packages/horizon/test/unit/staking/provision/deprovision.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/provision/locked.t.sol b/packages/horizon/test/unit/staking/provision/locked.t.sol index f7f95c6ac..f48ca384d 100644 --- a/packages/horizon/test/unit/staking/provision/locked.t.sol +++ b/packages/horizon/test/unit/staking/provision/locked.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/provision/parameters.t.sol b/packages/horizon/test/unit/staking/provision/parameters.t.sol index 3c3c745de..0b3ed7203 100644 --- a/packages/horizon/test/unit/staking/provision/parameters.t.sol +++ b/packages/horizon/test/unit/staking/provision/parameters.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; @@ -175,4 +175,36 @@ contract HorizonStakingProvisionParametersTest is HorizonStakingTest { ); staking.acceptProvisionParameters(users.indexer); } + + function test_ProvisionParametersAccept_RevertWhen_MaxThawingPeriodReduced( + uint256 amount, + uint32 
maxVerifierCut, + uint64 thawingPeriod + ) public useIndexer useValidParameters(maxVerifierCut, thawingPeriod) { + vm.assume(amount > 0); + vm.assume(amount <= MAX_STAKING_TOKENS); + vm.assume(thawingPeriod > 0); + + // Create provision with initial parameters (thawingPeriod = 0) + _createProvision(users.indexer, subgraphDataServiceAddress, amount, 0, 0); + + // Stage new parameters with valid thawing period + _setProvisionParameters(users.indexer, subgraphDataServiceAddress, maxVerifierCut, thawingPeriod); + + // Governor reduces max thawing period to below the staged value + uint64 newMaxThawingPeriod = thawingPeriod - 1; + resetPrank(users.governor); + _setMaxThawingPeriod(newMaxThawingPeriod); + + // Verifier tries to accept the parameters - should revert + resetPrank(subgraphDataServiceAddress); + vm.expectRevert( + abi.encodeWithSelector( + IHorizonStakingMain.HorizonStakingInvalidThawingPeriod.selector, + thawingPeriod, + newMaxThawingPeriod + ) + ); + staking.acceptProvisionParameters(users.indexer); + } } diff --git a/packages/horizon/test/unit/staking/provision/provision.t.sol b/packages/horizon/test/unit/staking/provision/provision.t.sol index 5149e8cf6..53b29a0f2 100644 --- a/packages/horizon/test/unit/staking/provision/provision.t.sol +++ b/packages/horizon/test/unit/staking/provision/provision.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; @@ -94,22 +94,6 @@ contract HorizonStakingProvisionTest is HorizonStakingTest { staking.provision(users.indexer, subgraphDataServiceAddress, amount, maxVerifierCut, thawingPeriod); } - function testProvision_RevertWhen_VerifierIsNotSubgraphDataServiceDuringTransitionPeriod( - uint256 amount - ) public useIndexer useStake(amount) { - // simulate the transition period - _setStorageDeprecatedThawingPeriod(THAWING_PERIOD_IN_BLOCKS); - - // oddly we use subgraphDataServiceLegacyAddress as the subgraph 
service address - // so subgraphDataServiceAddress is not the subgraph service ¯\_(ツ)_/¯ - bytes memory expectedError = abi.encodeWithSignature( - "HorizonStakingInvalidVerifier(address)", - subgraphDataServiceAddress - ); - vm.expectRevert(expectedError); - staking.provision(users.indexer, subgraphDataServiceAddress, amount, 0, 0); - } - function testProvision_AddTokensToProvision( uint256 amount, uint32 maxVerifierCut, diff --git a/packages/horizon/test/unit/staking/provision/reprovision.t.sol b/packages/horizon/test/unit/staking/provision/reprovision.t.sol index 377dfa35d..f90ae56fa 100644 --- a/packages/horizon/test/unit/staking/provision/reprovision.t.sol +++ b/packages/horizon/test/unit/staking/provision/reprovision.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/provision/thaw.t.sol b/packages/horizon/test/unit/staking/provision/thaw.t.sol index 5669189e9..6703f330c 100644 --- a/packages/horizon/test/unit/staking/provision/thaw.t.sol +++ b/packages/horizon/test/unit/staking/provision/thaw.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/serviceProvider/serviceProvider.t.sol b/packages/horizon/test/unit/staking/serviceProvider/serviceProvider.t.sol index 651fd662f..99ad0f25a 100644 --- a/packages/horizon/test/unit/staking/serviceProvider/serviceProvider.t.sol +++ b/packages/horizon/test/unit/staking/serviceProvider/serviceProvider.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; import { IGraphPayments } from 
"@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; @@ -99,37 +99,6 @@ contract HorizonStakingServiceProviderTest is HorizonStakingTest { assertEq(providerTokensAvailable, amount); } - function testServiceProvider_HasStake( - uint256 amount - ) public useIndexer useProvision(amount, MAX_PPM, MAX_THAWING_PERIOD) { - assertTrue(staking.hasStake(users.indexer)); - - _thaw(users.indexer, subgraphDataServiceAddress, amount); - skip(MAX_THAWING_PERIOD + 1); - _deprovision(users.indexer, subgraphDataServiceAddress, 0); - staking.unstake(amount); - - assertFalse(staking.hasStake(users.indexer)); - } - - function testServiceProvider_GetIndexerStakedTokens( - uint256 amount - ) public useIndexer useProvision(amount, MAX_PPM, MAX_THAWING_PERIOD) { - assertEq(staking.getIndexerStakedTokens(users.indexer), amount); - - _thaw(users.indexer, subgraphDataServiceAddress, amount); - // Does not discount thawing tokens - assertEq(staking.getIndexerStakedTokens(users.indexer), amount); - - skip(MAX_THAWING_PERIOD + 1); - _deprovision(users.indexer, subgraphDataServiceAddress, 0); - // Does not discount thawing tokens - assertEq(staking.getIndexerStakedTokens(users.indexer), amount); - - staking.unstake(amount); - assertEq(staking.getIndexerStakedTokens(users.indexer), 0); - } - function testServiceProvider_RevertIf_InvalidDelegationFeeCut( uint256 cut, uint8 paymentTypeInput diff --git a/packages/horizon/test/unit/staking/slash/legacySlash.t.sol b/packages/horizon/test/unit/staking/slash/legacySlash.t.sol deleted file mode 100644 index 4e4a9bdd3..000000000 --- a/packages/horizon/test/unit/staking/slash/legacySlash.t.sol +++ /dev/null @@ -1,251 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity 0.8.27; - -import { IHorizonStakingExtension } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol"; - -import { HorizonStakingTest } from "../HorizonStaking.t.sol"; - -contract HorizonStakingLegacySlashTest is HorizonStakingTest { - /* - 
* MODIFIERS - */ - - modifier useLegacySlasher(address slasher) { - bytes32 storageKey = keccak256(abi.encode(slasher, 18)); - vm.store(address(staking), storageKey, bytes32(uint256(1))); - _; - } - - /* - * HELPERS - */ - - function _setIndexer( - address _indexer, - uint256 _tokensStaked, - uint256 _tokensAllocated, - uint256 _tokensLocked, - uint256 _tokensLockedUntil - ) public { - bytes32 baseSlot = keccak256(abi.encode(_indexer, 14)); - - vm.store(address(staking), bytes32(uint256(baseSlot)), bytes32(_tokensStaked)); - vm.store(address(staking), bytes32(uint256(baseSlot) + 1), bytes32(_tokensAllocated)); - vm.store(address(staking), bytes32(uint256(baseSlot) + 2), bytes32(_tokensLocked)); - vm.store(address(staking), bytes32(uint256(baseSlot) + 3), bytes32(_tokensLockedUntil)); - } - - /* - * ACTIONS - */ - - function _legacySlash(address _indexer, uint256 _tokens, uint256 _rewards, address _beneficiary) internal { - // before - uint256 beforeStakingBalance = token.balanceOf(address(staking)); - uint256 beforeRewardsDestinationBalance = token.balanceOf(_beneficiary); - ServiceProviderInternal memory beforeIndexer = _getStorageServiceProviderInternal(_indexer); - - // calculate slashable stake - uint256 slashableStake = beforeIndexer.tokensStaked - beforeIndexer.tokensProvisioned; - uint256 actualTokens = _tokens; - uint256 actualRewards = _rewards; - if (slashableStake == 0) { - actualTokens = 0; - actualRewards = 0; - } else if (_tokens > slashableStake) { - actualRewards = (_rewards * slashableStake) / _tokens; - actualTokens = slashableStake; - } - - // slash - vm.expectEmit(address(staking)); - emit IHorizonStakingExtension.StakeSlashed(_indexer, actualTokens, actualRewards, _beneficiary); - staking.slash(_indexer, _tokens, _rewards, _beneficiary); - - // after - uint256 afterStakingBalance = token.balanceOf(address(staking)); - uint256 afterRewardsDestinationBalance = token.balanceOf(_beneficiary); - ServiceProviderInternal memory afterIndexer = 
_getStorageServiceProviderInternal(_indexer); - - assertEq(beforeStakingBalance - actualTokens, afterStakingBalance); - assertEq(beforeRewardsDestinationBalance, afterRewardsDestinationBalance - actualRewards); - assertEq(afterIndexer.tokensStaked, beforeIndexer.tokensStaked - actualTokens); - } - - /* - * TESTS - */ - function testSlash_Legacy( - uint256 tokensStaked, - uint256 tokensProvisioned, - uint256 slashTokens, - uint256 reward - ) public useIndexer useLegacySlasher(users.legacySlasher) { - vm.assume(tokensStaked > 0); - vm.assume(tokensStaked <= MAX_STAKING_TOKENS); - vm.assume(tokensProvisioned > 0); - vm.assume(tokensProvisioned <= tokensStaked); - slashTokens = bound(slashTokens, 1, tokensStaked); - reward = bound(reward, 0, slashTokens); - - _stake(tokensStaked); - _provision(users.indexer, subgraphDataServiceLegacyAddress, tokensProvisioned, 0, 0); - - resetPrank(users.legacySlasher); - _legacySlash(users.indexer, slashTokens, reward, makeAddr("fisherman")); - } - - function testSlash_Legacy_UsingLockedTokens( - uint256 tokens, - uint256 slashTokens, - uint256 reward - ) public useIndexer useLegacySlasher(users.legacySlasher) { - vm.assume(tokens > 1); - slashTokens = bound(slashTokens, 1, tokens); - reward = bound(reward, 0, slashTokens); - - _setIndexer(users.indexer, tokens, 0, tokens, block.timestamp + 1); - // Send tokens manually to staking - require(token.transfer(address(staking), tokens), "Transfer failed"); - - resetPrank(users.legacySlasher); - _legacySlash(users.indexer, slashTokens, reward, makeAddr("fisherman")); - } - - function testSlash_Legacy_UsingAllocatedTokens( - uint256 tokens, - uint256 slashTokens, - uint256 reward - ) public useIndexer useLegacySlasher(users.legacySlasher) { - vm.assume(tokens > 1); - slashTokens = bound(slashTokens, 1, tokens); - reward = bound(reward, 0, slashTokens); - - _setIndexer(users.indexer, tokens, 0, tokens, 0); - // Send tokens manually to staking - require(token.transfer(address(staking), 
tokens), "Transfer failed"); - - resetPrank(users.legacySlasher); - staking.legacySlash(users.indexer, slashTokens, reward, makeAddr("fisherman")); - } - - function testSlash_Legacy_RevertWhen_CallerNotSlasher( - uint256 tokens, - uint256 slashTokens, - uint256 reward - ) public useIndexer { - vm.assume(tokens > 0); - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - vm.expectRevert("!slasher"); - staking.legacySlash(users.indexer, slashTokens, reward, makeAddr("fisherman")); - } - - function testSlash_Legacy_RevertWhen_RewardsOverSlashTokens( - uint256 tokens, - uint256 slashTokens, - uint256 reward - ) public useIndexer useLegacySlasher(users.legacySlasher) { - vm.assume(tokens > 0); - vm.assume(slashTokens > 0); - vm.assume(reward > slashTokens); - - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - resetPrank(users.legacySlasher); - vm.expectRevert("rewards>slash"); - staking.legacySlash(users.indexer, slashTokens, reward, makeAddr("fisherman")); - } - - function testSlash_Legacy_RevertWhen_NoStake( - uint256 slashTokens, - uint256 reward - ) public useLegacySlasher(users.legacySlasher) { - vm.assume(slashTokens > 0); - reward = bound(reward, 0, slashTokens); - - resetPrank(users.legacySlasher); - vm.expectRevert("!stake"); - staking.legacySlash(users.indexer, slashTokens, reward, makeAddr("fisherman")); - } - - function testSlash_Legacy_RevertWhen_ZeroTokens( - uint256 tokens - ) public useIndexer useLegacySlasher(users.legacySlasher) { - vm.assume(tokens > 0); - - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - resetPrank(users.legacySlasher); - vm.expectRevert("!tokens"); - staking.legacySlash(users.indexer, 0, 0, makeAddr("fisherman")); - } - - function testSlash_Legacy_RevertWhen_NoBeneficiary( - uint256 tokens, - uint256 slashTokens, - uint256 reward - ) public useIndexer useLegacySlasher(users.legacySlasher) { - vm.assume(tokens > 0); - slashTokens 
= bound(slashTokens, 1, tokens); - reward = bound(reward, 0, slashTokens); - - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, 0); - - resetPrank(users.legacySlasher); - vm.expectRevert("!beneficiary"); - staking.legacySlash(users.indexer, slashTokens, reward, address(0)); - } - - function test_LegacySlash_WhenTokensAllocatedGreaterThanStake() - public - useIndexer - useLegacySlasher(users.legacySlasher) - { - // Setup indexer with: - // - tokensStaked = 1000 GRT - // - tokensAllocated = 800 GRT - // - tokensLocked = 300 GRT - // This means tokensUsed (1100 GRT) > tokensStaked (1000 GRT) - _setIndexer( - users.indexer, - 1000 ether, // tokensStaked - 800 ether, // tokensAllocated - 300 ether, // tokensLocked - 0 // tokensLockedUntil - ); - - // Send tokens manually to staking - require(token.transfer(address(staking), 1100 ether), "Transfer failed"); - - resetPrank(users.legacySlasher); - _legacySlash(users.indexer, 1000 ether, 500 ether, makeAddr("fisherman")); - } - - function test_LegacySlash_WhenDelegateCallFails() public useIndexer useLegacySlasher(users.legacySlasher) { - // Setup indexer with: - // - tokensStaked = 1000 GRT - // - tokensAllocated = 800 GRT - // - tokensLocked = 300 GRT - - _setIndexer( - users.indexer, - 1000 ether, // tokensStaked - 800 ether, // tokensAllocated - 300 ether, // tokensLocked - 0 // tokensLockedUntil - ); - - // Send tokens manually to staking - require(token.transfer(address(staking), 1100 ether), "Transfer failed"); - - // Change staking extension code to an invalid opcode so the delegatecall reverts - address stakingExtension = staking.getStakingExtension(); - vm.etch(stakingExtension, hex"fe"); - - resetPrank(users.legacySlasher); - bytes memory expectedError = abi.encodeWithSignature("HorizonStakingLegacySlashFailed()"); - vm.expectRevert(expectedError); - staking.slash(users.indexer, 1000 ether, 500 ether, makeAddr("fisherman")); - } -} diff --git 
a/packages/horizon/test/unit/staking/slash/slash.t.sol b/packages/horizon/test/unit/staking/slash/slash.t.sol index 4572ed93f..cba33ae8a 100644 --- a/packages/horizon/test/unit/staking/slash/slash.t.sol +++ b/packages/horizon/test/unit/staking/slash/slash.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; diff --git a/packages/horizon/test/unit/staking/stake/forceWithdraw.t.sol b/packages/horizon/test/unit/staking/stake/forceWithdraw.t.sol new file mode 100644 index 000000000..843d7e087 --- /dev/null +++ b/packages/horizon/test/unit/staking/stake/forceWithdraw.t.sol @@ -0,0 +1,125 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; + +import { HorizonStakingTest } from "../HorizonStaking.t.sol"; + +contract HorizonStakingForceWithdrawTest is HorizonStakingTest { + /* + * HELPERS + */ + + function _forceWithdraw(address _serviceProvider) internal { + (, address msgSender, ) = vm.readCallers(); + + // before + ServiceProviderInternal memory beforeServiceProvider = _getStorageServiceProviderInternal(_serviceProvider); + uint256 beforeServiceProviderBalance = token.balanceOf(_serviceProvider); + uint256 beforeCallerBalance = token.balanceOf(msgSender); + uint256 beforeStakingBalance = token.balanceOf(address(staking)); + + // forceWithdraw + vm.expectEmit(address(staking)); + emit IHorizonStakingMain.HorizonStakeWithdrawn( + _serviceProvider, + beforeServiceProvider.__DEPRECATED_tokensLocked + ); + staking.forceWithdraw(_serviceProvider); + + // after + ServiceProviderInternal memory afterServiceProvider = _getStorageServiceProviderInternal(_serviceProvider); + uint256 afterServiceProviderBalance = token.balanceOf(_serviceProvider); + uint256 afterCallerBalance = 
token.balanceOf(msgSender); + uint256 afterStakingBalance = token.balanceOf(address(staking)); + + // assert - tokens go to service provider, not caller + assertEq( + afterServiceProviderBalance - beforeServiceProviderBalance, + beforeServiceProvider.__DEPRECATED_tokensLocked + ); + assertEq(afterCallerBalance, beforeCallerBalance); // caller balance unchanged + assertEq(beforeStakingBalance - afterStakingBalance, beforeServiceProvider.__DEPRECATED_tokensLocked); + + // assert - service provider state updated + assertEq( + afterServiceProvider.tokensStaked, + beforeServiceProvider.tokensStaked - beforeServiceProvider.__DEPRECATED_tokensLocked + ); + assertEq(afterServiceProvider.tokensProvisioned, beforeServiceProvider.tokensProvisioned); + assertEq(afterServiceProvider.__DEPRECATED_tokensAllocated, beforeServiceProvider.__DEPRECATED_tokensAllocated); + assertEq(afterServiceProvider.__DEPRECATED_tokensLocked, 0); + assertEq(afterServiceProvider.__DEPRECATED_tokensLockedUntil, 0); + } + + /* + * TESTS + */ + + function testForceWithdraw_Tokens(uint256 tokens, uint256 tokensLocked) public useIndexer { + tokens = bound(tokens, 1, MAX_STAKING_TOKENS); + tokensLocked = bound(tokensLocked, 1, tokens); + + // simulate locked tokens ready to withdraw + require(token.transfer(address(staking), tokens), "transfer failed"); + _setStorageServiceProvider(users.indexer, tokens, 0, tokensLocked, block.number, 0); + + _createProvision(users.indexer, subgraphDataServiceAddress, tokens, 0, MAX_THAWING_PERIOD); + + // switch to a different user (not the service provider) + resetPrank(users.delegator); + + _forceWithdraw(users.indexer); + } + + function testForceWithdraw_CalledByServiceProvider(uint256 tokens, uint256 tokensLocked) public useIndexer { + tokens = bound(tokens, 1, MAX_STAKING_TOKENS); + tokensLocked = bound(tokensLocked, 1, tokens); + + // simulate locked tokens ready to withdraw + require(token.transfer(address(staking), tokens), "transfer failed"); + 
_setStorageServiceProvider(users.indexer, tokens, 0, tokensLocked, block.number, 0); + + _createProvision(users.indexer, subgraphDataServiceAddress, tokens, 0, MAX_THAWING_PERIOD); + + // before + ServiceProviderInternal memory beforeServiceProvider = _getStorageServiceProviderInternal(users.indexer); + uint256 beforeServiceProviderBalance = token.balanceOf(users.indexer); + uint256 beforeStakingBalance = token.balanceOf(address(staking)); + + // service provider can also call forceWithdraw on themselves + vm.expectEmit(address(staking)); + emit IHorizonStakingMain.HorizonStakeWithdrawn(users.indexer, beforeServiceProvider.__DEPRECATED_tokensLocked); + staking.forceWithdraw(users.indexer); + + // after + ServiceProviderInternal memory afterServiceProvider = _getStorageServiceProviderInternal(users.indexer); + uint256 afterServiceProviderBalance = token.balanceOf(users.indexer); + uint256 afterStakingBalance = token.balanceOf(address(staking)); + + // assert + assertEq( + afterServiceProviderBalance - beforeServiceProviderBalance, + beforeServiceProvider.__DEPRECATED_tokensLocked + ); + assertEq(beforeStakingBalance - afterStakingBalance, beforeServiceProvider.__DEPRECATED_tokensLocked); + assertEq(afterServiceProvider.__DEPRECATED_tokensLocked, 0); + assertEq(afterServiceProvider.__DEPRECATED_tokensLockedUntil, 0); + } + + function testForceWithdraw_RevertWhen_ZeroTokens(uint256 tokens) public useIndexer { + tokens = bound(tokens, 1, MAX_STAKING_TOKENS); + + // simulate zero locked tokens + require(token.transfer(address(staking), tokens), "transfer failed"); + _setStorageServiceProvider(users.indexer, tokens, 0, 0, 0, 0); + + _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, MAX_THAWING_PERIOD); + + // switch to a different user + resetPrank(users.delegator); + + vm.expectRevert(abi.encodeWithSelector(IHorizonStakingMain.HorizonStakingInvalidZeroTokens.selector)); + staking.forceWithdraw(users.indexer); + } +} diff --git 
a/packages/horizon/test/unit/staking/stake/stake.t.sol b/packages/horizon/test/unit/staking/stake/stake.t.sol index ea1425de0..db00ad7ec 100644 --- a/packages/horizon/test/unit/staking/stake/stake.t.sol +++ b/packages/horizon/test/unit/staking/stake/stake.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; diff --git a/packages/horizon/test/unit/staking/stake/unstake.t.sol b/packages/horizon/test/unit/staking/stake/unstake.t.sol index 54803cc60..5cf89bf8f 100644 --- a/packages/horizon/test/unit/staking/stake/unstake.t.sol +++ b/packages/horizon/test/unit/staking/stake/unstake.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { HorizonStakingTest } from "../HorizonStaking.t.sol"; @@ -24,79 +24,6 @@ contract HorizonStakingUnstakeTest is HorizonStakingTest { _unstake(tokensToUnstake); } - function testUnstake_LockingPeriodGreaterThanZero_NoThawing( - uint256 tokens, - uint256 tokensToUnstake, - uint32 maxVerifierCut, - uint64 thawingPeriod - ) public useIndexer useProvision(tokens, maxVerifierCut, thawingPeriod) { - tokensToUnstake = bound(tokensToUnstake, 1, tokens); - - // simulate transition period - _setStorageDeprecatedThawingPeriod(THAWING_PERIOD_IN_BLOCKS); - - // thaw, wait and deprovision - _thaw(users.indexer, subgraphDataServiceAddress, tokens); - skip(thawingPeriod + 1); - _deprovision(users.indexer, subgraphDataServiceAddress, 0); - - // unstake - _unstake(tokensToUnstake); - } - - function testUnstake_LockingPeriodGreaterThanZero_TokensDoneThawing( - uint256 tokens, - uint256 tokensToUnstake, - uint256 tokensLocked - ) public useIndexer { - // bounds - tokens = bound(tokens, 1, MAX_STAKING_TOKENS); - tokensToUnstake = bound(tokensToUnstake, 1, tokens); - tokensLocked = bound(tokensLocked, 1, MAX_STAKING_TOKENS); - - // simulate locked tokens with past locking period - 
_setStorageDeprecatedThawingPeriod(THAWING_PERIOD_IN_BLOCKS); - require(token.transfer(address(staking), tokensLocked), "Transfer failed"); - _setStorageServiceProvider(users.indexer, tokensLocked, 0, tokensLocked, block.number, 0); - - // create provision, thaw and deprovision - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, MAX_THAWING_PERIOD); - _thaw(users.indexer, subgraphDataServiceLegacyAddress, tokens); - skip(MAX_THAWING_PERIOD + 1); - _deprovision(users.indexer, subgraphDataServiceLegacyAddress, 0); - - // unstake - _unstake(tokensToUnstake); - } - - function testUnstake_LockingPeriodGreaterThanZero_TokensStillThawing( - uint256 tokens, - uint256 tokensToUnstake, - uint256 tokensThawing, - uint32 tokensThawingUntilBlock - ) public useIndexer { - // bounds - tokens = bound(tokens, 1, MAX_STAKING_TOKENS); - tokensToUnstake = bound(tokensToUnstake, 1, tokens); - tokensThawing = bound(tokensThawing, 1, MAX_STAKING_TOKENS); - vm.assume(tokensThawingUntilBlock > block.number); - vm.assume(tokensThawingUntilBlock < block.number + THAWING_PERIOD_IN_BLOCKS); - - // simulate locked tokens still thawing - _setStorageDeprecatedThawingPeriod(THAWING_PERIOD_IN_BLOCKS); - require(token.transfer(address(staking), tokensThawing), "Transfer failed"); - _setStorageServiceProvider(users.indexer, tokensThawing, 0, tokensThawing, tokensThawingUntilBlock, 0); - - // create provision, thaw and deprovision - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, MAX_THAWING_PERIOD); - _thaw(users.indexer, subgraphDataServiceLegacyAddress, tokens); - skip(MAX_THAWING_PERIOD + 1); - _deprovision(users.indexer, subgraphDataServiceLegacyAddress, 0); - - // unstake - _unstake(tokensToUnstake); - } - function testUnstake_RevertWhen_ZeroTokens( uint256 amount, uint32 maxVerifierCut, diff --git a/packages/horizon/test/unit/staking/stake/withdraw.t.sol b/packages/horizon/test/unit/staking/stake/withdraw.t.sol index 2d7b89382..6afeb85cc 
100644 --- a/packages/horizon/test/unit/staking/stake/withdraw.t.sol +++ b/packages/horizon/test/unit/staking/stake/withdraw.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IHorizonStakingMain } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol"; @@ -35,19 +35,4 @@ contract HorizonStakingWithdrawTest is HorizonStakingTest { vm.expectRevert(abi.encodeWithSelector(IHorizonStakingMain.HorizonStakingInvalidZeroTokens.selector)); staking.withdraw(); } - - function testWithdraw_RevertWhen_StillThawing(uint256 tokens, uint256 tokensLocked) public useIndexer { - tokens = bound(tokens, 1, MAX_STAKING_TOKENS); - tokensLocked = bound(tokensLocked, 1, tokens); - - // simulate locked tokens still thawing - uint256 thawUntil = block.timestamp + 1; - require(token.transfer(address(staking), tokens), "Transfer failed"); - _setStorageServiceProvider(users.indexer, tokens, 0, tokensLocked, thawUntil, 0); - - _createProvision(users.indexer, subgraphDataServiceLegacyAddress, tokens, 0, MAX_THAWING_PERIOD); - - vm.expectRevert(abi.encodeWithSelector(IHorizonStakingMain.HorizonStakingStillThawing.selector, thawUntil)); - staking.withdraw(); - } } diff --git a/packages/horizon/test/unit/utilities/Authorizable.t.sol b/packages/horizon/test/unit/utilities/Authorizable.t.sol index 33713c436..66c4bb921 100644 --- a/packages/horizon/test/unit/utilities/Authorizable.t.sol +++ b/packages/horizon/test/unit/utilities/Authorizable.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; @@ -14,23 +14,27 @@ contract AuthorizableImp is Authorizable { } contract AuthorizableTest is Test, Bounder { - AuthorizableImp public authorizable; + IAuthorizable public authorizable; AuthorizableHelper authHelper; modifier withFuzzyThaw(uint256 _thawPeriod) { // Max thaw period is 1 year to allow for thawing tests 
_thawPeriod = bound(_thawPeriod, 1, 60 * 60 * 24 * 365); - setupAuthorizable(new AuthorizableImp(_thawPeriod)); + setupAuthorizable(_thawPeriod); _; } - function setUp() public virtual { - setupAuthorizable(new AuthorizableImp(0)); + function setUp() public { + setupAuthorizable(0); } - function setupAuthorizable(AuthorizableImp _authorizable) internal { - authorizable = _authorizable; - authHelper = new AuthorizableHelper(authorizable); + function setupAuthorizable(uint256 _thawPeriod) internal { + authorizable = newAuthorizable(_thawPeriod); + authHelper = new AuthorizableHelper(authorizable, _thawPeriod); + } + + function newAuthorizable(uint256 _thawPeriod) public virtual returns (IAuthorizable) { + return new AuthorizableImp(_thawPeriod); } function test_AuthorizeSigner(uint256 _unboundedKey, address _authorizer) public { @@ -304,12 +308,12 @@ contract AuthorizableTest is Test, Bounder { authHelper.authorizeAndThawSignerWithChecks(_authorizer, signerKey); - _skip = bound(_skip, 0, authorizable.REVOKE_AUTHORIZATION_THAWING_PERIOD() - 1); + _skip = bound(_skip, 0, authHelper.revokeAuthorizationThawingPeriod() - 1); skip(_skip); bytes memory expectedErr = abi.encodeWithSelector( IAuthorizable.AuthorizableSignerStillThawing.selector, block.timestamp, - block.timestamp - _skip + authorizable.REVOKE_AUTHORIZATION_THAWING_PERIOD() + block.timestamp - _skip + authHelper.revokeAuthorizationThawingPeriod() ); vm.expectRevert(expectedErr); vm.prank(_authorizer); @@ -322,17 +326,19 @@ contract AuthorizableTest is Test, Bounder { } contract AuthorizableHelper is Test { - AuthorizableImp internal authorizable; + IAuthorizable internal authorizable; + uint256 public revokeAuthorizationThawingPeriod; - constructor(AuthorizableImp _authorizable) { + constructor(IAuthorizable _authorizable, uint256 _thawPeriod) { authorizable = _authorizable; + revokeAuthorizationThawingPeriod = _thawPeriod; } function authorizeAndThawSignerWithChecks(address _authorizer, uint256 _signerKey) 
public { address signer = vm.addr(_signerKey); authorizeSignerWithChecks(_authorizer, _signerKey); - uint256 thawEndTimestamp = block.timestamp + authorizable.REVOKE_AUTHORIZATION_THAWING_PERIOD(); + uint256 thawEndTimestamp = block.timestamp + revokeAuthorizationThawingPeriod; vm.expectEmit(address(authorizable)); emit IAuthorizable.SignerThawing(_authorizer, signer, thawEndTimestamp); vm.prank(_authorizer); @@ -344,7 +350,7 @@ contract AuthorizableHelper is Test { function authorizeAndRevokeSignerWithChecks(address _authorizer, uint256 _signerKey) public { address signer = vm.addr(_signerKey); authorizeAndThawSignerWithChecks(_authorizer, _signerKey); - skip(authorizable.REVOKE_AUTHORIZATION_THAWING_PERIOD() + 1); + skip(revokeAuthorizationThawingPeriod + 1); vm.expectEmit(address(authorizable)); emit IAuthorizable.SignerRevoked(_authorizer, signer); vm.prank(_authorizer); @@ -357,6 +363,7 @@ contract AuthorizableHelper is Test { address signer = vm.addr(_signerKey); assertNotAuthorized(_authorizer, signer); + require(block.timestamp < type(uint256).max, "Test cannot be run at the end of time"); uint256 proofDeadline = block.timestamp + 1; bytes memory proof = generateAuthorizationProof( block.chainid, diff --git a/packages/horizon/test/unit/utilities/GraphDirectory.t.sol b/packages/horizon/test/unit/utilities/GraphDirectory.t.sol index 2eea04b73..5606eedc6 100644 --- a/packages/horizon/test/unit/utilities/GraphDirectory.t.sol +++ b/packages/horizon/test/unit/utilities/GraphDirectory.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { GraphBaseTest } from "../GraphBase.t.sol"; import { GraphDirectory } from "./../../../contracts/utilities/GraphDirectory.sol"; @@ -17,8 +17,7 @@ contract GraphDirectoryTest is GraphBaseTest { _getContractFromController("EpochManager"), _getContractFromController("RewardsManager"), _getContractFromController("GraphTokenGateway"), - 
_getContractFromController("GraphProxyAdmin"), - _getContractFromController("Curation") + _getContractFromController("GraphProxyAdmin") ); _deployImplementation(address(controller)); } @@ -47,7 +46,6 @@ contract GraphDirectoryTest is GraphBaseTest { assertEq(_getContractFromController("RewardsManager"), address(directory.graphRewardsManager())); assertEq(_getContractFromController("GraphTokenGateway"), address(directory.graphTokenGateway())); assertEq(_getContractFromController("GraphProxyAdmin"), address(directory.graphProxyAdmin())); - assertEq(_getContractFromController("Curation"), address(directory.graphCuration())); } function test_RevertWhen_AnInvalidContractGetterIsCalled() external { diff --git a/packages/horizon/test/unit/utilities/GraphDirectoryImplementation.sol b/packages/horizon/test/unit/utilities/GraphDirectoryImplementation.sol index 4a88bf0cd..b3c6198df 100644 --- a/packages/horizon/test/unit/utilities/GraphDirectoryImplementation.sol +++ b/packages/horizon/test/unit/utilities/GraphDirectoryImplementation.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; import { IHorizonStaking } from "@graphprotocol/interfaces/contracts/horizon/IHorizonStaking.sol"; @@ -12,7 +12,6 @@ import { IEpochManager } from "@graphprotocol/interfaces/contracts/contracts/epo import { IRewardsManager } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsManager.sol"; import { ITokenGateway } from "@graphprotocol/interfaces/contracts/contracts/arbitrum/ITokenGateway.sol"; import { IGraphProxyAdmin } from "@graphprotocol/interfaces/contracts/contracts/upgrades/IGraphProxyAdmin.sol"; -import { ICuration } from "@graphprotocol/interfaces/contracts/contracts/curation/ICuration.sol"; import { GraphDirectory } from "./../../../contracts/utilities/GraphDirectory.sol"; @@ -22,6 +21,7 @@ contract 
GraphDirectoryImplementation is GraphDirectory { function getContractFromController(bytes memory contractName) external view returns (address) { return _graphController().getContractProxy(keccak256(contractName)); } + function graphToken() external view returns (IGraphToken) { return _graphToken(); } @@ -57,8 +57,4 @@ contract GraphDirectoryImplementation is GraphDirectory { function graphProxyAdmin() external view returns (IGraphProxyAdmin) { return _graphProxyAdmin(); } - - function graphCuration() external view returns (ICuration) { - return _graphCuration(); - } } diff --git a/packages/horizon/test/unit/utils/Bounder.t.sol b/packages/horizon/test/unit/utils/Bounder.t.sol index 44e977f57..58e2fa324 100644 --- a/packages/horizon/test/unit/utils/Bounder.t.sol +++ b/packages/horizon/test/unit/utils/Bounder.t.sol @@ -1,23 +1,27 @@ // SPDX-License-Identifier: UNLICENSED -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; contract Bounder is Test { uint256 constant SECP256K1_CURVE_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + function boundKeyAndAddr(uint256 _value) internal pure returns (uint256, address) { + uint256 key = bound(_value, 1, SECP256K1_CURVE_ORDER - 1); + return (key, vm.addr(key)); + } + function boundAddrAndKey(uint256 _value) internal pure returns (uint256, address) { - uint256 signerKey = bound(_value, 1, SECP256K1_CURVE_ORDER - 1); - return (signerKey, vm.addr(signerKey)); + return boundKeyAndAddr(_value); } function boundAddr(uint256 _value) internal pure returns (address) { - (, address addr) = boundAddrAndKey(_value); + (, address addr) = boundKeyAndAddr(_value); return addr; } function boundKey(uint256 _value) internal pure returns (uint256) { - (uint256 key, ) = boundAddrAndKey(_value); + (uint256 key, ) = boundKeyAndAddr(_value); return key; } @@ -28,4 +32,21 @@ contract Bounder is Test { function boundTimestampMin(uint256 _value, uint256 _min) internal pure returns 
(uint256) { return bound(_value, _min, type(uint256).max); } + + function boundSkipFloor(uint256 _value, uint256 _min) internal view returns (uint256) { + return boundSkip(_value, _min, type(uint256).max); + } + + function boundSkipCeil(uint256 _value, uint256 _max) internal view returns (uint256) { + return boundSkip(_value, 0, _max); + } + + function boundSkip(uint256 _value, uint256 _min, uint256 _max) internal view returns (uint256) { + return bound(_value, orTillEndOfTime(_min), orTillEndOfTime(_max)); + } + + function orTillEndOfTime(uint256 _value) internal view returns (uint256) { + uint256 tillEndOfTime = type(uint256).max - block.timestamp; + return _value < tillEndOfTime ? _value : tillEndOfTime; + } } diff --git a/packages/horizon/test/unit/utils/Constants.sol b/packages/horizon/test/unit/utils/Constants.sol index 51b882118..036ca43a2 100644 --- a/packages/horizon/test/unit/utils/Constants.sol +++ b/packages/horizon/test/unit/utils/Constants.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; abstract contract Constants { uint32 internal constant MAX_PPM = 1000000; // 100% in parts per million diff --git a/packages/horizon/test/unit/utils/Users.sol b/packages/horizon/test/unit/utils/Users.sol index 6213e4e82..bd6177cf0 100644 --- a/packages/horizon/test/unit/utils/Users.sol +++ b/packages/horizon/test/unit/utils/Users.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; struct Users { address governor; @@ -9,5 +9,4 @@ struct Users { address gateway; address verifier; address delegator; - address legacySlasher; } diff --git a/packages/horizon/test/unit/utils/Utils.sol b/packages/horizon/test/unit/utils/Utils.sol index 741c7367f..45da9df8c 100644 --- a/packages/horizon/test/unit/utils/Utils.sol +++ b/packages/horizon/test/unit/utils/Utils.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.27; +pragma solidity ^0.8.27; import { Test } 
from "forge-std/Test.sol"; diff --git a/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol b/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol index aa7d32eba..43a13d791 100644 --- a/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol +++ b/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol @@ -3,7 +3,6 @@ pragma solidity ^0.7.6 || ^0.8.0; import { IIssuanceAllocationDistribution } from "../../issuance/allocate/IIssuanceAllocationDistribution.sol"; -import { IRewardsEligibility } from "../../issuance/eligibility/IRewardsEligibility.sol"; import { IRewardsIssuer } from "./IRewardsIssuer.sol"; /** @@ -53,16 +52,6 @@ interface IRewardsManager { event RewardsDeniedDueToEligibility(address indexed indexer, address indexed allocationID, uint256 amount); // solhint-disable-previous-line gas-indexed-events - /** - * @notice Emitted when the rewards eligibility oracle contract is set - * @param oldRewardsEligibilityOracle Previous rewards eligibility oracle address - * @param newRewardsEligibilityOracle New rewards eligibility oracle address - */ - event RewardsEligibilityOracleSet( - address indexed oldRewardsEligibilityOracle, - address indexed newRewardsEligibilityOracle - ); - /** * @notice New reclaim address set * @param reason The reclaim reason (or condition) identifier (see RewardsCondition library for canonical reasons) @@ -124,12 +113,6 @@ interface IRewardsManager { */ function setSubgraphService(address newSubgraphService) external; - /** - * @notice Set the rewards eligibility oracle address - * @param newRewardsEligibilityOracle The address of the rewards eligibility oracle - */ - function setRewardsEligibilityOracle(address newRewardsEligibilityOracle) external; - /** * @notice Set the reclaim address for a specific reason * @dev Address to mint tokens for denied/reclaimed rewards. Set to zero to disable. 
@@ -201,12 +184,6 @@ interface IRewardsManager { */ function getDefaultReclaimAddress() external view returns (address); - /** - * @notice Get the rewards eligibility oracle address - * @return The rewards eligibility oracle contract - */ - function getRewardsEligibilityOracle() external view returns (IRewardsEligibility); - /** * @notice Gets the effective issuance per block, accounting for the issuance allocator * @dev When an issuance allocator is set, returns the allocated rate for this contract. @@ -278,7 +255,7 @@ interface IRewardsManager { /** * @notice Pull rewards from the contract for a particular allocation - * @dev This function can only be called by the Staking contract. + * @dev This function can only be called by the Subgraph Service contract. * This function will mint the necessary tokens to reward based on the inflation calculation. * @param allocationID Allocation * @return Assigned rewards amount diff --git a/packages/interfaces/contracts/data-service/IDataServiceAgreements.sol b/packages/interfaces/contracts/data-service/IDataServiceAgreements.sol new file mode 100644 index 000000000..ea5b0dd54 --- /dev/null +++ b/packages/interfaces/contracts/data-service/IDataServiceAgreements.sol @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +/** + * @title Interface for data services that manage indexing agreements. + * @author Edge & Node + * @notice Interface to support payer-initiated cancellation of indexing agreements. + * Any data service that participates in agreement lifecycle management via + * {RecurringAgreementManager} should implement this interface. + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +interface IDataServiceAgreements { + /** + * @notice Cancel an indexing agreement by payer / signer. 
+ * @param agreementId The id of the indexing agreement + */ + function cancelIndexingAgreementByPayer(bytes16 agreementId) external; +} diff --git a/packages/interfaces/contracts/data-service/IDataServiceFees.sol b/packages/interfaces/contracts/data-service/IDataServiceFees.sol index 9cba91d7a..e9bf60bf0 100644 --- a/packages/interfaces/contracts/data-service/IDataServiceFees.sol +++ b/packages/interfaces/contracts/data-service/IDataServiceFees.sol @@ -26,70 +26,6 @@ import { IDataService } from "./IDataService.sol"; * bugs. We may have an active bug bounty program. */ interface IDataServiceFees is IDataService { - /** - * @notice A stake claim, representing provisioned stake that gets locked - * to be released to a service provider. - * @dev StakeClaims are stored in linked lists by service provider, ordered by - * creation timestamp. - * @param tokens The amount of tokens to be locked in the claim - * @param createdAt The timestamp when the claim was created - * @param releasableAt The timestamp when the tokens can be released - * @param nextClaim The next claim in the linked list - */ - struct StakeClaim { - uint256 tokens; - uint256 createdAt; - uint256 releasableAt; - bytes32 nextClaim; - } - - /** - * @notice Emitted when a stake claim is created and stake is locked. - * @param serviceProvider The address of the service provider - * @param claimId The id of the stake claim - * @param tokens The amount of tokens to lock in the claim - * @param unlockTimestamp The timestamp when the tokens can be released - */ - event StakeClaimLocked( - address indexed serviceProvider, - bytes32 indexed claimId, - uint256 tokens, - uint256 unlockTimestamp - ); - - /** - * @notice Emitted when a stake claim is released and stake is unlocked. 
- * @param serviceProvider The address of the service provider - * @param claimId The id of the stake claim - * @param tokens The amount of tokens released - * @param releasableAt The timestamp when the tokens were released - */ - event StakeClaimReleased( - address indexed serviceProvider, - bytes32 indexed claimId, - uint256 tokens, - uint256 releasableAt - ); - - /** - * @notice Emitted when a series of stake claims are released. - * @param serviceProvider The address of the service provider - * @param claimsCount The number of stake claims being released - * @param tokensReleased The total amount of tokens being released - */ - event StakeClaimsReleased(address indexed serviceProvider, uint256 claimsCount, uint256 tokensReleased); - - /** - * @notice Thrown when attempting to get a stake claim that does not exist. - * @param claimId The id of the stake claim - */ - error DataServiceFeesClaimNotFound(bytes32 claimId); - - /** - * @notice Emitted when trying to lock zero tokens in a stake claim - */ - error DataServiceFeesZeroTokens(); - /** * @notice Releases expired stake claims for the caller. * @dev This function is only meant to be called if the service provider has enough diff --git a/packages/interfaces/contracts/horizon/IAgreementOwner.sol b/packages/interfaces/contracts/horizon/IAgreementOwner.sol new file mode 100644 index 000000000..00de00f9e --- /dev/null +++ b/packages/interfaces/contracts/horizon/IAgreementOwner.sol @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +/** + * @title Interface for contract payer callbacks from RecurringCollector + * @author Edge & Node + * @notice Callbacks that RecurringCollector invokes on contract payers (payers with + * deployed code, as opposed to EOA payers that use ECDSA signatures). + * + * Three callbacks: + * - {approveAgreement}: gate — called during accept/update to verify authorization. + * Uses the magic-value pattern (return selector on success). 
Called with RCA hash + * on accept, RCAU hash on update; namespaces don't collide (different EIP712 type hashes). + * - {beforeCollection}: called before PaymentsEscrow.collect() so the payer can top up + * escrow if needed. Only acts when the escrow balance is short for the collection. + * - {afterCollection}: called after collection so the payer can reconcile escrow state. + * Both collection callbacks are wrapped in try/catch — reverts do not block collection. + * + * No per-payer authorization step is needed — the contract's code is the authorization. + * The trust chain is: governance grants operator role → operator registers + * (validates and pre-funds) → approveAgreement confirms → RC accepts/updates. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +interface IAgreementOwner { + /** + * @notice Confirms this contract authorized the given agreement or update + * @dev Called by {RecurringCollector.accept} with an RCA hash or by + * {RecurringCollector.update} with an RCAU hash to verify authorization (empty authData path). + * @param agreementHash The EIP712 hash of the RCA or RCAU struct + * @return magic `IAgreementOwner.approveAgreement.selector` if authorized + */ + function approveAgreement(bytes32 agreementHash) external view returns (bytes4); + + /** + * @notice Called by RecurringCollector before PaymentsEscrow.collect() + * @dev Allows contract payers to top up escrow if the balance is insufficient + * for the upcoming collection. Wrapped in try/catch — reverts do not block collection. 
+ * @param agreementId The agreement being collected + * @param tokensToCollect Amount of tokens about to be collected + */ + function beforeCollection(bytes16 agreementId, uint256 tokensToCollect) external; + + /** + * @notice Called by RecurringCollector after a successful collection + * @dev Allows contract payers to reconcile escrow state in the same transaction + * as the collection. Wrapped in try/catch — reverts do not block collection. + * @param agreementId The collected agreement + * @param tokensCollected Amount of tokens collected + */ + function afterCollection(bytes16 agreementId, uint256 tokensCollected) external; +} diff --git a/packages/interfaces/contracts/horizon/IHorizonStaking.sol b/packages/interfaces/contracts/horizon/IHorizonStaking.sol index 4e680a1e5..9b16ad368 100644 --- a/packages/interfaces/contracts/horizon/IHorizonStaking.sol +++ b/packages/interfaces/contracts/horizon/IHorizonStaking.sol @@ -5,15 +5,14 @@ pragma solidity ^0.8.22; import { IHorizonStakingTypes } from "./internal/IHorizonStakingTypes.sol"; import { IHorizonStakingMain } from "./internal/IHorizonStakingMain.sol"; import { IHorizonStakingBase } from "./internal/IHorizonStakingBase.sol"; -import { IHorizonStakingExtension } from "./internal/IHorizonStakingExtension.sol"; /** * @title Complete interface for the Horizon Staking contract * @author Edge & Node - * @notice This interface exposes all functions implemented by the {HorizonStaking} contract and its extension - * {HorizonStakingExtension} as well as the custom data types used by the contract. + * @notice This interface exposes all functions implemented by the {HorizonStaking} contract + * as well as the custom data types used by the contract. * @dev Use this interface to interact with the Horizon Staking contract. * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. 
*/ -interface IHorizonStaking is IHorizonStakingTypes, IHorizonStakingBase, IHorizonStakingMain, IHorizonStakingExtension {} +interface IHorizonStaking is IHorizonStakingTypes, IHorizonStakingBase, IHorizonStakingMain {} diff --git a/packages/interfaces/contracts/horizon/IPaymentsEscrow.sol b/packages/interfaces/contracts/horizon/IPaymentsEscrow.sol index 9dbe9906a..a73866273 100644 --- a/packages/interfaces/contracts/horizon/IPaymentsEscrow.sol +++ b/packages/interfaces/contracts/horizon/IPaymentsEscrow.sol @@ -198,6 +198,25 @@ interface IPaymentsEscrow { */ function thaw(address collector, address receiver, uint256 tokens) external; + /** + * @notice Adjusts the thawing amount with a guard against timer reset. + * Caps the requested amount to the current balance. When decreasing, the timer is preserved. + * When increasing, the timer resets; if `evenIfTimerReset` is false and the timer would + * change, the call is a no-op and returns the current tokensThawing. + * Setting tokens to 0 cancels the thaw entirely. + * @param collector The address of the collector + * @param receiver The address of the receiver + * @param tokensToThaw The desired amount of tokens to thaw + * @param evenIfTimerReset If true, always proceed. If false, skip increases that would reset the timer. + * @return tokensThawing The resulting amount of tokens thawing after the operation + */ + function adjustThaw( + address collector, + address receiver, + uint256 tokensToThaw, + bool evenIfTimerReset + ) external returns (uint256 tokensThawing); + /** * @notice Cancels the thawing of escrow from a payer-collector-receiver's escrow account. 
* @param collector The address of the collector @@ -257,4 +276,19 @@ interface IPaymentsEscrow { * @return The balance of the payer-collector-receiver tuple */ function getBalance(address payer, address collector, address receiver) external view returns (uint256); + + /** + * @notice Escrow account details for a payer-collector-receiver tuple + * @param payer The address of the payer + * @param collector The address of the collector + * @param receiver The address of the receiver + * @return balance The total token balance + * @return tokensThawing The amount of tokens currently being thawed + * @return thawEndTimestamp The timestamp at which thawing period ends (zero if not thawing) + */ + function escrowAccounts( + address payer, + address collector, + address receiver + ) external view returns (uint256 balance, uint256 tokensThawing, uint256 thawEndTimestamp); } diff --git a/packages/interfaces/contracts/horizon/IRecurringCollector.sol b/packages/interfaces/contracts/horizon/IRecurringCollector.sol new file mode 100644 index 000000000..ef34f11bd --- /dev/null +++ b/packages/interfaces/contracts/horizon/IRecurringCollector.sol @@ -0,0 +1,499 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +import { IPaymentsCollector } from "./IPaymentsCollector.sol"; +import { IGraphPayments } from "./IGraphPayments.sol"; +import { IAuthorizable } from "./IAuthorizable.sol"; + +/** + * @title Interface for the {RecurringCollector} contract + * @author Edge & Node + * @dev Implements the {IPaymentCollector} interface as defined by the Graph + * Horizon payments protocol. + * @notice Implements a payments collector contract that can be used to collect + * recurrent payments. 
+ */ +interface IRecurringCollector is IAuthorizable, IPaymentsCollector { + /// @notice The state of an agreement + enum AgreementState { + NotAccepted, + Accepted, + CanceledByServiceProvider, + CanceledByPayer + } + + /// @notice The party that can cancel an agreement + enum CancelAgreementBy { + ServiceProvider, + Payer, + ThirdParty + } + + /// @notice Reasons why an agreement is not collectable + enum AgreementNotCollectableReason { + None, + InvalidAgreementState, + ZeroCollectionSeconds, + InvalidTemporalWindow + } + + /** + * @notice The Recurring Collection Agreement (RCA) + * @param deadline The deadline for accepting the RCA + * @param endsAt The timestamp when the agreement ends + * @param payer The address of the payer the RCA was issued by + * @param dataService The address of the data service the RCA was issued to + * @param serviceProvider The address of the service provider the RCA was issued to + * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection + * on top of the amount allowed for subsequent collections + * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second + * except for the first collection + * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections + * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection + * @param nonce A unique nonce for preventing collisions (user-chosen) + * @param metadata Arbitrary metadata to extend functionality if a data service requires it + * + */ + // solhint-disable-next-line gas-struct-packing + struct RecurringCollectionAgreement { + uint64 deadline; + uint64 endsAt; + address payer; + address dataService; + address serviceProvider; + uint256 maxInitialTokens; + uint256 maxOngoingTokensPerSecond; + uint32 minSecondsPerCollection; + uint32 maxSecondsPerCollection; + uint256 nonce; + bytes metadata; + } + + /** + * @notice The 
Recurring Collection Agreement Update (RCAU) + * @param agreementId The agreement ID of the RCAU + * @param deadline The deadline for upgrading the RCA + * @param endsAt The timestamp when the agreement ends + * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection + * on top of the amount allowed for subsequent collections + * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second + * except for the first collection + * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections + * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection + * @param nonce The nonce for preventing replay attacks (must be current nonce + 1) + * @param metadata Arbitrary metadata to extend functionality if a data service requires it + */ + // solhint-disable-next-line gas-struct-packing + struct RecurringCollectionAgreementUpdate { + bytes16 agreementId; + uint64 deadline; + uint64 endsAt; + uint256 maxInitialTokens; + uint256 maxOngoingTokensPerSecond; + uint32 minSecondsPerCollection; + uint32 maxSecondsPerCollection; + uint32 nonce; + bytes metadata; + } + + /** + * @notice The data for an agreement + * @dev This struct is used to store the data of an agreement in the contract + * @param dataService The address of the data service + * @param payer The address of the payer + * @param serviceProvider The address of the service provider + * @param acceptedAt The timestamp when the agreement was accepted + * @param lastCollectionAt The timestamp when the agreement was last collected at + * @param endsAt The timestamp when the agreement ends + * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection + * on top of the amount allowed for subsequent collections + * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second + * except for the 
first collection + * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections + * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection + * @param updateNonce The current nonce for updates (prevents replay attacks) + * @param canceledAt The timestamp when the agreement was canceled + * @param state The state of the agreement + */ + struct AgreementData { + address dataService; + address payer; + address serviceProvider; + uint64 acceptedAt; + uint64 lastCollectionAt; + uint64 endsAt; + uint256 maxInitialTokens; + uint256 maxOngoingTokensPerSecond; + uint32 minSecondsPerCollection; + uint32 maxSecondsPerCollection; + uint32 updateNonce; + uint64 canceledAt; + AgreementState state; + } + + /** + * @notice The params for collecting an agreement + * @param agreementId The agreement ID of the RCA + * @param collectionId The collection ID of the RCA + * @param tokens The amount of tokens to collect + * @param dataServiceCut The data service cut in parts per million + * @param receiverDestination The address where the collected fees should be sent + * @param maxSlippage Max acceptable tokens to lose due to rate limiting, or type(uint256).max to ignore + */ + struct CollectParams { + bytes16 agreementId; + bytes32 collectionId; + uint256 tokens; + uint256 dataServiceCut; + address receiverDestination; + uint256 maxSlippage; + } + + /** + * @notice Emitted when an agreement is accepted + * @param dataService The address of the data service + * @param payer The address of the payer + * @param serviceProvider The address of the service provider + * @param agreementId The agreement ID + * @param acceptedAt The timestamp when the agreement was accepted + * @param endsAt The timestamp when the agreement ends + * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection + * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be 
collected per second + * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections + * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection + */ + event AgreementAccepted( + address indexed dataService, + address indexed payer, + address indexed serviceProvider, + bytes16 agreementId, + uint64 acceptedAt, + uint64 endsAt, + uint256 maxInitialTokens, + uint256 maxOngoingTokensPerSecond, + uint32 minSecondsPerCollection, + uint32 maxSecondsPerCollection + ); + + /** + * @notice Emitted when an agreement is canceled + * @param dataService The address of the data service + * @param payer The address of the payer + * @param serviceProvider The address of the service provider + * @param agreementId The agreement ID + * @param canceledAt The timestamp when the agreement was canceled + * @param canceledBy The party that canceled the agreement + */ + event AgreementCanceled( + address indexed dataService, + address indexed payer, + address indexed serviceProvider, + bytes16 agreementId, + uint64 canceledAt, + CancelAgreementBy canceledBy + ); + + /** + * @notice Emitted when an agreement is updated + * @param dataService The address of the data service + * @param payer The address of the payer + * @param serviceProvider The address of the service provider + * @param agreementId The agreement ID + * @param updatedAt The timestamp when the agreement was updated + * @param endsAt The timestamp when the agreement ends + * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection + * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second + * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections + * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection + */ + event AgreementUpdated( + address indexed dataService, + 
address indexed payer, + address indexed serviceProvider, + bytes16 agreementId, + uint64 updatedAt, + uint64 endsAt, + uint256 maxInitialTokens, + uint256 maxOngoingTokensPerSecond, + uint32 minSecondsPerCollection, + uint32 maxSecondsPerCollection + ); + + /** + * @notice Emitted when an RCA is collected + * @param dataService The address of the data service + * @param payer The address of the payer + * @param serviceProvider The address of the service provider + * @param agreementId The agreement ID + * @param collectionId The collection ID + * @param tokens The amount of tokens collected + * @param dataServiceCut The tokens cut for the data service + */ + event RCACollected( + address indexed dataService, + address indexed payer, + address indexed serviceProvider, + bytes16 agreementId, + bytes32 collectionId, + uint256 tokens, + uint256 dataServiceCut + ); + + /** + * @notice Thrown when accepting an agreement with a zero ID + */ + error RecurringCollectorAgreementIdZero(); + + /** + * @notice Thrown when interacting with an agreement not owned by the message sender + * @param agreementId The agreement ID + * @param unauthorizedDataService The address of the unauthorized data service + */ + error RecurringCollectorDataServiceNotAuthorized(bytes16 agreementId, address unauthorizedDataService); + /** + * @notice Thrown when the data service is not authorized for the service provider + * @param dataService The address of the unauthorized data service + */ + error RecurringCollectorUnauthorizedDataService(address dataService); + + /** + * @notice Thrown when interacting with an agreement with an elapsed deadline + * @param currentTimestamp The current timestamp + * @param deadline The elapsed deadline timestamp + */ + error RecurringCollectorAgreementDeadlineElapsed(uint256 currentTimestamp, uint64 deadline); + + /** + * @notice Thrown when the signer is invalid + */ + error RecurringCollectorInvalidSigner(); + + /** + * @notice Thrown when the payment type is not 
IndexingFee + * @param invalidPaymentType The invalid payment type + */ + error RecurringCollectorInvalidPaymentType(IGraphPayments.PaymentTypes invalidPaymentType); + + /** + * @notice Thrown when the caller is not the data service the RCA was issued to + * @param unauthorizedCaller The address of the caller + * @param dataService The address of the data service + */ + error RecurringCollectorUnauthorizedCaller(address unauthorizedCaller, address dataService); + + /** + * @notice Thrown when calling collect() with invalid data + * @param invalidData The invalid data + */ + error RecurringCollectorInvalidCollectData(bytes invalidData); + + /** + * @notice Thrown when interacting with an agreement that has an incorrect state + * @param agreementId The agreement ID + * @param incorrectState The incorrect state + */ + error RecurringCollectorAgreementIncorrectState(bytes16 agreementId, AgreementState incorrectState); + + /** + * @notice Thrown when an agreement is not collectable + * @param agreementId The agreement ID + * @param reason The reason why the agreement is not collectable + */ + error RecurringCollectorAgreementNotCollectable(bytes16 agreementId, AgreementNotCollectableReason reason); + + /** + * @notice Thrown when accepting an agreement with an address that is not set + */ + error RecurringCollectorAgreementAddressNotSet(); + + /** + * @notice Thrown when accepting or upgrading an agreement with an elapsed endsAt + * @param currentTimestamp The current timestamp + * @param endsAt The agreement end timestamp + */ + error RecurringCollectorAgreementElapsedEndsAt(uint256 currentTimestamp, uint64 endsAt); + + /** + * @notice Thrown when accepting or upgrading an agreement with an invalid collection window + * @param allowedMinCollectionWindow The allowed minimum collection window + * @param minSecondsPerCollection The minimum seconds per collection + * @param maxSecondsPerCollection The maximum seconds per collection + */ + error 
RecurringCollectorAgreementInvalidCollectionWindow( + uint32 allowedMinCollectionWindow, + uint32 minSecondsPerCollection, + uint32 maxSecondsPerCollection + ); + + /** + * @notice Thrown when accepting or upgrading an agreement with an invalid duration + * @param requiredMinDuration The required minimum duration + * @param invalidDuration The invalid duration + */ + error RecurringCollectorAgreementInvalidDuration(uint32 requiredMinDuration, uint256 invalidDuration); + + /** + * @notice Thrown when calling collect() with a zero collection seconds + * @param agreementId The agreement ID + * @param currentTimestamp The current timestamp + * @param lastCollectionAt The timestamp when the last collection was done + * + */ + error RecurringCollectorZeroCollectionSeconds( + bytes16 agreementId, + uint256 currentTimestamp, + uint64 lastCollectionAt + ); + + /** + * @notice Thrown when calling collect() too soon + * @param agreementId The agreement ID + * @param secondsSinceLast Seconds since last collection + * @param minSeconds Minimum seconds between collections + */ + error RecurringCollectorCollectionTooSoon(bytes16 agreementId, uint32 secondsSinceLast, uint32 minSeconds); + + /** + * @notice Thrown when calling update() with an invalid nonce + * @param agreementId The agreement ID + * @param expected The expected nonce + * @param provided The provided nonce + */ + error RecurringCollectorInvalidUpdateNonce(bytes16 agreementId, uint32 expected, uint32 provided); + + /** + * @notice Thrown when collected tokens are less than requested beyond the allowed slippage + * @param requested The amount of tokens requested to collect + * @param actual The actual amount that would be collected + * @param maxSlippage The maximum allowed slippage + */ + error RecurringCollectorExcessiveSlippage(uint256 requested, uint256 actual, uint256 maxSlippage); + + /** + * @notice Thrown when a contract payer's eligibility oracle denies the service provider + * @param agreementId The 
agreement ID + * @param serviceProvider The service provider that is not eligible + */ + error RecurringCollectorCollectionNotEligible(bytes16 agreementId, address serviceProvider); + + /** + * @notice Thrown when the contract approver is not a contract + * @param approver The address that is not a contract + */ + error RecurringCollectorApproverNotContract(address approver); + + /** + * @notice Accept a Recurring Collection Agreement. + * @dev Caller must be the data service the RCA was issued to. + * If `signature` is non-empty: checks `rca.deadline >= block.timestamp` and verifies the ECDSA signature. + * If `signature` is empty: the payer must be a contract implementing {IAgreementOwner.approveAgreement} + * and must return the magic value for the RCA's EIP712 hash. + * @param rca The Recurring Collection Agreement to accept + * @param signature ECDSA signature bytes, or empty for contract-approved agreements + * @return agreementId The deterministically generated agreement ID + */ + function accept( + RecurringCollectionAgreement calldata rca, + bytes calldata signature + ) external returns (bytes16 agreementId); + + /** + * @notice Cancel an indexing agreement. + * @param agreementId The agreement's ID. + * @param by The party that is canceling the agreement. + */ + function cancel(bytes16 agreementId, CancelAgreementBy by) external; + + /** + * @notice Update a Recurring Collection Agreement. + * @dev Caller must be the data service for the agreement. + * If `signature` is non-empty: checks `rcau.deadline >= block.timestamp` and verifies the ECDSA signature. + * If `signature` is empty: the payer (stored in the agreement) must be a contract implementing + * {IAgreementOwner.approveAgreement} and must return the magic value for the RCAU's EIP712 hash. 
+ * @param rcau The Recurring Collection Agreement Update to apply + * @param signature ECDSA signature bytes, or empty for contract-approved updates + */ + function update(RecurringCollectionAgreementUpdate calldata rcau, bytes calldata signature) external; + + /** + * @notice Computes the hash of a RecurringCollectionAgreement (RCA). + * @param rca The RCA for which to compute the hash. + * @return The hash of the RCA. + */ + function hashRCA(RecurringCollectionAgreement calldata rca) external view returns (bytes32); + + /** + * @notice Computes the hash of a RecurringCollectionAgreementUpdate (RCAU). + * @param rcau The RCAU for which to compute the hash. + * @return The hash of the RCAU. + */ + function hashRCAU(RecurringCollectionAgreementUpdate calldata rcau) external view returns (bytes32); + + /** + * @notice Recovers the signer address of a signed RecurringCollectionAgreement (RCA). + * @param rca The RCA whose hash was signed. + * @param signature The ECDSA signature bytes. + * @return The address of the signer. + */ + function recoverRCASigner( + RecurringCollectionAgreement calldata rca, + bytes calldata signature + ) external view returns (address); + + /** + * @notice Recovers the signer address of a signed RecurringCollectionAgreementUpdate (RCAU). + * @param rcau The RCAU whose hash was signed. + * @param signature The ECDSA signature bytes. + * @return The address of the signer. + */ + function recoverRCAUSigner( + RecurringCollectionAgreementUpdate calldata rcau, + bytes calldata signature + ) external view returns (address); + + /** + * @notice Gets an agreement. + * @param agreementId The ID of the agreement to retrieve. + * @return The AgreementData struct containing the agreement's data. + */ + function getAgreement(bytes16 agreementId) external view returns (AgreementData memory); + + /** + * @notice Get the maximum tokens collectable in the next collection for an agreement. 
+ * @dev Computes the worst-case (maximum possible) claim amount based on current on-chain + * agreement state. For active agreements, uses `endsAt` as the upper bound (not block.timestamp). + * Returns 0 for NotAccepted, CanceledByServiceProvider, or fully expired agreements. + * @param agreementId The ID of the agreement + * @return The maximum tokens that could be collected in the next collection + */ + function getMaxNextClaim(bytes16 agreementId) external view returns (uint256); + + /** + * @notice Get collection info for an agreement + * @param agreement The agreement data + * @return isCollectable Whether the agreement is in a valid state that allows collection attempts, + * not that there are necessarily funds available to collect. + * @return collectionSeconds The valid collection duration in seconds (0 if not collectable) + * @return reason The reason why the agreement is not collectable (None if collectable) + */ + function getCollectionInfo( + AgreementData calldata agreement + ) external view returns (bool isCollectable, uint256 collectionSeconds, AgreementNotCollectableReason reason); + + /** + * @notice Generate a deterministic agreement ID from agreement parameters + * @param payer The address of the payer + * @param dataService The address of the data service + * @param serviceProvider The address of the service provider + * @param deadline The deadline for accepting the agreement + * @param nonce A unique nonce for preventing collisions + * @return agreementId The deterministically generated agreement ID + */ + function generateAgreementId( + address payer, + address dataService, + address serviceProvider, + uint64 deadline, + uint256 nonce + ) external pure returns (bytes16 agreementId); +} diff --git a/packages/interfaces/contracts/horizon/internal/IHorizonStakingBase.sol b/packages/interfaces/contracts/horizon/internal/IHorizonStakingBase.sol index c48f20099..4bc81d44f 100644 --- 
a/packages/interfaces/contracts/horizon/internal/IHorizonStakingBase.sol +++ b/packages/interfaces/contracts/horizon/internal/IHorizonStakingBase.sol @@ -13,7 +13,7 @@ import { ILinkedList } from "./ILinkedList.sol"; /** * @title Interface for the {HorizonStakingBase} contract. * @author Edge & Node - * @notice Provides getters for {HorizonStaking} and {HorizonStakingExtension} storage variables. + * @notice Provides getters for {HorizonStaking} storage variables. * @dev Most functions operate over {HorizonStaking} provisions. To uniquely identify a provision * functions take `serviceProvider` and `verifier` addresses. * @custom:security-contact Please email security+contracts@thegraph.com if you find any @@ -21,19 +21,15 @@ import { ILinkedList } from "./ILinkedList.sol"; */ interface IHorizonStakingBase { /** - * @notice Emitted when a service provider stakes tokens. - * @dev TRANSITION PERIOD: After transition period move to IHorizonStakingMain. Temporarily it - * needs to be here since it's emitted by {_stake} which is used by both {HorizonStaking} - * and {HorizonStakingExtension}. - * @param serviceProvider The address of the service provider. - * @param tokens The amount of tokens staked. + * @notice Thrown when using an invalid thaw request type. */ - event HorizonStakeDeposited(address indexed serviceProvider, uint256 tokens); + error HorizonStakingInvalidThawRequestType(); /** - * @notice Thrown when using an invalid thaw request type. + * @notice Gets the address of the subgraph data service. + * @return The address of the subgraph data service. */ - error HorizonStakingInvalidThawRequestType(); + function getSubgraphService() external view returns (address); /** * @notice Gets the details of a service provider. 
diff --git a/packages/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol b/packages/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol deleted file mode 100644 index d487b2eca..000000000 --- a/packages/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol +++ /dev/null @@ -1,215 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -pragma solidity ^0.8.22; - -// TODO: Re-enable and fix issues when publishing a new version -// solhint-disable gas-indexed-events - -import { IRewardsIssuer } from "../../contracts/rewards/IRewardsIssuer.sol"; - -/** - * @title Interface for {HorizonStakingExtension} contract. - * @author Edge & Node - * @notice Provides functions for managing legacy allocations. - * @custom:security-contact Please email security+contracts@thegraph.com if you find any - * bugs. We may have an active bug bounty program. - */ -interface IHorizonStakingExtension is IRewardsIssuer { - /** - * @dev Allocate GRT tokens for the purpose of serving queries of a subgraph deployment - * An allocation is created in the allocate() function and closed in closeAllocation() - * @param indexer The indexer address - * @param subgraphDeploymentID The subgraph deployment ID - * @param tokens The amount of tokens allocated to the subgraph deployment - * @param createdAtEpoch The epoch when the allocation was created - * @param closedAtEpoch The epoch when the allocation was closed - * @param collectedFees The amount of collected fees for the allocation - * @param __DEPRECATED_effectiveAllocation Deprecated field. 
- * @param accRewardsPerAllocatedToken Snapshot used for reward calculation - * @param distributedRebates The amount of collected rebates that have been rebated - */ - struct Allocation { - address indexer; - bytes32 subgraphDeploymentID; - uint256 tokens; - uint256 createdAtEpoch; - uint256 closedAtEpoch; - uint256 collectedFees; - uint256 __DEPRECATED_effectiveAllocation; - uint256 accRewardsPerAllocatedToken; - uint256 distributedRebates; - } - - /** - * @dev Possible states an allocation can be. - * States: - * - Null = indexer == address(0) - * - Active = not Null && tokens > 0 - * - Closed = Active && closedAtEpoch != 0 - */ - enum AllocationState { - Null, - Active, - Closed - } - - /** - * @notice Emitted when `indexer` close an allocation in `epoch` for `allocationID`. - * An amount of `tokens` get unallocated from `subgraphDeploymentID`. - * This event also emits the POI (proof of indexing) submitted by the indexer. - * `isPublic` is true if the sender was someone other than the indexer. - * @param indexer The indexer address - * @param subgraphDeploymentID The subgraph deployment ID - * @param epoch The protocol epoch the allocation was closed on - * @param tokens The amount of tokens unallocated from the allocation - * @param allocationID The allocation identifier - * @param sender The address closing the allocation - * @param poi The proof of indexing submitted by the sender - * @param isPublic True if the allocation was force closed by someone other than the indexer/operator - */ - event AllocationClosed( - address indexed indexer, - bytes32 indexed subgraphDeploymentID, - uint256 epoch, - uint256 tokens, - address indexed allocationID, - address sender, - bytes32 poi, - bool isPublic - ); - - /** - * @notice Emitted when `indexer` collects a rebate on `subgraphDeploymentID` for `allocationID`. 
- * `epoch` is the protocol epoch the rebate was collected on - * The rebate is for `tokens` amount which are being provided by `assetHolder`; `queryFees` - * is the amount up for rebate after `curationFees` are distributed and `protocolTax` is burnt. - * `queryRebates` is the amount distributed to the `indexer` with `delegationFees` collected - * and sent to the delegation pool. - * @param assetHolder The address of the asset holder, the entity paying the query fees - * @param indexer The indexer address - * @param subgraphDeploymentID The subgraph deployment ID - * @param allocationID The allocation identifier - * @param epoch The protocol epoch the rebate was collected on - * @param tokens The amount of tokens collected - * @param protocolTax The amount of tokens burnt as protocol tax - * @param curationFees The amount of tokens distributed to the curation pool - * @param queryFees The amount of tokens collected as query fees - * @param queryRebates The amount of tokens distributed to the indexer - * @param delegationRewards The amount of tokens collected from the delegation pool - */ - event RebateCollected( - address assetHolder, - address indexed indexer, - bytes32 indexed subgraphDeploymentID, - address indexed allocationID, - uint256 epoch, - uint256 tokens, - uint256 protocolTax, - uint256 curationFees, - uint256 queryFees, - uint256 queryRebates, - uint256 delegationRewards - ); - - /** - * @notice Emitted when `indexer` was slashed for a total of `tokens` amount. - * Tracks `reward` amount of tokens given to `beneficiary`. - * @param indexer The indexer address - * @param tokens The amount of tokens slashed - * @param reward The amount of reward tokens to send to a beneficiary - * @param beneficiary The address of a beneficiary to receive a reward for the slashing - */ - event StakeSlashed(address indexed indexer, uint256 tokens, uint256 reward, address beneficiary); - - /** - * @notice Close an allocation and free the staked tokens. 
- * To be eligible for rewards a proof of indexing must be presented. - * Presenting a bad proof is subject to slashable condition. - * To opt out of rewards set _poi to 0x0 - * @param allocationID The allocation identifier - * @param poi Proof of indexing submitted for the allocated period - */ - function closeAllocation(address allocationID, bytes32 poi) external; - - /** - * @notice Collect and rebate query fees to the indexer - * This function will accept calls with zero tokens. - * We use an exponential rebate formula to calculate the amount of tokens to rebate to the indexer. - * This implementation allows collecting multiple times on the same allocation, keeping track of the - * total amount rebated, the total amount collected and compensating the indexer for the difference. - * @param tokens Amount of tokens to collect - * @param allocationID Allocation where the tokens will be assigned - */ - function collect(uint256 tokens, address allocationID) external; - - /** - * @notice Slash the indexer stake. Delegated tokens are not subject to slashing. - * Note that depending on the state of the indexer's stake, the slashed amount might be smaller than the - * requested slash amount. This can happen if the indexer has moved a significant part of their stake to - * a provision. Any outstanding slashing amount should be settled using Horizon's slash function - * {IHorizonStaking.slash}. - * @dev Can only be called by the slasher role. - * @param indexer Address of indexer to slash - * @param tokens Amount of tokens to slash from the indexer stake - * @param reward Amount of reward tokens to send to a beneficiary - * @param beneficiary Address of a beneficiary to receive a reward for the slashing - */ - function legacySlash(address indexer, uint256 tokens, uint256 reward, address beneficiary) external; - - /** - * @notice (Legacy) Return true if operator is allowed for the service provider on the subgraph data service. 
- * @param operator Address of the operator - * @param indexer Address of the service provider - * @return True if operator is allowed for indexer, false otherwise - */ - function isOperator(address operator, address indexer) external view returns (bool); - - /** - * @notice Getter that returns if an indexer has any stake. - * @param indexer Address of the indexer - * @return True if indexer has staked tokens - */ - function hasStake(address indexer) external view returns (bool); - - /** - * @notice Get the total amount of tokens staked by the indexer. - * @param indexer Address of the indexer - * @return Amount of tokens staked by the indexer - */ - function getIndexerStakedTokens(address indexer) external view returns (uint256); - - /** - * @notice Return the allocation by ID. - * @param allocationID Address used as allocation identifier - * @return Allocation data - */ - function getAllocation(address allocationID) external view returns (Allocation memory); - - /** - * @notice Return the current state of an allocation - * @param allocationID Allocation identifier - * @return AllocationState enum with the state of the allocation - */ - function getAllocationState(address allocationID) external view returns (AllocationState); - - /** - * @notice Return if allocationID is used. - * @param allocationID Address used as signer by the indexer for an allocation - * @return True if allocationID already used - */ - function isAllocation(address allocationID) external view returns (bool); - - /** - * @notice Return the time in blocks to unstake - * Deprecated, now enforced by each data service (verifier) - * @return Thawing period in blocks - */ - function __DEPRECATED_getThawingPeriod() external view returns (uint64); - - /** - * @notice Return the address of the subgraph data service. 
- * @dev TRANSITION PERIOD: After transition period move to main HorizonStaking contract - * @return Address of the subgraph data service - */ - function getSubgraphService() external view returns (address); -} diff --git a/packages/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol b/packages/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol index 19c1e1cf8..1c87fee1e 100644 --- a/packages/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol +++ b/packages/interfaces/contracts/horizon/internal/IHorizonStakingMain.sol @@ -12,13 +12,8 @@ import { IHorizonStakingTypes } from "./IHorizonStakingTypes.sol"; * @title Inferface for the {HorizonStaking} contract. * @author Edge & Node * @notice Provides functions for managing stake, provisions, delegations, and slashing. - * @dev Note that this interface only includes the functions implemented by {HorizonStaking} contract, - * and not those implemented by {HorizonStakingExtension}. - * Do not use this interface to interface with the {HorizonStaking} contract, use {IHorizonStaking} for - * the complete interface. * @dev Most functions operate over {HorizonStaking} provisions. To uniquely identify a provision * functions take `serviceProvider` and `verifier` addresses. - * @dev TRANSITION PERIOD: After transition period rename to IHorizonStaking. * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. */ @@ -26,15 +21,14 @@ interface IHorizonStakingMain { // -- Events: stake -- /** - * @notice Emitted when a service provider unstakes tokens during the transition period. - * @param serviceProvider The address of the service provider - * @param tokens The amount of tokens now locked (including previously locked tokens) - * @param until The block number until the stake is locked + * @notice Emitted when a service provider stakes tokens. + * @param serviceProvider The address of the service provider. 
+ * @param tokens The amount of tokens staked. */ - event HorizonStakeLocked(address indexed serviceProvider, uint256 tokens, uint256 until); + event HorizonStakeDeposited(address indexed serviceProvider, uint256 tokens); /** - * @notice Emitted when a service provider withdraws tokens during the transition period. + * @notice Emitted when a service provider unstakes tokens. * @param serviceProvider The address of the service provider * @param tokens The amount of tokens withdrawn */ @@ -219,7 +213,7 @@ interface IHorizonStakingMain { /** * @notice Emitted when `delegator` withdrew delegated `tokens` from `indexer` using `withdrawDelegated`. - * @dev This event is for the legacy `withdrawDelegated` function. + * @dev This event is for the legacy `withdrawDelegated` function, only emitted for pre-horizon undelegations. * @param indexer The address of the indexer * @param delegator The address of the delegator * @param tokens The amount of tokens withdrawn @@ -324,12 +318,6 @@ interface IHorizonStakingMain { */ event AllowedLockedVerifierSet(address indexed verifier, bool allowed); - /** - * @notice Emitted when the legacy global thawing period is set to zero. - * @dev This marks the end of the transition period. - */ - event ThawingPeriodCleared(); - /** * @notice Emitted when the delegation slashing global flag is set. */ @@ -373,13 +361,6 @@ interface IHorizonStakingMain { */ error HorizonStakingNotAuthorized(address serviceProvider, address verifier, address caller); - /** - * @notice Thrown when attempting to create a provision with a verifier other than the - * subgraph data service. This restriction only applies during the transition period. - * @param verifier The verifier address - */ - error HorizonStakingInvalidVerifier(address verifier); - /** * @notice Thrown when attempting to create a provision with an invalid maximum verifier cut. 
* @param maxVerifierCut The maximum verifier cut @@ -407,14 +388,6 @@ interface IHorizonStakingMain { */ error HorizonStakingInsufficientIdleStake(uint256 tokens, uint256 minTokens); - /** - * @notice Thrown during the transition period when the service provider has insufficient stake to - * cover their existing legacy allocations. - * @param tokens The actual token amount - * @param minTokens The minimum required token amount - */ - error HorizonStakingInsufficientStakeForLegacyAllocations(uint256 tokens, uint256 minTokens); - // -- Errors: delegation -- /** @@ -480,18 +453,12 @@ interface IHorizonStakingMain { error HorizonStakingTooManyThawRequests(); /** - * @notice Thrown when attempting to withdraw tokens that have not thawed (legacy undelegate). + * @notice Thrown when attempting to withdraw tokens that have not thawed. + * @dev This error is only thrown for pre-horizon undelegations. */ error HorizonStakingNothingToWithdraw(); // -- Errors: misc -- - /** - * @notice Thrown during the transition period when attempting to withdraw tokens that are still thawing. - * @dev Note this thawing refers to the global thawing period applied to legacy allocated tokens, - * it does not refer to thaw requests. - * @param until The block number until the stake is locked - */ - error HorizonStakingStillThawing(uint256 until); /** * @notice Thrown when a service provider attempts to operate on verifiers that are not allowed. @@ -511,11 +478,6 @@ interface IHorizonStakingMain { */ error HorizonStakingInvalidDelegationFeeCut(uint256 feeCut); - /** - * @notice Thrown when a legacy slash fails. - */ - error HorizonStakingLegacySlashFailed(); - /** * @notice Thrown when there attempting to slash a provision with no tokens to slash. */ @@ -571,19 +533,12 @@ interface IHorizonStakingMain { /** * @notice Move idle stake back to the owner's account. 
- * Stake is removed from the protocol: - * - During the transition period it's locked for a period of time before it can be withdrawn - * by calling {withdraw}. - * - After the transition period it's immediately withdrawn. - * Note that after the transition period if there are tokens still locked they will have to be - * withdrawn by calling {withdraw}. + * Stake is immediately removed from the protocol. * @dev Requirements: * - `_tokens` cannot be zero. - * - `_serviceProvider` must have enough idle stake to cover the staking amount and any - * legacy allocation. + * - `_serviceProvider` must have enough idle stake to cover the staking amount. * - * Emits a {HorizonStakeLocked} event during the transition period. - * Emits a {HorizonStakeWithdrawn} event after the transition period. + * Emits a {HorizonStakeWithdrawn} event. * * @param tokens Amount of tokens to unstake */ @@ -592,8 +547,12 @@ interface IHorizonStakingMain { /** * @notice Withdraw service provider tokens once the thawing period (initiated by {unstake}) has passed. * All thawed tokens are withdrawn. - * @dev This is only needed during the transition period while we still have - * a global lock. After that, unstake() will automatically withdraw. + * This function is for backwards compatibility with the legacy staking contract. + * It only allows withdrawing tokens unstaked before horizon upgrade. + * @dev This function can't be removed in case there are still pre-horizon unstakes. + * + * Emits a {HorizonStakeWithdrawn} event. + * */ function withdraw() external; @@ -603,8 +562,6 @@ interface IHorizonStakingMain { * service, where the data service is the verifier. * This function can be called by the service provider or by an operator authorized by the provider * for this specific verifier. - * @dev During the transition period, only the subgraph data service can be used as a verifier. This - * prevents an escape hatch for legacy allocation stake. * @dev Requirements: * - `tokens` cannot be zero. 
* - The `serviceProvider` must have enough idle stake to cover the tokens to provision. @@ -826,7 +783,7 @@ interface IHorizonStakingMain { * - `newServiceProvider` and `newVerifier` must not be the zero address. * - `newServiceProvider` must have previously provisioned stake to `newVerifier`. * - * Emits {ThawRequestFulfilled}, {ThawRequestsFulfilled} and {DelegatedTokensWithdrawn} events. + * Emits {ThawRequestFulfilled} and {ThawRequestsFulfilled} events. * * @param oldServiceProvider The old service provider address * @param oldVerifier The old verifier address @@ -883,6 +840,7 @@ interface IHorizonStakingMain { * @notice Withdraw undelegated tokens from the subgraph data service provision after thawing. * This function is for backwards compatibility with the legacy staking contract. * It only allows withdrawing tokens undelegated before horizon upgrade. + * @dev This function can't be removed in case there are still pre-horizon undelegations. * @dev See {delegate}. * @param serviceProvider The service provider address * @param deprecated Deprecated parameter kept for backwards compatibility @@ -971,14 +929,6 @@ interface IHorizonStakingMain { */ function setDelegationSlashingEnabled() external; - /** - * @notice Clear the legacy global thawing period. - * This signifies the end of the transition period, after which no legacy allocations should be left. - * @dev This function can only be called by the contract governor. - * @dev Emits a {ThawingPeriodCleared} event. - */ - function clearThawingPeriod() external; - /** * @notice Sets the global maximum thawing period allowed for provisions. * @param maxThawingPeriod The new maximum thawing period, in seconds @@ -1004,8 +954,37 @@ interface IHorizonStakingMain { function isAuthorized(address serviceProvider, address verifier, address operator) external view returns (bool); /** - * @notice Get the address of the staking extension. 
- * @return The address of the staking extension + * @notice Withdraw service provider legacy locked tokens. + * This is a permissionless function that allows anyone to withdraw on behalf of a service provider. + * It only allows withdrawing tokens that were unstaked before the Horizon upgrade. + * @dev Tokens are always sent to the service provider, not the caller. + * + * Emits a {HorizonStakeWithdrawn} event. + * + * @param serviceProvider Address of service provider to withdraw funds from + */ + function forceWithdraw(address serviceProvider) external; + + /** + * @notice Withdraw delegator legacy undelegated tokens. + * This is a permissionless function that allows anyone to withdraw on behalf of a delegator. + * It only allows withdrawing tokens that were undelegated before the Horizon upgrade. + * @dev Tokens are always sent to the delegator, not the caller. + * + * Emits a {StakeDelegatedWithdrawn} event. + * + * @param serviceProvider The service provider address + * @param delegator The delegator address to withdraw funds for + * @return The amount of tokens withdrawn + */ + function forceWithdrawDelegated(address serviceProvider, address delegator) external returns (uint256); + + /** + * @notice Return if allocationID is used. + * @dev This function is used to check for allocation id collisions with legacy allocations + * that were created before the Horizon upgrade. 
+ * @param allocationID Address used as signer by the indexer for an allocation + * @return True if allocationID already used */ - function getStakingExtension() external view returns (address); + function isAllocation(address allocationID) external view returns (bool); } diff --git a/packages/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol b/packages/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol index e8fff211b..22cdb5b4b 100644 --- a/packages/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol +++ b/packages/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol @@ -200,4 +200,42 @@ interface IHorizonStakingTypes { uint256 tokensThawing; uint256 sharesThawing; } + + /** + * @notice Legacy allocation representation + * @dev Kept for storage compatibility and to check for allocation id collisions. + * @param indexer The indexer address + * @param subgraphDeploymentID The subgraph deployment ID + * @param tokens The amount of tokens allocated to the subgraph deployment + * @param createdAtEpoch The epoch when the allocation was created + * @param closedAtEpoch The epoch when the allocation was closed + * @param collectedFees The amount of collected fees for the allocation + * @param __DEPRECATED_effectiveAllocation Deprecated field + * @param accRewardsPerAllocatedToken Snapshot used for reward calculation + * @param distributedRebates The amount of collected rebates that have been rebated + */ + struct LegacyAllocation { + address indexer; + bytes32 subgraphDeploymentID; + uint256 tokens; + uint256 createdAtEpoch; + uint256 closedAtEpoch; + uint256 collectedFees; + uint256 __DEPRECATED_effectiveAllocation; + uint256 accRewardsPerAllocatedToken; + uint256 distributedRebates; + } + + /** + * @dev Possible states a legacy allocation can be. 
+ * States: + * - Null = indexer == address(0) + * - Active = not Null && tokens > 0 + * - Closed = Active && closedAtEpoch != 0 + */ + enum LegacyAllocationState { + Null, + Active, + Closed + } } diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol new file mode 100644 index 000000000..3e37e50e8 --- /dev/null +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +import { IPaymentsEscrow } from "../../horizon/IPaymentsEscrow.sol"; +import { IRecurringEscrowManagement } from "./IRecurringEscrowManagement.sol"; + +/** + * @title Interface for the {RecurringAgreementHelper} contract + * @author Edge & Node + * @notice Stateless, permissionless convenience contract for {RecurringAgreementManager}. + * Provides batch reconciliation (including cleanup of settled agreements) and + * read-only audit views. Independently deployable — better versions can be + * deployed without protocol changes. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. 
+ */ +interface IRecurringAgreementHelper { + // -- Audit Structs -- + + /** + * @notice Global financial summary of the RecurringAgreementManager + * @param tokenBalance GRT balance available to the manager + * @param sumMaxNextClaimAll Global sum of maxNextClaim across all (collector, provider) pairs + * @param totalEscrowDeficit Total unfunded escrow across all pairs + * @param totalAgreementCount Total number of tracked agreements + * @param escrowBasis Configured escrow level (Full / OnDemand / JustInTime) + * @param tempJit Whether the temporary JIT breaker is active + * @param collectorCount Number of collectors with active agreements + */ + struct GlobalAudit { + uint256 tokenBalance; + uint256 sumMaxNextClaimAll; + uint256 totalEscrowDeficit; + uint256 totalAgreementCount; + IRecurringEscrowManagement.EscrowBasis escrowBasis; + bool tempJit; + uint256 collectorCount; + } + + /** + * @notice Per-(collector, provider) pair financial summary + * @param collector The collector address + * @param provider The provider address + * @param agreementCount Number of agreements for this pair + * @param sumMaxNextClaim Sum of maxNextClaim for this pair + * @param escrow Escrow account state (balance, tokensThawing, thawEndTimestamp) + */ + struct PairAudit { + address collector; + address provider; + uint256 agreementCount; + uint256 sumMaxNextClaim; + IPaymentsEscrow.EscrowAccount escrow; + } + + // -- Audit Views -- + + /** + * @notice Global financial snapshot of the manager + * @return audit The global audit struct + */ + function auditGlobal() external view returns (GlobalAudit memory audit); + + /** + * @notice All pair summaries for a specific collector + * @param collector The collector address + * @return pairs Array of pair audit structs + */ + function auditPairs(address collector) external view returns (PairAudit[] memory pairs); + + /** + * @notice Paginated pair summaries for a collector + * @param collector The collector address + * @param offset Index 
to start from + * @param count Maximum number to return + * @return pairs Array of pair audit structs + */ + function auditPairs( + address collector, + uint256 offset, + uint256 count + ) external view returns (PairAudit[] memory pairs); + + /** + * @notice Single pair summary + * @param collector The collector address + * @param provider The provider address + * @return pair The pair audit struct + */ + function auditPair(address collector, address provider) external view returns (PairAudit memory pair); + + // -- Reconciliation -- + + /** + * @notice Reconcile all agreements for a provider, cleaning up fully settled ones. + * @dev Permissionless. O(n) gas — may hit gas limits with many agreements. + * @param provider The provider to reconcile + * @return removed Number of agreements removed during reconciliation + */ + function reconcile(address provider) external returns (uint256 removed); + + /** + * @notice Reconcile a batch of specific agreement IDs, cleaning up fully settled ones. + * @dev Permissionless. Skips non-existent agreements. + * @param agreementIds The agreement IDs to reconcile + * @return removed Number of agreements removed during reconciliation + */ + function reconcileBatch(bytes16[] calldata agreementIds) external returns (uint256 removed); + + /** + * @notice Reconcile all agreements for a (collector, provider) pair, then + * attempt to remove pair tracking if fully drained. + * @dev Permissionless. May require multiple calls if escrow is still thawing. + * @param collector The collector address + * @param provider The provider address + * @return removed Number of agreements removed + * @return pairExists True if the pair is still tracked + */ + function reconcilePair(address collector, address provider) external returns (uint256 removed, bool pairExists); + + /** + * @notice Reconcile all pairs for a collector, then attempt collector removal. + * @dev Permissionless. O(providers * agreements) gas. 
+ * @param collector The collector address + * @return removed Total agreements removed + * @return collectorExists True if the collector is still tracked + */ + function reconcileCollector(address collector) external returns (uint256 removed, bool collectorExists); + + /** + * @notice Reconcile all agreements across all collectors and providers. + * @dev Permissionless. May hit gas limits with many agreements. + * @return removed Total agreements removed + */ + function reconcileAll() external returns (uint256 removed); +} diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol new file mode 100644 index 000000000..43f72057a --- /dev/null +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +import { IRecurringCollector } from "../../horizon/IRecurringCollector.sol"; + +/** + * @title Interface for agreement lifecycle operations on {RecurringAgreementManager} + * @author Edge & Node + * @notice Functions for offering, updating, revoking, canceling, and + * reconciling managed RCAs (Recurring Collection Agreements). + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. 
+ */ +interface IRecurringAgreementManagement { + // -- Events -- + // solhint-disable gas-indexed-events + + /** + * @notice Emitted when an agreement is offered for escrow management + * @param agreementId The deterministic agreement ID + * @param provider The service provider for this agreement + * @param maxNextClaim The calculated maximum next claim amount + */ + event AgreementOffered(bytes16 indexed agreementId, address indexed provider, uint256 maxNextClaim); + + /** + * @notice Emitted when an agreement offer is revoked before acceptance + * @param agreementId The agreement ID + * @param provider The provider whose sumMaxNextClaim was reduced + */ + event OfferRevoked(bytes16 indexed agreementId, address indexed provider); + + /** + * @notice Emitted when an agreement is canceled via the data service + * @param agreementId The agreement ID + * @param provider The provider for this agreement + */ + event AgreementCanceled(bytes16 indexed agreementId, address indexed provider); + + /** + * @notice Emitted when an agreement is removed from escrow management + * @param agreementId The agreement ID being removed + * @param provider The provider whose sumMaxNextClaim was reduced + */ + event AgreementRemoved(bytes16 indexed agreementId, address indexed provider); + + /** + * @notice Emitted when an agreement's max next claim is recalculated + * @param agreementId The agreement ID + * @param oldMaxNextClaim The previous max next claim + * @param newMaxNextClaim The updated max next claim + */ + event AgreementReconciled(bytes16 indexed agreementId, uint256 oldMaxNextClaim, uint256 newMaxNextClaim); + + /** + * @notice Emitted when a pending agreement update is offered + * @param agreementId The agreement ID + * @param pendingMaxNextClaim The max next claim for the pending update + * @param updateNonce The RCAU nonce for the pending update + */ + event AgreementUpdateOffered(bytes16 indexed agreementId, uint256 pendingMaxNextClaim, uint32 updateNonce); + + /** + * 
@notice Emitted when a pending agreement update is revoked + * @param agreementId The agreement ID + * @param pendingMaxNextClaim The escrow that was freed + * @param updateNonce The RCAU nonce that was revoked + */ + event AgreementUpdateRevoked(bytes16 indexed agreementId, uint256 pendingMaxNextClaim, uint32 updateNonce); + + /** + * @notice Emitted when a (collector, provider) pair is removed from tracking + * @dev Emitted when the pair has no agreements AND escrow is fully recovered (balance zero). + * May cascade inline from agreement deletion or be triggered by {reconcileCollectorProvider}. + * @param collector The collector address + * @param provider The provider address + */ + event CollectorProviderRemoved(address indexed collector, address indexed provider); + + /** + * @notice Emitted when a collector is removed from the global tracking set + * @dev Emitted when the collector's last provider is removed, cascading from pair removal. + * @param collector The collector address + */ + event CollectorRemoved(address indexed collector); + + // solhint-enable gas-indexed-events + + // -- Errors -- + + /** + * @notice Thrown when trying to offer an agreement that is already offered + * @param agreementId The agreement ID + */ + error AgreementAlreadyOffered(bytes16 agreementId); + + /** + * @notice Thrown when trying to operate on an agreement that is not offered + * @param agreementId The agreement ID + */ + error AgreementNotOffered(bytes16 agreementId); + + /** + * @notice Thrown when the RCA payer is not this contract + * @param payer The payer address in the RCA + * @param expected The expected payer (this contract) + */ + error PayerMustBeManager(address payer, address expected); + + /** + * @notice Thrown when trying to revoke an agreement that is already accepted + * @param agreementId The agreement ID + */ + error AgreementAlreadyAccepted(bytes16 agreementId); + + /** + * @notice Thrown when trying to cancel an agreement that has not been accepted yet 
+ * @param agreementId The agreement ID + */ + error AgreementNotAccepted(bytes16 agreementId); + + /** + * @notice Thrown when the data service address has no deployed code + * @param dataService The address that was expected to be a contract + */ + error InvalidDataService(address dataService); + + /// @notice Thrown when the RCA service provider is the zero address + error ServiceProviderZeroAddress(); + + /** + * @notice Thrown when the data service address does not have DATA_SERVICE_ROLE + * @param dataService The unauthorized data service address + */ + error UnauthorizedDataService(address dataService); + + /// @notice Thrown when a collection callback is called by an address other than the agreement's collector + error OnlyAgreementCollector(); + + /** + * @notice Thrown when the RCAU nonce does not match the expected next update nonce + * @param agreementId The agreement ID + * @param expectedNonce The expected nonce (collector's updateNonce + 1) + * @param actualNonce The nonce provided in the RCAU + */ + error InvalidUpdateNonce(bytes16 agreementId, uint32 expectedNonce, uint32 actualNonce); + + /** + * @notice Thrown when the collector address does not have COLLECTOR_ROLE + * @param collector The unauthorized collector address + */ + error UnauthorizedCollector(address collector); + + // -- Functions -- + + /** + * @notice Offer an RCA for escrow management. Must be called before + * the data service accepts the agreement (with empty authData). + * @dev Calculates max next claim from RCA parameters, stores the authorized hash + * for the {IAgreementOwner} callback, and deposits into escrow. + * Requires AGREEMENT_MANAGER_ROLE. 
+ * @param rca The Recurring Collection Agreement parameters + * @param collector The RecurringCollector contract to use for this agreement + * @return agreementId The deterministic agreement ID + */ + function offerAgreement( + IRecurringCollector.RecurringCollectionAgreement calldata rca, + IRecurringCollector collector + ) external returns (bytes16 agreementId); + + /** + * @notice Offer a pending agreement update for escrow management. Must be called + * before the data service applies the update (with empty authData). + * @dev Stores the authorized RCAU hash for the {IAgreementOwner} callback and + * adds the pending update's max next claim to sumMaxNextClaim. Treats the + * pending update as a separate escrow entry alongside the current agreement. + * If a previous pending update exists, it is replaced. + * Requires AGREEMENT_MANAGER_ROLE. + * @param rcau The Recurring Collection Agreement Update parameters + * @return agreementId The agreement ID from the RCAU + */ + function offerAgreementUpdate( + IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau + ) external returns (bytes16 agreementId); + + /** + * @notice Revoke a pending agreement update, freeing its reserved escrow. + * @dev Requires AGREEMENT_MANAGER_ROLE. Reconciles the agreement first to + * detect if the update was already applied. If the pending update is still + * outstanding after reconciliation, clears it and frees the escrow. + * No-op (returns false) if no pending update exists after reconciliation. + * @param agreementId The agreement ID whose pending update to revoke + * @return revoked True if a pending update was cleared by this call + */ + function revokeAgreementUpdate(bytes16 agreementId) external returns (bool revoked); + + /** + * @notice Revoke an un-accepted agreement offer. Only for agreements not yet + * accepted in RecurringCollector. + * @dev Requires AGREEMENT_MANAGER_ROLE. 
Clears the agreement tracking and authorized hashes, + * freeing the reserved escrow. Any pending update is also cleared. + * No-op (returns true) if the agreement is not tracked. + * @param agreementId The agreement ID to revoke + * @return gone True if the agreement is not tracked (whether revoked by this call or already absent) + */ + function revokeOffer(bytes16 agreementId) external returns (bool gone); + + /** + * @notice Cancel an accepted agreement by routing through the data service. + * @dev Requires AGREEMENT_MANAGER_ROLE. Reads agreement state from RecurringCollector: + * - NotAccepted: reverts (use {revokeOffer} instead) + * - Accepted: cancels via the data service, then reconciles and updates escrow + * - Already canceled: idempotent — reconciles and updates escrow without re-canceling + * After cancellation, call {reconcileAgreement} once the collection window closes. + * @param agreementId The agreement ID to cancel + * @return gone True if the agreement is not tracked (already absent); false when + * the agreement is still tracked (caller should eventually call {reconcileAgreement}) + */ + function cancelAgreement(bytes16 agreementId) external returns (bool gone); + + /** + * @notice Reconcile a single agreement: re-read on-chain state, recalculate + * max next claim, update escrow, and delete the agreement if fully settled. + * @dev Permissionless. Handles all agreement states: + * - NotAccepted before deadline: keeps pre-offer estimate (returns true) + * - NotAccepted past deadline: zeroes and deletes (returns false) + * - Accepted/Canceled: reconciles maxNextClaim, deletes if zero + * Should be called after collections, cancellations, or agreement updates. 
+ * @param agreementId The agreement ID to reconcile + * @return exists True if the agreement is still tracked after this call + */ + function reconcileAgreement(bytes16 agreementId) external returns (bool exists); + + /** + * @notice Reconcile a (collector, provider) pair: rebalance escrow, withdraw + * completed thaws, and remove tracking if fully drained. + * @dev Permissionless. First updates escrow state (deposit deficit, thaw excess, + * withdraw completed thaws), then removes pair tracking when both pairAgreementCount + * and escrow balance are zero. Also serves as the permissionless "poke" to rebalance + * escrow after {IRecurringEscrowManagement-setEscrowBasis} or {IRecurringEscrowManagement-setTempJit} + * changes. Returns true if the pair still has agreements or escrow is still thawing. + * @param collector The collector address + * @param provider The provider address + * @return exists True if the pair is still tracked after this call + */ + function reconcileCollectorProvider(address collector, address provider) external returns (bool exists); +} diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol new file mode 100644 index 000000000..9d6223ad0 --- /dev/null +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +import { IDataServiceAgreements } from "../../data-service/IDataServiceAgreements.sol"; +import { IPaymentsEscrow } from "../../horizon/IPaymentsEscrow.sol"; +import { IRecurringCollector } from "../../horizon/IRecurringCollector.sol"; +import { IRecurringEscrowManagement } from "./IRecurringEscrowManagement.sol"; + +/** + * @title Interface for querying {RecurringAgreementManager} state + * @author Edge & Node + * @notice Read-only functions for inspecting managed agreements, escrow balances, + * and global tracking 
state. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +interface IRecurringAgreements { + // -- Structs -- + + /** + * @notice Tracked state for a managed agreement + * @dev An agreement is considered tracked when `provider != address(0)`. + * + * Storage layout (7 slots): + * slot 0: provider (20) + deadline (8) + pendingUpdateNonce (4) = 32 (packed) + * slot 1: maxNextClaim (32) + * slot 2: pendingUpdateMaxNextClaim (32) + * slot 3: agreementHash (32) + * slot 4: pendingUpdateHash (32) + * slot 5: dataService (20) (12 bytes free) + * slot 6: collector (20) (12 bytes free) + * + * @param provider The service provider for this agreement + * @param deadline The RCA deadline for acceptance (used to detect expired offers) + * @param pendingUpdateNonce The RCAU nonce for the pending update (0 means no pending) + * @param maxNextClaim The current maximum tokens claimable in the next collection + * @param pendingUpdateMaxNextClaim Max next claim for an offered-but-not-yet-applied update + * @param agreementHash The RCA hash stored for cleanup of authorizedHashes on deletion + * @param pendingUpdateHash The RCAU hash stored for cleanup of authorizedHashes on deletion + * @param dataService The data service contract for this agreement + * @param collector The RecurringCollector contract for this agreement + */ + struct AgreementInfo { + address provider; + uint64 deadline; + uint32 pendingUpdateNonce; + uint256 maxNextClaim; + uint256 pendingUpdateMaxNextClaim; + bytes32 agreementHash; + bytes32 pendingUpdateHash; + IDataServiceAgreements dataService; + IRecurringCollector collector; + } + + // -- View Functions -- + + /** + * @notice Get the sum of maxNextClaim for all managed agreements for a (collector, provider) pair + * @param collector The collector contract + * @param provider The provider address + * @return tokens The sum of max next claims + */ + function 
getSumMaxNextClaim(IRecurringCollector collector, address provider) external view returns (uint256 tokens); + + /** + * @notice Get the escrow account for a (collector, provider) pair + * @param collector The collector contract + * @param provider The provider address + * @return account The escrow account data + */ + function getEscrowAccount( + IRecurringCollector collector, + address provider + ) external view returns (IPaymentsEscrow.EscrowAccount memory account); + + /** + * @notice Get the max next claim for a specific agreement + * @param agreementId The agreement ID + * @return tokens The current max next claim stored for this agreement + */ + function getAgreementMaxNextClaim(bytes16 agreementId) external view returns (uint256 tokens); + + /** + * @notice Get the full tracked state for a specific agreement + * @param agreementId The agreement ID + * @return info The agreement info struct (all fields zero if not tracked) + */ + function getAgreementInfo(bytes16 agreementId) external view returns (AgreementInfo memory info); + + /** + * @notice Get the number of managed agreements for a provider + * @param provider The provider address + * @return count The count of tracked agreements + */ + function getProviderAgreementCount(address provider) external view returns (uint256 count); + + /** + * @notice Get all managed agreement IDs for a provider + * @dev Returns the full set of tracked agreement IDs. May be expensive for providers + * with many agreements — prefer the paginated overload or {getProviderAgreementCount} + * for on-chain use. 
+ * @param provider The provider address + * @return agreementIds The array of agreement IDs + */ + function getProviderAgreements(address provider) external view returns (bytes16[] memory agreementIds); + + /** + * @notice Get a paginated slice of managed agreement IDs for a provider + * @param provider The provider address + * @param offset The index to start from + * @param count Maximum number of IDs to return (clamped to available) + * @return agreementIds The array of agreement IDs + */ + function getProviderAgreements( + address provider, + uint256 offset, + uint256 count + ) external view returns (bytes16[] memory agreementIds); + + /** + * @notice Get the current escrow basis setting + * @return basis The configured escrow basis + */ + function getEscrowBasis() external view returns (IRecurringEscrowManagement.EscrowBasis basis); + + /** + * @notice Get the sum of maxNextClaim across all (collector, provider) pairs + * @dev Populated lazily through normal operations. May be stale if agreements were + * offered before this feature was deployed — run reconciliation to populate. + * @return tokens The global sum of max next claims + */ + function getSumMaxNextClaimAll() external view returns (uint256 tokens); + + /** + * @notice Get the total undeposited escrow across all providers + * @dev Maintained incrementally: sum of max(0, sumMaxNextClaim[p] - deposited[p]) + * for each provider p. Correctly accounts for per-provider deficits without + * allowing over-deposited providers to mask under-deposited ones. + * @return tokens The total unfunded amount + */ + function getTotalEscrowDeficit() external view returns (uint256 tokens); + + /** + * @notice Get the total number of tracked agreements across all providers + * @dev Populated lazily through normal operations. 
+ * @return count The total agreement count + */ + function getTotalAgreementCount() external view returns (uint256 count); + + /** + * @notice Check whether temporary JIT mode is currently active + * @dev When active, the system operates in JIT-only mode regardless of the configured + * escrow basis. The configured basis is preserved and takes effect again when + * temp JIT recovers (totalEscrowDeficit < available) or operator calls {setTempJit}. + * @return active True if temporary JIT mode is active + */ + function isTempJit() external view returns (bool active); + + /** + * @notice Get the number of collectors with active agreements + * @return count The number of tracked collectors + */ + function getCollectorCount() external view returns (uint256 count); + + /** + * @notice Get all collector addresses with active agreements + * @dev May be expensive for large sets — prefer the paginated overload for on-chain use. + * @return result Array of collector addresses + */ + function getCollectors() external view returns (address[] memory result); + + /** + * @notice Get a paginated slice of collector addresses + * @param offset The index to start from + * @param count Maximum number to return (clamped to available) + * @return result Array of collector addresses + */ + function getCollectors(uint256 offset, uint256 count) external view returns (address[] memory result); + + /** + * @notice Get the number of providers with active agreements for a collector + * @param collector The collector address + * @return count The number of tracked providers + */ + function getCollectorProviderCount(address collector) external view returns (uint256 count); + + /** + * @notice Get all provider addresses with active agreements for a collector + * @dev May be expensive for large sets — prefer the paginated overload for on-chain use. 
+ * @param collector The collector address + * @return result Array of provider addresses + */ + function getCollectorProviders(address collector) external view returns (address[] memory result); + + /** + * @notice Get a paginated slice of provider addresses for a collector + * @param collector The collector address + * @param offset The index to start from + * @param count Maximum number to return (clamped to available) + * @return result Array of provider addresses + */ + function getCollectorProviders( + address collector, + uint256 offset, + uint256 count + ) external view returns (address[] memory result); + + /** + * @notice Get the number of managed agreements for a (collector, provider) pair + * @param collector The collector address + * @param provider The provider address + * @return count The pair agreement count + */ + function getPairAgreementCount(address collector, address provider) external view returns (uint256 count); +} diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol new file mode 100644 index 000000000..ee4d3d35b --- /dev/null +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +/** + * @title Interface for escrow management operations on {RecurringAgreementManager} + * @author Edge & Node + * @notice Functions for configuring escrow deposits that back + * managed RCAs. Controls how aggressively escrow is pre-deposited. + * Escrow rebalancing is performed by {IRecurringAgreementManagement-reconcileCollectorProvider}. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. 
+ */ +interface IRecurringEscrowManagement { + // -- Enums -- + + /** + * @notice Escrow level — controls how aggressively escrow is pre-deposited. + * Ordered low-to-high. The configured level is the maximum aspiration; the system + * automatically degrades when balance is insufficient. `beforeCollection` (JIT top-up) + * is always active regardless of setting. + * + * @dev JustInTime=0 (thaw everything, pure JIT), OnDemand=1 (no deposits, hold at + * sumMaxNextClaim level), Full=2 (deposit sum of all maxNextClaim — current default). + */ + enum EscrowBasis { + JustInTime, + OnDemand, + Full + } + + // -- Events -- + // solhint-disable gas-indexed-events + + /** + * @notice Emitted when escrow is deposited for a provider + * @param provider The provider whose escrow was deposited into + * @param collector The collector address for the escrow account + * @param deposited The amount deposited + */ + event EscrowFunded(address indexed provider, address indexed collector, uint256 deposited); + + /** + * @notice Emitted when thawed escrow tokens are withdrawn + * @param provider The provider whose escrow was withdrawn + * @param collector The collector address for the escrow account + * @param tokens The amount of tokens withdrawn + */ + event EscrowWithdrawn(address indexed provider, address indexed collector, uint256 tokens); + + /** + * @notice Emitted when the escrow basis is changed + * @param oldBasis The previous escrow basis + * @param newBasis The new escrow basis + */ + event EscrowBasisSet(EscrowBasis indexed oldBasis, EscrowBasis indexed newBasis); + + /** + * @notice Emitted when temporary JIT mode is activated or deactivated + * @param active True when entering temp JIT, false when recovering + * @param automatic True when triggered by the system (beforeCollection/reconcileCollectorProvider), + * false when triggered by operator (setTempJit/setEscrowBasis) + */ + event TempJitSet(bool indexed active, bool indexed automatic); + + // solhint-enable 
gas-indexed-events + + // -- Functions -- + + /** + * @notice Set the escrow basis (maximum aspiration level). + * @dev Requires OPERATOR_ROLE. The system automatically degrades below the configured + * level when balance is insufficient. Changing the basis does not immediately rebalance + * escrow — call {IRecurringAgreementManagement-reconcileCollectorProvider} per pair to apply. + * @param basis The new escrow basis + */ + function setEscrowBasis(EscrowBasis basis) external; + + /** + * @notice Manually activate or deactivate temporary JIT mode + * @dev Requires OPERATOR_ROLE. When activated, the system operates in JIT-only mode + * regardless of the configured escrow basis. When deactivated, the configured basis + * takes effect again. Emits {TempJitSet}. + * @param active True to activate temp JIT, false to deactivate + */ + function setTempJit(bool active) external; +} diff --git a/packages/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol b/packages/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol index b43bc948a..90a311556 100644 --- a/packages/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol +++ b/packages/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol @@ -15,6 +15,9 @@ interface IIssuanceTarget { */ event IssuanceAllocatorSet(address indexed oldIssuanceAllocator, address indexed newIssuanceAllocator); + /// @notice Emitted before the issuance allocation changes + event BeforeIssuanceAllocationChange(); + /** * @notice Called by the issuance allocator before the target's issuance allocation changes * @dev The target should ensure that all issuance related calculations are up-to-date diff --git a/packages/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol b/packages/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol new file mode 100644 index 000000000..3e8dc3cfe --- /dev/null +++ b/packages/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol @@ -0,0 +1,20 @@ +// 
SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.7.6 || ^0.8.0; + +/** + * @title IProviderEligibility + * @author Edge & Node + * @notice Minimal interface for checking service provider eligibility to receive rewards or payments. + * Particularly relevant when paid by the protocol from issuance. + * @dev This is the interface that consumers (e.g., RewardsManager, RecurringAgreementManager) need to check + * if a provider is eligible to receive rewards. + */ +interface IProviderEligibility { + /** + * @notice Check if a service provider is eligible to receive rewards or other payments. + * @param provider Address of the service provider + * @return eligible True if the provider is eligible, false otherwise + */ + function isEligible(address provider) external view returns (bool eligible); +} diff --git a/packages/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol b/packages/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol new file mode 100644 index 000000000..69d450f54 --- /dev/null +++ b/packages/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol @@ -0,0 +1,38 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.7.6 || ^0.8.0; + +import { IProviderEligibility } from "./IProviderEligibility.sol"; + +/** + * @title Interface for provider eligibility oracle configuration + * @author Edge & Node + * @notice Configures the provider eligibility oracle that determines which providers + * are eligible for rewards or payments. 
+ */ +interface IProviderEligibilityManagement { + // -- Events -- + + /** + * @notice Emitted when the provider eligibility oracle is changed + * @param oldOracle The previous oracle (IProviderEligibility(address(0)) means none) + * @param newOracle The new oracle (IProviderEligibility(address(0)) means disabled) + */ + event ProviderEligibilityOracleSet(IProviderEligibility indexed oldOracle, IProviderEligibility indexed newOracle); + + // -- Functions -- + + /** + * @notice Set the provider eligibility oracle. + * @dev When set, {isEligible} delegates to this oracle. + * When set to IProviderEligibility(address(0)), all providers are considered eligible (passthrough). + * @param oracle The eligibility oracle (or IProviderEligibility(address(0)) to disable) + */ + function setProviderEligibilityOracle(IProviderEligibility oracle) external; + + /** + * @notice Get the current provider eligibility oracle + * @return oracle The eligibility oracle (IProviderEligibility(address(0)) means disabled) + */ + function getProviderEligibilityOracle() external view returns (IProviderEligibility oracle); +} diff --git a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol deleted file mode 100644 index 53c8acf85..000000000 --- a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol +++ /dev/null @@ -1,19 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -pragma solidity ^0.7.6 || ^0.8.0; - -/** - * @title IRewardsEligibility - * @author Edge & Node - * @notice Minimal interface for checking indexer rewards eligibility - * @dev This is the interface that consumers (e.g., RewardsManager) need to check - * if an indexer is eligible to receive rewards - */ -interface IRewardsEligibility { - /** - * @notice Check if an indexer is eligible to receive rewards - * @param indexer Address of the indexer - * @return True if the indexer is eligible to receive 
rewards, false otherwise - */ - function isEligible(address indexer) external view returns (bool); -} diff --git a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityAdministration.sol b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityAdministration.sol index e8fc2423f..2bc5e0498 100644 --- a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityAdministration.sol +++ b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityAdministration.sol @@ -34,4 +34,14 @@ interface IRewardsEligibilityAdministration is IRewardsEligibilityEvents { * @return True if successfully set (always the case for current code) */ function setEligibilityValidation(bool enabled) external returns (bool); + + /** + * @notice Set the indexer retention period for tracked indexer cleanup + * @dev Only callable by accounts with the OPERATOR_ROLE. Indexers whose last + * renewal timestamp is older than this period can be permissionlessly removed + * from the tracked set via {IRewardsEligibilityMaintenance-removeExpiredIndexer}. 
+ * @param indexerRetentionPeriod New retention period in seconds + * @return True if the state is as requested (retention period is set to the specified value) + */ + function setIndexerRetentionPeriod(uint256 indexerRetentionPeriod) external returns (bool); } diff --git a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityEvents.sol b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityEvents.sol index f2214ecb3..b26d9e2be 100644 --- a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityEvents.sol +++ b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityEvents.sol @@ -31,4 +31,14 @@ interface IRewardsEligibilityEvents { /// @param oldTimeout The previous timeout period in seconds /// @param newTimeout The new timeout period in seconds event OracleUpdateTimeoutUpdated(uint256 indexed oldTimeout, uint256 indexed newTimeout); + + /// @notice Emitted when an indexer is added to or removed from the tracked set + /// @param indexer The indexer address + /// @param tracked True when added (first renewal), false when removed (stale cleanup) + event IndexerTrackingUpdated(address indexed indexer, bool indexed tracked); + + /// @notice Emitted when the indexer retention period is updated + /// @param oldPeriod The previous retention period in seconds + /// @param newPeriod The new retention period in seconds + event IndexerRetentionPeriodSet(uint256 indexed oldPeriod, uint256 indexed newPeriod); } diff --git a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityHelper.sol b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityHelper.sol new file mode 100644 index 000000000..6a7894218 --- /dev/null +++ b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityHelper.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.7.6 || ^0.8.0; + +/** + * @title Interface for the {RewardsEligibilityHelper} contract + * @author 
Edge & Node + * @notice Stateless, permissionless convenience contract for {RewardsEligibilityOracle}. + * Provides batch removal of expired indexers from the tracked set. + * Independently deployable — better versions can be deployed without protocol changes. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +interface IRewardsEligibilityHelper { + /** + * @notice Remove expired indexers from the tracked set by explicit address list + * @dev Calls {IRewardsEligibilityMaintenance-removeExpiredIndexer} for each address. + * @param indexers Array of indexer addresses to check and remove + * @return gone Number of indexers now absent from the tracked set + */ + function removeExpiredIndexers(address[] calldata indexers) external returns (uint256 gone); + + /** + * @notice Remove all expired indexers from the tracked set + * @dev Snapshots the full tracked set then calls + * {IRewardsEligibilityMaintenance-removeExpiredIndexer} for each. + * May be expensive for large sets; prefer the paginated overload for gas-bounded calls. + * @return gone Number of indexers now absent from the tracked set + */ + function removeExpiredIndexers() external returns (uint256 gone); + + /** + * @notice Remove expired indexers from the tracked set by paginated scan + * @dev Reads a slice of the tracked set via {IRewardsEligibilityStatus-getIndexers} + * and calls {IRewardsEligibilityMaintenance-removeExpiredIndexer} for each. + * Note: removals shift set indices between pages, so some indexers may be skipped + * across consecutive paginated calls. Use the parameterless overload to process all. 
+ * @param offset Start index into the tracked indexer set + * @param count Maximum number of indexers to process + * @return gone Number of indexers now absent from the tracked set + */ + function removeExpiredIndexers(uint256 offset, uint256 count) external returns (uint256 gone); +} diff --git a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityMaintenance.sol b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityMaintenance.sol new file mode 100644 index 000000000..039fd0339 --- /dev/null +++ b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityMaintenance.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.7.6 || ^0.8.0; + +import { IRewardsEligibilityEvents } from "./IRewardsEligibilityEvents.sol"; + +/** + * @title IRewardsEligibilityMaintenance + * @author Edge & Node + * @notice Interface for permissionless maintenance of the tracked indexer set. + * Allows anyone to remove indexers whose last renewal is older than the + * configured indexer retention period. + */ +interface IRewardsEligibilityMaintenance is IRewardsEligibilityEvents { + /** + * @notice Remove an expired indexer from the tracked set + * @dev Permissionless. An indexer is expired when + * `block.timestamp >= renewalTimestamp + indexerRetentionPeriod`. + * Removes the indexer from the enumerable set and deletes its renewal timestamp. + * No-op (returns true) if the indexer is not in the tracked set. 
+ * @param indexer The indexer address to remove + * @return gone True if the indexer is absent from the tracked set (whether removed + * by this call or already not tracked); false if the indexer is still tracked (not expired) + */ + function removeExpiredIndexer(address indexer) external returns (bool gone); +} diff --git a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityStatus.sol b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityStatus.sol index d088e8168..b3ca7652c 100644 --- a/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityStatus.sol +++ b/packages/interfaces/contracts/issuance/eligibility/IRewardsEligibilityStatus.sol @@ -39,4 +39,31 @@ interface IRewardsEligibilityStatus { * @return True if eligibility validation is enabled, false otherwise */ function getEligibilityValidation() external view returns (bool); + + /** + * @notice Get the indexer retention period for tracked indexer cleanup + * @return The current indexer retention period in seconds + */ + function getIndexerRetentionPeriod() external view returns (uint256); + + /** + * @notice Get the number of tracked indexers + * @return count The number of indexers in the tracked set + */ + function getIndexerCount() external view returns (uint256 count); + + /** + * @notice Get all tracked indexer addresses + * @dev May be expensive for large sets — prefer the paginated overload for on-chain use. 
+ * @return result Array of tracked indexer addresses + */ + function getIndexers() external view returns (address[] memory result); + + /** + * @notice Get a paginated slice of tracked indexer addresses + * @param offset The index to start from + * @param count Maximum number to return (clamped to available) + * @return result Array of tracked indexer addresses + */ + function getIndexers(uint256 offset, uint256 count) external view returns (address[] memory result); } diff --git a/packages/interfaces/contracts/subgraph-service/IDisputeManager.sol b/packages/interfaces/contracts/subgraph-service/IDisputeManager.sol index f0661c6f4..555874b44 100644 --- a/packages/interfaces/contracts/subgraph-service/IDisputeManager.sol +++ b/packages/interfaces/contracts/subgraph-service/IDisputeManager.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.22; import { IAttestation } from "./internal/IAttestation.sol"; +import { IIndexingAgreement } from "./internal/IIndexingAgreement.sol"; import { ISubgraphService } from "./ISubgraphService.sol"; /** @@ -18,7 +19,8 @@ interface IDisputeManager { Null, IndexingDispute, QueryDispute, - LegacyDispute + __DEPRECATED_LegacyDispute, + IndexingFeeDispute } /// @notice Status of a dispute @@ -120,48 +122,55 @@ interface IDisputeManager { ); /** - * @notice Emitted when an indexing dispute is created for `allocationId` and `indexer` + * @notice Emitted when an indexing fee dispute is created for `agreementId` and `indexer` * by `fisherman`. - * The event emits the amount of `tokens` deposited by the fisherman. + * @dev The event emits the amount of `tokens` deposited by the fisherman. 
* @param disputeId The dispute id * @param indexer The indexer address * @param fisherman The fisherman address * @param tokens The amount of tokens deposited by the fisherman - * @param allocationId The allocation id - * @param poi The POI - * @param blockNumber The block number for which the POI was calculated + * @param payer The address of the payer of the indexing fee + * @param agreementId The agreement id + * @param poi The POI disputed + * @param entities The entities disputed * @param stakeSnapshot The stake snapshot of the indexer at the time of the dispute - * @param cancellableAt The timestamp when the dispute can be cancelled */ - event IndexingDisputeCreated( + event IndexingFeeDisputeCreated( bytes32 indexed disputeId, address indexed indexer, address indexed fisherman, uint256 tokens, - address allocationId, + address payer, + bytes16 agreementId, bytes32 poi, - uint256 blockNumber, - uint256 stakeSnapshot, - uint256 cancellableAt + uint256 entities, + uint256 stakeSnapshot ); /** - * @notice Emitted when a legacy dispute is created for `allocationId` and `fisherman`. - * The event emits the amount of `tokensSlash` to slash and `tokensRewards` to reward the fisherman. + * @notice Emitted when an indexing dispute is created for `allocationId` and `indexer` + * by `fisherman`. + * The event emits the amount of `tokens` deposited by the fisherman. 
* @param disputeId The dispute id * @param indexer The indexer address - * @param fisherman The fisherman address to be credited with the rewards + * @param fisherman The fisherman address + * @param tokens The amount of tokens deposited by the fisherman * @param allocationId The allocation id - * @param tokensSlash The amount of tokens to slash - * @param tokensRewards The amount of tokens to reward the fisherman + * @param poi The POI + * @param blockNumber The block number for which the POI was calculated + * @param stakeSnapshot The stake snapshot of the indexer at the time of the dispute + * @param cancellableAt The timestamp when the dispute can be cancelled */ - event LegacyDisputeCreated( + event IndexingDisputeCreated( bytes32 indexed disputeId, address indexed indexer, address indexed fisherman, + uint256 tokens, address allocationId, - uint256 tokensSlash, - uint256 tokensRewards + bytes32 poi, + uint256 blockNumber, + uint256 stakeSnapshot, + uint256 cancellableAt ); /** @@ -358,6 +367,18 @@ interface IDisputeManager { */ error DisputeManagerSubgraphServiceNotSet(); + + /** + * @notice Thrown when the Indexing Agreement is not disputable + * @param agreementId The indexing agreement id + */ + error DisputeManagerIndexingAgreementNotDisputable(bytes16 agreementId); + + /** + * @notice Thrown when the Indexing Agreement version is invalid + * @param version The indexing agreement version + */ + error DisputeManagerIndexingAgreementInvalidVersion(IIndexingAgreement.IndexingAgreementVersion version); + /** * @notice Initialize this contract. * @param owner The owner of the contract @@ -472,36 +493,26 @@ interface IDisputeManager { function createIndexingDispute(address allocationId, bytes32 poi, uint256 blockNumber) external returns (bytes32); /** - * @notice Creates and auto-accepts a legacy dispute. 
- * This disputes can be created to settle outstanding slashing amounts with an indexer that has been - * "legacy slashed" during or shortly after the transition period. See {HorizonStakingExtension.legacySlash} - * for more details. - * - * Note that this type of dispute: - * - can only be created by the arbitrator - * - does not require a bond - * - is automatically accepted when created - * - * Additionally, note that this type of disputes allow the arbitrator to directly set the slash and rewards - * amounts, bypassing the usual mechanisms that impose restrictions on those. This is done to give arbitrators - * maximum flexibility to ensure outstanding slashing amounts are settled fairly. This function needs to be removed - * after the transition period. + * @notice Create an indexing fee (version 1) dispute for the arbitrator to resolve. + * The disputes are created in reference to a version 1 indexing agreement and specifically + * a POI and entities provided when collecting that agreement. + * This function is called by a fisherman and it will pull `disputeDeposit` GRT tokens. * * Requirements: - * - Indexer must have been legacy slashed during or shortly after the transition period - * - Indexer must have provisioned funds to the Subgraph Service + * - fisherman must have previously approved this contract to pull `disputeDeposit` amount + * of tokens from their balance. 
* - * @param allocationId The allocation to dispute - * @param fisherman The fisherman address to be credited with the rewards - * @param tokensSlash The amount of tokens to slash - * @param tokensRewards The amount of tokens to reward the fisherman + * @param agreementId The indexing agreement to dispute + * @param poi The Proof of Indexing (POI) being disputed + * @param entities The number of entities disputed + * @param blockNumber The block number at which the indexing fee was collected * @return The dispute id */ - function createAndAcceptLegacyDispute( - address allocationId, - address fisherman, - uint256 tokensSlash, - uint256 tokensRewards + function createIndexingFeeDisputeV1( + bytes16 agreementId, + bytes32 poi, + uint256 entities, + uint256 blockNumber ) external returns (bytes32); // -- Arbitrator -- diff --git a/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol b/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol index db0bdae3f..be0bf05d2 100644 --- a/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol +++ b/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol @@ -1,10 +1,14 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.22; +import { IDataServiceAgreements } from "../data-service/IDataServiceAgreements.sol"; import { IDataServiceFees } from "../data-service/IDataServiceFees.sol"; import { IGraphPayments } from "../horizon/IGraphPayments.sol"; +import { IRecurringCollector } from "../horizon/IRecurringCollector.sol"; + import { IAllocation } from "./internal/IAllocation.sol"; +import { IIndexingAgreement } from "./internal/IIndexingAgreement.sol"; import { ILegacyAllocation } from "./internal/ILegacyAllocation.sol"; /** @@ -18,7 +22,7 @@ import { ILegacyAllocation } from "./internal/ILegacyAllocation.sol"; * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. 
*/ -interface ISubgraphService is IDataServiceFees { +interface ISubgraphService is IDataServiceAgreements, IDataServiceFees { /** * @notice Indexer details * @param url The URL where the indexer can be reached at for queries @@ -68,12 +72,25 @@ interface ISubgraphService is IDataServiceFees { event CurationCutSet(uint256 curationCut); // solhint-disable-previous-line gas-indexed-events + /** + * @notice Emitted when indexing fees cut is set + * @param indexingFeesCut The indexing fees cut + */ + event IndexingFeesCutSet(uint256 indexingFeesCut); + // solhint-disable-previous-line gas-indexed-events + /** * @notice Thrown when trying to set a curation cut that is not a valid PPM value * @param curationCut The curation cut value */ error SubgraphServiceInvalidCurationCut(uint256 curationCut); + /** + * @notice Thrown when trying to set an indexing fees cut that is not a valid PPM value + * @param indexingFeesCut The indexing fees cut value + */ + error SubgraphServiceInvalidIndexingFeesCut(uint256 indexingFeesCut); + /** * @notice Thrown when an indexer tries to register with an empty URL */ @@ -104,7 +121,7 @@ interface ISubgraphService is IDataServiceFees { error SubgraphServiceInconsistentCollection(uint256 balanceBefore, uint256 balanceAfter); /** - * @notice @notice Thrown when the service provider in the RAV does not match the expected indexer. + * @notice Thrown when the service provider does not match the expected indexer. * @param providedIndexer The address of the provided indexer. * @param expectedIndexer The address of the expected indexer. */ @@ -197,16 +214,6 @@ interface ISubgraphService is IDataServiceFees { */ function resizeAllocation(address indexer, address allocationId, uint256 tokens) external; - /** - * @notice Imports a legacy allocation id into the subgraph service - This is a governor only action that is required to prevent indexers from re-using allocation ids from the - legacy staking contract. 
- * @param indexer The address of the indexer - * @param allocationId The id of the allocation - * @param subgraphDeploymentId The id of the subgraph deployment - */ - function migrateLegacyAllocation(address indexer, address allocationId, bytes32 subgraphDeploymentId) external; - /** * @notice Sets a pause guardian * @param pauseGuardian The address of the pause guardian @@ -246,6 +253,13 @@ interface ISubgraphService is IDataServiceFees { */ function setCurationCut(uint256 curationCut) external; + /** + * @notice Sets the data service payment cut for indexing fees + * @dev Emits a {IndexingFeesCutSet} event + * @param indexingFeesCut The indexing fees cut for the payment type + */ + function setIndexingFeesCut(uint256 indexingFeesCut) external; + /** * @notice Sets the payments destination for an indexer to receive payments * @dev Emits a {PaymentsDestinationSet} event @@ -253,6 +267,51 @@ interface ISubgraphService is IDataServiceFees { */ function setPaymentsDestination(address newPaymentsDestination) external; + /** + * @notice Accept an indexing agreement. + * @dev If `signature` is non-empty it is treated as an ECDSA signature; if empty the payer + * must be a contract implementing {IAgreementOwner}. + * @param allocationId The id of the allocation + * @param rca The recurring collection agreement parameters + * @param signature ECDSA signature bytes, or empty for contract-approved agreements + * @return agreementId The ID of the accepted indexing agreement + */ + function acceptIndexingAgreement( + address allocationId, + IRecurringCollector.RecurringCollectionAgreement calldata rca, + bytes calldata signature + ) external returns (bytes16); + + /** + * @notice Update an indexing agreement. + * @dev If `signature` is non-empty it is treated as an ECDSA signature; if empty the payer + * must be a contract implementing {IAgreementOwner}. 
+ * @param indexer The address of the indexer + * @param rcau The recurring collector agreement update to apply + * @param signature ECDSA signature bytes, or empty for contract-approved updates + */ + function updateIndexingAgreement( + address indexer, + IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, + bytes calldata signature + ) external; + + /** + * @notice Cancel an indexing agreement by indexer / operator. + * @param indexer The address of the indexer + * @param agreementId The id of the indexing agreement + */ + function cancelIndexingAgreement(address indexer, bytes16 agreementId) external; + + /** + * @notice Get the indexing agreement for a given agreement ID. + * @param agreementId The id of the indexing agreement + * @return The indexing agreement details + */ + function getIndexingAgreement( + bytes16 agreementId + ) external view returns (IIndexingAgreement.AgreementWrapper memory); + /** * @notice Gets the details of an allocation * For legacy allocations use {getLegacyAllocation} diff --git a/packages/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol b/packages/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol index 5c04767c9..3454e7b8f 100644 --- a/packages/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol +++ b/packages/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol @@ -83,18 +83,6 @@ interface IAllocationManager { bool forceClosed ); - /** - * @notice Emitted when a legacy allocation is migrated into the subgraph service - * @param indexer The address of the indexer - * @param allocationId The id of the allocation - * @param subgraphDeploymentId The id of the subgraph deployment - */ - event LegacyAllocationMigrated( - address indexed indexer, - address indexed allocationId, - bytes32 indexed subgraphDeploymentId - ); - /** * @notice Emitted when the maximum POI staleness is updated * @param maxPOIStaleness The max POI staleness in seconds 
diff --git a/packages/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol b/packages/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol new file mode 100644 index 000000000..a3a6d02a3 --- /dev/null +++ b/packages/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.8.22; + +import { IRecurringCollector } from "../../horizon/IRecurringCollector.sol"; + +/** + * @title Interface for the {IndexingAgreement} library contract. + * @author Edge & Node + * @notice Interface for managing indexing agreement data and operations + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +interface IIndexingAgreement { + /// @notice Versions of Indexing Agreement Metadata + enum IndexingAgreementVersion { + V1 + } + + /** + * @notice Indexer Agreement Data + * @param allocationId The allocation ID + * @param version The indexing agreement version + */ + struct State { + address allocationId; + IndexingAgreementVersion version; + } + + /** + * @notice Wrapper for Indexing Agreement and Collector Agreement Data + * @param agreement The indexing agreement state + * @param collectorAgreement The collector agreement data + */ + struct AgreementWrapper { + State agreement; + IRecurringCollector.AgreementData collectorAgreement; + } +} diff --git a/packages/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol b/packages/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol index c5bf7f8c7..b6422fad8 100644 --- a/packages/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol +++ b/packages/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol @@ -23,14 +23,8 @@ interface ILegacyAllocation { } /** - * @notice Thrown when attempting to migrate an allocation with an existing id + * @notice Thrown when 
attempting to create an allocation with an existing legacy id * @param allocationId The allocation id */ error LegacyAllocationAlreadyExists(address allocationId); - - /** - * @notice Thrown when trying to get a non-existent allocation - * @param allocationId The allocation id - */ - error LegacyAllocationDoesNotExist(address allocationId); } diff --git a/packages/interfaces/contracts/toolshed/IPaymentsEscrowToolshed.sol b/packages/interfaces/contracts/toolshed/IPaymentsEscrowToolshed.sol index c7b9b81f2..c62b16173 100644 --- a/packages/interfaces/contracts/toolshed/IPaymentsEscrowToolshed.sol +++ b/packages/interfaces/contracts/toolshed/IPaymentsEscrowToolshed.sol @@ -1,14 +1,13 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.22; -// solhint-disable use-natspec - import { IPaymentsEscrow } from "../horizon/IPaymentsEscrow.sol"; -interface IPaymentsEscrowToolshed is IPaymentsEscrow { - function escrowAccounts( - address payer, - address collector, - address receiver - ) external view returns (EscrowAccount memory); -} +/** + * @title IPaymentsEscrowToolshed + * @author Edge & Node + * @notice Aggregate interface for PaymentsEscrow TypeScript type generation. + * @dev Combines all PaymentsEscrow interfaces into a single artifact for Wagmi and ethers + * type generation. Not intended for use in Solidity code. 
+ */ +interface IPaymentsEscrowToolshed is IPaymentsEscrow {} diff --git a/packages/interfaces/src/types/issuance.ts b/packages/interfaces/src/types/issuance.ts index 812b1853b..71902a19b 100644 --- a/packages/interfaces/src/types/issuance.ts +++ b/packages/interfaces/src/types/issuance.ts @@ -5,7 +5,7 @@ import type { IIssuanceAllocationStatus, IIssuanceTarget, IPausableControl, - IRewardsEligibility, + IProviderEligibility, IRewardsEligibilityAdministration, IRewardsEligibilityEvents, IRewardsEligibilityReporting, @@ -20,7 +20,7 @@ export { IIssuanceAllocationStatus as IssuanceAllocationStatus, IIssuanceTarget as IssuanceTarget, IPausableControl as PausableControl, - IRewardsEligibility as RewardsEligibility, + IProviderEligibility as ProviderEligibility, IRewardsEligibilityAdministration as RewardsEligibilityAdministration, IRewardsEligibilityEvents as RewardsEligibilityEvents, IRewardsEligibilityReporting as RewardsEligibilityReporting, diff --git a/packages/issuance/README.md b/packages/issuance/README.md index 0209e2d97..c6def2743 100644 --- a/packages/issuance/README.md +++ b/packages/issuance/README.md @@ -11,6 +11,7 @@ The issuance contracts handle token issuance mechanisms for The Graph protocol. 
- **[IssuanceAllocator](contracts/allocate/IssuanceAllocator.md)** - Central distribution hub for token issuance, allocating tokens to different protocol components based on configured rates - **[RewardsEligibilityOracle](contracts/eligibility/RewardsEligibilityOracle.md)** - Oracle-based eligibility system for indexer rewards with time-based expiration - **DirectAllocation** - Simple target contract implementation for receiving and distributing allocated tokens (deployed as PilotAllocation and other instances) +- **[RecurringAgreementManager](contracts/agreement/RecurringAgreementManager.md)** - Funds PaymentsEscrow deposits for RCAs using issuance tokens, tracking max-next-claim per agreement per indexer ## Development diff --git a/packages/issuance/contracts/agreement/RecurringAgreementHelper.sol b/packages/issuance/contracts/agreement/RecurringAgreementHelper.sol new file mode 100644 index 000000000..250ca600d --- /dev/null +++ b/packages/issuance/contracts/agreement/RecurringAgreementHelper.sol @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.8.27; + +import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; + +import { IRecurringAgreementHelper } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol"; +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +/** + * @title RecurringAgreementHelper + * @author Edge & Node + * @notice Stateless, permissionless convenience contract for {RecurringAgreementManager}. + * Provides batch reconciliation (including cleanup of settled agreements) and + * read-only audit views. 
Independently deployable — better versions can be + * deployed without protocol changes. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +contract RecurringAgreementHelper is IRecurringAgreementHelper { + /// @notice The RecurringAgreementManager contract address + address public immutable MANAGER; + + /// @notice The GRT token contract + IERC20 public immutable GRAPH_TOKEN; + + /// @notice Thrown when an address parameter is the zero address + error ZeroAddress(); + + /** + * @notice Constructor for the RecurringAgreementHelper contract + * @param manager Address of the RecurringAgreementManager contract + * @param graphToken Address of the GRT token contract + */ + constructor(address manager, IERC20 graphToken) { + require(manager != address(0), ZeroAddress()); + require(address(graphToken) != address(0), ZeroAddress()); + MANAGER = manager; + GRAPH_TOKEN = graphToken; + } + + // -- Audit Views -- + + /// @inheritdoc IRecurringAgreementHelper + function auditGlobal() external view returns (GlobalAudit memory audit) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + audit = GlobalAudit({ + tokenBalance: GRAPH_TOKEN.balanceOf(MANAGER), + sumMaxNextClaimAll: mgr.getSumMaxNextClaimAll(), + totalEscrowDeficit: mgr.getTotalEscrowDeficit(), + totalAgreementCount: mgr.getTotalAgreementCount(), + escrowBasis: mgr.getEscrowBasis(), + tempJit: mgr.isTempJit(), + collectorCount: mgr.getCollectorCount() + }); + } + + /// @inheritdoc IRecurringAgreementHelper + function auditPairs(address collector) external view returns (PairAudit[] memory pairs) { + return _auditPairs(collector, 0, type(uint256).max); + } + + /// @inheritdoc IRecurringAgreementHelper + function auditPairs( + address collector, + uint256 offset, + uint256 count + ) external view returns (PairAudit[] memory pairs) { + return _auditPairs(collector, offset, count); + } + + /// @inheritdoc 
IRecurringAgreementHelper + function auditPair(address collector, address provider) external view returns (PairAudit memory pair) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + pair = PairAudit({ + collector: collector, + provider: provider, + agreementCount: mgr.getPairAgreementCount(collector, provider), + sumMaxNextClaim: mgr.getSumMaxNextClaim(IRecurringCollector(collector), provider), + escrow: mgr.getEscrowAccount(IRecurringCollector(collector), provider) + }); + } + + // -- Reconciliation -- + + /// @inheritdoc IRecurringAgreementHelper + function reconcile(address provider) external returns (uint256 removed) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); + bytes16[] memory ids = mgr.getProviderAgreements(provider); + for (uint256 i = 0; i < ids.length; ++i) if (!mgt.reconcileAgreement(ids[i])) ++removed; + } + + /// @inheritdoc IRecurringAgreementHelper + function reconcileBatch(bytes16[] calldata agreementIds) external returns (uint256 removed) { + IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); + for (uint256 i = 0; i < agreementIds.length; ++i) { + if (!mgt.reconcileAgreement(agreementIds[i])) ++removed; + } + } + + /// @inheritdoc IRecurringAgreementHelper + function reconcilePair(address collector, address provider) external returns (uint256 removed, bool pairExists) { + removed = _reconcilePair(collector, provider); + pairExists = IRecurringAgreementManagement(MANAGER).reconcileCollectorProvider(collector, provider); + } + + /// @inheritdoc IRecurringAgreementHelper + function reconcileCollector(address collector) external returns (uint256 removed, bool collectorExists) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); + // Snapshot providers before iterating (removal modifies the set) + address[] memory providers = 
mgr.getCollectorProviders(collector); + for (uint256 p = 0; p < providers.length; ++p) { + removed += _reconcilePair(collector, providers[p]); + mgt.reconcileCollectorProvider(collector, providers[p]); + } + collectorExists = mgr.getCollectorProviders(collector).length != 0; + } + + /// @inheritdoc IRecurringAgreementHelper + function reconcileAll() external returns (uint256 removed) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); + // Snapshot collectors before iterating + address[] memory collectors = mgr.getCollectors(); + for (uint256 c = 0; c < collectors.length; ++c) { + address[] memory providers = mgr.getCollectorProviders(collectors[c]); + for (uint256 p = 0; p < providers.length; ++p) { + removed += _reconcilePair(collectors[c], providers[p]); + mgt.reconcileCollectorProvider(collectors[c], providers[p]); + } + } + } + + // -- Private Helpers -- + + function _auditPairs( + address collector, + uint256 offset, + uint256 count + ) private view returns (PairAudit[] memory pairs) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + address[] memory providers = mgr.getCollectorProviders(collector, offset, count); + pairs = new PairAudit[](providers.length); + for (uint256 i = 0; i < providers.length; ++i) { + pairs[i] = PairAudit({ + collector: collector, + provider: providers[i], + agreementCount: mgr.getPairAgreementCount(collector, providers[i]), + sumMaxNextClaim: mgr.getSumMaxNextClaim(IRecurringCollector(collector), providers[i]), + escrow: mgr.getEscrowAccount(IRecurringCollector(collector), providers[i]) + }); + } + } + + function _reconcilePair(address collector, address provider) private returns (uint256 removed) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); + bytes16[] memory ids = mgr.getProviderAgreements(provider); + for (uint256 i = 0; i < ids.length; 
++i) { + if (address(mgr.getAgreementInfo(ids[i]).collector) == collector) { + if (!mgt.reconcileAgreement(ids[i])) ++removed; + } + } + } +} diff --git a/packages/issuance/contracts/agreement/RecurringAgreementManager.md b/packages/issuance/contracts/agreement/RecurringAgreementManager.md new file mode 100644 index 000000000..b112e5037 --- /dev/null +++ b/packages/issuance/contracts/agreement/RecurringAgreementManager.md @@ -0,0 +1,168 @@ +# RecurringAgreementManager + +RCA-based payments require escrow pre-deposits — the payer must deposit enough tokens to cover the maximum that could be collected in the next collection window. RecurringAgreementManager automates this for protocol-escrowed agreements by receiving minted GRT from IssuanceAllocator and maintaining escrow balances sufficient to cover worst-case collection amounts. + +It implements seven interfaces: + +- **`IIssuanceTarget`** — receives minted GRT from IssuanceAllocator +- **`IAgreementOwner`** — authorizes RCA acceptance and updates via callback (replaces ECDSA signature) +- **`IRecurringAgreementManagement`** — agreement lifecycle: offer, update, revoke, cancel, remove, reconcile +- **`IRecurringEscrowManagement`** — escrow configuration: setEscrowBasis, setTempJit +- **`IProviderEligibilityManagement`** — eligibility oracle configuration: setProviderEligibilityOracle +- **`IRecurringAgreements`** — read-only queries: agreement info, escrow state, global tracking +- **`IProviderEligibility`** — delegates payment eligibility checks to an optional oracle + +## Escrow Structure + +One escrow account per (RecurringAgreementManager, collector, provider) tuple covers **all** managed RCAs for that (collector, provider) pair. 
Multiple agreements for the same pair share a single escrow balance: + +``` +sum(maxNextClaim + pendingUpdateMaxNextClaim for all active agreements for that provider) <= PaymentsEscrow.escrowAccounts[RecurringAgreementManager][RecurringCollector][provider] +``` + +Deposits never revert — `_escrowMinMax` degrades the mode when balance is insufficient, ensuring the deposit amount is always affordable. The `getEscrowAccount` view exposes the underlying escrow account for monitoring. + +## Hash Authorization + +The `authorizedHashes` mapping stores `hash → agreementId` rather than `hash → bool`. Hashes are automatically invalidated when agreements are deleted, preventing reuse without explicit cleanup. + +## Max Next Claim + +For accepted agreements, delegated to `RecurringCollector.getMaxNextClaim(agreementId)` as the single source of truth. For pre-accepted offers, a conservative estimate calculated at offer time: + +``` +maxNextClaim = maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens +``` + +| Agreement State | maxNextClaim | +| --------------------------- | -------------------------------------------------------------- | +| NotAccepted (pre-offered) | Stored estimate from `offerAgreement` | +| NotAccepted (past deadline) | 0 (expired offer, removable) | +| Accepted, never collected | Calculated by RecurringCollector (includes initial + ongoing) | +| Accepted, after collect | Calculated by RecurringCollector (ongoing only) | +| CanceledByPayer | Calculated by RecurringCollector (window frozen at canceledAt) | +| CanceledByServiceProvider | 0 | +| Fully expired | 0 | + +## Lifecycle + +### Offer → Accept (two-step) + +1. **Agreement manager** calls `offerAgreement(rca, collector)` — stores hash, calculates conservative maxNextClaim, deposits into escrow +2. 
**Service provider operator** calls `SubgraphService.acceptUnsignedIndexingAgreement(allocationId, rca)` — SubgraphService → RecurringCollector → `approveAgreement(hash)` callback to RecurringAgreementManager + +During the pending update window, both current and pending maxNextClaim are escrowed simultaneously (conservative). + +### Collect → Reconcile + +Collection flows through `SubgraphService → RecurringCollector → PaymentsEscrow`. RecurringCollector then calls `IAgreementOwner.afterCollection` on the payer, which triggers automatic reconciliation and escrow top-up in the same transaction. Manual reconcile is still available as a fallback. + +The manager exposes `reconcileAgreement` (gas-predictable, per-agreement). Batch convenience functions `reconcileBatch` (caller-selected list) and `reconcile(provider)` (iterates all agreements) are in the stateless `RecurringAgreementHelper` contract, which delegates each reconciliation back to the manager. + +### Revoke / Cancel / Remove + +- **`revokeOffer`** — withdraws an un-accepted offer +- **`cancelAgreement`** — for accepted agreements, routes cancellation through the data service then reconciles; idempotent for already-canceled agreements +- **`removeAgreement`** (permissionless) — cleans up agreements with maxNextClaim = 0 + +| State | Removable when | +| ------------------------- | ------------------------------------- | +| CanceledByServiceProvider | Immediately (maxNextClaim = 0) | +| CanceledByPayer | After collection window expires | +| Accepted past endsAt | After final collection window expires | +| NotAccepted (expired) | After `rca.deadline` passes | + +## Escrow Modes + +The configured `EscrowBasis` controls how aggressively escrow is pre-deposited. The setting is a **maximum aspiration** — the system automatically degrades when balance is insufficient. `beforeCollection` (JIT top-up) is always active regardless of setting, providing a safety net for any gap. 
+ +### Levels + +``` +enum EscrowBasis { JustInTime, OnDemand, Full } +``` + +Ordered low-to-high: + +| Level | min (deposit floor) | max (thaw ceiling) | Behavior | +| -------------- | ------------------- | ------------------ | -------------------------------------------------- | +| Full (2) | `sumMaxNextClaim` | `sumMaxNextClaim` | Current default. Deposits worst-case for all RCAs. | +| OnDemand (1) | 0 | `sumMaxNextClaim` | No deposits, holds at sumMaxNextClaim level. | +| JustInTime (0) | 0 | 0 | Thaws everything, pure JIT. | + +`sumMaxNextClaim` here means the per-(collector, provider) sum from storage. + +**Stability guarantee**: `min <= max` at every level. Deposit-then-immediate-reconcile at the same level never triggers a thaw. + +### Min/Max Model + +`_updateEscrow` uses two numbers from `_escrowMinMax` instead of a single `sumMaxNextClaim`: + +- **min**: deposit floor — deposit if effective balance is below this +- **max**: thaw ceiling — thaw effective balance above this (never resetting an active thaw timer) + +The split ensures smooth transitions between levels. When degradation occurs, min drops to 0 but max holds at `sumMaxNextClaim`, preventing oscillation. + +### Automatic Degradation + +The setting is a ceiling, not a mandate. **Full → OnDemand** when `available <= totalEscrowDeficit` (RAM's balance can't close the system-wide gap): min drops to 0, max stays at `sumMaxNextClaim`. Degradation never reaches JustInTime automatically — only explicit operator setting or temp JIT. + +### `_updateEscrow` Flow + +`_updateEscrow(collector, provider)` normalizes escrow state in four steps using (min, max) from `_escrowMinMax`. Steps 3 and 4 are mutually exclusive (min <= max); the thaw timer is never reset. + +1. **Adjust thaw target** — cancel/reduce thawing to keep min <= effective balance, or increase toward max (without timer reset) +2. **Withdraw completed thaw** — always withdrawn, even if within [min, max] +3. 
**Thaw excess** — if no thaw active, start new thaw for balance above max +4. **Deposit deficit** — if no thaw active, deposit to reach min + +### Reconciliation + +Per-agreement reconciliation (`reconcileAgreement`) re-reads agreement state from RecurringCollector and updates `sumMaxNextClaim`. Pair-level escrow rebalancing and cleanup is O(1) via `reconcileCollectorProvider(collector, provider)`. Batch helpers `reconcileBatch` and `reconcile(provider)` live in the separate `RecurringAgreementHelper` contract — they are stateless wrappers that call `reconcileAgreement` in a loop. + +### Global Tracking + +| Storage field | Type | Updated at | +| --------------------- | ------- | --------------------------------------------------------------------------- | +| `escrowBasis` | enum | `setEscrowBasis()` | +| `sumMaxNextClaimAll` | uint256 | Every `sumMaxNextClaim[c][p]` mutation | +| `totalEscrowDeficit` | uint256 | Every `sumMaxNextClaim[c][p]` or `escrowSnap[c][p]` mutation | +| `totalAgreementCount` | uint256 | `offerAgreement` (+1), `revokeOffer` (-1), `removeAgreement` (-1) | +| `escrowSnap[c][p]` | mapping | End of `_updateEscrow` via snapshot diff | +| `tempJit` | bool | `beforeCollection` (trip), `_updateEscrow` (recover), `setTempJit` (manual) | + +**`totalEscrowDeficit`** is maintained incrementally as `Σ max(0, sumMaxNextClaim[c][p] - escrowSnap[c][p])` per (collector, provider). Over-deposited pairs cannot mask another pair's deficit. At each mutation point, the pair's deficit is recomputed before and after. + +### Temp JIT + +If `beforeCollection` can't fully deposit for a collection (`available <= deficit`), it deposits nothing and activates temporary JIT mode. While active, `_escrowMinMax` returns `(0, 0)` — JIT-only behavior — regardless of the configured `escrowBasis`. The configured basis is preserved and takes effect again on recovery. 
+ +**Trigger**: `beforeCollection` activates temp JIT when `available <= deficit` (all-or-nothing: no partial deposits). + +**Recovery**: `_updateEscrow` clears temp JIT when `totalEscrowDeficit < available`. Recovery uses `totalEscrowDeficit` (sum of per-(collector, provider) deficits) rather than total sumMaxNextClaim, correctly accounting for already-deposited escrow. During JIT mode, thaws complete and tokens return to RAM, naturally building toward recovery. + +**Operator override**: `setTempJit(bool)` allows direct control. `setEscrowBasis` does not affect `tempJit` — the two settings are independent. + +### Upgrade Safety + +Default storage value 0 maps to `JustInTime`, so `initialize()` sets `escrowBasis = Full` as the default. Future upgrades must set it explicitly via a reinitializer. `tempJit` defaults to `false` (0), which is correct — no temp JIT on fresh deployment. + +## Roles + +- **GOVERNOR_ROLE**: Sets issuance allocator, eligibility oracle; grants `DATA_SERVICE_ROLE`, `COLLECTOR_ROLE`, and other roles; admin of `OPERATOR_ROLE` +- **OPERATOR_ROLE**: Sets escrow basis and temp JIT; admin of `AGREEMENT_MANAGER_ROLE` + - **AGREEMENT_MANAGER_ROLE**: Offers agreements/updates, revokes offers, cancels agreements +- **PAUSE_ROLE**: Pauses contract (reconcile/remove remain available) +- **Permissionless**: `reconcileAgreement`, `removeAgreement`, `reconcileCollectorProvider` +- **RecurringAgreementHelper** (permissionless): `reconcile(provider)`, `reconcileBatch(ids[])` + +## Deployment + +Prerequisites: GraphToken, PaymentsEscrow, RecurringCollector, IssuanceAllocator deployed. + +1. Deploy RecurringAgreementManager implementation (graphToken, paymentsEscrow) +2. Deploy TransparentUpgradeableProxy with implementation and initialization data +3. Initialize with governor address +4. Grant `OPERATOR_ROLE` to the operator account +5. Operator grants `AGREEMENT_MANAGER_ROLE` to the agreement manager account +6. 
Configure IssuanceAllocator to allocate tokens to RecurringAgreementManager diff --git a/packages/issuance/contracts/agreement/RecurringAgreementManager.sol b/packages/issuance/contracts/agreement/RecurringAgreementManager.sol new file mode 100644 index 000000000..0581e2f8d --- /dev/null +++ b/packages/issuance/contracts/agreement/RecurringAgreementManager.sol @@ -0,0 +1,956 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.8.27; + +// solhint-disable gas-strict-inequalities + +import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; + +import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; +import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IProviderEligibilityManagement } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol"; +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; + +import { EnumerableSetUtil } from "../common/EnumerableSetUtil.sol"; +import { BaseUpgradeable } from "../common/BaseUpgradeable.sol"; +import { IGraphToken } from "../common/IGraphToken.sol"; + +// 
solhint-disable-next-line no-unused-import +import { ERC165Upgradeable } from "@openzeppelin/contracts-upgradeable/utils/introspection/ERC165Upgradeable.sol"; // Used by @inheritdoc +import { ReentrancyGuardTransient } from "@openzeppelin/contracts/utils/ReentrancyGuardTransient.sol"; + +/** + * @title RecurringAgreementManager + * @author Edge & Node + * @notice Manages escrow for RCAs (Recurring Collection Agreements) using + * issuance-allocated tokens. This contract: + * + * 1. Receives minted GRT from IssuanceAllocator (implements IIssuanceTarget) + * 2. Authorizes RCA acceptance via contract callback (implements IAgreementOwner) + * 3. Tracks max-next-claim per agreement, deposits into PaymentsEscrow to cover maximums + * + * One escrow per (this contract, collector, provider) covers all managed + * RCAs for that (collector, provider) pair. Each agreement stores its own collector + * address. Other participants can independently use RCAs via the standard ECDSA-signed flow. + * + * @custom:security CEI — All external calls target trusted protocol contracts (PaymentsEscrow, + * GRT, RecurringCollector) except {cancelAgreement}'s call to the data service, which is + * governance-gated. {nonReentrant} on {cancelAgreement} provides defence-in-depth. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. 
+ */ +contract RecurringAgreementManager is + BaseUpgradeable, + ReentrancyGuardTransient, + IIssuanceTarget, + IAgreementOwner, + IRecurringAgreementManagement, + IRecurringEscrowManagement, + IProviderEligibilityManagement, + IRecurringAgreements, + IProviderEligibility +{ + using EnumerableSet for EnumerableSet.Bytes32Set; + using EnumerableSet for EnumerableSet.AddressSet; + using EnumerableSetUtil for EnumerableSet.AddressSet; + using EnumerableSetUtil for EnumerableSet.Bytes32Set; + + // -- Role Constants -- + + /** + * @notice Role identifier for approved data service contracts + * @dev Addresses with this role can be used as data services in offered agreements. + * Admin: GOVERNOR_ROLE + */ + bytes32 public constant DATA_SERVICE_ROLE = keccak256("DATA_SERVICE_ROLE"); + + /** + * @notice Role identifier for approved collector contracts + * @dev Addresses with this role can be used as collectors in offered agreements. + * Admin: GOVERNOR_ROLE + */ + bytes32 public constant COLLECTOR_ROLE = keccak256("COLLECTOR_ROLE"); + + /** + * @notice Role identifier for agreement lifecycle operations + * @dev Addresses with this role can offer, update, revoke, and cancel agreements. 
+ * Admin: OPERATOR_ROLE + */ + bytes32 public constant AGREEMENT_MANAGER_ROLE = keccak256("AGREEMENT_MANAGER_ROLE"); + + // -- Immutables -- + + /// @notice The PaymentsEscrow contract + /// @custom:oz-upgrades-unsafe-allow state-variable-immutable + IPaymentsEscrow public immutable PAYMENTS_ESCROW; + + // -- Storage (ERC-7201) -- + + /// @custom:storage-location erc7201:graphprotocol.issuance.storage.RecurringAgreementManager + struct RecurringAgreementManagerStorage { + /// @notice Authorized agreement hashes — maps hash to agreementId (bytes16(0) = not authorized) + mapping(bytes32 agreementHash => bytes16) authorizedHashes; + /// @notice Per-agreement tracking data + mapping(bytes16 agreementId => AgreementInfo) agreements; + /// @notice Sum of maxNextClaim for all agreements per (collector, provider) pair + mapping(address collector => mapping(address provider => uint256)) sumMaxNextClaim; + /// @notice Set of agreement IDs per service provider (stored as bytes32 for EnumerableSet) + mapping(address provider => EnumerableSet.Bytes32Set) providerAgreementIds; + /// @notice Sum of sumMaxNextClaim across all (collector, provider) pairs + uint256 sumMaxNextClaimAll; + /// @notice Total unfunded escrow: sum of max(0, sumMaxNextClaim[c][p] - escrowSnap[c][p]) + uint256 totalEscrowDeficit; + /// @notice Total number of tracked agreements across all providers + uint256 totalAgreementCount; + /// @notice Last known escrow balance per (collector, provider) pair (for snapshot diff) + mapping(address collector => mapping(address provider => uint256)) escrowSnap; + /// @notice Optional oracle for checking payment eligibility of service providers + IProviderEligibility providerEligibilityOracle; + /// @notice Set of all collector addresses with active agreements + EnumerableSet.AddressSet collectors; + /// @notice Set of provider addresses per collector + mapping(address collector => EnumerableSet.AddressSet) collectorProviders; + /// @notice Number of agreements per 
(collector, provider) pair + mapping(address collector => mapping(address provider => uint256)) pairAgreementCount; + /// @notice Governance-configured escrow level (not modified by temp JIT) + EscrowBasis escrowBasis; + /// @notice Whether temporary JIT mode is active (beforeCollection couldn't deposit) + bool tempJit; + } + + // keccak256(abi.encode(uint256(keccak256("graphprotocol.issuance.storage.RecurringAgreementManager")) - 1)) & ~bytes32(uint256(0xff)) + bytes32 private constant RECURRING_AGREEMENT_MANAGER_STORAGE_LOCATION = + 0x13814b254ec9c757012be47b3445539ef5e5e946eb9d2ef31ea6d4423bf88b00; + + // -- Constructor -- + + /** + * @notice Constructor for the RecurringAgreementManager contract + * @param graphToken The Graph Token contract + * @param paymentsEscrow The PaymentsEscrow contract + * @custom:oz-upgrades-unsafe-allow constructor + */ + constructor(IGraphToken graphToken, IPaymentsEscrow paymentsEscrow) BaseUpgradeable(graphToken) { + PAYMENTS_ESCROW = paymentsEscrow; + } + + // -- Initialization -- + + /** + * @notice Initialize the RecurringAgreementManager contract + * @param governor Address that will have the GOVERNOR_ROLE + */ + function initialize(address governor) external virtual initializer { + __BaseUpgradeable_init(governor); + _setRoleAdmin(DATA_SERVICE_ROLE, GOVERNOR_ROLE); + _setRoleAdmin(COLLECTOR_ROLE, GOVERNOR_ROLE); + _setRoleAdmin(AGREEMENT_MANAGER_ROLE, OPERATOR_ROLE); + _getStorage().escrowBasis = EscrowBasis.Full; + } + + // -- ERC165 -- + + /// @inheritdoc ERC165Upgradeable + function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) { + return + interfaceId == type(IIssuanceTarget).interfaceId || + interfaceId == type(IAgreementOwner).interfaceId || + interfaceId == type(IRecurringAgreementManagement).interfaceId || + interfaceId == type(IRecurringEscrowManagement).interfaceId || + interfaceId == type(IProviderEligibilityManagement).interfaceId || + interfaceId == 
type(IRecurringAgreements).interfaceId || + interfaceId == type(IProviderEligibility).interfaceId || + super.supportsInterface(interfaceId); + } + + // -- IIssuanceTarget -- + + /// @inheritdoc IIssuanceTarget + function beforeIssuanceAllocationChange() external virtual override {} + + /// @inheritdoc IIssuanceTarget + /// @dev No-op: RecurringAgreementManager receives tokens via transfer, does not need the allocator address. + function setIssuanceAllocator(address /* issuanceAllocator */) external virtual override onlyRole(GOVERNOR_ROLE) {} + + // -- IAgreementOwner -- + + /// @inheritdoc IAgreementOwner + function approveAgreement(bytes32 agreementHash) external view override returns (bytes4) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + bytes16 agreementId = $.authorizedHashes[agreementHash]; + + if (agreementId == bytes16(0) || $.agreements[agreementId].provider == address(0)) return bytes4(0); + + return IAgreementOwner.approveAgreement.selector; + } + + /// @inheritdoc IAgreementOwner + function beforeCollection(bytes16 agreementId, uint256 tokensToCollect) external override { + RecurringAgreementManagerStorage storage $ = _getStorage(); + AgreementInfo storage agreement = $.agreements[agreementId]; + address provider = agreement.provider; + if (provider == address(0)) return; + _requireCollector(agreement); + + // JIT top-up: deposit only when escrow balance cannot cover this collection + uint256 escrowBalance = _fetchEscrowAccount(msg.sender, provider).balance; + if (tokensToCollect <= escrowBalance) return; + + // Strict <: when deficit == available, enter tempJit rather than depleting entire balance + uint256 deficit = tokensToCollect - escrowBalance; + if (deficit < GRAPH_TOKEN.balanceOf(address(this))) { + GRAPH_TOKEN.approve(address(PAYMENTS_ESCROW), deficit); + PAYMENTS_ESCROW.deposit(msg.sender, provider, deficit); + } else if (!$.tempJit) { + $.tempJit = true; + emit TempJitSet(true, true); + } + } + + /// @inheritdoc 
IAgreementOwner + function afterCollection(bytes16 agreementId, uint256 /* tokensCollected */) external override { + RecurringAgreementManagerStorage storage $ = _getStorage(); + AgreementInfo storage agreement = $.agreements[agreementId]; + if (agreement.provider == address(0)) return; + _requireCollector(agreement); + + _reconcileAndUpdateEscrow($, agreementId); + } + + // -- IRecurringAgreementManagement -- + + /// @inheritdoc IRecurringAgreementManagement + function offerAgreement( + IRecurringCollector.RecurringCollectionAgreement calldata rca, + IRecurringCollector collector + ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bytes16 agreementId) { + require(rca.payer == address(this), PayerMustBeManager(rca.payer, address(this))); + require(rca.serviceProvider != address(0), ServiceProviderZeroAddress()); + require(hasRole(DATA_SERVICE_ROLE, rca.dataService), UnauthorizedDataService(rca.dataService)); + require(hasRole(COLLECTOR_ROLE, address(collector)), UnauthorizedCollector(address(collector))); + + RecurringAgreementManagerStorage storage $ = _getStorage(); + + agreementId = collector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + require($.agreements[agreementId].provider == address(0), AgreementAlreadyOffered(agreementId)); + + bytes32 agreementHash = collector.hashRCA(rca); + uint256 maxNextClaim = _createAgreement($, agreementId, rca, collector, agreementHash); + _updateEscrow($, address(collector), rca.serviceProvider); + + emit AgreementOffered(agreementId, rca.serviceProvider, maxNextClaim); + } + + /// @inheritdoc IRecurringAgreementManagement + function offerAgreementUpdate( + IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau + ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bytes16 agreementId) { + agreementId = rcau.agreementId; + RecurringAgreementManagerStorage storage $ = _getStorage(); + AgreementInfo storage agreement = 
$.agreements[agreementId]; + require(agreement.provider != address(0), AgreementNotOffered(agreementId)); + + // Reconcile against on-chain state before layering a new pending update, + // so escrow accounting is current and we can validate the nonce. + _reconcileAgreement($, agreementId); + + // Validate nonce: must be the next expected nonce on the collector + IRecurringCollector.AgreementData memory rca = agreement.collector.getAgreement(agreementId); + uint32 expectedNonce = rca.updateNonce + 1; + require(rcau.nonce == expectedNonce, InvalidUpdateNonce(agreementId, expectedNonce, rcau.nonce)); + + // Clean up old pending hash if replacing + if (agreement.pendingUpdateHash != bytes32(0)) delete $.authorizedHashes[agreement.pendingUpdateHash]; + + // Authorize the RCAU hash for the IAgreementOwner callback + bytes32 updateHash = agreement.collector.hashRCAU(rcau); + $.authorizedHashes[updateHash] = agreementId; + agreement.pendingUpdateNonce = rcau.nonce; + agreement.pendingUpdateHash = updateHash; + + uint256 pendingMaxNextClaim = _computeMaxFirstClaim( + rcau.maxOngoingTokensPerSecond, + rcau.maxSecondsPerCollection, + rcau.maxInitialTokens + ); + _setAgreementMaxNextClaim($, agreementId, pendingMaxNextClaim, true); + _updateEscrow($, address(agreement.collector), agreement.provider); + + emit AgreementUpdateOffered(agreementId, pendingMaxNextClaim, rcau.nonce); + } + + /// @inheritdoc IRecurringAgreementManagement + function revokeAgreementUpdate( + bytes16 agreementId + ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bool revoked) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + AgreementInfo storage agreement = $.agreements[agreementId]; + require(agreement.provider != address(0), AgreementNotOffered(agreementId)); + + // Reconcile first — the update may have been accepted since the offer was made + _reconcileAgreement($, agreementId); + + if (agreement.pendingUpdateHash == bytes32(0)) return false; + + uint256 
pendingMaxClaim = agreement.pendingUpdateMaxNextClaim; + uint32 nonce = agreement.pendingUpdateNonce; + + _setAgreementMaxNextClaim($, agreementId, 0, true); + delete $.authorizedHashes[agreement.pendingUpdateHash]; + agreement.pendingUpdateNonce = 0; + agreement.pendingUpdateHash = bytes32(0); + + _updateEscrow($, address(agreement.collector), agreement.provider); + + emit AgreementUpdateRevoked(agreementId, pendingMaxClaim, nonce); + return true; + } + + /// @inheritdoc IRecurringAgreementManagement + function revokeOffer( + bytes16 agreementId + ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bool gone) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + AgreementInfo storage agreement = $.agreements[agreementId]; + if (agreement.provider == address(0)) return true; + + // Only revoke un-accepted agreements — accepted ones must be canceled via cancelAgreement + IRecurringCollector.AgreementData memory rca = agreement.collector.getAgreement(agreementId); + require(rca.state == IRecurringCollector.AgreementState.NotAccepted, AgreementAlreadyAccepted(agreementId)); + + address provider = _deleteAgreement($, agreementId, agreement); + emit OfferRevoked(agreementId, provider); + return true; + } + + /// @inheritdoc IRecurringAgreementManagement + function cancelAgreement( + bytes16 agreementId + ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused nonReentrant returns (bool gone) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + AgreementInfo storage agreement = $.agreements[agreementId]; + if (agreement.provider == address(0)) return true; + + IRecurringCollector.AgreementData memory rca = agreement.collector.getAgreement(agreementId); + + // Not accepted — use revokeOffer instead + require(rca.state != IRecurringCollector.AgreementState.NotAccepted, AgreementNotAccepted(agreementId)); + + // If still active, route cancellation through the data service. 
+ // Note: external call before state update — safe because caller must hold + // AGREEMENT_MANAGER_ROLE and data service is governance-gated. nonReentrant + // provides defence-in-depth (see CEI note in contract header). + if (rca.state == IRecurringCollector.AgreementState.Accepted) { + IDataServiceAgreements ds = agreement.dataService; + require(address(ds).code.length != 0, InvalidDataService(address(ds))); + ds.cancelIndexingAgreementByPayer(agreementId); + emit AgreementCanceled(agreementId, agreement.provider); + } + // else: already canceled (CanceledByPayer or CanceledByServiceProvider) — skip cancel call, just reconcile + + return _reconcileAndCleanup($, agreementId, agreement); + } + + /// @inheritdoc IRecurringAgreementManagement + function reconcileAgreement(bytes16 agreementId) external returns (bool exists) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + AgreementInfo storage agreement = $.agreements[agreementId]; + if (agreement.provider == address(0)) return false; + + return !_reconcileAndCleanup($, agreementId, agreement); + } + + /// @inheritdoc IRecurringAgreementManagement + function reconcileCollectorProvider(address collector, address provider) external returns (bool exists) { + return !_reconcilePairTracking(_getStorage(), collector, provider); + } + + // -- IRecurringEscrowManagement -- + + /// @inheritdoc IRecurringEscrowManagement + function setEscrowBasis(EscrowBasis basis) external onlyRole(OPERATOR_ROLE) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + if ($.escrowBasis == basis) return; + EscrowBasis oldBasis = $.escrowBasis; + $.escrowBasis = basis; + emit EscrowBasisSet(oldBasis, basis); + } + + /// @inheritdoc IRecurringEscrowManagement + function setTempJit(bool active) external onlyRole(OPERATOR_ROLE) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + if ($.tempJit != active) { + $.tempJit = active; + emit TempJitSet(active, false); + } + } + + // -- 
IProviderEligibilityManagement -- + + /// @inheritdoc IProviderEligibilityManagement + function setProviderEligibilityOracle(IProviderEligibility oracle) external onlyRole(GOVERNOR_ROLE) { + RecurringAgreementManagerStorage storage $ = _getStorage(); + if (address($.providerEligibilityOracle) == address(oracle)) return; + IProviderEligibility oldOracle = $.providerEligibilityOracle; + $.providerEligibilityOracle = oracle; + emit ProviderEligibilityOracleSet(oldOracle, oracle); + } + + /// @inheritdoc IProviderEligibilityManagement + function getProviderEligibilityOracle() external view returns (IProviderEligibility) { + return _getStorage().providerEligibilityOracle; + } + + // -- IProviderEligibility -- + + /// @inheritdoc IProviderEligibility + /// @dev When no oracle is configured (address(0)), all providers are eligible. + /// When an oracle is set, delegates to the oracle's isEligible check. + function isEligible(address serviceProvider) external view override returns (bool eligible) { + IProviderEligibility oracle = _getStorage().providerEligibilityOracle; + eligible = (address(oracle) == address(0)) || oracle.isEligible(serviceProvider); + } + + // -- IRecurringAgreements -- + + /// @inheritdoc IRecurringAgreements + function getSumMaxNextClaim(IRecurringCollector collector, address provider) external view returns (uint256) { + return _getStorage().sumMaxNextClaim[address(collector)][provider]; + } + + /// @inheritdoc IRecurringAgreements + function getEscrowAccount( + IRecurringCollector collector, + address provider + ) external view returns (IPaymentsEscrow.EscrowAccount memory account) { + return _fetchEscrowAccount(address(collector), provider); + } + + /// @inheritdoc IRecurringAgreements + function getAgreementMaxNextClaim(bytes16 agreementId) external view returns (uint256) { + return _getStorage().agreements[agreementId].maxNextClaim; + } + + /// @inheritdoc IRecurringAgreements + function getAgreementInfo(bytes16 agreementId) external view returns 
(AgreementInfo memory) { + return _getStorage().agreements[agreementId]; + } + + /// @inheritdoc IRecurringAgreements + function getProviderAgreementCount(address provider) external view returns (uint256) { + return _getStorage().providerAgreementIds[provider].length(); + } + + /// @inheritdoc IRecurringAgreements + function getProviderAgreements(address provider) external view returns (bytes16[] memory) { + return _getStorage().providerAgreementIds[provider].getPageBytes16(0, type(uint256).max); + } + + /// @inheritdoc IRecurringAgreements + function getProviderAgreements( + address provider, + uint256 offset, + uint256 count + ) external view returns (bytes16[] memory) { + return _getStorage().providerAgreementIds[provider].getPageBytes16(offset, count); + } + + /// @inheritdoc IRecurringAgreements + function getEscrowBasis() external view returns (EscrowBasis) { + return _getStorage().escrowBasis; + } + + /// @inheritdoc IRecurringAgreements + function getSumMaxNextClaimAll() external view returns (uint256) { + return _getStorage().sumMaxNextClaimAll; + } + + /// @inheritdoc IRecurringAgreements + function getTotalEscrowDeficit() external view returns (uint256) { + return _getStorage().totalEscrowDeficit; + } + + /// @inheritdoc IRecurringAgreements + function getTotalAgreementCount() external view returns (uint256) { + return _getStorage().totalAgreementCount; + } + + /// @inheritdoc IRecurringAgreements + function isTempJit() external view returns (bool) { + return _getStorage().tempJit; + } + + /// @inheritdoc IRecurringAgreements + function getCollectorCount() external view returns (uint256) { + return _getStorage().collectors.length(); + } + + /// @inheritdoc IRecurringAgreements + function getCollectors() external view returns (address[] memory) { + return _getStorage().collectors.getPage(0, type(uint256).max); + } + + /// @inheritdoc IRecurringAgreements + function getCollectors(uint256 offset, uint256 count) external view returns (address[] memory) { + 
return _getStorage().collectors.getPage(offset, count); + } + + /// @inheritdoc IRecurringAgreements + function getCollectorProviderCount(address collector) external view returns (uint256) { + return _getStorage().collectorProviders[collector].length(); + } + + /// @inheritdoc IRecurringAgreements + function getCollectorProviders(address collector) external view returns (address[] memory) { + return _getStorage().collectorProviders[collector].getPage(0, type(uint256).max); + } + + /// @inheritdoc IRecurringAgreements + function getCollectorProviders( + address collector, + uint256 offset, + uint256 count + ) external view returns (address[] memory) { + return _getStorage().collectorProviders[collector].getPage(offset, count); + } + + /// @inheritdoc IRecurringAgreements + function getPairAgreementCount(address collector, address provider) external view returns (uint256) { + return _getStorage().pairAgreementCount[collector][provider]; + } + + // -- Internal Functions -- + + /** + * @notice Require that msg.sender is the agreement's collector. + * @param agreement The agreement info to check against + */ + function _requireCollector(AgreementInfo storage agreement) private view { + require(msg.sender == address(agreement.collector), OnlyAgreementCollector()); + } + + /** + * @notice Create agreement storage, authorize its hash, update pair tracking, and set max-next-claim. 
+ * @param agreementId The generated agreement ID + * @param rca The recurring collection agreement parameters + * @param collector The collector contract + * @param agreementHash The hash of the RCA to authorize + * @return maxNextClaim The computed max-next-claim for the new agreement + */ + // solhint-disable-next-line use-natspec + function _createAgreement( + RecurringAgreementManagerStorage storage $, + bytes16 agreementId, + IRecurringCollector.RecurringCollectionAgreement calldata rca, + IRecurringCollector collector, + bytes32 agreementHash + ) private returns (uint256 maxNextClaim) { + $.authorizedHashes[agreementHash] = agreementId; + + $.agreements[agreementId] = AgreementInfo({ + provider: rca.serviceProvider, + deadline: rca.deadline, + pendingUpdateNonce: 0, + maxNextClaim: 0, + pendingUpdateMaxNextClaim: 0, + agreementHash: agreementHash, + pendingUpdateHash: bytes32(0), + dataService: IDataServiceAgreements(rca.dataService), + collector: collector + }); + $.providerAgreementIds[rca.serviceProvider].add(bytes32(agreementId)); + ++$.totalAgreementCount; + if (++$.pairAgreementCount[address(collector)][rca.serviceProvider] == 1) { + $.collectorProviders[address(collector)].add(rca.serviceProvider); + $.collectors.add(address(collector)); + } + + maxNextClaim = _computeMaxFirstClaim( + rca.maxOngoingTokensPerSecond, + rca.maxSecondsPerCollection, + rca.maxInitialTokens + ); + _setAgreementMaxNextClaim($, agreementId, maxNextClaim, false); + } + + /** + * @notice Compute maximum first claim from agreement rate parameters. 
+ * @param maxOngoingTokensPerSecond Maximum ongoing tokens per second + * @param maxSecondsPerCollection Maximum seconds per collection period + * @param maxInitialTokens Maximum initial tokens + * @return Maximum possible claim amount + */ + function _computeMaxFirstClaim( + uint256 maxOngoingTokensPerSecond, + uint256 maxSecondsPerCollection, + uint256 maxInitialTokens + ) private pure returns (uint256) { + return maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens; + } + + /** + * @notice Reconcile an agreement and update escrow for its (collector, provider) pair. + * @param agreementId The agreement ID to reconcile + */ + // solhint-disable-next-line use-natspec + function _reconcileAndUpdateEscrow(RecurringAgreementManagerStorage storage $, bytes16 agreementId) private { + _reconcileAgreement($, agreementId); + AgreementInfo storage info = $.agreements[agreementId]; + _updateEscrow($, address(info.collector), info.provider); + } + + /** + * @notice Reconcile an agreement, update escrow, and delete if nothing left to claim. 
+ * @param agreementId The agreement ID to reconcile + * @param agreement Storage pointer to the agreement info + * @return deleted True if the agreement was removed + */ + // solhint-disable-next-line use-natspec + function _reconcileAndCleanup( + RecurringAgreementManagerStorage storage $, + bytes16 agreementId, + AgreementInfo storage agreement + ) private returns (bool deleted) { + _reconcileAndUpdateEscrow($, agreementId); + if (agreement.maxNextClaim == 0) { + address provider = _deleteAgreement($, agreementId, agreement); + emit AgreementRemoved(agreementId, provider); + return true; + } + } + + /** + * @notice Reconcile a single agreement's max next claim against on-chain state + * @param agreementId The agreement ID to reconcile + */ + // solhint-disable-next-line use-natspec + function _reconcileAgreement(RecurringAgreementManagerStorage storage $, bytes16 agreementId) private { + AgreementInfo storage agreement = $.agreements[agreementId]; + + IRecurringCollector rc = agreement.collector; + IRecurringCollector.AgreementData memory rca = rc.getAgreement(agreementId); + + // Not yet accepted — keep the pre-offer estimate unless the deadline has passed + if (rca.state == IRecurringCollector.AgreementState.NotAccepted) { + if (block.timestamp <= agreement.deadline) return; + // Deadline passed: zero out so the caller can delete the expired offer + uint256 prev = agreement.maxNextClaim; + if (prev != 0) { + _setAgreementMaxNextClaim($, agreementId, 0, false); + emit AgreementReconciled(agreementId, prev, 0); + } + return; + } + + // Clear pending update if applied (updateNonce advanced) or unreachable (agreement canceled) + if ( + agreement.pendingUpdateHash != bytes32(0) && + (agreement.pendingUpdateNonce <= rca.updateNonce || + rca.state != IRecurringCollector.AgreementState.Accepted) + ) { + _setAgreementMaxNextClaim($, agreementId, 0, true); + delete $.authorizedHashes[agreement.pendingUpdateHash]; + agreement.pendingUpdateNonce = 0; + 
agreement.pendingUpdateHash = bytes32(0); + } + + uint256 oldMaxClaim = agreement.maxNextClaim; + uint256 newMaxClaim = rc.getMaxNextClaim(agreementId); + + if (oldMaxClaim != newMaxClaim) { + _setAgreementMaxNextClaim($, agreementId, newMaxClaim, false); + emit AgreementReconciled(agreementId, oldMaxClaim, newMaxClaim); + } + } + + /** + * @notice Delete an agreement: clean up hashes, zero escrow obligations, remove from provider set, and update escrow. + * @param agreementId The agreement ID to delete + * @param agreement Storage pointer to the agreement info + * @return provider The provider address (captured before deletion) + */ + // solhint-disable-next-line use-natspec + function _deleteAgreement( + RecurringAgreementManagerStorage storage $, + bytes16 agreementId, + AgreementInfo storage agreement + ) private returns (address provider) { + provider = agreement.provider; + IRecurringCollector collector = agreement.collector; + + // Clean up authorized hashes + delete $.authorizedHashes[agreement.agreementHash]; + if (agreement.pendingUpdateHash != bytes32(0)) delete $.authorizedHashes[agreement.pendingUpdateHash]; + + // Zero out escrow requirements before deleting + _setAgreementMaxNextClaim($, agreementId, 0, false); + _setAgreementMaxNextClaim($, agreementId, 0, true); + --$.totalAgreementCount; + $.providerAgreementIds[provider].remove(bytes32(agreementId)); + + --$.pairAgreementCount[address(collector)][provider]; + delete $.agreements[agreementId]; + + _reconcilePairTracking($, address(collector), provider); + } + + /** + * @notice Reconcile escrow then remove (collector, provider) tracking if fully drained. + * @dev Calls {_updateEscrow} to withdraw completed thaws, then removes the pair from + * tracking only when both pairAgreementCount and escrowSnap are zero. + * Cascades to remove the collector when it has no remaining providers. 
+ * @return gone True if the pair is not tracked after this call + */ + // solhint-disable-next-line use-natspec + function _reconcilePairTracking( + RecurringAgreementManagerStorage storage $, + address collector, + address provider + ) private returns (bool gone) { + _updateEscrow($, collector, provider); + if ($.pairAgreementCount[collector][provider] != 0) return false; + if ($.escrowSnap[collector][provider] != 0) return false; + if ($.collectorProviders[collector].remove(provider)) { + emit CollectorProviderRemoved(collector, provider); + if ($.collectorProviders[collector].length() == 0) { + $.collectors.remove(collector); + emit CollectorRemoved(collector); + } + } + return true; + } + + /** + * @notice Atomically set one escrow obligation slot of an agreement and cascade to provider/global totals. + * @dev This and {_setEscrowSnap} are the only two functions that mutate totalEscrowDeficit. + * @param agreementId The agreement to update + * @param newValue The new obligation value + * @param pending If true, updates pendingUpdateMaxNextClaim; otherwise updates maxNextClaim + */ + // solhint-disable-next-line use-natspec + function _setAgreementMaxNextClaim( + RecurringAgreementManagerStorage storage $, + bytes16 agreementId, + uint256 newValue, + bool pending + ) private { + AgreementInfo storage agreement = $.agreements[agreementId]; + + uint256 oldValue = pending ? 
agreement.pendingUpdateMaxNextClaim : agreement.maxNextClaim; + if (oldValue == newValue) return; + + address collector = address(agreement.collector); + address provider = agreement.provider; + uint256 oldDeficit = _providerEscrowDeficit($, collector, provider); + + if (pending) agreement.pendingUpdateMaxNextClaim = newValue; + else agreement.maxNextClaim = newValue; + + $.sumMaxNextClaim[collector][provider] = $.sumMaxNextClaim[collector][provider] - oldValue + newValue; + $.sumMaxNextClaimAll = $.sumMaxNextClaimAll - oldValue + newValue; + $.totalEscrowDeficit = $.totalEscrowDeficit - oldDeficit + _providerEscrowDeficit($, collector, provider); + } + + /** + * @notice Compute escrow levels (min, max) based on escrow basis. + * @dev Escrow ladder: + * + * | Level | min (deposit floor) | max (thaw ceiling) | + * |------------|---------------------|--------------------| + * | Full | sumMaxNext | sumMaxNext | + * | OnDemand | 0 | sumMaxNext | + * | JustInTime | 0 | 0 | + * + * When tempJit, behaves as JustInTime regardless of configured basis. + * Full degrades to OnDemand when available balance <= totalEscrowDeficit. + * Full requires strictly more tokens on hand than the global deficit. + * + * @param collector The collector address + * @param provider The service provider + * @return min Deposit floor — deposit if balance is below this + * @return max Thaw ceiling — thaw if balance is above this + */ + // solhint-disable-next-line use-natspec + function _escrowMinMax( + RecurringAgreementManagerStorage storage $, + address collector, + address provider + ) private view returns (uint256 min, uint256 max) { + EscrowBasis basis = $.tempJit ? EscrowBasis.JustInTime : $.escrowBasis; + + max = basis == EscrowBasis.JustInTime ? 0 : $.sumMaxNextClaim[collector][provider]; + min = (basis == EscrowBasis.Full && $.totalEscrowDeficit < GRAPH_TOKEN.balanceOf(address(this))) ? 
max : 0; + } + + /** + * @notice Compute a (collector, provider) pair's escrow deficit: max(0, sumMaxNext - snapshot). + * @param collector The collector address + * @param provider The service provider + * @return deficit The amount not in escrow for this (collector, provider) + */ + // solhint-disable-next-line use-natspec + function _providerEscrowDeficit( + RecurringAgreementManagerStorage storage $, + address collector, + address provider + ) private view returns (uint256 deficit) { + uint256 sumMaxNext = $.sumMaxNextClaim[collector][provider]; + uint256 snapshot = $.escrowSnap[collector][provider]; + + deficit = (snapshot < sumMaxNext) ? sumMaxNext - snapshot : 0; + } + + /** + * @notice Update escrow state for a (collector, provider) pair: adjust thaw targets, + * withdraw completed thaws, thaw excess, or deposit deficit. + * @dev Sequential state normalization using (min, max) from {_escrowMinMax}: + * - min: deposit floor — deposit if effective balance (balance - tokensThawing) is below this + * - max: thaw ceiling — thaw effective balance above this, unless it would reset the thaw timer + * + * Steps: + * 1. Adjust thaw target — cancel/reduce unrealised thawing to keep min <= effective balance, + * or increase thawing to bring effective balance toward max (without resetting timer). + * 2. Withdraw completed thaw — realised thawing is always withdrawn, even if within [min, max]. + * 3. Thaw excess — if no thaw is active (possibly after a withdraw), start a new thaw for + * any balance above max. + * 4. Deposit deficit — if no thaw is active, deposit to reach min. + * + * Steps 3 and 4 are mutually exclusive (min <= max). Only one runs per call. + * The thaw timer is never reset: step 1 passes evenIfTimerReset=false, and steps 3/4 + * only run when tokensThawing == 0. + * + * Uses per-call approve (not infinite allowance). Safe because PaymentsEscrow + * is a trusted protocol contract that transfers exactly the approved amount. 
+ * + * Updates escrow snapshot at the end for global tracking. + * + * @param collector The collector contract address + * @param provider The service provider to update escrow for + */ + // solhint-disable-next-line use-natspec + function _updateEscrow(RecurringAgreementManagerStorage storage $, address collector, address provider) private { + // Auto-recover from tempJit when balance exceeds deficit (same strict < as beforeCollection/escrowMinMax) + if ($.tempJit && $.totalEscrowDeficit < GRAPH_TOKEN.balanceOf(address(this))) { + $.tempJit = false; + emit TempJitSet(false, true); + } + + IPaymentsEscrow.EscrowAccount memory account = _fetchEscrowAccount(collector, provider); + (uint256 min, uint256 max) = _escrowMinMax($, collector, provider); + + // Defensive: PaymentsEscrow maintains tokensThawing <= balance, guard against external invariant breach + uint256 escrowed = account.tokensThawing < account.balance ? account.balance - account.tokensThawing : 0; + // Objectives in order of priority: + // We want to end with escrowed of at least min, and seek to thaw down to no more than max. + // 1. Do not reset thaw timer if a thaw is in progress. + // (This is to avoid thrash of restarting thaws resulting in never withdrawing excess.) + // 2. Make minimal adjustment to thawing tokens to get as close to min/max as possible. + // (First cancel unrealised thawing before depositing.) + uint256 thawTarget = (escrowed < min) + ? (min < account.balance ? account.balance - min : 0) + : (max < escrowed ? account.balance - max : account.tokensThawing); + if (thawTarget != account.tokensThawing) { + PAYMENTS_ESCROW.adjustThaw(collector, provider, thawTarget, false); + account = _fetchEscrowAccount(collector, provider); + } + + _withdrawAndRebalance(collector, provider, account, min, max); + _setEscrowSnap($, collector, provider); + } + + /** + * @notice Withdraw completed thaws and rebalance: thaw excess above max or deposit deficit below min. 
+ * @dev Realised thawing is always withdrawn, even if within [min, max]. + * Then if no thaw is active: thaw any balance above max, or deposit to reach min. + * These last two steps are mutually exclusive (min <= max). Only one runs per call. + * @param collector The collector contract address + * @param provider Service provider address + * @param account Current escrow account state + * @param min Deposit floor + * @param max Thaw ceiling + */ + function _withdrawAndRebalance( + address collector, + address provider, + IPaymentsEscrow.EscrowAccount memory account, + uint256 min, + uint256 max + ) private { + // Withdraw any remaining thawed tokens (realised thawing is withdrawn even if within [min, max]) + if (0 < account.tokensThawing && account.thawEndTimestamp < block.timestamp) { + uint256 withdrawn = account.tokensThawing < account.balance ? account.tokensThawing : account.balance; + PAYMENTS_ESCROW.withdraw(collector, provider); + emit EscrowWithdrawn(provider, collector, withdrawn); + account = _fetchEscrowAccount(collector, provider); + } + + if (account.tokensThawing == 0) { + if (max < account.balance) + // Thaw excess above max (might have withdrawn allowing a new thaw to start) + PAYMENTS_ESCROW.adjustThaw(collector, provider, account.balance - max, false); + else { + // Deposit any deficit below min (deposit exactly the missing amount, no more) + uint256 deposit = (min < account.balance) ? 0 : min - account.balance; + if (0 < deposit) { + GRAPH_TOKEN.approve(address(PAYMENTS_ESCROW), deposit); + PAYMENTS_ESCROW.deposit(collector, provider, deposit); + emit EscrowFunded(provider, collector, deposit); + } + } + } + } + + /** + * @notice Atomically sync the escrow snapshot for a (collector, provider) pair after escrow mutations. + * @dev This and {_setAgreementMaxNextClaim} are the only two functions that mutate totalEscrowDeficit. 
+ * @param collector The collector address + * @param provider The service provider + */ + // solhint-disable-next-line use-natspec + function _setEscrowSnap(RecurringAgreementManagerStorage storage $, address collector, address provider) private { + uint256 oldEscrow = $.escrowSnap[collector][provider]; + uint256 newEscrow = _fetchEscrowAccount(collector, provider).balance; + if (oldEscrow == newEscrow) return; + + uint256 oldDeficit = _providerEscrowDeficit($, collector, provider); + $.escrowSnap[collector][provider] = newEscrow; + uint256 newDeficit = _providerEscrowDeficit($, collector, provider); + $.totalEscrowDeficit = $.totalEscrowDeficit - oldDeficit + newDeficit; + } + + // solhint-disable-next-line use-natspec + function _fetchEscrowAccount( + address collector, + address provider + ) private view returns (IPaymentsEscrow.EscrowAccount memory account) { + (account.balance, account.tokensThawing, account.thawEndTimestamp) = PAYMENTS_ESCROW.escrowAccounts( + address(this), + collector, + provider + ); + } + + /** + * @notice Get the ERC-7201 namespaced storage + */ + // solhint-disable-next-line use-natspec + function _getStorage() private pure returns (RecurringAgreementManagerStorage storage $) { + // solhint-disable-next-line no-inline-assembly + assembly { + $.slot := RECURRING_AGREEMENT_MANAGER_STORAGE_LOCATION + } + } +} diff --git a/packages/issuance/contracts/allocate/DirectAllocation.sol b/packages/issuance/contracts/allocate/DirectAllocation.sol index 4c048acf2..91f153b5e 100644 --- a/packages/issuance/contracts/allocate/DirectAllocation.sol +++ b/packages/issuance/contracts/allocate/DirectAllocation.sol @@ -1,10 +1,11 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; import { ISendTokens } from "@graphprotocol/interfaces/contracts/issuance/allocate/ISendTokens.sol"; import { BaseUpgradeable 
} from "../common/BaseUpgradeable.sol"; +import { IGraphToken } from "../common/IGraphToken.sol"; // solhint-disable-next-line no-unused-import import { ERC165Upgradeable } from "@openzeppelin/contracts-upgradeable/utils/introspection/ERC165Upgradeable.sol"; // Used by @inheritdoc @@ -38,19 +39,16 @@ contract DirectAllocation is BaseUpgradeable, IIssuanceTarget, ISendTokens { event TokensSent(address indexed to, uint256 indexed amount); // Do not need to index amount, ignoring gas-indexed-events warning. - /// @notice Emitted before the issuance allocation changes - event BeforeIssuanceAllocationChange(); - // -- Constructor -- /** * @notice Constructor for the DirectAllocation contract * @dev This contract is upgradeable, but we use the constructor to pass the Graph Token address * to the base contract. - * @param graphToken Address of the Graph Token contract + * @param graphToken The Graph Token contract * @custom:oz-upgrades-unsafe-allow constructor */ - constructor(address graphToken) BaseUpgradeable(graphToken) {} + constructor(IGraphToken graphToken) BaseUpgradeable(graphToken) {} // -- Initialization -- @@ -89,9 +87,7 @@ contract DirectAllocation is BaseUpgradeable, IIssuanceTarget, ISendTokens { * before an allocation change. We simply receive tokens from the IssuanceAllocator. * @inheritdoc IIssuanceTarget */ - function beforeIssuanceAllocationChange() external virtual override { - emit BeforeIssuanceAllocationChange(); - } + function beforeIssuanceAllocationChange() external virtual override {} /** * @dev No-op for DirectAllocation; issuanceAllocator is not stored. 
diff --git a/packages/issuance/contracts/allocate/IssuanceAllocator.sol b/packages/issuance/contracts/allocate/IssuanceAllocator.sol index 4b8f15291..76ecf8792 100644 --- a/packages/issuance/contracts/allocate/IssuanceAllocator.sol +++ b/packages/issuance/contracts/allocate/IssuanceAllocator.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { TargetIssuancePerBlock, @@ -15,6 +15,7 @@ import { IIssuanceAllocationStatus } from "@graphprotocol/interfaces/contracts/i import { IIssuanceAllocationData } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceAllocationData.sol"; import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; import { BaseUpgradeable } from "../common/BaseUpgradeable.sol"; +import { IGraphToken } from "../common/IGraphToken.sol"; import { ReentrancyGuardTransient } from "@openzeppelin/contracts/utils/ReentrancyGuardTransient.sol"; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; @@ -324,10 +325,10 @@ contract IssuanceAllocator is * @notice Constructor for the IssuanceAllocator contract * @dev This contract is upgradeable, but we use the constructor to pass the Graph Token address * to the base contract. 
- * @param _graphToken Address of the Graph Token contract + * @param _graphToken The Graph Token contract * @custom:oz-upgrades-unsafe-allow constructor */ - constructor(address _graphToken) BaseUpgradeable(_graphToken) {} + constructor(IGraphToken _graphToken) BaseUpgradeable(_graphToken) {} // -- Initialization -- diff --git a/packages/issuance/contracts/common/BaseUpgradeable.sol b/packages/issuance/contracts/common/BaseUpgradeable.sol index 771d6f0a1..28a8f8966 100644 --- a/packages/issuance/contracts/common/BaseUpgradeable.sol +++ b/packages/issuance/contracts/common/BaseUpgradeable.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; @@ -87,12 +87,12 @@ abstract contract BaseUpgradeable is * @notice Constructor for the BaseUpgradeable contract * @dev This contract is upgradeable, but we use the constructor to set immutable variables * and disable initializers to prevent the implementation contract from being initialized. 
- * @param graphToken Address of the Graph Token contract + * @param graphToken The Graph Token contract * @custom:oz-upgrades-unsafe-allow constructor */ - constructor(address graphToken) { - require(graphToken != address(0), GraphTokenCannotBeZeroAddress()); - GRAPH_TOKEN = IGraphToken(graphToken); + constructor(IGraphToken graphToken) { + require(address(graphToken) != address(0), GraphTokenCannotBeZeroAddress()); + GRAPH_TOKEN = graphToken; _disableInitializers(); } diff --git a/packages/issuance/contracts/common/EnumerableSetUtil.sol b/packages/issuance/contracts/common/EnumerableSetUtil.sol new file mode 100644 index 000000000..65a09c41c --- /dev/null +++ b/packages/issuance/contracts/common/EnumerableSetUtil.sol @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.8.27; + +import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; + +/** + * @title EnumerableSetUtil + * @author Edge & Node + * @notice Pagination helpers for OpenZeppelin EnumerableSet types. + */ +library EnumerableSetUtil { + using EnumerableSet for EnumerableSet.AddressSet; + using EnumerableSet for EnumerableSet.Bytes32Set; + + /** + * @notice Return a page of addresses from an AddressSet. 
+ * @param set The enumerable address set to paginate + * @param offset Number of entries to skip + * @param count Maximum number of entries to return + * @return result Array of addresses (may be shorter than count) + */ + function getPage( + EnumerableSet.AddressSet storage set, + uint256 offset, + uint256 count + ) internal view returns (address[] memory result) { + uint256 total = set.length(); + // solhint-disable-next-line gas-strict-inequalities + if (total <= offset) return new address[](0); + + uint256 remaining = total - offset; + if (remaining < count) count = remaining; + + result = new address[](count); + for (uint256 i = 0; i < count; ++i) result[i] = set.at(offset + i); + } + + /** + * @notice Return a page of bytes16 ids from a Bytes32Set (truncating each entry). + * @param set The enumerable bytes32 set to paginate + * @param offset Number of entries to skip + * @param count Maximum number of entries to return + * @return result Array of bytes16 values (may be shorter than count) + */ + function getPageBytes16( + EnumerableSet.Bytes32Set storage set, + uint256 offset, + uint256 count + ) internal view returns (bytes16[] memory result) { + uint256 total = set.length(); + // solhint-disable-next-line gas-strict-inequalities + if (total <= offset) return new bytes16[](0); + + uint256 remaining = total - offset; + if (remaining < count) count = remaining; + + result = new bytes16[](count); + for (uint256 i = 0; i < count; ++i) result[i] = bytes16(set.at(offset + i)); + } +} diff --git a/packages/issuance/contracts/eligibility/RewardsEligibilityHelper.sol b/packages/issuance/contracts/eligibility/RewardsEligibilityHelper.sol new file mode 100644 index 000000000..f72e86e22 --- /dev/null +++ b/packages/issuance/contracts/eligibility/RewardsEligibilityHelper.sol @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +pragma solidity ^0.8.27; + +import { IRewardsEligibilityHelper } from 
"@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityHelper.sol"; +import { IRewardsEligibilityMaintenance } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityMaintenance.sol"; +import { IRewardsEligibilityStatus } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityStatus.sol"; + +/** + * @title RewardsEligibilityHelper + * @author Edge & Node + * @notice Stateless, permissionless convenience contract for {RewardsEligibilityOracle}. + * Provides batch removal of expired indexers from the tracked set. + * Independently deployable — better versions can be deployed without protocol changes. + * + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +contract RewardsEligibilityHelper is IRewardsEligibilityHelper { + /// @notice The RewardsEligibilityOracle contract address + address public immutable ORACLE; + + /// @notice Thrown when an address parameter is the zero address + error ZeroAddress(); + + /** + * @notice Constructor for the RewardsEligibilityHelper contract + * @param oracle Address of the RewardsEligibilityOracle contract + */ + constructor(address oracle) { + require(oracle != address(0), ZeroAddress()); + ORACLE = oracle; + } + + /// @inheritdoc IRewardsEligibilityHelper + function removeExpiredIndexers(address[] calldata indexers) external returns (uint256 gone) { + for (uint256 i = 0; i < indexers.length; ++i) + if (IRewardsEligibilityMaintenance(ORACLE).removeExpiredIndexer(indexers[i])) ++gone; + } + + /// @inheritdoc IRewardsEligibilityHelper + function removeExpiredIndexers() external returns (uint256 gone) { + address[] memory indexers = IRewardsEligibilityStatus(ORACLE).getIndexers(); + for (uint256 i = 0; i < indexers.length; ++i) + if (IRewardsEligibilityMaintenance(ORACLE).removeExpiredIndexer(indexers[i])) ++gone; + } + + /// @inheritdoc IRewardsEligibilityHelper + 
function removeExpiredIndexers(uint256 offset, uint256 count) external returns (uint256 gone) { + address[] memory indexers = IRewardsEligibilityStatus(ORACLE).getIndexers(offset, count); + for (uint256 i = 0; i < indexers.length; ++i) + if (IRewardsEligibilityMaintenance(ORACLE).removeExpiredIndexer(indexers[i])) ++gone; + } +} diff --git a/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.md b/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.md index 60449c6d4..c928cbc7c 100644 --- a/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.md +++ b/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.md @@ -14,6 +14,8 @@ The contract operates on a "deny by default" principle - indexers are not eligib - **Oracle-based Renewal**: Only authorized oracles can renew indexer eligibility - **Global Toggle**: Eligibility validation can be globally enabled/disabled - **Timeout Mechanism**: If oracles don't update for too long, all indexers are automatically eligible +- **Enumerable Indexer Tracking**: On-chain discovery of all renewed indexers via `EnumerableSet` +- **Retention-based Cleanup**: Permissionless removal of indexers not renewed within a configurable threshold (default: 365 days) - **Role-based Access Control**: Uses hierarchical roles for governance and operations ## Architecture @@ -36,6 +38,8 @@ The contract uses ERC-7201 namespaced storage to prevent storage collisions in u - `eligibilityValidationEnabled`: Global flag to enable/disable eligibility validation (default: false, to be enabled by operator when ready) - `oracleUpdateTimeout`: Timeout after which all indexers are automatically eligible (default: 7 days) - `lastOracleUpdateTime`: Timestamp of the last oracle update +- `trackedIndexers`: Enumerable set of all indexer addresses renewed by the oracle +- `indexerRetentionPeriod`: Duration after which an un-renewed indexer can be permissionlessly removed from tracking (default: 365 days) ## Core 
Functions @@ -75,6 +79,14 @@ The `ORACLE_ROLE` constant can be used as the role parameter for these functions - **Returns**: Always true for current implementation - **Events**: Emits `EligibilityValidationUpdated` if state changes +#### `setIndexerRetentionPeriod(uint256 indexerRetentionPeriod) → bool` + +- **Access**: OPERATOR_ROLE only +- **Purpose**: Set how long after last renewal an indexer can be removed from the tracked set +- **Parameters**: `indexerRetentionPeriod` - Duration in seconds +- **Returns**: Always true for current implementation +- **Events**: Emits `IndexerRetentionPeriodSet` if value changes + ### Indexer Management #### `renewIndexerEligibility(address[] calldata indexers, bytes calldata data) → uint256` @@ -87,11 +99,25 @@ The `ORACLE_ROLE` constant can be used as the role parameter for these functions - **Returns**: Number of indexers whose eligibility renewal timestamp was updated - **Events**: - Emits `IndexerEligibilityData` with oracle and data + - Emits `IndexerTrackingUpdated(indexer, true)` when an indexer is first added to the tracked set - Emits `IndexerEligibilityRenewed` for each indexer whose eligibility was renewed - **Notes**: - Updates `lastOracleUpdateTime` to current block timestamp - Only updates timestamp if less than current block timestamp - Ignores zero addresses and duplicate updates within same block + - Adds each renewed indexer to the enumerable tracked set (idempotent for existing members) + +### Maintenance Functions + +#### `removeExpiredIndexer(address indexer) → bool` + +- **Access**: Permissionless +- **Purpose**: Remove an indexer from the tracked set if expired (`block.timestamp >= renewalTimestamp + indexerRetentionPeriod`) +- **Parameters**: `indexer` - The indexer address to check and remove +- **Returns**: True if the indexer is absent from the tracked set (removed or was never there); false if still tracked (not yet expired) +- **Effects**: Removes from the enumerable set and deletes the renewal 
timestamp mapping entry +- **Events**: Emits `IndexerTrackingUpdated(indexer, false)` when an indexer is actually removed +- **Notes**: A removed indexer can be re-added if the oracle renews it again ### View Functions @@ -129,6 +155,28 @@ The `ORACLE_ROLE` constant can be used as the role parameter for these functions - **Purpose**: Get eligibility validation state - **Returns**: True if enabled, false if disabled +#### `getIndexerRetentionPeriod() → uint256` + +- **Purpose**: Get the indexer retention period for tracked indexer cleanup +- **Returns**: Duration in seconds + +#### `getIndexerCount() → uint256` + +- **Purpose**: Get the number of indexers in the tracked set +- **Returns**: Count of tracked indexers + +#### `getIndexers() → address[]` + +- **Purpose**: Get all tracked indexer addresses +- **Returns**: Array of addresses +- **Note**: May be expensive for large sets; prefer paginated overload for on-chain use + +#### `getIndexers(uint256 offset, uint256 count) → address[]` + +- **Purpose**: Get a paginated slice of tracked indexer addresses +- **Parameters**: `offset` - Start index, `count` - Maximum number to return (clamped) +- **Returns**: Array of addresses + ## Eligibility Logic An indexer is considered eligible if ANY of the following conditions are met: @@ -270,6 +318,8 @@ event IndexerEligibilityRenewed(address indexed indexer, address indexed oracle) event EligibilityPeriodUpdated(uint256 indexed oldPeriod, uint256 indexed newPeriod); event EligibilityValidationUpdated(bool indexed enabled); event OracleUpdateTimeoutUpdated(uint256 indexed oldTimeout, uint256 indexed newTimeout); +event IndexerTrackingUpdated(address indexed indexer, bool indexed tracked); +event IndexerRetentionPeriodSet(uint256 indexed oldThreshold, uint256 indexed newThreshold); ``` ## Default Configuration @@ -277,6 +327,7 @@ event OracleUpdateTimeoutUpdated(uint256 indexed oldTimeout, uint256 indexed new - **Eligibility Period**: 14 days (1,209,600 seconds) - **Oracle 
Update Timeout**: 7 days (604,800 seconds) - **Eligibility Validation**: Disabled (false) +- **Indexer Retention Period**: 365 days (31,536,000 seconds) - **Last Oracle Update Time**: 0 (never updated) The system is deployed with reasonable defaults but can be adjusted as required. Eligibility validation is disabled by default as the expectation is to first see oracles successfully marking indexers as eligible and having suitably established eligible indexers before enabling. @@ -307,4 +358,21 @@ The system is deployed with reasonable defaults but can be adjusted as required. ## Integration -The contract implements four focused interfaces (`IRewardsEligibility`, `IRewardsEligibilityAdministration`, `IRewardsEligibilityReporting`, and `IRewardsEligibilityStatus`) and can be integrated with any system that needs to verify indexer eligibility status. The primary integration point is the `isEligible(address)` function which returns a simple boolean indicating eligibility. +The contract implements five focused interfaces (`IProviderEligibility`, `IRewardsEligibilityAdministration`, `IRewardsEligibilityMaintenance`, `IRewardsEligibilityReporting`, and `IRewardsEligibilityStatus`) and can be integrated with any system that needs to verify provider eligibility status. The primary integration point is the `isEligible(address)` function which returns a simple boolean indicating eligibility. The `getIndexers()` function enables on-chain discovery of all tracked indexers without requiring event indexing. + +## RewardsEligibilityHelper + +A stateless, permissionless companion contract that provides batch convenience operations on the oracle. Independently deployable — better versions can be deployed without protocol changes. 
+ +### `removeExpiredIndexers(address[] calldata indexers) → uint256` + +- **Purpose**: Batch removal of expired indexers by explicit address list +- **Parameters**: `indexers` - Array of indexer addresses to process +- **Returns**: Number of indexers now absent from the tracked set (`gone` count) + +### `removeExpiredIndexers(uint256 offset, uint256 count) → uint256` + +- **Purpose**: Batch removal by paginated scan of the tracked set +- **Parameters**: `offset` - Start index, `count` - Maximum number of indexers to process +- **Returns**: Number of indexers now absent from the tracked set (`gone` count) +- **Notes**: Useful for keeper-driven sweeps without requiring an off-chain indexer list diff --git a/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.sol b/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.sol index bd2591a44..935b1619b 100644 --- a/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.sol +++ b/packages/issuance/contracts/eligibility/RewardsEligibilityOracle.sol @@ -1,12 +1,17 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; -import { IRewardsEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol"; +import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; + +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; import { IRewardsEligibilityAdministration } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityAdministration.sol"; +import { IRewardsEligibilityMaintenance } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityMaintenance.sol"; import { IRewardsEligibilityReporting } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityReporting.sol"; import { IRewardsEligibilityStatus } from 
"@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityStatus.sol"; +import { EnumerableSetUtil } from "../common/EnumerableSetUtil.sol"; import { BaseUpgradeable } from "../common/BaseUpgradeable.sol"; +import { IGraphToken } from "../common/IGraphToken.sol"; /** * @title RewardsEligibilityOracle @@ -27,11 +32,15 @@ import { BaseUpgradeable } from "../common/BaseUpgradeable.sol"; */ contract RewardsEligibilityOracle is BaseUpgradeable, - IRewardsEligibility, + IProviderEligibility, IRewardsEligibilityAdministration, + IRewardsEligibilityMaintenance, IRewardsEligibilityReporting, IRewardsEligibilityStatus { + using EnumerableSet for EnumerableSet.AddressSet; + using EnumerableSetUtil for EnumerableSet.AddressSet; + // -- Role Constants -- /** @@ -54,21 +63,27 @@ contract RewardsEligibilityOracle is /// @notice Main storage structure for RewardsEligibilityOracle using ERC-7201 namespaced storage /// @param indexerEligibilityTimestamps Mapping of indexers to their eligibility renewal timestamps /// @param eligibilityPeriod Period in seconds for which indexer eligibility status lasts - /// @param eligibilityValidationEnabled Flag to enable/disable eligibility validation /// @param oracleUpdateTimeout Timeout period in seconds after which isEligible returns true if no oracle updates /// @param lastOracleUpdateTime Timestamp of the last oracle update + /// @param trackedIndexers Enumerable set of all indexers ever renewed by the oracle + /// @param indexerRetentionPeriod Duration after which an un-renewed indexer can be removed from tracking + /// @param eligibilityValidationEnabled Flag to enable/disable eligibility validation /// @custom:storage-location erc7201:graphprotocol.storage.RewardsEligibilityOracle struct RewardsEligibilityOracleData { /// @dev Mapping of indexers to their eligibility renewal timestamps mapping(address => uint256) indexerEligibilityTimestamps; /// @dev Period in seconds for which indexer eligibility status lasts uint256 
eligibilityPeriod; - /// @dev Flag to enable/disable eligibility validation - bool eligibilityValidationEnabled; /// @dev Timeout period in seconds after which isEligible returns true if no oracle updates uint256 oracleUpdateTimeout; /// @dev Timestamp of the last oracle update uint256 lastOracleUpdateTime; + /// @dev Enumerable set of all indexers renewed by the oracle + EnumerableSet.AddressSet trackedIndexers; + /// @dev Duration in seconds after which an un-renewed indexer can be permissionlessly removed + uint256 indexerRetentionPeriod; + /// @dev Flag to enable/disable eligibility validation + bool eligibilityValidationEnabled; } /** @@ -91,10 +106,10 @@ contract RewardsEligibilityOracle is * @notice Constructor for the RewardsEligibilityOracle contract * @dev This contract is upgradeable, but we use the constructor to pass the Graph Token address * to the base contract. - * @param graphToken Address of the Graph Token contract + * @param graphToken The Graph Token contract * @custom:oz-upgrades-unsafe-allow constructor */ - constructor(address graphToken) BaseUpgradeable(graphToken) {} + constructor(IGraphToken graphToken) BaseUpgradeable(graphToken) {} // -- Initialization -- @@ -114,6 +129,7 @@ contract RewardsEligibilityOracle is $.eligibilityPeriod = 14 days; $.oracleUpdateTimeout = 7 days; $.eligibilityValidationEnabled = false; // Start with eligibility validation disabled, to be enabled later when the oracle is ready + $.indexerRetentionPeriod = 365 days; } /** @@ -124,8 +140,9 @@ contract RewardsEligibilityOracle is */ function supportsInterface(bytes4 interfaceId) public view virtual override returns (bool) { return - interfaceId == type(IRewardsEligibility).interfaceId || + interfaceId == type(IProviderEligibility).interfaceId || interfaceId == type(IRewardsEligibilityAdministration).interfaceId || + interfaceId == type(IRewardsEligibilityMaintenance).interfaceId || interfaceId == type(IRewardsEligibilityReporting).interfaceId || interfaceId == 
type(IRewardsEligibilityStatus).interfaceId || super.supportsInterface(interfaceId); @@ -196,6 +213,23 @@ contract RewardsEligibilityOracle is return true; } + /// @inheritdoc IRewardsEligibilityAdministration + function setIndexerRetentionPeriod( + uint256 indexerRetentionPeriod + ) external override onlyRole(OPERATOR_ROLE) returns (bool) { + RewardsEligibilityOracleData storage $ = _getRewardsEligibilityOracleStorage(); + uint256 oldPeriod = $.indexerRetentionPeriod; + + if (indexerRetentionPeriod != oldPeriod) { + $.indexerRetentionPeriod = indexerRetentionPeriod; + emit IndexerRetentionPeriodSet(oldPeriod, indexerRetentionPeriod); + } + + return true; + } + + // -- Oracle Functions -- + /** * @notice Renew eligibility for provided indexers to receive rewards * @param indexers Array of indexer addresses. Zero addresses are ignored. @@ -220,6 +254,7 @@ contract RewardsEligibilityOracle is if (indexer != address(0) && $.indexerEligibilityTimestamps[indexer] < blockTimestamp) { $.indexerEligibilityTimestamps[indexer] = blockTimestamp; + if ($.trackedIndexers.add(indexer)) emit IndexerTrackingUpdated(indexer, true); emit IndexerEligibilityRenewed(indexer, msg.sender); ++updatedCount; } @@ -228,10 +263,28 @@ contract RewardsEligibilityOracle is return updatedCount; } + // -- Maintenance Functions -- + + /// @inheritdoc IRewardsEligibilityMaintenance + function removeExpiredIndexer(address indexer) external override returns (bool gone) { + RewardsEligibilityOracleData storage $ = _getRewardsEligibilityOracleStorage(); + + if (!$.trackedIndexers.contains(indexer)) return true; + + uint256 renewalTime = $.indexerEligibilityTimestamps[indexer]; + if (block.timestamp < renewalTime + $.indexerRetentionPeriod) return false; + + $.trackedIndexers.remove(indexer); + delete $.indexerEligibilityTimestamps[indexer]; + emit IndexerTrackingUpdated(indexer, false); + + return true; + } + // -- View Functions -- /** - * @inheritdoc IRewardsEligibility + * @inheritdoc 
IProviderEligibility * @dev Returns true if any of the following conditions are met: * 1. Eligibility validation is disabled globally * 2. Oracle timeout has been exceeded (fail-safe to allow all indexers) @@ -293,4 +346,24 @@ contract RewardsEligibilityOracle is function getEligibilityValidation() external view override returns (bool) { return _getRewardsEligibilityOracleStorage().eligibilityValidationEnabled; } + + /// @inheritdoc IRewardsEligibilityStatus + function getIndexerRetentionPeriod() external view override returns (uint256) { + return _getRewardsEligibilityOracleStorage().indexerRetentionPeriod; + } + + /// @inheritdoc IRewardsEligibilityStatus + function getIndexerCount() external view override returns (uint256) { + return _getRewardsEligibilityOracleStorage().trackedIndexers.length(); + } + + /// @inheritdoc IRewardsEligibilityStatus + function getIndexers() external view override returns (address[] memory) { + return _getRewardsEligibilityOracleStorage().trackedIndexers.getPage(0, type(uint256).max); + } + + /// @inheritdoc IRewardsEligibilityStatus + function getIndexers(uint256 offset, uint256 count) external view override returns (address[] memory) { + return _getRewardsEligibilityOracleStorage().trackedIndexers.getPage(offset, count); + } } diff --git a/packages/issuance/contracts/test/allocate/IssuanceAllocatorTestHarness.sol b/packages/issuance/contracts/test/allocate/IssuanceAllocatorTestHarness.sol index 586c6e677..e4aeb5fab 100644 --- a/packages/issuance/contracts/test/allocate/IssuanceAllocatorTestHarness.sol +++ b/packages/issuance/contracts/test/allocate/IssuanceAllocatorTestHarness.sol @@ -1,8 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IssuanceAllocator } from "../../allocate/IssuanceAllocator.sol"; +import { IGraphToken } from "../../common/IGraphToken.sol"; /** * @title IssuanceAllocatorTestHarness @@ -13,10 +14,10 @@ import { IssuanceAllocator } from 
"../../allocate/IssuanceAllocator.sol"; contract IssuanceAllocatorTestHarness is IssuanceAllocator { /** * @notice Constructor for the test harness - * @param _graphToken Address of the Graph Token contract + * @param _graphToken The Graph Token contract * @custom:oz-upgrades-unsafe-allow constructor */ - constructor(address _graphToken) IssuanceAllocator(_graphToken) {} + constructor(IGraphToken _graphToken) IssuanceAllocator(_graphToken) {} /** * @notice Exposes _distributePendingProportionally for testing diff --git a/packages/issuance/foundry.toml b/packages/issuance/foundry.toml index 38d166efd..9251965b5 100644 --- a/packages/issuance/foundry.toml +++ b/packages/issuance/foundry.toml @@ -3,6 +3,7 @@ src = 'contracts' out = 'forge-artifacts' libs = ["node_modules"] auto_detect_remappings = false +test = 'test' remappings = [ "@openzeppelin/=node_modules/@openzeppelin/", "@graphprotocol/=node_modules/@graphprotocol/", @@ -13,11 +14,11 @@ fs_permissions = [{ access = "read", path = "./" }] optimizer = true optimizer_runs = 100 via_ir = true -solc_version = '0.8.33' +solc_version = '0.8.34' evm_version = 'cancun' # Exclude test files from coverage reports -no_match_coverage = "(^test/|/mocks/)" +no_match_coverage = "(^test/|^contracts/test/|/mocks/)" [lint] exclude_lints = ["mixed-case-function", "mixed-case-variable"] diff --git a/packages/issuance/hardhat.base.config.ts b/packages/issuance/hardhat.base.config.ts index 5ae490a66..d31b7d48b 100644 --- a/packages/issuance/hardhat.base.config.ts +++ b/packages/issuance/hardhat.base.config.ts @@ -7,7 +7,7 @@ const ARBITRUM_SEPOLIA_RPC = process.env.ARBITRUM_SEPOLIA_RPC || 'https://sepoli // Issuance-specific Solidity configuration with Cancun EVM version export const issuanceSolidityConfig = { - version: '0.8.33', + version: '0.8.34', settings: { optimizer: { enabled: true, diff --git a/packages/issuance/package.json b/packages/issuance/package.json index dba25ffd1..6223811a4 100644 --- 
a/packages/issuance/package.json +++ b/packages/issuance/package.json @@ -25,7 +25,7 @@ "build:dep": "pnpm --filter '@graphprotocol/issuance^...' run build:self", "build:self": "pnpm compile && pnpm typechain", "clean": "rm -rf artifacts/ forge-artifacts/ cache_forge/ coverage/ cache/ types/ typechain-src/ .eslintcache test/node_modules/", - "compile": "hardhat compile --quiet", + "compile": "hardhat compile --quiet --no-tests", "typechain": "typechain --target ethers-v6 --out-dir typechain-src 'artifacts/contracts/**/!(*.dbg).json' && tsc -p tsconfig.typechain.json && rm -rf typechain-src && echo '{\"type\":\"commonjs\"}' > types/package.json", "test": "forge test", "test:coverage": "forge coverage", @@ -33,7 +33,7 @@ "lint": "pnpm lint:ts; pnpm lint:sol; pnpm lint:forge; pnpm lint:md; pnpm lint:json", "lint:ts": "eslint '**/*.{js,ts,cjs,mjs,jsx,tsx}' --fix --cache; prettier -w --cache --log-level warn '**/*.{js,ts,cjs,mjs,jsx,tsx}'", "lint:sol": "solhint --fix --noPrompt --noPoster 'contracts/**/*.sol'; prettier -w --cache --log-level warn 'contracts/**/*.sol'", - "lint:forge": "forge lint", + "lint:forge": "forge lint contracts/", "lint:md": "markdownlint --fix --ignore-path ../../.gitignore '**/*.md'; prettier -w --cache --log-level warn '**/*.md'", "lint:json": "prettier -w --cache --log-level warn '**/*.json'", "verify": "hardhat verify", diff --git a/packages/issuance/test/unit/agreement-manager/afterCollection.t.sol b/packages/issuance/test/unit/agreement-manager/afterCollection.t.sol new file mode 100644 index 000000000..6e0eae7c3 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/afterCollection.t.sol @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { 
RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerCollectionCallbackTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // -- beforeCollection -- + + function test_BeforeCollection_TopsUpWhenEscrowShort() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Simulate: escrow was partially drained (e.g. by a previous collection) + // The mock escrow has the full balance from offerAgreement, so we need to + // set up a scenario where balance < tokensToCollect. + // We'll just call beforeCollection with a large tokensToCollect. + (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + + // Mint more tokens so SAM has available balance to deposit + token.mint(address(agreementManager), 1000 ether); + + // Request more than current escrow balance + uint256 tokensToCollect = escrowBalance + 500 ether; + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, tokensToCollect); + + // Escrow should now have enough + (uint256 newBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(newBalance, tokensToCollect); + } + + function test_BeforeCollection_NoOpWhenEscrowSufficient() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + (uint256 escrowBefore, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + + // Request less than current escrow — should be a no-op + vm.prank(address(recurringCollector)); + 
agreementManager.beforeCollection(agreementId, 1 ether); + + (uint256 escrowAfter, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(escrowAfter, escrowBefore); + } + + function test_BeforeCollection_Revert_WhenCallerNotRecurringCollector() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); + agreementManager.beforeCollection(agreementId, 100 ether); + } + + function test_BeforeCollection_IgnoresUnknownAgreement() public { + bytes16 unknownId = bytes16(keccak256("unknown")); + + // Should not revert + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(unknownId, 100 ether); + } + + // -- afterCollection -- + + function test_AfterCollection_ReconcileAndFundEscrow() public { + // Offer: maxNextClaim = 1e18 * 3600 + 100e18 = 3700e18 + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 3700 ether); + + // Simulate: agreement accepted and first collection happened + uint64 acceptedAt = uint64(block.timestamp); + uint64 lastCollectionAt = uint64(block.timestamp + 1 hours); + _setAgreementCollected(agreementId, rca, acceptedAt, lastCollectionAt); + + vm.warp(lastCollectionAt); + + // Call afterCollection as RecurringCollector (simulates post-collect callback) + vm.prank(address(recurringCollector)); + agreementManager.afterCollection(agreementId, 500 ether); + + // After first collection, maxInitialTokens no longer applies + // New max = 1e18 * 3600 = 3600e18 + 
assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 3600 ether); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 3600 ether); + } + + function test_AfterCollection_Revert_WhenCallerNotRecurringCollector() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); + agreementManager.afterCollection(agreementId, 100 ether); + } + + function test_AfterCollection_IgnoresUnknownAgreement() public { + bytes16 unknownId = bytes16(keccak256("unknown")); + + // Should not revert — just silently return + vm.prank(address(recurringCollector)); + agreementManager.afterCollection(unknownId, 100 ether); + } + + function test_AfterCollection_CanceledByServiceProvider() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + _setAgreementCanceledBySP(agreementId, rca); + + vm.prank(address(recurringCollector)); + agreementManager.afterCollection(agreementId, 0); + + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/approver.t.sol b/packages/issuance/test/unit/agreement-manager/approver.t.sol new file mode 100644 index 000000000..df6f44bc0 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/approver.t.sol @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; +import { IPaymentsEscrow } from 
"@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IProviderEligibilityManagement } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol"; +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; +import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerApproverTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // -- IAgreementOwner Tests -- + + function test_ApproveAgreement_ReturnsSelector() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + _offerAgreement(rca); + + bytes32 agreementHash = recurringCollector.hashRCA(rca); + bytes4 result = agreementManager.approveAgreement(agreementHash); + assertEq(result, IAgreementOwner.approveAgreement.selector); + } + + function test_ApproveAgreement_ReturnsZero_WhenNotAuthorized() public { + bytes32 fakeHash = keccak256("fake agreement"); + assertEq(agreementManager.approveAgreement(fakeHash), bytes4(0)); + } + + function test_ApproveAgreement_DifferentHashesAreIndependent() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + 
IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + // Only offer rca1 + _offerAgreement(rca1); + + // rca1 hash should be authorized + bytes32 hash1 = recurringCollector.hashRCA(rca1); + assertEq(agreementManager.approveAgreement(hash1), IAgreementOwner.approveAgreement.selector); + + // rca2 hash should NOT be authorized + bytes32 hash2 = recurringCollector.hashRCA(rca2); + assertEq(agreementManager.approveAgreement(hash2), bytes4(0)); + } + + // -- ERC165 Tests -- + + function test_SupportsInterface_IIssuanceTarget() public view { + assertTrue(agreementManager.supportsInterface(type(IIssuanceTarget).interfaceId)); + } + + function test_SupportsInterface_IAgreementOwner() public view { + assertTrue(agreementManager.supportsInterface(type(IAgreementOwner).interfaceId)); + } + + function test_SupportsInterface_IRecurringAgreementManagement() public view { + assertTrue(agreementManager.supportsInterface(type(IRecurringAgreementManagement).interfaceId)); + } + + function test_SupportsInterface_IRecurringEscrowManagement() public view { + assertTrue(agreementManager.supportsInterface(type(IRecurringEscrowManagement).interfaceId)); + } + + function test_SupportsInterface_IProviderEligibilityManagement() public view { + assertTrue(agreementManager.supportsInterface(type(IProviderEligibilityManagement).interfaceId)); + } + + function test_SupportsInterface_IRecurringAgreements() public view { + assertTrue(agreementManager.supportsInterface(type(IRecurringAgreements).interfaceId)); + } + + // -- IIssuanceTarget Tests -- + + function test_BeforeIssuanceAllocationChange_DoesNotRevert() public { + agreementManager.beforeIssuanceAllocationChange(); + } + + function test_SetIssuanceAllocator_OnlyGovernor() public { + address nonGovernor = makeAddr("nonGovernor"); + vm.expectRevert(); + vm.prank(nonGovernor); + 
agreementManager.setIssuanceAllocator(makeAddr("allocator")); + } + + function test_SetIssuanceAllocator_Governor() public { + vm.prank(governor); + agreementManager.setIssuanceAllocator(makeAddr("allocator")); + } + + // -- View Function Tests -- + + function test_GetDeficit_ZeroWhenFullyFunded() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + _offerAgreement(rca); + + // Fully funded (offerAgreement mints enough tokens) + IPaymentsEscrow.EscrowAccount memory account = agreementManager.getEscrowAccount(_collector(), indexer); + assertEq(account.balance - account.tokensThawing, agreementManager.getSumMaxNextClaim(_collector(), indexer)); + } + + function test_GetEscrowAccount_MatchesUnderlying() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + uint256 available = 500 ether; + + token.mint(address(agreementManager), available); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + + IPaymentsEscrow.EscrowAccount memory expected; + (expected.balance, expected.tokensThawing, expected.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + IPaymentsEscrow.EscrowAccount memory actual = agreementManager.getEscrowAccount(_collector(), indexer); + assertEq(actual.balance, expected.balance); + assertEq(actual.tokensThawing, expected.tokensThawing); + assertEq(actual.thawEndTimestamp, expected.thawEndTimestamp); + } + + function test_GetRequiredEscrow_ZeroForUnknownIndexer() public { + assertEq(agreementManager.getSumMaxNextClaim(_collector(), makeAddr("unknown")), 0); + } + + function test_GetAgreementMaxNextClaim_ZeroForUnknown() public view { + assertEq(agreementManager.getAgreementMaxNextClaim(bytes16(keccak256("unknown"))), 0); + } + + 
function test_GetIndexerAgreementCount_ZeroForUnknown() public { + assertEq(agreementManager.getProviderAgreementCount(makeAddr("unknown")), 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/cancelAgreement.t.sol b/packages/issuance/test/unit/agreement-manager/cancelAgreement.t.sol new file mode 100644 index 000000000..1c91210ec --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/cancelAgreement.t.sol @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_CancelAgreement_Accepted() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Simulate acceptance + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementCanceled(agreementId, indexer); + + vm.prank(operator); + bool gone = agreementManager.cancelAgreement(agreementId); + assertFalse(gone); // still tracked after cancel + + // Verify the mock was called + assertTrue(mockSubgraphService.canceled(agreementId)); + assertEq(mockSubgraphService.cancelCallCount(agreementId), 1); 
+ } + + function test_CancelAgreement_ReconcileAfterCancel() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + uint256 originalRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + assertEq(originalRequired, maxClaim); + + // Accept, then cancel by SP (maxNextClaim -> 0) + _setAgreementCanceledBySP(agreementId, rca); + + // CanceledBySP has maxNextClaim=0 so agreement is deleted inline + vm.prank(operator); + bool gone = agreementManager.cancelAgreement(agreementId); + assertTrue(gone); // deleted inline — nothing left to claim + + // After cancelAgreement (which now reconciles), required escrow should decrease + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + function test_CancelAgreement_Idempotent_CanceledByPayer() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as CanceledByPayer (already canceled) + _setAgreementCanceledByPayer(agreementId, rca, uint64(block.timestamp), uint64(block.timestamp + 1 hours), 0); + + // Should succeed — idempotent, skips the external cancel call + vm.prank(operator); + bool gone = agreementManager.cancelAgreement(agreementId); + assertFalse(gone); // still tracked after cancel + + // Should NOT have called SubgraphService + assertEq(mockSubgraphService.cancelCallCount(agreementId), 0); + } + + function test_CancelAgreement_Idempotent_CanceledByServiceProvider() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as 
CanceledByServiceProvider + _setAgreementCanceledBySP(agreementId, rca); + + // Should succeed — idempotent, reconciles to update escrow + // CanceledBySP has maxNextClaim=0 so agreement is deleted inline + vm.prank(operator); + bool gone = agreementManager.cancelAgreement(agreementId); + assertTrue(gone); // deleted inline — nothing left to claim + + // Should NOT have called SubgraphService + assertEq(mockSubgraphService.cancelCallCount(agreementId), 0); + + // Required escrow should drop to 0 + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + function test_CancelAgreement_Revert_WhenNotAccepted() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Agreement is NotAccepted — should revert + vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.AgreementNotAccepted.selector, agreementId)); + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + } + + function test_CancelAgreement_ReturnsTrue_WhenNotOffered() public { + bytes16 fakeId = bytes16(keccak256("fake")); + + // Returns true (gone) when agreement not found + vm.prank(operator); + bool gone = agreementManager.cancelAgreement(fakeId); + assertTrue(gone); + } + + function test_CancelAgreement_Revert_WhenNotOperator() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + _offerAgreement(rca); + bytes16 agreementId = recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + + address nonOperator = makeAddr("nonOperator"); + vm.expectRevert( + abi.encodeWithSelector(IAccessControl.AccessControlUnauthorizedAccount.selector, nonOperator, AGREEMENT_MANAGER_ROLE) + ); + vm.prank(nonOperator); + 
agreementManager.cancelAgreement(agreementId); + } + + function test_CancelAgreement_Revert_WhenPaused() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + vm.startPrank(governor); + agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); + agreementManager.pause(); + vm.stopPrank(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + } + + function test_CancelAgreement_EmitsEvent() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementCanceled(agreementId, indexer); + + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/cancelWithPendingUpdate.t.sol b/packages/issuance/test/unit/agreement-manager/cancelWithPendingUpdate.t.sol new file mode 100644 index 000000000..33f9e5a16 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/cancelWithPendingUpdate.t.sol @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; + +import { RecurringAgreementManagerSharedTest } from 
"./shared.t.sol"; + +/// @notice Tests that canceling an agreement correctly clears pending update escrow. +contract RecurringAgreementManagerCancelWithPendingUpdateTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + /// @notice Demonstrates the bug: when an accepted agreement with a pending (unapplied) + /// update is canceled, the pendingUpdateMaxNextClaim escrow is NOT freed during + /// cancelAgreement. The escrow remains locked until the agreement is fully drained + /// and deleted, even though the update can never be accepted (collector rejects + /// updates on non-Accepted agreements). + function test_CancelAgreement_PendingUpdateEscrowNotFreed() public { + // 1. Offer and accept an agreement + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + uint64 acceptedAt = uint64(block.timestamp); + _setAgreementAccepted(agreementId, rca, acceptedAt); + + // 2. Offer an update (nonce=1) — reserves additional escrow + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + assertEq( + agreementManager.getSumMaxNextClaim(_collector(), indexer), + originalMaxClaim + pendingMaxClaim, + "both original and pending escrow should be reserved" + ); + + // 3. Cancel the agreement — simulate CanceledByPayer with remaining collection window. + // The collector still has a non-zero maxNextClaim (remaining window to collect). + // updateNonce is still 0 — the pending update was never applied. 
+ uint64 canceledAt = uint64(block.timestamp + 1 hours); + vm.warp(canceledAt); + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, canceledAt, 0); + + // Call cancelAgreement — state is already CanceledByPayer so it skips the DS call + // and goes straight to reconcile-and-cleanup. + vm.prank(operator); + bool gone = agreementManager.cancelAgreement(agreementId); + assertFalse(gone, "agreement should still exist (has remaining claims)"); + + // 4. BUG: The pending update can never be accepted (collector rejects updates on + // canceled agreements), yet pendingUpdateMaxNextClaim is still reserved. + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); + uint256 sumAfterCancel = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + // The pending escrow should have been freed (zeroed) since the update is dead. + // This assertion demonstrates the bug — it will FAIL because the pending escrow + // is still included in sumMaxNextClaim. + assertEq( + info.pendingUpdateMaxNextClaim, + 0, + "BUG: pending update escrow should be zero after cancel (update can never be applied)" + ); + assertEq( + sumAfterCancel, + agreementManager.getAgreementMaxNextClaim(agreementId), + "BUG: sumMaxNextClaim should only include the base claim, not the dead pending update" + ); + } + + /// @notice After cancel + reconcile, pending update escrow and hash are fully cleared. + function test_CancelAgreement_PendingClearedAfterReconcile() public { + // 1. Offer and accept + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + uint64 acceptedAt = uint64(block.timestamp); + _setAgreementAccepted(agreementId, rca, acceptedAt); + + // 2. 
Offer update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + // 3. Cancel (CanceledByPayer, remaining window) + uint64 canceledAt = uint64(block.timestamp + 1 hours); + vm.warp(canceledAt); + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, canceledAt, 0); + + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + + // 4. Explicit reconcile — pending should already be cleared + agreementManager.reconcileAgreement(agreementId); + + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); + assertEq(info.pendingUpdateMaxNextClaim, 0, "pending escrow should be zero after cancel"); + assertEq(info.pendingUpdateNonce, 0, "pending nonce should be zero after cancel"); + assertEq(info.pendingUpdateHash, bytes32(0), "pending hash should be zero after cancel"); + + // 5. 
The dead update hash should no longer be authorized + bytes32 updateHash = recurringCollector.hashRCAU(rcau); + bytes4 result = agreementManager.approveAgreement(updateHash); + assertTrue(result != agreementManager.approveAgreement.selector, "dead hash should not be authorized"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/cascadeCleanup.t.sol b/packages/issuance/test/unit/agreement-manager/cascadeCleanup.t.sol new file mode 100644 index 000000000..e8d6c579e --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/cascadeCleanup.t.sol @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; + +contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + MockRecurringCollector internal collector2; + + function setUp() public override { + super.setUp(); + collector2 = new MockRecurringCollector(); + vm.label(address(collector2), "RecurringCollector2"); + + vm.prank(governor); + agreementManager.grantRole(COLLECTOR_ROLE, address(collector2)); + } + + // -- Helpers -- + + function _collector2() internal view returns (IRecurringCollector) { + return IRecurringCollector(address(collector2)); + } + + function _makeRCAForCollector( + MockRecurringCollector collector, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), 
+ endsAt: uint64(block.timestamp + 365 days), + payer: address(agreementManager), + dataService: dataService, + serviceProvider: indexer, + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + nonce: nonce, + metadata: "" + }); + agreementId = collector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + } + + function _makeRCAForProvider( + address provider, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(agreementManager), + dataService: dataService, + serviceProvider: provider, + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + nonce: nonce, + metadata: "" + }); + agreementId = recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + } + + function _offerForCollector( + MockRecurringCollector collector, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal returns (bytes16) { + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + return agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + } + + // -- Tests: Enumeration after offer -- + + function test_Cascade_SingleAgreement_PopulatesSets() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + _offerAgreement(rca); + + assertEq(agreementManager.getCollectorCount(), 1); + assertEq(agreementManager.getCollectors()[0], address(recurringCollector)); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + 
assertEq(agreementManager.getCollectorProviders(address(recurringCollector))[0], indexer); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + } + + function test_Cascade_TwoAgreements_SamePair_CountIncrements() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector(recurringCollector, 1); + _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector(recurringCollector, 2); + _offerAgreement(rca2); + + // Sets still have one entry each, but pair count is 2 + assertEq(agreementManager.getCollectorCount(), 1); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 2); + } + + function test_Cascade_MultiCollector_BothTracked() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector(recurringCollector, 1); + _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector(collector2, 2); + _offerForCollector(collector2, rca2); + + assertEq(agreementManager.getCollectorCount(), 2); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getCollectorProviderCount(address(collector2)), 1); + } + + function test_Cascade_MultiProvider_BothTracked() public { + address indexer2 = makeAddr("indexer2"); + + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForProvider(indexer, 1); + _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForProvider(indexer2, 2); + _offerAgreement(rca2); + + assertEq(agreementManager.getCollectorCount(), 1); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 2); + } + + // -- Tests: Cascade on reconciliation -- + + function 
test_Cascade_ReconcileOneOfTwo_PairStaysTracked() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id1 = _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector(recurringCollector, 2); + _offerAgreement(rca2); + + // Reconcile first (SP canceled → deleted) + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + // Pair still tracked + assertEq(agreementManager.getCollectorCount(), 1); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + } + + function test_Cascade_ReconcileLast_PairStaysWhileEscrowThawing() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id = _offerAgreement(rca); + + _setAgreementCanceledBySP(id, rca); + agreementManager.reconcileAgreement(id); + + // Agreement removed, but pair stays tracked while escrow is thawing + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getCollectorCount(), 1, "collector stays tracked during thaw"); + assertEq( + agreementManager.getCollectorProviderCount(address(recurringCollector)), + 1, + "provider stays tracked during thaw" + ); + + // After thaw period, reconcileCollectorProvider reconciles escrow and removes + vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.CollectorProviderRemoved(address(recurringCollector), indexer); + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.CollectorRemoved(address(recurringCollector)); + + assertFalse(agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer)); + + 
assertEq(agreementManager.getCollectorCount(), 0); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); + } + + function test_Cascade_ReconcileLastProvider_CollectorCleanedUp_OtherCollectorRemains() public { + // Set up: collector1 with indexer, collector2 with indexer + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id1 = _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector(collector2, 2); + _offerForCollector(collector2, rca2); + + // Reconcile collector1's agreement — pair stays tracked during thaw + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + assertEq(agreementManager.getCollectorCount(), 2, "both collectors tracked during thaw"); + assertEq( + agreementManager.getCollectorProviderCount(address(recurringCollector)), + 1, + "provider stays during thaw" + ); + + // After thaw period, reconcileCollectorProvider reconciles escrow and removes + vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + + // collector1 cleaned up, collector2 remains + assertEq(agreementManager.getCollectorCount(), 1); + assertEq(agreementManager.getCollectors()[0], address(collector2)); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); + assertEq(agreementManager.getCollectorProviderCount(address(collector2)), 1); + } + + function test_Cascade_ReconcileProvider_CollectorRetainsOtherProvider() public { + address indexer2 = makeAddr("indexer2"); + + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForProvider(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForProvider(indexer2, 2); + _offerAgreement(rca2); + + // Reconcile indexer's 
agreement — pair stays tracked during thaw + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + assertEq(agreementManager.getCollectorCount(), 1); + assertEq( + agreementManager.getCollectorProviderCount(address(recurringCollector)), + 2, + "both providers tracked during thaw" + ); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer2), 1); + + // After thaw period, reconcileCollectorProvider reconciles escrow and removes + vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + + // Now only indexer2 remains + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getCollectorProviders(address(recurringCollector))[0], indexer2); + } + + // -- Tests: Re-addition after cleanup -- + + function test_Cascade_ReaddAfterFullCleanup() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id = _offerAgreement(rca); + + // Reconcile agreement — pair stays tracked during escrow thaw + _setAgreementCanceledBySP(id, rca); + agreementManager.reconcileAgreement(id); + assertEq(agreementManager.getCollectorCount(), 1, "stays tracked during thaw"); + + // After thaw period, full cleanup via reconcileCollectorProvider + vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + assertEq(agreementManager.getCollectorCount(), 0); + + // Re-add — sets repopulate + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector(recurringCollector, 2); + _offerAgreement(rca2); + + assertEq(agreementManager.getCollectorCount(), 1); + 
assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + } + + // -- Tests: Revoke also cascades -- + + function test_Cascade_RevokeOffer_DeferredCleanup() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id = _offerAgreement(rca); + + assertEq(agreementManager.getCollectorCount(), 1); + + vm.prank(operator); + agreementManager.revokeOffer(id); + + // Agreement gone, but pair stays tracked during escrow thaw + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getCollectorCount(), 1, "stays tracked during thaw"); + + // After thaw period, reconcileCollectorProvider reconciles escrow and removes + vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + + assertEq(agreementManager.getCollectorCount(), 0); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); + } + + // -- Tests: Permissionless safety valve functions -- + + function test_ReconcileCollectorProvider_ReturnsTrue_WhenAgreementsExist() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + _offerAgreement(rca); + + // Exists: pair has agreements + bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + assertTrue(exists); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + } + + function test_ReconcileCollectorProvider_ReturnsFalse_WhenNotTracked() public { + // Not exists: pair was never added + bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + assertFalse(exists); + } + + function 
test_ReconcileCollectorProvider_ReturnsTrue_WhenEscrowThawing() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id = _offerAgreement(rca); + + _setAgreementCanceledBySP(id, rca); + agreementManager.reconcileAgreement(id); + + // Exists: escrow still has pending thaw + bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + assertTrue(exists); + } + + function test_ReconcileCollectorProvider_ReturnsFalse_AfterThawPeriod() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id = _offerAgreement(rca); + + _setAgreementCanceledBySP(id, rca); + agreementManager.reconcileAgreement(id); + + // After thaw period, reconcileCollectorProvider reconciles escrow internally + vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + assertFalse(exists); + } + + function test_ReconcileCollectorProvider_Permissionless() public { + address anyone = makeAddr("anyone"); + vm.prank(anyone); + bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + assertFalse(exists); + } + + // -- Tests: Helper two-phase cleanup -- + + function test_Helper_ReconcilePair_FirstCallStartsThaw_SecondCallCompletes() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementCanceledBySP(id, rca); + + // First call: reconciles agreement (deletes it), starts thaw, but pair stays + (uint256 removed, bool pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + assertTrue(pairExists, "pair stays during thaw"); + + // Second call after thaw period: completes withdrawal and removes pair + 
vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + (removed, pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 0, "no agreements left to reconcile"); + assertFalse(pairExists, "pair gone after escrow recovered"); + } + + function test_Helper_ReconcileCollector_TwoPhase() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementCanceledBySP(id, rca); + + // First call: reconciles agreement (deletes it), starts thaw + (uint256 removed, bool collectorExists) = agreementHelper.reconcileCollector(address(recurringCollector)); + assertEq(removed, 1); + assertTrue(collectorExists, "collector stays during thaw"); + + // Second call after thaw: completes + vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); + (removed, collectorExists) = agreementHelper.reconcileCollector(address(recurringCollector)); + assertEq(removed, 0); + assertFalse(collectorExists, "collector gone after escrow recovered"); + } + + // -- Tests: Pagination -- + + function test_GetCollectors_Pagination() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector(recurringCollector, 1); + _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector(collector2, 2); + _offerForCollector(collector2, rca2); + + // Full list + address[] memory all = agreementManager.getCollectors(); + assertEq(all.length, 2); + + // Paginated + address[] memory first = agreementManager.getCollectors(0, 1); + assertEq(first.length, 1); + assertEq(first[0], all[0]); + + address[] memory second = agreementManager.getCollectors(1, 1); + assertEq(second.length, 1); + assertEq(second[0], all[1]); + + // Past end + address[] memory empty = agreementManager.getCollectors(2, 1); + assertEq(empty.length, 0); + } + + function 
test_GetCollectorProviders_Pagination() public { + address indexer2 = makeAddr("indexer2"); + + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForProvider(indexer, 1); + _offerAgreement(rca1); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForProvider(indexer2, 2); + _offerAgreement(rca2); + + // Full list + address[] memory all = agreementManager.getCollectorProviders(address(recurringCollector)); + assertEq(all.length, 2); + + // Paginated + address[] memory first = agreementManager.getCollectorProviders(address(recurringCollector), 0, 1); + assertEq(first.length, 1); + assertEq(first[0], all[0]); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/edgeCases.t.sol b/packages/issuance/test/unit/agreement-manager/edgeCases.t.sol new file mode 100644 index 000000000..f492297da --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/edgeCases.t.sol @@ -0,0 +1,1261 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Vm } from "forge-std/Vm.sol"; + +import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; + +import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +/// @notice Edge 
case and boundary condition tests for RecurringAgreementManager. +contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ==================== supportsInterface Fallback ==================== + + function test_SupportsInterface_UnknownInterfaceReturnsFalse() public view { + // Use a random interfaceId that doesn't match any supported interface + // This exercises the super.supportsInterface() fallback (line 100) + assertFalse(agreementManager.supportsInterface(bytes4(0xdeadbeef))); + } + + function test_SupportsInterface_ERC165() public view { + // ERC165 itself (0x01ffc9a7) is supported via super.supportsInterface() + assertTrue(agreementManager.supportsInterface(type(IERC165).interfaceId)); + } + + // ==================== Cancel with Invalid Data Service ==================== + + function test_CancelAgreement_Revert_WhenDataServiceHasNoCode() public { + // Use an EOA as dataService so ds.code.length == 0 (line 255) + address eoa = makeAddr("eoa-data-service"); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.dataService = eoa; + + // Grant DATA_SERVICE_ROLE so the offer goes through + vm.prank(governor); + agreementManager.grantRole(DATA_SERVICE_ROLE, eoa); + + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); + + // Set as Accepted so it takes the cancel-via-dataService path + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: eoa, + payer: address(agreementManager), + serviceProvider: indexer, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: 
rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.InvalidDataService.selector, eoa)); + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + } + + // ==================== Hash Cleanup Tests ==================== + + function test_RevokeOffer_CleansUpAgreementHash() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + bytes32 rcaHash = recurringCollector.hashRCA(rca); + + // Hash is authorized + assertEq(agreementManager.approveAgreement(rcaHash), IAgreementOwner.approveAgreement.selector); + + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + + // Hash is cleaned up (not just stale — actually deleted) + assertEq(agreementManager.approveAgreement(rcaHash), bytes4(0)); + } + + function test_RevokeOffer_CleansUpPendingUpdateHash() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + bytes32 updateHash = recurringCollector.hashRCAU(rcau); + // Update hash is authorized + assertEq(agreementManager.approveAgreement(updateHash), IAgreementOwner.approveAgreement.selector); + + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + + // Both hashes cleaned up + assertEq(agreementManager.approveAgreement(updateHash), bytes4(0)); + } + + function test_Remove_CleansUpAgreementHash() public { + 
IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + bytes32 rcaHash = recurringCollector.hashRCA(rca); + + // SP cancels — removable + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + // Hash is cleaned up + assertEq(agreementManager.approveAgreement(rcaHash), bytes4(0)); + } + + function test_Remove_CleansUpPendingUpdateHash() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + bytes32 updateHash = recurringCollector.hashRCAU(rcau); + + // SP cancels — removable + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + // Pending update hash also cleaned up + assertEq(agreementManager.approveAgreement(updateHash), bytes4(0)); + } + + function test_Reconcile_CleansUpAppliedPendingUpdateHash() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + bytes32 updateHash = recurringCollector.hashRCAU(rcau); + assertEq(agreementManager.approveAgreement(updateHash), IAgreementOwner.approveAgreement.selector); + + // Simulate: agreement accepted with pending <= updateNonce (update was applied) 
+ recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 7200, + updateNonce: 1, // (pending <=) + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + agreementManager.reconcileAgreement(agreementId); + + // Pending update hash should be cleaned up after reconcile clears the applied update + assertEq(agreementManager.approveAgreement(updateHash), bytes4(0)); + } + + function test_OfferUpdate_CleansUpReplacedPendingHash() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // First pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau1); + + bytes32 hash1 = recurringCollector.hashRCAU(rcau1); + assertEq(agreementManager.approveAgreement(hash1), IAgreementOwner.approveAgreement.selector); + + // Second pending update replaces first (same nonce — collector hasn't accepted either) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 180 days), + 1 + ); + _offerAgreementUpdate(rcau2); + + // First update hash should be cleaned up + assertEq(agreementManager.approveAgreement(hash1), bytes4(0)); + + // Second update hash should be authorized + bytes32 hash2 = recurringCollector.hashRCAU(rcau2); + assertEq(agreementManager.approveAgreement(hash2), 
IAgreementOwner.approveAgreement.selector); + } + + function test_GetAgreementInfo_IncludesHashes() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + bytes32 rcaHash = recurringCollector.hashRCA(rca); + + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); + assertEq(info.agreementHash, rcaHash); + assertEq(info.pendingUpdateHash, bytes32(0)); + + // Offer an update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + bytes32 updateHash = recurringCollector.hashRCAU(rcau); + info = agreementManager.getAgreementInfo(agreementId); + assertEq(info.agreementHash, rcaHash); + assertEq(info.pendingUpdateHash, updateHash); + } + + // ==================== Zero-Value Parameter Tests ==================== + + function test_Offer_ZeroMaxInitialTokens() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 0, // zero initial tokens + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // maxNextClaim = 1e18 * 3600 + 0 = 3600e18 + uint256 expectedMaxClaim = 1 ether * 3600; + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedMaxClaim); + } + + function test_Offer_ZeroOngoingTokensPerSecond() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 0, // zero ongoing rate + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // maxNextClaim = 0 * 3600 + 100e18 = 100e18 + 
assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 100 ether); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 100 ether); + } + + function test_Offer_AllZeroValues() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 0, // zero initial + 0, // zero ongoing + 0, // zero min seconds + 0, // zero max seconds + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // maxNextClaim = 0 * 0 + 0 = 0 + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + // ==================== Deadline Boundary Tests ==================== + + function test_Remove_AtExactDeadline_NotAccepted() public { + uint64 deadline = uint64(block.timestamp + 1 hours); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + // Override deadline (default from _makeRCA is block.timestamp + 1 hours, same as this) + + bytes16 agreementId = _offerAgreement(rca); + + // Warp to exactly the deadline + vm.warp(deadline); + + // At deadline (block.timestamp == deadline), the condition is `block.timestamp <= info.deadline` + // so this should still be claimable + bool exists = agreementManager.reconcileAgreement(agreementId); + assertTrue(exists); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + function test_Remove_OneSecondAfterDeadline_NotAccepted() public { + uint64 deadline = uint64(block.timestamp + 1 hours); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Warp to one second past deadline + vm.warp(deadline + 1); + + // Now removable (deadline < 
block.timestamp) + agreementManager.reconcileAgreement(agreementId); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + // ==================== Reconcile Edge Cases ==================== + + function test_Reconcile_WhenCollectionEndEqualsCollectionStart() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + uint64 now_ = uint64(block.timestamp); + // Set as accepted with lastCollectionAt == endsAt (fully consumed) + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: now_, + lastCollectionAt: rca.endsAt, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + agreementManager.reconcileAgreement(agreementId); + + // getMaxNextClaim returns 0 when collectionEnd <= collectionStart + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + // ==================== Cancel Edge Cases ==================== + + function test_CancelAgreement_Revert_WhenDataServiceReverts() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as accepted + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + // Configure the mock SubgraphService to revert + mockSubgraphService.setRevert(true, "SubgraphService: cannot 
cancel"); + + vm.expectRevert("SubgraphService: cannot cancel"); + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + } + + // ==================== Offer With Zero Balance Tests ==================== + + function test_Offer_ZeroTokenBalance_PartialFunding() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + // Don't fund the contract — zero token balance + vm.prank(operator); + bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); + + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Agreement is tracked even though escrow couldn't be funded + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaim); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); + + // Escrow has zero balance + (uint256 escrowBal,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + escrowBal, + 0 + ); + + // Escrow balance is 0 + assertEq(agreementManager.getEscrowAccount(_collector(), indexer).balance, 0); + } + + // ==================== ReconcileBatch Edge Cases ==================== + + function test_ReconcileBatch_InterleavedDuplicateIndexers() public { + // Create agreements for two different indexers, interleaved + address indexer2 = makeAddr("indexer2"); + + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.serviceProvider = indexer2; + rca2.nonce = 2; + + IRecurringCollector.RecurringCollectionAgreement memory rca3 = _makeRCA( + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 365 days) + ); + rca3.nonce = 3; + + bytes16 id1 = 
_offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + bytes16 id3 = _offerAgreement(rca3); + + // Accept all, then SP-cancel all + _setAgreementCanceledBySP(id1, rca1); + _setAgreementCanceledBySP(id2, rca2); + _setAgreementCanceledBySP(id3, rca3); + + // Interleaved order: indexer, indexer2, indexer + // The lastFunded optimization won't catch the second indexer occurrence + bytes16[] memory ids = new bytes16[](3); + ids[0] = id1; + ids[1] = id2; + ids[2] = id3; + + // Should succeed without error — _fundEscrow is idempotent + agreementHelper.reconcileBatch(ids); + + // All reconciled to 0 + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), 0); + } + + function test_ReconcileBatch_EmptyArray() public { + // Empty batch should succeed with no effect + bytes16[] memory ids = new bytes16[](0); + agreementHelper.reconcileBatch(ids); + } + + function test_ReconcileBatch_NonExistentAgreements() public { + // Batch with non-existent IDs should skip silently + bytes16[] memory ids = new bytes16[](2); + ids[0] = bytes16(keccak256("nonexistent1")); + ids[1] = bytes16(keccak256("nonexistent2")); + + agreementHelper.reconcileBatch(ids); + } + + // ==================== UpdateEscrow Edge Cases ==================== + + function test_UpdateEscrow_FullThawWithdrawCycle() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Remove the agreement + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + // First reconcileCollectorProvider: initiates thaw + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // Warp past mock's thawing period (1 day) + vm.warp(block.timestamp + 1 days + 1); + + // Second reconcileCollectorProvider: withdraws 
thawed tokens, then no more to thaw + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // Third reconcileCollectorProvider: should be a no-op (nothing to thaw or withdraw) + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + } + + // ==================== Multiple Pending Update Replacements ==================== + + // ==================== Zero-Value Pending Update Hash Cleanup ==================== + + function test_OfferUpdate_ZeroValuePendingUpdate_HashCleanedOnReplace() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + // Offer a zero-value pending update (both initial and ongoing are 0) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 0, // zero initial + 0, // zero ongoing + 60, + 3600, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau1); + + bytes32 zeroHash = recurringCollector.hashRCAU(rcau1); + // Zero-value hash should still be authorized + assertEq(agreementManager.approveAgreement(zeroHash), IAgreementOwner.approveAgreement.selector); + // sumMaxNextClaim should be unchanged (original + 0) + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); + + // Replace with a non-zero update (same nonce — collector hasn't accepted either) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau2); + + // Old zero-value hash should be cleaned up + assertEq(agreementManager.approveAgreement(zeroHash), bytes4(0)); + + // New hash should be authorized + bytes32 newHash = recurringCollector.hashRCAU(rcau2); + 
assertEq(agreementManager.approveAgreement(newHash), IAgreementOwner.approveAgreement.selector); + + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + } + + function test_Reconcile_ZeroValuePendingUpdate_ClearedWhenApplied() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer a zero-value pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 0, + 0, + 60, + 3600, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + bytes32 zeroHash = recurringCollector.hashRCAU(rcau); + assertEq(agreementManager.approveAgreement(zeroHash), IAgreementOwner.approveAgreement.selector); + + // Simulate: agreement accepted with update applied (pending nonce <= updateNonce) + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 0, + maxOngoingTokensPerSecond: 0, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + updateNonce: 1, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + agreementManager.reconcileAgreement(agreementId); + + // Zero-value pending hash should be cleaned up + assertEq(agreementManager.approveAgreement(zeroHash), bytes4(0)); + + // Pending fields should be cleared + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); + assertEq(info.pendingUpdateMaxNextClaim, 0); + assertEq(info.pendingUpdateNonce, 0); + assertEq(info.pendingUpdateHash, bytes32(0)); + } + + // 
==================== Re-offer After Remove ==================== + + function test_ReofferAfterRemove_FullLifecycle() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + // 1. Offer + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + + // 2. SP cancels and remove + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + + // 3. Re-offer the same agreement (same parameters, same agreementId) + bytes16 reofferedId = _offerAgreement(rca); + assertEq(reofferedId, agreementId); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + + // 4. 
Verify the re-offered agreement is fully functional + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(reofferedId); + assertTrue(info.provider != address(0)); + assertEq(info.provider, indexer); + assertEq(info.maxNextClaim, maxClaim); + + // Hash is authorized again + bytes32 rcaHash = recurringCollector.hashRCA(rca); + assertEq(agreementManager.approveAgreement(rcaHash), IAgreementOwner.approveAgreement.selector); + } + + function test_ReofferAfterRemove_WithDifferentNonce() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + bytes16 id1 = _offerAgreement(rca1); + + // Remove + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + // Re-offer with different nonce (different agreementId) + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id2 = _offerAgreement(rca2); + assertTrue(id1 != id2); + + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim2); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + // ==================== Input Validation ==================== + + function test_Offer_Revert_ZeroServiceProvider() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.serviceProvider = address(0); + + token.mint(address(agreementManager), 1_000_000 ether); + vm.expectRevert(IRecurringAgreementManagement.ServiceProviderZeroAddress.selector); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + } + + function test_Offer_Revert_ZeroDataService() public { + IRecurringCollector.RecurringCollectionAgreement 
memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.dataService = address(0); + + token.mint(address(agreementManager), 1_000_000 ether); + vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.UnauthorizedDataService.selector, address(0)) + ); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + } + + // ==================== getProviderAgreements ==================== + + function test_GetIndexerAgreements_Empty() public { + bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + assertEq(ids.length, 0); + } + + function test_GetIndexerAgreements_SingleAgreement() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + assertEq(ids.length, 1); + assertEq(ids[0], agreementId); + } + + function test_GetIndexerAgreements_MultipleAgreements() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + assertEq(ids.length, 2); + // EnumerableSet maintains insertion order + assertEq(ids[0], id1); + assertEq(ids[1], id2); + } + + function test_GetIndexerAgreements_AfterRemoval() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + 
IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + // Remove first agreement + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + assertEq(ids.length, 1); + assertEq(ids[0], id2); + } + + function test_GetIndexerAgreements_CrossIndexerIsolation() public { + address indexer2 = makeAddr("indexer2"); + + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.serviceProvider = indexer2; + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + bytes16[] memory indexer1Ids = agreementManager.getProviderAgreements(indexer); + bytes16[] memory indexer2Ids = agreementManager.getProviderAgreements(indexer2); + + assertEq(indexer1Ids.length, 1); + assertEq(indexer1Ids[0], id1); + assertEq(indexer2Ids.length, 1); + assertEq(indexer2Ids[0], id2); + } + + function test_GetIndexerAgreements_Paginated() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + // Full range returns both + bytes16[] memory all = agreementManager.getProviderAgreements(indexer, 0, 10); + 
assertEq(all.length, 2); + assertEq(all[0], id1); + assertEq(all[1], id2); + + // Offset skips first + bytes16[] memory fromOne = agreementManager.getProviderAgreements(indexer, 1, 10); + assertEq(fromOne.length, 1); + assertEq(fromOne[0], id2); + + // Count limits result + bytes16[] memory firstOnly = agreementManager.getProviderAgreements(indexer, 0, 1); + assertEq(firstOnly.length, 1); + assertEq(firstOnly[0], id1); + } + + // ==================== Withdraw Timing Boundary (Issue 1) ==================== + + function test_UpdateEscrow_NoWithdrawAtExactThawEnd() public { + // At exactly thawEndTimestamp, PaymentsEscrow does NOT allow withdrawal + // (real contract: `block.timestamp <= thawEnd` returns 0). + // RecurringAgreementManager must not enter the withdraw branch at the boundary. + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // SP cancels — reconcile triggers thaw + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + IPaymentsEscrow.EscrowAccount memory accountBeforeWarp; + (accountBeforeWarp.balance, accountBeforeWarp.tokensThawing, accountBeforeWarp.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(accountBeforeWarp.tokensThawing, maxClaim, "All tokens should be thawing"); + uint256 thawEnd = accountBeforeWarp.thawEndTimestamp; + assertTrue(0 < thawEnd, "Thaw should be active"); + + // Warp to EXACTLY thawEndTimestamp (boundary) + vm.warp(thawEnd); + + // Record logs to verify no EscrowWithdrawn event + vm.recordLogs(); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + Vm.Log[] memory entries = vm.getRecordedLogs(); + bytes32 withdrawSig = 
keccak256("EscrowWithdrawn(address,address,uint256)"); + for (uint256 i = 0; i < entries.length; i++) { + assertTrue( + entries[i].topics[0] != withdrawSig, + "EscrowWithdrawn must not be emitted at exact thawEndTimestamp" + ); + } + + // Escrow balance should be unchanged (still thawing) + IPaymentsEscrow.EscrowAccount memory accountAfter; + (accountAfter.balance, accountAfter.tokensThawing, accountAfter.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(accountAfter.balance, maxClaim, "Balance unchanged at boundary"); + assertEq(accountAfter.tokensThawing, maxClaim, "Still thawing at boundary"); + } + + function test_UpdateEscrow_WithdrawsOneSecondAfterThawEnd() public { + // One second past thawEndTimestamp, withdrawal should succeed. + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + (,, uint256 thawEnd) = paymentsEscrow + .escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + + // Warp to thawEndTimestamp + 1 + vm.warp(thawEnd + 1); + + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.EscrowWithdrawn(indexer, address(recurringCollector), maxClaim); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // Escrow should be empty + (uint256 finalBalance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + finalBalance, + 0 + ); + } + + // ==================== BeforeCollection Boundary (Issue 2) ==================== + + function test_BeforeCollection_NoOpWhenTokensToCollectEqualsBalance() public { + // When tokensToCollect == 
escrow balance, beforeCollection should be a no-op. + // Bug: current code uses strict '<', falling through when equal. + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + (uint256 escrowBalance,,) = paymentsEscrow + .escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertTrue(0 < escrowBalance, "Escrow should be funded"); + + // Drain manager's free token balance + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + assertEq(token.balanceOf(address(agreementManager)), 0, "Manager has no free tokens"); + + // Request exactly the escrow balance — no deficit exists + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, escrowBalance); + + // tempJit must NOT be set — there is no deficit + assertFalse(agreementManager.isTempJit(), "No tempJit when escrow exactly covers collection"); + } + + // ==================== Cancel Event Behavior ==================== + + function test_CancelAgreement_NoEvent_WhenAlreadyCanceled() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as already CanceledByServiceProvider + _setAgreementCanceledBySP(agreementId, rca); + + // Record logs to verify no AgreementCanceled event + vm.recordLogs(); + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + + // Check that no AgreementCanceled event was emitted + Vm.Log[] memory entries = vm.getRecordedLogs(); + bytes32 cancelEventSig = keccak256("AgreementCanceled(bytes16,address)"); + for (uint256 i = 0; i < entries.length; i++) { + assertTrue( + 
entries[i].topics[0] != cancelEventSig, + "AgreementCanceled should not be emitted on idempotent path" + ); + } + } + + function test_CancelAgreement_EmitsEvent_WhenAccepted() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementCanceled(agreementId, indexer); + + vm.prank(operator); + agreementManager.cancelAgreement(agreementId); + } + + // ==================== Multiple Pending Update Replacements ==================== + + function test_OfferUpdate_ThreeConsecutiveReplacements() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + // Update 1 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau1); + uint256 pending1 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pending1); + + // Update 2 replaces 1 (same nonce — collector hasn't accepted either) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 180 days), + 1 + ); + _offerAgreementUpdate(rcau2); + uint256 pending2 = 0.5 ether * 1800 + 50 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pending2); + + // Update 3 replaces 2 (same nonce — collector still hasn't accepted) + 
IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau3 = _makeRCAU( + agreementId, + 300 ether, + 3 ether, + 60, + 3600, + uint64(block.timestamp + 1095 days), + 1 + ); + _offerAgreementUpdate(rcau3); + uint256 pending3 = 3 ether * 3600 + 300 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pending3); + + // Only hash for update 3 should be authorized + bytes32 hash1 = recurringCollector.hashRCAU(rcau1); + bytes32 hash2 = recurringCollector.hashRCAU(rcau2); + bytes32 hash3 = recurringCollector.hashRCAU(rcau3); + + assertEq(agreementManager.approveAgreement(hash1), bytes4(0)); + assertEq(agreementManager.approveAgreement(hash2), bytes4(0)); + assertEq(agreementManager.approveAgreement(hash3), IAgreementOwner.approveAgreement.selector); + } + + // ==================== setTempJit No-Op ==================== + + function test_SetTempJit_NoopWhenAlreadyFalse() public { + // Default tempJit is false; setting false again should early-return with no event + vm.recordLogs(); + vm.prank(operator); + agreementManager.setTempJit(false); + + Vm.Log[] memory logs = vm.getRecordedLogs(); + for (uint256 i = 0; i < logs.length; i++) { + assertTrue( + logs[i].topics[0] != IRecurringEscrowManagement.TempJitSet.selector, + "TempJitSet should not be emitted" + ); + } + assertFalse(agreementManager.isTempJit()); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/eligibility.t.sol b/packages/issuance/test/unit/agreement-manager/eligibility.t.sol new file mode 100644 index 000000000..ffc2f6fb5 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/eligibility.t.sol @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; +import { IProviderEligibilityManagement } from 
"@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockEligibilityOracle } from "./mocks/MockEligibilityOracle.sol"; + +/// @notice Tests for payment eligibility oracle in RecurringAgreementManager +contract RecurringAgreementManagerEligibilityTest is RecurringAgreementManagerSharedTest { + MockEligibilityOracle internal oracle; + IProviderEligibility internal constant NO_ORACLE = IProviderEligibility(address(0)); + + function setUp() public override { + super.setUp(); + oracle = new MockEligibilityOracle(); + vm.label(address(oracle), "EligibilityOracle"); + } + + /* solhint-disable graph/func-name-mixedcase */ + + // -- setProviderEligibilityOracle tests -- + + function test_SetPaymentEligibilityOracle() public { + vm.expectEmit(address(agreementManager)); + emit IProviderEligibilityManagement.ProviderEligibilityOracleSet(NO_ORACLE, oracle); + + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + } + + function test_SetPaymentEligibilityOracle_DisableWithZeroAddress() public { + // First set an oracle + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + + // Then disable it + vm.expectEmit(address(agreementManager)); + emit IProviderEligibilityManagement.ProviderEligibilityOracleSet(oracle, NO_ORACLE); + + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(NO_ORACLE); + } + + function test_SetPaymentEligibilityOracle_NoopWhenSameOracle() public { + // Set oracle + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + + // Set same oracle again — early return, no event + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + + // Oracle still works (confirms state unchanged) + oracle.setEligible(indexer, true); + assertTrue(agreementManager.isEligible(indexer)); + } + + function 
test_SetPaymentEligibilityOracle_Revert_WhenNotGovernor() public { + vm.expectRevert(); + vm.prank(operator); + agreementManager.setProviderEligibilityOracle(oracle); + } + + function test_GetProviderEligibilityOracle_ReturnsZeroByDefault() public view { + assertEq(address(agreementManager.getProviderEligibilityOracle()), address(0)); + } + + function test_GetProviderEligibilityOracle_ReturnsSetOracle() public { + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + assertEq(address(agreementManager.getProviderEligibilityOracle()), address(oracle)); + } + + // -- isEligible passthrough tests -- + + function test_IsEligible_TrueWhenNoOracle() public view { + // No oracle set — all providers are eligible + assertTrue(agreementManager.isEligible(indexer)); + } + + function test_IsEligible_DelegatesToOracle_WhenEligible() public { + oracle.setEligible(indexer, true); + + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + + assertTrue(agreementManager.isEligible(indexer)); + } + + function test_IsEligible_DelegatesToOracle_WhenNotEligible() public { + // indexer not set as eligible, default is false + + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + + assertFalse(agreementManager.isEligible(indexer)); + } + + function test_IsEligible_TrueAfterOracleDisabled() public { + // Set oracle that denies indexer + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(oracle); + assertFalse(agreementManager.isEligible(indexer)); + + // Disable oracle + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(NO_ORACLE); + assertTrue(agreementManager.isEligible(indexer)); + } + + // -- ERC165 tests -- + + function test_SupportsInterface_IProviderEligibility() public view { + assertTrue(agreementManager.supportsInterface(type(IProviderEligibility).interfaceId)); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git 
a/packages/issuance/test/unit/agreement-manager/fundingModes.t.sol b/packages/issuance/test/unit/agreement-manager/fundingModes.t.sol new file mode 100644 index 000000000..960825dc6 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/fundingModes.t.sol @@ -0,0 +1,1544 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Vm } from "forge-std/Vm.sol"; + +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + address internal indexer2; + + function setUp() public virtual override { + super.setUp(); + indexer2 = makeAddr("indexer2"); + } + + // -- Helper -- + + function _makeRCAForIndexer( + address sp, + uint256 maxInitial, + uint256 maxOngoing, + uint32 maxSec, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitial, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + rca.serviceProvider = sp; + rca.nonce = nonce; + return rca; + } + + // ==================== setEscrowBasis ==================== + + function test_SetEscrowBasis_DefaultIsFull() public view { + assertEq(uint256(agreementManager.getEscrowBasis()), uint256(IRecurringEscrowManagement.EscrowBasis.Full)); + } + + function test_SetEscrowBasis_OperatorCanSet() public { + vm.prank(operator); + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.EscrowBasisSet( + 
IRecurringEscrowManagement.EscrowBasis.Full, + IRecurringEscrowManagement.EscrowBasis.OnDemand + ); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + assertEq(uint256(agreementManager.getEscrowBasis()), uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand)); + } + + function test_SetEscrowBasis_Revert_WhenNotOperator() public { + vm.prank(governor); + vm.expectRevert(); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + } + + // ==================== Global Tracking ==================== + + function test_GlobalTracking_TotalRequired() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + + _offerAgreement(rca1); + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim1); + assertEq(agreementManager.getTotalAgreementCount(), 1); + + _offerAgreement(rca2); + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim1 + maxClaim2); + assertEq(agreementManager.getTotalAgreementCount(), 2); + } + + function test_GlobalTracking_TotalUndeposited() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + + // In Full mode, escrow is fully deposited — totalEscrowDeficit should be 0 + assertEq(agreementManager.getTotalEscrowDeficit(), 0, "Fully escrowed: totalEscrowDeficit = 0"); + } + + function test_GlobalTracking_TotalUndeposited_WhenPartiallyFunded() public { + // Offer in JIT mode (no deposits) — totalEscrowDeficit = sumMaxNextClaim + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + + 
IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + assertEq(agreementManager.getTotalEscrowDeficit(), maxClaim, "JIT: totalEscrowDeficit = sumMaxNextClaim"); + } + + function test_GlobalTracking_RevokeDecrementsCountAndRequired() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim); + assertEq(agreementManager.getTotalAgreementCount(), 1); + + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + + assertEq(agreementManager.getSumMaxNextClaimAll(), 0); + assertEq(agreementManager.getTotalAgreementCount(), 0); + } + + function test_GlobalTracking_RemoveDecrementsCountAndRequired() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + bytes16 agreementId = _offerAgreement(rca); + assertEq(agreementManager.getTotalAgreementCount(), 1); + + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + assertEq(agreementManager.getSumMaxNextClaimAll(), 0); + assertEq(agreementManager.getTotalAgreementCount(), 0); + } + + function test_GlobalTracking_ReconcileUpdatesRequired() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim); + + // SP cancels — reconcile sets maxNextClaim to 0 + _setAgreementCanceledBySP(agreementId, rca); + 
agreementManager.reconcileAgreement(agreementId); + + assertEq(agreementManager.getSumMaxNextClaimAll(), 0); + // Reconcile now deletes settled agreements inline + assertEq(agreementManager.getTotalAgreementCount(), 0); + } + + function test_GlobalTracking_TotalUndeposited_MultiProvider() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + + _offerAgreement(rca1); + _offerAgreement(rca2); + + // In Full mode, both are fully deposited — totalEscrowDeficit should be 0 + assertEq(agreementManager.getTotalEscrowDeficit(), 0, "Both deposited: totalEscrowDeficit = 0"); + } + + function test_GlobalTracking_TotalUndeposited_OverdepositedProviderDoesNotMaskDeficit() public { + // Regression test: over-deposited provider must NOT mask another provider's deficit. + // Offer rca1 for indexer (gets fully deposited) + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca1); + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + + // Drain SAM so indexer2's agreement can't be deposited + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + // Offer rca2 for indexer2 (can't be deposited) + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, _collector()); + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + + // indexer is fully deposited (undeposited = 0), indexer2 has full deficit (undeposited = maxClaim2) + // totalEscrowDeficit must be maxClaim2, NOT 0 (the old buggy sumMaxNextClaim - 
totalInEscrow approach + // would compute sumMaxNextClaim = maxClaim1 + maxClaim2, totalInEscrow = maxClaim1, + // deficit = maxClaim2 — which happens to be correct here, but would be wrong if indexer + // were over-deposited and the excess masked indexer2's deficit) + assertEq(agreementManager.getTotalEscrowDeficit(), maxClaim2, "Undeposited = indexer2's full deficit"); + + // Verify per-provider escrow state + assertEq( + paymentsEscrow.getBalance(address(agreementManager), address(recurringCollector), indexer), + maxClaim1, + "indexer: fully deposited" + ); + assertEq( + paymentsEscrow.getBalance(address(agreementManager), address(recurringCollector), indexer2), + 0, + "indexer2: undeposited" + ); + } + + // ==================== Full Mode (default — existing behavior) ==================== + + function test_FullMode_DepositsFullRequired() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + assertEq(paymentsEscrow.getBalance(address(agreementManager), address(recurringCollector), indexer), maxClaim); + } + + function test_FullMode_ThawsExcess() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + bytes16 agreementId = _offerAgreement(rca); + + // SP cancels, remove (triggers thaw of all excess) + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.balance - account.tokensThawing, 0, "Full mode: all excess should be thawing"); + } + + // ==================== JustInTime Mode ==================== + + function 
test_JustInTime_ThawsEverything() public { + // Start in Full mode, offer agreement (gets deposited) + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Switch to JustInTime + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + + // Update escrow — should thaw everything + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.tokensThawing, maxClaim, "JustInTime: all balance should be thawing"); + } + + function test_JustInTime_NoProactiveDeposit() public { + // Switch to JustInTime before offering + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + + // No deposit should have been made + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.balance, 0, "JustInTime: no proactive deposit"); + } + + function test_JustInTime_JITStillWorks() public { + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Escrow is 0, but beforeCollection should 
top up + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 500 ether); + + (uint256 newBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(newBalance, 500 ether, "JustInTime: JIT should deposit requested amount"); + } + + // ==================== OnDemand Mode ==================== + + function test_OnDemand_NoProactiveDeposit() public { + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + + // No deposit — same as JustInTime for deposits + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.balance, 0, "OnDemand: no proactive deposit"); + } + + function test_OnDemand_HoldsAtRequiredLevel() public { + // Fund with Full mode first, then switch to OnDemand + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // OnDemand thaw ceiling = required — no thaw expected (balance == thawCeiling) + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.tokensThawing, 0, "OnDemand: no thaw (balance == required == thawCeiling)"); + 
assertEq(account.balance, maxClaim, "OnDemand: balance held at required level"); + } + + function test_OnDemand_PreservesThawFromJIT() public { + // Fund 6 agreements at Full level, then switch JIT -> OnDemand + for (uint256 i = 1; i <= 6; i++) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + i + ); + _offerAgreement(rca); + } + + uint256 maxClaimEach = 1 ether * 3600 + 100 ether; + uint256 sumMaxNextClaim = maxClaimEach * 6; + + // JustInTime would thaw everything + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory jitAccount; + (jitAccount.balance, jitAccount.tokensThawing, jitAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(jitAccount.tokensThawing, sumMaxNextClaim, "JustInTime: thaws everything"); + + // Switch to OnDemand — min=0, min <= liquid=0, so thaw is left alone + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory odAccount; + (odAccount.balance, odAccount.tokensThawing, odAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + // OnDemand: min=0, min(0) <= liquid(0) — existing thaw preserved, no unnecessary cancellation + assertEq(odAccount.tokensThawing, jitAccount.tokensThawing, "OnDemand preserves thaw when min <= liquid"); + } + + function test_OnDemand_JITStillWorks() public { + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + IRecurringCollector.RecurringCollectionAgreement memory rca = 
_makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // No deposit, but JIT works + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 500 ether); + + (uint256 newBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(newBalance, 500 ether, "OnDemand: JIT should work"); + } + + // ==================== Degradation: Full -> OnDemand ==================== + + function test_Degradation_FullToOnDemand_WhenInsufficientBalance() public { + // Offer agreement for indexer1 that consumes most available funds + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca1); + + // Offer 6 agreements for indexer2, each with large maxClaim + // SAM won't have enough for all of them at Full level + for (uint256 i = 1; i <= 6; i++) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer2, + 100_000 ether, + 100 ether, + 7200, + i + 10 + ); + token.mint(address(agreementManager), 100_000 ether); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + } + + // sumMaxNextClaim should be larger than totalEscrowDeficit (degradation occurred: Full -> OnDemand) + assertTrue(0 < agreementManager.getTotalEscrowDeficit(), "Degradation: some undeposited deficit exists"); + } + + function test_Degradation_NeverReachesJustInTime() public { + // Even with severe underfunding, degradation stops at OnDemand (thaw ceiling = required) + // and never reaches JustInTime (thaw ceiling = 0) + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Balance should still be at maxClaim (thaw ceiling = required) + 
IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.balance, maxClaim, "Balance preserved - degradation doesn't go to JustInTime"); + assertEq(account.tokensThawing, 0, "No thaw - not at JustInTime"); + } + + // ==================== Mode Switch Doesn't Break State ==================== + + function test_ModeSwitch_PreservesAgreements() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Switch through all modes — agreement data preserved + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaim); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); + + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaim); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + function test_ModeSwitch_UpdateEscrowAppliesNewMode() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + assertEq(paymentsEscrow.getBalance(address(agreementManager), address(recurringCollector), indexer), maxClaim); + + // Switch to JustInTime and update escrow + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + 
IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.tokensThawing, maxClaim, "JustInTime should thaw all"); + } + + // ==================== JIT (beforeCollection) Works in All Modes ==================== + + function test_JIT_WorksInFullMode() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + token.mint(address(agreementManager), 10000 ether); + + (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + + uint256 tokensToCollect = escrowBalance + 500 ether; + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, tokensToCollect); + + (uint256 newBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(newBalance, tokensToCollect, "JIT top-up should cover collection in Full mode"); + } + + // ==================== afterCollection Reconciles in All Modes ==================== + + function test_AfterCollection_ReconcileInOnDemandMode() public { + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + uint64 acceptedAt = uint64(block.timestamp); + uint64 lastCollectionAt = uint64(block.timestamp + 1 hours); + _setAgreementCollected(agreementId, rca, acceptedAt, lastCollectionAt); + vm.warp(lastCollectionAt); + + vm.prank(address(recurringCollector)); + agreementManager.afterCollection(agreementId, 500 ether); + + uint256 
newMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + assertEq(newMaxClaim, 1 ether * 3600, "maxNextClaim = ongoing only after first collection"); + } + + // ==================== PendingUpdate with sumMaxNextClaim tracking ==================== + + function test_GlobalTracking_PendingUpdate() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim + pendingMaxClaim); + } + + function test_GlobalTracking_ReplacePendingUpdate() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days), + 1 + ); + _offerAgreementUpdate(rcau1); + + uint256 pendingMaxClaim1 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim + pendingMaxClaim1); + + // Replace with different terms (same nonce — collector hasn't accepted either) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 180 days), + 1 + ); + _offerAgreementUpdate(rcau2); + + uint256 pendingMaxClaim2 = 0.5 ether * 1800 + 50 ether; + 
assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim + pendingMaxClaim2); + } + + // ==================== Upward Transitions ==================== + + function test_Transition_JustInTimeToFull() public { + // Start in JIT (no deposits), switch to Full (deposits required) + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Verify no deposit in JIT mode + assertEq( + paymentsEscrow.getBalance(address(agreementManager), address(recurringCollector), indexer), + 0, + "JIT: no deposit" + ); + + // Switch to Full + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + assertEq( + paymentsEscrow.getBalance(address(agreementManager), address(recurringCollector), indexer), + maxClaim, + "Full: deposits required" + ); + } + + function test_Transition_OnDemandToFull() public { + // Fund at Full, switch to OnDemand (holds at required), switch back to Full + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Switch to OnDemand — holds at required (no thaw for 1 agreement) + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory odAccount; + (odAccount.balance, odAccount.tokensThawing, odAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(odAccount.balance, 
maxClaim, "OnDemand: balance held at required"); + + // Switch back to Full — no change needed (already at required) + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory fullAccount; + (fullAccount.balance, fullAccount.tokensThawing, fullAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(fullAccount.balance, maxClaim, "Full: at required"); + } + + // ==================== Thaw-In-Progress Transitions ==================== + + function test_Transition_FullToJustInTime_WhileThawActive() public { + // Create agreements, cancel one to start a thaw, then switch to JIT + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 2 + ); + + bytes16 id1 = _offerAgreement(rca1); + _offerAgreement(rca2); + + uint256 maxClaimEach = 1 ether * 3600 + 100 ether; + + // Cancel and remove rca1 — this triggers a thaw for excess + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + IPaymentsEscrow.EscrowAccount memory beforeSwitch; + (beforeSwitch.balance, beforeSwitch.tokensThawing, beforeSwitch.thawEndTimestamp) = paymentsEscrow + .escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertTrue(0 < beforeSwitch.tokensThawing, "Thaw in progress before switch"); + assertEq(beforeSwitch.tokensThawing, maxClaimEach, "Thawing excess from removed agreement"); + + // Switch to JustInTime while thaw is active — existing thaw continues, + // remaining balance thaws after current thaw completes and is withdrawn + vm.prank(operator); + 
agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory midCycle; + (midCycle.balance, midCycle.tokensThawing, midCycle.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + // Same-block increase is fine (no timer reset) — thaws everything + assertEq(midCycle.tokensThawing, 2 * maxClaimEach, "Same-block: thaw increased to full balance"); + + // Complete thaw, withdraw all + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory afterWithdraw; + (afterWithdraw.balance, afterWithdraw.tokensThawing, afterWithdraw.thawEndTimestamp) = paymentsEscrow + .escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + // Everything withdrawn in one cycle + assertEq(afterWithdraw.balance, 0, "JIT: all withdrawn"); + assertEq(afterWithdraw.tokensThawing, 0, "JIT: nothing left to thaw"); + } + + // ==================== Temp JIT ==================== + + function test_TempJit_TripsOnPartialBeforeCollection() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain SAM's token balance so beforeCollection can't fully fund + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + // Request collection exceeding escrow balance + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.TempJitSet(true, true); + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + + // Verify state + 
assertTrue(agreementManager.isTempJit(), "Temp JIT should be tripped"); + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.Full), + "Basis unchanged (temp JIT overrides behavior, not escrowBasis)" + ); + } + + function test_BeforeCollection_TripsWhenAvailableEqualsDeficit() public { + // Boundary: available == deficit — strict '<' means trip, not deposit + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Set manager balance to exactly the escrow shortfall + (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + uint256 tokensToCollect = escrowBalance + 500 ether; + uint256 deficit = tokensToCollect - escrowBalance; // 500 ether + + // Drain SAM then mint exactly the deficit + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + token.mint(address(agreementManager), deficit); + assertEq(token.balanceOf(address(agreementManager)), deficit, "Balance == deficit"); + + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.TempJitSet(true, true); + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, tokensToCollect); + + assertTrue(agreementManager.isTempJit(), "Trips when available == deficit"); + } + + function test_BeforeCollection_DepositsWhenAvailableExceedsDeficit() public { + // Boundary: available == deficit + 1 — deposits instead of tripping + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( + 
address(agreementManager), + address(recurringCollector), + indexer + ); + uint256 tokensToCollect = escrowBalance + 500 ether; + uint256 deficit = tokensToCollect - escrowBalance; // 500 ether + + // Drain SAM then mint deficit + 1 + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + token.mint(address(agreementManager), deficit + 1); + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, tokensToCollect); + + assertFalse(agreementManager.isTempJit(), "No trip when deficit < available"); + (uint256 newEscrow, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(newEscrow, tokensToCollect, "Escrow topped up to tokensToCollect"); + } + + function test_TempJit_PreservesBasisOnTrip() public { + // Set OnDemand, trip — escrowBasis should NOT change + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain SAM + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.TempJitSet(true, true); + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + + // Basis stays OnDemand (not switched to JIT) + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), + "Basis unchanged during trip" + ); + assertTrue(agreementManager.isTempJit()); + } + + function test_TempJit_DoesNotTripWhenFullyCovered() public 
{ + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Ensure SAM has plenty of tokens + token.mint(address(agreementManager), 1_000_000 ether); + + // Request less than escrow balance — no trip + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, maxClaim); + + assertFalse(agreementManager.isTempJit(), "No trip when fully covered"); + } + + function test_TempJit_DoesNotTripWhenAlreadyActive() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain SAM + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + // First trip + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // Second partial collection — should NOT emit event again + vm.recordLogs(); + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + + // Check no TempJitSet event was emitted + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 tripSig = keccak256("TempJitSet(bool,bool)"); + bool found = false; + for (uint256 i = 0; i < logs.length; i++) { + if (logs[i].topics[0] == tripSig) found = true; + } + assertFalse(found, "No second trip event"); + } + + function test_TempJit_TripsEvenWhenAlreadyJustInTime() public { + // Governor explicitly sets JIT + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 
ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain SAM so beforeCollection can't cover + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + + assertTrue(agreementManager.isTempJit(), "Trips even in JIT mode"); + } + + function test_TempJit_JitStillWorksWhileActive() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain SAM to trip the breaker + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // Now fund SAM and do a JIT top-up while temp JIT is active + token.mint(address(agreementManager), 500 ether); + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 500 ether); + + (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + assertTrue(maxClaim <= escrowBalance, "JIT still works during temp JIT"); + } + + function test_TempJit_RecoveryOnUpdateEscrow() public { + // Offer rca1 (fully deposited), drain SAM, offer rca2 (creates undeposited deficit) + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca1); + + uint256 samBalance = token.balanceOf(address(agreementManager)); + if 
(0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, _collector()); + + // Trip temp JIT + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // Mint more than totalEscrowDeficit — recovery requires strict deficit < available + uint256 totalEscrowDeficit = agreementManager.getTotalEscrowDeficit(); + assertTrue(0 < totalEscrowDeficit, "Deficit exists"); + token.mint(address(agreementManager), totalEscrowDeficit + 1); + + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.TempJitSet(false, true); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + assertFalse(agreementManager.isTempJit(), "Temp JIT recovered"); + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.Full), + "Basis still Full" + ); + } + + function test_TempJit_NoRecoveryWhenPartiallyFunded() public { + // Offer rca1 (fully deposited), drain, offer rca2 (undeposited — creates deficit) + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca1); + + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, _collector()); + + // Trip + vm.prank(address(recurringCollector)); + 
agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + uint256 totalEscrowDeficit = agreementManager.getTotalEscrowDeficit(); + assertTrue(0 < totalEscrowDeficit, "0 < totalEscrowDeficit"); + + // Mint less than totalEscrowDeficit — no recovery + token.mint(address(agreementManager), totalEscrowDeficit / 2); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + assertTrue(agreementManager.isTempJit(), "Still tripped (insufficient balance)"); + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.Full), + "Basis unchanged" + ); + } + + function test_TempJit_NoRecoveryWhenExactlyFunded() public { + // Boundary: available == totalEscrowDeficit — strict '<' means no recovery + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca1); + + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, _collector()); + + // Trip + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // Mint exactly totalEscrowDeficit — recovery requires strict deficit < available + uint256 totalEscrowDeficit = agreementManager.getTotalEscrowDeficit(); + assertTrue(0 < totalEscrowDeficit, "Deficit exists"); + token.mint(address(agreementManager), totalEscrowDeficit); + assertEq(token.balanceOf(address(agreementManager)), totalEscrowDeficit, "Balance == deficit"); + + 
agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + assertTrue(agreementManager.isTempJit(), "Still tripped (available == deficit, not >)"); + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.Full), + "Basis unchanged" + ); + } + + function test_TempJit_EscrowBasisPreservedDuringTrip() public { + // Set OnDemand, trip, recover — escrowBasis stays OnDemand throughout + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain and trip + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), + "Basis preserved during trip" + ); + + // Recovery — mint more than deficit (recovery requires strict deficit < available) + token.mint(address(agreementManager), agreementManager.getSumMaxNextClaimAll() + 1); + + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.TempJitSet(false, true); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + assertFalse(agreementManager.isTempJit()); + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), + "Basis still OnDemand after recovery" + ); + } + + function test_TempJit_SetTempJitClearsBreaker() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 
ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain and trip + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // Operator clears tempJit directly + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.TempJitSet(false, false); + + vm.prank(operator); + agreementManager.setTempJit(false); + + assertFalse(agreementManager.isTempJit(), "Operator cleared breaker"); + } + + function test_TempJit_SetEscrowBasisDoesNotClearBreaker() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + + // Drain and trip + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // Operator changes basis — tempJit stays active + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + assertTrue(agreementManager.isTempJit(), "setEscrowBasis does not clear tempJit"); + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), + "Basis changed independently" + ); + } + + function test_TempJit_MultipleTripRecoverCycles() public { + // Offer rca1 (deposited), drain SAM, offer rca2 (undeposited — creates deficit) + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); 
+ bytes16 agreementId = _offerAgreement(rca1); + + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, _collector()); + + uint256 undeposited = agreementManager.getTotalEscrowDeficit(); + assertTrue(0 < undeposited, "Has undeposited deficit"); + + // --- Cycle 1: Trip --- + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // --- Cycle 1: Recover (mint more than deficit — recovery requires strict deficit < available) --- + token.mint(address(agreementManager), undeposited + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + assertFalse(agreementManager.isTempJit()); + assertEq(uint256(agreementManager.getEscrowBasis()), uint256(IRecurringEscrowManagement.EscrowBasis.Full)); + + // After recovery, reconcileCollectorProvider deposited into escrow. Drain again and create new deficit. 
+ samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + IRecurringCollector.RecurringCollectionAgreement memory rca3 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 3 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca3, _collector()); + + undeposited = agreementManager.getTotalEscrowDeficit(); + assertTrue(0 < undeposited, "New undeposited deficit"); + + // --- Cycle 2: Trip --- + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // --- Cycle 2: Recover (mint more than deficit) --- + token.mint(address(agreementManager), undeposited + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + assertFalse(agreementManager.isTempJit()); + assertEq(uint256(agreementManager.getEscrowBasis()), uint256(IRecurringEscrowManagement.EscrowBasis.Full)); + } + + function test_TempJit_MultiProvider() public { + // Offer rca1 (deposited), drain SAM, offer rca2 (creates deficit → 0 < totalEscrowDeficit) + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 id1 = _offerAgreement(rca1); + + // Drain SAM so rca2 can't be deposited + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + + // Offer rca2 directly (no mint) — escrow stays undeposited, creates deficit + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 100 ether, + 1 ether, + 3600, + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, _collector()); + assertTrue(0 < agreementManager.getTotalEscrowDeficit(), "should have undeposited escrow"); + + // Trip via indexer's 
agreement + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(id1, 1_000_000 ether); + assertTrue(agreementManager.isTempJit()); + + // Both providers should see JIT behavior (thaw everything) + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + + IPaymentsEscrow.EscrowAccount memory acc1; + (acc1.balance, acc1.tokensThawing, acc1.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + IPaymentsEscrow.EscrowAccount memory acc2; + (acc2.balance, acc2.tokensThawing, acc2.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer2 + ); + + // Both providers should be thawing (JIT mode via temp JIT) + assertEq(acc1.tokensThawing, acc1.balance, "indexer: JIT thaws all"); + assertEq(acc2.tokensThawing, acc2.balance, "indexer2: JIT thaws all"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/fuzz.t.sol b/packages/issuance/test/unit/agreement-manager/fuzz.t.sol new file mode 100644 index 000000000..7825282fc --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/fuzz.t.sol @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // -- offerAgreement -- + + 
function testFuzz_Offer_MaxNextClaimCalculation( + uint128 maxInitialTokens, + uint128 maxOngoingTokensPerSecond, + uint32 maxSecondsPerCollection + ) public { + // Bound to avoid overflow: uint128 * uint32 fits in uint256 + vm.assume(0 < maxSecondsPerCollection); + + uint64 endsAt = uint64(block.timestamp + 365 days); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitialTokens, + maxOngoingTokensPerSecond, + 60, + maxSecondsPerCollection, + endsAt + ); + + bytes16 agreementId = _offerAgreement(rca); + + uint256 expectedMaxClaim = uint256(maxOngoingTokensPerSecond) * uint256(maxSecondsPerCollection) + + uint256(maxInitialTokens); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedMaxClaim); + } + + function testFuzz_Offer_EscrowFundedUpToAvailable( + uint128 maxInitialTokens, + uint128 maxOngoingTokensPerSecond, + uint32 maxSecondsPerCollection, + uint256 availableTokens + ) public { + vm.assume(0 < maxSecondsPerCollection); + availableTokens = bound(availableTokens, 0, 10_000_000 ether); + + uint64 endsAt = uint64(block.timestamp + 365 days); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitialTokens, + maxOngoingTokensPerSecond, + 60, + maxSecondsPerCollection, + endsAt + ); + + // Fund with a specific amount instead of the default 1M ether + token.mint(address(agreementManager), availableTokens); + vm.prank(operator); + bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); + + uint256 maxNextClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + (uint256 escrowBalance,,) = paymentsEscrow + .escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + + // In Full mode (default): + // If totalEscrowDeficit < available: Full deposits required (there is buffer). 
+ // Otherwise (available <= totalEscrowDeficit): degrades to OnDemand (no buffer, deposit target = 0). + // JIT beforeCollection is the safety net for underfunded escrow. + if (maxNextClaim < availableTokens) { + assertEq(escrowBalance, maxNextClaim); + } else { + // Degraded to OnDemand: no deposit (no buffer or insufficient) + assertEq(escrowBalance, 0); + } + } + + function testFuzz_Offer_RequiredEscrowIncrements( + uint64 maxInitial1, + uint64 maxOngoing1, + uint32 maxSec1, + uint64 maxInitial2, + uint64 maxOngoing2, + uint32 maxSec2 + ) public { + vm.assume(0 < maxSec1 && 0 < maxSec2); + + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + maxInitial1, + maxOngoing1, + 60, + maxSec1, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + maxInitial2, + maxOngoing2, + 60, + maxSec2, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + _offerAgreement(rca1); + uint256 required1 = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + _offerAgreement(rca2); + uint256 required2 = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + uint256 maxClaim1 = uint256(maxOngoing1) * uint256(maxSec1) + uint256(maxInitial1); + uint256 maxClaim2 = uint256(maxOngoing2) * uint256(maxSec2) + uint256(maxInitial2); + + assertEq(required1, maxClaim1); + assertEq(required2, maxClaim1 + maxClaim2); + } + + // -- revokeOffer / reconcileAgreement -- + + function testFuzz_RevokeOffer_RequiredEscrowDecrements(uint64 maxInitial, uint64 maxOngoing, uint32 maxSec) public { + vm.assume(0 < maxSec); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitial, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 requiredBefore = agreementManager.getSumMaxNextClaim(_collector(), indexer); + assertTrue(0 < requiredBefore || (maxInitial == 0 && maxOngoing 
== 0)); + + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function testFuzz_Remove_AfterSPCancel_ClearsState(uint64 maxInitial, uint64 maxOngoing, uint32 maxSec) public { + vm.assume(0 < maxSec); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitial, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + _setAgreementCanceledBySP(agreementId, rca); + + agreementManager.reconcileAgreement(agreementId); + + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + } + + // -- reconcile -- + + function testFuzz_Reconcile_AfterCollection_UpdatesRequired( + uint64 maxInitial, + uint64 maxOngoing, + uint32 maxSec, + uint32 timeElapsed + ) public { + vm.assume(0 < maxSec); + vm.assume(0 < maxOngoing); + timeElapsed = uint32(bound(timeElapsed, 1, maxSec)); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitial, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 preAcceptRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + // Simulate acceptance and a collection at block.timestamp + timeElapsed + uint64 acceptedAt = uint64(block.timestamp); + uint64 collectionAt = uint64(block.timestamp + timeElapsed); + _setAgreementCollected(agreementId, rca, acceptedAt, collectionAt); + + // Warp to collection time + vm.warp(collectionAt); + + agreementManager.reconcileAgreement(agreementId); + + uint256 postReconcileRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + // After collection, the maxNextClaim 
should reflect remaining window (no initial tokens) + // and should be <= the pre-acceptance estimate + assertTrue(postReconcileRequired <= preAcceptRequired); + } + + // -- offerAgreementUpdate -- + + function testFuzz_OfferUpdate_DoubleFunding( + uint64 maxInitial, + uint64 maxOngoing, + uint32 maxSec, + uint64 updateMaxInitial, + uint64 updateMaxOngoing, + uint32 updateMaxSec + ) public { + vm.assume(0 < maxSec && 0 < updateMaxSec); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitial, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + uint256 originalMaxClaim = uint256(maxOngoing) * uint256(maxSec) + uint256(maxInitial); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + updateMaxInitial, + updateMaxOngoing, + 60, + updateMaxSec, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 pendingMaxClaim = uint256(updateMaxOngoing) * uint256(updateMaxSec) + uint256(updateMaxInitial); + + // Both original and pending are funded simultaneously + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + } + + // -- reconcileAgreement deadline -- + + function testFuzz_Remove_ExpiredOffer_DeadlineBoundary(uint32 extraTime) public { + extraTime = uint32(bound(extraTime, 1, 365 days)); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Before deadline: should return true (still claimable) + bool exists = agreementManager.reconcileAgreement(agreementId); + assertTrue(exists); + + // Warp past deadline + vm.warp(rca.deadline + extraTime); + + // After deadline: should succeed + 
agreementManager.reconcileAgreement(agreementId); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + // -- getEscrowAccount -- + + function testFuzz_GetEscrowAccount_MatchesUnderlying(uint128 maxOngoing, uint32 maxSec, uint128 available) public { + vm.assume(0 < maxSec); + vm.assume(0 < maxOngoing); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 0, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + + token.mint(address(agreementManager), available); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + + IPaymentsEscrow.EscrowAccount memory expected; + (expected.balance, expected.tokensThawing, expected.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + IPaymentsEscrow.EscrowAccount memory actual = agreementManager.getEscrowAccount(_collector(), indexer); + + assertEq(actual.balance, expected.balance); + assertEq(actual.tokensThawing, expected.tokensThawing); + assertEq(actual.thawEndTimestamp, expected.thawEndTimestamp); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/helper.t.sol b/packages/issuance/test/unit/agreement-manager/helper.t.sol new file mode 100644 index 000000000..29f83ec55 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/helper.t.sol @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Vm } from "forge-std/Vm.sol"; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementHelper } from "../../../contracts/agreement/RecurringAgreementHelper.sol"; +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // -- Constructor tests 
-- + + function test_Constructor_SetsManager() public view { + assertEq(address(agreementHelper.MANAGER()), address(agreementManager)); + } + + function test_Constructor_Revert_ZeroAddress() public { + vm.expectRevert(RecurringAgreementHelper.ZeroAddress.selector); + new RecurringAgreementHelper(address(0), token); + } + + // -- reconcile(provider) tests -- + + function test_Reconcile_AllAgreementsForIndexer() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + // Cancel agreement 1 by SP + _setAgreementCanceledBySP(id1, rca1); + + // Accept agreement 2 (collected once) + uint64 lastCollectionAt = uint64(block.timestamp + 1 hours); + _setAgreementCollected(id2, rca2, uint64(block.timestamp), lastCollectionAt); + vm.warp(lastCollectionAt); + + // Fund for reconcile + token.mint(address(agreementManager), 1_000_000 ether); + + agreementHelper.reconcile(indexer); + + // Agreement 1: CanceledBySP -> maxClaim = 0 + assertEq(agreementManager.getAgreementMaxNextClaim(id1), 0); + // Agreement 2: collected, remaining window large, capped at maxSecondsPerCollection = 7200 + // maxClaim = 2e18 * 7200 = 14400e18 (no initial since collected) + assertEq(agreementManager.getAgreementMaxNextClaim(id2), 14400 ether); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 14400 ether); + } + + function test_Reconcile_EmptyProvider() public { + // reconcile for a provider with no agreements — should be a no-op + address unknown = makeAddr("unknown"); + agreementHelper.reconcile(unknown); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), unknown), 0); + } + + function 
test_Reconcile_IdempotentWhenUnchanged() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as accepted + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + // First reconcile + agreementHelper.reconcile(indexer); + uint256 escrowAfterFirst = agreementManager.getSumMaxNextClaim(_collector(), indexer); + uint256 maxClaimAfterFirst = agreementManager.getAgreementMaxNextClaim(agreementId); + + // Second reconcile should produce identical results (idempotent) + vm.recordLogs(); + agreementHelper.reconcile(indexer); + + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), escrowAfterFirst); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaimAfterFirst); + + // No reconcile event on the second call since nothing changed + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 reconciledTopic = keccak256("AgreementReconciled(bytes16,uint256,uint256)"); + for (uint256 i = 0; i < logs.length; i++) { + assertTrue(logs[i].topics[0] != reconciledTopic, "Unexpected AgreementReconciled event on idempotent call"); + } + } + + function test_Reconcile_MultipleAgreements_MixedStates() public { + // Three agreements for the same indexer, each in a different state + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + IRecurringCollector.RecurringCollectionAgreement memory rca3 = _makeRCA( + 0, + 3 ether, + 60, + 1800, + uint64(block.timestamp + 365 days) + ); + rca3.nonce = 3; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + 
bytes16 id3 = _offerAgreement(rca3); + + // id1: Canceled by SP -> maxClaim = 0 + _setAgreementCanceledBySP(id1, rca1); + + // id2: Accepted, collected -> no initial tokens + uint64 lastCollectionAt = uint64(block.timestamp + 1 hours); + _setAgreementCollected(id2, rca2, uint64(block.timestamp), lastCollectionAt); + + // id3: Not yet accepted -> keep pre-offer estimate + // (default mock returns NotAccepted) + + vm.warp(lastCollectionAt); + token.mint(address(agreementManager), 1_000_000 ether); + + agreementHelper.reconcile(indexer); + + assertEq(agreementManager.getAgreementMaxNextClaim(id1), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(id2), 14400 ether); // 2e18 * 7200 + // id3 unchanged: 3e18 * 1800 = 5400e18 (pre-offer estimate) + assertEq(agreementManager.getAgreementMaxNextClaim(id3), 5400 ether); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 14400 ether + 5400 ether); + } + + // -- reconcileBatch tests -- + + function test_ReconcileBatch_BasicBatch() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1 + maxClaim2); + + // Accept both and simulate CanceledBySP on agreement 1 + _setAgreementCanceledBySP(id1, rca1); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + + // Reconcile both in batch + bytes16[] memory ids = new bytes16[](2); + ids[0] = id1; + ids[1] = id2; + agreementHelper.reconcileBatch(ids); + + // Agreement 1 canceled by SP -> 
maxNextClaim = 0 + assertEq(agreementManager.getAgreementMaxNextClaim(id1), 0); + // Agreement 2 accepted, never collected -> maxNextClaim = initial + ongoing + assertEq(agreementManager.getAgreementMaxNextClaim(id2), maxClaim2); + // Required should be just agreement 2 now + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim2); + } + + function test_ReconcileBatch_SkipsNonExistent() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 realId = _offerAgreement(rca); + bytes16 fakeId = bytes16(keccak256("nonexistent")); + + // Accept to enable reconciliation + _setAgreementAccepted(realId, rca, uint64(block.timestamp)); + + // Batch with a nonexistent id — should not revert + bytes16[] memory ids = new bytes16[](2); + ids[0] = fakeId; + ids[1] = realId; + agreementHelper.reconcileBatch(ids); + + // Real agreement should still be tracked + uint256 maxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getAgreementMaxNextClaim(realId), maxClaim); + } + + function test_ReconcileBatch_Empty() public { + // Empty array — should succeed silently + bytes16[] memory ids = new bytes16[](0); + agreementHelper.reconcileBatch(ids); + } + + function test_ReconcileBatch_CrossIndexer() public { + address indexer2 = makeAddr("indexer2"); + + // Agreement 1 for default indexer + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + // Agreement 2 for indexer2 + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.serviceProvider = indexer2; + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + uint256 
maxClaim2 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); + + // Cancel both by SP + _setAgreementCanceledBySP(id1, rca1); + _setAgreementCanceledBySP(id2, rca2); + + bytes16[] memory ids = new bytes16[](2); + ids[0] = id1; + ids[1] = id2; + agreementHelper.reconcileBatch(ids); + + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), 0); + } + + function test_ReconcileBatch_Permissionless() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + // Anyone can call + address anyone = makeAddr("anyone"); + bytes16[] memory ids = new bytes16[](1); + ids[0] = agreementId; + vm.prank(anyone); + agreementHelper.reconcileBatch(ids); + } + + function test_ReconcileBatch_ClearsPendingUpdate() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer a pending update (nonce 1) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + + // Simulate: accepted with the update already applied (pending <= updateNonce) + recurringCollector.setAgreement( + agreementId, + 
IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: rcau.endsAt, + maxInitialTokens: rcau.maxInitialTokens, + maxOngoingTokensPerSecond: rcau.maxOngoingTokensPerSecond, + minSecondsPerCollection: rcau.minSecondsPerCollection, + maxSecondsPerCollection: rcau.maxSecondsPerCollection, + updateNonce: 1, // matches pending nonce, so update was applied + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + bytes16[] memory ids = new bytes16[](1); + ids[0] = agreementId; + agreementHelper.reconcileBatch(ids); + + // Pending should be cleared; required escrow should be based on new terms + uint256 newMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), newMaxClaim); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/helperAudit.t.sol b/packages/issuance/test/unit/agreement-manager/helperAudit.t.sol new file mode 100644 index 000000000..f957eee9f --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/helperAudit.t.sol @@ -0,0 +1,239 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementHelper } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; + +contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + MockRecurringCollector 
internal collector2; + address internal indexer2; + + function setUp() public override { + super.setUp(); + collector2 = new MockRecurringCollector(); + vm.label(address(collector2), "RecurringCollector2"); + indexer2 = makeAddr("indexer2"); + + vm.prank(governor); + agreementManager.grantRole(COLLECTOR_ROLE, address(collector2)); + } + + // -- Helpers -- + + function _makeRCAForCollector( + MockRecurringCollector collector, + address provider, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory rca) { + rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(agreementManager), + dataService: dataService, + serviceProvider: provider, + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + nonce: nonce, + metadata: "" + }); + } + + function _offerForCollector( + MockRecurringCollector collector, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal returns (bytes16) { + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + return agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + } + + // -- Tests: auditGlobal -- + + function test_AuditGlobal_EmptyState() public view { + IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); + assertEq(g.tokenBalance, 0); + assertEq(g.sumMaxNextClaimAll, 0); + assertEq(g.totalEscrowDeficit, 0); + assertEq(g.totalAgreementCount, 0); + assertEq(uint256(g.escrowBasis), uint256(IRecurringEscrowManagement.EscrowBasis.Full)); + assertFalse(g.tempJit); + assertEq(g.collectorCount, 0); + } + + function test_AuditGlobal_WithAgreements() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca); + + uint256 
maxClaim = 1 ether * 3600 + 100 ether; + + IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); + assertEq(g.sumMaxNextClaimAll, maxClaim); + assertEq(g.totalAgreementCount, 1); + assertEq(g.collectorCount, 1); + // Token balance is the minted amount minus what was deposited to escrow + assertTrue(0 < g.tokenBalance); + } + + function test_AuditGlobal_MultiCollector() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForCollector(collector2, indexer, 2); + _offerForCollector(collector2, rca2); + + IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); + assertEq(g.totalAgreementCount, 2); + assertEq(g.collectorCount, 2); + } + + // -- Tests: auditPair -- + + function test_AuditPair_NonExistent() public view { + IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.collector, address(recurringCollector)); + assertEq(p.provider, indexer); + assertEq(p.agreementCount, 0); + assertEq(p.sumMaxNextClaim, 0); + assertEq(p.escrow.balance, 0); + } + + function test_AuditPair_WithAgreement() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca); + + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.agreementCount, 1); + assertEq(p.sumMaxNextClaim, maxClaim); + assertEq(p.escrow.balance, maxClaim); // Full mode deposits all + } + + function test_AuditPair_EscrowThawing() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + bytes16 agreementId = 
_offerAgreement(rca); + + // Cancel by SP to make maxNextClaim = 0, then reconcile (thaw starts) + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); + // sumMaxNextClaim should be 0 after reconcile + assertEq(p.sumMaxNextClaim, 0); + // Escrow should be thawing + assertTrue(0 < p.escrow.tokensThawing); + } + + // -- Tests: auditPairs -- + + function test_AuditPairs_EmptyCollector() public view { + IRecurringAgreementHelper.PairAudit[] memory pairs = agreementHelper.auditPairs(address(recurringCollector)); + assertEq(pairs.length, 0); + } + + function test_AuditPairs_MultiplePairs() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForCollector( + recurringCollector, + indexer2, + 2 + ); + _offerAgreement(rca2); + + IRecurringAgreementHelper.PairAudit[] memory pairs = agreementHelper.auditPairs(address(recurringCollector)); + assertEq(pairs.length, 2); + // Both should have agreementCount = 1 + assertEq(pairs[0].agreementCount, 1); + assertEq(pairs[1].agreementCount, 1); + } + + function test_AuditPairs_Paginated() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForCollector( + recurringCollector, + indexer2, + 2 + ); + _offerAgreement(rca2); + + // First page + IRecurringAgreementHelper.PairAudit[] memory first = agreementHelper.auditPairs( + address(recurringCollector), + 0, + 1 + ); + assertEq(first.length, 1); + + // Second page + IRecurringAgreementHelper.PairAudit[] memory second = agreementHelper.auditPairs( + 
address(recurringCollector), + 1, + 1 + ); + assertEq(second.length, 1); + + // Past end + IRecurringAgreementHelper.PairAudit[] memory empty = agreementHelper.auditPairs( + address(recurringCollector), + 2, + 1 + ); + assertEq(empty.length, 0); + } + + function test_AuditPairs_IsolatesCollectors() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForCollector(collector2, indexer, 2); + _offerForCollector(collector2, rca2); + + IRecurringAgreementHelper.PairAudit[] memory c1Pairs = agreementHelper.auditPairs(address(recurringCollector)); + assertEq(c1Pairs.length, 1); + + IRecurringAgreementHelper.PairAudit[] memory c2Pairs = agreementHelper.auditPairs(address(collector2)); + assertEq(c2Pairs.length, 1); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/helperCleanup.t.sol b/packages/issuance/test/unit/agreement-manager/helperCleanup.t.sol new file mode 100644 index 000000000..8a56264f2 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/helperCleanup.t.sol @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; + +contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + MockRecurringCollector internal collector2; + address internal indexer2; + + function setUp() public override { + super.setUp(); + collector2 = new MockRecurringCollector(); + vm.label(address(collector2), "RecurringCollector2"); + indexer2 = makeAddr("indexer2"); + + 
vm.prank(governor); + agreementManager.grantRole(COLLECTOR_ROLE, address(collector2)); + } + + // -- Helpers -- + + function _makeRCAFor( + address provider, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory rca) { + rca = _makeRCA(100 ether, 1 ether, 60, 3600, uint64(block.timestamp + 365 days)); + rca.serviceProvider = provider; + rca.nonce = nonce; + } + + function _offerForCollector( + MockRecurringCollector collector, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal returns (bytes16) { + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + return agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + } + + function _setCanceledBySPOnCollector( + MockRecurringCollector collector, + bytes16 agreementId, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal { + collector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: uint64(block.timestamp), + state: IRecurringCollector.AgreementState.CanceledByServiceProvider + }) + ); + } + + // -- Tests: reconcile (provider) -- + + function test_Reconcile_RemovesCanceledBySP() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementCanceledBySP(id, rca); + + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 1); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function test_Reconcile_SkipsStillClaimable() 
public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementAccepted(id, rca, uint64(block.timestamp)); + + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + function test_Reconcile_MixedStates() public { + // Agreement 1: canceled by SP (removable) + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + // Agreement 2: still active (not removable) + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer, 2); + bytes16 id2 = _offerAgreement(rca2); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 1); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + function test_Reconcile_EmptyProvider() public { + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 0); + } + + function test_Reconcile_ExpiredOffer() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + _offerAgreement(rca); + + // Warp past deadline + vm.warp(rca.deadline + 1); + + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 1); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function test_Reconcile_Permissionless() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementCanceledBySP(id, rca); + + address anyone = makeAddr("anyone"); + vm.prank(anyone); + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 1); + } + + // -- Tests: reconcilePair -- + + function test_ReconcilePair_RemovesAgreementButPairStaysWhileThawing() public { + 
IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementCanceledBySP(id, rca); + + (uint256 removed, bool pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + assertTrue(pairExists); // escrow still thawing — pair stays tracked + + // Drain escrow, then pair can be removed + vm.warp(block.timestamp + 1 days + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + (, pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertFalse(pairExists); + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); + } + + function test_ReconcilePair_PairExistsWhenAgreementsRemain() public { + // Two agreements, only one removable + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer, 2); + bytes16 id2 = _offerAgreement(rca2); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + + (uint256 removed, bool pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + assertTrue(pairExists); + } + + function test_ReconcilePair_IsolatesCollectors() public { + // Collector1 + indexer: canceled (removable) + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + // Collector2 + indexer: active (not removable) + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer, 2); + rca2.dataService = dataService; + _offerForCollector(collector2, rca2); + + // Reconcile only collector1's pair — escrow still thawing + (uint256 removed, bool pairExists) = 
agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + assertTrue(pairExists); // escrow still thawing + + // Collector2's agreement untouched + assertEq(agreementManager.getPairAgreementCount(address(collector2), indexer), 1); + } + + // -- Tests: reconcileCollector -- + + function test_ReconcileCollector_AllPairs() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer2, 2); + bytes16 id2 = _offerAgreement(rca2); + _setAgreementCanceledBySP(id2, rca2); + + (uint256 removed, bool collectorExists) = agreementHelper.reconcileCollector(address(recurringCollector)); + assertEq(removed, 2); + assertTrue(collectorExists); // escrow still thawing for both pairs + + // Drain escrows, then collector can be removed + vm.warp(block.timestamp + 1 days + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + + (, collectorExists) = agreementHelper.reconcileCollector(address(recurringCollector)); + assertFalse(collectorExists); + assertEq(agreementManager.getCollectorCount(), 0); + } + + function test_ReconcileCollector_PartialCleanup() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + // Active agreement — not removable + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer2, 2); + bytes16 id2 = _offerAgreement(rca2); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + + (uint256 removed, bool collectorExists) = agreementHelper.reconcileCollector(address(recurringCollector)); + assertEq(removed, 1); + assertTrue(collectorExists); // indexer2 still has an 
active agreement + } + + // -- Tests: reconcileAll -- + + function test_ReconcileAll_FullSweep() public { + // Collector1 + indexer + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + // Collector2 + indexer + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer, 2); + bytes16 id2 = _offerForCollector(collector2, rca2); + _setCanceledBySPOnCollector(collector2, id2, rca2); + + uint256 removed = agreementHelper.reconcileAll(); + assertEq(removed, 2); + assertEq(agreementManager.getTotalAgreementCount(), 0); + assertEq(agreementManager.getCollectorCount(), 2); // escrow still thawing + + // Drain escrows, then collectors can be removed + vm.warp(block.timestamp + 1 days + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileCollectorProvider(address(collector2), indexer); + + agreementHelper.reconcileAll(); + assertEq(agreementManager.getCollectorCount(), 0); + } + + function test_ReconcileAll_EmptyState() public { + uint256 removed = agreementHelper.reconcileAll(); + assertEq(removed, 0); + } + + function test_ReconcileAll_PartialCleanup() public { + // Removable + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + // Not removable + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer2, 2); + bytes16 id2 = _offerAgreement(rca2); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + + uint256 removed = agreementHelper.reconcileAll(); + assertEq(removed, 1); + assertEq(agreementManager.getTotalAgreementCount(), 1); + } + + // -- Tests: reconcilePair (value reconciliation + cleanup) -- + + function test_ReconcilePair_OnlyReconcilesPairAgreements() public { + // Collector1 + indexer: cancel by SP + 
IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + // Collector2 + indexer: still active (same provider, different collector) + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer, 2); + _offerForCollector(collector2, rca2); + + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Before reconcile, collector1's pair still has the old maxNextClaim + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); + + // Reconcile only collector1's pair + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + + // Collector1's pair reconciled to 0 + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + + // Collector2's pair untouched + assertEq(agreementManager.getSumMaxNextClaim(IRecurringCollector(address(collector2)), indexer), maxClaim); + } + + // -- Tests: reconcileAll (value reconciliation + cleanup) -- + + function test_ReconcileAll_AllCollectorsAllProviders() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor(indexer, 1); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor(indexer, 2); + bytes16 id2 = _offerForCollector(collector2, rca2); + _setCanceledBySPOnCollector(collector2, id2, rca2); + + uint256 removed = agreementHelper.reconcileAll(); + assertEq(removed, 2); + + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(IRecurringCollector(address(collector2)), indexer), 0); + } + + // -- Tests: reconcile does reconcile+cleanup in single pass -- + + function test_Reconcile_ReconcilesThenRemoves() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + bytes16 
id = _offerAgreement(rca); + // Set as CanceledBySP — after reconcile, maxNextClaim=0, then removable + _setAgreementCanceledBySP(id, rca); + + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 1); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function test_Reconcile_NoopWhenAllActive() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementAccepted(id, rca, uint64(block.timestamp)); + + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + // -- Tests: reconcilePair does reconcile+cleanup+pair removal -- + + function test_ReconcilePair_RemovesAgreementAndPairAfterThaw() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor(indexer, 1); + bytes16 id = _offerAgreement(rca); + _setAgreementCanceledBySP(id, rca); + + (uint256 removed, bool pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + assertTrue(pairExists); // escrow still thawing + + // Drain escrow, then pair can be removed + vm.warp(block.timestamp + 1 days + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + (, pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertFalse(pairExists); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/lifecycle.t.sol b/packages/issuance/test/unit/agreement-manager/lifecycle.t.sol new file mode 100644 index 000000000..843d929ea --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/lifecycle.t.sol @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementHelper } from 
"@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol"; +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; + +contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + uint256 internal constant THAW_PERIOD = 1 days; + + MockRecurringCollector internal collector2; + address internal indexer2; + + function setUp() public override { + super.setUp(); + collector2 = new MockRecurringCollector(); + vm.label(address(collector2), "RecurringCollector2"); + indexer2 = makeAddr("indexer2"); + + vm.prank(governor); + agreementManager.grantRole(COLLECTOR_ROLE, address(collector2)); + } + + // -- Helpers -- + + function _makeRCAFor( + MockRecurringCollector, + address provider, + uint256 maxInitial, + uint256 maxOngoing, + uint32 maxSec, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory rca) { + rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(agreementManager), + dataService: dataService, + serviceProvider: provider, + maxInitialTokens: maxInitial, + maxOngoingTokensPerSecond: maxOngoing, + minSecondsPerCollection: 60, + maxSecondsPerCollection: maxSec, + nonce: nonce, + metadata: "" + }); + } + + function _offerForCollector( + MockRecurringCollector collector, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal 
returns (bytes16) { + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + return agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + } + + function _setCanceledBySPOnCollector( + MockRecurringCollector collector, + bytes16 agreementId, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal { + collector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: uint64(block.timestamp), + state: IRecurringCollector.AgreementState.CanceledByServiceProvider + }) + ); + } + + // -- Tests: Single Agreement Full Lifecycle -- + + function test_Lifecycle_OfferAcceptCancelReconcileCleanup() public { + // 1. Start empty + IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); + assertEq(g.totalAgreementCount, 0); + + // 2. Offer + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor( + recurringCollector, + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // 3. Audit: agreement tracked, escrow deposited + g = agreementHelper.auditGlobal(); + assertEq(g.totalAgreementCount, 1); + assertEq(g.sumMaxNextClaimAll, maxClaim); + assertEq(g.collectorCount, 1); + + IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.agreementCount, 1); + assertEq(p.sumMaxNextClaim, maxClaim); + assertEq(p.escrow.balance, maxClaim); // Full mode + + // 4. 
Accept + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + // 5. Simulate first collection + vm.warp(block.timestamp + 1800); + _setAgreementCollected(agreementId, rca, uint64(block.timestamp - 1800), uint64(block.timestamp)); + + // 6. Reconcile — maxInitialTokens drops out after first collection + agreementHelper.reconcile(indexer); + uint256 reducedMaxClaim = 1 ether * 3600; // no more initial + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), reducedMaxClaim); + + // 7. Cancel by SP + _setAgreementCanceledBySP(agreementId, rca); + + // 8. Reconcile + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 1); + + // 9. Agreements gone, but escrow still thawing — collector stays tracked + g = agreementHelper.auditGlobal(); + assertEq(g.totalAgreementCount, 0); + assertEq(g.sumMaxNextClaimAll, 0); + assertEq(g.collectorCount, 1); // still tracked — escrow not yet drained + + // 10. Escrow is thawing + p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertTrue(0 < p.escrow.tokensThawing); + + // 11. Wait for thaw and withdraw + vm.warp(block.timestamp + THAW_PERIOD + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.escrow.balance, 0); + assertEq(p.escrow.tokensThawing, 0); + + // 12. 
Now that escrow is drained, reconcilePair removes tracking + agreementHelper.reconcilePair(address(recurringCollector), indexer); + + g = agreementHelper.auditGlobal(); + assertEq(g.collectorCount, 0); // fully cleaned up + } + + // -- Tests: Escrow Basis Changes -- + + function test_Lifecycle_EscrowBasisChange_FullToOnDemand() public { + // Offer in Full mode — escrow deposited + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor( + recurringCollector, + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 agreementId = _offerAgreement(rca); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.escrow.balance, maxClaim); + assertEq(p.escrow.tokensThawing, 0); + + // Switch to OnDemand + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); + assertEq(uint256(g.escrowBasis), uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand)); + + // reconcileCollectorProvider — OnDemand has min=0, max=sumMaxNextClaim. 
+ // Balance == max so no thaw needed (balanced) + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p = agreementHelper.auditPair(address(recurringCollector), indexer); + // In OnDemand with balance == max, no thaw + assertEq(p.escrow.balance, maxClaim); + + // Switch to JustInTime — should start thawing everything + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.escrow.tokensThawing, maxClaim); // thawing everything + + // Wait for thaw and withdraw + vm.warp(block.timestamp + THAW_PERIOD + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.escrow.balance, 0); + + // Switch back to Full — should deposit again + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.escrow.balance, maxClaim); + assertEq(p.escrow.tokensThawing, 0); + } + + // -- Tests: Multi-Collector Multi-Provider -- + + function test_Lifecycle_MultiCollectorMultiProvider() public { + // Offer: collector1+indexer, collector1+indexer2, collector2+indexer + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor( + recurringCollector, + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 id1 = _offerAgreement(rca1); + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor( + recurringCollector, + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + bytes16 id2 = _offerAgreement(rca2); + uint256 maxClaim2 = 2 ether * 7200 + 200 
ether; + + IRecurringCollector.RecurringCollectionAgreement memory rca3 = _makeRCAFor( + collector2, + indexer, + 50 ether, + 0.5 ether, + 1800, + 3 + ); + bytes16 id3 = _offerForCollector(collector2, rca3); + uint256 maxClaim3 = 0.5 ether * 1800 + 50 ether; + + // Audit global + IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); + assertEq(g.totalAgreementCount, 3); + assertEq(g.sumMaxNextClaimAll, maxClaim1 + maxClaim2 + maxClaim3); + assertEq(g.collectorCount, 2); + + // Audit pairs per collector + IRecurringAgreementHelper.PairAudit[] memory c1Pairs = agreementHelper.auditPairs(address(recurringCollector)); + assertEq(c1Pairs.length, 2); + + IRecurringAgreementHelper.PairAudit[] memory c2Pairs = agreementHelper.auditPairs(address(collector2)); + assertEq(c2Pairs.length, 1); + assertEq(c2Pairs[0].sumMaxNextClaim, maxClaim3); + + // Accept all, cancel collector1+indexer by SP + _setAgreementAccepted(id1, rca1, uint64(block.timestamp)); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + _setAgreementCanceledBySP(id1, rca1); + + // Selective reconcile: only collector1+indexer — escrow still thawing + (uint256 removed, bool pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + assertTrue(pairExists); // escrow still thawing + + // collector1 still has indexer2 (+ c1+indexer pair tracked due to thawing escrow) + assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 2); + + // Global state updated + g = agreementHelper.auditGlobal(); + assertEq(g.totalAgreementCount, 2); + assertEq(g.sumMaxNextClaimAll, maxClaim2 + maxClaim3); + + // Cancel remaining and full reconcile + _setAgreementCanceledBySP(id2, rca2); + _setCanceledBySPOnCollector(collector2, id3, rca3); + + // Reconcile all (reconcile + cleanup in single pass) + uint256 totalRemoved = agreementHelper.reconcileAll(); + assertEq(totalRemoved, 2); + + // Agreements gone, but escrows still 
thawing — collectors stay tracked + g = agreementHelper.auditGlobal(); + assertEq(g.totalAgreementCount, 0); + assertEq(g.sumMaxNextClaimAll, 0); + assertEq(g.collectorCount, 2); // still tracked — escrow not yet drained + + // Escrows should be thawing for all pairs + IRecurringAgreementHelper.PairAudit memory p1 = agreementHelper.auditPair(address(recurringCollector), indexer); + assertTrue(0 < p1.escrow.tokensThawing, "c1+indexer should be thawing"); + + IRecurringAgreementHelper.PairAudit memory p2 = agreementHelper.auditPair( + address(recurringCollector), + indexer2 + ); + assertTrue(0 < p2.escrow.tokensThawing, "c1+indexer2 should be thawing"); + + IRecurringAgreementHelper.PairAudit memory p3 = agreementHelper.auditPair(address(collector2), indexer); + assertTrue(0 < p3.escrow.tokensThawing, "c2+indexer should be thawing"); + + // Wait for thaw, withdraw all + vm.warp(block.timestamp + THAW_PERIOD + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + agreementManager.reconcileCollectorProvider(address(collector2), indexer); + + // All escrows drained + p1 = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p1.escrow.balance, 0); + assertEq(p1.escrow.tokensThawing, 0); + + p2 = agreementHelper.auditPair(address(recurringCollector), indexer2); + assertEq(p2.escrow.balance, 0); + assertEq(p2.escrow.tokensThawing, 0); + + p3 = agreementHelper.auditPair(address(collector2), indexer); + assertEq(p3.escrow.balance, 0); + assertEq(p3.escrow.tokensThawing, 0); + + // Now reconcile tracking (escrow drained, so reconcileCollectorProvider succeeds) + agreementHelper.reconcileAll(); + + g = agreementHelper.auditGlobal(); + assertEq(g.collectorCount, 0); // fully cleaned up + } + + // -- Tests: Expired Offer Cleanup -- + + function test_Lifecycle_ExpiredOffer_CleanupRemoves() public { + IRecurringCollector.RecurringCollectionAgreement 
memory rca = _makeRCAFor( + recurringCollector, + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca); + + // Before deadline: not removable + uint256 removed = agreementHelper.reconcile(indexer); + assertEq(removed, 0); + + // Warp past deadline + vm.warp(rca.deadline + 1); + + // Now removable + removed = agreementHelper.reconcile(indexer); + assertEq(removed, 1); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + + // Escrow deposited in Full mode should now be thawing + IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertTrue(0 < p.escrow.tokensThawing, "escrow should be thawing after expired offer removal"); + + // Wait for thaw and withdraw + vm.warp(block.timestamp + THAW_PERIOD + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.escrow.balance, 0); + assertEq(p.escrow.tokensThawing, 0); + } + + // -- Tests: reconcilePair Isolation -- + + function test_Lifecycle_ReconcilePair_IsolatesCollectors() public { + // Both collectors have agreements with the same indexer + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAFor( + recurringCollector, + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + bytes16 id1 = _offerAgreement(rca1); + _setAgreementCanceledBySP(id1, rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAFor( + collector2, + indexer, + 200 ether, + 2 ether, + 7200, + 2 + ); + _offerForCollector(collector2, rca2); + + // Reconcile only collector1's pair — escrow still thawing so pair still exists + (uint256 removed, bool pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertEq(removed, 1); + assertTrue(pairExists); // escrow still thawing, pair stays tracked + + // Collector2's agreement untouched + uint256 maxClaim1 = 1 ether * 3600 + 100 
ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(IRecurringCollector(address(collector2)), indexer), maxClaim2); + assertEq(agreementManager.getPairAgreementCount(address(collector2), indexer), 1); + + // Collector1's escrow should be thawing after reconcile + IRecurringAgreementHelper.PairAudit memory p1 = agreementHelper.auditPair(address(recurringCollector), indexer); + assertTrue(0 < p1.escrow.tokensThawing, "c1 escrow should be thawing after reconcile"); + + // Collector2's escrow should still be fully deposited (not thawing) + IRecurringAgreementHelper.PairAudit memory p2 = agreementHelper.auditPair(address(collector2), indexer); + assertEq(p2.escrow.balance, maxClaim2); + assertEq(p2.escrow.tokensThawing, 0); + + // Wait for thaw, then drain collector1's escrow + vm.warp(block.timestamp + THAW_PERIOD + 1); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p1 = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p1.escrow.balance, 0); + assertEq(p1.escrow.tokensThawing, 0); + + // Now pair can be fully removed + (, pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); + assertFalse(pairExists); // escrow drained, pair removed + } + + // -- Tests: Escrow Basis Mid-Lifecycle with Audit Verification -- + + function test_Lifecycle_EscrowBasisChange_OnDemandToFull() public { + // Start in OnDemand mode + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + + // Offer — OnDemand: min=0, max=sumMaxNextClaim. No deposit (min=0). 
+ IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor( + recurringCollector, + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.sumMaxNextClaim, maxClaim); + // OnDemand: no deposit, but _updateEscrow in offerAgreement may have deposited + // Actually in OnDemand min=0 so no deposit happens + assertEq(p.escrow.balance, 0); + + // Switch to Full + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + p = agreementHelper.auditPair(address(recurringCollector), indexer); + assertEq(p.escrow.balance, maxClaim); // Full deposits everything + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/mocks/MockEligibilityOracle.sol b/packages/issuance/test/unit/agreement-manager/mocks/MockEligibilityOracle.sol new file mode 100644 index 000000000..746c95de1 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/mocks/MockEligibilityOracle.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; + +/// @notice Simple mock eligibility oracle for testing SAM passthrough +contract MockEligibilityOracle is IProviderEligibility { + mapping(address => bool) public eligible; + bool public defaultEligible; + + function setEligible(address indexer, bool _eligible) external { + eligible[indexer] = _eligible; + } + + function setDefaultEligible(bool _default) external { + defaultEligible = _default; + } + + function isEligible(address indexer) external view override returns (bool) { + if (eligible[indexer]) return true; + 
return defaultEligible; + } +} diff --git a/packages/issuance/test/unit/agreement-manager/mocks/MockGraphToken.sol b/packages/issuance/test/unit/agreement-manager/mocks/MockGraphToken.sol new file mode 100644 index 000000000..dd07fab6e --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/mocks/MockGraphToken.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +/// @notice Minimal ERC20 token for testing. Mints initial supply to deployer. +contract MockGraphToken is ERC20 { + constructor() ERC20("Graph Token", "GRT") { + _mint(msg.sender, 1_000_000_000 ether); + } + + function mint(address to, uint256 amount) external { + _mint(to, amount); + } +} diff --git a/packages/issuance/test/unit/agreement-manager/mocks/MockPaymentsEscrow.sol b/packages/issuance/test/unit/agreement-manager/mocks/MockPaymentsEscrow.sol new file mode 100644 index 000000000..7cab89243 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/mocks/MockPaymentsEscrow.sol @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; + +/// @notice Stateful mock of PaymentsEscrow for RecurringAgreementManager testing. +/// Tracks deposits per (payer, collector, receiver) and transfers tokens on deposit. +/// Supports thaw/withdraw lifecycle for escrow rebalancing testing. 
+contract MockPaymentsEscrow is IPaymentsEscrow {
+    IERC20 public token;
+
+    /// @dev Escrow bookkeeping for one (payer, collector, receiver) triple.
+    struct Account {
+        uint256 balance;
+        uint256 tokensThawing;
+        uint256 thawEndTimestamp;
+    }
+
+    // accounts[payer][collector][receiver]
+    mapping(address => mapping(address => mapping(address => Account))) public accounts;
+
+    /// @notice Thawing period for testing (set to 1 day by default)
+    uint256 public constant THAWING_PERIOD = 1 days;
+
+    constructor(address _token) {
+        token = IERC20(_token);
+    }
+
+    /// @notice Pulls `tokens` from the caller (requires prior ERC20 approval) and
+    /// credits the (msg.sender, collector, receiver) account.
+    function deposit(address collector, address receiver, uint256 tokens) external {
+        token.transferFrom(msg.sender, address(this), tokens);
+        accounts[msg.sender][collector][receiver].balance += tokens;
+    }
+
+    /// @notice Starts thawing `tokens`; always allowed to reset an active timer.
+    function thaw(address collector, address receiver, uint256 tokens) external {
+        _thaw(collector, receiver, tokens, true);
+    }
+
+    /// @notice Re-targets the thaw amount; timer-reset behavior is caller-controlled.
+    /// @return The thaw amount actually in effect after the call.
+    function adjustThaw(
+        address collector,
+        address receiver,
+        uint256 tokens,
+        bool evenIfTimerReset
+    ) external returns (uint256) {
+        return _thaw(collector, receiver, tokens, evenIfTimerReset);
+    }
+
+    /// @notice Cancels any active thaw (equivalent to thawing 0 tokens).
+    function cancelThaw(address collector, address receiver) external {
+        _thaw(collector, receiver, 0, true);
+    }
+
+    /// @dev Shared thaw state machine:
+    ///  - the requested amount is clamped to the account balance;
+    ///  - a request equal to the current thaw amount is a no-op (timer untouched);
+    ///  - decreasing never resets the timer; reaching 0 also clears the timer;
+    ///  - increasing resets the timer, unless `evenIfTimerReset` is false and an
+    ///    active timer would move, in which case the current thaw amount is kept
+    ///    and returned unchanged.
+    function _thaw(
+        address collector,
+        address receiver,
+        uint256 tokens,
+        bool evenIfTimerReset
+    ) private returns (uint256 tokensThawing) {
+        Account storage account = accounts[msg.sender][collector][receiver];
+        tokensThawing = tokens < account.balance ? tokens : account.balance;
+        if (tokensThawing == account.tokensThawing) {
+            return tokensThawing;
+        }
+        uint256 newThawEndTimestamp = block.timestamp + THAWING_PERIOD;
+        if (tokensThawing < account.tokensThawing) {
+            account.tokensThawing = tokensThawing;
+            if (tokensThawing == 0) account.thawEndTimestamp = 0;
+        } else {
+            if (!evenIfTimerReset && account.thawEndTimestamp != 0 && account.thawEndTimestamp != newThawEndTimestamp)
+                return account.tokensThawing;
+            account.tokensThawing = tokensThawing;
+            account.thawEndTimestamp = newThawEndTimestamp;
+        }
+    }
+
+    /// @notice Releases fully-thawed tokens back to the payer (msg.sender).
+    /// @dev Silently no-ops when nothing is thawing or the timer has not elapsed.
+    /// NOTE(review): the production escrow presumably reverts in these cases —
+    /// confirm the silent return is intentional for the tests.
+    function withdraw(address collector, address receiver) external {
+        Account storage account = accounts[msg.sender][collector][receiver];
+        if (account.thawEndTimestamp == 0 || block.timestamp <= account.thawEndTimestamp) {
+            return;
+        }
+        uint256 tokens = account.tokensThawing;
+        account.balance -= tokens;
+        account.tokensThawing = 0;
+        account.thawEndTimestamp = 0;
+        token.transfer(msg.sender, tokens);
+    }
+
+    /// @notice Raw account tuple: (balance, tokensThawing, thawEndTimestamp).
+    function escrowAccounts(
+        address payer,
+        address collector,
+        address receiver
+    ) external view returns (uint256, uint256, uint256) {
+        Account storage account = accounts[payer][collector][receiver];
+        return (account.balance, account.tokensThawing, account.thawEndTimestamp);
+    }
+
+    /// @notice Available (non-thawing) balance; 0 when everything is thawing.
+    function getBalance(address payer, address collector, address receiver) external view returns (uint256) {
+        Account storage account = accounts[payer][collector][receiver];
+        return account.tokensThawing < account.balance ? account.balance - account.tokensThawing : 0;
+    }
+
+    /// @notice Test helper: set arbitrary account state for data-driven tests
+    function setAccount(
+        address payer,
+        address collector,
+        address receiver,
+        uint256 balance_,
+        uint256 tokensThawing_,
+        uint256 thawEndTimestamp_
+    ) external {
+        Account storage account = accounts[payer][collector][receiver];
+        account.balance = balance_;
+        account.tokensThawing = tokensThawing_;
+        account.thawEndTimestamp = thawEndTimestamp_;
+    }
+
+    // -- Stubs (not used by RecurringAgreementManager) --
+
+    function initialize() external {}
+    function depositTo(address, address, address, uint256) external {}
+    function collect(IGraphPayments.PaymentTypes, address, address, uint256, address, uint256, address) external {}
+    function MAX_WAIT_PERIOD() external pure returns (uint256) {
+        return 0;
+    }
+    // Mirrors the single THAWING_PERIOD constant used by _thaw above.
+    function WITHDRAW_ESCROW_THAWING_PERIOD() external pure returns (uint256) {
+        return THAWING_PERIOD;
+    }
+}
diff --git a/packages/issuance/test/unit/agreement-manager/mocks/MockRecurringCollector.sol b/packages/issuance/test/unit/agreement-manager/mocks/MockRecurringCollector.sol
new file mode 100644
index 000000000..36275f404
--- /dev/null
+++ b/packages/issuance/test/unit/agreement-manager/mocks/MockRecurringCollector.sol
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.27;
+
+import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol";
+
+/// @notice Minimal mock of RecurringCollector for RecurringAgreementManager testing.
+/// Stores agreement data set by tests, computes agreementId and hashRCA deterministically.
+contract MockRecurringCollector {
+    mapping(bytes16 => IRecurringCollector.AgreementData) private _agreements;
+    // NOTE(review): written by setAgreement but never read in this contract —
+    // possibly vestigial, or consumed by parts of the suite not shown; confirm.
+    mapping(bytes16 => bool) private _agreementExists;
+
+    // -- Test helpers --
+
+    /// @notice Test helper: install the agreement data returned by the getters below.
+    function setAgreement(bytes16 agreementId, IRecurringCollector.AgreementData memory data) external {
+        _agreements[agreementId] = data;
+        _agreementExists[agreementId] = true;
+    }
+
+    // -- IRecurringCollector subset --
+
+    /// @notice Returns whatever data the test installed (zeroed struct if unset).
+    function getAgreement(bytes16 agreementId) external view returns (IRecurringCollector.AgreementData memory) {
+        return _agreements[agreementId];
+    }
+
+    /// @notice Upper bound on the next collectable amount for `agreementId`.
+    function getMaxNextClaim(bytes16 agreementId) external view returns (uint256) {
+        IRecurringCollector.AgreementData memory a = _agreements[agreementId];
+        // Mirror RecurringCollector._getMaxNextClaim logic
+        // Only Accepted and CanceledByPayer agreements can still be collected against.
+        if (a.state == IRecurringCollector.AgreementState.CanceledByServiceProvider) return 0;
+        if (
+            a.state != IRecurringCollector.AgreementState.Accepted &&
+            a.state != IRecurringCollector.AgreementState.CanceledByPayer
+        ) return 0;
+
+        // Collection window runs from the last collection (or acceptance, if never
+        // collected) to endsAt (clamped to canceledAt when canceled by the payer).
+        uint256 collectionStart = 0 < a.lastCollectionAt ? a.lastCollectionAt : a.acceptedAt;
+        uint256 collectionEnd;
+        if (a.state == IRecurringCollector.AgreementState.CanceledByPayer) {
+            collectionEnd = a.canceledAt < a.endsAt ? a.canceledAt : a.endsAt;
+        } else {
+            collectionEnd = a.endsAt;
+        }
+        if (collectionEnd <= collectionStart) return 0;
+
+        // Ongoing accrual is capped per collection; initial tokens count only once
+        // (i.e. only while no collection has happened yet).
+        uint256 windowSeconds = collectionEnd - collectionStart;
+        uint256 maxSeconds = windowSeconds < a.maxSecondsPerCollection ? windowSeconds : a.maxSecondsPerCollection;
+        uint256 maxClaim = a.maxOngoingTokensPerSecond * maxSeconds;
+        if (a.lastCollectionAt == 0) maxClaim += a.maxInitialTokens;
+        return maxClaim;
+    }
+
+    /// @notice Deterministic agreement id derived from the offer parameters.
+    function generateAgreementId(
+        address payer,
+        address dataService,
+        address serviceProvider,
+        uint64 deadline,
+        uint256 nonce
+    ) external pure returns (bytes16) {
+        return bytes16(keccak256(abi.encode(payer, dataService, serviceProvider, deadline, nonce)));
+    }
+
+    /// @notice Deterministic hash over every RCA field (abi.encode-based stand-in
+    /// for the real collector's hashing).
+    function hashRCA(IRecurringCollector.RecurringCollectionAgreement calldata rca) external pure returns (bytes32) {
+        return
+            keccak256(
+                abi.encode(
+                    rca.deadline,
+                    rca.endsAt,
+                    rca.payer,
+                    rca.dataService,
+                    rca.serviceProvider,
+                    rca.maxInitialTokens,
+                    rca.maxOngoingTokensPerSecond,
+                    rca.minSecondsPerCollection,
+                    rca.maxSecondsPerCollection,
+                    rca.nonce,
+                    rca.metadata
+                )
+            );
+    }
+
+    /// @notice Deterministic hash over every RCAU field (includes agreementId).
+    function hashRCAU(
+        IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau
+    ) external pure returns (bytes32) {
+        return
+            keccak256(
+                abi.encode(
+                    rcau.agreementId,
+                    rcau.deadline,
+                    rcau.endsAt,
+                    rcau.maxInitialTokens,
+                    rcau.maxOngoingTokensPerSecond,
+                    rcau.minSecondsPerCollection,
+                    rcau.maxSecondsPerCollection,
+                    rcau.nonce,
+                    rcau.metadata
+                )
+            );
+    }
+}
diff --git a/packages/issuance/test/unit/agreement-manager/mocks/MockSubgraphService.sol b/packages/issuance/test/unit/agreement-manager/mocks/MockSubgraphService.sol
new file mode 100644
index 000000000..c74bf72cb
--- /dev/null
+++ b/packages/issuance/test/unit/agreement-manager/mocks/MockSubgraphService.sol
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.27;
+
+/// @notice Minimal mock of SubgraphService for RecurringAgreementManager cancelAgreement testing.
+/// Records cancel calls and can be configured to revert.
+contract MockSubgraphService {
+    mapping(bytes16 => bool) public canceled;
+    mapping(bytes16 => uint256) public cancelCallCount;
+
+    // When armed via setRevert, cancelIndexingAgreementByPayer reverts with revertMessage.
+    bool public shouldRevert;
+    string public revertMessage;
+
+    /// @notice Records the cancellation for `agreementId`, or reverts if configured to.
+    function cancelIndexingAgreementByPayer(bytes16 agreementId) external {
+        if (shouldRevert) {
+            revert(revertMessage);
+        }
+        canceled[agreementId] = true;
+        cancelCallCount[agreementId]++;
+    }
+
+    // -- Test helpers --
+
+    /// @notice Test helper: arm/disarm the revert behavior of the cancel call.
+    function setRevert(bool _shouldRevert, string memory _message) external {
+        shouldRevert = _shouldRevert;
+        revertMessage = _message;
+    }
+}
diff --git a/packages/issuance/test/unit/agreement-manager/multiCollector.t.sol b/packages/issuance/test/unit/agreement-manager/multiCollector.t.sol
new file mode 100644
index 000000000..f5785dcbd
--- /dev/null
+++ b/packages/issuance/test/unit/agreement-manager/multiCollector.t.sol
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: MIT
+pragma solidity ^0.8.27;
+
+import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol";
+import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol";
+
+import { RecurringAgreementManagerSharedTest } from "./shared.t.sol";
+import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol";
+
+/// @notice Exercises RecurringAgreementManager isolation across multiple collectors.
+contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManagerSharedTest {
+    /* solhint-disable graph/func-name-mixedcase */
+
+    MockRecurringCollector internal collector2;
+
+    function setUp() public override {
+        super.setUp();
+        collector2 = new MockRecurringCollector();
+        vm.label(address(collector2), "RecurringCollector2");
+
+        // collector2 must hold COLLECTOR_ROLE to be usable by the manager.
+        vm.prank(governor);
+        agreementManager.grantRole(COLLECTOR_ROLE, address(collector2));
+    }
+
+    // -- Helpers --
+
+    // Builds an RCA targeting `collector` and derives its deterministic id.
+    function _makeRCAForCollector(
+        MockRecurringCollector collector,
+        uint256 maxInitialTokens,
+        uint256 maxOngoingTokensPerSecond,
+        uint32 maxSecondsPerCollection,
+        uint64 endsAt,
+        uint256 nonce
+    )
internal view returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: endsAt, + payer: address(agreementManager), + dataService: dataService, + serviceProvider: indexer, + maxInitialTokens: maxInitialTokens, + maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, + minSecondsPerCollection: 60, + maxSecondsPerCollection: maxSecondsPerCollection, + nonce: nonce, + metadata: "" + }); + agreementId = collector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + } + + // -- Tests -- + + function test_MultiCollector_RequiredEscrowIsolation() public { + // Offer agreement via collector1 (the default recurringCollector) + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector( + recurringCollector, + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days), + 1 + ); + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + agreementManager.offerAgreement(rca1, _collector()); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + + // Offer agreement via collector2 with different terms + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector( + collector2, + 200 ether, + 2 ether, + 7200, + uint64(block.timestamp + 365 days), + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, IRecurringCollector(address(collector2))); + + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + + // Required escrow is independent per collector + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1); + assertEq(agreementManager.getSumMaxNextClaim(IRecurringCollector(address(collector2)), indexer), maxClaim2); + } + + function test_MultiCollector_BeforeCollectionOnlyOwnAgreements() public { + // Offer agreement via collector1 + 
(IRecurringCollector.RecurringCollectionAgreement memory rca1, bytes16 agreementId1) = _makeRCAForCollector( + recurringCollector, + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days), + 1 + ); + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + agreementManager.offerAgreement(rca1, _collector()); + + // collector2 cannot call beforeCollection on collector1's agreement + vm.prank(address(collector2)); + vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); + agreementManager.beforeCollection(agreementId1, 100 ether); + + // collector1 can call beforeCollection on its own agreement + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId1, 100 ether); + } + + function test_MultiCollector_AfterCollectionOnlyOwnAgreements() public { + // Offer agreement via collector1 + (IRecurringCollector.RecurringCollectionAgreement memory rca1, bytes16 agreementId1) = _makeRCAForCollector( + recurringCollector, + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days), + 1 + ); + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + agreementManager.offerAgreement(rca1, _collector()); + + // collector2 cannot call afterCollection on collector1's agreement + vm.prank(address(collector2)); + vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); + agreementManager.afterCollection(agreementId1, 100 ether); + } + + function test_MultiCollector_SeparateEscrowAccounts() public { + // Offer via collector1 + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector( + recurringCollector, + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days), + 1 + ); + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + // Fund with surplus so Full mode stays active (deficit < balance required) + token.mint(address(agreementManager), maxClaim1 + 1); + vm.prank(operator); + 
agreementManager.offerAgreement(rca1, _collector()); + + // Offer via collector2 + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector( + collector2, + 200 ether, + 2 ether, + 7200, + uint64(block.timestamp + 365 days), + 2 + ); + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + // Fund with surplus so Full mode stays active (deficit < balance required) + token.mint(address(agreementManager), maxClaim2 + 1); + vm.prank(operator); + agreementManager.offerAgreement(rca2, IRecurringCollector(address(collector2))); + + // Escrow accounts are separate per (collector, provider) + (uint256 collector1Balance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(collector1Balance, maxClaim1); + (uint256 collector2Balance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(collector2), + indexer + ); + assertEq(collector2Balance, maxClaim2); + } + + function test_MultiCollector_RevokeOnlyAffectsOwnCollectorEscrow() public { + // Offer via both collectors + (IRecurringCollector.RecurringCollectionAgreement memory rca1, bytes16 agreementId1) = _makeRCAForCollector( + recurringCollector, + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days), + 1 + ); + token.mint(address(agreementManager), 1_000_000 ether); + vm.prank(operator); + agreementManager.offerAgreement(rca1, _collector()); + + (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector( + collector2, + 200 ether, + 2 ether, + 7200, + uint64(block.timestamp + 365 days), + 2 + ); + vm.prank(operator); + agreementManager.offerAgreement(rca2, IRecurringCollector(address(collector2))); + + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + + // Revoke collector1's agreement + vm.prank(operator); + agreementManager.revokeOffer(agreementId1); + + // Collector1 escrow cleared, collector2 unaffected + 
assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(IRecurringCollector(address(collector2)), indexer), maxClaim2); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/multiIndexer.t.sol b/packages/issuance/test/unit/agreement-manager/multiIndexer.t.sol new file mode 100644 index 000000000..168f8208b --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/multiIndexer.t.sol @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + address internal indexer2; + address internal indexer3; + + function setUp() public virtual override { + super.setUp(); + indexer2 = makeAddr("indexer2"); + indexer3 = makeAddr("indexer3"); + } + + // -- Helpers -- + + function _makeRCAForIndexer( + address sp, + uint256 maxInitial, + uint256 maxOngoing, + uint32 maxSec, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitial, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + rca.serviceProvider = sp; + rca.nonce = nonce; + return rca; + } + + // -- Isolation: offer/sumMaxNextClaim -- + + function test_MultiIndexer_OfferIsolation() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + 
indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca3 = _makeRCAForIndexer( + indexer3, + 50 ether, + 0.5 ether, + 1800, + 3 + ); + + _offerAgreement(rca1); + _offerAgreement(rca2); + _offerAgreement(rca3); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + uint256 maxClaim3 = 0.5 ether * 1800 + 50 ether; + + // Each indexer has independent sumMaxNextClaim + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer3), maxClaim3); + + // Each has exactly 1 agreement + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getProviderAgreementCount(indexer2), 1); + assertEq(agreementManager.getProviderAgreementCount(indexer3), 1); + + // Each has independent escrow balance + (uint256 indexerBalance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + indexerBalance, + maxClaim1 + ); + (uint256 indexer2Balance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer2); + assertEq( + indexer2Balance, + maxClaim2 + ); + (uint256 indexer3Balance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer3); + assertEq( + indexer3Balance, + maxClaim3 + ); + } + + // -- Isolation: revoke one indexer doesn't affect others -- + + function test_MultiIndexer_RevokeIsolation() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 
= _makeRCAForIndexer(
+            indexer2,
+            200 ether,
+            2 ether,
+            7200,
+            2
+        );
+
+        bytes16 id1 = _offerAgreement(rca1);
+        _offerAgreement(rca2);
+
+        uint256 maxClaim2 = 2 ether * 7200 + 200 ether;
+
+        // Revoke indexer1's agreement
+        vm.prank(operator);
+        agreementManager.revokeOffer(id1);
+
+        // Indexer1 cleared
+        assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0);
+        assertEq(agreementManager.getProviderAgreementCount(indexer), 0);
+
+        // Indexer2 unaffected
+        assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2);
+        assertEq(agreementManager.getProviderAgreementCount(indexer2), 1);
+    }
+
+    // -- Isolation: removing one indexer's agreement (SP cancel + reconcile) doesn't affect others --
+
+    function test_MultiIndexer_RemoveIsolation() public {
+        IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer(
+            indexer,
+            100 ether,
+            1 ether,
+            3600,
+            1
+        );
+        IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer(
+            indexer2,
+            200 ether,
+            2 ether,
+            7200,
+            2
+        );
+
+        bytes16 id1 = _offerAgreement(rca1);
+        _offerAgreement(rca2);
+
+        uint256 maxClaim2 = 2 ether * 7200 + 200 ether;
+
+        // SP cancels indexer1, reconcile it
+        _setAgreementCanceledBySP(id1, rca1);
+        agreementManager.reconcileAgreement(id1);
+
+        // Indexer1 cleared
+        assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0);
+
+        // Indexer2 unaffected
+        assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2);
+    }
+
+    // -- Isolation: reconcile one indexer doesn't affect others --
+
+    function test_MultiIndexer_ReconcileIsolation() public {
+        IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer(
+            indexer,
+            100 ether,
+            1 ether,
+            3600,
+            1
+        );
+        IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer(
+            indexer2,
+            200 ether,
+            2 ether,
+            7200,
+            2
+        );
+
+        bytes16 id1 = _offerAgreement(rca1);
+        bytes16 id2 = _offerAgreement(rca2);
+
+        uint256
maxClaim2 = 2 ether * 7200 + 200 ether; + + // Accept and cancel indexer1's agreement by SP + _setAgreementCanceledBySP(id1, rca1); + + // Reconcile only indexer1 + agreementManager.reconcileAgreement(id1); + + // Indexer1 required escrow drops to 0 (CanceledBySP -> maxNextClaim=0) + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + + // Indexer2 completely unaffected (still pre-offered estimate) + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); + assertEq(agreementManager.getAgreementMaxNextClaim(id2), maxClaim2); + } + + // -- Multiple agreements per indexer -- + + function test_MultiIndexer_MultipleAgreementsPerIndexer() public { + // Two agreements for indexer, one for indexer2 + IRecurringCollector.RecurringCollectionAgreement memory rca1a = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca1b = _makeRCAForIndexer( + indexer, + 50 ether, + 0.5 ether, + 1800, + 2 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 3 + ); + + bytes16 id1a = _offerAgreement(rca1a); + _offerAgreement(rca1b); + _offerAgreement(rca2); + + uint256 maxClaim1a = 1 ether * 3600 + 100 ether; + uint256 maxClaim1b = 0.5 ether * 1800 + 50 ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + + assertEq(agreementManager.getProviderAgreementCount(indexer), 2); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1a + maxClaim1b); + assertEq(agreementManager.getProviderAgreementCount(indexer2), 1); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); + + // Reconcile one of indexer's agreements + _setAgreementCanceledBySP(id1a, rca1a); + agreementManager.reconcileAgreement(id1a); + + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), 
indexer), maxClaim1b); + + // Indexer2 still unaffected + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); + } + + // -- Cancel one indexer, reconcile another -- + + function test_MultiIndexer_CancelAndReconcileIndependently() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + // Accept both + _setAgreementAccepted(id1, rca1, uint64(block.timestamp)); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + + // Cancel indexer1's agreement via operator + vm.prank(operator); + agreementManager.cancelAgreement(id1); + + // Indexer1's required escrow updated by cancelAgreement's inline reconcile + // (still has maxNextClaim from RC since it's CanceledByPayer not CanceledBySP) + // But the mock just calls SubgraphService — the RC state doesn't change automatically. + // The cancelAgreement reconciles against whatever the mock RC says. 
+ + // Reconcile indexer2 independently + agreementManager.reconcileAgreement(id2); + + // Both indexers tracked independently + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getProviderAgreementCount(indexer2), 1); + } + + // -- Maintain isolation -- + + function test_MultiIndexer_MaintainOnlyAffectsTargetIndexer() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + + bytes16 id1 = _offerAgreement(rca1); + _offerAgreement(rca2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + + // Reconcile indexer1's agreement + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + // Update escrow for indexer1 — should thaw excess + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // Indexer1 escrow thawing (excess = maxClaim1, required = 0) + IPaymentsEscrow.EscrowAccount memory acct1; + (acct1.balance, acct1.tokensThawing, acct1.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(acct1.balance - acct1.tokensThawing, 0); + + // Indexer2 escrow completely unaffected + (uint256 indexer2Bal,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer2); + assertEq( + indexer2Bal, + maxClaim2 + ); + + // reconcileCollectorProvider on indexer2 is a no-op (balance == required, no excess) + agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + } + + // -- Full lifecycle across multiple indexers -- + + function test_MultiIndexer_FullLifecycle() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + 
indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + + // 1. Offer both + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); + + // 2. Accept both + _setAgreementAccepted(id1, rca1, uint64(block.timestamp)); + _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); + + // 3. Simulate collection on indexer1 (reduce remaining window) + uint64 collectionTime = uint64(block.timestamp + 1800); + _setAgreementCollected(id1, rca1, uint64(block.timestamp), collectionTime); + vm.warp(collectionTime); + + // 4. Reconcile indexer1 — required should decrease (no more initial tokens) + agreementManager.reconcileAgreement(id1); + assertTrue(agreementManager.getSumMaxNextClaim(_collector(), indexer) < maxClaim1); + + // Indexer2 unaffected + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); + + // 5. Cancel indexer2 by SP + _setAgreementCanceledBySP(id2, rca2); + agreementManager.reconcileAgreement(id2); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), 0); + + // 6. Reconcile indexer2's agreement + agreementManager.reconcileAgreement(id2); + assertEq(agreementManager.getProviderAgreementCount(indexer2), 0); + + // 7. 
Update escrow for indexer2 (thaw excess) + agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + IPaymentsEscrow.EscrowAccount memory acct2; + (acct2.balance, acct2.tokensThawing, acct2.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer2 + ); + assertEq(acct2.balance - acct2.tokensThawing, 0); + + // 8. Indexer1 still active + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertTrue(0 < agreementManager.getSumMaxNextClaim(_collector(), indexer)); + } + + // -- getAgreementInfo across indexers -- + + function test_MultiIndexer_GetAgreementInfo() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + IRecurringAgreements.AgreementInfo memory info1 = agreementManager.getAgreementInfo(id1); + IRecurringAgreements.AgreementInfo memory info2 = agreementManager.getAgreementInfo(id2); + + assertEq(info1.provider, indexer); + assertEq(info2.provider, indexer2); + assertTrue(info1.provider != address(0)); + assertTrue(info2.provider != address(0)); + assertEq(info1.maxNextClaim, 1 ether * 3600 + 100 ether); + assertEq(info2.maxNextClaim, 2 ether * 7200 + 200 ether); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/offerUpdate.t.sol b/packages/issuance/test/unit/agreement-manager/offerUpdate.t.sol new file mode 100644 index 000000000..9267c549d --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/offerUpdate.t.sol @@ -0,0 +1,455 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from 
"@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_OfferUpdate_SetsState() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + + _offerAgreementUpdate(rcau); + + // pendingMaxNextClaim = 2e18 * 7200 + 200e18 = 14600e18 + uint256 expectedPendingMaxClaim = 2 ether * 7200 + 200 ether; + // Original maxNextClaim = 1e18 * 3600 + 100e18 = 3700e18 + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + // Required escrow should include both + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + expectedPendingMaxClaim); + // Original maxNextClaim unchanged + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), originalMaxClaim); + } + + function test_OfferUpdate_AuthorizesHash() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 
60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + + _offerAgreementUpdate(rcau); + + // The update hash should be authorized for the IAgreementOwner callback + bytes32 updateHash = recurringCollector.hashRCAU(rcau); + bytes4 result = agreementManager.approveAgreement(updateHash); + assertEq(result, agreementManager.approveAgreement.selector); + } + + function test_OfferUpdate_FundsEscrow() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + uint256 sumMaxNextClaim = originalMaxClaim + pendingMaxClaim; + + // Fund and offer agreement + token.mint(address(agreementManager), sumMaxNextClaim); + vm.prank(operator); + bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); + + // Offer update (should fund the deficit) + token.mint(address(agreementManager), pendingMaxClaim); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + vm.prank(operator); + agreementManager.offerAgreementUpdate(rcau); + + // Verify escrow was funded for both + (uint256 escrowBalance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + escrowBalance, + sumMaxNextClaim + ); + } + + function test_OfferUpdate_ReplacesExistingPending() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + // First pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 
7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau1); + + uint256 pendingMaxClaim1 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim1); + + // Second pending update (replaces first — same nonce since first was never accepted) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 180 days), + 1 + ); + _offerAgreementUpdate(rcau2); + + uint256 pendingMaxClaim2 = 0.5 ether * 1800 + 50 ether; + // Old pending removed, new pending added + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim2); + } + + function test_OfferUpdate_EmitsEvent() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementUpdateOffered(agreementId, pendingMaxClaim, 1); + + vm.prank(operator); + agreementManager.offerAgreementUpdate(rcau); + } + + function test_OfferUpdate_Revert_WhenNotOffered() public { + bytes16 fakeId = bytes16(keccak256("fake")); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + fakeId, + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days), + 1 + ); + + vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.AgreementNotOffered.selector, fakeId)); + vm.prank(operator); + agreementManager.offerAgreementUpdate(rcau); + } + + function 
test_OfferUpdate_Revert_WhenNotOperator() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + + address nonOperator = makeAddr("nonOperator"); + vm.expectRevert( + abi.encodeWithSelector(IAccessControl.AccessControlUnauthorizedAccount.selector, nonOperator, AGREEMENT_MANAGER_ROLE) + ); + vm.prank(nonOperator); + agreementManager.offerAgreementUpdate(rcau); + } + + function test_OfferUpdate_Revert_WhenPaused() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + + // Grant pause role and pause + vm.startPrank(governor); + agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); + agreementManager.pause(); + vm.stopPrank(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + vm.prank(operator); + agreementManager.offerAgreementUpdate(rcau); + } + + function test_OfferUpdate_Revert_WhenNonceWrong() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Try nonce=2 when collector expects nonce=1 (updateNonce=0) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 2 + ); + + 
vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.InvalidUpdateNonce.selector, agreementId, 1, 2) + ); + vm.prank(operator); + agreementManager.offerAgreementUpdate(rcau); + } + + function test_OfferUpdate_Nonce2_AfterFirstAccepted() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer first update (nonce=1) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau1); + + // Simulate: agreement accepted with update nonce=1 applied + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 7200, + updateNonce: 1, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + // Offer second update (nonce=2) — should succeed because collector's updateNonce=1 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 300 ether, + 3 ether, + 60, + 3600, + uint64(block.timestamp + 1095 days), + 2 + ); + _offerAgreementUpdate(rcau2); + + // Verify pending state was set + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2Check = rcau2; + bytes32 updateHash = recurringCollector.hashRCAU(rcau2Check); + assertEq(agreementManager.approveAgreement(updateHash), agreementManager.approveAgreement.selector); + } + + function test_OfferUpdate_Revert_Nonce1_AfterFirstAccepted() public { + 
IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer first update (nonce=1) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau1); + + // Simulate: agreement accepted with update nonce=1 applied + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 7200, + updateNonce: 1, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + // Try nonce=1 again — should fail because collector already at updateNonce=1 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 300 ether, + 3 ether, + 60, + 3600, + uint64(block.timestamp + 1095 days), + 1 + ); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.InvalidUpdateNonce.selector, agreementId, 2, 1) + ); + vm.prank(operator); + agreementManager.offerAgreementUpdate(rcau2); + } + + function test_OfferUpdate_ReconcilesDuringOffer() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 preOfferMax = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + // Simulate acceptance with a collection (maxNextClaim should change) + uint64 acceptedAt = uint64(block.timestamp); + uint64 collectionAt = 
uint64(block.timestamp + 1800); + vm.warp(collectionAt); + _setAgreementCollected(agreementId, rca, acceptedAt, collectionAt); + + // Offer an update — this should reconcile first, updating maxNextClaim + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 365 days), + 1 + ); + _offerAgreementUpdate(rcau); + + // The base maxNextClaim should have been reconciled (reduced from pre-offer estimate) + // and the pending update added on top + uint256 pendingMaxClaim = 0.5 ether * 1800 + 50 ether; + uint256 postOfferMax = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + // Post-reconcile base should be less than the pre-offer estimate + // (collection happened, so remaining window is smaller) + assertTrue(postOfferMax < preOfferMax + pendingMaxClaim); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/reconcile.t.sol b/packages/issuance/test/unit/agreement-manager/reconcile.t.sol new file mode 100644 index 000000000..b2d45f413 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/reconcile.t.sol @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Vm } from "forge-std/Vm.sol"; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_ReconcileAgreement_AfterFirstCollection() public { + // Offer: maxNextClaim = 1e18 * 3600 + 100e18 = 3700e18 + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = 
_makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 initialMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + assertEq(initialMaxClaim, 3700 ether); + + // Simulate: agreement accepted and first collection happened + uint64 acceptedAt = uint64(block.timestamp); + uint64 lastCollectionAt = uint64(block.timestamp + 1 hours); + _setAgreementCollected(agreementId, rca, acceptedAt, lastCollectionAt); + + // After first collection, maxInitialTokens no longer applies + // New max = maxOngoingTokensPerSecond * min(remaining, maxSecondsPerCollection) + // remaining = endsAt - lastCollectionAt (large), capped by maxSecondsPerCollection = 3600 + // New max = 1e18 * 3600 = 3600e18 + vm.warp(lastCollectionAt); + bool exists = agreementManager.reconcileAgreement(agreementId); + + assertTrue(exists); + uint256 newMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + assertEq(newMaxClaim, 3600 ether); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 3600 ether); + } + + function test_ReconcileAgreement_CanceledByServiceProvider() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 3700 ether); + + // SP cancels - immediately non-collectable → reconcile deletes + _setAgreementCanceledBySP(agreementId, rca); + + bool exists = agreementManager.reconcileAgreement(agreementId); + + assertFalse(exists); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function test_ReconcileAgreement_CanceledByPayer_WindowOpen() public { + uint64 startTime = 
uint64(block.timestamp); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(startTime + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Payer cancels 2 hours from now, never collected + uint64 acceptedAt = startTime; + uint64 canceledAt = uint64(startTime + 2 hours); + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, canceledAt, 0); + + bool exists = agreementManager.reconcileAgreement(agreementId); + + assertTrue(exists); + // Window = canceledAt - acceptedAt = 7200s, capped by maxSecondsPerCollection = 3600s + // maxClaim = 1e18 * 3600 + 100e18 (never collected, so includes initial) + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + } + + function test_ReconcileAgreement_CanceledByPayer_WindowExpired() public { + uint64 startTime = uint64(block.timestamp); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(startTime + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Payer cancels, and the collection already happened covering the full window + uint64 acceptedAt = startTime; + uint64 canceledAt = uint64(startTime + 2 hours); + // lastCollectionAt == canceledAt means window is empty + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, canceledAt, canceledAt); + + bool exists = agreementManager.reconcileAgreement(agreementId); + + // collectionEnd = canceledAt, collectionStart = lastCollectionAt = canceledAt + // window is empty -> maxClaim = 0 → deleted + assertFalse(exists); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function test_ReconcileAgreement_SkipsNotAccepted() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 
100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + + // Mock returns NotAccepted (default state in mock - zero struct) + // reconcile should skip recalculation and preserve the original estimate + + bool exists = agreementManager.reconcileAgreement(agreementId); + + assertTrue(exists); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), originalMaxClaim); + } + + function test_ReconcileAgreement_EmitsEvent() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // SP cancels + _setAgreementCanceledBySP(agreementId, rca); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementReconciled(agreementId, 3700 ether, 0); + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRemoved(agreementId, indexer); + + agreementManager.reconcileAgreement(agreementId); + } + + function test_ReconcileAgreement_NoEmitWhenUnchanged() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as accepted with same parameters - should produce same maxNextClaim + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + // maxClaim should remain 3700e18 (never collected, maxSecondsPerCollection < window) + // No event should be emitted + vm.recordLogs(); + agreementManager.reconcileAgreement(agreementId); + + // Check no AgreementReconciled or AgreementRemoved events were emitted + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 reconciledTopic = keccak256("AgreementReconciled(bytes16,uint256,uint256)"); + 
bytes32 removedTopic = keccak256("AgreementRemoved(bytes16,address)"); + for (uint256 i = 0; i < logs.length; i++) { + assertTrue(logs[i].topics[0] != reconciledTopic, "Unexpected AgreementReconciled event"); + assertTrue(logs[i].topics[0] != removedTopic, "Unexpected AgreementRemoved event"); + } + } + + function test_ReconcileAgreement_ReturnsFalse_WhenNotOffered() public { + bytes16 fakeId = bytes16(keccak256("fake")); + + // Returns false (not exists) when agreement not found (idempotent) + bool exists = agreementManager.reconcileAgreement(fakeId); + assertFalse(exists); + } + + function test_ReconcileAgreement_ExpiredAgreement() public { + uint64 endsAt = uint64(block.timestamp + 1 hours); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + endsAt + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as accepted, collected at endsAt (fully expired) + _setAgreementCollected(agreementId, rca, uint64(block.timestamp), endsAt); + vm.warp(endsAt); + + bool exists = agreementManager.reconcileAgreement(agreementId); + + // collectionEnd = endsAt, collectionStart = lastCollectionAt = endsAt + // window empty -> maxClaim = 0 → deleted + assertFalse(exists); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function test_ReconcileAgreement_ClearsPendingUpdate() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + // Offer a pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 pendingMaxClaim = 2 
ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + + // Simulate: agreement accepted and update applied on-chain (updateNonce = 1) + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: rcau.endsAt, + maxInitialTokens: rcau.maxInitialTokens, + maxOngoingTokensPerSecond: rcau.maxOngoingTokensPerSecond, + minSecondsPerCollection: rcau.minSecondsPerCollection, + maxSecondsPerCollection: rcau.maxSecondsPerCollection, + updateNonce: 1, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + bool exists = agreementManager.reconcileAgreement(agreementId); + + assertTrue(exists); + // Pending should be cleared, maxNextClaim recalculated from new terms + // newMaxClaim = 2e18 * 7200 + 200e18 = 14600e18 (never collected, maxSecondsPerCollection < window) + uint256 newMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), newMaxClaim); + // Required = only new maxClaim (pending cleared) + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), newMaxClaim); + } + + function test_ReconcileAgreement_KeepsPendingUpdate_WhenNotYetApplied() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + // Offer a pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + + // 
Simulate: agreement accepted but update NOT yet applied (updateNonce = 0) + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + bool exists = agreementManager.reconcileAgreement(agreementId); + + assertTrue(exists); + // maxNextClaim recalculated from original terms (same value since never collected) + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), originalMaxClaim); + // Pending still present + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + } + + // -- Tests merged from remove (cleanup behavior) -- + + function test_ReconcileAgreement_ReturnsTrue_WhenStillClaimable_Accepted() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Set as accepted but never collected - still claimable + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + bool exists = agreementManager.reconcileAgreement(agreementId); + assertTrue(exists); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + function test_ReconcileAgreement_DeletesExpiredOffer() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Warp past the RCA deadline (default: block.timestamp + 1 hours in _makeRCA) + vm.warp(block.timestamp + 2 hours); + + // Agreement not accepted + past deadline — should be deleted + bool exists = agreementManager.reconcileAgreement(agreementId); + + assertFalse(exists); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + function test_ReconcileAgreement_ReturnsTrue_WhenStillClaimable_NotAccepted() public { + 
(IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Not accepted yet, before deadline - still potentially claimable + bool exists = agreementManager.reconcileAgreement(agreementId); + assertTrue(exists); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + function test_ReconcileAgreement_ReturnsTrue_WhenCanceledByPayer_WindowStillOpen() public { + uint64 startTime = uint64(block.timestamp); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(startTime + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Payer canceled but window is still open (not yet collected) + uint64 canceledAt = uint64(startTime + 2 hours); + _setAgreementCanceledByPayer(agreementId, rca, startTime, canceledAt, 0); + + // Still claimable: window = canceledAt - acceptedAt = 7200s, capped at 3600s + bool exists = agreementManager.reconcileAgreement(agreementId); + assertTrue(exists); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + function test_ReconcileAgreement_ReducesRequiredEscrow_WithMultipleAgreements() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; // 3700e18 + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; // 14600e18 + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1 + maxClaim2); + + // Cancel agreement 1 by SP and 
reconcile it (deletes) + _setAgreementCanceledBySP(id1, rca1); + bool exists = agreementManager.reconcileAgreement(id1); + assertFalse(exists); + + // Only agreement 2's original maxClaim remains + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim2); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + + // Agreement 2 still tracked + assertEq(agreementManager.getAgreementMaxNextClaim(id2), maxClaim2); + } + + function test_ReconcileAgreement_Permissionless() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // SP cancels + _setAgreementCanceledBySP(agreementId, rca); + + // Anyone can reconcile + address anyone = makeAddr("anyone"); + vm.prank(anyone); + bool exists = agreementManager.reconcileAgreement(agreementId); + assertFalse(exists); + + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + function test_ReconcileAgreement_ClearsPendingUpdate_WhenCanceled() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer a pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + + // SP cancels - immediately removable + _setAgreementCanceledBySP(agreementId, rca); + + bool exists = agreementManager.reconcileAgreement(agreementId); + assertFalse(exists); + + // Both original and 
pending should be cleared from sumMaxNextClaim + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/register.t.sol b/packages/issuance/test/unit/agreement-manager/register.t.sol new file mode 100644 index 000000000..2f97d25ea --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/register.t.sol @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_Offer_SetsAgreementState() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 expectedId) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + assertEq(agreementId, expectedId); + // maxNextClaim = maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens + // = 1e18 * 3600 + 100e18 = 3700e18 + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedMaxClaim); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + } + + 
function test_Offer_FundsEscrow() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; + + // Fund with surplus so Full mode stays active (deficit < balance required) + token.mint(address(agreementManager), expectedMaxClaim + 1); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + + // Verify escrow was funded + (uint256 escrowBalance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + escrowBalance, + expectedMaxClaim + ); + } + + function test_Offer_PartialFunding_WhenInsufficientBalance() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; + uint256 available = 500 ether; // Less than expectedMaxClaim + + // Fund with less than needed + token.mint(address(agreementManager), available); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + + // Since available < required, Full degrades to OnDemand (deposit target = 0). + // No proactive deposit; JIT beforeCollection is the safety net. 
+ (uint256 escrowBalanceAfter,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + escrowBalanceAfter, + 0 + ); + // Escrow balance is 0 since no deposit was made + assertEq(agreementManager.getEscrowAccount(_collector(), indexer).balance, 0); + } + + function test_Offer_EmitsEvent() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 expectedId = recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; + + token.mint(address(agreementManager), expectedMaxClaim); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementOffered(expectedId, indexer, expectedMaxClaim); + + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + } + + function test_Offer_AuthorizesHash() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + _offerAgreement(rca); + + // The agreement hash should be authorized for the IAgreementOwner callback + bytes32 agreementHash = recurringCollector.hashRCA(rca); + bytes4 result = agreementManager.approveAgreement(agreementHash); + assertEq(result, agreementManager.approveAgreement.selector); + } + + function test_Offer_MultipleAgreements_SameIndexer() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = 
_offerAgreement(rca2); + + assertTrue(id1 != id2); + assertEq(agreementManager.getProviderAgreementCount(indexer), 2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1 + maxClaim2); + } + + function test_Offer_Revert_WhenPayerMismatch() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.payer = address(0xdead); // Wrong payer + + vm.expectRevert( + abi.encodeWithSelector( + IRecurringAgreementManagement.PayerMustBeManager.selector, + address(0xdead), + address(agreementManager) + ) + ); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + } + + function test_Offer_Revert_WhenAlreadyOffered() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.AgreementAlreadyOffered.selector, agreementId) + ); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + } + + function test_Offer_Revert_WhenNotOperator() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + address nonOperator = makeAddr("nonOperator"); + vm.expectRevert( + abi.encodeWithSelector(IAccessControl.AccessControlUnauthorizedAccount.selector, nonOperator, AGREEMENT_MANAGER_ROLE) + ); + vm.prank(nonOperator); + agreementManager.offerAgreement(rca, _collector()); + } + + function test_Offer_Revert_WhenUnauthorizedCollector() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 
days) + ); + + address fakeCollector = makeAddr("fakeCollector"); + token.mint(address(agreementManager), 10_000 ether); + vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.UnauthorizedCollector.selector, fakeCollector) + ); + vm.prank(operator); + agreementManager.offerAgreement(rca, IRecurringCollector(fakeCollector)); + } + + function test_Offer_Revert_WhenPaused() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + // Grant pause role and pause + vm.startPrank(governor); + agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); + agreementManager.pause(); + vm.stopPrank(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + vm.prank(operator); + agreementManager.offerAgreement(rca, _collector()); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/remove.t.sol b/packages/issuance/test/unit/agreement-manager/remove.t.sol new file mode 100644 index 000000000..e21010bfb --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/remove.t.sol @@ -0,0 +1,4 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +// Tests merged into reconcile.t.sol — reconcileAgreement now handles cleanup inline. 
diff --git a/packages/issuance/test/unit/agreement-manager/revokeAgreementUpdate.t.sol b/packages/issuance/test/unit/agreement-manager/revokeAgreementUpdate.t.sol new file mode 100644 index 000000000..2ad9d1bca --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/revokeAgreementUpdate.t.sol @@ -0,0 +1,260 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; +import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_RevokeAgreementUpdate_ClearsPendingState() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + + // Offer a pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + + // Revoke the pending update + vm.prank(operator); + bool revoked = 
agreementManager.revokeAgreementUpdate(agreementId); + assertTrue(revoked); + + // Pending state should be fully cleared + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); + assertEq(info.pendingUpdateMaxNextClaim, 0, "pending escrow should be zero"); + assertEq(info.pendingUpdateNonce, 0, "pending nonce should be zero"); + assertEq(info.pendingUpdateHash, bytes32(0), "pending hash should be zero"); + + // sumMaxNextClaim should only include the base claim + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); + + // The update hash should no longer be authorized + bytes32 updateHash = recurringCollector.hashRCAU(rcau); + bytes4 result = agreementManager.approveAgreement(updateHash); + assertTrue(result != agreementManager.approveAgreement.selector, "hash should not be authorized"); + } + + function test_RevokeAgreementUpdate_EmitsEvent() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementUpdateRevoked(agreementId, pendingMaxClaim, 1); + + vm.prank(operator); + agreementManager.revokeAgreementUpdate(agreementId); + } + + function test_RevokeAgreementUpdate_ReturnsFalse_WhenNoPending() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // No pending update — should return false + vm.prank(operator); + 
bool revoked = agreementManager.revokeAgreementUpdate(agreementId); + assertFalse(revoked); + } + + function test_RevokeAgreementUpdate_ReturnsFalse_WhenAlreadyApplied() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + // Simulate: accepted with update already applied (updateNonce=1) + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: rcau.endsAt, + maxInitialTokens: rcau.maxInitialTokens, + maxOngoingTokensPerSecond: rcau.maxOngoingTokensPerSecond, + minSecondsPerCollection: rcau.minSecondsPerCollection, + maxSecondsPerCollection: rcau.maxSecondsPerCollection, + updateNonce: 1, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + + // Reconcile inside revokeAgreementUpdate detects the update was applied + // and clears it — returns false (nothing left to revoke) + vm.prank(operator); + bool revoked = agreementManager.revokeAgreementUpdate(agreementId); + assertFalse(revoked); + } + + function test_RevokeAgreementUpdate_CanOfferNewUpdateAfterRevoke() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer update nonce=1 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + 
uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau1); + + // Revoke it + vm.prank(operator); + agreementManager.revokeAgreementUpdate(agreementId); + + // Offer a new update with the same nonce (1) — should succeed since the + // collector's updateNonce is still 0 and the pending was cleared + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( + agreementId, + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 180 days), + 1 + ); + _offerAgreementUpdate(rcau2); + + // New pending should be set + uint256 newPendingMaxClaim = 0.5 ether * 1800 + 50 ether; + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); + assertEq(info.pendingUpdateMaxNextClaim, newPendingMaxClaim); + assertEq(info.pendingUpdateNonce, 1); + } + + function test_RevokeAgreementUpdate_Revert_WhenNotOffered() public { + bytes16 fakeId = bytes16(keccak256("fake")); + + vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.AgreementNotOffered.selector, fakeId)); + vm.prank(operator); + agreementManager.revokeAgreementUpdate(fakeId); + } + + function test_RevokeAgreementUpdate_Revert_WhenNotOperator() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + address nonOperator = makeAddr("nonOperator"); + vm.expectRevert( + abi.encodeWithSelector( + IAccessControl.AccessControlUnauthorizedAccount.selector, + nonOperator, + AGREEMENT_MANAGER_ROLE + ) + ); + vm.prank(nonOperator); + agreementManager.revokeAgreementUpdate(agreementId); + } + + function test_RevokeAgreementUpdate_Revert_WhenPaused() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + _offerAgreement(rca); + bytes16 agreementId = 
recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + + vm.startPrank(governor); + agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); + agreementManager.pause(); + vm.stopPrank(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + vm.prank(operator); + agreementManager.revokeAgreementUpdate(agreementId); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/revokeOffer.t.sol b/packages/issuance/test/unit/agreement-manager/revokeOffer.t.sol new file mode 100644 index 000000000..71efb325e --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/revokeOffer.t.sol @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_RevokeOffer_ClearsAgreement() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + + uint256 maxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); + + vm.prank(operator); + bool gone = 
agreementManager.revokeOffer(agreementId); + assertTrue(gone); + + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + } + + function test_RevokeOffer_InvalidatesHash() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Hash is authorized before revoke + bytes32 rcaHash = recurringCollector.hashRCA(rca); + agreementManager.approveAgreement(rcaHash); // should not revert + + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + + // Hash should be rejected after revoke (agreement no longer exists) + assertEq(agreementManager.approveAgreement(rcaHash), bytes4(0)); + } + + function test_RevokeOffer_ClearsPendingUpdate() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Offer a pending update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + _offerAgreementUpdate(rcau); + + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + + // Both original and pending should be cleared + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + function test_RevokeOffer_EmitsEvent() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, 
+ 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.OfferRevoked(agreementId, indexer); + + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + } + + function test_RevokeOffer_Revert_WhenAlreadyAccepted() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Simulate acceptance in RC + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.AgreementAlreadyAccepted.selector, agreementId) + ); + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + } + + function test_RevokeOffer_ReturnsTrue_WhenNotOffered() public { + bytes16 fakeId = bytes16(keccak256("fake")); + vm.prank(operator); + bool gone = agreementManager.revokeOffer(fakeId); + assertTrue(gone); + } + + function test_RevokeOffer_Revert_WhenNotOperator() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + address nonOperator = makeAddr("nonOperator"); + vm.expectRevert( + abi.encodeWithSelector(IAccessControl.AccessControlUnauthorizedAccount.selector, nonOperator, AGREEMENT_MANAGER_ROLE) + ); + vm.prank(nonOperator); + agreementManager.revokeOffer(agreementId); + } + + function test_RevokeOffer_Revert_WhenPaused() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + vm.startPrank(governor); + agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); + 
agreementManager.pause(); + vm.stopPrank(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + vm.prank(operator); + agreementManager.revokeOffer(agreementId); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/shared.t.sol b/packages/issuance/test/unit/agreement-manager/shared.t.sol new file mode 100644 index 000000000..97056e564 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/shared.t.sol @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; + +import { IGraphToken } from "../../../contracts/common/IGraphToken.sol"; +import { RecurringAgreementManager } from "../../../contracts/agreement/RecurringAgreementManager.sol"; +import { RecurringAgreementHelper } from "../../../contracts/agreement/RecurringAgreementHelper.sol"; +import { MockGraphToken } from "./mocks/MockGraphToken.sol"; +import { MockPaymentsEscrow } from "./mocks/MockPaymentsEscrow.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; +import { MockSubgraphService } from "./mocks/MockSubgraphService.sol"; + +/// @notice Shared test setup for RecurringAgreementManager tests. 
+contract RecurringAgreementManagerSharedTest is Test { + // -- Contracts -- + MockGraphToken internal token; + MockPaymentsEscrow internal paymentsEscrow; + MockRecurringCollector internal recurringCollector; + MockSubgraphService internal mockSubgraphService; + RecurringAgreementManager internal agreementManager; + RecurringAgreementHelper internal agreementHelper; + + // -- Accounts -- + address internal governor; + address internal operator; + address internal indexer; + address internal dataService; + + // -- Constants -- + bytes32 internal constant GOVERNOR_ROLE = keccak256("GOVERNOR_ROLE"); + bytes32 internal constant OPERATOR_ROLE = keccak256("OPERATOR_ROLE"); + bytes32 internal constant DATA_SERVICE_ROLE = keccak256("DATA_SERVICE_ROLE"); + bytes32 internal constant COLLECTOR_ROLE = keccak256("COLLECTOR_ROLE"); + bytes32 internal constant AGREEMENT_MANAGER_ROLE = keccak256("AGREEMENT_MANAGER_ROLE"); + + function setUp() public virtual { + governor = makeAddr("governor"); + operator = makeAddr("operator"); + indexer = makeAddr("indexer"); + + // Deploy mocks + token = new MockGraphToken(); + paymentsEscrow = new MockPaymentsEscrow(address(token)); + recurringCollector = new MockRecurringCollector(); + mockSubgraphService = new MockSubgraphService(); + dataService = address(mockSubgraphService); + + // Deploy RecurringAgreementManager behind proxy + RecurringAgreementManager impl = new RecurringAgreementManager( + IGraphToken(address(token)), + IPaymentsEscrow(address(paymentsEscrow)) + ); + bytes memory initData = abi.encodeCall(RecurringAgreementManager.initialize, (governor)); + TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy( + address(impl), + address(this), // proxy admin + initData + ); + agreementManager = RecurringAgreementManager(address(proxy)); + + // Deploy RecurringAgreementHelper pointing at the manager + agreementHelper = new RecurringAgreementHelper(address(agreementManager), token); + + // Grant roles + 
vm.startPrank(governor); + agreementManager.grantRole(OPERATOR_ROLE, operator); + agreementManager.grantRole(DATA_SERVICE_ROLE, dataService); + agreementManager.grantRole(COLLECTOR_ROLE, address(recurringCollector)); + vm.stopPrank(); + + // Operator grants AGREEMENT_MANAGER_ROLE to itself (OPERATOR_ROLE is its admin) + vm.prank(operator); + agreementManager.grantRole(AGREEMENT_MANAGER_ROLE, operator); + + // Label addresses for trace output + vm.label(address(token), "GraphToken"); + vm.label(address(paymentsEscrow), "PaymentsEscrow"); + vm.label(address(recurringCollector), "RecurringCollector"); + vm.label(address(agreementManager), "RecurringAgreementManager"); + vm.label(address(agreementHelper), "RecurringAgreementHelper"); + vm.label(address(mockSubgraphService), "SubgraphService"); + } + + // -- Helpers -- + + /// @notice Get the default recurring collector as a typed IRecurringCollector + function _collector() internal view returns (IRecurringCollector) { + return IRecurringCollector(address(recurringCollector)); + } + + /// @notice Create a standard RCA with RecurringAgreementManager as payer + function _makeRCA( + uint256 maxInitialTokens, + uint256 maxOngoingTokensPerSecond, + uint32 minSecondsPerCollection, + uint32 maxSecondsPerCollection, + uint64 endsAt + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + return + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: endsAt, + payer: address(agreementManager), + dataService: dataService, + serviceProvider: indexer, + maxInitialTokens: maxInitialTokens, + maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, + minSecondsPerCollection: minSecondsPerCollection, + maxSecondsPerCollection: maxSecondsPerCollection, + nonce: 1, + metadata: "" + }); + } + + /// @notice Create a standard RCA and compute its agreementId + function _makeRCAWithId( + uint256 maxInitialTokens, + uint256 maxOngoingTokensPerSecond, + uint32 
maxSecondsPerCollection, + uint64 endsAt + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + rca = _makeRCA(maxInitialTokens, maxOngoingTokensPerSecond, 60, maxSecondsPerCollection, endsAt); + agreementId = recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + } + + /// @notice Offer an RCA via the operator and return the agreementId + function _offerAgreement(IRecurringCollector.RecurringCollectionAgreement memory rca) internal returns (bytes16) { + // Fund RecurringAgreementManager with enough tokens + token.mint(address(agreementManager), 1_000_000 ether); + + vm.prank(operator); + return agreementManager.offerAgreement(rca, _collector()); + } + + /// @notice Create a standard RCAU for an existing agreement + function _makeRCAU( + bytes16 agreementId, + uint256 maxInitialTokens, + uint256 maxOngoingTokensPerSecond, + uint32 minSecondsPerCollection, + uint32 maxSecondsPerCollection, + uint64 endsAt, + uint32 nonce + ) internal pure returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory) { + return + IRecurringCollector.RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: 0, // Not used for unsigned path + endsAt: endsAt, + maxInitialTokens: maxInitialTokens, + maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, + minSecondsPerCollection: minSecondsPerCollection, + maxSecondsPerCollection: maxSecondsPerCollection, + nonce: nonce, + metadata: "" + }); + } + + /// @notice Offer an RCAU via the operator + function _offerAgreementUpdate( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau + ) internal returns (bytes16) { + vm.prank(operator); + return agreementManager.offerAgreementUpdate(rcau); + } + + /// @notice Set up a mock agreement in RecurringCollector as Accepted + function _setAgreementAccepted( + bytes16 agreementId, + 
IRecurringCollector.RecurringCollectionAgreement memory rca, + uint64 acceptedAt + ) internal { + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: acceptedAt, + lastCollectionAt: 0, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + } + + /// @notice Set up a mock agreement as CanceledByServiceProvider + function _setAgreementCanceledBySP( + bytes16 agreementId, + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal { + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: uint64(block.timestamp), + lastCollectionAt: 0, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: uint64(block.timestamp), + state: IRecurringCollector.AgreementState.CanceledByServiceProvider + }) + ); + } + + /// @notice Set up a mock agreement as CanceledByPayer + function _setAgreementCanceledByPayer( + bytes16 agreementId, + IRecurringCollector.RecurringCollectionAgreement memory rca, + uint64 acceptedAt, + uint64 canceledAt, + uint64 lastCollectionAt + ) internal { + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: acceptedAt, + lastCollectionAt: 
lastCollectionAt, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: canceledAt, + state: IRecurringCollector.AgreementState.CanceledByPayer + }) + ); + } + + /// @notice Set up a mock agreement as having been collected + function _setAgreementCollected( + bytes16 agreementId, + IRecurringCollector.RecurringCollectionAgreement memory rca, + uint64 acceptedAt, + uint64 lastCollectionAt + ) internal { + recurringCollector.setAgreement( + agreementId, + IRecurringCollector.AgreementData({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: acceptedAt, + lastCollectionAt: lastCollectionAt, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + updateNonce: 0, + canceledAt: 0, + state: IRecurringCollector.AgreementState.Accepted + }) + ); + } +} diff --git a/packages/issuance/test/unit/agreement-manager/updateEscrow.t.sol b/packages/issuance/test/unit/agreement-manager/updateEscrow.t.sol new file mode 100644 index 000000000..f454f0080 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/updateEscrow.t.sol @@ -0,0 +1,742 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; + +contract 
RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ==================== Basic Thaw / Withdraw ==================== + + function test_UpdateEscrow_ThawsExcessWhenNoAgreements() public { + // Create agreement, fund escrow, then reconcile it + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Verify escrow was funded + (uint256 fundedBalance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + fundedBalance, + maxClaim + ); + + // SP cancels — reconcileAgreement triggers escrow update, thawing the full balance + _setAgreementCanceledBySP(agreementId, rca); + + agreementManager.reconcileAgreement(agreementId); + + assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + + // balance should now be fully thawing + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.balance - account.tokensThawing, 0); + } + + function test_UpdateEscrow_WithdrawsCompletedThaw() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // SP cancels and reconcile (triggers thaw) + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + // Fast forward past thawing period (1 day in mock) + vm.warp(block.timestamp + 1 days + 1); + + uint256 agreementManagerBalanceBefore = 
token.balanceOf(address(agreementManager)); + + // reconcileCollectorProvider: withdraw + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.EscrowWithdrawn(indexer, address(recurringCollector), maxClaim); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // Tokens should be back in RecurringAgreementManager + uint256 agreementManagerBalanceAfter = token.balanceOf(address(agreementManager)); + assertEq(agreementManagerBalanceAfter - agreementManagerBalanceBefore, maxClaim); + } + + function test_UpdateEscrow_NoopWhenNoBalance() public { + // No agreements, no balance — should succeed silently + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + } + + function test_UpdateEscrow_NoopWhenStillThawing() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // SP cancels and reconcile (triggers thaw) + _setAgreementCanceledBySP(agreementId, rca); + agreementManager.reconcileAgreement(agreementId); + + // Subsequent call before thaw complete: no-op (thaw in progress, amount is correct) + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // Balance should still be fully thawing + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.balance - account.tokensThawing, 0); + } + + function test_UpdateEscrow_Permissionless() public { + // Anyone can call reconcileCollectorProvider + address anyone = makeAddr("anyone"); + vm.prank(anyone); + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + } + + // ==================== Excess Thawing With Active Agreements ==================== + + 
function test_UpdateEscrow_ThawsExcessWithActiveAgreements() public { + // Offer agreement, accept, then reconcile down — excess should be thawed + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Accept and simulate a collection (reduces maxNextClaim) + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + uint64 collectionTime = uint64(block.timestamp + 1800); + _setAgreementCollected(agreementId, rca, uint64(block.timestamp), collectionTime); + vm.warp(collectionTime); + + // Reconcile — should reduce required escrow + agreementManager.reconcileAgreement(agreementId); + uint256 newRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); + assertTrue(newRequired < maxClaim, "Required should have decreased"); + + // Escrow balance is still maxClaim — excess exists + // The reconcileAgreement call already invoked _updateEscrow which thawed the excess + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + uint256 expectedExcess = maxClaim - newRequired; + assertEq(account.tokensThawing, expectedExcess, "Excess should be thawing"); + + // Liquid balance should equal required + uint256 liquid = account.balance - account.tokensThawing; + assertEq(liquid, newRequired, "Liquid balance should equal required"); + } + + // ==================== Partial Cancel ==================== + + function test_OfferAgreement_PartialCancelPreservesThawTimer() public { + // Setup: two agreements, reconcile one down to create excess, thaw it + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 
days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + _offerAgreement(rca2); + + uint256 maxClaimEach = 1 ether * 3600 + 100 ether; + + // SP cancels agreement 1, reconcile to 0 (triggers thaw of excess) + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(id1); + + // Verify excess is thawing + IPaymentsEscrow.EscrowAccount memory accountBefore; + (accountBefore.balance, accountBefore.tokensThawing, accountBefore.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(accountBefore.tokensThawing, maxClaimEach, "Excess should be thawing"); + uint256 thawEndBefore = accountBefore.thawEndTimestamp; + assertTrue(0 < thawEndBefore, "Thaw should be in progress"); + + // Now offer a small new agreement — should partial-cancel, NOT restart timer + IRecurringCollector.RecurringCollectionAgreement memory rca3 = _makeRCA( + 10 ether, + 0.1 ether, + 60, + 1800, + uint64(block.timestamp + 180 days) + ); + rca3.nonce = 3; + _offerAgreement(rca3); + + uint256 maxClaim3 = 0.1 ether * 1800 + 10 ether; + + // Check that thaw was partially canceled (not fully canceled) + IPaymentsEscrow.EscrowAccount memory accountAfter; + (accountAfter.balance, accountAfter.tokensThawing, accountAfter.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + + // New required = maxClaimEach + maxClaim3 + // Excess = 2*maxClaimEach - (maxClaimEach + maxClaim3) = maxClaimEach - maxClaim3 + uint256 expectedThawing = maxClaimEach - maxClaim3; + assertEq(accountAfter.tokensThawing, expectedThawing, "Thaw should be partially canceled"); + + // Timer should be preserved (not reset) + 
assertEq(accountAfter.thawEndTimestamp, thawEndBefore, "Thaw timer should be preserved"); + + // Liquid balance should cover new required + uint256 newRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); + uint256 liquid = accountAfter.balance - accountAfter.tokensThawing; + assertEq(liquid, newRequired, "Liquid should cover required"); + } + + function test_UpdateEscrow_FullCancelWhenDeficit() public { + // Setup: agreement funded, then increase required beyond balance + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 id1 = _offerAgreement(rca1); + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + + // SP cancels, reconcile to 0 (triggers thaw of all excess) + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(id1); + + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.tokensThawing, maxClaim1, "All should be thawing"); + + // Now offer a new agreement larger than what's in escrow + // This will make balance < required, so all thawing should be canceled + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 500 ether, + 5 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + _offerAgreement(rca2); + + // Thaw should have been fully canceled + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq(account.tokensThawing, 0, "Thaw should be fully canceled for deficit"); + } + + function test_UpdateEscrow_SkipsThawIncreaseToPreserveTimer() public { + // Setup: two agreements, thaw excess from removing first 
+ IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + _offerAgreement(rca2); + uint256 maxClaimEach = 1 ether * 3600 + 100 ether; + + // Reconcile agreement 1 to create excess (triggers thaw) + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(id1); + + IPaymentsEscrow.EscrowAccount memory accountBefore; + (accountBefore.balance, accountBefore.tokensThawing, accountBefore.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(accountBefore.tokensThawing, maxClaimEach); + uint256 thawEndBefore = accountBefore.thawEndTimestamp; + + // Advance time halfway through thawing + vm.warp(block.timestamp + 12 hours); + + // Reconcile agreement 2 — excess grows to 2*maxClaimEach + // Uses evenIfTimerReset=false internally, so thaw increase is skipped + bytes16 id2 = bytes16( + recurringCollector.generateAgreementId( + rca2.payer, + rca2.dataService, + rca2.serviceProvider, + rca2.deadline, + rca2.nonce + ) + ); + _setAgreementCanceledBySP(id2, rca2); + agreementManager.reconcileAgreement(id2); + agreementManager.reconcileAgreement(id2); + + IPaymentsEscrow.EscrowAccount memory accountAfter; + (accountAfter.balance, accountAfter.tokensThawing, accountAfter.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + + // Timer preserved — thaw increase was skipped to avoid resetting it + assertEq(accountAfter.thawEndTimestamp, thawEndBefore, "Thaw timer should be preserved"); + // Thaw amount stays at original (increase skipped) + 
assertEq(accountAfter.tokensThawing, maxClaimEach, "Thaw should stay at original amount"); + } + + // ==================== Data-driven: _updateEscrow combinations ==================== + // + // Tests all (escrowBasis, accountState) combinations via a helper that: + // 1. Sets escrowBasis (controls min/max) + // 2. Overrides mock escrow to desired (balance, tokensThawing, thawReady) + // 3. Calls reconcileCollectorProvider + // 4. Asserts expected (balance, tokensThawing) + // + // Desired behavior (the 4 objectives): + // Obj 1: liquid stays in [min, max] + // Obj 2: withdraw excess above min if thaw completed + // Obj 3: never increase thaw amount (would reset timer) + // Obj 4: minimize transactions — no needless deposit/thaw/cancel + + function _check( + IRecurringEscrowManagement.EscrowBasis basis, + uint256 bal, + uint256 thawing, + bool ready, + uint256 expBal, + uint256 expThaw, + string memory label + ) internal { + uint256 snap = vm.snapshot(); + + vm.prank(operator); + agreementManager.setEscrowBasis(basis); + + paymentsEscrow.setAccount( + address(agreementManager), + address(recurringCollector), + indexer, + bal, + thawing, + ready ? block.timestamp - 1 : (0 < thawing ? 
block.timestamp + 1 days : 0) + ); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory r; + (r.balance, r.tokensThawing, r.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(r.balance, expBal, string.concat(label, ": balance")); + assertEq(r.tokensThawing, expThaw, string.concat(label, ": thawing")); + + assertTrue(vm.revertTo(snap)); + } + + /// @dev Like _check but sets thawEndTimestamp to an exact value (for boundary testing) + function _checkAtTimestamp( + IRecurringEscrowManagement.EscrowBasis basis, + uint256 bal, + uint256 thawing, + uint256 thawEndTimestamp, + uint256 expBal, + uint256 expThaw, + string memory label + ) internal { + uint256 snap = vm.snapshot(); + + vm.prank(operator); + agreementManager.setEscrowBasis(basis); + + paymentsEscrow.setAccount( + address(agreementManager), + address(recurringCollector), + indexer, + bal, + thawing, + thawEndTimestamp + ); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory r; + (r.balance, r.tokensThawing, r.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(r.balance, expBal, string.concat(label, ": balance")); + assertEq(r.tokensThawing, expThaw, string.concat(label, ": thawing")); + + assertTrue(vm.revertTo(snap)); + } + + function test_UpdateEscrow_Combinations() public { + // S = sumMaxNextClaim, established by offering one agreement in Full mode. + // After offer: escrow balance = S, manager minted 1M in setUp. 
+ (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + _offerAgreement(rca); + uint256 S = 1 ether * 3600 + 100 ether; // 3700 ether + + // Ensure mock has enough ERC20 for large-balance test cases + token.mint(address(paymentsEscrow), 10 * S); + // Ensure 1 < block.timestamp so "thawReady" timestamps are non-zero + vm.warp(100); + + // ── Full mode: min = S, max = S ───────────────────────────────── + IRecurringEscrowManagement.EscrowBasis F = IRecurringEscrowManagement.EscrowBasis.Full; + + // basis bal thaw ready expBal expThaw + _check(F, S, 0, false, S, 0, "F1:balanced"); + _check(F, 2 * S, 0, false, 2 * S, S, "F2:excess->thaw"); + _check(F, S / 2, 0, false, S, 0, "F3:deficit->deposit"); + _check(F, 0, 0, false, S, 0, "F4:empty->deposit"); + _check(F, 2 * S, S, false, 2 * S, S, "F5:thaw,liquid=min->leave"); + _check(F, 2 * S, (S * 3) / 2, false, 2 * S, S, "F6:thaw,liquidcancel-to-min"); + _check(F, 2 * S, S, true, S, 0, "F7:ready,liquid=min->withdraw"); + _check(F, S, S, true, S, 0, "F8:ready,liquid=0->cancel-all"); + _check(F, S, S, false, S, 0, "F9:thaw,liquid=0->cancel-all"); + + // ── OnDemand mode: min = 0, max = S ───────────────────────────── + IRecurringEscrowManagement.EscrowBasis O = IRecurringEscrowManagement.EscrowBasis.OnDemand; + + _check(O, S, 0, false, S, 0, "O1:balanced"); + _check(O, 2 * S, 0, false, 2 * S, S, "O2:excess->thaw"); + _check(O, S / 2, 0, false, S / 2, 0, "O3:no-deposit(min=0)"); + _check(O, 0, 0, false, 0, 0, "O4:empty,no-op"); + _check(O, 2 * S, S, false, 2 * S, S, "O5:thaw,liquid>=min->leave"); + _check(O, 2 * S, (S * 3) / 2, false, 2 * S, (S * 3) / 2, "O6:thaw,liquid>=min->LEAVE(key)"); + _check(O, 2 * S, S, true, S, 0, "O7:ready->withdraw"); + _check(O, S, S, true, 0, 0, "O8:ready,all-thaw->withdraw-all"); + _check(O, S, S, false, S, S, "O9:thaw,liquid=0>=min->leave"); + + // ── JIT mode: min = 0, max = 0 
────────────────────────────────── + IRecurringEscrowManagement.EscrowBasis J = IRecurringEscrowManagement.EscrowBasis.JustInTime; + + _check(J, S, 0, false, S, S, "J1:thaw-all(max=0)"); + _check(J, 0, 0, false, 0, 0, "J2:empty,no-op"); + _check(J, 2 * S, S, false, 2 * S, 2 * S, "J3:same-block->increase-ok"); + _check(J, S, S, true, 0, 0, "J4:ready->withdraw-all"); + _check(J, 2 * S, S, true, S, S, "J5:ready->withdraw,thaw-rest"); + + // ── Boundary: thawEndTimestamp == block.timestamp should NOT withdraw ── + // PaymentsEscrow requires block.timestamp > thawEnd (strict); at the + // exact boundary the thaw has not yet completed. + _checkAtTimestamp(F, 2 * S, S, block.timestamp, 2 * S, S, "B1:boundary-full->no-withdraw"); + _checkAtTimestamp(O, 2 * S, S, block.timestamp, 2 * S, S, "B2:boundary-ondemand->no-withdraw"); + _checkAtTimestamp(J, S, S, block.timestamp, S, S, "B3:boundary-jit->no-withdraw"); + } + + // ==================== Cross-Indexer Isolation ==================== + + function test_UpdateEscrow_CrossIndexerIsolation() public { + address indexer2 = makeAddr("indexer2"); + + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 365 days) + ); + rca2.serviceProvider = indexer2; + rca2.nonce = 2; + + bytes16 id1 = _offerAgreement(rca1); + _offerAgreement(rca2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; + + // Reconcile indexer1's agreement (triggers thaw) + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + IPaymentsEscrow.EscrowAccount memory acct1; + (acct1.balance, acct1.tokensThawing, acct1.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + 
indexer + ); + assertEq(acct1.balance - acct1.tokensThawing, 0); + + // Indexer2 escrow should be unaffected + (uint256 indexer2Balance,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer2); + assertEq( + indexer2Balance, + maxClaim2 + ); + + // reconcileCollectorProvider on indexer2 should be a no-op (balance == required) + agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + (uint256 indexer2BalanceAfter,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer2); + assertEq( + indexer2BalanceAfter, + maxClaim2 + ); + } + + // ==================== NoopWhenBalanced ==================== + + function test_UpdateEscrow_NoopWhenBalanced() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Balance should exactly match required — no excess, no deficit + (uint256 balanceBefore,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + balanceBefore, + maxClaim + ); + + // reconcileCollectorProvider should be a no-op + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // Nothing changed + (uint256 balanceAfter,,) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq( + balanceAfter, + maxClaim + ); + + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.tokensThawing, 0, "No thawing should occur"); + } + + // ==================== Automatic Thaw on Reconcile ==================== + + function test_Reconcile_AutomaticallyThawsExcess() public 
{ + // Reconcile calls _updateEscrow, which should thaw excess automatically + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Accept and simulate a collection + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + uint64 collectionTime = uint64(block.timestamp + 1800); + _setAgreementCollected(agreementId, rca, uint64(block.timestamp), collectionTime); + vm.warp(collectionTime); + + // Reconcile — triggers _updateEscrow internally + agreementManager.reconcileAgreement(agreementId); + + // Excess should already be thawing + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + uint256 newRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); + uint256 expectedExcess = maxClaim - newRequired; + assertEq(account.tokensThawing, expectedExcess, "Excess should auto-thaw after reconcile"); + } + + // ==================== Withdraw guard: compare against liquid, not total ==================== + + function test_UpdateEscrow_WithdrawsPartialWhenLiquidCoversMin() public { + // Two agreements: keep the big one, reconcile the small one. + // After thaw completes, min <= liquid (= big max claim) -> withdraw proceeds. + // Only the small agreement's tokens leave escrow; min stays behind. 
+ IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca1.nonce = 1; + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + + _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; // 3700 ether + uint256 maxClaim2 = 0.5 ether * 1800 + 50 ether; // 950 ether + + // Cancel and reconcile rca2 -> excess (950) thawed, rca1 remains + _setAgreementCanceledBySP(id2, rca2); + agreementManager.reconcileAgreement(id2); + + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.tokensThawing, maxClaim2, "Excess from rca2 should be thawing"); + assertEq(account.balance - account.tokensThawing, maxClaim1, "Liquid should cover rca1"); + + // Wait for thaw to complete + vm.warp(block.timestamp + 1 days + 1); + + // Expect the withdraw event for the thawed amount + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.EscrowWithdrawn(indexer, address(recurringCollector), maxClaim2); + + agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + + // After withdraw: only rca1's required amount remains, nothing thawing + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq(account.balance, maxClaim1, "Balance should equal remaining min"); + assertEq(account.tokensThawing, 0, "Nothing should be thawing after withdraw"); + } + + function test_UpdateEscrow_PartialCancelAndWithdrawInOneCall() public { + // Scenario: all tokens thawing and ready, offer a smaller 
replacement. + // _updateEscrow partial-cancels thaw (to balance - min), then withdraws the + // reduced amount in a single call. No round-trip: balance ends at min, no redeposit. + + (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 id1 = _offerAgreement(rca1); + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; // 3700 ether + + // Reconcile -> full thaw + _setAgreementCanceledBySP(id1, rca1); + agreementManager.reconcileAgreement(id1); + + // Verify: entire balance is thawing, liquid = 0 + IPaymentsEscrow.EscrowAccount memory account; + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(account.tokensThawing, maxClaim1, "All should be thawing"); + assertEq(account.balance - account.tokensThawing, 0, "Liquid should be zero"); + + // Wait for thaw to complete + vm.warp(block.timestamp + 1 days + 1); + + // Offer smaller replacement -> _updateEscrow fires + // Partial-cancels thaw (3700 -> 2750), then withdraws 2750. Balance = 950 = min. 
+ IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 50 ether, + 0.5 ether, + 60, + 1800, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + uint256 maxClaim2 = 0.5 ether * 1800 + 50 ether; // 950 ether + + _offerAgreement(rca2); + + (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts(address(agreementManager), address(recurringCollector), indexer); + assertEq(account.balance, maxClaim2, "Balance should equal min after partial-cancel + withdraw"); + assertEq(account.tokensThawing, 0, "Nothing thawing after withdraw"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/allocator/construction.t.sol b/packages/issuance/test/unit/allocator/construction.t.sol index 7df34bc42..552863397 100644 --- a/packages/issuance/test/unit/allocator/construction.t.sol +++ b/packages/issuance/test/unit/allocator/construction.t.sol @@ -1,10 +1,11 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { BaseUpgradeable } from "../../../contracts/common/BaseUpgradeable.sol"; +import { IGraphToken } from "../../../contracts/common/IGraphToken.sol"; import { IssuanceAllocator } from "../../../contracts/allocate/IssuanceAllocator.sol"; import { IssuanceAllocatorSharedTest } from "./shared.t.sol"; @@ -14,11 +15,11 @@ contract IssuanceAllocatorConstructionTest is IssuanceAllocatorSharedTest { function test_Revert_ZeroGraphTokenAddress() public { vm.expectRevert(BaseUpgradeable.GraphTokenCannotBeZeroAddress.selector); - new IssuanceAllocator(address(0)); + new IssuanceAllocator(IGraphToken(address(0))); } function test_Revert_ZeroGovernorAddress() public { - IssuanceAllocator impl = new 
IssuanceAllocator(address(token)); + IssuanceAllocator impl = new IssuanceAllocator(IGraphToken(address(token))); bytes memory initData = abi.encodeCall(IssuanceAllocator.initialize, (address(0))); vm.expectRevert(BaseUpgradeable.GovernorCannotBeZeroAddress.selector); new TransparentUpgradeableProxy(address(impl), address(this), initData); diff --git a/packages/issuance/test/unit/allocator/defensiveChecks.t.sol b/packages/issuance/test/unit/allocator/defensiveChecks.t.sol index 2ba79fc21..f8f3f0a41 100644 --- a/packages/issuance/test/unit/allocator/defensiveChecks.t.sol +++ b/packages/issuance/test/unit/allocator/defensiveChecks.t.sol @@ -1,11 +1,12 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { IssuanceAllocator } from "../../../contracts/allocate/IssuanceAllocator.sol"; +import { IGraphToken } from "../../../contracts/common/IGraphToken.sol"; import { IssuanceAllocatorTestHarness } from "../../../contracts/test/allocate/IssuanceAllocatorTestHarness.sol"; import { MockGraphToken } from "../mocks/MockGraphToken.sol"; @@ -17,7 +18,7 @@ contract IssuanceAllocatorDefensiveChecksTest is Test { function setUp() public { MockGraphToken token = new MockGraphToken(); - IssuanceAllocatorTestHarness impl = new IssuanceAllocatorTestHarness(address(token)); + IssuanceAllocatorTestHarness impl = new IssuanceAllocatorTestHarness(IGraphToken(address(token))); bytes memory initData = abi.encodeCall(IssuanceAllocator.initialize, (address(this))); TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(this), initData); harness = IssuanceAllocatorTestHarness(address(proxy)); diff --git a/packages/issuance/test/unit/allocator/distribution.t.sol b/packages/issuance/test/unit/allocator/distribution.t.sol index 466f013d5..fb94737de 100644 --- 
a/packages/issuance/test/unit/allocator/distribution.t.sol +++ b/packages/issuance/test/unit/allocator/distribution.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; diff --git a/packages/issuance/test/unit/allocator/distributionAccounting.t.sol b/packages/issuance/test/unit/allocator/distributionAccounting.t.sol index 30638a0e4..ae40b10f7 100644 --- a/packages/issuance/test/unit/allocator/distributionAccounting.t.sol +++ b/packages/issuance/test/unit/allocator/distributionAccounting.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; import { Allocation } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceAllocatorTypes.sol"; diff --git a/packages/issuance/test/unit/allocator/interfaceIdStability.t.sol b/packages/issuance/test/unit/allocator/interfaceIdStability.t.sol index b7b8a4d42..463416bbd 100644 --- a/packages/issuance/test/unit/allocator/interfaceIdStability.t.sol +++ b/packages/issuance/test/unit/allocator/interfaceIdStability.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; diff --git a/packages/issuance/test/unit/allocator/shared.t.sol b/packages/issuance/test/unit/allocator/shared.t.sol index e1cc41100..5be20cc33 100644 --- a/packages/issuance/test/unit/allocator/shared.t.sol +++ b/packages/issuance/test/unit/allocator/shared.t.sol @@ -1,11 +1,12 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { IssuanceAllocator } from 
"../../../contracts/allocate/IssuanceAllocator.sol"; +import { IGraphToken } from "../../../contracts/common/IGraphToken.sol"; import { MockGraphToken } from "../mocks/MockGraphToken.sol"; import { MockSimpleTarget } from "../../../contracts/test/allocate/MockSimpleTarget.sol"; import { MockNotificationTracker } from "../../../contracts/test/allocate/MockNotificationTracker.sol"; @@ -51,7 +52,7 @@ contract IssuanceAllocatorSharedTest is Test { token = new MockGraphToken(); // Deploy IssuanceAllocator behind proxy - IssuanceAllocator impl = new IssuanceAllocator(address(token)); + IssuanceAllocator impl = new IssuanceAllocator(IGraphToken(address(token))); bytes memory initData = abi.encodeCall(IssuanceAllocator.initialize, (governor)); TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(this), initData); allocator = IssuanceAllocator(address(proxy)); diff --git a/packages/issuance/test/unit/allocator/targetManagement.t.sol b/packages/issuance/test/unit/allocator/targetManagement.t.sol index bf1229c93..111621715 100644 --- a/packages/issuance/test/unit/allocator/targetManagement.t.sol +++ b/packages/issuance/test/unit/allocator/targetManagement.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; import { diff --git a/packages/issuance/test/unit/direct-allocation/DirectAllocation.t.sol b/packages/issuance/test/unit/direct-allocation/DirectAllocation.t.sol index dab61dc44..112126a38 100644 --- a/packages/issuance/test/unit/direct-allocation/DirectAllocation.t.sol +++ b/packages/issuance/test/unit/direct-allocation/DirectAllocation.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; @@ -12,6 +12,7 @@ import { IIssuanceTarget } from 
"@graphprotocol/interfaces/contracts/issuance/al import { ISendTokens } from "@graphprotocol/interfaces/contracts/issuance/allocate/ISendTokens.sol"; import { BaseUpgradeable } from "../../../contracts/common/BaseUpgradeable.sol"; +import { IGraphToken } from "../../../contracts/common/IGraphToken.sol"; import { DirectAllocation } from "../../../contracts/allocate/DirectAllocation.sol"; import { MockGraphToken } from "../mocks/MockGraphToken.sol"; @@ -39,7 +40,7 @@ contract DirectAllocationTest is Test { token = new MockGraphToken(); - DirectAllocation impl = new DirectAllocation(address(token)); + DirectAllocation impl = new DirectAllocation(IGraphToken(address(token))); bytes memory initData = abi.encodeCall(DirectAllocation.initialize, (governor)); TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(this), initData); directAlloc = DirectAllocation(address(proxy)); @@ -52,11 +53,11 @@ contract DirectAllocationTest is Test { function test_Revert_ZeroGraphTokenAddress() public { vm.expectRevert(BaseUpgradeable.GraphTokenCannotBeZeroAddress.selector); - new DirectAllocation(address(0)); + new DirectAllocation(IGraphToken(address(0))); } function test_Revert_ZeroGovernorAddress() public { - DirectAllocation impl = new DirectAllocation(address(token)); + DirectAllocation impl = new DirectAllocation(IGraphToken(address(token))); bytes memory initData = abi.encodeCall(DirectAllocation.initialize, (address(0))); vm.expectRevert(BaseUpgradeable.GovernorCannotBeZeroAddress.selector); new TransparentUpgradeableProxy(address(impl), address(this), initData); @@ -178,7 +179,7 @@ contract DirectAllocationTest is Test { function test_Revert_SendTokens_TransferReturnsFalse() public { // Deploy DirectAllocation with a mock token that returns false on transfer MockFalseTransferToken falseToken = new MockFalseTransferToken(); - DirectAllocation impl2 = new DirectAllocation(address(falseToken)); + DirectAllocation impl2 = new 
DirectAllocation(IGraphToken(address(falseToken))); bytes memory initData2 = abi.encodeCall(DirectAllocation.initialize, (governor)); TransparentUpgradeableProxy proxy2 = new TransparentUpgradeableProxy(address(impl2), address(this), initData2); DirectAllocation da2 = DirectAllocation(address(proxy2)); diff --git a/packages/issuance/test/unit/eligibility/accessControl.t.sol b/packages/issuance/test/unit/eligibility/accessControl.t.sol index f1e9d15db..3f0a3dd56 100644 --- a/packages/issuance/test/unit/eligibility/accessControl.t.sol +++ b/packages/issuance/test/unit/eligibility/accessControl.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { RewardsEligibilityOracleSharedTest } from "./shared.t.sol"; diff --git a/packages/issuance/test/unit/eligibility/construction.t.sol b/packages/issuance/test/unit/eligibility/construction.t.sol index f623baee2..d63964c5b 100644 --- a/packages/issuance/test/unit/eligibility/construction.t.sol +++ b/packages/issuance/test/unit/eligibility/construction.t.sol @@ -1,10 +1,11 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/Initializable.sol"; import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { BaseUpgradeable } from "../../../contracts/common/BaseUpgradeable.sol"; +import { IGraphToken } from "../../../contracts/common/IGraphToken.sol"; import { RewardsEligibilityOracle } from "../../../contracts/eligibility/RewardsEligibilityOracle.sol"; import { RewardsEligibilityOracleSharedTest } from "./shared.t.sol"; @@ -16,11 +17,11 @@ contract RewardsEligibilityOracleConstructionTest is RewardsEligibilityOracleSha function test_Revert_ZeroGraphTokenAddress() public { vm.expectRevert(BaseUpgradeable.GraphTokenCannotBeZeroAddress.selector); - new RewardsEligibilityOracle(address(0)); + new 
RewardsEligibilityOracle(IGraphToken(address(0))); } function test_Revert_ZeroGovernorAddress() public { - RewardsEligibilityOracle impl = new RewardsEligibilityOracle(address(token)); + RewardsEligibilityOracle impl = new RewardsEligibilityOracle(IGraphToken(address(token))); bytes memory initData = abi.encodeCall(RewardsEligibilityOracle.initialize, (address(0))); vm.expectRevert(BaseUpgradeable.GovernorCannotBeZeroAddress.selector); diff --git a/packages/issuance/test/unit/eligibility/eligibility.t.sol b/packages/issuance/test/unit/eligibility/eligibility.t.sol index 5ceb13fbe..aaa74e0c6 100644 --- a/packages/issuance/test/unit/eligibility/eligibility.t.sol +++ b/packages/issuance/test/unit/eligibility/eligibility.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { RewardsEligibilityOracleSharedTest } from "./shared.t.sol"; diff --git a/packages/issuance/test/unit/eligibility/helper.t.sol b/packages/issuance/test/unit/eligibility/helper.t.sol new file mode 100644 index 000000000..51d40980f --- /dev/null +++ b/packages/issuance/test/unit/eligibility/helper.t.sol @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { RewardsEligibilityHelper } from "../../../contracts/eligibility/RewardsEligibilityHelper.sol"; + +import { RewardsEligibilityOracleSharedTest } from "./shared.t.sol"; + +/// @notice Tests for the stateless RewardsEligibilityHelper contract. 
+contract RewardsEligibilityHelperTest is RewardsEligibilityOracleSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + RewardsEligibilityHelper internal helper; + + function setUp() public override { + super.setUp(); + _setupOracleRole(); + helper = new RewardsEligibilityHelper(address(oracle)); + vm.label(address(helper), "RewardsEligibilityHelper"); + } + + // ==================== Constructor ==================== + + function test_Constructor_SetsOracle() public view { + assertEq(helper.ORACLE(), address(oracle)); + } + + function test_Constructor_Revert_ZeroAddress() public { + vm.expectRevert(RewardsEligibilityHelper.ZeroAddress.selector); + new RewardsEligibilityHelper(address(0)); + } + + // ==================== Batch by Address List ==================== + + function test_RemoveExpiredIndexers_List_AllExpired() public { + _renewEligibility(indexer1); + _renewEligibility(indexer2); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + address[] memory indexers = new address[](2); + indexers[0] = indexer1; + indexers[1] = indexer2; + + uint256 gone = helper.removeExpiredIndexers(indexers); + assertEq(gone, 2); + assertEq(oracle.getIndexerCount(), 0); + } + + function test_RemoveExpiredIndexers_List_MixedExpiry() public { + _renewEligibility(indexer1); + + // Advance time, then renew indexer2 (so only indexer1 is expired) + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + _renewEligibility(indexer2); + + address[] memory indexers = new address[](2); + indexers[0] = indexer1; + indexers[1] = indexer2; + + uint256 gone = helper.removeExpiredIndexers(indexers); + // indexer1 removed (gone), indexer2 still tracked (not expired) + assertEq(gone, 1); + assertEq(oracle.getIndexerCount(), 1); + } + + function test_RemoveExpiredIndexers_List_IncludesUntracked() public { + _renewEligibility(indexer1); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + address untracked = makeAddr("untracked"); + address[] memory 
indexers = new address[](2); + indexers[0] = indexer1; + indexers[1] = untracked; + + // Both are now absent — indexer1 removed, untracked was never there + uint256 gone = helper.removeExpiredIndexers(indexers); + assertEq(gone, 2); + } + + function test_RemoveExpiredIndexers_List_Empty() public { + address[] memory indexers = new address[](0); + uint256 gone = helper.removeExpiredIndexers(indexers); + assertEq(gone, 0); + } + + // ==================== Batch All ==================== + + function test_RemoveExpiredIndexers_All_AllExpired() public { + _renewEligibility(indexer1); + _renewEligibility(indexer2); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + uint256 gone = helper.removeExpiredIndexers(); + assertEq(gone, 2); + assertEq(oracle.getIndexerCount(), 0); + } + + function test_RemoveExpiredIndexers_All_MixedExpiry() public { + _renewEligibility(indexer1); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + _renewEligibility(indexer2); + + uint256 gone = helper.removeExpiredIndexers(); + assertEq(gone, 1); + assertEq(oracle.getIndexerCount(), 1); + } + + function test_RemoveExpiredIndexers_All_NoneTracked() public { + uint256 gone = helper.removeExpiredIndexers(); + assertEq(gone, 0); + } + + // ==================== Batch by Paginated Scan ==================== + + function test_RemoveExpiredIndexers_Scan_AllExpired() public { + _renewEligibility(indexer1); + _renewEligibility(indexer2); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + uint256 gone = helper.removeExpiredIndexers(0, 10); + assertEq(gone, 2); + assertEq(oracle.getIndexerCount(), 0); + } + + function test_RemoveExpiredIndexers_Scan_MixedExpiry() public { + _renewEligibility(indexer1); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + _renewEligibility(indexer2); + + // Both are tracked, but only indexer1 is expired + uint256 gone = helper.removeExpiredIndexers(0, 10); + assertEq(gone, 1); + assertEq(oracle.getIndexerCount(), 
1); + } + + function test_RemoveExpiredIndexers_Scan_OffsetPastEnd() public { + _renewEligibility(indexer1); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + uint256 gone = helper.removeExpiredIndexers(100, 10); + assertEq(gone, 0); + // indexer1 still tracked — scan didn't reach it + assertEq(oracle.getIndexerCount(), 1); + } + + function test_RemoveExpiredIndexers_Scan_PartialPage() public { + _renewEligibility(indexer1); + _renewEligibility(indexer2); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + // Only process first indexer + uint256 gone = helper.removeExpiredIndexers(0, 1); + assertEq(gone, 1); + assertEq(oracle.getIndexerCount(), 1); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/eligibility/indexerManagement.t.sol b/packages/issuance/test/unit/eligibility/indexerManagement.t.sol index 1411d97c9..bffb14e60 100644 --- a/packages/issuance/test/unit/eligibility/indexerManagement.t.sol +++ b/packages/issuance/test/unit/eligibility/indexerManagement.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IRewardsEligibilityEvents } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityEvents.sol"; diff --git a/packages/issuance/test/unit/eligibility/indexerTracking.t.sol b/packages/issuance/test/unit/eligibility/indexerTracking.t.sol new file mode 100644 index 000000000..2599310ad --- /dev/null +++ b/packages/issuance/test/unit/eligibility/indexerTracking.t.sol @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Vm } from "forge-std/Vm.sol"; + +import { IRewardsEligibilityEvents } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityEvents.sol"; + +import { RewardsEligibilityOracleSharedTest } from "./shared.t.sol"; + +/// @notice Tests for enumerable indexer tracking and staleness-based cleanup. 
+contract RewardsEligibilityOracleIndexerTrackingTest is RewardsEligibilityOracleSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function setUp() public override { + super.setUp(); + _setupOracleRole(); + } + + // ==================== Tracking on Renewal ==================== + + function test_Renewal_AddsToTrackedSet() public { + assertEq(oracle.getIndexerCount(), 0); + + _renewEligibility(indexer1); + + assertEq(oracle.getIndexerCount(), 1); + address[] memory indexers = oracle.getIndexers(); + assertEq(indexers.length, 1); + assertEq(indexers[0], indexer1); + } + + function test_Renewal_SecondIndexerIncreasesCount() public { + _renewEligibility(indexer1); + _renewEligibility(indexer2); + + assertEq(oracle.getIndexerCount(), 2); + address[] memory indexers = oracle.getIndexers(); + assertEq(indexers.length, 2); + } + + function test_Renewal_SameIndexerNoDuplicate() public { + _renewEligibility(indexer1); + assertEq(oracle.getIndexerCount(), 1); + + // Advance time so renewal actually updates timestamp + vm.warp(block.timestamp + 1); + _renewEligibility(indexer1); + + assertEq(oracle.getIndexerCount(), 1); + } + + function test_Renewal_EmitsTrackingEvent_OnlyFirstTime() public { + // First renewal — expect tracking event + address[] memory indexers = new address[](1); + indexers[0] = indexer1; + + vm.expectEmit(address(oracle)); + emit IRewardsEligibilityEvents.IndexerTrackingUpdated(indexer1, true); + + vm.prank(oracleAccount); + oracle.renewIndexerEligibility(indexers, ""); + + // Second renewal (new block) — no tracking event, only renewal event + vm.warp(block.timestamp + 1); + + vm.recordLogs(); + vm.prank(oracleAccount); + oracle.renewIndexerEligibility(indexers, ""); + + // Check that no IndexerTrackingUpdated was emitted + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 trackingSig = keccak256("IndexerTrackingUpdated(address,bool)"); + for (uint256 i = 0; i < logs.length; ++i) { + assertTrue(logs[i].topics[0] != trackingSig, 
"unexpected IndexerTrackingUpdated event"); + } + } + + // ==================== Pagination ==================== + + function test_GetIndexers_Paginated() public { + _renewEligibility(indexer1); + _renewEligibility(indexer2); + + address[] memory all = oracle.getIndexers(); + assertEq(all.length, 2); + + address[] memory first = oracle.getIndexers(0, 1); + assertEq(first.length, 1); + assertEq(first[0], all[0]); + + address[] memory second = oracle.getIndexers(1, 1); + assertEq(second.length, 1); + assertEq(second[0], all[1]); + } + + function test_GetIndexers_OffsetPastEnd_ReturnsEmpty() public { + _renewEligibility(indexer1); + + address[] memory result = oracle.getIndexers(5, 10); + assertEq(result.length, 0); + } + + function test_GetIndexers_CountClamped() public { + _renewEligibility(indexer1); + + address[] memory result = oracle.getIndexers(0, 100); + assertEq(result.length, 1); + assertEq(result[0], indexer1); + } + + // ==================== Indexer Retention Period Configuration ==================== + + function test_DefaultIndexerRetentionPeriod() public view { + assertEq(oracle.getIndexerRetentionPeriod(), DEFAULT_INDEXER_RETENTION_PERIOD); + } + + function test_SetIndexerRetentionPeriod() public { + _setupOperatorRole(); + + vm.expectEmit(address(oracle)); + emit IRewardsEligibilityEvents.IndexerRetentionPeriodSet(DEFAULT_INDEXER_RETENTION_PERIOD, 90 days); + + vm.prank(operator); + bool result = oracle.setIndexerRetentionPeriod(90 days); + assertTrue(result); + + assertEq(oracle.getIndexerRetentionPeriod(), 90 days); + } + + function test_SetIndexerRetentionPeriod_SameValue_NoEvent() public { + _setupOperatorRole(); + + vm.recordLogs(); + vm.prank(operator); + oracle.setIndexerRetentionPeriod(DEFAULT_INDEXER_RETENTION_PERIOD); + + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 sig = keccak256("IndexerRetentionPeriodSet(uint256,uint256)"); + for (uint256 i = 0; i < logs.length; ++i) { + assertTrue(logs[i].topics[0] != sig, "unexpected 
IndexerRetentionPeriodSet event"); + } + } + + function test_Revert_SetIndexerRetentionPeriod_Unauthorized() public { + vm.expectRevert(); + vm.prank(unauthorized); + oracle.setIndexerRetentionPeriod(90 days); + } + + // ==================== Expired Indexer Removal ==================== + + function test_RemoveExpiredIndexer_ReturnsFalse_WhenNotExpired() public { + _renewEligibility(indexer1); + + bool gone = oracle.removeExpiredIndexer(indexer1); + assertFalse(gone); + assertEq(oracle.getIndexerCount(), 1); + } + + function test_RemoveExpiredIndexer_ReturnsTrue_WhenExpired() public { + _renewEligibility(indexer1); + + // Warp past retention period + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + bool gone = oracle.removeExpiredIndexer(indexer1); + assertTrue(gone); + assertEq(oracle.getIndexerCount(), 0); + } + + function test_RemoveExpiredIndexer_ReturnsTrue_WhenNotTracked() public { + bool gone = oracle.removeExpiredIndexer(indexer1); + assertTrue(gone); + } + + function test_RemoveExpiredIndexer_DeletesTimestamp() public { + _renewEligibility(indexer1); + assertGt(oracle.getEligibilityRenewalTime(indexer1), 0); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + oracle.removeExpiredIndexer(indexer1); + + assertEq(oracle.getEligibilityRenewalTime(indexer1), 0); + } + + function test_RemoveExpiredIndexer_EmitsEvent() public { + _renewEligibility(indexer1); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + vm.expectEmit(address(oracle)); + emit IRewardsEligibilityEvents.IndexerTrackingUpdated(indexer1, false); + + oracle.removeExpiredIndexer(indexer1); + } + + function test_RemoveExpiredIndexer_ReAddAfterRemoval() public { + _renewEligibility(indexer1); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + oracle.removeExpiredIndexer(indexer1); + assertEq(oracle.getIndexerCount(), 0); + + // Oracle renews the removed indexer — should re-add + _renewEligibility(indexer1); + 
assertEq(oracle.getIndexerCount(), 1); + assertGt(oracle.getEligibilityRenewalTime(indexer1), 0); + } + + function test_RemoveExpiredIndexer_Permissionless() public { + _renewEligibility(indexer1); + + vm.warp(block.timestamp + DEFAULT_INDEXER_RETENTION_PERIOD); + + address anyone = makeAddr("anyone"); + vm.prank(anyone); + bool gone = oracle.removeExpiredIndexer(indexer1); + assertTrue(gone); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/eligibility/interfaceCompliance.t.sol b/packages/issuance/test/unit/eligibility/interfaceCompliance.t.sol index 45668b582..d6e14ef81 100644 --- a/packages/issuance/test/unit/eligibility/interfaceCompliance.t.sol +++ b/packages/issuance/test/unit/eligibility/interfaceCompliance.t.sol @@ -1,11 +1,12 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; -import { IRewardsEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; import { IRewardsEligibilityAdministration } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityAdministration.sol"; +import { IRewardsEligibilityMaintenance } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityMaintenance.sol"; import { IRewardsEligibilityReporting } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityReporting.sol"; import { IRewardsEligibilityStatus } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibilityStatus.sol"; import { IPausableControl } from "@graphprotocol/interfaces/contracts/issuance/common/IPausableControl.sol"; @@ -22,14 +23,18 @@ contract 
RewardsEligibilityOracleInterfaceTest is RewardsEligibilityOracleShared assertTrue(oracle.supportsInterface(type(IERC165).interfaceId)); } - function test_SupportsIRewardsEligibility() public view { - assertTrue(oracle.supportsInterface(type(IRewardsEligibility).interfaceId)); + function test_SupportsIProviderEligibility() public view { + assertTrue(oracle.supportsInterface(type(IProviderEligibility).interfaceId)); } function test_SupportsIRewardsEligibilityAdministration() public view { assertTrue(oracle.supportsInterface(type(IRewardsEligibilityAdministration).interfaceId)); } + function test_SupportsIRewardsEligibilityMaintenance() public view { + assertTrue(oracle.supportsInterface(type(IRewardsEligibilityMaintenance).interfaceId)); + } + function test_SupportsIRewardsEligibilityReporting() public view { assertTrue(oracle.supportsInterface(type(IRewardsEligibilityReporting).interfaceId)); } @@ -53,12 +58,16 @@ contract RewardsEligibilityOracleInterfaceTest is RewardsEligibilityOracleShared // ==================== Interface ID Stability ==================== // These guard against accidental interface changes that would break compatibility. 
- function test_InterfaceId_IRewardsEligibility() public pure { - assertEq(type(IRewardsEligibility).interfaceId, bytes4(0x66e305fd)); + function test_InterfaceId_IProviderEligibility() public pure { + assertEq(type(IProviderEligibility).interfaceId, bytes4(0x66e305fd)); } function test_InterfaceId_IRewardsEligibilityAdministration() public pure { - assertEq(type(IRewardsEligibilityAdministration).interfaceId, bytes4(0x9a69f6aa)); + assertEq(type(IRewardsEligibilityAdministration).interfaceId, bytes4(0x428f54e5)); + } + + function test_InterfaceId_IRewardsEligibilityMaintenance() public pure { + assertEq(type(IRewardsEligibilityMaintenance).interfaceId, bytes4(0x6f001113)); } function test_InterfaceId_IRewardsEligibilityReporting() public pure { @@ -66,7 +75,7 @@ contract RewardsEligibilityOracleInterfaceTest is RewardsEligibilityOracleShared } function test_InterfaceId_IRewardsEligibilityStatus() public pure { - assertEq(type(IRewardsEligibilityStatus).interfaceId, bytes4(0x53740f19)); + assertEq(type(IRewardsEligibilityStatus).interfaceId, bytes4(0x054cdbc2)); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/eligibility/operatorFunctions.t.sol b/packages/issuance/test/unit/eligibility/operatorFunctions.t.sol index 07a3eedad..3d7fa4a1d 100644 --- a/packages/issuance/test/unit/eligibility/operatorFunctions.t.sol +++ b/packages/issuance/test/unit/eligibility/operatorFunctions.t.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Vm } from "forge-std/Vm.sol"; diff --git a/packages/issuance/test/unit/eligibility/shared.t.sol b/packages/issuance/test/unit/eligibility/shared.t.sol index 5c564d857..40d790f77 100644 --- a/packages/issuance/test/unit/eligibility/shared.t.sol +++ b/packages/issuance/test/unit/eligibility/shared.t.sol @@ -1,11 +1,12 @@ // SPDX-License-Identifier: MIT -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; 
import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { RewardsEligibilityOracle } from "../../../contracts/eligibility/RewardsEligibilityOracle.sol"; +import { IGraphToken } from "../../../contracts/common/IGraphToken.sol"; import { MockGraphToken } from "../mocks/MockGraphToken.sol"; /// @notice Shared test setup for RewardsEligibilityOracle tests. @@ -30,6 +31,7 @@ contract RewardsEligibilityOracleSharedTest is Test { uint256 internal constant DEFAULT_ELIGIBILITY_PERIOD = 14 days; uint256 internal constant DEFAULT_ORACLE_TIMEOUT = 7 days; + uint256 internal constant DEFAULT_INDEXER_RETENTION_PERIOD = 365 days; function setUp() public virtual { // Use a realistic timestamp so eligibility period math works correctly @@ -46,7 +48,7 @@ contract RewardsEligibilityOracleSharedTest is Test { token = new MockGraphToken(); // Deploy RewardsEligibilityOracle behind proxy - RewardsEligibilityOracle impl = new RewardsEligibilityOracle(address(token)); + RewardsEligibilityOracle impl = new RewardsEligibilityOracle(IGraphToken(address(token))); bytes memory initData = abi.encodeCall(RewardsEligibilityOracle.initialize, (governor)); TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy(address(impl), address(this), initData); oracle = RewardsEligibilityOracle(address(proxy)); diff --git a/packages/issuance/test/unit/mocks/MockGraphToken.sol b/packages/issuance/test/unit/mocks/MockGraphToken.sol index f4478cd7a..dd07fab6e 100644 --- a/packages/issuance/test/unit/mocks/MockGraphToken.sol +++ b/packages/issuance/test/unit/mocks/MockGraphToken.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: MIT -pragma solidity ^0.8.33; +pragma solidity ^0.8.27; import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; diff --git a/packages/subgraph-service/contracts/DisputeManager.sol b/packages/subgraph-service/contracts/DisputeManager.sol index 130182e4b..1ee798c5b 100644 --- 
a/packages/subgraph-service/contracts/DisputeManager.sol +++ b/packages/subgraph-service/contracts/DisputeManager.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable function-max-lines, gas-strict-inequalities @@ -11,10 +11,11 @@ import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-se import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IAttestation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAttestation.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; import { TokenUtils } from "@graphprotocol/contracts/contracts/utils/TokenUtils.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; -import { MathUtils } from "@graphprotocol/horizon/contracts/libraries/MathUtils.sol"; +import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; import { Attestation } from "./libraries/Attestation.sol"; import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; @@ -138,6 +139,20 @@ contract DisputeManager is return _createIndexingDisputeWithAllocation(msg.sender, disputeDeposit, allocationId, poi, blockNumber); } + /// @inheritdoc IDisputeManager + function createIndexingFeeDisputeV1( + bytes16 agreementId, + bytes32 poi, + uint256 entities, + uint256 blockNumber + ) external override returns (bytes32) { + // Get funds from fisherman + _graphToken().pullTokens(msg.sender, disputeDeposit); + + // Create a dispute + return _createIndexingFeeDisputeV1(msg.sender, disputeDeposit, agreementId, poi, entities, blockNumber); + } + /// @inheritdoc 
IDisputeManager function createQueryDispute(bytes calldata attestationData) external override returns (bytes32) { // Get funds from fisherman @@ -205,46 +220,6 @@ contract DisputeManager is return (dId1, dId2); } - /// @inheritdoc IDisputeManager - function createAndAcceptLegacyDispute( - address allocationId, - address fisherman, - uint256 tokensSlash, - uint256 tokensRewards - ) external override onlyArbitrator returns (bytes32) { - // Create a disputeId - bytes32 disputeId = keccak256(abi.encodePacked(allocationId, "legacy")); - - // Get the indexer for the legacy allocation - address indexer = _graphStaking().getAllocation(allocationId).indexer; - require(indexer != address(0), DisputeManagerIndexerNotFound(allocationId)); - - // Store dispute - disputes[disputeId] = Dispute( - indexer, - fisherman, - 0, - 0, - DisputeType.LegacyDispute, - IDisputeManager.DisputeStatus.Accepted, - block.timestamp, - block.timestamp + disputePeriod, - 0 - ); - - // Slash the indexer - ISubgraphService subgraphService_ = _getSubgraphService(); - subgraphService_.slash(indexer, abi.encode(tokensSlash, tokensRewards)); - - // Reward the fisherman - _graphToken().pushTokens(fisherman, tokensRewards); - - emit LegacyDisputeCreated(disputeId, indexer, fisherman, allocationId, tokensSlash, tokensRewards); - emit DisputeAccepted(disputeId, indexer, fisherman, tokensRewards); - - return disputeId; - } - /// @inheritdoc IDisputeManager function acceptDispute( bytes32 disputeId, @@ -507,6 +482,75 @@ contract DisputeManager is return disputeId; } + /** + * @notice Create indexing fee (version 1) dispute internal function. 
+ * @param _fisherman The fisherman creating the dispute + * @param _deposit Amount of tokens staked as deposit + * @param _agreementId The agreement id being disputed + * @param _poi The POI being disputed + * @param _entities The number of entities disputed + * @param _blockNumber The block number of the disputed POI + * @return The dispute id + */ + function _createIndexingFeeDisputeV1( + address _fisherman, + uint256 _deposit, + bytes16 _agreementId, + bytes32 _poi, + uint256 _entities, + uint256 _blockNumber + ) private returns (bytes32) { + IIndexingAgreement.AgreementWrapper memory wrapper = _getSubgraphService().getIndexingAgreement(_agreementId); + + // Agreement must have been collected on and be a version 1 + require( + wrapper.collectorAgreement.lastCollectionAt > 0, + DisputeManagerIndexingAgreementNotDisputable(_agreementId) + ); + require( + wrapper.agreement.version == IIndexingAgreement.IndexingAgreementVersion.V1, + DisputeManagerIndexingAgreementInvalidVersion(wrapper.agreement.version) + ); + + // Create a disputeId + bytes32 disputeId = keccak256( + abi.encodePacked("IndexingFeeDisputeWithAgreement", _agreementId, _poi, _entities, _blockNumber) + ); + + // Only one dispute at a time + require(!isDisputeCreated(disputeId), DisputeManagerDisputeAlreadyCreated(disputeId)); + + // The indexer must be disputable + uint256 stakeSnapshot = _getStakeSnapshot(wrapper.collectorAgreement.serviceProvider); + require(stakeSnapshot != 0, DisputeManagerZeroTokens()); + + disputes[disputeId] = Dispute( + wrapper.collectorAgreement.serviceProvider, + _fisherman, + _deposit, + 0, // no related dispute, + DisputeType.IndexingFeeDispute, + IDisputeManager.DisputeStatus.Pending, + block.timestamp, + block.timestamp + disputePeriod, + stakeSnapshot + ); + + emit IndexingFeeDisputeCreated( + disputeId, + wrapper.collectorAgreement.serviceProvider, + _fisherman, + _deposit, + wrapper.collectorAgreement.payer, + _agreementId, + _poi, + _entities, + stakeSnapshot + ); + 
+ return disputeId; + } + /** * @notice Accept a dispute * @param _disputeId The id of the dispute @@ -588,8 +632,8 @@ contract DisputeManager is // - The applied cut is the minimum between the provision's maxVerifierCut and the current fishermanRewardCut. This // protects the indexer from sudden changes to the fishermanRewardCut while ensuring the slashing does not revert due // to excessive rewards being requested. - uint256 maxRewardableTokens = MathUtils.min(_tokensSlash, provision.tokens); - uint256 effectiveCut = MathUtils.min(provision.maxVerifierCut, fishermanRewardCut); + uint256 maxRewardableTokens = Math.min(_tokensSlash, provision.tokens); + uint256 effectiveCut = Math.min(provision.maxVerifierCut, fishermanRewardCut); uint256 tokensRewards = effectiveCut.mulPPM(maxRewardableTokens); subgraphService_.slash(_indexer, abi.encode(_tokensSlash, tokensRewards)); diff --git a/packages/subgraph-service/contracts/DisputeManagerStorage.sol b/packages/subgraph-service/contracts/DisputeManagerStorage.sol index cb0766023..5c2295b73 100644 --- a/packages/subgraph-service/contracts/DisputeManagerStorage.sol +++ b/packages/subgraph-service/contracts/DisputeManagerStorage.sol @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-service/IDisputeManager.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; diff --git a/packages/subgraph-service/contracts/SubgraphService.sol b/packages/subgraph-service/contracts/SubgraphService.sol index 2eb8e0a9f..b0b4b5944 100644 --- a/packages/subgraph-service/contracts/SubgraphService.sol +++ b/packages/subgraph-service/contracts/SubgraphService.sol @@ -1,14 +1,17 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IGraphPayments } from 
"@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; import { IGraphTallyCollector } from "@graphprotocol/interfaces/contracts/horizon/IGraphTallyCollector.sol"; import { IRewardsIssuer } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsIssuer.sol"; import { IDataService } from "@graphprotocol/interfaces/contracts/data-service/IDataService.sol"; +import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; import { MulticallUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/MulticallUpgradeable.sol"; @@ -23,6 +26,8 @@ import { SubgraphServiceV1Storage } from "./SubgraphServiceStorage.sol"; import { TokenUtils } from "@graphprotocol/contracts/contracts/utils/TokenUtils.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; import { Allocation } from "./libraries/Allocation.sol"; +import { IndexingAgreementDecoder } from "./libraries/IndexingAgreementDecoder.sol"; +import { IndexingAgreement } from "./libraries/IndexingAgreement.sol"; /** * @title SubgraphService contract @@ -48,13 +53,23 @@ contract SubgraphService is using Allocation for mapping(address => 
IAllocation.State); using Allocation for IAllocation.State; using TokenUtils for IGraphToken; + using IndexingAgreement for IndexingAgreement.StorageManager; + + uint256 private constant DEFAULT = 0; + uint256 private constant VALID_PROVISION = 1 << 0; + uint256 private constant REGISTERED = 1 << 1; /** - * @notice Checks that an indexer is registered - * @param indexer The address of the indexer + * @notice Modifier that enforces service provider requirements. + * @dev Always checks pause state and caller authorization. Additional checks + * (provision validity, indexer registration) are selected via a bitmask. + * Delegates to {_enforceServiceRequirements} which is emitted once in bytecode + * and JUMPed to from each call site, avoiding repeated modifier inlining. + * @param serviceProvider The address of the service provider. + * @param requirements Bitmask of additional requirement flags. */ - modifier onlyRegisteredIndexer(address indexer) { - _checkRegisteredIndexer(indexer); + modifier enforceService(address serviceProvider, uint256 requirements) { + _enforceServiceRequirements(serviceProvider, requirements); _; } @@ -65,13 +80,18 @@ contract SubgraphService is * @param disputeManager The address of the DisputeManager contract * @param graphTallyCollector The address of the GraphTallyCollector contract * @param curation The address of the Curation contract + * @param recurringCollector The address of the RecurringCollector contract */ constructor( address graphController, address disputeManager, address graphTallyCollector, - address curation - ) DataService(graphController) Directory(address(this), disputeManager, graphTallyCollector, curation) { + address curation, + address recurringCollector + ) + DataService(graphController) + Directory(address(this), disputeManager, graphTallyCollector, curation, recurringCollector) + { _disableInitializers(); } @@ -111,10 +131,7 @@ contract SubgraphService is * Use zero address for automatically restaking payments. 
*/ /// @inheritdoc IDataService - function register( - address indexer, - bytes calldata data - ) external override onlyAuthorizedForProvision(indexer) onlyValidProvision(indexer) whenNotPaused { + function register(address indexer, bytes calldata data) external override enforceService(indexer, VALID_PROVISION) { (string memory url, string memory geohash, address paymentsDestination_) = abi.decode( data, (string, string, address) @@ -147,7 +164,7 @@ contract SubgraphService is function acceptProvisionPendingParameters( address indexer, bytes calldata - ) external override onlyAuthorizedForProvision(indexer) whenNotPaused { + ) external override enforceService(indexer, DEFAULT) { _acceptProvisionParameters(indexer); emit ProvisionPendingParametersAccepted(indexer); } @@ -180,14 +197,7 @@ contract SubgraphService is function startService( address indexer, bytes calldata data - ) - external - override - onlyAuthorizedForProvision(indexer) - onlyValidProvision(indexer) - onlyRegisteredIndexer(indexer) - whenNotPaused - { + ) external override enforceService(indexer, VALID_PROVISION | REGISTERED) { (bytes32 subgraphDeploymentId, uint256 tokens, address allocationId, bytes memory allocationProof) = abi.decode( data, (bytes32, uint256, address, bytes) @@ -216,22 +226,17 @@ contract SubgraphService is * - address `allocationId`: The id of the allocation */ /// @inheritdoc IDataService - function stopService( - address indexer, - bytes calldata data - ) external override onlyAuthorizedForProvision(indexer) onlyRegisteredIndexer(indexer) whenNotPaused { + function stopService(address indexer, bytes calldata data) external override enforceService(indexer, REGISTERED) { address allocationId = abi.decode(data, (address)); - require( - _allocations.get(allocationId).indexer == indexer, - SubgraphServiceAllocationNotAuthorized(indexer, allocationId) - ); + _checkAllocationOwnership(indexer, allocationId); + _onCloseAllocation(allocationId, false); _closeAllocation(allocationId, 
false); emit ServiceStopped(indexer, data); } /** * @notice Collects payment for the service provided by the indexer - * Allows collecting different types of payments such as query fees and indexing rewards. + * Allows collecting different types of payments such as query fees, indexing rewards and indexing fees. * It uses Graph Horizon payments protocol to process payments. * Reverts if the payment type is not supported. * @dev This function is the equivalent of the `collect` function for query fees and the `closeAllocation` function @@ -245,6 +250,12 @@ contract SubgraphService is * * For query fees, see {SubgraphService-_collectQueryFees} for more details. * For indexing rewards, see {AllocationManager-_collectIndexingRewards} for more details. + * For indexing fees, see {SubgraphService-_collectIndexingFees} for more details. + * + * Note that collecting any type of payment will require locking provisioned stake as collateral for a period of time. + * All types of payment share the same pool of provisioned stake however they each have separate accounting: + * - Indexing rewards can make full use of the available stake + * - Query and indexing fees share the pool, combined they can also make full use of the available stake * * @param indexer The address of the indexer * @param paymentType The type of payment to collect as defined in {IGraphPayments} @@ -255,27 +266,30 @@ contract SubgraphService is * - address `allocationId`: The id of the allocation * - bytes32 `poi`: The POI being presented * - bytes `poiMetadata`: The metadata associated with the POI. See {AllocationManager-_collectIndexingRewards} for more details. + * - For indexing fees: + * - bytes16 `agreementId`: The id of the indexing agreement + * - bytes `agreementCollectionMetadata`: The metadata required by the indexing agreement version. 
*/ /// @inheritdoc IDataService function collect( address indexer, IGraphPayments.PaymentTypes paymentType, bytes calldata data - ) - external - override - onlyAuthorizedForProvision(indexer) - onlyValidProvision(indexer) - onlyRegisteredIndexer(indexer) - whenNotPaused - returns (uint256) - { + ) external override enforceService(indexer, VALID_PROVISION | REGISTERED) returns (uint256) { uint256 paymentCollected = 0; if (paymentType == IGraphPayments.PaymentTypes.QueryFee) { paymentCollected = _collectQueryFees(indexer, data); } else if (paymentType == IGraphPayments.PaymentTypes.IndexingRewards) { paymentCollected = _collectIndexingRewards(indexer, data); + } else if (paymentType == IGraphPayments.PaymentTypes.IndexingFee) { + (bytes16 agreementId, bytes memory iaCollectionData) = IndexingAgreementDecoder.decodeCollectData(data); + paymentCollected = _collectIndexingFees( + indexer, + agreementId, + paymentsDestination[indexer], + iaCollectionData + ); } else { revert SubgraphServiceInvalidPaymentType(paymentType); } @@ -301,6 +315,7 @@ contract SubgraphService is IAllocation.State memory allocation = _allocations.get(allocationId); require(allocation.isStale(maxPOIStaleness), SubgraphServiceCannotForceCloseAllocation(allocationId)); require(!allocation.isAltruistic(), SubgraphServiceAllocationIsAltruistic(allocationId)); + _onCloseAllocation(allocationId, true); _closeAllocation(allocationId, true); } @@ -309,29 +324,11 @@ contract SubgraphService is address indexer, address allocationId, uint256 tokens - ) - external - onlyAuthorizedForProvision(indexer) - onlyValidProvision(indexer) - onlyRegisteredIndexer(indexer) - whenNotPaused - { - require( - _allocations.get(allocationId).indexer == indexer, - SubgraphServiceAllocationNotAuthorized(indexer, allocationId) - ); + ) external enforceService(indexer, VALID_PROVISION | REGISTERED) { + _checkAllocationOwnership(indexer, allocationId); _resizeAllocation(allocationId, tokens, _delegationRatio); } - /// @inheritdoc 
ISubgraphService - function migrateLegacyAllocation( - address indexer, - address allocationId, - bytes32 subgraphDeploymentId - ) external override onlyOwner { - _migrateLegacyAllocation(indexer, allocationId, subgraphDeploymentId); - } - /// @inheritdoc ISubgraphService function setPauseGuardian(address pauseGuardian, bool allowed) external override onlyOwner { _setPauseGuardian(pauseGuardian, allowed); @@ -357,7 +354,6 @@ contract SubgraphService is _setStakeToFeesRatio(stakeToFeesRatio_); } - // forge-lint: disable-next-item(mixed-case-function) /// @inheritdoc ISubgraphService function setMaxPOIStaleness(uint256 maxPoiStaleness_) external override onlyOwner { _setMaxPoiStaleness(maxPoiStaleness_); @@ -370,6 +366,115 @@ contract SubgraphService is emit CurationCutSet(curationCut); } + /// @inheritdoc ISubgraphService + function setIndexingFeesCut(uint256 indexingFeesCut_) external override onlyOwner { + require(PPMMath.isValidPPM(indexingFeesCut_), SubgraphServiceInvalidIndexingFeesCut(indexingFeesCut_)); + indexingFeesCut = indexingFeesCut_; + emit IndexingFeesCutSet(indexingFeesCut_); + } + + /** + * @inheritdoc ISubgraphService + * @notice Accept an indexing agreement. + * + * See {ISubgraphService.acceptIndexingAgreement}. 
+ * + * Requirements: + * - The agreement's indexer must be registered + * - The caller must be authorized by the agreement's indexer + * - The provision must be valid according to the subgraph service rules + * - Allocation must belong to the indexer and be open + * - Agreement must be for this data service + * - Agreement's subgraph deployment must match the allocation's subgraph deployment + * - Agreement must not have been accepted before + * - Allocation must not have an agreement already + * + * @dev rca.metadata is an encoding of {IndexingAgreement.AcceptIndexingAgreementMetadata} + * + * Emits {IndexingAgreement.IndexingAgreementAccepted} event + * + * @param allocationId The id of the allocation + * @param rca The Recurring Collection Agreement + * @param signature ECDSA signature bytes, or empty for contract-approved agreements + * @return agreementId The ID of the accepted indexing agreement + */ + function acceptIndexingAgreement( + address allocationId, + IRecurringCollector.RecurringCollectionAgreement calldata rca, + bytes calldata signature + ) external enforceService(rca.serviceProvider, VALID_PROVISION | REGISTERED) returns (bytes16) { + return IndexingAgreement._getStorageManager().accept(_allocations, allocationId, rca, signature); + } + + /** + * @inheritdoc ISubgraphService + * @notice Update an indexing agreement. + * + * See {IndexingAgreement.update}. 
+ * + * Requirements: + * - The contract must not be paused + * - The indexer must be valid + * + * @param indexer The indexer address + * @param rcau The Recurring Collection Agreement Update + * @param signature ECDSA signature bytes, or empty for contract-approved updates + */ + function updateIndexingAgreement( + address indexer, + IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, + bytes calldata signature + ) external enforceService(indexer, VALID_PROVISION | REGISTERED) { + IndexingAgreement._getStorageManager().update(indexer, rcau, signature); + } + + /** + * @inheritdoc ISubgraphService + * @notice Cancel an indexing agreement by indexer / operator. + * + * See {IndexingAgreement.cancel}. + * + * @dev Can only be canceled on behalf of a valid indexer. + * + * Requirements: + * - The contract must not be paused + * - The indexer must be valid + * + * @param indexer The indexer address + * @param agreementId The id of the agreement + */ + function cancelIndexingAgreement( + address indexer, + bytes16 agreementId + ) external enforceService(indexer, VALID_PROVISION | REGISTERED) { + IndexingAgreement._getStorageManager().cancel(indexer, agreementId); + } + + /** + * @inheritdoc IDataServiceAgreements + * @notice Cancel an indexing agreement by payer / signer. + * + * See {IDataServiceAgreements.cancelIndexingAgreementByPayer}. 
+ * + * Requirements: + * - The caller must be authorized by the payer + * - The agreement must be active + * + * Emits {IndexingAgreementCanceled} event + * + * @param agreementId The id of the agreement + */ + function cancelIndexingAgreementByPayer(bytes16 agreementId) external whenNotPaused { + IndexingAgreement._getStorageManager().cancelByPayer(agreementId); + } + + /// @inheritdoc ISubgraphService + function getIndexingAgreement( + bytes16 agreementId + ) external view returns (IIndexingAgreement.AgreementWrapper memory) { + return IndexingAgreement._getStorageManager().get(agreementId); + } + /// @inheritdoc ISubgraphService function getAllocation(address allocationId) external view override returns (IAllocation.State memory) { return _allocations[allocationId]; @@ -425,6 +530,16 @@ contract SubgraphService is return _isOverAllocated(indexer, _delegationRatio); } + /** + * @notice Internal function to handle closing an allocation + * @dev This function is called when an allocation is closed, either by the indexer or by a third party + * @param _allocationId The id of the allocation being closed + * @param _forceClosed Whether the allocation was force closed + */ + function _onCloseAllocation(address _allocationId, bool _forceClosed) internal { + IndexingAgreement._getStorageManager().onCloseAllocation(_allocationId, _forceClosed); + } + /** * @notice Sets the payments destination for an indexer to receive payments * @dev Emits a {PaymentsDestinationSet} event @@ -459,11 +574,35 @@ contract SubgraphService is } /** - * @notice Checks that an indexer is registered - * @param indexer The address of the indexer + * @notice Enforces service provider requirements. + * @dev Always checks pause state and caller authorization. Additional checks + * (provision validity, indexer registration) are selected via bitmask flags. + * Single dispatch point emitted once in bytecode, JUMPed to from each call site + * via the {enforceService} modifier. 
+ * @param _serviceProvider The address of the service provider. + * @param _checks Bitmask of additional requirement flags (VALID_PROVISION, REGISTERED). */ - function _checkRegisteredIndexer(address indexer) private view { - require(bytes(indexers[indexer].url).length > 0, SubgraphServiceIndexerNotRegistered(indexer)); + function _enforceServiceRequirements(address _serviceProvider, uint256 _checks) private view { + _requireNotPaused(); + _requireAuthorizedForProvision(_serviceProvider); + if (_checks & VALID_PROVISION != 0) _requireValidProvision(_serviceProvider); + if (_checks & REGISTERED != 0) + require( + bytes(indexers[_serviceProvider].url).length > 0, + SubgraphServiceIndexerNotRegistered(_serviceProvider) + ); + } + + /** + * @notice Checks that the allocation belongs to the given indexer. + * @param _indexer The address of the indexer. + * @param _allocationId The id of the allocation. + */ + function _checkAllocationOwnership(address _indexer, address _allocationId) internal view { + require( + _allocations.get(_allocationId).indexer == _indexer, + SubgraphServiceAllocationNotAuthorized(_indexer, _allocationId) + ); } /** @@ -581,11 +720,78 @@ contract SubgraphService is */ function _collectIndexingRewards(address _indexer, bytes calldata _data) private returns (uint256) { (address allocationId, bytes32 poi_, bytes memory poiMetadata_) = abi.decode(_data, (address, bytes32, bytes)); - require( - _allocations.get(allocationId).indexer == _indexer, - SubgraphServiceAllocationNotAuthorized(_indexer, allocationId) + _checkAllocationOwnership(_indexer, allocationId); + + (uint256 paymentCollected, bool allocationForceClosed) = _presentPoi( + allocationId, + poi_, + poiMetadata_, + _delegationRatio, + paymentsDestination[_indexer] + ); + + if (allocationForceClosed) { + _onCloseAllocation(allocationId, true); + } + + return paymentCollected; + } + + /** + * @notice Collect Indexing fees + * Stake equal to the amount being collected times the 
`stakeToFeesRatio` is locked into a stake claim. + * This claim can be released at a later stage once expired. + * + * It's important to note that before collecting this function will attempt to release any expired stake claims. + * This could lead to an out of gas error if there are too many expired claims. In that case, the indexer will need to + * manually release the claims, see {IDataServiceFees-releaseStake}, before attempting to collect again. + * + * @dev Uses the {RecurringCollector} to collect payment from Graph Horizon payments protocol. + * Fees are distributed to service provider and delegators by {GraphPayments} + * + * Requirements: + * - Indexer must have enough available tokens to lock as economic security for fees + * - Allocation must be open + * + * Emits a {StakeClaimsReleased} event, and a {StakeClaimReleased} event for each claim released. + * Emits a {StakeClaimLocked} event. + * Emits a {IndexingFeesCollectedV1} event. + * + * @param _indexer The address of the indexer + * @param _agreementId The id of the indexing agreement + * @param _paymentsDestination The address where the fees should be sent + * @param _data The indexing agreement collection data + * @return The amount of fees collected + */ + function _collectIndexingFees( + address _indexer, + bytes16 _agreementId, + address _paymentsDestination, + bytes memory _data + ) private returns (uint256) { + (address indexer, uint256 tokensCollected) = IndexingAgreement._getStorageManager().collect( + _allocations, + IndexingAgreement.CollectParams({ + indexer: _indexer, + agreementId: _agreementId, + currentEpoch: _graphEpochManager().currentEpoch(), + receiverDestination: _paymentsDestination, + data: _data, + indexingFeesCut: indexingFeesCut + }) ); - return _presentPoi(allocationId, poi_, poiMetadata_, _delegationRatio, paymentsDestination[_indexer]); + + _releaseStake(indexer, 0); + if (tokensCollected > 0) { + // lock stake as economic security for fees + _lockStake( + indexer, + 
tokensCollected * stakeToFeesRatio, + block.timestamp + _disputeManager().getDisputePeriod() + ); + } + + return tokensCollected; } /** diff --git a/packages/subgraph-service/contracts/SubgraphServiceStorage.sol b/packages/subgraph-service/contracts/SubgraphServiceStorage.sol index 67accbb5a..2ecb69293 100644 --- a/packages/subgraph-service/contracts/SubgraphServiceStorage.sol +++ b/packages/subgraph-service/contracts/SubgraphServiceStorage.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; @@ -22,4 +22,7 @@ abstract contract SubgraphServiceV1Storage is ISubgraphService { /// @notice Destination of indexer payments mapping(address indexer => address destination) public override paymentsDestination; + + /// @notice The cut data service takes from indexing fee payments. In PPM. + uint256 public indexingFeesCut; } diff --git a/packages/subgraph-service/contracts/libraries/Allocation.sol b/packages/subgraph-service/contracts/libraries/Allocation.sol index d5018e482..404dc8cec 100644 --- a/packages/subgraph-service/contracts/libraries/Allocation.sol +++ b/packages/subgraph-service/contracts/libraries/Allocation.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // forge-lint: disable-start(mixed-case-variable, mixed-case-function) diff --git a/packages/subgraph-service/contracts/libraries/AllocationHandler.sol b/packages/subgraph-service/contracts/libraries/AllocationHandler.sol new file mode 100644 index 000000000..0519b3e3f --- /dev/null +++ b/packages/subgraph-service/contracts/libraries/AllocationHandler.sol @@ -0,0 +1,692 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.27; + +import { ECDSA } from 
"@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; +import { IEpochManager } from "@graphprotocol/interfaces/contracts/contracts/epochs/IEpochManager.sol"; +import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; +import { IRewardsManager } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsManager.sol"; +import { TokenUtils } from "@graphprotocol/contracts/contracts/utils/TokenUtils.sol"; +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; +import { IHorizonStaking } from "@graphprotocol/interfaces/contracts/horizon/IHorizonStaking.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; +import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; +import { RewardsCondition } from "@graphprotocol/interfaces/contracts/contracts/rewards/RewardsCondition.sol"; +import { ProvisionTracker } from "@graphprotocol/horizon/contracts/data-service/libraries/ProvisionTracker.sol"; +import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; + +import { Allocation } from "../libraries/Allocation.sol"; +import { LegacyAllocation } from "../libraries/LegacyAllocation.sol"; + +/** + * @title AllocationHandler contract + * @author Edge & Node + * @notice A helper contract implementing allocation lifecycle management. + * Allows opening, resizing, and closing allocations, as well as collecting indexing rewards by presenting a Proof + * of Indexing (POI). + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. 
+ */ +library AllocationHandler { + using ProvisionTracker for mapping(address => uint256); + using Allocation for mapping(address => IAllocation.State); + using Allocation for IAllocation.State; + using LegacyAllocation for mapping(address => ILegacyAllocation.State); + using PPMMath for uint256; + using TokenUtils for IGraphToken; + + /** + * @notice Parameters for the allocation creation + * @param currentEpoch The current epoch at the time of allocation creation + * @param graphStaking The Horizon staking contract to handle token locking + * @param graphRewardsManager The rewards manager to handle rewards distribution + * @param _encodeAllocationProof The EIP712 encoded allocation proof + * @param _indexer The address of the indexer creating the allocation + * @param _delegationRatio The delegation ratio to consider when locking tokens + * @param _allocationId The id of the allocation to be created + * @param _subgraphDeploymentId The id of the subgraph deployment for which the allocation is created + * @param _tokens The amount of tokens to allocate + * @param _allocationProof The EIP712 proof, an EIP712 signed message of (indexer,allocationId) + */ + struct AllocateParams { + uint256 currentEpoch; + IHorizonStaking graphStaking; + IRewardsManager graphRewardsManager; + bytes32 _encodeAllocationProof; + address _indexer; + uint32 _delegationRatio; + address _allocationId; + bytes32 _subgraphDeploymentId; + uint256 _tokens; + bytes _allocationProof; + } + + /** + * @notice Parameters for the POI presentation + * @param maxPOIStaleness The maximum staleness of the POI in epochs + * @param graphEpochManager The epoch manager to get the current epoch + * @param graphStaking The Horizon staking contract to handle token locking + * @param graphRewardsManager The rewards manager to handle rewards distribution + * @param graphToken The Graph token contract to handle token transfers + * @param dataService The data service address (for delegation pool lookups) + * 
@param _allocationId The id of the allocation for which the POI is presented + * @param _poi The proof of indexing (POI) to be presented + * @param _poiMetadata The metadata associated with the POI + * @param _delegationRatio The delegation ratio to consider when locking tokens + * @param _paymentsDestination The address to which the indexing rewards should be sent + */ + struct PresentParams { + uint256 maxPOIStaleness; + IEpochManager graphEpochManager; + IHorizonStaking graphStaking; + IRewardsManager graphRewardsManager; + IGraphToken graphToken; + address dataService; + address _allocationId; + bytes32 _poi; + bytes _poiMetadata; + uint32 _delegationRatio; + address _paymentsDestination; + } + + /** + * @notice Emitted when an indexer creates an allocation + * @param indexer The address of the indexer + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + * @param tokens The amount of tokens allocated + * @param currentEpoch The current epoch + */ + event AllocationCreated( + address indexed indexer, + address indexed allocationId, + bytes32 indexed subgraphDeploymentId, + uint256 tokens, + uint256 currentEpoch + ); + + /** + * @notice Emitted when an indexer collects indexing rewards for an allocation + * @param indexer The address of the indexer + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + * @param tokensRewards The amount of tokens collected + * @param tokensIndexerRewards The amount of tokens collected for the indexer + * @param tokensDelegationRewards The amount of tokens collected for delegators + * @param poi The POI presented + * @param poiMetadata The metadata associated with the POI + * @param currentEpoch The current epoch + */ + event IndexingRewardsCollected( + address indexed indexer, + address indexed allocationId, + bytes32 indexed subgraphDeploymentId, + uint256 tokensRewards, + uint256 tokensIndexerRewards, + 
uint256 tokensDelegationRewards, + bytes32 poi, + bytes poiMetadata, + uint256 currentEpoch + ); + + /** + * @notice Emitted when an indexer resizes an allocation + * @param indexer The address of the indexer + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + * @param newTokens The new amount of tokens allocated + * @param oldTokens The old amount of tokens allocated + */ + event AllocationResized( + address indexed indexer, + address indexed allocationId, + bytes32 indexed subgraphDeploymentId, + uint256 newTokens, + uint256 oldTokens + ); + + /** + * @notice Emitted when an indexer closes an allocation + * @param indexer The address of the indexer + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + * @param tokens The amount of tokens allocated + * @param forceClosed Whether the allocation was force closed + */ + event AllocationClosed( + address indexed indexer, + address indexed allocationId, + bytes32 indexed subgraphDeploymentId, + uint256 tokens, + bool forceClosed + ); + + /** + * @notice Emitted when a legacy allocation is migrated into the subgraph service + * @param indexer The address of the indexer + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + */ + event LegacyAllocationMigrated( + address indexed indexer, + address indexed allocationId, + bytes32 indexed subgraphDeploymentId + ); + + /** + * @notice Emitted when the maximum POI staleness is updated + * @param maxPOIStaleness The max POI staleness in seconds + */ + event MaxPOIStalenessSet(uint256 maxPOIStaleness); + // solhint-disable-previous-line gas-indexed-events + + /** + * @notice Emitted when an indexer presents a POI for an allocation + * @param indexer The address of the indexer + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + 
* @param poi The POI presented + * @param poiMetadata The metadata associated with the POI + * @param condition The rewards condition determined for this POI + */ + event POIPresented( + address indexed indexer, + address indexed allocationId, + bytes32 indexed subgraphDeploymentId, + bytes32 poi, + bytes poiMetadata, + bytes32 condition + ); + + /** + * @notice Thrown when an allocation proof is invalid + * Both `signer` and `allocationId` should match for a valid proof. + * @param signer The address that signed the proof + * @param allocationId The id of the allocation + */ + error AllocationHandlerInvalidAllocationProof(address signer, address allocationId); + + /** + * @notice Thrown when attempting to create an allocation with a zero allocation id + */ + error AllocationHandlerInvalidZeroAllocationId(); + + /** + * @notice Thrown when attempting to collect indexing rewards on a closed allocation + * @param allocationId The id of the allocation + */ + error AllocationHandlerAllocationClosed(address allocationId); + + /** + * @notice Thrown when attempting to resize an allocation with the same size + * @param allocationId The id of the allocation + * @param tokens The amount of tokens + */ + error AllocationHandlerAllocationSameSize(address allocationId, uint256 tokens); + + /** + * @notice Create an allocation + * @dev The `_allocationProof` is a 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationId)` + * + * Requirements: + * - `_allocationId` must not be the zero address + * + * Emits a {AllocationCreated} event + * + * @param _allocations The mapping of allocation ids to allocation states + * @param _legacyAllocations The mapping of legacy allocation ids to legacy allocation states + * @param allocationProvisionTracker The mapping of indexers to their locked tokens + * @param _subgraphAllocatedTokens The mapping of subgraph deployment ids to their allocated tokens + * @param params The parameters for the allocation + */ + function 
allocate( + mapping(address allocationId => IAllocation.State allocation) storage _allocations, + mapping(address allocationId => ILegacyAllocation.State allocation) storage _legacyAllocations, + mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, + mapping(bytes32 subgraphDeploymentId => uint256 tokens) storage _subgraphAllocatedTokens, + AllocateParams calldata params + ) external { + require(params._allocationId != address(0), AllocationHandler.AllocationHandlerInvalidZeroAllocationId()); + + _verifyAllocationProof(params._encodeAllocationProof, params._allocationId, params._allocationProof); + + // Ensure allocation id is not reused + // need to check both subgraph service (on allocations.create()) and legacy allocations + _legacyAllocations.revertIfExists(params.graphStaking, params._allocationId); + + IAllocation.State memory allocation = _allocations.create( + params._indexer, + params._allocationId, + params._subgraphDeploymentId, + params._tokens, + params.graphRewardsManager.onSubgraphAllocationUpdate(params._subgraphDeploymentId), + params.currentEpoch + ); + + // Check that the indexer has enough tokens available + // Note that the delegation ratio ensures overdelegation cannot be used + allocationProvisionTracker.lock(params.graphStaking, params._indexer, params._tokens, params._delegationRatio); + + // Update total allocated tokens for the subgraph deployment + _subgraphAllocatedTokens[allocation.subgraphDeploymentId] = + _subgraphAllocatedTokens[allocation.subgraphDeploymentId] + allocation.tokens; + + emit AllocationHandler.AllocationCreated( + params._indexer, + params._allocationId, + params._subgraphDeploymentId, + allocation.tokens, + params.currentEpoch + ); + } + + /* solhint-disable function-max-lines */ + /** + * @notice Present a POI to collect indexing rewards for an allocation + * Mints indexing rewards using the {RewardsManager} and distributes them to the indexer and delegators. 
+ * + * Requirements for indexing rewards: + * - POI must be non-zero + * - POI must not be stale (older than `maxPOIStaleness`) + * - Allocation must be open for at least one epoch (returns early with 0 if too young) + * + * ## Reward Paths + * + * Rewards follow one of three paths based on allocation and POI state: + * + * **CLAIMED** (normal path): Valid POI, not stale, allocation mature, subgraph not denied + * - Calls `takeRewards()` to mint tokens to this contract + * - Distributes to indexer (stake or payments destination) and delegators + * - Snapshots allocation to prevent double-counting + * + * **RECLAIMED** (redirect path): STALE_POI or ZERO_POI conditions + * - Calls `reclaimRewards()` to mint tokens to configured reclaim address + * - If no reclaim address configured, rewards are dropped (not minted) + * - Snapshots allocation to prevent double-counting + * + * **DEFERRED** (early return): ALLOCATION_TOO_YOUNG or SUBGRAPH_DENIED conditions + * - Returns 0 without calling take or reclaim + * - Does NOT snapshot allocation (preserves rewards for later collection) + * - Allows rewards to be claimed when condition clears + * + * Emits a {POIPresented} event. + * Emits a {IndexingRewardsCollected} event. 
+ * + * @param _allocations The mapping of allocation ids to allocation states + * @param allocationProvisionTracker The mapping of indexers to their locked tokens + * @param _subgraphAllocatedTokens The mapping of subgraph deployment ids to their allocated tokens + * @param params The parameters for the POI presentation + * @return rewardsCollected The amount of tokens collected + * @return allocationForceClosed True if the allocation was automatically closed due to over-allocation, false otherwise + */ + function presentPOI( + mapping(address allocationId => IAllocation.State allocation) storage _allocations, + mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, + mapping(bytes32 subgraphDeploymentId => uint256 tokens) storage _subgraphAllocatedTokens, + PresentParams calldata params + ) external returns (uint256 rewardsCollected, bool allocationForceClosed) { + IAllocation.State memory allocation = _allocations.get(params._allocationId); + require(allocation.isOpen(), AllocationHandler.AllocationHandlerAllocationClosed(params._allocationId)); + _allocations.presentPOI(params._allocationId); // Always record POI presentation to prevent staleness + + uint256 currentEpoch = params.graphEpochManager.currentEpoch(); + // Scoped for stack management + { + // Determine rewards condition + bytes32 condition = RewardsCondition.NONE; + if (allocation.isStale(params.maxPOIStaleness)) condition = RewardsCondition.STALE_POI; + else if (params._poi == bytes32(0)) + condition = RewardsCondition.ZERO_POI; + // solhint-disable-next-line gas-strict-inequalities + else if (currentEpoch <= allocation.createdAtEpoch) condition = RewardsCondition.ALLOCATION_TOO_YOUNG; + else if (params.graphRewardsManager.isDenied(allocation.subgraphDeploymentId)) + condition = RewardsCondition.SUBGRAPH_DENIED; + + emit AllocationHandler.POIPresented( + allocation.indexer, + params._allocationId, + allocation.subgraphDeploymentId, + params._poi, + params._poiMetadata, + 
condition + ); + + // Early return skips the overallocation check intentionally to avoid loss of uncollected rewards + if (condition == RewardsCondition.ALLOCATION_TOO_YOUNG || condition == RewardsCondition.SUBGRAPH_DENIED) { + // Keep reward and reclaim accumulation current even if rewards are not collected + params.graphRewardsManager.onSubgraphAllocationUpdate(allocation.subgraphDeploymentId); + + return (0, false); + } + + bool rewardsReclaimable = condition == RewardsCondition.STALE_POI || condition == RewardsCondition.ZERO_POI; + if (rewardsReclaimable) params.graphRewardsManager.reclaimRewards(condition, params._allocationId); + else rewardsCollected = params.graphRewardsManager.takeRewards(params._allocationId); + } + + // Snapshot rewards to prevent accumulation for next POI, then clear pending + _allocations.snapshotRewards( + params._allocationId, + params.graphRewardsManager.onSubgraphAllocationUpdate(allocation.subgraphDeploymentId) + ); + _allocations.clearPendingRewards(params._allocationId); + + // Scoped for stack management + { + (uint256 tokensIndexerRewards, uint256 tokensDelegationRewards) = _distributeIndexingRewards( + allocation, + rewardsCollected, + params + ); + + emit AllocationHandler.IndexingRewardsCollected( + allocation.indexer, + params._allocationId, + allocation.subgraphDeploymentId, + rewardsCollected, + tokensIndexerRewards, + tokensDelegationRewards, + params._poi, + params._poiMetadata, + currentEpoch + ); + } + + // Check if the indexer is over-allocated and force close the allocation if necessary + if ( + _isOverAllocated( + allocationProvisionTracker, + params.graphStaking, + allocation.indexer, + params._delegationRatio + ) + ) { + allocationForceClosed = true; + _closeAllocation( + _allocations, + allocationProvisionTracker, + _subgraphAllocatedTokens, + params.graphRewardsManager, + params._allocationId, + true + ); + } + } + /* solhint-enable function-max-lines */ + + /** + * @notice Close an allocation + * Does not 
require presenting a POI, use {presentPOI} to present a POI and collect rewards + * @dev Note that allocations are long lived. All service payments, including indexing rewards, should be collected periodically + * without the need of closing the allocation. Allocations should only be closed when indexers want to reclaim the allocated + * tokens for other purposes. + * + * Emits a {AllocationClosed} event + * + * @param _allocations The mapping of allocation ids to allocation states + * @param allocationProvisionTracker The mapping of indexers to their locked tokens + * @param _subgraphAllocatedTokens The mapping of subgraph deployment ids to their allocated tokens + * @param graphRewardsManager The rewards manager to handle rewards distribution + * @param _allocationId The id of the allocation to be closed + * @param _forceClosed Whether the allocation was force closed + */ + function closeAllocation( + mapping(address allocationId => IAllocation.State allocation) storage _allocations, + mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, + mapping(bytes32 subgraphDeploymentId => uint256 tokens) storage _subgraphAllocatedTokens, + IRewardsManager graphRewardsManager, + address _allocationId, + bool _forceClosed + ) external { + _closeAllocation( + _allocations, + allocationProvisionTracker, + _subgraphAllocatedTokens, + graphRewardsManager, + _allocationId, + _forceClosed + ); + } + + /* solhint-disable function-max-lines */ + /** + * @notice Resize an allocation + * @dev Will lock or release tokens in the provision tracker depending on the new allocation size. + * Rewards accrued but not issued before the resize will be accounted for as pending rewards, + * unless the allocation is stale, in which case pending rewards are reclaimed. + * These will be paid out when the indexer presents a POI. 
+ * + * Requirements: + * - `_indexer` must be the owner of the allocation + * - Allocation must be open + * - `_tokens` must be different from the current allocation size + * + * Emits a {AllocationResized} event. + * + * @param _allocations The mapping of allocation ids to allocation states + * @param allocationProvisionTracker The mapping of indexers to their locked tokens + * @param _subgraphAllocatedTokens The mapping of subgraph deployment ids to their allocated tokens + * @param graphStaking The Horizon staking contract to handle token locking + * @param graphRewardsManager The rewards manager to handle rewards distribution + * @param _allocationId The id of the allocation to be resized + * @param _tokens The new amount of tokens to allocate + * @param _delegationRatio The delegation ratio to consider when locking tokens + * @param _maxPOIStaleness The maximum staleness of the POI in seconds + */ + function resizeAllocation( + mapping(address allocationId => IAllocation.State allocation) storage _allocations, + mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, + mapping(bytes32 subgraphDeploymentId => uint256 tokens) storage _subgraphAllocatedTokens, + IHorizonStaking graphStaking, + IRewardsManager graphRewardsManager, + address _allocationId, + uint256 _tokens, + uint32 _delegationRatio, + uint256 _maxPOIStaleness + ) external { + IAllocation.State memory allocation = _allocations.get(_allocationId); + require(allocation.isOpen(), AllocationHandler.AllocationHandlerAllocationClosed(_allocationId)); + require( + _tokens != allocation.tokens, + AllocationHandler.AllocationHandlerAllocationSameSize(_allocationId, _tokens) + ); + + // Update provision tracker + uint256 oldTokens = allocation.tokens; + if (_tokens > oldTokens) { + allocationProvisionTracker.lock(graphStaking, allocation.indexer, _tokens - oldTokens, _delegationRatio); + } else { + allocationProvisionTracker.release(allocation.indexer, oldTokens - _tokens); + } + + 
// Calculate rewards that have been accrued since the last snapshot but not yet issued + uint256 accRewardsPerAllocatedToken = graphRewardsManager.onSubgraphAllocationUpdate( + allocation.subgraphDeploymentId + ); + uint256 accRewardsPerAllocatedTokenPending = !allocation.isAltruistic() + ? accRewardsPerAllocatedToken - allocation.accRewardsPerAllocatedToken + : 0; + + // Update the allocation + _allocations[_allocationId].tokens = _tokens; + _allocations[_allocationId].accRewardsPerAllocatedToken = accRewardsPerAllocatedToken; + _allocations[_allocationId].accRewardsPending += graphRewardsManager.calcRewards( + oldTokens, + accRewardsPerAllocatedTokenPending + ); + + // If allocation is stale, reclaim pending rewards defensively. + // Stale allocations are not performing, so rewards should not accumulate. + if (allocation.isStale(_maxPOIStaleness)) { + graphRewardsManager.reclaimRewards(RewardsCondition.STALE_POI, _allocationId); + _allocations.clearPendingRewards(_allocationId); + } + + // Update total allocated tokens for the subgraph deployment + if (_tokens > oldTokens) { + _subgraphAllocatedTokens[allocation.subgraphDeploymentId] += (_tokens - oldTokens); + } else { + _subgraphAllocatedTokens[allocation.subgraphDeploymentId] -= (oldTokens - _tokens); + } + + emit AllocationHandler.AllocationResized( + allocation.indexer, + _allocationId, + allocation.subgraphDeploymentId, + _tokens, + oldTokens + ); + } + /* solhint-enable function-max-lines */ + + /** + * @notice Checks if an allocation is over-allocated + * @param allocationProvisionTracker The mapping of indexers to their locked tokens + * @param graphStaking The Horizon staking contract to check delegation ratios + * @param _indexer The address of the indexer + * @param _delegationRatio The delegation ratio to consider when locking tokens + * @return True if the allocation is over-allocated, false otherwise + */ + function isOverAllocated( + mapping(address indexer => uint256 tokens) storage 
allocationProvisionTracker, + IHorizonStaking graphStaking, + address _indexer, + uint32 _delegationRatio + ) external view returns (bool) { + return _isOverAllocated(allocationProvisionTracker, graphStaking, _indexer, _delegationRatio); + } + + /** + * @notice Close an allocation (internal) + * @dev Reclaims uncollected rewards before closing. + * + * Emits a {AllocationClosed} event + * + * @param _allocations The mapping of allocation ids to allocation states + * @param allocationProvisionTracker The mapping of indexers to their locked tokens + * @param _subgraphAllocatedTokens The mapping of subgraph deployment ids to their allocated tokens + * @param graphRewardsManager The rewards manager to handle rewards distribution + * @param _allocationId The id of the allocation to be closed + * @param _forceClosed Whether the allocation was force closed + */ + function _closeAllocation( + mapping(address allocationId => IAllocation.State allocation) storage _allocations, + mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, + mapping(bytes32 subgraphDeploymentId => uint256 tokens) storage _subgraphAllocatedTokens, + IRewardsManager graphRewardsManager, + address _allocationId, + bool _forceClosed + ) private { + IAllocation.State memory allocation = _allocations.get(_allocationId); + + // Reclaim uncollected rewards before closing + uint256 reclaimedRewards = graphRewardsManager.reclaimRewards(RewardsCondition.CLOSE_ALLOCATION, _allocationId); + + // Take rewards snapshot to prevent other allos from counting tokens from this allo + _allocations.snapshotRewards( + _allocationId, + graphRewardsManager.onSubgraphAllocationUpdate(allocation.subgraphDeploymentId) + ); + + // Clear pending rewards only if rewards were reclaimed. This marks them as consumed, + // which could be useful for future logic that searches for unconsumed rewards. 
+ // Known limitation: This capture is incomplete due to other code paths (e.g., presentPOI) + // that clear pending even when rewards are not consumed. + if (0 < reclaimedRewards) _allocations.clearPendingRewards(_allocationId); + + _allocations.close(_allocationId); + allocationProvisionTracker.release(allocation.indexer, allocation.tokens); + + // Update total allocated tokens for the subgraph deployment + _subgraphAllocatedTokens[allocation.subgraphDeploymentId] = + _subgraphAllocatedTokens[allocation.subgraphDeploymentId] - allocation.tokens; + + emit AllocationHandler.AllocationClosed( + allocation.indexer, + _allocationId, + allocation.subgraphDeploymentId, + allocation.tokens, + _forceClosed + ); + } + + /** + * @notice Distributes indexing rewards to delegators and indexer + * @param _allocation The allocation state + * @param _rewardsCollected Total rewards to distribute + * @param _params The present params containing staking, token, and destination info + * @return tokensIndexerRewards Amount sent to indexer + * @return tokensDelegationRewards Amount sent to delegation pool + */ + function _distributeIndexingRewards( + IAllocation.State memory _allocation, + uint256 _rewardsCollected, + PresentParams memory _params + ) private returns (uint256 tokensIndexerRewards, uint256 tokensDelegationRewards) { + if (_rewardsCollected == 0) return (0, 0); + + // Calculate and distribute delegator share + uint256 delegatorCut = _params.graphStaking.getDelegationFeeCut( + _allocation.indexer, + _params.dataService, + IGraphPayments.PaymentTypes.IndexingRewards + ); + IHorizonStakingTypes.DelegationPool memory pool = _params.graphStaking.getDelegationPool( + _allocation.indexer, + _params.dataService + ); + tokensDelegationRewards = pool.shares > 0 ? 
_rewardsCollected.mulPPM(delegatorCut) : 0; + if (tokensDelegationRewards > 0) { + _params.graphToken.approve(address(_params.graphStaking), tokensDelegationRewards); + _params.graphStaking.addToDelegationPool(_allocation.indexer, _params.dataService, tokensDelegationRewards); + } + + // Distribute indexer share + tokensIndexerRewards = _rewardsCollected - tokensDelegationRewards; + if (tokensIndexerRewards > 0) { + if (_params._paymentsDestination == address(0)) { + _params.graphToken.approve(address(_params.graphStaking), tokensIndexerRewards); + _params.graphStaking.stakeToProvision(_allocation.indexer, _params.dataService, tokensIndexerRewards); + } else { + _params.graphToken.pushTokens(_params._paymentsDestination, tokensIndexerRewards); + } + } + } + + /** + * @notice Checks if an allocation is over-allocated + * @param allocationProvisionTracker The mapping of indexers to their locked tokens + * @param graphStaking The Horizon staking contract to check delegation ratios + * @param _indexer The address of the indexer + * @param _delegationRatio The delegation ratio to consider when locking tokens + * @return True if the allocation is over-allocated, false otherwise + */ + function _isOverAllocated( + mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, + IHorizonStaking graphStaking, + address _indexer, + uint32 _delegationRatio + ) private view returns (bool) { + return !allocationProvisionTracker.check(graphStaking, _indexer, _delegationRatio); + } + + /** + * @notice Verifies ownership of an allocation id by verifying an EIP712 allocation proof + * @dev Requirements: + * - Signer must be the allocation id address + * @param _encodeAllocationProof The EIP712 encoded allocation proof + * @param _allocationId The id of the allocation + * @param _proof The EIP712 proof, an EIP712 signed message of (indexer,allocationId) + */ + function _verifyAllocationProof( + bytes32 _encodeAllocationProof, + address _allocationId, + bytes memory 
_proof + ) private pure { + address signer = ECDSA.recover(_encodeAllocationProof, _proof); + require( + signer == _allocationId, + AllocationHandler.AllocationHandlerInvalidAllocationProof(signer, _allocationId) + ); + } +} diff --git a/packages/subgraph-service/contracts/libraries/Attestation.sol b/packages/subgraph-service/contracts/libraries/Attestation.sol index 77c3a3fc2..54bd2c2f2 100644 --- a/packages/subgraph-service/contracts/libraries/Attestation.sol +++ b/packages/subgraph-service/contracts/libraries/Attestation.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-strict-inequalities diff --git a/packages/subgraph-service/contracts/libraries/IndexingAgreement.sol b/packages/subgraph-service/contracts/libraries/IndexingAgreement.sol new file mode 100644 index 000000000..abe148e5e --- /dev/null +++ b/packages/subgraph-service/contracts/libraries/IndexingAgreement.sol @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.27; + +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; +import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; + +import { AllocationHandler } from "../libraries/AllocationHandler.sol"; +import { Directory } from "../utilities/Directory.sol"; +import { Allocation } from "./Allocation.sol"; +import { IndexingAgreementDecoder } from "./IndexingAgreementDecoder.sol"; + +/** + * @title IndexingAgreement library + * @author Edge 
& Node + * @notice Manages indexing agreement lifecycle: acceptance, updates, cancellation and fee collection. + * @custom:security-contact Please email security+contracts@thegraph.com if you find any + * bugs. We may have an active bug bounty program. + */ +library IndexingAgreement { + using IndexingAgreement for StorageManager; + using Allocation for IAllocation.State; + using Allocation for mapping(address => IAllocation.State); + + /** + * @notice Accept Indexing Agreement metadata + * @param subgraphDeploymentId The subgraph deployment ID + * @param version The indexing agreement version + * @param terms The indexing agreement terms + */ + struct AcceptIndexingAgreementMetadata { + bytes32 subgraphDeploymentId; + IIndexingAgreement.IndexingAgreementVersion version; + bytes terms; + } + + /** + * @notice Update Indexing Agreement metadata + * @param version The indexing agreement version + * @param terms The indexing agreement terms + */ + struct UpdateIndexingAgreementMetadata { + IIndexingAgreement.IndexingAgreementVersion version; + bytes terms; + } + + /** + * @notice Indexing Agreement Terms (Version 1) + * @param tokensPerSecond The amount of tokens per second + * @param tokensPerEntityPerSecond The amount of tokens per entity per second + */ + struct IndexingAgreementTermsV1 { + uint256 tokensPerSecond; + uint256 tokensPerEntityPerSecond; + } + + /** + * @notice Parameters for collecting indexing fees + * @param indexer The address of the indexer + * @param agreementId The ID of the indexing agreement + * @param currentEpoch The current epoch + * @param receiverDestination The address where the collected fees should be sent + * @param data The encoded data containing the number of entities indexed, proof of indexing, and epoch + * @param indexingFeesCut The indexing fees cut in PPM + */ + struct CollectParams { + address indexer; + bytes16 agreementId; + uint256 currentEpoch; + address receiverDestination; + bytes data; + uint256 indexingFeesCut; + } + 
+ /** + * @notice Nested data for collecting indexing fees V1. + * + * @param entities The number of entities + * @param poi The proof of indexing (POI) + * @param poiBlockNumber The block number of the POI + * @param metadata Additional metadata associated with the collection + * @param maxSlippage Max acceptable tokens to lose due to rate limiting, or type(uint256).max to ignore + */ + struct CollectIndexingFeeDataV1 { + uint256 entities; + bytes32 poi; + uint256 poiBlockNumber; + bytes metadata; + uint256 maxSlippage; + } + + /** + * @notice Storage manager for indexing agreements + * @dev This struct holds the state of indexing agreements and their terms. + * It is used to manage the lifecycle of indexing agreements in the subgraph service. + * @param agreements Mapping of agreement IDs to their states + * @param termsV1 Mapping of agreement IDs to their terms for version 1 agreements + * @param allocationToActiveAgreementId Mapping of allocation IDs to their active agreement IDs + * @custom:storage-location erc7201:graphprotocol.subgraph-service.storage.StorageManager.IndexingAgreement + */ + struct StorageManager { + mapping(bytes16 agreementId => IIndexingAgreement.State) agreements; + mapping(bytes16 agreementId => IndexingAgreementTermsV1 data) termsV1; + mapping(address allocationId => bytes16 agreementId) allocationToActiveAgreementId; + } + + /** + * @notice Storage location for the indexing agreement storage manager + * @dev Equals keccak256(abi.encode(uint256(keccak256("graphprotocol.subgraph-service.storage.StorageManager.IndexingAgreement")) - 1)) & ~bytes32(uint256(0xff)) + */ + bytes32 public constant INDEXING_AGREEMENT_STORAGE_MANAGER_LOCATION = + 0xb59b65b7215c7fb95ac34d2ad5aed7c775c8bc77ad936b1b43e17b95efc8e400; + + /** + * @notice Emitted when an indexer collects indexing fees from a V1 agreement + * @param indexer The address of the indexer + * @param payer The address paying for the indexing fees + * @param agreementId The id of the 
agreement + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + * @param currentEpoch The current epoch + * @param tokensCollected The amount of tokens collected + * @param entities The number of entities indexed + * @param poi The proof of indexing + * @param poiBlockNumber The block number of the proof of indexing + * @param metadata Additional metadata associated with the collection + */ + event IndexingFeesCollectedV1( + address indexed indexer, + address indexed payer, + bytes16 indexed agreementId, + address allocationId, + bytes32 subgraphDeploymentId, + uint256 currentEpoch, + uint256 tokensCollected, + uint256 entities, + bytes32 poi, + uint256 poiBlockNumber, + bytes metadata + ); + + /** + * @notice Emitted when an indexing agreement is canceled + * @param indexer The address of the indexer + * @param payer The address of the payer + * @param agreementId The id of the agreement + * @param canceledOnBehalfOf The address of the entity that canceled the agreement + */ + event IndexingAgreementCanceled( + address indexed indexer, + address indexed payer, + bytes16 indexed agreementId, + address canceledOnBehalfOf + ); + + /** + * @notice Emitted when an indexing agreement is accepted + * @param indexer The address of the indexer + * @param payer The address of the payer + * @param agreementId The id of the agreement + * @param allocationId The id of the allocation + * @param subgraphDeploymentId The id of the subgraph deployment + * @param version The version of the indexing agreement + * @param versionTerms The version data of the indexing agreement + */ + event IndexingAgreementAccepted( + address indexed indexer, + address indexed payer, + bytes16 indexed agreementId, + address allocationId, + bytes32 subgraphDeploymentId, + IIndexingAgreement.IndexingAgreementVersion version, + bytes versionTerms + ); + + /** + * @notice Emitted when an indexing agreement is updated + * @param indexer The 
address of the indexer + * @param payer The address of the payer + * @param agreementId The id of the agreement + * @param allocationId The id of the allocation + * @param version The version of the indexing agreement + * @param versionTerms The version data of the indexing agreement + */ + event IndexingAgreementUpdated( + address indexed indexer, + address indexed payer, + bytes16 indexed agreementId, + address allocationId, + IIndexingAgreement.IndexingAgreementVersion version, + bytes versionTerms + ); + + /** + * @notice Thrown when trying to interact with an agreement with an invalid version + * @param version The invalid version + */ + error IndexingAgreementInvalidVersion(IIndexingAgreement.IndexingAgreementVersion version); + + /** + * @notice Thrown when an agreement is not for the subgraph data service + * @param expectedDataService The expected data service address + * @param wrongDataService The wrong data service address + */ + error IndexingAgreementWrongDataService(address expectedDataService, address wrongDataService); + + /** + * @notice Thrown when an agreement and the allocation correspond to different deployment IDs + * @param agreementDeploymentId The agreement's deployment ID + * @param allocationId The allocation ID + * @param allocationDeploymentId The allocation's deployment ID + */ + error IndexingAgreementDeploymentIdMismatch( + bytes32 agreementDeploymentId, + address allocationId, + bytes32 allocationDeploymentId + ); + + /** + * @notice Thrown when the agreement is already accepted + * @param agreementId The agreement ID + */ + error IndexingAgreementAlreadyAccepted(bytes16 agreementId); + + /** + * @notice Thrown when an allocation already has an active agreement + * @param allocationId The allocation ID + */ + error AllocationAlreadyHasIndexingAgreement(address allocationId); + + /** + * @notice Thrown when caller or proxy can not cancel an agreement + * @param owner The address of the owner of the agreement + * @param unauthorized 
The unauthorized caller + */ + error IndexingAgreementNonCancelableBy(address owner, address unauthorized); + + /** + * @notice Thrown when the agreement is not active + * @param agreementId The agreement ID + */ + error IndexingAgreementNotActive(bytes16 agreementId); + + /** + * @notice Thrown when the agreement is not collectable + * @param agreementId The agreement ID + */ + error IndexingAgreementNotCollectable(bytes16 agreementId); + + /** + * @notice Thrown when trying to interact with an agreement not owned by the indexer + * @param agreementId The agreement ID + * @param unauthorizedIndexer The unauthorized indexer + */ + error IndexingAgreementNotAuthorized(bytes16 agreementId, address unauthorizedIndexer); + + /** + * @notice Thrown when indexing agreement terms are invalid + * @param tokensPerSecond The indexing agreement tokens per second + * @param maxOngoingTokensPerSecond The RCA maximum tokens per second + */ + error IndexingAgreementInvalidTerms(uint256 tokensPerSecond, uint256 maxOngoingTokensPerSecond); + + /* solhint-disable function-max-lines */ + /** + * @notice Accept an indexing agreement. + * + * Requirements: + * - Allocation must belong to the indexer and be open + * - Agreement must be for this data service + * - Agreement's subgraph deployment must match the allocation's subgraph deployment + * - Agreement must not have been accepted before + * - Allocation must not have an agreement already + * + * @dev rca.metadata is an encoding of {IndexingAgreement.AcceptIndexingAgreementMetadata}. + * If `authData` is non-empty it is treated as an ECDSA signature; if empty the payer + * must be a contract implementing {IAgreementOwner}. 
+ * + * Emits {IndexingAgreementAccepted} event + * + * @param self The indexing agreement storage manager + * @param allocations The mapping of allocation IDs to their states + * @param allocationId The id of the allocation + * @param rca The Recurring Collection Agreement + * @param authData ECDSA signature bytes, or empty for contract-approved agreements + * @return The agreement ID assigned to the accepted indexing agreement + */ + function accept( + StorageManager storage self, + mapping(address allocationId => IAllocation.State allocation) storage allocations, + address allocationId, + IRecurringCollector.RecurringCollectionAgreement calldata rca, + bytes calldata authData + ) external returns (bytes16) { + IAllocation.State memory allocation = _requireValidAllocation(allocations, allocationId, rca.serviceProvider); + + require(rca.dataService == address(this), IndexingAgreementWrongDataService(address(this), rca.dataService)); + + AcceptIndexingAgreementMetadata memory metadata = IndexingAgreementDecoder.decodeRCAMetadata(rca.metadata); + + bytes16 agreementId = _directory().recurringCollector().generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + + IIndexingAgreement.State storage agreement = self.agreements[agreementId]; + + require(agreement.allocationId == address(0), IndexingAgreementAlreadyAccepted(agreementId)); + + require( + allocation.subgraphDeploymentId == metadata.subgraphDeploymentId, + IndexingAgreementDeploymentIdMismatch( + metadata.subgraphDeploymentId, + allocationId, + allocation.subgraphDeploymentId + ) + ); + + // Ensure that an allocation can only have one active indexing agreement + require( + self.allocationToActiveAgreementId[allocationId] == bytes16(0), + AllocationAlreadyHasIndexingAgreement(allocationId) + ); + self.allocationToActiveAgreementId[allocationId] = agreementId; + + agreement.version = metadata.version; + agreement.allocationId = allocationId; + + require( + 
metadata.version == IIndexingAgreement.IndexingAgreementVersion.V1, + IndexingAgreementInvalidVersion(metadata.version) + ); + _setTermsV1(self, agreementId, metadata.terms, rca.maxOngoingTokensPerSecond); + + emit IndexingAgreementAccepted( + rca.serviceProvider, + rca.payer, + agreementId, + allocationId, + metadata.subgraphDeploymentId, + metadata.version, + metadata.terms + ); + + require( + _directory().recurringCollector().accept(rca, authData) == agreementId, + "internal: agreement ID mismatch" + ); + return agreementId; + } + /* solhint-enable function-max-lines */ + + /** + * @notice Update an indexing agreement. + * + * Requirements: + * - Agreement must be active + * - The indexer must be the service provider of the agreement + * + * @dev rcau.metadata is an encoding of {IndexingAgreement.UpdateIndexingAgreementMetadata}. + * If `authData` is non-empty it is treated as an ECDSA signature; if empty the payer + * must be a contract implementing {IAgreementOwner}. + * + * Emits {IndexingAgreementUpdated} event + * + * @param self The indexing agreement storage manager + * @param indexer The indexer address + * @param rcau The Recurring Collection Agreement Update + * @param authData ECDSA signature bytes, or empty for contract-approved updates + */ + function update( + StorageManager storage self, + address indexer, + IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, + bytes calldata authData + ) external { + IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, rcau.agreementId); + require(_isActive(wrapper), IndexingAgreementNotActive(rcau.agreementId)); + require( + wrapper.collectorAgreement.serviceProvider == indexer, + IndexingAgreementNotAuthorized(rcau.agreementId, indexer) + ); + + UpdateIndexingAgreementMetadata memory metadata = IndexingAgreementDecoder.decodeRCAUMetadata(rcau.metadata); + + require( + wrapper.agreement.version == IIndexingAgreement.IndexingAgreementVersion.V1, + "internal: invalid version" + ); + 
require( + metadata.version == IIndexingAgreement.IndexingAgreementVersion.V1, + IndexingAgreementInvalidVersion(metadata.version) + ); + _setTermsV1(self, rcau.agreementId, metadata.terms, wrapper.collectorAgreement.maxOngoingTokensPerSecond); + + emit IndexingAgreementUpdated({ + indexer: wrapper.collectorAgreement.serviceProvider, + payer: wrapper.collectorAgreement.payer, + agreementId: rcau.agreementId, + allocationId: wrapper.agreement.allocationId, + version: metadata.version, + versionTerms: metadata.terms + }); + + _directory().recurringCollector().update(rcau, authData); + } + + /** + * @notice Cancel an indexing agreement. + * + * @dev This function allows the indexer to cancel an indexing agreement. + * + * Requirements: + * - Agreement must be active + * - The indexer must be the service provider of the agreement + * + * Emits {IndexingAgreementCanceled} event + * + * @param self The indexing agreement storage manager + * @param indexer The indexer address + * @param agreementId The id of the agreement to cancel + */ + function cancel(StorageManager storage self, address indexer, bytes16 agreementId) external { + IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, agreementId); + require(_isActive(wrapper), IndexingAgreementNotActive(agreementId)); + require( + wrapper.collectorAgreement.serviceProvider == indexer, + IndexingAgreementNonCancelableBy(wrapper.collectorAgreement.serviceProvider, indexer) + ); + _cancel( + self, + agreementId, + wrapper.agreement, + wrapper.collectorAgreement, + IRecurringCollector.CancelAgreementBy.ServiceProvider + ); + } + + /** + * @notice Cancel an allocation's indexing agreement if it exists. + * + * @dev This function is to be called by the data service when an allocation is closed. 
+ * + * Requirements: + * - The allocation must have an active agreement + * - Agreement must be active + * + * Emits {IndexingAgreementCanceled} event + * + * @param self The indexing agreement storage manager + * @param _allocationId The allocation ID + * @param forceClosed Whether the allocation was force closed + * + */ + function onCloseAllocation(StorageManager storage self, address _allocationId, bool forceClosed) external { + bytes16 agreementId = self.allocationToActiveAgreementId[_allocationId]; + if (agreementId == bytes16(0)) { + return; + } + + IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, agreementId); + if (!_isActive(wrapper)) { + return; + } + + _cancel( + self, + agreementId, + wrapper.agreement, + wrapper.collectorAgreement, + forceClosed + ? IRecurringCollector.CancelAgreementBy.ThirdParty + : IRecurringCollector.CancelAgreementBy.ServiceProvider + ); + } + + /** + * @notice Cancel an indexing agreement by the payer. + * + * @dev This function allows the payer to cancel an indexing agreement. 
+ * + * Requirements: + * - Agreement must be active + * - The caller must be authorized to cancel the agreement in the collector on the payer's behalf + * + * Emits {IndexingAgreementCanceled} event + * + * @param self The indexing agreement storage manager + * @param agreementId The id of the agreement to cancel + */ + function cancelByPayer(StorageManager storage self, bytes16 agreementId) external { + IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, agreementId); + require(_isActive(wrapper), IndexingAgreementNotActive(agreementId)); + require( + msg.sender == wrapper.collectorAgreement.payer || + _directory().recurringCollector().isAuthorized(wrapper.collectorAgreement.payer, msg.sender), + IndexingAgreementNonCancelableBy(wrapper.collectorAgreement.payer, msg.sender) + ); + _cancel( + self, + agreementId, + wrapper.agreement, + wrapper.collectorAgreement, + IRecurringCollector.CancelAgreementBy.Payer + ); + } + + /* solhint-disable function-max-lines */ + /** + * @notice Collect Indexing fees + * @dev Uses the {RecurringCollector} to collect payment from Graph Horizon payments protocol. + * Fees are distributed to service provider and delegators by {GraphPayments} + * + * Requirements: + * - Allocation must be open + * - Agreement must be active + * - Agreement must be of version V1 + * - The data must be encoded as per {IndexingAgreementDecoder.decodeCollectIndexingFeeDataV1} + * + * Emits a {IndexingFeesCollectedV1} event. 
+ * + * @param self The indexing agreement storage manager + * @param allocations The mapping of allocation IDs to their states + * @param params The parameters for collecting indexing fees + * @return The address of the service provider that collected the fees + * @return The amount of fees collected + */ + function collect( + StorageManager storage self, + mapping(address allocationId => IAllocation.State allocation) storage allocations, + CollectParams calldata params + ) external returns (address, uint256) { + IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, params.agreementId); + IAllocation.State memory allocation = _requireValidAllocation( + allocations, + wrapper.agreement.allocationId, + wrapper.collectorAgreement.serviceProvider + ); + require( + allocation.indexer == params.indexer, + IndexingAgreementNotAuthorized(params.agreementId, params.indexer) + ); + // Get collection info from RecurringCollector (single source of truth for temporal logic) + (bool isCollectable, uint256 collectionSeconds, ) = _directory().recurringCollector().getCollectionInfo( + wrapper.collectorAgreement + ); + require(_isValid(wrapper) && isCollectable, IndexingAgreementNotCollectable(params.agreementId)); + + require( + wrapper.agreement.version == IIndexingAgreement.IndexingAgreementVersion.V1, + IndexingAgreementInvalidVersion(wrapper.agreement.version) + ); + + CollectIndexingFeeDataV1 memory data = IndexingAgreementDecoder.decodeCollectIndexingFeeDataV1(params.data); + + uint256 expectedTokens = _tokensToCollect(self, params.agreementId, data.entities, collectionSeconds); + + // `tokensCollected` <= `expectedTokens` because the recurring collector will further narrow + // down the tokens allowed, based on the RCA terms. 
+ uint256 tokensCollected = _directory().recurringCollector().collect( + IGraphPayments.PaymentTypes.IndexingFee, + abi.encode( + IRecurringCollector.CollectParams({ + agreementId: params.agreementId, + collectionId: bytes32(uint256(uint160(wrapper.agreement.allocationId))), + tokens: expectedTokens, + dataServiceCut: params.indexingFeesCut, + receiverDestination: params.receiverDestination, + maxSlippage: data.maxSlippage + }) + ) + ); + + emit IndexingFeesCollectedV1( + wrapper.collectorAgreement.serviceProvider, + wrapper.collectorAgreement.payer, + params.agreementId, + wrapper.agreement.allocationId, + allocation.subgraphDeploymentId, + params.currentEpoch, + tokensCollected, + data.entities, + data.poi, + data.poiBlockNumber, + data.metadata + ); + + return (wrapper.collectorAgreement.serviceProvider, tokensCollected); + } + /* solhint-enable function-max-lines */ + + /** + * @notice Get the indexing agreement for a given agreement ID. + * + * @param self The indexing agreement storage manager + * @param agreementId The id of the indexing agreement + * @return The indexing agreement wrapper containing the agreement state and collector agreement data + */ + function get( + StorageManager storage self, + bytes16 agreementId + ) external view returns (IIndexingAgreement.AgreementWrapper memory) { + IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, agreementId); + require(wrapper.collectorAgreement.dataService == address(this), IndexingAgreementNotActive(agreementId)); + + return wrapper; + } + + /** + * @notice Get the storage manager for indexing agreements. + * @dev This function retrieves the storage manager for indexing agreements. 
+ * @return m The storage manager for indexing agreements + */ + function _getStorageManager() internal pure returns (StorageManager storage m) { + // solhint-disable-next-line no-inline-assembly + assembly { + m.slot := INDEXING_AGREEMENT_STORAGE_MANAGER_LOCATION + } + } + + /** + * @notice Set the terms for an indexing agreement of version V1. + * @dev This function updates the terms of an indexing agreement in the storage manager. + * @param _manager The indexing agreement storage manager + * @param _agreementId The id of the agreement to update + * @param _data The encoded terms data + * @param maxOngoingTokensPerSecond The RCA maximum tokens per second limit for validation + */ + function _setTermsV1( + StorageManager storage _manager, + bytes16 _agreementId, + bytes memory _data, + uint256 maxOngoingTokensPerSecond + ) private { + IndexingAgreementTermsV1 memory newTerms = IndexingAgreementDecoder.decodeIndexingAgreementTermsV1(_data); + _validateTermsAgainstRCA(newTerms, maxOngoingTokensPerSecond); + _manager.termsV1[_agreementId].tokensPerSecond = newTerms.tokensPerSecond; + _manager.termsV1[_agreementId].tokensPerEntityPerSecond = newTerms.tokensPerEntityPerSecond; + } + + /** + * @notice Cancel an indexing agreement. + * + * @dev This function does the actual agreement cancelation. 
+ * + * Emits {IndexingAgreementCanceled} event + * + * @param _manager The indexing agreement storage manager + * @param _agreementId The id of the agreement to cancel + * @param _agreement The indexing agreement state + * @param _collectorAgreement The collector agreement data + * @param _cancelBy The entity that is canceling the agreement + */ + function _cancel( + StorageManager storage _manager, + bytes16 _agreementId, + IIndexingAgreement.State memory _agreement, + IRecurringCollector.AgreementData memory _collectorAgreement, + IRecurringCollector.CancelAgreementBy _cancelBy + ) private { + // Delete the allocation to active agreement link, so that the allocation + // can be assigned a new indexing agreement in the future. + delete _manager.allocationToActiveAgreementId[_agreement.allocationId]; + + emit IndexingAgreementCanceled( + _collectorAgreement.serviceProvider, + _collectorAgreement.payer, + _agreementId, + _cancelBy == IRecurringCollector.CancelAgreementBy.Payer + ? _collectorAgreement.payer + : _collectorAgreement.serviceProvider + ); + + _directory().recurringCollector().cancel(_agreementId, _cancelBy); + } + + /** + * @notice Requires that the allocation is valid and owned by the indexer. 
+ * + * Requirements: + * - Allocation must belong to the indexer + * - Allocation must be open + * + * @param _allocations The mapping of allocation IDs to their states + * @param _allocationId The id of the allocation + * @param _indexer The address of the indexer + * @return The allocation state + */ + function _requireValidAllocation( + mapping(address => IAllocation.State) storage _allocations, + address _allocationId, + address _indexer + ) private view returns (IAllocation.State memory) { + IAllocation.State memory allocation = _allocations.get(_allocationId); + require( + allocation.indexer == _indexer, + ISubgraphService.SubgraphServiceAllocationNotAuthorized(_indexer, _allocationId) + ); + require(allocation.isOpen(), AllocationHandler.AllocationHandlerAllocationClosed(_allocationId)); + + return allocation; + } + + /** + * @notice Calculate tokens to collect based on pre-validated duration + * @param _manager The storage manager + * @param _agreementId The agreement ID + * @param _entities The number of entities indexed + * @param _collectionSeconds Pre-calculated valid collection duration + * @return The number of tokens to collect + */ + function _tokensToCollect( + StorageManager storage _manager, + bytes16 _agreementId, + uint256 _entities, + uint256 _collectionSeconds + ) private view returns (uint256) { + IndexingAgreementTermsV1 memory termsV1 = _manager.termsV1[_agreementId]; + return _collectionSeconds * (termsV1.tokensPerSecond + termsV1.tokensPerEntityPerSecond * _entities); + } + + /** + * @notice Checks if the agreement is active + * Requirements: + * - The indexing agreement is valid + * - The underlying collector agreement has been accepted + * @param wrapper The agreement wrapper containing the indexing agreement and collector agreement data + * @return True if the agreement is active, false otherwise + **/ + function _isActive(IIndexingAgreement.AgreementWrapper memory wrapper) private view returns (bool) { + return _isValid(wrapper) && 
wrapper.collectorAgreement.state == IRecurringCollector.AgreementState.Accepted; + } + + /** + * @notice Checks if the agreement is valid + * Requirements: + * - The underlying collector agreement's data service is this contract + * - The indexing agreement has been accepted and has a valid allocation ID + * @param wrapper The agreement wrapper containing the indexing agreement and collector agreement data + * @return True if the agreement is valid, false otherwise + **/ + function _isValid(IIndexingAgreement.AgreementWrapper memory wrapper) private view returns (bool) { + return wrapper.collectorAgreement.dataService == address(this) && wrapper.agreement.allocationId != address(0); + } + + /** + * @notice Gets the Directory + * @return The Directory contract + */ + function _directory() private view returns (Directory) { + return Directory(address(this)); + } + + /** + * @notice Gets the indexing agreement wrapper for a given agreement ID. + * @dev This function retrieves the indexing agreement wrapper containing the agreement state and collector agreement data. 
+ * @param self The indexing agreement storage manager + * @param agreementId The id of the indexing agreement + * @return The indexing agreement wrapper containing the agreement state and collector agreement data + */ + function _get( + StorageManager storage self, + bytes16 agreementId + ) private view returns (IIndexingAgreement.AgreementWrapper memory) { + return + IIndexingAgreement.AgreementWrapper({ + agreement: self.agreements[agreementId], + collectorAgreement: _directory().recurringCollector().getAgreement(agreementId) + }); + } + + /** + * @notice Validates indexing agreement terms against RCA limits + * @param terms The indexing agreement terms to validate + * @param maxOngoingTokensPerSecond The RCA maximum tokens per second limit + */ + function _validateTermsAgainstRCA( + IndexingAgreementTermsV1 memory terms, + uint256 maxOngoingTokensPerSecond + ) private pure { + require( + // solhint-disable-next-line gas-strict-inequalities + terms.tokensPerSecond <= maxOngoingTokensPerSecond, + IndexingAgreementInvalidTerms(terms.tokensPerSecond, maxOngoingTokensPerSecond) + ); + } +} diff --git a/packages/subgraph-service/contracts/libraries/IndexingAgreementDecoder.sol b/packages/subgraph-service/contracts/libraries/IndexingAgreementDecoder.sol new file mode 100644 index 000000000..a191e7d1f --- /dev/null +++ b/packages/subgraph-service/contracts/libraries/IndexingAgreementDecoder.sol @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.27; + +import { IndexingAgreementDecoderRaw } from "./IndexingAgreementDecoderRaw.sol"; +import { IndexingAgreement } from "./IndexingAgreement.sol"; + +/** + * @title IndexingAgreementDecoder library + * @author Edge & Node + * @notice Safe decoder for indexing agreement data structures, reverting with typed errors on malformed input. 
+ */ +library IndexingAgreementDecoder { + /** + * @notice Thrown when the data can't be decoded as expected + * @param t The type of data that was expected + * @param data The invalid data + */ + error IndexingAgreementDecoderInvalidData(string t, bytes data); + + /** + * @notice Decodes the data for collecting indexing fees. + * + * @param data The data to decode. + * @return agreementId The agreement ID + * @return nestedData The nested encoded data + */ + function decodeCollectData(bytes memory data) public pure returns (bytes16, bytes memory) { + try IndexingAgreementDecoderRaw.decodeCollectData(data) returns (bytes16 agreementId, bytes memory nestedData) { + return (agreementId, nestedData); + } catch { + revert IndexingAgreementDecoderInvalidData("decodeCollectData", data); + } + } + + /** + * @notice Decodes the RCA metadata. + * + * @param data The data to decode. + * @return The decoded data. See {IndexingAgreement.AcceptIndexingAgreementMetadata} + */ + function decodeRCAMetadata( + bytes memory data + ) public pure returns (IndexingAgreement.AcceptIndexingAgreementMetadata memory) { + try IndexingAgreementDecoderRaw.decodeRCAMetadata(data) returns ( + IndexingAgreement.AcceptIndexingAgreementMetadata memory decoded + ) { + return decoded; + } catch { + revert IndexingAgreementDecoderInvalidData("decodeRCAMetadata", data); + } + } + + /** + * @notice Decodes the RCAU metadata. + * + * @param data The data to decode. + * @return The decoded data. See {IndexingAgreement.UpdateIndexingAgreementMetadata} + */ + function decodeRCAUMetadata( + bytes memory data + ) public pure returns (IndexingAgreement.UpdateIndexingAgreementMetadata memory) { + try IndexingAgreementDecoderRaw.decodeRCAUMetadata(data) returns ( + IndexingAgreement.UpdateIndexingAgreementMetadata memory decoded + ) { + return decoded; + } catch { + revert IndexingAgreementDecoderInvalidData("decodeRCAUMetadata", data); + } + } + + /** + * @notice Decodes the collect data for indexing fees V1. 
+ * + * @param data The data to decode. + * @return The decoded data structure. See {IndexingAgreement.CollectIndexingFeeDataV1} + */ + function decodeCollectIndexingFeeDataV1( + bytes memory data + ) public pure returns (IndexingAgreement.CollectIndexingFeeDataV1 memory) { + try IndexingAgreementDecoderRaw.decodeCollectIndexingFeeDataV1(data) returns ( + IndexingAgreement.CollectIndexingFeeDataV1 memory decoded + ) { + return decoded; + } catch { + revert IndexingAgreementDecoderInvalidData("decodeCollectIndexingFeeDataV1", data); + } + } + + /** + * @notice Decodes the data for indexing agreement terms V1. + * + * @param data The data to decode. + * @return The decoded data structure. See {IndexingAgreement.IndexingAgreementTermsV1} + */ + function decodeIndexingAgreementTermsV1( + bytes memory data + ) public pure returns (IndexingAgreement.IndexingAgreementTermsV1 memory) { + try IndexingAgreementDecoderRaw.decodeIndexingAgreementTermsV1(data) returns ( + IndexingAgreement.IndexingAgreementTermsV1 memory decoded + ) { + return decoded; + } catch { + revert IndexingAgreementDecoderInvalidData("decodeIndexingAgreementTermsV1", data); + } + } +} diff --git a/packages/subgraph-service/contracts/libraries/IndexingAgreementDecoderRaw.sol b/packages/subgraph-service/contracts/libraries/IndexingAgreementDecoderRaw.sol new file mode 100644 index 000000000..7478089c6 --- /dev/null +++ b/packages/subgraph-service/contracts/libraries/IndexingAgreementDecoderRaw.sol @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.27; + +import { IndexingAgreement } from "./IndexingAgreement.sol"; + +/** + * @title IndexingAgreementDecoderRaw library + * @author Edge & Node + * @notice Low-level decoder for indexing agreement data structures, propagating native revert on malformed input. 
+ */ +library IndexingAgreementDecoderRaw { + /** + * @notice See {IndexingAgreementDecoder.decodeCollectData} + * @param data The data to decode + * @return agreementId The agreement ID + * @return nestedData The nested encoded data + */ + function decodeCollectData(bytes calldata data) public pure returns (bytes16, bytes memory) { + return abi.decode(data, (bytes16, bytes)); + } + + /** + * @notice See {IndexingAgreementDecoder.decodeRCAMetadata} + * @dev The data should be encoded as {IndexingAgreement.AcceptIndexingAgreementMetadata} + * @param data The data to decode + * @return The decoded data + */ + function decodeRCAMetadata( + bytes calldata data + ) public pure returns (IndexingAgreement.AcceptIndexingAgreementMetadata memory) { + return abi.decode(data, (IndexingAgreement.AcceptIndexingAgreementMetadata)); + } + + /** + * @notice See {IndexingAgreementDecoder.decodeRCAUMetadata} + * @dev The data should be encoded as {IndexingAgreement.UpdateIndexingAgreementMetadata} + * @param data The data to decode + * @return The decoded data + */ + function decodeRCAUMetadata( + bytes calldata data + ) public pure returns (IndexingAgreement.UpdateIndexingAgreementMetadata memory) { + return abi.decode(data, (IndexingAgreement.UpdateIndexingAgreementMetadata)); + } + + /** + * @notice See {IndexingAgreementDecoder.decodeCollectIndexingFeeDataV1} + * @dev The data should be encoded as {IndexingAgreement.CollectIndexingFeeDataV1} + * @param data The data to decode + * @return The decoded collect indexing fee V1 data + * + */ + function decodeCollectIndexingFeeDataV1( + bytes memory data + ) public pure returns (IndexingAgreement.CollectIndexingFeeDataV1 memory) { + return abi.decode(data, (IndexingAgreement.CollectIndexingFeeDataV1)); + } + + /** + * @notice See {IndexingAgreementDecoder.decodeIndexingAgreementTermsV1} + * @dev The data should be encoded as {IndexingAgreement.IndexingAgreementTermsV1} + * @param data The data to decode + * @return The 
decoded indexing agreement terms + */ + function decodeIndexingAgreementTermsV1( + bytes memory data + ) public pure returns (IndexingAgreement.IndexingAgreementTermsV1 memory) { + return abi.decode(data, (IndexingAgreement.IndexingAgreementTermsV1)); + } +} diff --git a/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol b/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol index 97b2be1dc..8439ed4fb 100644 --- a/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol +++ b/packages/subgraph-service/contracts/libraries/LegacyAllocation.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IHorizonStaking } from "@graphprotocol/interfaces/contracts/horizon/IHorizonStaking.sol"; import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; @@ -14,45 +14,9 @@ import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph- library LegacyAllocation { using LegacyAllocation for ILegacyAllocation.State; - /** - * @notice Migrate a legacy allocation - * @dev Requirements: - * - The allocation must not have been previously migrated - * @param self The legacy allocation list mapping - * @param indexer The indexer that owns the allocation - * @param allocationId The allocation id - * @param subgraphDeploymentId The subgraph deployment id the allocation is for - * @custom:error LegacyAllocationAlreadyMigrated if the allocation has already been migrated - */ - function migrate( - mapping(address => ILegacyAllocation.State) storage self, - address indexer, - address allocationId, - bytes32 subgraphDeploymentId - ) internal { - require(!self[allocationId].exists(), ILegacyAllocation.LegacyAllocationAlreadyExists(allocationId)); - - self[allocationId] = ILegacyAllocation.State({ indexer: indexer, subgraphDeploymentId: subgraphDeploymentId }); - } - - /** - * @notice Get a legacy allocation - * 
@param self The legacy allocation list mapping - * @param allocationId The allocation id - * @return The legacy allocation details - */ - function get( - mapping(address => ILegacyAllocation.State) storage self, - address allocationId - ) internal view returns (ILegacyAllocation.State memory) { - return _get(self, allocationId); - } - /** * @notice Revert if a legacy allocation exists - * @dev We first check the migrated mapping then the old staking contract. - * @dev TRANSITION PERIOD: after the transition period when all the allocations are migrated we can - * remove the call to the staking contract. + * @dev We check both the migrated allocations mapping and the legacy staking contract. * @param self The legacy allocation list mapping * @param graphStaking The Horizon Staking contract * @param allocationId The allocation id @@ -77,19 +41,4 @@ library LegacyAllocation { function exists(ILegacyAllocation.State memory self) internal pure returns (bool) { return self.indexer != address(0); } - - /** - * @notice Get a legacy allocation - * @param self The legacy allocation list mapping - * @param allocationId The allocation id - * @return The legacy allocation details - */ - function _get( - mapping(address => ILegacyAllocation.State) storage self, - address allocationId - ) private view returns (ILegacyAllocation.State storage) { - ILegacyAllocation.State storage allocation = self[allocationId]; - require(allocation.exists(), ILegacyAllocation.LegacyAllocationDoesNotExist(allocationId)); - return allocation; - } } diff --git a/packages/subgraph-service/contracts/utilities/AllocationManager.sol b/packages/subgraph-service/contracts/utilities/AllocationManager.sol index e78fbc6f8..69d980b4d 100644 --- a/packages/subgraph-service/contracts/utilities/AllocationManager.sol +++ b/packages/subgraph-service/contracts/utilities/AllocationManager.sol @@ -1,24 +1,21 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; -import { 
IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token/IGraphToken.sol"; -import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; import { IAllocationManager } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol"; import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; -import { RewardsCondition } from "@graphprotocol/interfaces/contracts/contracts/rewards/RewardsCondition.sol"; import { GraphDirectory } from "@graphprotocol/horizon/contracts/utilities/GraphDirectory.sol"; import { AllocationManagerV1Storage } from "./AllocationManagerStorage.sol"; import { TokenUtils } from "@graphprotocol/contracts/contracts/utils/TokenUtils.sol"; -import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; import { EIP712Upgradeable } from "@openzeppelin/contracts-upgradeable/utils/cryptography/EIP712Upgradeable.sol"; import { Allocation } from "../libraries/Allocation.sol"; import { LegacyAllocation } from "../libraries/LegacyAllocation.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; import { ProvisionTracker } from "@graphprotocol/horizon/contracts/data-service/libraries/ProvisionTracker.sol"; +import { AllocationHandler } from "../libraries/AllocationHandler.sol"; /** * @title AllocationManager contract @@ -47,7 +44,6 @@ abstract contract AllocationManager is keccak256("AllocationIdProof(address indexer,address allocationId)"); // solhint-disable-previous-line gas-small-strings - // forge-lint: disable-next-item(mixed-case-function) /** * @notice Initializes the contract and parent contracts * @param _name The name to use for EIP712 
domain separation @@ -58,25 +54,11 @@ abstract contract AllocationManager is __AllocationManager_init_unchained(); } - // forge-lint: disable-next-item(mixed-case-function) /** * @notice Initializes the contract */ function __AllocationManager_init_unchained() internal onlyInitializing {} - /** - * @notice Imports a legacy allocation id into the subgraph service - * This is a governor only action that is required to prevent indexers from re-using allocation ids from the - * legacy staking contract. It will revert with LegacyAllocationAlreadyMigrated if the allocation has already been migrated. - * @param _indexer The address of the indexer - * @param _allocationId The id of the allocation - * @param _subgraphDeploymentId The id of the subgraph deployment - */ - function _migrateLegacyAllocation(address _indexer, address _allocationId, bytes32 _subgraphDeploymentId) internal { - _legacyAllocations.migrate(_indexer, _allocationId, _subgraphDeploymentId); - emit LegacyAllocationMigrated(_indexer, _allocationId, _subgraphDeploymentId); - } - /** * @notice Create an allocation * @dev The `_allocationProof` is a 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationId)` @@ -101,76 +83,33 @@ abstract contract AllocationManager is bytes memory _allocationProof, uint32 _delegationRatio ) internal { - require(_allocationId != address(0), AllocationManagerInvalidZeroAllocationId()); - - _verifyAllocationProof(_indexer, _allocationId, _allocationProof); - - // Ensure allocation id is not reused - // need to check both subgraph service (on allocations.create()) and legacy allocations - _legacyAllocations.revertIfExists(_graphStaking(), _allocationId); - - uint256 currentEpoch = _graphEpochManager().currentEpoch(); - IAllocation.State memory allocation = _allocations.create( - _indexer, - _allocationId, - _subgraphDeploymentId, - _tokens, - _graphRewardsManager().onSubgraphAllocationUpdate(_subgraphDeploymentId), - currentEpoch + AllocationHandler.allocate( + 
_allocations, + _legacyAllocations, + allocationProvisionTracker, + _subgraphAllocatedTokens, + AllocationHandler.AllocateParams({ + _allocationId: _allocationId, + _allocationProof: _allocationProof, + _encodeAllocationProof: _encodeAllocationProof(_indexer, _allocationId), + _delegationRatio: _delegationRatio, + _indexer: _indexer, + _subgraphDeploymentId: _subgraphDeploymentId, + _tokens: _tokens, + currentEpoch: _graphEpochManager().currentEpoch(), + graphRewardsManager: _graphRewardsManager(), + graphStaking: _graphStaking() + }) ); - - // Check that the indexer has enough tokens available - // Note that the delegation ratio ensures overdelegation cannot be used - allocationProvisionTracker.lock(_graphStaking(), _indexer, _tokens, _delegationRatio); - - // Update total allocated tokens for the subgraph deployment - _subgraphAllocatedTokens[allocation.subgraphDeploymentId] = - _subgraphAllocatedTokens[allocation.subgraphDeploymentId] + allocation.tokens; - - emit AllocationCreated(_indexer, _allocationId, _subgraphDeploymentId, allocation.tokens, currentEpoch); } /** * @notice Present a POI to collect indexing rewards for an allocation * Mints indexing rewards using the {RewardsManager} and distributes them to the indexer and delegators. 
* - * Requirements for indexing rewards: - * - POI must be non-zero - * - POI must not be stale (older than `maxPOIStaleness`) - * - Allocation must be open for at least one epoch (returns early with 0 if too young) - * - * ## Reward Paths - * - * Rewards follow one of three paths based on allocation and POI state: - * - * **CLAIMED** (normal path): Valid POI, not stale, allocation mature, subgraph not denied - * - Calls `takeRewards()` to mint tokens to this contract - * - Distributes to indexer (stake or payments destination) and delegators - * - Snapshots allocation to prevent double-counting - * - * **RECLAIMED** (redirect path): STALE_POI or ZERO_POI conditions - * - Calls `reclaimRewards()` to mint tokens to configured reclaim address - * - If no reclaim address configured, rewards are dropped (not minted) - * - Snapshots allocation to prevent double-counting - * - * **DEFERRED** (early return): ALLOCATION_TOO_YOUNG or SUBGRAPH_DENIED conditions - * - Returns 0 without calling take or reclaim - * - Does NOT snapshot allocation (preserves rewards for later collection) - * - Allows rewards to be claimed when condition clears - * - * ## Subgraph Denial (Soft Deny) - * - * When a subgraph is denied, this function implements "soft deny": - * - Returns early without claiming or reclaiming - * - Allocation state is preserved (pending rewards not cleared) - * - Pre-denial rewards remain claimable after undeny - * - Ongoing issuance during denial is reclaimed at RewardsManager level (hard deny) - * - * Note: Indexers should present POIs at least every `maxPOIStaleness` to avoid being locked out of rewards. - * A zero POI can be presented if a valid one is unavailable, to prevent staleness and slashing. - * - * Note: Reclaim address changes in RewardsManager apply retroactively to all unclaimed rewards. + * See {AllocationHandler-presentPOI} for detailed reward path documentation. * + * Emits a {POIPresented} event. * Emits a {IndexingRewardsCollected} event. 
* * @param _allocationId The id of the allocation to collect rewards for @@ -179,6 +118,7 @@ abstract contract AllocationManager is * @param _delegationRatio The delegation ratio to consider when locking tokens * @param _paymentsDestination The address where indexing rewards should be sent * @return rewardsCollected Indexing rewards collected + * @return allocationForceClosed True if the allocation was force closed due to over-allocation */ // solhint-disable-next-line function-max-lines function _presentPoi( @@ -187,75 +127,26 @@ abstract contract AllocationManager is bytes memory _poiMetadata, uint32 _delegationRatio, address _paymentsDestination - ) internal returns (uint256 rewardsCollected) { - IAllocation.State memory allocation = _allocations.get(_allocationId); - require(allocation.isOpen(), AllocationManagerAllocationClosed(_allocationId)); - _allocations.presentPOI(_allocationId); // Always record POI presentation to prevent staleness - - uint256 currentEpoch = _graphEpochManager().currentEpoch(); - // Scoped for stack management - { - // Determine rewards condition - bytes32 condition = RewardsCondition.NONE; - if (allocation.isStale(maxPOIStaleness)) condition = RewardsCondition.STALE_POI; - else if (_poi == bytes32(0)) - condition = RewardsCondition.ZERO_POI; - // solhint-disable-next-line gas-strict-inequalities - else if (currentEpoch <= allocation.createdAtEpoch) condition = RewardsCondition.ALLOCATION_TOO_YOUNG; - else if (_graphRewardsManager().isDenied(allocation.subgraphDeploymentId)) - condition = RewardsCondition.SUBGRAPH_DENIED; - - emit POIPresented( - allocation.indexer, - _allocationId, - allocation.subgraphDeploymentId, - _poi, - _poiMetadata, - condition - ); - - // Early return skips the overallocation check intentionally to avoid loss of uncollected rewards - if (condition == RewardsCondition.ALLOCATION_TOO_YOUNG || condition == RewardsCondition.SUBGRAPH_DENIED) { - // Keep reward and reclaim accumulation current even if rewards are 
not collected - _graphRewardsManager().onSubgraphAllocationUpdate(allocation.subgraphDeploymentId); - - return 0; - } - - bool rewardsReclaimable = condition == RewardsCondition.STALE_POI || condition == RewardsCondition.ZERO_POI; - if (rewardsReclaimable) _graphRewardsManager().reclaimRewards(condition, _allocationId); - else rewardsCollected = _graphRewardsManager().takeRewards(_allocationId); - } - - // Snapshot rewards to prevent accumulation for next POI, then clear pending - _allocations.snapshotRewards( - _allocationId, - _graphRewardsManager().onSubgraphAllocationUpdate(allocation.subgraphDeploymentId) - ); - _allocations.clearPendingRewards(_allocationId); - - // Scoped for stack management - { - (uint256 tokensIndexerRewards, uint256 tokensDelegationRewards) = _distributeIndexingRewards( - allocation, - rewardsCollected, - _paymentsDestination - ); - - emit IndexingRewardsCollected( - allocation.indexer, - _allocationId, - allocation.subgraphDeploymentId, - rewardsCollected, - tokensIndexerRewards, - tokensDelegationRewards, - _poi, - _poiMetadata, - currentEpoch + ) internal returns (uint256, bool) { + return + AllocationHandler.presentPOI( + _allocations, + allocationProvisionTracker, + _subgraphAllocatedTokens, + AllocationHandler.PresentParams({ + maxPOIStaleness: maxPOIStaleness, + graphEpochManager: _graphEpochManager(), + graphStaking: _graphStaking(), + graphRewardsManager: _graphRewardsManager(), + graphToken: _graphToken(), + dataService: address(this), + _allocationId: _allocationId, + _poi: _poi, + _poiMetadata: _poiMetadata, + _delegationRatio: _delegationRatio, + _paymentsDestination: _paymentsDestination + }) ); - } - - if (_isOverAllocated(allocation.indexer, _delegationRatio)) _closeAllocation(_allocationId, true); } /** @@ -277,49 +168,17 @@ abstract contract AllocationManager is * @param _delegationRatio The delegation ratio to consider when locking tokens */ function _resizeAllocation(address _allocationId, uint256 _tokens, uint32 
_delegationRatio) internal { - IAllocation.State memory allocation = _allocations.get(_allocationId); - require(allocation.isOpen(), AllocationManagerAllocationClosed(_allocationId)); - require(_tokens != allocation.tokens, AllocationManagerAllocationSameSize(_allocationId, _tokens)); - - // Update provision tracker - uint256 oldTokens = allocation.tokens; - if (_tokens > oldTokens) { - allocationProvisionTracker.lock(_graphStaking(), allocation.indexer, _tokens - oldTokens, _delegationRatio); - } else { - allocationProvisionTracker.release(allocation.indexer, oldTokens - _tokens); - } - - // Calculate rewards that have been accrued since the last snapshot but not yet issued - uint256 accRewardsPerAllocatedToken = _graphRewardsManager().onSubgraphAllocationUpdate( - allocation.subgraphDeploymentId - ); - uint256 accRewardsPerAllocatedTokenPending = !allocation.isAltruistic() - ? accRewardsPerAllocatedToken - allocation.accRewardsPerAllocatedToken - : 0; - - // Update the allocation - _allocations[_allocationId].tokens = _tokens; - _allocations[_allocationId].accRewardsPerAllocatedToken = accRewardsPerAllocatedToken; - _allocations[_allocationId].accRewardsPending += _graphRewardsManager().calcRewards( - oldTokens, - accRewardsPerAllocatedTokenPending + AllocationHandler.resizeAllocation( + _allocations, + allocationProvisionTracker, + _subgraphAllocatedTokens, + _graphStaking(), + _graphRewardsManager(), + _allocationId, + _tokens, + _delegationRatio, + maxPOIStaleness ); - - // If allocation is stale, reclaim pending rewards defensively. - // Stale allocations are not performing, so rewards should not accumulate. 
- if (allocation.isStale(maxPOIStaleness)) { - _graphRewardsManager().reclaimRewards(RewardsCondition.STALE_POI, _allocationId); - _allocations.clearPendingRewards(_allocationId); - } - - // Update total allocated tokens for the subgraph deployment - if (_tokens > oldTokens) { - _subgraphAllocatedTokens[allocation.subgraphDeploymentId] += (_tokens - oldTokens); - } else { - _subgraphAllocatedTokens[allocation.subgraphDeploymentId] -= (oldTokens - _tokens); - } - - emit AllocationResized(allocation.indexer, _allocationId, allocation.subgraphDeploymentId, _tokens, oldTokens); } /** @@ -334,49 +193,18 @@ abstract contract AllocationManager is * - If reclaim address configured: tokens minted to that address * - If no reclaim address: rewards are dropped (not minted anywhere) * - * ## Known Limitation - * - * `clearPendingRewards()` is only called when `0 < reclaimedRewards`. This means: - * - If no reclaim address is configured, `accRewardsPending` may remain non-zero - * * Emits a {AllocationClosed} event * * @param _allocationId The id of the allocation to be closed * @param _forceClosed Whether the allocation was force closed */ function _closeAllocation(address _allocationId, bool _forceClosed) internal { - IAllocation.State memory allocation = _allocations.get(_allocationId); - - // Reclaim uncollected rewards before closing - uint256 reclaimedRewards = _graphRewardsManager().reclaimRewards( - RewardsCondition.CLOSE_ALLOCATION, - _allocationId - ); - - // Take rewards snapshot to prevent other allos from counting tokens from this allo - _allocations.snapshotRewards( + AllocationHandler.closeAllocation( + _allocations, + allocationProvisionTracker, + _subgraphAllocatedTokens, + _graphRewardsManager(), _allocationId, - _graphRewardsManager().onSubgraphAllocationUpdate(allocation.subgraphDeploymentId) - ); - - // Clear pending rewards only if rewards were reclaimed. 
This marks them as consumed, - // which could be useful for future logic that searches for unconsumed rewards. - // Known limitation: This capture is incomplete due to other code paths (e.g., _presentPOI) - // that clear pending even when rewards are not consumed. - if (0 < reclaimedRewards) _allocations.clearPendingRewards(_allocationId); - - _allocations.close(_allocationId); - allocationProvisionTracker.release(allocation.indexer, allocation.tokens); - - // Update total allocated tokens for the subgraph deployment - _subgraphAllocatedTokens[allocation.subgraphDeploymentId] = - _subgraphAllocatedTokens[allocation.subgraphDeploymentId] - allocation.tokens; - - emit AllocationClosed( - allocation.indexer, - _allocationId, - allocation.subgraphDeploymentId, - allocation.tokens, _forceClosed ); } @@ -408,62 +236,7 @@ abstract contract AllocationManager is * @return True if the allocation is over-allocated, false otherwise */ function _isOverAllocated(address _indexer, uint32 _delegationRatio) internal view returns (bool) { - return !allocationProvisionTracker.check(_graphStaking(), _indexer, _delegationRatio); - } - - /** - * @notice Distributes indexing rewards to delegators and indexer - * @param _allocation The allocation state - * @param _rewardsCollected Total rewards to distribute - * @param _paymentsDestination Where to send indexer rewards (0 = stake) - * @return tokensIndexerRewards Amount sent to indexer - * @return tokensDelegationRewards Amount sent to delegation pool - */ - function _distributeIndexingRewards( - IAllocation.State memory _allocation, - uint256 _rewardsCollected, - address _paymentsDestination - ) private returns (uint256 tokensIndexerRewards, uint256 tokensDelegationRewards) { - if (_rewardsCollected == 0) return (0, 0); - - // Calculate and distribute delegator share - uint256 delegatorCut = _graphStaking().getDelegationFeeCut( - _allocation.indexer, - address(this), - IGraphPayments.PaymentTypes.IndexingRewards - ); - 
IHorizonStakingTypes.DelegationPool memory pool = _graphStaking().getDelegationPool( - _allocation.indexer, - address(this) - ); - tokensDelegationRewards = pool.shares > 0 ? _rewardsCollected.mulPPM(delegatorCut) : 0; - if (tokensDelegationRewards > 0) { - _graphToken().approve(address(_graphStaking()), tokensDelegationRewards); - _graphStaking().addToDelegationPool(_allocation.indexer, address(this), tokensDelegationRewards); - } - - // Distribute indexer share - tokensIndexerRewards = _rewardsCollected - tokensDelegationRewards; - if (tokensIndexerRewards > 0) { - if (_paymentsDestination == address(0)) { - _graphToken().approve(address(_graphStaking()), tokensIndexerRewards); - _graphStaking().stakeToProvision(_allocation.indexer, address(this), tokensIndexerRewards); - } else { - _graphToken().pushTokens(_paymentsDestination, tokensIndexerRewards); - } - } - } - - /** - * @notice Verifies ownership of an allocation id by verifying an EIP712 allocation proof - * @dev Requirements: - * - Signer must be the allocation id address - * @param _indexer The address of the indexer - * @param _allocationId The id of the allocation - * @param _proof The EIP712 proof, an EIP712 signed message of (indexer,allocationId) - */ - function _verifyAllocationProof(address _indexer, address _allocationId, bytes memory _proof) private view { - address signer = ECDSA.recover(_encodeAllocationProof(_indexer, _allocationId), _proof); - require(signer == _allocationId, AllocationManagerInvalidAllocationProof(signer, _allocationId)); + return + AllocationHandler.isOverAllocated(allocationProvisionTracker, _graphStaking(), _indexer, _delegationRatio); } } diff --git a/packages/subgraph-service/contracts/utilities/AllocationManagerStorage.sol b/packages/subgraph-service/contracts/utilities/AllocationManagerStorage.sol index 053b32a70..8f3460876 100644 --- a/packages/subgraph-service/contracts/utilities/AllocationManagerStorage.sol +++ 
b/packages/subgraph-service/contracts/utilities/AllocationManagerStorage.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; import { IAllocationManager } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol"; diff --git a/packages/subgraph-service/contracts/utilities/AttestationManager.sol b/packages/subgraph-service/contracts/utilities/AttestationManager.sol index 4ba57e639..c050786c0 100644 --- a/packages/subgraph-service/contracts/utilities/AttestationManager.sol +++ b/packages/subgraph-service/contracts/utilities/AttestationManager.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable gas-small-strings diff --git a/packages/subgraph-service/contracts/utilities/AttestationManagerStorage.sol b/packages/subgraph-service/contracts/utilities/AttestationManagerStorage.sol index 40f4c614c..2b7be6850 100644 --- a/packages/subgraph-service/contracts/utilities/AttestationManagerStorage.sol +++ b/packages/subgraph-service/contracts/utilities/AttestationManagerStorage.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; /** * @title AttestationManagerStorage diff --git a/packages/subgraph-service/contracts/utilities/Directory.sol b/packages/subgraph-service/contracts/utilities/Directory.sol index 09d180a5d..6c85af462 100644 --- a/packages/subgraph-service/contracts/utilities/Directory.sol +++ b/packages/subgraph-service/contracts/utilities/Directory.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-3.0-or-later -pragma solidity 0.8.33; +pragma solidity ^0.8.27; // TODO: Re-enable and fix issues when publishing a new version // solhint-disable 
gas-indexed-events @@ -8,6 +8,7 @@ pragma solidity 0.8.33; import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-service/IDisputeManager.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IGraphTallyCollector } from "@graphprotocol/interfaces/contracts/horizon/IGraphTallyCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { ICuration } from "@graphprotocol/interfaces/contracts/contracts/curation/ICuration.sol"; /** @@ -30,6 +31,10 @@ abstract contract Directory { /// @dev Required to collect payments via Graph Horizon payments protocol IGraphTallyCollector private immutable GRAPH_TALLY_COLLECTOR; + /// @notice The Recurring Collector contract address + /// @dev Required to collect indexing agreement payments via Graph Horizon payments protocol + IRecurringCollector private immutable RECURRING_COLLECTOR; + /// @notice The Curation contract address /// @dev Required for curation fees distribution ICuration private immutable CURATION; @@ -40,12 +45,14 @@ abstract contract Directory { * @param disputeManager The Dispute Manager contract address * @param graphTallyCollector The Graph Tally Collector contract address * @param curation The Curation contract address + * @param recurringCollector The Recurring Collector contract address */ event SubgraphServiceDirectoryInitialized( address subgraphService, address disputeManager, address graphTallyCollector, - address curation + address curation, + address recurringCollector ); /** @@ -72,14 +79,36 @@ abstract contract Directory { * @param disputeManager The Dispute Manager contract address * @param graphTallyCollector The Graph Tally Collector contract address * @param curation The Curation contract address + * @param recurringCollector_ The Recurring Collector contract address */ - constructor(address subgraphService, address disputeManager, 
address graphTallyCollector, address curation) { + constructor( + address subgraphService, + address disputeManager, + address graphTallyCollector, + address curation, + address recurringCollector_ + ) { SUBGRAPH_SERVICE = ISubgraphService(subgraphService); DISPUTE_MANAGER = IDisputeManager(disputeManager); GRAPH_TALLY_COLLECTOR = IGraphTallyCollector(graphTallyCollector); CURATION = ICuration(curation); + RECURRING_COLLECTOR = IRecurringCollector(recurringCollector_); - emit SubgraphServiceDirectoryInitialized(subgraphService, disputeManager, graphTallyCollector, curation); + emit SubgraphServiceDirectoryInitialized( + subgraphService, + disputeManager, + graphTallyCollector, + curation, + recurringCollector_ + ); + } + + /** + * @notice Returns the Recurring Collector contract address + * @return The Recurring Collector contract + */ + function recurringCollector() external view returns (IRecurringCollector) { + return RECURRING_COLLECTOR; } /** diff --git a/packages/subgraph-service/hardhat.config.ts b/packages/subgraph-service/hardhat.config.ts index aca08e03c..f6f6b387e 100644 --- a/packages/subgraph-service/hardhat.config.ts +++ b/packages/subgraph-service/hardhat.config.ts @@ -19,7 +19,7 @@ const baseConfig = hardhatBaseConfig(require) const config: HardhatUserConfig = { ...baseConfig, solidity: { - version: '0.8.33', + version: '0.8.34', settings: { optimizer: { enabled: true, runs: 100 }, evmVersion: 'cancun', diff --git a/packages/subgraph-service/package.json b/packages/subgraph-service/package.json index 8161ecb45..068e81b8a 100644 --- a/packages/subgraph-service/package.json +++ b/packages/subgraph-service/package.json @@ -21,7 +21,7 @@ "lint": "pnpm lint:ts; pnpm lint:sol; pnpm lint:forge; pnpm lint:md; pnpm lint:json", "lint:ts": "eslint --fix --cache '**/*.{js,ts,cjs,mjs,jsx,tsx}'; prettier -w --cache --log-level warn '**/*.{js,ts,cjs,mjs,jsx,tsx}'", "lint:sol": "solhint --fix --noPrompt --noPoster 'contracts/**/*.sol'; prettier -w --cache 
--log-level warn '**/*.sol'", - "lint:forge": "forge lint", + "lint:forge": "forge lint contracts/", "lint:md": "markdownlint --fix --ignore-path ../../.gitignore '**/*.md'; prettier -w --cache --log-level warn '**/*.md'", "lint:json": "prettier -w --cache --log-level warn '**/*.json'", "clean": "rm -rf build dist cache cache_forge typechain-types", diff --git a/packages/subgraph-service/scripts/integration b/packages/subgraph-service/scripts/integration index d5d7f1c0d..58a7ba4fe 100755 --- a/packages/subgraph-service/scripts/integration +++ b/packages/subgraph-service/scripts/integration @@ -124,13 +124,6 @@ npx hardhat deploy:migrate --network localhost --horizon-config integration --st cd ../subgraph-service npx hardhat test:seed --network localhost -# Run integration tests - During transition period -npx hardhat test:integration --phase during-transition-period --network localhost - -# Clear thawing period -cd ../horizon -npx hardhat transition:clear-thawing --network localhost --governor-index 1 - # Run integration tests - After transition period cd ../subgraph-service npx hardhat test:integration --phase after-transition-period --network localhost diff --git a/packages/subgraph-service/tasks/test/integration.ts b/packages/subgraph-service/tasks/test/integration.ts index 130058e90..ef63c42f4 100644 --- a/packages/subgraph-service/tasks/test/integration.ts +++ b/packages/subgraph-service/tasks/test/integration.ts @@ -4,13 +4,9 @@ import { TASK_TEST } from 'hardhat/builtin-tasks/task-names' import { task } from 'hardhat/config' task('test:integration', 'Runs all integration tests') - .addParam( - 'phase', - 'Test phase to run: "during-transition-period", "after-transition-period", "after-delegation-slashing-enabled"', - ) + .addParam('phase', 'Test phase to run: "after-transition-period", "after-delegation-slashing-enabled"') .setAction(async (taskArgs, hre) => { // Get test files for each phase - const duringTransitionPeriodFiles = await 
glob('test/integration/during-transition-period/**/*.{js,ts}') const afterTransitionPeriodFiles = await glob('test/integration/after-transition-period/**/*.{js,ts}') // Display banner for the current test phase @@ -18,15 +14,12 @@ task('test:integration', 'Runs all integration tests') // Run tests for the current phase switch (taskArgs.phase) { - case 'during-transition-period': - await hre.run(TASK_TEST, { testFiles: duringTransitionPeriodFiles }) - break case 'after-transition-period': await hre.run(TASK_TEST, { testFiles: afterTransitionPeriodFiles }) break default: throw new Error( - 'Invalid phase. Must be "during-transition-period", "after-transition-period", "after-delegation-slashing-enabled", or "all"', + 'Invalid phase. Must be "after-transition-period", "after-delegation-slashing-enabled", or "all"', ) } }) diff --git a/packages/subgraph-service/test/integration/during-transition-period/dispute-manager.test.ts b/packages/subgraph-service/test/integration/during-transition-period/dispute-manager.test.ts deleted file mode 100644 index a24f9703a..000000000 --- a/packages/subgraph-service/test/integration/during-transition-period/dispute-manager.test.ts +++ /dev/null @@ -1,157 +0,0 @@ -import { - DisputeManager, - HorizonStaking, - L2GraphToken, - LegacyDisputeManager, - SubgraphService, -} from '@graphprotocol/interfaces' -import { generateLegacyIndexingDisputeId, generateLegacyTypeDisputeId } from '@graphprotocol/toolshed' -import { indexersData as indexers } from '@graphprotocol/toolshed/fixtures' -import { setGRTBalance } from '@graphprotocol/toolshed/hardhat' -import { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import { ethers } from 'hardhat' -import hre from 'hardhat' - -describe('Dispute Manager', () => { - let disputeManager: DisputeManager - let legacyDisputeManager: LegacyDisputeManager - let graphToken: L2GraphToken - let staking: HorizonStaking - let subgraphService: SubgraphService - - 
let snapshotId: string - - // Test addresses - let governor: HardhatEthersSigner - let fisherman: HardhatEthersSigner - let arbitrator: HardhatEthersSigner - let indexer: HardhatEthersSigner - - let disputeDeposit: bigint - - // Allocation variables - let allocationId: string - - before(async () => { - // Get contracts - const graph = hre.graph() - disputeManager = graph.subgraphService.contracts.DisputeManager - legacyDisputeManager = graph.subgraphService.contracts.LegacyDisputeManager - graphToken = graph.horizon.contracts.GraphToken - staking = graph.horizon.contracts.HorizonStaking - subgraphService = graph.subgraphService.contracts.SubgraphService - - // Get signers - governor = await graph.accounts.getGovernor() - arbitrator = await graph.accounts.getArbitrator() - ;[fisherman] = await graph.accounts.getTestAccounts() - - // Get indexer - const indexerFixture = indexers[0] - indexer = await ethers.getSigner(indexerFixture.address) - - // Get allocation - const allocation = indexerFixture.legacyAllocations[0] - allocationId = allocation.allocationID - - // Get dispute deposit - disputeDeposit = ethers.parseEther('10000') - - // Set GRT balance for fisherman - await setGRTBalance(graph.provider, graphToken.target, fisherman.address, ethers.parseEther('1000000')) - - // Set arbitrator - await legacyDisputeManager.connect(governor).setArbitrator(arbitrator.address) - }) - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('Legacy dispute type', () => { - describe('Arbitrator', () => { - it('should allow arbitrator to create and accept a legacy dispute on the new dispute manager after slashing on the legacy dispute manager', async () => { - // Create an indexing dispute on legacy dispute manager - await 
graphToken.connect(fisherman).approve(legacyDisputeManager.target, disputeDeposit) - await legacyDisputeManager.connect(fisherman).createIndexingDispute(allocationId, disputeDeposit) - const legacyDisputeId = generateLegacyIndexingDisputeId(allocationId) - - // Accept the dispute on the legacy dispute manager - await legacyDisputeManager.connect(arbitrator).acceptDispute(legacyDisputeId) - - // Get fisherman's balance before creating dispute - const fishermanBalanceBefore = await graphToken.balanceOf(fisherman.address) - - // Get indexer's provision before creating dispute - const provision = await staking.getProviderTokensAvailable(indexer.address, await subgraphService.getAddress()) - - // Create and accept legacy dispute using the same allocation ID - const tokensToSlash = ethers.parseEther('100000') - const tokensToReward = tokensToSlash / 2n - await disputeManager - .connect(arbitrator) - .createAndAcceptLegacyDispute(allocationId, fisherman.address, tokensToSlash, tokensToReward) - - // Get dispute ID from event - const disputeId = generateLegacyTypeDisputeId(allocationId) - - // Verify dispute was created and accepted - const dispute = await disputeManager.disputes(disputeId) - expect(dispute.indexer).to.equal(indexer.address, 'Indexer address mismatch') - expect(dispute.fisherman).to.equal(fisherman.address, 'Fisherman address mismatch') - expect(dispute.disputeType).to.equal(3, 'Dispute type should be legacy') - expect(dispute.status).to.equal(1, 'Dispute status should be accepted') - - // Verify indexer's stake was slashed - const updatedProvision = await staking.getProviderTokensAvailable( - indexer.address, - await subgraphService.getAddress(), - ) - expect(updatedProvision).to.equal(provision - tokensToSlash, 'Indexer stake should be slashed') - - // Verify fisherman got the reward - const fishermanBalance = await graphToken.balanceOf(fisherman.address) - expect(fishermanBalance).to.equal( - fishermanBalanceBefore + tokensToReward, - 'Fisherman balance 
should be increased by the reward', - ) - }) - - it('should not allow creating a legacy dispute for non-existent allocation', async () => { - const tokensToSlash = ethers.parseEther('1000') - const tokensToReward = tokensToSlash / 2n - - // Attempt to create legacy dispute with non-existent allocation - await expect( - disputeManager - .connect(arbitrator) - .createAndAcceptLegacyDispute( - ethers.Wallet.createRandom().address, - fisherman.address, - tokensToSlash, - tokensToReward, - ), - ).to.be.revertedWithCustomError(disputeManager, 'DisputeManagerIndexerNotFound') - }) - }) - - it('should not allow non-arbitrator to create a legacy dispute', async () => { - const tokensToSlash = ethers.parseEther('1000') - const tokensToReward = tokensToSlash / 2n - - // Attempt to create legacy dispute as fisherman - await expect( - disputeManager - .connect(fisherman) - .createAndAcceptLegacyDispute(allocationId, fisherman.address, tokensToSlash, tokensToReward), - ).to.be.revertedWithCustomError(disputeManager, 'DisputeManagerNotArbitrator') - }) - }) -}) diff --git a/packages/subgraph-service/test/integration/during-transition-period/governance.test.ts b/packages/subgraph-service/test/integration/during-transition-period/governance.test.ts deleted file mode 100644 index ad638b306..000000000 --- a/packages/subgraph-service/test/integration/during-transition-period/governance.test.ts +++ /dev/null @@ -1,76 +0,0 @@ -import { SubgraphService } from '@graphprotocol/interfaces' -import { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import { ethers } from 'hardhat' -import hre from 'hardhat' - -describe('Governance', () => { - let subgraphService: SubgraphService - let snapshotId: string - - // Test addresses - let governor: HardhatEthersSigner - let indexer: HardhatEthersSigner - let nonOwner: HardhatEthersSigner - let allocationId: string - let subgraphDeploymentId: string - - const graph = hre.graph() - - before(() => { - 
subgraphService = graph.subgraphService.contracts.SubgraphService - }) - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - - // Get signers - governor = await graph.accounts.getGovernor() - ;[indexer, nonOwner] = await graph.accounts.getTestAccounts() - - // Generate test addresses - allocationId = ethers.Wallet.createRandom().address - subgraphDeploymentId = ethers.keccak256(ethers.toUtf8Bytes('test-subgraph-deployment')) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('Legacy Allocation Migration', () => { - it('should migrate legacy allocation', async () => { - // Migrate legacy allocation - await subgraphService - .connect(governor) - .migrateLegacyAllocation(indexer.address, allocationId, subgraphDeploymentId) - - // Verify the legacy allocation was migrated - const legacyAllocation = await subgraphService.getLegacyAllocation(allocationId) - expect(legacyAllocation.indexer).to.equal(indexer.address) - expect(legacyAllocation.subgraphDeploymentId).to.equal(subgraphDeploymentId) - }) - - it('should not allow non-owner to migrate legacy allocation', async () => { - // Attempt to migrate legacy allocation as non-owner - await expect( - subgraphService.connect(nonOwner).migrateLegacyAllocation(indexer.address, allocationId, subgraphDeploymentId), - ).to.be.revertedWithCustomError(subgraphService, 'OwnableUnauthorizedAccount') - }) - - it('should not allow migrating a legacy allocation that was already migrated', async () => { - // First migration - await subgraphService - .connect(governor) - .migrateLegacyAllocation(indexer.address, allocationId, subgraphDeploymentId) - - // Attempt to migrate the same allocation again - await expect( - subgraphService.connect(governor).migrateLegacyAllocation(indexer.address, allocationId, subgraphDeploymentId), - ) - 
.to.be.revertedWithCustomError(subgraphService, 'LegacyAllocationAlreadyExists') - .withArgs(allocationId) - }) - }) -}) diff --git a/packages/subgraph-service/test/integration/during-transition-period/indexer.test.ts b/packages/subgraph-service/test/integration/during-transition-period/indexer.test.ts deleted file mode 100644 index 7fd508c40..000000000 --- a/packages/subgraph-service/test/integration/during-transition-period/indexer.test.ts +++ /dev/null @@ -1,100 +0,0 @@ -import { SubgraphService } from '@graphprotocol/interfaces' -import { encodeStartServiceData, generateAllocationProof } from '@graphprotocol/toolshed' -import { indexersData as indexers } from '@graphprotocol/toolshed/fixtures' -import { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import { ethers } from 'hardhat' -import hre from 'hardhat' - -describe('Indexer', () => { - let subgraphService: SubgraphService - let snapshotId: string - let chainId: number - - // Test addresses - let governor: HardhatEthersSigner - let indexer: HardhatEthersSigner - let allocationId: string - let subgraphDeploymentId: string - let allocationPrivateKey: string - let subgraphServiceAddress: string - - const graph = hre.graph() - - before(async () => { - // Get contracts - subgraphService = graph.subgraphService.contracts.SubgraphService - - // Get governor and non-owner - governor = await graph.accounts.getGovernor() - - // Get chain id - chainId = Number((await hre.ethers.provider.getNetwork()).chainId) - - // Get subgraph service address - subgraphServiceAddress = await subgraphService.getAddress() - }) - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('Allocation', () => { - beforeEach(async () => { - // Get indexer - const 
indexerFixture = indexers[0] - indexer = await ethers.getSigner(indexerFixture.address) - - // Generate test addresses - const allocation = indexerFixture.legacyAllocations[0] - allocationId = allocation.allocationID - subgraphDeploymentId = allocation.subgraphDeploymentID - allocationPrivateKey = allocation.allocationPrivateKey - }) - - it('should not be able to create an allocation with an AllocationID that already exists in HorizonStaking contract', async () => { - // Build allocation proof - const signature = await generateAllocationProof( - indexer.address, - allocationPrivateKey, - subgraphServiceAddress, - chainId, - ) - - // Attempt to create an allocation with the same ID - const data = encodeStartServiceData(subgraphDeploymentId, 1000n, allocationId, signature) - - await expect(subgraphService.connect(indexer).startService(indexer.address, data)) - .to.be.revertedWithCustomError(subgraphService, 'LegacyAllocationAlreadyExists') - .withArgs(allocationId) - }) - - it('should not be able to create an allocation that was already migrated by the owner', async () => { - // Migrate legacy allocation - await subgraphService - .connect(governor) - .migrateLegacyAllocation(indexer.address, allocationId, subgraphDeploymentId) - - // Build allocation proof - const signature = await generateAllocationProof( - indexer.address, - allocationPrivateKey, - subgraphServiceAddress, - chainId, - ) - - // Attempt to create the same allocation - const data = encodeStartServiceData(subgraphDeploymentId, 1000n, allocationId, signature) - - await expect(subgraphService.connect(indexer).startService(indexer.address, data)) - .to.be.revertedWithCustomError(subgraphService, 'LegacyAllocationAlreadyExists') - .withArgs(allocationId) - }) - }) -}) diff --git a/packages/subgraph-service/test/integration/during-transition-period/legacy-dispute-manager.test.ts b/packages/subgraph-service/test/integration/during-transition-period/legacy-dispute-manager.test.ts deleted file mode 100644 
index 51cfc557c..000000000 --- a/packages/subgraph-service/test/integration/during-transition-period/legacy-dispute-manager.test.ts +++ /dev/null @@ -1,256 +0,0 @@ -import { HorizonStaking, L2GraphToken, LegacyDisputeManager } from '@graphprotocol/interfaces' -import { - generateAttestationData, - generateLegacyIndexingDisputeId, - generateLegacyQueryDisputeId, -} from '@graphprotocol/toolshed' -import { indexersData as indexers } from '@graphprotocol/toolshed/fixtures' -import { setGRTBalance } from '@graphprotocol/toolshed/hardhat' -import { HardhatEthersSigner } from '@nomicfoundation/hardhat-ethers/signers' -import { expect } from 'chai' -import { ethers } from 'hardhat' -import hre from 'hardhat' - -describe('Legacy Dispute Manager', () => { - let legacyDisputeManager: LegacyDisputeManager - let graphToken: L2GraphToken - let staking: HorizonStaking - - let snapshotId: string - - let governor: HardhatEthersSigner - let arbitrator: HardhatEthersSigner - let indexer: HardhatEthersSigner - let fisherman: HardhatEthersSigner - - let disputeDeposit: bigint - - const graph = hre.graph() - - // We have to use Aribtrm Sepolia since we're testing an already deployed contract but running on a hardhat fork - const chainId = 421614 - - before(async () => { - governor = await graph.accounts.getGovernor() - ;[arbitrator, fisherman] = await graph.accounts.getTestAccounts() - - // Get contract instances with correct types - legacyDisputeManager = graph.subgraphService.contracts.LegacyDisputeManager - graphToken = graph.horizon.contracts.GraphToken - staking = graph.horizon.contracts.HorizonStaking - - // Set GRT balances - await setGRTBalance(graph.provider, graphToken.target, fisherman.address, ethers.parseEther('100000')) - }) - - beforeEach(async () => { - // Take a snapshot before each test - snapshotId = await ethers.provider.send('evm_snapshot', []) - - // Legacy dispute manager - disputeDeposit = ethers.parseEther('10000') - - // Set arbitrator - await 
legacyDisputeManager.connect(governor).setArbitrator(arbitrator.address) - }) - - afterEach(async () => { - // Revert to the snapshot after each test - await ethers.provider.send('evm_revert', [snapshotId]) - }) - - describe('Indexing Disputes', () => { - let allocationId: string - - beforeEach(async () => { - // Get Indexer - const indexerFixture = indexers[0] - indexer = await ethers.getSigner(indexerFixture.address) - - // Get allocation - allocationId = indexerFixture.legacyAllocations[0].allocationID - }) - - it('should allow creating and accepting indexing disputes', async () => { - // Create an indexing dispute - await graphToken.connect(fisherman).approve(legacyDisputeManager.target, disputeDeposit) - await legacyDisputeManager.connect(fisherman).createIndexingDispute(allocationId, disputeDeposit) - const disputeId = generateLegacyIndexingDisputeId(allocationId) - - // Verify dispute was created - const disputeExists = await legacyDisputeManager.isDisputeCreated(disputeId) - expect(disputeExists).to.be.true - - // Get state before slashing - const idxSlashingPercentage = 25000n - const indexerStakeBefore = (await staking.getServiceProvider(indexer.address)).tokensStaked - const slashedAmount = (indexerStakeBefore * idxSlashingPercentage) / 1_000_000n - const fishermanBalanceBefore = await graphToken.balanceOf(fisherman.address) - - // Accept the dispute - await legacyDisputeManager.connect(arbitrator).acceptDispute(disputeId) - - // Verify indexer was slashed for 2.5% of their stake - const indexerStake = (await staking.getServiceProvider(indexer.address)).tokensStaked - expect(indexerStake).to.equal(indexerStakeBefore - slashedAmount, 'Indexer stake was not slashed correctly') - - // Verify fisherman received their deposit and 50% of the slashed amount - const fishermanBalance = await graphToken.balanceOf(fisherman.address) - expect(fishermanBalance).to.equal( - fishermanBalanceBefore + slashedAmount / 2n + disputeDeposit, - 'Fisherman balance was not 
updated correctly', - ) - }) - }) - - describe('Query Disputes', () => { - let allocationPrivateKey: string - let subgraphDeploymentId: string - - beforeEach(async () => { - // Get Indexer - const indexerFixture = indexers[0] - indexer = await ethers.getSigner(indexerFixture.address) - - // Get allocation - const allocation = indexerFixture.legacyAllocations[0] - allocationPrivateKey = allocation.allocationPrivateKey - subgraphDeploymentId = allocation.subgraphDeploymentID - }) - - it('should allow creating and accepting query disputes', async () => { - // Create attestation data - const queryHash = ethers.keccak256(ethers.toUtf8Bytes('test-query')) - const responseHash = ethers.keccak256(ethers.toUtf8Bytes('test-response')) - const attestationData = await generateAttestationData( - queryHash, - responseHash, - subgraphDeploymentId, - allocationPrivateKey, - await legacyDisputeManager.getAddress(), - chainId, - ) - - // Create a query dispute - await graphToken.connect(fisherman).approve(legacyDisputeManager.target, disputeDeposit) - await legacyDisputeManager.connect(fisherman).createQueryDispute(attestationData, disputeDeposit) - const disputeId = generateLegacyQueryDisputeId( - queryHash, - responseHash, - subgraphDeploymentId, - indexer.address, - fisherman.address, - ) - - // Verify dispute was created - const disputeExists = await legacyDisputeManager.isDisputeCreated(disputeId) - expect(disputeExists).to.be.true - - // Get state before slashing - const qrySlashingPercentage = 25000n - const indexerStakeBefore = (await staking.getServiceProvider(indexer.address)).tokensStaked - const slashedAmount = (indexerStakeBefore * qrySlashingPercentage) / 1_000_000n - const fishermanBalanceBefore = await graphToken.balanceOf(fisherman.address) - - // Accept the dispute - await legacyDisputeManager.connect(arbitrator).acceptDispute(disputeId) - - // Verify indexer was slashed for 2.5% of their stake - const indexerStake = (await 
staking.getServiceProvider(indexer.address)).tokensStaked - expect(indexerStake).to.equal(indexerStakeBefore - slashedAmount, 'Indexer stake was not slashed correctly') - - // Verify fisherman received their deposit and 50% of the slashed amount - const fishermanBalance = await graphToken.balanceOf(fisherman.address) - expect(fishermanBalance).to.equal( - fishermanBalanceBefore + slashedAmount / 2n + disputeDeposit, - 'Fisherman balance was not updated correctly', - ) - }) - }) - - describe('Query Dispute Conflict', () => { - let allocationPrivateKey: string - let subgraphDeploymentId: string - - beforeEach(async () => { - // Get Indexer - const indexerFixture = indexers[0] - indexer = await ethers.getSigner(indexerFixture.address) - - // Get allocation - const allocation = indexerFixture.legacyAllocations[0] - allocationPrivateKey = allocation.allocationPrivateKey - subgraphDeploymentId = allocation.subgraphDeploymentID - }) - - it('should allow creating conflicting query disputes', async () => { - // Create first attestation data - const queryHash = ethers.keccak256(ethers.toUtf8Bytes('test-query')) - const responseHash1 = ethers.keccak256(ethers.toUtf8Bytes('test-response-1')) - const attestationData1 = await generateAttestationData( - queryHash, - responseHash1, - subgraphDeploymentId, - allocationPrivateKey, - await legacyDisputeManager.getAddress(), - chainId, - ) - - // Create second attestation data with different query/response - const responseHash2 = ethers.keccak256(ethers.toUtf8Bytes('test-response-2')) - const attestationData2 = await generateAttestationData( - queryHash, - responseHash2, - subgraphDeploymentId, - allocationPrivateKey, - await legacyDisputeManager.getAddress(), - chainId, - ) - - // Create query dispute - await legacyDisputeManager.connect(fisherman).createQueryDisputeConflict(attestationData1, attestationData2) - - // Create dispute IDs - const disputeId1 = generateLegacyQueryDisputeId( - queryHash, - responseHash1, - 
subgraphDeploymentId, - indexer.address, - fisherman.address, - ) - const disputeId2 = generateLegacyQueryDisputeId( - queryHash, - responseHash2, - subgraphDeploymentId, - indexer.address, - fisherman.address, - ) - - // Verify both disputes were created - const disputeExists1 = await legacyDisputeManager.isDisputeCreated(disputeId1) - const disputeExists2 = await legacyDisputeManager.isDisputeCreated(disputeId2) - expect(disputeExists1).to.be.true - expect(disputeExists2).to.be.true - - // Get state before slashing - const qrySlashingPercentage = 25000n - const indexerStakeBefore = (await staking.getServiceProvider(indexer.address)).tokensStaked - const slashedAmount = (indexerStakeBefore * qrySlashingPercentage) / 1_000_000n - const fishermanBalanceBefore = await graphToken.balanceOf(fisherman.address) - - // Accept one dispute - await legacyDisputeManager.connect(arbitrator).acceptDispute(disputeId1) - - // Verify indexer was slashed for 2.5% of their stake - const indexerStake = (await staking.getServiceProvider(indexer.address)).tokensStaked - expect(indexerStake).to.equal(indexerStakeBefore - slashedAmount, 'Indexer stake was not slashed correctly') - - // Verify fisherman received 50% of the slashed amount - const fishermanBalance = await graphToken.balanceOf(fisherman.address) - expect(fishermanBalance).to.equal( - fishermanBalanceBefore + slashedAmount / 2n, - 'Fisherman balance was not updated correctly', - ) - }) - }) -}) diff --git a/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol b/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol index dcaaf77e5..31f18bbe0 100644 --- a/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol +++ b/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol @@ -6,10 +6,10 @@ import { GraphPayments } from "@graphprotocol/horizon/contracts/payments/GraphPa import { GraphProxy } from "@graphprotocol/contracts/contracts/upgrades/GraphProxy.sol"; import { GraphProxyAdmin } from 
"@graphprotocol/contracts/contracts/upgrades/GraphProxyAdmin.sol"; import { HorizonStaking } from "@graphprotocol/horizon/contracts/staking/HorizonStaking.sol"; -import { HorizonStakingExtension } from "@graphprotocol/horizon/contracts/staking/HorizonStakingExtension.sol"; import { IHorizonStaking } from "@graphprotocol/interfaces/contracts/horizon/IHorizonStaking.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { GraphTallyCollector } from "@graphprotocol/horizon/contracts/payments/collectors/GraphTallyCollector.sol"; +import { RecurringCollector } from "@graphprotocol/horizon/contracts/payments/collectors/RecurringCollector.sol"; import { PaymentsEscrow } from "@graphprotocol/horizon/contracts/payments/PaymentsEscrow.sol"; import { UnsafeUpgrades } from "@openzeppelin/foundry-upgrades/src/Upgrades.sol"; @@ -39,9 +39,9 @@ abstract contract SubgraphBaseTest is Utils, Constants { GraphPayments graphPayments; IPaymentsEscrow escrow; GraphTallyCollector graphTallyCollector; + RecurringCollector recurringCollector; HorizonStaking private stakingBase; - HorizonStakingExtension private stakingExtension; MockCuration curation; MockGRTToken token; @@ -152,12 +152,20 @@ abstract contract SubgraphBaseTest is Utils, Constants { address(controller), REVOKE_SIGNER_THAWING_PERIOD ); + recurringCollector = new RecurringCollector( + "RecurringCollector", + "1", + address(controller), + REVOKE_SIGNER_THAWING_PERIOD + ); + address subgraphServiceImplementation = address( new SubgraphService( address(controller), address(disputeManager), address(graphTallyCollector), - address(curation) + address(curation), + address(recurringCollector) ) ); address subgraphServiceProxy = UnsafeUpgrades.deployTransparentProxy( @@ -170,8 +178,7 @@ abstract contract SubgraphBaseTest is Utils, Constants { ); subgraphService = SubgraphService(subgraphServiceProxy); - stakingExtension = new HorizonStakingExtension(address(controller), 
address(subgraphService)); - stakingBase = new HorizonStaking(address(controller), address(stakingExtension), address(subgraphService)); + stakingBase = new HorizonStaking(address(controller), address(subgraphService)); graphPayments = new GraphPayments{ salt: saltGraphPayments }(address(controller), PROTOCOL_PAYMENT_CUT); escrow = new PaymentsEscrow{ salt: saltEscrow }(address(controller), WITHDRAW_ESCROW_THAWING_PERIOD); diff --git a/packages/subgraph-service/test/unit/disputeManager/DisputeManager.t.sol b/packages/subgraph-service/test/unit/disputeManager/DisputeManager.t.sol index 8354e1cf0..7662dc1c3 100644 --- a/packages/subgraph-service/test/unit/disputeManager/DisputeManager.t.sol +++ b/packages/subgraph-service/test/unit/disputeManager/DisputeManager.t.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; -import { MathUtils } from "@graphprotocol/horizon/contracts/libraries/MathUtils.sol"; +import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-service/IDisputeManager.sol"; import { IAttestation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAttestation.sol"; @@ -203,81 +203,6 @@ contract DisputeManagerTest is SubgraphServiceSharedTest { return _disputeId; } - struct Balances { - uint256 indexer; - uint256 fisherman; - uint256 arbitrator; - uint256 disputeManager; - uint256 staking; - } - - function _createAndAcceptLegacyDispute( - address _allocationId, - address _fisherman, - uint256 _tokensSlash, - uint256 _tokensRewards - ) internal returns (bytes32) { - (, address arbitrator, ) = vm.readCallers(); - address indexer = staking.getAllocation(_allocationId).indexer; - - Balances memory beforeBalances = Balances({ - indexer: token.balanceOf(indexer), - fisherman: token.balanceOf(_fisherman), - arbitrator: token.balanceOf(arbitrator), - 
disputeManager: token.balanceOf(address(disputeManager)), - staking: token.balanceOf(address(staking)) - }); - - vm.expectEmit(address(disputeManager)); - emit IDisputeManager.LegacyDisputeCreated( - keccak256(abi.encodePacked(_allocationId, "legacy")), - indexer, - _fisherman, - _allocationId, - _tokensSlash, - _tokensRewards - ); - vm.expectEmit(address(disputeManager)); - emit IDisputeManager.DisputeAccepted( - keccak256(abi.encodePacked(_allocationId, "legacy")), - indexer, - _fisherman, - _tokensRewards - ); - bytes32 _disputeId = disputeManager.createAndAcceptLegacyDispute( - _allocationId, - _fisherman, - _tokensSlash, - _tokensRewards - ); - - Balances memory afterBalances = Balances({ - indexer: token.balanceOf(indexer), - fisherman: token.balanceOf(_fisherman), - arbitrator: token.balanceOf(arbitrator), - disputeManager: token.balanceOf(address(disputeManager)), - staking: token.balanceOf(address(staking)) - }); - - assertEq(afterBalances.indexer, beforeBalances.indexer); - assertEq(afterBalances.fisherman, beforeBalances.fisherman + _tokensRewards); - assertEq(afterBalances.arbitrator, beforeBalances.arbitrator); - assertEq(afterBalances.disputeManager, beforeBalances.disputeManager); - assertEq(afterBalances.staking, beforeBalances.staking - _tokensSlash); - - IDisputeManager.Dispute memory dispute = _getDispute(_disputeId); - assertEq(dispute.indexer, indexer); - assertEq(dispute.fisherman, _fisherman); - assertEq(dispute.deposit, 0); - assertEq(dispute.relatedDisputeId, bytes32(0)); - assertEq(uint8(dispute.disputeType), uint8(IDisputeManager.DisputeType.LegacyDispute)); - assertEq(uint8(dispute.status), uint8(IDisputeManager.DisputeStatus.Accepted)); - assertEq(dispute.createdAt, block.timestamp); - assertEq(dispute.stakeSnapshot, 0); - - return _disputeId; - } - struct BeforeValuesCreateQueryDisputeConflict { IAttestation.State attestation1; IAttestation.State attestation2; @@ -423,10 +348,7 @@ contract DisputeManagerTest is 
SubgraphServiceSharedTest { uint32 provisionMaxVerifierCut = staking .getProvision(dispute.indexer, address(subgraphService)) .maxVerifierCut; - uint256 fishermanRewardPercentage = MathUtils.min( - disputeManager.fishermanRewardCut(), - provisionMaxVerifierCut - ); + uint256 fishermanRewardPercentage = Math.min(disputeManager.fishermanRewardCut(), provisionMaxVerifierCut); fishermanReward = _tokensSlash.mulPPM(fishermanRewardPercentage); } diff --git a/packages/subgraph-service/test/unit/disputeManager/disputes/indexingFee/create.t.sol b/packages/subgraph-service/test/unit/disputeManager/disputes/indexingFee/create.t.sol new file mode 100644 index 000000000..73ca400bf --- /dev/null +++ b/packages/subgraph-service/test/unit/disputeManager/disputes/indexingFee/create.t.sol @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-service/IDisputeManager.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "../../../subgraphService/indexing-agreement/shared.t.sol"; + +contract DisputeManagerIndexingFeeCreateDisputeTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * HELPERS + */ + + /// @dev Sets up an indexer with an accepted indexing agreement that has been collected on. + /// Returns the agreement ID and indexer state needed to create a dispute. 
+ function _setupCollectedAgreement( + Seed memory seed, + uint256 unboundedTokensCollected + ) internal returns (bytes16 agreementId, IndexerState memory indexerState) { + Context storage ctx = _newCtx(seed); + indexerState = _withIndexer(ctx); + (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + agreementId = acceptedAgreementId; + + // Set payments destination + resetPrank(indexerState.addr); + subgraphService.setPaymentsDestination(indexerState.addr); + + // Mock the collect call to succeed with some tokens + uint256 tokensCollected = bound(unboundedTokensCollected, 1, indexerState.tokens / STAKE_TO_FEES_RATIO); + bytes memory data = abi.encode( + IRecurringCollector.CollectParams({ + agreementId: acceptedAgreementId, + collectionId: bytes32(uint256(uint160(indexerState.allocationId))), + tokens: 0, + dataServiceCut: 0, + receiverDestination: indexerState.addr, + maxSlippage: type(uint256).max + }) + ); + vm.mockCall( + address(recurringCollector), + abi.encodeWithSelector(IPaymentsCollector.collect.selector, IGraphPayments.PaymentTypes.IndexingFee, data), + abi.encode(tokensCollected) + ); + + skip(1); // Make agreement collectable + + // Collect to set lastCollectionAt > 0 + subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1( + acceptedAgreementId, + 100, // entities + // forge-lint: disable-next-line(unsafe-typecast) + bytes32("POI1"), + epochManager.currentEpochBlock(), + bytes("") + ) + ); + + // The collect mock prevented the real RecurringCollector from updating lastCollectionAt. + // Mock getAgreement to return lastCollectionAt > 0 so the dispute can be created. 
+ IRecurringCollector.AgreementData memory agreementData = recurringCollector.getAgreement(acceptedAgreementId); + agreementData.lastCollectionAt = uint64(block.timestamp); + vm.mockCall( + address(recurringCollector), + abi.encodeWithSelector(recurringCollector.getAgreement.selector, acceptedAgreementId), + abi.encode(agreementData) + ); + } + + /* + * TESTS + */ + + function test_IndexingFee_Create_Dispute(Seed memory seed, uint256 unboundedTokensCollected) public { + (bytes16 agreementId, IndexerState memory indexerState) = _setupCollectedAgreement( + seed, + unboundedTokensCollected + ); + + // Create dispute as fisherman + resetPrank(users.fisherman); + token.approve(address(disputeManager), disputeManager.disputeDeposit()); + + bytes32 disputeId = disputeManager.createIndexingFeeDisputeV1( + agreementId, + // forge-lint: disable-next-line(unsafe-typecast) + bytes32("disputePOI"), + 200, + block.number + ); + + assertTrue(disputeManager.isDisputeCreated(disputeId)); + + // Verify dispute fields + ( + address indexer, + address fisherman, + uint256 deposit, + , + IDisputeManager.DisputeType disputeType, + IDisputeManager.DisputeStatus status, + , + , + uint256 stakeSnapshot + ) = disputeManager.disputes(disputeId); + + assertEq(indexer, indexerState.addr); + assertEq(fisherman, users.fisherman); + assertEq(deposit, disputeManager.disputeDeposit()); + assertEq(uint8(disputeType), uint8(IDisputeManager.DisputeType.IndexingFeeDispute)); + assertEq(uint8(status), uint8(IDisputeManager.DisputeStatus.Pending)); + assertTrue(stakeSnapshot > 0); + } + + function test_IndexingFee_Create_Dispute_RevertWhen_NotCollected(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // Attempt to create dispute without collecting first (lastCollectionAt == 0) + resetPrank(users.fisherman); + 
token.approve(address(disputeManager), disputeManager.disputeDeposit()); + + vm.expectRevert( + abi.encodeWithSelector( + IDisputeManager.DisputeManagerIndexingAgreementNotDisputable.selector, + acceptedAgreementId + ) + ); + // forge-lint: disable-next-line(unsafe-typecast) + disputeManager.createIndexingFeeDisputeV1(acceptedAgreementId, bytes32("POI"), 100, block.number); + } + + function test_IndexingFee_Create_Dispute_EmitsEvent(Seed memory seed, uint256 unboundedTokensCollected) public { + (bytes16 agreementId, IndexerState memory indexerState) = _setupCollectedAgreement( + seed, + unboundedTokensCollected + ); + + // Read the payer from the (mocked) agreement data + IRecurringCollector.AgreementData memory agreementData = recurringCollector.getAgreement(agreementId); + + resetPrank(users.fisherman); + uint256 deposit = disputeManager.disputeDeposit(); + token.approve(address(disputeManager), deposit); + + // forge-lint: disable-next-line(unsafe-typecast) + bytes32 poi = bytes32("disputePOI"); + uint256 entities = 200; + uint256 blockNumber = block.number; + + bytes32 expectedDisputeId = keccak256( + abi.encodePacked("IndexingFeeDisputeWithAgreement", agreementId, poi, entities, blockNumber) + ); + + vm.expectEmit(address(disputeManager)); + emit IDisputeManager.IndexingFeeDisputeCreated( + expectedDisputeId, + indexerState.addr, + users.fisherman, + deposit, + agreementData.payer, + agreementId, + poi, + entities, + indexerState.tokens // stakeSnapshot + ); + + bytes32 disputeId = disputeManager.createIndexingFeeDisputeV1(agreementId, poi, entities, blockNumber); + assertEq(disputeId, expectedDisputeId); + } + + function test_IndexingFee_Create_Dispute_RevertWhen_AlreadyCreated( + Seed memory seed, + uint256 unboundedTokensCollected + ) public { + (bytes16 agreementId, ) = _setupCollectedAgreement(seed, unboundedTokensCollected); + + // Create first dispute + resetPrank(users.fisherman); + token.approve(address(disputeManager), disputeManager.disputeDeposit() 
* 2); + + // forge-lint: disable-next-line(unsafe-typecast) + bytes32 disputeId = disputeManager.createIndexingFeeDisputeV1(agreementId, bytes32("POI"), 100, block.number); + + // Attempt to create a duplicate dispute + vm.expectRevert( + abi.encodeWithSelector(IDisputeManager.DisputeManagerDisputeAlreadyCreated.selector, disputeId) + ); + // forge-lint: disable-next-line(unsafe-typecast) + disputeManager.createIndexingFeeDisputeV1(agreementId, bytes32("POI"), 100, block.number); + } +} diff --git a/packages/subgraph-service/test/unit/disputeManager/disputes/legacy.t.sol b/packages/subgraph-service/test/unit/disputeManager/disputes/legacy.t.sol deleted file mode 100644 index c6f57df93..000000000 --- a/packages/subgraph-service/test/unit/disputeManager/disputes/legacy.t.sol +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.27; - -import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; -import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-service/IDisputeManager.sol"; -import { DisputeManagerTest } from "../DisputeManager.t.sol"; - -contract DisputeManagerLegacyDisputeTest is DisputeManagerTest { - using PPMMath for uint256; - - bytes32 private requestCid = keccak256(abi.encodePacked("Request CID")); - bytes32 private responseCid = keccak256(abi.encodePacked("Response CID")); - bytes32 private subgraphDeploymentId = keccak256(abi.encodePacked("Subgraph Deployment ID")); - - /* - * TESTS - */ - - function test_LegacyDispute( - uint256 tokensStaked, - uint256 tokensProvisioned, - uint256 tokensSlash, - uint256 tokensRewards - ) public { - vm.assume(tokensStaked <= MAX_TOKENS); - vm.assume(tokensStaked >= MINIMUM_PROVISION_TOKENS); - tokensProvisioned = bound(tokensProvisioned, MINIMUM_PROVISION_TOKENS, tokensStaked); - tokensSlash = bound(tokensSlash, 2, tokensProvisioned); - tokensRewards = bound(tokensRewards, 1, tokensSlash.mulPPM(FISHERMAN_REWARD_PERCENTAGE)); - - // setup 
indexer state - resetPrank(users.indexer); - _stake(tokensStaked); - _setStorageAllocationHardcoded(users.indexer, allocationId, tokensStaked - tokensProvisioned); - _provision(users.indexer, tokensProvisioned, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); - - resetPrank(users.arbitrator); - _createAndAcceptLegacyDispute(allocationId, users.fisherman, tokensSlash, tokensRewards); - } - - function test_LegacyDispute_RevertIf_NotArbitrator() public useIndexer { - vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerNotArbitrator.selector)); - disputeManager.createAndAcceptLegacyDispute(allocationId, users.fisherman, 0, 0); - } - - function test_LegacyDispute_RevertIf_AllocationNotFound() public useIndexer { - resetPrank(users.arbitrator); - vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerIndexerNotFound.selector, address(0))); - disputeManager.createAndAcceptLegacyDispute(address(0), users.fisherman, 0, 0); - } -} diff --git a/packages/subgraph-service/test/unit/libraries/IndexingAgreement.t.sol b/packages/subgraph-service/test/unit/libraries/IndexingAgreement.t.sol new file mode 100644 index 000000000..a5270e436 --- /dev/null +++ b/packages/subgraph-service/test/unit/libraries/IndexingAgreement.t.sol @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; +import { IndexingAgreement } from "../../../contracts/libraries/IndexingAgreement.sol"; +import { Directory } from "../../../contracts/utilities/Directory.sol"; + +contract IndexingAgreementTest is Test { + IndexingAgreement.StorageManager private _storageManager; + address private _mockCollector; + + function setUp() public { + _mockCollector = makeAddr("mockCollector"); + } + + 
function test_IndexingAgreement_Get(bytes16 agreementId) public { + vm.assume(agreementId != bytes16(0)); + + vm.mockCall( + address(this), + abi.encodeWithSelector(Directory.recurringCollector.selector), + abi.encode(IRecurringCollector(_mockCollector)) + ); + + IRecurringCollector.AgreementData memory collectorAgreement; + vm.mockCall( + _mockCollector, + abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), + abi.encode(collectorAgreement) + ); + + vm.expectRevert(abi.encodeWithSelector(IndexingAgreement.IndexingAgreementNotActive.selector, agreementId)); + IndexingAgreement.get(_storageManager, agreementId); + + collectorAgreement.dataService = address(this); + vm.mockCall( + _mockCollector, + abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), + abi.encode(collectorAgreement) + ); + + IIndexingAgreement.AgreementWrapper memory wrapper = IndexingAgreement.get(_storageManager, agreementId); + assertEq(wrapper.collectorAgreement.dataService, address(this)); + } + + function test_IndexingAgreement_OnCloseAllocation(bytes16 agreementId, address allocationId, bool stale) public { + vm.assume(agreementId != bytes16(0)); + vm.assume(allocationId != address(0)); + + delete _storageManager; + vm.clearMockedCalls(); + + // No active agreement for allocation ID, returns early, no assertions needed + IndexingAgreement.onCloseAllocation(_storageManager, allocationId, stale); + + // Active agreement for allocation ID, but collector agreement is not set, returns early, no assertions needed + _storageManager.allocationToActiveAgreementId[allocationId] = agreementId; + + IRecurringCollector.AgreementData memory collectorAgreement; + + vm.mockCall( + address(this), + abi.encodeWithSelector(Directory.recurringCollector.selector), + abi.encode(IRecurringCollector(_mockCollector)) + ); + + vm.mockCall( + _mockCollector, + abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), + 
abi.encode(collectorAgreement) + ); + + IndexingAgreement.onCloseAllocation(_storageManager, allocationId, stale); + + // Active agreement for allocation ID, collector agreement is set, should cancel the agreement + collectorAgreement.dataService = address(this); + collectorAgreement.state = IRecurringCollector.AgreementState.Accepted; + + _storageManager.agreements[agreementId] = IIndexingAgreement.State({ + allocationId: allocationId, + version: IIndexingAgreement.IndexingAgreementVersion.V1 + }); + + vm.mockCall( + _mockCollector, + abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), + abi.encode(collectorAgreement) + ); + + vm.expectCall(_mockCollector, abi.encodeWithSelector(IRecurringCollector.cancel.selector, agreementId)); + + IndexingAgreement.onCloseAllocation(_storageManager, allocationId, stale); + } + + function test_IndexingAgreement_StorageManagerLocation() public pure { + assertEq( + IndexingAgreement.INDEXING_AGREEMENT_STORAGE_MANAGER_LOCATION, + keccak256( + abi.encode( + uint256(keccak256("graphprotocol.subgraph-service.storage.StorageManager.IndexingAgreement")) - 1 + ) + ) & ~bytes32(uint256(0xff)) + ); + } +} diff --git a/packages/subgraph-service/test/unit/libraries/LegacyAllocationLibrary.t.sol b/packages/subgraph-service/test/unit/libraries/LegacyAllocationLibrary.t.sol deleted file mode 100644 index 5cb34703e..000000000 --- a/packages/subgraph-service/test/unit/libraries/LegacyAllocationLibrary.t.sol +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.27; - -import { Test } from "forge-std/Test.sol"; -import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; -import { LegacyAllocationHarness } from "../mocks/LegacyAllocationHarness.sol"; - -contract LegacyAllocationLibraryTest is Test { - LegacyAllocationHarness private harness; - address private allocationId; - - function setUp() public { - harness = new 
LegacyAllocationHarness(); - allocationId = makeAddr("allocationId"); - } - - function test_LegacyAllocation_Get() public { - // forge-lint: disable-next-line(unsafe-typecast) - harness.migrate(address(1), allocationId, bytes32("sdid")); - - ILegacyAllocation.State memory alloc = harness.get(allocationId); - assertEq(alloc.indexer, address(1)); - // forge-lint: disable-next-line(unsafe-typecast) - assertEq(alloc.subgraphDeploymentId, bytes32("sdid")); - } - - function test_LegacyAllocation_Get_RevertWhen_NotExists() public { - address nonExistent = makeAddr("nonExistent"); - vm.expectRevert(abi.encodeWithSelector(ILegacyAllocation.LegacyAllocationDoesNotExist.selector, nonExistent)); - harness.get(nonExistent); - } -} diff --git a/packages/subgraph-service/test/unit/mocks/LegacyAllocationHarness.sol b/packages/subgraph-service/test/unit/mocks/LegacyAllocationHarness.sol deleted file mode 100644 index 30b4147aa..000000000 --- a/packages/subgraph-service/test/unit/mocks/LegacyAllocationHarness.sol +++ /dev/null @@ -1,20 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.27; - -import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; -import { LegacyAllocation } from "../../../contracts/libraries/LegacyAllocation.sol"; - -/// @notice Test harness to exercise LegacyAllocation library guard branches directly -contract LegacyAllocationHarness { - using LegacyAllocation for mapping(address => ILegacyAllocation.State); - - mapping(address => ILegacyAllocation.State) private _legacyAllocations; - - function migrate(address indexer, address allocationId, bytes32 subgraphDeploymentId) external { - _legacyAllocations.migrate(indexer, allocationId, subgraphDeploymentId); - } - - function get(address allocationId) external view returns (ILegacyAllocation.State memory) { - return _legacyAllocations.get(allocationId); - } -} diff --git a/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol 
b/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol index 7ae75636f..b6da3bb75 100644 --- a/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol +++ b/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol @@ -3,7 +3,6 @@ pragma solidity ^0.8.27; import { IRewardsManager } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsManager.sol"; import { IIssuanceAllocationDistribution } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceAllocationDistribution.sol"; -import { IRewardsEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IRewardsEligibility.sol"; import { IRewardsIssuer } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsIssuer.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; @@ -94,10 +93,6 @@ contract MockRewardsManager is IRewardsManager { return address(0); } - function getRewardsEligibilityOracle() external pure returns (IRewardsEligibility) { - return IRewardsEligibility(address(0)); - } - function getNewRewardsPerSignal() external view returns (uint256) {} function getAccRewardsPerSignal() external view returns (uint256) {} @@ -116,10 +111,6 @@ contract MockRewardsManager is IRewardsManager { function getRawIssuancePerBlock() external view returns (uint256) {} - // -- Setters -- - - function setRewardsEligibilityOracle(address newRewardsEligibilityOracle) external {} - // -- Updates -- function updateAccRewardsPerSignal() external returns (uint256) {} diff --git a/packages/subgraph-service/test/unit/shared/HorizonStakingShared.t.sol b/packages/subgraph-service/test/unit/shared/HorizonStakingShared.t.sol index 093890d3c..c48622106 100644 --- a/packages/subgraph-service/test/unit/shared/HorizonStakingShared.t.sol +++ b/packages/subgraph-service/test/unit/shared/HorizonStakingShared.t.sol @@ -3,7 +3,6 @@ pragma solidity ^0.8.27; import { IGraphPayments } from 
"@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; -import { IHorizonStakingExtension } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingExtension.sol"; import { SubgraphBaseTest } from "../SubgraphBaseTest.t.sol"; @@ -36,6 +35,12 @@ abstract contract HorizonStakingSharedTest is SubgraphBaseTest { staking.addToProvision(_indexer, address(subgraphService), _tokens); } + function _removeFromProvision(address _indexer, uint256 _tokens) internal { + staking.thaw(_indexer, address(subgraphService), _tokens); + skip(staking.getProvision(_indexer, address(subgraphService)).thawingPeriod + 1); + staking.deprovision(_indexer, address(subgraphService), 0); + } + function _delegate(address _indexer, address _verifier, uint256 _tokens, uint256 _minSharesOut) internal { staking.delegate(_indexer, _verifier, _tokens, _minSharesOut); } @@ -75,68 +80,6 @@ abstract contract HorizonStakingSharedTest is SubgraphBaseTest { staking.setProvisionParameters(_indexer, _verifier, _maxVerifierCut, _thawingPeriod); } - function _setStorageAllocationHardcoded(address indexer, address allocationId, uint256 tokens) internal { - IHorizonStakingExtension.Allocation memory allocation = IHorizonStakingExtension.Allocation({ - indexer: indexer, - // forge-lint: disable-next-line(unsafe-typecast) - subgraphDeploymentID: bytes32("0x12344321"), - tokens: tokens, - createdAtEpoch: 1234, - closedAtEpoch: 1235, - collectedFees: 1234, - __DEPRECATED_effectiveAllocation: 1222234, - accRewardsPerAllocatedToken: 1233334, - distributedRebates: 1244434 - }); - - // __DEPRECATED_allocations - uint256 allocationsSlot = 15; - bytes32 allocationBaseSlot = keccak256(abi.encode(allocationId, allocationsSlot)); - vm.store(address(staking), allocationBaseSlot, bytes32(uint256(uint160(allocation.indexer)))); - vm.store(address(staking), 
bytes32(uint256(allocationBaseSlot) + 1), allocation.subgraphDeploymentID); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 2), bytes32(tokens)); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 3), bytes32(allocation.createdAtEpoch)); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 4), bytes32(allocation.closedAtEpoch)); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 5), bytes32(allocation.collectedFees)); - vm.store( - address(staking), - bytes32(uint256(allocationBaseSlot) + 6), - bytes32(allocation.__DEPRECATED_effectiveAllocation) - ); - vm.store( - address(staking), - bytes32(uint256(allocationBaseSlot) + 7), - bytes32(allocation.accRewardsPerAllocatedToken) - ); - vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 8), bytes32(allocation.distributedRebates)); - - // _serviceProviders - uint256 serviceProviderSlot = 14; - bytes32 serviceProviderBaseSlot = keccak256(abi.encode(allocation.indexer, serviceProviderSlot)); - uint256 currentTokensStaked = uint256(vm.load(address(staking), serviceProviderBaseSlot)); - uint256 currentTokensProvisioned = uint256( - vm.load(address(staking), bytes32(uint256(serviceProviderBaseSlot) + 1)) - ); - vm.store( - address(staking), - bytes32(uint256(serviceProviderBaseSlot) + 0), - bytes32(currentTokensStaked + tokens) - ); - vm.store( - address(staking), - bytes32(uint256(serviceProviderBaseSlot) + 1), - bytes32(currentTokensProvisioned + tokens) - ); - - // __DEPRECATED_subgraphAllocations - uint256 subgraphsAllocationsSlot = 16; - bytes32 subgraphAllocationsBaseSlot = keccak256( - abi.encode(allocation.subgraphDeploymentID, subgraphsAllocationsSlot) - ); - uint256 currentAllocatedTokens = uint256(vm.load(address(staking), subgraphAllocationsBaseSlot)); - vm.store(address(staking), subgraphAllocationsBaseSlot, bytes32(currentAllocatedTokens + tokens)); - } - function _stakeTo(address _indexer, uint256 _tokens) internal { 
token.approve(address(staking), _tokens); staking.stakeTo(_indexer, _tokens); diff --git a/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol b/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol index bd3091935..5002900f1 100644 --- a/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol @@ -8,12 +8,12 @@ import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizo import { IGraphTallyCollector } from "@graphprotocol/interfaces/contracts/horizon/IGraphTallyCollector.sol"; import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; import { LinkedList } from "@graphprotocol/horizon/contracts/libraries/LinkedList.sol"; -import { IDataServiceFees } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceFees.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; import { IAllocationManager } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol"; import { ILinkedList } from "@graphprotocol/interfaces/contracts/horizon/internal/ILinkedList.sol"; import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; +import { StakeClaims } from "@graphprotocol/horizon/contracts/data-service/libraries/StakeClaims.sol"; import { Allocation } from "../../../contracts/libraries/Allocation.sol"; import { SubgraphServiceSharedTest } from "../shared/SubgraphServiceShared.t.sol"; @@ -202,7 +202,7 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { uint256 paymentCollected = 0; address allocationId; IndexingRewardsData memory indexingRewardsData; - CollectPaymentData memory collectPaymentDataBefore = 
_collectPaymentDataBefore(_indexer); + CollectPaymentData memory collectPaymentDataBefore = _collectPaymentData(_indexer); if (_paymentType == IGraphPayments.PaymentTypes.QueryFee) { paymentCollected = _handleQueryFeeCollection(_indexer, _data); @@ -216,7 +216,7 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { // collect rewards subgraphService.collect(_indexer, _paymentType, _data); - CollectPaymentData memory collectPaymentDataAfter = _collectPaymentDataAfter(_indexer); + CollectPaymentData memory collectPaymentDataAfter = _collectPaymentData(_indexer); if (_paymentType == IGraphPayments.PaymentTypes.QueryFee) { _verifyQueryFeeCollection( @@ -237,42 +237,24 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { } } - function _collectPaymentDataBefore(address _indexer) private view returns (CollectPaymentData memory) { + function _collectPaymentData( + address _indexer + ) internal view returns (CollectPaymentData memory collectPaymentData) { address paymentsDestination = subgraphService.paymentsDestination(_indexer); - CollectPaymentData memory collectPaymentDataBefore; - collectPaymentDataBefore.rewardsDestinationBalance = token.balanceOf(paymentsDestination); - collectPaymentDataBefore.indexerProvisionBalance = staking.getProviderTokensAvailable( + collectPaymentData.rewardsDestinationBalance = token.balanceOf(paymentsDestination); + collectPaymentData.indexerProvisionBalance = staking.getProviderTokensAvailable( _indexer, address(subgraphService) ); - collectPaymentDataBefore.delegationPoolBalance = staking.getDelegatedTokensAvailable( + collectPaymentData.delegationPoolBalance = staking.getDelegatedTokensAvailable( _indexer, address(subgraphService) ); - collectPaymentDataBefore.indexerBalance = token.balanceOf(_indexer); - collectPaymentDataBefore.curationBalance = token.balanceOf(address(curation)); - collectPaymentDataBefore.lockedTokens = subgraphService.feesProvisionTracker(_indexer); - collectPaymentDataBefore.indexerStake = 
staking.getStake(_indexer); - return collectPaymentDataBefore; - } - - function _collectPaymentDataAfter(address _indexer) private view returns (CollectPaymentData memory) { - CollectPaymentData memory collectPaymentDataAfter; - address paymentsDestination = subgraphService.paymentsDestination(_indexer); - collectPaymentDataAfter.rewardsDestinationBalance = token.balanceOf(paymentsDestination); - collectPaymentDataAfter.indexerProvisionBalance = staking.getProviderTokensAvailable( - _indexer, - address(subgraphService) - ); - collectPaymentDataAfter.delegationPoolBalance = staking.getDelegatedTokensAvailable( - _indexer, - address(subgraphService) - ); - collectPaymentDataAfter.indexerBalance = token.balanceOf(_indexer); - collectPaymentDataAfter.curationBalance = token.balanceOf(address(curation)); - collectPaymentDataAfter.lockedTokens = subgraphService.feesProvisionTracker(_indexer); - collectPaymentDataAfter.indexerStake = staking.getStake(_indexer); - return collectPaymentDataAfter; + collectPaymentData.indexerBalance = token.balanceOf(_indexer); + collectPaymentData.curationBalance = token.balanceOf(address(curation)); + collectPaymentData.lockedTokens = subgraphService.feesProvisionTracker(_indexer); + collectPaymentData.indexerStake = staking.getStake(_indexer); + return collectPaymentData; } function _handleQueryFeeCollection( @@ -423,7 +405,7 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { // Check the stake claim ILinkedList.List memory claimsList = _getClaimList(_indexer); bytes32 claimId = _buildStakeClaimId(_indexer, claimsList.nonce - 1); - IDataServiceFees.StakeClaim memory stakeClaim = _getStakeClaim(claimId); + StakeClaims.StakeClaim memory stakeClaim = _getStakeClaim(claimId); uint64 disputePeriod = disputeManager.getDisputePeriod(); assertEq(stakeClaim.tokens, tokensToLock); assertEq(stakeClaim.createdAt, block.timestamp); @@ -494,16 +476,61 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { } function 
_migrateLegacyAllocation(address _indexer, address _allocationId, bytes32 _subgraphDeploymentId) internal { - vm.expectEmit(address(subgraphService)); - emit IAllocationManager.LegacyAllocationMigrated(_indexer, _allocationId, _subgraphDeploymentId); + // migrate fn was removed, we simulate history by manually setting the storage state + uint256 legacyAllocationsSlot = 208; + bytes32 legacyAllocationBaseSlot = keccak256(abi.encode(_allocationId, legacyAllocationsSlot)); - subgraphService.migrateLegacyAllocation(_indexer, _allocationId, _subgraphDeploymentId); + vm.store(address(subgraphService), legacyAllocationBaseSlot, bytes32(uint256(uint160(_indexer)))); + vm.store( + address(subgraphService), + bytes32(uint256(legacyAllocationBaseSlot) + 1), + bytes32(_subgraphDeploymentId) + ); ILegacyAllocation.State memory afterLegacyAllocation = subgraphService.getLegacyAllocation(_allocationId); assertEq(afterLegacyAllocation.indexer, _indexer); assertEq(afterLegacyAllocation.subgraphDeploymentId, _subgraphDeploymentId); } + /** + * @notice Sets a legacy allocation directly in HorizonStaking storage + * @dev The __DEPRECATED_allocations mapping is at storage slot 15 in HorizonStaking + * Use `forge inspect HorizonStaking storage-layout` to verify + * The LegacyAllocation struct has the following layout: + * - slot 0: indexer (address) + * - slot 1: subgraphDeploymentID (bytes32) + * - slot 2: tokens (uint256) + * - slot 3: createdAtEpoch (uint256) + * - slot 4: closedAtEpoch (uint256) + * - slot 5: collectedFees (uint256) + * - slot 6: __DEPRECATED_effectiveAllocation (uint256) + * - slot 7: accRewardsPerAllocatedToken (uint256) + * - slot 8: distributedRebates (uint256) + */ + function _setLegacyAllocationInStaking( + address _allocationId, + address _indexer, + bytes32 _subgraphDeploymentId + ) internal { + // Storage slot for __DEPRECATED_allocations mapping in HorizonStaking + uint256 allocationsSlot = 15; + bytes32 allocationBaseSlot = 
keccak256(abi.encode(_allocationId, allocationsSlot)); + + // Set indexer (slot 0) + vm.store(address(staking), allocationBaseSlot, bytes32(uint256(uint160(_indexer)))); + // Set subgraphDeploymentID (slot 1) + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 1), _subgraphDeploymentId); + // Set tokens (slot 2) - non-zero to indicate active allocation + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 2), bytes32(uint256(1000 ether))); + // Set createdAtEpoch (slot 3) - non-zero + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 3), bytes32(uint256(1))); + // Set closedAtEpoch (slot 4) - non-zero to indicate closed + vm.store(address(staking), bytes32(uint256(allocationBaseSlot) + 4), bytes32(uint256(10))); + + // Verify the allocation is now visible via isAllocation + assertTrue(staking.isAllocation(_allocationId)); + } + /* * HELPERS */ @@ -540,12 +567,12 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { } function _buildStakeClaimId(address _indexer, uint256 _nonce) private view returns (bytes32) { - return keccak256(abi.encodePacked(address(subgraphService), _indexer, _nonce)); + return StakeClaims.buildStakeClaimId(address(subgraphService), _indexer, _nonce); } - function _getStakeClaim(bytes32 _claimId) private view returns (IDataServiceFees.StakeClaim memory) { + function _getStakeClaim(bytes32 _claimId) private view returns (StakeClaims.StakeClaim memory) { (uint256 tokens, uint256 createdAt, uint256 releasableAt, bytes32 nextClaim) = subgraphService.claims(_claimId); - return IDataServiceFees.StakeClaim(tokens, createdAt, releasableAt, nextClaim); + return StakeClaims.StakeClaim(tokens, createdAt, releasableAt, nextClaim); } // This doesn't matter for testing because the metadata is not decoded onchain but it's expected to be of the form: diff --git a/packages/subgraph-service/test/unit/subgraphService/allocation/resize.t.sol 
b/packages/subgraph-service/test/unit/subgraphService/allocation/resize.t.sol index 40635570e..7b33537d2 100644 --- a/packages/subgraph-service/test/unit/subgraphService/allocation/resize.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/allocation/resize.t.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.27; import { SubgraphServiceTest } from "../SubgraphService.t.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; -import { IAllocationManager } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol"; +import { AllocationHandler } from "../../../../contracts/libraries/AllocationHandler.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; @@ -85,11 +85,7 @@ contract SubgraphServiceAllocationResizeTest is SubgraphServiceTest { uint256 tokens ) public useIndexer useAllocation(tokens) { vm.expectRevert( - abi.encodeWithSelector( - IAllocationManager.AllocationManagerAllocationSameSize.selector, - allocationId, - tokens - ) + abi.encodeWithSelector(AllocationHandler.AllocationHandlerAllocationSameSize.selector, allocationId, tokens) ); subgraphService.resizeAllocation(users.indexer, allocationId, tokens); } @@ -102,7 +98,7 @@ contract SubgraphServiceAllocationResizeTest is SubgraphServiceTest { bytes memory data = abi.encode(allocationId); _stopService(users.indexer, data); vm.expectRevert( - abi.encodeWithSelector(IAllocationManager.AllocationManagerAllocationClosed.selector, allocationId) + abi.encodeWithSelector(AllocationHandler.AllocationHandlerAllocationClosed.selector, allocationId) ); subgraphService.resizeAllocation(users.indexer, allocationId, resizeTokens); } diff --git a/packages/subgraph-service/test/unit/subgraphService/allocation/start.t.sol 
b/packages/subgraph-service/test/unit/subgraphService/allocation/start.t.sol index 0896e9473..68c3c6674 100644 --- a/packages/subgraph-service/test/unit/subgraphService/allocation/start.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/allocation/start.t.sol @@ -5,7 +5,7 @@ import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; import { ProvisionTracker } from "@graphprotocol/horizon/contracts/data-service/libraries/ProvisionTracker.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; -import { IAllocationManager } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol"; +import { AllocationHandler } from "../../../../contracts/libraries/AllocationHandler.sol"; import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { SubgraphServiceTest } from "../SubgraphService.t.sol"; @@ -94,7 +94,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { bytes32 digest = subgraphService.encodeAllocationProof(users.indexer, address(0)); (uint8 v, bytes32 r, bytes32 s) = vm.sign(allocationIdPrivateKey, digest); bytes memory data = abi.encode(subgraphDeployment, tokens, address(0), abi.encodePacked(r, s, v)); - vm.expectRevert(abi.encodeWithSelector(IAllocationManager.AllocationManagerInvalidZeroAllocationId.selector)); + vm.expectRevert(abi.encodeWithSelector(AllocationHandler.AllocationHandlerInvalidZeroAllocationId.selector)); subgraphService.startService(users.indexer, data); } @@ -110,7 +110,7 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { bytes memory data = abi.encode(subgraphDeployment, tokens, allocationId, 
abi.encodePacked(r, s, v)); vm.expectRevert( abi.encodeWithSelector( - IAllocationManager.AllocationManagerInvalidAllocationProof.selector, + AllocationHandler.AllocationHandlerInvalidAllocationProof.selector, signer, allocationId ) @@ -165,8 +165,9 @@ contract SubgraphServiceAllocationStartTest is SubgraphServiceTest { _createProvision(users.indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); _register(users.indexer, abi.encode("url", "geoHash", address(0))); - // create dummy allo in staking contract - _setStorageAllocationHardcoded(users.indexer, allocationId, tokens); + // Set a legacy allocation directly in HorizonStaking storage + // This simulates an allocation that was created before Horizon and exists in the staking contract + _setLegacyAllocationInStaking(allocationId, users.indexer, subgraphDeployment); bytes memory data = _generateData(tokens); vm.expectRevert(abi.encodeWithSelector(ILegacyAllocation.LegacyAllocationAlreadyExists.selector, allocationId)); diff --git a/packages/subgraph-service/test/unit/subgraphService/collect/collect.t.sol b/packages/subgraph-service/test/unit/subgraphService/collect/collect.t.sol index e77942714..982d7fe83 100644 --- a/packages/subgraph-service/test/unit/subgraphService/collect/collect.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/collect/collect.t.sol @@ -3,7 +3,7 @@ pragma solidity ^0.8.27; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; -import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { IndexingAgreementDecoder } from "../../../../contracts/libraries/IndexingAgreementDecoder.sol"; import { SubgraphServiceTest } from "../SubgraphService.t.sol"; contract SubgraphServiceCollectTest is SubgraphServiceTest { @@ -14,10 +14,14 @@ contract SubgraphServiceCollectTest is SubgraphServiceTest { function test_SubgraphService_Collect_RevertWhen_InvalidPayment( uint256 tokens ) public 
useIndexer useAllocation(tokens) { - IGraphPayments.PaymentTypes invalidPaymentType = IGraphPayments.PaymentTypes.IndexingFee; + IGraphPayments.PaymentTypes paymentType = IGraphPayments.PaymentTypes.IndexingFee; vm.expectRevert( - abi.encodeWithSelector(ISubgraphService.SubgraphServiceInvalidPaymentType.selector, invalidPaymentType) + abi.encodeWithSelector( + IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, + "decodeCollectData", + "" + ) ); - subgraphService.collect(users.indexer, invalidPaymentType, ""); + subgraphService.collect(users.indexer, paymentType, ""); } } diff --git a/packages/subgraph-service/test/unit/subgraphService/collect/indexing/indexing.t.sol b/packages/subgraph-service/test/unit/subgraphService/collect/indexing/indexing.t.sol index 94f11e0e5..49c034e52 100644 --- a/packages/subgraph-service/test/unit/subgraphService/collect/indexing/indexing.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/collect/indexing/indexing.t.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.27; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; -import { IAllocationManager } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol"; +import { AllocationHandler } from "../../../../../contracts/libraries/AllocationHandler.sol"; import { IRewardsManager } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsManager.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; @@ -270,7 +270,7 @@ contract SubgraphServiceCollectIndexingTest is SubgraphServiceTest { // Attempt to collect on closed allocation should revert vm.expectRevert( - abi.encodeWithSelector(IAllocationManager.AllocationManagerAllocationClosed.selector, allocationId) + abi.encodeWithSelector(AllocationHandler.AllocationHandlerAllocationClosed.selector, allocationId) ); subgraphService.collect(users.indexer, paymentType, data); 
} diff --git a/packages/subgraph-service/test/unit/subgraphService/getters.t.sol b/packages/subgraph-service/test/unit/subgraphService/getters.t.sol index 27c9aafbb..5f884cfcb 100644 --- a/packages/subgraph-service/test/unit/subgraphService/getters.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/getters.t.sol @@ -23,6 +23,11 @@ contract SubgraphServiceGettersTest is SubgraphServiceTest { assertEq(result, address(curation)); } + function test_GetRecurringCollector() public view { + address result = address(subgraphService.recurringCollector()); + assertEq(result, address(recurringCollector)); + } + function test_GetAllocationData(uint256 tokens) public useIndexer useAllocation(tokens) { ( bool isOpen, diff --git a/packages/subgraph-service/test/unit/subgraphService/governance/indexingFeesCut.t.sol b/packages/subgraph-service/test/unit/subgraphService/governance/indexingFeesCut.t.sol new file mode 100644 index 000000000..8bd374c01 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/governance/indexingFeesCut.t.sol @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { SubgraphServiceTest } from "../SubgraphService.t.sol"; +import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; + +contract SubgraphServiceGovernanceIndexingFeesCutTest is SubgraphServiceTest { + /* + * TESTS + */ + + function test_Governance_SetIndexingFeesCut(uint256 indexingFeesCut) public useGovernor { + vm.assume(indexingFeesCut <= MAX_PPM); + + vm.expectEmit(address(subgraphService)); + emit ISubgraphService.IndexingFeesCutSet(indexingFeesCut); + subgraphService.setIndexingFeesCut(indexingFeesCut); + + assertEq(subgraphService.indexingFeesCut(), indexingFeesCut); + } + + function test_Governance_SetIndexingFeesCut_RevertWhen_InvalidPPM(uint256 indexingFeesCut) public 
useGovernor { + vm.assume(indexingFeesCut > MAX_PPM); + + vm.expectRevert( + abi.encodeWithSelector(ISubgraphService.SubgraphServiceInvalidIndexingFeesCut.selector, indexingFeesCut) + ); + subgraphService.setIndexingFeesCut(indexingFeesCut); + } + + function test_Governance_SetIndexingFeesCut_RevertWhen_NotGovernor() public useIndexer { + uint256 indexingFeesCut = 100_000; // 10% + vm.expectRevert(abi.encodeWithSelector(OwnableUpgradeable.OwnableUnauthorizedAccount.selector, users.indexer)); + subgraphService.setIndexingFeesCut(indexingFeesCut); + } +} diff --git a/packages/subgraph-service/test/unit/subgraphService/governance/legacy.t.sol b/packages/subgraph-service/test/unit/subgraphService/governance/legacy.t.sol deleted file mode 100644 index 65aadf2a5..000000000 --- a/packages/subgraph-service/test/unit/subgraphService/governance/legacy.t.sol +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.27; - -import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; -import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; - -import { SubgraphServiceTest } from "../SubgraphService.t.sol"; - -contract SubgraphServiceLegacyAllocation is SubgraphServiceTest { - /* - * TESTS - */ - - function test_MigrateAllocation() public useGovernor { - _migrateLegacyAllocation(users.indexer, allocationId, subgraphDeployment); - } - - function test_MigrateAllocation_WhenNotGovernor() public useIndexer { - vm.expectRevert(abi.encodeWithSelector(OwnableUpgradeable.OwnableUnauthorizedAccount.selector, users.indexer)); - subgraphService.migrateLegacyAllocation(users.indexer, allocationId, subgraphDeployment); - } - - function test_MigrateAllocation_RevertWhen_AlreadyMigrated() public useGovernor { - _migrateLegacyAllocation(users.indexer, allocationId, subgraphDeployment); - - 
vm.expectRevert(abi.encodeWithSelector(ILegacyAllocation.LegacyAllocationAlreadyExists.selector, allocationId)); - subgraphService.migrateLegacyAllocation(users.indexer, allocationId, subgraphDeployment); - } -} diff --git a/packages/subgraph-service/test/unit/subgraphService/governance/maxPOIStaleness.t.sol b/packages/subgraph-service/test/unit/subgraphService/governance/maxPOIStaleness.t.sol index 5968cf623..f0c597e4a 100644 --- a/packages/subgraph-service/test/unit/subgraphService/governance/maxPOIStaleness.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/governance/maxPOIStaleness.t.sol @@ -1,7 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; -import { IAllocationManager } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocationManager.sol"; +import { AllocationHandler } from "../../../../contracts/libraries/AllocationHandler.sol"; import { SubgraphServiceTest } from "../SubgraphService.t.sol"; import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; @@ -12,7 +12,7 @@ contract SubgraphServiceGovernanceMaxPOIStalenessTest is SubgraphServiceTest { function test_Governance_SetMaxPOIStaleness(uint256 maxPOIStaleness) public useGovernor { vm.expectEmit(address(subgraphService)); - emit IAllocationManager.MaxPOIStalenessSet(maxPOIStaleness); + emit AllocationHandler.MaxPOIStalenessSet(maxPOIStaleness); subgraphService.setMaxPOIStaleness(maxPOIStaleness); assertEq(subgraphService.maxPOIStaleness(), maxPOIStaleness); diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/accept.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/accept.t.sol new file mode 100644 index 000000000..4296c8415 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/accept.t.sol @@ -0,0 +1,356 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { PausableUpgradeable } from 
"@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; +import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; +import { IndexingAgreementDecoder } from "../../../../contracts/libraries/IndexingAgreementDecoder.sol"; +import { AllocationHandler } from "../../../../contracts/libraries/AllocationHandler.sol"; +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenPaused( + address allocationId, + address operator, + IRecurringCollector.RecurringCollectionAgreement calldata rca, + bytes calldata authData + ) public withSafeIndexerOrOperator(operator) { + resetPrank(users.pauseGuardian); + subgraphService.pause(); + + resetPrank(operator); + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + subgraphService.acceptIndexingAgreement(allocationId, rca, authData); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenNotAuthorized( + address allocationId, + address operator, + IRecurringCollector.RecurringCollectionAgreement calldata rca, + bytes calldata authData + ) public withSafeIndexerOrOperator(operator) { + vm.assume(operator != rca.serviceProvider); + resetPrank(operator); + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerNotAuthorized.selector, + 
rca.serviceProvider, + operator + ); + vm.expectRevert(expectedErr); + subgraphService.acceptIndexingAgreement(allocationId, rca, authData); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidProvision( + address indexer, + uint256 unboundedTokens, + address allocationId, + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes memory authData + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, 1, MINIMUM_PROVISION_TOKENS - 1); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + + rca.serviceProvider = indexer; + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerInvalidValue.selector, + "tokens", + tokens, + MINIMUM_PROVISION_TOKENS, + MAXIMUM_PROVISION_TOKENS + ); + vm.expectRevert(expectedErr); + subgraphService.acceptIndexingAgreement(allocationId, rca, authData); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenIndexerNotRegistered( + address indexer, + uint256 unboundedTokens, + address allocationId, + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes memory authData + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + rca.serviceProvider = indexer; + bytes memory expectedErr = abi.encodeWithSelector( + ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, + indexer + ); + vm.expectRevert(expectedErr); + subgraphService.acceptIndexingAgreement(allocationId, rca, authData); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenNotDataService( + Seed memory seed, + address incorrectDataService + ) public { + vm.assume(incorrectDataService != address(subgraphService)); + + Context storage ctx = 
_newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + ctx, + indexerState.addr + ); + acceptableRca.dataService = incorrectDataService; + ( + IRecurringCollector.RecurringCollectionAgreement memory unacceptableRca, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(acceptableRca, ctx.payer.signerPrivateKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementWrongDataService.selector, + address(subgraphService), + unacceptableRca.dataService + ); + vm.expectRevert(expectedErr); + vm.prank(indexerState.addr); + subgraphService.acceptIndexingAgreement(indexerState.allocationId, unacceptableRca, signature); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidMetadata(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + ctx, + indexerState.addr + ); + acceptableRca.metadata = bytes("invalid"); + ( + IRecurringCollector.RecurringCollectionAgreement memory unacceptableRca, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(acceptableRca, ctx.payer.signerPrivateKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, + "decodeRCAMetadata", + unacceptableRca.metadata + ); + vm.expectRevert(expectedErr); + vm.prank(indexerState.addr); + subgraphService.acceptIndexingAgreement(indexerState.allocationId, unacceptableRca, signature); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidAllocation( + Seed memory seed, + address invalidAllocationId + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + 
IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, + bytes memory signature + ) = _generateAcceptableSignedRCA(ctx, indexerState.addr); + + bytes memory expectedErr = abi.encodeWithSelector( + IAllocation.AllocationDoesNotExist.selector, + invalidAllocationId + ); + vm.expectRevert(expectedErr); + vm.prank(indexerState.addr); + subgraphService.acceptIndexingAgreement(invalidAllocationId, acceptableRca, signature); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAllocationNotAuthorized(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerStateA = _withIndexer(ctx); + IndexerState memory indexerStateB = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptableRcaA, + bytes memory signatureA + ) = _generateAcceptableSignedRCA(ctx, indexerStateA.addr); + + bytes memory expectedErr = abi.encodeWithSelector( + ISubgraphService.SubgraphServiceAllocationNotAuthorized.selector, + indexerStateA.addr, + indexerStateB.allocationId + ); + vm.expectRevert(expectedErr); + vm.prank(indexerStateA.addr); + subgraphService.acceptIndexingAgreement(indexerStateB.allocationId, acceptableRcaA, signatureA); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAllocationClosed(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, + bytes memory signature + ) = _generateAcceptableSignedRCA(ctx, indexerState.addr); + + resetPrank(indexerState.addr); + subgraphService.stopService(indexerState.addr, abi.encode(indexerState.allocationId)); + + bytes memory expectedErr = abi.encodeWithSelector( + AllocationHandler.AllocationHandlerAllocationClosed.selector, + indexerState.allocationId + ); + vm.expectRevert(expectedErr); + subgraphService.acceptIndexingAgreement(indexerState.allocationId, acceptableRca, signature); 
+ } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenDeploymentIdMismatch( + Seed memory seed, + bytes32 wrongSubgraphDeploymentId + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + vm.assume(indexerState.subgraphDeploymentId != wrongSubgraphDeploymentId); + (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + ctx, + indexerState.addr + ); + acceptableRca.metadata = abi.encode(_newAcceptIndexingAgreementMetadataV1(wrongSubgraphDeploymentId)); + ( + IRecurringCollector.RecurringCollectionAgreement memory unacceptableRca, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(acceptableRca, ctx.payer.signerPrivateKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementDeploymentIdMismatch.selector, + wrongSubgraphDeploymentId, + indexerState.allocationId, + indexerState.subgraphDeploymentId + ); + vm.expectRevert(expectedErr); + vm.prank(indexerState.addr); + subgraphService.acceptIndexingAgreement(indexerState.allocationId, unacceptableRca, signature); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAgreementAlreadyAccepted(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // Re-sign for the re-accept attempt (the original signature was consumed) + (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA( + acceptedRca, + ctx.payer.signerPrivateKey + ); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementAlreadyAccepted.selector, + agreementId + ); + vm.expectRevert(expectedErr); + resetPrank(ctx.indexers[0].addr); + 
subgraphService.acceptIndexingAgreement(ctx.indexers[0].allocationId, acceptedRca, signature); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAgreementAlreadyAllocated( + Seed memory seed, + uint256 alternativeNonce + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + // First, accept an indexing agreement on the allocation + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _withAcceptedIndexingAgreement( + ctx, + indexerState + ); + vm.assume(acceptedRca.nonce != alternativeNonce); + + // Now try to accept a different agreement on the same allocation + // Create a new agreement with different nonce to ensure different agreement ID + IRecurringCollector.RecurringCollectionAgreement + memory newRCA = _generateAcceptableRecurringCollectionAgreement(ctx, indexerState.addr); + newRCA.nonce = alternativeNonce; // Different nonce to ensure different agreement ID + + // Sign the new agreement + ( + IRecurringCollector.RecurringCollectionAgreement memory newSignedRca, + bytes memory newSignature + ) = _recurringCollectorHelper.generateSignedRCA(newRCA, ctx.payer.signerPrivateKey); + + // Expect the error when trying to accept a second agreement on the same allocation + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.AllocationAlreadyHasIndexingAgreement.selector, + indexerState.allocationId + ); + vm.expectRevert(expectedErr); + resetPrank(indexerState.addr); + subgraphService.acceptIndexingAgreement(indexerState.allocationId, newSignedRca, newSignature); + } + + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidTermsData(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + ctx, + indexerState.addr + ); + // forge-lint: 
disable-next-line(mixed-case-variable) + IRecurringCollector.RecurringCollectionAgreement memory notAcceptableRCA = acceptableRca; + bytes memory invalidTermsData = bytes("invalid terms data"); + notAcceptableRCA.metadata = abi.encode( + _newAcceptIndexingAgreementMetadataV1Terms(indexerState.subgraphDeploymentId, invalidTermsData) + ); + ( + IRecurringCollector.RecurringCollectionAgreement memory notAcceptableRcaSigned, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(notAcceptableRCA, ctx.payer.signerPrivateKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, + "decodeIndexingAgreementTermsV1", + invalidTermsData + ); + vm.expectRevert(expectedErr); + resetPrank(indexerState.addr); + subgraphService.acceptIndexingAgreement(indexerState.allocationId, notAcceptableRcaSigned, signature); + } + + function test_SubgraphService_AcceptIndexingAgreement(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, + bytes memory signature + ) = _generateAcceptableSignedRCA(ctx, indexerState.addr); + IndexingAgreement.AcceptIndexingAgreementMetadata memory metadata = abi.decode( + acceptableRca.metadata, + (IndexingAgreement.AcceptIndexingAgreementMetadata) + ); + // Generate deterministic agreement ID for event expectation + bytes16 expectedAgreementId = recurringCollector.generateAgreementId( + acceptableRca.payer, + acceptableRca.dataService, + acceptableRca.serviceProvider, + acceptableRca.deadline, + acceptableRca.nonce + ); + + vm.expectEmit(address(subgraphService)); + emit IndexingAgreement.IndexingAgreementAccepted( + acceptableRca.serviceProvider, + acceptableRca.payer, + expectedAgreementId, + indexerState.allocationId, + metadata.subgraphDeploymentId, + metadata.version, + metadata.terms + ); + + 
resetPrank(indexerState.addr); + subgraphService.acceptIndexingAgreement(indexerState.allocationId, acceptableRca, signature); + } + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/base.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/base.t.sol new file mode 100644 index 000000000..e01d157c0 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/base.t.sol @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +contract SubgraphServiceIndexingAgreementBaseTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + function test_SubgraphService_GetIndexingAgreement( + Seed memory seed, + address operator, + bytes16 fuzzyAgreementId + ) public { + vm.assume(_isSafeSubgraphServiceCaller(operator)); + + resetPrank(address(operator)); + + // Get unknown indexing agreement + vm.expectRevert( + abi.encodeWithSelector(IndexingAgreement.IndexingAgreementNotActive.selector, fuzzyAgreementId) + ); + subgraphService.getIndexingAgreement(fuzzyAgreementId); + + // Accept an indexing agreement + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, 
indexerState); + IIndexingAgreement.AgreementWrapper memory agreement = subgraphService.getIndexingAgreement(agreementId); + _assertEqualAgreement(acceptedRca, agreement); + } + + function test_SubgraphService_Revert_WhenUnsafeAddress_WhenProxyAdmin(address indexer, bytes16 agreementId) public { + address operator = _transparentUpgradeableProxyAdmin(); + assertFalse(_isSafeSubgraphServiceCaller(operator)); + + vm.expectRevert(TransparentUpgradeableProxy.ProxyDeniedAdminAccess.selector); + resetPrank(address(operator)); + subgraphService.cancelIndexingAgreement(indexer, agreementId); + } + + function test_SubgraphService_Revert_WhenUnsafeAddress_WhenGraphProxyAdmin(uint256 unboundedTokens) public { + address indexer = GRAPH_PROXY_ADMIN_ADDRESS; + assertFalse(_isSafeSubgraphServiceCaller(indexer)); + + uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); + mint(indexer, tokens); + resetPrank(indexer); + vm.expectRevert("Cannot fallback to proxy target"); + staking.provision(indexer, address(subgraphService), tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/cancel.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/cancel.t.sol new file mode 100644 index 000000000..a0d4ed2d1 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/cancel.t.sol @@ -0,0 +1,232 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; +import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; + +import { ISubgraphService } from 
"@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +contract SubgraphServiceIndexingAgreementCancelTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenPaused( + address rando, + bytes16 agreementId + ) public withSafeIndexerOrOperator(rando) { + resetPrank(users.pauseGuardian); + subgraphService.pause(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + resetPrank(rando); + subgraphService.cancelIndexingAgreementByPayer(agreementId); + } + + function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenNotAuthorized( + Seed memory seed, + address rando + ) public withSafeIndexerOrOperator(rando) { + Context storage ctx = _newCtx(seed); + vm.assume(rando != seed.rca.payer); + vm.assume(rando != ctx.payer.signer); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, _withIndexer(ctx)); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNonCancelableBy.selector, + acceptedRca.payer, + rando + ); + vm.expectRevert(expectedErr); + resetPrank(rando); + subgraphService.cancelIndexingAgreementByPayer(agreementId); + } + + function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenNotAccepted( + Seed memory seed, + bytes16 agreementId + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + resetPrank(indexerState.addr); + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNotActive.selector, + agreementId + ); + vm.expectRevert(expectedErr); + 
subgraphService.cancelIndexingAgreementByPayer(agreementId); + } + + function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenCanceled( + Seed memory seed, + bool cancelSource + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes16 acceptedAgreementId + ) = _withAcceptedIndexingAgreement(ctx, indexerState); + IRecurringCollector.CancelAgreementBy by = cancelSource + ? IRecurringCollector.CancelAgreementBy.ServiceProvider + : IRecurringCollector.CancelAgreementBy.Payer; + _cancelAgreement(ctx, acceptedAgreementId, indexerState.addr, acceptedRca.payer, by); + + resetPrank(indexerState.addr); + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNotActive.selector, + acceptedAgreementId + ); + vm.expectRevert(expectedErr); + subgraphService.cancelIndexingAgreementByPayer(acceptedAgreementId); + } + + function test_SubgraphService_CancelIndexingAgreementByPayer(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes16 acceptedAgreementId + ) = _withAcceptedIndexingAgreement(ctx, _withIndexer(ctx)); + + _cancelAgreement( + ctx, + acceptedAgreementId, + acceptedRca.serviceProvider, + acceptedRca.payer, + IRecurringCollector.CancelAgreementBy.Payer + ); + } + + function test_SubgraphService_CancelIndexingAgreement_Revert_WhenPaused( + address operator, + address indexer, + bytes16 agreementId + ) public withSafeIndexerOrOperator(operator) { + resetPrank(users.pauseGuardian); + subgraphService.pause(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + resetPrank(operator); + subgraphService.cancelIndexingAgreement(indexer, agreementId); + } + + function test_SubgraphService_CancelIndexingAgreement_Revert_WhenNotAuthorized( + address operator, + address indexer, + bytes16 
agreementId + ) public withSafeIndexerOrOperator(operator) { + vm.assume(operator != indexer); + resetPrank(operator); + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerNotAuthorized.selector, + indexer, + operator + ); + vm.expectRevert(expectedErr); + subgraphService.cancelIndexingAgreement(indexer, agreementId); + } + + function test_SubgraphService_CancelIndexingAgreement_Revert_WhenInvalidProvision( + address indexer, + bytes16 agreementId, + uint256 unboundedTokens + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, 1, MINIMUM_PROVISION_TOKENS - 1); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerInvalidValue.selector, + "tokens", + tokens, + MINIMUM_PROVISION_TOKENS, + MAXIMUM_PROVISION_TOKENS + ); + vm.expectRevert(expectedErr); + subgraphService.cancelIndexingAgreement(indexer, agreementId); + } + + function test_SubgraphService_CancelIndexingAgreement_Revert_WhenIndexerNotRegistered( + address indexer, + bytes16 agreementId, + uint256 unboundedTokens + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + bytes memory expectedErr = abi.encodeWithSelector( + ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, + indexer + ); + vm.expectRevert(expectedErr); + subgraphService.cancelIndexingAgreement(indexer, agreementId); + } + + function test_SubgraphService_CancelIndexingAgreement_Revert_WhenNotAccepted( + Seed memory seed, + bytes16 agreementId + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + resetPrank(indexerState.addr); + bytes memory 
expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNotActive.selector, + agreementId + ); + vm.expectRevert(expectedErr); + subgraphService.cancelIndexingAgreement(indexerState.addr, agreementId); + } + + function test_SubgraphService_CancelIndexingAgreement_Revert_WhenCanceled( + Seed memory seed, + bool cancelSource + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca2, + bytes16 acceptedAgreementId + ) = _withAcceptedIndexingAgreement(ctx, indexerState); + IRecurringCollector.CancelAgreementBy by = cancelSource + ? IRecurringCollector.CancelAgreementBy.ServiceProvider + : IRecurringCollector.CancelAgreementBy.Payer; + _cancelAgreement(ctx, acceptedAgreementId, acceptedRca2.serviceProvider, acceptedRca2.payer, by); + + resetPrank(indexerState.addr); + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNotActive.selector, + acceptedAgreementId + ); + vm.expectRevert(expectedErr); + subgraphService.cancelIndexingAgreement(indexerState.addr, acceptedAgreementId); + } + + function test_SubgraphService_CancelIndexingAgreement_OK(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes16 acceptedAgreementId + ) = _withAcceptedIndexingAgreement(ctx, _withIndexer(ctx)); + + _cancelAgreement( + ctx, + acceptedAgreementId, + acceptedRca.serviceProvider, + acceptedRca.payer, + IRecurringCollector.CancelAgreementBy.ServiceProvider + ); + } + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/collect.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/collect.t.sol new file mode 100644 index 000000000..5818a1d63 --- /dev/null +++ 
b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/collect.t.sol @@ -0,0 +1,341 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; +import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; + +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; +import { AllocationHandler } from "../../../../contracts/libraries/AllocationHandler.sol"; +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; +import { IndexingAgreementDecoder } from "../../../../contracts/libraries/IndexingAgreementDecoder.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +contract SubgraphServiceIndexingAgreementCollectTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + function test_SubgraphService_CollectIndexingFees_OK( + Seed memory seed, + uint256 entities, + bytes32 poi, + uint256 unboundedTokensCollected + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, + bytes16 acceptedAgreementId + ) = _withAcceptedIndexingAgreement(ctx, indexerState); + + assertEq(subgraphService.feesProvisionTracker(indexerState.addr), 0, "Should be 0 before collect"); + + 
resetPrank(indexerState.addr); + subgraphService.setPaymentsDestination(indexerState.addr); + + bytes memory data = abi.encode( + IRecurringCollector.CollectParams({ + agreementId: acceptedAgreementId, + collectionId: bytes32(uint256(uint160(indexerState.allocationId))), + tokens: 0, + dataServiceCut: 0, + receiverDestination: indexerState.addr, + maxSlippage: type(uint256).max + }) + ); + uint256 tokensCollected = bound(unboundedTokensCollected, 1, indexerState.tokens / STAKE_TO_FEES_RATIO); + + vm.mockCall( + address(recurringCollector), + abi.encodeWithSelector(IPaymentsCollector.collect.selector, IGraphPayments.PaymentTypes.IndexingFee, data), + abi.encode(tokensCollected) + ); + _expectCollectCallAndEmit(data, indexerState, acceptedRca, acceptedAgreementId, tokensCollected, entities, poi); + + skip(1); // To make agreement collectable + + subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(acceptedAgreementId, entities, poi, epochManager.currentEpochBlock(), bytes("")) + ); + + assertEq( + subgraphService.feesProvisionTracker(indexerState.addr), + tokensCollected * STAKE_TO_FEES_RATIO, + "Should be exactly locked tokens" + ); + } + + function test_SubgraphService_CollectIndexingFees_Revert_WhenPaused( + address indexer, + bytes16 agreementId, + uint256 entities, + bytes32 poi + ) public withSafeIndexerOrOperator(indexer) { + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + resetPrank(users.pauseGuardian); + subgraphService.pause(); + + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + resetPrank(indexer); + subgraphService.collect( + indexer, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(agreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + function test_SubgraphService_CollectIndexingFees_Revert_WhenNotAuthorized( + address operator, + address indexer, + bytes16 agreementId, + uint256 entities, + bytes32 poi + ) public 
withSafeIndexerOrOperator(operator) { + vm.assume(operator != indexer); + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + resetPrank(operator); + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerNotAuthorized.selector, + indexer, + operator + ); + vm.expectRevert(expectedErr); + subgraphService.collect( + indexer, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(agreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + function test_SubgraphService_CollectIndexingFees_Revert_WhenInvalidProvision( + uint256 unboundedTokens, + address indexer, + bytes16 agreementId, + uint256 entities, + bytes32 poi + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, 1, MINIMUM_PROVISION_TOKENS - 1); + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerInvalidValue.selector, + "tokens", + tokens, + MINIMUM_PROVISION_TOKENS, + MAXIMUM_PROVISION_TOKENS + ); + vm.expectRevert(expectedErr); + subgraphService.collect( + indexer, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(agreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + function test_SubgraphService_CollectIndexingFees_Revert_WhenIndexerNotRegistered( + uint256 unboundedTokens, + address indexer, + bytes16 agreementId, + uint256 entities, + bytes32 poi + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + bytes memory expectedErr = abi.encodeWithSelector( + 
ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, + indexer + ); + vm.expectRevert(expectedErr); + subgraphService.collect( + indexer, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(agreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + function test_SubgraphService_CollectIndexingFees_Revert_WhenInvalidData(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + bytes memory invalidData = bytes("invalid data"); + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, + "decodeCollectData", + invalidData + ); + vm.expectRevert(expectedErr); + resetPrank(indexerState.addr); + subgraphService.collect(indexerState.addr, IGraphPayments.PaymentTypes.IndexingFee, invalidData); + } + + function test_SubgraphService_CollectIndexingFees_Revert_WhenInvalidAgreement( + Seed memory seed, + bytes16 agreementId, + uint256 entities, + bytes32 poi + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + + bytes memory expectedErr = abi.encodeWithSelector(IAllocation.AllocationDoesNotExist.selector, address(0)); + vm.expectRevert(expectedErr); + resetPrank(indexerState.addr); + subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(agreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + function test_SubgraphService_CollectIndexingFees_Reverts_WhenInvalidNestedData(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + resetPrank(indexerState.addr); + + bytes memory invalidNestedData = bytes("invalid nested data"); + bytes memory expectedErr = 
abi.encodeWithSelector( + IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, + "decodeCollectIndexingFeeDataV1", + invalidNestedData + ); + vm.expectRevert(expectedErr); + + skip(1); // To make agreement collectable + + subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectData(acceptedAgreementId, invalidNestedData) + ); + } + + function test_SubgraphService_CollectIndexingFees_Reverts_WhenIndexingAgreementNotAuthorized( + Seed memory seed, + uint256 entities, + bytes32 poi + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + IndexerState memory otherIndexerState = _withIndexer(ctx); + (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + vm.assume(otherIndexerState.addr != indexerState.addr); + + resetPrank(otherIndexerState.addr); + + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNotAuthorized.selector, + acceptedAgreementId, + otherIndexerState.addr + ); + vm.expectRevert(expectedErr); + subgraphService.collect( + otherIndexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(acceptedAgreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + function test_SubgraphService_CollectIndexingFees_Reverts_WhenStopService( + Seed memory seed, + uint256 entities, + bytes32 poi + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + resetPrank(indexerState.addr); + subgraphService.stopService(indexerState.addr, abi.encode(indexerState.allocationId)); + + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + + bytes memory expectedErr = abi.encodeWithSelector( + 
AllocationHandler.AllocationHandlerAllocationClosed.selector, + indexerState.allocationId + ); + vm.expectRevert(expectedErr); + subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(acceptedAgreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + function test_SubgraphService_CollectIndexingFees_Reverts_WhenCloseStaleAllocation( + Seed memory seed, + uint256 entities, + bytes32 poi + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + skip(MAX_POI_STALENESS + 1); + resetPrank(indexerState.addr); + subgraphService.closeStaleAllocation(indexerState.allocationId); + + uint256 currentEpochBlock = epochManager.currentEpochBlock(); + + bytes memory expectedErr = abi.encodeWithSelector( + AllocationHandler.AllocationHandlerAllocationClosed.selector, + indexerState.allocationId + ); + vm.expectRevert(expectedErr); + subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(acceptedAgreementId, entities, poi, currentEpochBlock, bytes("")) + ); + } + + /* solhint-enable graph/func-name-mixedcase */ + + function _expectCollectCallAndEmit( + bytes memory _data, + IndexerState memory _indexerState, + IRecurringCollector.RecurringCollectionAgreement memory _acceptedRca, + bytes16 _acceptedAgreementId, + uint256 _tokensCollected, + uint256 _entities, + bytes32 _poi + ) private { + vm.expectCall( + address(recurringCollector), + abi.encodeCall(IPaymentsCollector.collect, (IGraphPayments.PaymentTypes.IndexingFee, _data)) + ); + vm.expectEmit(address(subgraphService)); + emit IndexingAgreement.IndexingFeesCollectedV1( + _indexerState.addr, + _acceptedRca.payer, + _acceptedAgreementId, + _indexerState.allocationId, + _indexerState.subgraphDeploymentId, + epochManager.currentEpoch(), + _tokensCollected, + _entities, + 
_poi, + epochManager.currentEpochBlock(), + bytes("") + ); + } +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/integration.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/integration.t.sol new file mode 100644 index 000000000..d6f69414f --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/integration.t.sol @@ -0,0 +1,282 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; +import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndexingAgreementSharedTest { + using PPMMath for uint256; + + struct TestState { + uint256 escrowBalance; + uint256 indexerBalance; + uint256 indexerTokensLocked; + } + + struct ExpectedTokens { + uint256 expectedTotalTokensCollected; + uint256 expectedTokensLocked; + uint256 expectedProtocolTokensBurnt; + uint256 expectedIndexerTokensCollected; + } + + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + function test_SubgraphService_CollectIndexingFee_Integration( + Seed memory seed, + uint256 fuzzyTokensCollected + ) public { + // Setup + ExpectedTokens memory expectedTokens = _newExpectedTokens(fuzzyTokensCollected); + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + _addTokensToProvision(indexerState, expectedTokens.expectedTokensLocked); + 
IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + ctx.ctxInternal.seed.rca + ); + bytes16 acceptedAgreementId = _sharedSetup(ctx, rca, indexerState, expectedTokens); + + TestState memory beforeCollect = _getState(rca.payer, indexerState.addr); + + // Collect + resetPrank(indexerState.addr); + uint256 tokensCollected = subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1( + acceptedAgreementId, + 1, + keccak256(abi.encodePacked("poi")), + epochManager.currentEpochBlock(), + bytes("") + ) + ); + + TestState memory afterCollect = _getState(rca.payer, indexerState.addr); + _sharedAssert(beforeCollect, afterCollect, expectedTokens, tokensCollected); + } + + function test_SubgraphService_CollectIndexingFee_WhenCanceledByPayer_Integration( + Seed memory seed, + uint256 fuzzyTokensCollected + ) public { + // Setup + ExpectedTokens memory expectedTokens = _newExpectedTokens(fuzzyTokensCollected); + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + ctx.ctxInternal.seed.rca + ); + bytes16 acceptedAgreementId = _sharedSetup(ctx, rca, indexerState, expectedTokens); + + // Cancel the indexing agreement by the payer + resetPrank(ctx.payer.signer); + subgraphService.cancelIndexingAgreementByPayer(acceptedAgreementId); + + TestState memory beforeCollect = _getState(rca.payer, indexerState.addr); + + // Collect + resetPrank(indexerState.addr); + uint256 tokensCollected = subgraphService.collect( + indexerState.addr, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1( + acceptedAgreementId, + 1, + keccak256(abi.encodePacked("poi")), + epochManager.currentEpochBlock(), + bytes("") + ) + ); + + TestState memory afterCollect = _getState(rca.payer, indexerState.addr); + _sharedAssert(beforeCollect, afterCollect, 
expectedTokens, tokensCollected); + } + + function test_SubgraphService_CollectIndexingRewards_CancelsAgreementWhenOverAllocated_Integration( + Seed memory seed + ) public { + // Setup context and indexer with active agreement + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 agreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // Ensure enough gap so that reward distribution (1% of tokens) doesn't undo the over-allocation + vm.assume(indexerState.tokens > MINIMUM_PROVISION_TOKENS * 2); + + // Reduce indexer's provision to force over-allocation after collecting rewards + uint256 extraTokens = indexerState.tokens - MINIMUM_PROVISION_TOKENS; + _removeTokensFromProvision(indexerState, extraTokens); + + // Verify indexer will be over-allocated after presenting POI + assertTrue(subgraphService.isOverAllocated(indexerState.addr)); + + // Advance past allocation creation epoch so POI is not considered "too young" + vm.roll(block.number + EPOCH_LENGTH); + + // Collect indexing rewards - this should trigger allocation closure and agreement cancellation + bytes memory collectData = abi.encode(indexerState.allocationId, keccak256("poi"), bytes("metadata")); + resetPrank(indexerState.addr); + subgraphService.collect(indexerState.addr, IGraphPayments.PaymentTypes.IndexingRewards, collectData); + + // Verify the indexing agreement was properly cancelled + IIndexingAgreement.AgreementWrapper memory agreement = subgraphService.getIndexingAgreement(agreementId); + assertEq( + uint8(agreement.collectorAgreement.state), + uint8(IRecurringCollector.AgreementState.CanceledByServiceProvider) + ); + } + + /* solhint-enable graph/func-name-mixedcase */ + + function _sharedSetup( + Context storage _ctx, + IRecurringCollector.RecurringCollectionAgreement memory _rca, + IndexerState memory _indexerState, + ExpectedTokens memory _expectedTokens + ) internal returns (bytes16) { + _addTokensToProvision(_indexerState, 
_expectedTokens.expectedTokensLocked); + + IndexingAgreement.IndexingAgreementTermsV1 memory terms = IndexingAgreement.IndexingAgreementTermsV1({ + tokensPerSecond: 1, + tokensPerEntityPerSecond: 0 // no payment for entities + }); + _rca.deadline = uint64(block.timestamp); // accept now + _rca.endsAt = type(uint64).max; // no expiration + _rca.maxInitialTokens = 0; // no initial payment + _rca.maxOngoingTokensPerSecond = type(uint32).max; // unlimited tokens per second + _rca.minSecondsPerCollection = 1; // 1 second between collections + _rca.maxSecondsPerCollection = type(uint32).max; // no maximum time between collections + _rca.serviceProvider = _indexerState.addr; // service provider is the indexer + _rca.dataService = address(subgraphService); // data service is the subgraph service + _rca.metadata = _encodeAcceptIndexingAgreementMetadataV1(_indexerState.subgraphDeploymentId, terms); + + _setupPayerWithEscrow( + _rca.payer, + _ctx.payer.signerPrivateKey, + _indexerState.addr, + _expectedTokens.expectedTotalTokensCollected + ); + + resetPrank(_indexerState.addr); + // Set the payments destination to the indexer address + subgraphService.setPaymentsDestination(_indexerState.addr); + + // Accept the Indexing Agreement + ( + IRecurringCollector.RecurringCollectionAgreement memory signedRca, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(_rca, _ctx.payer.signerPrivateKey); + bytes16 agreementId = subgraphService.acceptIndexingAgreement(_indexerState.allocationId, signedRca, signature); + + // Skip ahead to collection point + skip(_expectedTokens.expectedTotalTokensCollected / terms.tokensPerSecond); + + return agreementId; + } + + function _newExpectedTokens(uint256 _fuzzyTokensCollected) internal view returns (ExpectedTokens memory) { + uint256 expectedTotalTokensCollected = bound(_fuzzyTokensCollected, 1000, 1_000_000); + uint256 expectedTokensLocked = STAKE_TO_FEES_RATIO * expectedTotalTokensCollected; + uint256 
expectedProtocolTokensBurnt = expectedTotalTokensCollected.mulPPMRoundUp( + graphPayments.PROTOCOL_PAYMENT_CUT() + ); + uint256 expectedIndexerTokensCollected = expectedTotalTokensCollected - expectedProtocolTokensBurnt; + return + ExpectedTokens({ + expectedTotalTokensCollected: expectedTotalTokensCollected, + expectedTokensLocked: expectedTokensLocked, + expectedProtocolTokensBurnt: expectedProtocolTokensBurnt, + expectedIndexerTokensCollected: expectedIndexerTokensCollected + }); + } + + function _sharedAssert( + TestState memory _beforeCollect, + TestState memory _afterCollect, + ExpectedTokens memory _expectedTokens, + uint256 _tokensCollected + ) internal pure { + uint256 indexerTokensCollected = _afterCollect.indexerBalance - _beforeCollect.indexerBalance; + assertEq(_expectedTokens.expectedTotalTokensCollected, _tokensCollected, "Total tokens collected should match"); + assertEq( + _expectedTokens.expectedProtocolTokensBurnt, + _tokensCollected - indexerTokensCollected, + "Protocol tokens burnt should match" + ); + assertEq( + _expectedTokens.expectedIndexerTokensCollected, + indexerTokensCollected, + "Indexer tokens collected should match" + ); + assertEq( + _afterCollect.escrowBalance, + _beforeCollect.escrowBalance - _expectedTokens.expectedTotalTokensCollected, + "_Escrow balance should be reduced by the amount collected" + ); + + assertEq( + _afterCollect.indexerTokensLocked, + _beforeCollect.indexerTokensLocked + _expectedTokens.expectedTokensLocked, + "_Locked tokens should match" + ); + } + + function _addTokensToProvision(IndexerState memory _indexerState, uint256 _tokens) private { + deal({ token: address(token), to: _indexerState.addr, give: _tokens }); + vm.startPrank(_indexerState.addr); + _addToProvision(_indexerState.addr, _tokens); + vm.stopPrank(); + } + + function _removeTokensFromProvision(IndexerState memory _indexerState, uint256 _tokens) private { + deal({ token: address(token), to: _indexerState.addr, give: _tokens }); + 
vm.startPrank(_indexerState.addr); + _removeFromProvision(_indexerState.addr, _tokens); + vm.stopPrank(); + } + + function _setupPayerWithEscrow( + address _payer, + uint256 _signerPrivateKey, + address _indexer, + uint256 _escrowTokens + ) private { + _recurringCollectorHelper.authorizeSignerWithChecks(_payer, _signerPrivateKey); + + deal({ token: address(token), to: _payer, give: _escrowTokens }); + vm.startPrank(_payer); + _escrow(_escrowTokens, _indexer); + vm.stopPrank(); + } + + function _escrow(uint256 _tokens, address _indexer) private { + token.approve(address(escrow), _tokens); + escrow.deposit(address(recurringCollector), _indexer, _tokens); + } + + function _getState(address _payer, address _indexer) private view returns (TestState memory) { + CollectPaymentData memory collect = _collectPaymentData(_indexer); + (uint256 escrowBal, uint256 escrowThawing, ) = escrow.escrowAccounts( + _payer, + address(recurringCollector), + _indexer + ); + + return + TestState({ + escrowBalance: escrowBal - escrowThawing, + indexerBalance: collect.indexerBalance, + indexerTokensLocked: collect.lockedTokens + }); + } +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/shared.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/shared.t.sol new file mode 100644 index 000000000..ea371e237 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/shared.t.sol @@ -0,0 +1,439 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; +import { Strings } from "@openzeppelin/contracts/utils/Strings.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; + +import { Bounder } from 
"@graphprotocol/horizon/test/unit/utils/Bounder.t.sol"; +import { RecurringCollectorHelper } from "@graphprotocol/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol"; +import { SubgraphServiceTest } from "../SubgraphService.t.sol"; + +contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Bounder { + struct Context { + PayerState payer; + IndexerState[] indexers; + mapping(address allocationId => address indexer) allocations; + ContextInternal ctxInternal; + } + + struct IndexerState { + address addr; + address allocationId; + bytes32 subgraphDeploymentId; + uint256 tokens; + } + + struct PayerState { + address signer; + uint256 signerPrivateKey; + } + + struct ContextInternal { + IndexerSeed[] indexers; + Seed seed; + bool initialized; + } + + struct Seed { + IndexerSeed indexer0; + IndexerSeed indexer1; + IRecurringCollector.RecurringCollectionAgreement rca; + IRecurringCollector.RecurringCollectionAgreementUpdate rcau; + IndexingAgreement.IndexingAgreementTermsV1 termsV1; + PayerSeed payer; + } + + struct IndexerSeed { + address addr; + string label; + uint256 unboundedProvisionTokens; + uint256 unboundedAllocationPrivateKey; + bytes32 subgraphDeploymentId; + } + + struct PayerSeed { + uint256 unboundedSignerPrivateKey; + } + + Context internal _context; + + bytes32 internal constant TRANSPARENT_UPGRADEABLE_PROXY_ADMIN_ADDRESS_SLOT = + 0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103; + address internal constant GRAPH_PROXY_ADMIN_ADDRESS = 0x15c603B7eaA8eE1a272a69C4af3462F926de777F; + + RecurringCollectorHelper internal _recurringCollectorHelper; + + modifier withSafeIndexerOrOperator(address operator) { + vm.assume(_isSafeSubgraphServiceCaller(operator)); + _; + } + + function setUp() public override { + super.setUp(); + + _recurringCollectorHelper = new RecurringCollectorHelper(recurringCollector); + } + + /* + * HELPERS + */ + + function _subgraphServiceSafePrank(address _addr) internal returns 
(address) { + address originalPrankAddress = msg.sender; + vm.assume(_isSafeSubgraphServiceCaller(_addr)); + resetPrank(_addr); + + return originalPrankAddress; + } + + function _stopOrResetPrank(address _originalSender) internal { + if (_originalSender == 0x1804c8AB1F12E6bbf3894d4083f33e07309d1f38) { + vm.stopPrank(); + } else { + resetPrank(_originalSender); + } + } + + function _cancelAgreement( + Context storage _ctx, + bytes16 _agreementId, + address _indexer, + address _payer, + IRecurringCollector.CancelAgreementBy _by + ) internal { + bool byIndexer = _by == IRecurringCollector.CancelAgreementBy.ServiceProvider; + vm.expectEmit(address(subgraphService)); + emit IndexingAgreement.IndexingAgreementCanceled(_indexer, _payer, _agreementId, byIndexer ? _indexer : _payer); + + if (byIndexer) { + _subgraphServiceSafePrank(_indexer); + subgraphService.cancelIndexingAgreement(_indexer, _agreementId); + } else { + _subgraphServiceSafePrank(_ctx.payer.signer); + subgraphService.cancelIndexingAgreementByPayer(_agreementId); + } + } + + function _withIndexer(Context storage _ctx) internal returns (IndexerState memory) { + require(_ctx.ctxInternal.indexers.length > 0, "No indexer seeds available"); + + IndexerSeed memory indexerSeed = _ctx.ctxInternal.indexers[_ctx.ctxInternal.indexers.length - 1]; + _ctx.ctxInternal.indexers.pop(); + + indexerSeed.label = string.concat("_withIndexer-", Strings.toString(_ctx.ctxInternal.indexers.length)); + + return _setupIndexer(_ctx, indexerSeed); + } + + function _setupIndexer(Context storage _ctx, IndexerSeed memory _seed) internal returns (IndexerState memory) { + vm.assume(_getIndexer(_ctx, _seed.addr).addr == address(0)); + + (uint256 allocationKey, address allocationId) = boundKeyAndAddr(_seed.unboundedAllocationPrivateKey); + vm.assume(_ctx.allocations[allocationId] == address(0)); + _ctx.allocations[allocationId] = _seed.addr; + + uint256 tokens = bound(_seed.unboundedProvisionTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); + + 
IndexerState memory indexer = IndexerState({ + addr: _seed.addr, + allocationId: allocationId, + subgraphDeploymentId: _seed.subgraphDeploymentId, + tokens: tokens + }); + vm.label(indexer.addr, string.concat("_setupIndexer-", _seed.label)); + + // Mint tokens to the indexer + mint(_seed.addr, tokens); + + // Create the indexer + address originalPrank = _subgraphServiceSafePrank(indexer.addr); + _createProvision(indexer.addr, indexer.tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + _register(indexer.addr, abi.encode("url", "geoHash", address(0))); + bytes memory data = _createSubgraphAllocationData( + indexer.addr, + indexer.subgraphDeploymentId, + allocationKey, + indexer.tokens + ); + _startService(indexer.addr, data); + + _ctx.indexers.push(indexer); + + _stopOrResetPrank(originalPrank); + + return indexer; + } + + function _withAcceptedIndexingAgreement( + Context storage _ctx, + IndexerState memory _indexerState + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes16 agreementId) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _ctx.ctxInternal.seed.rca; + + IndexingAgreement.AcceptIndexingAgreementMetadata memory metadata = _newAcceptIndexingAgreementMetadataV1( + _indexerState.subgraphDeploymentId + ); + rca.serviceProvider = _indexerState.addr; + rca.dataService = address(subgraphService); + rca.metadata = abi.encode(metadata); + + rca = _recurringCollectorHelper.sensibleRCA(rca); + + ( + IRecurringCollector.RecurringCollectionAgreement memory signedRca, + bytes memory signature + ) = _recurringCollectorHelper.generateSignedRCA(rca, _ctx.payer.signerPrivateKey); + _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, _ctx.payer.signerPrivateKey); + + // Generate deterministic agreement ID for event expectation + agreementId = recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + + 
vm.expectEmit(address(subgraphService)); + emit IndexingAgreement.IndexingAgreementAccepted( + rca.serviceProvider, + rca.payer, + agreementId, + _indexerState.allocationId, + metadata.subgraphDeploymentId, + metadata.version, + metadata.terms + ); + _subgraphServiceSafePrank(_indexerState.addr); + bytes16 actualAgreementId = subgraphService.acceptIndexingAgreement( + _indexerState.allocationId, + signedRca, + signature + ); + + // Verify the agreement ID matches expectation + assertEq(actualAgreementId, agreementId); + return (signedRca, agreementId); + } + + function _newCtx(Seed memory _seed) internal returns (Context storage) { + require(_context.ctxInternal.initialized == false, "Context already initialized"); + Context storage ctx = _context; + + // Initialize + ctx.ctxInternal.initialized = true; + + // Setup seeds + ctx.ctxInternal.seed = _seed; + ctx.ctxInternal.indexers.push(_seed.indexer0); + ctx.ctxInternal.indexers.push(_seed.indexer1); + + // Setup payer + ctx.payer.signerPrivateKey = boundKey(ctx.ctxInternal.seed.payer.unboundedSignerPrivateKey); + ctx.payer.signer = vm.addr(ctx.payer.signerPrivateKey); + + return ctx; + } + + function _generateAcceptableSignedRCA( + Context storage _ctx, + address _indexerAddress + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _generateAcceptableRecurringCollectionAgreement( + _ctx, + _indexerAddress + ); + _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, _ctx.payer.signerPrivateKey); + + return _recurringCollectorHelper.generateSignedRCA(rca, _ctx.payer.signerPrivateKey); + } + + function _generateAcceptableRecurringCollectionAgreement( + Context storage _ctx, + address _indexerAddress + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + IndexerState memory indexer = _requireIndexer(_ctx, _indexerAddress); + IndexingAgreement.AcceptIndexingAgreementMetadata 
memory metadata = _newAcceptIndexingAgreementMetadataV1( + indexer.subgraphDeploymentId + ); + IRecurringCollector.RecurringCollectionAgreement memory rca = _ctx.ctxInternal.seed.rca; + rca.serviceProvider = indexer.addr; + rca.dataService = address(subgraphService); + rca.metadata = abi.encode(metadata); + + return _recurringCollectorHelper.sensibleRCA(rca); + } + + function _generateAcceptableSignedRCAU( + Context storage _ctx, + IRecurringCollector.RecurringCollectionAgreement memory _rca + ) internal view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { + IRecurringCollector.RecurringCollectionAgreementUpdate + memory rcau = _generateAcceptableRecurringCollectionAgreementUpdate(_ctx, _rca); + // Set correct nonce for first update (should be 1) + rcau.nonce = 1; + return _recurringCollectorHelper.generateSignedRCAU(rcau, _ctx.payer.signerPrivateKey); + } + + function _generateAcceptableRecurringCollectionAgreementUpdate( + Context storage _ctx, + IRecurringCollector.RecurringCollectionAgreement memory _rca + ) internal view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory) { + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _ctx.ctxInternal.seed.rcau; + // Generate deterministic agreement ID for the update + rcau.agreementId = recurringCollector.generateAgreementId( + _rca.payer, + _rca.dataService, + _rca.serviceProvider, + _rca.deadline, + _rca.nonce + ); + rcau.metadata = _encodeUpdateIndexingAgreementMetadataV1( + _newUpdateIndexingAgreementMetadataV1( + bound(_ctx.ctxInternal.seed.termsV1.tokensPerSecond, 0, _rca.maxOngoingTokensPerSecond), + _ctx.ctxInternal.seed.termsV1.tokensPerEntityPerSecond + ) + ); + return _recurringCollectorHelper.sensibleRCAU(rcau); + } + + function _requireIndexer(Context storage _ctx, address _indexer) internal view returns (IndexerState memory) { + IndexerState memory indexerState = _getIndexer(_ctx, _indexer); + require(indexerState.addr != 
address(0), "Indexer not found in context"); + + return indexerState; + } + + function _getIndexer(Context storage _ctx, address _indexer) internal view returns (IndexerState memory zero) { + for (uint256 i = 0; i < _ctx.indexers.length; i++) { + if (_ctx.indexers[i].addr == _indexer) { + return _ctx.indexers[i]; + } + } + + return zero; + } + + function _isSafeSubgraphServiceCaller(address _candidate) internal view returns (bool) { + return + _candidate != address(0) && + _candidate != address(_transparentUpgradeableProxyAdmin()) && + _candidate != address(proxyAdmin); + } + + function _transparentUpgradeableProxyAdmin() internal view returns (address) { + return + address( + uint160(uint256(vm.load(address(subgraphService), TRANSPARENT_UPGRADEABLE_PROXY_ADMIN_ADDRESS_SLOT))) + ); + } + + function _newAcceptIndexingAgreementMetadataV1( + bytes32 _subgraphDeploymentId + ) internal pure returns (IndexingAgreement.AcceptIndexingAgreementMetadata memory) { + return + _newAcceptIndexingAgreementMetadataV1Terms( + _subgraphDeploymentId, + abi.encode( + IndexingAgreement.IndexingAgreementTermsV1({ tokensPerSecond: 0, tokensPerEntityPerSecond: 0 }) + ) + ); + } + + function _newAcceptIndexingAgreementMetadataV1Terms( + bytes32 _subgraphDeploymentId, + bytes memory _terms + ) internal pure returns (IndexingAgreement.AcceptIndexingAgreementMetadata memory) { + return + IndexingAgreement.AcceptIndexingAgreementMetadata({ + subgraphDeploymentId: _subgraphDeploymentId, + version: IIndexingAgreement.IndexingAgreementVersion.V1, + terms: _terms + }); + } + + function _newUpdateIndexingAgreementMetadataV1( + uint256 _tokensPerSecond, + uint256 _tokensPerEntityPerSecond + ) internal pure returns (IndexingAgreement.UpdateIndexingAgreementMetadata memory) { + return + IndexingAgreement.UpdateIndexingAgreementMetadata({ + version: IIndexingAgreement.IndexingAgreementVersion.V1, + terms: abi.encode( + IndexingAgreement.IndexingAgreementTermsV1({ + tokensPerSecond: _tokensPerSecond, + 
tokensPerEntityPerSecond: _tokensPerEntityPerSecond + }) + ) + }); + } + + function _encodeCollectDataV1( + bytes16 _agreementId, + uint256 _entities, + bytes32 _poi, + uint256 _poiBlock, + bytes memory _metadata + ) internal pure returns (bytes memory) { + return _encodeCollectData(_agreementId, _encodeV1Data(_entities, _poi, _poiBlock, _metadata)); + } + + function _encodeCollectData(bytes16 _agreementId, bytes memory _nestedData) internal pure returns (bytes memory) { + return abi.encode(_agreementId, _nestedData); + } + + function _encodeV1Data( + uint256 _entities, + bytes32 _poi, + uint256 _poiBlock, + bytes memory _metadata + ) internal pure returns (bytes memory) { + return + abi.encode( + IndexingAgreement.CollectIndexingFeeDataV1({ + entities: _entities, + poi: _poi, + poiBlockNumber: _poiBlock, + metadata: _metadata, + maxSlippage: type(uint256).max + }) + ); + } + + function _encodeAcceptIndexingAgreementMetadataV1( + bytes32 _subgraphDeploymentId, + IndexingAgreement.IndexingAgreementTermsV1 memory _terms + ) internal pure returns (bytes memory) { + return + abi.encode( + IndexingAgreement.AcceptIndexingAgreementMetadata({ + subgraphDeploymentId: _subgraphDeploymentId, + version: IIndexingAgreement.IndexingAgreementVersion.V1, + terms: abi.encode(_terms) + }) + ); + } + + function _encodeUpdateIndexingAgreementMetadataV1( + IndexingAgreement.UpdateIndexingAgreementMetadata memory _t + ) internal pure returns (bytes memory) { + return abi.encode(_t); + } + + function _assertEqualAgreement( + IRecurringCollector.RecurringCollectionAgreement memory _expected, + IIndexingAgreement.AgreementWrapper memory _actual + ) internal pure { + assertEq(_expected.dataService, _actual.collectorAgreement.dataService); + assertEq(_expected.payer, _actual.collectorAgreement.payer); + assertEq(_expected.serviceProvider, _actual.collectorAgreement.serviceProvider); + assertEq(_expected.endsAt, _actual.collectorAgreement.endsAt); + assertEq(_expected.maxInitialTokens, 
_actual.collectorAgreement.maxInitialTokens); + assertEq(_expected.maxOngoingTokensPerSecond, _actual.collectorAgreement.maxOngoingTokensPerSecond); + assertEq(_expected.minSecondsPerCollection, _actual.collectorAgreement.minSecondsPerCollection); + assertEq(_expected.maxSecondsPerCollection, _actual.collectorAgreement.maxSecondsPerCollection); + } +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/update.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/update.t.sol new file mode 100644 index 000000000..b77d91644 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/update.t.sol @@ -0,0 +1,191 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; +import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; + +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; +import { IndexingAgreementDecoder } from "../../../../contracts/libraries/IndexingAgreementDecoder.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +contract SubgraphServiceIndexingAgreementUpgradeTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * TESTS + */ + + /* solhint-disable graph/func-name-mixedcase */ + function test_SubgraphService_UpdateIndexingAgreementIndexingAgreement_Revert_WhenPaused( + address operator, + IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, + bytes calldata authData + ) public withSafeIndexerOrOperator(operator) { + resetPrank(users.pauseGuardian); + subgraphService.pause(); + + 
resetPrank(operator); + vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + subgraphService.updateIndexingAgreement(operator, rcau, authData); + } + + function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenNotAuthorized( + address indexer, + address notAuthorized, + IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, + bytes calldata authData + ) public withSafeIndexerOrOperator(notAuthorized) { + vm.assume(notAuthorized != indexer); + resetPrank(notAuthorized); + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerNotAuthorized.selector, + indexer, + notAuthorized + ); + vm.expectRevert(expectedErr); + subgraphService.updateIndexingAgreement(indexer, rcau, authData); + } + + function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenInvalidProvision( + address indexer, + uint256 unboundedTokens, + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + bytes memory authData + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, 1, MINIMUM_PROVISION_TOKENS - 1); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + + bytes memory expectedErr = abi.encodeWithSelector( + ProvisionManager.ProvisionManagerInvalidValue.selector, + "tokens", + tokens, + MINIMUM_PROVISION_TOKENS, + MAXIMUM_PROVISION_TOKENS + ); + vm.expectRevert(expectedErr); + subgraphService.updateIndexingAgreement(indexer, rcau, authData); + } + + function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenIndexerNotRegistered( + address indexer, + uint256 unboundedTokens, + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + bytes memory authData + ) public withSafeIndexerOrOperator(indexer) { + uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); + mint(indexer, tokens); + resetPrank(indexer); + _createProvision(indexer, tokens, 
FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + + bytes memory expectedErr = abi.encodeWithSelector( + ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, + indexer + ); + vm.expectRevert(expectedErr); + subgraphService.updateIndexingAgreement(indexer, rcau, authData); + } + + function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenNotAccepted(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau, + bytes memory authData + ) = _generateAcceptableSignedRCAU(ctx, _generateAcceptableRecurringCollectionAgreement(ctx, indexerState.addr)); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNotActive.selector, + acceptableRcau.agreementId + ); + vm.expectRevert(expectedErr); + resetPrank(indexerState.addr); + subgraphService.updateIndexingAgreement(indexerState.addr, acceptableRcau, authData); + } + + function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenNotAuthorizedForAgreement( + Seed memory seed + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerStateA = _withIndexer(ctx); + IndexerState memory indexerStateB = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _withAcceptedIndexingAgreement( + ctx, + indexerStateA + ); + ( + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau, + bytes memory authData + ) = _generateAcceptableSignedRCAU(ctx, acceptedRca); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementNotAuthorized.selector, + acceptableRcau.agreementId, + indexerStateB.addr + ); + vm.expectRevert(expectedErr); + resetPrank(indexerStateB.addr); + subgraphService.updateIndexingAgreement(indexerStateB.addr, acceptableRcau, authData); + } + + function 
test_SubgraphService_UpdateIndexingAgreement_Revert_WhenInvalidMetadata(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _withAcceptedIndexingAgreement( + ctx, + indexerState + ); + IRecurringCollector.RecurringCollectionAgreementUpdate + memory acceptableUpdate = _generateAcceptableRecurringCollectionAgreementUpdate(ctx, acceptedRca); + acceptableUpdate.metadata = bytes("invalid"); + // Set correct nonce for first update (should be 1) + acceptableUpdate.nonce = 1; + ( + IRecurringCollector.RecurringCollectionAgreementUpdate memory unacceptableRcau, + bytes memory authData + ) = _recurringCollectorHelper.generateSignedRCAU(acceptableUpdate, ctx.payer.signerPrivateKey); + + bytes memory expectedErr = abi.encodeWithSelector( + IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, + "decodeRCAUMetadata", + unacceptableRcau.metadata + ); + vm.expectRevert(expectedErr); + resetPrank(indexerState.addr); + subgraphService.updateIndexingAgreement(indexerState.addr, unacceptableRcau, authData); + } + + function test_SubgraphService_UpdateIndexingAgreement_OK(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _withAcceptedIndexingAgreement( + ctx, + indexerState + ); + ( + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau, + bytes memory authData + ) = _generateAcceptableSignedRCAU(ctx, acceptedRca); + + IndexingAgreement.UpdateIndexingAgreementMetadata memory metadata = abi.decode( + acceptableRcau.metadata, + (IndexingAgreement.UpdateIndexingAgreementMetadata) + ); + + vm.expectEmit(address(subgraphService)); + emit IndexingAgreement.IndexingAgreementUpdated( + acceptedRca.serviceProvider, + acceptedRca.payer, + acceptableRcau.agreementId, 
+ indexerState.allocationId, + metadata.version, + metadata.terms + ); + + resetPrank(indexerState.addr); + subgraphService.updateIndexingAgreement(indexerState.addr, acceptableRcau, authData); + } + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/toolshed/src/deployments/horizon/actions.ts b/packages/toolshed/src/deployments/horizon/actions.ts index 8fc9bd4df..144342a82 100644 --- a/packages/toolshed/src/deployments/horizon/actions.ts +++ b/packages/toolshed/src/deployments/horizon/actions.ts @@ -62,15 +62,6 @@ export function loadActions(contracts: GraphHorizonContracts) { */ provision: (signer: HardhatEthersSigner, args: Parameters) => provision(contracts, signer, args), - /** - * [Legacy] Collects query fees from the Horizon staking contract - * Note that it will approve HorizonStaking to spend the tokens - * @param signer - The signer that will execute the collect transaction - * @param args Parameters: - * - `[tokens, allocationID]` - The collect parameters - */ - collect: (signer: HardhatEthersSigner, args: Parameters) => - collect(contracts, signer, args), /** * Delegates tokens in the Horizon staking contract * Note that it will approve HorizonStaking to spend the tokens @@ -157,18 +148,6 @@ async function provision( await HorizonStaking.connect(signer).provision(serviceProvider, verifier, tokens, maxVerifierCut, thawingPeriod) } -async function collect( - contracts: GraphHorizonContracts, - signer: HardhatEthersSigner, - args: Parameters, -) { - const { GraphToken, HorizonStaking } = contracts - const [tokens, allocationID] = args - - await GraphToken.connect(signer).approve(HorizonStaking.target, tokens) - await HorizonStaking.connect(signer).collect(tokens, allocationID) -} - async function delegate( contracts: GraphHorizonContracts, signer: HardhatEthersSigner,