Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
30 commits
Select commit Hold shift + click to select a range
c49ebe8
feat: upgrade to ethers v6, bump common-ts and use toolshed for contr…
tmigone May 16, 2025
db6b450
chore: add is horizon ready flag to network monitor (#1118)
Maikol May 21, 2025
9fbfacc
feat: support split address books (#1119)
tmigone May 22, 2025
70490e0
feat: update registration to support horizon flow, add provisioning f…
tmigone May 22, 2025
da8f53d
Horizon fixes (#1121)
tmigone May 23, 2025
0e6c962
Horizon: allocation management (#1123)
tmigone Jun 2, 2025
f508a92
Merge remote-tracking branch 'origin/main' into horizon
tmigone Jun 2, 2025
f9cc1ff
chore: add isLegacy column to actions table
tmigone Jun 2, 2025
d2bfd5b
chore: cli support isLegacy for actions
tmigone Jun 3, 2025
2b44aaa
wip: full allocation management
tmigone Jun 6, 2025
606a8e7
fix: allocation cli bugs
tmigone Jun 9, 2025
325d6fa
feat: add column wrapping to table format in cli
tmigone Jun 9, 2025
3d4562b
feat: add wrap to allocation commands
tmigone Jun 9, 2025
09898ce
feat: allocate and unallocate in action queue
tmigone Jun 9, 2025
65f6fd2
fix: action update serialization bug for bigints
tmigone Jun 10, 2025
dc590ce
feat: unallocate action queue management
tmigone Jun 10, 2025
557f002
chore: clean up todos
tmigone Jun 10, 2025
0e448be
wip: graph tally support
tmigone Jun 10, 2025
bff35ba
chore: update toolshed to 0.6.4
tmigone Jun 18, 2025
7697a93
fix: rewardsmanager events
tmigone Jun 18, 2025
112e1e8
fix: wip refactor of batch execution to account for non-atomic batches
tmigone Jun 18, 2025
3579e8e
chore: better tx filtering on executeTransaction
tmigone Jun 19, 2025
660ce64
feat: fix graphtally database model and add migrations
tmigone Jun 19, 2025
55d2284
fix: database migrations for horizon
tmigone Jun 20, 2025
19a4df1
feat: add graph-tally collector
tmigone Jun 24, 2025
30e3682
fix: tests in the horizon branch (#1135)
pcarranzav Jul 28, 2025
5a51888
chore: replace horizon and subgraph dependency with address book
tmigone Aug 5, 2025
ce21845
fix: ensure allocation keys derive from root wallet (#1136)
Maikol Aug 15, 2025
b97fafb
Merge branch 'main' into horizon
tmigone Aug 19, 2025
dd9cd06
chore: lint
tmigone Aug 19, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -120,3 +120,7 @@ yalc.lock
.idea/
.envrc
.vscode

# local-network override mode files
tap-contracts.json
config/config.yaml
239 changes: 136 additions & 103 deletions README.md

Large diffs are not rendered by default.

8 changes: 2 additions & 6 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -23,20 +23,16 @@
"lerna": "6.1.0"
},
"resolutions": {
"ethers": "5.7.0",
"ethers": "6.13.7",
"sequelize": "6.33.0",
"@ethersproject/bignumber": "5.7.0",
"@ethersproject/providers": "5.7.0",
"@urql/core": "3.1.0",
"@urql/exchange-execute": "2.1.0",
"punycode": "2.3.1",
"uri-js": "4.2.2"
},
"overrides": {
"ethers": "5.7.0",
"ethers": "6.13.7",
"sequelize": "6.33.0",
"@ethersproject/bignumber": "5.7.0",
"@ethersproject/providers": "5.7.0",
"@urql/core": "3.1.0",
"@urql/exchange-execute": "2.1.0",
"graphql": "16.8.0"
Expand Down
1 change: 1 addition & 0 deletions packages/indexer-agent/jest.config.js
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ module.exports = {
preset: 'ts-jest',
testEnvironment: 'node',
testPathIgnorePatterns: ['/node_modules/', '/dist/', '.yalc'],
transformIgnorePatterns: ['!node_modules/'],
globals: {
__DATABASE__: {
host:
Expand Down
12 changes: 5 additions & 7 deletions packages/indexer-agent/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -29,13 +29,13 @@
"graph-indexer-agent": "bin/graph-indexer-agent"
},
"dependencies": {
"@graphprotocol/common-ts": "2.0.11",
"@graphprotocol/indexer-common": "^0.24.3",
"@graphprotocol/common-ts": "3.0.1",
"@graphprotocol/indexer-common": "0.24.3",
"@thi.ng/heaps": "^1.3.1",
"axios": "0.26.1",
"bs58": "5.0.0",
"delay": "^5.0.0",
"ethers": "5.7.0",
"ethers": "6.13.7",
"evt": "1.9.12",
"global": "4.4.0",
"graphql": "16.8.0",
Expand Down Expand Up @@ -80,10 +80,8 @@
"typescript": "5.2.2"
},
"resolutions": {
"ethers": "5.7.0",
"sequelize": "6.33.0",
"@ethersproject/bignumber": "5.7.0",
"@ethersproject/providers": "5.7.0"
"ethers": "6.13.7",
"sequelize": "6.33.0"
},
"gitHead": "972ab96774007b2aee15b1da169d2ff4be9f9d27"
}
3 changes: 1 addition & 2 deletions packages/indexer-agent/src/__tests__/indexer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ import {
MultiNetworks,
loadTestYamlConfig,
} from '@graphprotocol/indexer-common'
import { BigNumber } from 'ethers'
import { Sequelize } from 'sequelize'

const TEST_DISPUTE_1: POIDisputeAttributes = {
Expand Down Expand Up @@ -69,7 +68,7 @@ const TEST_DISPUTE_2: POIDisputeAttributes = {

const POI_DISPUTES_CONVERTERS_FROM_GRAPHQL: Record<
keyof POIDisputeAttributes,
(x: never) => string | BigNumber | number | undefined
(x: never) => string | bigint | number | undefined
> = {
allocationID: x => x,
subgraphDeploymentID: x => x,
Expand Down
124 changes: 75 additions & 49 deletions packages/indexer-agent/src/agent.ts
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ import {
DeploymentManagementMode,
SubgraphStatus,
sequentialTimerMap,
HorizonTransitionValue,
} from '@graphprotocol/indexer-common'

import PQueue from 'p-queue'
Expand All @@ -47,7 +48,11 @@ import mapValues from 'lodash.mapvalues'
import zip from 'lodash.zip'
import { AgentConfigs, NetworkAndOperator } from './types'

type ActionReconciliationContext = [AllocationDecision[], number, number]
type ActionReconciliationContext = [
AllocationDecision[],
number,
HorizonTransitionValue,
]

const deploymentInList = (
list: SubgraphDeploymentID[],
Expand Down Expand Up @@ -233,6 +238,7 @@ export class Agent {
try {
await operator.ensureGlobalIndexingRule()
await this.ensureAllSubgraphsIndexing(network)
await network.provision()
await network.register()
} catch (err) {
this.logger.critical(
Expand Down Expand Up @@ -270,21 +276,22 @@ export class Agent {
},
)

const maxAllocationEpochs: Eventual<NetworkMapped<number>> =
sequentialTimerMap(
{ logger, milliseconds: requestIntervalLarge },
() =>
this.multiNetworks.map(({ network }) => {
logger.trace('Fetching max allocation epochs', {
protocolNetwork: network.specification.networkIdentifier,
})
return network.contracts.staking.maxAllocationEpochs()
}),
{
onError: error =>
logger.warn(`Failed to fetch max allocation epochs`, { error }),
},
)
const maxAllocationDuration: Eventual<
NetworkMapped<HorizonTransitionValue>
> = sequentialTimerMap(
{ logger, milliseconds: requestIntervalLarge },
() =>
this.multiNetworks.map(({ network }) => {
logger.trace('Fetching max allocation duration', {
protocolNetwork: network.specification.networkIdentifier,
})
return network.networkMonitor.maxAllocationDuration()
}),
{
onError: error =>
logger.warn(`Failed to fetch max allocation duration`, { error }),
},
)

const indexingRules: Eventual<NetworkMapped<IndexingRuleAttributes[]>> =
sequentialTimerMap(
Expand All @@ -304,9 +311,9 @@ export class Agent {
await network.networkMonitor.subgraphs(subgraphRuleIds)
if (subgraphsMatchingRules.length >= 1) {
const epochLength =
await network.contracts.epochManager.epochLength()
await network.contracts.EpochManager.epochLength()
const blockPeriod = 15
const bufferPeriod = epochLength.toNumber() * blockPeriod * 100 // 100 epochs
const bufferPeriod = Number(epochLength) * blockPeriod * 100 // 100 epochs
rules = convertSubgraphBasedRulesToDeploymentBased(
rules,
subgraphsMatchingRules,
Expand Down Expand Up @@ -486,7 +493,7 @@ export class Agent {
const matchingTransfer = eligibleTransferDeployments.find(
deployment =>
deployment.ipfsHash == decision.deployment.ipfsHash &&
deployment.startedTransferToL2At.toNumber() > oneWeekAgo,
Number(deployment.startedTransferToL2At) > oneWeekAgo,
)
if (matchingTransfer) {
logger.debug('Found a matching subgraph transfer', {
Expand Down Expand Up @@ -652,7 +659,7 @@ export class Agent {
join({
ticker: timer(requestIntervalLarge),
currentEpochNumber,
maxAllocationEpochs,
maxAllocationDuration,
activeDeployments,
targetDeployments,
activeAllocations,
Expand All @@ -662,7 +669,7 @@ export class Agent {
}).pipe(
async ({
currentEpochNumber,
maxAllocationEpochs,
maxAllocationDuration,
activeDeployments,
targetDeployments,
activeAllocations,
Expand All @@ -682,7 +689,8 @@ export class Agent {
currentEpochNumber: number,
) =>
currentEpochNumber -
network.specification.indexerOptions.poiDisputableEpochs,
(network.specification.indexerOptions
.poiDisputableEpochs as number),
)

// Find disputable allocations
Expand Down Expand Up @@ -744,7 +752,7 @@ export class Agent {
await this.reconcileActions(
networkDeploymentAllocationDecisions,
currentEpochNumber,
maxAllocationEpochs,
maxAllocationDuration,
)
} catch (err) {
logger.warn(`Exited early while reconciling actions`, {
Expand Down Expand Up @@ -821,22 +829,22 @@ export class Agent {
await network.networkProvider.getBlock(
pool.previousEpochStartBlockHash!,
)
pool.closedAtEpochStartBlockNumber = closedAtEpochStartBlock.number
pool.closedAtEpochStartBlockNumber = closedAtEpochStartBlock!.number
pool.referencePOI = await this.graphNode.proofOfIndexing(
pool.subgraphDeployment,
{
number: closedAtEpochStartBlock.number,
hash: closedAtEpochStartBlock.hash,
number: closedAtEpochStartBlock!.number,
hash: closedAtEpochStartBlock!.hash!,
},
pool.allocationIndexer,
)
pool.previousEpochStartBlockHash = previousEpochStartBlock.hash
pool.previousEpochStartBlockNumber = previousEpochStartBlock.number
pool.previousEpochStartBlockHash = previousEpochStartBlock!.hash!
pool.previousEpochStartBlockNumber = previousEpochStartBlock!.number
pool.referencePreviousPOI = await this.graphNode.proofOfIndexing(
pool.subgraphDeployment,
{
number: previousEpochStartBlock.number,
hash: previousEpochStartBlock.hash,
number: previousEpochStartBlock!.number,
hash: previousEpochStartBlock!.hash!,
},
pool.allocationIndexer,
)
Expand Down Expand Up @@ -1006,18 +1014,25 @@ export class Agent {
activeAllocations: Allocation[],
deploymentAllocationDecision: AllocationDecision,
epoch: number,
maxAllocationEpochs: number,
maxAllocationDuration: HorizonTransitionValue,
network: Network,
): Promise<Allocation[]> {
const desiredAllocationLifetime = deploymentAllocationDecision.ruleMatch
.rule?.allocationLifetime
? deploymentAllocationDecision.ruleMatch.rule.allocationLifetime
: Math.max(1, maxAllocationEpochs - 1)

// Identify expiring allocations
let expiredAllocations = activeAllocations.filter(
allocation =>
epoch >= allocation.createdAtEpoch + desiredAllocationLifetime,
async (allocation: Allocation) => {
let desiredAllocationLifetime: number = 0
if (allocation.isLegacy) {
desiredAllocationLifetime = deploymentAllocationDecision.ruleMatch
.rule?.allocationLifetime
? deploymentAllocationDecision.ruleMatch.rule.allocationLifetime
: Math.max(1, maxAllocationDuration.legacy - 1)
} else {
desiredAllocationLifetime = deploymentAllocationDecision.ruleMatch
.rule?.allocationLifetime
? deploymentAllocationDecision.ruleMatch.rule.allocationLifetime
: maxAllocationDuration.horizon
}
return epoch >= allocation.createdAtEpoch + desiredAllocationLifetime
},
)
// The allocations come from the network subgraph; due to short indexing
// latencies, this data may be slightly outdated. Cross-check with the
Expand All @@ -1027,9 +1042,17 @@ export class Agent {
expiredAllocations,
async (allocation: Allocation) => {
try {
const onChainAllocation =
await network.contracts.staking.getAllocation(allocation.id)
return onChainAllocation.closedAtEpoch.eq('0')
if (allocation.isLegacy) {
const onChainAllocation =
await network.contracts.LegacyStaking.getAllocation(allocation.id)
return onChainAllocation.closedAtEpoch == 0n
} else {
const onChainAllocation =
await network.contracts.SubgraphService.getAllocation(
allocation.id,
)
return onChainAllocation.closedAt == 0n
}
} catch (err) {
this.logger.warn(
`Failed to cross-check allocation state with contracts; assuming it needs to be closed`,
Expand All @@ -1050,7 +1073,7 @@ export class Agent {
deploymentAllocationDecision: AllocationDecision,
activeAllocations: Allocation[],
epoch: number,
maxAllocationEpochs: number,
maxAllocationDuration: HorizonTransitionValue,
network: Network,
operator: Operator,
): Promise<void> {
Expand All @@ -1060,6 +1083,8 @@ export class Agent {
epoch,
})

const isHorizon = await network.isHorizon.value()

// TODO: Can we replace `filter` for `find` here? Is there such a case when we
// would have multiple allocations for the same subgraph?
const activeDeploymentAllocations = activeAllocations.filter(
Expand Down Expand Up @@ -1110,6 +1135,7 @@ export class Agent {
logger,
deploymentAllocationDecision,
mostRecentlyClosedAllocation,
isHorizon,
)
}
} else if (activeDeploymentAllocations.length > 0) {
Expand All @@ -1126,7 +1152,7 @@ export class Agent {
activeDeploymentAllocations,
deploymentAllocationDecision,
epoch,
maxAllocationEpochs,
maxAllocationDuration,
network,
)
if (expiringAllocations.length > 0) {
Expand All @@ -1145,7 +1171,7 @@ export class Agent {
async reconcileActions(
networkDeploymentAllocationDecisions: NetworkMapped<AllocationDecision[]>,
epoch: NetworkMapped<number>,
maxAllocationEpochs: NetworkMapped<number>,
maxAllocationDuration: NetworkMapped<HorizonTransitionValue>,
): Promise<void> {
// --------------------------------------------------------------------------------
// Filter out networks set to `manual` allocation management mode, and ensure the
Expand Down Expand Up @@ -1198,14 +1224,14 @@ export class Agent {
this.multiNetworks.zip3(
validatedAllocationDecisions,
epoch,
maxAllocationEpochs,
maxAllocationDuration,
),
async (
{ network, operator }: NetworkAndOperator,
[
allocationDecisions,
epoch,
maxAllocationEpochs,
maxAllocationDuration,
]: ActionReconciliationContext,
) => {
// Do nothing if there are already approved actions in the queue awaiting execution
Expand All @@ -1230,7 +1256,7 @@ export class Agent {
this.logger.trace(`Reconcile allocation actions`, {
protocolNetwork: network.specification.networkIdentifier,
epoch,
maxAllocationEpochs,
maxAllocationDuration,
targetDeployments: allocationDecisions
.filter(decision => decision.toAllocate)
.map(decision => decision.deployment.ipfsHash),
Expand All @@ -1246,7 +1272,7 @@ export class Agent {
decision,
activeAllocations,
epoch,
maxAllocationEpochs,
maxAllocationDuration,
network,
operator,
),
Expand Down
Loading
Loading