|
| 1 | +package block |
| 2 | + |
| 3 | +import ( |
| 4 | + "context" |
| 5 | + "fmt" |
| 6 | + "math/big" |
| 7 | + "sort" |
| 8 | + "sync" |
| 9 | + "time" |
| 10 | + |
| 11 | + "github.com/pkg/errors" |
| 12 | + |
| 13 | + "github.com/smartcontractkit/chainlink-common/pkg/logger" |
| 14 | + "github.com/smartcontractkit/chainlink-evm/pkg/block/utils" |
| 15 | + evmclient "github.com/smartcontractkit/chainlink-evm/pkg/client" |
| 16 | + evmtypes "github.com/smartcontractkit/chainlink-evm/pkg/types" |
| 17 | +) |
| 18 | + |
// ArbitrumBlockTranslator uses Arbitrum's special L1BlockNumber to optimise log lookups
// Performance matters here hence aggressive use of the cache
// We want to minimise fetches because calling eth_getBlockByNumber is
// relatively expensive
type ArbitrumBlockTranslator struct {
	// ethClient fetches heads on demand (eth_getBlockByNumber).
	ethClient evmclient.Client
	lggr      logger.Logger
	// l2->l1 cache
	cache map[int64]int64
	// cacheMu guards every read/write of cache (see cacheGet/cachePut).
	cacheMu sync.RWMutex
	// l2Locks serialises fetches per individual L2 block number so the same
	// head is never fetched twice concurrently (see arbL2ToL1).
	l2Locks utils.KeyedMutex
}
| 31 | + |
| 32 | +// NewArbitrumBlockTranslator returns a concrete ArbitrumBlockTranslator |
| 33 | +func NewArbitrumBlockTranslator(ethClient evmclient.Client, lggr logger.Logger) *ArbitrumBlockTranslator { |
| 34 | + return &ArbitrumBlockTranslator{ |
| 35 | + ethClient, |
| 36 | + logger.Named(lggr, "ArbitrumBlockTranslator"), |
| 37 | + make(map[int64]int64), |
| 38 | + sync.RWMutex{}, |
| 39 | + utils.KeyedMutex{}, |
| 40 | + } |
| 41 | +} |
| 42 | + |
| 43 | +// NumberToQueryRange implements BlockTranslator interface |
| 44 | +func (a *ArbitrumBlockTranslator) NumberToQueryRange(ctx context.Context, changedInL1Block uint64) (fromBlock *big.Int, toBlock *big.Int) { |
| 45 | + var err error |
| 46 | + fromBlock, toBlock, err = a.BinarySearch(ctx, int64(changedInL1Block)) //nolint:gosec // disable G115 |
| 47 | + if err != nil { |
| 48 | + a.lggr.Warnw("Failed to binary search L2->L1, falling back to slow scan over entire chain", "err", err) |
| 49 | + return big.NewInt(0), nil |
| 50 | + } |
| 51 | + |
| 52 | + return |
| 53 | +} |
| 54 | + |
// BinarySearch uses both cache and RPC calls to find the smallest possible range of L2 block numbers that encompasses the given L1 block number
//
// Imagine as a virtual array of L1 block numbers indexed by L2 block numbers
// L1 values are likely duplicated so it looks something like
// [42, 42, 42, 42, 42, 155, 155, 155, 430, 430, 430, 430, 430, ...]
// Theoretical max difference between L1 values is typically about 5, "worst case" is 6545 but can be arbitrarily high if sequencer is broken
// The returned range of L2s from leftmost thru rightmost represent all possible L2s that correspond to the L1 value we are looking for
// nil can be returned as a rightmost value if the range has no upper bound
func (a *ArbitrumBlockTranslator) BinarySearch(ctx context.Context, targetL1 int64) (l2lowerBound *big.Int, l2upperBound *big.Int, err error) {
	// Instrumentation: total duration and the number of cache-missing
	// lookups (RPC round trips) are logged on exit via the deferred closure.
	mark := time.Now()
	var n int
	defer func() {
		duration := time.Since(mark)
		a.lggr.Debugw(fmt.Sprintf("BinarySearch completed in %s with %d total lookups", duration, n), "finishedIn", duration, "err", err, "nLookups", n)
	}()
	var h *evmtypes.Head

	// l2lower..l2upper is the inclusive range of L2 block numbers in which
	// transactions that called block.number will return the given L1 block
	// number
	var l2lower int64
	var l2upper int64

	var skipUpperBound bool

	// Seed the search bounds from the cache; only hit the RPC for the chain
	// head when the cache has no entry above targetL1.
	{
		var maybeL2Upper *int64
		l2lower, maybeL2Upper = a.reverseLookup(targetL1)
		if maybeL2Upper != nil {
			l2upper = *maybeL2Upper
		} else {
			// Initial query to get highest L1 and L2 numbers
			h, err = a.ethClient.HeadByNumber(ctx, nil)
			n++ // counts the HeadByNumber RPC call
			if err != nil {
				return nil, nil, err
			}
			if h == nil {
				return nil, nil, errors.New("got nil head")
			}
			if !h.L1BlockNumber.Valid {
				return nil, nil, errors.New("head was missing L1 block number")
			}
			currentL1 := h.L1BlockNumber.Int64
			currentL2 := h.Number

			a.cachePut(currentL2, currentL1)

			// NOTE: This case shouldn't ever happen but we ought to handle it in the least broken way possible
			if targetL1 > currentL1 {
				// real upper must always be nil, we can skip the upper limit part of the binary search
				a.lggr.Debugf("BinarySearch target of %d is above current L1 block number of %d, using nil for upper bound", targetL1, currentL1)
				return big.NewInt(currentL2), nil, nil
			} else if targetL1 == currentL1 {
				// NOTE: If the latest seen L2 block corresponds to the target L1
				// block, we have to leave the top end of the range open because future
				// L2 blocks can be produced that would also match
				skipUpperBound = true
			}
			l2upper = currentL2
		}
	}

	a.lggr.Debugf("TRACE: BinarySearch starting search for L2 range wrapping L1 block number %d between bounds [%d, %d]", targetL1, l2lower, l2upper)

	// exactMatch is set by either search closure when some probed L2 maps to
	// exactly targetL1; if neither search ever sees it, targetL1 has no
	// corresponding L2 at all and we error out below.
	var exactMatch bool

	// LEFT EDGE
	// First, use binary search to find the smallest L2 block number for which L1 >= changedInBlock
	// This L2 block number represents the lower bound on a range of L2s corresponding to this L1
	{
		// search's upper limit is exclusive, hence l2upper+1 to keep l2upper
		// itself a candidate.
		l2lower, err = search(l2lower, l2upper+1, func(l2 int64) (bool, error) {
			l1, miss, err2 := a.arbL2ToL1(ctx, l2)
			if miss {
				n++ // count only lookups that actually went to the RPC
			}
			if err2 != nil {
				return false, err2
			}
			if targetL1 == l1 {
				exactMatch = true
			}
			return l1 >= targetL1, nil
		})
		if err != nil {
			return nil, nil, err
		}
	}

	// RIGHT EDGE
	// Second, use binary search again to find the smallest L2 block number for which L1 > changedInBlock
	// Now we can subtract one to get the largest L2 that corresponds to this L1
	// This can be skipped if we know we are already at the top of the range, and the upper limit will be returned as nil
	if !skipUpperBound {
		var r int64
		r, err = search(l2lower, l2upper+1, func(l2 int64) (bool, error) {
			l1, miss, err2 := a.arbL2ToL1(ctx, l2)
			if miss {
				n++
			}
			if err2 != nil {
				return false, err2
			}
			if targetL1 == l1 {
				exactMatch = true
			}
			return l1 > targetL1, nil
		})
		if err != nil {
			return nil, nil, err
		}
		l2upper = r - 1
		l2upperBound = big.NewInt(l2upper)
	}

	// NOTE: We expect either left or right search to make an exact match, if they don't something has gone badly wrong
	if !exactMatch {
		return nil, nil, errors.Errorf("target L1 block number %d is not represented by any L2 block", targetL1)
	}
	return big.NewInt(l2lower), l2upperBound, nil
}
| 176 | + |
| 177 | +// reverseLookup takes an l1 and returns lower and upper bounds for an L2 based on cache data |
| 178 | +func (a *ArbitrumBlockTranslator) reverseLookup(targetL1 int64) (from int64, to *int64) { |
| 179 | + type val struct { |
| 180 | + l1 int64 |
| 181 | + l2 int64 |
| 182 | + } |
| 183 | + vals := make([]val, 0) |
| 184 | + |
| 185 | + a.cacheMu.RLock() |
| 186 | + defer a.cacheMu.RUnlock() |
| 187 | + |
| 188 | + for l2, l1 := range a.cache { |
| 189 | + vals = append(vals, val{l1, l2}) |
| 190 | + } |
| 191 | + |
| 192 | + sort.Slice(vals, func(i, j int) bool { return vals[i].l1 < vals[j].l1 }) |
| 193 | + |
| 194 | + for _, val := range vals { |
| 195 | + if val.l1 < targetL1 { |
| 196 | + from = val.l2 |
| 197 | + } else if val.l1 > targetL1 && to == nil { |
| 198 | + // workaround golang footgun; can't take a pointer to val |
| 199 | + l2 := val.l2 |
| 200 | + to = &l2 |
| 201 | + } |
| 202 | + } |
| 203 | + return |
| 204 | +} |
| 205 | + |
| 206 | +func (a *ArbitrumBlockTranslator) arbL2ToL1(ctx context.Context, l2 int64) (l1 int64, cacheMiss bool, err error) { |
| 207 | + // This locking block synchronises access specifically around one l2 number so we never fetch the same data concurrently |
| 208 | + // One thread will wait while the other fetches |
| 209 | + unlock := a.l2Locks.LockInt64(l2) |
| 210 | + defer unlock() |
| 211 | + |
| 212 | + var exists bool |
| 213 | + if l1, exists = a.cacheGet(l2); exists { |
| 214 | + return l1, false, err |
| 215 | + } |
| 216 | + |
| 217 | + h, err := a.ethClient.HeadByNumber(ctx, big.NewInt(l2)) |
| 218 | + if err != nil { |
| 219 | + return 0, true, err |
| 220 | + } |
| 221 | + if h == nil { |
| 222 | + return 0, true, errors.New("got nil head") |
| 223 | + } |
| 224 | + if !h.L1BlockNumber.Valid { |
| 225 | + return 0, true, errors.New("head was missing L1 block number") |
| 226 | + } |
| 227 | + l1 = h.L1BlockNumber.Int64 |
| 228 | + |
| 229 | + a.cachePut(l2, l1) |
| 230 | + |
| 231 | + return l1, true, nil |
| 232 | +} |
| 233 | + |
| 234 | +func (a *ArbitrumBlockTranslator) cacheGet(l2 int64) (l1 int64, exists bool) { |
| 235 | + a.cacheMu.RLock() |
| 236 | + defer a.cacheMu.RUnlock() |
| 237 | + l1, exists = a.cache[l2] |
| 238 | + return |
| 239 | +} |
| 240 | + |
| 241 | +func (a *ArbitrumBlockTranslator) cachePut(l2, l1 int64) { |
| 242 | + a.cacheMu.Lock() |
| 243 | + defer a.cacheMu.Unlock() |
| 244 | + a.cache[l2] = l1 |
| 245 | +} |
| 246 | + |
// stolen from golang standard library and modified for 64-bit ints,
// customisable range and erroring function
// see: https://golang.org/src/sort/search.go
//
// search returns the smallest value in [lo, hi) for which pred is true,
// or hi when pred is false everywhere in the range. pred must be monotone:
// false for some prefix of the range and true for the rest.
func search(lo, hi int64, pred func(int64) (bool, error)) (int64, error) {
	// Invariant: pred(lo-1) == false, pred(hi) == true (by definition at the
	// virtual boundaries).
	for lo < hi {
		mid := int64(uint64(lo+hi) >> 1) //nolint:gosec // disable G115 // avoid overflow when computing h
		ok, err := pred(mid)
		if err != nil {
			return 0, err
		}
		if ok {
			hi = mid // preserves pred(hi) == true
		} else {
			lo = mid + 1 // preserves pred(lo-1) == false
		}
	}
	// lo == hi is the smallest index satisfying pred.
	return lo, nil
}
0 commit comments