diff --git a/.tool-versions b/.tool-versions index 25e3e8a1..a50ce8eb 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,5 +1,5 @@ circleci 0.1.31425 -golang 1.23.6 +golang 1.24.5 golangci-lint 1.64.5 nodejs 18.17.0 postgres 14.2 diff --git a/docker-test-setup-api/.gitignore b/docker-test-setup-api/.gitignore new file mode 100644 index 00000000..3af0ccb6 --- /dev/null +++ b/docker-test-setup-api/.gitignore @@ -0,0 +1 @@ +/data diff --git a/docker-test-setup-api/00-build.sh b/docker-test-setup-api/00-build.sh new file mode 100755 index 00000000..838bf839 --- /dev/null +++ b/docker-test-setup-api/00-build.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +source ./common.sh + +$DC --profile dev build diff --git a/docker-test-setup-api/01-init-db.sh b/docker-test-setup-api/01-init-db.sh new file mode 100755 index 00000000..d0d26121 --- /dev/null +++ b/docker-test-setup-api/01-init-db.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +source ./common.sh + +mkdb() { + $DC exec -T db createdb -U postgres $1 + $DC run -T --rm --no-deps $1 initdb --config /config/${1}.toml +} + +$DC stop db +$DC rm -f db + +${BB} rm -rf data/db + +$DC up -d db +$DC run --rm --no-deps wait-for-db + +for cmd in keyper-0 keyper-1 keyper-2; do + mkdb $cmd & +done + +wait + +$DC stop db diff --git a/docker-test-setup-api/02-init-chain.sh b/docker-test-setup-api/02-init-chain.sh new file mode 100755 index 00000000..762f9c4a --- /dev/null +++ b/docker-test-setup-api/02-init-chain.sh @@ -0,0 +1,67 @@ +#!/usr/bin/env bash + +source ./common.sh + +$DC stop blockchain +$DC rm -f blockchain +$DC stop chain-{0..2}-validator chain-seed +$DC rm -f chain-{0..2}-validator chain-seed + +${BB} rm -rf data/chain-{0..2}-validator data/chain-seed +${BB} mkdir -p data/chain-{0..2}-validator/config data/chain-seed/config +${BB} chmod -R a+rwX data/chain-{0..2}-validator/config data/chain-seed/config +${BB} rm -rf data/deployments + +# has blockchain as dependency +$DC up -d blockchain +sleep 5 +$DC up deploy-contracts +$DC 
run --rm contract-scripts --broadcast --rpc-url http://blockchain:8545 ./script/DeployEventTriggerTestHelper.service.s.sol:DeployEventTriggerTestHelperScript + +bash get-contracts.sh +# setup chain-seed +$DC run --rm --no-deps chain-seed init \ + --root /chain \ + --blocktime 1 \ + --listen-address tcp://0.0.0.0:${TM_RPC_PORT} \ + --role seed + +seed_node=$(cat data/chain-seed/config/node_key.json.id)@chain-seed:${TM_P2P_PORT} + +${BB} sed -i "/^moniker/c\moniker = \"chain-seed\"" data/chain-seed/config/config.toml + +# configure validators and keypers 0-2 +for num in {0..2}; do + validator_cmd=chain-$num-validator + + $DC run --rm --no-deps ${validator_cmd} init \ + --root /chain \ + --genesis-keyper 0x440Dc6F164e9241F04d282215ceF2780cd0B755e \ + --blocktime 1 \ + --listen-address tcp://0.0.0.0:${TM_RPC_PORT} \ + --role validator + + validator_id=$(cat data/${validator_cmd}/config/node_key.json.id) + validator_node=${validator_id}@${validator_cmd}:${TM_P2P_PORT} + validator_config_path=data/${validator_cmd}/config/config.toml + + # share genesis + if [ $num -eq 0 ]; then + for destination in data/chain-seed/config/ data/chain-{1..2}-validator/config/; do + ${BB} cp -v data/chain-0-validator/config/genesis.json "${destination}" + done + fi + + # set validator publickey for keyper + ${BB} sed -i "/ValidatorPublicKey/c\ValidatorPublicKey = \"$(cat data/${validator_cmd}/config/priv_validator_pubkey.hex)\"" /config/keyper-${num}.toml + + # set seed node for chain bootstrap + ${BB} sed -i "/^seeds =/c\seeds = \"${seed_node}\"" "${validator_config_path}" + # fix external address for docker internal communication + ${BB} sed -i "/^external_address =/c\external_address = \"${validator_cmd}:${TM_P2P_PORT}\"" "${validator_config_path}" + # give a nice name + ${BB} sed -i "/^moniker/c\moniker = \"${validator_cmd}\"" "${validator_config_path}" + +done + +$DC stop -t 30 diff --git a/docker-test-setup-api/03-run.sh b/docker-test-setup-api/03-run.sh new file mode 100755 index 
00000000..bd86eb2e --- /dev/null +++ b/docker-test-setup-api/03-run.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +source ./common.sh + +echo "Starting entire system" +$DC up -d diff --git a/docker-test-setup-api/04-addkeyperset.sh b/docker-test-setup-api/04-addkeyperset.sh new file mode 100755 index 00000000..75c5db18 --- /dev/null +++ b/docker-test-setup-api/04-addkeyperset.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash + +source ./common.sh +source .env +set -e + +CONTRACTS_JSON=$(jq '.transactions[]|(select(.function==null))|{(.contractName|tostring): .contractAddress}' data/deployments/Deploy.service.s.sol/31337/run-latest.json) + +for s in $(echo ${CONTRACTS_JSON} | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")| .[] "); do + export $s +done + +# Get keyper addresses from node-deploy.json +export KEYPER_ADDRESSES=$(jq -r '.keypers[0] | join(",")' config/node-deploy.json) + +echo "Submitting Add Keyper Set transaction" +export THRESHOLD=2 +export KEYPERSETMANAGER_ADDRESS=${KeyperSetManager} +export KEYBROADCAST_ADDRESS=${KeyBroadcastContract} +export ACTIVATION_DELTA=10 + +$DC run --rm --no-deps add-keyper-set diff --git a/docker-test-setup-api/05-bootstrap.sh b/docker-test-setup-api/05-bootstrap.sh new file mode 100755 index 00000000..76fee6fd --- /dev/null +++ b/docker-test-setup-api/05-bootstrap.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +source ./common.sh + +echo "Submitting bootstrap transaction" + +$DC run --rm --no-deps --entrypoint /rolling-shutter chain-0-validator op-bootstrap fetch-keyperset \ + --config /config/op-bootstrap.toml + +$DC run --rm --no-deps --entrypoint /rolling-shutter chain-0-validator op-bootstrap \ + --config /config/op-bootstrap.toml diff --git a/docker-test-setup-api/06-test-decryption.sh b/docker-test-setup-api/06-test-decryption.sh new file mode 100755 index 00000000..af83183f --- /dev/null +++ b/docker-test-setup-api/06-test-decryption.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +source ./common.sh +set -e + +echo "Submitting 
identity registration transaction" + +CONTRACTS_JSON=$(jq '.transactions[]|(select(.function==null))|{(.contractName|tostring): .contractAddress}' data/deployments/Deploy.service.s.sol/31337/run-latest.json) + +for s in $(echo ${CONTRACTS_JSON} | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")| .[] "); do + export $s +done + +export TIMESTAMP=$(($(date +%s) + 50)) +export IDENTITY_PREFIX=0x$(LC_ALL=C tr -dc 'a-f0-9' /dev/null 2>&1; then + DC="docker compose" +else + DC=docker-compose +fi + +set -xe diff --git a/docker-test-setup-api/config/bootstrap.toml b/docker-test-setup-api/config/bootstrap.toml new file mode 100644 index 00000000..dcac06ff --- /dev/null +++ b/docker-test-setup-api/config/bootstrap.toml @@ -0,0 +1,21 @@ +# Peer identity: /p2p/12D3KooWJN7262vmnEQHkYG7VrZDwz9fMyJtHyvGp4XSenuUYfeJ +# Peer role: bootstrap + +# whether to register handlers on the messages and log them +InstanceID = 0 +ListenMessages = true + +[P2P] +P2PKey = "CAESQKFtiMAqd2c8bQ/mfPStxViY970MNtWUVWdn44rUoQXAfv7ztSQ9nLeqliXrkuqKi3XUObyAfH+Py3eMbHFvIpM=" +ListenAddresses = ["/ip4/0.0.0.0/tcp/23000"] +# Overwrite p2p boostrap nodes +CustomBootstrapAddresses = [ + "/dns4/bootnode-0/tcp/23000/p2p/12D3KooWJN7262vmnEQHkYG7VrZDwz9fMyJtHyvGp4XSenuUYfeJ", +] +IsAccessNode = false +DiscoveryNamespace = "shutter-local" + +[P2P.FloodSubDiscovery] +Enabled = false +Interval = 10 +Topics = [] \ No newline at end of file diff --git a/docker-test-setup-api/config/keyper-0.toml b/docker-test-setup-api/config/keyper-0.toml new file mode 100644 index 00000000..9793dc1d --- /dev/null +++ b/docker-test-setup-api/config/keyper-0.toml @@ -0,0 +1,55 @@ +# Peer identity: /p2p/12D3KooWQ8iKQe4iEfkTh3gdBtpxWsKwx3BwrA18km5rq3Zwt2QF +# Ethereum address: 0xCDD50A6F9B1439dc14c4f2A7eaF14dA1EF5A476c + + +InstanceID = 0 +# If it's empty, we use the standard PG_ environment variables +DatabaseURL = "postgres://postgres@db:5432/keyper-0" +HTTPEnabled = false +HTTPListenAddress = ':3000' +MaxNumKeysPerMessage = 500 
+ +[P2P] +P2PKey = 'CAESQJ3NdZ6mRrDAW/Z59OKwcKdOCbZQ45z5o8K+tLHOL8Xw1LbawPZLk3mXNyiDyADcLk1bqYMe3uQ6T8xi65zkM0A=' +ListenAddresses = ["/ip4/0.0.0.0/tcp/23000"] +# Overwrite p2p boostrap nodes +CustomBootstrapAddresses = [ + "/dns4/bootnode-0/tcp/23000/p2p/12D3KooWJN7262vmnEQHkYG7VrZDwz9fMyJtHyvGp4XSenuUYfeJ", +] +DiscoveryNamespace = 'shutter-local' +# Optional, to be set to true if running an access node +IsAccessNode = false + +[P2P.FloodSubDiscovery] +Enabled = false +Interval = 10 +Topics = [] + +[Chain] +SyncStartBlockNumber = 0 +SyncMonitorCheckInterval = 60 + +[Chain.Node] +PrivateKey = '82904d1c48d3a27d218408fc2db3e743f554a69b05b91d28c2897a9026ea47df' +# Contract source directory +DeploymentDir = '/deployments/localhost/' +# The layer 1 JSON RPC endpoint +EthereumURL = "ws://blockchain:8545/" + +[Chain.Contracts] +KeyperSetManager = "0x5fbdb2315678afecb367f032d93f642f64180aa3" +ShutterRegistry = "0x0165878a594ca255338adfa4d48449f69242eb8f" +ShutterEventTriggerRegistry = "0xa513e6e4b8f2a923d98304ec87f64353c4d5c853" +KeyBroadcastContract = "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707" + +[Shuttermint] +ShuttermintURL = "http://chain-0-validator:26657" +ValidatorPublicKey = "f015590ac9416b1b51d81a5cdb3756c2a5194af42d376c7abc2fa833854cd0df" +EncryptionKey = '6bf7e6c8d6753d435f885f398f62f221a84f0ccb0c5e2a382290489441e16f77' +DKGPhaseLength = 8 +DKGStartBlockDelta = 5 + +[Metrics] +Enabled = true +Host = "[::]" +Port = 9100 diff --git a/docker-test-setup-api/config/keyper-1.toml b/docker-test-setup-api/config/keyper-1.toml new file mode 100644 index 00000000..d0f2ebd8 --- /dev/null +++ b/docker-test-setup-api/config/keyper-1.toml @@ -0,0 +1,55 @@ +# Peer identity: /p2p/12D3KooWGksg5G2oau6EgdJFLiQoKaMvzPJnTCuoPScZcmZRdkny +# Ethereum address: 0x539cF80D345d26525A47dB80De0fAb147D588fDa + + +InstanceID = 0 +# If it's empty, we use the standard PG_ environment variables +DatabaseURL = "postgres://postgres@db:5432/keyper-1" +HTTPEnabled = false +HTTPListenAddress = 
':3000' +MaxNumKeysPerMessage = 500 + +[P2P] +P2PKey = 'CAESQO+t2CR93jdMq/FDqZf2+KIY9kLhKA1rZY9WFherSqvZZxzY8W4y5hSBrW5u79SDCvbLcmo7kEwu6VsK0NjZnxY=' +ListenAddresses = ["/ip4/0.0.0.0/tcp/23000"] +# Overwrite p2p boostrap nodes +CustomBootstrapAddresses = [ + "/dns4/bootnode-0/tcp/23000/p2p/12D3KooWJN7262vmnEQHkYG7VrZDwz9fMyJtHyvGp4XSenuUYfeJ", +] +DiscoveryNamespace = 'shutter-local' +# Optional, to be set to true if running an access node +IsAccessNode = false + +[P2P.FloodSubDiscovery] +Enabled = false +Interval = 10 +Topics = [] + +[Chain] +SyncStartBlockNumber = 0 +SyncMonitorCheckInterval = 60 + +[Chain.Node] +PrivateKey = '939babbad75cbcc42eef92496ce86ede989ba96918bbc6cc0efcc498f9cc0887' +# Contract source directory +DeploymentDir = '/deployments/localhost/' +# The layer 1 JSON RPC endpoint +EthereumURL = "ws://blockchain:8545/" + +[Chain.Contracts] +KeyperSetManager = "0x5fbdb2315678afecb367f032d93f642f64180aa3" +ShutterRegistry = "0x0165878a594ca255338adfa4d48449f69242eb8f" +ShutterEventTriggerRegistry = "0xa513e6e4b8f2a923d98304ec87f64353c4d5c853" +KeyBroadcastContract = "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707" + +[Shuttermint] +ShuttermintURL = "http://chain-0-validator:26657" +ValidatorPublicKey = "2f2706f1e80406a7c9cba52ca917421b835f54820fa3fc90c5763fe84e1a4e89" +EncryptionKey = 'd10fcd3a9db97ecf80a41b5fb30bb91ea16bf6a8575e389912a335b19cc4b3b8' +DKGPhaseLength = 8 +DKGStartBlockDelta = 5 + +[Metrics] +Enabled = true +Host = "[::]" +Port = 9100 diff --git a/docker-test-setup-api/config/keyper-2.toml b/docker-test-setup-api/config/keyper-2.toml new file mode 100644 index 00000000..860c6e67 --- /dev/null +++ b/docker-test-setup-api/config/keyper-2.toml @@ -0,0 +1,55 @@ +# Peer identity: /p2p/12D3KooWLwtKMLJqRGWB3AGi87u8Sc7hwp6a6PDMAyWEAsHnjGJG +# Ethereum address: 0x4F01A5A4Ef09c08Df83A85885516424A4a53be68 + + +InstanceID = 0 +# If it's empty, we use the standard PG_ environment variables +DatabaseURL = "postgres://postgres@db:5432/keyper-2" 
+HTTPEnabled = false +HTTPListenAddress = ':3000' +MaxNumKeysPerMessage = 500 + +[P2P] +P2PKey = 'CAESQJrsyuTYZZpjqfbXTtMbrQOuS1XtzD+M6ssAZLo5R26fpV218+TTRXw4mkAdaxWpIDX0ZEQH5NC8A8+1nWvbF2U=' +ListenAddresses = ["/ip4/0.0.0.0/tcp/23000"] +# Overwrite p2p boostrap nodes +CustomBootstrapAddresses = [ + "/dns4/bootnode-0/tcp/23000/p2p/12D3KooWJN7262vmnEQHkYG7VrZDwz9fMyJtHyvGp4XSenuUYfeJ", +] +DiscoveryNamespace = 'shutter-local' +# Optional, to be set to true if running an access node +IsAccessNode = false + +[P2P.FloodSubDiscovery] +Enabled = false +Interval = 10 +Topics = [] + +[Chain] +SyncStartBlockNumber = 0 +SyncMonitorCheckInterval = 60 + +[Chain.Node] +PrivateKey = 'e9383a24352f05bc11895c0da19efb6b83c726f05643c38f64b4146f19215125' +# Contract source directory +DeploymentDir = '/deployments/localhost/' +# The layer 1 JSON RPC endpoint +EthereumURL = "ws://blockchain:8545/" + +[Chain.Contracts] +KeyperSetManager = "0x5fbdb2315678afecb367f032d93f642f64180aa3" +ShutterRegistry = "0x0165878a594ca255338adfa4d48449f69242eb8f" +ShutterEventTriggerRegistry = "0xa513e6e4b8f2a923d98304ec87f64353c4d5c853" +KeyBroadcastContract = "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707" + +[Shuttermint] +ShuttermintURL = "http://chain-0-validator:26657" +ValidatorPublicKey = "2059d619d5dffb92fc57c9e16f739070012e03c3edcf3ea7700564755a58c5d1" +EncryptionKey = '782629977650cc003b122a611aa4d2e075fcfbc8bb92a49a832ca518c87b6ce8' +DKGPhaseLength = 8 +DKGStartBlockDelta = 5 + +[Metrics] +Enabled = true +Host = "[::]" +Port = 9100 diff --git a/docker-test-setup-api/config/node-deploy.json b/docker-test-setup-api/config/node-deploy.json new file mode 100644 index 00000000..892dbd24 --- /dev/null +++ b/docker-test-setup-api/config/node-deploy.json @@ -0,0 +1,9 @@ +{ + "keypers": [ + [ + "0xCDD50A6F9B1439dc14c4f2A7eaF14dA1EF5A476c", + "0x539cF80D345d26525A47dB80De0fAb147D588fDa", + "0x4F01A5A4Ef09c08Df83A85885516424A4a53be68" + ] + ] +} diff --git a/docker-test-setup-api/config/op-bootstrap.toml 
b/docker-test-setup-api/config/op-bootstrap.toml new file mode 100644 index 00000000..7f3e5ce6 --- /dev/null +++ b/docker-test-setup-api/config/op-bootstrap.toml @@ -0,0 +1,7 @@ +InstanceID = 42 +JSONRPCURL = 'ws://blockchain:8545' +KeyperSetManager = "0x5fbdb2315678afecb367f032d93f642f64180aa3" +ByIndex = 1 +KeyperSetFilePath = '/config/keyperset.json' +ShuttermintURL = 'http://chain-0-validator:26657' +SigningKey = "479968ffa5ee4c84514a477a8f15f3db0413964fd4c20b08a55fed9fed790fad" diff --git a/docker-test-setup-api/docker-compose.yml b/docker-test-setup-api/docker-compose.yml new file mode 100644 index 00000000..e6fb4ce1 --- /dev/null +++ b/docker-test-setup-api/docker-compose.yml @@ -0,0 +1,274 @@ +x-logging: &logging + driver: local + options: + max-file: 10 + +services: + rs-build: + build: + dockerfile: docker/build-src/rolling-shutter/Dockerfile + context: .. + image: rolling-shutter + command: "-v" + + db: + image: postgres + restart: always + environment: + POSTGRES_HOST_AUTH_METHOD: trust + volumes: + - ./data/db:/var/lib/postgresql/data + healthcheck: + test: pg_isready -U postgres + start_period: "30s" + start_interval: "2s" + logging: *logging + + blockchain: + build: + dockerfile: docker-test-setup-api/build-src/setup-blockchain/Dockerfile + context: .. + restart: unless-stopped + ports: + - "8545:8545" + volumes: + - ./data/blockchain:/data + # currently health checks fail + healthcheck: + test: > + curl -sSf -X POST http://127.0.0.1:8545 -H "Content-Type: application/json" --data-raw '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[], "id": 1}' | grep -o '"result":"0x' + start_period: "30s" + start_interval: "2s" + logging: *logging + + deploy-contracts: + build: + dockerfile: docker-test-setup-api/build-src/deploy_contracts/Dockerfile + context: .. 
+ command: + - "./script/Deploy.service.s.sol" + - "--rpc-url" + - "http://blockchain:8545" + - "--broadcast" + environment: + DEPLOY_KEY: ${DEPLOY_KEY} + PRIVATE_KEY: ${DEPLOY_KEY} + volumes: + - ./data/deployments:/contracts/broadcast + - ./config:/config + depends_on: + - blockchain + logging: *logging + + add-keyper-set: + build: + dockerfile: docker-test-setup-api/build-src/deploy_contracts/Dockerfile + context: .. + command: + - "./script/AddKeyperSet.s.sol" + - "--rpc-url" + - "http://blockchain:8545" + - "--broadcast" + environment: + DEPLOY_KEY: ${DEPLOY_KEY} + PRIVATE_KEY: ${DEPLOY_KEY} + ACTIVATION_DELTA: ${ACTIVATION_DELTA} + KEYPERSETMANAGER_ADDRESS: ${KEYPERSETMANAGER_ADDRESS} + KEYBROADCAST_ADDRESS: ${KEYBROADCAST_ADDRESS} + KEYPER_ADDRESSES: ${KEYPER_ADDRESSES} + THRESHOLD: ${THRESHOLD} + volumes: + - ./data/deployments:/contracts/broadcast + - ./config:/config + depends_on: + - deploy-contracts + - blockchain + logging: *logging + + contract-scripts: + profiles: + - maintenance + build: + dockerfile: docker-test-setup-api/build-src/deploy_contracts/Dockerfile + context: .. + command: + - "--rpc-url" + - "http://blockchain:8545" + environment: + DEPLOY_KEY: ${DEPLOY_KEY} + TX_SENDER_KEY: ${DEPLOY_KEY} + volumes: + - ./data/deployments:/contracts/broadcast + - ./config:/config + logging: *logging + + register-identity: + build: + dockerfile: docker-test-setup-api/build-src/deploy_contracts/Dockerfile + context: .. 
+ command: + - "./script/SubmitTransaction.service.s.sol" + - "--rpc-url" + - "http://blockchain:8545" + - "--broadcast" + environment: + TX_SENDER_KEY: ${DEPLOY_KEY} + REGISTRY_ADDRESS: ${ShutterRegistry} + EON: 1 + IDENTITY_PREFIX: ${IDENTITY_PREFIX} + TIMESTAMP: ${TIMESTAMP} + volumes: + - ./data/deployments:/contracts/broadcast + - ./config:/config + depends_on: + - blockchain + logging: *logging + + chain-0-validator: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - chain + volumes: + - ./data/chain-0-validator:/chain + - ./data/deployments:/deployments + - ./config:/config + command: "--config /chain/config/config.toml" + depends_on: + - deploy-contracts + logging: *logging + + chain-1-validator: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - chain + volumes: + - ./data/chain-1-validator:/chain + - ./data/deployments:/deployments + command: "--config /chain/config/config.toml" + depends_on: + - deploy-contracts + logging: *logging + + chain-2-validator: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - chain + volumes: + - ./data/chain-2-validator:/chain + - ./data/deployments:/deployments + command: "--config /chain/config/config.toml" + depends_on: + - deploy-contracts + logging: *logging + + chain-seed: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - chain + volumes: + - ./data/chain-seed:/chain + command: "--config /chain/config/config.toml" + logging: *logging + + bootnode-0: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - p2pnode + command: "--config /config/bootstrap.toml" + volumes: + - ./config:/config + - ./data/deployments:/deployments + depends_on: + db: + condition: service_healthy + logging: *logging + + keyper-0: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - 
"--loglevel=:debug,basichost:info,swarm:info,swarm2:info,net/identify:info,pubsub:info,p2p-config:info,upgrader:info,dht:info" + - shutterservicekeyper + command: "--config /config/keyper-0.toml" + volumes: + - ./config:/config + - ./data/deployments:/deployments + ports: + - "9200:9100" + depends_on: + db: + condition: service_healthy + bootnode-0: + condition: service_started + chain-0-validator: + condition: service_started + logging: *logging + + keyper-1: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - shutterservicekeyper + command: "--config /config/keyper-1.toml" + volumes: + - ./config:/config + - ./data/deployments:/deployments + ports: + - "9201:9100" + depends_on: + db: + condition: service_healthy + bootnode-0: + condition: service_started + chain-1-validator: + condition: service_started + logging: *logging + + keyper-2: + image: rolling-shutter + restart: always + entrypoint: + - /rolling-shutter + - shutterservicekeyper + command: "--config /config/keyper-2.toml" + volumes: + - ./config:/config + - ./data/deployments:/deployments + ports: + - "9202:9100" + depends_on: + db: + condition: service_healthy + bootnode-0: + condition: service_started + chain-2-validator: + condition: service_started + logging: *logging + + wait-for-db: + image: postgres:latest + profiles: + - script + command: > + sh -c ' + until pg_isready -h db -p 5432 -U postgres; do + echo "Waiting for db to be ready..." + sleep 1 + done + echo "db is ready" + ' + depends_on: + - db + logging: *logging diff --git a/docker-test-setup-api/get-contracts.sh b/docker-test-setup-api/get-contracts.sh new file mode 100755 index 00000000..e53d07ab --- /dev/null +++ b/docker-test-setup-api/get-contracts.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +## Needs to be bash, for the variable expansion to work! 
+source ./common.sh +source .env +set -e + +CONTRACTS_JSON=$(jq '.transactions[]|(select(.function==null))|{(.contractName|tostring): .contractAddress}' data/deployments/Deploy.service.s.sol/31337/run-latest.json) +#echo ${CONTRACTS_JSON} | jq -r ".[]|to_entries" + +for s in $(echo ${CONTRACTS_JSON} | jq -r "to_entries|map(\"\(.key)=\(.value|tostring)\")| .[] "); do + export $s +done + +for cfg in keyper-{0..2}.toml; do + config_path=config/${cfg} + echo $config_path + + for name in KeyperSetManager KeyperSet KeyBroadcastContract ShutterRegistry ShutterEventTriggerRegistry; do + key=$name + value="${!name}" + ${BB} sed -i "/^$key =/c$key = \"$value\"" "${config_path}" + done +done + +echo "Setting up bootstrap.toml and op-bootstrap.toml" +${BB} sed -i "/^KeyperSetManager =/cKeyperSetManager = \"${KeyperSetManager}\"" "config/bootstrap.toml" +${BB} sed -i "/^KeyperSetManager =/cKeyperSetManager = \"${KeyperSetManager}\"" "config/op-bootstrap.toml" diff --git a/rolling-shutter/go.mod b/rolling-shutter/go.mod index ead1c91b..85932595 100644 --- a/rolling-shutter/go.mod +++ b/rolling-shutter/go.mod @@ -32,7 +32,7 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.22.0 github.com/rs/zerolog v1.28.0 - github.com/shutter-network/contracts/v2 v2.0.0-beta.2.0.20250108084129-3b743179373a + github.com/shutter-network/contracts/v2 v2.0.0-beta.2.0.20250817075507-b37d6f4f2cc8 github.com/shutter-network/gnosh-contracts v0.4.0 github.com/shutter-network/shop-contracts v0.0.0-20240407151512-08ef5d8355b6 github.com/shutter-network/shutter/shlib v0.1.19 diff --git a/rolling-shutter/go.sum b/rolling-shutter/go.sum index 4fe2175c..4e830df2 100644 --- a/rolling-shutter/go.sum +++ b/rolling-shutter/go.sum @@ -873,8 +873,8 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go. 
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/shutter-network/contracts/v2 v2.0.0-beta.2.0.20250108084129-3b743179373a h1:hWw2nJLGPZU7Rvd6YmEEDfvlTUbwZKZl5UP2ThVVNSU= -github.com/shutter-network/contracts/v2 v2.0.0-beta.2.0.20250108084129-3b743179373a/go.mod h1:V8KhVM75wyWVSzZJ6GeC9dWCjRrinIQVb7mYNP+knbg= +github.com/shutter-network/contracts/v2 v2.0.0-beta.2.0.20250817075507-b37d6f4f2cc8 h1:f2gnaYNSRH+/YitvOF0NxJrOZv2LNC0LPWWXNbo8xW8= +github.com/shutter-network/contracts/v2 v2.0.0-beta.2.0.20250817075507-b37d6f4f2cc8/go.mod h1:V8KhVM75wyWVSzZJ6GeC9dWCjRrinIQVb7mYNP+knbg= github.com/shutter-network/gnosh-contracts v0.4.0 h1:2GJcHK9w4lJZMsccklmxDhNnrkRLJDRwsL1acBnAeak= github.com/shutter-network/gnosh-contracts v0.4.0/go.mod h1:QB0d64ybbVFKMrLjrc1tldri87KNjTmKQjhk9jaso2E= github.com/shutter-network/shop-contracts v0.0.0-20240407151512-08ef5d8355b6 h1:m6Ti1/IH+GBTtGqyAX3xbh+ruUKvC+m+/uzYDUa+JDQ= diff --git a/rolling-shutter/keyperimpl/shutterservice/config.go b/rolling-shutter/keyperimpl/shutterservice/config.go index c1df5f35..c496dc59 100644 --- a/rolling-shutter/keyperimpl/shutterservice/config.go +++ b/rolling-shutter/keyperimpl/shutterservice/config.go @@ -81,6 +81,10 @@ func (c *Config) GetAddress() common.Address { return c.Chain.Node.PrivateKey.EthereumAddress() } +func (c *Config) EventBasedTriggersEnabled() bool { + return c.Chain.Contracts.ShutterEventTriggerRegistry != common.Address{} +} + type ChainConfig struct { Node *configuration.EthnodeConfig `shconfig:",required"` Contracts *ContractsConfig `shconfig:",required"` @@ -128,9 +132,10 @@ func (c *ChainConfig) TOMLWriteHeader(_ io.Writer) (int, error) { } type ContractsConfig struct { - 
KeyperSetManager common.Address `shconfig:",required"` - ShutterRegistry common.Address `shconfig:",required"` - KeyBroadcastContract common.Address `shconfig:",required"` + KeyperSetManager common.Address `shconfig:",required"` + ShutterRegistry common.Address `shconfig:",required"` + ShutterEventTriggerRegistry common.Address + KeyBroadcastContract common.Address `shconfig:",required"` } func NewContractsConfig() *ContractsConfig { diff --git a/rolling-shutter/keyperimpl/shutterservice/database/models.sqlc.gen.go b/rolling-shutter/keyperimpl/shutterservice/database/models.sqlc.gen.go index e62bcae0..3c1b1fb4 100644 --- a/rolling-shutter/keyperimpl/shutterservice/database/models.sqlc.gen.go +++ b/rolling-shutter/keyperimpl/shutterservice/database/models.sqlc.gen.go @@ -17,6 +17,30 @@ type DecryptionSignature struct { Signature []byte } +type EventTriggerRegisteredEvent struct { + BlockNumber int64 + BlockHash []byte + TxIndex int64 + LogIndex int64 + Eon int64 + IdentityPrefix []byte + Sender string + Definition []byte + Ttl int64 + Decrypted bool + Identity []byte +} + +type FiredTrigger struct { + Eon int64 + IdentityPrefix []byte + Sender string + BlockNumber int64 + BlockHash []byte + TxIndex int64 + LogIndex int64 +} + type IdentityRegisteredEvent struct { BlockNumber int64 BlockHash []byte @@ -35,3 +59,9 @@ type IdentityRegisteredEventsSyncedUntil struct { BlockHash []byte BlockNumber int64 } + +type MultiEventSyncStatus struct { + EnforceOneRow bool + BlockNumber int64 + BlockHash []byte +} diff --git a/rolling-shutter/keyperimpl/shutterservice/database/shutterservice.sqlc.gen.go b/rolling-shutter/keyperimpl/shutterservice/database/shutterservice.sqlc.gen.go index b860afe6..173bae2e 100644 --- a/rolling-shutter/keyperimpl/shutterservice/database/shutterservice.sqlc.gen.go +++ b/rolling-shutter/keyperimpl/shutterservice/database/shutterservice.sqlc.gen.go @@ -11,6 +11,24 @@ import ( "github.com/jackc/pgconn" ) +const 
deleteEventTriggerRegisteredEventsFromBlockNumber = `-- name: DeleteEventTriggerRegisteredEventsFromBlockNumber :exec +DELETE FROM event_trigger_registered_event WHERE block_number >= $1 +` + +func (q *Queries) DeleteEventTriggerRegisteredEventsFromBlockNumber(ctx context.Context, blockNumber int64) error { + _, err := q.db.Exec(ctx, deleteEventTriggerRegisteredEventsFromBlockNumber, blockNumber) + return err +} + +const deleteFiredTriggersFromBlockNumber = `-- name: DeleteFiredTriggersFromBlockNumber :exec +DELETE FROM fired_triggers WHERE block_number >= $1 +` + +func (q *Queries) DeleteFiredTriggersFromBlockNumber(ctx context.Context, blockNumber int64) error { + _, err := q.db.Exec(ctx, deleteFiredTriggersFromBlockNumber, blockNumber) + return err +} + const deleteIdentityRegisteredEventsFromBlockNumber = `-- name: DeleteIdentityRegisteredEventsFromBlockNumber :exec DELETE FROM identity_registered_event WHERE block_number >= $1 ` @@ -20,6 +38,49 @@ func (q *Queries) DeleteIdentityRegisteredEventsFromBlockNumber(ctx context.Cont return err } +const getActiveEventTriggerRegisteredEvents = `-- name: GetActiveEventTriggerRegisteredEvents :many +SELECT block_number, block_hash, tx_index, log_index, eon, identity_prefix, sender, definition, ttl, decrypted, identity FROM event_trigger_registered_event e +WHERE e.block_number + ttl >= $1 -- TTL not expired at given block +AND e.decrypted = false -- not decrypted yet +AND NOT EXISTS ( -- not fired yet + SELECT 1 FROM fired_triggers t + WHERE t.identity_prefix = e.identity_prefix + AND t.sender = e.sender +) +` + +func (q *Queries) GetActiveEventTriggerRegisteredEvents(ctx context.Context, blockNumber int64) ([]EventTriggerRegisteredEvent, error) { + rows, err := q.db.Query(ctx, getActiveEventTriggerRegisteredEvents, blockNumber) + if err != nil { + return nil, err + } + defer rows.Close() + var items []EventTriggerRegisteredEvent + for rows.Next() { + var i EventTriggerRegisteredEvent + if err := rows.Scan( + 
&i.BlockNumber, + &i.BlockHash, + &i.TxIndex, + &i.LogIndex, + &i.Eon, + &i.IdentityPrefix, + &i.Sender, + &i.Definition, + &i.Ttl, + &i.Decrypted, + &i.Identity, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const getCurrentDecryptionTrigger = `-- name: GetCurrentDecryptionTrigger :one SELECT eon, triggered_block_number, identities_hash FROM current_decryption_trigger WHERE eon = $1 ORDER BY triggered_block_number DESC LIMIT 1 @@ -81,6 +142,17 @@ func (q *Queries) GetIdentityRegisteredEventsSyncedUntil(ctx context.Context) (I return i, err } +const getMultiEventSyncStatus = `-- name: GetMultiEventSyncStatus :one +SELECT enforce_one_row, block_number, block_hash FROM multi_event_sync_status LIMIT 1 +` + +func (q *Queries) GetMultiEventSyncStatus(ctx context.Context) (MultiEventSyncStatus, error) { + row := q.db.QueryRow(ctx, getMultiEventSyncStatus) + var i MultiEventSyncStatus + err := row.Scan(&i.EnforceOneRow, &i.BlockNumber, &i.BlockHash) + return i, err +} + const getNotDecryptedIdentityRegisteredEvents = `-- name: GetNotDecryptedIdentityRegisteredEvents :many SELECT block_number, block_hash, tx_index, log_index, eon, identity_prefix, sender, timestamp, decrypted, identity FROM identity_registered_event WHERE timestamp >= $1 AND timestamp <= $2 AND decrypted = false @@ -123,6 +195,72 @@ func (q *Queries) GetNotDecryptedIdentityRegisteredEvents(ctx context.Context, a return items, nil } +const getUndecryptedFiredTriggers = `-- name: GetUndecryptedFiredTriggers :many +SELECT + f.identity_prefix, + f.sender, + f.block_number, + f.block_hash, + f.tx_index, + f.log_index, + e.eon AS eon, + e.ttl AS ttl, + e.identity AS identity, + e.decrypted AS decrypted +FROM fired_triggers f +INNER JOIN event_trigger_registered_event e ON f.identity_prefix = e.identity_prefix AND f.sender = e.sender +WHERE NOT EXISTS ( -- not decrypted yet + SELECT 1 FROM 
event_trigger_registered_event e + WHERE e.identity_prefix = f.identity_prefix + AND e.sender = f.sender + AND e.decrypted = true +) +` + +type GetUndecryptedFiredTriggersRow struct { + IdentityPrefix []byte + Sender string + BlockNumber int64 + BlockHash []byte + TxIndex int64 + LogIndex int64 + Eon int64 + Ttl int64 + Identity []byte + Decrypted bool +} + +func (q *Queries) GetUndecryptedFiredTriggers(ctx context.Context) ([]GetUndecryptedFiredTriggersRow, error) { + rows, err := q.db.Query(ctx, getUndecryptedFiredTriggers) + if err != nil { + return nil, err + } + defer rows.Close() + var items []GetUndecryptedFiredTriggersRow + for rows.Next() { + var i GetUndecryptedFiredTriggersRow + if err := rows.Scan( + &i.IdentityPrefix, + &i.Sender, + &i.BlockNumber, + &i.BlockHash, + &i.TxIndex, + &i.LogIndex, + &i.Eon, + &i.Ttl, + &i.Identity, + &i.Decrypted, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const insertDecryptionSignature = `-- name: InsertDecryptionSignature :exec INSERT INTO decryption_signatures (eon, keyper_index, identities_hash, signature) VALUES ($1, $2, $3, $4) @@ -146,6 +284,87 @@ func (q *Queries) InsertDecryptionSignature(ctx context.Context, arg InsertDecry return err } +const insertEventTriggerRegisteredEvent = `-- name: InsertEventTriggerRegisteredEvent :execresult +INSERT INTO event_trigger_registered_event ( + block_number, + block_hash, + tx_index, + log_index, + eon, + identity_prefix, + sender, + definition, + ttl, + identity +) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) +ON CONFLICT (eon, identity_prefix, sender) DO UPDATE SET +block_number = $1, +block_hash = $2, +tx_index = $3, +log_index = $4, +definition = $8, +ttl = $9, +identity = $10 +` + +type InsertEventTriggerRegisteredEventParams struct { + BlockNumber int64 + BlockHash []byte + TxIndex int64 + LogIndex int64 + Eon int64 + IdentityPrefix []byte + Sender string 
+ Definition []byte + Ttl int64 + Identity []byte +} + +func (q *Queries) InsertEventTriggerRegisteredEvent(ctx context.Context, arg InsertEventTriggerRegisteredEventParams) (pgconn.CommandTag, error) { + return q.db.Exec(ctx, insertEventTriggerRegisteredEvent, + arg.BlockNumber, + arg.BlockHash, + arg.TxIndex, + arg.LogIndex, + arg.Eon, + arg.IdentityPrefix, + arg.Sender, + arg.Definition, + arg.Ttl, + arg.Identity, + ) +} + +const insertFiredTrigger = `-- name: InsertFiredTrigger :exec +INSERT INTO fired_triggers (eon, identity_prefix, sender, block_number, block_hash, tx_index, log_index) +VALUES ($1, $2, $3, $4, $5, $6, $7) +ON CONFLICT (eon, identity_prefix, sender) DO NOTHING +` + +type InsertFiredTriggerParams struct { + Eon int64 + IdentityPrefix []byte + Sender string + BlockNumber int64 + BlockHash []byte + TxIndex int64 + LogIndex int64 +} + +func (q *Queries) InsertFiredTrigger(ctx context.Context, arg InsertFiredTriggerParams) error { + _, err := q.db.Exec(ctx, insertFiredTrigger, + arg.Eon, + arg.IdentityPrefix, + arg.Sender, + arg.BlockNumber, + arg.BlockHash, + arg.TxIndex, + arg.LogIndex, + ) + return err +} + const insertIdentityRegisteredEvent = `-- name: InsertIdentityRegisteredEvent :execresult INSERT INTO identity_registered_event ( block_number, @@ -229,7 +448,41 @@ func (q *Queries) SetIdentityRegisteredEventSyncedUntil(ctx context.Context, arg return err } -const updateDecryptedFlag = `-- name: UpdateDecryptedFlag :exec +const setMultiEventSyncStatus = `-- name: SetMultiEventSyncStatus :exec +INSERT INTO multi_event_sync_status (block_number, block_hash) VALUES ($1, $2) +ON CONFLICT (enforce_one_row) DO UPDATE +SET block_number = $1, block_hash = $2 +` + +type SetMultiEventSyncStatusParams struct { + BlockNumber int64 + BlockHash []byte +} + +func (q *Queries) SetMultiEventSyncStatus(ctx context.Context, arg SetMultiEventSyncStatusParams) error { + _, err := q.db.Exec(ctx, setMultiEventSyncStatus, arg.BlockNumber, arg.BlockHash) + return 
err +} + +const updateEventBasedDecryptedFlags = `-- name: UpdateEventBasedDecryptedFlags :exec +UPDATE event_trigger_registered_event +SET decrypted = TRUE +WHERE (eon, identity) IN ( + SELECT UNNEST($1::bigint[]), UNNEST($2::bytea[]) +) +` + +type UpdateEventBasedDecryptedFlagsParams struct { + Eons []int64 + Identities [][]byte +} + +func (q *Queries) UpdateEventBasedDecryptedFlags(ctx context.Context, arg UpdateEventBasedDecryptedFlagsParams) error { + _, err := q.db.Exec(ctx, updateEventBasedDecryptedFlags, arg.Eons, arg.Identities) + return err +} + +const updateTimeBasedDecryptedFlags = `-- name: UpdateTimeBasedDecryptedFlags :exec UPDATE identity_registered_event SET decrypted = TRUE WHERE (eon, identity) IN ( @@ -237,12 +490,12 @@ WHERE (eon, identity) IN ( ) ` -type UpdateDecryptedFlagParams struct { - Column1 []int64 - Column2 [][]byte +type UpdateTimeBasedDecryptedFlagsParams struct { + Eons []int64 + Identities [][]byte } -func (q *Queries) UpdateDecryptedFlag(ctx context.Context, arg UpdateDecryptedFlagParams) error { - _, err := q.db.Exec(ctx, updateDecryptedFlag, arg.Column1, arg.Column2) +func (q *Queries) UpdateTimeBasedDecryptedFlags(ctx context.Context, arg UpdateTimeBasedDecryptedFlagsParams) error { + _, err := q.db.Exec(ctx, updateTimeBasedDecryptedFlags, arg.Eons, arg.Identities) return err } diff --git a/rolling-shutter/keyperimpl/shutterservice/database/sql/migrations/V2_fix_fired_triggers_foreign_key.sql b/rolling-shutter/keyperimpl/shutterservice/database/sql/migrations/V2_fix_fired_triggers_foreign_key.sql new file mode 100644 index 00000000..e69de29b diff --git a/rolling-shutter/keyperimpl/shutterservice/database/sql/queries/shutterservice.sql b/rolling-shutter/keyperimpl/shutterservice/database/sql/queries/shutterservice.sql index 4218b4de..3bbc5725 100644 --- a/rolling-shutter/keyperimpl/shutterservice/database/sql/queries/shutterservice.sql +++ b/rolling-shutter/keyperimpl/shutterservice/database/sql/queries/shutterservice.sql @@ 
-27,11 +27,42 @@ WHERE eon = $1 AND identities_hash = $2 ORDER BY keyper_index ASC LIMIT $3; --- name: UpdateDecryptedFlag :exec +-- name: InsertEventTriggerRegisteredEvent :execresult +INSERT INTO event_trigger_registered_event ( + block_number, + block_hash, + tx_index, + log_index, + eon, + identity_prefix, + sender, + definition, + ttl, + identity +) +VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) +ON CONFLICT (eon, identity_prefix, sender) DO UPDATE SET +block_number = $1, +block_hash = $2, +tx_index = $3, +log_index = $4, +definition = $8, +ttl = $9, +identity = $10; + + +-- name: UpdateTimeBasedDecryptedFlags :exec UPDATE identity_registered_event SET decrypted = TRUE WHERE (eon, identity) IN ( - SELECT UNNEST($1::bigint[]), UNNEST($2::bytea[]) + SELECT UNNEST(@eons::bigint[]), UNNEST(@identities::bytea[]) +); + +-- name: UpdateEventBasedDecryptedFlags :exec +UPDATE event_trigger_registered_event +SET decrypted = TRUE +WHERE (eon, identity) IN ( + SELECT UNNEST(@eons::bigint[]), UNNEST(@identities::bytea[]) ); -- name: InsertIdentityRegisteredEvent :execresult @@ -63,3 +94,53 @@ SET block_hash = $1, block_number = $2; -- name: DeleteIdentityRegisteredEventsFromBlockNumber :exec DELETE FROM identity_registered_event WHERE block_number >= $1; + +-- name: GetMultiEventSyncStatus :one +SELECT * FROM multi_event_sync_status LIMIT 1; + +-- name: SetMultiEventSyncStatus :exec +INSERT INTO multi_event_sync_status (block_number, block_hash) VALUES ($1, $2) +ON CONFLICT (enforce_one_row) DO UPDATE +SET block_number = $1, block_hash = $2; + +-- name: DeleteEventTriggerRegisteredEventsFromBlockNumber :exec +DELETE FROM event_trigger_registered_event WHERE block_number >= $1; + +-- name: InsertFiredTrigger :exec +INSERT INTO fired_triggers (eon, identity_prefix, sender, block_number, block_hash, tx_index, log_index) +VALUES ($1, $2, $3, $4, $5, $6, $7) +ON CONFLICT (eon, identity_prefix, sender) DO NOTHING; + +-- name: DeleteFiredTriggersFromBlockNumber :exec +DELETE 
FROM fired_triggers WHERE block_number >= $1; + +-- name: GetActiveEventTriggerRegisteredEvents :many +SELECT * FROM event_trigger_registered_event e +WHERE e.block_number + ttl >= @block_number -- TTL not expired at given block +AND e.decrypted = false -- not decrypted yet +AND NOT EXISTS ( -- not fired yet + SELECT 1 FROM fired_triggers t + WHERE t.eon = e.eon AND t.identity_prefix = e.identity_prefix + AND t.sender = e.sender +); + +-- name: GetUndecryptedFiredTriggers :many +SELECT + f.identity_prefix, + f.sender, + f.block_number, + f.block_hash, + f.tx_index, + f.log_index, + e.eon AS eon, + e.ttl AS ttl, + e.identity AS identity, + e.decrypted AS decrypted +FROM fired_triggers f +INNER JOIN event_trigger_registered_event e ON f.eon = e.eon AND f.identity_prefix = e.identity_prefix AND f.sender = e.sender +WHERE NOT EXISTS ( -- not decrypted yet + SELECT 1 FROM event_trigger_registered_event e2 + WHERE e2.eon = f.eon AND e2.identity_prefix = f.identity_prefix + AND e2.sender = f.sender + AND e2.decrypted = true +); diff --git a/rolling-shutter/keyperimpl/shutterservice/database/sql/schemas/shutterservice.sql b/rolling-shutter/keyperimpl/shutterservice/database/sql/schemas/shutterservice.sql index 8ab32d3c..53aac322 100644 --- a/rolling-shutter/keyperimpl/shutterservice/database/sql/schemas/shutterservice.sql +++ b/rolling-shutter/keyperimpl/shutterservice/database/sql/schemas/shutterservice.sql @@ -1,7 +1,22 @@ --- schema-version: shutterservice-1 -- +-- schema-version: shutterservice-2 -- -- Please change the version above if you make incompatible changes to -- the schema. We'll use this to check we're using the right schema.
+CREATE TABLE event_trigger_registered_event ( + block_number bigint NOT NULL CHECK (block_number >= 0), + block_hash bytea NOT NULL, + tx_index bigint NOT NULL CHECK (tx_index >= 0), + log_index bigint NOT NULL CHECK (log_index >= 0), + eon bigint NOT NULL CHECK (eon >= 0), + identity_prefix bytea NOT NULL, + sender text NOT NULL, + definition bytea NOT NULL, + ttl bigint NOT NULL CHECK (ttl >= 0), + decrypted boolean NOT NULL DEFAULT false, + identity bytea NOT NULL, + PRIMARY KEY (eon, identity_prefix, sender) +); + CREATE TABLE identity_registered_event ( block_number bigint NOT NULL CHECK (block_number >= 0), block_hash bytea NOT NULL, @@ -35,4 +50,22 @@ CREATE TABLE decryption_signatures( identities_hash bytea NOT NULL, signature bytea NOT NULL, PRIMARY KEY (eon, keyper_index, identities_hash) +); + +CREATE TABLE multi_event_sync_status ( + enforce_one_row bool PRIMARY KEY DEFAULT true, + block_number bigint NOT NULL CHECK (block_number >= 0), + block_hash bytea NOT NULL +); + +CREATE TABLE fired_triggers ( + eon bigint NOT NULL, + identity_prefix bytea NOT NULL, + sender text NOT NULL, + block_number bigint NOT NULL CHECK (block_number >= 0), + block_hash bytea NOT NULL, + tx_index bigint NOT NULL CHECK (tx_index >= 0), + log_index bigint NOT NULL CHECK (log_index >= 0), + PRIMARY KEY (eon, identity_prefix, sender), + FOREIGN KEY (eon, identity_prefix, sender) REFERENCES event_trigger_registered_event (eon, identity_prefix, sender) ON DELETE CASCADE ); \ No newline at end of file diff --git a/rolling-shutter/keyperimpl/shutterservice/eventprocessor.go b/rolling-shutter/keyperimpl/shutterservice/eventprocessor.go new file mode 100644 index 00000000..9ee8225a --- /dev/null +++ b/rolling-shutter/keyperimpl/shutterservice/eventprocessor.go @@ -0,0 +1,22 @@ +package shutterservice + +import ( + "context" + + "github.com/jackc/pgx/v4" +) + +// EventProcessor defines the interface that event processors for MultiEventSyncer must implement. 
+type EventProcessor interface { + // GetProcessorName returns a unique name for this processor. + GetProcessorName() string + // FetchEvents retrieves events in the given block range (inclusive). + FetchEvents(ctx context.Context, start, end uint64) ([]Event, error) + // ProcessEvents processes the fetched events and stores them in the database. + ProcessEvents(ctx context.Context, tx pgx.Tx, events []Event) error + // RollbackEvents removes events with block numbers greater than the specified block number. + RollbackEvents(ctx context.Context, tx pgx.Tx, toBlock int64) error +} + +// Event represents a generic blockchain event that can be processed. +type Event interface{} diff --git a/rolling-shutter/keyperimpl/shutterservice/eventtrigger.go b/rolling-shutter/keyperimpl/shutterservice/eventtrigger.go new file mode 100644 index 00000000..1cb28c2a --- /dev/null +++ b/rolling-shutter/keyperimpl/shutterservice/eventtrigger.go @@ -0,0 +1,460 @@ +package shutterservice + +import ( + "bytes" + "fmt" + "io" + "math" + "math/big" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" +) + +const ( + Word = 32 + Version = 0x1 +) + +// EventTriggerDefinition specifies an event-based trigger. +type EventTriggerDefinition struct { + Contract common.Address + LogPredicates []LogPredicate +} + +// LogPredicate defines a condition on the events emitted by a contract that must be satisfied for a +// corresponding event trigger to fire. +type LogPredicate struct { + LogValueRef LogValueRef + ValuePredicate ValuePredicate +} + +// LogValueRef references a value contained in an event log. +// - If 0 <= Offset < 4, it refers to the topic of the log at index Offset. In this case, Length +// must be 1. +// - If Offset >= 4, it refers to a slice of 32-byte words from the log's data. Its start index is +// Offset - 4 and the length is Length. 
E.g., for offset 5 and length 2, the slice starts at +// byte 32, ends at byte 96 (exclusive), and is 64 bytes long. +type LogValueRef struct { + Offset uint64 + Length uint64 +} + +// ValuePredicate defines a condition on a value contained in an event log that must be satisfied +// for a corresponding event trigger to fire. It consists of an operation and a set of arguments, +// e.g. `<` and `100` for a predicate that checks if a value is less than 100. The type and number +// of arguments that are required depend on the operation. +type ValuePredicate struct { + Op Op + IntArgs []*big.Int + ByteArgs [][]byte +} + +// Op enumerates the operation to be performed when evaluating a constraint. +type Op uint64 + +func (d *EventTriggerDefinition) UnmarshalBytes(data []byte) error { + if len(data) == 0 { + return fmt.Errorf("data is empty") + } + version := data[0] + if version != Version { + return fmt.Errorf("unsupported version %d, expected %d", version, Version) + } + + if err := rlp.DecodeBytes(data[1:], d); err != nil { + return fmt.Errorf("failed to decode EventTriggerDefinitionRLP: %w", err) + } + if err := d.Validate(); err != nil { + return fmt.Errorf("invalid EventTriggerDefinitionRLP: %w", err) + } + return nil +} + +func (d *EventTriggerDefinition) MarshalBytes() ([]byte, error) { + var buf bytes.Buffer + buf.WriteByte(Version) + if err := rlp.Encode(&buf, d); err != nil { + return nil, fmt.Errorf("failed to encode EventTriggerDefinitionRLP: %w", err) + } + return buf.Bytes(), nil +} + +// Validate checks if the event trigger definition is valid. 
+// +// A trigger definition is valid if +// - all log predicates are valid and +// - there are no two log BytesEq predicates for the same topic +func (d *EventTriggerDefinition) Validate() error { + for i, lp := range d.LogPredicates { + if err := lp.Validate(); err != nil { + return fmt.Errorf("invalid log predicate at index %d: %w", i, err) + } + } + + topicMap := make(map[uint64]struct{}) + for i, lp := range d.LogPredicates { + if !lp.LogValueRef.IsTopic() || lp.ValuePredicate.Op != BytesEq { + continue + } + if _, exists := topicMap[lp.LogValueRef.Offset]; exists { + return fmt.Errorf("duplicate BytesEq log predicate for topic %d at index %d", lp.LogValueRef.Offset, i) + } + topicMap[lp.LogValueRef.Offset] = struct{}{} + } + + return nil +} + +// ToFilterQuery creates an Ethereum filter query based on the event trigger definition. +// +// The returned filter includes: +// - Contract address filtering: Only events from the specified contract are matched +// - Topic filtering: BytesEq operations on topics are converted to topic filters +// +// Any other operation is not included in the filter and must be checked by the caller. +// +// The method returns an error if +// - there are multiple BytesEq log predicates for the same topic +// - the argument for a topic BytesEq log predicate is not a 32-byte value +// +// These errors do not occur if Validate passes. 
+func (d *EventTriggerDefinition) ToFilterQuery() (ethereum.FilterQuery, error) { + topics := [][]common.Hash{} + for _, logPredicate := range d.LogPredicates { + if !logPredicate.LogValueRef.IsTopic() { + continue + } + if logPredicate.ValuePredicate.Op != BytesEq { + continue + } + + topicIndex := logPredicate.LogValueRef.Offset + for uint64(len(topics)) <= topicIndex { + topics = append(topics, []common.Hash{}) + } + if len(topics[topicIndex]) != 0 { + return ethereum.FilterQuery{}, fmt.Errorf("multiple log predicates for topic %d", topicIndex) + } + topic := logPredicate.ValuePredicate.ByteArgs[0] + if len(topic) != Word { + return ethereum.FilterQuery{}, fmt.Errorf("log predicate for topic %d must have a 32-byte value, got %d bytes", topicIndex, len(topic)) + } + topics[logPredicate.LogValueRef.Offset] = []common.Hash{common.BytesToHash(topic)} + } + return ethereum.FilterQuery{ + BlockHash: nil, + FromBlock: nil, + ToBlock: nil, + Addresses: []common.Address{d.Contract}, + Topics: topics, + }, nil +} + +// Match checks if the log matches the event trigger definition by checking all log predicates. +// +// This may panic if Validate does not pass. 
+func (d *EventTriggerDefinition) Match(log *types.Log) (bool, error) { + if log.Address != d.Contract { + return false, nil + } + for _, logPredicate := range d.LogPredicates { + match, err := logPredicate.Match(log) + if err != nil { + return false, err + } + if !match { + return false, nil + } + } + return true, nil +} + +func (p *LogPredicate) Validate() error { + if err := p.LogValueRef.Validate(); err != nil { + return err + } + if err := p.ValuePredicate.Validate(p.LogValueRef.Length); err != nil { + return err + } + return nil +} + +func (p *LogPredicate) Match(log *types.Log) (bool, error) { + value := p.LogValueRef.GetValue(log) + return p.ValuePredicate.Match(value) +} + +func (r *LogValueRef) Validate() error { + if r.Length == 0 { + return fmt.Errorf("log value reference length must be positive, got %d", r.Length) + } + if r.Offset < 4 && r.Length != 1 { + return fmt.Errorf("log value reference offset < 4 requires length to be 1, got %d", r.Length) + } + // Check that the offset and length are within reasonable bounds so that we can convert them + // to bytes and bits without worrying. 
+ if r.Offset > math.MaxUint32 { + return fmt.Errorf("log value reference offset must be less than 2^32, got %d", r.Offset) + } + if r.Length > math.MaxUint32 { + return fmt.Errorf("log value reference length must be less than 2^32, got %d", r.Length) + } + return nil +} + +func (r *LogValueRef) EncodeRLP(w io.Writer) error { + buf := rlp.NewEncoderBuffer(w) + if r.Length == 1 { + buf.WriteUint64(r.Offset) + } else { + listIndex := buf.List() + buf.WriteUint64(r.Offset) + buf.WriteUint64(r.Length) + buf.ListEnd(listIndex) + } + return buf.Flush() +} + +func (r *LogValueRef) DecodeRLP(s *rlp.Stream) error { + var offset, length uint64 + kind, _, err := s.Kind() + if err != nil { + return fmt.Errorf("failed to decode LogValueRef: %w", err) + } + switch kind { + case rlp.Byte, rlp.String: + offset, err = s.Uint64() + if err != nil { + return fmt.Errorf("failed to read offset from LogValueRef: %w", err) + } + length = 1 + case rlp.List: + _, err = s.List() + if err != nil { + return fmt.Errorf("failed to read LogValueRef list: %w", err) + } + offset, err = s.Uint64() + if err != nil { + return fmt.Errorf("failed to read offset from LogValueRef: %w", err) + } + length, err = s.Uint64() + if err != nil { + return fmt.Errorf("failed to read length from LogValueRef: %w", err) + } + err = s.ListEnd() + if err != nil { + return fmt.Errorf("failed to decode LogValueRef: %w", err) + } + default: + panic(fmt.Sprintf("unexpected kind %d for LogValueRef", kind)) + } + r.Offset = offset + r.Length = length + if err := r.Validate(); err != nil { + return fmt.Errorf("invalid LogValueRef: %w", err) + } + return nil +} + +func (r *LogValueRef) IsTopic() bool { + return r.Offset < 4 +} + +// GetValue retrieves the value from the log based on the LogValueRef. +// +// In case a slice of log data is referenced and the slice exceeds the log's data length, the +// result will be zero-padded on the right to the expected length. 
+func (r *LogValueRef) GetValue(log *types.Log) []byte { + if r.IsTopic() { + if uint64(len(log.Topics)) <= r.Offset { + return nil + } + return log.Topics[r.Offset].Bytes() + } + + dataOffset := r.Offset - 4 + value := make([]byte, r.Length*Word) + + startByte := dataOffset * Word + endByte := (dataOffset + r.Length) * Word + + if startByte < uint64(len(log.Data)) { + availableEnd := uint64(len(log.Data)) + if endByte < availableEnd { + availableEnd = endByte + } + copy(value, log.Data[startByte:availableEnd]) + } + + return value +} + +const ( + UintLt Op = iota + UintLte + UintEq + UintGt + UintGte + BytesEq +) + +func (op Op) Validate() error { + switch op { + case UintLt, UintLte, UintEq, UintGt, UintGte, BytesEq: + return nil + default: + return fmt.Errorf("invalid operation: %d", op) + } +} + +func (op Op) NumIntArgs() int { + switch op { + case UintLt, UintLte, UintEq, UintGt, UintGte: + return 1 + case BytesEq: + return 0 + default: + return 0 + } +} + +func (op Op) NumByteArgs() int { + switch op { + case UintLt, UintLte, UintEq, UintGt, UintGte: + return 0 + case BytesEq: + return 1 + default: + return 0 + } +} + +func (p *ValuePredicate) EncodeRLP(w io.Writer) error { + var elements []interface{} + elements = append(elements, uint64(p.Op)) + for _, intArg := range p.IntArgs { + elements = append(elements, intArg) + } + for _, byteArg := range p.ByteArgs { + elements = append(elements, byteArg) + } + return rlp.Encode(w, elements) +} + +func (p *ValuePredicate) DecodeRLP(s *rlp.Stream) error { + _, err := s.List() + if err != nil { + return fmt.Errorf("failed to decode ValuePredicate: %w", err) + } + opInt, err := s.Uint64() + if err != nil { + return fmt.Errorf("failed to read operation from ValuePredicate: %w", err) + } + op := Op(opInt) + if err := op.Validate(); err != nil { + return fmt.Errorf("invalid operation: %w", err) + } + + intArgs := []*big.Int{} + for i := 0; i < op.NumIntArgs(); i++ { + intArg, err := s.BigInt() + if err != nil { + return 
fmt.Errorf("failed to read integer argument %d: %w", i, err) + } + intArgs = append(intArgs, intArg) + } + + byteArgs := [][]byte{} + for i := 0; i < op.NumByteArgs(); i++ { + byteArg, err := s.Bytes() + if err != nil { + return fmt.Errorf("failed to read byte argument %d: %w", i, err) + } + byteArgs = append(byteArgs, byteArg) + } + + err = s.ListEnd() + if err != nil { + return fmt.Errorf("failed to decode ValuePredicate: %w", err) + } + + p.Op = op + p.IntArgs = intArgs + p.ByteArgs = byteArgs + + return nil +} + +func (p *ValuePredicate) Validate(numWords uint64) error { + if err := p.Op.Validate(); err != nil { + return err + } + if err := p.validateArgNums(); err != nil { + return err + } + if err := p.validateArgValues(numWords); err != nil { + return err + } + return nil +} + +func (p *ValuePredicate) validateArgNums() error { + requiredIntArgs := p.Op.NumIntArgs() + requiredByteArgs := p.Op.NumByteArgs() + + if len(p.IntArgs) != requiredIntArgs { + return fmt.Errorf("operation %d requires exactly %d integer argument(s), got %d", p.Op, requiredIntArgs, len(p.IntArgs)) + } + if len(p.ByteArgs) != requiredByteArgs { + return fmt.Errorf("operation %d requires exactly %d bytes argument(s), got %d", p.Op, requiredByteArgs, len(p.ByteArgs)) + } + return nil +} + +func (p *ValuePredicate) validateArgValues(numWords uint64) error { + for i, arg := range p.IntArgs { + if arg == nil { + return fmt.Errorf("integer argument %d cannot be nil for operation %d", i, p.Op) + } + if arg.Sign() < 0 { + return fmt.Errorf("integer argument %d cannot be negative for operation %d", i, p.Op) + } + if uint64(arg.BitLen()) > numWords*Word*8 { + return fmt.Errorf( + "bit length of integer argument %d cannot exceed value bit length %d for operation %d, got %d bits", + i, numWords*Word*8, p.Op, arg.BitLen(), + ) + } + } + for i, arg := range p.ByteArgs { + if uint64(len(arg)) != numWords*Word { + return fmt.Errorf( + "size of byte argument %d must match size of value (%d bytes) for 
operation %d, got %d bytes", + i, numWords*Word, p.Op, len(arg), + ) + } + } + return nil +} + +func (p *ValuePredicate) Match(value []byte) (bool, error) { + n := new(big.Int).SetBytes(value) + switch p.Op { + case UintLt: + return n.Cmp(p.IntArgs[0]) < 0, nil + case UintLte: + return n.Cmp(p.IntArgs[0]) <= 0, nil + case UintEq: + return n.Cmp(p.IntArgs[0]) == 0, nil + case UintGt: + return n.Cmp(p.IntArgs[0]) > 0, nil + case UintGte: + return n.Cmp(p.IntArgs[0]) >= 0, nil + case BytesEq: + return bytes.Equal(value, p.ByteArgs[0]), nil + } + return false, fmt.Errorf("unknown operation %d", p.Op) +} diff --git a/rolling-shutter/keyperimpl/shutterservice/eventtrigger_test.go b/rolling-shutter/keyperimpl/shutterservice/eventtrigger_test.go new file mode 100644 index 00000000..cc3f6dfd --- /dev/null +++ b/rolling-shutter/keyperimpl/shutterservice/eventtrigger_test.go @@ -0,0 +1,2387 @@ +package shutterservice + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" + "gotest.tools/assert" +) + +func TestOpValidate(t *testing.T) { + tests := []struct { + name string + op Op + wantErr bool + }{ + { + name: "UintLt is valid", + op: UintLt, + wantErr: false, + }, + { + name: "UintLte is valid", + op: UintLte, + wantErr: false, + }, + { + name: "UintEq is valid", + op: UintEq, + wantErr: false, + }, + { + name: "UintGt is valid", + op: UintGt, + wantErr: false, + }, + { + name: "UintGte is valid", + op: UintGte, + wantErr: false, + }, + { + name: "BytesEq is valid", + op: BytesEq, + wantErr: false, + }, + { + name: "invalid operation value 6", + op: Op(6), + wantErr: true, + }, + { + name: "invalid operation value 100", + op: Op(100), + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.op.Validate() + if tt.wantErr { + assert.ErrorContains(t, err, "invalid 
operation:") + } else { + assert.NilError(t, err) + } + }) + } +} + +func TestLogValueRefValidate(t *testing.T) { + tests := []struct { + name string + ref LogValueRef + wantErr bool + errMsg string + }{ + { + name: "valid topic reference - offset 0", + ref: LogValueRef{ + Offset: 0, + Length: 1, + }, + wantErr: false, + }, + { + name: "valid topic reference - offset 3", + ref: LogValueRef{ + Offset: 3, + Length: 1, + }, + wantErr: false, + }, + { + name: "valid data reference - offset 4, length 1", + ref: LogValueRef{ + Offset: 4, + Length: 1, + }, + wantErr: false, + }, + { + name: "valid data reference - offset 5, length 2", + ref: LogValueRef{ + Offset: 5, + Length: 2, + }, + wantErr: false, + }, + { + name: "valid data reference - large offset and length", + ref: LogValueRef{ + Offset: 100, + Length: 10, + }, + wantErr: false, + }, + { + name: "invalid zero length", + ref: LogValueRef{ + Offset: 0, + Length: 0, + }, + wantErr: true, + errMsg: "log value reference length must be positive, got 0", + }, + { + name: "invalid topic reference with length > 1 - offset 0", + ref: LogValueRef{ + Offset: 0, + Length: 2, + }, + wantErr: true, + errMsg: "log value reference offset < 4 requires length to be 1, got 2", + }, + { + name: "invalid topic reference with length > 1 - offset 1", + ref: LogValueRef{ + Offset: 1, + Length: 3, + }, + wantErr: true, + errMsg: "log value reference offset < 4 requires length to be 1, got 3", + }, + { + name: "invalid topic reference with length > 1 - offset 3", + ref: LogValueRef{ + Offset: 3, + Length: 5, + }, + wantErr: true, + errMsg: "log value reference offset < 4 requires length to be 1, got 5", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.ref.Validate() + if tt.wantErr { + assert.Error(t, err, tt.errMsg) + } else { + assert.NilError(t, err) + } + }) + } +} + +func TestValuePredicateValidate(t *testing.T) { + tests := []struct { + name string + predicate ValuePredicate + numWords uint64 
+ wantErr bool + errMsg string + }{ + { + name: "valid UintLt predicate", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: false, + }, + { + name: "valid UintEq predicate", + predicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(42)}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: false, + }, + { + name: "valid BytesEq predicate", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, // 32 bytes for 1 word + }, + numWords: 1, + wantErr: false, + }, + { + name: "valid BytesEq predicate with 2 words", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 64)}, // 64 bytes for 2 words + }, + numWords: 2, + wantErr: false, + }, + { + name: "invalid operation", + predicate: ValuePredicate{ + Op: Op(999), + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: true, + errMsg: "invalid operation: 999", + }, + { + name: "UintLt with wrong number of int args - too few", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: true, + errMsg: "operation 0 requires exactly 1 integer argument(s), got 0", + }, + { + name: "UintLt with wrong number of int args - too many", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(1), big.NewInt(2)}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: true, + errMsg: "operation 0 requires exactly 1 integer argument(s), got 2", + }, + { + name: "BytesEq with wrong number of byte args - too few", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: true, + errMsg: "operation 5 requires exactly 1 bytes argument(s), got 0", + }, + { + name: "BytesEq with wrong number of byte args - too many", + predicate: ValuePredicate{ + Op: BytesEq, + 
IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32), make([]byte, 32)}, + }, + numWords: 1, + wantErr: true, + errMsg: "operation 5 requires exactly 1 bytes argument(s), got 2", + }, + { + name: "UintLt with nil integer argument", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{nil}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: true, + errMsg: "integer argument 0 cannot be nil for operation 0", + }, + { + name: "UintLt with negative integer argument", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(-1)}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: true, + errMsg: "integer argument 0 cannot be negative for operation 0", + }, + { + name: "UintLt with integer argument too large for 1 word", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{func() *big.Int { + // Create a number that requires more than 256 bits (1 word = 32 bytes = 256 bits) + val := big.NewInt(1) + val.Lsh(val, 257) // 2^257 + return val + }()}, + ByteArgs: [][]byte{}, + }, + numWords: 1, + wantErr: true, + errMsg: "bit length of integer argument 0 cannot exceed value bit length 256 for operation 0, got 258 bits", + }, + { + name: "UintLt with integer argument fitting exactly in 2 words", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{func() *big.Int { + // Create a number that requires exactly 512 bits (2 words = 64 bytes = 512 bits) + val := big.NewInt(1) + val.Lsh(val, 511) // 2^511 + return val + }()}, + ByteArgs: [][]byte{}, + }, + numWords: 2, + wantErr: false, + }, + { + name: "BytesEq with wrong byte argument size - too small", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 16)}, // 16 bytes instead of 32 + }, + numWords: 1, + wantErr: true, + errMsg: "size of byte argument 0 must match size of value (32 bytes) for operation 5, got 16 bytes", + }, + { + name: "BytesEq with wrong byte argument size - too large", + predicate: 
ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 48)}, // 48 bytes instead of 32 + }, + numWords: 1, + wantErr: true, + errMsg: "size of byte argument 0 must match size of value (32 bytes) for operation 5, got 48 bytes", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.predicate.Validate(tt.numWords) + if tt.wantErr { + assert.ErrorContains(t, err, tt.errMsg) + } else { + assert.NilError(t, err) + } + }) + } +} + +func TestValuePredicateMatch(t *testing.T) { + tests := []struct { + name string + predicate ValuePredicate + value []byte + want bool + }{ + // UintLt tests + { + name: "UintLt - value less than argument", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(50).Bytes()) + return val + }(), + want: true, + }, + { + name: "UintLt - value equal to argument", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(100).Bytes()) + return val + }(), + want: false, + }, + { + name: "UintLt - value greater than argument", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(150).Bytes()) + return val + }(), + want: false, + }, + { + name: "UintLt - zero value", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(1)}, + }, + value: make([]byte, 32), // 32 bytes of zeros + want: true, + }, + // UintLte tests + { + name: "UintLte - value less than argument", + predicate: ValuePredicate{ + Op: UintLte, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(50).Bytes()) + return val + }(), + want: true, + }, + { + name: "UintLte - value equal to argument", + 
predicate: ValuePredicate{ + Op: UintLte, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(100).Bytes()) + return val + }(), + want: true, + }, + { + name: "UintLte - value greater than argument", + predicate: ValuePredicate{ + Op: UintLte, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(150).Bytes()) + return val + }(), + want: false, + }, + // UintEq tests + { + name: "UintEq - values equal", + predicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(42)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(42).Bytes()) + return val + }(), + want: true, + }, + { + name: "UintEq - values not equal", + predicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(42)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(43).Bytes()) + return val + }(), + want: false, + }, + { + name: "UintEq - zero values", + predicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(0)}, + }, + value: make([]byte, 32), // 32 bytes of zeros + want: true, + }, + // UintGt tests + { + name: "UintGt - value greater than argument", + predicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(150).Bytes()) + return val + }(), + want: true, + }, + { + name: "UintGt - value equal to argument", + predicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(100).Bytes()) + return val + }(), + want: false, + }, + { + name: "UintGt - value less than argument", + predicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], 
big.NewInt(50).Bytes()) + return val + }(), + want: false, + }, + // UintGte tests + { + name: "UintGte - value greater than argument", + predicate: ValuePredicate{ + Op: UintGte, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(150).Bytes()) + return val + }(), + want: true, + }, + { + name: "UintGte - value equal to argument", + predicate: ValuePredicate{ + Op: UintGte, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(100).Bytes()) + return val + }(), + want: true, + }, + { + name: "UintGte - value less than argument", + predicate: ValuePredicate{ + Op: UintGte, + IntArgs: []*big.Int{big.NewInt(100)}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val[31:], big.NewInt(50).Bytes()) + return val + }(), + want: false, + }, + // BytesEq tests + { + name: "BytesEq - equal bytes", + predicate: ValuePredicate{ + Op: BytesEq, + ByteArgs: [][]byte{func() []byte { + val := make([]byte, 32) + copy(val, "hello") + return val + }()}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val, "hello") + return val + }(), + want: true, + }, + { + name: "BytesEq - different bytes", + predicate: ValuePredicate{ + Op: BytesEq, + ByteArgs: [][]byte{func() []byte { + val := make([]byte, 32) + copy(val, "hello") + return val + }()}, + }, + value: func() []byte { + val := make([]byte, 32) + copy(val, "world") + return val + }(), + want: false, + }, + { + name: "BytesEq - empty bytes", + predicate: ValuePredicate{ + Op: BytesEq, + ByteArgs: [][]byte{make([]byte, 32)}, // 32 bytes of zeros + }, + value: make([]byte, 32), // 32 bytes of zeros + want: true, + }, + { + name: "BytesEq - 32-byte values (typical for Ethereum)", + predicate: ValuePredicate{ + Op: BytesEq, + ByteArgs: [][]byte{make([]byte, 32)}, // all zeros + }, + value: make([]byte, 32), // all zeros + want: true, + }, + { + name: "BytesEq - 64-byte 
values (2 words)", + predicate: ValuePredicate{ + Op: BytesEq, + ByteArgs: [][]byte{func() []byte { + val := make([]byte, 64) + copy(val[:5], "hello") + copy(val[32:37], "world") + return val + }()}, + }, + value: func() []byte { + val := make([]byte, 64) + copy(val[:5], "hello") + copy(val[32:37], "world") + return val + }(), + want: true, + }, + // Large number tests + { + name: "UintLt - large numbers", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{func() *big.Int { + val := big.NewInt(1) + val.Lsh(val, 200) // 2^200 + return val + }()}, + }, + value: func() []byte { + val := big.NewInt(1) + val.Lsh(val, 199) // 2^199 + bytes := val.Bytes() + // Pad to 32 bytes + result := make([]byte, 32) + copy(result[32-len(bytes):], bytes) + return result + }(), + want: true, + }, + { + name: "UintGt - large numbers", + predicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{func() *big.Int { + val := big.NewInt(1) + val.Lsh(val, 200) // 2^200 + return val + }()}, + }, + value: func() []byte { + val := big.NewInt(1) + val.Lsh(val, 201) // 2^201 + bytes := val.Bytes() + // Pad to 32 bytes + result := make([]byte, 32) + copy(result[32-len(bytes):], bytes) + return result + }(), + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := tt.predicate.Match(tt.value) + assert.NilError(t, err, "Match should not return an error") + assert.Equal(t, tt.want, result) + }) + } +} + +func TestValuePredicateEncodeDecode(t *testing.T) { + tests := []struct { + name string + predicate ValuePredicate + }{ + { + name: "UintLt predicate", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + ByteArgs: [][]byte{}, + }, + }, + { + name: "UintLte predicate", + predicate: ValuePredicate{ + Op: UintLte, + IntArgs: []*big.Int{big.NewInt(500)}, + ByteArgs: [][]byte{}, + }, + }, + { + name: "UintEq predicate", + predicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(42)}, + 
ByteArgs: [][]byte{}, + }, + }, + { + name: "UintGt predicate", + predicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(200)}, + ByteArgs: [][]byte{}, + }, + }, + { + name: "UintGte predicate", + predicate: ValuePredicate{ + Op: UintGte, + IntArgs: []*big.Int{big.NewInt(1000)}, + ByteArgs: [][]byte{}, + }, + }, + { + name: "BytesEq predicate with 32 bytes", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes(), + }, + }, + }, + { + name: "BytesEq predicate with different 32 bytes", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{ + common.HexToHash("0xabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdef").Bytes(), + }, + }, + }, + { + name: "UintEq predicate with zero value", + predicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(0)}, + ByteArgs: [][]byte{}, + }, + }, + { + name: "UintLt predicate with large number", + predicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{func() *big.Int { + val := big.NewInt(1) + val.Lsh(val, 200) // 2^200 + return val + }()}, + ByteArgs: [][]byte{}, + }, + }, + { + name: "BytesEq predicate with all zeros", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, // All zeros + }, + }, + { + name: "BytesEq predicate with all 0xFF", + predicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{func() []byte { + data := make([]byte, 32) + for i := range data { + data[i] = 0xFF + } + return data + }()}, + }, + }, + { + name: "UintGte predicate with maximum uint64 value", + predicate: ValuePredicate{ + Op: UintGte, + IntArgs: []*big.Int{big.NewInt(int64(^uint64(0) >> 1))}, // Max int64 value + ByteArgs: [][]byte{}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Test encoding + var 
buf bytes.Buffer + err := tt.predicate.EncodeRLP(&buf) + assert.NilError(t, err, "EncodeRLP should not fail") + + encodedData := buf.Bytes() + assert.Assert(t, len(encodedData) > 0, "Encoded data should not be empty") + + // Test decoding + var decoded ValuePredicate + err = rlp.DecodeBytes(encodedData, &decoded) + assert.NilError(t, err, "DecodeRLP should not fail") + + // Compare the original and decoded predicates + assert.Equal(t, tt.predicate.Op, decoded.Op, "Operation should match") + assert.Equal(t, len(tt.predicate.IntArgs), len(decoded.IntArgs), "Number of int args should match") + assert.Equal(t, len(tt.predicate.ByteArgs), len(decoded.ByteArgs), "Number of byte args should match") + + // Compare integer arguments + for i, originalIntArg := range tt.predicate.IntArgs { + decodedIntArg := decoded.IntArgs[i] + assert.Equal(t, originalIntArg.Cmp(decodedIntArg), 0, "Integer argument %d should match", i) + } + + // Compare byte arguments + for i, originalByteArg := range tt.predicate.ByteArgs { + decodedByteArg := decoded.ByteArgs[i] + assert.DeepEqual(t, originalByteArg, decodedByteArg) + } + + // Test round-trip: encode the decoded predicate and compare with original encoding + var buf2 bytes.Buffer + err = decoded.EncodeRLP(&buf2) + assert.NilError(t, err, "Second encoding should not fail") + assert.DeepEqual(t, encodedData, buf2.Bytes()) + }) + } +} + +func TestValuePredicateDecodeErrors(t *testing.T) { + tests := []struct { + name string + encodedData []byte + expectedErr string + }{ + { + name: "empty data", + encodedData: []byte{}, + expectedErr: "failed to decode ValuePredicate", + }, + { + name: "invalid RLP data", + encodedData: []byte{0xFF, 0xFF, 0xFF}, + expectedErr: "failed to decode ValuePredicate", + }, + { + name: "invalid operation value", + encodedData: func() []byte { + // Manually encode an invalid operation + var buf bytes.Buffer + elements := []interface{}{uint64(999)} // Invalid operation + err := rlp.Encode(&buf, elements) + 
assert.NilError(t, err, "Encoding should not fail") + return buf.Bytes() + }(), + expectedErr: "invalid operation", + }, + { + name: "missing integer argument for UintLt", + encodedData: func() []byte { + // Encode UintLt operation but without the required integer argument + var buf bytes.Buffer + elements := []interface{}{uint64(UintLt)} // Missing the integer argument + err := rlp.Encode(&buf, elements) + assert.NilError(t, err, "Encoding should not fail") + return buf.Bytes() + }(), + expectedErr: "failed to read integer argument", + }, + { + name: "missing byte argument for BytesEq", + encodedData: func() []byte { + // Encode BytesEq operation but without the required byte argument + var buf bytes.Buffer + elements := []interface{}{uint64(BytesEq)} // Missing the byte argument + err := rlp.Encode(&buf, elements) + assert.NilError(t, err, "Encoding should not fail") + return buf.Bytes() + }(), + expectedErr: "failed to read byte argument", + }, + { + name: "too many elements for UintEq operation", + encodedData: func() []byte { + // Encode UintEq with two integer arguments instead of one + var buf bytes.Buffer + elements := []interface{}{ + uint64(UintEq), // Operation + big.NewInt(42), // First integer argument (valid) + big.NewInt(100), // Second integer argument (invalid - UintEq only needs one) + } + err := rlp.Encode(&buf, elements) + assert.NilError(t, err, "Encoding should not fail") + return buf.Bytes() + }(), + expectedErr: "failed to decode ValuePredicate", + }, + { + name: "too many elements for BytesEq operation", + encodedData: func() []byte { + // Encode BytesEq with two byte arguments instead of one + var buf bytes.Buffer + elements := []interface{}{ + uint64(BytesEq), // Operation + make([]byte, 32), // First byte argument (valid) + make([]byte, 32), // Second byte argument (invalid - BytesEq only needs one) + } + err := rlp.Encode(&buf, elements) + assert.NilError(t, err, "Encoding should not fail") + return buf.Bytes() + }(), + expectedErr: 
"failed to decode ValuePredicate", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var decoded ValuePredicate + err := rlp.DecodeBytes(tt.encodedData, &decoded) + assert.Assert(t, err != nil, "DecodeRLP should fail for invalid data") + assert.ErrorContains(t, err, tt.expectedErr) + }) + } +} + +func TestLogValueRefGetValue(t *testing.T) { + tests := []struct { + name string + ref LogValueRef + log *types.Log + want []byte + }{ + // Topic tests + { + name: "get topic 0", + ref: LogValueRef{ + Offset: 0, + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), + }, + Data: make([]byte, 64), + }, + want: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef").Bytes(), + }, + { + name: "get topic 1", + ref: LogValueRef{ + Offset: 1, + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321"), + }, + Data: make([]byte, 64), + }, + want: common.HexToHash("0xfedcba0987654321fedcba0987654321fedcba0987654321fedcba0987654321").Bytes(), + }, + { + name: "get topic 3 (last valid topic)", + ref: LogValueRef{ + Offset: 3, + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"), + common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333"), + common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444"), + }, + Data: make([]byte, 64), + }, + want: 
common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444").Bytes(), + }, + { + name: "get topic that doesn't exist - returns nil", + ref: LogValueRef{ + Offset: 2, + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + Data: make([]byte, 64), + }, + want: nil, + }, + // Data tests + { + name: "get first data word (offset 4, length 1)", + ref: LogValueRef{ + Offset: 4, + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + Data: func() []byte { + data := make([]byte, 64) + // First word: 0x1111...1111 + for i := 0; i < 32; i++ { + data[i] = 0x11 + } + // Second word: 0x2222...2222 + for i := 32; i < 64; i++ { + data[i] = 0x22 + } + return data + }(), + }, + want: func() []byte { + result := make([]byte, 32) + for i := 0; i < 32; i++ { + result[i] = 0x11 + } + return result + }(), + }, + { + name: "get second data word (offset 5, length 1)", + ref: LogValueRef{ + Offset: 5, + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + Data: func() []byte { + data := make([]byte, 64) + // First word: 0x1111...1111 + for i := 0; i < 32; i++ { + data[i] = 0x11 + } + // Second word: 0x2222...2222 + for i := 32; i < 64; i++ { + data[i] = 0x22 + } + return data + }(), + }, + want: func() []byte { + result := make([]byte, 32) + for i := 0; i < 32; i++ { + result[i] = 0x22 + } + return result + }(), + }, + { + name: "get two data words (offset 4, length 2)", + ref: LogValueRef{ + Offset: 4, + Length: 2, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + Data: func() []byte { + data := make([]byte, 64) + // First word: 0x1111...1111 + for i := 0; i < 32; 
i++ { + data[i] = 0x11 + } + // Second word: 0x2222...2222 + for i := 32; i < 64; i++ { + data[i] = 0x22 + } + return data + }(), + }, + want: func() []byte { + result := make([]byte, 64) + for i := 0; i < 32; i++ { + result[i] = 0x11 + } + for i := 32; i < 64; i++ { + result[i] = 0x22 + } + return result + }(), + }, + { + name: "get data beyond log length - zero padded", + ref: LogValueRef{ + Offset: 6, // Third word, but log only has 2 words + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + Data: func() []byte { + data := make([]byte, 64) // Only 2 words + for i := 0; i < 64; i++ { + data[i] = 0xff + } + return data + }(), + }, + want: make([]byte, 32), // Should return 32 zero bytes + }, + { + name: "get partial data beyond log length - partial zero padding", + ref: LogValueRef{ + Offset: 5, // Second word, but we want 2 words and log only has 2 words total + Length: 2, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + Data: func() []byte { + data := make([]byte, 64) // Only 2 words + // First word: 0x1111...1111 + for i := 0; i < 32; i++ { + data[i] = 0x11 + } + // Second word: 0x2222...2222 + for i := 32; i < 64; i++ { + data[i] = 0x22 + } + return data + }(), + }, + want: func() []byte { + result := make([]byte, 64) + // Second word from log data + for i := 0; i < 32; i++ { + result[i] = 0x22 + } + // Third word is zero-padded (bytes 32-63 remain zero) + return result + }(), + }, + { + name: "get data from empty log data", + ref: LogValueRef{ + Offset: 4, + Length: 1, + }, + log: &types.Log{ + Topics: []common.Hash{ + common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"), + }, + Data: []byte{}, // Empty data + }, + want: make([]byte, 32), // Should return 32 zero bytes + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + result := tt.ref.GetValue(tt.log) + assert.DeepEqual(t, tt.want, result) + }) + } +} + +func TestEventTriggerDefinitionValidate(t *testing.T) { + tests := []struct { + name string + definition EventTriggerDefinition + wantErr bool + }{ + { + name: "valid definition with no predicates", + definition: EventTriggerDefinition{ + Contract: common.HexToAddress("0x1234567890123456789012345678901234567890"), + LogPredicates: []LogPredicate{}, + }, + wantErr: false, + }, + { + name: "valid definition with single predicate", + definition: EventTriggerDefinition{ + Contract: common.HexToAddress("0x1234567890123456789012345678901234567890"), + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid definition with invalid log predicate", + definition: EventTriggerDefinition{ + Contract: common.HexToAddress("0x1234567890123456789012345678901234567890"), + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 0, // Invalid zero length + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "invalid definition with duplicate BytesEq predicates for same topic", + definition: EventTriggerDefinition{ + Contract: common.HexToAddress("0x1234567890123456789012345678901234567890"), + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 0, // Same topic as previous predicate + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, 
+ }, + }, + }, + }, + wantErr: true, + }, + { + name: "valid definition with multiple non-BytesEq predicates for same topic", + definition: EventTriggerDefinition{ + Contract: common.HexToAddress("0x1234567890123456789012345678901234567890"), + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintLt, // Not BytesEq, so multiple predicates allowed + IntArgs: []*big.Int{big.NewInt(100)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 0, // Same topic, but different operation + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintGt, // Not BytesEq, so allowed + IntArgs: []*big.Int{big.NewInt(50)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.definition.Validate() + if tt.wantErr { + assert.Assert(t, err != nil) + } else { + assert.NilError(t, err) + } + }) + } +} + +func TestEventTriggerDefinitionToFilterQuery(t *testing.T) { + contractAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") + + tests := []struct { + name string + definition EventTriggerDefinition + wantQuery ethereum.FilterQuery + wantErr bool + }{ + { + name: "definition with no predicates", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{}, + }, + wantQuery: ethereum.FilterQuery{ + Addresses: []common.Address{contractAddr}, + Topics: [][]common.Hash{}, + }, + wantErr: false, + }, + { + name: "definition with single topic predicate", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, + }, + }, + }, + }, + wantQuery: ethereum.FilterQuery{ + Addresses: []common.Address{contractAddr}, + Topics: 
[][]common.Hash{{common.BytesToHash(make([]byte, 32))}}, + }, + wantErr: false, + }, + { + name: "definition with multiple topic predicates", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 2, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222").Bytes()}, + }, + }, + }, + }, + wantQuery: ethereum.FilterQuery{ + Addresses: []common.Address{contractAddr}, + Topics: [][]common.Hash{ + {common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111")}, + {}, + {common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222")}, + }, + }, + wantErr: false, + }, + { + name: "definition with data predicate (ignored in filter)", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 4, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + wantQuery: ethereum.FilterQuery{ + Addresses: []common.Address{contractAddr}, + Topics: [][]common.Hash{}, + }, + wantErr: false, + }, + { + name: "definition with non-BytesEq topic predicate (ignored in filter)", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 1, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(50)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + wantQuery: 
ethereum.FilterQuery{ + Addresses: []common.Address{contractAddr}, + Topics: [][]common.Hash{}, + }, + wantErr: false, + }, + { + name: "definition with duplicate BytesEq predicates for same topic", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 0, // Same topic + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "definition with invalid topic byte length", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 16)}, // Invalid: not 32 bytes + }, + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + query, err := tt.definition.ToFilterQuery() + if tt.wantErr { + assert.Assert(t, err != nil) + } else { + assert.NilError(t, err) + assert.DeepEqual(t, tt.wantQuery.Addresses, query.Addresses) + assert.DeepEqual(t, tt.wantQuery.Topics, query.Topics) + // FromBlock, ToBlock, and BlockHash should be nil by default + assert.Assert(t, query.FromBlock == nil) + assert.Assert(t, query.ToBlock == nil) + assert.Assert(t, query.BlockHash == nil) + } + }) + } +} + +func TestEventTriggerDefinitionMatch(t *testing.T) { + contractAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") + + tests := []struct { + name string + definition EventTriggerDefinition + log *types.Log + want bool + }{ + { + name: "definition with no predicates matches any log", + definition: EventTriggerDefinition{ + 
Contract: contractAddr, + LogPredicates: []LogPredicate{}, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + }, + Data: make([]byte, 64), + }, + want: true, + }, + { + name: "log from different contract address should not match", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{}, + }, + log: &types.Log{ + Address: common.HexToAddress("0x9999999999999999999999999999999999999999"), // Different contract + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + }, + Data: make([]byte, 64), + }, + want: false, + }, + { + name: "single matching topic predicate", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + }, + Data: make([]byte, 64), + }, + want: true, + }, + { + name: "single non-matching topic predicate", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222"), + }, + Data: make([]byte, 64), + }, + want: 
false, + }, + { + name: "multiple matching predicates", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 1, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(50)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + func() common.Hash { + val := make([]byte, 32) + copy(val[31:], big.NewInt(100).Bytes()) + return common.BytesToHash(val) + }(), + }, + Data: make([]byte, 64), + }, + want: true, + }, + { + name: "multiple predicates with one not matching", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 1, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(200)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + func() common.Hash { + val := make([]byte, 32) + copy(val[31:], big.NewInt(100).Bytes()) // 100 is not > 200 + return common.BytesToHash(val) + }(), + }, + Data: make([]byte, 64), + }, + want: false, + }, + { + name: "data predicate matching", + definition: 
EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 4, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(1000)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + }, + Data: func() []byte { + data := make([]byte, 64) + // First word: 500 (which is < 1000) + copy(data[30:32], big.NewInt(500).Bytes()) + return data + }(), + }, + want: true, + }, + { + name: "data predicate not matching", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 4, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + }, + Data: func() []byte { + data := make([]byte, 64) + // First word: 500 (which is not < 100) + copy(data[28:32], big.NewInt(500).Bytes()) + return data + }(), + }, + want: false, + }, + { + name: "topic reference that doesn't exist in log", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 2, // Topic index 2, but log only has 1 topic + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 32)}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + }, + Data: make([]byte, 64), + }, + want: false, + }, + { + name: "mixed topic and data predicates all matching", + definition: 
EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 4, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(42)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 5, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintGte, + IntArgs: []*big.Int{big.NewInt(100)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + log: &types.Log{ + Address: contractAddr, + Topics: []common.Hash{ + common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), + }, + Data: func() []byte { + data := make([]byte, 96) // 3 words + // First word: 42 + copy(data[31:32], big.NewInt(42).Bytes()) + // Second word: 150 + copy(data[63:64], big.NewInt(150).Bytes()) + return data + }(), + }, + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := tt.definition.Match(tt.log) + assert.NilError(t, err) + assert.Equal(t, tt.want, result) + }) + } +} + +func TestEventTriggerDefinitionMarshalUnmarshal(t *testing.T) { + contractAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") + + tests := []struct { + name string + definition EventTriggerDefinition + }{ + { + name: "empty definition with no predicates", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{}, + }, + }, + { + name: "definition with single BytesEq predicate", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: 
[]*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + }, + }, + }, + { + name: "definition with single UintLt predicate", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 4, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(1000)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + }, + { + name: "definition with multiple predicates of different types", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 0, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111").Bytes()}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 1, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(50)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{ + Offset: 4, + Length: 1, + }, + ValuePredicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(42)}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + }, + { + name: "definition with large integer values", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 4, + Length: 2, // 2 words for large numbers + }, + ValuePredicate: ValuePredicate{ + Op: UintLte, + IntArgs: []*big.Int{func() *big.Int { + val := big.NewInt(1) + val.Lsh(val, 400) // Very large number + return val + }()}, + ByteArgs: [][]byte{}, + }, + }, + }, + }, + }, + { + name: "definition with BytesEq predicate on multi-word data", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{ + Offset: 4, 
+ Length: 2, // 2 words = 64 bytes + }, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{make([]byte, 64)}, // 64 bytes for 2 words + }, + }, + }, + }, + }, + { + name: "definition with all operation types", + definition: EventTriggerDefinition{ + Contract: contractAddr, + LogPredicates: []LogPredicate{ + { + LogValueRef: LogValueRef{Offset: 0, Length: 1}, + ValuePredicate: ValuePredicate{ + Op: UintLt, + IntArgs: []*big.Int{big.NewInt(100)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{Offset: 1, Length: 1}, + ValuePredicate: ValuePredicate{ + Op: UintLte, + IntArgs: []*big.Int{big.NewInt(200)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{Offset: 2, Length: 1}, + ValuePredicate: ValuePredicate{ + Op: UintEq, + IntArgs: []*big.Int{big.NewInt(42)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{Offset: 3, Length: 1}, + ValuePredicate: ValuePredicate{ + Op: UintGt, + IntArgs: []*big.Int{big.NewInt(300)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{Offset: 4, Length: 1}, + ValuePredicate: ValuePredicate{ + Op: UintGte, + IntArgs: []*big.Int{big.NewInt(400)}, + ByteArgs: [][]byte{}, + }, + }, + { + LogValueRef: LogValueRef{Offset: 5, Length: 1}, + ValuePredicate: ValuePredicate{ + Op: BytesEq, + IntArgs: []*big.Int{}, + ByteArgs: [][]byte{common.HexToHash("0xabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdefabcdef").Bytes()}, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Marshal the original definition + marshaled, err := tt.definition.MarshalBytes() + assert.NilError(t, err, "MarshalBytes should not fail") + + // Unmarshal to get a new definition + var unmarshaled EventTriggerDefinition + err = unmarshaled.UnmarshalBytes(marshaled) + assert.NilError(t, err, "UnmarshalBytes should not fail") + + // Compare the original and unmarshaled definitions + assert.DeepEqual(t, 
tt.definition.Contract, unmarshaled.Contract) + assert.Equal(t, len(tt.definition.LogPredicates), len(unmarshaled.LogPredicates)) + + for i, originalPredicate := range tt.definition.LogPredicates { + unmarshaledPredicate := unmarshaled.LogPredicates[i] + + // Compare LogValueRef + assert.Equal(t, originalPredicate.LogValueRef.Offset, unmarshaledPredicate.LogValueRef.Offset) + assert.Equal(t, originalPredicate.LogValueRef.Length, unmarshaledPredicate.LogValueRef.Length) + + // Compare ValuePredicate + assert.Equal(t, originalPredicate.ValuePredicate.Op, unmarshaledPredicate.ValuePredicate.Op) + + // Compare IntArgs + assert.Equal(t, len(originalPredicate.ValuePredicate.IntArgs), len(unmarshaledPredicate.ValuePredicate.IntArgs)) + for j, originalIntArg := range originalPredicate.ValuePredicate.IntArgs { + unmarshaledIntArg := unmarshaledPredicate.ValuePredicate.IntArgs[j] + assert.Equal(t, originalIntArg.Cmp(unmarshaledIntArg), 0) + } + + // Compare ByteArgs + assert.Equal(t, len(originalPredicate.ValuePredicate.ByteArgs), len(unmarshaledPredicate.ValuePredicate.ByteArgs)) + for j, originalByteArg := range originalPredicate.ValuePredicate.ByteArgs { + unmarshaledByteArg := unmarshaledPredicate.ValuePredicate.ByteArgs[j] + assert.DeepEqual(t, originalByteArg, unmarshaledByteArg) + } + } + + // Additional validation: marshal the unmarshaled definition and compare bytes + remarshaled, err := unmarshaled.MarshalBytes() + assert.NilError(t, err) + assert.DeepEqual(t, marshaled, remarshaled) + }) + } +} + +func TestEventTriggerDefinitionUnmarshalErrors(t *testing.T) { + tests := []struct { + name string + data []byte + }{ + { + name: "empty data", + data: []byte{}, + }, + { + name: "invalid version", + data: []byte{0x99}, // Wrong version + }, + { + name: "version only, no RLP data", + data: []byte{Version}, + }, + { + name: "invalid RLP data", + data: []byte{Version, 0xFF, 0xFF, 0xFF}, // Invalid RLP + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t 
*testing.T) { + var definition EventTriggerDefinition + err := definition.UnmarshalBytes(tt.data) + assert.Assert(t, err != nil, "UnmarshalBytes should fail for invalid data") + }) + } +} + +func TestLogValueRefEncodeRLP(t *testing.T) { + tests := []struct { + name string + ref LogValueRef + expected []byte + }{ + { + name: "topic reference - offset 0, length 1", + ref: LogValueRef{ + Offset: 0, + Length: 1, + }, + expected: []byte{0x80}, // RLP encoding of uint64(0) + }, + { + name: "topic reference - offset 3, length 1", + ref: LogValueRef{ + Offset: 3, + Length: 1, + }, + expected: []byte{0x03}, // RLP encoding of uint64(3) + }, + { + name: "data reference - offset 4, length 1", + ref: LogValueRef{ + Offset: 4, + Length: 1, + }, + expected: []byte{0x04}, // RLP encoding of uint64(4) + }, + { + name: "data reference - offset 5, length 2", + ref: LogValueRef{ + Offset: 5, + Length: 2, + }, + expected: []byte{0xc2, 0x05, 0x02}, // RLP encoding of [5, 2] + }, + { + name: "data reference - offset 10, length 5", + ref: LogValueRef{ + Offset: 10, + Length: 5, + }, + expected: []byte{0xc2, 0x0a, 0x05}, // RLP encoding of [10, 5] + }, + { + name: "large offset", + ref: LogValueRef{ + Offset: 1000, + Length: 3, + }, + expected: []byte{0xc4, 0x82, 0x03, 0xe8, 0x03}, // RLP encoding of [1000, 3] + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + err := tt.ref.EncodeRLP(&buf) + assert.NilError(t, err, "EncodeRLP should not fail") + assert.DeepEqual(t, tt.expected, buf.Bytes()) + }) + } +} + +func TestLogValueRefDecodeRLP(t *testing.T) { + tests := []struct { + name string + encoded []byte + expected LogValueRef + wantErr bool + errMsg string + }{ + { + name: "topic reference - offset 0, length 1", + encoded: []byte{0x80}, // RLP encoding of uint64(0) + expected: LogValueRef{ + Offset: 0, + Length: 1, + }, + wantErr: false, + }, + { + name: "topic reference - offset 3, length 1", + encoded: []byte{0x03}, // RLP encoding of 
uint64(3) + expected: LogValueRef{ + Offset: 3, + Length: 1, + }, + wantErr: false, + }, + { + name: "data reference - offset 4, length 1", + encoded: []byte{0xc2, 0x04, 0x01}, // RLP encoding of [4, 1] + expected: LogValueRef{ + Offset: 4, + Length: 1, + }, + wantErr: false, + }, + { + name: "data reference - offset 5, length 2", + encoded: []byte{0xc2, 0x05, 0x02}, // RLP encoding of [5, 2] + expected: LogValueRef{ + Offset: 5, + Length: 2, + }, + wantErr: false, + }, + { + name: "data reference - offset 10, length 5", + encoded: []byte{0xc2, 0x0a, 0x05}, // RLP encoding of [10, 5] + expected: LogValueRef{ + Offset: 10, + Length: 5, + }, + wantErr: false, + }, + { + name: "large offset", + encoded: []byte{0xc4, 0x82, 0x03, 0xe8, 0x03}, // RLP encoding of [1000, 3] + expected: LogValueRef{ + Offset: 1000, + Length: 3, + }, + wantErr: false, + }, + { + name: "invalid - zero length", + encoded: []byte{0xc2, 0x04, 0x80}, // RLP encoding of [4, 0] + wantErr: true, + errMsg: "log value reference length must be positive", + }, + { + name: "invalid - topic with length > 1", + encoded: []byte{0xc2, 0x02, 0x03}, // RLP encoding of [2, 3] - topic offset with length > 1 + wantErr: true, + errMsg: "log value reference offset < 4 requires length to be 1", + }, + { + name: "invalid - empty RLP data", + encoded: []byte{}, + wantErr: true, + errMsg: "failed to decode LogValueRef", + }, + { + name: "invalid - malformed RLP", + encoded: []byte{0xFF, 0xFF}, + wantErr: true, + errMsg: "failed to decode LogValueRef", + }, + { + name: "invalid - incomplete list", + encoded: []byte{0xc1, 0x04}, // List with only one element + wantErr: true, + errMsg: "failed to read length from LogValueRef", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ref LogValueRef + err := rlp.DecodeBytes(tt.encoded, &ref) + + if tt.wantErr { + assert.Assert(t, err != nil, "DecodeRLP should fail") + assert.ErrorContains(t, err, tt.errMsg) + } else { + assert.NilError(t, err, 
"DecodeRLP should not fail") + assert.Equal(t, tt.expected.Offset, ref.Offset) + assert.Equal(t, tt.expected.Length, ref.Length) + } + }) + } +} + +func TestLogValueRefRLPRoundTrip(t *testing.T) { + tests := []struct { + name string + ref LogValueRef + }{ + { + name: "topic reference - offset 0", + ref: LogValueRef{ + Offset: 0, + Length: 1, + }, + }, + { + name: "topic reference - offset 3", + ref: LogValueRef{ + Offset: 3, + Length: 1, + }, + }, + { + name: "data reference - single word", + ref: LogValueRef{ + Offset: 4, + Length: 1, + }, + }, + { + name: "data reference - multiple words", + ref: LogValueRef{ + Offset: 5, + Length: 10, + }, + }, + { + name: "large values", + ref: LogValueRef{ + Offset: 65535, + Length: 1000, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Encode + var buf bytes.Buffer + err := tt.ref.EncodeRLP(&buf) + assert.NilError(t, err, "EncodeRLP should not fail") + + encoded := buf.Bytes() + assert.Assert(t, len(encoded) > 0, "Encoded data should not be empty") + + // Decode + var decoded LogValueRef + err = rlp.DecodeBytes(encoded, &decoded) + assert.NilError(t, err, "DecodeRLP should not fail") + + // Verify round trip + assert.Equal(t, tt.ref.Offset, decoded.Offset) + assert.Equal(t, tt.ref.Length, decoded.Length) + + // Encode again and verify consistency + var buf2 bytes.Buffer + err = decoded.EncodeRLP(&buf2) + assert.NilError(t, err, "Second EncodeRLP should not fail") + assert.DeepEqual(t, encoded, buf2.Bytes()) + }) + } +} diff --git a/rolling-shutter/keyperimpl/shutterservice/eventtriggerregisteredprocessor.go b/rolling-shutter/keyperimpl/shutterservice/eventtriggerregisteredprocessor.go new file mode 100644 index 00000000..701a0b05 --- /dev/null +++ b/rolling-shutter/keyperimpl/shutterservice/eventtriggerregisteredprocessor.go @@ -0,0 +1,128 @@ +package shutterservice + +import ( + "bytes" + "context" + "math" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + 
"github.com/ethereum/go-ethereum/crypto" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + triggerRegistryBindings "github.com/shutter-network/contracts/v2/bindings/shuttereventtriggerregistry" + + "github.com/shutter-network/rolling-shutter/rolling-shutter/keyperimpl/shutterservice/database" + "github.com/shutter-network/rolling-shutter/rolling-shutter/shdb" +) + +// EventTriggerRegisteredEventProcessor implements the EventProcessor interface for EventTriggerRegistered events +// in the ShutterRegistry contract. +type EventTriggerRegisteredEventProcessor struct { + Contract *triggerRegistryBindings.Shuttereventtriggerregistry + DBPool *pgxpool.Pool +} + +func NewEventTriggerRegisteredEventProcessor( + contract *triggerRegistryBindings.Shuttereventtriggerregistry, + dbPool *pgxpool.Pool, +) *EventTriggerRegisteredEventProcessor { + return &EventTriggerRegisteredEventProcessor{ + Contract: contract, + DBPool: dbPool, + } +} + +func (p *EventTriggerRegisteredEventProcessor) GetProcessorName() string { + return "event_trigger_registered" +} + +func (p *EventTriggerRegisteredEventProcessor) FetchEvents(ctx context.Context, start, end uint64) ([]Event, error) { + opts := bind.FilterOpts{ + Start: start, + End: &end, + Context: ctx, + } + it, err := p.Contract.FilterEventTriggerRegistered(&opts, []uint64{}) + if err != nil { + return nil, errors.Wrap(err, "failed to query identity registered events") + } + + var events []Event + for it.Next() { + events = append(events, it.Event) + } + if it.Error() != nil { + return nil, errors.Wrap(it.Error(), "failed to iterate identity registered events") + } + return events, nil +} + +func (p *EventTriggerRegisteredEventProcessor) ProcessEvents(ctx context.Context, tx pgx.Tx, events []Event) error { + queries := database.New(tx) + for _, event := range events { + registryEvent := event.(*triggerRegistryBindings.ShuttereventtriggerregistryEventTriggerRegistered) 
+ evLog := log.With(). + Uint64("block-number", registryEvent.Raw.BlockNumber). + Hex("block-hash", registryEvent.Raw.BlockHash.Bytes()). + Uint("tx-index", registryEvent.Raw.TxIndex). + Uint("log-index", registryEvent.Raw.Index). + Uint64("eon", registryEvent.Eon). + Hex("identity-prefix", registryEvent.IdentityPrefix[:]). + Str("sender", registryEvent.Sender.Hex()). + Hex("definition", registryEvent.TriggerDefinition). + Uint64("ttl", registryEvent.Ttl). + Logger() + + if registryEvent.Eon > math.MaxInt64 { + evLog.Info().Msg("skipping event trigger registered event with Eon > math.MaxInt64") + continue + } + if registryEvent.Ttl > math.MaxInt64 { + evLog.Info().Msg("skipping event trigger registered event with TTL > math.MaxInt64") + continue + } + + triggerDefinition := EventTriggerDefinition{} + err := triggerDefinition.UnmarshalBytes(registryEvent.TriggerDefinition) + if err != nil { + evLog.Info().Err(err).Msg("skipping invalid trigger definition") + continue + } + + _, err = queries.InsertEventTriggerRegisteredEvent(ctx, database.InsertEventTriggerRegisteredEventParams{ + BlockNumber: int64(registryEvent.Raw.BlockNumber), + BlockHash: registryEvent.Raw.BlockHash[:], + TxIndex: int64(registryEvent.Raw.TxIndex), + LogIndex: int64(registryEvent.Raw.Index), + Eon: int64(registryEvent.Eon), + IdentityPrefix: registryEvent.IdentityPrefix[:], + Sender: shdb.EncodeAddress(registryEvent.Sender), + Definition: registryEvent.TriggerDefinition, + Ttl: int64(registryEvent.Ttl), + Identity: computeEventTriggerIdentity(registryEvent), + }) + if err != nil { + return errors.Wrap(err, "failed to insert event trigger registered event into db") + } + evLog.Info().Msg("processed event trigger registered event") + } + return nil +} + +func (p *EventTriggerRegisteredEventProcessor) RollbackEvents(ctx context.Context, tx pgx.Tx, toBlock int64) error { + queries := database.New(tx) + err := queries.DeleteEventTriggerRegisteredEventsFromBlockNumber(ctx, toBlock+1) + if err != nil { 
+ return errors.Wrap(err, "failed to delete event trigger registered events during rollback") + } + return nil +} + +func computeEventTriggerIdentity(event *triggerRegistryBindings.ShuttereventtriggerregistryEventTriggerRegistered) []byte { + var buf bytes.Buffer + buf.Write(event.IdentityPrefix[:]) + buf.Write(event.Sender.Bytes()) + return crypto.Keccak256(buf.Bytes()) +} diff --git a/rolling-shutter/keyperimpl/shutterservice/keyper.go b/rolling-shutter/keyperimpl/shutterservice/keyper.go index 137febc1..be9e7be0 100644 --- a/rolling-shutter/keyperimpl/shutterservice/keyper.go +++ b/rolling-shutter/keyperimpl/shutterservice/keyper.go @@ -2,6 +2,7 @@ package shutterservice import ( "context" + "fmt" "log/slog" "time" @@ -10,6 +11,7 @@ import ( "github.com/jackc/pgx/v4/pgxpool" "github.com/pkg/errors" "github.com/rs/zerolog/log" + triggerRegistryBindings "github.com/shutter-network/contracts/v2/bindings/shuttereventtriggerregistry" registryBindings "github.com/shutter-network/contracts/v2/bindings/shutterregistry" "github.com/shutter-network/rolling-shutter/rolling-shutter/eonkeypublisher" @@ -37,6 +39,7 @@ type Keyper struct { eonKeyPublisher *eonkeypublisher.EonKeyPublisher latestTriggeredTime *uint64 syncMonitor *SyncMonitor + multiEventSyncer *MultiEventSyncer // input events newBlocks chan *syncevent.LatestBlock @@ -113,6 +116,13 @@ func (kpr *Keyper) Start(ctx context.Context, runner service.Runner) error { return err } + if kpr.config.EventBasedTriggersEnabled() { + err = kpr.initMultiEventSyncer(ctx) + if err != nil { + return err + } + } + kpr.syncMonitor = &SyncMonitor{ DBPool: kpr.dbpool, CheckInterval: time.Duration(kpr.config.Chain.SyncMonitorCheckInterval) * time.Second, @@ -183,6 +193,65 @@ func (kpr *Keyper) initRegistrySyncer(ctx context.Context) error { return nil } +// initMultiEventSyncer initializes the multi event syncer and all its event processors. 
+func (kpr *Keyper) initMultiEventSyncer(ctx context.Context) error { + triggerRegistryClient, err := ethclient.DialContext(ctx, kpr.config.Chain.Node.EthereumURL) + if err != nil { + return fmt.Errorf("failed to dial Ethereum execution node: %w", err) + } + eventTriggerRegistryContract, err := triggerRegistryBindings.NewShuttereventtriggerregistry( + kpr.config.Chain.Contracts.ShutterEventTriggerRegistry, + triggerRegistryClient, + ) + if err != nil { + return fmt.Errorf("failed to create ShutterRegistry contract instance: %w", err) + } + eventTriggerRegisteredProcessor := NewEventTriggerRegisteredEventProcessor( + eventTriggerRegistryContract, + kpr.dbpool, + ) + + triggerClient, err := ethclient.DialContext(ctx, kpr.config.Chain.Node.EthereumURL) + if err != nil { + return fmt.Errorf("failed to dial Ethereum execution node: %w", err) + } + triggerProcessor := NewTriggerProcessor(triggerClient, kpr.dbpool) + + processors := []EventProcessor{ + eventTriggerRegisteredProcessor, + triggerProcessor, + } + + multiEventSyncerClient, err := ethclient.DialContext(ctx, kpr.config.Chain.Node.EthereumURL) + if err != nil { + return fmt.Errorf("failed to dial Ethereum node at %s: %w", kpr.config.Chain.Node.EthereumURL, err) + } + kpr.multiEventSyncer, err = NewMultiEventSyncer( + kpr.dbpool, + multiEventSyncerClient, + kpr.config.Chain.SyncStartBlockNumber, + processors, + ) + if err != nil { + return fmt.Errorf("failed to initialize multi event syncer: %w", err) + } + + // Perform an initial sync now because it might take some time and doing so during regular + // slot processing might hold up things + log.Info().Msg("performing initial sync of multi event syncer") + latestHeader, err := multiEventSyncerClient.HeaderByNumber(ctx, nil) + if err != nil { + return fmt.Errorf("failed to get latest block header: %w", err) + } + err = kpr.multiEventSyncer.Sync(ctx, latestHeader) + if err != nil { + return fmt.Errorf("failed to perform initial sync: %w", err) + } + 
log.Info().Msg("multi event syncer initialized") + + return nil +} + func (kpr *Keyper) processInputs(ctx context.Context) error { var err error for { diff --git a/rolling-shutter/keyperimpl/shutterservice/messagingmiddleware.go b/rolling-shutter/keyperimpl/shutterservice/messagingmiddleware.go index 4e2c3e1b..04d61c65 100644 --- a/rolling-shutter/keyperimpl/shutterservice/messagingmiddleware.go +++ b/rolling-shutter/keyperimpl/shutterservice/messagingmiddleware.go @@ -213,19 +213,28 @@ func (i *MessagingMiddleware) interceptDecryptionKeys( } func updateEventFlag(ctx context.Context, serviceDB *database.Queries, keys *p2pmsg.DecryptionKeys) error { - column1 := make([]int64, 0) - column2 := make([][]byte, 0) + eons := make([]int64, 0) + identities := make([][]byte, 0) for _, key := range keys.Keys { - column1 = append(column1, int64(keys.Eon)) - column2 = append(column2, key.IdentityPreimage) + eons = append(eons, int64(keys.Eon)) + identities = append(identities, key.IdentityPreimage) } - err := serviceDB.UpdateDecryptedFlag(ctx, database.UpdateDecryptedFlagParams{ - Column1: column1, - Column2: column2, + // We don't know a priori if the keys where triggered by time-based or event-based triggers, + // so we just check both tables and update whatever we find there. 
+ err := serviceDB.UpdateTimeBasedDecryptedFlags(ctx, database.UpdateTimeBasedDecryptedFlagsParams{ + Eons: eons, + Identities: identities, }) if err != nil { - return errors.Wrap(err, "failed to update decrypted flag") + return errors.Wrap(err, "failed to update decrypted flags for time based triggers") + } + err = serviceDB.UpdateEventBasedDecryptedFlags(ctx, database.UpdateEventBasedDecryptedFlagsParams{ + Eons: eons, + Identities: identities, + }) + if err != nil { + return errors.Wrap(err, "failed to update decrypted flags for event based triggers") } return nil } diff --git a/rolling-shutter/keyperimpl/shutterservice/multieventsyncer.go b/rolling-shutter/keyperimpl/shutterservice/multieventsyncer.go new file mode 100644 index 00000000..1c27cdd2 --- /dev/null +++ b/rolling-shutter/keyperimpl/shutterservice/multieventsyncer.go @@ -0,0 +1,245 @@ +package shutterservice + +import ( + "bytes" + "context" + "math/big" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + + "github.com/shutter-network/rolling-shutter/rolling-shutter/keyperimpl/shutterservice/database" + "github.com/shutter-network/rolling-shutter/rolling-shutter/medley" +) + +const ( + DefaultAssumedReorgDepth = 10 + DefaultMaxRequestBlockRange = 10_000 +) + +type MultiEventSyncer struct { + Processors map[string]EventProcessor + DBPool *pgxpool.Pool + ExecutionClient *ethclient.Client + SyncStartBlockNumber uint64 + AssumedReorgDepth int + MaxRequestBlockRange uint64 +} + +type SyncStatus struct { + BlockNumber int64 + BlockHash []byte +} + +func NewMultiEventSyncer( + dbPool *pgxpool.Pool, + executionClient *ethclient.Client, + syncStartBlockNumber uint64, + processors []EventProcessor, +) (*MultiEventSyncer, error) { + processorMap := make(map[string]EventProcessor) + for _, processor := range processors { + name := 
processor.GetProcessorName() + if _, exists := processorMap[name]; exists { + return nil, errors.Errorf("duplicate processor name: %s", name) + } + processorMap[name] = processor + } + + return &MultiEventSyncer{ + Processors: processorMap, + DBPool: dbPool, + ExecutionClient: executionClient, + SyncStartBlockNumber: syncStartBlockNumber, + AssumedReorgDepth: DefaultAssumedReorgDepth, + MaxRequestBlockRange: DefaultMaxRequestBlockRange, + }, nil +} + +func (s *MultiEventSyncer) Sync(ctx context.Context, header *types.Header) error { + if err := s.handlePotentialReorg(ctx, header); err != nil { + return errors.Wrap(err, "failed to handle potential reorg") + } + + syncedUntil, err := s.getSyncedUntil(ctx) + if err != nil { + return errors.Wrap(err, "failed to determine sync start point") + } + start := uint64(syncedUntil + 1) + end := header.Number.Uint64() + if start > end { + log.Debug(). + Uint64("start-block", start). + Uint64("end-block", end). + Msg("already synced up to target block") + return nil + } + + syncRanges := medley.GetSyncRanges(start, end, s.MaxRequestBlockRange) + log.Debug(). + Uint64("start-block", start). + Uint64("end-block", end). + Int("num-sync-ranges", len(syncRanges)). + Msg("starting multi event sync") + numEvents := 0 + for _, r := range syncRanges { + numEventsInRange, err := s.syncRange(ctx, r[0], r[1]) + if err != nil { + return errors.Wrapf(err, "failed to sync range [%d, %d]", r[0], r[1]) + } + numEvents += numEventsInRange + } + + log.Info(). + Uint64("start-block", start). + Uint64("end-block", end). + Int("num-events", numEvents). 
+ Msg("completed multi event sync") + return nil +} + +func (s *MultiEventSyncer) syncRange(ctx context.Context, start, end uint64) (int, error) { + header, err := s.ExecutionClient.HeaderByNumber(ctx, new(big.Int).SetUint64(end)) + if err != nil { + return 0, errors.Wrap(err, "failed to get execution block header") + } + + allEvents := make(map[string][]Event) + numEvents := 0 + for name, processor := range s.Processors { + events, err := processor.FetchEvents(ctx, start, end) + if err != nil { + return 0, errors.Wrapf(err, "failed to fetch events for processor %s in range [%d, %d]", name, start, end) + } + allEvents[name] = events + numEvents += len(events) + } + + err = s.DBPool.BeginFunc(ctx, func(tx pgx.Tx) error { + for name, processor := range s.Processors { + events := allEvents[name] + err := processor.ProcessEvents(ctx, tx, events) + if err != nil { + return errors.Wrapf(err, "failed to process events for processor %s", name) + } + } + + err := s.setSyncStatus(ctx, tx, int64(end), header.Hash().Bytes()) + if err != nil { + return errors.Wrap(err, "failed to update global sync status") + } + + return nil + }) + if err != nil { + return 0, err + } + + return numEvents, nil +} + +func (s *MultiEventSyncer) getSyncStatus(ctx context.Context) (*SyncStatus, error) { + queries := database.New(s.DBPool) + status, err := queries.GetMultiEventSyncStatus(ctx) + if err != nil { + return nil, err + } + return &SyncStatus{ + BlockNumber: status.BlockNumber, + BlockHash: status.BlockHash, + }, nil +} + +func (s *MultiEventSyncer) setSyncStatus(ctx context.Context, tx pgx.Tx, blockNumber int64, blockHash []byte) error { + queries := database.New(tx) + return queries.SetMultiEventSyncStatus(ctx, database.SetMultiEventSyncStatusParams{ + BlockNumber: blockNumber, + BlockHash: blockHash, + }) +} + +func (s *MultiEventSyncer) getSyncedUntil(ctx context.Context) (int64, error) { + status, err := s.getSyncStatus(ctx) + if err != nil { + if err == pgx.ErrNoRows { + return 
int64(s.SyncStartBlockNumber), nil + } + return 0, err + } + return status.BlockNumber, nil +} + +func calculateReorgDepth(status *SyncStatus, header *types.Header, assumedReorgDepth int) int { + shouldBeParent := header.Number.Int64() == status.BlockNumber+1 + isParent := bytes.Equal(header.ParentHash.Bytes(), status.BlockHash) + isReorg := shouldBeParent && !isParent + if !isReorg { + return 0 + } + + // To avoid finding the exact branch point, we just assume a fixed, conservative depth + depth := assumedReorgDepth + if status.BlockNumber < int64(depth) { + return int(status.BlockNumber) + } + return depth +} + +func (s *MultiEventSyncer) handlePotentialReorg(ctx context.Context, header *types.Header) error { + status, err := s.getSyncStatus(ctx) + if err != nil { + if err == pgx.ErrNoRows { + return nil // if nothing is synced yet, no reorg is necessary + } + return errors.Wrap(err, "failed to get sync status") + } + numReorgedBlocks := calculateReorgDepth(status, header, s.AssumedReorgDepth) + if numReorgedBlocks == 0 { + return nil + } + + toBlock := status.BlockNumber - int64(numReorgedBlocks) + log.Info(). + Int("reorg-depth", numReorgedBlocks). + Int64("rollback-to-block-number", toBlock). + Uint64("current-block-number", header.Number.Uint64()). + Hex("current-block-hash", header.Hash().Bytes()). 
+ Msg("detected blockchain reorg, rolling back processors") + return s.rollback(ctx, toBlock) +} + +func (s *MultiEventSyncer) rollback(ctx context.Context, toBlock int64) error { + status, err := s.getSyncStatus(ctx) + if err != nil { + return errors.Wrap(err, "failed to get sync status during rollback") + } + + if toBlock > status.BlockNumber { + return errors.Errorf("invalid rollback target: toBlock (%d) is greater than current synced block (%d)", + toBlock, status.BlockNumber) + } + + return s.DBPool.BeginFunc(ctx, func(tx pgx.Tx) error { + for name, processor := range s.Processors { + err = processor.RollbackEvents(ctx, tx, toBlock) + if err != nil { + return errors.Wrapf(err, "failed to rollback events for processor %s", name) + } + } + + err = s.setSyncStatus(ctx, tx, toBlock, []byte{}) + if err != nil { + return errors.Wrap(err, "failed to update sync status during rollback") + } + + log.Info(). + Int64("previous-synced-until", status.BlockNumber). + Int64("new-synced-until", toBlock). + Msg("rolled back all processors due to reorg") + return nil + }) +} diff --git a/rolling-shutter/keyperimpl/shutterservice/newblock.go b/rolling-shutter/keyperimpl/shutterservice/newblock.go index 1c0ff04a..e5ed26f0 100644 --- a/rolling-shutter/keyperimpl/shutterservice/newblock.go +++ b/rolling-shutter/keyperimpl/shutterservice/newblock.go @@ -25,15 +25,36 @@ func (kpr *Keyper) processNewBlock(ctx context.Context, ev *syncevent.LatestBloc return err } } + if kpr.config.EventBasedTriggersEnabled() { + err := kpr.multiEventSyncer.Sync(ctx, ev.Header) + if err != nil { + return err + } + } return kpr.maybeTriggerDecryption(ctx, ev) } -// maybeTriggerDecryption triggers decryption for the identities registered if -// - it hasn't been triggered for thos identities before and -// - the keyper is part of the corresponding keyper set. 
func (kpr *Keyper) maybeTriggerDecryption(ctx context.Context, block *syncevent.LatestBlock) error { + timeBasedTriggers, err := kpr.prepareTimeBasedTriggers(ctx, block) + if err != nil { + return errors.Wrap(err, "failed to get time based triggers") + } + kpr.sendTriggers(ctx, timeBasedTriggers) + + if kpr.config.EventBasedTriggersEnabled() { + eventBasedTriggers, err := kpr.prepareEventBasedTriggers(ctx) + if err != nil { + return errors.Wrap(err, "failed to get event based triggers") + } + kpr.sendTriggers(ctx, eventBasedTriggers) + } + + return nil +} + +func (kpr *Keyper) prepareTimeBasedTriggers(ctx context.Context, block *syncevent.LatestBlock) ([]epochkghandler.DecryptionTrigger, error) { if kpr.latestTriggeredTime != nil && block.Header.Time <= *kpr.latestTriggeredTime { - return nil + return nil, nil } lastTriggeredTime := 0 @@ -55,7 +76,7 @@ func (kpr *Keyper) maybeTriggerDecryption(ctx context.Context, block *syncevent. if err != nil && err != pgx.ErrNoRows { // pgx.ErrNoRows is expected if we're not part of the keyper set (which is checked later). // That's because non-keypers don't sync identity registered events. TODO: this needs to be implemented - return errors.Wrap(err, "failed to query non decrypted identity registered events from db") + return nil, errors.Wrap(err, "failed to query non decrypted identity registered events from db") } obsDB := obskeyper.New(kpr.dbpool) @@ -66,7 +87,7 @@ func (kpr *Keyper) maybeTriggerDecryption(ctx context.Context, block *syncevent. 
} } - return kpr.triggerDecryption(ctx, eventsToDecrypt, block) + return kpr.createTriggersFromIdentityRegisteredEvents(ctx, eventsToDecrypt, block) } func (kpr *Keyper) shouldTriggerDecryption( @@ -104,10 +125,11 @@ func (kpr *Keyper) shouldTriggerDecryption( return true } -func (kpr *Keyper) triggerDecryption(ctx context.Context, +func (kpr *Keyper) createTriggersFromIdentityRegisteredEvents( + ctx context.Context, triggeredEvents []servicedatabase.IdentityRegisteredEvent, triggeredBlock *syncevent.LatestBlock, -) error { +) ([]epochkghandler.DecryptionTrigger, error) { coreKeyperDB := corekeyperdatabase.New(kpr.dbpool) identityPreimages := make(map[int64][]identitypreimage.IdentityPreimage) @@ -117,7 +139,7 @@ func (kpr *Keyper) triggerDecryption(ctx context.Context, eonStruct, err := coreKeyperDB.GetEonForBlockNumber(ctx, nextBlock) if err != nil { - return errors.Wrapf(err, "failed to query eon for block number %d from db", nextBlock) + return nil, errors.Wrapf(err, "failed to query eon for block number %d from db", nextBlock) } if eonStruct.Eon != event.Eon { @@ -139,6 +161,7 @@ func (kpr *Keyper) triggerDecryption(ctx context.Context, } } + triggers := []epochkghandler.DecryptionTrigger{} for eon, preImages := range identityPreimages { sortedIdentityPreimages := sortIdentityPreimages(preImages) @@ -147,16 +170,89 @@ func (kpr *Keyper) triggerDecryption(ctx context.Context, BlockNumber: uint64(lastEonBlock[eon]), IdentityPreimages: sortedIdentityPreimages, } + triggers = append(triggers, trigger) + } + return triggers, nil +} + +func (kpr *Keyper) prepareEventBasedTriggers(ctx context.Context) ([]epochkghandler.DecryptionTrigger, error) { + coreKeyperDB := corekeyperdatabase.New(kpr.dbpool) + serviceDB := servicedatabase.New(kpr.dbpool) + obsDB := obskeyper.New(kpr.dbpool) + + firedTriggers, err := serviceDB.GetUndecryptedFiredTriggers(ctx) + if err != nil { + return nil, errors.Wrap(err, "failed to get undecrypted fired triggers from db") + } + + 
firedTriggersByEon := make(map[int64][]servicedatabase.GetUndecryptedFiredTriggersRow)
+	for _, firedTrigger := range firedTriggers {
+		firedTriggersByEon[firedTrigger.Eon] = append(firedTriggersByEon[firedTrigger.Eon], firedTrigger)
+	}
+
+	var decryptionTriggers []epochkghandler.DecryptionTrigger
+	for eon, firedTriggers := range firedTriggersByEon {
+		if len(firedTriggers) == 0 {
+			continue
+		}
+		eonStruct, err := coreKeyperDB.GetEon(ctx, eon)
+		if err != nil {
+			if err == pgx.ErrNoRows {
+				log.Info().
+					Int64("eon", eon).
+					Msg("ignoring fired triggers as eon not found in db")
+				continue
+			}
+			return nil, errors.Wrapf(err, "failed to query eon %d from db", eon)
+		}
+		keyperSet, err := obsDB.GetKeyperSet(ctx, eonStruct.ActivationBlockNumber)
+		if err != nil {
+			log.Err(err).
+				Int64("eon", eon).
+				Int64("activation-block-number", eonStruct.ActivationBlockNumber).
+				Msg("ignoring fired triggers as keyper set not found in db")
+			continue
+		}
+		if !keyperSet.Contains(kpr.config.GetAddress()) {
+			log.Info().
+				Int64("eon", eon).
+				Int64("activation-block-number", eonStruct.ActivationBlockNumber).
+				Str("address", kpr.config.GetAddress().Hex()).
+				Msg("ignoring fired triggers as not part of keyper set")
+			continue
+		}
+
+		identities := []identitypreimage.IdentityPreimage{}
+		for _, firedTrigger := range firedTriggers {
+			identities = append(identities, firedTrigger.Identity)
+		}
+
+		decryptionTrigger := epochkghandler.DecryptionTrigger{
+			BlockNumber:       uint64(eonStruct.ActivationBlockNumber),
+			IdentityPreimages: identities,
+		}
+		decryptionTriggers = append(decryptionTriggers, decryptionTrigger)
+	}
+	return decryptionTriggers, nil
+}
+func (kpr *Keyper) sendTriggers(ctx context.Context, triggers []epochkghandler.DecryptionTrigger) {
+	for _, trigger := range triggers {
 		event := broker.NewEvent(&trigger)
 		log.Debug().
-			Uint64("block-number", uint64(lastEonBlock[eon])).
+			Uint64("block-number", trigger.BlockNumber).
 			Int("num-identities", len(trigger.IdentityPreimages)).
 			
Msg("sending decryption trigger") - kpr.decryptionTriggerChannel <- event - } - return nil + select { + case kpr.decryptionTriggerChannel <- event: + case <-ctx.Done(): + log.Warn(). + Err(ctx.Err()). + Msg("context canceled while sending decryption trigger") + return + } + } } func sortIdentityPreimages(identityPreimages []identitypreimage.IdentityPreimage) []identitypreimage.IdentityPreimage { diff --git a/rolling-shutter/keyperimpl/shutterservice/newblock_test.go b/rolling-shutter/keyperimpl/shutterservice/newblock_test.go index 973071d6..0e5e89ee 100644 --- a/rolling-shutter/keyperimpl/shutterservice/newblock_test.go +++ b/rolling-shutter/keyperimpl/shutterservice/newblock_test.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/shutter-network/contracts/v2/bindings/shutterregistry" "gotest.tools/assert" obskeyper "github.com/shutter-network/rolling-shutter/rolling-shutter/chainobserver/db/keyper" @@ -62,10 +61,8 @@ func TestProcessBlockSuccess(t *testing.T) { activationBlockNumber := 100 identityPrefix, _ := generateRandom32Bytes() - identity := computeIdentity(&shutterregistry.ShutterregistryIdentityRegistered{ - IdentityPrefix: [32]byte(identityPrefix), - Sender: sender, - }) + identity := identityPrefix + identity = append(identity, sender.Bytes()...) 
keyperConfigIndex := uint64(1) err := coreKeyperDB.InsertEon(ctx, corekeyperdatabase.InsertEonParams{ diff --git a/rolling-shutter/keyperimpl/shutterservice/triggerprocessor.go b/rolling-shutter/keyperimpl/shutterservice/triggerprocessor.go new file mode 100644 index 00000000..a6ad4369 --- /dev/null +++ b/rolling-shutter/keyperimpl/shutterservice/triggerprocessor.go @@ -0,0 +1,152 @@ +package shutterservice + +import ( + "context" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/jackc/pgx/v4" + "github.com/jackc/pgx/v4/pgxpool" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + + "github.com/shutter-network/rolling-shutter/rolling-shutter/keyperimpl/shutterservice/database" +) + +// TriggerProcessor implements the EventProcessor interface for processing trigger events. +type TriggerProcessor struct { + ExecutionClient *ethclient.Client + DBPool *pgxpool.Pool +} + +type TriggerEvent struct { + EventTriggerRegisteredEvent database.EventTriggerRegisteredEvent + Log types.Log +} + +func NewTriggerProcessor( + executionClient *ethclient.Client, + dbPool *pgxpool.Pool, +) *TriggerProcessor { + return &TriggerProcessor{ + ExecutionClient: executionClient, + DBPool: dbPool, + } +} + +func (tp *TriggerProcessor) GetProcessorName() string { + return "trigger" +} + +func (tp *TriggerProcessor) FetchEvents(ctx context.Context, start, end uint64) ([]Event, error) { + queries := database.New(tp.DBPool) + // Consider event triggers that have not fired yet and have not expired at the start block. + // They might have expired at the end block though which will be checked later. + triggerRegisteredEvents, err := queries.GetActiveEventTriggerRegisteredEvents(ctx, int64(start)) + if err != nil { + return nil, errors.Wrap(err, "failed to get event trigger registered events") + } + + var events []Event + for _, triggerRegisteredEvent := range triggerRegisteredEvents { + triggerLog := log.With(). 
+ Int64("block-number", triggerRegisteredEvent.BlockNumber). + Hex("block-hash", triggerRegisteredEvent.BlockHash). + Int64("tx-index", triggerRegisteredEvent.TxIndex). + Int64("log-index", triggerRegisteredEvent.LogIndex). + Hex("identity-prefix", triggerRegisteredEvent.IdentityPrefix). + Str("sender", triggerRegisteredEvent.Sender). + Hex("definition", triggerRegisteredEvent.Definition). + Int64("ttl", triggerRegisteredEvent.Ttl). + Logger() + + trigger := EventTriggerDefinition{} + err := trigger.UnmarshalBytes(triggerRegisteredEvent.Definition) + if err != nil { + // This is not supposed to happen as only valid triggers are inserted into the database. + triggerLog.Error().Err(err).Msg("ignoring invalid trigger definition in database") + continue + } + + filterQuery, err := trigger.ToFilterQuery() + if err != nil { + // This is not supposed to happen as only valid triggers are inserted into the database + // and valid triggers should always have a valid filter query. + triggerLog.Error().Err(err).Msg("failed to create filter query for trigger") + continue + } + filterQuery.FromBlock = new(big.Int).SetUint64(start) + filterQuery.ToBlock = new(big.Int).SetUint64(end) + + logs, err := tp.ExecutionClient.FilterLogs(ctx, filterQuery) + if err != nil { + return nil, errors.Wrapf(err, "failed to filter logs for event trigger") + } + + for _, eventLog := range logs { + // Check that the trigger has not expired at the time of the event. + if eventLog.BlockNumber > uint64(triggerRegisteredEvent.BlockNumber+triggerRegisteredEvent.Ttl) { + continue + } + match, err := trigger.Match(&eventLog) + if err != nil { + triggerLog.Error().Err(err).Msg("failed to match trigger with event log") + continue + } + if !match { + triggerLog.Debug(). + Str("log", fmt.Sprintf("%+v", eventLog)). 
+ Msg("skipping log that matched filter but not additional predicates") + continue + } + events = append(events, &TriggerEvent{ + Log: eventLog, + EventTriggerRegisteredEvent: triggerRegisteredEvent, + }) + } + } + + return events, nil +} + +func (tp *TriggerProcessor) ProcessEvents(ctx context.Context, tx pgx.Tx, events []Event) error { + queries := database.New(tx) + for _, untypedEvent := range events { + event := untypedEvent.(*TriggerEvent) + err := queries.InsertFiredTrigger(ctx, database.InsertFiredTriggerParams{ + Eon: event.EventTriggerRegisteredEvent.Eon, + IdentityPrefix: event.EventTriggerRegisteredEvent.IdentityPrefix, + Sender: event.EventTriggerRegisteredEvent.Sender, + BlockNumber: int64(event.Log.BlockNumber), + BlockHash: event.Log.BlockHash[:], + TxIndex: int64(event.Log.TxIndex), + LogIndex: int64(event.Log.Index), + }) + if err != nil { + return fmt.Errorf("failed to insert fired trigger: %w", err) + } + log.Info(). + Int64("trigger-registered-block-number", event.EventTriggerRegisteredEvent.BlockNumber). + Hex("trigger-registered-block-hash", event.EventTriggerRegisteredEvent.BlockHash). + Int64("trigger-registered-tx-index", event.EventTriggerRegisteredEvent.TxIndex). + Int64("trigger-registered-log-index", event.EventTriggerRegisteredEvent.LogIndex). + Uint64("event-block-number", event.Log.BlockNumber). + Hex("event-block-hash", event.Log.BlockHash.Bytes()). + Uint("event-tx-index", event.Log.TxIndex). + Uint("event-log-index", event.Log.Index). + Msg("processed fired trigger event") + } + + return nil +} + +func (tp *TriggerProcessor) RollbackEvents(ctx context.Context, tx pgx.Tx, toBlock int64) error { + queries := database.New(tx) + err := queries.DeleteFiredTriggersFromBlockNumber(ctx, toBlock+1) + if err != nil { + return fmt.Errorf("failed to delete fired triggers from block number: %w", err) + } + return nil +}