From 81c8d0426f60d012e2a5249b5006ea3a94ca41df Mon Sep 17 00:00:00 2001 From: Natanael Mojica Date: Mon, 11 Aug 2025 13:19:54 -0600 Subject: [PATCH 1/9] fix(docker): resolve dev container startup and standardize compose configs - Fix dev containers exiting immediately by adding proper entrypoints - Consolidate profiling config from docker-compose.prof.yml into dev.yml - Standardize volume mounts and add missing horizon.json mappings - Update configs for Horizon upgrade (GraphTallyCollector vs TAPVerifier) - Add profiling support with proper security options and permissions Resolves issues preventing indexer-service and tap-agent from running in dev mode --- contrib/docker-compose.dev.yml | 32 ++++++++++---- contrib/docker-compose.prof.yml | 75 --------------------------------- contrib/docker-compose.yml | 14 +++--- 3 files changed, 33 insertions(+), 88 deletions(-) delete mode 100644 contrib/docker-compose.prof.yml diff --git a/contrib/docker-compose.dev.yml b/contrib/docker-compose.dev.yml index a836590d1..aad5cbc73 100644 --- a/contrib/docker-compose.dev.yml +++ b/contrib/docker-compose.dev.yml @@ -1,18 +1,23 @@ services: indexer-service: image: indexer-base:latest + build: + context: .. container_name: indexer-service volumes: - - ../target/release/indexer-service-rs:/usr/local/bin/indexer-service-rs - - ./indexer-service/start.sh:/usr/local/bin/start.sh - - ./indexer-service/config.toml:/opt/config/config.toml - ./local-network/tap-contracts.json:/opt/contracts.json:ro + - ./local-network/horizon.json:/opt/horizon.json:ro - ./local-network/.env:/opt/.env:ro + - ./profiling:/opt/profiling:rw + # - ./indexer-service/config.toml:/opt/config/config.toml + - ./indexer-service/start.sh:/usr/local/bin/start.sh - ../migrations:/opt/migrations:ro + - ../target/release/indexer-service-rs:/usr/local/bin/indexer-service-rs entrypoint: ["/usr/local/bin/start.sh"] environment: - RUST_BACKTRACE=1 - RUST_LOG=debug + - PROFILER=${PROFILER:-none} ports: - "7601:7601" networks: @@ -23,23 +28,29 @@ services: timeout: 3s retries: 10 start_period: 10s + cap_add: + - SYS_ADMIN + privileged: true + security_opt: + - seccomp:unconfined tap-agent: image: indexer-base:latest # Pre-built base image with dependencies container_name: tap-agent - depends_on: - indexer-service: - condition: service_healthy volumes: - ../target/release/indexer-tap-agent:/usr/local/bin/indexer-tap-agent - - ./tap-agent:/opt/config:ro + - ./tap-agent/start.sh:/usr/local/bin/start.sh + # - ./tap-agent:/opt/config:ro + - ./profiling:/opt/profiling:rw - ./local-network/.env:/opt/.env:ro - ./local-network/tap-contracts.json:/opt/contracts.json:ro + - ./local-network/horizon.json:/opt/horizon.json:ro - ../migrations:/opt/migrations:ro - entrypoint: ["/bin/bash", "-c", "/opt/config/start.sh"] + entrypoint: ["/usr/local/bin/start.sh"] environment: - RUST_BACKTRACE=1 - RUST_LOG=debug + - PROFILER=${PROFILER:-none} ports: # to expose the metrics port - "7300:7300" @@ -51,6 +62,11 @@ services: timeout: 3s retries: 10 start_period: 10s + cap_add: + - SYS_ADMIN + privileged: true + security_opt: + - seccomp:unconfined networks: local-network: diff --git a/contrib/docker-compose.prof.yml b/contrib/docker-compose.prof.yml deleted file mode 100644 index 58b998495..000000000 --- a/contrib/docker-compose.prof.yml +++ /dev/null @@ -1,75 +0,0 @@ -services: - indexer-service: - image: indexer-base:latest - build: - context: .. 
- container_name: indexer-service - volumes: - - ./local-network/contracts.json:/opt/contracts.json:ro - - ./local-network/.env:/opt/.env:ro - - ./profiling:/opt/profiling:rw - - ./indexer-service/config.toml:/opt/config/config.toml - - ./indexer-service/start.sh:/usr/local/bin/start.sh - - ../migrations:/opt/migrations:ro - - ../target/release/indexer-service-rs:/usr/local/bin/indexer-service-rs - entrypoint: ["/usr/local/bin/start.sh"] - environment: - - RUST_BACKTRACE=1 - - RUST_LOG=debug - - PROFILER=${PROFILER:-flamegraph} # Default to flamegraph if not specified - ports: - - "7601:7601" - networks: - - local-network - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:7601/"] - interval: 5s - timeout: 3s - retries: 10 - start_period: 10s - cap_add: - - SYS_ADMIN - privileged: true - security_opt: - - seccomp:unconfined - - tap-agent: - image: indexer-base:latest # Pre-built base image with dependencies - container_name: tap-agent - # depends_on: - # indexer-service: - # condition: service_healthy - volumes: - - ../target/release/indexer-tap-agent:/usr/local/bin/indexer-tap-agent - - ./tap-agent/start-perf.sh:/usr/local/bin/start-perf.sh - - ./tap-agent:/opt/config:ro - - ./profiling:/opt/profiling:rw - - ./local-network/.env:/opt/.env:ro - - ./local-network/contracts.json:/opt/contracts.json:ro - - ../migrations:/opt/migrations:ro - entrypoint: ["/usr/local/bin/start-perf.sh"] - environment: - - RUST_BACKTRACE=1 - - RUST_LOG=debug - - PROFILER=${PROFILER:-flamegraph} - ports: - # to expose the metrics port - - "7300:7300" - networks: - - local-network - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:7300/metrics"] - interval: 5s - timeout: 3s - retries: 10 - start_period: 10s - cap_add: - - SYS_ADMIN - privileged: true - security_opt: - - seccomp:unconfined - -networks: - local-network: - external: true - name: local-network_default diff --git a/contrib/docker-compose.yml b/contrib/docker-compose.yml index b0567c0fd..d5b5aed71 100644 --- a/contrib/docker-compose.yml +++ b/contrib/docker-compose.yml @@ -5,11 +5,10 @@ services: dockerfile: ./contrib/indexer-service/Dockerfile container_name: indexer-service volumes: - - ./indexer-service:/opt/config:ro # From contrib dir to indexer-service dir - ./local-network/tap-contracts.json:/opt/contracts.json:ro + - ./local-network/horizon.json:/opt/horizon.json:ro - ./local-network/.env:/opt/.env:ro - ../migrations:/opt/migrations:ro - entrypoint: ["/bin/bash", "-c", "/opt/config/start.sh"] environment: - RUST_BACKTRACE=1 - RUST_LOG=debug @@ -23,6 +22,11 @@ services: timeout: 3s retries: 10 start_period: 10s + cap_add: + - SYS_ADMIN + privileged: true + security_opt: + - seccomp:unconfined tap-agent: build: @@ -33,16 +37,16 @@ services: indexer-service: condition: service_healthy volumes: - - ./tap-agent:/opt/config:ro # From contrib dir to tap-agent dir + - ./profiling:/opt/profiling:rw - ./local-network/.env:/opt/.env:ro - ./local-network/tap-contracts.json:/opt/contracts.json:ro + - ./local-network/horizon.json:/opt/horizon.json:ro - ../migrations:/opt/migrations:ro - entrypoint: ["/bin/bash", "-c", "/opt/config/start.sh"] environment: - RUST_BACKTRACE=1 - RUST_LOG=debug + - PROFILER=${PROFILER:-none} ports: - # to expose the metrics port - "7300:7300" networks: - local-network From 796b0fdbb37bc372de8c55b831bffb0c22c112da Mon Sep 17 00:00:00 2001 From: Natanael Mojica Date: Mon, 11 Aug 2025 13:22:29 -0600 Subject: [PATCH 2/9] refactor(indexer-service): update for Horizon upgrade and improve config generation - 
Migrate from TAPVerifier to GraphTallyCollector contract addresses - Replace sed-based template config with inline TOML generation - Add horizon.json support and Horizon migration detection - Streamline profiling configuration and reduce script verbosity --- contrib/indexer-service/Dockerfile | 2 + contrib/indexer-service/start.sh | 134 ++++++++++++++--------------- 2 files changed, 66 insertions(+), 70 deletions(-) diff --git a/contrib/indexer-service/Dockerfile b/contrib/indexer-service/Dockerfile index 651ea8af9..87c8945a3 100644 --- a/contrib/indexer-service/Dockerfile +++ b/contrib/indexer-service/Dockerfile @@ -21,8 +21,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ COPY --from=build /root/target/release/indexer-service-rs /usr/local/bin/indexer-service-rs # Copy our start script into the image +COPY contrib/local-network/.env /opt/.env COPY contrib/indexer-service/start.sh /usr/local/bin/start.sh COPY contrib/indexer-service/config.toml /opt/config/config.toml +COPY contrib/local-network/horizon.json /opt/horizon.json RUN chmod +x /usr/local/bin/start.sh diff --git a/contrib/indexer-service/start.sh b/contrib/indexer-service/start.sh index d728dd8ce..e6f80718e 100755 --- a/contrib/indexer-service/start.sh +++ b/contrib/indexer-service/start.sh @@ -1,5 +1,6 @@ #!/bin/bash set -eu + # Source environment variables if available if [ -f "/opt/.env" ]; then source /opt/.env @@ -7,10 +8,10 @@ fi cat /opt/.env -# Extract TAPVerifier address from contracts.json -stdbuf -oL echo "🔍 DEBUG: Extracting TAPVerifier address from contracts.json..." -VERIFIER_ADDRESS=$(jq -r '."1337".TAPVerifier.address' /opt/contracts.json) -stdbuf -oL echo "🔍 DEBUG: TAPVerifier address: $VERIFIER_ADDRESS" +# Extract GraphTallyCollector address from horizon.json +stdbuf -oL echo "🔍 DEBUG: Extracting GraphTallyCollector address from horizon.json..." +GRAPH_TALLY_VERIFIER=$(jq -r '."1337".GraphTallyCollector.address' /opt/horizon.json) +stdbuf -oL echo "🔍 DEBUG: GraphTallyCollector address: $GRAPH_TALLY_VERIFIER" # Override with test values taken from test-assets/src/lib.rs ALLOCATION_ID="0xfa44c72b753a66591f241c7dc04e8178c30e13af" # ALLOCATION_ID_0 @@ -21,7 +22,6 @@ NETWORK_DEPLOYMENT=$(curl -s --max-time 10 "http://graph-node:8000/subgraphs/nam -H 'content-type: application/json' \ -d '{"query": "{ _meta { deployment } }"}' | jq -r '.data._meta.deployment' 2>/dev/null) stdbuf -oL echo "🔍 DEBUG: Network deployment result: $NETWORK_DEPLOYMENT" -stdbuf -oL echo "Graph-network subgraph deployment ID: $NETWORK_DEPLOYMENT" # Get escrow subgraph deployment ID stdbuf -oL echo "🔍 DEBUG: Fetching escrow subgraph deployment ID..." @@ -30,57 +30,72 @@ ESCROW_DEPLOYMENT=$(curl -s --max-time 10 "http://graph-node:8000/subgraphs/name -d '{"query": "{ _meta { deployment } }"}' | jq -r '.data._meta.deployment' 2>/dev/null) stdbuf -oL echo "🔍 DEBUG: Escrow deployment result: $ESCROW_DEPLOYMENT" -# Handle null deployment IDs by removing the lines entirely -if [ "$NETWORK_DEPLOYMENT" = "null" ] || [ -z "$NETWORK_DEPLOYMENT" ]; then - NETWORK_DEPLOYMENT="" -fi - -if [ "$ESCROW_DEPLOYMENT" = "null" ] || [ -z "$ESCROW_DEPLOYMENT" ]; then - ESCROW_DEPLOYMENT="" -fi - +# Run basic connectivity tests +stdbuf -oL echo "Testing graph-node endpoints..." 
+curl -s "http://graph-node:8000" >/dev/null && stdbuf -oL echo "Query endpoint OK" || stdbuf -oL echo "Query endpoint FAILED" +curl -s "http://graph-node:8030/graphql" >/dev/null && stdbuf -oL echo "Status endpoint OK" || stdbuf -oL echo "Status endpoint FAILED" -stdbuf -oL echo "Escrow subgraph deployment ID: $ESCROW_DEPLOYMENT" -stdbuf -oL echo "Using test Network subgraph deployment ID: $NETWORK_DEPLOYMENT" -stdbuf -oL echo "Using test Verifier address: $VERIFIER_ADDRESS" +stdbuf -oL echo "Using GraphTallyCollector address: $GRAPH_TALLY_VERIFIER" stdbuf -oL echo "Using test Indexer address: $RECEIVER_ADDRESS" -stdbuf -oL echo "Using TAPVerifier address from contracts.json: $VERIFIER_ADDRESS" stdbuf -oL echo "Using test Account0 address: $ACCOUNT0_ADDRESS" -# Create/copy config file -cp /opt/config/config.toml /opt/config.toml - -# Replace the placeholders with actual values -if [ -n "$NETWORK_DEPLOYMENT" ]; then - sed -i "s/NETWORK_DEPLOYMENT_PLACEHOLDER/$NETWORK_DEPLOYMENT/g" /opt/config.toml -else - # Remove the deployment_id line entirely for network subgraph - sed -i '/deployment_id = "NETWORK_DEPLOYMENT_PLACEHOLDER"/d' /opt/config.toml -fi - -if [ -n "$ESCROW_DEPLOYMENT" ]; then - sed -i "s/ESCROW_DEPLOYMENT_PLACEHOLDER/$ESCROW_DEPLOYMENT/g" /opt/config.toml -else - # Remove the deployment_id line entirely for escrow subgraph - sed -i '/deployment_id = "ESCROW_DEPLOYMENT_PLACEHOLDER"/d' /opt/config.toml -fi - -sed -i "s/VERIFIER_ADDRESS_PLACEHOLDER/$VERIFIER_ADDRESS/g" /opt/config.toml -sed -i "s/INDEXER_ADDRESS_PLACEHOLDER/$RECEIVER_ADDRESS/g" /opt/config.toml -sed -i "s/INDEXER_MNEMONIC_PLACEHOLDER/$INDEXER_MNEMONIC/g" /opt/config.toml -sed -i "s/ACCOUNT0_ADDRESS_PLACEHOLDER/$ACCOUNT0_ADDRESS/g" /opt/config.toml -sed -i "s/POSTGRES_PORT_PLACEHOLDER/$POSTGRES/g" /opt/config.toml +# Create config file inline (similar to the new run.sh approach) +cat >/opt/config.toml <<-EOF +[indexer] +indexer_address = "${RECEIVER_ADDRESS}" +operator_mnemonic = "${INDEXER_MNEMONIC}" + +[database] +postgres_url = "postgresql://postgres@postgres:${POSTGRES}/indexer_components_1" + +[graph_node] +query_url = "http://graph-node:8000" +status_url = "http://graph-node:8030/graphql" + +[subgraphs.network] +query_url = "http://graph-node:8000/subgraphs/name/graph-network"$(if [ -n "$NETWORK_DEPLOYMENT" ] && [ "$NETWORK_DEPLOYMENT" != "null" ]; then echo " +deployment_id = \"$NETWORK_DEPLOYMENT\""; fi) +recently_closed_allocation_buffer_secs = 60 +syncing_interval_secs = 30 + +[subgraphs.escrow] +query_url = "http://graph-node:8000/subgraphs/name/semiotic/tap"$(if [ -n "$ESCROW_DEPLOYMENT" ] && [ "$ESCROW_DEPLOYMENT" != "null" ]; then echo " +deployment_id = \"$ESCROW_DEPLOYMENT\""; fi) +syncing_interval_secs = 30 + +[blockchain] +chain_id = 1337 +receipts_verifier_address = "${GRAPH_TALLY_VERIFIER}" + +[service] +free_query_auth_token = "freestuff" +host_and_port = "0.0.0.0:7600" +url_prefix = "/" +serve_network_subgraph = false +serve_escrow_subgraph = false + +[tap] +max_amount_willing_to_lose_grt = 1000 + +[tap.rav_request] +timestamp_buffer_secs = 1000 + +[tap.sender_aggregator_endpoints] +${ACCOUNT0_ADDRESS} = "http://tap-aggregator:8080" + +[horizon] +# Enable Horizon migration support and detection +# When enabled: Check if Horizon contracts are active in the network +# - If Horizon contracts detected: Hybrid migration mode (new V2 receipts only, process existing V1 receipts) +# - If Horizon contracts not detected: Remain in legacy mode (V1 receipts only) +# When disabled: Pure legacy mode, no 
Horizon detection performed +enabled = true +EOF stdbuf -oL echo "Starting indexer-service with config:" cat /opt/config.toml -# Run basic connectivity tests -stdbuf -oL echo "Testing graph-node endpoints..." -curl -s "http://graph-node:8000" >/dev/null && stdbuf -oL echo "Query endpoint OK" || stdbuf -oL echo "Query endpoint FAILED" -curl -s "http://graph-node:8030/graphql" >/dev/null && stdbuf -oL echo "Status endpoint OK" || stdbuf -oL echo "Status endpoint FAILED" - -# Set profiling tool based on environment variable -# Default is no profiling +# Set profiling tool based on environment variable (keeping your existing profiling support) PROFILER="${PROFILER:-none}" stdbuf -oL echo "🔍 DEBUG: Profiling with: $PROFILER" @@ -88,36 +103,22 @@ stdbuf -oL echo "🔍 DEBUG: Profiling with: $PROFILER" export RUST_BACKTRACE=full export RUST_LOG="${RUST_LOG:-trace}" -# Create output directory if it doesn't exist +# Create output directory if it doesn't exist (for profiling) mkdir -p /opt/profiling/indexer-service chmod 777 /opt/profiling chmod 777 /opt/profiling/indexer-service -stdbuf -oL echo "📁 DEBUG: Profiling output directory: $(ls -la /opt/profiling)" - case "$PROFILER" in flamegraph) stdbuf -oL echo "🔥 Starting with profiler..." - - # Start the service in the background with output redirection - stdbuf -oL echo "🚀 Starting service..." exec /usr/local/bin/indexer-service-rs --config /opt/config.toml ;; strace) stdbuf -oL echo "🔍 Starting with strace..." - # -f: follow child processes - # -tt: print timestamps with microsecond precision - # -T: show time spent in each syscall - # -e trace=all: trace all system calls - # -s 256: show up to 256 characters per string - # -o: output file exec strace -f -tt -T -e trace=all -s 256 -o /opt/profiling/indexer-service/strace.log /usr/local/bin/indexer-service-rs --config /opt/config.toml ;; valgrind) stdbuf -oL echo "🔍 Starting with Valgrind profiling..." - - # Start with Massif memory profiler - stdbuf -oL echo "🔄 Starting Valgrind Massif memory profiling..." exec valgrind --tool=massif \ --massif-out-file=/opt/profiling/indexer-service/massif.out \ --time-unit=B \ @@ -126,13 +127,6 @@ valgrind) --threshold=0.5 \ /usr/local/bin/indexer-service-rs --config /opt/config.toml ;; -# Use callgrind_annotate indexer-service.callgrind.out -# for human-friendly report of callgrind output -# Ideally you should set: -# [profile.release.package."*"] -# debug = true -# force-frame-pointers = true -# in the Cargo.toml callgrind) stdbuf -oL echo "🔍 Starting with Callgrind CPU profiling..." exec valgrind --tool=callgrind \ From d981efdd67ea312d320ad415b0c018d20e9fbf2a Mon Sep 17 00:00:00 2001 From: Natanael Mojica Date: Mon, 11 Aug 2025 13:23:51 -0600 Subject: [PATCH 3/9] refactor(tap-agent): update for Horizon upgrade and add profiling capabilities - Migrate from TAPVerifier to GraphTallyCollector contract addresses - Replace sed-based config templates with inline generation - Bundle horizon.json and .env files in Docker image - Simplify startup script structure and improve logging - Remove indexer-service dependency for independent startup --- contrib/tap-agent/Dockerfile | 15 ++- contrib/tap-agent/start.sh | 219 +++++++++++++++++++++++++---------- 2 files changed, 162 insertions(+), 72 deletions(-) diff --git a/contrib/tap-agent/Dockerfile b/contrib/tap-agent/Dockerfile index 35276edba..756cb50f3 100644 --- a/contrib/tap-agent/Dockerfile +++ b/contrib/tap-agent/Dockerfile @@ -1,8 +1,6 @@ FROM rust:1.86-bookworm as build - WORKDIR /root COPY . . 
- # Force SQLx to use the offline mode to statically check the database queries against # the prepared files in the `.sqlx` directory. ENV SQLX_OFFLINE=true @@ -13,22 +11,23 @@ RUN cargo build --release --bin indexer-tap-agent ######################################################################################## FROM debian:bookworm-slim - RUN apt-get update && apt-get install -y --no-install-recommends \ openssl ca-certificates postgresql-client curl jq iproute2 \ + valgrind strace \ && rm -rf /var/lib/apt/lists/* # Create profiling directory with proper permissions RUN mkdir -p /opt/profiling && chmod 777 /opt/profiling # Copy our start script into the image -COPY contrib/tap-agent/start.sh /opt/config/start.sh -COPY contrib/tap-agent/start-perf.sh /usr/local/bin/start-perf.sh +COPY contrib/tap-agent/start.sh /usr/local/bin/start.sh COPY contrib/tap-agent/config.toml /opt/config/config.toml -RUN chmod +x /opt/config/start.sh -RUN chmod +x /usr/local/bin/start-perf.sh +# Copy the horizon.json and .env files +COPY contrib/local-network/horizon.json /opt/horizon.json +COPY contrib/local-network/.env /opt/.env +RUN chmod +x /usr/local/bin/start.sh COPY --from=build /root/target/release/indexer-tap-agent /usr/local/bin/indexer-tap-agent -ENTRYPOINT [ "/usr/local/bin/indexer-tap-agent" ] +ENTRYPOINT [ "/usr/local/bin/start.sh" ] diff --git a/contrib/tap-agent/start.sh b/contrib/tap-agent/start.sh index 6641a2be7..e3e746403 100755 --- a/contrib/tap-agent/start.sh +++ b/contrib/tap-agent/start.sh @@ -3,60 +3,39 @@ set -eu # Source environment variables from .env file if [ -f /opt/.env ]; then - echo "Sourcing environment variables from .env file" + stdbuf -oL echo "Sourcing environment variables from .env file" . /opt/.env fi -# Extract TAPVerifier address from contracts.json -VERIFIER_ADDRESS=$(jq -r '."1337".TAPVerifier.address' /opt/contracts.json) -ALLOCATION_ID="0xfa44c72b753a66591f241c7dc04e8178c30e13af" # ALLOCATION_ID_0 - -# Wait for postgres to be ready with timeout -echo "Waiting for postgres to be ready..." -MAX_ATTEMPTS=30 -ATTEMPT=0 -until pg_isready -h postgres -U postgres -d indexer_components_1 || [ $ATTEMPT -eq $MAX_ATTEMPTS ]; do - echo "Waiting for postgres... Attempt $((ATTEMPT + 1))/$MAX_ATTEMPTS" - ATTEMPT=$((ATTEMPT + 1)) - sleep 2 -done +cat /opt/.env -if [ $ATTEMPT -eq $MAX_ATTEMPTS ]; then - echo "ERROR: Failed to connect to postgres after $MAX_ATTEMPTS attempts" - exit 1 -fi +# Extract GraphTallyCollector address from horizon.json +stdbuf -oL echo "🔍 DEBUG: Extracting GraphTallyCollector address from horizon.json..." +GRAPH_TALLY_VERIFIER=$(jq -r '."1337".GraphTallyCollector.address' /opt/horizon.json) +stdbuf -oL echo "🔍 DEBUG: GraphTallyCollector address: $GRAPH_TALLY_VERIFIER" -echo "Postgres is ready!" +# Override with test values taken from test-assets/src/lib.rs +ALLOCATION_ID="0xfa44c72b753a66591f241c7dc04e8178c30e13af" # ALLOCATION_ID_0 -# Wait for indexer-service to be ready with timeout -echo "Waiting for indexer-service to be ready..." -MAX_ATTEMPTS=30 -ATTEMPT=0 -until curl -s http://indexer-service:7601/ >/dev/null 2>&1 || [ $ATTEMPT -eq $MAX_ATTEMPTS ]; do - echo "Waiting for indexer-service... Attempt $((ATTEMPT + 1))/$MAX_ATTEMPTS" - ATTEMPT=$((ATTEMPT + 1)) +# Wait for postgres to be ready +stdbuf -oL echo "🔍 DEBUG: Waiting for postgres to be ready..." +until pg_isready -h postgres -U postgres -d indexer_components_1; do + stdbuf -oL echo "Waiting for postgres..." 
sleep 2 done -if [ $ATTEMPT -eq $MAX_ATTEMPTS ]; then - echo "ERROR: Failed to connect to indexer-service after $MAX_ATTEMPTS attempts" - exit 1 -fi - -echo "Indexer-service is ready!" - -echo "Checking if required services are available..." +stdbuf -oL echo "Checking if required services are available..." for service in postgres graph-node tap-aggregator; do if getent hosts $service >/dev/null 2>&1; then IP=$(getent hosts $service | awk '{ print $1 }') - echo "✅ $service resolves to $IP" + stdbuf -oL echo "✅ $service resolves to $IP" else - echo "❌ Cannot resolve $service hostname" + stdbuf -oL echo "❌ Cannot resolve $service hostname" fi done # Get network subgraph deployment ID with retries -echo "Getting network subgraph deployment ID..." +stdbuf -oL echo "🔍 DEBUG: Getting network subgraph deployment ID..." MAX_ATTEMPTS=30 ATTEMPT=0 NETWORK_DEPLOYMENT="" @@ -78,10 +57,10 @@ if [ -z "$NETWORK_DEPLOYMENT" ] || [ "$NETWORK_DEPLOYMENT" = "null" ]; then exit 1 fi -echo "Network subgraph deployment ID: $NETWORK_DEPLOYMENT" +stdbuf -oL echo "🔍 DEBUG: Network subgraph deployment ID: $NETWORK_DEPLOYMENT" # Get escrow subgraph deployment ID with retries -echo "Getting escrow subgraph deployment ID..." +stdbuf -oL echo "🔍 DEBUG: Getting escrow subgraph deployment ID..." MAX_ATTEMPTS=30 ATTEMPT=0 ESCROW_DEPLOYMENT="" @@ -99,34 +78,146 @@ while [ -z "$ESCROW_DEPLOYMENT" ] || [ "$ESCROW_DEPLOYMENT" = "null" ] && [ $ATT done if [ -z "$ESCROW_DEPLOYMENT" ] || [ "$ESCROW_DEPLOYMENT" = "null" ]; then - echo "ERROR: Failed to get escrow subgraph deployment ID after $MAX_ATTEMPTS attempts" + stdbuf -oL echo "ERROR: Failed to get escrow subgraph deployment ID after $MAX_ATTEMPTS attempts" exit 1 fi -echo "Escrow subgraph deployment ID: $ESCROW_DEPLOYMENT" - - -# Copy the config template -cp /opt/config/config.toml /opt/config.toml - -# Replace the placeholders with actual values -sed -i "s/NETWORK_DEPLOYMENT_PLACEHOLDER/$NETWORK_DEPLOYMENT/g" /opt/config.toml -sed -i "s/ESCROW_DEPLOYMENT_PLACEHOLDER/$ESCROW_DEPLOYMENT/g" /opt/config.toml -sed -i "s/VERIFIER_ADDRESS_PLACEHOLDER/$VERIFIER_ADDRESS/g" /opt/config.toml -sed -i "s/INDEXER_ADDRESS_PLACEHOLDER/$RECEIVER_ADDRESS/g" /opt/config.toml -sed -i "s/INDEXER_MNEMONIC_PLACEHOLDER/$INDEXER_MNEMONIC/g" /opt/config.toml -sed -i "s/ACCOUNT0_ADDRESS_PLACEHOLDER/$ACCOUNT0_ADDRESS/g" /opt/config.toml -sed -i "s/TAP_AGGREGATOR_PORT_PLACEHOLDER/$TAP_AGGREGATOR/g" /opt/config.toml -sed -i "s/POSTGRES_PORT_PLACEHOLDER/$POSTGRES/g" /opt/config.toml -sed -i "s/GRAPH_NODE_GRAPHQL_PORT_PLACEHOLDER/$GRAPH_NODE_GRAPHQL/g" /opt/config.toml -sed -i "s/GRAPH_NODE_STATUS_PORT_PLACEHOLDER/$GRAPH_NODE_STATUS/g" /opt/config.toml -sed -i "s/INDEXER_SERVICE_PORT_PLACEHOLDER/$INDEXER_SERVICE/g" /opt/config.toml - -echo "Starting tap-agent with config:" -cat /opt/config.toml +stdbuf -oL echo "🔍 DEBUG: Escrow subgraph deployment ID: $ESCROW_DEPLOYMENT" + +stdbuf -oL echo "🔍 DEBUG: Using GraphTallyCollector address: $GRAPH_TALLY_VERIFIER" +stdbuf -oL echo "🔍 DEBUG: Using Indexer address: $RECEIVER_ADDRESS" +stdbuf -oL echo "🔍 DEBUG: Using Account0 address: $ACCOUNT0_ADDRESS" + +# Create endpoints.yaml file (matching the updated run.sh pattern) +cd /opt +cat >endpoints.yaml <<-EOF +${ACCOUNT0_ADDRESS}: "http://tap-aggregator:${TAP_AGGREGATOR}" +EOF + +# Create config file inline (matching the updated run.sh pattern) +cat >config.toml <<-EOF +[indexer] +indexer_address = "${RECEIVER_ADDRESS}" +operator_mnemonic = "${INDEXER_MNEMONIC}" + +[database] +postgres_url = 
"postgresql://postgres@postgres:${POSTGRES}/indexer_components_1" + +[graph_node] +query_url = "http://graph-node:${GRAPH_NODE_GRAPHQL}" +status_url = "http://graph-node:${GRAPH_NODE_STATUS}/graphql" + +[subgraphs.network] +query_url = "http://graph-node:${GRAPH_NODE_GRAPHQL}/subgraphs/name/graph-network"$(if [ -n "$NETWORK_DEPLOYMENT" ] && [ "$NETWORK_DEPLOYMENT" != "null" ]; then echo " +deployment_id = \"$NETWORK_DEPLOYMENT\""; fi) +recently_closed_allocation_buffer_secs = 60 +syncing_interval_secs = 30 + +[subgraphs.escrow] +query_url = "http://graph-node:${GRAPH_NODE_GRAPHQL}/subgraphs/name/semiotic/tap"$(if [ -n "$ESCROW_DEPLOYMENT" ] && [ "$ESCROW_DEPLOYMENT" != "null" ]; then echo " +deployment_id = \"$ESCROW_DEPLOYMENT\""; fi) +syncing_interval_secs = 30 + +[blockchain] +chain_id = 1337 +receipts_verifier_address = "${GRAPH_TALLY_VERIFIER}" + +[service] +host_and_port = "0.0.0.0:${INDEXER_SERVICE}" +url_prefix = "/" +serve_network_subgraph = false +serve_escrow_subgraph = false + +[tap] +max_amount_willing_to_lose_grt = 1000 + +[tap.rav_request] +timestamp_buffer_secs = 1000 + +[tap.sender_aggregator_endpoints] +${ACCOUNT0_ADDRESS} = "http://tap-aggregator:${TAP_AGGREGATOR}" + +[horizon] +# Enable Horizon migration support and detection +# When enabled: Check if Horizon contracts are active in the network +# - If Horizon contracts detected: Hybrid migration mode (new V2 receipts only, process existing V1 receipts) +# - If Horizon contracts not detected: Remain in legacy mode (V1 receipts only) +# When disabled: Pure legacy mode, no Horizon detection performed +enabled = true +EOF + +stdbuf -oL echo "Starting tap-agent with config:" +cat config.toml + +# Set profiling tool based on environment variable +# Default is no profiling +PROFILER="${PROFILER:-none}" +stdbuf -oL echo "🔍 DEBUG: Profiling with: $PROFILER" # Run agent with enhanced logging -echo "Starting tap-agent..." +stdbuf -oL echo "🔍 DEBUG: Starting tap-agent..." export RUST_BACKTRACE=full -export RUST_LOG=debug -exec /usr/local/bin/indexer-tap-agent --config /opt/config.toml +export RUST_LOG="${RUST_LOG:-debug}" + +# Create output directory if it doesn't exist +mkdir -p /opt/profiling/tap-agent +chmod 777 /opt/profiling +chmod 777 /opt/profiling/tap-agent + +stdbuf -oL echo "📁 DEBUG: Profiling output directory: $(ls -la /opt/profiling)" + +case "$PROFILER" in +flamegraph) + stdbuf -oL echo "🔥 Starting with profiler..." + stdbuf -oL echo "🚀 Starting service..." + exec /usr/local/bin/indexer-tap-agent --config /opt/config.toml + ;; +strace) + stdbuf -oL echo "🔍 Starting with strace..." + # -f: follow child processes + # -tt: print timestamps with microsecond precision + # -T: show time spent in each syscall + # -e trace=all: trace all system calls + # -s 256: show up to 256 characters per string + # -o: output file + exec strace -f -tt -T -e trace=all -s 256 -o /opt/profiling/tap-agent/strace.log /usr/local/bin/indexer-tap-agent --config /opt/config.toml + ;; +valgrind) + stdbuf -oL echo "🔍 Starting with Valgrind profiling..." + # Start with Massif memory profiler + stdbuf -oL echo "🔄 Starting Valgrind Massif memory profiling..." 
+ exec valgrind --tool=massif \ + --massif-out-file=/opt/profiling/tap-agent/massif.out \ + --time-unit=B \ + --detailed-freq=10 \ + --max-snapshots=100 \ + --threshold=0.5 \ + /usr/local/bin/indexer-tap-agent --config /opt/config.toml + ;; +# Use callgrind_annotate tap-agent.callgrind.out +# or KcacheGrind viewer +# for human friendly report +# Ideally you should set: +# [profile.release.package."*"] +# debug = true +# force-frame-pointers = true +# in the Cargo.toml +callgrind) + stdbuf -oL echo "🔍 Starting with Callgrind CPU profiling..." + exec valgrind --tool=callgrind \ + --callgrind-out-file=/opt/profiling/tap-agent/callgrind.out \ + --cache-sim=yes \ + --branch-sim=yes \ + --collect-jumps=yes \ + --collect-systime=yes \ + --collect-bus=yes \ + --dump-instr=yes \ + --dump-line=yes \ + --compress-strings=no \ + /usr/local/bin/indexer-tap-agent --config /opt/config.toml + ;; +none) + stdbuf -oL echo "🔍 Starting without profiling..." + exec /usr/local/bin/indexer-tap-agent --config /opt/config.toml + ;; +esac From 2039e49efd29dd8adee0805c21df93976a552525 Mon Sep 17 00:00:00 2001 From: Natanael Mojica Date: Mon, 11 Aug 2025 13:27:04 -0600 Subject: [PATCH 4/9] chore(dev-workflow): improve container management - Separate prod/dev workflows and add services-status command - Improve container cleanup and colorized output for better UX --- dev-reload.sh | 62 +++++++++++++++++++++++++++++++++++++++------------ justfile | 62 ++++++++++++++++++++++++++++++++++++++------------- 2 files changed, 95 insertions(+), 29 deletions(-) diff --git a/dev-reload.sh b/dev-reload.sh index 5b2ec56aa..7e682e0a1 100755 --- a/dev-reload.sh +++ b/dev-reload.sh @@ -1,18 +1,38 @@ #!/bin/bash set -e -# Print what we're doing -echo "Rebuilding Rust binaries and restarting containers..." - # Define color codes for output GREEN='\033[0;32m' YELLOW='\033[1;33m' BLUE='\033[0;34m' NC='\033[0m' # No Color -# 1. Compile the binaries locally -echo -e "${BLUE}Compiling Rust code...${NC}" -cargo build --release +# Check if profiling is requested +PROFILER=${1:-""} +if [ -n "$PROFILER" ]; then + echo "Rebuilding Rust binaries with profiling and restarting dev containers..." + echo -e "${BLUE}Using profiler: ${GREEN}${PROFILER}${NC}" + # Ensure profiling output directory exists + mkdir -p contrib/profiling/indexer-service + mkdir -p contrib/profiling/tap-agent +else + echo "Rebuilding Rust binaries and restarting dev containers..." +fi + +echo -e "${BLUE}Using development profile with host binary mounts${NC}" + +# Always use docker-compose.dev.yml since this is dev-reload.sh +COMPOSE_FILE="-f docker-compose.dev.yml" +SERVICES="indexer-service tap-agent" + +# 1. Compile the binaries locally with appropriate flags +if [ -n "$PROFILER" ]; then + echo -e "${BLUE}Compiling Rust code with profiling support...${NC}" + RUSTFLAGS='-C force-frame-pointers=yes' CARGO_PROFILE_RELEASE_DEBUG=true cargo build --release --features "profiling" +else + echo -e "${BLUE}Compiling Rust code...${NC}" + cargo build --release +fi # 2. Check if compilation succeeded if [ $? -ne 0 ]; then @@ -22,17 +42,31 @@ fi echo -e "${GREEN}Compilation successful!${NC}" -# 3. Restart the containers with the newly compiled binaries -echo -e "${BLUE}Restarting containers...${NC}" +# 3. 
Stop and remove any conflicting containers +echo -e "${BLUE}Cleaning up conflicting containers...${NC}" cd contrib -docker compose -f docker-compose.dev.yml restart indexer-service tap-agent +# Force stop and remove all indexer containers to avoid port conflicts +# Stop containers from both production and dev compose files +docker compose -f docker-compose.yml stop indexer-service tap-agent 2>/dev/null || true +docker compose -f docker-compose.dev.yml stop indexer-service tap-agent 2>/dev/null || true +docker rm indexer-service tap-agent 2>/dev/null || true -# 4. Verify the containers are running -echo -e "${BLUE}Checking container status...${NC}" -docker compose -f docker-compose.dev.yml ps +# 4. Start only the dev containers +echo -e "${BLUE}Starting dev containers...${NC}" -echo -e "${GREEN}Done! Containers restarted with new binaries.${NC}" +# Set profiler environment variable if profiling +if [ -n "$PROFILER" ]; then + PROFILER=$PROFILER docker compose $COMPOSE_FILE up -d --force-recreate $SERVICES + echo -e "${GREEN}Done! Containers restarted with profiling.${NC}" +else + docker compose $COMPOSE_FILE up -d --force-recreate $SERVICES + echo -e "${GREEN}Done! Development containers restarted.${NC}" +fi + +# 5. Verify the containers are running +echo -e "${BLUE}Checking container status...${NC}" +docker compose $COMPOSE_FILE ps # Optional: Check logs for immediate errors echo -e "${BLUE}Showing recent logs (Ctrl+C to exit):${NC}" -docker compose -f docker-compose.dev.yml logs --tail=20 -f +docker compose $COMPOSE_FILE logs --tail=20 -f $SERVICES diff --git a/justfile b/justfile index 603743a57..cede806ef 100644 --- a/justfile +++ b/justfile @@ -53,21 +53,50 @@ setup-integration-env: setup: ./setup-test-network.sh -# Rebuild binaries and restart services after code changes +# Stop all services +down: + cd contrib && docker compose -f docker-compose.yml down --remove-orphans + cd contrib && docker compose -f docker-compose.dev.yml down --remove-orphans + cd contrib/local-network && docker compose down --remove-orphans + docker rm -f indexer-service tap-agent gateway block-oracle indexer-agent graph-node redpanda tap-aggregator tap-escrow-manager 2>/dev/null || true + docker network prune -f + + +# Check status of all project services +services-status: + @echo "🔍 Checking project services status..." 
+ @echo "" + @echo "=== Project Containers ===" + @docker ps --format 'table {{{{.Names}}}}\t{{{{.Status}}}}\t{{{{.Ports}}}}' | grep -E "(indexer-service|tap-agent|gateway|graph-node|chain|block-oracle|indexer-agent|redpanda|tap-aggregator|tap-escrow-manager)" || echo "No project containers running" + @echo "" + @echo "=== Docker Compose Services ===" + @cd contrib && docker compose -f docker-compose.yml ps 2>/dev/null || echo "Production compose not running" + @cd contrib && docker compose -f docker-compose.dev.yml ps 2>/dev/null || echo "Dev compose not running" + @cd contrib/local-network && docker compose ps 2>/dev/null || echo "Local network compose not running" + @echo "" + @echo "=== Active Networks ===" + @docker network ls | grep -E "(contrib|local-network)" || echo "No project networks found" + +# Restart production services (uses Docker-built binaries) # Assumes local network is already running (run 'just setup' if not) reload: setup-integration-env + ./reload.sh + +# Rebuild binaries and restart development services (compiles and mounts host binaries) +# Assumes local network is already running (run 'just setup' if not) +reload-dev: setup-integration-env ./dev-reload.sh -# Watch log output from services +# Watch log output from services (production mode) # Assumes local network is already running (run 'just setup' if not) logs: + @cd contrib && docker compose -f docker-compose.yml logs -f + +# Watch log output from services (development mode) +# Assumes local network is already running (run 'just setup' if not) +logs-dev: @cd contrib && docker compose -f docker-compose.dev.yml logs -f -# Stop all services -down: - @cd contrib && docker compose -f docker-compose.dev.yml down - @cd contrib/local-network && docker compose down - docker rm -f indexer-service tap-agent gateway block-oracle indexer-agent graph-node redpanda tap-aggregator tap-escrow-manager 2>/dev/null || true # Profiling commands # ----------------------------- @@ -75,42 +104,44 @@ down: # Assumes local network is already running (run 'just setup' if not) profile-flamegraph: setup-integration-env @mkdir -p contrib/profiling/output - ./prof-reload.sh flamegraph + ./dev-reload.sh flamegraph # Profile indexer-service with valgrind # Assumes local network is already running (run 'just setup' if not) profile-valgrind: setup-integration-env @mkdir -p contrib/profiling/output - ./prof-reload.sh valgrind + ./dev-reload.sh valgrind # Profile indexer-service with strace # Assumes local network is already running (run 'just setup' if not) profile-strace: setup-integration-env @mkdir -p contrib/profiling/output - ./prof-reload.sh strace + ./dev-reload.sh strace # Profile indexer-service with callgrind # Assumes local network is already running (run 'just setup' if not) profile-callgrind: setup-integration-env @mkdir -p contrib/profiling/output - ./prof-reload.sh callgrind + ./dev-reload.sh callgrind # Stop the running indexer-service (useful after profiling) # This sends SIGTERM, allowing the trap in start-perf.sh to handle cleanup (e.g., generate flamegraph) stop-profiling: @echo "🛑 Stopping the indexer-service container (allowing profiling data generation)..." - cd contrib && docker compose -f docker-compose.prof.yml stop indexer-service tap-agent + cd contrib && docker compose -f docker-compose.dev.yml stop indexer-service tap-agent @echo "✅ Service stop signal sent. Check profiling output directory." # Restore normal service (without profiling) profile-restore: @echo "🔄 Restoring normal service..." 
- cd contrib && docker compose -f docker-compose.prof.yml up -d --force-recreate indexer-service tap-agent + cd contrib && docker compose -f docker-compose.dev.yml up -d --force-recreate indexer-service tap-agent @echo "✅ Normal service restored" # Integration test commands (assume local network is already running) # For fresh setup, run 'just setup' first to deploy all infrastructure # ----------------------------------------------------------------------------- +fund-escrow: setup-integration-env + @cd integration-tests && ./fund_escrow.sh # Test RAV v1 receipts (legacy TAP) # Assumes local network is running - run 'just setup' if services are not available @@ -123,8 +154,7 @@ test-local: setup-integration-env # Assumes local network is running - run 'just setup' if services are not available test-local-v2: setup-integration-env @echo "Running RAV v2 integration tests (assumes local network is running)..." - @cd integration-tests && ./fund_escrow.sh - @cd integration-tests && cargo run -- rav2 + @cd integration-tests && bash -x ./fund_escrow.sh && cargo run -- rav2 # Load test with v2 receipts # Assumes local network is running - run 'just setup' if services are not available @@ -132,3 +162,5 @@ load-test-v2 num_receipts="1000": setup-integration-env @echo "Running load test with {{num_receipts}} receipts (assumes local network is running)..." @cd integration-tests && ./fund_escrow.sh @cd integration-tests && cargo run -- load-v2 --num-receipts {{num_receipts}} + + From ee921cb9fc98cd8d5951ce93b5a0ef33ab4d3a8e Mon Sep 17 00:00:00 2001 From: Natanael Mojica Date: Mon, 11 Aug 2025 13:27:59 -0600 Subject: [PATCH 5/9] chore(test): remove profiling script no longer needed --- prof-reload.sh | 45 --------------------------------------------- 1 file changed, 45 deletions(-) delete mode 100755 prof-reload.sh diff --git a/prof-reload.sh b/prof-reload.sh deleted file mode 100755 index 3c0b09dfa..000000000 --- a/prof-reload.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/bin/bash -set -e - -# TODO: Might this file is redundant and we can use dev-reload -# script instead? - -GREEN='\033[0;32m' -YELLOW='\033[1;33m' -BLUE='\033[0;34m' -NC='\033[0m' - -PROFILER=${1:-flamegraph} -echo -e "${BLUE}Using profiler: ${GREEN}${PROFILER}${NC}" - -# Ensure profiling output directory exists -mkdir -p contrib/profiling/indexer-service -mkdir -p contrib/profiling/tap-agent - -# 1. Compile the binaries locally -# and use profiling feature flag -# for flamegraph -echo -e "${BLUE}Compiling Rust code...${NC}" -RUSTFLAGS='-C force-frame-pointers=yes' CARGO_PROFILE_RELEASE_DEBUG=true cargo build --release --features "profiling" - -# 2. Check if compilation succeeded -if [ $? -ne 0 ]; then - echo -e "${YELLOW}Compilation failed. Not restarting containers.${NC}" - exit 1 -fi - -echo -e "${GREEN}Compilation successful!${NC}" - -echo -e "${BLUE}Restarting indexer-service with profiling...${NC}" -cd contrib - -# Stop the existing service and remove container -# to avoid conflicts. probably not needed and a restart could -# be enough. -docker compose -f docker-compose.prof.yml stop indexer-service tap-agent -docker rm -f indexer-service tap-agent 2>/dev/null || true - -export PROFILER=$PROFILER -docker compose -f docker-compose.prof.yml up -d indexer-service tap-agent - -echo -e "${GREEN}Done! 
Containers restarted with profiling.${NC}" From 622fa976944f53e0b93a6ac3716fba946954d053 Mon Sep 17 00:00:00 2001 From: Natanael Mojica Date: Mon, 11 Aug 2025 13:30:08 -0600 Subject: [PATCH 6/9] fix(integration-tests): update fund_escrow.sh for correct file paths and formatting - Fix contract file paths to use ../contrib/local-network/ directory - Update error message to reflect correct execution directory - Remove set -e to allow graceful error handling - Clean up formatting and spacing consistency --- integration-tests/fund_escrow.sh | 40 ++++++++++++++++---------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/integration-tests/fund_escrow.sh b/integration-tests/fund_escrow.sh index 32468395a..8d22b95c8 100755 --- a/integration-tests/fund_escrow.sh +++ b/integration-tests/fund_escrow.sh @@ -1,5 +1,4 @@ #!/bin/bash -set -e # ============================================================================== # FUND ESCROW FOR BOTH V1 AND V2 (HORIZON) @@ -29,20 +28,20 @@ get_contract_address() { if [ -f ".env" ]; then source .env else - echo "Error: .env file not found. Please run from local-network directory." + echo "Error: .env file not found. Please run from integration-tests directory." exit 1 fi -# Get contract addresses -GRAPH_TOKEN=$(get_contract_address "horizon.json" "L2GraphToken") -TAP_ESCROW_V1=$(get_contract_address "tap-contracts.json" "TAPEscrow") -PAYMENTS_ESCROW_V2=$(get_contract_address "horizon.json" "PaymentsEscrow") -GRAPH_TALLY_COLLECTOR_V2=$(get_contract_address "horizon.json" "GraphTallyCollector") +# Get contract addresses - Updated paths to local-network directory +GRAPH_TOKEN=$(get_contract_address "../contrib/local-network/horizon.json" "L2GraphToken") +TAP_ESCROW_V1=$(get_contract_address "../contrib/local-network/tap-contracts.json" "TAPEscrow") +PAYMENTS_ESCROW_V2=$(get_contract_address "../contrib/local-network/horizon.json" "PaymentsEscrow") +GRAPH_TALLY_COLLECTOR_V2=$(get_contract_address "../contrib/local-network/horizon.json" "GraphTallyCollector") # Use environment variables from .env SENDER_ADDRESS="$ACCOUNT0_ADDRESS" SENDER_KEY="$ACCOUNT0_SECRET" -AMOUNT="10000000000000000000" # 10 GRT per escrow +AMOUNT="10000000000000000000" # 10 GRT per escrow echo "============ FUNDING BOTH V1 AND V2 ESCROWS ============" echo "L2GraphToken address: $GRAPH_TOKEN" @@ -128,8 +127,8 @@ echo "========== FUNDING V2 ESCROW ==========" # For V2, we need to specify payer, collector, and receiver # Payer is the test account, but receiver must be the indexer address PAYER=$SENDER_ADDRESS -COLLECTOR=$SENDER_ADDRESS -RECEIVER="0xf4EF6650E48d099a4972ea5B414daB86e1998Bd3" # This must be the indexer address +COLLECTOR=$SENDER_ADDRESS +RECEIVER="0xf4EF6650E48d099a4972ea5B414daB86e1998Bd3" # This must be the indexer address # Check current V2 escrow balance before funding echo "Checking current V2 escrow balance..." @@ -152,21 +151,21 @@ docker exec chain cast send \ # For V2, we also need to authorize the signer echo "Authorizing signer for V2..." 
# Create authorization proof: payer authorizes signer (same address in test) -PROOF_DEADLINE=$(($(date +%s) + 3600)) # 1 hour from now +PROOF_DEADLINE=$(($(date +%s) + 3600)) # 1 hour from now echo "Creating authorization proof with deadline: $PROOF_DEADLINE" -# Create the message to sign according to _verifyAuthorizationProof +# Create the message to sign according to _verifyAuthorizationProof # abi.encodePacked(chainId, contractAddress, "authorizeSignerProof", deadline, authorizer) -CHAIN_ID_HEX=$(printf "%064x" 1337) # uint256: 32 bytes -CONTRACT_HEX=${GRAPH_TALLY_COLLECTOR_V2:2} # address: 20 bytes (remove 0x) -DOMAIN_HEX=$(echo -n "authorizeSignerProof" | xxd -p) # string: no length prefix -DEADLINE_HEX=$(printf "%064x" $PROOF_DEADLINE) # uint256: 32 bytes -AUTHORIZER_HEX=${SENDER_ADDRESS:2} # address: 20 bytes (remove 0x) +CHAIN_ID_HEX=$(printf "%064x" 1337) # uint256: 32 bytes +CONTRACT_HEX=${GRAPH_TALLY_COLLECTOR_V2:2} # address: 20 bytes (remove 0x) +DOMAIN_HEX=$(echo -n "authorizeSignerProof" | xxd -p) # string: no length prefix +DEADLINE_HEX=$(printf "%064x" $PROOF_DEADLINE) # uint256: 32 bytes +AUTHORIZER_HEX=${SENDER_ADDRESS:2} # address: 20 bytes (remove 0x) MESSAGE_DATA="${CHAIN_ID_HEX}${CONTRACT_HEX}${DOMAIN_HEX}${DEADLINE_HEX}${AUTHORIZER_HEX}" MESSAGE_HASH=$(docker exec chain cast keccak "0x$MESSAGE_DATA") -# Sign the message with the signer's private key +# Sign the message with the signer's private key PROOF=$(docker exec chain cast wallet sign --private-key $SENDER_KEY "$MESSAGE_HASH") echo "Calling authorizeSigner with proof..." @@ -203,11 +202,12 @@ ESCROW_BALANCE_V2="(check via subgraph)" # Since we can't easily check balance via contract call, we'll verify via transaction success echo "✅ V2 escrow deposit transaction completed!" echo " Payer: $PAYER" -echo " Collector: $COLLECTOR" +echo " Collector: $COLLECTOR" echo " Receiver: $RECEIVER" echo " Amount: $AMOUNT" echo "" echo "Note: V2 escrow balance can be verified via the TAP V2 subgraph" echo "" -echo "✅ Successfully funded both V1 and V2 escrows!" \ No newline at end of file +echo "✅ Successfully funded both V1 and V2 escrows!" 
+ From 42dff0152c2439e08b85c341f241f1160bd869b9 Mon Sep 17 00:00:00 2001 From: Natanael Mojica Date: Mon, 11 Aug 2025 13:31:26 -0600 Subject: [PATCH 7/9] feat(setup): add timing reports and improve disk usage tracking - Add script execution time tracking with minutes/seconds display - Improve Docker size calculation with robust fallback methods - Remove debug output (ls, pwd) and unused environment variables - Update gateway container to use horizon.json instead of tap-contracts.json - Disable set -e to allow graceful error handling - Add structured completion report with timing and disk usage metrics --- setup-test-network.sh | 70 +++++++++++++++++++++++++++++-------------- 1 file changed, 48 insertions(+), 22 deletions(-) diff --git a/setup-test-network.sh b/setup-test-network.sh index 20b3a1b67..17b0d926f 100755 --- a/setup-test-network.sh +++ b/setup-test-network.sh @@ -1,5 +1,5 @@ #!/bin/bash -set -e +# set -e # ============================================================================== # SETUP LOCAL GRAPH NETWORK FOR TESTING (HORIZON VERSION) @@ -15,12 +15,39 @@ set -e # # - The script checks for existing services and skips those already running # ============================================================================== -# + +get_docker_sizes() { + local df_output=$(docker system df 2>/dev/null) + + # Extract sizes using awk (more reliable) + local images_size=$(echo "$df_output" | awk '/Images/ {print $4}' | head -1) + local containers_size=$(echo "$df_output" | awk '/Containers/ {print $4}' | head -1) + local volumes_size=$(echo "$df_output" | awk '/Local Volumes/ {print $5}' | head -1) + + # If awk fails, try alternative method + if [ -z "$images_size" ] || [ -z "$containers_size" ] || [ -z "$volumes_size" ]; then + # Method 2: Use docker system df --format table and parse + images_size=$(docker system df --format "table {{.Type}}\t{{.TotalCount}}\t{{.Size}}" 2>/dev/null | grep "Images" | awk '{print $4}' || echo "N/A") + containers_size=$(docker system df --format "table {{.Type}}\t{{.TotalCount}}\t{{.Size}}" 2>/dev/null | grep "Containers" | awk '{print $4}' || echo "N/A") + volumes_size=$(docker system df --format "table {{.Type}}\t{{.TotalCount}}\t{{.Size}}" 2>/dev/null | grep "Local Volumes" | awk '{print $5}' || echo "N/A") + fi + + # Set defaults if still empty + images_size=${images_size:-"N/A"} + containers_size=${containers_size:-"N/A"} + volumes_size=${volumes_size:-"N/A"} + + echo "$images_size $containers_size $volumes_size" +} + +# Track build times +SCRIPT_START_TIME=$(date +%s) # Save the starting disk usage START_SPACE=$(df -h --output=used /var/lib/docker | tail -1) -START_IMAGES_SIZE=$(docker system df --format '{{.ImagesSize}}' 2>/dev/null || echo "N/A") -START_CONTAINERS_SIZE=$(docker system df --format '{{.ContainersSize}}' 2>/dev/null || echo "N/A") -START_VOLUMES_SIZE=$(docker system df --format '{{.VolumesSize}}' 2>/dev/null || echo "N/A") +START_SIZES=($(get_docker_sizes)) +START_IMAGES_SIZE=${START_SIZES[0]} +START_CONTAINERS_SIZE=${START_SIZES[1]} +START_VOLUMES_SIZE=${START_SIZES[2]} echo "============ STARTING DISK USAGE ============" echo "Docker directory usage: $START_SPACE" @@ -107,8 +134,6 @@ if container_running "indexer-service" && container_running "tap-agent" && conta fi cd contrib/ -ls -pwd # Clone local-network repo if it doesn't exist if [ ! -d "local-network" ]; then @@ -239,15 +264,10 @@ echo "Running gateway container..." 
docker run -d --name gateway \
   --network local-network_default \
   -p 7700:7700 \
-  -v "$(pwd)/local-network/tap-contracts.json":/opt/tap-contracts.json:ro \
+  -v "$(pwd)/local-network/horizon.json":/opt/horizon.json:ro \
   -v "$(pwd)/local-network/subgraph-service.json":/opt/subgraph-service.json:ro \
+  -v "$(pwd)/local-network/.env":/opt/.env:ro \
   -e RUST_LOG=info,graph_gateway=trace \
-  -e ACCOUNT0_SECRET="$ACCOUNT0_SECRET" \
-  -e ACCOUNT0_ADDRESS="$ACCOUNT0_ADDRESS" \
-  -e GATEWAY_API_KEY="$GATEWAY_API_KEY" \
-  -e GRAPH_NODE_GRAPHQL="$GRAPH_NODE_GRAPHQL" \
-  -e REDPANDA_KAFKA="$REDPANDA_KAFKA" \
-  -e INDEXER_SERVICE="$INDEXER_SERVICE" \
   --restart on-failure:3 \
   local-gateway:latest

@@ -268,15 +288,21 @@ done
 # Ensure gateway is ready before testing
 timeout 100 bash -c 'until curl -f http://localhost:7700/ > /dev/null 2>&1; do echo "Waiting for gateway service..."; sleep 5; done'

-# After all services are running, measure the disk space used
-END_SPACE=$(df -h --output=used /var/lib/docker | tail -1)
-END_IMAGES_SIZE=$(docker system df --format '{{.ImagesSize}}' 2>/dev/null || echo "N/A")
-END_CONTAINERS_SIZE=$(docker system df --format '{{.ContainersSize}}' 2>/dev/null || echo "N/A")
-END_VOLUMES_SIZE=$(docker system df --format '{{.VolumesSize}}' 2>/dev/null || echo "N/A")
-
-echo "All services are now running!"
-echo "You can enjoy your new local network setup for testing with horizon upgrade."
+# Calculate timing and final reports
+SCRIPT_END_TIME=$(date +%s)
+TOTAL_DURATION=$((SCRIPT_END_TIME - SCRIPT_START_TIME))
+MINUTES=$((TOTAL_DURATION / 60))
+SECS=$((TOTAL_DURATION % 60))
+END_SPACE=$(df -h --output=used /var/lib/docker | tail -1)
+END_SIZES=($(get_docker_sizes))
+END_IMAGES_SIZE=${END_SIZES[0]}
+END_CONTAINERS_SIZE=${END_SIZES[1]}
+END_VOLUMES_SIZE=${END_SIZES[2]}
+
+echo "============ SETUP COMPLETED ============"
+echo "Total setup time: ${MINUTES}m ${SECS}s"
+echo ""
 echo "============ FINAL DISK USAGE ============"
 echo "Docker directory usage: $END_SPACE"
 echo "Images size: $END_IMAGES_SIZE"

From 1d20d10764f95c9bb46914266f885aed5729ce49 Mon Sep 17 00:00:00 2001
From: Natanael Mojica
Date: Mon, 11 Aug 2025 16:45:44 -0600
Subject: [PATCH 8/9] fix(test): clone local-network repo needed for testing containers

---
 .github/workflows/tap_integration_test.yml | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/.github/workflows/tap_integration_test.yml b/.github/workflows/tap_integration_test.yml
index b02e07d60..6a44fa589 100644
--- a/.github/workflows/tap_integration_test.yml
+++ b/.github/workflows/tap_integration_test.yml
@@ -45,6 +45,17 @@ jobs:
       #     registry: ghcr.io
       #     username: ${{ github.actor }}
       #     password: ${{ secrets.GITHUB_TOKEN }}
+      # TODO: Improve this later
+      # evaluate if it makes sense to use a git submodule instead
+      - name: Clone local-network dependency for Docker builds
+        run: |
+          cd contrib/
+          if [ ! -d "local-network" ]; then
+            git clone https://github.com/semiotic-ai/local-network.git
+            cd local-network
+            git checkout suchapalaver/test/horizon
+          fi
+
       - name: Build indexer-service image with Docker Compose
         run: docker compose -f contrib/docker-compose.yml build indexer-service

From 8129222beaea2dd3b28b191118311ba8374c729c Mon Sep 17 00:00:00 2001
From: Natanael Mojica
Date: Mon, 11 Aug 2025 21:33:01 -0600
Subject: [PATCH 9/9] fix(test): remove unnecessary sections from compose file

---
 contrib/docker-compose.yml | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/contrib/docker-compose.yml b/contrib/docker-compose.yml
index d5b5aed71..4d317939d 100644
--- a/contrib/docker-compose.yml
+++ b/contrib/docker-compose.yml
@@ -22,11 +22,6 @@ services:
       timeout: 3s
       retries: 10
       start_period: 10s
-      cap_add:
-        - SYS_ADMIN
-      privileged: true
-      security_opt:
-        - seccomp:unconfined

   tap-agent:
     build:
@@ -37,7 +32,6 @@ services:
       indexer-service:
         condition: service_healthy
     volumes:
-      - ./profiling:/opt/profiling:rw
       - ./local-network/.env:/opt/.env:ro
       - ./local-network/tap-contracts.json:/opt/contracts.json:ro
       - ./local-network/horizon.json:/opt/horizon.json:ro