
fix code cov issue #12

Workflow file for this run

---

Check failure on line 1 in .github/workflows/_go-tests.yml

GitHub Actions / .github/workflows/_go-tests.yml

Invalid workflow file

(Line: 50, Col: 19): Unexpected value 'false,'
name: nitro-go-tests
on:
  workflow_call:
    inputs:
      run-name:
        required: true
        type: string
        description: "The name of the test run. It will be used for labeling artifacts."
      run-defaults-a:
        required: false
        type: boolean
      run-defaults-b:
        required: false
        type: boolean
      run-flaky:
        required: false
        type: boolean
      run-pathdb:
        required: false
        type: boolean
      run-stylus:
        required: false
        type: boolean
      run-challenge:
        required: false
        type: boolean
      run-l3challenge:
        required: false
        type: boolean
      run-legacy-challenge:
        required: false
        type: boolean
      run-race:
        required: false
        type: boolean
      run-long:
        required: false
        type: boolean
      run-execution-spec-tests:
        required: false
        type: boolean
      run-pebble-a:
        required: false
        type: boolean
      run-pebble-b:
        required: false
        type: boolean
      run-experimental:
        required: false
        type: boolean
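# How a caller might wire up this reusable workflow (a minimal sketch; the
# job name and the particular inputs chosen below are illustrative, not taken
# from a real caller in this run):
#
#   jobs:
#     tests:
#       uses: ./.github/workflows/_go-tests.yml
#       with:
#         run-name: defaults-a
#         run-defaults-a: true
#       secrets: inherit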
jobs:
  go-tests:
    name: Go tests
    runs-on: arbitrator-ci
    services:
      redis:
        image: redis
        ports:
          - 6379:6379
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          submodules: recursive
      - name: Setup CI
        uses: ./.github/actions/ci-setup
      - name: Set environment variables
        run: |
          mkdir -p target/tmp/deadbeefbee
          echo "TMPDIR=$(pwd)/target/tmp/deadbeefbee" >> "$GITHUB_ENV"
          echo "GOMEMLIMIT=6GiB" >> "$GITHUB_ENV"
          echo "GOGC=80" >> "$GITHUB_ENV"
          echo "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" >> "$GITHUB_ENV"
      - name: Build
        run: make -j8 build test-go-deps
      - name: Build node dependencies
        run: make -j8 build-node-deps
      # --------------------- PATHDB MODE ---------------------
      - name: run tests without race detection and path state scheme
        if: inputs.run-pathdb
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags cionly --timeout 90m --cover --test_state_scheme path
          --junitfile test-results/junit-pathdb.xml
      # --------------------- DEFAULTS MODE ---------------------
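      # The defaults suite is split alphabetically so the two batches can run
      # as separate jobs: the A batch runs only tests matching '^Test[A-L]'
      # and the B batch skips that pattern, covering the rest of the suite.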
      - name: run tests without race detection and hash state scheme (A-batch)
        if: inputs.run-defaults-a
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags cionly --timeout 60m --test_state_scheme hash
          --junitfile test-results/junit-defaults-a.xml --run '^Test[A-L]'
      - name: run tests without race detection and hash state scheme (B-batch)
        if: inputs.run-defaults-b
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags cionly --timeout 60m --test_state_scheme hash
          --junitfile test-results/junit-defaults-b.xml --skip '^Test[A-L]'
      - name: run tests with consensus and execution nodes connected over json rpc (A-batch)
        if: inputs.run-defaults-a
        continue-on-error: true
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags cionly --timeout 60m --test_state_scheme hash --consensus_execution_in_same_process_use_rpc --run '^Test[A-L]'
      - name: run tests with consensus and execution nodes connected over json rpc (B-batch)
        if: inputs.run-defaults-b
        continue-on-error: true
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags cionly --timeout 60m --test_state_scheme hash --consensus_execution_in_same_process_use_rpc --skip '^Test[A-L]'
      - name: run redis tests
        if: inputs.run-defaults-a
        run: >-
          gotestsum --format short-verbose -- -p 1 -run TestRedis ./arbnode/... ./system_tests/...
          -coverprofile=coverage-redis.txt -covermode=atomic -coverpkg=./... -- --test_redis=redis://localhost:6379/0
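      # This run writes coverage-redis.txt, which the Codecov upload step
      # below picks up alongside coverage.txt.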
      - name: create block input json file
        if: inputs.run-defaults-a
        run: >-
          gotestsum --format short-verbose -- -run TestProgramStorage$ ./system_tests/... --count 1 --
          --recordBlockInputs.enable=true --recordBlockInputs.WithBaseDir="${{ github.workspace }}/target"
          --recordBlockInputs.WithTimestampDirEnabled=false --recordBlockInputs.WithBlockIdInFileNameEnabled=false
      - name: run arbitrator prover on block input json
        if: inputs.run-defaults-a
        run: |
          make build-prover-bin
          target/bin/prover target/machines/latest/machine.wavm.br -b \
            --json-inputs="${{ github.workspace }}/target/TestProgramStorage/block_inputs.json"
      - name: run jit prover on block input json
        if: inputs.run-defaults-a
        id: jit
        run: |
          make build-jit
          OUTPUT=$(target/bin/jit --binary target/machines/latest/replay.wasm --require-success --debug \
            json --inputs '${{ github.workspace }}/target/TestProgramStorage/block_inputs.json')
          JIT_HASH=$(echo "$OUTPUT" | grep -oP 'hash \K[a-f0-9]+')
          echo "jit_hash=$JIT_HASH" >> $GITHUB_OUTPUT
      - name: run the Rust validation server on block input json
        if: inputs.run-defaults-a
        run: |
          # 1. Start server in background
          make build-validation-server
          target/bin/validator --module-root-path ./target/machines/latest/module-root.txt &
          SERVER_PID=$!
          # 2. Wait for server to respond (up to 5 seconds)
          echo "Waiting for validator to start..."
          timeout 5s bash -c 'until curl -s localhost:4141 > /dev/null; do sleep 1; done'
          # 3. Send validation request
          RESPONSE=$(curl -s -X POST http://localhost:4141/validation_validate \
            -H "Content-Type: application/json" \
            -d @target/TestProgramStorage/block_inputs.json)
          # 4. Stop the server
          kill $SERVER_PID
          # 5. Compare hashes
          SERVER_HASH=$(echo "$RESPONSE" | jq -r '.BlockHash')
          JIT_HASH="${{ steps.jit.outputs.jit_hash }}"
          echo "JIT Hash: $JIT_HASH"
          echo "Server Hash: $SERVER_HASH"
          if [ "$JIT_HASH" == "$SERVER_HASH" ]; then
            echo "✅ Validation successful"
          else
            echo "❌ Validation failed: hashes do not match"
            exit 1
          fi
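      # The step fails unless the jit prover and the Rust validation server
      # produce the same block hash for the same recorded inputs, which should
      # catch divergence between the two validation paths.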
      # --------------------- FLAKY MODE --------------------------
      - name: run flaky tests
        if: inputs.run-flaky
        continue-on-error: true
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags cionly --timeout 60m --test_state_scheme hash --flaky
          --junitfile test-results/junit-flaky.xml
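      # continue-on-error keeps known-flaky failures from failing the job;
      # their results still land in junit-flaky.xml for triage.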
      # --------------------- CHALLENGE MODES ---------------------
      - name: build challenge tests
        if: inputs.run-challenge
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags challengetest
          --run TestChallenge --timeout 120m --cover
          --junitfile test-results/junit-challenge.xml
      - name: run L3 challenge tests
        if: inputs.run-l3challenge
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags challengetest --run TestL3Challenge --timeout 120m --cover
          --junitfile test-results/junit-l3challenge.xml
      - name: run legacy challenge tests
        if: inputs.run-legacy-challenge
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags legacychallengetest
          --run TestChallenge --timeout 60m --cover
          --junitfile test-results/junit-legacy-challenge.xml
      # --------------------- STYLUS MODE ---------------------
      - name: run stylus tests
        if: inputs.run-stylus
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags stylustest --run TestProgramArbitrator --timeout 60m --cover
          --junitfile test-results/junit-stylus.xml
      # --------------------- RACE MODE ---------------------
      - name: run race tests
        if: inputs.run-race
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --race --timeout 90m --test_state_scheme hash
          --junitfile test-results/junit-race.xml
      # --------------------- LONG MODE ---------------------
      - name: run long tests
        if: inputs.run-long
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh --tags stylustest
          --run TestProgramLong --timeout 60m --cover
          --junitfile test-results/junit-long.xml
      # --------------------- EXECUTION SPEC MODE ---------------------
      - name: run execution spec tests
        if: inputs.run-execution-spec-tests
        run: ${{ github.workspace }}/.github/workflows/runExecutionSpecTests.sh
      # --------------------- PEBBLE MODE ---------------------
      - name: run pebble tests (A-batch)
        if: inputs.run-pebble-a
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh --timeout 90m --test_database_engine pebble
          --junitfile test-results/junit-pebble-a.xml --run '^Test[A-N]' --reduce-parallelism
      - name: run pebble tests (B-batch)
        if: inputs.run-pebble-b
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh --timeout 90m --test_database_engine pebble
          --junitfile test-results/junit-pebble-b.xml --skip '^Test[A-N]' --reduce-parallelism
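      # Both pebble batches pass --reduce-parallelism, a flag of this repo's
      # gotestsum.sh wrapper; presumably it lowers test parallelism to fit
      # the pebble database engine's heavier resource use.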
      # --------------------- PROCESS JUNIT LOGS ---------------------
      - name: Process JUnit XML logs
        if: always()
        run: python3 ${{ github.workspace }}/.github/workflows/process_junit.py test-results/
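      # process_junit.py post-processes the junit*.xml reports collected
      # under test-results/ before the next step uploads them as artifacts.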
      - name: Upload Go test Artifacts
        if: always()
        uses: actions/upload-artifact@v6
        with:
          name: junit-reports-go-${{ inputs.run-name }}
          path: test-results/junit*.xml
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v5
        with:
          fail_ci_if_error: false
          files: ./coverage.txt,./coverage-redis.txt
          verbose: false
          token: ${{ secrets.CODECOV_TOKEN }}
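      # fail_ci_if_error: false means a failed Codecov upload is logged but
      # does not fail the job; the upload authenticates with the
      # CODECOV_TOKEN repository secret.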
      # --------------------- EXPERIMENTAL MODE --------------------
      - name: run experimental tooling tests
        if: inputs.run-experimental
        run: >-
          ${{ github.workspace }}/.github/workflows/gotestsum.sh
          --tags debugblock --run TestExperimental --timeout 60m --cover
      # --------------------- ARCHIVE LOGS FOR ALL MODES ---------------------
      - name: Archive detailed run log
        if: always()
        uses: actions/upload-artifact@v6
        with:
          name: ${{ inputs.run-name }}-full.log
          path: full.log