# Workflow file captured from the GitHub Actions run view
# Run context: HIP-1261 add fee calculation library (#1469)

# SPDX-License-Identifier: Apache-2.0
name: End-to-end Tests

# Triggers: PRs touching this workflow or test code, pushes to main/release
# branches, and manual dispatch against an arbitrary branch.
on:
  pull_request:
    branches:
      - "main"
      - "release/**"
    paths:
      - ".github/workflows/acceptance.yaml"
      - "test/**"
  push:
    branches:
      - "main"
      - "release/**"
  workflow_dispatch:
    inputs:
      branch:
        description: "Branch"
        required: true
        type: string

# Default to read-only token scope; jobs widen only what they need.
permissions:
  contents: read

defaults:
  run:
    shell: bash

env:
  LC_ALL: C.UTF-8
jobs:
  acceptance:
    runs-on: hiero-mirror-node-linux-large
    strategy:
      fail-fast: false
      # Run the full suite once per stream source: classic record streams
      # and the newer block-node stream.
      matrix:
        stream-type:
          - RECORD
          - BLOCK
    timeout-minutes: 50
    env:
      BLOCK_NODE_CHART_VERSION: v0.28.0
      CONSENSUS_VERSION: v0.72.0-rc.1
      HELM_RELEASE_NAME: mirror-1
      SOLO_CLUSTER_NAME: test
      SOLO_NAMESPACE: mirror
      SOLO_CLUSTER_SETUP_NAMESPACE: solo
      SOLO_DEPLOYMENT: solo-deployment
      SOLO_VERSION: v0.59.1
    permissions:
      pull-requests: read
    steps:
      - name: Harden Runner
        uses: step-security/harden-runner@58077d3c7e43986b6b15fba718e8ea69e387dfcc # v2.15.1
        with:
          egress-policy: audit

      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
        with:
          # Manual dispatch checks out the requested branch; other events
          # use the default ref for the triggering commit.
          ref: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.branch || '' }}

      - name: Setup Node
        uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
        with:
          node-version: 24

      - name: Install Helm
        uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4 # v4.3.1
        with:
          version: "4.0.5"

      - name: Setup Kind
        uses: helm/kind-action@ef37e7f390d99f746eb8b610417061a60e82a6cc # v1.14.0
        with:
          cluster_name: ${{ env.SOLO_CLUSTER_NAME }}
          kubectl_version: v1.34.0
          version: v0.31.0

      - name: Install JDK
        uses: actions/setup-java@be666c2fcd27ec809703dec50e508c2fdc7f6654 # v5.2.0
        with:
          distribution: temurin
          java-version: 25

      - name: Install Solo CLI via npm
        run: npm install -g @hashgraph/solo@${SOLO_VERSION}

      # Derive the chart version (TAG), the diff base for change detection
      # (BASE), and whether this commit is the one an annotated release tag
      # points at (TRIGGERED_BY_TAG). For tagged releases, wait until all
      # module images are published before proceeding.
      - name: Determine base and tag
        timeout-minutes: 15
        run: |
          set -ex
          TAG=$(sed -nE 's/^version: (.+)$/\1/p' charts/hedera-mirror/Chart.yaml)
          BASE=""
          TRIGGERED_BY_TAG=false
          if [[ "$TAG" != *SNAPSHOT ]]; then
            BASE="v$TAG"
            # The dereferenced tag corresponds to the commit the annotated tag refers to
            DEREFERENCED_TAG="v${TAG}^{}"
            git ls-remote --tags origin "${DEREFERENCED_TAG}" | grep $(git rev-parse HEAD) && TRIGGERED_BY_TAG=true \
              || true
          fi
          echo "BASE=${BASE}" >> $GITHUB_ENV
          echo "TAG=${TAG}" >> $GITHUB_ENV
          echo "TRIGGERED_BY_TAG=${TRIGGERED_BY_TAG}" >> $GITHUB_ENV
          if [[ "$TRIGGERED_BY_TAG" == "true" ]]; then
            # wait for images becoming ready if it's a tagged commit
            for module in grpc importer monitor rest rest-java test web3; do
              while ! docker manifest inspect "gcr.io/mirrornode/hedera-mirror-$module:$TAG" >/dev/null 2>&1; do
                sleep 10
              done
            done
          fi

      - name: Find changed modules
        id: changeset
        if: env.TRIGGERED_BY_TAG != 'true'
        uses: step-security/paths-filter@6eee183b0d2fd101d3f8ee2935c127bca14c5625 # v3.0.5
        with:
          base: ${{ env.BASE }}
          filters: |
            grpc:
              - 'grpc/**'
            importer:
              - 'importer/**'
            monitor:
              - 'monitor/**'
            rest:
              - 'rest/!(monitoring)?(/**)'
            rest-java:
              - 'rest-java/**'
            test:
              - 'test/**'
            web3:
              - 'web3/**'

      - name: Setup Gradle
        if: env.TRIGGERED_BY_TAG != 'true' && steps.changeset.outputs.changes != '[]'
        uses: gradle/actions/setup-gradle@0723195856401067f7a2779048b490ace7a47d7c # v4

      # Build only the changed modules locally and load them into the kind
      # cluster so the chart uses the freshly built images.
      - name: Build images
        if: env.TRIGGERED_BY_TAG != 'true' && steps.changeset.outputs.changes != '[]'
        run: |
          set -ex
          echo '${{ steps.changeset.outputs.changes }}' | jq -r '.[]' | while read module; do
            ./gradlew ":${module}:dockerBuild" -x :rest:monitoring:dockerBuild -PimageTag="${TAG}"
            kind load docker-image "gcr.io/mirrornode/hedera-mirror-${module}:${TAG}" -n "${SOLO_CLUSTER_NAME}"
          done

      # Stand up a single-node Solo network (plus an optional block node for
      # the BLOCK matrix leg) and install the mirror node chart with the
      # acceptance-test values written to mirror.yaml.
      # NOTE(review): nesting of monitor.test.enabled in the heredoc below is
      # a best-effort reconstruction — confirm against the chart values schema.
      - name: Setup Solo Cluster
        run: |
          set -ex
          cat <<EOF > mirror.yaml
          monitor:
            env:
              HIERO_MIRROR_MONITOR_HEALTH_RELEASE_FAILWHENINACTIVE: "true"
            test:
              enabled: true
          test:
            env:
              HIERO_MIRROR_TEST_ACCEPTANCE_FEATURE_CONTRACTCALLLOCALESTIMATE: "true"
              HIERO_MIRROR_TEST_ACCEPTANCE_NETWORK: OTHER
              HIERO_MIRROR_TEST_ACCEPTANCE_WEB3_OPCODETRACER_ENABLED: "true"
              HIERO_MIRROR_TEST_ACCEPTANCE_SKIPENTITIESCLEANUP: "true"
            enabled: true
          web3:
            env:
              HIERO_MIRROR_WEB3_OPCODE_TRACER_ENABLED: "true"
          EOF
          solo init
          solo cluster-ref config connect --cluster-ref kind-"${SOLO_CLUSTER_NAME}" \
            --context kind-"${SOLO_CLUSTER_NAME}"
          solo deployment config create --deployment "${SOLO_DEPLOYMENT}" --namespace "${SOLO_NAMESPACE}"
          solo deployment cluster attach --deployment "${SOLO_DEPLOYMENT}" --cluster-ref kind-"${SOLO_CLUSTER_NAME}" \
            --num-consensus-nodes 1
          solo cluster-ref config setup --cluster-ref kind-"${SOLO_CLUSTER_NAME}"
          if [ "${{ matrix.stream-type }}" = "BLOCK" ]; then
            # Heredoc body stays at the block-scalar base column so the
            # appended keys align with mirror.yaml's top-level keys.
            cat <<EOF >> mirror.yaml
          importer:
            env:
              HIERO_MIRROR_IMPORTER_BLOCK_NODES_0_HOST: 'block-node-1.${SOLO_NAMESPACE}.svc.cluster.local'
              SPRING_PROFILES_ACTIVE: 'blocknode'
          EOF
            solo block node add --deployment "${SOLO_DEPLOYMENT}" --cluster-ref kind-"${SOLO_CLUSTER_NAME}" \
              --chart-version "${BLOCK_NODE_CHART_VERSION}" --release-tag "${CONSENSUS_VERSION}"
          fi
          solo keys consensus generate --gossip-keys --tls-keys --deployment "${SOLO_DEPLOYMENT}" -i node1
          solo consensus network deploy --deployment "${SOLO_DEPLOYMENT}" --release-tag "${CONSENSUS_VERSION}"
          solo consensus node setup --deployment "${SOLO_DEPLOYMENT}" -i node1 --release-tag "${CONSENSUS_VERSION}"
          solo consensus node start --deployment "${SOLO_DEPLOYMENT}" -i node1
          helm dependency update charts/hedera-mirror
          solo mirror node add --cluster-ref kind-"${SOLO_CLUSTER_NAME}" --deployment "${SOLO_DEPLOYMENT}" \
            -f mirror.yaml --mirror-node-chart-dir ./charts --pinger --enable-ingress
          APP_VERSION=$(helm ls -n "${SOLO_NAMESPACE}" -o json \
            | jq -r '.[] | select (.chart | test("hedera-mirror-.*")) | .app_version')
          echo "Installed hedera-mirror chart version $APP_VERSION"

      - name: Run acceptance tests
        run: |
          helm test "${HELM_RELEASE_NAME}" -n "${SOLO_NAMESPACE}" --logs \
            --timeout 20m | tee output_log.txt

      - name: Run k6 tests
        if: ${{ matrix.stream-type != 'BLOCK' }}
        env:
          BASE_URL: "http://127.0.0.1:8081"
          DEFAULT_DURATION: "3s"
          DEFAULT_GRACEFUL_STOP: "1s"
          DEFAULT_VUS: 2
        uses: ./.github/actions/k6

      # Best-effort diagnostics: describe and dump logs for every pod in the
      # mirror namespace when any earlier step failed.
      - name: Show Pod Logs on Failure
        continue-on-error: true
        if: ${{ failure() }}
        run: |
          echo "--------------------------------------------------"
          echo "Workflow failed. Collecting logs for debugging..."
          echo "--------------------------------------------------"
          echo "--- Describing all pods in namespace ${SOLO_NAMESPACE} ---"
          kubectl describe pods -n "${SOLO_NAMESPACE}" || echo "Could not describe pods"
          echo "--- Fetching logs for all pods in namespace ${SOLO_NAMESPACE} ---"
          for pod in $(kubectl get pods -n "${SOLO_NAMESPACE}" -o name); do
            if [ -n "$pod" ]; then
              echo "--- Logs for pod: $pod ---"
              kubectl logs "$pod" -n "${SOLO_NAMESPACE}" --all-containers=true --tail=10000 || echo "Could not get logs for pod $pod"
            fi
          done