Skip to content
Open
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 69 additions & 0 deletions .github/actions/linux_browser_tests/action.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
name: Linux Browser Tests
description: Runs browser tests on-host for Linux.

inputs:
  test_artifacts_key:
    description: "Artifact key used to store test artifacts."
    required: true
  test_results_key:
    description: "Artifact key used to store test results."
    required: true
  platform:
    description: "The platform being tested."
    required: true
  config:
    description: "The build config (e.g. devel, qa)."
    required: true

runs:
  using: "composite"
  steps:
    - name: Download Artifacts
      uses: actions/download-artifact@v4
      with:
        name: ${{ inputs.test_artifacts_key }}
        path: artifacts

    - name: Run Browser Tests (Linux)
      shell: bash
      env:
        # Xvfb virtual-display configuration. Screen spec is WxHxDEPTH with a
        # plain integer depth; the previous "24i" suffix was a typo (Xvfb's
        # numeric parse happens to ignore trailing junk, but it is invalid).
        XVFB_SERVER_ARGS: "-screen 0 1920x1080x24 +render +extension GLX -noreset"
      run: |
        # Fail on any error or unset variable, and — critically — on a
        # failure anywhere in a pipeline: without pipefail the test
        # command's exit status below would be masked by `tee`, making
        # failing browser tests report success.
        set -euxo pipefail

        mkdir -p unit_test/
        cd unit_test/
        tar -I 'zstd -T0' -xf ../artifacts/test_artifacts.tar.zstd

        # Shared libraries are unpacked next to the test binaries.
        out_dir="${GITHUB_WORKSPACE}/unit_test/out/${{ inputs.platform }}_${{ inputs.config }}"
        LD_LIBRARY_PATH="${out_dir}/starboard"
        LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${out_dir}"
        export LD_LIBRARY_PATH

        # Linux runner binary is located in the out directory.
        RUNNER_BINARY="./out/${{ inputs.platform }}_${{ inputs.config }}/cobalt_browsertests_runner"

        # Use the stable one-by-one runner script.
        RUN_BROWSER_TESTS_PY="${GITHUB_WORKSPACE}/cobalt/testing/browser_tests/run_browser_tests.py"

        # Apply the per-platform test filter if one exists; it lists tests
        # to exclude via a negative gtest filter ("-a:b:c").
        TEST_FILTER_JSON="${GITHUB_WORKSPACE}/cobalt/testing/filters/${{ inputs.platform }}/cobalt_browsertests_runner_filter.json"
        TEST_FILTER="*"
        if [ -f "${TEST_FILTER_JSON}" ]; then
          FAILING_TESTS=$(jq -r '.failing_tests | join(":")' "${TEST_FILTER_JSON}")
          if [ -n "${FAILING_TESTS}" ]; then
            TEST_FILTER="-${FAILING_TESTS}"
          fi
        fi

        mkdir -p "${GITHUB_WORKSPACE}/results"

        # Wrap the execution in xvfb-run for a virtual display.
        # We pass --gtest_output to the runner script, which we should ensure it handles.
        # Note: kept on a single line to avoid backslash-continuation issues
        # in GitHub Actions run blocks.
        xvfb-run -a --server-args="${XVFB_SERVER_ARGS}" python3 "${RUN_BROWSER_TESTS_PY}" "${RUNNER_BINARY}" --gtest_filter="${TEST_FILTER}" --gtest_output="xml:${GITHUB_WORKSPACE}/results/cobalt_browsertests_runner_result.xml" 2>&1 | tee "${GITHUB_WORKSPACE}/results/cobalt_browsertests_runner_log.txt"

    - name: Archive Browser Test Results
      if: always()
      uses: actions/upload-artifact@v4
      with:
        name: ${{ inputs.test_results_key }}
        path: results/*
33 changes: 27 additions & 6 deletions .github/actions/upload_test_artifacts/action.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ inputs:
test_targets_json_file:
description: "The path to test targets json file."
required: true
archive_browsertests:
description: "Indicates if browser tests should be archived."
default: "false"
runs:
using: "composite"
steps:
Expand Down Expand Up @@ -53,12 +56,30 @@ runs:
FLAGS="--compression zstd --compression-level=9"
fi

time vpython3 -u ./cobalt/build/archive_test_artifacts.py \
--source-dir ${GITHUB_WORKSPACE}/cobalt/src \
--out-dir out/${{ matrix.platform }}_${{ matrix.config }}/ \
--destination-dir ${GITHUB_WORKSPACE}/artifacts \
--targets $(cat "${TEST_TARGETS_JSON_FILE}" | jq -cr '.test_targets | join(",")') \
${FLAGS}
TARGETS=$(cat "${TEST_TARGETS_JSON_FILE}" | jq -cr '.test_targets | join(",")')

# Archive browser tests if requested.
if [[ "${{ inputs.archive_browsertests }}" == "true" ]]; then
# Determine the browser test target for this platform.
BROWSERTEST_TARGET="cobalt/testing/browser_tests:cobalt_browsertests"
if [[ "${{ matrix.platform }}" == *linux* ]]; then
BROWSERTEST_TARGET="cobalt/testing/browser_tests:cobalt_browsertests_runner"
fi
if [[ -n "${TARGETS}" ]]; then
TARGETS="${TARGETS},${BROWSERTEST_TARGET}"
else
TARGETS="${BROWSERTEST_TARGET}"
fi
fi

if [[ -n "${TARGETS}" ]]; then
time vpython3 -u ./cobalt/build/archive_test_artifacts.py \
--source-dir ${GITHUB_WORKSPACE}/cobalt/src \
--out-dir out/${{ matrix.platform }}_${{ matrix.config }}/ \
--destination-dir ${GITHUB_WORKSPACE}/artifacts \
--targets "${TARGETS}" \
${FLAGS}
fi
shell: bash
- name: Upload On-Host Test Artifacts Archive
if: inputs.upload_on_host_test_artifacts == 'true'
Expand Down
2 changes: 2 additions & 0 deletions .github/config/linux.json
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,12 @@
],
"test_package": true,
"test_on_host": true,
"test_browser": true,
"test_root_target": "//cobalt:gn_all",
"num_gtest_shards": 10,
"targets": [
"cobalt:gn_all",
"cobalt/testing/browser_tests:cobalt_browsertests_runner",
"chromedriver",
"dump_syms",
"minidump_stackwalk"
Expand Down
130 changes: 110 additions & 20 deletions .github/workflows/main.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
.github
cobalt/build
cobalt/docker
cobalt/testing/browser_tests/tools
cobalt/testing/filters
cobalt/tools

Expand Down Expand Up @@ -220,6 +221,20 @@
set -x
browser_test_targets=$(cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -cr '.browser_test_targets // []')
echo "browser_test_targets=${browser_test_targets}" >> $GITHUB_OUTPUT

# Also export just the target strings for the results matrix.
PLATFORM="${{ inputs.platform }}"
if [[ "$PLATFORM" == *linux* ]]; then
# For Linux host, the target is in the primary 'targets' list.
if cat ${GITHUB_WORKSPACE}/.github/config/${{ inputs.platform }}.json | jq -e '.targets[] | select(contains("cobalt_browsertests_runner"))' > /dev/null; then
browser_test_targets_json='["cobalt/testing/browser_tests:cobalt_browsertests_runner"]'
else
browser_test_targets_json='[]'
fi
else
browser_test_targets_json=$(echo "${browser_test_targets}" | jq -c 'map(.target)')
fi
echo "browser_test_targets_json=${browser_test_targets_json}" >> $GITHUB_OUTPUT
- name: Set YTS WPT test targets
id: set-yts-wpt-test-targets
shell: bash
Expand Down Expand Up @@ -280,7 +295,6 @@
targets: ${{ steps.set-targets.outputs.targets }}
build_configs: ${{ steps.set-build-configs.outputs.build_configs }}
includes: ${{ steps.set-includes.outputs.includes }}
docker_service: ${{ steps.set-docker-service.outputs.docker_service }}
docker_content_sha: ${{ steps.set-docker-hash.outputs.docker_content_sha }}
gtest_shards: ${{ steps.set-gtest-shards.outputs.gtest_shards }}
test_on_host: ${{ steps.set-test-on-host.outputs.test_on_host }}
Expand All @@ -299,10 +313,12 @@
browser_test_targets: ${{ steps.set-browser-test-targets.outputs.browser_test_targets }}
yts_wpt_test_targets: ${{ steps.set-yts-wpt-test-targets.outputs.yts_wpt_test_targets }}
e2e_test_targets: ${{ steps.set-e2e-test-targets.outputs.e2e_test_targets }}
browser_test_targets_json: ${{ steps.set-browser-test-targets.outputs.browser_test_targets_json }}
test_dimensions: ${{ steps.set-test-dimensions.outputs.test_dimensions }}
test_attempts: ${{ steps.set-test-attempts.outputs.test_attempts }}
num_gtest_shards: ${{ steps.set-gtest-shards.outputs.num_gtest_shards }}
enable_sccache: ${{ steps.set-enable-sccache.outputs.enable_sccache }}
docker_service: ${{ steps.set-docker-service.outputs.docker_service }}

# Builds, tags, and pushes Cobalt docker build images to ghr.
docker-build-image:
Expand Down Expand Up @@ -455,6 +471,7 @@
upload_on_device_test_artifacts: ${{ matrix.config == 'devel' && needs.initialize.outputs.test_on_device == 'true' }}
upload_e2e_test_artifacts: ${{ matrix.config == 'qa' && (needs.initialize.outputs.test_e2e == 'true' || needs.initialize.outputs.test_yts == 'true') }}
test_targets_json_file: out/${{ matrix.platform }}_${{ matrix.config }}/test_targets.json
archive_browsertests: ${{ matrix.config == 'devel' && needs.initialize.outputs.test_browser == 'true' }}

- name: Run API Leak Detector
uses: ./.github/actions/api_leak_detector
Expand Down Expand Up @@ -601,18 +618,20 @@
test_type: 'yts_test'
test_targets: '${{ needs.initialize.outputs.yts_playback_test_targets }}'

browser-test:
needs: [initialize, build]
browser-test-on-host:
needs: [initialize, build, docker-unittest-image]
if: |
needs.initialize.outputs.test_browser == 'true' &&
(
(github.event_name == 'pull_request' && github.event.pull_request.draft == false ) ||
(inputs.nightly == 'true' || github.event_name == 'schedule') ||
(github.event_name == 'push')
)
needs.initialize.outputs.test_on_host == 'true' &&
(
(github.event_name == 'pull_request' && github.event.pull_request.draft == false ) ||
((inputs.nightly == 'true' || github.event_name == 'schedule') && vars.RUN_ODT_TESTS_ON_NIGHTLY != 'False') ||
(github.event_name == 'push' && vars.RUN_ODT_TESTS_ON_POSTSUBMIT != 'False')
)
runs-on: [self-hosted, odt-runner]
name: ${{ matrix.name }}_browser_tests
permissions: {}
name: ${{ matrix.name }}_browser_tests_on_host
container:
image: ${{ needs.docker-unittest-image.outputs.docker_tag }}
strategy:
fail-fast: false
matrix:
Expand All @@ -626,11 +645,16 @@
fetch-depth: 1
filter: blob:none
sparse-checkout: ${{ env.CI_ESSENTIALS }}
- name: Run Browser Tests
uses: ./.github/actions/internal_tests
- name: Run Browser Tests (Linux)
uses: ./.github/actions/linux_browser_tests
with:
test_type: 'browser_test'
test_targets: '${{ needs.initialize.outputs.browser_test_targets }}'
test_artifacts_key: ${{ matrix.platform }}_${{ matrix.name }}_test_artifacts
test_results_key: ${{ matrix.platform }}_${{ matrix.name }}_test_results_browsertests
platform: ${{ matrix.platform }}
config: ${{ matrix.config }}
outputs:
# Browser tests are fixed to a single target.
test_targets_json: '["cobalt/testing/browser_tests:cobalt_browsertests"]'

yts-wpt-test:
needs: [initialize, build]
Expand Down Expand Up @@ -758,7 +782,8 @@
needs: [initialize, build, on-device-test, on-host-test]
if: always() &&
(
needs.on-device-test.result != 'skipped' || needs.on-host-test.result != 'skipped'
needs.on-device-test.result != 'skipped' ||
needs.on-host-test.result != 'skipped'
)
permissions: {}
runs-on: ubuntu-latest
Expand All @@ -771,7 +796,7 @@
platform: ${{ fromJson(needs.initialize.outputs.platforms) }}
include: ${{ fromJson(needs.initialize.outputs.includes) }}
config: [devel]
test_target: ${{ fromJson(needs.on-device-test.outputs.test_targets_json || needs.on-host-test.outputs.test_targets_json) }}
test_target: ${{ fromJson(needs.on-device-test.outputs.test_targets_json || needs.on-host-test.outputs.test_targets_json || '[]') }}
steps:
- name: Restore CI Essentials
uses: actions/checkout@v4
Expand Down Expand Up @@ -826,7 +851,7 @@
# TODO: Enable symbolization when it works.
# symbolize: true
symbolize: 'false'
- name: ${{ matrix.test_target }} Test Logs

Check warning

Code scanning / Scorecard

Pinned-Dependencies Medium

score is 0: GitHub-owned GitHubAction not pinned by hash
Remediation tip: update your workflow using https://app.stepsecurity.io
Click Remediation section below for further remediation help
if: always() && steps.filter.outputs.skipped != 'true'
uses: ./.github/actions/print_logs
with:
Expand All @@ -836,11 +861,67 @@
# symbolize: ${{ needs.initialize.outputs.test_on_device == 'true' }}
symbolize: 'false'

browser-test-results:
needs: [initialize, build, browser-test-on-host]
if: always() && needs.browser-test-on-host.result != 'skipped'
permissions: {}
runs-on: ubuntu-latest
name: ${{ matrix.test_target }}
env:
TEST_RESULTS_KEY: ${{ matrix.platform }}_${{ matrix.name }}_test_results_browsertests
strategy:
fail-fast: false
matrix:
platform: ${{ fromJson(needs.initialize.outputs.platforms) }}
include: ${{ fromJson(needs.initialize.outputs.includes) }}
config: [devel]
test_target: ${{ fromJson(needs.initialize.outputs.browser_test_targets_json || '[]') }}
steps:
- name: Restore CI Essentials
uses: actions/checkout@v4
with:
fetch-depth: 1
filter: blob:none
sparse-checkout: ${{ env.CI_ESSENTIALS }}
- name: Extract Test Target Name
id: extract-target-name
env:
test_target_with_path: ${{ matrix.test_target }}
run: echo "test_target=${test_target_with_path#*:}" >> $GITHUB_OUTPUT
shell: bash
- name: Download Test Results
uses: actions/download-artifact@v5
continue-on-error: true
with:
pattern: ${{ env.TEST_RESULTS_KEY }}
path: results/
- name: Test Results
uses: ./.github/actions/process_test_results
with:
target_name: ${{ matrix.test_target }}
results_glob: results/**/${{ steps.extract-target-name.outputs.test_target }}*.xml
datadog_api_key: ${{ secrets.datadog_api_key }}
num_gtest_shards: 1
- name: ${{ matrix.test_target }} Device Logs
if: always() && needs.initialize.outputs.test_on_device == 'true'
continue-on-error: true
uses: ./.github/actions/print_logs
with:
log_glob: results/**/${{ steps.extract-target-name.outputs.test_target }}*logcat.txt
symbolize: 'false'
- name: ${{ matrix.test_target }} Test Logs
if: always()
uses: ./.github/actions/print_logs
with:
log_glob: results/**/${{ steps.extract-target-name.outputs.test_target }}*log.txt
results_glob: results/**/${{ steps.extract-target-name.outputs.test_target }}*.xml
symbolize: 'false'

validate-test-result:
needs: [initialize, build, test-results]
needs: [initialize, build, test-results, browser-test-results]
if: always()
permissions: {}
runs-on: ubuntu-latest

Check warning

Code scanning / Scorecard

Pinned-Dependencies Medium

score is 1: GitHub-owned GitHubAction not pinned by hash
Click Remediation section below to solve this issue
name: ${{ matrix.name }}_tests_passing
env:
TEST_RESULTS_KEY: ${{ matrix.platform }}_${{ matrix.name }}_test_results
Expand All @@ -856,16 +937,25 @@
fetch-depth: 1
filter: blob:none
sparse-checkout: ${{ env.CI_ESSENTIALS }}
- name: Download Test Results
- name: Download Unit Test Results
uses: actions/download-artifact@v5
with:
pattern: ${{ env.TEST_RESULTS_KEY }}*
path: results/
- name: Download Browser Test Results
uses: actions/download-artifact@v5
continue-on-error: true
with:
pattern: ${{ matrix.platform }}_${{ matrix.name }}_test_results_browsertests
path: results/
- name: Failed Tests
if: needs.test-results.result == 'failure'
if: needs.test-results.result == 'failure' || needs.browser-test-results.result == 'failure'
run: |
# Print test result summary.
shopt -s globstar nullglob
python3 cobalt/tools/junit_mini_parser.py results/**/*.xml
exit 1

if [[ "${{ needs.test-results.result }}" == "failure" || "${{ needs.browser-test-results.result }}" == "failure" ]]; then
exit 1
fi
shell: bash
20 changes: 17 additions & 3 deletions cobalt/browser/metrics/cobalt_metrics_browsertest.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
#include "base/metrics/statistics_recorder.h"
#include "base/run_loop.h"
#include "base/test/metrics/histogram_tester.h"
#include "build/build_config.h"
#include "cobalt/browser/global_features.h"
#include "cobalt/browser/metrics/cobalt_metrics_service_client.h"
#include "cobalt/browser/metrics/cobalt_metrics_services_manager_client.h"
Expand All @@ -40,7 +39,14 @@ class CobaltMetricsBrowserTest : public content::ContentBrowserTest {
}
};

IN_PROC_BROWSER_TEST_F(CobaltMetricsBrowserTest, RecordsMemoryMetrics) {
// TODO: b/483460300 - Investigate memory metrics recording failures on
// Starboard.
#if BUILDFLAG(IS_STARBOARD)
#define MAYBE_RecordsMemoryMetrics DISABLED_RecordsMemoryMetrics
#else
#define MAYBE_RecordsMemoryMetrics RecordsMemoryMetrics
#endif
IN_PROC_BROWSER_TEST_F(CobaltMetricsBrowserTest, MAYBE_RecordsMemoryMetrics) {
base::HistogramTester histogram_tester;

base::ScopedAllowBlockingForTesting allow_blocking;
Expand Down Expand Up @@ -111,7 +117,15 @@ IN_PROC_BROWSER_TEST_F(CobaltMetricsBrowserTest, RecordsMemoryMetrics) {
check_histogram("Memory.Experimental.Browser2.Small.NumberOfNodes");
}

IN_PROC_BROWSER_TEST_F(CobaltMetricsBrowserTest, PeriodicRecordsMemoryMetrics) {
// TODO: b/483460300 - Investigate periodic memory metrics recording failures on
// Starboard.
#if BUILDFLAG(IS_STARBOARD)
#define MAYBE_PeriodicRecordsMemoryMetrics DISABLED_PeriodicRecordsMemoryMetrics
#else
#define MAYBE_PeriodicRecordsMemoryMetrics PeriodicRecordsMemoryMetrics
#endif
IN_PROC_BROWSER_TEST_F(CobaltMetricsBrowserTest,
MAYBE_PeriodicRecordsMemoryMetrics) {
base::HistogramTester histogram_tester;

base::ScopedAllowBlockingForTesting allow_blocking;
Expand Down
Loading
Loading