|
#!/usr/bin/env bash
# Run a bounded sample of the tc39/test262 conformance suite against the
# engine's `js` example binary, logging results to test262-results.log.
set -euo pipefail

# Maximum number of tests actually executed (--limit).
LIMIT=100
# When "true", exit non-zero at the end if any test failed (--fail-on-failure).
FAIL_ON_FAILURE=false
# Comma-separated list of features to skip (default: Intl)
SKIP_FEATURES="${SKIP_FEATURES:-Intl}"
# Cap multiplier (LIMIT * CAP_MULTIPLIER) can be set via env or CLI; default 5
CAP_MULTIPLIER="${CAP_MULTIPLIER:-5}"
# Focus (comma-separated) e.g., language,built-ins,intl - can be set via env or CLI
FOCUS="${FOCUS:-}"

# Print CLI usage to stdout (used by --help and on argument errors).
usage() {
  cat <<EOF
Usage: $0 [--limit N] [--fail-on-failure] [--cap-multiplier N] [--focus name]

--limit N Run at most N tests (default: 100)
--fail-on-failure Exit non-zero if any test fails (default: false)
--cap-multiplier N Cap multiplier used when collecting candidates (search cap = LIMIT * CAP_MULTIPLIER). Can also set env CAP_MULTIPLIER (default: 5)
--focus name Comma-separated focus areas (language,built-ins,intl) or subdirs under test/; can also set env FOCUS
EOF
}
| 23 | + |
# --------------------------------------------------------------------------
# CLI argument parsing. Flags override the env-derived defaults above.
# Numeric options are validated up front: previously `--limit` with a
# missing value died with a cryptic `set -u` unbound-variable error, and a
# non-numeric value silently corrupted the CAP arithmetic later on.
# Diagnostics go to stderr so they do not pollute captured stdout.
# --------------------------------------------------------------------------
while [[ $# -gt 0 ]]; do
  case $1 in
    --limit)
      [[ "${2:-}" =~ ^[0-9]+$ ]] || { echo "Error: --limit requires a non-negative integer" >&2; usage >&2; exit 1; }
      LIMIT="$2"; shift 2;;
    --fail-on-failure)
      FAIL_ON_FAILURE=true; shift;;
    --cap-multiplier)
      [[ "${2:-}" =~ ^[0-9]+$ ]] || { echo "Error: --cap-multiplier requires a non-negative integer" >&2; usage >&2; exit 1; }
      CAP_MULTIPLIER="$2"; shift 2;;
    --focus)
      [[ -n "${2:-}" ]] || { echo "Error: --focus requires a value" >&2; usage >&2; exit 1; }
      FOCUS="$2"; shift 2;;
    -h|--help)
      usage; exit 0;;
    *)
      echo "Unknown argument: $1" >&2; usage >&2; exit 1;;
  esac
done
| 40 | + |
# Location of the tc39/test262 checkout and the per-run results log.
REPO_DIR=test262
RESULTS_FILE=test262-results.log

# Truncate (or create) the results log for this run.
: > "$RESULTS_FILE"

# Fetch a shallow test262 checkout on first use.
[[ -d "$REPO_DIR" ]] || {
  echo "Cloning test262..."
  git clone --depth 1 https://github.com/tc39/test262.git "$REPO_DIR"
}
| 49 | + |
# Per-run counters: executed, passed, failed, skipped.
n=0
pass=0
fail=0
skip=0

echo "Building engine example..."
cargo build --example js --all-features

# Locate the compiled example binary by probing the usual target paths.
BIN=""
for candidate in "target/debug/examples/js" "target/debug/js"; do
  if [[ -x "$candidate" ]]; then
    BIN="$candidate"
    break
  fi
done

# Fall back to `cargo run` when no prebuilt binary was found.
if [[ -z "$BIN" ]]; then
  echo "Warning: example binary not found, will use 'cargo run --example js --' (slower)"
  RUN_CMD="cargo run --example js --"
else
  RUN_CMD="$BIN"
fi

# Index harness files by basename so include resolution can skip repeated
# `find` invocations over the repository.
declare -A HARNESS_INDEX
while IFS= read -r -d '' harness_file; do
  HARNESS_INDEX["${harness_file##*/}"]="$harness_file"
done < <(find "$REPO_DIR/harness" -type f -print0)
| 77 | + |
# Search cap: collect more candidates than LIMIT so that skipped tests
# (Intl, module, negative, ...) still leave enough runnable ones.
CAP=$((LIMIT * CAP_MULTIPLIER))

# Resolve FOCUS (env or CLI) into the list of directories to scan.
SEARCH_DIRS=()
if [[ -n "$FOCUS" ]]; then
  IFS=',' read -ra TOKS <<< "$FOCUS"
  for tok in "${TOKS[@]}"; do
    tok="${tok// /}"   # tolerate spaces after commas
    case "$tok" in
      language) SEARCH_DIRS+=("$REPO_DIR/test/language") ;;
      built-ins|builtins) SEARCH_DIRS+=("$REPO_DIR/test/built-ins") ;;
      intl) SEARCH_DIRS+=("$REPO_DIR/test/intl402") ;;
      all) SEARCH_DIRS+=("$REPO_DIR/test") ;;
      *)
        if [[ -d "$REPO_DIR/test/$tok" ]]; then
          SEARCH_DIRS+=("$REPO_DIR/test/$tok")
        elif [[ -d "$tok" ]]; then
          SEARCH_DIRS+=("$tok")
        else
          # Previously unknown tokens were dropped silently, making an
          # all-invalid FOCUS look like "no tests found".
          echo "Warning: ignoring unknown focus area '$tok'" >&2
        fi
        ;;
    esac
  done
fi

# Fall back to the whole suite when FOCUS is unset or resolved to nothing.
# An empty SEARCH_DIRS would otherwise search nothing, and expanding an
# empty array also trips `set -u` on bash < 4.4.
if [[ ${#SEARCH_DIRS[@]} -eq 0 ]]; then
  SEARCH_DIRS+=("$REPO_DIR/test")
fi
| 104 | + |
| 105 | +echo "Collecting up to $CAP candidate tests (LIMIT=$LIMIT, CAP_MULTIPLIER=$CAP_MULTIPLIER). Search dirs: ${SEARCH_DIRS[*]}" |
| 106 | + |
| 107 | +basic=() |
| 108 | +other=() |
| 109 | +intl_tests=() |
| 110 | +for dir in "${SEARCH_DIRS[@]}"; do |
| 111 | + if [[ ! -d "$dir" ]]; then |
| 112 | + continue |
| 113 | + fi |
| 114 | + while IFS= read -r -d '' f; do |
| 115 | + meta=$(awk '/\/\*---/{flag=1; next} /---\*\//{flag=0} flag{print}' "$f" || true) |
| 116 | + if (echo "$meta" | grep -q 'features:' && echo "$meta" | grep -q 'Intl') || grep -q '\<Intl\>' "$f"; then |
| 117 | + intl_tests+=("$f") |
| 118 | + elif echo "$meta" | grep -q 'includes:' || echo "$meta" | grep -Eq 'flags:\s*\[.*module.*\]' || echo "$meta" | grep -q 'negative:' || echo "$meta" | grep -q 'features:'; then |
| 119 | + other+=("$f") |
| 120 | + else |
| 121 | + basic+=("$f") |
| 122 | + fi |
| 123 | + |
| 124 | + if [[ $(( ${#basic[@]} + ${#other[@]} + ${#intl_tests[@]} )) -ge $CAP ]]; then |
| 125 | + break 2 |
| 126 | + fi |
| 127 | + done < <(find "$dir" -name '*.js' -print0) |
| 128 | +done |
| 129 | + |
| 130 | +echo "Collected: basic=${#basic[@]} other=${#other[@]} intl=${#intl_tests[@]} (total=$((${#basic[@]}+${#other[@]}+${#intl_tests[@]})))" |
| 131 | + |
| 132 | +ordered=("${basic[@]}" "${other[@]}" "${intl_tests[@]}") |
| 133 | + |
# ---------------------------------------------------------------------------
# Run tests from the ordered candidate list.
#
# Per test: read the /*--- ... ---*/ metadata, apply all metadata-only skip
# rules FIRST (previously the module/noStrict/negative skips ran after the
# include-concatenation step and `continue`d without removing the temp file,
# leaking one file in /tmp per skipped test), then resolve harness includes
# into a temporary concatenated file, then execute under a timeout.
# ---------------------------------------------------------------------------

# Scratch file for per-test engine output. The old fixed path
# /tmp/test262_run_out was predictable and shared; mktemp avoids clobbering
# and symlink tricks, and the trap removes it on any exit path.
RUN_OUT=$(mktemp /tmp/test262_run_out.XXXXXX)
trap 'rm -f "$RUN_OUT"' EXIT

for f in "${ordered[@]}"; do
  # Stop as soon as LIMIT tests have been executed (checked up front so we
  # don't keep grepping metadata for tests we will never run).
  if [[ $n -ge $LIMIT ]]; then
    break
  fi

  # extract metadata inside /*--- ... ---*/
  meta=$(awk '/\/\*---/{flag=1; next} /---\*\//{flag=0} flag{print}' "$f" || true)

  # --- metadata-only skip rules (no temp files exist yet) -----------------
  # skip tests that declare the Intl feature (fast path)
  if echo "$meta" | grep -q 'features:' && echo "$meta" | grep -q 'Intl'; then
    skip=$((skip+1))
    echo "SKIP (feature: Intl) $f" >> "$RESULTS_FILE"
    continue
  fi
  # also skip if the source mentions the Intl symbol and SKIP_FEATURES says so
  if echo "$SKIP_FEATURES" | tr ',' '\n' | grep -qx "Intl" && grep -q '\<Intl\>' "$f"; then
    skip=$((skip+1))
    echo "SKIP (contains Intl) $f" >> "$RESULTS_FILE"
    continue
  fi
  # module tests need a module loader the runner does not provide
  if echo "$meta" | grep -Eq 'flags:\s*\[.*module.*\]'; then
    skip=$((skip+1))
    echo "SKIP (module) $f" >> "$RESULTS_FILE"
    continue
  fi
  # skip tests that require non-strict mode (noStrict)
  if echo "$meta" | grep -Eq 'flags:\s*\[.*noStrict.*\]'; then
    skip=$((skip+1))
    echo "SKIP (noStrict) $f" >> "$RESULTS_FILE"
    continue
  fi
  # negative tests expect a specific error; the runner cannot check that
  if echo "$meta" | grep -q 'negative:'; then
    skip=$((skip+1))
    echo "SKIP (negative) $f" >> "$RESULTS_FILE"
    continue
  fi

  # --- resolve `includes:` harness files ----------------------------------
  tmp=""
  cleanup_tmp=false
  resolved_includes=()
  missing=false
  includes_list=$(echo "$meta" | sed -n "s/^includes:[[:space:]]*\[\(.*\)\].*/\1/p" || true)
  if [[ -n "$includes_list" ]]; then
    IFS=',' read -ra INCS <<< "$(echo "$includes_list" | tr -d '[:space:]')"
    for inc in "${INCS[@]}"; do
      inc=${inc//\"/}
      inc=${inc//\'/}
      # harness index first; fall back to a repo-wide search
      inc_path="${HARNESS_INDEX[$inc]:-}"
      if [[ -z "$inc_path" ]]; then
        inc_path=$(find "$REPO_DIR" -type f -name "$inc" -print -quit 2>/dev/null || true)
      fi
      if [[ -z "$inc_path" ]]; then
        echo "MISSING INCLUDE $inc for $f" >> "$RESULTS_FILE"
        missing=true
        break
      fi
      resolved_includes+=("$inc_path")
    done
  fi

  # Bail out before any assert-prepend work if an include was unresolvable
  # (previously that work ran even when `missing` was already true).
  if $missing; then
    skip=$((skip+1))
    echo "SKIP (missing-include) $f" >> "$RESULTS_FILE"
    continue
  fi

  # If the test references `assert` but no resolved include supplies it,
  # prepend harness/assert.js (and sta.js, which defines Test262Error).
  # This unifies the old includes/no-includes duplication.
  if grep -q '\<assert\>' "$f"; then
    have_assert=false
    for p in ${resolved_includes[@]+"${resolved_includes[@]}"}; do
      if grep -qE 'function[[:space:]]+assert|var[[:space:]]+assert|assert\.sameValue|assert\.throws' "$p"; then
        have_assert=true; break
      fi
    done
    if ! $have_assert; then
      assert_path="${HARNESS_INDEX['assert.js']:-}"
      if [[ -n "$assert_path" ]]; then
        sta_path="${HARNESS_INDEX['sta.js']:-}"
        if [[ -n "$sta_path" ]]; then
          resolved_includes=("$sta_path" "$assert_path" ${resolved_includes[@]+"${resolved_includes[@]}"})
        else
          resolved_includes=("$assert_path" ${resolved_includes[@]+"${resolved_includes[@]}"})
        fi
      fi
    fi
  fi

  # Concatenate harness files + test into a temporary file when needed.
  if [[ ${#resolved_includes[@]} -gt 0 ]]; then
    tmp=$(mktemp /tmp/test262.XXXXXX.js)
    for p in "${resolved_includes[@]}"; do
      cat "$p" >> "$tmp"
      printf '\n\n' >> "$tmp"
    done
    cat "$f" >> "$tmp"
    cleanup_tmp=true
  fi

  n=$((n+1))

  test_to_run="$f"
  if [[ "$cleanup_tmp" == "true" && -n "$tmp" ]]; then
    test_to_run="$tmp"
  fi

  echo "RUN $f"
  # Run under a timeout to avoid hangs. RUN_CMD is intentionally unquoted:
  # it may be the multi-word `cargo run --example js --` fallback.
  if timeout 10s $RUN_CMD "$test_to_run" > "$RUN_OUT" 2>&1; then
    echo "PASS $f" | tee -a "$RESULTS_FILE"
    pass=$((pass+1))
  else
    echo "FAIL $f" | tee -a "$RESULTS_FILE"
    echo "---- OUTPUT ----" >> "$RESULTS_FILE"
    cat "$RUN_OUT" >> "$RESULTS_FILE"
    echo "----------------" >> "$RESULTS_FILE"
    fail=$((fail+1))
  fi

  # cleanup temporary test file if created
  if [[ "$cleanup_tmp" == "true" && -n "$tmp" ]]; then
    rm -f "$tmp"
  fi

done
| 282 | + |
# Final summary: counts to stdout, then an optional hard failure when the
# caller asked for --fail-on-failure and at least one test failed.
printf '%s\n' "Ran $n tests: pass=$pass fail=$fail skip=$skip"
printf '%s\n' "Details in $RESULTS_FILE"

if [[ "$FAIL_ON_FAILURE" == "true" ]] && [[ $fail -gt 0 ]]; then
  printf '%s\n' "One or more tests failed. Exiting with failure as requested."
  exit 1
fi