Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 41 additions & 0 deletions .github/actions/run_benchmark/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# Composite action: run a pytest suite under CodSpeed's benchmark harness and
# forward results to codecov via the local `report` action.
name: 'Run Benchmark'
description: 'Run pytest with benchmarking and process results'

inputs:
  test_path:
    description: 'Path to test files to run'
    required: true
  timeout:
    # Per-test budget in minutes; converted to seconds for pytest-timeout below
    # (same convention as the sibling run_pytest action).
    description: 'Timeout in minutes'
    required: false
    default: '10'
  suite_name:
    description: 'Name of the test suite'
    required: false
    default: ${{ github.job }}
  extra_args:
    description: 'Additional pytest arguments'
    required: false
    default: ''
  codecov_token:
    description: 'Token for uploading to codecov'
    required: false
  codspeed_token:
    description: 'Token for uploading to codspeed'
    required: false

runs:
  using: 'composite'
  steps:
    # The report action expects junit XML under build/test-results/test.
    - name: Make benchmark directory
      shell: bash
      run: mkdir -p build/test-results/test
    - name: Run benchmarks
      uses: CodSpeedHQ/action@v3
      with:
        token: ${{ inputs.codspeed_token }}
        # -el gives a login shell so the uv environment is on PATH.
        # --timeout enforces the `timeout` input (minutes -> seconds); the
        # $(( ... )) arithmetic is evaluated by the inner bash after GitHub
        # substitutes the input value. The "--codspeed" suffix distinguishes
        # this suite's junit report from the plain pytest run of the same job.
        run: bash -el -c "uv run --frozen pytest ${{ inputs.test_path }} ${{ inputs.extra_args }} --timeout=$((${{ inputs.timeout }} * 60)) -o junit_suite_name=${{ inputs.suite_name }}--codspeed"
    - uses: ./.github/actions/report
      with:
        flag: no-flag
        codecov_token: ${{ inputs.codecov_token }}
43 changes: 43 additions & 0 deletions .github/actions/run_pytest/action.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
# Composite action: run a pytest suite with parallelism and a per-test
# timeout, then (optionally) upload coverage via the local `report` action.
name: 'Run Pytest'
description: 'Run pytest with configurable parameters'

inputs:
  test_path:
    description: 'Path to test files to run'
    required: true
  timeout:
    # Minutes; converted to seconds for pytest-timeout in the run step below.
    description: 'Timeout in minutes'
    required: false
    default: '10'
  suite_name:
    # Defaults to the calling job's id so junit reports are distinguishable.
    description: 'Name of the test suite'
    required: false
    default: ${{ github.job }}
  extra_args:
    # Injected unquoted so multiple space-separated flags are supported.
    description: 'Additional pytest arguments'
    required: false
    default: ''
  codecov_token:
    # When empty, the coverage-upload step is skipped entirely.
    description: 'Token for uploading to codecov'
    required: false

runs:
  using: 'composite'
  steps:
    - name: Test with pytest
      shell: bash
      env:
        GITHUB_WORKSPACE: ${{ github.workspace }}
      run: |
        uv run pytest \
          -o junit_suite_name="${{ inputs.suite_name || github.job }}" \
          -n=auto \
          --timeout=$((${{ inputs.timeout }} * 60)) \
          ${{ inputs.extra_args }} \
          ${{ inputs.test_path }}

    # Upload coverage only when a codecov token was actually provided.
    - uses: ./.github/actions/report
      if: inputs.codecov_token != ''
      with:
        flag: no-flag
        codecov_token: ${{ inputs.codecov_token }}
125 changes: 25 additions & 100 deletions .github/workflows/unit-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,20 @@ on:
workflow_dispatch:

jobs:
benchmark-tests:
runs-on: ubuntu-latest-16
steps:
- uses: actions/checkout@v4
- name: Setup backend
uses: ./.github/actions/setup-backend

- name: Run benchmark tests
uses: ./.github/actions/run_benchmark
with:
test_path: tests/benchmark
timeout: 5
codecov_token: ${{ secrets.CODECOV_TOKEN }}
codspeed_token: ${{ secrets.CODSPEED_TOKEN }}
unit-tests:
# changing the following value will significantly affect github's cost. Be careful and consult with the team before changing it.
runs-on: ubuntu-latest-32
Expand All @@ -28,6 +42,7 @@ jobs:
codecov_token: ${{ secrets.CODECOV_TOKEN }}
collect_args: "--timeout 5"
codecov_flags: unit-tests

codemod-tests:
# changing the following value will significantly affect github's cost. Be careful and consult with the team before changing it.
runs-on: ubuntu-latest-32
Expand Down Expand Up @@ -62,34 +77,6 @@ jobs:
env:
GITHUB_WORKSPACE: $GITHUB_WORKSPACE

# test_verified_codemods:
# # changing the following value will significantly affect github's cost. Be careful and consult with the team before changing it.
# if: false
# runs-on: ubuntu-latest-32
# environment: testing
# concurrency:
# group: ${{ github.workflow }}-${{github.ref}}-${{github.event_name == 'push'&& github.sha}}
# cancel-in-progress: true
# name: "Verified Codemod tests: Sync Graph=false"
# steps:
# - uses: actions/checkout@v4
# - name: Setup backend
# uses: ./.github/actions/setup-backend
# - name: Run ATS and Tests
# uses: ./.github/actions/run_ats
# with:
# default_tests: "tests/integration/codemod/test_verified_codemods.py"
# codecov_static_token: ${{ secrets.CODECOV_STATIC_TOKEN }}
# codecov_token: ${{ secrets.CODECOV_TOKEN }}
# collect_args: "--cli-api-key ${{ secrets.PROD_CLI_API_KEY }} --token ${{ secrets.CODEGEN_BOT_GHE_TOKEN }}"
# ats_collect_args: "--cli-api-key=${{ secrets.PROD_CLI_API_KEY }},--token=${{ secrets.CODEGEN_BOT_GHE_TOKEN }},"
# base_sha: ${{github.event_name == 'pull_request' && github.event.pull_request.base.sha || github.event.before}}
# job_name: ${{ github.job }}
# codecov_flags: smart-tests-verified-codemod-tests
# env:
# CODEGEN_BOT_GHE_TOKEN: ${{ secrets.CODEGEN_BOT_GHE_TOKEN }}
# GITHUB_WORKSPACE: $GITHUB_WORKSPACE

parse-tests:
# changing the following value will significantly affect github's cost. Be careful and consult with the team before changing it.
runs-on: ubuntu-latest-32
Expand All @@ -99,28 +86,20 @@ jobs:
- uses: actions/checkout@v4
- name: Setup backend
uses: ./.github/actions/setup-backend

- name: Cache oss-repos
uses: ./.github/actions/setup-oss-repos

- name: Install yarn and pnpm
run: |
npm install -g yarn &
npm install -g pnpm

- name: Test with pytest
timeout-minutes: 15
env:
GITHUB_WORKSPACE: $GITHUB_WORKSPACE
run: |
uv run pytest \
-n auto \
-o junit_suite_name="${{github.job}}" \
tests/integration/codemod/test_parse.py
- uses: ./.github/actions/report
- name: Run benchmark tests
uses: ./.github/actions/run_benchmark
with:
flag: no-flag
test_path: tests/integration/codemod/test_parse.py
extra_args: "--codspeed-max-rounds=1"
timeout: 15
codecov_token: ${{ secrets.CODECOV_TOKEN }}
codspeed_token: ${{ secrets.CODSPEED_TOKEN }}
- name: Notify parse tests failure
uses: slackapi/[email protected]
if: failure() && github.event_name == 'push' && false
Expand Down Expand Up @@ -156,70 +135,16 @@ jobs:
}
]
}
# test_codemod_diffs:
# # changing the following value will significantly affect github's cost. Be careful and consult with the team before changing it.
# runs-on: ubuntu-latest-16
#
# steps:
# - uses: actions/checkout@v4
# - name: Setup backend
# uses: ./.github/actions/setup-backend
# - name: Cache oss-repos
# uses: ./.github/actions/setup-oss-repos
# with:
# CODEGEN_BOT_GHE_TOKEN: ${{ secrets.CODEGEN_BOT_GHE_TOKEN }}
# - name: Test with pytest
# timeout-minutes: 10
# run: |
# ENV=local \
# uv run pytest \
# -n auto \
# -vv \
# --token $CODEGEN_BOT_GHE_TOKEN \
# tests/codemod/test_diffs.py
# env:
# CODEGEN_BOT_GHE_TOKEN: ${{ secrets.CODEGEN_BOT_GHE_TOKEN }}
# GITHUB_WORKSPACE: $GITHUB_WORKSPACE
#
# - name: Publish Test Report (Verify diffs)
# uses: mikepenz/action-junit-report@v4
# if: (success() || failure()) # always publish report even if the tests fail
# continue-on-error: true
# with:
# report_paths: "**/build/test-results/test/TEST.xml"
# detailed_summary: true
# annotate_only: true

# # Codecov is a required check but won't pass without a coverage report
# # When there are no changes in the backend (ex: frontend only change) we do an empty upload to force the check to pass
# - name: Upload empty coverage report to Codecov
# if: env.skip == '0'
# continue-on-error: true
# env:
# CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
# run: |
# cd codegen-backend
# pip install codecov-cli
# codecovcli create-commit -C ${{ github.event.pull_request.head.sha }}
# codecovcli create-report -C ${{ github.event.pull_request.head.sha }}
# codecovcli do-upload --disable-search --file empty_coverage.xml -C ${{ github.event.pull_request.head.sha }}
# codecovcli empty-upload --force -C ${{ github.event.pull_request.head.sha }}
integration-tests:
runs-on: ubuntu-latest-16
steps:
- uses: actions/checkout@v4
- name: Setup backend
uses: ./.github/actions/setup-backend
- name: Test with pytest
timeout-minutes: 5
env:
GITHUB_WORKSPACE: $GITHUB_WORKSPACE
run: |
uv run pytest \
-n auto \
-o junit_suite_name="${{github.job}}" \
tests/integration/codegen
- uses: ./.github/actions/report
- name: Run pytest
uses: ./.github/actions/run_pytest
with:
flag: integration-tests
test_path: tests/integration/codegen
timeout: 5
codecov_token: ${{ secrets.CODECOV_TOKEN }}
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -65,3 +65,4 @@ graph-sitter-types/typings/**
coverage.json
tests/integration/verified_codemods/codemod_data/repo_commits.json
.benchmarks/*
.codspeed/*
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -140,8 +140,8 @@ dev-dependencies = [
"black>=24.8.0",
"isort>=5.13.2",
"emoji>=2.14.0",
"pytest-benchmark[histogram]>=5.1.0",
"loguru>=0.7.3",
"pytest-codspeed>=3.2.0",
]
keyring-provider = "subprocess"
#extra-index-url = ["https://[email protected]/pypi/codegen/simple/"]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,9 +31,9 @@ def reset_codebase(codebase: Codebase):
def test_codebase_reset_stress_test(extension: str, tmp_path, benchmark):
def setup():
codebase, _ = setup_codebase(NUM_FILES, extension, tmp_path)
return ((codebase,), {})
return codebase

benchmark.pedantic(reset_codebase, setup=setup)
benchmark(lambda: reset_codebase(setup()))


@pytest.mark.skip("Skipping this test for now")
Expand Down
9 changes: 5 additions & 4 deletions tests/integration/codemod/test_parse.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@

import psutil
import pytest
from pytest_codspeed import BenchmarkFixture

from codegen.git.repo_operator.repo_operator import RepoOperator
from codegen.sdk.codebase.config import CodebaseConfig, DefaultFlags, ProjectConfig
Expand All @@ -18,7 +19,7 @@


@pytest.mark.timeout(60 * 12, func_only=True)
def test_codemods_parse(repo: Repo, op: RepoOperator, request) -> None:
def test_codemods_parse(repo: Repo, op: RepoOperator, request, codspeed_benchmark: BenchmarkFixture) -> None:
# Setup Feature Flags
if repo.feature_flags is not None:
feature_flags = repo.feature_flags
Expand All @@ -39,9 +40,9 @@ def test_codemods_parse(repo: Repo, op: RepoOperator, request) -> None:
# Setup Codebase
config = CodebaseConfig(feature_flags=feature_flags)
projects = [ProjectConfig(repo_operator=op, programming_language=repo.language, subdirectories=repo.subdirectories)]
codebase = Codebase(projects=projects, config=config)
process = psutil.Process(os.getpid())
memory_used = process.memory_info().rss
codebase = codspeed_benchmark(lambda: Codebase(projects=projects, config=config))
memory_used = psutil.Process(os.getpid()).memory_info().rss
codspeed_benchmark.extra_info["memory_used"] = memory_used
logger.info(f"Using {memory_used / BYTES_IN_GIGABYTE} GB of memory.")
assert memory_used <= BYTES_IN_GIGABYTE * MAX_ALLOWED_GIGABYTES, "Graph is using too much memory!"
validation_res = post_init_validation(codebase)
Expand Down
Loading
Loading