Commit b6b7eb8

Refactor and enhance the test-target e2e test step (#21030)

* refactor test
* refactor

1 parent: dbde384 · commit: b6b7eb8
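In outline, the commit collapses the two duplicated '-m flaky' / '-m "not flaky"' branches into a single test and hoists the shared exit-code handling out of the branches. A minimal sketch of the consolidated control flow, where run_tests is a hypothetical stub standing in for the full ddev invocation and '<inputs.pytest-args>' is a placeholder for the workflow input (neither is part of the commit):

    # hypothetical stand-in for `ddev env test ...`; prints instead of testing
    run_tests() { echo "would run: ddev env test $*"; }

    set +e                                # don't abort on failure; inspect the status ourselves
    PYTEST_ARGS='<inputs.pytest-args>'    # placeholder for the workflow input

    if [ "$PYTEST_ARGS" = '-m flaky' ] || [ "$PYTEST_ARGS" = '-m "not flaky"' ]; then
      # marker filters are passed through explicitly, plus the standing "not fips" deselect
      run_tests -- $PYTEST_ARGS -k "not fips"
      exit_code=$?
    else
      # no (or other) pytest args: run the default environment selection
      run_tests
      exit_code=$?
    fi

    if [ "$exit_code" -eq 5 ]; then       # pytest exit status 5: no tests were collected
      echo "No tests were collected."
      exit 0
    else
      exit "$exit_code"
    fi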

File tree

1 file changed: +26 −42 lines

.github/workflows/test-target.yml

@@ -274,30 +274,22 @@ jobs:
           # ddev will interpret '-m' as an environment to run the e2e test on and fails
           # This is not required when no pytest args are provided and it will run all environments
           # by default
-          if [ '${{ inputs.pytest-args }}' = '-m flaky' ]; then
-            set +e # Disable immediate exit
-            ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} -- ${{ inputs.pytest-args }} -k "not fips"
-            exit_code=$?
-            if [ $exit_code -eq 5 ]; then
-              # Flaky test count can be zero, this is done to avoid pipeline failure
-              echo "No tests were collected."
-              exit 0
-            else
-              exit $exit_code
-            fi
-          elif [ '${{ inputs.pytest-args }}' = '-m "not flaky"' ]; then
-            set +e # Disable immediate exit
-            ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} -- ${{ inputs.pytest-args }} -k "not fips"
+          set +e # Disable immediate exit
+          PYTEST_ARGS='${{ inputs.pytest-args }}'
+
+          if [ "$PYTEST_ARGS" = '-m flaky' ] || [ "$PYTEST_ARGS" = '-m "not flaky"' ]; then
+            ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} -- $PYTEST_ARGS -k "not fips"
             exit_code=$?
-            if [ $exit_code -eq 5 ]; then
-              # Flaky test count can be zero, this is done to avoid pipeline failure
-              echo "No tests were collected."
-              exit 0
-            else
-              exit $exit_code
-            fi
           else
             ddev env test --base --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} ${{ inputs.pytest-args != '' && format('-- {0} -k "not fips"', inputs.pytest-args) || '-- -k "not fips"' }}
+            exit_code=$?
+          fi
+
+          if [ "$exit_code" -eq 5 ]; then
+            echo "No tests were collected."
+            exit 0
+          else
+            exit "$exit_code"
           fi

       - name: Run E2E tests
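The exit-code special case exists because pytest returns status 5 when it collects zero tests, which is a legitimate outcome for a '-m flaky' filter. A standalone sketch of that pattern outside CI, assuming pytest is installed and a tests/ directory exists (both assumptions, not part of the commit):

    #!/usr/bin/env bash
    set +e                    # mirrors the workflow; GitHub Actions runs bash with -e by default
    pytest -m flaky tests/    # assumed test directory; the marker filter may match nothing
    exit_code=$?

    if [ "$exit_code" -eq 5 ]; then
      # pytest's "no tests collected" status; treat as success instead of failing the job
      echo "No tests were collected."
      exit 0
    else
      exit "$exit_code"
    fi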
@@ -312,30 +304,22 @@
           # ddev will interpret '-m' as an environment to run the e2e test on and fails
           # This is not required when no pytest args are provided and it will run all environments
           # by default
-          if [ '${{ inputs.pytest-args }}' = '-m flaky' ]; then
-            set +e # Disable immediate exit
-            ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} -- ${{ inputs.pytest-args }} -k "not fips"
-            exit_code=$?
-            if [ $exit_code -eq 5 ]; then
-              # Flaky test count can be zero, this is done to avoid pipeline failure
-              echo "No tests were collected."
-              exit 0
-            else
-              exit $exit_code
-            fi
-          elif [ '${{ inputs.pytest-args }}' = '-m "not flaky"' ]; then
-            set +e # Disable immediate exit
-            ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} -- ${{ inputs.pytest-args }} -k "not fips"
+          set +e # Disable immediate exit
+          PYTEST_ARGS='${{ inputs.pytest-args }}'
+
+          if [ "$PYTEST_ARGS" = '-m flaky' ] || [ "$PYTEST_ARGS" = '-m "not flaky"' ]; then
+            ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} -- $PYTEST_ARGS -k "not fips"
             exit_code=$?
-            if [ $exit_code -eq 5 ]; then
-              # Flaky test count can be zero, this is done to avoid pipeline failure
-              echo "No tests were collected."
-              exit 0
-            else
-              exit $exit_code
-            fi
           else
             ddev env test --new-env --junit ${{ inputs.target }} ${{ inputs.target-env || 'all' }} ${{ inputs.pytest-args != '' && format('-- {0} -k "not fips"', inputs.pytest-args) || '-- -k "not fips"' }}
+            exit_code=$?
+          fi
+
+          if [ "$exit_code" -eq 5 ]; then
+            echo "No tests were collected."
+            exit 0
+          else
+            exit "$exit_code"
           fi

       - name: Run benchmarks
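One shell detail in the new branch: $PYTEST_ARGS is expanded unquoted, so bash word-splits the value and keeps any quote characters produced by the expansion literal rather than re-parsing them. A quick illustration, not part of the commit:

    PYTEST_ARGS='-m flaky'
    printf '<%s>\n' $PYTEST_ARGS    # two words: <-m> <flaky>

    PYTEST_ARGS='-m "not flaky"'
    printf '<%s>\n' $PYTEST_ARGS    # three words, quotes literal: <-m> <"not> <flaky">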
