|
| 1 | +from math import sqrt |
| 2 | +import pytest |
| 3 | + |
| 4 | +from pytest_cases.common_pytest_marks import has_pytest_param |
| 5 | +from pytest_cases import parametrize_with_cases |
| 6 | + |
| 7 | + |
def case_int_success():
    """A valid dataset: a positive integer whose square root is defined."""
    valid_value = 1
    return valid_value
| 10 | + |
| 11 | + |
def case_negative_int_failure():
    """A failing dataset.

    Besides the bad value itself, we also return the expected exception type
    and message so that the test can verify the exact failure mode.
    """
    bad_value = -1
    expected_error = ValueError
    expected_message = "math domain error"
    return bad_value, expected_error, expected_message
| 15 | + |
| 16 | + |
@parametrize_with_cases("data", cases='.', glob="*success")
def test_good_datasets(data):
    """Every case whose id matches ``*success`` must have a positive sqrt."""
    result = sqrt(data)
    assert result > 0
| 20 | + |
| 21 | + |
@parametrize_with_cases("data, err_type, err_msg", cases='.', glob="*failure")
def test_bad_datasets(data, err_type, err_msg):
    """Every ``*failure`` case must make sqrt raise the recorded error."""
    # The case itself tells us which exception type and message to expect.
    expected_failure = pytest.raises(err_type, match=err_msg)
    with expected_failure:
        sqrt(data)
| 26 | + |
| 27 | + |
def test_synthesis(module_results_dct):
    """Verify the ids of the tests that ran so far in this module.

    The generated ids differ depending on whether the installed pytest
    supports ``pytest.param`` (older versions expand tuple cases into
    several argvalues, hence the longer id in the fallback branch).
    """
    if has_pytest_param:
        expected_ids = [
            'test_good_datasets[int_success]',
            'test_bad_datasets[negative_int_failure]'
        ]
    else:
        expected_ids = [
            'test_good_datasets[int_success]',
            'test_bad_datasets[negative_int_failure[0]-negative_int_failure[1]-negative_int_failure[2]]'
        ]
    assert list(module_results_dct) == expected_ids
| 39 | + |
| 40 | + |
def create_filter(sub_str):
    """Return a case filter selecting cases whose id contains *sub_str*.

    The returned callable receives a case function and checks the case id
    stored by pytest_cases on its ``_pytestcase`` attribute.
    """
    def _contains_substring(case_func):
        case_id = case_func._pytestcase.id
        return sub_str in case_id
    return _contains_substring
| 45 | + |
| 46 | + |
# Consistency fix: the module already defines `create_filter` for exactly this
# purpose (and `test_bad_datasets2` uses it); reuse it instead of duplicating
# the same `_pytestcase.id` lookup in an inline lambda.
@parametrize_with_cases("data", cases='.', filter=create_filter("success"))
def test_good_datasets2(data):
    """Cases selected by the ``filter`` argument: ids containing 'success'."""
    assert sqrt(data) > 0
| 50 | + |
| 51 | + |
@parametrize_with_cases("data, err_type, err_msg", cases='.', filter=create_filter("failure"))
def test_bad_datasets2(data, err_type, err_msg):
    """Filtered counterpart of ``test_bad_datasets``: ids containing 'failure'."""
    failure_ctx = pytest.raises(err_type, match=err_msg)
    with failure_ctx:
        sqrt(data)
| 56 | + |
| 57 | + |
def test_synthesis2(module_results_dct):
    """Verify the full ordered list of test ids executed in this module.

    As in ``test_synthesis``, the expected ids depend on whether
    ``pytest.param`` is available in the installed pytest version.
    """
    if has_pytest_param:
        expected_ids = [
            'test_good_datasets[int_success]',
            'test_bad_datasets[negative_int_failure]',
            'test_synthesis',
            'test_good_datasets2[int_success]',
            'test_bad_datasets2[negative_int_failure]'
        ]
    else:
        expected_ids = [
            'test_good_datasets[int_success]',
            'test_bad_datasets[negative_int_failure[0]-negative_int_failure[1]-negative_int_failure[2]]',
            'test_synthesis',
            'test_good_datasets2[int_success]',
            'test_bad_datasets2[negative_int_failure[0]-negative_int_failure[1]-negative_int_failure[2]]'
        ]
    assert list(module_results_dct) == expected_ids