diff --git a/.github/workflows/style.yaml b/.github/workflows/style.yaml
index a451ac89d..3c2da4421 100644
--- a/.github/workflows/style.yaml
+++ b/.github/workflows/style.yaml
@@ -6,12 +6,10 @@ on:
     branches:
       - main
       - dev
-      - update-workflows
   push:
     branches:
       - main
       - dev
-      - update-workflows
 
 jobs:
   check-code-style:
diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index f90389f69..ab3d03078 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -24,7 +24,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest, windows-latest]
-        python-version: ["3.10", "3.11"]
+        python-version: ["3.10"] # we usually only need to test the oldest python version
         backend: ["jax", "tensorflow", "torch"]
 
     runs-on: ${{ matrix.os }}
diff --git a/tests/conftest.py b/tests/conftest.py
index 6e1e69db1..560b7c59b 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -41,7 +41,7 @@ def pytest_make_parametrize_id(config, val, argname):
     return f"{argname}={repr(val)}"
 
 
-@pytest.fixture(params=[2, 3], scope="session")
+@pytest.fixture(params=[2], scope="session")
 def batch_size(request):
     return request.param
 
@@ -94,33 +94,3 @@ def random_set(batch_size, set_size, feature_size):
 @pytest.fixture(params=[2, 3], scope="session")
 def set_size(request):
     return request.param
-
-
-@pytest.fixture(params=["two_moons"], scope="session")
-def simulator(request):
-    return request.getfixturevalue(request.param)
-
-
-@pytest.fixture(scope="session")
-def training_dataset(simulator, batch_size):
-    from bayesflow.datasets import OfflineDataset
-
-    num_batches = 128
-    samples = simulator.sample((num_batches * batch_size,))
-    return OfflineDataset(samples, batch_size=batch_size)
-
-
-@pytest.fixture(scope="session")
-def two_moons(batch_size):
-    from bayesflow.simulators import TwoMoonsSimulator
-
-    return TwoMoonsSimulator()
-
-
-@pytest.fixture(scope="session")
-def validation_dataset(simulator, batch_size):
-    from bayesflow.datasets import OfflineDataset
-
-    num_batches = 16
-    samples = simulator.sample((num_batches * batch_size,))
-    return OfflineDataset(samples, batch_size=batch_size)
diff --git a/tests/test_approximators/test_point_approximators/__init__.py b/tests/test_approximators/test_point_approximators/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_approximators/test_point_approximators/conftest.py b/tests/test_approximators/test_point_approximators/conftest.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_distributions/conftest.py b/tests/test_distributions/conftest.py
index 29c5b4139..e06ed18af 100644
--- a/tests/test_distributions/conftest.py
+++ b/tests/test_distributions/conftest.py
@@ -3,7 +3,7 @@
 import keras
 
 
-@pytest.fixture(params=[2, 3])
+@pytest.fixture(params=[2])
 def batch_size(request):
     return request.param
 
diff --git a/tests/test_links/conftest.py b/tests/test_links/conftest.py
index 53e9eeac8..be7730ef2 100644
--- a/tests/test_links/conftest.py
+++ b/tests/test_links/conftest.py
@@ -82,8 +82,3 @@ def quantiles(request):
 @pytest.fixture()
 def unordered(batch_size, num_quantiles, num_variables):
     return keras.random.normal((batch_size, num_quantiles, num_variables))
-
-
-# @pytest.fixture()
-# def random_matrix_batch(batch_size, num_variables):
-#     return keras.random.normal((batch_size, num_variables, num_variables))
diff --git a/tests/test_networks/test_summary_networks.py b/tests/test_networks/test_summary_networks.py
index 50e1726c1..74ce1f5fd 100644
--- a/tests/test_networks/test_summary_networks.py
+++ b/tests/test_networks/test_summary_networks.py
@@ -103,7 +103,7 @@ def test_save_and_load(tmp_path, summary_network, random_set):
 @pytest.mark.parametrize("stage", ["training", "validation"])
 def test_compute_metrics(stage, summary_network, random_set):
     if summary_network is None:
-        pytest.skip()
+        pytest.skip("Nothing to do, because there is no summary network.")
 
     summary_network.build(keras.ops.shape(random_set))
diff --git a/tests/utils/check_combinations.py b/tests/utils/check_combinations.py
index 8d3fa5d46..8565703c8 100644
--- a/tests/utils/check_combinations.py
+++ b/tests/utils/check_combinations.py
@@ -13,12 +13,12 @@ def check_combination_simulator_adapter(simulator, adapter):
         with pytest.raises(KeyError):
             adapter(simulator.sample(1))
         # Don't use this fixture combination for further tests.
-        pytest.skip()
+        pytest.skip(reason="Do not use this fixture combination for further tests")  # TODO: better reason
     elif simulator_with_sample_weight and not adapter_with_sample_weight:
         # When a weight key is present, but the adapter does not configure it
         # to be used as sample weight, no error is raised currently.
         # Don't use this fixture combination for further tests.
-        pytest.skip()
+        pytest.skip(reason="Do not use this fixture combination for further tests")  # TODO: better reason
 
 
 def check_approximator_multivariate_normal_score(approximator):
@@ -28,4 +28,4 @@ def check_approximator_multivariate_normal_score(approximator):
     if isinstance(approximator, PointApproximator):
         for score in approximator.inference_network.scores.values():
             if isinstance(score, MultivariateNormalScore):
-                pytest.skip()
+                pytest.skip(reason="MultivariateNormalScore is unstable")