Merged
2 changes: 0 additions & 2 deletions .github/workflows/style.yaml
@@ -6,12 +6,10 @@ on:
branches:
- main
- dev
- update-workflows
push:
branches:
- main
- dev
- update-workflows

jobs:
check-code-style:
2 changes: 1 addition & 1 deletion .github/workflows/tests.yaml
@@ -24,7 +24,7 @@ jobs:
strategy:
matrix:
os: [ubuntu-latest, windows-latest]
python-version: ["3.10", "3.11"]
python-version: ["3.10"] # we usually only need to test the oldest python version
backend: ["jax", "tensorflow", "torch"]

runs-on: ${{ matrix.os }}
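As a rough illustration of what this matrix change buys (a plain-Python sketch, not how GitHub Actions actually expands its matrix), the reduced Python axis halves the number of CI jobs per run:

from itertools import product

# Hypothetical enumeration of the CI matrix shown above.
os_list = ["ubuntu-latest", "windows-latest"]
python_versions = ["3.10"]          # was ["3.10", "3.11"]
backends = ["jax", "tensorflow", "torch"]

jobs = list(product(os_list, python_versions, backends))
print(len(jobs))  # 6 jobs instead of the previous 12
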
32 changes: 1 addition & 31 deletions tests/conftest.py
@@ -41,7 +41,7 @@ def pytest_make_parametrize_id(config, val, argname):
return f"{argname}={repr(val)}"


@pytest.fixture(params=[2, 3], scope="session")
@pytest.fixture(params=[2], scope="session")
def batch_size(request):
return request.param

@@ -94,33 +94,3 @@ def random_set(batch_size, set_size, feature_size):
@pytest.fixture(params=[2, 3], scope="session")
def set_size(request):
return request.param


@pytest.fixture(params=["two_moons"], scope="session")
def simulator(request):
return request.getfixturevalue(request.param)


@pytest.fixture(scope="session")
def training_dataset(simulator, batch_size):
from bayesflow.datasets import OfflineDataset

num_batches = 128
samples = simulator.sample((num_batches * batch_size,))
return OfflineDataset(samples, batch_size=batch_size)


@pytest.fixture(scope="session")
def two_moons(batch_size):
from bayesflow.simulators import TwoMoonsSimulator

return TwoMoonsSimulator()


@pytest.fixture(scope="session")
def validation_dataset(simulator, batch_size):
from bayesflow.datasets import OfflineDataset

num_batches = 16
samples = simulator.sample((num_batches * batch_size,))
return OfflineDataset(samples, batch_size=batch_size)
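For reviewers less familiar with parametrized fixtures, a minimal self-contained sketch (hypothetical test, not taken from this suite) of why narrowing params=[2, 3] to params=[2] shrinks the collected test grid: every entry in params yields one instance of each dependent test.

import pytest


@pytest.fixture(params=[2], scope="session")
def batch_size(request):
    # One test instance per entry in `params`; a single value here means
    # each dependent test is collected once instead of twice.
    return request.param


def test_batch_shape(batch_size):
    # Collected once as test_batch_shape[2]; with params=[2, 3] it would
    # have been collected twice, for batch_size 2 and 3.
    assert batch_size >= 1
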
2 changes: 1 addition & 1 deletion tests/test_distributions/conftest.py
@@ -3,7 +3,7 @@
import keras


@pytest.fixture(params=[2, 3])
@pytest.fixture(params=[2])
def batch_size(request):
return request.param

5 changes: 0 additions & 5 deletions tests/test_links/conftest.py
@@ -82,8 +82,3 @@ def quantiles(request):
@pytest.fixture()
def unordered(batch_size, num_quantiles, num_variables):
return keras.random.normal((batch_size, num_quantiles, num_variables))


# @pytest.fixture()
# def random_matrix_batch(batch_size, num_variables):
# return keras.random.normal((batch_size, num_variables, num_variables))
2 changes: 1 addition & 1 deletion tests/test_networks/test_summary_networks.py
@@ -103,7 +103,7 @@ def test_save_and_load(tmp_path, summary_network, random_set):
@pytest.mark.parametrize("stage", ["training", "validation"])
def test_compute_metrics(stage, summary_network, random_set):
if summary_network is None:
pytest.skip()
pytest.skip("Nothing to do, because there is no summary network.")

summary_network.build(keras.ops.shape(random_set))

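A side note on the pytest.skip change above: a minimal sketch (hypothetical test, not from this repository) of how passing a message makes skipped tests self-explanatory in the test report.

import pytest


def test_compute_metrics_example():
    summary_network = None  # stand-in for the fixture used in the real test

    if summary_network is None:
        # The message appears in pytest's short summary (run with `pytest -rs`),
        # whereas a bare pytest.skip() leaves the reason blank there.
        pytest.skip("Nothing to do, because there is no summary network.")
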
6 changes: 3 additions & 3 deletions tests/utils/check_combinations.py
@@ -13,12 +13,12 @@ def check_combination_simulator_adapter(simulator, adapter):
with pytest.raises(KeyError):
adapter(simulator.sample(1))
# Don't use this fixture combination for further tests.
pytest.skip()
pytest.skip(reason="Do not use this fixture combination for further tests") # TODO: better reason
elif simulator_with_sample_weight and not adapter_with_sample_weight:
# When a weight key is present, but the adapter does not configure it
# to be used as sample weight, no error is raised currently.
# Don't use this fixture combination for further tests.
pytest.skip()
pytest.skip(reason="Do not use this fixture combination for further tests") # TODO: better reason


def check_approximator_multivariate_normal_score(approximator):
@@ -28,4 +28,4 @@ def check_approximator_multivariate_normal_score(approximator):
if isinstance(approximator, PointApproximator):
for score in approximator.inference_network.scores.values():
if isinstance(score, MultivariateNormalScore):
pytest.skip()
pytest.skip(reason="MultivariateNormalScore is unstable")