diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py
index eea9bb3dc..981df49d4 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_backup_restore.py
@@ -5,6 +5,7 @@
 import kubernetes
 import kubernetes.client
 import pymongo
+import pytest
 from kubernetes import client
 from kubetester import (
     create_or_update_configmap,
@@ -474,17 +475,9 @@ def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti):
 
     @skip_if_local
     @mark.e2e_multi_cluster_backup_restore
+    @pytest.mark.flaky(reruns=100, reruns_delay=6)
     def test_add_test_data(self, mongodb_multi_one_collection):
-        max_attempts = 100
-        while max_attempts > 0:
-            try:
-                mongodb_multi_one_collection.insert_one(TEST_DATA)
-                return
-            except Exception as e:
-                print(e)
-                max_attempts -= 1
-                time.sleep(6)
-        raise Exception("❌ Failed to insert test data after multiple attempts")
+        mongodb_multi_one_collection.insert_one(TEST_DATA)
 
     @mark.e2e_multi_cluster_backup_restore
     def test_mdb_backed_up(self, project_one: OMTester):
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py
index a2186995b..c2cc0d988 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster/multi_cluster_dr_connect.py
@@ -54,18 +54,9 @@ def test_replica_set_is_reachable(mongodb_multi: MongoDBMulti):
 
 
 @pytest.mark.e2e_multi_cluster_dr
+@pytest.mark.flaky(reruns=100, reruns_delay=6)
 def test_add_test_data(mongodb_multi_collection):
-    # TODO: remove this retry mechanism, for some reason the resource exits the running state and then
-    # enters it later. The subsequent test fails because the resource is not actually
-    max_attempts = 100
-    while max_attempts > 0:
-        try:
-            mongodb_multi_collection.insert_one(TEST_DATA)
-            return
-        except Exception as e:
-            print(e)
-            max_attempts -= 1
-            time.sleep(6)
+    mongodb_multi_collection.insert_one(TEST_DATA)
 
 
 @pytest.mark.e2e_multi_cluster_dr
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py
index e612d4127..b46744278 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster_appdb/multicluster_appdb_s3_based_backup_restore.py
@@ -3,6 +3,7 @@
 
 import kubernetes.client
 import pymongo
+import pytest
 from kubetester import create_or_update_configmap, try_load
 from kubetester.kubetester import ensure_ent_version
 from kubetester.kubetester import fixture as yaml_fixture
@@ -203,16 +204,9 @@ def test_mongodb_multi_one_running_state(self, mongodb_multi_one: MongoDBMulti):
         # we might fail connection in the beginning since we set a custom dns in coredns
         mongodb_multi_one.assert_reaches_phase(Phase.Running, ignore_errors=True, timeout=600)
 
+    @pytest.mark.flaky(reruns=100, reruns_delay=6)
     def test_add_test_data(self, mongodb_multi_one_collection):
-        max_attempts = 100
-        while max_attempts > 0:
-            try:
-                mongodb_multi_one_collection.insert_one(TEST_DATA)
-                return
-            except Exception as e:
-                print(e)
-                max_attempts -= 1
-                time.sleep(6)
+        mongodb_multi_one_collection.insert_one(TEST_DATA)
 
     def test_mdb_backed_up(self, project_one: OMTester):
         project_one.wait_until_backup_snapshots_are_ready(expected_count=1)
diff --git a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py
index 7d6929f37..820a5defb 100644
--- a/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py
+++ b/docker/mongodb-kubernetes-tests/tests/multicluster_om/multicluster_om_appdb_no_mesh.py
@@ -557,16 +557,9 @@ def test_create_mongodb_multi(server_certs: str, mongodb_multi: MongoDBMulti):
 
 @skip_if_local
 @mark.e2e_multi_cluster_om_appdb_no_mesh
+@pytest.mark.flaky(reruns=100, reruns_delay=6)
 def test_add_test_data(mongodb_multi_collection):
-    max_attempts = 100
-    while max_attempts > 0:
-        try:
-            mongodb_multi_collection.insert_one(TEST_DATA)
-            return
-        except Exception as e:
-            print(e)
-            max_attempts -= 1
-            time.sleep(6)
+    mongodb_multi_collection.insert_one(TEST_DATA)
 
 
 @mark.e2e_multi_cluster_om_appdb_no_mesh
diff --git a/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set.py b/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set.py
index 5ba57314b..1fa76f93b 100644
--- a/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set.py
+++ b/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set.py
@@ -117,6 +117,7 @@ def test_replica_set_sts_exists(self):
         sts = self.appsv1.read_namespaced_stateful_set(RESOURCE_NAME, self.namespace)
         assert sts
 
+    @pytest.mark.flaky(reruns=15, reruns_delay=5)
     def test_sts_creation(self):
         sts = self.appsv1.read_namespaced_stateful_set(RESOURCE_NAME, self.namespace)
 
diff --git a/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_liveness_probe.py b/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_liveness_probe.py
index 591d1240a..47e364587 100644
--- a/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_liveness_probe.py
+++ b/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_liveness_probe.py
@@ -25,26 +25,17 @@ def _get_pods(podname_template: str, qty: int = 3):
 
 @skip_if_static_containers
 @pytest.mark.e2e_replica_set_liveness_probe
+@pytest.mark.flaky(reruns=10, reruns_delay=30)
 def test_pods_are_running(replica_set: MongoDB, namespace: str):
     corev1_client = client.CoreV1Api()
     running_pods: Set[str] = set()
-    tries = 10
 
     # Wait for all the pods to be running
     # We can't wait for the replica set to be running
     # as it will never get to it (mongod is not starting)
-    while tries:
-        if len(running_pods) == 3:
-            break
-        for podname in _get_pods("my-replica-set-{}", 3):
-            try:
-                pod = corev1_client.read_namespaced_pod(podname, namespace)
-                if pod.status.phase == "Running":
-                    running_pods.add(podname)
-            except:
-                # Pod not found, will retry
-                pass
-        tries -= 1
-        time.sleep(30)
+    for podname in _get_pods("my-replica-set-{}", 3):
+        pod = corev1_client.read_namespaced_pod(podname, namespace)
+        if pod.status.phase == "Running":
+            running_pods.add(podname)
 
     assert len(running_pods) == 3
diff --git a/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_pv.py b/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_pv.py
index cb4a48d10..8556c330e 100644
--- a/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_pv.py
+++ b/docker/mongodb-kubernetes-tests/tests/replicaset/replica_set_pv.py
@@ -22,6 +22,7 @@ def test_replica_set_sts_exists(self):
         sts = self.appsv1.read_namespaced_stateful_set("rs001-pv", self.namespace)
         assert sts
 
+    @pytest.mark.flaky(reruns=15, reruns_delay=5)
     def test_sts_creation(self):
         sts = self.appsv1.read_namespaced_stateful_set("rs001-pv", self.namespace)
 
diff --git a/requirements.txt b/requirements.txt
index 89b2868d2..d660d860a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -49,3 +49,4 @@ types-python-dateutil==2.9.0.20250809
 pipupgrade==1.12.0
 pytest-cov==6.2.1
 pytest-socket==0.7.0
+pytest-rerunfailures==16.0
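
Note on the marker semantics (illustration only, not part of the patch): the flaky marker from pytest-rerunfailures reruns the whole test function when it fails, so reruns=100, reruns_delay=6 reproduces the deleted "while max_attempts > 0: ... time.sleep(6)" loops, with one difference worth knowing: reruns counts retries after the initial attempt, so the marker allows up to 101 tries where the old loop allowed 100. The plugin takes over the retry bookkeeping and the final failure reporting that the hand-rolled loops did with print(e) and a trailing raise. A minimal self-contained sketch, with a hypothetical test name and a module-level counter; the counter persists across reruns because reruns happen in the same pytest process:

    import pytest

    ATTEMPTS = {"count": 0}

    # Retry up to 4 times after the initial failure, sleeping 1s between tries,
    # mirroring the markers introduced above (reruns=N, reruns_delay=seconds).
    @pytest.mark.flaky(reruns=4, reruns_delay=1)
    def test_flaky_insert():
        ATTEMPTS["count"] += 1
        # Fails on the first two attempts, then passes on the third;
        # the retries show up in the pytest summary as "2 rerun".
        assert ATTEMPTS["count"] >= 3

With pytest-rerunfailures==16.0 installed this passes after two reruns; without the plugin the unknown mark is ignored (pytest only warns) and the first failure is final, which is why the dependency is pinned in requirements.txt.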