
Commit dbf2f71 (parent: 93b7b30)

test: Integrate the pytest-global-fixture as postgres fixture

File tree: 11 files changed, +334 −13 lines

contrib/pyln-testing/pyln/testing/db.py

Lines changed: 50 additions & 0 deletions

@@ -148,6 +148,56 @@ def stop(self) -> None:
         pass


+class GlobalPostgresDbProvider(object):
+    """
+    Provider that uses pytest-global-fixture for a shared PostgreSQL instance.
+    This provider coordinates with the global_resource fixture to get databases
+    from a single globally-shared PostgreSQL instance.
+    """
+    def __init__(self, directory):
+        self.directory = directory
+        self.global_resource = None
+        self.port = None
+        print("Starting GlobalPostgresDbProvider (using global fixture)")
+
+    def set_global_resource(self, global_resource):
+        """Called by the db_provider fixture to inject the global_resource."""
+        self.global_resource = global_resource
+
+    def start(self):
+        """No-op: the global service is started by the coordinator."""
+        if self.global_resource is None:
+            raise RuntimeError(
+                "GlobalPostgresDbProvider requires global_resource fixture. "
+                "Make sure pytest-global-fixture plugin is loaded."
+            )
+        pass
+
+    def get_db(self, node_directory, testname, node_id):
+        """Get a database by requesting a tenant from the global service."""
+        if self.global_resource is None:
+            raise RuntimeError(
+                "global_resource not set. Did you call set_global_resource()?"
+            )
+
+        # Request a tenant from the global PostgreSQL service
+        config = self.global_resource(
+            "pytest_global_fixture.postgres_service:NativePostgresService"
+        )
+
+        # Store port for compatibility
+        if self.port is None:
+            self.port = config["port"]
+
+        # Create a PostgresDb instance
+        db = PostgresDb(config["dbname"], config["port"])
+        return db
+
+    def stop(self):
+        """No-op: global service cleanup is handled by the coordinator."""
+        pass
+
+
 class PostgresDbProvider(object):
     def __init__(self, directory):
         self.directory = directory
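For orientation, here is a minimal sketch of the provider lifecycle as the db_provider fixture drives it. The fake_global_resource stub and the directory paths are hypothetical stand-ins; the real global_resource fixture provisions a tenant over XML-RPC (see plugin.py below), and a PostgreSQL server is assumed reachable on the configured port, since constructing PostgresDb connects to it.

from pyln.testing.db import GlobalPostgresDbProvider

def fake_global_resource(class_path):
    # Stand-in for the real fixture: returns the tenant config the
    # coordinator would provision over XML-RPC.
    return {"dbname": "tenant_gw0_ab12cd", "port": 5432}

provider = GlobalPostgresDbProvider("/tmp/test-base-dir")  # hypothetical path
provider.set_global_resource(fake_global_resource)
provider.start()   # raises RuntimeError if global_resource was never injected
db = provider.get_db("/tmp/node-dir", "test_example", 0)  # returns a PostgresDb
provider.stop()    # no-op; the coordinator owns the shared service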

contrib/pyln-testing/pyln/testing/fixtures.py

Lines changed: 10 additions & 2 deletions

@@ -1,5 +1,5 @@
 from concurrent import futures
-from pyln.testing.db import SqliteDbProvider, PostgresDbProvider
+from pyln.testing.db import SqliteDbProvider, PostgresDbProvider, GlobalPostgresDbProvider
 from pyln.testing.utils import NodeFactory, BitcoinD, ElementsD, env, LightningNode, TEST_DEBUG, TEST_NETWORK
 from pyln.client import Millisatoshi
 from typing import Dict

@@ -699,12 +699,20 @@ def checkMemleak(node):
 providers = {
     'sqlite3': SqliteDbProvider,
     'postgres': PostgresDbProvider,
+    'gpostgres': GlobalPostgresDbProvider,
 }


 @pytest.fixture
-def db_provider(test_base_dir):
+def db_provider(request, test_base_dir):
     provider = providers[os.getenv('TEST_DB_PROVIDER', 'sqlite3')](test_base_dir)
+
+    # If using GlobalPostgresDbProvider, inject the global_resource fixture
+    if isinstance(provider, GlobalPostgresDbProvider):
+        # Get the global_resource fixture from the same request context
+        global_resource = request.getfixturevalue('global_resource')
+        provider.set_global_resource(global_resource)
+
     provider.start()
     yield provider
     provider.stop()
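The request.getfixturevalue call above resolves the global_resource fixture by name at runtime, so it is only set up when TEST_DB_PROVIDER=gpostgres is selected. A standalone sketch of that lazy-resolution pattern, with hypothetical fixture names:

import os
import pytest

@pytest.fixture
def expensive_service():
    # Imagine costly setup here, e.g. starting a server process.
    return "service-handle"

@pytest.fixture
def provider(request):
    # getfixturevalue resolves a fixture by name at runtime, so
    # expensive_service is only set up when this branch is taken --
    # the same trick db_provider uses for global_resource above.
    if os.getenv("USE_SERVICE"):
        return request.getfixturevalue("expensive_service")
    return "no-service"

def test_lazy(provider):
    assert provider in ("service-handle", "no-service")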

contrib/pyln-testing/pyproject.toml

Lines changed: 2 additions & 0 deletions

@@ -8,6 +8,8 @@ readme = "README.md"
 requires-python = ">=3.9,<4.0"
 dependencies = [
     "pytest>=8.0.0",
+    "pytest-xdist>=3.0.0",
+    "pytest-global-fixture",
     "ephemeral-port-reserve>=1.1.4",
     "psycopg2-binary>=2.9.0",
     "python-bitcoinlib>=0.11.0",

contrib/pytest-global-fixture/pyproject.toml

Lines changed: 3 additions & 0 deletions

@@ -9,6 +9,9 @@ dependencies = [
     "pytest-xdist>=3.0.0",
 ]

+[project.entry-points.pytest11]
+pytest_global_fixture = "pytest_global_fixture.plugin"
+
 [project.optional-dependencies]
 dev = [
     "testcontainers>=3.7.0",
contrib/pytest-global-fixture/pytest_global_fixture/__init__.py

Lines changed: 11 additions & 0 deletions

@@ -0,0 +1,11 @@
+"""
+pytest-global-fixture: A pytest plugin for globally shared infrastructure resources.
+"""
+
+from .base import InfrastructureService
+from .postgres_service import NativePostgresService
+
+__all__ = [
+    'InfrastructureService',
+    'NativePostgresService',
+]

contrib/pytest-global-fixture/pytest_global_fixture/plugin.py

Lines changed: 25 additions & 9 deletions

@@ -19,6 +19,9 @@ def pytest_configure(config):
     """
     # Check if we are a worker (xdist). If no workerinput, we are Master.
     if not hasattr(config, "workerinput"):
+        print("\n" + "="*80)
+        print("PYTEST-GLOBAL-FIXTURE: Coordinator mode - managing shared resources")
+        print("="*80)
         manager = ServiceManager()

         # Bind to port 0 (ephemeral)

@@ -67,33 +70,44 @@ def coordinator_client(request):
     """
     if hasattr(request.config, "workerinput"):
         addr = request.config.workerinput["infra_rpc_addr"]
+        worker_id = request.config.workerinput.get("workerid", "unknown")
+        print(f"[{worker_id}] PYTEST-GLOBAL-FIXTURE: Worker connecting to coordinator at {addr}")
     else:
         # We are running sequentially (no xdist), or we are the master
         addr = request.config.infra_rpc_addr
-
+        print(f"PYTEST-GLOBAL-FIXTURE: Sequential mode, using coordinator at {addr}")
+
     return xmlrpc.client.ServerProxy(addr)


 @pytest.fixture(scope="function")
-def global_resource(request, coordinator_client):
+def global_resource(request):
     """
     Factory fixture.
     Usage: global_resource("path.to:Class")
     """
-
+
+    # Get RPC address
+    if hasattr(request.config, "workerinput"):
+        addr = request.config.workerinput["infra_rpc_addr"]
+    else:
+        addr = request.config.infra_rpc_addr
+
     # Track resources created in this scope for cleanup
     created_resources = []

     def _provision(class_path):
         # Create unique tenant ID: "gwX_testName_UUID"
         worker_id = getattr(request.config, "workerinput", {}).get("workerid", "master")
-        test_name = request.node.name.replace("[", "_").replace("]", "_")
+        test_name = request.node.name.replace("[", "_").replace("]", "_")
         # Short uuid for uniqueness
         uid = uuid.uuid4().hex[:6]
         tenant_id = f"{worker_id}_{uid}"
-
-        # RPC Call
-        config = coordinator_client.rpc_provision(class_path, tenant_id)
-
+
+        # Create a new ServerProxy for each call to avoid connection reuse issues
+        # This prevents http.client.CannotSendRequest errors in multi-threaded scenarios
+        client = xmlrpc.client.ServerProxy(addr)
+        config = client.rpc_provision(class_path, tenant_id)
+
         created_resources.append((class_path, tenant_id))
         return config

@@ -102,7 +116,9 @@ def _provision(class_path):
     # Teardown logic
     for class_path, tenant_id in reversed(created_resources):
         try:
-            coordinator_client.rpc_deprovision(class_path, tenant_id)
+            # Create a new client for cleanup too
+            client = xmlrpc.client.ServerProxy(addr)
+            client.rpc_deprovision(class_path, tenant_id)
         except Exception as e:
             # We print but don't raise, to avoid masking test failures
             print(f"Warning: Failed to deprovision {tenant_id}: {e}")
