|
10 | 10 | from redisvl.utils.vectorize import HFTextVectorizer |
11 | 11 |
|
12 | 12 |
|
@pytest.fixture(scope="session")
def worker_id(request):
    """
    Return the xdist worker ID for the current test session.

    Under pytest-xdist, the pytest config object carries a "workerinput"
    mapping whose "workerid" entry names the worker (e.g. "gw0"). When
    xdist is not active that attribute is absent, so we fall back to
    "master". Centralizing this lookup keeps every fixture consistent.
    """
    # getattr default covers the non-xdist case; .get covers a missing key.
    return getattr(request.config, "workerinput", {}).get("workerid", "master")
| 24 | + |
| 25 | + |
@pytest.fixture(autouse=True)
def set_tokenizers_parallelism():
    """
    Automatically force HuggingFace tokenizers into single-threaded mode.

    The tokenizers library can deadlock when its internal parallelism
    interacts with forked test processes, so every test runs with it off.
    """
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
17 | 30 |
|
18 | 31 |
|
19 | 32 | @pytest.fixture(scope="session", autouse=True) |
20 | | -def redis_container(request): |
| 33 | +def redis_container(worker_id): |
21 | 34 | """ |
22 | 35 | If using xdist, create a unique Compose project for each xdist worker by |
23 | 36 | setting COMPOSE_PROJECT_NAME. That prevents collisions on container/volume |
24 | 37 | names. |
25 | 38 | """ |
26 | | - # In xdist, the config has "workerid" in workerinput |
27 | | - workerinput = getattr(request.config, "workerinput", {}) |
28 | | - worker_id = workerinput.get("workerid", "master") |
29 | | - |
30 | 39 | # Set the Compose project name so containers do not clash across workers |
31 | 40 | os.environ["COMPOSE_PROJECT_NAME"] = f"redis_test_{worker_id}" |
32 | 41 | os.environ.setdefault("REDIS_IMAGE", "redis/redis-stack-server:latest") |
@@ -206,19 +215,16 @@ def pytest_collection_modifyitems( |
206 | 215 |
|
207 | 216 |
|
208 | 217 | @pytest.fixture |
209 | | -def flat_index(sample_data, redis_url, request): |
| 218 | +def flat_index(sample_data, redis_url, worker_id): |
210 | 219 | """ |
211 | 220 | A fixture that uses the "flat" algorithm for its vector field. |
212 | 221 | """ |
213 | | - # In xdist, the config has "workerid" in workerinput |
214 | | - workerinput = getattr(request.config, "workerinput", {}) |
215 | | - worker_id = workerinput.get("workerid", "master") |
216 | 222 |
|
217 | 223 | # construct a search index from the schema |
218 | 224 | index = SearchIndex.from_dict( |
219 | 225 | { |
220 | 226 | "index": { |
221 | | - "name": "user_index", |
| 227 | + "name": f"user_index_{worker_id}", |
222 | 228 | "prefix": f"v1_{worker_id}", |
223 | 229 | "storage_type": "hash", |
224 | 230 | }, |
@@ -264,19 +270,16 @@ def hash_preprocess(item: dict) -> dict: |
264 | 270 |
|
265 | 271 |
|
266 | 272 | @pytest.fixture |
267 | | -async def async_flat_index(sample_data, redis_url, request): |
| 273 | +async def async_flat_index(sample_data, redis_url, worker_id): |
268 | 274 | """ |
269 | 275 | A fixture that uses the "flat" algorithm for its vector field. |
270 | 276 | """ |
271 | | - # In xdist, the config has "workerid" in workerinput |
272 | | - workerinput = getattr(request.config, "workerinput", {}) |
273 | | - worker_id = workerinput.get("workerid", "master") |
274 | 277 |
|
275 | 278 | # construct a search index from the schema |
276 | 279 | index = AsyncSearchIndex.from_dict( |
277 | 280 | { |
278 | 281 | "index": { |
279 | | - "name": "user_index", |
| 282 | + "name": f"user_index_{worker_id}", |
280 | 283 | "prefix": f"v1_{worker_id}", |
281 | 284 | "storage_type": "hash", |
282 | 285 | }, |
@@ -322,18 +325,15 @@ def hash_preprocess(item: dict) -> dict: |
322 | 325 |
|
323 | 326 |
|
324 | 327 | @pytest.fixture |
325 | | -async def async_hnsw_index(sample_data, redis_url, request): |
| 328 | +async def async_hnsw_index(sample_data, redis_url, worker_id): |
326 | 329 | """ |
327 | 330 | A fixture that uses the "hnsw" algorithm for its vector field. |
328 | 331 | """ |
329 | | - # In xdist, the config has "workerid" in workerinput |
330 | | - workerinput = getattr(request.config, "workerinput", {}) |
331 | | - worker_id = workerinput.get("workerid", "master") |
332 | 332 |
|
333 | 333 | index = AsyncSearchIndex.from_dict( |
334 | 334 | { |
335 | 335 | "index": { |
336 | | - "name": "user_index", |
| 336 | + "name": f"user_index_{worker_id}", |
337 | 337 | "prefix": f"v1_{worker_id}", |
338 | 338 | "storage_type": "hash", |
339 | 339 | }, |
@@ -376,18 +376,15 @@ def hash_preprocess(item: dict) -> dict: |
376 | 376 |
|
377 | 377 |
|
378 | 378 | @pytest.fixture |
379 | | -def hnsw_index(sample_data, redis_url, request): |
| 379 | +def hnsw_index(sample_data, redis_url, worker_id): |
380 | 380 | """ |
381 | 381 | A fixture that uses the "hnsw" algorithm for its vector field. |
382 | 382 | """ |
383 | | - # In xdist, the config has "workerid" in workerinput |
384 | | - workerinput = getattr(request.config, "workerinput", {}) |
385 | | - worker_id = workerinput.get("workerid", "master") |
386 | 383 |
|
387 | 384 | index = SearchIndex.from_dict( |
388 | 385 | { |
389 | 386 | "index": { |
390 | | - "name": "user_index", |
| 387 | + "name": f"user_index_{worker_id}", |
391 | 388 | "prefix": f"v1_{worker_id}", |
392 | 389 | "storage_type": "hash", |
393 | 390 | }, |
|
0 commit comments