@@ -1,7 +1,7 @@
 # Copyright: 2011 MoinMoin:RonnyPfannschmidt
 # Copyright: 2011 MoinMoin:ThomasWaldmann
 # Copyright: 2011 MoinMoin:MichaelMayorov
-# Copyright: 2024 MoinMoin:UlrichB
+# Copyright: 2024-2025 MoinMoin:UlrichB
 # License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
 
 """
@@ -105,27 +105,31 @@
 INDEXER_TIMEOUT = 20.0
 
 
-def get_indexer(fn, **kw):
+def get_doc(fn, revid, retry=False, **kwargs):
     """
-    Return a valid indexer or raise a KeyError.
+    Return a valid document or raise a KeyError.
 
     Under heavy loads, the Whoosh AsyncWriter writer may be delayed in writing
     indexes to storage. Try several times before failing.
 
-    FIXME: runs into timeout for a non-existing revid
-
     :param fn: the indexer function
-    :param **kw: "revid" is required, index name optional
+    :param revid: revision to search
+    :param retry: retry backend search if document not found, required when server load is high
+    :param kwargs: idx_name, name of index used for searching (optional)
     """
     until = time.time() + INDEXER_TIMEOUT
     while True:
-        indexer = fn(**kw)
-        if indexer is not None:
+        doc = fn(revid=revid, **kwargs)
+        if doc is not None:
             break
-        time.sleep(2)
+        if not retry:
+            msg = f"revid: {revid} not found. Please check meta data and indexes"
+            raise KeyError(msg)
         if time.time() > until:
-            raise KeyError(kw.get("revid", "") + " - server overload or corrupt index")
-    return indexer
+            msg = f"revid: {revid} - Server overload may have corrupted the index; rebuild it."
+            raise KeyError(msg)
+        time.sleep(2)
+    return doc
 
 
 def parent_names(names):
@@ -1357,9 +1361,9 @@ def store_revision( |
         self.indexer.index_revision(meta, content, backend_name, force_latest=not overwrite)
         gc.collect()  # triggers close of index files from is_latest search
         if not overwrite:
-            self._current = get_indexer(self.indexer._document, revid=revid)
+            self._current = get_doc(self.indexer._document, revid=revid, retry=True)
         if return_rev:
-            return Revision(self, revid)
+            return Revision(self, revid, retry=True)
 
     def store_all_revisions(self, meta, data):
         """
@@ -1404,13 +1408,13 @@ class Revision(PropertiesMixin): |
     An existing revision (exists in the backend).
     """
 
-    def __init__(self, item: Item, revid: str, doc=None, name=None):
+    def __init__(self, item: Item, revid: str, doc=None, name=None, retry=False):
         is_current = revid == CURRENT
         if doc is None:
             if is_current:
                 doc = item._current
             else:
-                doc = get_indexer(item.indexer._document, idx_name=ALL_REVS, revid=revid)
+                doc = get_doc(item.indexer._document, idx_name=ALL_REVS, revid=revid, retry=retry)
 
         if is_current:
             revid = doc.get(REVID)
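A minimal usage sketch of the renamed helper, for reviewers. It assumes get_doc is importable from moin.storage.middleware.indexing (the module this diff appears to patch); the delayed-lookup stub below is purely illustrative and not part of the change.

# Assumed import path for the helper changed in this diff.
from moin.storage.middleware.indexing import get_doc


# Illustrative stand-in for indexer._document: the document only becomes
# visible after a few polls, simulating a Whoosh AsyncWriter that has not
# flushed the new revision to the index yet.
def make_delayed_lookup(visible_after_calls):
    state = {"calls": 0}

    def lookup(revid, idx_name=None):
        state["calls"] += 1
        return {"revid": revid} if state["calls"] > visible_after_calls else None

    return lookup


# retry=True: poll every 2 seconds until the document appears or
# INDEXER_TIMEOUT (20 s) is exceeded, the mode used right after
# store_revision() has written a new revision.
doc = get_doc(make_delayed_lookup(2), revid="abc123", retry=True)

# retry=False (the default): an ordinary lookup of a missing revid now
# raises KeyError immediately instead of blocking until the timeout.
try:
    get_doc(make_delayed_lookup(2), revid="does-not-exist")
except KeyError as err:
    print(err)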