This repository was archived by the owner on Apr 26, 2024. It is now read-only.

Commit f39c1da

Remove debug logging
1 parent 4191f56 commit f39c1da
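
The hunks below all follow the same pattern: temporary `logger.info(...)` calls added while debugging backfill are either deleted outright or, where the message is still worth keeping, downgraded to `logger.debug(...)`. A minimal sketch of that downgrade, assuming a standard `logging` setup (the function name here is illustrative, not part of this diff):

import logging

logger = logging.getLogger(__name__)

def handle_backfill_response(pdus):
    # Illustrative helper, not part of the Synapse diff below.
    #
    # Before this commit: logged unconditionally at INFO, which floods the
    # homeserver logs with full event payloads on every backfill request.
    #
    #     logger.info("on_backfill_request pdus(%d)=%s", len(pdus), pdus)
    #
    # After: the call is either removed, or kept at DEBUG so it only appears
    # when that module's logger is explicitly turned up in the logging config.
    logger.debug("on_backfill_request pdus(%d)=%s", len(pdus), pdus)
    return pdus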


7 files changed, +11 -184 lines


synapse/federation/federation_server.py

Lines changed: 0 additions & 4 deletions
@@ -196,12 +196,8 @@ async def on_backfill_request(
                 origin, room_id, versions, limit
             )
 
-            logger.info("on_backfill_request pdus(%d)=%s", len(pdus), pdus)
-
             res = self._transaction_dict_from_pdus(pdus)
 
-            logger.info("on_backfill_request res=%s", res)
-
         return 200, res
 
     async def on_incoming_transaction(

synapse/handlers/federation.py

Lines changed: 5 additions & 8 deletions
@@ -148,14 +148,14 @@ async def _maybe_backfill_inner(
         insertion_events_to_be_backfilled = (
             await self.store.get_insertion_event_backwards_extremities_in_room(room_id)
         )
-        logger.info(
+        logger.debug(
             "_maybe_backfill_inner: extremities oldest_events_with_depth=%s insertion_events_to_be_backfilled=%s",
             oldest_events_with_depth,
             insertion_events_to_be_backfilled,
         )
 
         if not oldest_events_with_depth and not insertion_events_to_be_backfilled:
-            logger.info("Not backfilling as no extremeties found.")
+            logger.debug("Not backfilling as no extremeties found.")
             return False
 
         # We only want to paginate if we can actually see the events we'll get,
@@ -203,7 +203,7 @@ async def _maybe_backfill_inner(
             redact=False,
             check_history_visibility_only=True,
         )
-        logger.info(
+        logger.debug(
             "_maybe_backfill_inner: filtered_extremities %s", filtered_extremities
         )
 
@@ -230,7 +230,7 @@ async def _maybe_backfill_inner(
         # much larger factor will result in triggering a backfill request much
         # earlier than necessary.
         if current_depth - 2 * limit > max_depth:
-            logger.info(
+            logger.debug(
                 "Not backfilling as we don't need to. %d < %d - 2 * %d",
                 max_depth,
                 current_depth,
@@ -249,7 +249,7 @@ async def _maybe_backfill_inner(
             t for t in sorted_extremeties_tuple if int(t[1]) <= current_depth
         ]
 
-        logger.info(
+        logger.debug(
             "room_id: %s, backfill: current_depth: %s, limit: %s, max_depth: %s, extrems (%d): %s filtered_sorted_extremeties_tuple: %s",
             room_id,
             current_depth,
@@ -271,8 +271,6 @@ async def _maybe_backfill_inner(
         # request URI to be too long.
         extremities = dict(sorted_extremeties_tuple[:5])
 
-        logger.info("backfill extremities=%s", extremities)
-
         # Now we need to decide which hosts to hit first.
 
         # First we try hosts that are already in the room
@@ -1061,7 +1059,6 @@ async def on_backfill_request(
         events = await self.store.get_backfill_events(room_id, pdu_list, limit)
 
         events = await filter_events_for_server(self.storage, origin, events)
-        logger.info("on_backfill_request resultant events(%d)=%s", len(events), events)
 
         return events
 

synapse/handlers/federation_event.py

Lines changed: 1 addition & 42 deletions
@@ -416,19 +416,6 @@ async def backfill(
         events = await self._federation_client.backfill(
             dest, room_id, limit=limit, extremities=extremities
         )
-        logger.info(
-            "from remote server: got backfill response events(%d)=%s",
-            len(events),
-            [
-                {
-                    "event_id": ev.event_id,
-                    "type": ev["type"],
-                    "depth": ev["depth"],
-                    "content": ev["content"].get("body", None),
-                }
-                for ev in events
-            ],
-        )
 
         if not events:
             return
@@ -444,38 +431,10 @@ async def backfill(
 
         await self._process_pulled_events(
             dest,
-            # The /backfill response should start from ?v and include the
-            # events that preceded it (so the list will be newest -> oldest). We
-            # reverse that order so the messages are oldest -> newest and we can
-            # persist the backfilled events without constantly have to go fetch
-            # missing prev_events which are probably included in the same
-            # backfill chunk.
-            # TODO: If we try to reverse this list, the stream_ordering will be backwards
-            # reversed(events),
             events,
             backfilled=True,
         )
 
-        for ev in events:
-            event_after_persisted = await self._store.get_event(
-                ev.event_id, allow_none=True
-            )
-
-            if event_after_persisted:
-                logger.info(
-                    "from remote server: processed backfilled event_id=%s type=%s depth=%s stream_ordering=%s content=%s",
-                    ev.event_id,
-                    event_after_persisted["type"],
-                    event_after_persisted["depth"],
-                    event_after_persisted.internal_metadata.stream_ordering,
-                    event_after_persisted["content"].get("body", None),
-                )
-            else:
-                logger.info(
-                    "from remote server: processed backfilled event_id=%s failed to lookup",
-                    ev.event_id,
-                )
-
     async def _get_missing_events_for_pdu(
         self, origin: str, pdu: EventBase, prevs: Set[str], min_depth: int
     ) -> None:
@@ -1277,7 +1236,7 @@ def prep(event: EventBase) -> Optional[Tuple[EventBase, EventContext]]:
         await self.persist_events_and_notify(
             room_id,
             tuple(events_to_persist),
-            # TODO: Maybe this to get fetched missing events during backfill as backfilled also :/
+            # Events we fetch during backfill should be marked as backfilled as well
             backfilled=True,
         )
 

synapse/handlers/room_batch.py

Lines changed: 2 additions & 22 deletions
@@ -13,10 +13,6 @@
 logger = logging.getLogger(__name__)
 
 
-def generate_fake_event_id() -> str:
-    return "$fake_" + random_string(43)
-
-
 class RoomBatchHandler:
     def __init__(self, hs: "HomeServer"):
         self.hs = hs
@@ -184,7 +180,7 @@ async def persist_state_events_at_start(
 
         # Make the state events float off on their own so we don't have a
        # bunch of @mxid joined the room noise between each batch
-        prev_event_ids_for_state_chain = []  # generate_fake_event_id()
+        prev_event_ids_for_state_chain: List[str] = []
 
         for state_event in state_events_at_start:
             assert_params_in_dict(
@@ -227,15 +223,6 @@ async def persist_state_events_at_start(
                     # reference and also update in the event when we append later.
                     auth_event_ids=auth_event_ids.copy(),
                 )
-
-                mem_event = await self.store.get_event(event_id)
-                logger.info(
-                    "room_batch mem_event_id=%s depth=%s stream_ordering=%s prev_event_ids=%s",
-                    mem_event.event_id,
-                    mem_event.depth,
-                    mem_event.internal_metadata.stream_ordering,
-                    mem_event.prev_event_ids(),
-                )
             else:
                 # TODO: Add some complement tests that adds state that is not member joins
                 # and will use this code path. Maybe we only want to support join state events
@@ -362,20 +349,13 @@ async def persist_historical_events(
         # Events are sorted by (topological_ordering, stream_ordering)
         # where topological_ordering is just depth.
         for (event, context) in reversed(events_to_persist):
-            result_event = await self.event_creation_handler.handle_new_client_event(
+            await self.event_creation_handler.handle_new_client_event(
                 await self.create_requester_for_user_id_from_app_service(
                     event["sender"], app_service_requester.app_service
                 ),
                 event=event,
                 context=context,
            )
-            logger.info(
-                "result_event depth=%s stream_ordering=%s event_id=%s body=%s",
-                result_event.depth,
-                result_event.internal_metadata.stream_ordering,
-                result_event.event_id,
-                result_event.content.get("body", None),
-            )
 
         return event_ids
 

synapse/rest/client/room.py

Lines changed: 0 additions & 1 deletion
@@ -561,7 +561,6 @@ async def on_GET(
         pagination_config = await PaginationConfig.from_request(
             self.store, request, default_limit=10
         )
-        logger.info("/messages rest start pagination_config=%s", pagination_config)
         # Twisted will have processed the args by now.
         assert request.args is not None
         as_client_event = b"raw" not in request.args

synapse/storage/databases/main/event_federation.py

Lines changed: 3 additions & 49 deletions
@@ -1057,11 +1057,6 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
         # we process the newest-in-time messages first going backwards in time.
         queue = PriorityQueue()
 
-        logger.info(
-            "_get_backfill_events: seeding backfill with event_list(%d)=%s",
-            len(event_list),
-            event_list,
-        )
         for event_id in event_list:
             event_lookup_result = self.db_pool.simple_select_one_txn(
                 txn,
@@ -1075,14 +1070,6 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
                 allow_none=True,
             )
 
-            logger.info(
-                "_get_backfill_events: seeding backfill with event_id=%s type=%s depth=%s stream_ordering=%s",
-                event_id,
-                event_lookup_result["type"],
-                event_lookup_result["depth"],
-                event_lookup_result["stream_ordering"],
-            )
-
             if event_lookup_result["depth"]:
                 queue.put(
                     (
@@ -1102,39 +1089,6 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
             if event_id in event_results:
                 continue
 
-            event_lookup_result = self.db_pool.simple_select_one_txn(
-                txn,
-                table="events",
-                keyvalues={"event_id": event_id},
-                retcols=["type", "depth", "stream_ordering", "content"],
-                allow_none=True,
-            )
-
-            event_json_lookup_result = self.db_pool.simple_select_one_onecol_txn(
-                txn,
-                table="event_json",
-                keyvalues={"event_id": event_id},
-                retcol="json",
-                allow_none=True,
-            )
-
-            ev = db_to_json(event_json_lookup_result)
-
-            if event_lookup_result:
-                logger.info(
-                    "_get_backfill_events: event_results add event_id=%s type=%s depth=%s stream_ordering=%s content=%s",
-                    event_id,
-                    ev["type"],
-                    ev["depth"],
-                    event_lookup_result["stream_ordering"],
-                    ev["content"].get("body", ev["content"]),
-                )
-            else:
-                logger.info(
-                    "_get_backfill_events: event_results event_id=%s failed to lookup",
-                    event_id,
-                )
-
             event_results[event_id] = event_id
 
             # Try and find any potential historical batches of message history.
@@ -1147,7 +1101,7 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
                 connected_insertion_event_query, (event_id, limit - len(event_results))
             )
             connected_insertion_event_id_results = txn.fetchall()
-            logger.info(
+            logger.debug(
                 "_get_backfill_events: connected_insertion_event_query %s",
                 connected_insertion_event_id_results,
             )
@@ -1177,7 +1131,7 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
                 (event_id, limit - len(event_results)),
             )
             batch_start_event_id_results = txn.fetchall()
-            logger.info(
+            logger.debug(
                 "_get_backfill_events: batch_start_event_id_results %s",
                 batch_start_event_id_results,
             )
@@ -1190,7 +1144,7 @@ def _get_backfill_events(self, txn, room_id, event_list, limit):
                 (event_id, False, limit - len(event_results)),
             )
             prev_event_id_results = txn.fetchall()
-            logger.info(
+            logger.debug(
                 "_get_backfill_events: prev_event_ids %s", prev_event_id_results
             )
 

synapse/storage/databases/main/events.py

Lines changed: 0 additions & 58 deletions
@@ -169,14 +169,6 @@ async def _persist_events_and_state_updates(
 
         async with stream_ordering_manager as stream_orderings:
             for (event, _), stream in zip(events_and_contexts, stream_orderings):
-                logger.info(
-                    "_persist_events_and_state_updates backfilled=%s event_id=%s depth=%s stream_ordering=%s content=%s",
-                    backfilled,
-                    event.event_id,
-                    event.depth,
-                    stream,
-                    event["content"].get("body", None),
-                )
                 event.internal_metadata.stream_ordering = stream
 
             await self.db_pool.runInteraction(
@@ -2139,38 +2131,6 @@ def _update_backward_extremeties(self, txn, events):
 
         Forward extremities are handled when we first start persisting the events.
         """
-        logger.info(
-            "_update_backward_extremeties events=%s",
-            [
-                {
-                    "event_id": ev.event_id,
-                    "prev_events": ev.prev_event_ids(),
-                    "outlier": ev.internal_metadata.is_outlier(),
-                }
-                for ev in events
-            ],
-        )
-
-        for ev in events:
-            for e_id in ev.prev_event_ids():
-                query = """
-                    SELECT 1 FROM event_edges
-                    INNER JOIN events AS e USING (event_id, room_id)
-                    WHERE event_id = ? AND room_id = ? AND e.outlier = TRUE
-                """
-
-                txn.execute(
-                    query,
-                    (e_id, ev.room_id),
-                )
-                result = txn.fetchall()
-                logger.info(
-                    "_update_backward_extremeties test ev=%s prev_event_id=%s result=%s",
-                    ev.event_id,
-                    e_id,
-                    result,
-                )
-
         # From the events passed in, add all of the prev events as backwards extremities.
         # Ignore any events that are already backwards extrems or outliers.
         query = (
@@ -2200,24 +2160,6 @@ def _update_backward_extremeties(self, txn, events):
             ],
         )
 
-        for ev in events:
-            for e_id in ev.prev_event_ids():
-                query = """
-                    SELECT * FROM event_backward_extremities
-                    WHERE event_id = ? AND room_id = ?
-                """
-
-                txn.execute(
-                    query,
-                    (e_id, ev.room_id),
-                )
-                result = txn.fetchall()
-                logger.info(
-                    "_update_backward_extremeties ended up as prev_event_id=%s result=%s",
-                    e_id,
-                    result,
-                )
-
         # Delete all these events that we've already fetched and now know that their
         # prev events are the new backwards extremeties.
         query = (
