Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions synapse/config/experimental.py
Original file line number Diff line number Diff line change
Expand Up @@ -569,3 +569,6 @@ def read_config(

# MSC4155: Invite filtering
self.msc4155_enabled: bool = experimental.get("msc4155_enabled", False)

# MSC4293: Redact on Kick/Ban (NOTE(review): the option below is named
# msc4239_enabled — looks like a digit transposition of msc4293; confirm the
# intended flag name, it is used as msc4239_enabled elsewhere in this PR too)
self.msc4239_enabled: bool = experimental.get("msc4239_enabled", False)
39 changes: 39 additions & 0 deletions synapse/handlers/federation_event.py
Original file line number Diff line number Diff line change
Expand Up @@ -1966,6 +1966,9 @@ async def _check_for_soft_fail(
Does nothing for events in rooms with partial state, since we may not have an
accurate membership event for the sender in the current state.

Also checks whether the event should be redacted due to an MSC4293 redaction
flag in the kick/ban membership event for its sender.

Args:
event
context: The `EventContext` which we are about to persist the event with.
Expand Down Expand Up @@ -2065,6 +2068,42 @@ async def _check_for_soft_fail(
soft_failed_event_counter.inc()
event.internal_metadata.soft_failed = True

if self._config.experimental.msc4239_enabled:
# Use already calculated auth events to determine if the event should be redacted due to kick/ban
if event.type == EventTypes.Message:
for auth_event in current_auth_events:
if (
auth_event.type == EventTypes.Member
and auth_event.state_key == event.sender
):
if auth_event.membership == Membership.BAN or (
auth_event.membership == Membership.LEAVE
and auth_event.sender != event.sender
):
# we have a ban or kick for this sender, check for redaction flag and apply if found
autoredact = auth_event.content.get(
"org.matrix.msc4293.redact_events", False
)
if autoredact:
await self._store.db_pool.simple_upsert(
table="redactions",
keyvalues={
"event_id": auth_event.event_id,
"redacts": event.event_id,
},
values={"received_ts": self._clock.time_msec()},
insertion_values={
"event_id": auth_event.event_id,
"redacts": event.event_id,
"received_ts": self._clock.time_msec(),
},
)
await self._store.db_pool.runInteraction(
"invalidate cache",
self._store.invalidate_get_event_cache_after_txn,
event.event_id,
)

async def _load_or_fetch_auth_events_for_event(
self, destination: Optional[str], event: EventBase
) -> Collection[EventBase]:
Expand Down
69 changes: 67 additions & 2 deletions synapse/storage/databases/main/events.py
Original file line number Diff line number Diff line change
Expand Up @@ -376,6 +376,72 @@ async def _persist_events_and_state_updates(

event_counter.labels(event.type, origin_type, origin_entity).inc()

if self.hs.config.experimental.msc4239_enabled:
if event.type == EventTypes.Member and event.content.get(
"org.matrix.msc4293.redact_events", False
):
if event.membership != Membership.BAN and not (
event.membership == Membership.LEAVE
and event.sender != event.state_key
):
return
# check that the sender has permission to redact
# NOTE(review): the comparison below uses sender_level > redact_level, but
# the Matrix spec permits redaction when sender_level >= redact_level —
# confirm this is not an off-by-one.
state_filter = StateFilter.from_types(
[(EventTypes.PowerLevels, "")]
)
state = await self.store.get_partial_filtered_current_state_ids(
event.room_id, state_filter
)
pl_id = state[(EventTypes.PowerLevels, "")]
pl_event_map = await self.store.get_events([pl_id])
pl_event = pl_event_map.get(pl_id)
if pl_event:
sender_level = pl_event.content.get("users", {}).get(
event.sender
)
if sender_level is None:
sender_level = pl_event.content.get("users_default", 0)
redact_level = pl_event.content.get("redact", 50)

if sender_level > redact_level:
ids_to_redact = (
await self.store.get_events_sent_by_user_in_room(
event.state_key,
event.room_id,
limit=1000000,  # arbitrarily large number (NOTE(review): fetches a user's entire room history in one call — consider batching)
filter=[
"m.room.member",
"m.room.message",
"m.room.encrypted",
],
)
)
if ids_to_redact:
key_values = [
(event.event_id, x) for x in ids_to_redact
]
value_values = [
(self._clock.time_msec(),)
for x in ids_to_redact
]
await self.db_pool.simple_upsert_many(
table="redactions",
key_names=["event_id", "redacts"],
key_values=key_values,
value_names=["received_ts"],
value_values=value_values,
desc="redact_on_ban_redaction_txn",
)
# normally the cache entry for a redacted event would be invalidated
# by an arriving redaction event, but since we are not creating redaction
# events we invalidate manually
for id in ids_to_redact:
await self.db_pool.runInteraction(
"invalidate cache",
self.store.invalidate_get_event_cache_after_txn,
id,
)

if new_forward_extremities:
self.store.get_latest_event_ids_in_room.prefill(
(room_id,), frozenset(new_forward_extremities)
Expand Down Expand Up @@ -2768,9 +2834,8 @@ def _store_redaction(self, txn: LoggingTransaction, event: EventBase) -> None:
self.db_pool.simple_upsert_txn(
txn,
table="redactions",
keyvalues={"event_id": event.event_id},
keyvalues={"event_id": event.event_id, "redacts": event.redacts},
values={
"redacts": event.redacts,
"received_ts": self._clock.time_msec(),
},
)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
-- Schema delta for MSC4293 (redact on kick/ban): a single membership event
-- may now redact many events, so widen the redactions table's uniqueness
-- from (event_id) to (event_id, redacts).

-- Add the new composite uniqueness first so the table is never without a
-- uniqueness guarantee. The old UNIQUE(event_id) implies the new pair is
-- already unique, so this cannot fail on existing data.
-- NOTE(review): ADD CONSTRAINT ... UNIQUE builds its backing index under an
-- ACCESS EXCLUSIVE lock (CONCURRENTLY is not available for constraints) —
-- confirm this is acceptable for deployments with large redactions tables.
ALTER TABLE ONLY redactions ADD CONSTRAINT redactions_event_id_redacts_key UNIQUE (event_id, redacts);

-- Replace the index we're about to drop
-- (the old UNIQUE(event_id) constraint's implicit index served event_id
-- lookups; keep an equivalent plain index so those lookups stay fast).
CREATE INDEX redactions_event_id ON redactions USING btree (event_id);

-- Drop the old constraint
-- (this also drops its implicit unique index on event_id).
ALTER TABLE ONLY redactions DROP CONSTRAINT redactions_event_id_key;
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
-- recreate table to drop UNIQUE(event_id) constraint and add a new unique constraint on (event_id, redacts)
-- SQLite cannot add or drop table constraints in place, so rebuild the table
-- and swap it in (needed for MSC4293, where one kick/ban event may redact
-- many events).
CREATE TABLE redactions2 (
event_id TEXT NOT NULL,
redacts TEXT NOT NULL,
have_censored BOOL NOT NULL DEFAULT false,
received_ts BIGINT,
UNIQUE(event_id, redacts)
);

-- Copy every existing row across into the replacement table.
INSERT INTO redactions2 (
event_id,
redacts,
have_censored,
received_ts
) SELECT r.event_id, r.redacts, r.have_censored, r.received_ts FROM redactions AS r;

-- NOTE(review): DROP TABLE also drops the table's indexes and frees their
-- names, so this explicit DROP INDEX looks redundant — confirm.
DROP INDEX redactions_redacts;
DROP TABLE redactions;
ALTER TABLE redactions2 RENAME TO redactions;

-- Recreate the secondary indexes on the swapped-in table.
CREATE INDEX redactions_redacts ON redactions(redacts);
CREATE INDEX redactions_event_id ON redactions(event_id); -- replace the index we dropped
CREATE INDEX redactions_have_censored_ts ON redactions(received_ts) WHERE NOT have_censored;
Loading
Loading