This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Merge commit '2983049a7' into anoa/dinsic_release_1_21_x
* commit '2983049a7':
  Factor out `_send_dummy_event_for_room` (#8370)
  Improve logging of state resolution (#8371)
  Fix bug which caused failure on join with malformed membership events (#8385)
  Use `async with` for ID gens (#8383)
  Don't push if a user account has expired (#8353)
  Do not check lint/test dependencies at runtime. (#8377)
  Add note to reverse_proxy.md about disabling Apache's mod_security2 (#8375)
  Changelog
anoadragon453 committed Oct 21, 2020
2 parents: e529fa7 + 2983049 · commit: 3729b15
Showing 32 changed files with 293 additions and 224 deletions.
2 changes: 1 addition & 1 deletion changelog.d/8330.misc
@@ -1 +1 @@
-Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this.
+Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this.
1 change: 1 addition & 0 deletions changelog.d/8353.bugfix
@@ -0,0 +1 @@
+Don't send push notifications to expired user accounts.
1 change: 1 addition & 0 deletions changelog.d/8370.misc
@@ -0,0 +1 @@
+Factor out a `_send_dummy_event_for_room` method.
1 change: 1 addition & 0 deletions changelog.d/8371.misc
@@ -0,0 +1 @@
+Improve logging of state resolution.
1 change: 1 addition & 0 deletions changelog.d/8373.bugfix
@@ -0,0 +1 @@
+Include `guest_access` in the fields that are checked for null bytes when updating `room_stats_state`. Broke in v1.7.2.
1 change: 1 addition & 0 deletions changelog.d/8375.doc
@@ -0,0 +1 @@
+Add note to the reverse proxy settings documentation about disabling Apache's mod_security2. Contributed by Julian Fietkau (@jfietkau).
1 change: 1 addition & 0 deletions changelog.d/8377.misc
@@ -0,0 +1 @@
+Move lint-related dependencies to package-extra field, update CONTRIBUTING.md to utilise this.
1 change: 1 addition & 0 deletions changelog.d/8383.misc
@@ -0,0 +1 @@
+Refactor ID generators to use `async with` syntax.
1 change: 1 addition & 0 deletions changelog.d/8385.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause errors in rooms with malformed membership events, on servers using sqlite.
8 changes: 8 additions & 0 deletions docs/reverse_proxy.md
@@ -121,6 +121,14 @@ example.com:8448 {

 **NOTE**: ensure the `nocanon` options are included.
 
+**NOTE 2**: It appears that Synapse is currently incompatible with the ModSecurity module for Apache (`mod_security2`). If you need it enabled for other services on your web server, you can disable it for Synapse's two VirtualHosts by including the following lines before each of the two `</VirtualHost>` above:
+
+```
+<IfModule security2_module>
+    SecRuleEngine off
+</IfModule>
+```
+
 ### HAProxy
 
 ```
16 changes: 16 additions & 0 deletions setup.py
@@ -94,6 +94,22 @@ def exec_file(path_segments):
 # Make `pip install matrix-synapse[all]` install all the optional dependencies.
 CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS)
 
+# Developer dependencies should not get included in "all".
+#
+# We pin black so that our tests don't start failing on new releases.
+CONDITIONAL_REQUIREMENTS["lint"] = [
+    "isort==5.0.3",
+    "black==19.10b0",
+    "flake8-comprehensions",
+    "flake8",
+]
+
+# Dependencies which are exclusively required by unit test code. This is
+# NOT a list of all modules that are necessary to run the unit tests.
+# Tests assume that all optional dependencies are installed.
+#
+# parameterized_class decorator was introduced in parameterized 0.7.0
+CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"]
 
 setup(
     name="matrix-synapse",
6 changes: 1 addition & 5 deletions synapse/api/auth.py
@@ -218,11 +218,7 @@ async def get_user_by_req(
         # Deny the request if the user account has expired.
         if self._account_validity.enabled and not allow_expired:
             user_id = user.to_string()
-            expiration_ts = await self.store.get_expiration_ts_for_user(user_id)
-            if (
-                expiration_ts is not None
-                and self.clock.time_msec() >= expiration_ts
-            ):
+            if await self.store.is_account_expired(user_id, self.clock.time_msec()):
                 raise AuthError(
                     403, "User account has expired", errcode=Codes.EXPIRED_ACCOUNT
                 )
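
The five-line expiry check collapses into a single store call. The implementation of `is_account_expired` is not shown in this diff; a plausible pure-function sketch of the logic it encapsulates, inferred from the code being deleted (names here are illustrative):

```python
from typing import Optional


def is_account_expired(expiration_ts: Optional[int], now_ms: int) -> bool:
    # An account with no recorded expiration timestamp never expires.
    return expiration_ts is not None and now_ms >= expiration_ts


# The old inline check behaved the same way:
assert is_account_expired(None, 1_600_000_000_000) is False
assert is_account_expired(1_500_000_000_000, 1_600_000_000_000) is True
```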
102 changes: 54 additions & 48 deletions synapse/handlers/message.py
@@ -1185,54 +1185,7 @@ async def _send_dummy_events_to_fill_extremities(self):
         )
 
         for room_id in room_ids:
-            # For each room we need to find a joined member we can use to send
-            # the dummy event with.
-
-            latest_event_ids = await self.store.get_prev_events_for_room(room_id)
-
-            members = await self.state.get_current_users_in_room(
-                room_id, latest_event_ids=latest_event_ids
-            )
-            dummy_event_sent = False
-            for user_id in members:
-                if not self.hs.is_mine_id(user_id):
-                    continue
-                requester = create_requester(user_id)
-                try:
-                    event, context = await self.create_event(
-                        requester,
-                        {
-                            "type": "org.matrix.dummy_event",
-                            "content": {},
-                            "room_id": room_id,
-                            "sender": user_id,
-                        },
-                        prev_event_ids=latest_event_ids,
-                    )
-
-                    event.internal_metadata.proactively_send = False
-
-                    # Since this is a dummy-event it is OK if it is sent by a
-                    # shadow-banned user.
-                    await self.send_nonmember_event(
-                        requester,
-                        event,
-                        context,
-                        ratelimit=False,
-                        ignore_shadow_ban=True,
-                    )
-                    dummy_event_sent = True
-                    break
-                except ConsentNotGivenError:
-                    logger.info(
-                        "Failed to send dummy event into room %s for user %s due to "
-                        "lack of consent. Will try another user" % (room_id, user_id)
-                    )
-                except AuthError:
-                    logger.info(
-                        "Failed to send dummy event into room %s for user %s due to "
-                        "lack of power. Will try another user" % (room_id, user_id)
-                    )
+            dummy_event_sent = await self._send_dummy_event_for_room(room_id)
 
             if not dummy_event_sent:
                 # Did not find a valid user in the room, so remove from future attempts
@@ -1245,6 +1198,59 @@ async def _send_dummy_events_to_fill_extremities(self):
                 now = self.clock.time_msec()
                 self._rooms_to_exclude_from_dummy_event_insertion[room_id] = now
 
+    async def _send_dummy_event_for_room(self, room_id: str) -> bool:
+        """Attempt to send a dummy event for the given room.
+
+        Args:
+            room_id: room to try to send an event from
+
+        Returns:
+            True if a dummy event was successfully sent. False if no user was able
+            to send an event.
+        """
+
+        # For each room we need to find a joined member we can use to send
+        # the dummy event with.
+        latest_event_ids = await self.store.get_prev_events_for_room(room_id)
+        members = await self.state.get_current_users_in_room(
+            room_id, latest_event_ids=latest_event_ids
+        )
+        for user_id in members:
+            if not self.hs.is_mine_id(user_id):
+                continue
+            requester = create_requester(user_id)
+            try:
+                event, context = await self.create_event(
+                    requester,
+                    {
+                        "type": "org.matrix.dummy_event",
+                        "content": {},
+                        "room_id": room_id,
+                        "sender": user_id,
+                    },
+                    prev_event_ids=latest_event_ids,
+                )
+
+                event.internal_metadata.proactively_send = False
+
+                # Since this is a dummy-event it is OK if it is sent by a
+                # shadow-banned user.
+                await self.send_nonmember_event(
+                    requester, event, context, ratelimit=False, ignore_shadow_ban=True,
+                )
+                return True
+            except ConsentNotGivenError:
+                logger.info(
+                    "Failed to send dummy event into room %s for user %s due to "
+                    "lack of consent. Will try another user" % (room_id, user_id)
+                )
+            except AuthError:
+                logger.info(
+                    "Failed to send dummy event into room %s for user %s due to "
+                    "lack of power. Will try another user" % (room_id, user_id)
+                )
+        return False
+
     def _expire_rooms_to_exclude_from_dummy_event_insertion(self):
         expire_before = self.clock.time_msec() - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
         to_expire = set()
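
For context, `_send_dummy_events_to_fill_extremities` parks rooms in which no local user could send the dummy event in `_rooms_to_exclude_from_dummy_event_insertion`, and `_expire_rooms_to_exclude_from_dummy_event_insertion` (truncated above) later drops entries old enough to retry. A self-contained sketch of that bookkeeping; the expiry value is an assumption for illustration, not taken from this diff:

```python
import time

# Assumed value for illustration; the real constant is defined in message.py.
_DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY = 7 * 24 * 60 * 60 * 1000  # ms

rooms_to_exclude = {}  # room_id -> time of the failed attempt, in ms


def record_failure(room_id: str) -> None:
    rooms_to_exclude[room_id] = int(time.time() * 1000)


def expire_exclusions() -> None:
    # Mirrors the expiry pass: entries older than the window are removed,
    # so the room becomes eligible for dummy events again.
    expire_before = int(time.time() * 1000) - _DUMMY_EVENT_ROOM_EXCLUSION_EXPIRY
    for room_id, failed_at in list(rooms_to_exclude.items()):
        if failed_at < expire_before:
            del rooms_to_exclude[room_id]
```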
18 changes: 18 additions & 0 deletions synapse/push/pusherpool.py
@@ -60,6 +60,8 @@ def __init__(self, hs: "HomeServer"):
         self.store = self.hs.get_datastore()
         self.clock = self.hs.get_clock()
 
+        self._account_validity = hs.config.account_validity
+
         # We shard the handling of push notifications by user ID.
         self._pusher_shard_config = hs.config.push.pusher_shard_config
         self._instance_name = hs.get_instance_name()
@@ -202,6 +204,14 @@ async def on_new_notifications(self, max_stream_id: int):
         )
 
         for u in users_affected:
+            # Don't push if the user account has expired
+            if self._account_validity.enabled:
+                expired = await self.store.is_account_expired(
+                    u, self.clock.time_msec()
+                )
+                if expired:
+                    continue
+
             if u in self.pushers:
                 for p in self.pushers[u].values():
                     p.on_new_notifications(max_stream_id)
@@ -222,6 +232,14 @@ async def on_new_receipts(self, min_stream_id, max_stream_id, affected_room_ids)
         )
 
         for u in users_affected:
+            # Don't push if the user account has expired
+            if self._account_validity.enabled:
+                expired = await self.store.is_account_expired(
+                    u, self.clock.time_msec()
+                )
+                if expired:
+                    continue
+
             if u in self.pushers:
                 for p in self.pushers[u].values():
                     p.on_new_receipts(min_stream_id, max_stream_id)
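
The guard added to `on_new_notifications` and `on_new_receipts` is identical; a hypothetical helper (not part of this commit) makes the shared shape explicit:

```python
from typing import Awaitable, Callable


async def account_has_expired(
    validity_enabled: bool,
    is_account_expired: Callable[[str, int], Awaitable[bool]],
    user_id: str,
    now_ms: int,
) -> bool:
    # Both pusher callbacks skip users whose accounts have expired; when
    # account validity is disabled, nobody is ever considered expired.
    if not validity_enabled:
        return False
    return await is_account_expired(user_id, now_ms)
```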
13 changes: 4 additions & 9 deletions synapse/python_dependencies.py
@@ -37,6 +37,9 @@
 # installed when that optional dependency requirement is specified. It is passed
 # to setup() as extras_require in setup.py
 #
+# Note that these both represent runtime dependencies (and the versions
+# installed are checked at runtime).
+#
 # [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
 
 REQUIREMENTS = [
@@ -92,28 +95,20 @@
"oidc": ["authlib>=0.14.0"],
"systemd": ["systemd-python>=231"],
"url_preview": ["lxml>=3.5.0"],
# Dependencies which are exclusively required by unit test code. This is
# NOT a list of all modules that are necessary to run the unit tests.
# Tests assume that all optional dependencies are installed.
#
# parameterized_class decorator was introduced in parameterized 0.7.0
"test": ["mock>=2.0", "parameterized>=0.7.0"],
"sentry": ["sentry-sdk>=0.7.2"],
"opentracing": ["jaeger-client>=4.0.0", "opentracing>=2.2.0"],
"jwt": ["pyjwt>=1.6.4"],
# hiredis is not a *strict* dependency, but it makes things much faster.
# (if it is not installed, we fall back to slow code.)
"redis": ["txredisapi>=1.4.7", "hiredis"],
# We pin black so that our tests don't start failing on new releases.
"lint": ["isort==5.0.3", "black==19.10b0", "flake8-comprehensions", "flake8"],
}

ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str]

for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
# Exclude lint as it's a dev-based requirement.
if name not in ["systemd", "lint"]:
if name not in ["systemd"]:
ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
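
A runnable miniature of the flattening loop above, with the entries abbreviated, shows why dropping `"lint"` from the exclusion list is safe once the extra itself is gone:

```python
from typing import Set

CONDITIONAL_REQUIREMENTS = {
    "systemd": ["systemd-python>=231"],
    "redis": ["txredisapi>=1.4.7", "hiredis"],
    "jwt": ["pyjwt>=1.6.4"],
}

ALL_OPTIONAL_REQUIREMENTS = set()  # type: Set[str]
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
    # systemd-python is system-specific, so it is kept out of "all".
    if name not in ["systemd"]:
        ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS

assert ALL_OPTIONAL_REQUIREMENTS == {"txredisapi>=1.4.7", "hiredis", "pyjwt>=1.6.4"}
```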


64 changes: 16 additions & 48 deletions synapse/state/__init__.py
@@ -25,7 +25,6 @@
     Sequence,
     Set,
     Union,
-    cast,
     overload,
 )
 
@@ -42,7 +41,7 @@
 from synapse.state import v1, v2
 from synapse.storage.databases.main.events_worker import EventRedactBehaviour
 from synapse.storage.roommember import ProfileInfo
-from synapse.types import Collection, MutableStateMap, StateMap
+from synapse.types import Collection, StateMap
 from synapse.util import Clock
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches.expiringcache import ExpiringCache
@@ -472,10 +471,9 @@ class StateResolutionHandler:
     def __init__(self, hs):
         self.clock = hs.get_clock()
 
-        # dict of set of event_ids -> _StateCacheEntry.
-        self._state_cache = None
         self.resolve_linearizer = Linearizer(name="state_resolve_lock")
 
+        # dict of set of event_ids -> _StateCacheEntry.
         self._state_cache = ExpiringCache(
             cache_name="state_cache",
             clock=self.clock,
@@ -519,57 +517,28 @@ async def resolve_state_groups(
         Returns:
             The resolved state
         """
-        logger.debug("resolve_state_groups state_groups %s", state_groups_ids.keys())
-
         group_names = frozenset(state_groups_ids.keys())
 
         with (await self.resolve_linearizer.queue(group_names)):
-            if self._state_cache is not None:
-                cache = self._state_cache.get(group_names, None)
-                if cache:
-                    return cache
+            cache = self._state_cache.get(group_names, None)
+            if cache:
+                return cache
 
             logger.info(
-                "Resolving state for %s with %d groups", room_id, len(state_groups_ids)
+                "Resolving state for %s with groups %s", room_id, list(group_names),
             )
 
             state_groups_histogram.observe(len(state_groups_ids))
 
-            # start by assuming we won't have any conflicted state, and build up the new
-            # state map by iterating through the state groups. If we discover a conflict,
-            # we give up and instead use `resolve_events_with_store`.
-            #
-            # XXX: is this actually worthwhile, or should we just let
-            # resolve_events_with_store do it?
-            new_state = {}  # type: MutableStateMap[str]
-            conflicted_state = False
-            for st in state_groups_ids.values():
-                for key, e_id in st.items():
-                    if key in new_state:
-                        conflicted_state = True
-                        break
-                    new_state[key] = e_id
-                if conflicted_state:
-                    break
-
-            if conflicted_state:
-                logger.info("Resolving conflicted state for %r", room_id)
-                with Measure(self.clock, "state._resolve_events"):
-                    # resolve_events_with_store returns a StateMap, but we can
-                    # treat it as a MutableStateMap as it is above. It isn't
-                    # actually mutated anymore (and is frozen in
-                    # _make_state_cache_entry below).
-                    new_state = cast(
-                        MutableStateMap,
-                        await resolve_events_with_store(
-                            self.clock,
-                            room_id,
-                            room_version,
-                            list(state_groups_ids.values()),
-                            event_map=event_map,
-                            state_res_store=state_res_store,
-                        ),
-                    )
+            with Measure(self.clock, "state._resolve_events"):
+                new_state = await resolve_events_with_store(
+                    self.clock,
+                    room_id,
+                    room_version,
+                    list(state_groups_ids.values()),
+                    event_map=event_map,
+                    state_res_store=state_res_store,
+                )
 
             # if the new state matches any of the input state groups, we can
             # use that state group again. Otherwise we will generate a state_id
@@ -579,8 +548,7 @@ async def resolve_state_groups(
             with Measure(self.clock, "state.create_group_ids"):
                 cache = _make_state_cache_entry(new_state, state_groups_ids)
 
-            if self._state_cache is not None:
-                self._state_cache[group_names] = cache
+            self._state_cache[group_names] = cache
 
             return cache
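
For readers new to this code: `state_groups_ids` maps a state group ID to a `StateMap`, i.e. a dict from `(event_type, state_key)` to an event ID. The deleted fast path scanned those maps for any key bound to two different event IDs before falling back to full resolution; the rewrite runs `resolve_events_with_store` unconditionally. A toy illustration of the conflict the old scan looked for (all values invented):

```python
# Two state groups disagree about the same (event_type, state_key) pair,
# which is exactly the "conflicted state" the removed fast path detected.
state_groups_ids = {
    123: {("m.room.member", "@alice:example.com"): "$eventA"},
    456: {("m.room.member", "@alice:example.com"): "$eventB"},
}

seen = {}  # (event_type, state_key) -> first event ID encountered
conflicted = False
for group in state_groups_ids.values():
    for key, event_id in group.items():
        if seen.setdefault(key, event_id) != event_id:
            conflicted = True

assert conflicted
```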

4 changes: 2 additions & 2 deletions synapse/storage/databases/main/account_data.py
@@ -339,7 +339,7 @@ async def add_account_data_to_room(
"""
content_json = json_encoder.encode(content)

with await self._account_data_id_gen.get_next() as next_id:
async with self._account_data_id_gen.get_next() as next_id:
# no need to lock here as room_account_data has a unique constraint
# on (user_id, room_id, account_data_type) so simple_upsert will
# retry if there is a conflict.
@@ -387,7 +387,7 @@ async def add_account_data_for_user(
"""
content_json = json_encoder.encode(content)

with await self._account_data_id_gen.get_next() as next_id:
async with self._account_data_id_gen.get_next() as next_id:
# no need to lock here as account_data has a unique constraint on
# (user_id, account_data_type) so simple_upsert will retry if
# there is a conflict.
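
The old `with await ... get_next()` form awaited a coroutine that returned an ordinary context manager; after this change `get_next()` returns an async context manager, entered with `async with`. A minimal sketch of the new shape, assuming a toy generator rather than Synapse's real stream ID generators:

```python
import asyncio
from contextlib import asynccontextmanager


class ToyIdGenerator:
    """Stand-in for Synapse's stream ID generators (illustrative only)."""

    def __init__(self, start: int = 0):
        self._current = start
        self._lock = asyncio.Lock()

    @asynccontextmanager
    async def get_next(self):
        # Reserve the next ID under a lock; a real implementation also
        # tracks in-flight IDs so readers only see fully persisted rows.
        async with self._lock:
            self._current += 1
            reserved = self._current
        yield reserved


async def main():
    gen = ToyIdGenerator()
    async with gen.get_next() as next_id:  # the new calling convention
        print("persisting row with stream id", next_id)


asyncio.run(main())
```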
