Skip to content
This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Commit

Permalink
Run black on synapse/handlers/user_directory.py (#4812)
Browse files Browse the repository at this point in the history
This got done on the develop branch in #4635, but the subsequent merge to
hotfixes (88af031) discarded the changes for some reason.

Fixing this here and now means (a) there are fewer differences between
matrix-org-hotfixes and develop, making future patches easier to merge, and (b)
it fixes some pep8 errors on the hotfixes branch which have been annoying me
for some time.
  • Loading branch information
richvdh committed Mar 6, 2019
1 parent c728560 commit 9e9572c
Showing 1 changed file with 47 additions and 43 deletions.
90 changes: 47 additions & 43 deletions synapse/handlers/user_directory.py
Expand Up @@ -14,7 +14,6 @@
# limitations under the License.

import logging
import synapse.metrics

from six import iteritems

Expand All @@ -29,6 +28,7 @@

logger = logging.getLogger(__name__)


class UserDirectoryHandler(object):
"""Handles querying of and keeping updated the user_directory.
Expand Down Expand Up @@ -130,7 +130,7 @@ def handle_local_profile_change(self, user_id, profile):
# Support users are for diagnostics and should not appear in the user directory.
if not is_support:
yield self.store.update_profile_in_user_dir(
user_id, profile.display_name, profile.avatar_url, None,
user_id, profile.display_name, profile.avatar_url, None
)

@defer.inlineCallbacks
Expand Down Expand Up @@ -166,8 +166,9 @@ def _unsafe_process(self):
self.pos = deltas[-1]["stream_id"]

# Expose current event processing position to prometheus
synapse.metrics.event_processing_positions.labels(
"user_dir").set(self.pos)
synapse.metrics.event_processing_positions.labels("user_dir").set(
self.pos
)

yield self.store.update_user_directory_stream_pos(self.pos)

Expand All @@ -191,21 +192,25 @@ def _do_initial_spam(self):
logger.info("Handling room %d/%d", num_processed_rooms + 1, len(room_ids))
yield self._handle_initial_room(room_id)
num_processed_rooms += 1
yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)

logger.info("Processed all rooms.")

if self.search_all_users:
num_processed_users = 0
user_ids = yield self.store.get_all_local_users()
logger.info("Doing initial update of user directory. %d users", len(user_ids))
logger.info(
"Doing initial update of user directory. %d users", len(user_ids)
)
for user_id in user_ids:
# We add profiles for all users even if they don't match the
# include pattern, just in case we want to change it in future
logger.info("Handling user %d/%d", num_processed_users + 1, len(user_ids))
logger.info(
"Handling user %d/%d", num_processed_users + 1, len(user_ids)
)
yield self._handle_local_user(user_id)
num_processed_users += 1
yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.)
yield self.clock.sleep(self.INITIAL_USER_SLEEP_MS / 1000.0)

logger.info("Processed all users")

Expand All @@ -224,24 +229,24 @@ def _handle_initial_room(self, room_id):
if not is_in_room:
return

is_public = yield self.store.is_room_world_readable_or_publicly_joinable(room_id)
is_public = yield self.store.is_room_world_readable_or_publicly_joinable(
room_id
)

users_with_profile = yield self.state.get_current_user_in_room(room_id)
user_ids = set(users_with_profile)
unhandled_users = user_ids - self.initially_handled_users

yield self.store.add_profiles_to_user_dir(
room_id, {
user_id: users_with_profile[user_id] for user_id in unhandled_users
}
room_id,
{user_id: users_with_profile[user_id] for user_id in unhandled_users},
)

self.initially_handled_users |= unhandled_users

if is_public:
yield self.store.add_users_to_public_room(
room_id,
user_ids=user_ids - self.initially_handled_users_in_public
room_id, user_ids=user_ids - self.initially_handled_users_in_public
)
self.initially_handled_users_in_public |= user_ids

Expand All @@ -253,7 +258,7 @@ def _handle_initial_room(self, room_id):
count = 0
for user_id in user_ids:
if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)

if not self.is_mine_id(user_id):
count += 1
Expand All @@ -268,7 +273,7 @@ def _handle_initial_room(self, room_id):
continue

if count % self.INITIAL_ROOM_SLEEP_COUNT == 0:
yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.)
yield self.clock.sleep(self.INITIAL_ROOM_SLEEP_MS / 1000.0)
count += 1

user_set = (user_id, other_user_id)
Expand All @@ -290,25 +295,23 @@ def _handle_initial_room(self, room_id):

if len(to_insert) > self.INITIAL_ROOM_BATCH_SIZE:
yield self.store.add_users_who_share_room(
room_id, not is_public, to_insert,
room_id, not is_public, to_insert
)
to_insert.clear()

if len(to_update) > self.INITIAL_ROOM_BATCH_SIZE:
yield self.store.update_users_who_share_room(
room_id, not is_public, to_update,
room_id, not is_public, to_update
)
to_update.clear()

if to_insert:
yield self.store.add_users_who_share_room(
room_id, not is_public, to_insert,
)
yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)
to_insert.clear()

if to_update:
yield self.store.update_users_who_share_room(
room_id, not is_public, to_update,
room_id, not is_public, to_update
)
to_update.clear()

Expand All @@ -329,11 +332,12 @@ def _handle_deltas(self, deltas):
# may have become public or not and add/remove the users in said room
if typ in (EventTypes.RoomHistoryVisibility, EventTypes.JoinRules):
yield self._handle_room_publicity_change(
room_id, prev_event_id, event_id, typ,
room_id, prev_event_id, event_id, typ
)
elif typ == EventTypes.Member:
change = yield self._get_key_change(
prev_event_id, event_id,
prev_event_id,
event_id,
key_name="membership",
public_value=Membership.JOIN,
)
Expand All @@ -343,7 +347,7 @@ def _handle_deltas(self, deltas):
if change is None:
# Handle any profile changes
yield self._handle_profile_change(
state_key, room_id, prev_event_id, event_id,
state_key, room_id, prev_event_id, event_id
)
continue

Expand Down Expand Up @@ -375,13 +379,15 @@ def _handle_room_publicity_change(self, room_id, prev_event_id, event_id, typ):

if typ == EventTypes.RoomHistoryVisibility:
change = yield self._get_key_change(
prev_event_id, event_id,
prev_event_id,
event_id,
key_name="history_visibility",
public_value="world_readable",
)
elif typ == EventTypes.JoinRules:
change = yield self._get_key_change(
prev_event_id, event_id,
prev_event_id,
event_id,
key_name="join_rule",
public_value=JoinRules.PUBLIC,
)
Expand Down Expand Up @@ -506,7 +512,7 @@ def _handle_new_user(self, room_id, user_id, profile):
)
if self.is_mine_id(other_user_id) and not is_appservice:
shared_is_private = yield self.store.get_if_users_share_a_room(
other_user_id, user_id,
other_user_id, user_id
)
if shared_is_private is True:
# We've already marked in the database they share a private room
Expand All @@ -521,13 +527,11 @@ def _handle_new_user(self, room_id, user_id, profile):
to_insert.add((other_user_id, user_id))

if to_insert:
yield self.store.add_users_who_share_room(
room_id, not is_public, to_insert,
)
yield self.store.add_users_who_share_room(room_id, not is_public, to_insert)

if to_update:
yield self.store.update_users_who_share_room(
room_id, not is_public, to_update,
room_id, not is_public, to_update
)

@defer.inlineCallbacks
Expand All @@ -546,15 +550,15 @@ def _handle_remove_user(self, room_id, user_id):
row = yield self.store.get_user_in_public_room(user_id)
update_user_in_public = row and row["room_id"] == room_id

if (update_user_in_public or update_user_dir):
if update_user_in_public or update_user_dir:
# XXX: Make this faster?
rooms = yield self.store.get_rooms_for_user(user_id)
for j_room_id in rooms:
if (not update_user_in_public and not update_user_dir):
if not update_user_in_public and not update_user_dir:
break

is_in_room = yield self.store.is_host_joined(
j_room_id, self.server_name,
j_room_id, self.server_name
)

if not is_in_room:
Expand Down Expand Up @@ -582,19 +586,19 @@ def _handle_remove_user(self, room_id, user_id):
# Get a list of user tuples that were in the DB due to this room and
# users (this includes tuples where the other user matches `user_id`)
user_tuples = yield self.store.get_users_in_share_dir_with_room_id(
user_id, room_id,
user_id, room_id
)

for user_id, other_user_id in user_tuples:
# For each user tuple get a list of rooms that they still share,
# trying to find a private room, and update the entry in the DB
rooms = yield self.store.get_rooms_in_common_for_users(user_id, other_user_id)
rooms = yield self.store.get_rooms_in_common_for_users(
user_id, other_user_id
)

# If they dont share a room anymore, remove the mapping
if not rooms:
yield self.store.remove_user_who_share_room(
user_id, other_user_id,
)
yield self.store.remove_user_who_share_room(user_id, other_user_id)
continue

found_public_share = None
Expand All @@ -608,13 +612,13 @@ def _handle_remove_user(self, room_id, user_id):
else:
found_public_share = None
yield self.store.update_users_who_share_room(
room_id, not is_public, [(user_id, other_user_id)],
room_id, not is_public, [(user_id, other_user_id)]
)
break

if found_public_share:
yield self.store.update_users_who_share_room(
room_id, not is_public, [(user_id, other_user_id)],
room_id, not is_public, [(user_id, other_user_id)]
)

@defer.inlineCallbacks
Expand Down Expand Up @@ -642,7 +646,7 @@ def _handle_profile_change(self, user_id, room_id, prev_event_id, event_id):

if prev_name != new_name or prev_avatar != new_avatar:
yield self.store.update_profile_in_user_dir(
user_id, new_name, new_avatar, room_id,
user_id, new_name, new_avatar, room_id
)

@defer.inlineCallbacks
Expand Down

0 comments on commit 9e9572c

Please sign in to comment.