micro-optimize method lookups inside inner loops
hannosch committed Apr 8, 2012
1 parent cf8af48 commit 3708251
Showing 1 changed file with 16 additions and 13 deletions.
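
The commit binds frequently used bound methods (result.append, result.insert, keys.insert) to local names before the hot loops in Catalog.sortResults, so each loop iteration skips an attribute lookup. A minimal, self-contained sketch of the technique follows; it is an illustration with made-up names and counts, not code from this commit:

    # Bind the bound method once, outside the loop; the inner loop then
    # calls a local name instead of re-resolving `result.append` each time.
    from timeit import timeit

    def with_attribute_lookup(n=100000):
        result = []
        for i in range(n):
            result.append(i)      # attribute lookup on every iteration
        return result

    def with_local_binding(n=100000):
        result = []
        r_append = result.append  # one lookup, reused by the whole loop
        for i in range(n):
            r_append(i)
        return result

    print(timeit(with_attribute_lookup, number=100))
    print(timeit(with_local_binding, number=100))

The saving is small per call but measurable in loops that run once per catalog result.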
src/Products/ZCatalog/Catalog.py (29 changes: 16 additions & 13 deletions)
@@ -681,7 +681,8 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
         _self__getitem__ = self.__getitem__
         index_key_map = sort_index.documentToKeyMap()
         result = []
-        append = result.append
+        r_append = result.append
+        r_insert = result.insert
         if hasattr(rs, 'keys'):
             rs = rs.keys()
         if actual_result_count is None:
@@ -754,7 +755,7 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                             # Is this ever true?
                             intset = keys()
                         length += len(intset)
-                        append((k, intset, _self__getitem__))
+                        r_append((k, intset, _self__getitem__))
                 result.sort(reverse=reverse)
             else:
                 for k, intset in sort_index.items():
@@ -779,7 +780,7 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                                     pass
                             keysets[full_key].append(i)
                         for k2, v2 in keysets.items():
-                            append((k2, v2, _self__getitem__))
+                            r_append((k2, v2, _self__getitem__))
                 result = multisort(result, sort_spec)
             sequence, slen = self._limit_sequence(result, length, b_start,
                                                   b_size, switched_reverse)
@@ -800,7 +801,7 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                         # we do not merge now and need to intermingle the
                         # results with those of other catalogs while avoiding
                         # the cost of instantiating a LazyMap per result
-                        append((key, did, _self__getitem__))
+                        r_append((key, did, _self__getitem__))
                 if merge:
                     result.sort(reverse=reverse)
             else:
@@ -813,7 +814,7 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                         # This document is not in the sort key index, skip it.
                         pass
                     else:
-                        append((full_key, did, _self__getitem__))
+                        r_append((full_key, did, _self__getitem__))
                 if merge:
                     result = multisort(result, sort_spec)
             if merge:
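
As the comments in the two hunks above explain, each result tuple carries a reference back to the catalog's __getitem__ so that, when merge is false, tuples from several catalogs can be intermingled and sorted first, with each document instantiated only afterwards through the __getitem__ stored in its own tuple. A hedged sketch of that consumer side; merge_and_resolve is a hypothetical helper, not part of ZCatalog:

    # Hypothetical illustration of the (sortkey, docid, getitem) tuple shape.
    def merge_and_resolve(*catalog_results):
        # Sort on (sortkey, docid) only; the third element is a function
        # and must stay out of the comparisons.
        merged = sorted((t for result in catalog_results for t in result),
                        key=lambda t: t[:2])
        return [getitem(did) for (key, did, getitem) in merged]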
@@ -832,6 +833,7 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
             # This is faster for large sets then a full sort
             # And uses far less memory
             keys = []
+            k_insert = keys.insert
             n = 0
             worst = None
             if sort_index_length == 1:
@@ -845,8 +847,8 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                         if n >= limit and key <= worst:
                             continue
                         i = bisect(keys, key)
-                        keys.insert(i, key)
-                        result.insert(i, (key, did, _self__getitem__))
+                        k_insert(i, key)
+                        r_insert(i, (key, did, _self__getitem__))
                         if n == limit:
                             del keys[0], result[0]
                         else:
@@ -867,8 +869,8 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                         if n >= limit and key <= worst:
                             continue
                         i = bisect(keys, key)
-                        keys.insert(i, key)
-                        result.insert(i, (full_key, did, _self__getitem__))
+                        k_insert(i, key)
+                        r_insert(i, (full_key, did, _self__getitem__))
                         if n == limit:
                             del keys[0], result[0]
                         else:
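
The two hunks above are ZCatalog's N-Best path: rather than sorting the full result set, it keeps only the limit best entries, using bisect to maintain an ascending keys list whose first element is the current worst survivor. A standalone reconstruction of the idea (illustrative names, not the commit's code):

    from bisect import bisect

    def n_best(dids, key_of, limit):
        # Keep the `limit` entries with the largest keys. `keys` stays
        # sorted ascending, so the worst surviving key is always keys[0].
        keys, result, n, worst = [], [], 0, None
        for did in dids:
            key = key_of(did)
            if n >= limit and key <= worst:
                continue                  # cannot displace any survivor
            i = bisect(keys, key)
            keys.insert(i, key)
            result.insert(i, (key, did))
            if n == limit:
                del keys[0], result[0]    # evict the current worst
            else:
                n += 1
            worst = keys[0]
        return result                     # ascending by key

    # e.g. n_best(range(10), key_of=lambda d: d % 7, limit=3)

The reverse variant in the next hunks mirrors this with key >= best and eviction from the tail via del keys[-1].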
@@ -887,6 +889,7 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
         elif not first_reverse:
             # Limit / sort results using N-Best algorithm in reverse (N-Worst?)
             keys = []
+            k_insert = keys.insert
             n = 0
             best = None
             if sort_index_length == 1:
@@ -900,8 +903,8 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                         if n >= limit and key >= best:
                             continue
                         i = bisect(keys, key)
-                        keys.insert(i, key)
-                        result.insert(i, (key, did, _self__getitem__))
+                        k_insert(i, key)
+                        r_insert(i, (key, did, _self__getitem__))
                         if n == limit:
                             del keys[-1], result[-1]
                         else:
@@ -921,8 +924,8 @@ def sortResults(self, rs, sort_index, reverse=False, limit=None,
                         if n >= limit and key >= best:
                             continue
                         i = bisect(keys, key)
-                        keys.insert(i, key)
-                        result.insert(i, (full_key, did, _self__getitem__))
+                        k_insert(i, key)
+                        r_insert(i, (full_key, did, _self__getitem__))
                         if n == limit:
                             del keys[-1], result[-1]
                         else:
