Permalink
Browse files

update_index: update all backends by default

* Change the default behaviour to update every backend
* Allow --using to be repeated to update multiple named backends
  (Branch information omitted.)
1 parent 4d199e0 commit a018d7b77e2ace03dad2b68ddf045a07cbbf6f2c @acdha committed Nov 6, 2012
Showing 25 additions and 13 deletions.
  1. +25 −13 haystack/management/commands/update_index.py
@@ -1,7 +1,7 @@
from datetime import timedelta
from optparse import make_option
+import logging
import os
-import warnings
from django import db
from django.conf import settings
@@ -11,7 +11,6 @@
from django.utils.encoding import smart_str
from haystack import connections as haystack_connections
-from haystack.constants import DEFAULT_ALIAS
from haystack.query import SearchQuerySet
try:
@@ -121,8 +120,10 @@ class Command(LabelCommand):
make_option('-r', '--remove', action='store_true', dest='remove',
default=False, help='Remove objects from the index that are no longer present in the database.'
),
- make_option("-u", "--using", action="store", type="string", dest="using", default=DEFAULT_ALIAS,
- help='If provided, chooses a connection to work with.'
+ make_option("-u", "--using", action="append", dest="using",
+ default=[],
+ help='Update only the named backend (can be used multiple times). '
+ 'By default all backends will be updated.'
),
make_option('-k', '--workers', action='store', dest='workers',
default=0, type='int',
@@ -137,9 +138,11 @@ def handle(self, *items, **options):
self.start_date = None
self.end_date = None
self.remove = options.get('remove', False)
- self.using = options.get('using')
self.workers = int(options.get('workers', 0))
- self.backend = haystack_connections[self.using].get_backend()
+
+ self.backends = options.get('using')
+ if not self.backends:
+ self.backends = haystack_connections.connections_info.keys()
age = options.get('age', DEFAULT_AGE)
start_date = options.get('start_date')
@@ -202,9 +205,18 @@ def get_models(self, label):
return [get_model(app_label, model_name)]
def handle_label(self, label, **options):
+ for using in self.backends:
+ try:
+ self.update_backend(label, using)
+ except:
+ logging.exception("Error updating %s using %s ", label, using)
+ raise
+
+ def update_backend(self, label, using):
from haystack.exceptions import NotHandled
- unified_index = haystack_connections[self.using].get_unified_index()
+ backend = haystack_connections[using].get_backend()
+ unified_index = haystack_connections[using].get_unified_index()
if self.workers > 0:
import multiprocessing
@@ -223,7 +235,7 @@ def handle_label(self, label, **options):
# the loop continues and it accesses the ORM makes it better.
db.close_connection()
- qs = index.build_queryset(using=self.using, start_date=self.start_date,
+ qs = index.build_queryset(using=using, start_date=self.start_date,
end_date=self.end_date)
total = qs.count()
@@ -232,7 +244,7 @@ def handle_label(self, label, **options):
print "Indexing %d %s." % (total, smart_str(model._meta.verbose_name_plural))
pks_seen = set([smart_str(pk) for pk in qs.values_list('pk', flat=True)])
- batch_size = self.batchsize or self.backend.batch_size
+ batch_size = self.batchsize or backend.batch_size
if self.workers > 0:
ghetto_queue = []
@@ -241,9 +253,9 @@ def handle_label(self, label, **options):
end = min(start + batch_size, total)
if self.workers == 0:
- do_update(self.backend, index, qs, start, end, total, self.verbosity)
+ do_update(backend, index, qs, start, end, total, self.verbosity)
else:
- ghetto_queue.append(('do_update', model, start, end, total, self.using, self.start_date, self.end_date, self.verbosity))
+ ghetto_queue.append(('do_update', model, start, end, total, using, self.start_date, self.end_date, self.verbosity))
if self.workers > 0:
pool = multiprocessing.Pool(self.workers)
@@ -265,9 +277,9 @@ def handle_label(self, label, **options):
upper_bound = start + batch_size
if self.workers == 0:
- do_remove(self.backend, index, model, pks_seen, start, upper_bound)
+ do_remove(backend, index, model, pks_seen, start, upper_bound)
else:
- ghetto_queue.append(('do_remove', model, pks_seen, start, upper_bound, self.using, self.verbosity))
+ ghetto_queue.append(('do_remove', model, pks_seen, start, upper_bound, using, self.verbosity))
if self.workers > 0:
pool = multiprocessing.Pool(self.workers)

0 comments on commit a018d7b

Please sign in to comment.