Clarify internal API borders
untitaker committed Dec 14, 2014
1 parent 6615349 commit 695d690
Showing 1 changed file with 51 additions and 48 deletions.
99 changes: 51 additions & 48 deletions vdirsyncer/cli.py
@@ -134,12 +134,27 @@ def get_collections_cache_key(pair_options, config_a, config_b):
     return m.hexdigest()
 
 
-def collections_for_pair(status_path, pair_name, all_pairs, all_storages,
-                         skip_cache=False):
+def collections_for_pair(status_path, name_a, name_b, pair_name, config_a,
+                         config_b, pair_options, skip_cache=False):
     '''Determine all configured collections for a given pair. Takes care of
     shortcut expansion and result caching.
+
+    :param status_path: The path to the status directory.
+    :param name_a: The config name of storage A.
+    :param name_b: The config name of storage B.
+    :param pair_name: The config name of the pair.
+    :param config_a: The configuration for storage A, with pair-defined
+        defaults.
+    :param config_b: The configuration for storage B, with pair-defined
+        defaults.
+    :param pair_options: Pair-specific options.
+    :param skip_cache: Whether to skip the cached data and always do
+        discovery. Even with this option enabled, the new cache is written.
+    :returns: iterable of (collection, a_args, b_args)
     '''
     rv = load_status(status_path, pair_name, data_type='collections')
-    a, b, pair_options, storage_options = all_pairs[pair_name]
-    cache_key = get_collections_cache_key(pair_options, all_storages[a],
-                                          all_storages[b])
+    cache_key = get_collections_cache_key(pair_options, config_a, config_b)
     if rv and not skip_cache:
         if rv.get('cache_key', None) == cache_key:
             return rv.get('collections', rv)
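
Note: the caching contract in this hunk is worth spelling out. The sketch below is a simplified illustration of the idea only, not code from this repository; the function name cache_key_for and the use of JSON serialization are assumptions.

    import hashlib
    import json


    def cache_key_for(pair_options, config_a, config_b):
        # Hash every input that influences discovery, so that any change
        # to the pair or storage configuration invalidates the cached
        # collection list. Hypothetical stand-in for
        # get_collections_cache_key.
        payload = json.dumps([pair_options, config_a, config_b],
                             sort_keys=True)
        return hashlib.sha256(payload.encode('utf-8')).hexdigest()
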
@@ -150,31 +165,18 @@ def collections_for_pair(status_path, pair_name, all_pairs, all_storages,
         cli_logger.info('Discovering collections for pair {}'
                         .format(pair_name))
 
-    rv = list(_collections_for_pair_impl(status_path, pair_name, all_pairs,
-                                         all_storages))
+    rv = list(_collections_for_pair_impl(status_path, name_a, name_b,
+                                         pair_name, config_a, config_b,
+                                         pair_options))
     save_status(status_path, pair_name, data_type='collections',
                 data={'collections': rv, 'cache_key': cache_key})
     return rv
 
 
-def _collections_for_pair_impl(status_path, pair_name, all_pairs,
-                               all_storages):
-    a, b, pair_options, storage_options = all_pairs[pair_name]
-    shortcuts = set(_parse_old_config_list_value(pair_options, 'collections')
-                    or [None])
-
-    for shortcut in shortcuts:
-        for x in expand_collection(pair_name, shortcut, all_pairs,
-                                   all_storages):
-            yield x
-
-
-def expand_collection(pair_name, shortcut, all_pairs, all_storages):
-    a_name, b_name, pair_options, storage_defaults = all_pairs[pair_name]
+def _collections_for_pair_impl(status_path, name_a, name_b, pair_name,
+                               config_a, config_b, pair_options):
 
-    def _discover_from_config(storage_name):
-        config = dict(storage_defaults)
-        config.update(all_storages[storage_name])
+    def _discover_from_config(config):
         storage_type = config['type']
         cls, config = storage_class_from_config(config)
 
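Note: this split is the "internal API border" the commit title refers to: the public collections_for_pair handles caching and materializes the result, while the private _collections_for_pair_impl stays a lazy generator. A minimal sketch of that pattern, with made-up names:

    _cache = {}


    def get_items(names):
        # Public border: consult the cache, otherwise materialize the
        # generator so the result is a plain list that can be stored.
        key = tuple(sorted(names))
        if key not in _cache:
            _cache[key] = list(_get_items_impl(names))
        return _cache[key]


    def _get_items_impl(names):
        # Private border: pure logic, free to yield lazily.
        for name in names:
            yield name.lower()  # stand-in for real discovery work
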
@@ -204,24 +206,24 @@ def _get_coll(discovered, collection, storage_name):
             )
         )
 
-    if shortcut in ('from a', 'from b'):
-        a_discovered = _discover_from_config(a_name)
-        b_discovered = _discover_from_config(b_name)
-
-        for collection in (a_discovered if shortcut == 'from a'
-                           else b_discovered):
-            a_args = _get_coll(a_discovered, collection, a_name)
-            b_args = _get_coll(b_discovered, collection, b_name)
-            yield collection, a_args, b_args
-
+    shortcuts = set(_parse_old_config_list_value(pair_options, 'collections'))
+    if not shortcuts:
+        yield None, config_a, config_b
     else:
-        a_config = dict(storage_defaults)
-        a_config.update(all_storages[a_name])
+        a_discovered = _discover_from_config(config_a)
+        b_discovered = _discover_from_config(config_b)
 
-        b_config = dict(storage_defaults)
-        b_config.update(all_storages[b_name])
+        for shortcut in shortcuts:
+            if shortcut in ('from a', 'from b'):
+                collections = (a_discovered if shortcut == 'from a'
+                               else b_discovered)
+            else:
+                collections = [shortcut]
 
-        yield shortcut, a_config, b_config
+            for collection in collections:
+                a_args = _get_coll(a_discovered, collection, name_a)
+                b_args = _get_coll(b_discovered, collection, name_b)
+                yield collection, a_args, b_args
 
 
 def validate_general_section(general_config):
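
Note: the rewritten loop unifies the two shortcut forms: 'from a' and 'from b' expand to every collection discovered on that side, while any other value is treated as a literal collection name. A self-contained sketch of just that expansion rule, with hypothetical names:

    def expand_shortcut(shortcut, a_discovered, b_discovered):
        # 'from a' / 'from b' mean "every collection discovered on that
        # side"; anything else is a literal collection name.
        if shortcut == 'from a':
            return list(a_discovered)
        if shortcut == 'from b':
            return list(b_discovered)
        return [shortcut]


    print(expand_shortcut('from a', ['contacts', 'calendars'], []))
    # -> ['contacts', 'calendars']
    print(expand_shortcut('work', [], []))
    # -> ['work']
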
@@ -467,8 +469,8 @@ def worker():
             spawn_worker()
             queue.put(
                 functools.partial(prepare_pair_for_sync, pair_name=pair_name,
-                                  collections=collections, general=general,
-                                  all_pairs=all_pairs,
+                                  collections_to_sync=collections,
+                                  general=general, all_pairs=all_pairs,
                                   all_storages=all_storages,
                                   force_delete=force_delete,
                                   force_discover=force_discover))
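
Note: the job-queue pattern used here binds all arguments with functools.partial so workers can call jobs without knowing their signatures. A runnable, simplified form; the job function and its arguments are made up for illustration:

    import functools
    import queue

    jobs = queue.Queue()


    def sync_collection(pair, collection):
        print('syncing', pair, collection)


    # Bind everything up front; the worker loop just calls the job.
    jobs.put(functools.partial(sync_collection, pair='my_pair',
                               collection='calendar'))

    while not jobs.empty():
        jobs.get()()  # -> syncing my_pair calendar
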
@@ -484,8 +486,8 @@
 
 
 def prepare_pair_for_sync(queue, spawn_worker, handled_collections, pair_name,
-                          collections, general, all_pairs, all_storages,
-                          force_delete, force_discover):
+                          collections_to_sync, general, all_pairs,
+                          all_storages, force_delete, force_discover):
     key = ('prepare', pair_name)
     if key in handled_collections:
         cli_logger.warning('Already prepared {}, skipping'.format(pair_name))
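
Note: handled_collections acts as a shared "seen" set, so a job that gets queued twice becomes a cheap no-op instead of duplicate work. Schematically, again with made-up names:

    handled = set()


    def run_once(key, job):
        # Second and later invocations with the same key are skipped,
        # mirroring the ('prepare', pair_name) check above.
        if key in handled:
            return
        handled.add(key)
        job()
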
@@ -494,14 +496,15 @@ def prepare_pair_for_sync(queue, spawn_worker, handled_collections, pair_name,
 
     a_name, b_name, pair_options, storage_defaults = all_pairs[pair_name]
 
-    jobs = list(collections_for_pair(general['status_path'], pair_name,
-                                     all_pairs, all_storages,
-                                     skip_cache=force_discover))
+    all_collections = list(collections_for_pair(
+        general['status_path'], a_name, b_name, pair_name,
+        all_storages[a_name], all_storages[b_name], pair_options
+    ))
 
     # spawn one worker less because we can reuse the current one
     new_workers = -1
-    for collection, config_a, config_b in jobs:
-        if collections and collection not in collections:
+    for collection, config_a, config_b in all_collections:
+        if collections_to_sync and collection not in collections_to_sync:
             continue
         new_workers += 1
         queue.put(functools.partial(
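
Note on the "spawn one worker less" comment: the worker executing prepare_pair_for_sync picks up one of the newly queued jobs itself, so starting the counter at -1 spawns exactly one fewer worker than jobs queued. A minimal sketch of the same arithmetic, with hypothetical names:

    def spawn_extra_workers(job_count, spawn_worker):
        # The current worker handles one job itself, so N jobs need
        # only N - 1 additional workers.
        for _ in range(max(job_count - 1, 0)):
            spawn_worker()
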