Token janitor now forbids --chunksize for modification actions
For several combinations of filter criteria and actions, the --chunksize
parameter resulted in matching tokens for which no action was performed.

Fixes #1322
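
The underlying pitfall is easy to reproduce in isolation: if the action removes
tokens from the result set while the reader keeps advancing the page number,
later pages skip over matching tokens. A minimal sketch of that effect, using a
toy in-memory store and made-up helper names (none of this is token-janitor code):

def read_page(store, page, psize):
    """Return the requested 1-indexed page of the store's *current* contents."""
    start = (page - 1) * psize
    return store[start:start + psize]

store = ["tok{}".format(i) for i in range(10)]  # ten matching tokens
deleted = []

page, psize = 1, 4
while True:
    chunk = read_page(store, page, psize)
    if not chunk:
        break
    for serial in chunk:
        deleted.append(serial)
        store.remove(serial)  # the modifying action shrinks the result set...
    page += 1                 # ...while the reader still advances the page

print(len(deleted))  # 6, not 10: four matching tokens were never visited
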
Friedrich Weber committed Nov 27, 2018
1 parent 44c02e0 commit 1382e49
Showing 1 changed file with 74 additions and 49 deletions.
tools/privacyidea-token-janitor (74 additions, 49 deletions)
@@ -242,9 +242,13 @@ def build_tokenvalue_filter(key,
 
 
 def _get_tokenlist(last_auth, assigned, active, tokeninfo_key,
-                   tokeninfo_value_filter, orphaned, tokentype, serial, description, psize, page):
-    tlist = []
-    filter_tokentype = None
+                   tokeninfo_value_filter, orphaned, tokentype, serial, description,
+                   psize=None):
+    """
+    This is a generator that generates one or more lists of token objects.
+    If ``psize`` is None, this generates one big list containing all matching token objects.
+    If ``psize`` is not None, this generates several lists, each of length ``psize``.
+    """
     filter_active = None
     filter_assigned = None
 
@@ -253,48 +257,62 @@ def _get_tokenlist(last_auth, assigned, active, tokeninfo_key,
     if active is not None:
         filter_active = active.lower() == "true"
 
-    count, prev, next, tokenobj_list = get_tokens(tokentype=tokentype,
-                                                  active=filter_active,
-                                                  assigned=filter_assigned,
-                                                  page=page,
-                                                  psize=psize)
-
-    sys.stderr.write("++ Creating token object list.\n")
-    tok_count = 0
-    tok_found = 0
-    for token_obj in tokenobj_list:
-        sys.stderr.write('{0} Tokens processed / {1} Tokens found\r'.format(tok_count, tok_found))
-        sys.stderr.flush()
-        tok_count += 1
-        if last_auth and token_obj.check_last_auth_newer(last_auth):
-            continue
-        if serial and not re.search(serial, token_obj.token.serial):
-            continue
-        if description and not re.search(description,
-                                         token_obj.token.description):
-            continue
-        if tokeninfo_value_filter and tokeninfo_key:
-            value = token_obj.get_tokeninfo(tokeninfo_key)
-            # if the tokeninfo key is not even set, it does not match the filter
-            if value is None:
-                continue
-            # suppose not all comparator functions return True
-            # => at least one comparator function returns False
-            # => at least one user-supplied criterion does not match
-            # => the token object does not match the user-supplied criteria
-            if not all(comparator(value) for comparator in tokeninfo_value_filter):
-                continue
-        if orphaned and not token_obj.is_orphaned():
-            continue
-
-        tok_found += 1
-        # if everything matched, we append the token object
-        tlist.append(token_obj)
-
-    sys.stderr.write('{0} Tokens processed / {1} Tokens found\r\n'.format(tok_count, tok_found))
-    sys.stderr.write("++ Token object list created.\n")
-    sys.stderr.flush()
-    return count, prev, next, tlist
+    page = 1
+    while page is not None:
+        tlist = []
+        if psize is not None:
+            sys.stderr.write("+ Reading tokens from database in chunks of {}...\n".format(psize))
+            _, _, next_page, tokenobj_list = get_tokens(tokentype=tokentype,
+                                                        active=filter_active,
+                                                        assigned=filter_assigned,
+                                                        page=page,
+                                                        psize=psize)
+            # Read the next page in the next iteration of the loop
+            page = next_page
+        else:
+            sys.stderr.write("+ Reading tokens from database...\n")
+            tokenobj_list = get_tokens(tokentype=tokentype,
+                                       active=filter_active,
+                                       assigned=filter_assigned)
+            # Exit the loop after one iteration
+            page = None
+
+        sys.stderr.write("++ Creating token object list.\n")
+        tok_count = 0
+        tok_found = 0
+        for token_obj in tokenobj_list:
+            sys.stderr.write('{0} Tokens processed / {1} Tokens found\r'.format(tok_count, tok_found))
+            sys.stderr.flush()
+            tok_count += 1
+            if last_auth and token_obj.check_last_auth_newer(last_auth):
+                continue
+            if serial and not re.search(serial, token_obj.token.serial):
+                continue
+            if description and not re.search(description,
+                                             token_obj.token.description):
+                continue
+            if tokeninfo_value_filter and tokeninfo_key:
+                value = token_obj.get_tokeninfo(tokeninfo_key)
+                # if the tokeninfo key is not even set, it does not match the filter
+                if value is None:
+                    continue
+                # suppose not all comparator functions return True
+                # => at least one comparator function returns False
+                # => at least one user-supplied criterion does not match
+                # => the token object does not match the user-supplied criteria
+                if not all(comparator(value) for comparator in tokeninfo_value_filter):
+                    continue
+            if orphaned and not token_obj.is_orphaned():
+                continue
+
+            tok_found += 1
+            # if everything matched, we append the token object
+            tlist.append(token_obj)
+
+        sys.stderr.write('{0} Tokens processed / {1} Tokens found\r\n'.format(tok_count, tok_found))
+        sys.stderr.write("++ Token object list created.\n")
+        sys.stderr.flush()
+        yield tlist
 
 
 def export_token_data(token_list):
@@ -379,7 +397,7 @@ def export_user_data(token_list):
 @manager.option('--csv', dest='csv', action='store_true',
                 help='In case of a simple find, the output is written as CSV instead of the '
                      'formatted output.')
-@manager.option('--chunksize', default=1000,
+@manager.option('--chunksize', default=None,
                 help='Read tokens from the database in smaller chunks to perform operations.')
 def find(last_auth, assigned, active, tokeninfo_key, tokeninfo_value,
          tokeninfo_value_greater_than, tokeninfo_value_less_than,
@@ -401,13 +419,12 @@ def find(last_auth, assigned, active, tokeninfo_key, tokeninfo_value,
                                      tokeninfo_value_after,
                                      tokeninfo_value_before)
 
-    next = 1
-    chunksize = int(chunksize)
-    while next:
-        sys.stderr.write("+ Reading tokens in chunk from database...\n")
-        _, _, next, tlist = _get_tokenlist(last_auth, assigned, active, tokeninfo_key,
-                                           filter, orphaned, tokentype, serial,
-                                           description, chunksize, next)
+    if chunksize is not None:
+        chunksize = int(chunksize)
+    chunk_iterator = _get_tokenlist(last_auth, assigned, active, tokeninfo_key,
+                                    filter, orphaned, tokentype, serial,
+                                    description, chunksize)
+    for tlist in chunk_iterator:
         sys.stderr.write("+ Tokens read. Starting action.\n")
         if not action:
             if not csv:
Expand Down Expand Up @@ -439,12 +456,20 @@ def find(last_auth, assigned, active, tokeninfo_key, tokeninfo_value,
print(u"{0!s},{1!s}".format(user, len(tokens)))

elif action == "export":
if chunksize is not None:
sys.stderr.write("The argument --chunksize in conjunction with --action export"
"is currently unsupported. Exiting.\n")
sys.exit(1)
key, token_num, soup = export_pskc(tlist)
sys.stderr.write("\n{0!s} tokens exported.\n".format(token_num))
sys.stderr.write("\nThis is the AES encryption key of the token seeds.\n"
"You need this key to import the tokens again:\n\n\t{0!s}\n\n".format(key))
print("{0!s}".format(soup))
else:
if chunksize is not None:
sys.stderr.write("The argument --chunksize in conjunction with actions that modify the "
"database is currently unsupported. Exiting.\n")
sys.exit(1)
for token_obj in tlist:
try:
if action == "disable":
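
For reference, the shape of the refactoring: a single generator now serves both
the chunked and the unchunked path, so the caller in find() simply iterates over
whatever it yields. A simplified sketch of that pattern with stand-in names (not
the actual _get_tokenlist/get_tokens signatures):

def chunked(items, psize=None):
    """Yield one big list, or successive lists of up to psize items."""
    if psize is None:
        yield list(items)  # unchunked: a single list
    else:
        for start in range(0, len(items), psize):
            yield items[start:start + psize]  # chunked: one page per iteration

for chunk in chunked(list(range(10)), psize=4):
    print(len(chunk))  # 4, 4, 2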
