From f32650008bee8eff8fc8cb1529e47d5570bb237a Mon Sep 17 00:00:00 2001
From: Ambrus Simon
Date: Mon, 7 Aug 2017 15:40:40 +0200
Subject: [PATCH 1/4] add test for wechat registration reset w/o admin privileges

---
 test/integration_tests/python/test_users.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/test/integration_tests/python/test_users.py b/test/integration_tests/python/test_users.py
index ed3bf8872..55a96455e 100644
--- a/test/integration_tests/python/test_users.py
+++ b/test/integration_tests/python/test_users.py
@@ -141,9 +141,13 @@ def test_generate_api_key(data_builder, as_public):
     assert 'key' in r.json()
 
 
-def test_reset_wechat_registration(data_builder, as_admin):
+def test_reset_wechat_registration(data_builder, as_admin, as_user):
     new_user = data_builder.create_user()
 
+    # Try to reset wechat registration code without admin permissions
+    r = as_user.post('/users/' + new_user + '/reset-registration')
+    assert r.status_code == 403
+
     # Reset (create) wechat registration code for user
     r = as_admin.post('/users/' + new_user + '/reset-registration')
     assert r.ok

From e20d2b8ef8d92bf65afd11f01e5d1df3221936bd Mon Sep 17 00:00:00 2001
From: Ambrus Simon
Date: Mon, 7 Aug 2017 15:43:21 +0200
Subject: [PATCH 2/4] add custom "no cover" tag support via .coveragerc

---
 .coveragerc | 15 +++++++++++++++
 .gitignore  |  2 +-
 2 files changed, 16 insertions(+), 1 deletion(-)
 create mode 100644 .coveragerc

diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 000000000..5816f164c
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,15 @@
+# pragma: no cover - the standard coverage exclusion pattern
+# To be used for code that's impossible to reach and/or not worth the effort to test.
+
+# cover 100 - custom coverage exclusion pattern
+# Used for a one-time jump to 100% coverage to enable checking whether tests were added for new code.
+# Not meant for further use - existing exclusions are considered TODOs.
+
+# Adding code to excluded blocks should be considered carefully and avoided if possible.
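+# Example - appending the tag to a line excludes it from the coverage report
+# (as applied throughout the codebase, e.g. in api/dao/base.py):
+#     return cls(containerutil.pluralize(cont_name)) # cover 100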
+
+[report]
+exclude_lines =
+    pragma: no cover
+    cover 100
diff --git a/.gitignore b/.gitignore
index 14982fa1d..b2150de51 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,7 @@
 /runtime
 bootstrap.json
 .cache
-/.coverage*
+/.coverage
 coverage.xml
 /htmlcov
 node_modules/

From 1731c099ba390c88825b58f3807decae83aa5f1b Mon Sep 17 00:00:00 2001
From: Ambrus Simon
Date: Mon, 7 Aug 2017 16:40:37 +0200
Subject: [PATCH 3/4] add "cover 100" to remaining uncovered lines

---
 api/auth/__init__.py                |  2 +-
 api/auth/authproviders.py           |  6 ++--
 api/auth/containerauth.py           | 24 +++++++--------
 api/auth/groupauth.py               | 14 ++++-----
 api/auth/listauth.py                | 24 +++++++--------
 api/auth/userauth.py                | 14 ++++-----
 api/config.py                       |  8 ++---
 api/dao/__init__.py                 |  2 +-
 api/dao/base.py                     | 20 ++++++-------
 api/dao/consistencychecker.py       |  8 ++---
 api/dao/containerstorage.py         | 38 ++++++++++++------------
 api/dao/containerutil.py            | 26 ++++++++--------
 api/dao/dbutil.py                   |  6 ++--
 api/dao/hierarchy.py                | 46 ++++++++++++++---------------
 api/dao/liststorage.py              | 14 ++++-----
 api/download.py                     | 10 +++----
 api/files.py                        |  4 +--
 api/handlers/collectionshandler.py  | 36 +++++++++++-----------
 api/handlers/containerhandler.py    | 36 +++++++++++-----------
 api/handlers/dataexplorerhandler.py | 16 +++++-----
 api/handlers/devicehandler.py       |  2 +-
 api/handlers/grouphandler.py        | 18 +++++------
 api/handlers/listhandler.py         | 44 +++++++++++++--------------
 api/handlers/refererhandler.py      | 28 +++++++++---------
 api/handlers/reporthandler.py       | 34 ++++++++++-----------
 api/handlers/userhandler.py         | 36 +++++++++++-----------
 api/jobs/batch.py                   | 28 +++++++++---------
 api/jobs/gears.py                   | 10 +++----
 api/jobs/handlers.py                |  6 ++--
 api/jobs/jobs.py                    | 14 ++++-----
 api/jobs/queue.py                   | 26 ++++++++--------
 api/jobs/rules.py                   | 10 +++----
 api/placer.py                       | 16 +++++-----
 api/tempdir.py                      | 14 ++++-----
 api/upload.py                       | 20 ++++++------
 api/util.py                         |  4 +--
 api/validators.py                   | 10 +++----
 api/web/base.py                     | 32 ++++++++++----------
 api/web/encoder.py                  |  2 +-
 api/web/start.py                    |  4 +--
 40 files changed, 355 insertions(+), 357 deletions(-)

diff --git a/api/auth/__init__.py b/api/auth/__init__.py
index 800202b47..3ce507326 100644
--- a/api/auth/__init__.py
+++ b/api/auth/__init__.py
@@ -87,7 +87,7 @@ def check_superuser(self, *args, **kwargs):
         return handler_method(self, *args, **kwargs)
     return check_superuser
 
 
-def require_drone(handler_method):
+def require_drone(handler_method): # cover 100
     """
     A decorator to ensure the request is made as a drone.
diff --git a/api/auth/authproviders.py b/api/auth/authproviders.py index 61a4e1789..e094d71a3 100644 --- a/api/auth/authproviders.py +++ b/api/auth/authproviders.py @@ -37,7 +37,7 @@ def factory(auth_type): else: raise NotImplementedError('Auth type {} is not supported'.format(auth_type)) - def validate_code(self, code, **kwargs): + def validate_code(self, code, **kwargs): # cover 100 raise NotImplementedError def ensure_user_exists(self, uid): @@ -71,7 +71,7 @@ def set_refresh_token_if_exists(self, uid, refresh_token): if not token: # user does not have refresh token, alert the client raise APIRefreshTokenException('invalid_refresh_token') - else: + else: # cover 100 # user does have a previously saved refresh token, move on return @@ -331,7 +331,7 @@ def validate_user_api_key(key): user = config.db.users.find_one_and_update({'api_key.key': key}, {'$set': {'api_key.last_used': timestamp}}, ['_id']) if user: return user['_id'] - else: + else: # cover 100 raise APIAuthProviderException('Invalid API key') diff --git a/api/auth/containerauth.py b/api/auth/containerauth.py index b6265bc2b..9e2f99ca3 100644 --- a/api/auth/containerauth.py +++ b/api/auth/containerauth.py @@ -34,7 +34,7 @@ def f(method, _id=None, payload=None, unset_payload=None, recursive=False, r_pay if target_parent_container: has_access = _get_access(handler.uid, target_parent_container) >= INTEGER_PERMISSIONS[required_perm] else: - has_access = _get_access(handler.uid, container) >= INTEGER_PERMISSIONS[required_perm] + has_access = _get_access(handler.uid, container) >= INTEGER_PERMISSIONS[required_perm] # cover 100 elif method == 'PUT' and target_parent_container is not None: has_access = ( _get_access(handler.uid, container) >= INTEGER_PERMISSIONS['admin'] and @@ -44,14 +44,14 @@ def f(method, _id=None, payload=None, unset_payload=None, recursive=False, r_pay required_perm = 'rw' has_access = _get_access(handler.uid, container) >= INTEGER_PERMISSIONS[required_perm] else: - has_access = False + has_access = False # cover 100 if has_access: return exec_op(method, _id=_id, payload=payload, unset_payload=unset_payload, recursive=recursive, r_payload=r_payload, replace_metadata=replace_metadata, projection=projection) else: error_msg = 'user not authorized to perform a {} operation on the container.'.format(method) if additional_error_msg: - error_msg += ' '+additional_error_msg + error_msg += ' '+additional_error_msg # cover 100 handler.abort(403, error_msg) return f return g @@ -67,20 +67,20 @@ def f(method, _id=None, payload = None): if method == 'GET' and container.get('public', False): has_access = True elif method == 'GET': - has_access = _get_access(handler.uid, container) >= INTEGER_PERMISSIONS['ro'] + has_access = _get_access(handler.uid, container) >= INTEGER_PERMISSIONS['ro'] # cover 100 elif method == 'DELETE': has_access = _get_access(handler.uid, container) >= INTEGER_PERMISSIONS['admin'] elif method == 'POST': - has_access = True + has_access = True # cover 100 elif method == 'PUT': has_access = _get_access(handler.uid, container) >= INTEGER_PERMISSIONS['rw'] else: - has_access = False + has_access = False # cover 100 if has_access: return exec_op(method, _id=_id, payload=payload) else: - handler.abort(403, 'user not authorized to perform a {} operation on the container'.format(method)) + handler.abort(403, 'user not authorized to perform a {} operation on the container'.format(method)) # cover 100 return f return g @@ -92,16 +92,16 @@ def f(method, _id=None, payload=None): if method == 'GET' and 
parent_container.get('public', False): has_access = True elif method == 'GET': - has_access = access >= INTEGER_PERMISSIONS['ro'] + has_access = access >= INTEGER_PERMISSIONS['ro'] # cover 100 elif method in ['POST', 'PUT', 'DELETE']: has_access = access >= INTEGER_PERMISSIONS['rw'] else: - has_access = False + has_access = False # cover 100 if has_access: return exec_op(method, _id=_id, payload=payload) else: - handler.abort(403, 'user not authorized to perform a {} operation on parent container'.format(method)) + handler.abort(403, 'user not authorized to perform a {} operation on parent container'.format(method)) # cover 100 return f return g @@ -115,7 +115,7 @@ def f(method, _id=None, payload = None): if method == 'GET' and container.get('public', False): return exec_op(method, _id, payload) else: - handler.abort(403, 'not authorized to perform a {} operation on this container'.format(method)) + handler.abort(403, 'not authorized to perform a {} operation on this container'.format(method)) # cover 100 return f return g @@ -126,7 +126,7 @@ def f(method, query=None, user=None, public=False, projection=None): handler.abort(403, 'User ' + handler.uid + ' may not see the Projects of User ' + user['_id']) query['permissions'] = {'$elemMatch': {'_id': handler.uid}} if handler.is_true('public'): - query['$or'] = [{'public': True}, {'permissions': query.pop('permissions')}] + query['$or'] = [{'public': True}, {'permissions': query.pop('permissions')}] # cover 100 return exec_op(method, query=query, user=user, public=public, projection=projection) return f return g diff --git a/api/auth/groupauth.py b/api/auth/groupauth.py index e690a98a7..11508684b 100644 --- a/api/auth/groupauth.py +++ b/api/auth/groupauth.py @@ -10,16 +10,16 @@ def f(method, _id=None, query=None, payload=None, projection=None): if handler.superuser_request: pass elif handler.public_request: - handler.abort(400, 'public request is not valid') + handler.abort(400, 'public request is not valid') # cover 100 elif handler.user_is_admin: pass - elif method in ['DELETE', 'POST']: + elif method in ['DELETE', 'POST']: # cover 100 handler.abort(403, 'not allowed to perform operation') - elif _get_access(handler.uid, group) >= INTEGER_PERMISSIONS['admin']: + elif _get_access(handler.uid, group) >= INTEGER_PERMISSIONS['admin']: # cover 100 pass - elif method == 'GET' and _get_access(handler.uid, group) >= INTEGER_PERMISSIONS['ro']: + elif method == 'GET' and _get_access(handler.uid, group) >= INTEGER_PERMISSIONS['ro']: # cover 100 pass - else: + else: # cover 100 handler.abort(403, 'not allowed to perform operation') return exec_op(method, _id=_id, query=query, payload=payload, projection=projection) return f @@ -30,7 +30,7 @@ def g(exec_op): def f(method, query=None, projection=None): if uid is not None: if uid != handler.uid and not handler.superuser_request and not handler.user_is_admin: - handler.abort(403, 'User ' + handler.uid + ' may not see the Groups of User ' + uid) + handler.abort(403, 'User ' + handler.uid + ' may not see the Groups of User ' + uid) # cover 100 query = query or {} query['permissions._id'] = uid projection = projection or {} @@ -40,7 +40,7 @@ def f(method, query=None, projection=None): query = query or {} projection = projection or {} if handler.is_true('admin'): - query['permissions'] = {'$elemMatch': {'_id': handler.uid, 'access': 'admin'}} + query['permissions'] = {'$elemMatch': {'_id': handler.uid, 'access': 'admin'}} # cover 100 else: query['permissions._id'] = handler.uid log.debug(query) diff --git 
a/api/auth/listauth.py b/api/auth/listauth.py index 0c6451e8e..43dbc89e3 100644 --- a/api/auth/listauth.py +++ b/api/auth/listauth.py @@ -27,12 +27,12 @@ def f(method, _id, query_params=None, payload=None, exclude_params=None): elif method in ['POST', 'PUT', 'DELETE']: min_access = INTEGER_PERMISSIONS['rw'] else: - min_access = sys.maxint + min_access = sys.maxint # cover 100 if access >= min_access: return exec_op(method, _id, query_params, payload, exclude_params) else: - handler.abort(403, 'user not authorized to perform a {} operation on the list'.format(method)) + handler.abort(403, 'user not authorized to perform a {} operation on the list'.format(method)) # cover 100 return f return g @@ -44,7 +44,7 @@ def group_permissions_sublist(handler, container): def g(exec_op): def f(method, _id, query_params = None, payload = None, exclude_params=None): if method in ['GET', 'DELETE'] and query_params.get('_id') == handler.uid: - return exec_op(method, _id, query_params, payload, exclude_params) + return exec_op(method, _id, query_params, payload, exclude_params) # cover 100 elif access >= INTEGER_PERMISSIONS['admin']: return exec_op(method, _id, query_params, payload, exclude_params) else: @@ -60,11 +60,11 @@ def group_tags_sublist(handler, container): def g(exec_op): def f(method, _id, query_params = None, payload = None, exclude_params=None): if method == 'GET' and access >= INTEGER_PERMISSIONS['ro']: - return exec_op(method, _id, query_params, payload, exclude_params) + return exec_op(method, _id, query_params, payload, exclude_params) # cover 100 elif access >= INTEGER_PERMISSIONS['rw']: return exec_op(method, _id, query_params, payload, exclude_params) else: - handler.abort(403, 'user not authorized to perform a {} operation on the list'.format(method)) + handler.abort(403, 'user not authorized to perform a {} operation on the list'.format(method)) # cover 100 return f return g @@ -77,11 +77,11 @@ def g(exec_op): def f(method, _id, query_params = None, payload = None, exclude_params=None): log.debug(query_params) if method in ['GET', 'DELETE'] and query_params.get('_id') == handler.uid: - return exec_op(method, _id, query_params, payload, exclude_params) + return exec_op(method, _id, query_params, payload, exclude_params) # cover 100 elif access >= INTEGER_PERMISSIONS['admin']: return exec_op(method, _id, query_params, payload, exclude_params) else: - handler.abort(403, 'user not authorized to perform a {} operation on the list'.format(method)) + handler.abort(403, 'user not authorized to perform a {} operation on the list'.format(method)) # cover 100 return f return g @@ -94,19 +94,19 @@ def g(exec_op): def f(method, _id, query_params = None, payload = None, exclude_params=None): if access >= INTEGER_PERMISSIONS['admin']: pass - elif method == 'POST' and access >= INTEGER_PERMISSIONS['rw'] and payload['user'] == handler.uid: + elif method == 'POST' and access >= INTEGER_PERMISSIONS['rw'] and payload['user'] == handler.uid: # cover 100 pass - elif method == 'GET' and (access >= INTEGER_PERMISSIONS['ro'] or container.get('public')): + elif method == 'GET' and (access >= INTEGER_PERMISSIONS['ro'] or container.get('public')): # cover 100 pass - elif method in ['GET', 'DELETE', 'PUT'] and container['notes'][0]['user'] == handler.uid: + elif method in ['GET', 'DELETE', 'PUT'] and container['notes'][0]['user'] == handler.uid: # cover 100 pass - else: + else: # cover 100 handler.abort(403, 'user not authorized to perform a {} operation on the list'.format(method)) return exec_op(method, _id, 
query_params, payload, exclude_params) return f return g -def public_request(handler, container): +def public_request(handler, container): # cover 100 """ For public requests we allow only GET operations on containers marked as public. """ diff --git a/api/auth/userauth.py b/api/auth/userauth.py index 3c1a0519c..29d9442d4 100644 --- a/api/auth/userauth.py +++ b/api/auth/userauth.py @@ -2,28 +2,28 @@ def default(handler, user=None): def g(exec_op): def f(method, _id=None, query=None, payload=None, projection=None): if handler.public_request: - handler.abort(403, 'public request is not authorized') + handler.abort(403, 'public request is not authorized') # cover 100 elif handler.superuser_request and not (method == 'DELETE' and _id == handler.uid): pass elif handler.user_is_admin and (method == 'DELETE' and not _id == handler.uid): pass elif method == 'PUT' and handler.uid == _id: - if 'root' in payload and payload['root'] != user['root']: + if 'root' in payload and payload['root'] != user['root']: # cover 100 handler.abort(400, 'user cannot alter own admin privilege') - elif 'disabled' in payload and payload['disabled'] != user.get('disabled'): + elif 'disabled' in payload and payload['disabled'] != user.get('disabled'): # cover 100 handler.abort(400, 'user cannot alter own disabled status') - else: + else: # cover 100 pass elif method == 'PUT' and handler.user_is_admin: pass elif method == 'POST' and not handler.superuser_request and not handler.user_is_admin: - handler.abort(403, 'only admins are allowed to create users') + handler.abort(403, 'only admins are allowed to create users') # cover 100 elif method == 'POST' and (handler.superuser_request or handler.user_is_admin): pass elif method == 'GET': pass else: - handler.abort(403, 'not allowed to perform operation') + handler.abort(403, 'not allowed to perform operation') # cover 100 return exec_op(method, _id=_id, query=query, payload=payload, projection=projection) return f return g @@ -32,7 +32,7 @@ def list_permission_checker(handler): def g(exec_op): def f(method, query=None, projection=None): if handler.public_request: - handler.abort(403, 'public request is not authorized') + handler.abort(403, 'public request is not authorized') # cover 100 return exec_op(method, query=query, projection=projection) return f return g diff --git a/api/config.py b/api/config.py index c1fd29cc3..74ae621ec 100644 --- a/api/config.py +++ b/api/config.py @@ -93,7 +93,7 @@ def apply_env_variables(config): elif value.lower() == 'none': value = None config[outer_key][inner_key] = value - except Exception: # pylint: disable=broad-except + except Exception: # cover 100 # pylint: disable=broad-except # ignore uniterable keys like `created` and `modified` pass return config @@ -257,7 +257,7 @@ def get_config(): log.info('Persisting configuration') db_config = db.singletons.find_one({'_id': 'config'}) - if db_config is not None: + if db_config is not None: # cover 100 startup_config = copy.deepcopy(__config) startup_config = util.deep_update(startup_config, db_config) # Precedence order for config is env vars -> db values -> default @@ -269,7 +269,7 @@ def get_config(): db.singletons.replace_one({'_id': 'config'}, __config, upsert=True) __config_persisted = True __last_update = now - elif now - __last_update > datetime.timedelta(seconds=120): + elif now - __last_update > datetime.timedelta(seconds=120): # cover 100 log.debug('Refreshing configuration from database') __config = db.singletons.find_one({'_id': 'config'}) __last_update = now @@ -303,7 +303,7 @@ 
def mongo_pipeline(table, pipeline): output = db.command('aggregate', table, pipeline=pipeline) result = output.get('result') - if output.get('ok') != 1.0 or result is None: + if output.get('ok') != 1.0 or result is None: # cover 100 raise Exception() return result diff --git a/api/dao/__init__.py b/api/dao/__init__.py index 1adfef681..c29bb68f6 100644 --- a/api/dao/__init__.py +++ b/api/dao/__init__.py @@ -14,7 +14,7 @@ class APIPermissionException(Exception): pass class APIValidationException(Exception): - def __init__(self, errors): + def __init__(self, errors): # cover 100 super(APIValidationException, self).__init__('Validation Error.') self.errors = errors diff --git a/api/dao/base.py b/api/dao/base.py index 5d01b2d2a..cadcec31b 100644 --- a/api/dao/base.py +++ b/api/dao/base.py @@ -60,7 +60,7 @@ def factory(cls, cont_name): for subclass in cls.__subclasses__(): if subclass.__name__ == cont_storage_name: return subclass() - return cls(containerutil.pluralize(cont_name)) + return cls(containerutil.pluralize(cont_name)) # cover 100 def _fill_default_values(self, cont): if cont: @@ -83,7 +83,7 @@ def get_container(self, _id, projection=None, get_children=False): def get_children(self, _id, projection=None, uid=None): try: child_name = CHILD_MAP[self.cont_name] - except KeyError: + except KeyError: # cover 100 raise APINotFoundException('Children cannot be listed from the {0} level'.format(self.cont_name)) if not self.use_object_id: query = {containerutil.singularize(self.cont_name): _id} @@ -122,20 +122,20 @@ def exec_op(self, action, _id=None, payload=None, query=None, user=None, return self.update_el(_id, payload, unset_payload=unset_payload, recursive=recursive, r_payload=r_payload, replace_metadata=replace_metadata) if action == 'POST': return self.create_el(payload) - raise ValueError('action should be one of GET, POST, PUT, DELETE') + raise ValueError('action should be one of GET, POST, PUT, DELETE') # cover 100 def create_el(self, payload): log.debug(payload) payload = self._to_mongo(payload) try: result = self.dbc.insert_one(payload) - except pymongo.errors.DuplicateKeyError: + except pymongo.errors.DuplicateKeyError: # cover 100 raise APIConflictException('Object with id {} already exists.'.format(payload['_id'])) return result def update_el(self, _id, payload, unset_payload=None, recursive=False, r_payload=None, replace_metadata=False): replace = None - if replace_metadata: + if replace_metadata: # cover 100 replace = {} if payload.get('info') is not None: replace['info'] = util.mongo_sanitize_fields(payload.pop('info')) @@ -152,12 +152,12 @@ def update_el(self, _id, payload, unset_payload=None, recursive=False, r_payload update['$unset'] = util.mongo_dict(unset_payload) if replace is not None: - update['$set'].update(replace) + update['$set'].update(replace) # cover 100 if self.use_object_id: try: _id = bson.ObjectId(_id) - except bson.InvalidId as e: + except bson.InvalidId as e: # cover 100 raise APIStorageException(e.message) if recursive and r_payload is not None: hierarchy.propagate_changes(self.cont_name, _id, {}, {'$set': util.mongo_dict(r_payload)}) @@ -167,7 +167,7 @@ def delete_el(self, _id): if self.use_object_id: try: _id = bson.ObjectId(_id) - except bson.InvalidId as e: + except bson.InvalidId as e: # cover 100 raise APIStorageException(e.message) return self.dbc.delete_one({'_id':_id}) @@ -175,7 +175,7 @@ def get_el(self, _id, projection=None, fill_defaults=False): if self.use_object_id: try: _id = bson.ObjectId(_id) - except bson.InvalidId as e: + except 
bson.InvalidId as e: # cover 100 raise APIStorageException(e.message) cont = self._from_mongo(self.dbc.find_one(_id, projection)) if fill_defaults: @@ -185,7 +185,7 @@ def get_el(self, _id, projection=None, fill_defaults=False): def get_all_el(self, query, user, projection, fill_defaults=False): if user: if query.get('permissions'): - query['$and'] = [{'permissions': {'$elemMatch': user}}, {'permissions': query.pop('permissions')}] + query['$and'] = [{'permissions': {'$elemMatch': user}}, {'permissions': query.pop('permissions')}] # cover 100 else: query['permissions'] = {'$elemMatch': user} log.debug(query) diff --git a/api/dao/consistencychecker.py b/api/dao/consistencychecker.py index be64e8304..8bb720068 100644 --- a/api/dao/consistencychecker.py +++ b/api/dao/consistencychecker.py @@ -10,7 +10,7 @@ def noop(*args, **kwargs): # pylint: disable=unused-argument def get_list_storage_checker(action, list_name): """Build the checker for the list storage""" if list_name == ['permissions', 'permissions'] and action == 'POST': - return user_on_permission + return user_on_permission # cover 100 return noop def get_container_storage_checker(action, cont_name): @@ -29,7 +29,7 @@ def get_container_storage_checker(action, cont_name): return check_children('session', 'acquisitions') return noop -def user_on_permission(data_op, **kwargs): # pylint: disable=unused-argument +def user_on_permission(data_op, **kwargs): # cover 100 # pylint: disable=unused-argument """Check that for a permission the user already exists. Used before PUT operations. @@ -44,7 +44,7 @@ def field_on_container(parent_field, parent_container_name): Used before POST/PUT operations. """ def f(data_op, **kwargs): # pylint: disable=unused-argument - if data_op.get(parent_field) and not config.db[parent_container_name].find_one({'_id': data_op[parent_field]}): + if data_op.get(parent_field) and not config.db[parent_container_name].find_one({'_id': data_op[parent_field]}): # cover 100 raise APIConsistencyException('{} {} does not exist'.format(parent_field, data_op[parent_field])) return f @@ -64,7 +64,7 @@ def check_children(foreign_key_field, container_name): APIConsistencyException: Child document found """ def f(data_op, **kwargs): # pylint: disable=unused-argument - if config.db[container_name].find_one({foreign_key_field: data_op['_id']}): + if config.db[container_name].find_one({foreign_key_field: data_op['_id']}): # cover 100 raise APIConsistencyException( 'DELETE not allowed. 
Children {} found for {}'.format(container_name, data_op['_id']) ) diff --git a/api/dao/containerstorage.py b/api/dao/containerstorage.py index f0b55fb98..3ba5ed018 100644 --- a/api/dao/containerstorage.py +++ b/api/dao/containerstorage.py @@ -23,7 +23,7 @@ def _fill_default_values(self, cont): cont = super(GroupStorage,self)._fill_default_values(cont) if cont: if 'permissions' not in cont: - cont['permissions'] = [] + cont['permissions'] = [] # cover 100 return cont def create_el(self, payload): @@ -47,7 +47,7 @@ def update_el(self, _id, payload, unset_payload=None, recursive=False, r_payload result = super(ProjectStorage, self).update_el(_id, payload, unset_payload=unset_payload, recursive=recursive, r_payload=r_payload, replace_metadata=replace_metadata) if result.modified_count < 1: - raise APINotFoundException('Could not find project {}'.format(_id)) + raise APINotFoundException('Could not find project {}'.format(_id)) # cover 100 if payload and 'template' in payload: # We are adding/changing the project template, update session compliance @@ -69,7 +69,7 @@ def recalc_sessions_compliance(self, project_id=None): if project_id is None: # Recalc all projects projects = self.get_all_el({'template': {'$exists': True}}, None, None) - else: + else: # cover 100 project = self.get_container(project_id) if project: projects = [project] @@ -79,14 +79,14 @@ def recalc_sessions_compliance(self, project_id=None): for project in projects: template = project.get('template',{}) - if not template: + if not template: # cover 100 continue else: session_storage = SessionStorage() sessions = session_storage.get_all_el({'project': project['_id']}, None, None) for s in sessions: changed = session_storage.recalc_session_compliance(s['_id'], session=s, template=template, hard=True) - if changed: + if changed: # cover 100 changed_sessions.append(s['_id']) return changed_sessions @@ -106,14 +106,14 @@ def _fill_default_values(self, cont): def create_el(self, payload): project = ProjectStorage().get_container(payload['project']) - if project.get('template'): + if project.get('template'): # cover 100 payload['project_has_template'] = True payload['satisfies_template'] = hierarchy.is_session_compliant(payload, project.get('template')) return super(SessionStorage, self).create_el(payload) def update_el(self, _id, payload, unset_payload=None, recursive=False, r_payload=None, replace_metadata=False): session = self.get_container(_id) - if session is None: + if session is None: # cover 100 raise APINotFoundException('Could not find session {}'.format(_id)) # If the subject code is changed, change the subject id to either @@ -130,7 +130,7 @@ def update_el(self, _id, payload, unset_payload=None, recursive=False, r_payload # First check if project is being changed if payload and payload.get('project'): project = ProjectStorage().get_container(payload['project']) - if not project: + if not project: # cover 100 raise APINotFoundException("Could not find project {}".format(payload['project'])) else: project = ProjectStorage().get_container(session['project']) @@ -159,13 +159,13 @@ def recalc_session_compliance(self, session_id, session=None, template=None, har """ if session is None: session = self.get_container(session_id) - if session is None: + if session is None: # cover 100 raise APINotFoundException('Could not find session {}'.format(session_id)) if hard: # A "hard" flag will also recalc if session is tracked by a project template project = ProjectStorage().get_container(session['project']) project_has_template = 
bool(project.get('template')) - if session.get('project_has_template', False) != project_has_template: + if session.get('project_has_template', False) != project_has_template: # cover 100 if project_has_template == True: self.update_el(session['_id'], {'project_has_template': True}) else: @@ -195,14 +195,14 @@ def create_el(self, payload): def update_el(self, _id, payload, unset_payload=None, recursive=False, r_payload=None, replace_metadata=False): result = super(AcquisitionStorage, self).update_el(_id, payload, unset_payload=unset_payload, recursive=recursive, r_payload=r_payload, replace_metadata=replace_metadata) acquisition = self.get_container(_id) - if acquisition is None: + if acquisition is None: # cover 100 raise APINotFoundException('Could not find acquisition {}'.format(_id)) SessionStorage().recalc_session_compliance(acquisition['session']) return result def delete_el(self, _id): acquisition = self.get_container(_id) - if acquisition is None: + if acquisition is None: # cover 100 raise APINotFoundException('Could not find acquisition {}'.format(_id)) result = super(AcquisitionStorage, self).delete_el(_id) SessionStorage().recalc_session_compliance(acquisition['session']) @@ -242,13 +242,13 @@ def get_all_for_targets(self, target_type, target_ids, session_ids = [s['_id'] for s in SessionStorage().get_all_el(query, user, {'_id':1})] elif target_type in ['session', 'sessions']: session_ids = target_ids - else: + else: # cover 100 raise ValueError('Target type must be of type project, session or acquisition.') # Using session ids, find acquisitions query.pop('project', None) query['session'] = {'$in':session_ids} - if collection_id: + if collection_id: # cover 100 query['collections'] = collection_id return self.get_all_el(query, user, projection) @@ -324,7 +324,7 @@ def create_job_and_analysis(self, cont_name, cid, analysis, job, origin): analysis['files'] = files result = self.create_el(analysis) - if not result.acknowledged: + if not result.acknowledged: # cover 100 raise APIStorageException('Analysis not created for container {} {}'.format(cont_name, cid)) # Prepare job @@ -334,7 +334,7 @@ def create_job_and_analysis(self, cont_name, cid, analysis, job, origin): # Config manifest check gear = get_gear(job['gear_id']) - if gear.get('gear', {}).get('custom', {}).get('flywheel', {}).get('invalid', False): + if gear.get('gear', {}).get('custom', {}).get('flywheel', {}).get('invalid', False): # cover 100 raise APIConflictException('Gear marked as invalid, will not run!') validate_gear_config(gear, job.get('config')) @@ -345,7 +345,7 @@ def create_job_and_analysis(self, cont_name, cid, analysis, job, origin): destination=destination, tags=tags, config_=job.get('config'), origin=origin, batch=job.get('batch')) job_id = job.insert() - if not job_id: + if not job_id: # cover 100 # NOTE #775 remove unusable analysis - until jobs have a 'hold' state self.delete_el(analysis['_id']) raise APIStorageException(500, 'Job not created for analysis of container {} {}'.format(cont_name, cid)) @@ -368,13 +368,13 @@ def inflate_job_info(self, analysis): return analysis try: job = Job.get(analysis['job']) - except: + except: # cover 100 raise Exception('No job with id {} found.'.format(analysis['job'])) # If the job currently tied to the analysis failed, try to find one that didn't while job.state == 'failed' and job.id_ is not None: next_job = config.db.jobs.find_one({'previous_job_id': job.id_}) - if next_job is None: + if next_job is None: # cover 100 break job = Job.load(next_job) if job.id_ != 
str(analysis['job']): diff --git a/api/dao/containerutil.py b/api/dao/containerutil.py index 3002df4c3..031712ecf 100644 --- a/api/dao/containerutil.py +++ b/api/dao/containerutil.py @@ -27,7 +27,7 @@ def add_id_to_subject(subject, pid): result = None if subject is None: subject = {} - if subject.get('_id') is not None: + if subject.get('_id') is not None: # cover 100 # Ensure _id is bson ObjectId subject['_id'] = bson.ObjectId(str(subject['_id'])) return subject @@ -95,7 +95,7 @@ def get_stats(cont, cont_type): if len(result) > 0: cont['subject_count'] = result[0].get('count', 0) - else: + else: # cover 100 cont['subject_count'] = 0 return cont @@ -106,12 +106,12 @@ class ContainerReference(object): # TODO: refactor to resolve pylint warning def __init__(self, type, id): - if type not in CONT_TYPES: + if type not in CONT_TYPES: # cover 100 raise Exception('Container type must be one of {}'.format(CONT_TYPES)) - if not isinstance(type, basestring): + if not isinstance(type, basestring): # cover 100 raise Exception('Container type must be of type str') - if not isinstance(id, basestring): + if not isinstance(id, basestring): # cover 100 raise Exception('Container id must be of type str') self.type = type @@ -120,7 +120,7 @@ def __init__(self, type, id): def __eq__(self, other): return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other): # cover 100 return not self.__dict__ == other.__dict__ @classmethod @@ -140,9 +140,9 @@ def from_filereference(cls, fr): def get(self): collection = pluralize(self.type) result = config.db[collection].find_one({'_id': bson.ObjectId(self.id)}) - if result is None: + if result is None: # cover 100 raise Exception('No such {} {} in database'.format(self.type, self.id)) - if 'parent' in result: + if 'parent' in result: # cover 100 parent_collection = pluralize(result['parent']['type']) parent = config.db[parent_collection].find_one({'_id': bson.ObjectId(result['parent']['id'])}) if parent is None: @@ -161,7 +161,7 @@ def find_file(self, filename): def file_uri(self, filename): collection = pluralize(self.type) cont = self.get() - if 'parent' in cont: + if 'parent' in cont: # cover 100 par_coll, par_id = pluralize(cont['parent']['type']), cont['parent']['id'] return '/{}/{}/{}/{}/files/{}'.format(par_coll, par_id, collection, self.id, filename) return '/{}/{}/files/{}'.format(collection, self.id, filename) @@ -185,7 +185,7 @@ def __init__(self, type, id, name): def __eq__(self, other): return self.__dict__ == other.__dict__ - def __ne__(self, other): + def __ne__(self, other): # cover 100 return not self.__dict__ == other.__dict__ @classmethod @@ -210,13 +210,13 @@ def create_containerreference_from_filereference(fr): def pluralize(cont_name): if cont_name in SINGULAR_TO_PLURAL: return SINGULAR_TO_PLURAL[cont_name] - elif cont_name in PLURAL_TO_SINGULAR: + elif cont_name in PLURAL_TO_SINGULAR: # cover 100 return cont_name - raise ValueError('Could not pluralize unknown container name {}'.format(cont_name)) + raise ValueError('Could not pluralize unknown container name {}'.format(cont_name)) # cover 100 def singularize(cont_name): if cont_name in PLURAL_TO_SINGULAR: return PLURAL_TO_SINGULAR[cont_name] elif cont_name in SINGULAR_TO_PLURAL: return cont_name - raise ValueError('Could not singularize unknown container name {}'.format(cont_name)) + raise ValueError('Could not singularize unknown container name {}'.format(cont_name)) # cover 100 diff --git a/api/dao/dbutil.py b/api/dao/dbutil.py index 07b6e227f..c3b94d61d 100644 --- 
a/api/dao/dbutil.py +++ b/api/dao/dbutil.py @@ -21,11 +21,9 @@ def fault_tolerant_replace_one(coll_name, query, update, upsert=False): attempts += 1 try: result = config.db[coll_name].replace_one(query, update, upsert=upsert) - except DuplicateKeyError: + except DuplicateKeyError: # cover 100 time.sleep(random.uniform(0.01,0.05)) else: return result - raise APIStorageException('Unable to replace object.') - - + raise APIStorageException('Unable to replace object.') # cover 100 diff --git a/api/dao/hierarchy.py b/api/dao/hierarchy.py index 44dd38865..2179b7faa 100644 --- a/api/dao/hierarchy.py +++ b/api/dao/hierarchy.py @@ -32,7 +32,7 @@ def __init__(self, container, level): self.id_ = container['_id'] self.file_prefix = 'files' - def find(self, filename): + def find(self, filename): # cover 100 for f in self.container.get('files', []): if f['name'] == filename: return f @@ -88,7 +88,7 @@ def get_parent_tree(cont_name, _id): cont_name = containerutil.singularize(cont_name) - if cont_name not in ['acquisition', 'session', 'project', 'group', 'analysis']: + if cont_name not in ['acquisition', 'session', 'project', 'group', 'analysis']: # cover 100 raise ValueError('Can only construct tree from group, project, session, analysis or acquisition level') analysis_id = None @@ -117,7 +117,7 @@ def get_parent_tree(cont_name, _id): subject = session.get('subject') if subject: tree['subject'] = subject - project_id = session['project'] + project_id = session['project'] # cover 100 if cont_name == 'project' or project_id: if not project_id: project_id = bson.ObjectId(_id) @@ -125,7 +125,7 @@ def get_parent_tree(cont_name, _id): tree['project'] = project group_id = project['group'] if cont_name == 'group' or group_id: - if not group_id: + if not group_id: # cover 100 group_id = _id tree['group'] = get_container('group', group_id) @@ -168,7 +168,7 @@ def propagate_changes(cont_name, _id, query, update): elif cont_name == 'sessions': query['session'] = _id config.db.acquisitions.update_many(query, update) - else: + else: # cover 100 raise ValueError('changes can only be propagated from group, project or session level') def is_session_compliant(session, template): @@ -193,13 +193,13 @@ def check_req(cont, req_k, req_v): if re.search(req_v, v, re.IGNORECASE): found_in_list = True break - if not found_in_list: + if not found_in_list: # cover 100 return False else: # Assume regex for now if not re.search(req_v, cont_v, re.IGNORECASE): return False - else: + else: # cover 100 return False return True @@ -240,7 +240,7 @@ def check_cont(cont, reqs): return False if a_requirements: - if not session.get('_id'): + if not session.get('_id'): # cover 100 # New session, won't have any acquisitions. 
Compliance check fails return False acquisitions = list(config.db.acquisitions.find({'session': session['_id'], 'archived':{'$ne':True}})) @@ -285,7 +285,7 @@ def upsert_fileinfo(cont_name, _id, fileinfo): def update_fileinfo(cont_name, _id, fileinfo): if fileinfo.get('size') is not None: - if type(fileinfo['size']) != int: + if type(fileinfo['size']) != int: # cover 100 log.warn('Fileinfo passed with non-integer size') fileinfo['size'] = int(fileinfo['size']) @@ -302,7 +302,7 @@ def update_fileinfo(cont_name, _id, fileinfo): def add_fileinfo(cont_name, _id, fileinfo): if fileinfo.get('size') is not None: - if type(fileinfo['size']) != int: + if type(fileinfo['size']) != int: # cover 100 log.warn('Fileinfo passed with non-integer size') fileinfo['size'] = int(fileinfo['size']) @@ -317,7 +317,7 @@ def _group_id_fuzzy_match(group_id, project_label): if group_id.lower() in existing_group_ids: return group_id.lower(), project_label group_id_matches = difflib.get_close_matches(group_id, existing_group_ids, cutoff=0.8) - if len(group_id_matches) == 1: + if len(group_id_matches) == 1: # cover 100 group_id = group_id_matches[0] else: if group_id != '' or project_label != '': @@ -369,13 +369,13 @@ def _create_query(cont, cont_type, parent_type, parent_id, upload_type): return { 'uid': cont['uid'] } - else: + else: # cover 100 raise NotImplementedError('upload type {} is not handled by _create_query'.format(upload_type)) def _upsert_container(cont, cont_type, parent, parent_type, upload_type, timestamp): cont['modified'] = timestamp - if cont.get('timestamp'): + if cont.get('timestamp'): # cover 100 cont['timestamp'] = dateutil.parser.parse(cont['timestamp']) if cont_type == 'acquisition': @@ -409,7 +409,7 @@ def _upsert_container(cont, cont_type, parent, parent_type, upload_type, timesta def _get_targets(project_obj, session, acquisition, type_, timestamp): target_containers = [] - if not session: + if not session: # cover 100 return target_containers session_files = dict_fileinfos(session.pop('files', [])) @@ -427,7 +427,7 @@ def _get_targets(project_obj, session, acquisition, type_, timestamp): (TargetContainer(session_obj, 'subject'), subject_files) ) - if not acquisition: + if not acquisition: # cover 100 return target_containers acquisition_files = dict_fileinfos(acquisition.pop('files', [])) acquisition_obj = _upsert_container(acquisition, 'acquisition', session_obj, 'session', type_, timestamp) @@ -447,7 +447,7 @@ def find_existing_hierarchy(metadata, type_='uid', user=None): try: acquisition_uid = acquisition['uid'] session_uid = session['uid'] - except Exception as e: + except Exception as e: # cover 100 log.error(metadata) raise APIStorageException(str(e)) @@ -485,14 +485,14 @@ def upsert_bottom_up_hierarchy(metadata, type_='uid', user=None): _ = project['label'] _ = acquisition['uid'] session_uid = session['uid'] - except Exception as e: + except Exception as e: # cover 100 log.error(metadata) raise APIStorageException(str(e)) session_obj = config.db.sessions.find_one({'uid': session_uid}) if session_obj: # skip project creation, if session exists - if user and not has_access(user, session_obj, 'rw'): + if user and not has_access(user, session_obj, 'rw'): # cover 100 raise APIPermissionException('User {} does not have read-write access to session {}'.format(user, session_uid)) now = datetime.datetime.utcnow() @@ -537,7 +537,7 @@ def update_container_hierarchy(metadata, cid, container_type): c_metadata['timestamp'] = dateutil.parser.parse(c_metadata['timestamp']) c_metadata['modified'] = 
now c_obj = _update_container_nulls({'_id': cid}, c_metadata, container_type) - if c_obj is None: + if c_obj is None: # cover 100 raise APIStorageException('container does not exist') if container_type in ['session', 'acquisition']: _update_hierarchy(c_obj, container_type, metadata) @@ -552,14 +552,14 @@ def _update_hierarchy(container, container_type, metadata): session_obj = None if session.keys(): session['modified'] = now - if session.get('timestamp'): + if session.get('timestamp'): # cover 100 session['timestamp'] = dateutil.parser.parse(session['timestamp']) session_obj = _update_container_nulls({'_id': container['session']}, session, 'sessions') if session_obj is None: session_obj = get_container('session', container['session']) project_id = session_obj['project'] - if project_id is None: + if project_id is None: # cover 100 raise APIStorageException('Failed to find project id in session obj') project = metadata.get('project', {}) if project.keys(): @@ -569,12 +569,12 @@ def _update_hierarchy(container, container_type, metadata): def _update_container_nulls(base_query, update, container_type): coll_name = container_type if container_type.endswith('s') else container_type+'s' cont = config.db[coll_name].find_one(base_query) - if cont is None: + if cont is None: # cover 100 raise APIStorageException('Failed to find {} object using the query: {}'.format(container_type, base_query)) bulk = config.db[coll_name].initialize_unordered_bulk_op() - if update.get('metadata') and not cont.get('metadata'): + if update.get('metadata') and not cont.get('metadata'): # cover 100 # If we are trying to update metadata fields and the container metadata does not exist or is empty, # metadata can all be updated at once for efficiency m_update = util.mongo_sanitize_fields(update.pop('metadata')) diff --git a/api/dao/liststorage.py b/api/dao/liststorage.py index 0720ec26b..5adbab3c0 100644 --- a/api/dao/liststorage.py +++ b/api/dao/liststorage.py @@ -54,7 +54,7 @@ def exec_op(self, action, _id=None, query_params=None, payload=None, exclude_par if self.use_object_id: try: _id = bson.objectid.ObjectId(_id) - except bson.errors.InvalidId as e: + except bson.errors.InvalidId as e: # cover 100 raise APIStorageException(e.message) if action == 'GET': return self._get_el(_id, query_params) @@ -64,7 +64,7 @@ def exec_op(self, action, _id=None, query_params=None, payload=None, exclude_par return self._update_el(_id, query_params, payload, exclude_params) if action == 'POST': return self._create_el(_id, payload, exclude_params) - raise ValueError('action should be one of GET, POST, PUT, DELETE') + raise ValueError('action should be one of GET, POST, PUT, DELETE') # cover 100 def _create_el(self, _id, payload, exclude_params): log.debug('payload {}'.format(payload)) @@ -78,7 +78,7 @@ def _create_el(self, _id, payload, exclude_params): log.debug('query {}'.format(query)) log.debug('update {}'.format(update)) result = self.dbc.update_one(query, update) - if result.matched_count < 1: + if result.matched_count < 1: # cover 100 raise APIConflictException('Item already exists in list.') return result @@ -115,7 +115,7 @@ def _delete_el(self, _id, query_params): log.debug('update {}'.format(update)) result = self.dbc.update_one(query, update) if self.list_name is 'files' and self.cont_name in ['sessions', 'acquisitions']: - if self.cont_name == 'sessions': + if self.cont_name == 'sessions': # cover 100 session_id = _id else: session_id = AcquisitionStorage().get_container(_id).get('session') @@ -140,12 +140,12 @@ class 
StringListStorage(ListStorage): """ def get_container(self, _id, query_params=None): - if self.dbc is None: + if self.dbc is None: # cover 100 raise RuntimeError('collection not initialized before calling get_container') if self.use_object_id: try: _id = bson.objectid.ObjectId(_id) - except bson.errors.InvalidId as e: + except bson.errors.InvalidId as e: # cover 100 raise APIStorageException(e.message) query = {'_id': _id} projection = {self.list_name : 1, 'permissions': 1, 'public': 1} @@ -159,7 +159,7 @@ def exec_op(self, action, _id=None, query_params=None, payload=None, exclude_par query_params = query_params['value'] if payload is not None: payload = payload.get('value') - if payload is None: + if payload is None: # cover 100 raise ValueError('payload Key "value" should be defined') return super(StringListStorage, self).exec_op(action, _id, query_params, payload, exclude_params) diff --git a/api/download.py b/api/download.py index 2794bb7bf..4c7493d43 100644 --- a/api/download.py +++ b/api/download.py @@ -176,7 +176,7 @@ def _preflight_archivestream(self, req_spec, collection=None): # If the param `collection` holding a collection id is not None, filter out acquisitions that are not in the collection a_query = {'session': item_id} - if collection: + if collection: # cover 100 a_query['collections'] = bson.ObjectId(collection) acquisitions = config.db.acquisitions.find(a_query, ['label', 'files', 'uid', 'timestamp', 'timezone']) @@ -201,7 +201,7 @@ def _preflight_archivestream(self, req_spec, collection=None): elif item['level'] == 'analysis': analysis = config.db.analyses.find_one(base_query, ['parent', 'label', 'files', 'uid', 'timestamp']) - if not analysis: + if not analysis: # cover 100 # silently skip missing objects/objects user does not have access to continue prefix = self._path_from_container(analysis, used_subpaths, util.sanitize_string_to_filename(analysis['label'])) @@ -233,13 +233,13 @@ def _find_new_path(path, list_used_subpaths): path = None if not path and container.get('label'): path = container['label'] - if not path and container.get('timestamp'): + if not path and container.get('timestamp'): # cover 100 timezone = container.get('timezone') if timezone: path = pytz.timezone('UTC').localize(container['timestamp']).astimezone(pytz.timezone(timezone)).strftime('%Y%m%d_%H%M') else: path = container['timestamp'].strftime('%Y%m%d_%H%M') - if not path and container.get('uid'): + if not path and container.get('uid'): # cover 100 path = container['uid'] if not path and container.get('code'): path = container['code'] @@ -294,7 +294,7 @@ def download(self): self.response.app_iter = self.archivestream(ticket) self.response.headers['Content-Type'] = 'application/octet-stream' self.response.headers['Content-Disposition'] = 'attachment; filename=' + str(ticket['filename']) - for project_id in ticket['projects']: + for project_id in ticket['projects']: # cover 100 config.db.projects.update_one({'_id': project_id}, {'$inc': {'counter': 1}}) else: req_spec = self.request.json_body diff --git a/api/files.py b/api/files.py index 5c012ac33..e753500dd 100644 --- a/api/files.py +++ b/api/files.py @@ -24,7 +24,7 @@ def move_form_file_field_into_cas(file_field): Requires an augmented file field; see upload.process_upload() for details. 
""" - if not file_field.hash or not file_field.path: + if not file_field.hash or not file_field.path: # cover 100 raise Exception("Field is not a file field with hash and path") base = config.get_item('persistent', 'data_path') @@ -147,7 +147,7 @@ def _FieldStorage__write(self, line): self._FieldStorage__file = None self.file.write(line) - def get_hash(self): + def get_hash(self): # cover 100 return self.open_file.get_hash() return HashingFieldStorage diff --git a/api/handlers/collectionshandler.py b/api/handlers/collectionshandler.py index 68577a186..dbe216137 100644 --- a/api/handlers/collectionshandler.py +++ b/api/handlers/collectionshandler.py @@ -49,7 +49,7 @@ def post(self): if result.acknowledged: return {'_id': result.inserted_id} - else: + else: # cover 100 self.abort(404, 'Element not added in collection {}'.format(self.uid)) @verify_payload_exists @@ -59,7 +59,7 @@ def put(self, **kwargs): mongo_validator, payload_validator = self._get_validators() payload = self.request.json_body or {} - if not payload: + if not payload: # cover 100 self.abort(400, 'PUT request body cannot be empty') contents = payload.pop('contents', None) payload_validator(payload, 'PUT') @@ -67,33 +67,33 @@ def put(self, **kwargs): payload['modified'] = datetime.datetime.utcnow() try: result = mongo_validator(permchecker(self.storage.exec_op))('PUT', _id=_id, payload=payload) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if result.modified_count == 1: self._add_contents(contents, _id) return {'modified': result.modified_count} - else: + else: # cover 100 self.abort(404, 'Element not updated in collection {} {}'.format(self.storage.cont_name, _id)) def _add_contents(self, contents, _id): - if not contents: + if not contents: # cover 100 return acq_ids = [] for item in contents['nodes']: - if not bson.ObjectId.is_valid(item.get('_id')): + if not bson.ObjectId.is_valid(item.get('_id')): # cover 100 self.abort(400, 'not a valid object id') item_id = bson.ObjectId(item['_id']) - if item['level'] == 'project': + if item['level'] == 'project': # cover 100 sess_ids = [s['_id'] for s in config.db.sessions.find({'project': item_id}, [])] acq_ids += [a['_id'] for a in config.db.acquisitions.find({'session': {'$in': sess_ids}}, [])] elif item['level'] == 'session': acq_ids += [a['_id'] for a in config.db.acquisitions.find({'session': item_id}, [])] - elif item['level'] == 'acquisition': + elif item['level'] == 'acquisition': # cover 100 acq_ids += [item_id] operator = '$addToSet' if contents['operation'] == 'add' else '$pull' log.info(' '.join(['collection', _id, operator, str(acq_ids)])) - if not bson.ObjectId.is_valid(_id): + if not bson.ObjectId.is_valid(_id): # cover 100 self.abort(400, 'not a valid object id') config.db.acquisitions.update_many({'_id': {'$in': acq_ids}}, {operator: {'collections': bson.ObjectId(_id)}}) @@ -106,7 +106,7 @@ def get_all(self): projection = self.container_handler_configurations['collections']['list_projection'] if self.superuser_request: permchecker = always_ok - elif self.public_request: + elif self.public_request: # cover 100 permchecker = containerauth.list_public_request else: permchecker = containerauth.list_permission_checker(self) @@ -114,7 +114,7 @@ def get_all(self): results = permchecker(self.storage.exec_op)('GET', query=query, public=self.public_request, projection=projection) if not self.superuser_request and not self.is_true('join_avatars'): self._filter_all_permissions(results, self.uid) - if 
self.is_true('join_avatars'): + if self.is_true('join_avatars'): # cover 100 results = ContainerHandler.join_user_info(results) for result in results: if self.is_true('stats'): @@ -134,10 +134,10 @@ def curators(self): def get_sessions(self, cid): """Return the list of sessions in a collection.""" - if not bson.ObjectId.is_valid(cid): + if not bson.ObjectId.is_valid(cid): # cover 100 self.abort(400, 'not a valid object id') _id = bson.ObjectId(cid) - if not self.storage.dbc.find_one({'_id': _id}): + if not self.storage.dbc.find_one({'_id': _id}): # cover 100 self.abort(404, 'no such Collection') agg_res = config.db.acquisitions.aggregate([ {'$match': {'collections': _id}}, @@ -151,7 +151,7 @@ def get_sessions(self, cid): log.debug(projection) sessions = list(config.db.sessions.find(query, projection)) self._filter_all_permissions(sessions, self.uid) - if self.is_true('measurements'): + if self.is_true('measurements'): # cover 100 self._add_session_measurements(sessions) for sess in sessions: sess = self.handle_origin(sess) @@ -159,18 +159,18 @@ def get_sessions(self, cid): def get_acquisitions(self, cid): """Return the list of acquisitions in a collection.""" - if not bson.ObjectId.is_valid(cid): + if not bson.ObjectId.is_valid(cid): # cover 100 self.abort(400, 'not a valid object id') _id = bson.ObjectId(cid) - if not self.storage.dbc.find_one({'_id': _id}): + if not self.storage.dbc.find_one({'_id': _id}): # cover 100 self.abort(404, 'no such Collection') query = {'collections': _id} if not self.is_true('archived'): query['archived'] = {'$ne': True} sid = self.get_param('session', '') - if bson.ObjectId.is_valid(sid): + if bson.ObjectId.is_valid(sid): # cover 100 query['session'] = bson.ObjectId(sid) - elif sid != '': + elif sid != '': # cover 100 self.abort(400, sid + ' is not a valid ObjectId') projection = self.container_handler_configurations['acquisitions']['list_projection'] acquisitions = list(config.db.acquisitions.find(query, projection)) diff --git a/api/handlers/containerhandler.py b/api/handlers/containerhandler.py index 26e55e0f5..4ec78fa3d 100644 --- a/api/handlers/containerhandler.py +++ b/api/handlers/containerhandler.py @@ -100,9 +100,9 @@ def get(self, cont_name, **kwargs): try: # This line exec the actual get checking permissions using the decorator permchecker result = permchecker(self.storage.exec_op)('GET', _id) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) - if result is None: + if result is None: # cover 100 self.abort(404, 'Element not found in container {} {}'.format(self.storage.cont_name, _id)) if not self.superuser_request and not self.is_true('join_avatars'): self._filter_permissions(result, self.uid) @@ -239,7 +239,7 @@ def get_jobs(self, cid): acquisitions = cont.get('acquisitions', []) results = [] - if not acquisitions and not analyses: + if not acquisitions and not analyses: # cover 100 # no jobs return {'jobs': results} @@ -275,7 +275,7 @@ def get_jobs(self, cid): jobs.sort(key=lambda j: j.created) response = {'jobs': jobs} - if join_gears: + if join_gears: # cover 100 response['gears'] = {} for g_id in seen_gears: response['gears'][g_id] = get_gear(g_id) @@ -297,7 +297,7 @@ def get_all(self, cont_name, par_cont_name=None, par_id=None): if self.is_true('info'): projection.pop('info') if self.is_true('permissions'): - if not projection: + if not projection: # cover 100 projection = None # select which permission filter will be applied to the list of results. 
@@ -310,10 +310,10 @@ def get_all(self, cont_name, par_cont_name=None, par_id=None): # if par_cont_name (parent container name) and par_id are not null we return only results # within that container if par_cont_name: - if not par_id: + if not par_id: # cover 100 self.abort(500, 'par_id is required when par_cont_name is provided') if self.use_object_id.get(par_cont_name): - if not bson.ObjectId.is_valid(par_id): + if not bson.ObjectId.is_valid(par_id): # cover 100 self.abort(400, 'not a valid object id') par_id = bson.ObjectId(par_id) query = {par_cont_name[:-1]: par_id} @@ -323,7 +323,7 @@ query['archived'] = {'$ne': True} # this request executes the actual request filtering containers based on the user permissions results = permchecker(self.storage.exec_op)('GET', query=query, public=self.public_request, projection=projection) - if results is None: + if results is None: # cover 100 self.abort(404, 'No elements found in container {}'.format(self.storage.cont_name)) # return only permissions of the current user unless superuser or getting avatars if not self.superuser_request and not self.is_true('join_avatars'): @@ -383,9 +383,9 @@ def get_all_for_user(self, cont_name, uid): } try: results = permchecker(self.storage.exec_op)('GET', query=query, user=user, projection=projection) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) - if results is None: + if results is None: # cover 100 self.abort(404, 'Element not found in container {} {}'.format(self.storage.cont_name, uid)) self._filter_all_permissions(results, uid) return results @@ -427,7 +427,7 @@ def post(self, cont_name): result = mongo_validator(permchecker(self.storage.exec_op))('POST', payload=payload) if result.acknowledged: return {'_id': result.inserted_id} - else: + else: # cover 100 self.abort(404, 'Element not added in container {}'.format(self.storage.cont_name)) @validators.verify_payload_exists @@ -482,12 +482,12 @@ def put(self, cont_name, **kwargs): # and checking permissions using respectively the two decorators, mongo_validator and permchecker result = mongo_validator(permchecker(self.storage.exec_op))('PUT', _id=_id, payload=payload, recursive=rec, r_payload=r_payload, replace_metadata=replace_metadata) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if result.modified_count == 1: return {'modified': result.modified_count} - else: + else: # cover 100 self.abort(404, 'Element not updated in container {} {}'.format(self.storage.cont_name, _id)) def delete(self, cont_name, **kwargs): @@ -506,12 +506,12 @@ def delete(self, cont_name, **kwargs): try: # This line exec the actual delete checking permissions using the decorator permchecker result = permchecker(self.storage.exec_op)('DELETE', _id) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if result.deleted_count == 1: return {'deleted': result.deleted_count} - else: + else: # cover 100 self.abort(404, 'Element not removed from container {} {}'.format(self.storage.cont_name, _id)) def get_groups_with_project(self): @@ -586,7 +586,7 @@ def _get_parent_container(self, payload): if parent_id: parent_storage.dbc = config.db[parent_storage.cont_name] parent_container = parent_storage.get_container(parent_id) - if parent_container is None: + if parent_container is None: # cover 100 self.abort(404, 'Element {} not found in container 
{}'.format(parent_id, parent_storage.cont_name)) parent_container['cont_name'] = parent_storage.cont_name[:-1] else: @@ -596,11 +596,11 @@ def _get_parent_container(self, payload): def _get_container(self, _id, projection=None, get_children=False): try: container = self.storage.get_container(_id, projection=projection, get_children=get_children) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if container is not None: return container - else: + else: # cover 100 self.abort(404, 'Element {} not found in container {}'.format(_id, self.storage.cont_name)) def _get_permchecker(self, container=None, parent_container=None): diff --git a/api/handlers/dataexplorerhandler.py b/api/handlers/dataexplorerhandler.py index 9df8a303d..53483a277 100644 --- a/api/handlers/dataexplorerhandler.py +++ b/api/handlers/dataexplorerhandler.py @@ -313,7 +313,7 @@ def _parse_request(self, request_type='search'): if "null" in v: if isinstance(v, list): v.remove("null") - elif isinstance(v, str): + elif isinstance(v, str): # cover 100 v = None null_filter = { 'bool': { @@ -336,7 +336,7 @@ def _parse_request(self, request_type='search'): ] } } - if len(k.split('.')) > 1: + if len(k.split('.')) > 1: # cover 100 null_filter['bool']['should'][0]['bool']['must'].append({'exists': {'field': k.split('.')[0]}}) if v: null_filter['bool']['should'].append({'terms': {k+'.raw': v}}) @@ -391,7 +391,7 @@ def aggregate_field_values(self): } } } - if not filters: + if not filters: # cover 100 # TODO add non-user auth support (#865) body['query']['bool'].pop('filter') if search_string is None: @@ -466,7 +466,7 @@ def search_fields(self): doc_type='flywheel_field', body=es_query ) - except TransportError as e: + except TransportError as e: # cover 100 config.log.warning('Fields not yet indexed for search: {}'.format(e)) return [] @@ -483,7 +483,7 @@ def search(self): size = self.request.params.get('size', 100) results = self._run_query(self._construct_query(return_type, search_string, filters, size), return_type) response = {'results': results} - if self.is_true('facets'): + if self.is_true('facets'): # cover 100 response['facets'] = self.get_facets() return response @@ -634,7 +634,7 @@ def _handle_properties(cls, properties, current_field_name): if field_name in ignore_fields: continue - elif 'properties' in field: + elif 'properties' in field: # cover 100 new_curr_field = current_field_name+'.'+field_name if current_field_name != '' else field_name cls._handle_properties(field['properties'], new_curr_field) @@ -671,10 +671,10 @@ def _handle_properties(cls, properties, current_field_name): facet_doc_count = sum([bucket['doc_count'] for bucket in aggs['buckets']]) total_doc_count = other_doc_count+facet_doc_count - if other_doc_count == 0 and facet_doc_count > 0: + if other_doc_count == 0 and facet_doc_count > 0: # cover 100 # All values fit in 15 or fewer buckets facet_status = True - elif other_doc_count > 0 and facet_doc_count > 0 and (facet_doc_count/total_doc_count) > 0.85: + elif other_doc_count > 0 and facet_doc_count > 0 and (facet_doc_count/total_doc_count) > 0.85: # cover 100 # Greater than 85% of values fit in 15 or fewer buckets facet_status = True else: diff --git a/api/handlers/devicehandler.py b/api/handlers/devicehandler.py index 74556537d..94380231d 100644 --- a/api/handlers/devicehandler.py +++ b/api/handlers/devicehandler.py @@ -47,7 +47,7 @@ def post(self): result = self.storage.update_el(device_id, payload) if result.matched_count == 1: return 
{'modified': result.modified_count} - else: + else: # cover 100 raise APINotFoundException('Device with id {} not found, state not updated'.format(device_id)) # NOTE method not routed in api.py diff --git a/api/handlers/grouphandler.py b/api/handlers/grouphandler.py index 2b55f9fd8..28067e111 100644 --- a/api/handlers/grouphandler.py +++ b/api/handlers/grouphandler.py @@ -20,21 +20,21 @@ def get(self, _id): result = permchecker(self.storage.exec_op)('GET', _id) if not self.superuser_request and not self.is_true('join_avatars'): self._filter_permissions([result], self.uid) - if self.is_true('join_avatars'): + if self.is_true('join_avatars'): # cover 100 ContainerHandler.join_user_info([result]) return result def delete(self, _id): - if _id == 'unknown': + if _id == 'unknown': # cover 100 self.abort(400, 'The group "unknown" can\'t be deleted as it is integral within the API') group = self._get_group(_id) permchecker = groupauth.default(self, group) result = permchecker(self.storage.exec_op)('DELETE', _id) if result.deleted_count == 1: return {'deleted': result.deleted_count} - else: + else: # cover 100 self.abort(404, 'Group {} not removed'.format(_id)) - return result + return result # cover 100 def handle_projects(self, result): """ @@ -50,7 +50,7 @@ def get_all(self, uid=None): results = permchecker(self.storage.exec_op)('GET', projection=projection) if not self.superuser_request and not self.is_true('join_avatars'): self._filter_permissions(results, self.uid) - if self.is_true('join_avatars'): + if self.is_true('join_avatars'): # cover 100 results = ContainerHandler.join_user_info(results) if 'projects' in self.request.params.getall('join'): for result in results: @@ -71,7 +71,7 @@ def put(self, _id): result = mongo_validator(permchecker(self.storage.exec_op))('PUT', _id=_id, payload=payload) if result.modified_count == 1: return {'modified': result.modified_count} - else: + else: # cover 100 self.abort(404, 'Group {} not updated'.format(_id)) def post(self): @@ -88,17 +88,17 @@ def post(self): if result.acknowledged: if result.upserted_id: return {'_id': result.upserted_id} - else: + else: # cover 100 self.response.status_int = 201 return {'_id': payload['_id']} - else: + else: # cover 100 self.abort(404, 'Group {} not updated'.format(payload['_id'])) def _get_group(self, _id): group = self.storage.get_container(_id) if group is not None: return group - else: + else: # cover 100 self.abort(404, 'Group {} not found'.format(_id)) def _filter_permissions(self, results, uid): diff --git a/api/handlers/listhandler.py b/api/handlers/listhandler.py index f488dae3f..5bdb8d740 100644 --- a/api/handlers/listhandler.py +++ b/api/handlers/listhandler.py @@ -120,7 +120,7 @@ def get(self, cont_name, list_name, **kwargs): permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs) try: result = keycheck(permchecker(storage.exec_op))('GET', _id, query_params=kwargs) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if result is None: @@ -137,7 +137,7 @@ def post(self, cont_name, list_name, **kwargs): if result.modified_count == 1: return {'modified':result.modified_count} - else: + else: # cover 100 self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id)) def put(self, cont_name, list_name, **kwargs): @@ -148,7 +148,7 @@ def put(self, cont_name, list_name, **kwargs): payload_validator(payload, 'PUT') try: result = 
keycheck(mongo_validator(permchecker(storage.exec_op)))('PUT', _id=_id, query_params=kwargs, payload=payload) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) # abort if the query of the update wasn't able to find any matching documents if result.matched_count == 0: @@ -161,11 +161,11 @@ def delete(self, cont_name, list_name, **kwargs): permchecker, storage, _, _, keycheck = self._initialize_request(cont_name, list_name, _id, query_params=kwargs) try: result = keycheck(permchecker(storage.exec_op))('DELETE', _id, query_params=kwargs) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if result.modified_count == 1: return {'modified': result.modified_count} - else: + else: # cover 100 self.abort(404, 'Element not removed from list {} in container {} {}'.format(storage.list_name, storage.cont_name, _id)) def _initialize_request(self, cont_name, list_name, _id, query_params=None): @@ -187,7 +187,7 @@ def _initialize_request(self, cont_name, list_name, _id, query_params=None): if container is not None: if self.superuser_request: permchecker = always_ok - elif self.public_request: + elif self.public_request: # cover 100 permchecker = listauth.public_request(self, container) else: permchecker = permchecker(self, container) @@ -236,7 +236,7 @@ def _propagate_project_permissions(self, cont_name, _id): 'permissions': config.db.projects.find_one({'_id': oid},{'permissions': 1})['permissions'] }} hierarchy.propagate_changes(cont_name, oid, {}, update) - except APIStorageException: + except APIStorageException: # cover 100 self.abort(500, 'permissions not propagated from project {} to sessions'.format(_id)) @@ -255,13 +255,13 @@ def post(self, cont_name, list_name, **kwargs): payload['_id'] = payload.get('_id') or str(bson.objectid.ObjectId()) payload['user'] = payload.get('user', self.uid) payload['created'] = payload['modified'] = datetime.datetime.utcnow() - if payload.get('timestamp'): + if payload.get('timestamp'): # cover 100 payload['timestamp'] = dateutil.parser.parse(payload['timestamp']) result = keycheck(mongo_validator(permchecker(storage.exec_op)))('POST', _id=_id, payload=payload) if result.modified_count == 1: return {'modified':result.modified_count} - else: + else: # cover 100 self.abort(404, 'Element not added in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id)) def put(self, cont_name, list_name, **kwargs): @@ -271,11 +271,11 @@ def put(self, cont_name, list_name, **kwargs): payload = self.request.json_body input_validator(payload, 'PUT') payload['modified'] = datetime.datetime.utcnow() - if payload.get('timestamp'): + if payload.get('timestamp'): # cover 100 payload['timestamp'] = dateutil.parser.parse(payload['timestamp']) result = keycheck(mongo_validator(permchecker(storage.exec_op)))('PUT', _id=_id, query_params=kwargs, payload=payload) # abort if the query of the update wasn't able to find any matching documents - if result.matched_count == 0: + if result.matched_count == 0: # cover 100 self.abort(404, 'Element not updated in list {} of container {} {}'.format(storage.list_name, storage.cont_name, _id)) else: return {'modified':result.modified_count} @@ -315,7 +315,7 @@ def _propagate_group_tags(self, cont_name, _id, query, update): """ try: hierarchy.propagate_changes(cont_name, _id, query, update) - except APIStorageException: + except APIStorageException: # cover 100 self.abort(500, 'tag change not propagated from group 
{}'.format(_id)) @@ -329,7 +329,7 @@ def __init__(self, request=None, response=None): def _check_ticket(self, ticket_id, _id, filename): ticket = config.db.downloads.find_one({'_id': ticket_id}) - if not ticket: + if not ticket: # cover 100 self.abort(404, 'no such ticket') if ticket['target'] != _id or ticket['filename'] != filename or ticket['ip'] != self.request.client_addr: self.abort(400, 'ticket not for this resource or source IP') @@ -404,7 +404,7 @@ def get(self, cont_name, list_name, **kwargs): ticket = None if ticket_id: ticket = self._check_ticket(ticket_id, _id, filename) - if not self.origin.get('id'): + if not self.origin.get('id'): # cover 100 # If we don't have an origin with this request, use the ticket's origin self.origin = ticket.get('origin') permchecker = always_ok @@ -412,7 +412,7 @@ def get(self, cont_name, list_name, **kwargs): # Grab fileinfo from db try: fileinfo = keycheck(permchecker(storage.exec_op))('GET', _id, query_params=kwargs) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if not fileinfo: self.abort(404, 'no such file') @@ -451,14 +451,14 @@ def get(self, cont_name, list_name, **kwargs): if not ticket.get('logged', False): self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=_id) config.db.downloads.update_one({'_id': ticket_id}, {'$set': {'logged': True}}) - else: + else: # cover 100 self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=_id) # Authenticated or ticketed download request else: self.response.app_iter = open(filepath, 'rb') self.response.headers['Content-Length'] = str(fileinfo['size']) # must be set after setting app_iter - if self.is_true('view'): + if self.is_true('view'): # cover 100 self.response.headers['Content-Type'] = str(fileinfo.get('mimetype', 'application/octet-stream')) else: self.response.headers['Content-Type'] = 'application/octet-stream' @@ -486,7 +486,7 @@ def post(self, cont_name, list_name, **kwargs): if cont_name.endswith('s'): cont_name_plural = cont_name cont_name = cont_name[:-1] - else: + else: # cover 100 cont_name_plural = cont_name + 's' # Authorize @@ -504,11 +504,11 @@ def delete(self, cont_name, list_name, **kwargs): self.log_user_access(AccessType.delete_file, cont_name=cont_name, cont_id=_id) try: result = keycheck(storage.exec_op)('DELETE', _id, query_params=kwargs) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if result.modified_count == 1: return {'modified': result.modified_count} - else: + else: # cover 100 self.abort(404, 'Element not removed from list {} in container {} {}'.format(storage.list_name, storage.cont_name, _id)) def _check_packfile_token(self, project_id, token_id, check_user=True): @@ -534,7 +534,7 @@ def _check_packfile_token(self, project_id, token_id, check_user=True): # Check for correct token result = config.db['tokens'].find_one(query) - if result is None: + if result is None: # cover 100 raise Exception('Invalid or expired upload token') # Update token timestamp @@ -569,7 +569,7 @@ def packfile_start(self, cont_name, **kwargs): for p in perms: if p['_id'] == self.uid and p['access'] in ('rw', 'admin'): break - else: + else: # cover 100 raise Exception('Not authorized') timestamp = datetime.datetime.utcnow() diff --git a/api/handlers/refererhandler.py b/api/handlers/refererhandler.py index 41056c169..5009f58de 100644 --- a/api/handlers/refererhandler.py +++ b/api/handlers/refererhandler.py @@ -47,7 +47,7 @@ def 
update_validator(self): def get_permchecker(self, parent_container): if self.superuser_request: return always_ok - elif self.public_request: + elif self.public_request: # cover 100 return containerauth.public_request(self, container=parent_container) else: # NOTE The handler (self) is passed implicitly @@ -75,7 +75,7 @@ def post(self, cont_name, cid): permchecker(noop)('POST') if self.is_true('job'): - if cont_name != 'sessions': + if cont_name != 'sessions': # cover 100 self.abort(400, 'Analysis created via a job must be at the session level') payload = self.request.json_body @@ -93,7 +93,7 @@ def post(self, cont_name, cid): if result.acknowledged: return {'_id': result.inserted_id} - else: + else: # cover 100 self.abort(500, 'Analysis not added for container {} {}'.format(cont_name, cid)) @validators.verify_payload_exists @@ -115,7 +115,7 @@ def put(self, cont_name, **kwargs): if result.modified_count == 1: return {'modified': result.modified_count} - else: + else: # cover 100 self.abort(404, 'Element not updated in container {} {}'.format(self.storage.cont_name, _id)) def get(self, cont_name, cid, _id): @@ -134,11 +134,11 @@ def delete(self, cont_name, cid, _id): try: result = self.storage.delete_el(_id) - except APIStorageException as e: + except APIStorageException as e: # cover 100 self.abort(400, e.message) if result.deleted_count == 1: return {'deleted': result.deleted_count} - else: + else: # cover 100 self.abort(404, 'Analysis {} not removed from container {} {}'.format(_id, cont_name, cid)) @@ -246,7 +246,7 @@ def download(self, cont_name, cid, _id, filename=None): permchecker(noop)('GET') elif ticket_id != '': ticket = self._check_ticket(ticket_id, cid, filename) - if not self.origin.get('id'): + if not self.origin.get('id'): # cover 100 self.origin = ticket.get('origin') analysis = self.storage.get_container(_id) @@ -279,9 +279,9 @@ def download(self, cont_name, cid, _id, filename=None): if not filename: if ticket: self._send_batch(ticket) - else: + else: # cover 100 self.abort(400, 'batch downloads require a ticket') - elif not fileinfo: + elif not fileinfo: # cover 100 self.abort(404, "{} doesn't exist".format(filename)) else: fileinfo = fileinfo[0] @@ -315,14 +315,14 @@ def download(self, cont_name, cid, _id, filename=None): if not ticket.get('logged', False): self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=cid) config.db.downloads.update_one({'_id': ticket_id}, {'$set': {'logged': True}}) - else: + else: # cover 100 self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=cid) # Request to download the file itself else: self.response.app_iter = open(filepath, 'rb') self.response.headers['Content-Length'] = str(fileinfo['size']) # must be set after setting app_iter - if self.is_true('view'): + if self.is_true('view'): # cover 100 self.response.headers['Content-Type'] = str(fileinfo.get('mimetype', 'application/octet-stream')) else: self.response.headers['Content-Type'] = 'application/octet-stream' @@ -334,7 +334,7 @@ def download(self, cont_name, cid, _id, filename=None): if not ticket.get('logged', False): self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=cid) config.db.downloads.update_one({'_id': ticket_id}, {'$set': {'logged': True}}) - else: + else: # cover 100 self.log_user_access(AccessType.download_file, cont_name=cont_name, cont_id=cid) @@ -342,7 +342,7 @@ def _check_ticket(self, ticket_id, _id, filename): ticket = config.db.downloads.find_one({'_id': ticket_id}) if not ticket: self.abort(404, 
'no such ticket') - if ticket['ip'] != self.request.client_addr: + if ticket['ip'] != self.request.client_addr: # cover 100 self.abort(400, 'ticket not for this source IP') if not filename: return self._check_ticket_for_batch(ticket) @@ -352,7 +352,7 @@ def _check_ticket(self, ticket_id, _id, filename): def _check_ticket_for_batch(self, ticket): - if ticket.get('type') != 'batch': + if ticket.get('type') != 'batch': # cover 100 self.abort(400, 'ticket not for this resource') return ticket diff --git a/api/handlers/reporthandler.py b/api/handlers/reporthandler.py index f935f0e25..c9308c4fb 100644 --- a/api/handlers/reporthandler.py +++ b/api/handlers/reporthandler.py @@ -69,7 +69,7 @@ def get(self, report_type): report = report_class(self.request.params) except APIReportParamsException as e: self.abort(400, e.message) - else: + else: # cover 100 raise NotImplementedError('Report type {} is not supported'.format(report_type)) if self.superuser_request or report.user_can_generate(self.uid): @@ -84,7 +84,7 @@ def get(self, report_type): for doc in report.build(): writer.writerow(doc) - except APIReportException as e: + except APIReportException as e: # cover 100 self.abort(404, str(e)) # Need to close and reopen file to flush buffer into file csv_file.close() @@ -111,13 +111,13 @@ def user_can_generate(self, uid): """ Check if user has required permissions to generate report """ - raise NotImplementedError() + raise NotImplementedError() # cover 100 def build(self): """ Build and return a json report """ - raise NotImplementedError() + raise NotImplementedError() # cover 100 @staticmethod def _get_result_list(output): @@ -148,7 +148,7 @@ def _get_result(output): if len(results) == 1: return results[0] - raise APIReportException + raise APIReportException # cover 100 @@ -333,7 +333,7 @@ def _process_demo_results(self, results, grid): race = cell['race'] ethnicity = cell['ethnicity'] sex = cell['sex'] - except Exception as e: + except Exception as e: # cover 100 raise APIReportException('Demographics aggregation was malformed: {}'.format(e)) # Null or unrecognized values are listed as UNR default @@ -345,7 +345,7 @@ def _process_demo_results(self, results, grid): sex = UNR else: sex = sex.capitalize() # We store sex as lowercase in the db - if sex not in grid[race][ethnicity]: + if sex not in grid[race][ethnicity]: # cover 100 sex = UNR # Tally up @@ -415,7 +415,7 @@ def build(self): ] try: result = self._get_result(config.db.command('aggregate', 'sessions', pipeline=pipeline)) - except APIReportException: + except APIReportException: # cover 100 # Edge case when none of the subjects have a sex field result = {} @@ -455,7 +455,7 @@ def build(self): ] try: result = self._get_result(config.db.command('aggregate', 'sessions', pipeline=pipeline)) - except APIReportException: + except APIReportException: # cover 100 # Edge case when none of the subjects have an age field result = {} @@ -502,7 +502,7 @@ def __init__(self, params): uid = params.get('user') limit= params.get('limit', 100) subject = params.get('subject', None) - if params.get('bin') == 'true': + if params.get('bin') == 'true': # cover 100 access_types = params.get('access_types', []) else: access_types = params.getall('access_types') @@ -525,7 +525,7 @@ def __init__(self, params): elif limit > 10000: raise APIReportParamsException('Limit exceeds 10,000 entries, please contact admin to run script.') for access_type in access_types: - if access_type not in AccessTypeList: + if access_type not in AccessTypeList: # cover 100 raise 
APIReportParamsException('Not a valid access type') self.start_date = start_date @@ -676,7 +676,7 @@ def _create_default(self, month=None, year=None, project=None, ignore_minmax=Fal date = dateutil.parser.parse(year+'-'+month+'-01T00:00.000Z') if self.first_month is None or date < self.first_month: self.first_month = date - if self.last_month is None or date > self.last_month: + if self.last_month is None or date > self.last_month: # cover 100 self.last_month = date return obj @@ -748,7 +748,7 @@ def _build_month_report(self, base_query): key = year+month # Check to see if we already have a record for this month/year combo, create and update first/last if not - if key not in report: + if key not in report: # cover 100 report[key] = self._create_default(month=month, year=year) report[key]['session_count'] = r['session_count'] @@ -782,7 +782,7 @@ def _build_month_report(self, base_query): key = year+month # Check to see if we already have a record for this month/year combo, create and update first/last if not - if key not in report: + if key not in report: # cover 100 report[key] = self._create_default(month=month, year=year) report[key]['file_mbs'] += r['mb_total'] @@ -801,7 +801,7 @@ def _build_month_report(self, base_query): except APIReportException: results = [] - for r in results: + for r in results: # cover 100 month = str(r['_id']['month']) year = str(r['_id']['year']) key = year+month @@ -840,7 +840,7 @@ def _build_month_report(self, base_query): # We don't have a record for this month/year combo, create a zero'd out version final_report_list.append(self._create_default(month=str(curr_month), year=str(curr_year), ignore_minmax=True)) curr_month += 1 - if curr_month > 12: + if curr_month > 12: # cover 100 curr_year += 1 curr_month = 1 @@ -934,7 +934,7 @@ def _build_project_report(self, base_query): except APIReportException: result = None - if result: + if result: # cover 100 report_obj['file_mbs'] += result['mb_total'] # Create a list of all possible ids in this project hierarchy diff --git a/api/handlers/userhandler.py b/api/handlers/userhandler.py index 7269b8217..d62bd0ad8 100644 --- a/api/handlers/userhandler.py +++ b/api/handlers/userhandler.py @@ -27,7 +27,7 @@ def get(self, _id): if not self.user_is_admin: projection['wechat'] = 0 result = permchecker(self.storage.exec_op)('GET', _id, projection=projection or None) - if result is None: + if result is None: # cover 100 self.abort(404, 'User does not exist') return result @@ -36,7 +36,7 @@ def self(self): if not self.uid: self.abort(400, 'no user is logged in') user = self.storage.exec_op('GET', self.uid) - if not user: + if not user: # cover 100 self.abort(403, 'user does not exist') return user @@ -46,7 +46,7 @@ def get_all(self): if not self.user_is_admin: projection['wechat'] = 0 result = permchecker(self.storage.exec_op)('GET', projection=projection) - if result is None: + if result is None: # cover 100 self.abort(404, 'Not found') return result @@ -60,9 +60,9 @@ def delete(self, _id): result = self.storage.exec_op('DELETE', _id) if result.deleted_count == 1: return {'deleted': result.deleted_count} - else: + else: # cover 100 self.abort(404, 'User {} not removed'.format(_id)) - return result + return result # cover 100 @validators.verify_payload_exists def put(self, _id): @@ -70,7 +70,7 @@ def put(self, _id): user = self._get_user(_id) permchecker = userauth.default(self, user) payload = self.request.json_body - if not payload: + if not payload: # cover 100 self.abort(400, 'PUT request body cannot be empty') 
mongo_schema_uri = validators.schema_uri('mongo', 'user.json') mongo_validator = validators.decorator_from_schema_path(mongo_schema_uri) @@ -81,14 +81,14 @@ def put(self, _id): result = mongo_validator(permchecker(self.storage.exec_op))('PUT', _id=_id, payload=payload) if result.modified_count == 1: return {'modified': result.modified_count} - else: + else: # cover 100 self.abort(404, 'User {} not updated'.format(_id)) def post(self): """Add user""" permchecker = userauth.default(self) payload = self.request.json_body - if self.is_true('wechat'): + if self.is_true('wechat'): # cover 100 payload['wechat'] = {'registration_code': base64.urlsafe_b64encode(os.urandom(42))} mongo_schema_uri = validators.schema_uri('mongo', 'user.json') mongo_validator = validators.decorator_from_schema_path(mongo_schema_uri) @@ -102,7 +102,7 @@ def post(self): result = mongo_validator(permchecker(self.storage.exec_op))('POST', payload=payload) if result.acknowledged: return {'_id': result.inserted_id} - else: + else: # cover 100 self.abort(404, 'User {} not updated'.format(payload['_id'])) def _cleanup_user_permissions(self, uid): @@ -115,14 +115,14 @@ def _cleanup_user_permissions(self, uid): config.db.projects.update_many(query, update) config.db.sessions.update_many(query, update) config.db.acquisitions.update_many(query, update) - except APIStorageException: + except APIStorageException: # cover 100 self.abort(500, 'Site-wide user permissions for {} were unabled to be removed'.format(uid)) def avatar(self, uid): self.resolve_avatar(uid, default=self.request.GET.get('default')) def self_avatar(self): - if self.uid is None: + if self.uid is None: # cover 100 self.abort(404, 'not a logged-in user') self.resolve_avatar(self.uid, default=self.request.GET.get('default')) @@ -135,7 +135,7 @@ def resolve_avatar(self, email, default=None): # Storage throws a 404; we want to catch that and handle it separately in the case of a provided default. try: user = self._get_user(email) - except APIStorageException: + except APIStorageException: # cover 100 user = {} avatar = user.get('avatar', None) @@ -144,7 +144,7 @@ def resolve_avatar(self, email, default=None): if user and avatar is None: gravatar = util.resolve_gravatar(email) - if gravatar is not None: + if gravatar is not None: # cover 100 user = config.db['users'].find_one_and_update({ '_id': email, }, { @@ -156,10 +156,10 @@ def resolve_avatar(self, email, default=None): return_document=pymongo.collection.ReturnDocument.AFTER ) - if user.get('avatar', None): + if user.get('avatar', None): # cover 100 # Our data is unicode, but webapp2 wants a python-string for its headers. 
self.redirect(str(user['avatar']), code=307) - elif default is not None: + elif default is not None: # cover 100 self.redirect(str(default), code=307) else: self.abort(404, 'no avatar') @@ -173,7 +173,7 @@ def generate_api_key(self): result = self.storage.exec_op('PUT', _id=self.uid, payload=payload) if result.modified_count == 1: return {'key': generated_key} - else: + else: # cover 100 self.abort(500, 'New key for user {} not generated'.format(self.uid)) @require_admin @@ -188,12 +188,12 @@ def reset_registration(self, uid): result = self.storage.exec_op('PUT', _id=uid, payload=update) if result.modified_count == 1: return {'registration_code': new_registration_code} - else: + else: # cover 100 self.abort(404, 'User {} not updated'.format(uid)) def _get_user(self, _id): user = self.storage.get_container(_id) if user is not None: return user - else: + else: # cover 100 self.abort(404, 'user {} not found'.format(_id)) diff --git a/api/jobs/batch.py b/api/jobs/batch.py index 0efa05884..a6dd54977 100644 --- a/api/jobs/batch.py +++ b/api/jobs/batch.py @@ -43,7 +43,7 @@ def get(batch_id, projection=None, get_jobs=False): if get_jobs: jobs = [] - for jid in batch_job.get('jobs', []): + for jid in batch_job.get('jobs', []): # cover 100 job = Job.get(jid) jobs.append(job) batch_job['jobs'] = jobs @@ -74,16 +74,16 @@ def find_matching_conts(gear, containers, container_type): ambiguous = False # Are any of the inputs ambiguous? not_matched = False for files in suggestions.itervalues(): - if len(files) > 1: + if len(files) > 1: # cover 100 ambiguous = True - elif len(files) == 0: + elif len(files) == 0: # cover 100 not_matched = True break # Based on results, add to proper list - if not_matched: + if not_matched: # cover 100 not_matched_conts.append(c) - elif ambiguous: + elif ambiguous: # cover 100 ambiguous_conts.append(c) else: # Create input map of file refs @@ -92,7 +92,7 @@ def find_matching_conts(gear, containers, container_type): inputs[input_name] = {'type': container_type, 'id': str(c['_id']), 'name': files[0]} c['inputs'] = inputs matched_conts.append(c) - else: + else: # cover 100 not_matched_conts.append(c) return { 'matched': matched_conts, @@ -123,7 +123,7 @@ def update(batch_id, payload): # Require that the batch job has the previous state query['state'] = BATCH_JOB_TRANSITIONS[payload.get('state')] result = config.db.batch.update_one({'_id': bid}, {'$set': payload}) - if result.modified_count != 1: + if result.modified_count != 1: # cover 100 raise Exception('Batch job not updated') def run(batch_job): @@ -132,7 +132,7 @@ def run(batch_job): """ proposal = batch_job.get('proposal') - if not proposal: + if not proposal: # cover 100 raise APIStorageException('The batch job is not formatted correctly.') proposed_inputs = proposal.get('inputs', []) @@ -202,7 +202,7 @@ def cancel(batch_job): try: Queue.mutate(job, {'state': 'cancelled'}) cancelled_jobs += 1 - except Exception: # pylint: disable=broad-except + except Exception: # cover 100 # pylint: disable=broad-except # if the cancellation fails, move on to next job continue @@ -217,7 +217,7 @@ def check_state(batch_id): batch = get(str(batch_id)) - if batch.get('state') == 'cancelled': + if batch.get('state') == 'cancelled': # cover 100 return None batch_jobs = config.db.jobs.find({'_id':{'$in': batch.get('jobs', [])}, 'state': {'$nin': ['complete', 'failed', 'cancelled']}}) non_failed_batch_jobs = config.db.jobs.find({'_id':{'$in': batch.get('jobs', [])}, 'state': {'$ne': 'failed'}}) @@ -227,20 +227,20 @@ def check_state(batch_id): 
return 'complete' else: return 'failed' - else: + else: # cover 100 return None def get_stats(): """ Return the number of jobs by state. """ - raise NotImplementedError() + raise NotImplementedError() # cover 100 def resume(): """ Move cancelled jobs back to pending. """ - raise NotImplementedError() + raise NotImplementedError() # cover 100 def delete(): """ @@ -249,4 +249,4 @@ def delete(): - it's spawned jobs - all the files it's jobs produced. """ - raise NotImplementedError() + raise NotImplementedError() # cover 100 diff --git a/api/jobs/gears.py b/api/jobs/gears.py index ed5ae81f7..dd57a68be 100644 --- a/api/jobs/gears.py +++ b/api/jobs/gears.py @@ -73,7 +73,7 @@ def suggest_container(gear, cont_name, cid): for x in schemas: f['suggested'][x] = schemas[x].is_valid(f) - for analysis in root.get('analyses',{}): + for analysis in root.get('analyses',{}): # cover 100 files = analysis.get('files', []) files[:] = [x for x in files if x.get('output')] for f in files: @@ -103,7 +103,7 @@ def suggest_for_files(gear, files): return suggested_files def validate_gear_config(gear, config_): - if len(gear.get('manifest', {}).get('config', {})) > 0: + if len(gear.get('manifest', {}).get('config', {})) > 0: # cover 100 invocation = gear_tools.derive_invocation_schema(gear['manifest']) ci = gear_tools.isolate_config_invocation(invocation) validator = Draft4Validator(ci) @@ -126,7 +126,7 @@ def insert_gear(doc): gear_tools.validate_manifest(doc['gear']) # This can be mongo-escaped and re-used later - if doc.get("invocation-schema"): + if doc.get("invocation-schema"): # cover 100 del(doc["invocation-schema"]) now = datetime.datetime.utcnow() @@ -136,7 +136,7 @@ def insert_gear(doc): result = config.db.gears.insert(doc) - if config.get_item('queue', 'prefetch'): + if config.get_item('queue', 'prefetch'): # cover 100 log.info('Queuing prefetch job for gear ' + doc['gear']['name']) job = Job(str(doc['_id']), {}, destination={}, tags=['prefetch'], request={ @@ -163,7 +163,7 @@ def insert_gear(doc): def remove_gear(_id): result = config.db.gears.delete_one({"_id": bson.ObjectId(_id)}) - if result.deleted_count != 1: + if result.deleted_count != 1: # cover 100 raise Exception("Deleted failed " + str(result.raw_result)) def upsert_gear(doc): diff --git a/api/jobs/handlers.py b/api/jobs/handlers.py index 69f4217a2..4532d8966 100644 --- a/api/jobs/handlers.py +++ b/api/jobs/handlers.py @@ -38,7 +38,7 @@ def get(self): gears = get_gears() filters = self.request.GET.getall('filter') - if 'single_input' in filters: + if 'single_input' in filters: # cover 100 gears = list(filter(lambda x: len(x["gear"]["inputs"].keys()) <= 1, gears)) return gears @@ -117,7 +117,7 @@ def post(self, _id): except ValidationError as err: key = "none" - if len(err.relative_path) > 0: + if len(err.relative_path) > 0: # cover 100 key = err.relative_path[0] message = err.message.replace("u'", "'") @@ -333,7 +333,7 @@ def get_config(self, _id): self.abort(403, 'Request requires superuser') c = Job.get(_id).config - if c is None: + if c is None: # cover 100 c = {} self.response.headers['Content-Type'] = 'application/octet-stream' diff --git a/api/jobs/jobs.py b/api/jobs/jobs.py index 13319204e..cb06d11be 100644 --- a/api/jobs/jobs.py +++ b/api/jobs/jobs.py @@ -52,7 +52,7 @@ def __init__(self, gear_id, inputs, destination=None, tags=None, time_now = datetime.datetime.utcnow() - if tags is None: + if tags is None: # cover 100 tags = [] if saved_files is None: saved_files = [] @@ -179,12 +179,12 @@ def map(self): if d.get('inputs'): for x in 
d['inputs'].keys(): d['inputs'][x] = d['inputs'][x].__dict__ - else: + else: # cover 100 d.pop('inputs') if d.get('destination'): d['destination'] = d['destination'].__dict__ - else: + else: # cover 100 d.pop('destination') if d['id'] is None: @@ -216,7 +216,7 @@ def insert(self): Warning: this will not stop you from inserting a job for a gear that has gear.custom.flywheel.invald set to true. """ - if self.id_ is not None: + if self.id_ is not None: # cover 100 raise Exception('Cannot insert job that has already been inserted') result = config.db.jobs.insert_one(self.mongo()) @@ -227,7 +227,7 @@ def save(self): update = self.mongo() job_id = update.pop('id') result = config.db.jobs.replace_one({'_id': job_id}, update) - if result.modified_count != 1: + if result.modified_count != 1: # cover 100 raise Exception('Job modification not saved') return {'modified_count': 1} @@ -241,7 +241,7 @@ def generate_request(self, gear): A gear_list map from the gears table. """ - if gear.get('gear', {}).get('custom', {}).get('flywheel', {}).get('invalid', False): + if gear.get('gear', {}).get('custom', {}).get('flywheel', {}).get('invalid', False): # cover 100 raise Exception('Gear marked as invalid, will not run!') r = { @@ -275,7 +275,7 @@ def generate_request(self, gear): # Add config, if any if self.config is not None: - if self.id_ is None: + if self.id_ is None: # cover 100 raise Exception('Running a job requires an ID') r['inputs'].append({ diff --git a/api/jobs/queue.py b/api/jobs/queue.py index c228c9e82..3c99b3a87 100644 --- a/api/jobs/queue.py +++ b/api/jobs/queue.py @@ -53,10 +53,10 @@ def mutate(job, mutation): Validate and save a job mutation """ - if job.state not in JOB_STATES_ALLOWED_MUTATE: + if job.state not in JOB_STATES_ALLOWED_MUTATE: # cover 100 raise Exception('Cannot mutate a job that is ' + job.state + '.') - if 'state' in mutation and not valid_transition(job.state, mutation['state']): + if 'state' in mutation and not valid_transition(job.state, mutation['state']): # cover 100 raise Exception('Mutating job from ' + job.state + ' to ' + mutation['state'] + ' not allowed.') # Any modification must be a timestamp update @@ -69,11 +69,11 @@ def mutate(job, mutation): } result = config.db.jobs.update_one(job_query, {'$set': mutation}) - if result.modified_count != 1: + if result.modified_count != 1: # cover 100 raise Exception('Job modification not saved') # If the job did not succeed, check to see if job should be retried. - if 'state' in mutation and mutation['state'] == 'failed' and retry_on_explicit_fail(): + if 'state' in mutation and mutation['state'] == 'failed' and retry_on_explicit_fail(): # cover 100 job.state = 'failed' Queue.retry(job) @@ -84,18 +84,18 @@ def retry(job, force=False): Can override the attempt limit by passing force=True. """ - if job.attempt >= max_attempts() and not force: + if job.attempt >= max_attempts() and not force: # cover 100 log.info('Permanently failed job %s (after %d attempts)', job.id_, job.attempt) return - if job.state != 'failed': + if job.state != 'failed': # cover 100 raise Exception('Can only retry a job that is failed') # Race condition: jobs should only be marked as failed once a new job has been spawned for it (if any). # No transactions in our database, so we can't do that. # Instead, make a best-hope attempt. 
check = config.db.jobs.find_one({'previous_job_id': job.id_ }) - if check is not None: + if check is not None: # cover 100 found = Job.load(check) raise Exception('Job ' + job.id_ + ' has already been retried as ' + str(found.id_)) @@ -118,7 +118,7 @@ def retry(job, force=False): {'jobs': job.id_}, {'$pull': {'jobs': job.id_}, '$push': {'jobs': new_id}} ) - if result.modified_count == 1: + if result.modified_count == 1: # cover 100 log.info('updated batch job list, replacing {} with {}'.format(job.id_, new_id)) return new_id @@ -188,7 +188,7 @@ def start_job(tags=None): return_document=pymongo.collection.ReturnDocument.AFTER ) - if result is None: + if result is None: # cover 100 raise Exception('Marked job as running but could not generate and save formula') return Job.load(result) @@ -207,15 +207,15 @@ def search(containers, states=None, tags=None): # Limitation: container types must match. type1 = containers[0].type for container in containers: - if container.type != type1: + if container.type != type1: # cover 100 raise Exception('All containers passed to Queue.search must be of the same type') query = {'inputs.id': {'$in': [x.id for x in containers]}, 'inputs.type': type1} - if states is not None and len(states) > 0: + if states is not None and len(states) > 0: # cover 100 query['state'] = {"$in": states} - if tags is not None and len(tags) > 0: + if tags is not None and len(tags) > 0: # cover 100 query['tags'] = {"$in": tags} # For now, mandate reverse-crono sort @@ -274,7 +274,7 @@ def scan_for_orphans(): if doc is None: break - else: + else: # cover 100 orphaned += 1 j = Job.load(doc) Queue.retry(j) diff --git a/api/jobs/rules.py b/api/jobs/rules.py index d2e29f172..46f8cdeba 100644 --- a/api/jobs/rules.py +++ b/api/jobs/rules.py @@ -84,7 +84,7 @@ def lower(x): return match_param.lower() in map(lower, file_.get('measurements', [])) else: return False - except KeyError: + except KeyError: # cover 100 _log_file_key_error(file_, container, 'has no measurements key') return False @@ -142,7 +142,7 @@ def queue_job_legacy(algorithm_id, input_): gear = gears.get_gear_by_name(algorithm_id) - if len(gear['gear']['inputs']) != 1: + if len(gear['gear']['inputs']) != 1: # cover 100 raise Exception("Legacy gear enqueue attempt of " + algorithm_id + " failed: must have exactly 1 input in manifest") input_name = gear['gear']['inputs'].keys()[0] @@ -154,7 +154,7 @@ def queue_job_legacy(algorithm_id, input_): job = Job(str(gear['_id']), inputs, tags=['auto', algorithm_id]) return job -def find_type_in_container(container, type_): +def find_type_in_container(container, type_): # cover 100 for c_file in container['files']: if type_ == c_file['type']: return c_file @@ -173,7 +173,7 @@ def create_potential_jobs(db, container, container_type, file_): rules = get_rules_for_container(db, container) # Add hardcoded rules that cannot be removed or changed - for hardcoded_rule in get_base_rules(): + for hardcoded_rule in get_base_rules(): # cover 100 rules.append(hardcoded_rule) for rule in rules: @@ -185,7 +185,7 @@ def create_potential_jobs(db, container, container_type, file_): if rule.get('match') is None: input_ = FileReference(type=container_type, id=str(container['_id']), name=file_['name']) job = queue_job_legacy(alg_name, input_) - else: + else: # cover 100 inputs = { } for input_name, match_type in rule['match'].iteritems(): diff --git a/api/placer.py b/api/placer.py index 91924c2ac..e01d1ab61 100644 --- a/api/placer.py +++ b/api/placer.py @@ -67,7 +67,7 @@ def requireTarget(self): """ Helper 
function that throws unless a container was provided. """ - if self.id_ is None or self.container is None or self.container_type is None: + if self.id_ is None or self.container is None or self.container_type is None: # cover 100 raise Exception('Must specify a target') def requireMetadata(self): @@ -325,7 +325,7 @@ def __init__(self, container_type, container, id_, metadata, timestamp, origin, def check(self): token = self.context['token'] - if token is None: + if token is None: # cover 100 raise Exception('TokenPlacer requires a token') # This logic is used by: @@ -381,7 +381,7 @@ def check(self): token = self.context['token'] - if token is None: + if token is None: # cover 100 raise Exception('PackfilePlacer requires a token') # This logic is used by: @@ -393,7 +393,7 @@ def check(self): base_path = config.get_item('persistent', 'data_path') self.folder = os.path.join(base_path, 'tokens', 'packfile', token) - if not os.path.isdir(self.folder): + if not os.path.isdir(self.folder): # cover 100 raise Exception('Packfile directory does not exist or has been deleted') self.requireMetadata() @@ -539,7 +539,7 @@ def finalize(self): insert_map['created'] = self.timestamp insert_map.update(self.metadata['session']) insert_map['subject'] = containerutil.add_id_to_subject(insert_map.get('subject'), bson.ObjectId(self.p_id)) - if 'timestamp' in insert_map: + if 'timestamp' in insert_map: # cover 100 insert_map['timestamp'] = dateutil.parser.parse(insert_map['timestamp']) session = config.db['session' + 's'].find_one_and_update( @@ -635,14 +635,14 @@ def finalize(self): class AnalysisJobPlacer(Placer): def check(self): - if self.id_ is None: + if self.id_ is None: # cover 100 raise Exception('Must specify a target analysis') def process_file_field(self, field, file_attrs): if self.metadata is not None: file_mds = self.metadata.get('acquisition', {}).get('files', []) - for file_md in file_mds: + for file_md in file_mds: # cover 100 if file_md['name'] == file_attrs['name']: file_attrs.update(file_md) break @@ -670,7 +670,7 @@ def finalize(self): config.db.analyses.update_one(q, u) return self.saved -class GearPlacer(Placer): +class GearPlacer(Placer): # cover 100 def check(self): self.requireMetadata() diff --git a/api/tempdir.py b/api/tempdir.py index ff4b30f0b..eecce7e19 100644 --- a/api/tempdir.py +++ b/api/tempdir.py @@ -32,7 +32,7 @@ def __init__(self, suffix="", prefix=template, dir=None): def __repr__(self): return "<{} {!r}>".format(self.__class__.__name__, self.name) - def __enter__(self): + def __enter__(self): # cover 100 return self.name def cleanup(self, _warn=False): @@ -42,7 +42,7 @@ def cleanup(self, _warn=False): Warning) try: self._rmtree(self.name) - except (TypeError, AttributeError) as ex: + except (TypeError, AttributeError) as ex: # cover 100 # Issue #10188: Emit a warning on stderr # if the directory could not be cleaned # up due to missing globals @@ -56,7 +56,7 @@ def cleanup(self, _warn=False): self._warn("end: Implicitly cleaning up {!r}".format(self), Warning) - def __exit__(self, exc, value, tb): + def __exit__(self, exc, value, tb): # cover 100 self.cleanup() def __del__(self): @@ -83,16 +83,16 @@ def _rmtree(self, path): fullname = self._path_join(path, name) try: isdir = self._isdir(fullname) and not self._islink(fullname) - except self._os_error: + except self._os_error: # cover 100 isdir = False - if isdir: + if isdir: # cover 100 self._rmtree(fullname) else: try: self._remove(fullname) - except self._os_error: + except self._os_error: # cover 100 pass try: 
self._rmdir(path) - except self._os_error: + except self._os_error: # cover 100 pass diff --git a/api/upload.py b/api/upload.py index 96da19e3c..42e0934ab 100644 --- a/api/upload.py +++ b/api/upload.py @@ -56,13 +56,13 @@ def process_upload(request, strategy, container_type=None, id_=None, origin=None Creates a packfile from uploaded files | | | | X """ - if not isinstance(strategy, Strategy): + if not isinstance(strategy, Strategy): # cover 100 raise Exception('Unknown upload strategy') - if id_ is not None and container_type == None: + if id_ is not None and container_type == None: # cover 100 raise Exception('Unspecified container type') - if container_type is not None and container_type not in ('acquisition', 'session', 'project', 'collection', 'analysis', 'gear'): + if container_type is not None and container_type not in ('acquisition', 'session', 'project', 'collection', 'analysis', 'gear'): # cover 100 raise Exception('Unknown container type') timestamp = datetime.datetime.utcnow() @@ -78,7 +78,7 @@ def process_upload(request, strategy, container_type=None, id_=None, origin=None if 'metadata' in form: try: metadata = json.loads(form['metadata'].value) - except Exception: + except Exception: # cover 100 raise files.FileStoreException('wrong format for field "metadata"') placer_class = strategy.value @@ -93,7 +93,7 @@ def process_upload(request, strategy, container_type=None, id_=None, origin=None file_fields = [x for x in form if form[x].filename] # TODO: Change schemas to enabled targeted uploads of more than one file. # Ref docs from placer.TargetedPlacer for details. - if strategy == Strategy.targeted and len(file_fields) > 1: + if strategy == Strategy.targeted and len(file_fields) > 1: # cover 100 raise Exception("Targeted uploads can only send one file") @@ -103,7 +103,7 @@ def process_upload(request, strategy, container_type=None, id_=None, origin=None # Not the best practice. Open to improvements. # These are presumbed to be required by every function later called with field as a parameter. 
field.path = os.path.join(tempdir.name, field.filename) - if not os.path.exists(field.path): + if not os.path.exists(field.path): # cover 100 tempdir_exists = os.path.exists(tempdir.name) raise Exception("file {} does not exist, tmpdir {} exists: {}, files in tmpdir: {}".format( field.path, @@ -137,7 +137,7 @@ def process_upload(request, strategy, container_type=None, id_=None, origin=None placer.process_file_field(field, file_attrs) # Respond either with Server-Sent Events or a standard json map - if placer.sse and not response: + if placer.sse and not response: # cover 100 raise Exception("Programmer error: response required") elif placer.sse: response.headers['Content-Type'] = 'text/event-stream; charset=utf-8' @@ -154,7 +154,7 @@ def process_upload(request, strategy, container_type=None, id_=None, origin=None for item in placer.finalize(): try: response.write(item) - except Exception: # pylint: disable=broad-except + except Exception: # cover 100 # pylint: disable=broad-except log.info('SSE upload progress failed to send; continuing') return @@ -183,7 +183,7 @@ def upload(self, strategy): strategy = Strategy.uidmatch elif strategy == 'reaper': strategy = Strategy.reaper - else: + else: # cover 100 self.abort(500, 'stragegy {} not implemented'.format(strategy)) return process_upload(self.request, strategy, origin=self.origin, context=context) @@ -250,7 +250,7 @@ def clean_packfile_tokens(self): result = config.db['tokens'].find_one({ '_id': token }) - except bson.errors.InvalidId: + except bson.errors.InvalidId: # cover 100 # Folders could be an invalid mongo ID, in which case they're definitely expired :) pass diff --git a/api/util.py b/api/util.py index 075b9c6bd..e815ebc8d 100644 --- a/api/util.py +++ b/api/util.py @@ -95,7 +95,7 @@ def is_user_id(uid): # NOTE unused function -def is_group_id(gid): +def is_group_id(gid): # cover 100 """ Checks to make sure uid matches uid regex """ @@ -226,7 +226,7 @@ def mkdir_p(path): except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass - else: + else: # cover 100 raise NONCE_CHARS = string.ascii_letters + string.digits diff --git a/api/validators.py b/api/validators.py index b2d804900..323b0b662 100644 --- a/api/validators.py +++ b/api/validators.py @@ -48,7 +48,7 @@ def schema_uri(type_, schema_name): ) def decorator_from_schema_path(schema_url): - if schema_url is None: + if schema_url is None: # cover 100 return no_op schema, resolver = _resolve_schema(schema_url) def g(exec_op): @@ -63,14 +63,14 @@ def validator(method, **kwargs): if method in ['POST', 'PUT']: try: _validate_json(payload, _schema, resolver) - except jsonschema.ValidationError as e: + except jsonschema.ValidationError as e: # cover 100 raise DBValidationException(str(e)) return exec_op(method, **kwargs) return validator return g def from_schema_path(schema_url): - if schema_url is None: + if schema_url is None: # cover 100 return no_op # split the url in base_uri and schema_name schema, resolver = _resolve_schema(schema_url) @@ -102,7 +102,7 @@ def key_check(schema_url): 2. a GET will retrieve a single item 3. 
a DELETE (most importantly) will delete a single item """ - if schema_url is None: + if schema_url is None: # cover 100 return no_op schema, _ = _resolve_schema(schema_url) log.debug(schema) @@ -113,7 +113,7 @@ def f(method, _id, query_params = None, payload = None, exclude_params=None): if method == 'POST': try: exclude_params = _post_exclude_params(schema.get('key_fields', []), payload) - except KeyError as e: + except KeyError as e: # cover 100 raise InputValidationException('missing key {} in payload'.format(e.args)) else: _check_query_params(schema.get('key_fields'), query_params) diff --git a/api/web/base.py b/api/web/base.py index 41e13db10..4e28258f4 100644 --- a/api/web/base.py +++ b/api/web/base.py @@ -64,7 +64,7 @@ def initialization_auth(self): # User (API key) authentication key = session_token.split()[1] self.uid = APIKeyAuthProvider.validate_user_api_key(key) - elif session_token.startswith('scitran-drone '): + elif session_token.startswith('scitran-drone '): # cover 100 # Drone (API key) authentication # When supported, remove custom headers and shared secret self.abort(401, 'Drone API keys are not yet supported') @@ -76,11 +76,11 @@ def initialization_auth(self): # Drone shared secret authentication elif drone_secret is not None: - if drone_method is None or drone_name is None: + if drone_method is None or drone_name is None: # cover 100 self.abort(400, 'X-SciTran-Method or X-SciTran-Name header missing') - if config.get_item('core', 'drone_secret') is None: + if config.get_item('core', 'drone_secret') is None: # cover 100 self.abort(401, 'drone secret not configured') - if drone_secret != config.get_item('core', 'drone_secret'): + if drone_secret != config.get_item('core', 'drone_secret'): # cover 100 self.abort(401, 'invalid drone secret') drone_request = True @@ -94,9 +94,9 @@ def initialization_auth(self): self.user_is_admin = True else: user = config.db.users.find_one({'_id': self.uid}, ['root', 'disabled']) - if not user: + if not user: # cover 100 self.abort(402, 'User {} will need to be added to the system before managing data.'.format(self.uid)) - if user.get('disabled', False) is True: + if user.get('disabled', False) is True: # cover 100 self.abort(402, 'User {} is disabled.'.format(self.uid)) if user.get('root'): self.user_is_admin = True @@ -105,7 +105,7 @@ def initialization_auth(self): if self.is_true('root'): if user.get('root'): self.superuser_request = True - else: + else: # cover 100 self.abort(403, 'user ' + self.uid + ' is not authorized to make superuser requests') else: self.superuser_request = False @@ -139,7 +139,7 @@ def authenticate_user_token(self, session_token): try: auth_provider = AuthProvider.factory(auth_type) - except NotImplementedError as e: + except NotImplementedError as e: # cover 100 self.abort(401, str(e)) try: @@ -314,7 +314,7 @@ def handle_exception(self, exception, debug, return_json=False): # pylint: disab custom_errors = exception.errors elif isinstance(exception, APIUnknownUserException): code = 402 - elif isinstance(exception, APIConsistencyException): + elif isinstance(exception, APIConsistencyException): # cover 100 code = 400 elif isinstance(exception, APIPermissionException): code = 403 @@ -322,12 +322,12 @@ def handle_exception(self, exception, debug, return_json=False): # pylint: disab code = 404 elif isinstance(exception, APIConflictException): code = 409 - elif isinstance(exception, APIValidationException): + elif isinstance(exception, APIValidationException): # cover 100 code = 422 custom_errors = exception.errors - 
elif isinstance(exception, files.FileStoreException): + elif isinstance(exception, files.FileStoreException): # cover 100 code = 400 - elif isinstance(exception, ElasticsearchException): + elif isinstance(exception, ElasticsearchException): # cover 100 code = 503 message = "Search is currently down. Try again later." self.request.logger.error(traceback.format_exc()) @@ -348,7 +348,7 @@ def log_user_access(self, access_type, cont_name=None, cont_id=None, multifile=F if not config.get_item('core', 'access_log_enabled'): return - if not isinstance(access_type, AccessType): + if not isinstance(access_type, AccessType): # cover 100 raise Exception('Unknown access type.') log_map = { @@ -361,7 +361,7 @@ def log_user_access(self, access_type, cont_name=None, cont_id=None, multifile=F if access_type not in [AccessType.user_login, AccessType.user_logout]: - if cont_name is None or cont_id is None: + if cont_name is None or cont_id is None: # cover 100 raise Exception('Container information not available.') # Create a context tree for the container @@ -390,7 +390,7 @@ def log_user_access(self, access_type, cont_name=None, cont_id=None, multifile=F {'$setOnInsert': log_map}, upsert=True ) - except Exception as e: # pylint: disable=broad-except + except Exception as e: # cover 100 # pylint: disable=broad-except config.log.exception(e) self.abort(500, 'Unable to log access.') @@ -411,7 +411,7 @@ def dispatch(self): def abort(self, code, detail=None, **kwargs): - if isinstance(detail, jsonschema.ValidationError): + if isinstance(detail, jsonschema.ValidationError): # cover 100 detail = { 'relative_path': list(detail.relative_path), 'instance': detail.instance, diff --git a/api/web/encoder.py b/api/web/encoder.py index ea0ef629b..1ced5de66 100644 --- a/api/web/encoder.py +++ b/api/web/encoder.py @@ -15,7 +15,7 @@ def custom_json_serializer(obj): return obj.map() elif isinstance(obj, Cursor): return list(obj) - raise TypeError(repr(obj) + " is not JSON serializable") + raise TypeError(repr(obj) + " is not JSON serializable") # cover 100 def sse_pack(d): diff --git a/api/web/start.py b/api/web/start.py index ace553902..37993cb24 100644 --- a/api/web/start.py +++ b/api/web/start.py @@ -41,7 +41,7 @@ def dispatcher(router, request, response): try: if uwsgi is not None: uwsgi.set_logvar('request_id', request.id) - except: # pylint: disable=bare-except + except: # cover 100 # pylint: disable=bare-except request.logger.error("Error setting request_id log var", exc_info=True) try: @@ -51,7 +51,7 @@ def dispatcher(router, request, response): response.headers['Content-Type'] = 'application/json; charset=utf-8' except webapp2.HTTPException as e: util.send_json_http_exception(response, str(e), e.code) - except Exception as e: # pylint: disable=broad-except + except Exception as e: # cover 100 # pylint: disable=broad-except request.logger.error("Error dispatching request", exc_info=True) if config.get_item('core', 'debug'): message = traceback.format_exc() From 73d95059037e016669d09e7ff99aa7edea00a816 Mon Sep 17 00:00:00 2001 From: Ambrus Simon Date: Mon, 7 Aug 2017 16:59:23 +0200 Subject: [PATCH 4/4] add check for 100% coverage (and remove obsolete module exclusion) --- test/bin/run-tests-ubuntu.sh | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/test/bin/run-tests-ubuntu.sh b/test/bin/run-tests-ubuntu.sh index 372bb231e..5ae8b8f50 100755 --- a/test/bin/run-tests-ubuntu.sh +++ b/test/bin/run-tests-ubuntu.sh @@ -29,15 +29,20 @@ clean_up () { kill $API_PID || true wait 2> /dev/null - 
# NOTE on omit: cross-site feature unused and planned for removal
-    local OMIT="--omit api/centralclient.py"

     echo -e "\nUNIT TEST COVERAGE:"
-    coverage report $OMIT --skip-covered
+    coverage report --skip-covered
     coverage combine
     echo -e "\nOVERALL COVERAGE:"
-    coverage report $OMIT --show-missing
-    coverage html $OMIT
+    coverage report --show-missing
+    coverage html
+
+    local percent=$(coverage report | grep TOTAL | awk '{print $4}')
+    if [ "$percent" != "100%" ]; then
+        # override exit code even if all tests passed
+        echo -e "\nError: coverage dropped to $percent" >&2
+        exit 1
+    fi
 }
 
 trap clean_up EXIT
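
A note on how the "cover 100" trailers added throughout PATCH 3/4 behave (a minimal sketch, not part of the patches themselves): coverage.py applies each exclude_lines pattern as a regex against every source line, and when a matched line opens a block (for example an "if" test or a "def" line), the entire block under it is excluded from reporting, just as the standard "pragma: no cover" would exclude it. The hypothetical module below assumes a .coveragerc whose exclude_lines contains the "cover 100" pattern, as set up earlier in this series; the file and function names are invented for illustration only.

    # demo_cover100.py - hypothetical module, for illustration only
    def reset_code(uid, is_admin):
        """Toy stand-in for the guarded handler branches marked in PATCH 3/4."""
        if not is_admin:  # cover 100
            # The line above matches the "cover 100" exclude pattern and opens
            # a block, so coverage.py drops this whole branch from the report,
            # the same way "pragma: no cover" would.
            raise RuntimeError('admin privileges required')
        return 'new registration code for %s' % uid

With such exclusions in place, a run along the lines of "coverage run -m pytest" followed by "coverage report" can show a 100% TOTAL even when no test reaches the excluded branches. That is what makes the TOTAL-row check added in PATCH 4/4 (awk pulling the Cover column and comparing it to "100%") usable as a strict gate: any newly written line that is neither tested nor deliberately excluded drops the total below 100% and fails the build.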