From c467034e9a334805c188c1ba49f6495d8f69bfaf Mon Sep 17 00:00:00 2001 From: kartikeyas00 <33379978+kartikeyas00@users.noreply.github.com> Date: Fri, 11 Sep 2020 20:55:24 +0530 Subject: [PATCH 1/6] update CDN link to the new version of the DataTables.js --- examples/flask_tut/datatables/__init__.py | 6 + examples/flask_tut/datatables/clean_regex.py | 40 +++ examples/flask_tut/datatables/column_dt.py | 95 +++++++ examples/flask_tut/datatables/datatables.py | 258 ++++++++++++++++++ .../flask_tut/datatables/search_methods.py | 99 +++++++ examples/flask_tut/flask_tut/__init__.py | 1 - .../flask_tut/templates/dt_110x.html | 44 +-- 7 files changed, 520 insertions(+), 23 deletions(-) create mode 100644 examples/flask_tut/datatables/__init__.py create mode 100644 examples/flask_tut/datatables/clean_regex.py create mode 100644 examples/flask_tut/datatables/column_dt.py create mode 100644 examples/flask_tut/datatables/datatables.py create mode 100644 examples/flask_tut/datatables/search_methods.py diff --git a/examples/flask_tut/datatables/__init__.py b/examples/flask_tut/datatables/__init__.py new file mode 100644 index 0000000..9b5f294 --- /dev/null +++ b/examples/flask_tut/datatables/__init__.py @@ -0,0 +1,6 @@ +from __future__ import absolute_import + +from datatables.column_dt import ColumnDT +from datatables.datatables import DataTables + +__all__ = ["ColumnDT", "DataTables"] diff --git a/examples/flask_tut/datatables/clean_regex.py b/examples/flask_tut/datatables/clean_regex.py new file mode 100644 index 0000000..ec534ee --- /dev/null +++ b/examples/flask_tut/datatables/clean_regex.py @@ -0,0 +1,40 @@ +def clean_regex(regex): + """ + Escape any regex special characters other than alternation. + + :param regex: regex from datatables interface + :type regex: str + :rtype: str with regex to use with database + """ + # copy for return + ret_regex = regex + + # these characters are escaped (all except alternation | and escape \) + # see http://www.regular-expressions.info/refquick.html + escape_chars = '[^$.?*+(){}' + + # remove any escape chars + ret_regex = ret_regex.replace('\\', '') + + # escape any characters which are used by regex + # could probably concoct something incomprehensible using re.sub() but + # prefer to write clear code with this loop + # note expectation that no characters have already been escaped + for c in escape_chars: + ret_regex = ret_regex.replace(c, '\\' + c) + + # remove any double alternations until these don't exist any more + while True: + old_regex = ret_regex + ret_regex = ret_regex.replace('||', '|') + if old_regex == ret_regex: + break + + # if last char is alternation | remove it because this + # will cause operational error + # this can happen as user is typing in global search box + while len(ret_regex) >= 1 and ret_regex[-1] == '|': + ret_regex = ret_regex[:-1] + + # and back to the caller + return ret_regex diff --git a/examples/flask_tut/datatables/column_dt.py b/examples/flask_tut/datatables/column_dt.py new file mode 100644 index 0000000..805adf5 --- /dev/null +++ b/examples/flask_tut/datatables/column_dt.py @@ -0,0 +1,95 @@ +from __future__ import absolute_import + +from collections import namedtuple + +from datatables.search_methods import SEARCH_METHODS + +NULLS_ORDER = ['nullsfirst', 'nullslast'] + +ColumnTuple = namedtuple('ColumnDT', [ + 'sqla_expr', + 'column_name', + 'mData', + 'search_method', + 'nulls_order', + 'global_search', + 'yadcf_data' +]) + + +class ColumnDT(ColumnTuple): + """ + Define a DataTables column. 
+ + :param sqla_expr: SQLAlchemy queryable attribute of object + (column, column_property, hybrid property, or combined expression) + :param mData: name of the mData property as defined in the + DataTables javascript options (default None) + :param search_method: Define how to interpret search values. + Possible values: + - 'none' + - 'string_contains' (default) + - 'ilike' + - 'like' + - 'numeric' + - 'date' + - 'yadcf_text' + - 'yadcf_autocomplete' + - 'yadcf_select' + - 'yadcf_multi_select' + - 'yadcf_range_number' + - 'yadcf_range_number_slider' + - 'yadcf_range_date' + :param nulls_order: define a sort order for the NULL values. + Possible values: + - None (default) + - 'nullsfirst' + - 'nullslast'. + :param global_search: search this column for the global search box + :param yadcf_data : define if the data needs to be return for yadcf plugin. + Possible values: + - False + - True (default) + + :type sqla_expr: SQLAlchemy query expression + :type mData: str + :type search_method: str + :type yadcf_data: bool + :type nulls_order: str + :type global_search: bool + + :return: a ColumnDT object + :rtype: ColumnDT + """ + + def __new__( + cls, + sqla_expr, + column_name=None, + mData=None, + search_method='string_contains', + nulls_order=None, + global_search=True, + yadcf_data=True, + ): + """Set default values due to namedtuple immutability.""" + if nulls_order and nulls_order not in NULLS_ORDER: + raise ValueError( + '{} is not an allowed value for nulls_order.'.format( + nulls_order)) + + if search_method not in SEARCH_METHODS: + raise ValueError( + '{} is not an allowed value for search_method.'.format( + search_method)) + + return super(ColumnDT, cls).__new__( + cls, + sqla_expr, + column_name, + mData, + search_method, + nulls_order, + global_search, + yadcf_data, + ) diff --git a/examples/flask_tut/datatables/datatables.py b/examples/flask_tut/datatables/datatables.py new file mode 100644 index 0000000..d53252c --- /dev/null +++ b/examples/flask_tut/datatables/datatables.py @@ -0,0 +1,258 @@ +from __future__ import absolute_import + +import math +import re + +from sqlalchemy import Text, func, or_ +from sqlalchemy.dialects import mysql, postgresql, sqlite + +from datatables.clean_regex import clean_regex +from datatables.search_methods import SEARCH_METHODS + + +class DataTables: + """Class defining a DataTables object. 
+ + :param request: request containing the GET values, specified by the + datatable for filtering, sorting and paging + :type request: pyramid.request + :param query: the query wanted to be seen in the the table + :type query: sqlalchemy.orm.query.Query + :param columns: columns specification for the datatables + :type columns: list + + :returns: a DataTables object + """ + + def __init__(self, request, query, columns, allow_regex_searches=False): + """Initialize object and run the query.""" + self.params = dict(request) + #print(self.params) + if 'sEcho' in self.params: + raise ValueError( + 'Legacy datatables not supported, upgrade to >=1.10') + self.query = query + self.columns = columns + #print(list(map(lambda x: x.column_name,columns))) + self.results = None + self.allow_regex_searches = allow_regex_searches + + # total in the table after filtering + self.cardinality_filtered = 0 + + # total in the table unfiltered + self.cardinality = 0 + + self.yadcf_params = [] + self.filter_expressions = [] + self.error = None + try: + self.run() + except Exception as exc: + self.error = str(exc) + + def output_result(self): + """Output results in the format needed by DataTables.""" + output = {} + output['draw'] = str(int(self.params.get('draw', 1))) + output['recordsTotal'] = str(self.cardinality) + output['recordsFiltered'] = str(self.cardinality_filtered) + if self.error: + output['error'] = self.error + return output + + output['data'] = self.results + for k, v in self.yadcf_params: + output[k] = v + return output + + def _map_columns_with_params(self, columns, params): + """Compare columns data with the parameters data and map the correct + column number to the columns data. As a lot of times in frontend columns + are not in the correct order as they are in the backend. 
Also there + are cases when extra dummy columns are added in the frontend and they + disturb the sequencing, thus the results coming from the backend.""" + pattern = re.compile("columns\[(.*?)\]\[data\]") + # Extract only the keys of type columns[i][data] from the params + params_column_data = {k: v for k, v in params.items() if pattern.match(k)} + column_params_map = [] + i = 0 + for key, value in params_column_data.items(): + column_number = int(pattern.search(key).group(1)) + if value: + for column in columns: + # If the mData is specified as well as the data is specified + # in the frontend then we would try to map the correct column number + # You can set the data in the datatables here https://datatables.net/reference/option/columns.data + if value == column.mData: + column_params_map.append((column_number, column)) + break + else: + # If we are unable to find the matching data + column_params_map.append((column_number, columns[i])) + i += 1 + + return column_params_map + + def _query_with_all_filters_except_one(self, query, exclude): + return query.filter(*[ + e for i, e in enumerate(self.filter_expressions) + if e is not None and i is not exclude + ]) + + def _set_yadcf_data(self, query): + # determine values for yadcf filters + column_params_map = self._map_columns_with_params(self.columns, self.params) + #print(column_params_map) + #for i, col in enumerate(self.columns): + for i, col in column_params_map: + #print(col.yadcf_data) + if col.yadcf_data: + if col.search_method in 'yadcf_range_number_slider': + v = query.with_entities( + func.min(col.sqla_expr), func.max(col.sqla_expr)).one() + self.yadcf_params.append(('yadcf_data_{:d}'.format(i), + (math.floor(v[0]), math.ceil(v[1])))) + + if col.search_method in [ + 'yadcf_select', 'yadcf_multi_select', 'yadcf_autocomplete' + ]: + filtered = self._query_with_all_filters_except_one( + query=query, exclude=i) + v = filtered.with_entities(col.sqla_expr).distinct().all() + #Added the below if statement so only that data with only + #null values is not returned. + if not(len(v)==1 and v[0][0]==None): + self.yadcf_params.append(('yadcf_data_{:d}'.format(i), + [r[0] for r in v])) + + def run(self): + """Launch filtering, sorting and paging to output results.""" + query = self.query + + # count before filtering + self.cardinality = query.add_columns(self.columns[0].sqla_expr).count() + + self._set_column_filter_expressions() + self._set_global_filter_expression() + self._set_sort_expressions() + self._set_yadcf_data(query) + + # apply filters + query = query.filter( + *[e for e in self.filter_expressions if e is not None]) + + self.cardinality_filtered = query.with_entities( + self.columns[0].sqla_expr).count() + + # apply sorts + query = query.order_by( + *[e for e in self.sort_expressions if e is not None]) + + # add paging options + length = int(self.params.get('length')) + if length >= 0: + query = query.limit(length) + elif length == -1: + pass + else: + raise (ValueError( + 'Length should be a positive integer or -1 to disable')) + query = query.offset(int(self.params.get('start'))) + + # add columns to query + query = query.with_entities(*[c.sqla_expr for c in self.columns]) + + # fetch the result of the queries + column_names = [ + col.mData if col.mData else str(i) + for i, col in enumerate(self.columns) + ] + self.results = [{k: v + for k, v in zip(column_names, row)} + for row in query.all()] + + def _set_column_filter_expressions(self): + """Construct the query: filtering. + + Add filtering when per column searching is used. 
+ """ + # per columns filters: + column_params_map = self._map_columns_with_params(self.columns, self.params) + for i, col in column_params_map: + filter_expr = None + value = self.params.get('columns[{:d}][search][value]'.format(i), + '').replace("\\","") + if value: + search_func = SEARCH_METHODS[col.search_method] + filter_expr = search_func(col.sqla_expr, value) + self.filter_expressions.append(filter_expr) + + def _set_global_filter_expression(self): + # global search filter + global_search = self.params.get('search[value]', '') + if global_search == '': + return + + if (self.allow_regex_searches + and self.params.get('search[regex]') == 'true'): + op = self._get_regex_operator() + val = clean_regex(global_search) + + def filter_for(col): + return col.sqla_expr.op(op)(val) + else: + val = '%' + global_search + '%' + + def filter_for(col): + return col.sqla_expr.cast(Text).ilike(val) + + global_filter = [ + filter_for(col) for col in self.columns if col.global_search + ] + + self.filter_expressions.append(or_(*global_filter)) + + def _set_sort_expressions(self): + """Construct the query: sorting. + + Add sorting(ORDER BY) on the columns needed to be applied on. + """ + column_params_map = dict(self._map_columns_with_params(self.columns, self.params)) + sort_expressions = [] + i = 0 + while self.params.get('order[{:d}][column]'.format(i), False): + column_nr = int(self.params.get('order[{:d}][column]'.format(i))) + column = column_params_map[column_nr] + direction = self.params.get('order[{:d}][dir]'.format(i)) + sort_expr = column.sqla_expr + if direction == 'asc': + sort_expr = sort_expr.asc() + elif direction == 'desc': + sort_expr = sort_expr.desc() + else: + raise ValueError( + 'Invalid order direction: {}'.format(direction)) + if column.nulls_order: + if column.nulls_order == 'nullsfirst': + sort_expr = sort_expr.nullsfirst() + elif column.nulls_order == 'nullslast': + sort_expr = sort_expr.nullslast() + else: + raise ValueError( + 'Invalid order direction: {}'.format(direction)) + + sort_expressions.append(sort_expr) + i += 1 + self.sort_expressions = sort_expressions + + def _get_regex_operator(self): + if isinstance(self.query.session.bind.dialect, postgresql.dialect): + return '~' + elif isinstance(self.query.session.bind.dialect, mysql.dialect): + return 'REGEXP' + elif isinstance(self.query.session.bind.dialect, sqlite.dialect): + return 'REGEXP' + else: + raise NotImplementedError( + 'Regex searches are not implemented for this dialect') diff --git a/examples/flask_tut/datatables/search_methods.py b/examples/flask_tut/datatables/search_methods.py new file mode 100644 index 0000000..6d01d7d --- /dev/null +++ b/examples/flask_tut/datatables/search_methods.py @@ -0,0 +1,99 @@ +import datetime +import logging + +from dateutil.parser import parse as date_parse +from sqlalchemy import Text + +logger = logging.getLogger(__name__) + +search_operators = { + '=': lambda expr, value: expr == value, + '>': lambda expr, value: expr > value, + '>=': lambda expr, value: expr >= value, + '<': lambda expr, value: expr < value, + '<=': lambda expr, value: expr <= value, +} + + +def parse_query_value(combined_value): + """Parse value in form of '>value' to a lambda and a value.""" + split = len(combined_value) - len(combined_value.lstrip('<>=')) + operator = combined_value[:split] + if operator == '': + operator = '=' + try: + operator_func = search_operators[operator] + except KeyError: + raise ValueError( + 'Numeric query should start with operator, choose from %s' % + ', 
'.join(search_operators.keys())) + value = combined_value[split:].strip() + return operator_func, value + + +def numeric_query(expr, value): + operator_func, value = parse_query_value(value) + if value == '': + num_value = 0 + else: + num_value = float(value) + + return operator_func(expr, num_value) + + +def date_query(expr, value): + operator_func, value = parse_query_value(value) + try: + date_value = date_parse(value) + except ValueError: + date_value = datetime.datetime.now() + + return operator_func(expr, date_value) + + +def yadcf_range_number(expr, value): + v_from, v_to = value.split('-yadcf_delim-') + v_from = float(v_from) if v_from != '' else -float('inf') + v_to = float(v_to) if v_to != '' else float('inf') + logger.debug('yadcf_range_number: between %f and %f', v_from, v_to) + return expr.between(v_from, v_to) + + +def yadcf_range_date(expr, value): + v_from, v_to = value.split('-yadcf_delim-') + v_from = date_parse(v_from) if v_from != '' else datetime.date.min + v_to = date_parse(v_to) if v_to != '' else datetime.date.max + logger.debug('yadcf_range_date: between %s and %s', v_from, v_to) + return expr.between(v_from, v_to) + + +def yadcf_multi_select(expr, value): + options = value.split('|') + #Added the below statement so that null could be converted to None as + #null is not recognized in python + options = [None if i.strip()=='null' else i for i in options] + #print(options) + logger.debug('yadcf_multi_select: in %s', options) + #Modified so when someone searches null it would be taken care of. + if None in options: + operator_func = search_operators['='] + return ((expr.cast(Text).in_(options)) | operator_func(expr.cast(Text),None)) + else: + return expr.cast(Text).in_(options) + + +SEARCH_METHODS = { + 'none': lambda expr, value: None, + 'string_contains': lambda expr, value: expr.ilike('%' + value + '%'), + 'ilike': lambda expr, value: expr.ilike(value), + 'like': lambda expr, value: expr.like(value), + 'numeric': numeric_query, + 'date': date_query, + 'yadcf_text': lambda expr, value: expr.ilike('%' + value + '%'), + 'yadcf_autocomplete': lambda expr, value: expr == value, + 'yadcf_select': lambda expr, value: expr.ilike('%' + value + '%'), + 'yadcf_multi_select': yadcf_multi_select, + 'yadcf_range_number': yadcf_range_number, + 'yadcf_range_number_slider': yadcf_range_number, + 'yadcf_range_date': yadcf_range_date +} diff --git a/examples/flask_tut/flask_tut/__init__.py b/examples/flask_tut/flask_tut/__init__.py index 4ef3e58..2f2b8ad 100644 --- a/examples/flask_tut/flask_tut/__init__.py +++ b/examples/flask_tut/flask_tut/__init__.py @@ -56,7 +56,6 @@ def data(): # GET parameters params = request.args.to_dict() - # instantiating a DataTable for the query and table needed rowTable = DataTables(params, query, columns) diff --git a/examples/flask_tut/flask_tut/templates/dt_110x.html b/examples/flask_tut/flask_tut/templates/dt_110x.html index bd5960b..910e0ea 100644 --- a/examples/flask_tut/flask_tut/templates/dt_110x.html +++ b/examples/flask_tut/flask_tut/templates/dt_110x.html @@ -1,22 +1,22 @@ {% include 'base.html' %} {% block extra_stylesheets %} - + {% endblock %} {% block content %} -
    [content-block hunk: HTML markup stripped during extraction; surviving text: heading "DataTables 1.10.x example: Users and their address." and table headers Id, Name, Address, Created at]
@@ -24,14 +24,14 @@
    [second hunk: markup stripped during extraction; surviving text: heading "DataTables 1.10.x example: Users and their address."]
{% endblock %} {% block extra_javascripts %} - - + + }); + {% endblock %} \ No newline at end of file From 5af437192c06a5aa5f2267fb977ba38aa06cc896 Mon Sep 17 00:00:00 2001 From: kartikeyas00 <33379978+kartikeyas00@users.noreply.github.com> Date: Fri, 11 Sep 2020 20:56:13 +0530 Subject: [PATCH 2/6] update CDN link to the new version of the DataTables.js --- examples/flask_tut/datatables/__init__.py | 6 - examples/flask_tut/datatables/clean_regex.py | 40 --- examples/flask_tut/datatables/column_dt.py | 95 ------- examples/flask_tut/datatables/datatables.py | 258 ------------------ .../flask_tut/datatables/search_methods.py | 99 ------- 5 files changed, 498 deletions(-) delete mode 100644 examples/flask_tut/datatables/__init__.py delete mode 100644 examples/flask_tut/datatables/clean_regex.py delete mode 100644 examples/flask_tut/datatables/column_dt.py delete mode 100644 examples/flask_tut/datatables/datatables.py delete mode 100644 examples/flask_tut/datatables/search_methods.py diff --git a/examples/flask_tut/datatables/__init__.py b/examples/flask_tut/datatables/__init__.py deleted file mode 100644 index 9b5f294..0000000 --- a/examples/flask_tut/datatables/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from __future__ import absolute_import - -from datatables.column_dt import ColumnDT -from datatables.datatables import DataTables - -__all__ = ["ColumnDT", "DataTables"] diff --git a/examples/flask_tut/datatables/clean_regex.py b/examples/flask_tut/datatables/clean_regex.py deleted file mode 100644 index ec534ee..0000000 --- a/examples/flask_tut/datatables/clean_regex.py +++ /dev/null @@ -1,40 +0,0 @@ -def clean_regex(regex): - """ - Escape any regex special characters other than alternation. - - :param regex: regex from datatables interface - :type regex: str - :rtype: str with regex to use with database - """ - # copy for return - ret_regex = regex - - # these characters are escaped (all except alternation | and escape \) - # see http://www.regular-expressions.info/refquick.html - escape_chars = '[^$.?*+(){}' - - # remove any escape chars - ret_regex = ret_regex.replace('\\', '') - - # escape any characters which are used by regex - # could probably concoct something incomprehensible using re.sub() but - # prefer to write clear code with this loop - # note expectation that no characters have already been escaped - for c in escape_chars: - ret_regex = ret_regex.replace(c, '\\' + c) - - # remove any double alternations until these don't exist any more - while True: - old_regex = ret_regex - ret_regex = ret_regex.replace('||', '|') - if old_regex == ret_regex: - break - - # if last char is alternation | remove it because this - # will cause operational error - # this can happen as user is typing in global search box - while len(ret_regex) >= 1 and ret_regex[-1] == '|': - ret_regex = ret_regex[:-1] - - # and back to the caller - return ret_regex diff --git a/examples/flask_tut/datatables/column_dt.py b/examples/flask_tut/datatables/column_dt.py deleted file mode 100644 index 805adf5..0000000 --- a/examples/flask_tut/datatables/column_dt.py +++ /dev/null @@ -1,95 +0,0 @@ -from __future__ import absolute_import - -from collections import namedtuple - -from datatables.search_methods import SEARCH_METHODS - -NULLS_ORDER = ['nullsfirst', 'nullslast'] - -ColumnTuple = namedtuple('ColumnDT', [ - 'sqla_expr', - 'column_name', - 'mData', - 'search_method', - 'nulls_order', - 'global_search', - 'yadcf_data' -]) - - -class ColumnDT(ColumnTuple): - """ - Define a DataTables column. 
- - :param sqla_expr: SQLAlchemy queryable attribute of object - (column, column_property, hybrid property, or combined expression) - :param mData: name of the mData property as defined in the - DataTables javascript options (default None) - :param search_method: Define how to interpret search values. - Possible values: - - 'none' - - 'string_contains' (default) - - 'ilike' - - 'like' - - 'numeric' - - 'date' - - 'yadcf_text' - - 'yadcf_autocomplete' - - 'yadcf_select' - - 'yadcf_multi_select' - - 'yadcf_range_number' - - 'yadcf_range_number_slider' - - 'yadcf_range_date' - :param nulls_order: define a sort order for the NULL values. - Possible values: - - None (default) - - 'nullsfirst' - - 'nullslast'. - :param global_search: search this column for the global search box - :param yadcf_data : define if the data needs to be return for yadcf plugin. - Possible values: - - False - - True (default) - - :type sqla_expr: SQLAlchemy query expression - :type mData: str - :type search_method: str - :type yadcf_data: bool - :type nulls_order: str - :type global_search: bool - - :return: a ColumnDT object - :rtype: ColumnDT - """ - - def __new__( - cls, - sqla_expr, - column_name=None, - mData=None, - search_method='string_contains', - nulls_order=None, - global_search=True, - yadcf_data=True, - ): - """Set default values due to namedtuple immutability.""" - if nulls_order and nulls_order not in NULLS_ORDER: - raise ValueError( - '{} is not an allowed value for nulls_order.'.format( - nulls_order)) - - if search_method not in SEARCH_METHODS: - raise ValueError( - '{} is not an allowed value for search_method.'.format( - search_method)) - - return super(ColumnDT, cls).__new__( - cls, - sqla_expr, - column_name, - mData, - search_method, - nulls_order, - global_search, - yadcf_data, - ) diff --git a/examples/flask_tut/datatables/datatables.py b/examples/flask_tut/datatables/datatables.py deleted file mode 100644 index d53252c..0000000 --- a/examples/flask_tut/datatables/datatables.py +++ /dev/null @@ -1,258 +0,0 @@ -from __future__ import absolute_import - -import math -import re - -from sqlalchemy import Text, func, or_ -from sqlalchemy.dialects import mysql, postgresql, sqlite - -from datatables.clean_regex import clean_regex -from datatables.search_methods import SEARCH_METHODS - - -class DataTables: - """Class defining a DataTables object. 
- - :param request: request containing the GET values, specified by the - datatable for filtering, sorting and paging - :type request: pyramid.request - :param query: the query wanted to be seen in the the table - :type query: sqlalchemy.orm.query.Query - :param columns: columns specification for the datatables - :type columns: list - - :returns: a DataTables object - """ - - def __init__(self, request, query, columns, allow_regex_searches=False): - """Initialize object and run the query.""" - self.params = dict(request) - #print(self.params) - if 'sEcho' in self.params: - raise ValueError( - 'Legacy datatables not supported, upgrade to >=1.10') - self.query = query - self.columns = columns - #print(list(map(lambda x: x.column_name,columns))) - self.results = None - self.allow_regex_searches = allow_regex_searches - - # total in the table after filtering - self.cardinality_filtered = 0 - - # total in the table unfiltered - self.cardinality = 0 - - self.yadcf_params = [] - self.filter_expressions = [] - self.error = None - try: - self.run() - except Exception as exc: - self.error = str(exc) - - def output_result(self): - """Output results in the format needed by DataTables.""" - output = {} - output['draw'] = str(int(self.params.get('draw', 1))) - output['recordsTotal'] = str(self.cardinality) - output['recordsFiltered'] = str(self.cardinality_filtered) - if self.error: - output['error'] = self.error - return output - - output['data'] = self.results - for k, v in self.yadcf_params: - output[k] = v - return output - - def _map_columns_with_params(self, columns, params): - """Compare columns data with the parameters data and map the correct - column number to the columns data. As a lot of times in frontend columns - are not in the correct order as they are in the backend. 
Also there - are cases when extra dummy columns are added in the frontend and they - disturb the sequencing, thus the results coming from the backend.""" - pattern = re.compile("columns\[(.*?)\]\[data\]") - # Extract only the keys of type columns[i][data] from the params - params_column_data = {k: v for k, v in params.items() if pattern.match(k)} - column_params_map = [] - i = 0 - for key, value in params_column_data.items(): - column_number = int(pattern.search(key).group(1)) - if value: - for column in columns: - # If the mData is specified as well as the data is specified - # in the frontend then we would try to map the correct column number - # You can set the data in the datatables here https://datatables.net/reference/option/columns.data - if value == column.mData: - column_params_map.append((column_number, column)) - break - else: - # If we are unable to find the matching data - column_params_map.append((column_number, columns[i])) - i += 1 - - return column_params_map - - def _query_with_all_filters_except_one(self, query, exclude): - return query.filter(*[ - e for i, e in enumerate(self.filter_expressions) - if e is not None and i is not exclude - ]) - - def _set_yadcf_data(self, query): - # determine values for yadcf filters - column_params_map = self._map_columns_with_params(self.columns, self.params) - #print(column_params_map) - #for i, col in enumerate(self.columns): - for i, col in column_params_map: - #print(col.yadcf_data) - if col.yadcf_data: - if col.search_method in 'yadcf_range_number_slider': - v = query.with_entities( - func.min(col.sqla_expr), func.max(col.sqla_expr)).one() - self.yadcf_params.append(('yadcf_data_{:d}'.format(i), - (math.floor(v[0]), math.ceil(v[1])))) - - if col.search_method in [ - 'yadcf_select', 'yadcf_multi_select', 'yadcf_autocomplete' - ]: - filtered = self._query_with_all_filters_except_one( - query=query, exclude=i) - v = filtered.with_entities(col.sqla_expr).distinct().all() - #Added the below if statement so only that data with only - #null values is not returned. - if not(len(v)==1 and v[0][0]==None): - self.yadcf_params.append(('yadcf_data_{:d}'.format(i), - [r[0] for r in v])) - - def run(self): - """Launch filtering, sorting and paging to output results.""" - query = self.query - - # count before filtering - self.cardinality = query.add_columns(self.columns[0].sqla_expr).count() - - self._set_column_filter_expressions() - self._set_global_filter_expression() - self._set_sort_expressions() - self._set_yadcf_data(query) - - # apply filters - query = query.filter( - *[e for e in self.filter_expressions if e is not None]) - - self.cardinality_filtered = query.with_entities( - self.columns[0].sqla_expr).count() - - # apply sorts - query = query.order_by( - *[e for e in self.sort_expressions if e is not None]) - - # add paging options - length = int(self.params.get('length')) - if length >= 0: - query = query.limit(length) - elif length == -1: - pass - else: - raise (ValueError( - 'Length should be a positive integer or -1 to disable')) - query = query.offset(int(self.params.get('start'))) - - # add columns to query - query = query.with_entities(*[c.sqla_expr for c in self.columns]) - - # fetch the result of the queries - column_names = [ - col.mData if col.mData else str(i) - for i, col in enumerate(self.columns) - ] - self.results = [{k: v - for k, v in zip(column_names, row)} - for row in query.all()] - - def _set_column_filter_expressions(self): - """Construct the query: filtering. - - Add filtering when per column searching is used. 
- """ - # per columns filters: - column_params_map = self._map_columns_with_params(self.columns, self.params) - for i, col in column_params_map: - filter_expr = None - value = self.params.get('columns[{:d}][search][value]'.format(i), - '').replace("\\","") - if value: - search_func = SEARCH_METHODS[col.search_method] - filter_expr = search_func(col.sqla_expr, value) - self.filter_expressions.append(filter_expr) - - def _set_global_filter_expression(self): - # global search filter - global_search = self.params.get('search[value]', '') - if global_search == '': - return - - if (self.allow_regex_searches - and self.params.get('search[regex]') == 'true'): - op = self._get_regex_operator() - val = clean_regex(global_search) - - def filter_for(col): - return col.sqla_expr.op(op)(val) - else: - val = '%' + global_search + '%' - - def filter_for(col): - return col.sqla_expr.cast(Text).ilike(val) - - global_filter = [ - filter_for(col) for col in self.columns if col.global_search - ] - - self.filter_expressions.append(or_(*global_filter)) - - def _set_sort_expressions(self): - """Construct the query: sorting. - - Add sorting(ORDER BY) on the columns needed to be applied on. - """ - column_params_map = dict(self._map_columns_with_params(self.columns, self.params)) - sort_expressions = [] - i = 0 - while self.params.get('order[{:d}][column]'.format(i), False): - column_nr = int(self.params.get('order[{:d}][column]'.format(i))) - column = column_params_map[column_nr] - direction = self.params.get('order[{:d}][dir]'.format(i)) - sort_expr = column.sqla_expr - if direction == 'asc': - sort_expr = sort_expr.asc() - elif direction == 'desc': - sort_expr = sort_expr.desc() - else: - raise ValueError( - 'Invalid order direction: {}'.format(direction)) - if column.nulls_order: - if column.nulls_order == 'nullsfirst': - sort_expr = sort_expr.nullsfirst() - elif column.nulls_order == 'nullslast': - sort_expr = sort_expr.nullslast() - else: - raise ValueError( - 'Invalid order direction: {}'.format(direction)) - - sort_expressions.append(sort_expr) - i += 1 - self.sort_expressions = sort_expressions - - def _get_regex_operator(self): - if isinstance(self.query.session.bind.dialect, postgresql.dialect): - return '~' - elif isinstance(self.query.session.bind.dialect, mysql.dialect): - return 'REGEXP' - elif isinstance(self.query.session.bind.dialect, sqlite.dialect): - return 'REGEXP' - else: - raise NotImplementedError( - 'Regex searches are not implemented for this dialect') diff --git a/examples/flask_tut/datatables/search_methods.py b/examples/flask_tut/datatables/search_methods.py deleted file mode 100644 index 6d01d7d..0000000 --- a/examples/flask_tut/datatables/search_methods.py +++ /dev/null @@ -1,99 +0,0 @@ -import datetime -import logging - -from dateutil.parser import parse as date_parse -from sqlalchemy import Text - -logger = logging.getLogger(__name__) - -search_operators = { - '=': lambda expr, value: expr == value, - '>': lambda expr, value: expr > value, - '>=': lambda expr, value: expr >= value, - '<': lambda expr, value: expr < value, - '<=': lambda expr, value: expr <= value, -} - - -def parse_query_value(combined_value): - """Parse value in form of '>value' to a lambda and a value.""" - split = len(combined_value) - len(combined_value.lstrip('<>=')) - operator = combined_value[:split] - if operator == '': - operator = '=' - try: - operator_func = search_operators[operator] - except KeyError: - raise ValueError( - 'Numeric query should start with operator, choose from %s' % - ', 
'.join(search_operators.keys())) - value = combined_value[split:].strip() - return operator_func, value - - -def numeric_query(expr, value): - operator_func, value = parse_query_value(value) - if value == '': - num_value = 0 - else: - num_value = float(value) - - return operator_func(expr, num_value) - - -def date_query(expr, value): - operator_func, value = parse_query_value(value) - try: - date_value = date_parse(value) - except ValueError: - date_value = datetime.datetime.now() - - return operator_func(expr, date_value) - - -def yadcf_range_number(expr, value): - v_from, v_to = value.split('-yadcf_delim-') - v_from = float(v_from) if v_from != '' else -float('inf') - v_to = float(v_to) if v_to != '' else float('inf') - logger.debug('yadcf_range_number: between %f and %f', v_from, v_to) - return expr.between(v_from, v_to) - - -def yadcf_range_date(expr, value): - v_from, v_to = value.split('-yadcf_delim-') - v_from = date_parse(v_from) if v_from != '' else datetime.date.min - v_to = date_parse(v_to) if v_to != '' else datetime.date.max - logger.debug('yadcf_range_date: between %s and %s', v_from, v_to) - return expr.between(v_from, v_to) - - -def yadcf_multi_select(expr, value): - options = value.split('|') - #Added the below statement so that null could be converted to None as - #null is not recognized in python - options = [None if i.strip()=='null' else i for i in options] - #print(options) - logger.debug('yadcf_multi_select: in %s', options) - #Modified so when someone searches null it would be taken care of. - if None in options: - operator_func = search_operators['='] - return ((expr.cast(Text).in_(options)) | operator_func(expr.cast(Text),None)) - else: - return expr.cast(Text).in_(options) - - -SEARCH_METHODS = { - 'none': lambda expr, value: None, - 'string_contains': lambda expr, value: expr.ilike('%' + value + '%'), - 'ilike': lambda expr, value: expr.ilike(value), - 'like': lambda expr, value: expr.like(value), - 'numeric': numeric_query, - 'date': date_query, - 'yadcf_text': lambda expr, value: expr.ilike('%' + value + '%'), - 'yadcf_autocomplete': lambda expr, value: expr == value, - 'yadcf_select': lambda expr, value: expr.ilike('%' + value + '%'), - 'yadcf_multi_select': yadcf_multi_select, - 'yadcf_range_number': yadcf_range_number, - 'yadcf_range_number_slider': yadcf_range_number, - 'yadcf_range_date': yadcf_range_date -} From 28278630ff1647fd9d845700d7f180936b120807 Mon Sep 17 00:00:00 2001 From: kartikeyas00 <33379978+kartikeyas00@users.noreply.github.com> Date: Fri, 11 Sep 2020 20:59:30 +0530 Subject: [PATCH 3/6] New param, new private method & other improvements - new parameter yadcf_data is added to ColumnDT class This change is necessary as before when the search method is set as yadcf_select, yadcf_multiselect and yadcf_autocomplete then always yadcf data is being returned even when it is not required by the frontend as in some cases the data can be defined in the frontend and no need of data is required from the backend. This commit provides the yadcf_data params which can be set to False when we don't want the yadcf_data. - new private method _map_columns_with_params in DataTables class This change is necessary as before when in frontend columns are not in the same order or extra then compared to frontend, it results in the wrong data to be sent to the frontend from the server. This method will compare the ColumnsDT data with the params data and would map the correct column number in the frontend to the ColumnDT data. 
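  To make the mapping concrete, a minimal sketch (the models and column
  definitions below are taken from the test suite and are only illustrative,
  not part of this patch):

      from datatables import ColumnDT
      from .models import Address, User  # test-suite models, used here as examples

      # backend declares the columns in one order ...
      columns = [ColumnDT(User.id, mData='ID'),
                 ColumnDT(Address.description, mData='Address')]

      # ... while the frontend reports them in another order
      # (only the relevant request keys are shown)
      params = {'columns[0][data]': 'Address', 'columns[1][data]': 'ID'}

      # _map_columns_with_params(columns, params) pairs each frontend column
      # index with the ColumnDT whose mData matches that column's [data] value:
      #     [(0, <ColumnDT for Address.description>), (1, <ColumnDT for User.id>)]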
- other improvements yadcf_multi_select in search_methods.py have been imporved to handle null from the JavaScript. In DataTables class add_column method have been changed to with_entities method in the query object as add_column add a column or columns to the list of columns but with_entities replaces the select list with the current entities. --- datatables/column_dt.py | 8 +++- datatables/datatables.py | 81 ++++++++++++++++++++++++++---------- datatables/search_methods.py | 13 +++++- 3 files changed, 77 insertions(+), 25 deletions(-) diff --git a/datatables/column_dt.py b/datatables/column_dt.py index 0a2c378..7f51477 100644 --- a/datatables/column_dt.py +++ b/datatables/column_dt.py @@ -13,6 +13,7 @@ 'search_method', 'nulls_order', 'global_search', + 'yadcf_data' ]) @@ -45,7 +46,10 @@ class ColumnDT(ColumnTuple): - 'nullsfirst' - 'nullslast'. :param global_search: search this column for the global search box - + :param yadcf_data : define if the data needs to be returned for yadcf plugin. + Possible values: + - False + - True (default) :type sqla_expr: SQLAlchemy query expression :type mData: str :type search_method: str @@ -64,6 +68,7 @@ def __new__( search_method='string_contains', nulls_order=None, global_search=True, + yadcf_data=True, ): """Set default values due to namedtuple immutability.""" if nulls_order and nulls_order not in NULLS_ORDER: @@ -84,4 +89,5 @@ def __new__( search_method, nulls_order, global_search, + yadcf_data, ) diff --git a/datatables/datatables.py b/datatables/datatables.py index dc87561..abceaeb 100644 --- a/datatables/datatables.py +++ b/datatables/datatables.py @@ -1,6 +1,7 @@ from __future__ import absolute_import import math +import re from sqlalchemy import Text, func, or_ from sqlalchemy.dialects import mysql, postgresql, sqlite @@ -62,6 +63,33 @@ def output_result(self): for k, v in self.yadcf_params: output[k] = v return output + + def _map_columns_with_params(self, columns, params): + """Compare columns data with the parameters data and map the correct + column number to the columns data. As a lot of times in frontend columns + are not in the correct order as they are in the backend. 
Also there + are cases when extra dummy columns are added in the frontend and they + disturb the sequencing, thus the results coming from the backend.""" + pattern = re.compile("columns\[(.*?)\]\[data\]") + # Extract only the keys of type columns[i][data] from the params + params_column_data = {k: v for k, v in params.items() if pattern.match(k)} + column_params_map = [] + i = 0 + for key, value in params_column_data.items(): + column_number = int(pattern.search(key).group(1)) + if str(value): + for column in columns: + # If the mData is specified as well as the data is specified + # in the frontend then we would try to map the correct column number + # You can set the data in the datatables here https://datatables.net/reference/option/columns.data + if str(value) == column.mData: + column_params_map.append((column_number, column)) + break + else: + # If we are unable to find the matching data + column_params_map.append((column_number, columns[i])) + i += 1 + return column_params_map def _query_with_all_filters_except_one(self, query, exclude): return query.filter(*[ @@ -71,20 +99,26 @@ def _query_with_all_filters_except_one(self, query, exclude): def _set_yadcf_data(self, query): # determine values for yadcf filters - for i, col in enumerate(self.columns): - if col.search_method in 'yadcf_range_number_slider': - v = query.add_columns( - func.min(col.sqla_expr), func.max(col.sqla_expr)).one() - self.yadcf_params.append(('yadcf_data_{:d}'.format(i), - (math.floor(v[0]), math.ceil(v[1])))) - if col.search_method in [ - 'yadcf_select', 'yadcf_multi_select', 'yadcf_autocomplete' - ]: - filtered = self._query_with_all_filters_except_one( - query=query, exclude=i) - v = filtered.add_columns(col.sqla_expr).distinct().all() - self.yadcf_params.append(('yadcf_data_{:d}'.format(i), - [r[0] for r in v])) + column_params_map = self._map_columns_with_params(self.columns, self.params) + for i, col in column_params_map: + if col.yadcf_data: + if col.search_method in 'yadcf_range_number_slider': + v = query.with_entities( + func.min(col.sqla_expr), func.max(col.sqla_expr)).one() + self.yadcf_params.append(('yadcf_data_{:d}'.format(i), + (math.floor(v[0]), math.ceil(v[1])))) + + if col.search_method in [ + 'yadcf_select', 'yadcf_multi_select', 'yadcf_autocomplete' + ]: + filtered = self._query_with_all_filters_except_one( + query=query, exclude=i) + v = filtered.with_entities(col.sqla_expr).distinct().all() + #Added the below `if` statement so that data with only + #null value is not returned. + if not(len(v)==1 and v[0][0]==None): + self.yadcf_params.append(('yadcf_data_{:d}'.format(i), + [r[0] for r in v])) def run(self): """Launch filtering, sorting and paging to output results.""" @@ -102,7 +136,7 @@ def run(self): query = query.filter( *[e for e in self.filter_expressions if e is not None]) - self.cardinality_filtered = query.add_columns( + self.cardinality_filtered = query.with_entities( self.columns[0].sqla_expr).count() # apply sorts @@ -116,12 +150,12 @@ def run(self): elif length == -1: pass else: - raise (ValueError( + raise (ValueError( 'Length should be a positive integer or -1 to disable')) query = query.offset(int(self.params.get('start'))) # add columns to query - query = query.add_columns(*[c.sqla_expr for c in self.columns]) + query = query.with_entities(*[c.sqla_expr for c in self.columns]) # fetch the result of the queries column_names = [ @@ -138,13 +172,15 @@ def _set_column_filter_expressions(self): Add filtering when per column searching is used. 
""" # per columns filters: - for i in range(len(self.columns)): + column_params_map = self._map_columns_with_params(self.columns, self.params) + for i, col in column_params_map: filter_expr = None value = self.params.get('columns[{:d}][search][value]'.format(i), - '') + '').replace("\\","") + if value: - search_func = SEARCH_METHODS[self.columns[i].search_method] - filter_expr = search_func(self.columns[i].sqla_expr, value) + search_func = SEARCH_METHODS[col.search_method] + filter_expr = search_func(col.sqla_expr, value) self.filter_expressions.append(filter_expr) def _set_global_filter_expression(self): @@ -177,11 +213,12 @@ def _set_sort_expressions(self): Add sorting(ORDER BY) on the columns needed to be applied on. """ + column_params_map = dict(self._map_columns_with_params(self.columns, self.params)) sort_expressions = [] i = 0 while self.params.get('order[{:d}][column]'.format(i), False): column_nr = int(self.params.get('order[{:d}][column]'.format(i))) - column = self.columns[column_nr] + column = column_params_map[column_nr] direction = self.params.get('order[{:d}][dir]'.format(i)) sort_expr = column.sqla_expr if direction == 'asc': diff --git a/datatables/search_methods.py b/datatables/search_methods.py index 1515a37..6a0056e 100644 --- a/datatables/search_methods.py +++ b/datatables/search_methods.py @@ -37,7 +37,6 @@ def numeric_query(expr, value): num_value = 0 else: num_value = float(value) - return operator_func(expr, num_value) @@ -69,8 +68,18 @@ def yadcf_range_date(expr, value): def yadcf_multi_select(expr, value): options = value.split('|') + # Sometimes the yadcf_multi_select options contains null. Reasons could be + # a null value in the backend or explicitly set by the user in the frontend. + #Added the below line so that null could be converted to None as null is not + #recognized in python. + options = [None if i.strip()=='null' else i for i in options] logger.debug('yadcf_multi_select: in %s', options) - return expr.cast(Text).in_(options) + #Modified so when someone searches null it would be taken care of. + if None in options: + operator_func = search_operators['='] + return ((expr.cast(Text).in_(options)) | operator_func(expr.cast(Text),None)) + else: + return expr.cast(Text).in_(options) SEARCH_METHODS = { From 0300e573f71c76ca363110dbfcda85d6b92dac1d Mon Sep 17 00:00:00 2001 From: kartikeyas00 <33379978+kartikeyas00@users.noreply.github.com> Date: Fri, 11 Sep 2020 21:51:07 +0530 Subject: [PATCH 4/6] Add helpers, new test for changes in datatables - Add 3 new helper methods 1) create_dt_params_with_mData This create params when the data source is set in the frontend 2) create_dt_params_with_mData_shuffled It does the same thing as above but changes the order in the frontend as compared to the backend. 3) create_dt_params_with_mData_with_extra_data It does the same thing as #1 but adds an extra column in the params coming from the frontend. 
- New tests 1) test_with_yacdf_data_params in test_column_dt.py test the yacdf_data params 2) parametrize test_fields_mData in test_fields.py so the test could be done on different cases of params from frontend --- tests/helpers.py | 106 +++++++++++++++++++++++++++++++++++++++- tests/test_column_dt.py | 8 +++ tests/test_fields.py | 19 ++++--- tests/test_listing.py | 2 +- tests/test_searching.py | 2 - 5 files changed, 127 insertions(+), 10 deletions(-) diff --git a/tests/helpers.py b/tests/helpers.py index ddd99e5..9cad587 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,5 +1,10 @@ +from random import shuffle + def create_dt_params(columns, search='', start=0, length=10, order=None): - """Create DataTables input parameters.""" + """Create DataTables input parameters when the data source from the rows + data object/ array is not set. + + Read more about setting column data source here https://datatables.net/reference/option/columns.data""" params = { 'draw': '1', 'start': str(start), @@ -22,3 +27,102 @@ def create_dt_params(columns, search='', start=0, length=10, order=None): params['order[%s][%s]' % (i, key)] = str(value) return params + +# These methods would only be used when the mData param is defined in the backend + +def create_dt_params_with_mData(columns, search='', start=0, length=10, order=None): + """Create DataTables input parameters when the data source from the rows + data object/ array is set. + + Read more about setting column data source here https://datatables.net/reference/option/columns.data""" + + params = { + 'draw': '1', + 'start': str(start), + 'length': str(length), + 'search[value]': str(search), + 'search[regex]': 'false' + } + + for i, item in enumerate(columns): + cols = 'columns[%s]' % i + params['%s%s' % (cols, '[data]')] = item.mData + params['%s%s' % (cols, '[name]')] = '' + params['%s%s' % (cols, '[searchable]')] = 'true' + params['%s%s' % (cols, '[orderable]')] = 'true' + params['%s%s' % (cols, '[search][value]')] = '' + params['%s%s' % (cols, '[search][regex]')] = 'false' + + for i, item in enumerate(order or [{'column': 0, 'dir': 'asc'}]): + for key, value in item.items(): + params['order[%s][%s]' % (i, key)] = str(value) + + return params + +def create_dt_params_with_mData_shuffled(columns, search='', start=0, length=10, order=None): + """Create DataTables input parameters when the data source from the rows + data object/ array is set. Also when the order in the frontend is not same + as in the backend. + + Read more about setting column data source here https://datatables.net/reference/option/columns.data""" + + params = { + 'draw': '1', + 'start': str(start), + 'length': str(length), + 'search[value]': str(search), + 'search[regex]': 'false' + } + # Shuffle the columns in place + shuffle(columns) + for i, item in enumerate(columns): + cols = 'columns[%s]' % i + params['%s%s' % (cols, '[data]')] = item.mData + params['%s%s' % (cols, '[name]')] = '' + params['%s%s' % (cols, '[searchable]')] = 'true' + params['%s%s' % (cols, '[orderable]')] = 'true' + params['%s%s' % (cols, '[search][value]')] = '' + params['%s%s' % (cols, '[search][regex]')] = 'false' + + for i, item in enumerate(order or [{'column': 0, 'dir': 'asc'}]): + for key, value in item.items(): + params['order[%s][%s]' % (i, key)] = str(value) + + return params + +def create_dt_params_with_mData_with_extra_data(columns, search='', start=0, length=10, order=None): + """Create DataTables input parameters when the data source from the rows + data object/ array is set. 
Also when there is an extra data source defined in + the frontend just for the use in the frontend but not in the backend. + An example of this is here https://editor.datatables.net/examples/bubble-editing/simple.html + + Read more about setting column data source here https://datatables.net/reference/option/columns.data""" + + params = { + 'draw': '1', + 'start': str(start), + 'length': str(length), + 'search[value]': str(search), + 'search[regex]': 'false' + } + # Add the extra params for the extra data source added in the frontend but + # not in the backend. + params['columns[0][name]'] = '' + params['columns[0][searchable]'] = 'true' + params['columns[0][orderable]'] = 'false' + params['columns[0][search][value]'] = '' + params['columns[0][search][regex]'] = 'false' + for i, item in enumerate(columns, 1): + cols = 'columns[%s]' % i + params['%s%s' % (cols, '[data]')] = item.mData + params['%s%s' % (cols, '[name]')] = '' + params['%s%s' % (cols, '[searchable]')] = 'true' + params['%s%s' % (cols, '[orderable]')] = 'true' + params['%s%s' % (cols, '[search][value]')] = '' + params['%s%s' % (cols, '[search][regex]')] = 'false' + + for i, item in enumerate(order or [{'column': 1, 'dir': 'asc'}]): + for key, value in item.items(): + params['order[%s][%s]' % (i, key)] = str(value) + + return params \ No newline at end of file diff --git a/tests/test_column_dt.py b/tests/test_column_dt.py index b514cb9..48b4962 100644 --- a/tests/test_column_dt.py +++ b/tests/test_column_dt.py @@ -39,3 +39,11 @@ def test_with_invalid_search_method(): """Return column with a specific filter.""" with pytest.raises(ValueError): ColumnDT(User.name, search_method='invalid') + +#Added by kartikeyas00 +def test_with_yadcf_data_param(): + """Return Column with yadcf_data filter as false.""" + col = ColumnDT(User.name, yadcf_data=False) + + if col.yadcf_data != False: + raise AssertionError() diff --git a/tests/test_fields.py b/tests/test_fields.py index cc17015..3109301 100644 --- a/tests/test_fields.py +++ b/tests/test_fields.py @@ -3,11 +3,19 @@ from datatables import ColumnDT, DataTables -from .helpers import create_dt_params +from .helpers import create_dt_params, create_dt_params_with_mData, create_dt_params_with_mData_shuffled, create_dt_params_with_mData_with_extra_data from .models import Address, User - -def test_fields_mdata(session): +@pytest.mark.parametrize( + ('create_dt_params_function'), + ( + (create_dt_params), + (create_dt_params_with_mData), + (create_dt_params_with_mData_shuffled), + (create_dt_params_with_mData_with_extra_data) + ) +) +def test_fields_mdata(session, create_dt_params_function): """Test if the result's data have mData set.""" columns = [ ColumnDT(User.id, mData='ID'), @@ -18,10 +26,9 @@ def test_fields_mdata(session): query = session.query().select_from(User).join(Address) - params = create_dt_params(columns) + params = create_dt_params_function(columns) rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 3 assert 'ID' in res['data'][0] assert 'Username' in res['data'][0] @@ -171,4 +178,4 @@ def test_column_not_searchable(session): assert len(res['data']) == 0 assert res['recordsTotal'] == '51' - assert res['recordsFiltered'] == '0' + assert res['recordsFiltered'] == '0' \ No newline at end of file diff --git a/tests/test_listing.py b/tests/test_listing.py index 376b123..abe94fa 100644 --- a/tests/test_listing.py +++ b/tests/test_listing.py @@ -161,4 +161,4 @@ def test_list_specific_page(session): assert res['recordsTotal'] == 
'52' assert res['recordsFiltered'] == '52' assert res['data'][0]['0'] == 51 - assert res['data'][1]['0'] == 52 + assert res['data'][1]['0'] == 52 \ No newline at end of file diff --git a/tests/test_searching.py b/tests/test_searching.py index 93900f8..8a88774 100644 --- a/tests/test_searching.py +++ b/tests/test_searching.py @@ -134,7 +134,6 @@ def test_date_lt(session): column=User.birthday, search_method='date', search_value='<1970-01-03') - print(res) assert res['recordsFiltered'] == '1' @@ -144,7 +143,6 @@ def test_yadcf_range_date(session): column=User.birthday, search_method='yadcf_range_date', search_value='1970-01-03-yadcf_delim-1970-01-13') - print(res) assert res['recordsFiltered'] == '1' From 5144ab92564eca85a4c82dfd8b33a039459c21c4 Mon Sep 17 00:00:00 2001 From: kartikeyas00 <33379978+kartikeyas00@users.noreply.github.com> Date: Fri, 11 Sep 2020 23:16:32 +0530 Subject: [PATCH 5/6] Add new test in test_fields.py to test yadcf_data parameter --- tests/test_fields.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/tests/test_fields.py b/tests/test_fields.py index 3109301..f1b0522 100644 --- a/tests/test_fields.py +++ b/tests/test_fields.py @@ -178,4 +178,28 @@ def test_column_not_searchable(session): assert len(res['data']) == 0 assert res['recordsTotal'] == '51' - assert res['recordsFiltered'] == '0' \ No newline at end of file + assert res['recordsFiltered'] == '0' + +@pytest.mark.parametrize( + ('yadcf_data_param', 'result'), + ( + (True, True), + (False, False), + ) +) +def test_column_yadcf_data(session, yadcf_data_param, result): + """Test if result's have yadcf data or not based on the given parameters.""" + columns = [ + ColumnDT(User.id, mData='ID'), + ColumnDT(User.name, mData='Username', search_method='yadcf_select', yadcf_data=yadcf_data_param) + ] + + query = session.query().select_from(User) + + params = create_dt_params(columns) + rowTable = DataTables(params, query, columns) + res = rowTable.output_result() + + assert len(res['data']) == 10 + assert res['recordsTotal'] == '50' + assert ('yadcf_data_1' in res) == result \ No newline at end of file From 1da56b09d11445f843cb097d7bdf931033940a5b Mon Sep 17 00:00:00 2001 From: kartikeyas00 <33379978+kartikeyas00@users.noreply.github.com> Date: Fri, 11 Sep 2020 23:18:00 +0530 Subject: [PATCH 6/6] Reformat with black --- datatables/clean_regex.py | 10 +-- datatables/column_dt.py | 49 +++++----- datatables/datatables.py | 148 +++++++++++++++--------------- datatables/search_methods.py | 83 ++++++++--------- tests/conftest.py | 7 +- tests/helpers.py | 169 ++++++++++++++++++----------------- tests/models.py | 16 ++-- tests/test_column_dt.py | 17 ++-- tests/test_fields.py | 152 +++++++++++++++++-------------- tests/test_listing.py | 48 +++++----- tests/test_ordering.py | 67 ++++++++------ tests/test_searching.py | 144 +++++++++++++++-------------- 12 files changed, 482 insertions(+), 428 deletions(-) diff --git a/datatables/clean_regex.py b/datatables/clean_regex.py index ec534ee..f8ce740 100644 --- a/datatables/clean_regex.py +++ b/datatables/clean_regex.py @@ -11,29 +11,29 @@ def clean_regex(regex): # these characters are escaped (all except alternation | and escape \) # see http://www.regular-expressions.info/refquick.html - escape_chars = '[^$.?*+(){}' + escape_chars = "[^$.?*+(){}" # remove any escape chars - ret_regex = ret_regex.replace('\\', '') + ret_regex = ret_regex.replace("\\", "") # escape any characters which are used by regex # could probably concoct something 
incomprehensible using re.sub() but # prefer to write clear code with this loop # note expectation that no characters have already been escaped for c in escape_chars: - ret_regex = ret_regex.replace(c, '\\' + c) + ret_regex = ret_regex.replace(c, "\\" + c) # remove any double alternations until these don't exist any more while True: old_regex = ret_regex - ret_regex = ret_regex.replace('||', '|') + ret_regex = ret_regex.replace("||", "|") if old_regex == ret_regex: break # if last char is alternation | remove it because this # will cause operational error # this can happen as user is typing in global search box - while len(ret_regex) >= 1 and ret_regex[-1] == '|': + while len(ret_regex) >= 1 and ret_regex[-1] == "|": ret_regex = ret_regex[:-1] # and back to the caller diff --git a/datatables/column_dt.py b/datatables/column_dt.py index 7f51477..f24dcf7 100644 --- a/datatables/column_dt.py +++ b/datatables/column_dt.py @@ -4,17 +4,20 @@ from datatables.search_methods import SEARCH_METHODS -NULLS_ORDER = ['nullsfirst', 'nullslast'] +NULLS_ORDER = ["nullsfirst", "nullslast"] -ColumnTuple = namedtuple('ColumnDT', [ - 'sqla_expr', - 'column_name', - 'mData', - 'search_method', - 'nulls_order', - 'global_search', - 'yadcf_data' -]) +ColumnTuple = namedtuple( + "ColumnDT", + [ + "sqla_expr", + "column_name", + "mData", + "search_method", + "nulls_order", + "global_search", + "yadcf_data", + ], +) class ColumnDT(ColumnTuple): @@ -48,7 +51,7 @@ class ColumnDT(ColumnTuple): :param global_search: search this column for the global search box :param yadcf_data : define if the data needs to be returned for yadcf plugin. Possible values: - - False + - False - True (default) :type sqla_expr: SQLAlchemy query expression :type mData: str @@ -61,25 +64,25 @@ class ColumnDT(ColumnTuple): """ def __new__( - cls, - sqla_expr, - column_name=None, - mData=None, - search_method='string_contains', - nulls_order=None, - global_search=True, - yadcf_data=True, + cls, + sqla_expr, + column_name=None, + mData=None, + search_method="string_contains", + nulls_order=None, + global_search=True, + yadcf_data=True, ): """Set default values due to namedtuple immutability.""" if nulls_order and nulls_order not in NULLS_ORDER: raise ValueError( - '{} is not an allowed value for nulls_order.'.format( - nulls_order)) + "{} is not an allowed value for nulls_order.".format(nulls_order) + ) if search_method not in SEARCH_METHODS: raise ValueError( - '{} is not an allowed value for search_method.'.format( - search_method)) + "{} is not an allowed value for search_method.".format(search_method) + ) return super(ColumnDT, cls).__new__( cls, diff --git a/datatables/datatables.py b/datatables/datatables.py index abceaeb..945f1e7 100644 --- a/datatables/datatables.py +++ b/datatables/datatables.py @@ -27,9 +27,8 @@ class DataTables: def __init__(self, request, query, columns, allow_regex_searches=False): """Initialize object and run the query.""" self.params = dict(request) - if 'sEcho' in self.params: - raise ValueError( - 'Legacy datatables not supported, upgrade to >=1.10') + if "sEcho" in self.params: + raise ValueError("Legacy datatables not supported, upgrade to >=1.10") self.query = query self.columns = columns self.results = None @@ -52,24 +51,24 @@ def __init__(self, request, query, columns, allow_regex_searches=False): def output_result(self): """Output results in the format needed by DataTables.""" output = {} - output['draw'] = str(int(self.params.get('draw', 1))) - output['recordsTotal'] = str(self.cardinality) - 
output['recordsFiltered'] = str(self.cardinality_filtered) + output["draw"] = str(int(self.params.get("draw", 1))) + output["recordsTotal"] = str(self.cardinality) + output["recordsFiltered"] = str(self.cardinality_filtered) if self.error: - output['error'] = self.error + output["error"] = self.error return output - output['data'] = self.results + output["data"] = self.results for k, v in self.yadcf_params: output[k] = v return output - + def _map_columns_with_params(self, columns, params): """Compare columns data with the parameters data and map the correct - column number to the columns data. As a lot of times in frontend columns - are not in the correct order as they are in the backend. Also there - are cases when extra dummy columns are added in the frontend and they - disturb the sequencing, thus the results coming from the backend.""" + column number to the columns data. As a lot of times in frontend columns + are not in the correct order as they are in the backend. Also there + are cases when extra dummy columns are added in the frontend and they + disturb the sequencing, thus the results coming from the backend.""" pattern = re.compile("columns\[(.*?)\]\[data\]") # Extract only the keys of type columns[i][data] from the params params_column_data = {k: v for k, v in params.items() if pattern.match(k)} @@ -89,36 +88,48 @@ def _map_columns_with_params(self, columns, params): # If we are unable to find the matching data column_params_map.append((column_number, columns[i])) i += 1 - return column_params_map + return column_params_map def _query_with_all_filters_except_one(self, query, exclude): - return query.filter(*[ - e for i, e in enumerate(self.filter_expressions) - if e is not None and i is not exclude - ]) + return query.filter( + *[ + e + for i, e in enumerate(self.filter_expressions) + if e is not None and i is not exclude + ] + ) def _set_yadcf_data(self, query): # determine values for yadcf filters column_params_map = self._map_columns_with_params(self.columns, self.params) for i, col in column_params_map: if col.yadcf_data: - if col.search_method in 'yadcf_range_number_slider': + if col.search_method in "yadcf_range_number_slider": v = query.with_entities( - func.min(col.sqla_expr), func.max(col.sqla_expr)).one() - self.yadcf_params.append(('yadcf_data_{:d}'.format(i), - (math.floor(v[0]), math.ceil(v[1])))) - + func.min(col.sqla_expr), func.max(col.sqla_expr) + ).one() + self.yadcf_params.append( + ( + "yadcf_data_{:d}".format(i), + (math.floor(v[0]), math.ceil(v[1])), + ) + ) + if col.search_method in [ - 'yadcf_select', 'yadcf_multi_select', 'yadcf_autocomplete' + "yadcf_select", + "yadcf_multi_select", + "yadcf_autocomplete", ]: filtered = self._query_with_all_filters_except_one( - query=query, exclude=i) + query=query, exclude=i + ) v = filtered.with_entities(col.sqla_expr).distinct().all() - #Added the below `if` statement so that data with only - #null value is not returned. - if not(len(v)==1 and v[0][0]==None): - self.yadcf_params.append(('yadcf_data_{:d}'.format(i), - [r[0] for r in v])) + # Added the below `if` statement so that data with only + # null value is not returned. 
+ if not (len(v) == 1 and v[0][0] == None): + self.yadcf_params.append( + ("yadcf_data_{:d}".format(i), [r[0] for r in v]) + ) def run(self): """Launch filtering, sorting and paging to output results.""" @@ -133,38 +144,35 @@ def run(self): self._set_yadcf_data(query) # apply filters - query = query.filter( - *[e for e in self.filter_expressions if e is not None]) + query = query.filter(*[e for e in self.filter_expressions if e is not None]) self.cardinality_filtered = query.with_entities( - self.columns[0].sqla_expr).count() + self.columns[0].sqla_expr + ).count() # apply sorts - query = query.order_by( - *[e for e in self.sort_expressions if e is not None]) + query = query.order_by(*[e for e in self.sort_expressions if e is not None]) # add paging options - length = int(self.params.get('length')) + length = int(self.params.get("length")) if length >= 0: query = query.limit(length) elif length == -1: pass else: - raise (ValueError( - 'Length should be a positive integer or -1 to disable')) - query = query.offset(int(self.params.get('start'))) + raise (ValueError("Length should be a positive integer or -1 to disable")) + query = query.offset(int(self.params.get("start"))) # add columns to query query = query.with_entities(*[c.sqla_expr for c in self.columns]) # fetch the result of the queries column_names = [ - col.mData if col.mData else str(i) - for i, col in enumerate(self.columns) + col.mData if col.mData else str(i) for i, col in enumerate(self.columns) + ] + self.results = [ + {k: v for k, v in zip(column_names, row)} for row in query.all() ] - self.results = [{k: v - for k, v in zip(column_names, row)} - for row in query.all()] def _set_column_filter_expressions(self): """Construct the query: filtering. @@ -175,8 +183,9 @@ def _set_column_filter_expressions(self): column_params_map = self._map_columns_with_params(self.columns, self.params) for i, col in column_params_map: filter_expr = None - value = self.params.get('columns[{:d}][search][value]'.format(i), - '').replace("\\","") + value = self.params.get( + "columns[{:d}][search][value]".format(i), "" + ).replace("\\", "") if value: search_func = SEARCH_METHODS[col.search_method] @@ -185,26 +194,24 @@ def _set_column_filter_expressions(self): def _set_global_filter_expression(self): # global search filter - global_search = self.params.get('search[value]', '') - if global_search == '': + global_search = self.params.get("search[value]", "") + if global_search == "": return - if (self.allow_regex_searches - and self.params.get('search[regex]') == 'true'): + if self.allow_regex_searches and self.params.get("search[regex]") == "true": op = self._get_regex_operator() val = clean_regex(global_search) def filter_for(col): return col.sqla_expr.op(op)(val) + else: - val = '%' + global_search + '%' + val = "%" + global_search + "%" def filter_for(col): return col.sqla_expr.cast(Text).ilike(val) - global_filter = [ - filter_for(col) for col in self.columns if col.global_search - ] + global_filter = [filter_for(col) for col in self.columns if col.global_search] self.filter_expressions.append(or_(*global_filter)) @@ -213,29 +220,29 @@ def _set_sort_expressions(self): Add sorting(ORDER BY) on the columns needed to be applied on. 
""" - column_params_map = dict(self._map_columns_with_params(self.columns, self.params)) + column_params_map = dict( + self._map_columns_with_params(self.columns, self.params) + ) sort_expressions = [] i = 0 - while self.params.get('order[{:d}][column]'.format(i), False): - column_nr = int(self.params.get('order[{:d}][column]'.format(i))) + while self.params.get("order[{:d}][column]".format(i), False): + column_nr = int(self.params.get("order[{:d}][column]".format(i))) column = column_params_map[column_nr] - direction = self.params.get('order[{:d}][dir]'.format(i)) + direction = self.params.get("order[{:d}][dir]".format(i)) sort_expr = column.sqla_expr - if direction == 'asc': + if direction == "asc": sort_expr = sort_expr.asc() - elif direction == 'desc': + elif direction == "desc": sort_expr = sort_expr.desc() else: - raise ValueError( - 'Invalid order direction: {}'.format(direction)) + raise ValueError("Invalid order direction: {}".format(direction)) if column.nulls_order: - if column.nulls_order == 'nullsfirst': + if column.nulls_order == "nullsfirst": sort_expr = sort_expr.nullsfirst() - elif column.nulls_order == 'nullslast': + elif column.nulls_order == "nullslast": sort_expr = sort_expr.nullslast() else: - raise ValueError( - 'Invalid order direction: {}'.format(direction)) + raise ValueError("Invalid order direction: {}".format(direction)) sort_expressions.append(sort_expr) i += 1 @@ -243,11 +250,12 @@ def _set_sort_expressions(self): def _get_regex_operator(self): if isinstance(self.query.session.bind.dialect, postgresql.dialect): - return '~' + return "~" elif isinstance(self.query.session.bind.dialect, mysql.dialect): - return 'REGEXP' + return "REGEXP" elif isinstance(self.query.session.bind.dialect, sqlite.dialect): - return 'REGEXP' + return "REGEXP" else: raise NotImplementedError( - 'Regex searches are not implemented for this dialect') + "Regex searches are not implemented for this dialect" + ) diff --git a/datatables/search_methods.py b/datatables/search_methods.py index 6a0056e..7e0b2dc 100644 --- a/datatables/search_methods.py +++ b/datatables/search_methods.py @@ -7,33 +7,34 @@ logger = logging.getLogger(__name__) search_operators = { - '=': lambda expr, value: expr == value, - '>': lambda expr, value: expr > value, - '>=': lambda expr, value: expr >= value, - '<': lambda expr, value: expr < value, - '<=': lambda expr, value: expr <= value, + "=": lambda expr, value: expr == value, + ">": lambda expr, value: expr > value, + ">=": lambda expr, value: expr >= value, + "<": lambda expr, value: expr < value, + "<=": lambda expr, value: expr <= value, } def parse_query_value(combined_value): """Parse value in form of '>value' to a lambda and a value.""" - split = len(combined_value) - len(combined_value.lstrip('<>=')) + split = len(combined_value) - len(combined_value.lstrip("<>=")) operator = combined_value[:split] - if operator == '': - operator = '=' + if operator == "": + operator = "=" try: operator_func = search_operators[operator] except KeyError: raise ValueError( - 'Numeric query should start with operator, choose from %s' % - ', '.join(search_operators.keys())) + "Numeric query should start with operator, choose from %s" + % ", ".join(search_operators.keys()) + ) value = combined_value[split:].strip() return operator_func, value def numeric_query(expr, value): operator_func, value = parse_query_value(value) - if value == '': + if value == "": num_value = 0 else: num_value = float(value) @@ -51,49 +52,49 @@ def date_query(expr, value): def 
yadcf_range_number(expr, value): - v_from, v_to = value.split('-yadcf_delim-') - v_from = float(v_from) if v_from != '' else -float('inf') - v_to = float(v_to) if v_to != '' else float('inf') - logger.debug('yadcf_range_number: between %f and %f', v_from, v_to) + v_from, v_to = value.split("-yadcf_delim-") + v_from = float(v_from) if v_from != "" else -float("inf") + v_to = float(v_to) if v_to != "" else float("inf") + logger.debug("yadcf_range_number: between %f and %f", v_from, v_to) return expr.between(v_from, v_to) def yadcf_range_date(expr, value): - v_from, v_to = value.split('-yadcf_delim-') - v_from = date_parse(v_from) if v_from != '' else datetime.date.min - v_to = date_parse(v_to) if v_to != '' else datetime.date.max - logger.debug('yadcf_range_date: between %s and %s', v_from, v_to) + v_from, v_to = value.split("-yadcf_delim-") + v_from = date_parse(v_from) if v_from != "" else datetime.date.min + v_to = date_parse(v_to) if v_to != "" else datetime.date.max + logger.debug("yadcf_range_date: between %s and %s", v_from, v_to) return expr.between(v_from, v_to) def yadcf_multi_select(expr, value): - options = value.split('|') + options = value.split("|") # Sometimes the yadcf_multi_select options contains null. Reasons could be - # a null value in the backend or explicitly set by the user in the frontend. - #Added the below line so that null could be converted to None as null is not - #recognized in python. - options = [None if i.strip()=='null' else i for i in options] - logger.debug('yadcf_multi_select: in %s', options) - #Modified so when someone searches null it would be taken care of. + # a null value in the backend or explicitly set by the user in the frontend. + # Added the below line so that null could be converted to None as null is not + # recognized in python. + options = [None if i.strip() == "null" else i for i in options] + logger.debug("yadcf_multi_select: in %s", options) + # Modified so when someone searches null it would be taken care of. 
if None in options: - operator_func = search_operators['='] - return ((expr.cast(Text).in_(options)) | operator_func(expr.cast(Text),None)) + operator_func = search_operators["="] + return (expr.cast(Text).in_(options)) | operator_func(expr.cast(Text), None) else: return expr.cast(Text).in_(options) SEARCH_METHODS = { - 'none': lambda expr, value: None, - 'string_contains': lambda expr, value: expr.ilike('%' + value + '%'), - 'ilike': lambda expr, value: expr.ilike(value), - 'like': lambda expr, value: expr.like(value), - 'numeric': numeric_query, - 'date': date_query, - 'yadcf_text': lambda expr, value: expr.ilike('%' + value + '%'), - 'yadcf_autocomplete': lambda expr, value: expr == value, - 'yadcf_select': lambda expr, value: expr.ilike('%' + value + '%'), - 'yadcf_multi_select': yadcf_multi_select, - 'yadcf_range_number': yadcf_range_number, - 'yadcf_range_number_slider': yadcf_range_number, - 'yadcf_range_date': yadcf_range_date + "none": lambda expr, value: None, + "string_contains": lambda expr, value: expr.ilike("%" + value + "%"), + "ilike": lambda expr, value: expr.ilike(value), + "like": lambda expr, value: expr.like(value), + "numeric": numeric_query, + "date": date_query, + "yadcf_text": lambda expr, value: expr.ilike("%" + value + "%"), + "yadcf_autocomplete": lambda expr, value: expr == value, + "yadcf_select": lambda expr, value: expr.ilike("%" + value + "%"), + "yadcf_multi_select": yadcf_multi_select, + "yadcf_range_number": yadcf_range_number, + "yadcf_range_number_slider": yadcf_range_number, + "yadcf_range_date": yadcf_range_date, } diff --git a/tests/conftest.py b/tests/conftest.py index f536dfb..a908595 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -15,14 +15,15 @@ def populate(session): """Create 3 adresses and 50 users.""" users = [] f = faker.Faker(seed=1) - addresses = [Address(description=d) for d in ['Street', 'Avenue', 'Road']] + addresses = [Address(description=d) for d in ["Street", "Avenue", "Road"]] session.add_all(addresses) for i, addr in zip(range(0, 50), itertools.cycle(addresses)): user = User( name=f.name(), address=addr, - birthday=datetime(1970, 1, 2) + timedelta(days=10 * i)) + birthday=datetime(1970, 1, 2) + timedelta(days=10 * i), + ) users.append(user) session.add_all(users) @@ -32,7 +33,7 @@ def populate(session): @pytest.fixture(scope="session") def engine(): print("TestCase: Using sqlite database") - return create_engine('sqlite:///', echo=False) + return create_engine("sqlite:///", echo=False) @pytest.fixture(scope="session") diff --git a/tests/helpers.py b/tests/helpers.py index 9cad587..f7a54d7 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -1,128 +1,137 @@ from random import shuffle -def create_dt_params(columns, search='', start=0, length=10, order=None): + +def create_dt_params(columns, search="", start=0, length=10, order=None): """Create DataTables input parameters when the data source from the rows - data object/ array is not set. - + data object/ array is not set. 
+ Read more about setting column data source here https://datatables.net/reference/option/columns.data""" params = { - 'draw': '1', - 'start': str(start), - 'length': str(length), - 'search[value]': str(search), - 'search[regex]': 'false' + "draw": "1", + "start": str(start), + "length": str(length), + "search[value]": str(search), + "search[regex]": "false", } for i, item in enumerate(columns): - cols = 'columns[%s]' % i - params['%s%s' % (cols, '[data]')] = i - params['%s%s' % (cols, '[name]')] = '' - params['%s%s' % (cols, '[searchable]')] = 'true' - params['%s%s' % (cols, '[orderable]')] = 'true' - params['%s%s' % (cols, '[search][value]')] = '' - params['%s%s' % (cols, '[search][regex]')] = 'false' - - for i, item in enumerate(order or [{'column': 0, 'dir': 'asc'}]): + cols = "columns[%s]" % i + params["%s%s" % (cols, "[data]")] = i + params["%s%s" % (cols, "[name]")] = "" + params["%s%s" % (cols, "[searchable]")] = "true" + params["%s%s" % (cols, "[orderable]")] = "true" + params["%s%s" % (cols, "[search][value]")] = "" + params["%s%s" % (cols, "[search][regex]")] = "false" + + for i, item in enumerate(order or [{"column": 0, "dir": "asc"}]): for key, value in item.items(): - params['order[%s][%s]' % (i, key)] = str(value) + params["order[%s][%s]" % (i, key)] = str(value) return params + # These methods would only be used when the mData param is defined in the backend -def create_dt_params_with_mData(columns, search='', start=0, length=10, order=None): + +def create_dt_params_with_mData(columns, search="", start=0, length=10, order=None): """Create DataTables input parameters when the data source from the rows - data object/ array is set. - + data object/ array is set. + Read more about setting column data source here https://datatables.net/reference/option/columns.data""" - + params = { - 'draw': '1', - 'start': str(start), - 'length': str(length), - 'search[value]': str(search), - 'search[regex]': 'false' + "draw": "1", + "start": str(start), + "length": str(length), + "search[value]": str(search), + "search[regex]": "false", } for i, item in enumerate(columns): - cols = 'columns[%s]' % i - params['%s%s' % (cols, '[data]')] = item.mData - params['%s%s' % (cols, '[name]')] = '' - params['%s%s' % (cols, '[searchable]')] = 'true' - params['%s%s' % (cols, '[orderable]')] = 'true' - params['%s%s' % (cols, '[search][value]')] = '' - params['%s%s' % (cols, '[search][regex]')] = 'false' - - for i, item in enumerate(order or [{'column': 0, 'dir': 'asc'}]): + cols = "columns[%s]" % i + params["%s%s" % (cols, "[data]")] = item.mData + params["%s%s" % (cols, "[name]")] = "" + params["%s%s" % (cols, "[searchable]")] = "true" + params["%s%s" % (cols, "[orderable]")] = "true" + params["%s%s" % (cols, "[search][value]")] = "" + params["%s%s" % (cols, "[search][regex]")] = "false" + + for i, item in enumerate(order or [{"column": 0, "dir": "asc"}]): for key, value in item.items(): - params['order[%s][%s]' % (i, key)] = str(value) + params["order[%s][%s]" % (i, key)] = str(value) return params -def create_dt_params_with_mData_shuffled(columns, search='', start=0, length=10, order=None): + +def create_dt_params_with_mData_shuffled( + columns, search="", start=0, length=10, order=None +): """Create DataTables input parameters when the data source from the rows data object/ array is set. Also when the order in the frontend is not same as in the backend. 
- + Read more about setting column data source here https://datatables.net/reference/option/columns.data""" - + params = { - 'draw': '1', - 'start': str(start), - 'length': str(length), - 'search[value]': str(search), - 'search[regex]': 'false' + "draw": "1", + "start": str(start), + "length": str(length), + "search[value]": str(search), + "search[regex]": "false", } # Shuffle the columns in place - shuffle(columns) + shuffle(columns) for i, item in enumerate(columns): - cols = 'columns[%s]' % i - params['%s%s' % (cols, '[data]')] = item.mData - params['%s%s' % (cols, '[name]')] = '' - params['%s%s' % (cols, '[searchable]')] = 'true' - params['%s%s' % (cols, '[orderable]')] = 'true' - params['%s%s' % (cols, '[search][value]')] = '' - params['%s%s' % (cols, '[search][regex]')] = 'false' - - for i, item in enumerate(order or [{'column': 0, 'dir': 'asc'}]): + cols = "columns[%s]" % i + params["%s%s" % (cols, "[data]")] = item.mData + params["%s%s" % (cols, "[name]")] = "" + params["%s%s" % (cols, "[searchable]")] = "true" + params["%s%s" % (cols, "[orderable]")] = "true" + params["%s%s" % (cols, "[search][value]")] = "" + params["%s%s" % (cols, "[search][regex]")] = "false" + + for i, item in enumerate(order or [{"column": 0, "dir": "asc"}]): for key, value in item.items(): - params['order[%s][%s]' % (i, key)] = str(value) + params["order[%s][%s]" % (i, key)] = str(value) return params -def create_dt_params_with_mData_with_extra_data(columns, search='', start=0, length=10, order=None): + +def create_dt_params_with_mData_with_extra_data( + columns, search="", start=0, length=10, order=None +): """Create DataTables input parameters when the data source from the rows data object/ array is set. Also when there is an extra data source defined in the frontend just for the use in the frontend but not in the backend. An example of this is here https://editor.datatables.net/examples/bubble-editing/simple.html - + Read more about setting column data source here https://datatables.net/reference/option/columns.data""" - + params = { - 'draw': '1', - 'start': str(start), - 'length': str(length), - 'search[value]': str(search), - 'search[regex]': 'false' + "draw": "1", + "start": str(start), + "length": str(length), + "search[value]": str(search), + "search[regex]": "false", } # Add the extra params for the extra data source added in the frontend but # not in the backend. 
- params['columns[0][name]'] = '' - params['columns[0][searchable]'] = 'true' - params['columns[0][orderable]'] = 'false' - params['columns[0][search][value]'] = '' - params['columns[0][search][regex]'] = 'false' + params["columns[0][name]"] = "" + params["columns[0][searchable]"] = "true" + params["columns[0][orderable]"] = "false" + params["columns[0][search][value]"] = "" + params["columns[0][search][regex]"] = "false" for i, item in enumerate(columns, 1): - cols = 'columns[%s]' % i - params['%s%s' % (cols, '[data]')] = item.mData - params['%s%s' % (cols, '[name]')] = '' - params['%s%s' % (cols, '[searchable]')] = 'true' - params['%s%s' % (cols, '[orderable]')] = 'true' - params['%s%s' % (cols, '[search][value]')] = '' - params['%s%s' % (cols, '[search][regex]')] = 'false' - - for i, item in enumerate(order or [{'column': 1, 'dir': 'asc'}]): + cols = "columns[%s]" % i + params["%s%s" % (cols, "[data]")] = item.mData + params["%s%s" % (cols, "[name]")] = "" + params["%s%s" % (cols, "[searchable]")] = "true" + params["%s%s" % (cols, "[orderable]")] = "true" + params["%s%s" % (cols, "[search][value]")] = "" + params["%s%s" % (cols, "[search][regex]")] = "false" + + for i, item in enumerate(order or [{"column": 1, "dir": "asc"}]): for key, value in item.items(): - params['order[%s][%s]' % (i, key)] = str(value) + params["order[%s][%s]" % (i, key)] = str(value) - return params \ No newline at end of file + return params diff --git a/tests/models.py b/tests/models.py index 43180d1..87c58ab 100644 --- a/tests/models.py +++ b/tests/models.py @@ -11,21 +11,21 @@ class User(Base): """Define a User.""" - __tablename__ = 'users' + __tablename__ = "users" id = Column(Integer, primary_key=True) name = Column(String, unique=True) created_at = Column(DateTime, default=datetime.datetime.utcnow) birthday = Column(Date) - address = relationship('Address', uselist=False, backref=backref('user')) + address = relationship("Address", uselist=False, backref=backref("user")) def __unicode__(self): """Give a readable representation of an instance.""" - return '%s' % self.name + return "%s" % self.name def __repr__(self): """Give a unambiguous representation of an instance.""" - return '<%s#%s>' % (self.__class__.__name__, self.id) + return "<%s#%s>" % (self.__class__.__name__, self.id) @hybrid_property def dummy(self): @@ -41,16 +41,16 @@ def dummy(cls): class Address(Base): """Define an Address.""" - __tablename__ = 'addresses' + __tablename__ = "addresses" id = Column(Integer, primary_key=True) description = Column(String, unique=True) - user_id = Column(Integer, ForeignKey('users.id')) + user_id = Column(Integer, ForeignKey("users.id")) def __unicode__(self): """Give a readable representation of an instance.""" - return '%s' % (self.id) + return "%s" % (self.id) def __repr__(self): """Give a unambiguous representation of an instance.""" - return '<%s#%s>' % (self.__class__.__name__, self.id) + return "<%s#%s>" % (self.__class__.__name__, self.id) diff --git a/tests/test_column_dt.py b/tests/test_column_dt.py index 48b4962..d7c1cef 100644 --- a/tests/test_column_dt.py +++ b/tests/test_column_dt.py @@ -9,38 +9,39 @@ def test_init_with_default_params(): """Return column with given default params.""" col = ColumnDT(User.id) - if not (col.nulls_order is None or col.search_method == 'string_contains'): + if not (col.nulls_order is None or col.search_method == "string_contains"): raise AssertionError() def test_with_filter_ok(): """Return column with a specific filter.""" - col = ColumnDT(User.name, 
search_method='like') + col = ColumnDT(User.name, search_method="like") - if col.search_method != 'like': + if col.search_method != "like": raise AssertionError() def test_with_valid_nulls_order(): """Return column with a specific filter.""" - col = ColumnDT(User.name, nulls_order='nullslast') + col = ColumnDT(User.name, nulls_order="nullslast") - if col.nulls_order != 'nullslast': + if col.nulls_order != "nullslast": raise AssertionError() def test_with_invalid_nulls_order(): """Return column with a specific filter.""" with pytest.raises(ValueError): - ColumnDT(User.name, nulls_order='invalid') + ColumnDT(User.name, nulls_order="invalid") def test_with_invalid_search_method(): """Return column with a specific filter.""" with pytest.raises(ValueError): - ColumnDT(User.name, search_method='invalid') + ColumnDT(User.name, search_method="invalid") -#Added by kartikeyas00 + +# Added by kartikeyas00 def test_with_yadcf_data_param(): """Return Column with yadcf_data filter as false.""" col = ColumnDT(User.name, yadcf_data=False) diff --git a/tests/test_fields.py b/tests/test_fields.py index f1b0522..f89ffcc 100644 --- a/tests/test_fields.py +++ b/tests/test_fields.py @@ -3,25 +3,31 @@ from datatables import ColumnDT, DataTables -from .helpers import create_dt_params, create_dt_params_with_mData, create_dt_params_with_mData_shuffled, create_dt_params_with_mData_with_extra_data +from .helpers import ( + create_dt_params, + create_dt_params_with_mData, + create_dt_params_with_mData_shuffled, + create_dt_params_with_mData_with_extra_data, +) from .models import Address, User + @pytest.mark.parametrize( - ('create_dt_params_function'), + ("create_dt_params_function"), ( - (create_dt_params), - (create_dt_params_with_mData), - (create_dt_params_with_mData_shuffled), - (create_dt_params_with_mData_with_extra_data) - ) + (create_dt_params), + (create_dt_params_with_mData), + (create_dt_params_with_mData_shuffled), + (create_dt_params_with_mData_with_extra_data), + ), ) def test_fields_mdata(session, create_dt_params_function): """Test if the result's data have mData set.""" columns = [ - ColumnDT(User.id, mData='ID'), - ColumnDT(User.name, mData='Username'), - ColumnDT(Address.description, mData='Address'), - ColumnDT(User.created_at, mData='Created at') + ColumnDT(User.id, mData="ID"), + ColumnDT(User.name, mData="Username"), + ColumnDT(Address.description, mData="Address"), + ColumnDT(User.created_at, mData="Created at"), ] query = session.query().select_from(User).join(Address) @@ -29,11 +35,11 @@ def test_fields_mdata(session, create_dt_params_function): params = create_dt_params_function(columns) rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 3 - assert 'ID' in res['data'][0] - assert 'Username' in res['data'][0] - assert 'Address' in res['data'][0] - assert 'Created at' in res['data'][0] + assert len(res["data"]) == 3 + assert "ID" in res["data"][0] + assert "Username" in res["data"][0] + assert "Address" in res["data"][0] + assert "Created at" in res["data"][0] def test_fields_search_filters(session): @@ -41,22 +47,22 @@ def test_fields_search_filters(session): query = session.query() columns = [ - ColumnDT(User.id, search_method='numeric'), - ColumnDT(User.name, search_method='string_contains'), - ColumnDT(User.birthday, search_method='date') + ColumnDT(User.id, search_method="numeric"), + ColumnDT(User.name, search_method="string_contains"), + ColumnDT(User.birthday, search_method="date"), ] user = session.query(User).filter(User.id == 
4).one() params = create_dt_params(columns) - params['columns[0][search][value]'] = '=4' - params['columns[1][search][value]'] = user.name - params['columns[2][search][value]'] = '>1965-02-02' - params['columns[2][search][value]'] = '<=99' + params["columns[0][search][value]"] = "=4" + params["columns[1][search][value]"] = user.name + params["columns[2][search][value]"] = ">1965-02-02" + params["columns[2][search][value]"] = "<=99" rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 1 + assert len(res["data"]) == 1 def test_calculating_age_on_the_fly(session): @@ -64,23 +70,23 @@ def test_calculating_age_on_the_fly(session): query = session.query().filter(User.id > 5) columns = [ - ColumnDT(User.id, search_method='numeric'), - ColumnDT(User.name, search_method='string_contains'), - ColumnDT(User.birthday, search_method='date'), - ColumnDT(func.datetime('now') - User.birthday, search_method='numeric') + ColumnDT(User.id, search_method="numeric"), + ColumnDT(User.name, search_method="string_contains"), + ColumnDT(User.birthday, search_method="date"), + ColumnDT(func.datetime("now") - User.birthday, search_method="numeric"), ] params = create_dt_params(columns) rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 10 + assert len(res["data"]) == 10 @pytest.fixture(scope="function") def fixtures_filed_filtering(session): - user51 = User(name='User 51') - user52 = User(name='User 52') + user51 = User(name="User 51") + user52 = User(name="User 52") session.add(user51) session.add(user52) @@ -96,24 +102,29 @@ def fixtures_filed_filtering(session): @pytest.mark.usefixtures("fixtures_filed_filtering") def test_fields_filtering(session): """Test if result's are filtered from global search field.""" - columns = [ColumnDT(User.id, ), ColumnDT(User.name)] + columns = [ + ColumnDT( + User.id, + ), + ColumnDT(User.name), + ] query = session.query().select_from(User) - params = create_dt_params(columns, search='51') + params = create_dt_params(columns, search="51") rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 1 - assert res['recordsTotal'] == '52' - assert res['recordsFiltered'] == '1' - assert res['data'][0]['1'] == 'User 51' + assert len(res["data"]) == 1 + assert res["recordsTotal"] == "52" + assert res["recordsFiltered"] == "1" + assert res["data"][0]["1"] == "User 51" @pytest.fixture(scope="function") def fixtures_fields_global_search_filtering_with_regex(session): - user51 = User(name='Run To') - user52 = User(name='Feeeeear Of') + user51 = User(name="Run To") + user52 = User(name="Feeeeear Of") session.add(user51) session.add(user52) @@ -129,29 +140,34 @@ def fixtures_fields_global_search_filtering_with_regex(session): @pytest.mark.usefixtures("fixtures_fields_global_search_filtering_with_regex") def test_fields_global_search_filtering_with_regex(session): """Test if result's are filtered from global search field.""" - columns = [ColumnDT(User.id, ), ColumnDT(User.name)] + columns = [ + ColumnDT( + User.id, + ), + ColumnDT(User.name), + ] query = session.query().select_from(User) - params = create_dt_params(columns, search='Fe*ar') - params['search[regex]'] = 'true' + params = create_dt_params(columns, search="Fe*ar") + params["search[regex]"] = "true" rowTable = DataTables(params, query, columns, allow_regex_searches=True) res = rowTable.output_result() - if 'error' in res: + if "error" in res: # unfortunately sqlite doesn't support 
regexp out of the box' - assert 'no such function: REGEXP' in res['error'] + assert "no such function: REGEXP" in res["error"] else: - assert len(res['data']) == 1 - assert res['recordsTotal'] == '1' - assert res['recordsFiltered'] == '1' - assert res['data'][0]['1'] == 'Feeeeear Of' + assert len(res["data"]) == 1 + assert res["recordsTotal"] == "1" + assert res["recordsFiltered"] == "1" + assert res["data"][0]["1"] == "Feeeeear Of" @pytest.fixture(scope="function") def fixtures_column_not_searchable(session): - user51 = User(name='User 51') + user51 = User(name="User 51") session.add(user51) session.commit() @@ -166,32 +182,38 @@ def fixtures_column_not_searchable(session): def test_column_not_searchable(session): """Test if result's are filtered from global search field.""" columns = [ - ColumnDT(User.id, mData='ID'), - ColumnDT(User.name, mData='Username', global_search=False) + ColumnDT(User.id, mData="ID"), + ColumnDT(User.name, mData="Username", global_search=False), ] query = session.query().select_from(User) - params = create_dt_params(columns, search='User 51') + params = create_dt_params(columns, search="User 51") rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 0 - assert res['recordsTotal'] == '51' - assert res['recordsFiltered'] == '0' - + assert len(res["data"]) == 0 + assert res["recordsTotal"] == "51" + assert res["recordsFiltered"] == "0" + + @pytest.mark.parametrize( - ('yadcf_data_param', 'result'), + ("yadcf_data_param", "result"), ( - (True, True), - (False, False), - ) + (True, True), + (False, False), + ), ) def test_column_yadcf_data(session, yadcf_data_param, result): """Test if result's have yadcf data or not based on the given parameters.""" columns = [ - ColumnDT(User.id, mData='ID'), - ColumnDT(User.name, mData='Username', search_method='yadcf_select', yadcf_data=yadcf_data_param) + ColumnDT(User.id, mData="ID"), + ColumnDT( + User.name, + mData="Username", + search_method="yadcf_select", + yadcf_data=yadcf_data_param, + ), ] query = session.query().select_from(User) @@ -200,6 +222,6 @@ def test_column_yadcf_data(session, yadcf_data_param, result): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 10 - assert res['recordsTotal'] == '50' - assert ('yadcf_data_1' in res) == result \ No newline at end of file + assert len(res["data"]) == 10 + assert res["recordsTotal"] == "50" + assert ("yadcf_data_1" in res) == result diff --git a/tests/test_listing.py b/tests/test_listing.py index abe94fa..e0cf689 100644 --- a/tests/test_listing.py +++ b/tests/test_listing.py @@ -16,10 +16,10 @@ def test_list(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 10 - assert len(res['data'][0]) == 1 - assert res['recordsTotal'] == '50' - assert res['recordsFiltered'] == '50' + assert len(res["data"]) == 10 + assert len(res["data"][0]) == 1 + assert res["recordsTotal"] == "50" + assert res["recordsFiltered"] == "50" def test_list_bad_length(session): @@ -32,7 +32,7 @@ def test_list_bad_length(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert 'Length should be' in res['error'] + assert "Length should be" in res["error"] def test_list_detail(session): @@ -41,7 +41,7 @@ def test_list_detail(session): ColumnDT(User.id), ColumnDT(User.name), ColumnDT(Address.description), - ColumnDT(User.created_at) + ColumnDT(User.created_at), ] query = session.query() @@ -50,7 +50,7 
@@ def test_list_detail(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data'][0]) == 4 + assert len(res["data"][0]) == 4 def test_list_fixed_length(session): @@ -63,7 +63,7 @@ def test_list_fixed_length(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 7 + assert len(res["data"]) == 7 def test_list_inner_join(session): @@ -76,9 +76,9 @@ def test_list_inner_join(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 3 - assert res['recordsTotal'] == '3' - assert res['recordsFiltered'] == '3' + assert len(res["data"]) == 3 + assert res["recordsTotal"] == "3" + assert res["recordsFiltered"] == "3" def test_list_total_length(session): @@ -91,12 +91,12 @@ def test_list_total_length(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 50 + assert len(res["data"]) == 50 @pytest.fixture(scope="function") def fixtures_list_hybrid_attributes(session): - user51 = User(name='User 51') + user51 = User(name="User 51") session.add(user51) session.commit() @@ -114,7 +114,7 @@ def test_list_hybrid_attributes(session): ColumnDT(User.id), ColumnDT(User.dummy), ColumnDT(User.name), - ColumnDT(User.created_at) + ColumnDT(User.created_at), ] session.query(*[User.id, User.dummy]).all() @@ -125,15 +125,15 @@ def test_list_hybrid_attributes(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 1 - assert res['data'][0]['1'] == 'Us' - assert res['data'][0]['2'] == 'User 51' + assert len(res["data"]) == 1 + assert res["data"][0]["1"] == "Us" + assert res["data"][0]["2"] == "User 51" @pytest.fixture(scope="function") def fixtures_list_specific_page(session): - user51 = User(name='User 51') - user52 = User(name='User 52') + user51 = User(name="User 51") + user52 = User(name="User 52") session.add(user51) session.add(user52) @@ -157,8 +157,8 @@ def test_list_specific_page(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 2 - assert res['recordsTotal'] == '52' - assert res['recordsFiltered'] == '52' - assert res['data'][0]['0'] == 51 - assert res['data'][1]['0'] == 52 \ No newline at end of file + assert len(res["data"]) == 2 + assert res["recordsTotal"] == "52" + assert res["recordsFiltered"] == "52" + assert res["data"][0]["0"] == 51 + assert res["data"][1]["0"] == 52 diff --git a/tests/test_ordering.py b/tests/test_ordering.py index 617300c..17581a7 100644 --- a/tests/test_ordering.py +++ b/tests/test_ordering.py @@ -9,12 +9,12 @@ @pytest.fixture(scope="function") def fixtures_ordering(session): """Set up fake population before tests.""" - user51 = User(name='000_User') - user52 = User(name='zzz_User') - addr4 = Address(description='000_Address') - addr5 = Address(description='zzz_Address') - user53 = User(name='UserFirstAddress', address=addr4) - user54 = User(name='UserLastAddress', address=addr5) + user51 = User(name="000_User") + user52 = User(name="zzz_User") + addr4 = Address(description="000_Address") + addr5 = Address(description="zzz_Address") + user53 = User(name="UserFirstAddress", address=addr4) + user54 = User(name="UserLastAddress", address=addr5) session.add(user51) session.add(user52) session.add(user53) @@ -35,7 +35,12 @@ def fixtures_ordering(session): @pytest.mark.usefixtures("fixtures_ordering") def test_ordering(session): 
"""Test if it returns a list with the correct order.""" - columns = [ColumnDT(User.id, ), ColumnDT(User.name)] + columns = [ + ColumnDT( + User.id, + ), + ColumnDT(User.name), + ] query = session.query().select_from(User) @@ -44,7 +49,7 @@ def test_ordering(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert res['data'][0]['1'] == 'zzz_User' + assert res["data"][0]["1"] == "zzz_User" # Ascending params = create_dt_params(columns, order=[{"column": 1, "dir": "asc"}]) @@ -52,17 +57,19 @@ def test_ordering(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert res['data'][0]['1'] == '000_User' + assert res["data"][0]["1"] == "000_User" @pytest.mark.usefixtures("fixtures_ordering") def test_ordering_nulls(session): """Test if it returns a list with the correct nulls order.""" columns = [ - ColumnDT(User.id, ), + ColumnDT( + User.id, + ), ColumnDT(User.name), - ColumnDT(Address.description, nulls_order='nullsfirst'), - ColumnDT(User.created_at) + ColumnDT(Address.description, nulls_order="nullsfirst"), + ColumnDT(User.created_at), ] query = session.query().select_from(User).join(Address) @@ -72,15 +79,17 @@ def test_ordering_nulls(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - if 'error' in res: + if "error" in res: # sqlite3 doesn't support nulls ordering - assert 'sqlite3.OperationalError) near "NULLS"' in res['error'] + assert 'sqlite3.OperationalError) near "NULLS"' in res["error"] columns = [ - ColumnDT(User.id, ), + ColumnDT( + User.id, + ), ColumnDT(User.name), - ColumnDT(Address.description, nulls_order='nullslast'), - ColumnDT(User.created_at) + ColumnDT(Address.description, nulls_order="nullslast"), + ColumnDT(User.created_at), ] # NULLS LAST @@ -89,19 +98,21 @@ def test_ordering_nulls(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - if 'error' in res: + if "error" in res: # sqlite3 doesn't support nulls ordering - assert 'sqlite3.OperationalError) near "NULLS"' in res['error'] + assert 'sqlite3.OperationalError) near "NULLS"' in res["error"] @pytest.mark.usefixtures("fixtures_ordering") def test_ordering_relation(session): """Test if it returns a list when ordering a foreign key.""" columns = [ - ColumnDT(User.id, ), + ColumnDT( + User.id, + ), ColumnDT(User.name), ColumnDT(Address.description), - ColumnDT(User.created_at) + ColumnDT(User.created_at), ] query = session.query().select_from(User).join(Address) @@ -111,14 +122,16 @@ def test_ordering_relation(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert res['data'][0]['1'] == 'UserLastAddress' - assert res['data'][0]['2'] == 'zzz_Address' + assert res["data"][0]["1"] == "UserLastAddress" + assert res["data"][0]["2"] == "zzz_Address" columns = [ - ColumnDT(User.id, ), + ColumnDT( + User.id, + ), ColumnDT(User.name), ColumnDT(Address.description), - ColumnDT(User.created_at) + ColumnDT(User.created_at), ] # Ascending @@ -127,5 +140,5 @@ def test_ordering_relation(session): rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert res['data'][0]['1'] == 'UserFirstAddress' - assert res['data'][0]['2'] == '000_Address' + assert res["data"][0]["1"] == "UserFirstAddress" + assert res["data"][0]["2"] == "000_Address" diff --git a/tests/test_searching.py b/tests/test_searching.py index 8a88774..ad3dded 100644 --- a/tests/test_searching.py +++ b/tests/test_searching.py @@ -10,183 +10,179 @@ def 
get_result(session, column, search_method, search_value): columns = [ColumnDT(column, search_method=search_method)] query = session.query() params = create_dt_params(columns) - params['columns[0][search][value]'] = search_value + params["columns[0][search][value]"] = search_value rowTable = DataTables(params, query, columns) return rowTable.output_result() def test_method_none(session): res = get_result( - session=session, - column=User.id, - search_method='none', - search_value='abc') - assert res['recordsFiltered'] == '50' + session=session, column=User.id, search_method="none", search_value="abc" + ) + assert res["recordsFiltered"] == "50" def test_method_numeric(session): res = get_result( - session=session, - column=User.id, - search_method='numeric', - search_value='10') - assert res['recordsFiltered'] == '1' + session=session, column=User.id, search_method="numeric", search_value="10" + ) + assert res["recordsFiltered"] == "1" def test_method_numeric_illegal_input(session): res = get_result( - session=session, - column=User.id, - search_method='numeric', - search_value='abc') + session=session, column=User.id, search_method="numeric", search_value="abc" + ) try: - float('abc') + float("abc") except ValueError as exc: expectedError = str(exc) - assert expectedError in res['error'] + assert expectedError in res["error"] def test_method_numeric_eq(session): res = get_result( - session=session, - column=User.id, - search_method='numeric', - search_value='=10') - assert res['recordsFiltered'] == '1' + session=session, column=User.id, search_method="numeric", search_value="=10" + ) + assert res["recordsFiltered"] == "1" def test_method_numeric_gt(session): res = get_result( - session=session, - column=User.id, - search_method='numeric', - search_value='>10') - assert res['recordsFiltered'] == '40' + session=session, column=User.id, search_method="numeric", search_value=">10" + ) + assert res["recordsFiltered"] == "40" def test_method_numeric_lte(session): res = get_result( - session=session, - column=User.id, - search_method='numeric', - search_value='<=10') - assert res['recordsFiltered'] == '10' + session=session, column=User.id, search_method="numeric", search_value="<=10" + ) + assert res["recordsFiltered"] == "10" def test_yadcf_range_number_gt(session): res = get_result( session=session, column=User.id, - search_method='yadcf_range_number', - search_value='10-yadcf_delim-') - assert res['recordsFiltered'] == '41' - assert res['yadcf_data_0'] == (1, 50) + search_method="yadcf_range_number", + search_value="10-yadcf_delim-", + ) + assert res["recordsFiltered"] == "41" + assert res["yadcf_data_0"] == (1, 50) def test_yadcf_range_number_lt(session): res = get_result( session=session, column=User.id, - search_method='yadcf_range_number', - search_value='-yadcf_delim-10') - assert res['recordsFiltered'] == '10' + search_method="yadcf_range_number", + search_value="-yadcf_delim-10", + ) + assert res["recordsFiltered"] == "10" def test_yadcf_range_number_range(session): res = get_result( session=session, column=User.id, - search_method='yadcf_range_number', - search_value='10-yadcf_delim-15') - assert res['recordsFiltered'] == '6' + search_method="yadcf_range_number", + search_value="10-yadcf_delim-15", + ) + assert res["recordsFiltered"] == "6" def test_string_contains(session): res = get_result( session=session, column=Address.description, - search_method='string_contains', - search_value='street') - assert res['recordsFiltered'] == '1' + search_method="string_contains", + 
search_value="street", + ) + assert res["recordsFiltered"] == "1" def test_like(session): res = get_result( session=session, column=Address.description, - search_method='like', - search_value='%Street%') - assert res['recordsFiltered'] == '1' + search_method="like", + search_value="%Street%", + ) + assert res["recordsFiltered"] == "1" def test_ilike(session): res = get_result( session=session, column=Address.description, - search_method='ilike', - search_value='%street%') - assert res['recordsFiltered'] == '1' + search_method="ilike", + search_value="%street%", + ) + assert res["recordsFiltered"] == "1" def test_date_lt(session): res = get_result( session=session, column=User.birthday, - search_method='date', - search_value='<1970-01-03') - assert res['recordsFiltered'] == '1' + search_method="date", + search_value="<1970-01-03", + ) + assert res["recordsFiltered"] == "1" def test_yadcf_range_date(session): res = get_result( session=session, column=User.birthday, - search_method='yadcf_range_date', - search_value='1970-01-03-yadcf_delim-1970-01-13') - assert res['recordsFiltered'] == '1' + search_method="yadcf_range_date", + search_value="1970-01-03-yadcf_delim-1970-01-13", + ) + assert res["recordsFiltered"] == "1" def test_yadcf_autocomplete(session): res = get_result( session=session, column=Address.description, - search_method='yadcf_autocomplete', - search_value='Avenue') - assert set(res['yadcf_data_0']) == set(['Avenue', 'Road', 'Street']) - assert res['recordsFiltered'] == '1' + search_method="yadcf_autocomplete", + search_value="Avenue", + ) + assert set(res["yadcf_data_0"]) == set(["Avenue", "Road", "Street"]) + assert res["recordsFiltered"] == "1" def test_yadcf_select(session): res = get_result( session=session, column=Address.description, - search_method='yadcf_select', - search_value='Road') - assert set(res['yadcf_data_0']) == set(['Avenue', 'Road', 'Street']) - assert res['recordsFiltered'] == '1' + search_method="yadcf_select", + search_value="Road", + ) + assert set(res["yadcf_data_0"]) == set(["Avenue", "Road", "Street"]) + assert res["recordsFiltered"] == "1" def test_yadcf_multi_select(session): res = get_result( session=session, column=Address.description, - search_method='yadcf_multi_select', - search_value='Avenue|StreetRoad') - assert set(res['yadcf_data_0']) == set(['Avenue', 'Road', 'Street']) - assert res['recordsFiltered'] == '1' + search_method="yadcf_multi_select", + search_value="Avenue|StreetRoad", + ) + assert set(res["yadcf_data_0"]) == set(["Avenue", "Road", "Street"]) + assert res["recordsFiltered"] == "1" def test_group_by(session): """Test group by after a join query.""" columns = [ColumnDT(func.count(User.id)), ColumnDT(Address.id)] - query = session.query().\ - select_from(User).\ - join(Address).\ - group_by(Address) + query = session.query().select_from(User).join(Address).group_by(Address) params = create_dt_params(columns) rowTable = DataTables(params, query, columns) res = rowTable.output_result() - assert len(res['data']) == 3 + assert len(res["data"]) == 3