diff --git a/sdc/tests/tests_perf/__init__.py b/sdc/tests/tests_perf/__init__.py index ac9332974..11dbad033 100644 --- a/sdc/tests/tests_perf/__init__.py +++ b/sdc/tests/tests_perf/__init__.py @@ -1,4 +1,5 @@ from sdc.tests.tests_perf.test_perf_df_rolling import * +from sdc.tests.tests_perf.test_perf_series_operators import * from sdc.tests.tests_perf.test_perf_unicode import * from sdc.tests.tests_perf.test_perf_series_str import * from sdc.tests.tests_perf.test_perf_series import * diff --git a/sdc/tests/tests_perf/test_perf_base.py b/sdc/tests/tests_perf/test_perf_base.py index 71b5213bc..bd86cb79b 100644 --- a/sdc/tests/tests_perf/test_perf_base.py +++ b/sdc/tests/tests_perf/test_perf_base.py @@ -1,5 +1,6 @@ import os import unittest +import numba from sdc.tests.tests_perf.test_perf_utils import * @@ -36,3 +37,32 @@ def tearDownClass(cls): # TODO: https://jira.devtools.intel.com/browse/SAT-2371 cls.test_results.print() cls.test_results.dump() + + def _test_jitted(self, pyfunc, record, *args, **kwargs): + # compilation time + record["compile_results"] = calc_compilation(pyfunc, *args, **kwargs) + + cfunc = numba.njit(pyfunc) + + # Warming up + cfunc(*args, **kwargs) + + # execution and boxing time + record["test_results"], record["boxing_results"] = \ + get_times(cfunc, *args, **kwargs) + + def _test_python(self, pyfunc, record, *args, **kwargs): + record["test_results"], _ = \ + get_times(pyfunc, *args, **kwargs) + + def test_jit(self, pyfunc, base, *args): + record = base.copy() + record["test_type"] = 'SDC' + self._test_jitted(pyfunc, record, *args) + self.test_results.add(**record) + + def test_py(self, pyfunc, base, *args): + record = base.copy() + record["test_type"] = 'Python' + self._test_python(pyfunc, record, *args) + self.test_results.add(**record) diff --git a/sdc/tests/tests_perf/test_perf_df.py b/sdc/tests/tests_perf/test_perf_df.py index 28f3d3ad9..8b839f191 100644 --- a/sdc/tests/tests_perf/test_perf_df.py +++ 
b/sdc/tests/tests_perf/test_perf_df.py @@ -44,28 +44,12 @@ class TestDataFrameMethods(TestBase): def setUpClass(cls): super().setUpClass() - def _test_jitted(self, pyfunc, record, *args, **kwargs): - # compilation time - record["compile_results"] = calc_compilation(pyfunc, *args, **kwargs) - - cfunc = numba.njit(pyfunc) - - # execution and boxing time - record["test_results"], record["boxing_results"] = \ - get_times(cfunc, *args, **kwargs) - - def _test_python(self, pyfunc, record, *args, **kwargs): - record["test_results"], _ = \ - get_times(pyfunc, *args, **kwargs) - - def _test_case(self, pyfunc, name, total_data_length, data_num=1, - input_data=test_global_input_data_float64): + def _test_case(self, pyfunc, name, total_data_length, input_data, data_num=1): + test_name = 'DataFrame.{}'.format(name) if input_data is None: input_data = test_global_input_data_float64 - test_name = 'DataFrame.{}'.format(name) - full_input_data_length = sum(len(i) for i in input_data) for data_length in total_data_length: base = { @@ -77,20 +61,13 @@ def _test_case(self, pyfunc, name, total_data_length, data_num=1, test_data = pandas.DataFrame({f"f{i}": data for i in range(3)}) args = [test_data] - for i in range(data_num-1): + for i in range(data_num - 1): np.random.seed(i) extra_data = np.random.ranf(data_length) args.append(pandas.DataFrame({f"f{i}": extra_data for i in range(3)})) - record = base.copy() - record["test_type"] = 'SDC' - self._test_jitted(pyfunc, record, *args) - self.test_results.add(**record) - - record = base.copy() - record["test_type"] = 'Python' - self._test_python(pyfunc, record, *args) - self.test_results.add(**record) + self.test_jit(pyfunc, base, *args) + self.test_py(pyfunc, base, *args) cases = [ diff --git a/sdc/tests/tests_perf/test_perf_series.py b/sdc/tests/tests_perf/test_perf_series.py index c9bc8ec27..393538368 100644 --- a/sdc/tests/tests_perf/test_perf_series.py +++ b/sdc/tests/tests_perf/test_perf_series.py @@ -46,20 +46,6 @@ class 
TestSeriesMethods(TestBase): def setUpClass(cls): super().setUpClass() - def _test_jitted(self, pyfunc, record, *args, **kwargs): - # compilation time - record["compile_results"] = calc_compilation(pyfunc, *args, **kwargs) - - sdc_func = sdc.jit(pyfunc) - - # execution and boxing time - record["test_results"], record["boxing_results"] = \ - get_times(sdc_func, *args, **kwargs) - - def _test_python(self, pyfunc, record, *args, **kwargs): - record["test_results"], _ = \ - get_times(pyfunc, *args, **kwargs) - def _test_case(self, pyfunc, name, total_data_length, data_num=1, input_data=test_global_input_data_float64): test_name = 'Series.{}'.format(name) @@ -82,15 +68,8 @@ def _test_case(self, pyfunc, name, total_data_length, data_num=1, input_data=tes extra_data = np.random.ranf(data_length) args.append(pandas.Series(extra_data)) - record = base.copy() - record["test_type"] = 'SDC' - self._test_jitted(pyfunc, record, *args) - self.test_results.add(**record) - - record = base.copy() - record["test_type"] = 'Python' - self._test_python(pyfunc, record, *args) - self.test_results.add(**record) + self.test_jit(pyfunc, base, *args) + self.test_py(pyfunc, base, *args) cases = [ diff --git a/sdc/tests/tests_perf/test_perf_series_operators.py b/sdc/tests/tests_perf/test_perf_series_operators.py index aff3fec4d..91eceadaf 100644 --- a/sdc/tests/tests_perf/test_perf_series_operators.py +++ b/sdc/tests/tests_perf/test_perf_series_operators.py @@ -51,23 +51,6 @@ class TestSeriesOperatorMethods(TestBase): def setUpClass(cls): super().setUpClass() - def _test_jitted(self, pyfunc, record, *args, **kwargs): - # compilation time - record["compile_results"] = calc_compilation(pyfunc, *args, **kwargs) - - sdc_func = sdc.jit(pyfunc) - - # Warming up - sdc_func(*args, **kwargs) - - # execution and boxing time - record["test_results"], record["boxing_results"] = \ - get_times(sdc_func, *args, **kwargs) - - def _test_python(self, pyfunc, record, *args, **kwargs): - record["test_results"], _ 
= \ - get_times(pyfunc, *args, **kwargs) - def _test_case(self, pyfunc, name, total_data_length, data_num=1, input_data=test_global_input_data_float64): test_name = 'Series.{}'.format(name) @@ -90,15 +73,8 @@ def _test_case(self, pyfunc, name, total_data_length, data_num=1, input_data=tes extra_data = np.random.ranf(data_length) args.append(pandas.Series(extra_data)) - record = base.copy() - record["test_type"] = 'SDC' - self._test_jitted(pyfunc, record, *args) - self.test_results.add(**record) - - record = base.copy() - record["test_type"] = 'Python' - self._test_python(pyfunc, record, *args) - self.test_results.add(**record) + self.test_jit(pyfunc, base, *args) + self.test_py(pyfunc, base, *args) cases = [ diff --git a/sdc/tests/tests_perf/test_perf_series_str.py b/sdc/tests/tests_perf/test_perf_series_str.py index 1fb2574cb..6be60bf43 100644 --- a/sdc/tests/tests_perf/test_perf_series_str.py +++ b/sdc/tests/tests_perf/test_perf_series_str.py @@ -57,23 +57,26 @@ def setUpClass(cls): super().setUpClass() cls.width = [16, 64, 512, 1024] - def _test_case(self, pyfunc, name, total_data_length, input_data=None, *args, **kwargs): - test_name = 'series_str_{}'.format(name) + def _test_case(self, pyfunc, name, total_data_length, data_num=1, input_data=None): + test_name = 'Series.{}'.format(name) + input_data = input_data or test_global_input_data_unicode_kind4 - hpat_func = sdc.jit(pyfunc) + for data_length, data_width in itertools.product(total_data_length, self.width): - data = perf_data_gen_fixed_len(input_data, data_width, data_length) + base = { + "test_name": test_name, + "data_size": data_length, + "data_width": data_width, + } + full_input_data_length = data_width + data = perf_data_gen_fixed_len(input_data, full_input_data_length, + data_length) test_data = pd.Series(data) - compile_results = calc_compilation(pyfunc, test_data, iter_number=self.iter_number) - - exec_times, boxing_times = get_times(hpat_func, test_data, 
iter_number=self.iter_number) + args = [test_data] - self.test_results.add(test_name, 'SDC', test_data.size, exec_times, data_width, - boxing_times, compile_results=compile_results, num_threads=self.num_threads) - exec_times, _ = get_times(pyfunc, test_data, iter_number=self.iter_number) - self.test_results.add(test_name, 'Python', test_data.size, exec_times, data_width, - num_threads=self.num_threads) + self.test_jit(pyfunc, base, *args) + self.test_py(pyfunc, base, *args) cases = [