69 changes: 53 additions & 16 deletions buildscripts/run_examples.py
@@ -32,45 +32,82 @@

 from pathlib import Path
 from utilities import SDC_Build_Utilities
+import multiprocessing as mp


 EXAMPLES_TO_SKIP = {'basic_usage_nyse_predict.py'}
+TEST_TIMEOUT = 120


-def run_examples(sdc_utils):
+# keep test results global to be visible for async callbacks
+class TestResults():
     total = 0
-    passed = 0
     failed = 0
+    passed = 0
     skipped = 0
     failed_examples = []


+def run_single_example(path, sdc_utils):
+    str_path = str(path)
+    try:
+        sdc_utils.log_info(sdc_utils.line_double)
+        sdc_utils.run_command(f'python {str_path}')
+    except Exception as e:
+        raise Exception(str_path).with_traceback(e.__traceback__)
+
+    return str_path
+
+
+def normal_handler(test_name):
+    TestResults.passed += 1
+    sdc_utils.log_info(f'{test_name} PASSED')
+
+
+def error_handler(error):
+    TestResults.failed += 1
+    test_name = str(error).split()[-1]
+    sdc_utils.log_info(f'{test_name} FAILED')
+    TestResults.failed_examples.append(test_name)
+
+
+def run_examples(sdc_utils):
+
     os.chdir(str(sdc_utils.examples_path))
+    pool = mp.Pool(max(1, mp.cpu_count()))
+
+    task_queue = []
     for sdc_example in Path('.').glob('**/*.py'):
-        total += 1
+        TestResults.total += 1
+
         if sdc_example.name in EXAMPLES_TO_SKIP:
-            skipped += 1
+            TestResults.skipped += 1
             continue
+
+        sdc_example = str(sdc_example)
+        task_queue.append(pool.apply_async(
+            run_single_example,
+            [sdc_example, sdc_utils],
+            callback=normal_handler,
+            error_callback=error_handler
+        ))
+
+    for promise in task_queue:
         try:
-            sdc_utils.log_info(sdc_utils.line_double)
-            sdc_utils.run_command(f'python {str(sdc_example)}')
+            promise.get(TEST_TIMEOUT)
         except Exception:
-            failed += 1
-            failed_examples.append(sdc_example)
-            sdc_utils.log_info(f'{sdc_example} FAILED')
             traceback.print_exc()
-        else:
-            passed += 1
-            sdc_utils.log_info(f'{sdc_example} PASSED')

-    summary_msg = f'SDC examples summary: {total} RUN, {passed} PASSED, {failed} FAILED, {skipped} SKIPPED'
+    pool.close()
+    pool.join()
+
+    summary_msg = f'SDC examples summary: {TestResults.total} RUN, {TestResults.passed} PASSED, ' \
+                  f'{TestResults.failed} FAILED, {TestResults.skipped} SKIPPED'
     sdc_utils.log_info(summary_msg, separate=True)
-    for failed_example in failed_examples:
-        sdc_utils.log_info(f'FAILED: {failed_example}')
+    for test_name in TestResults.failed_examples:
+        sdc_utils.log_info(f'FAILED: {test_name}')

-    if failed > 0:
+    if TestResults.failed > 0:
         sdc_utils.log_info('Intel SDC examples FAILED', separate=True)
         exit(-1)
     sdc_utils.log_info('Intel SDC examples PASSED', separate=True)
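Note on the change above: run_examples() now fans the example scripts out over a multiprocessing.Pool. The worker re-raises its exception with the example path as the message, so error_handler() can recover the failing name from the error text, and promise.get(TEST_TIMEOUT) enforces a per-example time limit. A minimal, self-contained sketch of the same pattern (illustrative names like work/on_ok/on_err are mine, not part of the PR):

    import multiprocessing as mp

    def work(name):
        # stand-in for run_single_example(): re-raise with the task name
        # as the message so the error callback can identify the failure
        if 'bad' in name:
            raise Exception(name)
        return name

    def on_ok(name):    # cf. normal_handler()
        print(f'{name} PASSED')

    def on_err(error):  # cf. error_handler(): recover the name from the message
        print(f'{str(error).split()[-1]} FAILED')

    if __name__ == '__main__':
        pool = mp.Pool(max(1, mp.cpu_count()))
        promises = [pool.apply_async(work, [n], callback=on_ok, error_callback=on_err)
                    for n in ('good_example.py', 'bad_example.py')]
        for promise in promises:
            try:
                promise.get(10)  # seconds; raises on task error or timeout
            except Exception:
                pass  # task errors were already reported via on_err; timeouts surface here
        pool.close()
        pool.join()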
28 changes: 14 additions & 14 deletions conda-recipe/run_test.bat
@@ -9,33 +9,33 @@
 if errorlevel 1 exit 1

 @rem TODO investigate root cause of NumbaPerformanceWarning
 @rem http://numba.pydata.org/numba-doc/latest/user/parallel.html#diagnostics
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_basic
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_basic
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_series
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_series
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_dataframe
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_dataframe
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_hiframes
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_hiframes
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_date
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_date
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_strings
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_strings
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_groupby
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_groupby
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_join
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_join
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_rolling
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_rolling
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_ml
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_ml
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_io
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_io
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_hpat_jit
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_hpat_jit
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_sdc_numpy
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_sdc_numpy
 if errorlevel 1 exit 1
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_prange_utils
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_prange_utils
 if errorlevel 1 exit 1

 REM Link check for Documentation using Sphinx's in-built linkchecker
28 changes: 14 additions & 14 deletions conda-recipe/run_test.sh
@@ -13,17 +13,17 @@
 python -m sdc.tests.gen_test_data

 # TODO investigate root cause of NumbaPerformanceWarning
 # http://numba.pydata.org/numba-doc/latest/user/parallel.html#diagnostics
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_basic
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_series
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_dataframe
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_hiframes
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_date
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_strings
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_groupby
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_join
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_rolling
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_ml
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_io
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_hpat_jit
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_sdc_numpy
-python -W ignore -u -m sdc.runtests -v sdc.tests.test_prange_utils
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_basic
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_series
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_dataframe
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_hiframes
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_date
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_strings
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_groupby
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_join
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_rolling
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_ml
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_io
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_hpat_jit
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_sdc_numpy
+python -W ignore -u -m numba.runtests -m -v sdc.tests.test_prange_utils
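Note on the change above: both test scripts swap the sdc.runtests entry point for Numba's test runner, and the added -m flag appears to ask numba.runtests to distribute tests across multiple worker processes (my reading of the flag; check Numba's runtests help to confirm). A small sketch of driving the same command from Python when reproducing a single suite locally (the run_suite helper is illustrative, not part of this PR):

    import subprocess
    import sys

    def run_suite(suite: str, timeout: int = 600) -> bool:
        # Mirrors the scripts' invocation:
        #   python -W ignore -u -m numba.runtests -m -v <suite>
        cmd = [sys.executable, '-W', 'ignore', '-u',
               '-m', 'numba.runtests', '-m', '-v', suite]
        return subprocess.run(cmd, timeout=timeout).returncode == 0

    if __name__ == '__main__':
        sys.exit(0 if run_suite('sdc.tests.test_basic') else 1)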
4 changes: 2 additions & 2 deletions sdc/tests/test_series.py
@@ -4392,7 +4392,7 @@ def test_impl():
             return a.quantile()

         hpat_func = self.jit(test_impl)
-        np.testing.assert_equal(hpat_func(), test_impl())
+        np.testing.assert_almost_equal(hpat_func(), test_impl())

     def test_series_quantile_q_vector(self):
         def test_series_quantile_q_vector_impl(S, param1):
@@ -4404,7 +4404,7 @@ def test_series_quantile_q_vector_impl(S, param1):
         param1 = [0.0, 0.25, 0.5, 0.75, 1.0]
         result_ref = test_series_quantile_q_vector_impl(s, param1)
         result = hpat_func(s, param1)
-        np.testing.assert_equal(result, result_ref)
+        np.testing.assert_almost_equal(result, result_ref)

     @unittest.skip("Implement unique without sorting like in pandas")
     def test_unique(self):
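Note on the change above: the quantile assertions are loosened from exact to approximate equality. quantile() returns floating-point values, and the jitted path need not be bit-identical to the pandas reference; np.testing.assert_almost_equal compares to 7 decimal places by default, which tolerates ordinary rounding differences. A tiny illustration (not from the PR):

    import numpy as np

    a = 0.1 + 0.2  # 0.30000000000000004 in binary floating point
    b = 0.3
    # np.testing.assert_equal(a, b)       # would raise: values are not bit-identical
    np.testing.assert_almost_equal(a, b)  # passes: agrees to 7 decimal places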