automated/linux/ptest: Analyze each test in package tests
Currently ptest.py analyzes only the exit code of each package test
to decide whether it passed or not. However, ptest-runner can return
a success code even though some tests failed, so we need to parse the
test output and analyze it.

It is also quite useful to see exactly which tests failed, so results
are recorded for each individual test, and the lava-test-set feature
is used to distinguish packages.

Signed-off-by: Oleksandr Terentiev <oterenti@cisco.com>
oterenti committed Dec 11, 2018
1 parent 10df68a commit 652b14c
Showing 3 changed files with 61 additions and 11 deletions.
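Editor's note: ptest-runner prints one status line per test, prefixed with PASS:, FAIL: or SKIP: followed by the test name, and the patch below classifies those lines. A minimal sketch of that parsing idea, run against hypothetical output lines (the test names here are made up for illustration):

import re

# Same classification idea as the patch's parse_line(): map a status
# keyword at the start of the line to a (test name, status) pair.
STATUS_PATTERNS = {
    'pass': re.compile(r"^PASS:(.+)"),
    'fail': re.compile(r"^FAIL:(.+)"),
    'skip': re.compile(r"^SKIP:(.+)"),
}

def classify(line):
    for status, pattern in STATUS_PATTERNS.items():
        match = pattern.search(line)
        if match:
            return match.group(1).strip(), status
    return None  # not a per-test result line

# Hypothetical ptest-runner output, for illustration only.
sample_output = [
    "START: ptest-runner",
    "PASS: tst-example-one",
    "FAIL: tst-example-two",
    "SKIP: tst-example-three",
    "STOP: ptest-runner",
]

for line in sample_output:
    print(line, "->", classify(line))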
54 changes: 44 additions & 10 deletions automated/linux/ptest/ptest.py
@@ -84,20 +84,54 @@ def filter_ptests(ptests, requested_ptests, exclude):
 
     return filter_ptests
 
+def parse_line(line):
+    test_status_list = {
+        'pass': re.compile("^PASS:(.+)"),
+        'fail': re.compile("^FAIL:(.+)"),
+        'skip': re.compile("^SKIP:(.+)")
+    }
+
+    for test_status, status_regex in test_status_list.items():
+        test_name = status_regex.search(line)
+        if test_name:
+            return [test_name.group(1), test_status]
 
-def check_ptest(ptest_dir, ptest_name, output_log):
-    status = 'pass'
+    return None
 
-    try:
-        output = subprocess.check_call('ptest-runner -d %s %s' %
-                                       (ptest_dir, ptest_name), shell=True,
-                                       stderr=subprocess.STDOUT)
-    except subprocess.CalledProcessError:
-        status = 'fail'
+def run_ptest(command):
+    results = []
+    process = subprocess.Popen(command,
+                               shell=True,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.STDOUT)
+    while True:
+        output = process.stdout.readline()
+        try:
+            output = unicode(output, "utf-8").strip()
+        except:
+            output = output.decode("utf-8").strip()
+        if len(output) == 0 and process.poll() is not None:
+            break
+        if output:
+            print(output)
+            result_tuple = parse_line(output)
+            if result_tuple:
+                results.append(result_tuple)
+
+    rc = process.poll()
+    return rc, results
 
-    with open(output_log, 'a+') as f:
-        f.write("%s %s\n" % (ptest_name, status))
+def check_ptest(ptest_dir, ptest_name, output_log):
+    log_name = os.path.join(os.getcwd(), '%s.log' % ptest_name)
+    status, results = run_ptest('ptest-runner -d %s %s' % (ptest_dir, ptest_name))
+
+    with open(output_log, 'a+') as f:
+        f.write("lava-test-set start %s\n" % ptest_name)
+        f.write("%s %s\n" % (ptest_name, "pass" if status == 0 else "fail"))
+        for test, test_status in results:
+            test = test.encode("ascii", errors="ignore").decode()
+            f.write("%s %s\n" % (re.sub(r'[^\w-]', '', test), test_status))
+        f.write("lava-test-set stop %s\n" % ptest_name)
 
 def main():
     parser = argparse.ArgumentParser(description="LAVA/OE ptest script",
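Editor's note: with the new check_ptest(), the output log wraps each package in lava-test-set markers and adds one line per individual test, with test names stripped of everything except letters, digits, underscores and dashes (presumably to keep LAVA test case IDs valid). A rough sketch of the resulting lines, using a hypothetical package name and made-up per-test results:

import re

def write_package_results(ptest_name, exit_status, results):
    # Mirrors the patch's write pattern: wrap each package in a
    # lava-test-set block and sanitize individual test names.
    lines = ["lava-test-set start %s" % ptest_name,
             "%s %s" % (ptest_name, "pass" if exit_status == 0 else "fail")]
    for test, test_status in results:
        lines.append("%s %s" % (re.sub(r'[^\w-]', '', test), test_status))
    lines.append("lava-test-set stop %s" % ptest_name)
    return lines

# Hypothetical package and per-test results, for illustration only.
for line in write_package_results("example-pkg", 0,
                                  [("tst-example-one", "pass"),
                                   ("tst example two!", "fail")]):
    print(line)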
4 changes: 3 additions & 1 deletion automated/linux/ptest/ptest.yaml
@@ -20,6 +20,8 @@ params:
 
 run:
     steps:
+        - locale
+        - python --version
         - cd ./automated/linux/ptest
-        - ./ptest.py -o ./result.txt -t ${TESTS} -e ${EXCLUDE}
+        - PYTHONIOENCODING=UTF-8 ./ptest.py -o ./result.txt -t ${TESTS} -e ${EXCLUDE}
        - ../../utils/send-to-lava.sh ./result.txt
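Editor's note: the locale / python --version steps and the PYTHONIOENCODING=UTF-8 export appear to support the byte handling in run_ptest(): raw ptest output is read as bytes, decoded to UTF-8 and printed, and without a forced IO encoding Python may pick an ASCII codec for a piped stdout. A small sketch of that Python 2/3 decode fallback, with a made-up byte string standing in for real test output:

def to_text(raw):
    # Python 2 takes the unicode() path; Python 3 raises NameError and
    # falls back to bytes.decode(), matching the pattern in run_ptest().
    try:
        return unicode(raw, "utf-8").strip()  # noqa: F821 (Python 2 only)
    except NameError:
        return raw.decode("utf-8").strip()

# Hypothetical output line containing a non-ASCII character.
print(to_text(b"PASS: tst-unicode-caf\xc3\xa9"))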
14 changes: 14 additions & 0 deletions automated/utils/send-to-lava.sh
@@ -4,6 +4,8 @@ RESULT_FILE="$1"
 
 which lava-test-case > /dev/null 2>&1
 lava_test_case="$?"
+which lava-test-set > /dev/null 2>&1
+lava_test_set="$?"
 
 if [ -f "${RESULT_FILE}" ]; then
     while read -r line; do
@@ -31,6 +33,18 @@ if [ -f "${RESULT_FILE}" ]; then
             else
                 echo "<TEST_CASE_ID=${test} RESULT=${result} MEASUREMENT=${measurement} UNITS=${units}>"
             fi
+        elif echo "${line}" | egrep -iq "^lava-test-set.*"; then
+            test_set_status="$(echo "${line}" | awk '{print $2}')"
+            test_set_name="$(echo "${line}" | awk '{print $3}')"
+            if [ "${lava_test_set}" -eq 0 ]; then
+                lava-test-set "${test_set_status}" "${test_set_name}"
+            else
+                if [ "${test_set_status}" == "start" ]; then
+                    echo "<LAVA_SIGNAL_TESTSET START ${test_set_name}>"
+                else
+                    echo "<LAVA_SIGNAL_TESTSET STOP>"
+                fi
+            fi
         fi
     done < "${RESULT_FILE}"
 else
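Editor's note: the new shell branch forwards the marker lines written by ptest.py either to the lava-test-set helper, when it is installed, or prints the raw LAVA signals so the dispatcher can still pick them up. A rough Python paraphrase of that dispatch, with a hypothetical package name; shutil.which stands in for the script's `which lava-test-set` check:

import shutil

def handle_marker(line):
    # line looks like: "lava-test-set start <pkg>" or "lava-test-set stop <pkg>"
    _, status, name = line.split()
    if shutil.which("lava-test-set"):
        return ["lava-test-set", status, name]  # command the script would run
    if status == "start":
        return "<LAVA_SIGNAL_TESTSET START %s>" % name
    return "<LAVA_SIGNAL_TESTSET STOP>"

# Hypothetical marker lines from result.txt.
print(handle_marker("lava-test-set start example-pkg"))
print(handle_marker("lava-test-set stop example-pkg"))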
