
Commit

Merged from master
sonofmun committed Oct 23, 2019
2 parents 098029c + f16a717 commit 3ea275d
Showing 16 changed files with 326 additions and 94 deletions.
5 changes: 4 additions & 1 deletion .travis.yml
@@ -1,17 +1,20 @@
dist: xenial
language: python
python:
- "3.4.5"
- "3.5"
- "3.6"
- "3.7"

# command to install dependencies
install:
- pip install -r requirements.txt
- pip install coveralls
- pip install coveralls pycodestyle

# command to run tests
script:
- coverage run setup.py test
- pycodestyle --ignore E501 *.py HookTest
after_success:
- coverage combine
- coveralls
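Note on the new lint step above: the Travis script now runs pycodestyle over the top-level scripts and the HookTest package with E501 (line length) ignored. A rough local equivalent, sketched via pycodestyle's Python API rather than the CLI (the StyleGuide/check_files usage is taken from pycodestyle's general documentation, not from this repository):

```python
# Hedged sketch: roughly what the new CI step checks, run from Python.
import pycodestyle

style = pycodestyle.StyleGuide(ignore=["E501"])   # ignore line-length errors only
report = style.check_files(["HookTest"])          # point it at the package directory
print("pycodestyle violations:", report.total_errors)
```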
12 changes: 11 additions & 1 deletion CHANGES.txt
@@ -1,7 +1,17 @@
## 1.2.3 - 2019-10-23
## 1.2.5 - 2019-10-23

- Changed requirements.txt so that MyCapytain < 3.0.0 will be installed

## 1.2.4 - 2019-07-01

- Mostly warning fixes thanks to @rilian
- Avoid StatisticsError if the directory is empty by @rilian

## 1.2.3 - 2019-04-05

- Fixed `--scheme ignore` (Issue #139)
- Added the ability to debug tests with HOOKTEST_DEBUG environment variable

## 1.2.2 - 2018-07-04

- Added requirements.txt to MANIFEST.in so that it will be included in PyPI
7 changes: 2 additions & 5 deletions HookTest/build.py
@@ -80,10 +80,7 @@ def remove_failing(self, files, passing):
continue
# Covers the case where the source and destination directories are different, so files are copied instead of removed
else:
try:
shutil.rmtree('{}data'.format(self.dest))
except:
pass
shutil.rmtree('{}data'.format(self.dest), ignore_errors=True)
for file in files:
if file.replace(self.path, '') in passing:
try:
@@ -175,4 +172,4 @@ def cmd(**kwargs):
txt=kwargs['txt'], cites=kwargs['cites'], workers=int(kwargs['workers'])).run()
return status, message
else:
return False, 'You cannot run build on the base class'
return False, 'You cannot run build on the base class'
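The remove_failing change above replaces a bare try/except around shutil.rmtree with the standard library's own error suppression. A small standalone sketch of the pattern (the destination path is illustrative, not taken from this repository):

```python
import shutil

dest = "some_build_output/"  # illustrative destination prefix

# Before: a bare except silently swallowed *any* failure.
# try:
#     shutil.rmtree('{}data'.format(dest))
# except:
#     pass

# After: only errors raised by the removal itself (e.g. the directory
# not existing yet) are suppressed.
shutil.rmtree('{}data'.format(dest), ignore_errors=True)
```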
64 changes: 35 additions & 29 deletions HookTest/capitains_units/cts.py
@@ -3,7 +3,7 @@
import warnings
from threading import Timer
from collections import defaultdict
from os import makedirs
from os import makedirs, environ
import os.path
from hashlib import md5
import time
@@ -173,15 +173,15 @@ def check_urns(self):
onlyOneWork = True
allMembers = True
worksUrns = [
urn
for urn in self.xml.xpath("//ti:work/@urn", namespaces=TESTUnit.NS)
if urn and len(MyCapytain.common.reference.URN(urn)) == 4
]
urn
for urn in self.xml.xpath("//ti:work/@urn", namespaces=TESTUnit.NS)
if urn and len(MyCapytain.common.reference.URN(urn)) == 4
]
groupUrns = [
urn
for urn in self.xml.xpath("//ti:work/@groupUrn", namespaces=TESTUnit.NS)
if urn and len(MyCapytain.common.reference.URN(urn)) == 3
]
urn
for urn in self.xml.xpath("//ti:work/@groupUrn", namespaces=TESTUnit.NS)
if urn and len(MyCapytain.common.reference.URN(urn)) == 3
]
self.urn = None
urn = None
if len(worksUrns) == 1:
@@ -230,10 +230,10 @@ def check_urns(self):

self.log("Edition, translation, and commentary urns : " + " ".join(self.urns))

status = allMembers and\
matches and onlyOneWork and self.urn and \
len(groupUrns) == 1 and \
(len(texts)*2+1) == len(self.urns + worksUrns)
status = allMembers and \
matches and onlyOneWork and self.urn and \
len(groupUrns) == 1 and \
(len(texts) * 2 + 1) == len(self.urns + worksUrns)

yield status

@@ -396,7 +396,7 @@ def run_rng(self, rng_path):
:param rng_path: Path to the RelaxNG file to run against the XML to test
"""
test = subprocess.Popen(
["java", "-Duser.country=US", "-Duser.language=en", "-jar", TESTUnit.JING, rng_path, self.path],
["java", "-Duser.country=US", "-Duser.language=en", "-jar", TESTUnit.JING, rng_path, self.path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False
@@ -412,7 +412,7 @@ def run_rng(self, rng_path):
yield False
pass
finally:
if not timer.isAlive():
if not timer.is_alive():
self.log("Timeout on RelaxNG")
yield False
timer.cancel()
@@ -459,8 +459,8 @@ def get_remote_rng(self, url):
# We have a name for the rng file but also for the in-download marker
# Note : we might want to add a os.makedirs somewhere with exists=True
makedirs(".rngs", exist_ok=True)
stable_local = os.path.join(".rngs", sha+".rng")
stable_local_downloading = os.path.join(".rngs", sha+".rng-indownload")
stable_local = os.path.join(".rngs", sha + ".rng")
stable_local_downloading = os.path.join(".rngs", sha + ".rng-indownload")

# check if the stable_local rng already exists
# if it does, immediately run the rng test and move to the next rng in the file
@@ -521,7 +521,7 @@ def passages(self):
with warnings.catch_warnings(record=True) as warning_record:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
passages = self.Text.getValidReff(level=i+1, _debug=True)
passages = self.Text.getValidReff(level=i + 1, _debug=True)
ids = [ref.split(".", i)[-1] for ref in passages]
space_in_passage = TESTUnit.FORBIDDEN_CHAR.search("".join(ids))
len_passage = len(passages)
@@ -543,7 +543,7 @@ def passages(self):
yield status
except Exception as E:
self.error(E)
self.log("Error when searching passages at level {0}".format(i+1))
self.log("Error when searching passages at level {0}".format(i + 1))
yield False
break
else:
@@ -615,8 +615,12 @@ def has_urn(self):
"""
if self.xml is not None:
if self.guidelines == "2.tei":
urns = self.xml.xpath("//tei:text/tei:body[starts-with(@n, 'urn:cts:')]", namespaces=TESTUnit.NS) + \
self.xml.xpath("//tei:text[starts-with(@xml:base, 'urn:cts:')]", namespaces=TESTUnit.NS)
urns = self.xml.xpath(
"//tei:text/tei:body[starts-with(@n, 'urn:cts:')]",
namespaces=TESTUnit.NS)
urns += self.xml.xpath(
"//tei:text[starts-with(@xml:base, 'urn:cts:')]",
namespaces=TESTUnit.NS)
else:
urns = self.xml.xpath(
"//tei:body/tei:div[@type='edition' and starts-with(@n, 'urn:cts:')]",
@@ -694,11 +698,11 @@ def language(self):
)
elif self.guidelines == "2.tei":
urns_holding_node = self.xml.xpath("//tei:text/tei:body[starts-with(@n, 'urn:cts:')]", namespaces=TESTUnit.NS) + \
self.xml.xpath("//tei:text[starts-with(@xml:base, 'urn:cts:')]", namespaces=TESTUnit.NS)
self.xml.xpath("//tei:text[starts-with(@xml:base, 'urn:cts:')]", namespaces=TESTUnit.NS)

try:
self.lang = urns_holding_node[0].get('{http://www.w3.org/XML/1998/namespace}lang')
except:
except IndexError:
self.lang = ''
if self.lang == '' or self.lang is None:
self.lang = 'UNK'
@@ -722,24 +726,26 @@ def test(self, scheme, guidelines, rng=None, inventory=None):
if self.countwords:
tests.append("count_words")

if scheme.endswith("-ignore"):
scheme = scheme.replace("-ignore", "")
else:
if scheme in["tei", "epidoc", "auto_rng", "local_file"]:
tests = [scheme] + tests

self.scheme = scheme
self.guidelines = guidelines
self.rng = rng

if environ.get("HOOKTEST_DEBUG", False):
print("Starting %s " % self.path)
i = 0
for test in tests:

# Show the logs and return the status

if environ.get("HOOKTEST_DEBUG", False):
print("\t Testing %s " % test)
status = False not in [status for status in getattr(self, test)()]
self.test_status[test] = status
yield (CTSText_TestUnit.readable[test], status, self.logs)
if test in self.breaks and status == False:
for t in tests[i+1:]:
if test in self.breaks and not status:
for t in tests[i + 1:]:
self.test_status[t] = False
yield (CTSText_TestUnit.readable[t], False, [])
break
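The cts.py hunks above gate extra progress output behind the HOOKTEST_DEBUG environment variable noted in the changelog. A minimal sketch of the gating pattern, assuming the variable is simply set to any non-empty value before the tests run (the helper name and path are illustrative):

```python
from os import environ

# environ.get returns the raw string when the variable is set, so any
# non-empty value (even "0") switches the extra output on; unset means off.
DEBUG = environ.get("HOOKTEST_DEBUG", False)

def log_step(message):
    # Illustrative helper: only print progress when HOOKTEST_DEBUG is set.
    if DEBUG:
        print(message)

log_step("Starting path/to/file.xml")
```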
74 changes: 41 additions & 33 deletions HookTest/test.py
@@ -20,11 +20,11 @@

import HookTest.capitains_units.cts
import HookTest.units
from colors import white, magenta, black
from colors import white, magenta
from operator import attrgetter


pr_finder = re.compile("pull\/([0-9]+)\/head")
pr_finder = re.compile("pull/([0-9]+)/head")


class DefaultFinder(object):
@@ -82,9 +82,9 @@ def find(self, directory):
cts = glob.glob(os.path.join(directory, "data/{textgroup}/__cts__.xml".format(
textgroup=textgroup
))) + \
glob.glob(os.path.join(directory, "data/{textgroup}/{work}/__cts__.xml".format(
textgroup=textgroup, work=work
)))
glob.glob(os.path.join(directory, "data/{textgroup}/{work}/__cts__.xml".format(
textgroup=textgroup, work=work
)))
files = glob.glob(os.path.join(directory, "data/{textgroup}/{work}/{version}.xml".format(
textgroup=textgroup, work=work, version=version
)))
@@ -124,7 +124,7 @@ class Test(object):
SCHEMES = {
"tei": "tei.rng",
"epidoc": "epidoc.rng",
"ignore": "epidoc.rng",
"ignore": None,
"auto": "auto_rng"
}

@@ -400,11 +400,11 @@ def run(self):

self.text_files, self.cts_files = self.find()
self.start()
# We deal with Inventory files first to get a list of urns

# We deal with Inventory files first to get a list of urns
with Pool(processes=self.workers) as executor:
# We iterate over a dictionary of completed tasks
for future in executor.imap_unordered(self.unit, [file for file in self.cts_files]):
# We iterate over the list of files, checking them in parallel.
for future in executor.imap_unordered(self.unit, self.cts_files):
result, filepath, additional = future
self.results[filepath] = result
self.passing[filepath] = result.status
@@ -416,10 +416,9 @@ def run(self):
executor.join()
self.middle() # To print the results from the metadata file tests

# We load a thread pool which has 5 maximum workers
# Now deal with the text files.
with Pool(processes=self.workers) as executor:
# We create a dictionary of tasks which
for future in executor.imap_unordered(self.unit, [file for file in self.text_files]):
for future in executor.imap_unordered(self.unit, self.text_files):
result, filepath, additional = future
self.results[filepath] = result
self.passing[filepath] = result.status
@@ -444,7 +443,7 @@ def log(self, log):
sys.stdout.write('.')
sys.stdout.flush()
else:
sys.stdout.write('X')
sys.stdout.write(str('X'))
sys.stdout.flush()
elif self.ping and len(self.stack) >= self.triggering_size:
self.flush(self.stack)
@@ -457,7 +456,7 @@ def start(self):
self.scheme = "auto_rng"
if self.console:
print(">>> Starting tests !", flush=True)
print(">>> Files to test : "+str(self.count_files), flush=True)
print(">>> Files to test : " + str(self.count_files), flush=True)
elif self.ping:
self.send({
"logs": [
@@ -534,7 +533,7 @@ def end(self):
try:
show.remove("Duplicate passages")
show.remove("Forbidden characters")
except:
except ValueError:
pass
if unit.coverage != 100.0:
num_failed += 1
@@ -548,36 +547,41 @@ def end(self):
failed_tests = '\n'.join([x for x in unit.units if unit.units[x] is False and x in show])

if unit.additional['duplicates']:
duplicate_nodes += '\t{name}\t{nodes}\n'.format(name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional['duplicates']))
duplicate_nodes += '\t{name}\t{nodes}\n'.format(
name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional['duplicates']))
if unit.additional['forbiddens']:
forbidden_chars += '\t{name}\t{nodes}\n'.format(name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional['forbiddens']))
forbidden_chars += '\t{name}\t{nodes}\n'.format(
name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional['forbiddens']))
if unit.additional["dtd_errors"] and self.verbose >= 6:
dtd_errors += '\t{name}\t{nodes}\n'.format(name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional["dtd_errors"]))
dtd_errors += '\t{name}\t{nodes}\n'.format(
name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional["dtd_errors"]))

if unit.additional["capitains_errors"]:
capitains_errors += '\t{name}\t{nodes}\n'.format(name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional["capitains_errors"]))
capitains_errors += '\t{name}\t{nodes}\n'.format(
name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional["capitains_errors"]))

if unit.additional["empties"]:
empty_refs += '\t{name}\t{nodes}\n'.format(name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional["empties"]))
empty_refs += '\t{name}\t{nodes}\n'.format(
name=magenta(os.path.basename(unit.name)),
nodes=', '.join(unit.additional["empties"]))

if self.verbose >= 7 or unit.status is False:
if self.countwords:
row = [
"{}".format(text_color(os.path.basename(unit.name))),
"{:,}".format(unit.additional['words']),
';'.join([str(x[1]) for x in unit.additional['citations']]),
failed_tests
"{:,}".format(unit.additional['words']),
';'.join([str(x[1]) for x in unit.additional['citations']]),
failed_tests
]
else:
row = [
"{}".format(text_color(os.path.basename(unit.name))),
';'.join([str(x[1]) for x in unit.additional['citations']]),
failed_tests
';'.join([str(x[1]) for x in unit.additional['citations']]),
failed_tests
]
display_table.add_row(row)

@@ -605,14 +609,18 @@ def end(self):

if capitains_errors:
capitains_errors = magenta('CapiTainS parsing errors found:\n') + capitains_errors + '\n'

print("{caps}{dupes}{forbs}{dtds}{empts}>>> End of the test !\n".format(caps=capitains_errors,
dupes=duplicate_nodes,
forbs=forbidden_chars,
dtds=dtd_errors,
empts=empty_refs))
t_pass = num_texts - num_failed
cov = round(statistics.mean([test.coverage for test in self.results.values()]), ndigits=2)
cov_results = [test.coverage for test in self.results.values()]
if cov_results:
cov = round(statistics.mean(cov_results), ndigits=2)
else:
cov = 0.00
results_table = PT(["HookTestResults", ""])
results_table.align["HookTestResults", ""] = "c"
results_table.hrules = pt_all
@@ -745,7 +753,7 @@ def cover(self, name, test, testtype=None, logs=None, additional=None):
directory=self.directory,
name=name,
units=test,
coverage=len([v for v in results if v is True])/len(results)*100,
coverage=len([v for v in results if v is True]) / len(results) * 100,
status=False not in results,
logs=logs,
additional=additional,
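The coverage-summary change above is the fix for the StatisticsError mentioned in the changelog: statistics.mean raises on an empty sequence, which happened when the tested directory contained no files. A standalone sketch of the guard:

```python
import statistics

def mean_coverage(coverages):
    # statistics.mean raises StatisticsError on an empty list,
    # so fall back to 0.00 when nothing was tested.
    if coverages:
        return round(statistics.mean(coverages), ndigits=2)
    return 0.00

print(mean_coverage([100.0, 87.5]))  # 93.75
print(mean_coverage([]))             # 0.0
```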
