Dev -> Master | Added LTF (Lucifer Testing Framework) | 0.8.6r1
Added LTF, the Lucifer Testing Framework. It makes test cases easy to write and adds a -T argument to lucifer which runs all of the lucifer tests.
Skiller9090 committed Apr 21, 2021
2 parents 6def2ac + bed7516 commit 8c5327c
Showing 26 changed files with 453 additions and 6 deletions.
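Before the file-by-file diff, a minimal sketch of what a test written against the new framework can look like (MyModuleTest and its method are illustrative names, not part of this commit):

from LMI.LTF.Tests import BooleanTest


class MyModuleTest(BooleanTest):
    # Methods whose names start with "test_" are discovered automatically.
    def test_addition(self):
        return 1 + 1 == 2  # a BooleanTest passes when the method returns True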
13 changes: 13 additions & 0 deletions LMI/LTF/Errors/LTFError.py
@@ -0,0 +1,13 @@
class LTFError(Exception):
def __init__(self, message):
self.message = message
self.__class__.__module__ = "LTF"

def raiseError(self):
raise self

def __str__(self):
return f"{self.message}"

def __repr__(self):
return f"<{self.__class__.__name__}, message='{self.message}'>"
5 changes: 5 additions & 0 deletions LMI/LTF/Errors/NotLTFTestError.py
@@ -0,0 +1,5 @@
from .LTFError import LTFError


class NotLTFTestError(LTFError):
pass
2 changes: 2 additions & 0 deletions LMI/LTF/Errors/__init__.py
@@ -0,0 +1,2 @@
from .LTFError import LTFError
from .NotLTFTestError import NotLTFTestError
20 changes: 20 additions & 0 deletions LMI/LTF/Formatters/Basic.py
@@ -0,0 +1,20 @@
from .Utils import map_failed_test


class Basic:
def __init__(self, statistics):
self.statistics = statistics

def generate_display(self):
data = ""
for testSet in self.statistics:
data += "====Test: " + testSet.__class__.__name__ + "====\n\n"
for test in self.statistics[testSet]:
testData = self.statistics[testSet][test]
data += "----" + test + "----\n"
data += "Succeeded: " + map_failed_test(testData["failed"]) + "\n"
data += "Time Taken: " + str(testData["time"]) + "\n\n"
return data

def show(self):
print(self.generate_display())
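For reference, the statistics mapping Basic consumes is test-set instance -> test name -> result dict, as built by LTFTest.setDefaultTestValues further down; a hypothetical snapshot for one passing test:

# statistics = {
#     <MyModuleTest instance>: {
#         "test_addition": {"time": 1.2e-05, "has_run": True, "errors": [], "failed": False}
#     }
# }
# Basic(statistics).show() then prints a "====Test: MyModuleTest====" block.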
9 changes: 9 additions & 0 deletions LMI/LTF/Formatters/PercentageBasic.py
@@ -0,0 +1,9 @@
from .Basic import Basic
from .Utils import generate_percent_breakdown


class PercentageBasic(Basic):
def generate_display(self):
data = super().generate_display()
data += generate_percent_breakdown(self.statistics)
return data
9 changes: 9 additions & 0 deletions LMI/LTF/Formatters/PercentageShort.py
@@ -0,0 +1,9 @@
from .Short import Short
from .Utils import generate_percent_breakdown


class PercentageShort(Short):
def generate_display(self):
data = super().generate_display()
data += generate_percent_breakdown(self.statistics)
return data
19 changes: 19 additions & 0 deletions LMI/LTF/Formatters/Short.py
@@ -0,0 +1,19 @@
from .Utils import map_failed_test


class Short:
def __init__(self, statistics):
self.statistics = statistics

def generate_display(self):
data = ""
for testSet in self.statistics:
data += "====Test: " + testSet.__class__.__name__ + "====\n"
for test in self.statistics[testSet]:
testData = self.statistics[testSet][test]
data += test + ": " + map_failed_test(testData["failed"]) + "\n"
data += "\n"
return data

def show(self):
print(self.generate_display())
33 changes: 33 additions & 0 deletions LMI/LTF/Formatters/Utils.py
@@ -0,0 +1,33 @@
def generate_percent_breakdown(statistics):
data = ""
failedTests = 0
totalTests = 0
failedTestSets = 0
totalTestSets = 0
for testSet in statistics:
setFailed = False
for test in statistics[testSet]:
testData = statistics[testSet][test]
if testData["failed"]:
failedTests += 1
setFailed = True
totalTests += 1
if setFailed:
failedTestSets += 1
totalTestSets += 1
data += "#### Final Breakdown ####\n"
data += "Failed Tests: " + str(failedTests) + "/" + str(totalTests) + "\n"
data += "Failed Test Sets: " + str(failedTestSets) + "/" + str(totalTestSets) + "\n"
data += "Test Succeeded: " + str(round(((totalTests - failedTests) / totalTests) * 100, 2)) + "%\n"
data += "Test Sets Succeeded: " + str(round(
((totalTestSets - failedTestSets) / totalTestSets) * 100, 2)
) + "%\n"
return data


def map_failed_test(failed_value):
return {
True: "no",
False: "yes",
None: "skipped"
}.get(failed_value, "unknown")
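A worked example of the breakdown arithmetic, with illustrative numbers: 4 tests across 2 test sets, where 1 test in 1 set fails:

# Failed Tests: 1/4, Failed Test Sets: 1/2
# (4 - 1) / 4 * 100 = 75.0  -> "Tests Succeeded: 75.0%"
# (2 - 1) / 2 * 100 = 50.0  -> "Test Sets Succeeded: 50.0%"
# map_failed_test inverts the flag for display: failed=False prints as "yes".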
4 changes: 4 additions & 0 deletions LMI/LTF/Formatters/__init__.py
@@ -0,0 +1,4 @@
from .Basic import Basic
from .PercentageBasic import PercentageBasic
from .PercentageShort import PercentageShort
from .Short import Short
12 changes: 12 additions & 0 deletions LMI/LTF/Requirements/RequireLuciferManager.py
@@ -0,0 +1,12 @@
from .Requirement import Requirement


class RequireLuciferManager(Requirement):
def satisfyRequirement(self):
from LMI import LMI
self.instance.luciferManager = LMI.luciferManager

def check_satisfied(self):
if hasattr(self.instance, "luciferManager"):
return True
return False
10 changes: 10 additions & 0 deletions LMI/LTF/Requirements/Requirement.py
@@ -0,0 +1,10 @@
class Requirement:
def __init__(self, instance):
self.instance = instance
self.isSatisfied = False

def check_satisfied(self):
return self.isSatisfied

def satisfyRequirement(self):
self.isSatisfied = True
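Custom requirements follow the same pattern as RequireLuciferManager above: override satisfyRequirement to attach state to the test instance and check_satisfied to detect it. A hypothetical sketch:

import tempfile

from .Requirement import Requirement


class RequireTempDir(Requirement):  # illustrative, not part of this commit
    def satisfyRequirement(self):
        self.instance.tempDir = tempfile.mkdtemp()

    def check_satisfied(self):
        return hasattr(self.instance, "tempDir")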
1 change: 1 addition & 0 deletions LMI/LTF/Requirements/__init__.py
@@ -0,0 +1 @@
from .RequireLuciferManager import RequireLuciferManager
34 changes: 34 additions & 0 deletions LMI/LTF/Runner.py
@@ -0,0 +1,34 @@
from .Errors import NotLTFTestError
from .Tests import AssertTest, LTFTest


class TestsRunner:
def __init__(self):
self.tests = {}
self.statistics = {}

def runAll(self):
for test in self.tests.keys():
if isinstance(test, LTFTest):
test.run()
self.statistics[test] = test.test_mappings
else:
raise NotLTFTestError("Not an LTF-compatible test!")

def add_function_assert_test(self, testFunction):
self.tests[AssertTest(testFunction)] = {
"hasRun": False,
"Failed": False,
"Time": None,
"Error": None
}

def add_LTF_test(self, LTFClass):
if isinstance(LTFClass, type):
LTFClass = LTFClass()
self.tests[LTFClass] = {
"hasRun": False,
"Failed": False,
"Time": None,
"Error": None
}
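Tying the runner together, a usage sketch (MyModuleTest is the illustrative class from the top of this page; note that extra test functions are bound to the test instance, so they must accept one parameter):

from LMI.LTF import TestsRunner
from LMI.LTF.Formatters import Short


def check_env(test):  # illustrative standalone assert-style test
    assert test is not None


runner = TestsRunner()
runner.add_LTF_test(MyModuleTest)           # classes are instantiated automatically
runner.add_function_assert_test(check_env)  # bare functions are wrapped in an AssertTest
runner.runAll()
Short(runner.statistics).show()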
21 changes: 21 additions & 0 deletions LMI/LTF/Tests/AssertTest.py
@@ -0,0 +1,21 @@
from .LTFTest import LTFTest


class AssertTest(LTFTest):
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
for test_func in args:
self.extra_tests.append(test_func)

def run(self):
self.findFunctions()
self.satisfyRequirements()
for functionName, function in self.all_functions.items():
self.setDefaultTestValues(functionName)
try:
timeTaken = self.timeFunction(function)
self.test_mappings[functionName]["time"] = timeTaken
except Exception as e:
self.addError(functionName, e)
self.test_mappings[functionName]["has_run"] = True
self.has_run = True
25 changes: 25 additions & 0 deletions LMI/LTF/Tests/BooleanTest.py
@@ -0,0 +1,25 @@
from .LTFTest import LTFTest


class BooleanTest(LTFTest):
def __init__(self, *args, **kwargs):
super().__init__(**kwargs)
for test_func in args:
self.extra_tests.append(test_func)

def run(self):
self.findFunctions()
self.satisfyRequirements()
for functionName, function in self.all_functions.items():
self.setDefaultTestValues(functionName)
try:
timeTaken, outValue = self.timeWithReturnFunction(function)
self.test_mappings[functionName]["time"] = timeTaken
if outValue is None:
self.test_mappings[functionName]["failed"] = None
elif not outValue:
self.addError(functionName, Exception("Failed boolean test"))
except Exception as e:
self.addError(functionName, e)
self.test_mappings[functionName]["has_run"] = True
self.has_run = True
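The return value of each test method drives the recorded outcome; an illustrative subclass covering all three cases:

class OutcomeDemo(BooleanTest):  # illustrative only
    def test_pass(self):
        return True   # "failed" stays False

    def test_fail(self):
        return False  # records Exception("Failed boolean test")

    def test_skip(self):
        return None   # "failed" becomes None, shown as "skipped" by the formatters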
60 changes: 60 additions & 0 deletions LMI/LTF/Tests/LTFTest.py
@@ -0,0 +1,60 @@
import abc
import timeit

from ...Utils import RunTimeReturn


class LTFTest(metaclass=abc.ABCMeta):
testIdentifiers = ["test_"]

def __init__(self, requirements=None):
self.requirements = []
self.all_functions = {}
self.extra_tests = []
self.test_mappings = {}
self.has_run = False
if requirements is not None:
for requirement in requirements:
self.requirements.append(requirement(self))

def findFunctions(self):
self.all_functions = {}
for function in self.extra_tests:
self.all_functions["extra-" + function.__name__] = function
for functionName in self.__class__.__dict__.keys():
for identifier in LTFTest.testIdentifiers:
if functionName.startswith(identifier):
self.all_functions[functionName] = (self.__class__.__dict__.get(functionName))
break

def satisfyRequirements(self):
for requirement in self.requirements:
if not requirement.check_satisfied():
requirement.satisfyRequirement()

def timeWithReturnFunction(self, function):
with RunTimeReturn() as RTR:
function = function.__get__(self)
timeTaken, outValue = RTR.run(function, number=1)
return timeTaken, outValue

def timeFunction(self, function):
function = function.__get__(self)
timeTaken = timeit.timeit(function, number=1)
return timeTaken

def addError(self, functionName, error, failed=True):
self.test_mappings[functionName]["errors"].append(error)
self.test_mappings[functionName]["failed"] = failed

def setDefaultTestValues(self, functionName):
self.test_mappings[functionName] = {
"time": None,
"has_run": False,
"errors": [],
"failed": False
}

@abc.abstractmethod
def run(self):
pass
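One subtlety in timeFunction and timeWithReturnFunction: function.__get__(self) relies on plain functions being descriptors, so class-dict test methods and extra_tests functions alike end up bound to the test instance before being timed. A self-contained illustration:

class Box:
    pass


def standalone(obj):
    return obj


box = Box()
bound = standalone.__get__(box)  # plain functions are descriptors
assert bound() is box            # bound() calls standalone(box)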
3 changes: 3 additions & 0 deletions LMI/LTF/Tests/__init__.py
@@ -0,0 +1,3 @@
from .AssertTest import AssertTest
from .BooleanTest import BooleanTest
from .LTFTest import LTFTest
8 changes: 8 additions & 0 deletions LMI/LTF/__init__.py
@@ -0,0 +1,8 @@
""" LTF - Lucifer Test Framework
This is a testing framework built into lucifer which allows for modules to have an easy to use test API within lucifer,
lucifer tests also use this same framework!
"""
from . import Formatters
from . import Requirements
from . import Tests
from .Runner import TestsRunner
29 changes: 29 additions & 0 deletions LMI/Utils.py
@@ -1,3 +1,6 @@
import timeit


def check_int(s):
if s.count(".") == 1:
if not (s.split(".")[1].count("0") == len(s.split(".")[1])):
@@ -12,3 +15,29 @@ def check_int_quick(s):
if s[0] in ('-', '+'):
return s[1:].isdigit()
return s.isdigit()


class RunTimeReturn:
runReturnTemplate = """def inner(_it, _timer{init}):
{setup}
_t0 = _timer()
for _i in _it:
retval = {stmt}
_t1 = _timer()
return _t1 - _t0, retval
"""

def __init__(self):
self.oldTemplate = None

def __enter__(self):
self.oldTemplate = timeit.template
timeit.template = RunTimeReturn.runReturnTemplate
return self

def __exit__(self, exc_type, exc_val, exc_tb):
timeit.template = self.oldTemplate

@staticmethod
def run(function, number=1):
return timeit.timeit(function, number=number)
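RunTimeReturn temporarily swaps timeit's (undocumented but long-standing) template so the timed statement's return value comes back alongside the elapsed time, then restores the original template on exit. A minimal sketch:

with RunTimeReturn() as RTR:
    timeTaken, outValue = RTR.run(lambda: 2 + 2, number=1)
# outValue == 4; timeit.template is restored after the with-block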
