Test suite support for doing QA w.r.t performance #221

Merged
7 changes: 7 additions & 0 deletions utilities/test_suite/HOST/CMakeLists.txt
@@ -84,8 +84,15 @@ if (OpenCV_FOUND)
link_directories(${ROCM_PATH}/lib /usr/local/lib)

add_executable(Tensor_host Tensor_host.cpp)
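# Legacy BatchPD executables, built alongside Tensor_host so QA mode can compare Tensor performance against the BatchPD baseline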
add_executable(BatchPD_host_pkd3 ../../rpp-performancetests/HOST_NEW/BatchPD_host_pkd3.cpp)
add_executable(BatchPD_host_pln1 ../../rpp-performancetests/HOST_NEW/BatchPD_host_pln1.cpp)
add_executable(BatchPD_host_pln3 ../../rpp-performancetests/HOST_NEW/BatchPD_host_pln3.cpp)

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++17")
target_link_libraries(Tensor_host ${OpenCV_LIBS} -lturbojpeg -lrpp pthread ${LINK_LIBRARY_LIST})
target_link_libraries(BatchPD_host_pkd3 ${OpenCV_LIBS} -lturbojpeg -lrpp pthread ${LINK_LIBRARY_LIST})
target_link_libraries(BatchPD_host_pln1 ${OpenCV_LIBS} -lturbojpeg -lrpp pthread ${LINK_LIBRARY_LIST})
target_link_libraries(BatchPD_host_pln3 ${OpenCV_LIBS} -lturbojpeg -lrpp pthread ${LINK_LIBRARY_LIST})
else()
message("-- ${Red}Error: OpenCV must be installed to install ${PROJECT_NAME} successfully!${ColourReset}")
endif()
114 changes: 111 additions & 3 deletions utilities/test_suite/HOST/runTests.py
@@ -179,6 +179,16 @@ def run_unit_test(srcPath1, srcPath2, dstPathTemp, case, numRuns, testType, layo
print("------------------------------------------------------------------------------------------")

def run_performance_test_cmd(loggingFolder, log_file_layout, srcPath1, srcPath2, dstPath, bitDepth, outputFormatToggle, case, additionalParam, numRuns, testType, layout, qaMode, decoderType, batchSize, roiList):
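# In QA mode, first run the legacy BatchPD binary for this layout and append its output to a baseline timing log for later comparison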
if qaMode == 1:
with open("{}/BatchPD_host_{}_raw_performance_log.txt".format(loggingFolder, log_file_layout), "a") as log_file:
process = subprocess.Popen([buildFolderPath + "/build/BatchPD_host_" + log_file_layout, srcPath1, srcPath2, str(bitDepth), str(outputFormatToggle), str(case), str(additionalParam), "0"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) # nosec
while True:
output = process.stdout.readline()
if not output and process.poll() is not None:
break
print(output.strip())
log_file.write(output)

with open("{}/Tensor_host_{}_raw_performance_log.txt".format(loggingFolder, log_file_layout), "a") as log_file:
print(f"./Tensor_host {srcPath1} {srcPath2} {dstPath} {bitDepth} {outputFormatToggle} {case} {additionalParam} 0 ")
process = subprocess.Popen([buildFolderPath + "/build/Tensor_host", srcPath1, srcPath2, dstPath, str(bitDepth), str(outputFormatToggle), str(case), str(additionalParam), str(numRuns), str(testType), str(layout), "0", str(qaMode), str(decoderType), str(batchSize)] + roiList + [scriptPath], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) # nosec
@@ -296,7 +306,7 @@ def rpp_test_suite_parser_and_validator():
batchSize = args.batch_size
roiList = ['0', '0', '0', '0'] if args.roi is None else args.roi

if qaMode and batchSize != 3:
if qaMode and testType == 0 and batchSize != 3:
print("QA mode can only run with a batch size of 3.")
exit(0)

@@ -375,7 +385,6 @@ def rpp_test_suite_parser_and_validator():
srcPath2 = ricapInFilePath
for layout in range(3):
dstPathTemp, log_file_layout = process_layout(layout, qaMode, case, dstPath)

run_performance_test(loggingFolder, log_file_layout, srcPath1, srcPath2, dstPath, case, numRuns, testType, layout, qaMode, decoderType, batchSize, roiList)

# print the results of qa tests
@@ -411,7 +420,106 @@ def rpp_test_suite_parser_and_validator():
if testType == 0 and qaMode == 0:
create_layout_directories(dstPath, layoutDict)
# Performance tests
elif (testType == 1):
elif (testType == 1 and qaMode == 1):
tensorLogFileList = get_log_file_list(preserveOutput)
batchpdLogFileList = [sub.replace("Tensor_host", "BatchPD_host") for sub in tensorLogFileList] # needed only in QA mode

stats = []
tensorVal = []
batchpdVal = []
functions = []
functionsBatchPD = []
funcCount = 0
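# Minimum percentage improvement of the Tensor avg wall time over the BatchPD baseline required for each case to PASS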
thresholdDict = {"brightness": 5, "gamma_correction": 10, "blend": 10}
for i in range(3):
tensorLogFile = tensorLogFileList[i]
batchpdLogFile = batchpdLogFileList[i]
# Opening Tensor log file
try:
tensorFile = open(tensorLogFile,"r")
except IOError:
print("Skipping file -> "+ tensorLogFile)
continue

# Opening BatchPD log file
try:
batchpdFile = open(batchpdLogFile,"r")
except IOError:
print("Skipping file -> "+ batchpdLogFile)
continue

prevLine = ""
# Parse the Tensor log: prevLine carries the test-description line; when the wall-times line that follows it appears, extract its stats
for line in tensorFile:
if "max,min,avg wall times in ms/batch" in line and "u8_Tensor" in prevLine:
layoutCheck = "PKD3_toPKD3" in prevLine or "PLN3_toPLN3" in prevLine or "PLN1_toPLN1" in prevLine
interpolationCheck = "interpolationType" not in prevLine or "interpolationTypeBilinear" in prevLine
if layoutCheck and interpolationCheck:
splitWordStart = "Running "
splitWordEnd = " " + str(numRuns)
prevLine = prevLine.partition(splitWordStart)[2].partition(splitWordEnd)[0]
splitWordStart = "max,min,avg wall times in ms/batch = "
splitWordEnd = "\n"
if prevLine not in functions:
functions.append(prevLine)
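# The wall-times line holds max,min,avg; keep the average (index 2)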
stats = line.partition(splitWordStart)[2].partition(splitWordEnd)[0].split(",")
tensorVal.append(float(stats[2]))
funcCount += 1

if line != "\n":
prevLine = line

# Closing log file
tensorFile.close()

stats = []
prevLine = ""
for line in batchpdFile:
if "max,min,avg" in line and "u8_BatchPD" in prevLine:
if "PKD3_toPKD3" in prevLine or "PLN3_toPLN3" in prevLine or "PLN1_toPLN1" in prevLine:
splitWordStart = "Running "
splitWordEnd = " " + str(numRuns)
prevLine = prevLine.partition(splitWordStart)[2].partition(splitWordEnd)[0]
splitWordStart = "max,min,avg"
splitWordEnd = "\n"
if prevLine not in functionsBatchPD:
functionsBatchPD.append(prevLine)
stats = line.partition(splitWordStart)[2].partition(splitWordEnd)[0].split(",")
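# Keep the average (index 2); the factor of 1000.0 assumes the BatchPD log reports seconds, converting to ms to match the Tensor log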
batchpdVal.append(float(stats[2]) * 1000.0)

if line != "\n":
prevLine = line

# Closing log file
batchpdFile.close()

print("---------------------------------- Results of QA Test - Tensor_host ----------------------------------\n")
qaFilePath = os.path.join(outFilePath, "QA_results.txt")
f = open(qaFilePath, 'w')
numLines = 0
numPassed = 0
removalList = ["_HOST", "_toPKD3", "_toPLN3", "_toPLN1"]
for i in range(len(functions)):
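# Percentage speedup of Tensor over BatchPD, e.g. batchpdVal 10.0 ms vs tensorVal 9.0 ms gives ((10.0 - 9.0) / 10.0) * 100 = 10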
perfImprovement = int(((batchpdVal[i] - tensorVal[i]) / batchpdVal[i]) * 100)
numLines += 1
funcName = functions[i]
caseName = funcName.split("_u8_")[0]
for string in removalList:
funcName = funcName.replace(string, "")
thresh = thresholdDict[caseName]
if perfImprovement > thresh:
numPassed += 1
print(funcName + ": PASSED")
else:
print(funcName + ": FAILED")

resultsInfo = "\n\nFinal Results of Tests:"
resultsInfo += "\n - Total test cases including all subvariants REQUESTED = " + str(numLines)
resultsInfo += "\n - Total test cases including all subvariants PASSED = " + str(numPassed)
f.write(resultsInfo)
print("\n-------------------------------------------------------------------" + resultsInfo + "\n\n-------------------------------------------------------------------")
elif (testType == 1 and qaMode == 0):
log_file_list = get_log_file_list(preserveOutput)

functionality_group_list = [