Skip to content

Commit

Permalink
RPP Test Suite Upgrade 6 - Restructure common HIP/HOST code (ROCm#315)
Browse files Browse the repository at this point in the history
* moved the common functions used in the python test suites to a common python script

created helper function for displaying QA test summary

* reversed the order of performance runs loop and decode loop in all test suites

* modified remaining python scripts to use print qa helper function for displaying QA results

* added a new helper function for printing the performance test results as a summary

* added caseMax, caseMin variables in image test suite

made changes to run only the necessary bit depths in case of QA mode

---------

Co-authored-by: sampath1117 <sampath.rachumallu@multicorewareinc.com>
  • Loading branch information
2 people authored and kiritigowda committed Mar 5, 2024
1 parent c8c3eff commit b58c803
Show file tree
Hide file tree
Showing 11 changed files with 1,507 additions and 2,077 deletions.
1,375 changes: 688 additions & 687 deletions utilities/test_suite/HIP/Tensor_hip.cpp

Large diffs are not rendered by default.

292 changes: 147 additions & 145 deletions utilities/test_suite/HIP/Tensor_voxel_hip.cpp

Large diffs are not rendered by default.

274 changes: 41 additions & 233 deletions utilities/test_suite/HIP/runTests.py

Large diffs are not rendered by default.

226 changes: 22 additions & 204 deletions utilities/test_suite/HIP/runTests_voxel.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,11 +23,10 @@
"""

import os
import subprocess # nosec
import argparse
import sys
import datetime
import shutil
sys.dont_write_bytecode = True
sys.path.append(os.path.join(os.path.dirname( __file__ ), '..' ))
from common import *

# Set the timestamp
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
Expand All @@ -41,105 +40,6 @@
caseMin = 0
caseMax = 5

# Verify that a folder path is safe to clean, then delete everything inside it
# (the folder itself is kept); exits the script on an empty, root, or missing path
def validate_and_remove_contents(path):
    if not path:  # refuse an empty path string
        print("Folder path is empty.")
        exit()
    elif path == "/*":  # refuse a request aimed at the filesystem root
        print("Root folder cannot be deleted.")
        exit()
    elif os.path.exists(path):  # only clean a path that actually exists
        # Remove every file and sub-directory found directly under path
        for entry in os.listdir(path):
            entryPath = os.path.join(path, entry)
            if os.path.isfile(entryPath):
                os.remove(entryPath)
            elif os.path.isdir(entryPath):
                shutil.rmtree(entryPath)
    else:
        print("Path is invalid or does not exist.")
        exit()

# Delete every sub-directory of path whose name starts with the given folder
# prefix; refuses to operate on the filesystem root and ignores a missing path
def validate_and_remove_folders(path, folder):
    if path == "/*":  # refuse a request aimed at the filesystem root
        print("Root folder cannot be deleted.")
        exit()
    if path and os.path.isdir(path):  # nothing to do for an empty/missing path
        matching = (name for name in os.listdir(path) if name.startswith(folder))
        for name in matching:
            fullPath = os.path.join(path, name)
            # A prefix match that is a plain file is reported, not deleted
            if os.path.isdir(fullPath):
                shutil.rmtree(fullPath)
                print("Deleted directory:", fullPath)
            else:
                print("Directory not found:", fullPath)

def case_file_check(CASE_FILE_PATH, TYPE, TENSOR_TYPE_LIST, new_file):
    """Append one profiler case-result file to the consolidated results file.

    Reads CASE_FILE_PATH line by line, echoing each line.  Every data line
    (anything not starting with the '"Name"' header) is written to new_file
    when TYPE is in TENSOR_TYPE_LIST, and the kernel count for TYPE in the
    module-level d_counter dict is incremented per copied line.

    Returns True on success, False when the file cannot be opened.
    """
    try:
        # 'with' guarantees the handle is closed even if a write below raises,
        # which the original explicit open/close did not
        with open(CASE_FILE_PATH, 'r') as case_file:
            for line in case_file:
                print(line)
                if not(line.startswith('"Name"')):
                    if TYPE in TENSOR_TYPE_LIST:
                        new_file.write(line)
                        d_counter[TYPE] = d_counter[TYPE] + 1
        return True
    except IOError:
        print("Unable to open case results")
        return False

# Build the destination directory for a test run: performance runs (qaMode 0)
# get a per-functionality-group sub-folder, QA runs write straight into path
def directory_name_generator(qaMode, affinity, layoutType, case, path):
    if qaMode != 0:
        return path
    group = func_group_finder(int(case))
    return "{}/rpp_{}_{}_{}".format(path, affinity, layoutType, group)

# Resolve a layout index (0=pkd3, 1=pln3, 2=pln1) into the destination
# directory for this case plus the tag used to name its log file
def process_layout(layout, qaMode, case, dstPath):
    if layout == 0:
        layoutTag = "pkd3"
    elif layout == 1:
        layoutTag = "pln3"
    elif layout == 2:
        layoutTag = "pln1"
    dstPathTemp = directory_name_generator(qaMode, "hip", layoutTag, case, dstPath)
    return dstPathTemp, layoutTag

# Validate that input_path names an existing directory; raise ValueError if not
def validate_path(input_path):
    exists = os.path.exists(input_path)
    if not exists:
        raise ValueError("path " + input_path + " does not exist.")
    if not os.path.isdir(input_path):
        raise ValueError("path " + input_path + " is not a directory.")

# Create one directory per layout name under dst_path and move every existing
# output folder whose name contains that layout (lower-cased) into it
def create_layout_directories(dst_path, layout_dict):
    for layoutIndex in range(3):
        layoutName = layout_dict[layoutIndex]
        layoutRoot = dst_path + '/' + layoutName
        try:
            os.makedirs(layoutRoot)
        except FileExistsError:
            pass  # re-running over an existing tree is fine
        # dst_path is re-listed on each pass so only this layout's folders move
        for folder in [f for f in os.listdir(dst_path) if layoutName.lower() in f]:
            os.rename(dst_path + '/' + folder, layoutRoot + '/' + folder)

def get_log_file_list(preserveOutput):
return [
outFolderPath + "/OUTPUT_PERFORMANCE_LOGS_HIP_VOXEL_" + timestamp + "/Tensor_voxel_hip_pkd3_raw_performance_log.txt",
Expand All @@ -156,28 +56,14 @@ def func_group_finder(case_number):
else:
return "miscellaneous"

# Generate performance reports based on counters and a list of types
def generate_performance_reports(d_counter, TYPE_LIST):
    """Print a per-type summary of the consolidated profiler results.

    For each TYPE in TYPE_LIST, reads
    RESULTS_DIR/consolidated_results_<TYPE>.stats.csv (RESULTS_DIR is a
    module-level global), adds an AverageMs column derived from AverageNs,
    drops the Percentage column and prints the table without row indices.
    d_counter maps each TYPE to its tested-kernel count, printed as a header.
    Propagates ImportError (pandas missing) and IOError (csv missing) for the
    caller to handle.
    """
    import pandas as pd
    pd.options.display.max_rows = None
    # Generate performance report
    for TYPE in TYPE_LIST:
        print("\n\n\nKernels tested - ", d_counter[TYPE], "\n\n")
        df = pd.read_csv(RESULTS_DIR + "/consolidated_results_" + TYPE + ".stats.csv")
        df["AverageMs"] = df["AverageNs"] / 1000000
        dfPrint = df.drop(['Percentage'], axis = 1)
        # str.lstrip("Hip_") strips any leading run of the CHARACTERS H/i/p/_,
        # mangling names like "Hip_integrate" -> "ntegrate"; remove only the
        # literal "Hip_" prefix instead
        dfPrint["HIP Kernel Name"] = dfPrint.iloc[:, 0].str.replace("^Hip_", "", regex = True)
        dfPrint_noIndices = dfPrint.astype(str)
        dfPrint_noIndices.replace(['0', '0.0'], '', inplace = True)
        dfPrint_noIndices = dfPrint_noIndices.to_string(index = False)
        print(dfPrint_noIndices)

def run_unit_test(headerPath, dataPath, dstPathTemp, layout, case, numRuns, testType, qaMode, batchSize):
print("\n\n\n\n")
print("--------------------------------")
print("Running a New Functionality...")
print("--------------------------------")
bitDepths = [0, 2]
if qaMode:
bitDepths = [2]
for bitDepth in bitDepths:
print("\n\n\nRunning New Bit Depth...\n-------------------------\n\n")
print(f"./Tensor_voxel_hip {headerPath} {dataPath} {dstPathTemp} {layout} {case} {numRuns} {testType} {qaMode} {batchSize} {bitDepth}")
Expand Down Expand Up @@ -365,7 +251,7 @@ def rpp_test_suite_parser_and_validator():
dstPath = outFilePath

# Validate DST_FOLDER
validate_and_remove_contents(dstPath)
validate_and_remove_files(dstPath)

# Enable extglob
if os.path.exists(buildFolderPath + "/build"):
Expand All @@ -377,6 +263,9 @@ def rpp_test_suite_parser_and_validator():
subprocess.run(["cmake", scriptPath], cwd=".") # nosec
subprocess.run(["make", "-j16"], cwd=".") # nosec

# List of cases supported
supportedCaseList = ['0', '1', '2', '3', '5']

# Create folders based on testType and profilingOption
if testType == 1 and profilingOption == "YES":
os.makedirs(f"{dstPath}/Tensor_PKD3")
Expand All @@ -390,23 +279,29 @@ def rpp_test_suite_parser_and_validator():

if testType == 0:
for case in caseList:
if case not in supportedCaseList:
continue
for layout in range(3):
dstPathTemp, logFileLayout = process_layout(layout, qaMode, case, dstPath)
dstPathTemp, logFileLayout = process_layout(layout, qaMode, case, dstPath, "hip", func_group_finder)
if qaMode == 0:
if not os.path.isdir(dstPathTemp):
os.mkdir(dstPathTemp)

run_unit_test(headerPath, dataPath, dstPathTemp, layout, case, numRuns, testType, qaMode, batchSize)
elif (testType == 1 and profilingOption == "NO"):
for case in caseList:
if case not in supportedCaseList:
continue
for layout in range(3):
dstPathTemp, logFileLayout = process_layout(layout, qaMode, case, dstPath)
dstPathTemp, logFileLayout = process_layout(layout, qaMode, case, dstPath, "hip", func_group_finder)
run_performance_test(loggingFolder, logFileLayout, headerPath, dataPath, dstPathTemp, layout, case, numRuns, testType, qaMode, batchSize)
elif (testType == 1 and profilingOption == "YES"):
NEW_FUNC_GROUP_LIST = [0, 1]
for case in caseList:
if case not in supportedCaseList:
continue
for layout in range(3):
dstPathTemp, logFileLayout = process_layout(layout, qaMode, case, dstPath)
dstPathTemp, logFileLayout = process_layout(layout, qaMode, case, dstPath, "hip", func_group_finder)
run_performance_test_with_profiler(loggingFolder, logFileLayout, dstPath, headerPath, dataPath, dstPathTemp, layout, case, numRuns, testType, qaMode, batchSize)

RESULTS_DIR = ""
Expand Down Expand Up @@ -451,14 +346,14 @@ def rpp_test_suite_parser_and_validator():
# Write into csv file
CASE_FILE_PATH = CASE_RESULTS_DIR + "/output_case" + str(CASE_NUM) + ".stats.csv"
print("CASE_FILE_PATH = " + CASE_FILE_PATH)
fileCheck = case_file_check(CASE_FILE_PATH, TYPE, TENSOR_TYPE_LIST, new_file)
fileCheck = case_file_check(CASE_FILE_PATH, TYPE, TENSOR_TYPE_LIST, new_file, d_counter)
if fileCheck == False:
continue

new_file.close()
subprocess.call(['chown', '{}:{}'.format(os.getuid(), os.getgid()), RESULTS_DIR + "/consolidated_results_" + TYPE + ".stats.csv"]) # nosec
try:
generate_performance_reports(d_counter, TYPE_LIST)
generate_performance_reports(d_counter, TYPE_LIST, RESULTS_DIR)

except ImportError:
print("\nPandas not available! Results of GPU profiling experiment are available in the following files:\n" + \
Expand All @@ -470,32 +365,14 @@ def rpp_test_suite_parser_and_validator():
print("Unable to open results in " + RESULTS_DIR + "/consolidated_results_" + TYPE + ".stats.csv")

# print the results of qa tests
supportedCaseList = ['0', '1', '2', '3', '5']
nonQACaseList = [] # Add cases present in supportedCaseList, but without QA support

if qaMode and testType == 0:
qaFilePath = os.path.join(outFilePath, "QA_results.txt")
checkFile = os.path.isfile(qaFilePath)
if checkFile:
f = open(qaFilePath, 'r+')
print("---------------------------------- Results of QA Test - Tensor_voxel_hip ----------------------------------\n")
numLines = 0
numPassed = 0
for line in f:
sys.stdout.write(line)
numLines += 1
if "PASSED" in line:
numPassed += 1
sys.stdout.flush()
resultsInfo = "\n\nFinal Results of Tests:"
resultsInfo += "\n - Total test cases including all subvariants REQUESTED = " + str(numLines)
resultsInfo += "\n - Total test cases including all subvariants PASSED = " + str(numPassed)
resultsInfo += "\n\nGeneral information on Tensor voxel test suite availability:"
resultsInfo += "\n - Total augmentations supported in Tensor test suite = " + str(len(supportedCaseList))
resultsInfo += "\n - Total augmentations with golden output QA test support = " + str(len(supportedCaseList) - len(nonQACaseList))
resultsInfo += "\n - Total augmentations without golden ouput QA test support (due to randomization involved) = " + str(len(nonQACaseList))
f.write(resultsInfo)
print("\n-------------------------------------------------------------------" + resultsInfo + "\n\n-------------------------------------------------------------------")
print_qa_tests_summary(qaFilePath, supportedCaseList, nonQACaseList)

layoutDict = {0:"PKD3", 1:"PLN3", 2:"PLN1"}
if (testType == 0 and qaMode == 0): # Unit tests
Expand All @@ -509,63 +386,4 @@ def rpp_test_suite_parser_and_validator():
]

for log_file in log_file_list:
# Opening log file
try:
f = open(log_file,"r")
print("\n\n\nOpened log file -> "+ log_file)
except IOError:
print("Skipping file -> "+ log_file)
continue

stats = []
maxVals = []
minVals = []
avgVals = []
functions = []
frames = []
prevLine = ""
funcCount = 0

# Loop over each line
for line in f:
for functionality_group in functionality_group_list:
if functionality_group in line:
functions.extend([" ", functionality_group, " "])
frames.extend([" ", " ", " "])
maxVals.extend([" ", " ", " "])
minVals.extend([" ", " ", " "])
avgVals.extend([" ", " ", " "])

if "max,min,avg wall times in ms/batch" in line:
split_word_start = "Running "
split_word_end = " " +str(numRuns)
prevLine = prevLine.partition(split_word_start)[2].partition(split_word_end)[0]
if prevLine not in functions:
functions.append(prevLine)
frames.append(numRuns)
split_word_start = "max,min,avg wall times in ms/batch = "
split_word_end = "\n"
stats = line.partition(split_word_start)[2].partition(split_word_end)[0].split(",")
maxVals.append(stats[0])
minVals.append(stats[1])
avgVals.append(stats[2])
funcCount += 1

if line != "\n":
prevLine = line

# Print log lengths
print("Functionalities - "+ str(funcCount))

# Print summary of log
print("\n\nFunctionality\t\t\t\t\t\tFrames Count\t\tmax(ms/batch)\t\tmin(ms/batch)\t\tavg(ms/batch)\n")
if len(functions) != 0:
maxCharLength = len(max(functions, key = len))
functions = [x + (' ' * (maxCharLength - len(x))) for x in functions]
for i, func in enumerate(functions):
print(func + "\t\t\t\t\t\t\t\t" + str(frames[i]) + "\t\t" + str(maxVals[i]) + "\t\t" + str(minVals[i]) + "\t\t" + str(avgVals[i]))
else:
print("No variants under this category")

# Closing log file
f.close()
print_performance_tests_summary(log_file, functionality_group_list, numRuns)
Loading

0 comments on commit b58c803

Please sign in to comment.