diff --git a/TestInput.py b/TestInput.py index a3812e92db..a8ae541312 100644 --- a/TestInput.py +++ b/TestInput.py @@ -123,7 +123,7 @@ class TestInputParser(): def get_test_input(argv): #if file is given use parse_from_file #if its from command line - (opts, args) = getopt.getopt(argv[1:], 'ht:c:v:s:i:p:l:', []) + (opts, args) = getopt.getopt(argv[1:], 'ht:c:v:s:i:p:l:d:e:r:g:m', []) #first let's loop over and find out if user has asked for help #if it has i params = {} diff --git a/scripts/find_rerun_job.py b/scripts/find_rerun_job.py new file mode 100644 index 0000000000..a31c0023c7 --- /dev/null +++ b/scripts/find_rerun_job.py @@ -0,0 +1,194 @@ +import os as OS +import subprocess +import sys +from couchbase.cluster import Cluster +from couchbase.cluster import PasswordAuthenticator +from couchbase.n1ql import N1QLQuery +try: + import requests +except ImportError: + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "requests"]) + import requests +import argparse +import get_jenkins_params as jenkins_api + + +host = '172.23.121.84' +bucket_name = 'rerun_jobs' + + +def get_run_results(): + run_results = {} + return run_results + + +def parse_args(): + """ + Parse command line arguments into a dictionary + :return: Dictionary of parsed command line arguments + :rtype: dict + """ + argument_parser = argparse.ArgumentParser() + argument_parser.add_argument("build_version", type=str, + help="Couchbase build version of the " + "job") + argument_parser.add_argument("--executor_jenkins_job", + action='store_true', + help="Run with current executor job") + argument_parser.add_argument("--jenkins_job", action="store_true", + help="Run with current jenkins job") + argument_parser.add_argument("--store_data", action="store_true", + help="Store the test_run details. To " + "be used only after testrunner " + "is run") + argument_parser.add_argument("--install_failure", + action='store_true', + help="Was there install failure in " + "the run?") + args = vars(argument_parser.parse_args()) + return args + + +def build_args(build_version, executor_jenkins_job=False, + jenkins_job=False, store_data=False, + install_failure=False): + """ + Build a dictionary of arguments needed for the program + :param build_version: Couchbase build version of the job + :type build_version: str + :param executor_jenkins_job: Run with current Executor job + :type executor_jenkins_job: bool + :param jenkins_job: Run with current jenkins job + :type jenkins_job: bool + :param store_data: "Store the test_run details. To be used only + after testrunner is run" + :type store_data: bool + :param install_failure: Was there install failure in the run? + :type install_failure: bool + :return: Dictionary of parameters + :rtype: dict + """ + return locals() + + +def find_rerun_job(args): + """ + Find if the job was run previously + :param args: Dictionary of arguments. 
Run build_args() if calling + this from python script or parse_args() if running from shell + :type args: dict + :return: If the job was run previously + :rtype: bool + """ + name = None + store_data = args['store_data'] + install_failure = args['install_failure'] + if args['executor_jenkins_job']: + os = OS.getenv('os') + component = OS.getenv('component') + sub_component = OS.getenv('subcomponent') + version_build = OS.getenv('version_number') + name = "{}_{}_{}".format(os, component, sub_component) + elif args['jenkins_job']: + name = OS.getenv('JOB_NAME') + version_build = args['build_version'] + else: + os = args['os'] + component = args['component'] + sub_component = args['sub_component'] + if os and component and sub_component: + name = "{}_{}_{}".format(os, component, sub_component) + elif args['name']: + name = args['name'] + version_build = args['build_version'] + if not name or not version_build: + return False, {} + cluster = Cluster('couchbase://{}'.format(host)) + authenticator = PasswordAuthenticator('Administrator', 'password') + cluster.authenticate(authenticator) + rerun_jobs = cluster.open_bucket(bucket_name) + rerun = False + doc_id = "{}_{}".format(name, version_build) + try: + run_document = rerun_jobs.get(doc_id, quiet=True) + if not store_data: + if not run_document.success: + return False, {} + else: + return True, run_document.value + parameters = jenkins_api.get_params(OS.getenv('BUILD_URL')) + run_results = get_run_results() + job_to_store = { + "job_url": OS.getenv('BUILD_URL'), + "build_id": OS.getenv('BUILD_ID'), + "run_params": parameters, + "run_results": run_results, + "install_failure": install_failure} + if run_document.success: + rerun = True + run_document = run_document.value + else: + run_document = { + "build": version_build, + "num_runs": 0, + "jobs": []} + run_document['num_runs'] += 1 + run_document['jobs'].append(job_to_store) + rerun_jobs.upsert(doc_id, run_document, ttl=(7*24*60*60)) + return rerun, run_document + except Exception as e: + print(e) + return False, {} + + +def should_dispatch_job(os, component, sub_component, version): + """ + Finds if a job has to be dispatched for a particular os, component, + subcomponent and version. The method finds if the job had run + successfully previously, if the job is currently running. + :param os: Os of the job + :type os: str + :param component: Component of the job + :type component: str + :param sub_component: Sub-component of the job + :type sub_component: str + :param version: Version of the server for the job + :type version: str + :return: Boolean on whether to dispatch the job or not + :rtype: bool + """ + doc_id = "{0}_{1}_{2}_{3}".format(os, component, sub_component, + version) + cluster = Cluster('couchbase://{}'.format(host)) + authenticator = PasswordAuthenticator('Administrator', 'password') + cluster.authenticate(authenticator) + rerun_jobs = cluster.open_bucket(bucket_name) + user_name = "{0}-{1}%{2}".format(component, sub_component, version) + query = "select * from `QE-server-pool` where username like " \ + "'{0}' and state = 'booked'".format(user_name) + qe_server_pool = cluster.open_bucket("QE-server-pool") + n1ql_result = qe_server_pool.n1ql_query(N1QLQuery(query)) + if n1ql_result.buffered_remainder.__len__(): + print("Tests are already running. 
Not dispatching another job") + return False + run_document = rerun_jobs.get(doc_id, quiet=True) + if not run_document.success: + return True + run_document = run_document.value + last_job = run_document['jobs'][-1] + last_job_url = last_job['job_url'].rstrip('/') + result = jenkins_api.get_js(last_job_url, "tree=result") + if not result or 'result' not in result: + return True + if result['result'] == "SUCCESS": + print("Job had run successfully previously.") + print("{} is the successful job.".format(last_job_url)) + return False + return True + + +if __name__ == "__main__": + args = parse_args() + rerun, document = find_rerun_job(args) + print(rerun.__str__()) diff --git a/scripts/get_jenkins_params.py b/scripts/get_jenkins_params.py new file mode 100644 index 0000000000..d076ff99cd --- /dev/null +++ b/scripts/get_jenkins_params.py @@ -0,0 +1,71 @@ +import subprocess +import sys +try: + import requests +except ImportError: + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "requests"]) + import requests + + +def get_params(url): + """ + Get parameters from the jenkins job + :param url: The jenkins job URL + :type url: str + :return: Dictionary of job parameters + :rtype: dict + """ + res = get_js(url, params="tree=actions[parameters[*]]") + parameters = {} + if not res: + print("Error: could not get parameters") + return None + for vals in res['actions']: + if "parameters" in vals: + for params in vals['parameters']: + parameters[params['name']] = params['value'] + break + return parameters + + +def get_js(url, params=None): + """ + Get the parameters from Jenkins job using Jenkins rest api + :param url: The jenkins job URL + :type url: str + :param params: Parameters to be passed to the json/api + :type params: str + :return: Response from the rest api + :rtype: dict + """ + res = None + try: + res = requests.get("%s/%s" % (url, "api/json"), + params=params, timeout=15) + data = res.json() + return data + except: + print("Error: url unreachable: %s" % url) + return None + + +def download_url_data(url, params=None): + """ + Download the data from the given url and with given parameters + from the jenkins job + :param url: Jenkins job url + :type url: str + :param params: Parameters to be passed to the api + :type params: str + :return: Content of the request to the jenkins api + :rtype: requests.content + """ + res = None + try: + res = requests.get("%s" % url, params=params, timeout=15) + return res.content + except: + print("[Error] url unreachable: %s" % url) + res = None + return res \ No newline at end of file diff --git a/scripts/merge_reports.py b/scripts/merge_reports.py new file mode 100644 index 0000000000..ab44a2e402 --- /dev/null +++ b/scripts/merge_reports.py @@ -0,0 +1,168 @@ +#!/usr/bin/env python + +import os +import sys +sys.path = ["lib", "pytests", "pysystests"] + sys.path +import time +from xunit import XUnitTestResult +import glob +import xml.dom.minidom +import logging +log = logging.getLogger(__name__) +logging.info(__name__) +logging.getLogger().setLevel(logging.INFO) +import argparse + + +def filter_fields(testname): + testwords = testname.split(",") + line = "" + for fw in testwords: + if not fw.startswith("logs_folder") and not fw.startswith("conf_file") \ + and not fw.startswith("cluster_name:") \ + and not fw.startswith("ini:") \ + and not fw.startswith("case_number:") \ + and not fw.startswith("num_nodes:") \ + and not fw.startswith("spec:"): + line = line + fw.replace(":", "=", 1) + if fw != testwords[-1]: + line = line + "," + + return 
line + +def compare_with_sort(dict, key): + for k in dict.keys(): + if "".join(sorted(k)) == "".join(sorted(key)): + return True + + return False + + +def merge_reports(filespath): + log.info("Merging of report files from "+str(filespath)) + + testsuites = {} + if not isinstance(filespath, list): + filespaths = filespath.split(",") + else: + filespaths = filespath + for filepath in filespaths: + xml_files = glob.glob(filepath) + if not isinstance(filespath, list) and filespath.find("*"): + xml_files.sort(key=os.path.getmtime) + for xml_file in xml_files: + log.info("-- " + xml_file + " --") + doc = xml.dom.minidom.parse(xml_file) + testsuitelem = doc.getElementsByTagName("testsuite") + for ts in testsuitelem: + tsname = ts.getAttribute("name") + tserros = ts.getAttribute("errors") + tsfailures = ts.getAttribute("failures") + tsskips = ts.getAttribute("skips") + tstime = ts.getAttribute("time") + tstests = ts.getAttribute("tests") + issuite_existed = False + tests = {} + testsuite = {} + # fill testsuite details + if tsname in testsuites.keys(): + testsuite = testsuites[tsname] + tests = testsuite['tests'] + else: + testsuite['name'] = tsname + testsuite['errors'] = tserros + testsuite['failures'] = tsfailures + testsuite['skips'] = tsskips + testsuite['time'] = tstime + testsuite['testcount'] = tstests + issuite_existed = False + testcaseelem = ts.getElementsByTagName("testcase") + # fill test case details + for tc in testcaseelem: + testcase = {} + tcname = tc.getAttribute("name") + tctime = tc.getAttribute("time") + tcerror = tc.getElementsByTagName("error") + + tcname_filtered = filter_fields(tcname) + if compare_with_sort(tests, tcname_filtered): + testcase = tests[tcname_filtered] + testcase['name'] = tcname + else: + testcase['name'] = tcname + testcase['time'] = tctime + testcase['error'] = "" + if tcerror: + testcase['error'] = str(tcerror[0].firstChild.nodeValue) + + tests[tcname_filtered] = testcase + testsuite['tests'] = tests + testsuites[tsname] = testsuite + try: + abs_path = os.path.dirname(os.path.abspath(sys.argv[0])) + abs_path = abs_path.rstrip("scripts") + logs_directory = os.path.join(abs_path, "logs") + move_logs_directory = os.path.join(abs_path, "job_logs") + os.rename(logs_directory, move_logs_directory) + os.mkdir(logs_directory) + except Exception as e: + log.info(e) + return {} + log.info("\nNumber of TestSuites="+str(len(testsuites))) + tsindex = 0 + for tskey in testsuites.keys(): + tsindex = tsindex+1 + log.info("\nTestSuite#"+str(tsindex)+") "+str(tskey)+", Number of Tests="+str(len(testsuites[tskey]['tests']))) + pass_count = 0 + fail_count = 0 + tests = testsuites[tskey]['tests'] + xunit = XUnitTestResult() + for testname in tests.keys(): + testcase = tests[testname] + tname = testcase['name'] + ttime = testcase['time'] + inttime = float(ttime) + terrors = testcase['error'] + tparams = "" + if "," in tname: + tparams = tname[tname.find(","):] + tname = tname[:tname.find(",")] + + if terrors: + failed = True + fail_count = fail_count + 1 + xunit.add_test(name=tname, status='fail', time=inttime, + errorType='membase.error', errorMessage=str(terrors), params=tparams + ) + else: + passed = True + pass_count = pass_count + 1 + xunit.add_test(name=tname, time=inttime, params=tparams + ) + + str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime()) + root_log_dir = os.path.join(logs_directory, "testrunner-{0}".format( + str_time)) + if not os.path.exists(root_log_dir): + os.makedirs(root_log_dir) + logs_folder = os.path.join(root_log_dir, "merged_summary") 
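+            # merged_summary may already exist from an earlier merge in the same run; ignore mkdir errors and reuse the folder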
+ try: + os.mkdir(logs_folder) + except: + pass + output_filepath="{0}{2}mergedreport-{1}".format(logs_folder, str_time, os.sep).strip() + + xunit.write(output_filepath) + xunit.print_summary() + log.info("Summary file is at " + output_filepath+"-"+tsname+".xml") + return testsuites + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description='Process some integers.') + parser.add_argument('files', metavar=' ...', type=str, nargs='+', + help='Accept all input xml files') + args = parser.parse_args() + + print(args.files) + merge_reports(args.files) \ No newline at end of file diff --git a/scripts/rerun_jobs.py b/scripts/rerun_jobs.py new file mode 100644 index 0000000000..b639e6b6a1 --- /dev/null +++ b/scripts/rerun_jobs.py @@ -0,0 +1,330 @@ +import json +import os as OS +import subprocess +import sys +import urllib +try: + import httplib2 +except ImportError: + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "httplib2"]) + import httplib2 +try: + import requests +except ImportError: + subprocess.check_call( + [sys.executable, "-m", "pip", "install", "requests"]) + import requests +import argparse +import find_rerun_job +import get_jenkins_params +import merge_reports + +host = '172.23.121.84' +bucket_name = 'rerun_jobs' +TIMEOUT = 60 + + +def parse_args(): + """ + Parse command line arguments into a dictionary + :return: Dictionary of parsed command line arguments + :rtype: dict + """ + argument_parser = argparse.ArgumentParser() + argument_parser.add_argument("build_version", type=str, + help="Couchbase build version of the " + "job") + argument_parser.add_argument("--executor_jenkins_job", + action='store_true', + help="Run with current executor job") + argument_parser.add_argument("--jenkins_job", action="store_true", + help="Run with current jenkins job") + argument_parser.add_argument("--install_failure", + action='store_true', + help="Was there install failure in " + "the run?") + argument_parser.add_argument("--manual_run", action="store_true", + help="Is this a manual rerun of the " + "job") + args = vars(argument_parser.parse_args()) + return args + + +def build_args(build_version, executor_jenkins_job=False, + jenkins_job=False, install_failure=False): + """ + Build a dictionary of arguments needed for the program + :param build_version: Couchbase build version of the job + :type build_version: str + :param executor_jenkins_job: Run with current Executor job + :type executor_jenkins_job: bool + :param jenkins_job: Run with current jenkins job + :type jenkins_job: bool + :param install_failure: Was there install failure in the run? + :type install_failure: bool + :return: Dictionary of parameters + :rtype: dict + """ + return locals() + + +def merge_xmls(rerun_document): + """ + Merge the xml of the runs into a single xml for the jenkins job + to consume to show the test results + :param rerun_document: The rerun document containing the details + of previous runs. 
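+        Pass an empty dict to skip downloading older reports and merge only logs/**/*.xml from the current run.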
+ :type rerun_document: dict + :return: The merged testsuites from runs + :rtype: dict + """ + if not rerun_document: + testsuites = merge_reports.merge_reports("logs/**/*.xml") + return testsuites + print("Merging xmls") + num_runs = rerun_document['num_runs'] - 1 + valid_run = False + job = None + while not valid_run and num_runs > 0: + job = rerun_document['jobs'][num_runs - 1] + if job['install_failure']: + num_runs -= 1 + else: + valid_run = True + if not job: + print("no valid jobs found with run results") + testsuites = merge_reports.merge_reports("logs/**/*.xml") + return testsuites + job_url = job['job_url'] + artifacts = get_jenkins_params.get_js(job_url, "tree=artifacts[*]") + if not artifacts or len(artifacts['artifacts']) == 0: + print("could not find the job. Job might be deleted") + testsuites = merge_reports.merge_reports("logs/**/*.xml") + return testsuites + relative_paths = [] + for artifact in artifacts["artifacts"]: + if artifact["relativePath"].startswith("logs/") and \ + artifact["relativePath"].endswith(".xml"): + relative_paths.append(artifact["relativePath"]) + logs = [] + for rel_path in relative_paths: + xml_data = get_jenkins_params.download_url_data("{0}artifact/" + "{1}".format( + job_url, rel_path)) + try: + file_name = rel_path.split('/')[-1] + file_name = "Old_Report_{0}".format(file_name) + f = open(file_name, "w") + f.writelines(xml_data.decode('utf-8')) + f.close() + logs.append(file_name) + except Exception as e: + print(e) + logs.append("logs/**/*.xml") + testsuites = merge_reports.merge_reports(logs) + try: + # Remove old logs from the machine + try: + logs.remove("logs/**/*.xml") + except ValueError: + pass + for path in logs: + OS.remove(path) + except: + pass + print("merged xmls") + return testsuites + + +def should_rerun_tests(testsuites=None, install_failure=False, + retries=0): + """ + Finds out if the job has to be rerun again based on number of + failure in the current job, if number of retries has been exceeded + :param testsuites: The testsuite containing the merged results + from current and previous runs. 
+ :type testsuites: dict + :param install_failure: Was there an install failure in this job + :type install_failure: bool + :param retries: Number of times to retry + :type retries: int + :return: Boolean telling whether to rerun the job or not + :rtype: bool + """ + if install_failure and retries > 0: + return True + if retries < 1: + return False + should_rerun = False + for tskey in testsuites.keys(): + tests = testsuites[tskey]['tests'] + for testname in tests.keys(): + testcase = tests[testname] + errors = testcase['error'] + if errors: + should_rerun = True + break + if should_rerun: + break + return should_rerun + + +def get_rerun_parameters(rerun_document=None, is_rerun=False): + """ + Get the rerun parameters for the rerun of the job + :param rerun_document: Document containing the run history of the job + :type rerun_document: dict + :param is_rerun: Was this job a rerun + :type is_rerun: bool + :return: Re-run parameters to be used in the next job or current job + :rtype: str + """ + rerun_params = None + if not is_rerun and not rerun_document and (rerun_document and + rerun_document['num_runs'] == 1): + current_job_url = OS.getenv("BUILD_URL") + rerun_params = "-d failed={}".format(current_job_url) + num_runs = rerun_document['num_runs'] + valid_run = False + valid_job = None + while not valid_run and num_runs > 0: + job = rerun_document['jobs'][num_runs - 1] + if job['install_failure']: + num_runs -= 1 + else: + valid_run = True + valid_job = job + if valid_run and valid_job: + job_url = valid_job['job_url'] + rerun_params = "-d failed={}".format(job_url) + return rerun_params + + +def run_jenkins_job(url, params): + """ + Trigger a jenkins job with the url provided and the params to the job + :param url: Jenkins job url + :type url: str + :param params: Parameters to be passed to the job + :type params: dict + :return: Response and content of the call + :rtype: (response, content) + """ + url = "{0}&{1}".format(url, urllib.urlencode(params)) + print(url) + response, content = httplib2.Http(timeout=TIMEOUT).request(url, + 'GET') + return response, content + + +def rerun_job(args): + """ + Rerun a job based on the arguments to the program. Determine if a + rerun has to occur or not + :param args: Dictionary of arguments to the program + :type args: dict + :return: Nothing + :rtype: None + """ + build_version = args['build_version'] + executor_jenkins_job = args['executor_jenkins_job'] + jenkins_job = args['jenkins_job'] + install_failure = args['install_failure'] + fresh_run = OS.getenv('fresh_run', False) + is_rerun_args = find_rerun_job.build_args(build_version, + executor_jenkins_job=executor_jenkins_job, + jenkins_job=jenkins_job, + store_data=True, + install_failure=install_failure) + is_rerun, rerun_document = find_rerun_job.find_rerun_job(is_rerun_args) + test_suites = {} + if is_rerun and not install_failure and (fresh_run != 'true' or + fresh_run is False): + test_suites = merge_xmls(rerun_document) + else: + test_suites = merge_xmls({}) + retry_count = OS.getenv("retries") + if not retry_count: + if "retries" in args: + retry_count = args['retries'] + else: + retry_count = 0 + if isinstance(retry_count, str): + retry_count = int(retry_count) + should_rerun = should_rerun_tests(test_suites, install_failure, + retry_count) + if not should_rerun: + print("No more failed tests. 
Stopping reruns") + return + rerun_params = get_rerun_parameters(rerun_document, is_rerun) + if not rerun_params: + if install_failure: + rerun_params = '' + else: + return + if jenkins_job: + current_job_url = OS.getenv('BUILD_URL') + current_job_params = get_jenkins_params.get_params( + current_job_url) + current_job_params['rerun_params'] = rerun_params + current_job_params['retries'] = retry_count - 1 + job_url = OS.getenv("JOB_URL") + job_token = args['token'] + job_url = "{0}buildWithParameters?token={1}".format(job_url, + job_token) + response, content = run_jenkins_job(job_url, current_job_params) + return + dispatcher_params = OS.getenv('dispatcher_params').lstrip( + "parameters=") + dispatcher_params = json.loads(dispatcher_params) + dispatcher_params['rerun_params'] = rerun_params + dispatcher_params['retries'] = retry_count - 1 + dispatcher_params['component'] = OS.getenv('component') + dispatcher_params['subcomponent'] = OS.getenv('subcomponent') + dispatcher_params['fresh_run'] = "false" + job_url = dispatcher_params.pop('dispatcher_url') + job_url = "{0}buildWithParameters?token=extended_sanity".format( + job_url) + response, content = run_jenkins_job(job_url, dispatcher_params) + + +def manual_rerun(args): + """ + Get the rrerun parameters for manual rerun of the job. Puts the + parameter into a file to be consumed by jenkins job + :param args: Dictionary of arguments to the program + :type args: dict + :return: Nothing + :rtype: None + """ + build_version = args['build_version'] + executor_jenkins_job = args['executor_jenkins_job'] + jenkins_job = args['jenkins_job'] + is_rerun_args = find_rerun_job.build_args(build_version, + executor_jenkins_job=executor_jenkins_job, + jenkins_job=jenkins_job, + store_data=False, + install_failure=False) + is_rerun, rerun_document = find_rerun_job.find_rerun_job( + is_rerun_args) + if not is_rerun: + print("This is the first run for this build.") + return + rerun_param = get_rerun_parameters(rerun_document, is_rerun) + if not rerun_param: + print("Could not find a valid previous build to run with") + return + with open("rerun_props_file", 'w') as f: + to_write = "rerun_params_manual={}".format( + rerun_param) + f.write(to_write) + f.close() + + +if __name__ == '__main__': + args = parse_args() + if args['manual_run']: + manual_rerun(args) + else: + rerun_job(args) \ No newline at end of file diff --git a/testrunner.py b/testrunner.py index a1e3525332..e8f86150f6 100755 --- a/testrunner.py +++ b/testrunner.py @@ -29,7 +29,12 @@ from scripts.getcoredumps import Getcoredumps, Clearcoredumps import signal import shutil - +import glob +import xml.dom.minidom +import logging +log = logging.getLogger(__name__) +logging.info(__name__) +print("*** TestRunner ***") def usage(err=None): print """\ @@ -49,28 +54,57 @@ def parse_args(argv): parser.add_option("-q", action="store_false", dest="verbose") tgroup = OptionGroup(parser, "TestCase/Runlist Options") - tgroup.add_option("-i", "--ini", - dest="ini", help="Path to .ini file containing server information,e.g -i tmp/local.ini") + tgroup.add_option("-i", "--ini", dest="ini", + help="Path to .ini file containing server information,e.g -i tmp/local.ini") tgroup.add_option("-c", "--config", dest="conf", - help="Config file name (located in the conf subdirectory), e.g -c py-view.conf") - tgroup.add_option("-t", "--test", - dest="testcase", help="Test name (multiple -t options add more tests) e.g -t performance.perf.DiskDrainRate") + help="Config file name (located in the conf subdirectory), " + "e.g -c 
py-view.conf") + tgroup.add_option("-t", "--test", dest="testcase", + help="Test name (multiple -t options add more tests) e.g -t " + "performance.perf.DiskDrainRate") + tgroup.add_option("-d", "--include_tests", dest="include_tests", + help="Value can be 'failed' (or) 'passed' (or) 'failed=' (or) 'passed=' (or) 'file=' (or) '' to include tests in the run. Use -g option to search " + "entire conf files. e.g. -d 'failed' or -d 'failed=report.xml' or -d " + "'^2i.*nodes_init=2.*'") + tgroup.add_option("-e", "--exclude_tests", dest="exclude_tests", + help="Value can be 'failed' (or) 'passed' (or) 'failed=' (or) 'passed=' or 'file=' (or) '' " + "to exclude tests in the run. Use -g option to search entire conf " + "files. e.g. -e 'passed'") + tgroup.add_option("-r", "--rerun", dest="rerun", + help="Rerun fail or pass tests with given =count number of times maximum. " + "\ne.g. -r 'fail=3'") + tgroup.add_option("-g", "--globalsearch", dest="globalsearch", + help="Option to get tests from given conf file path pattern, " + "like conf/**/*.conf. Useful for include or exclude conf files to " + "filter tests. e.g. -g 'conf/**/.conf'", + default="") + tgroup.add_option("-m", "--merge", dest="merge", + help="Merge the report files path pattern, like logs/**/.xml. e.g. -m '[" + "logs/**/*.xml]'", + default="") parser.add_option_group(tgroup) - parser.add_option("-p", "--params", - dest="params", help="Optional key=value parameters, comma-separated -p k=v,k2=v2,...", + parser.add_option("-p", "--params", dest="params", + help="Optional key=value parameters, comma-separated -p k=v,k2=v2,...", default="") parser.add_option("-n", "--noop", action="store_true", help="NO-OP - emit test names, but don't actually run them e.g -n true") - parser.add_option("-l", "--log-level", - dest="loglevel", default="INFO", help="e.g -l info,warning,error") + parser.add_option("-l", "--log-level", dest="loglevel", default="INFO", + help="e.g -l info,warning,error") options, args = parser.parse_args() tests = [] test_params = {} + setLogLevel(options.loglevel) + log.info("Checking arguments...") if not options.ini: - parser.error("please specify an .ini file (-i)") + parser.error("Please specify an .ini file (-i) option.") parser.print_help() else: test_params['ini'] = options.ini @@ -79,19 +113,92 @@ def parse_args(argv): test_params['cluster_name'] = splitext(os.path.basename(options.ini))[0] - if not options.testcase and not options.conf: - parser.error("please specify a configuration file (-c) or a test case (-t)") + if not options.testcase and not options.conf and not options.globalsearch and not options.include_tests and not options.exclude_tests: + parser.error("Please specify a configuration file (-c) or a test case (-t) or a globalsearch (-g) option.") parser.print_help() - if options.conf: + if options.conf and not options.globalsearch: parse_conf_file(options.conf, tests, test_params) + if options.globalsearch: + parse_global_conf_file(options.globalsearch, tests, test_params) + if options.include_tests: + tests = process_include_or_filter_exclude_tests("include", options.include_tests, tests, + options) + if options.exclude_tests: + tests = process_include_or_filter_exclude_tests("exclude", options.exclude_tests, tests, options) + if options.testcase: tests.append(options.testcase) if options.noop: - print("\n".join(tests)) + print("---\n"+"\n".join(tests)+"\n---\nTotal="+str(len(tests))) sys.exit(0) return tests, test_params, options.ini, options.params, options +def setLogLevel(log_level): + if log_level and 
log_level.lower() == 'info': + log.setLevel(logging.INFO) + elif log_level and log_level.lower() == 'warning': + log.setLevel(logging.WARNING) + elif log_level and log_level.lower() == 'debug': + log.setLevel(logging.DEBUG) + elif log_level and log_level.lower() == 'critical': + log.setLevel(logging.CRITICAL) + elif log_level and log_level.lower() == 'fatal': + log.setLevel(logging.FATAL) + else: + log.setLevel(logging.NOTSET) + +def process_include_or_filter_exclude_tests(filtertype, option, tests, options): + if filtertype == 'include' or filtertype == 'exclude': + + if option.startswith('failed') or option.startswith('passed') or option.startswith("http://") or option.startswith("https://"): + passfail = option.split("=") + tests_list = [] + if len(passfail) == 2: + if passfail[1].startswith("http://") or passfail[1].startswith("https://"): + tp, tf = parse_testreport_result_xml(passfail[1]) + else: + tp, tf = parse_junit_result_xml(passfail[1]) + elif option.startswith("http://") or option.startswith("https://"): + tp, tf = parse_testreport_result_xml(option) + tests_list=tp+tf + else: + tp, tf = parse_junit_result_xml() + + if option.startswith('failed') and tf: + tests_list = tf + elif option.startswith('passed') and tp: + tests_list = tp + if filtertype == 'include': + tests = tests_list + else: + for line in tests_list: + isexisted, t = check_if_exists_with_params(tests, line, options.params) + if isexisted: + tests.remove(t) + elif option.startswith("file="): + filterfile = locate_conf_file(option.split("=")[1]) + if filtertype == 'include': + tests_list = [] + if filterfile: + for line in filterfile: + tests_list.append(line.strip()) + tests = tests_list + else: + for line in filterfile: + isexisted, t = check_if_exists_with_params(tests, line.strip(), options.params) + if isexisted: + tests.remove(t) + else: # pattern + if filtertype == 'include': + tests = [i for i in tests if re.search(option, i)] + else: + tests = [i for i in tests if not re.search(option, i)] + + else: + log.warning("Warning: unknown filtertype given (only include/exclude supported)!") + + return tests def create_log_file(log_config_file_name, log_file_name, level): tmpl_log_file = open("logging.conf.sample") @@ -121,7 +228,7 @@ def append_test(tests, name): def locate_conf_file(filename): - print "filename: %s" % filename + log.info("Conf filename: %s" % filename) if filename: if os.path.exists(filename): return file(filename) @@ -152,6 +259,7 @@ def parse_conf_file(filename, tests, params): num_creates=400000 .... 
""" + f = locate_conf_file(filename) if not f: usage("unable to locate configuration file: " + filename) @@ -162,7 +270,7 @@ def parse_conf_file(filename, tests, params): continue if stripped.endswith(":"): prefix = stripped.split(":")[0] - print "prefix: {0}".format(prefix) + log.info("Test prefix: {0}".format(prefix)) continue name = stripped if prefix and prefix.lower() == "params": @@ -182,6 +290,141 @@ def parse_conf_file(filename, tests, params): params['conf_file'] = filename +def parse_global_conf_file(dirpath, tests, params): + log.info("dirpath="+dirpath) + if os.path.isdir(dirpath): + dirpath=dirpath+os.sep+"**"+os.sep+"*.conf" + log.info("Global filespath=" + dirpath) + + conf_files = glob.glob(dirpath) + for file in conf_files: + parse_conf_file(file, tests, params) + +def check_if_exists(test_list, test_line): + new_test_line = ''.join(sorted(test_line)) + for t in test_list: + t1 = ''.join(sorted(t)) + if t1 == new_test_line: + return True, t + return False, "" + +def check_if_exists_with_params(test_list, test_line, test_params): + new_test_line = ''.join(sorted(test_line)) + for t in test_list: + if test_params: + t1 = ''.join(sorted(t+","+test_params.strip())) + else: + t1 = ''.join(sorted(t)) + + if t1 == new_test_line: + return True, t + return False, "" + +def transform_and_write_to_file(tests_list, filename): + new_test_list = [] + for test in tests_list: + line = filter_fields(test) + line = line.rstrip(",") + isexisted, _ = check_if_exists(new_test_list, line) + if not isexisted: + new_test_list.append(line) + + file = open(filename, "w+") + for line in new_test_list: + file.writelines((line) + "\n") + file.close() + return new_test_list + +def getNodeText(nodelist): + rc = [] + for node in nodelist: + if node.nodeType == node.TEXT_NODE: + rc.append(node.data) + return ''.join(rc) + +def parse_testreport_result_xml(filepath=""): + if filepath.startswith("http://") or filepath.startswith("https://"): + url_path = filepath+"/testReport/api/xml?pretty=true" + jobnamebuild = filepath.split('/') + if not os.path.exists('logs'): + os.mkdir('logs') + newfilepath = 'logs'+''.join(os.sep)+'_'.join(jobnamebuild[-3:])+"_testresult.xml" + log.info("Downloading " + url_path +" to "+newfilepath) + try: + filedata = urllib2.urlopen(url_path) + datatowrite = filedata.read() + filepath = newfilepath + with open(filepath, 'wb') as f: + f.write(datatowrite) + except Exception as ex: + log.error("Error:: "+str(ex)+"! Please check if " + url_path + " URL is accessible!! 
" + "Exiting...") + sys.exit(1) + if filepath == "": + filepath = "logs/**/*.xml" + log.info("Loading result data from "+filepath) + xml_files = glob.glob(filepath) + passed_tests=[] + failed_tests=[] + for xml_file in xml_files: + log.info("-- "+xml_file+" --") + doc = xml.dom.minidom.parse(xml_file) + testresultelem = doc.getElementsByTagName("testResult") + testsuitelem = testresultelem[0].getElementsByTagName("suite") + for ts in testsuitelem: + testcaseelem = ts.getElementsByTagName("case") + for tc in testcaseelem: + tcname = getNodeText((tc.getElementsByTagName("name")[0]).childNodes) + tcstatus = getNodeText((tc.getElementsByTagName("status")[0]).childNodes) + if tcstatus == 'PASSED': + failed=False + passed_tests.append(tcname) + else: + failed=True + failed_tests.append(tcname) + + if failed_tests: + failed_tests = transform_and_write_to_file(failed_tests,"failed_tests.conf") + + if passed_tests: + passed_tests = transform_and_write_to_file(passed_tests, "passed_tests.conf") + + return passed_tests, failed_tests + +def parse_junit_result_xml(filepath=""): + if filepath.startswith("http://") or filepath.startswith("https://"): + parse_testreport_result_xml(filepath) + return + if filepath == "": + filepath = "logs/**/*.xml" + log.info("Loading result data from "+filepath) + xml_files = glob.glob(filepath) + passed_tests=[] + failed_tests=[] + for xml_file in xml_files: + log.info("-- "+xml_file+" --") + doc = xml.dom.minidom.parse(xml_file) + testsuitelem = doc.getElementsByTagName("testsuite") + for ts in testsuitelem: + tsname = ts.getAttribute("name") + testcaseelem = ts.getElementsByTagName("testcase") + failed=False + for tc in testcaseelem: + tcname = tc.getAttribute("name") + tcerror = tc.getElementsByTagName("error") + for tce in tcerror: + failed_tests.append(tcname) + failed = True + if not failed: + passed_tests.append(tcname) + + if failed_tests: + failed_tests = transform_and_write_to_file(failed_tests,"failed_tests.conf") + + if passed_tests: + passed_tests = transform_and_write_to_file(passed_tests, "passed_tests.conf") + + return passed_tests, failed_tests def create_headers(username, password): authorization = base64.encodestring('%s:%s' % (username, password)) @@ -192,9 +435,9 @@ def create_headers(username, password): def get_server_logs(input, path): for server in input.servers: - print "grabbing diags from ".format(server.ip) + log.info("grabbing diags from ".format(server.ip)) diag_url = "http://{0}:{1}/diag".format(server.ip, server.port) - print diag_url + log.info(diag_url) try: req = urllib2.Request(diag_url) @@ -217,13 +460,13 @@ def get_server_logs(input, path): zipped.close() os.remove(filename) - print "downloaded and zipped diags @ : {0}".format("{0}.gz".format(filename)) + log.info("downloaded and zipped diags @ : {0}".format("{0}.gz".format(filename))) except urllib2.URLError: - print "unable to obtain diags from %s" % diag_url + log.error("unable to obtain diags from %s" % diag_url) except BadStatusLine: - print "unable to obtain diags from %s" % diag_url + log.error("unable to obtain diags from %s" % diag_url) except Exception as e: - print "unable to obtain diags from %s %s" % (diag_url, e) + log.error("unable to obtain diags from %s %s" % (diag_url, e)) def get_logs_cluster_run(input, path, ns_server_path): print "grabbing logs (cluster-run)" @@ -232,7 +475,7 @@ def get_logs_cluster_run(input, path, ns_server_path): try: shutil.make_archive(path + os.sep + "logs", 'zip', logs_path) except Exception as e: - print "NOT POSSIBLE TO GRAB LOGS 
(CLUSTER_RUN)" + log.error("NOT POSSIBLE TO GRAB LOGS (CLUSTER_RUN)") def get_cbcollect_info(input, path): for server in input.servers: @@ -241,7 +484,7 @@ def get_cbcollect_info(input, path): try: cbcollectRunner(server, path).run() except Exception as e: - print "NOT POSSIBLE TO GRAB CBCOLLECT FROM {0}: {1}".format(server.ip, e) + log.error("NOT POSSIBLE TO GRAB CBCOLLECT FROM {0}: {1}".format(server.ip, e)) def get_couch_dbinfo(input, path): for server in input.servers: @@ -250,7 +493,7 @@ def get_couch_dbinfo(input, path): try: couch_dbinfo_Runner(server, path).run() except Exception as e: - print "NOT POSSIBLE TO GRAB dbinfo FROM {0}: {1}".format(server.ip, e) + log.error("NOT POSSIBLE TO GRAB dbinfo FROM {0}: {1}".format(server.ip, e)) def clear_old_core_dumps(_input, path): for server in _input.servers: @@ -258,7 +501,7 @@ def clear_old_core_dumps(_input, path): try: Clearcoredumps(server, path).run() except Exception as e: - print "Unable to clear core dumps on {0} : {1}".format(server.ip, e) + log.error("Unable to clear core dumps on {0} : {1}".format(server.ip, e)) def get_core_dumps(_input, path): ret = False @@ -269,8 +512,8 @@ def get_core_dumps(_input, path): if Getcoredumps(server, path).run(): ret = True except Exception as e: - print "NOT POSSIBLE TO GRAB CORE DUMPS FROM {0} : {1}".\ - format(server.ip, e) + log.error("NOT POSSIBLE TO GRAB CORE DUMPS FROM {0} : {1}".\ + format(server.ip, e)) return ret @@ -300,19 +543,11 @@ def join(self, timeout=None): Thread.join(self, timeout=None) return self._return -def main(): +def runtests(names, options, arg_i, arg_p, runtime_test_params): + log.info("\nNumber of tests to be executed: " + str(len(names))) BEFORE_SUITE = "suite_setUp" AFTER_SUITE = "suite_tearDown" - names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv) - # get params from command line - TestInputSingleton.input = TestInputParser.get_test_input(sys.argv) - # ensure command line params get higher priority - runtime_test_params.update(TestInputSingleton.input.test_params) - TestInputSingleton.input.test_params = runtime_test_params - print "Global Test input params:" - pprint(TestInputSingleton.input.test_params) - xunit = XUnitTestResult() # Create root logs directory abs_path = os.path.dirname(os.path.abspath(sys.argv[0])) @@ -324,6 +559,7 @@ def main(): results = [] case_number = 1 + if "GROUP" in runtime_test_params: print "Only cases in GROUPs '{0}' will be executed".format(runtime_test_params["GROUP"]) if "EXCLUDE_GROUP" in runtime_test_params: @@ -339,8 +575,6 @@ def main(): argument_split = [a.strip() for a in re.split("[,]?([^,=]+)=", name)[1:]] params = dict(zip(argument_split[::2], argument_split[1::2])) - - # Note that if ALL is specified at runtime then tests which have no groups are still run - just being # explicit on this @@ -363,8 +597,6 @@ def main(): print "test '{0}' skipped, is in an excluded group".format(name) continue - - # Create Log Directory logs_folder = os.path.join(root_log_dir, "test_%s" % case_number) os.mkdir(logs_folder) @@ -400,7 +632,7 @@ def main(): result.errors = [(name, "Failing test : new core dump(s) " "were found and collected." " Check testrunner logs folder.")] - print("FAIL: New core dump(s) was found and collected") + log.info("FAIL: New core dump(s) was found and collected") except AttributeError as ex: pass try: @@ -427,7 +659,7 @@ def main(): result.errors = [(name, "Failing test : new core dump(s) " "were found and collected." 
" Check testrunner logs folder.")] - print("FAIL: New core dump(s) was found and collected") + log.info("FAIL: New core dump(s) was found and collected") if not result: for t in threading.enumerate(): if t != threading.current_thread(): @@ -523,6 +755,205 @@ def main(): else: t._Thread__stop() + return results, xunit, "{0}{2}report-{1}".format(os.path.dirname(logs_folder), str_time, os.sep) + + + +def filter_fields(testname): + testwords = testname.split(",") + line = "" + for fw in testwords: + if not fw.startswith("logs_folder") and not fw.startswith("conf_file") \ + and not fw.startswith("cluster_name:") \ + and not fw.startswith("ini:") \ + and not fw.startswith("case_number:") \ + and not fw.startswith("num_nodes:") \ + and not fw.startswith("spec:"): + line = line + fw.replace(":", "=", 1) + if fw != testwords[-1]: + line = line + "," + + return line + +def compare_with_sort(dict, key): + for k in dict.keys(): + if "".join(sorted(k)) == "".join(sorted(key)): + return True + + return False + + +def merge_reports(filespath): + log.info("Merging of report files from "+str(filespath)) + + testsuites = {} + if not isinstance(filespath, list): + filespaths = filespath.split(",") + else: + filespaths = filespath + for filepath in filespaths: + xml_files = glob.glob(filepath) + if not isinstance(filespath, list) and filespath.find("*"): + xml_files.sort(key=os.path.getmtime) + for xml_file in xml_files: + log.info("-- " + xml_file + " --") + doc = xml.dom.minidom.parse(xml_file) + testsuitelem = doc.getElementsByTagName("testsuite") + for ts in testsuitelem: + tsname = ts.getAttribute("name") + tserros = ts.getAttribute("errors") + tsfailures = ts.getAttribute("failures") + tsskips = ts.getAttribute("skips") + tstime = ts.getAttribute("time") + tstests = ts.getAttribute("tests") + issuite_existed = False + tests = {} + testsuite = {} + # fill testsuite details + if tsname in testsuites.keys(): + testsuite = testsuites[tsname] + tests = testsuite['tests'] + else: + testsuite['name'] = tsname + testsuite['errors'] = tserros + testsuite['failures'] = tsfailures + testsuite['skips'] = tsskips + testsuite['time'] = tstime + testsuite['testcount'] = tstests + issuite_existed = False + testcaseelem = ts.getElementsByTagName("testcase") + # fill test case details + for tc in testcaseelem: + testcase = {} + tcname = tc.getAttribute("name") + tctime = tc.getAttribute("time") + tcerror = tc.getElementsByTagName("error") + + tcname_filtered = filter_fields(tcname) + if compare_with_sort(tests, tcname_filtered): + testcase = tests[tcname_filtered] + testcase['name'] = tcname + else: + testcase['name'] = tcname + testcase['time'] = tctime + testcase['error'] = "" + if tcerror: + testcase['error'] = str(tcerror[0].firstChild.nodeValue) + + tests[tcname_filtered] = testcase + testsuite['tests'] = tests + testsuites[tsname] = testsuite + + log.info("\nNumber of TestSuites="+str(len(testsuites))) + tsindex = 0 + for tskey in testsuites.keys(): + tsindex = tsindex+1 + log.info("\nTestSuite#"+str(tsindex)+") "+str(tskey)+", Number of Tests="+str(len(testsuites[tskey]['tests']))) + pass_count = 0 + fail_count = 0 + tests = testsuites[tskey]['tests'] + xunit = XUnitTestResult() + for testname in tests.keys(): + testcase = tests[testname] + tname = testcase['name'] + ttime = testcase['time'] + inttime = float(ttime) + terrors = testcase['error'] + tparams = "" + if "," in tname: + tparams = tname[tname.find(","):] + tname = tname[:tname.find(",")] + + if terrors: + failed = True + fail_count = fail_count + 1 + 
xunit.add_test(name=tname, status='fail', time=inttime, + errorType='membase.error', errorMessage=str(terrors), params=tparams + ) + else: + passed = True + pass_count = pass_count + 1 + xunit.add_test(name=tname, time=inttime, params=tparams + ) + + str_time = time.strftime("%y-%b-%d_%H-%M-%S", time.localtime()) + abs_path = os.path.dirname(os.path.abspath(sys.argv[0])) + root_log_dir = os.path.join(abs_path, "logs{0}testrunner-{1}".format(os.sep, str_time)) + if not os.path.exists(root_log_dir): + os.makedirs(root_log_dir) + logs_folder = os.path.join(root_log_dir, "merged_summary") + try: + os.mkdir(logs_folder) + except: + pass + output_filepath="{0}{2}mergedreport-{1}".format(logs_folder, str_time, os.sep).strip() + + xunit.write(output_filepath) + xunit.print_summary() + log.info("Summary file is at " + output_filepath+"-"+tsname+".xml") + return testsuites + + + +def reruntests(rerun, names, options, arg_i, arg_p,runtime_test_params): + if "=" in rerun: + reruns = rerun.split("=") + rerun_type = reruns[0] + rerun_count = int(reruns[1]) + all_results = {} + log.info("NOTE: Running " + rerun_type + " tests for " + str(rerun_count) + " times maximum.") + + report_files = [] + for testc in range(rerun_count+1): + if testc == 0: + log.info("\n*** FIRST run of the tests ***") + else: + log.info("\n*** "+rerun_type.upper()+" Tests Rerun#" + str(testc) + "/" + str(rerun_count) + " ***") + results, xunit, report_file = runtests(names, options, arg_i, arg_p, runtime_test_params) + all_results[(testc + 1)] = results + all_results[str(testc+1)+"_report"] = report_file+"*.xml" + report_files.append(report_file+"*.xml") + tobe_rerun = False + for result in results: + if result["result"] == rerun_type: + tobe_rerun = True + if not tobe_rerun: + break + tp, tf = parse_junit_result_xml(report_file+"*.xml") + if "fail" == rerun_type: + names = tf + elif "pass" == rerun_type: + names = tp + + log.info("\nSummary:\n" + str(all_results)) + log.info("Final result: merging...") + merge_reports(report_files) + return all_results + +def main(): + log.info("TestRunner: parsing args...") + names, runtime_test_params, arg_i, arg_p, options = parse_args(sys.argv) + log.info("TestRunner: start...") + # get params from command line + TestInputSingleton.input = TestInputParser.get_test_input(sys.argv) + # ensure command line params get higher priority + runtime_test_params.update(TestInputSingleton.input.test_params) + TestInputSingleton.input.test_params = runtime_test_params + log.info("Global Test input params:") + pprint(TestInputSingleton.input.test_params) + if names: + if options.merge: + merge_reports(options.merge) + elif options.rerun: + results = reruntests(options.rerun, names, options, arg_i, arg_p, runtime_test_params) + else: + results, _, _ = runtests(names, options, arg_i, arg_p,runtime_test_params) + else: + log.warning("Warning: No tests got selected. Please double check the .conf file and other " + "options!") + + log.info("TestRunner: end...") + def watcher(): """This little code snippet is from @@ -538,7 +969,7 @@ def watcher(): if rc > 0: sys.exit(rc) except KeyboardInterrupt: - print 'KeyBoardInterrupt' + log.error('KeyBoardInterrupt') p.terminate() else: child = os.fork() @@ -549,7 +980,7 @@ def watcher(): if rc > 0: sys.exit( rc ) except KeyboardInterrupt: - print 'KeyBoardInterrupt' + log.error('KeyBoardInterrupt') try: os.kill(child, signal.SIGKILL) except OSError: @@ -561,3 +992,4 @@ def watcher(): if __name__ == "__main__": watcher() +
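A minimal sketch of how the new helpers can be driven from another Python script, mirroring what rerun_jobs.py and its manual_rerun() path do. It assumes the scripts/ modules above are importable, the couchbase Python SDK is installed, the rerun_jobs bucket on 172.23.121.84 is reachable, and the usual Jenkins environment variables (JOB_NAME, BUILD_URL) are set; the build version string is illustrative.

    import find_rerun_job
    import rerun_jobs

    # Look up the job history for this build without storing anything new.
    args = find_rerun_job.build_args("6.5.0-1234",      # illustrative build version
                                     jenkins_job=True,
                                     store_data=False,
                                     install_failure=False)
    was_rerun, run_document = find_rerun_job.find_rerun_job(args)

    if was_rerun:
        # Turn the last valid previous run into the "-d failed=<job_url>" string
        # understood by testrunner.py's new --include_tests/-d option.
        rerun_params = rerun_jobs.get_rerun_parameters(run_document, is_rerun=True)
        print(rerun_params)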