diff --git a/compliance_suite/constants/constants.py b/compliance_suite/constants/constants.py
index af11a21..521dec4 100644
--- a/compliance_suite/constants/constants.py
+++ b/compliance_suite/constants/constants.py
@@ -3,16 +3,6 @@
 This module contains the constant values used across the project. It is divided into suitable categories.
 """
 
-from compliance_suite.models.v1_0_specs import (
-    TesCancelTaskResponse,
-    TesCreateTaskResponse,
-    TesListTasksResponse,
-    TesListTasksResponseMinimal,
-    TesServiceInfo,
-    TesTask,
-    TesTaskMinimal,
-)
-
 # Utility Constants
 
 LOGGING_LEVEL = {
@@ -22,18 +12,19 @@
 }
 
 # API Constants
+# Note: The Basic and Full views share the same required fields, hence Basic views are validated against the Full view model.
 ENDPOINT_TO_MODEL = {
-    'service_info': TesServiceInfo,
-    'list_tasks_MINIMAL': TesListTasksResponseMinimal,
-    'list_tasks_BASIC': TesListTasksResponse,
-    'list_tasks_FULL': TesListTasksResponse,
-    'get_task_MINIMAL': TesTaskMinimal,
-    'get_task_BASIC': TesTask,
-    'get_task_FULL': TesTask,
-    'create_task': TesCreateTaskResponse,
-    'create_task_request_body': TesTask,
-    'cancel_task': TesCancelTaskResponse
+    'service_info': 'TesServiceInfo',
+    'list_tasks_MINIMAL': 'TesListTasksResponseMinimal',
+    'list_tasks_BASIC': 'TesListTasksResponse',
+    'list_tasks_FULL': 'TesListTasksResponse',
+    'get_task_MINIMAL': 'TesTaskMinimal',
+    'get_task_BASIC': 'TesTask',
+    'get_task_FULL': 'TesTask',
+    'create_task': 'TesCreateTaskResponse',
+    'create_task_request_body': 'TesTask',
+    'cancel_task': 'TesCancelTaskResponse'
 }
 
 REQUEST_HEADERS = {
diff --git a/compliance_suite/functions/client.py b/compliance_suite/functions/client.py
index f2cee69..d170e76 100644
--- a/compliance_suite/functions/client.py
+++ b/compliance_suite/functions/client.py
@@ -94,11 +94,9 @@ def check_poll(
             return False
 
         response_json: Any = response.json()
-        if self.check_cancel and response_json["state"] in ["CANCELED"]:
-            logger.info("Expected response received. Polling request successful")
-            return True
-
-        elif not self.check_cancel and response_json["state"] in ["COMPLETE", "EXECUTOR_ERROR", "SYSTEM_ERROR"]:
+        valid_states = ["CANCELED", "CANCELING"] if self.check_cancel else ["COMPLETE", "EXECUTOR_ERROR",
+                                                                            "SYSTEM_ERROR", "PREEMPTED"]
+        if response_json["state"] in valid_states:
             logger.info("Expected response received. Polling request successful")
             return True
 
diff --git a/compliance_suite/job_runner.py b/compliance_suite/job_runner.py
index 44b19bc..3a8877c 100644
--- a/compliance_suite/job_runner.py
+++ b/compliance_suite/job_runner.py
@@ -126,6 +126,24 @@ def tag_matcher(
                 return True
         return False
 
+    def version_matcher(
+            self,
+            versions: List[str]
+    ) -> bool:
+        """ Matches the user-provided spec version against the versions defined for a YAML test.
+        The test is skipped if the versions do not match.
+
+        Args:
+            versions: The versions defined for a YAML test
+
+        Returns:
+            True if the versions match, False otherwise.
+ """ + + if self.version in versions: + return True + return False + def generate_report(self) -> Any: """Generates the report via ga4gh-testbed-lib and returns it @@ -170,7 +188,7 @@ def run_jobs(self) -> None: if self.report.platform_name == "": self.report.set_platform_details(self.server) - if self.tag_matcher(yaml_data["tags"]): + if self.version_matcher(yaml_data["versions"]) and self.tag_matcher(yaml_data["tags"]): test_runner = TestRunner(yaml_data["service"], self.server, self.version) job_count: int = 0 for job in yaml_data["jobs"]: @@ -183,8 +201,8 @@ def run_jobs(self) -> None: f' for {yaml_file} successful.') else: self.test_status["skipped"].append(str(self.test_count)) - logger.log(LOGGING_LEVEL['SKIP'], f"No Tag matched. Skipping Test-{self.test_count}" - f" for {yaml_file}") + logger.log(LOGGING_LEVEL['SKIP'], f"Version or tag did not match. Skipping " + f"Test-{self.test_count} for {yaml_file}") except (JobValidationException, TestFailureException) as err: self.test_status["failed"].append(str(self.test_count)) diff --git a/compliance_suite/models/v1_0_specs.py b/compliance_suite/models/v1_0_0_specs.py similarity index 99% rename from compliance_suite/models/v1_0_specs.py rename to compliance_suite/models/v1_0_0_specs.py index 8fd38c4..80193ea 100644 --- a/compliance_suite/models/v1_0_specs.py +++ b/compliance_suite/models/v1_0_0_specs.py @@ -1,6 +1,6 @@ -"""Module compliance_suite.models.v1_0_specs.py +"""Module compliance_suite.models.v1_0_0_specs.py -Pydantic generated models for TES API Specs v1.0 +Pydantic generated models for TES API Specs v1.0.0 """ from __future__ import annotations @@ -429,6 +429,7 @@ class TesListTasksResponse(BaseModel): ) +# Extra models manually added for Minimal View class TesTaskMinimal(BaseModel): id: str = Field( ..., diff --git a/compliance_suite/models/v1_1_0_specs.py b/compliance_suite/models/v1_1_0_specs.py new file mode 100644 index 0000000..7e82633 --- /dev/null +++ b/compliance_suite/models/v1_1_0_specs.py @@ -0,0 +1,498 @@ +"""Module compliance_suite.models.v1_1_0_specs.py + +Pydantic generated models for TES API Specs v1.1.0 +""" + +from __future__ import annotations + +from datetime import datetime +from enum import Enum +from typing import Dict, List, Optional + +from pydantic import AnyUrl, BaseModel, Field + + +class TesCancelTaskResponse(BaseModel): + pass + + +class TesCreateTaskResponse(BaseModel): + id: str = Field(..., description='Task identifier assigned by the server.') + + +class TesExecutor(BaseModel): + image: str = Field( + ..., + description='Name of the container image. The string will be passed as the image\nargument to the ' + 'containerization run command. Examples:\n - `ubuntu`\n - `quay.io/aptible/ubuntu`\n - ' + '`gcr.io/my-org/my-image`\n - `myregistryhost:5000/fedora/httpd:version1.0`', + example='ubuntu:20.04', + ) + command: List[str] = Field( + ..., + description='A sequence of program arguments to execute, where the first argument\nis the program to ' + 'execute (i.e. argv). Example:\n```\n{\n "command" : ["/bin/md5", "/data/file1"]\n}\n```', + example=['/bin/md5', '/data/file1'], + ) + workdir: Optional[str] = Field( + None, + description='The working directory that the command will be executed in.\nIf not defined, the system will ' + 'default to the directory set by\nthe container image.', + example='/data/', + ) + stdin: Optional[str] = Field( + None, + description='Path inside the container to a file which will be piped\nto the executor\'s stdin. This must be ' + 'an absolute path. 
This mechanism\ncould be used in conjunction with the input declaration to ' + 'process\na data file using a tool that expects STDIN.\n\nFor example, to get the MD5 sum of a ' + 'file by reading it into the STDIN\n```\n{\n "command" : ["/bin/md5"],\n ' + '"stdin" : "/data/file1"\n}\n```', + example='/data/file1', + ) + stdout: Optional[str] = Field( + None, + description='Path inside the container to a file where the executor\'s\nstdout will be written to. ' + 'Must be an absolute path. Example:\n```\n{\n "stdout" : "/tmp/stdout.log"\n}\n```', + example='/tmp/stdout.log', + ) + stderr: Optional[str] = Field( + None, + description='Path inside the container to a file where the executor\'s\nstderr will be written to. Must be ' + 'an absolute path. Example:\n```\n{\n "stderr" : "/tmp/stderr.log"\n}\n```', + example='/tmp/stderr.log', + ) + env: Optional[Dict[str, str]] = Field( + None, + description='Enviromental variables to set within the container. Example:\n```\n{\n "env" : {\n ' + '"ENV_CONFIG_PATH" : "/data/config.file",\n "BLASTDB" : "/data/GRC38",\n ' + '"HMMERDB" : "/data/hmmer"\n }\n}\n```', + example={'BLASTDB': '/data/GRC38', 'HMMERDB': '/data/hmmer'}, + ) + ignore_error: Optional[bool] = Field( + None, + description='Default behavior of running an array of executors is that execution\nstops on the first error. ' + 'If `ignore_error` is `True`, then the\nrunner will record error exit codes, but will continue on ' + 'to the next\ntesExecutor.', + ) + + +class TesExecutorLog(BaseModel): + start_time: Optional[str] = Field( + None, + description='Time the executor started, in RFC 3339 format.', + example='2020-10-02T10:00:00-05:00', + ) + end_time: Optional[str] = Field( + None, + description='Time the executor ended, in RFC 3339 format.', + example='2020-10-02T11:00:00-05:00', + ) + stdout: Optional[str] = Field( + None, + description='Stdout content.\n\nThis is meant for convenience. No guarantees are made about the ' + 'content.\nImplementations may chose different approaches: only the head, only the tail,\na URL ' + 'reference only, etc.\n\nIn order to capture the full stdout client should set ' + 'Executor.stdout\nto a container file path, and use Task.outputs to upload that file\nto ' + 'permanent storage.', + ) + stderr: Optional[str] = Field( + None, + description='Stderr content.\n\nThis is meant for convenience. 
No guarantees are made about the ' + 'content.\nImplementations may chose different approaches: only the head, only the tail,\na URL ' + 'reference only, etc.\n\nIn order to capture the full stderr client should set ' + 'Executor.stderr\nto a container file path, and use Task.outputs to upload that file\nto ' + 'permanent storage.', + ) + exit_code: int = Field(..., description='Exit code.') + + +class TesFileType(Enum): + FILE = 'FILE' + DIRECTORY = 'DIRECTORY' + + +class TesInput(BaseModel): + name: Optional[str] = None + description: Optional[str] = None + url: Optional[str] = Field( + None, + description='REQUIRED, unless "content" is set.\n\nURL in long term storage, for example:\n - ' + 's3://my-object-store/file1\n - gs://my-bucket/file2\n - file:///path/to/my/file\n - ' + '/path/to/my/file', + example='s3://my-object-store/file1', + ) + path: str = Field( + ..., + description='Path of the file inside the container.\nMust be an absolute path.', + example='/data/file1', + ) + type: Optional[TesFileType] = None + content: Optional[str] = Field( + None, + description='File content literal.\n\nImplementations should support a minimum of 128 KiB in this ' + 'field\nand may define their own maximum.\n\nUTF-8 encoded\n\nIf content is not empty, ' + '"url" must be ignored.', + ) + streamable: Optional[bool] = Field( + None, + description='Indicate that a file resource could be accessed using a streaming\ninterface, ie a FUSE mounted ' + 's3 object. This flag indicates that\nusing a streaming mount, as opposed to downloading the whole ' + 'file to\nthe local scratch space, may be faster despite the latency and\noverhead. This does not ' + 'mean that the backend will use a streaming\ninterface, as it may not be provided by the vendor, ' + 'but if the\ncapacity is avalible it can be used without degrading the\nperformance of the ' + 'underlying program.', + ) + + +class TesOutput(BaseModel): + name: Optional[str] = Field(None, description='User-provided name of output file') + description: Optional[str] = Field( + None, + description='Optional users provided description field, can be used for documentation.', + ) + url: str = Field( + ..., + description='URL at which the TES server makes the output accessible after the task is complete.\nWhen ' + 'tesOutput.path contains wildcards, it must be a directory; see\n`tesOutput.path_prefix` for ' + 'details on how output URLs are constructed in this case.\nFor Example:\n - ' + '`s3://my-object-store/file1`\n - `gs://my-bucket/file2`\n - `file:///path/to/my/file`', + ) + path: str = Field( + ..., + description='Absolute path of the file inside the container.\nMay contain pattern matching wildcards to select ' + 'multiple outputs at once, but mind\nimplications for `tesOutput.url` and `tesOutput.path_prefix`.' + '\nOnly wildcards defined in IEEE Std 1003.1-2017 (POSIX), 12.3 are supported; ' + 'see\nhttps://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_13', + ) + path_prefix: Optional[str] = Field( + None, + description='Prefix to be removed from matching outputs if `tesOutput.path` contains wildcards;\noutput URLs ' + 'are constructed by appending pruned paths to the directory specfied\nin ' + '`tesOutput.url`.\nRequired if `tesOutput.path` contains wildcards, ignored otherwise.', + ) + type: Optional[TesFileType] = None + + +class TesOutputFileLog(BaseModel): + url: str = Field( + ..., description='URL of the file in storage, e.g. 
s3://bucket/file.txt' + ) + path: str = Field( + ..., + description='Path of the file inside the container. Must be an absolute path.', + ) + size_bytes: str = Field( + ..., + description="Size of the file in bytes. Note, this is currently coded as a string\nbecause official " + "JSON doesn't support int64 numbers.", + example=['1024'], + ) + + +class TesResources(BaseModel): + cpu_cores: Optional[int] = Field( + None, description='Requested number of CPUs', example=4 + ) + preemptible: Optional[bool] = Field( + None, + description="Define if the task is allowed to run on preemptible compute instances,\nfor example, " + "AWS Spot. This option may have no effect when utilized\non some backends that don't have " + "the concept of preemptible jobs.", + example=False, + ) + ram_gb: Optional[float] = Field( + None, description='Requested RAM required in gigabytes (GB)', example=8 + ) + disk_gb: Optional[float] = Field( + None, description='Requested disk size in gigabytes (GB)', example=40 + ) + zones: Optional[List[str]] = Field( + None, + description='Request that the task be run in these compute zones. How this string\nis utilized ' + 'will be dependent on the backend system. For example, a\nsystem based on a cluster ' + 'queueing system may use this string to define\npriorty queue to which the job is assigned.', + example='us-west-1', + ) + backend_parameters: Optional[Dict[str, str]] = Field( + None, + description='Key/value pairs for backend configuration.\nServiceInfo shall return a list of keys that a ' + 'backend supports.\nKeys are case insensitive.\nIt is expected that clients pass all runtime ' + 'or hardware requirement key/values\nthat are not mapped to existing tesResources properties ' + 'to backend_parameters.\nBackends shall log system warnings if a key is passed that is ' + 'unsupported.\nBackends shall not store or return unsupported keys if included in a task.\nIf ' + 'backend_parameters_strict equals true,\nbackends should fail the task if any key/values are ' + 'unsupported, otherwise,\nbackends should attempt to run the task\nIntended uses include VM size ' + 'selection, coprocessor configuration, etc.\nExample:\n```\n{\n "backend_parameters" : ' + '{\n "VmSize" : "Standard_D64_v3"\n }\n}\n```', + example={'VmSize': 'Standard_D64_v3'}, + ) + backend_parameters_strict: Optional[bool] = Field( + False, + description='If set to true, backends should fail the task if any backend_parameters\nkey/values are ' + 'unsupported, otherwise, backends should attempt to run the task', + example=False, + ) + + +class Artifact(Enum): + tes = 'tes' + + +class TesState(Enum): + UNKNOWN = 'UNKNOWN' + QUEUED = 'QUEUED' + INITIALIZING = 'INITIALIZING' + RUNNING = 'RUNNING' + PAUSED = 'PAUSED' + COMPLETE = 'COMPLETE' + EXECUTOR_ERROR = 'EXECUTOR_ERROR' + SYSTEM_ERROR = 'SYSTEM_ERROR' + CANCELED = 'CANCELED' + PREEMPTED = 'PREEMPTED' + CANCELING = 'CANCELING' + + +class TesTaskLog(BaseModel): + logs: List[TesExecutorLog] = Field(..., description='Logs for each executor') + metadata: Optional[Dict[str, str]] = Field( + None, + description='Arbitrary logging metadata included by the implementation.', + example={'host': 'worker-001', 'slurmm_id': 123456}, + ) + start_time: Optional[str] = Field( + None, + description='When the task started, in RFC 3339 format.', + example='2020-10-02T10:00:00-05:00', + ) + end_time: Optional[str] = Field( + None, + description='When the task ended, in RFC 3339 format.', + example='2020-10-02T11:00:00-05:00', + ) + outputs: List[TesOutputFileLog] = Field( + ..., + 
description='Information about all output files. Directory outputs are\nflattened into separate items.', + ) + system_logs: Optional[List[str]] = Field( + None, + description='System logs are any logs the system decides are relevant,\nwhich are not tied directly ' + 'to an Executor process.\nContent is implementation specific: format, size, etc.\n\nSystem ' + 'logs may be collected here to provide convenient access.\n\nFor example, the system may ' + 'include the name of the host\nwhere the task is executing, an error message ' + 'that caused\na SYSTEM_ERROR state (e.g. disk is full), etc.\n\nSystem logs are ' + 'only included in the FULL task view.', + ) + + +class ServiceType(BaseModel): + group: str = Field( + ..., + description="Namespace in reverse domain name format. Use `org.ga4gh` for implementations compliant " + "with official GA4GH specifications. For services with custom APIs not standardized by " + "GA4GH, or implementations diverging from official GA4GH specifications, use a different " + "namespace (e.g. your organization's reverse domain name).", + example='org.ga4gh', + ) + artifact: str = Field( + ..., + description='Name of the API or GA4GH specification implemented. Official GA4GH types should be assigned ' + 'as part of standards approval process. Custom artifacts are supported.', + example='beacon', + ) + version: str = Field( + ..., + description='Version of the API or specification. GA4GH specifications use semantic versioning.', + example='1.0.0', + ) + + +class Organization(BaseModel): + name: str = Field( + ..., + description='Name of the organization responsible for the service', + example='My organization', + ) + url: AnyUrl = Field( + ..., + description='URL of the website of the organization (RFC 3986 format)', + example='https://example.com', + ) + + +class Service(BaseModel): + id: str = Field( + ..., + description='Unique ID of this service. Reverse domain name notation is recommended, though not required. ' + 'The identifier should attempt to be globally unique so it can be used in downstream ' + 'aggregator services e.g. Service Registry.', + example='org.ga4gh.myservice', + ) + name: str = Field( + ..., + description='Name of this service. Should be human readable.', + example='My project', + ) + type: ServiceType + description: Optional[str] = Field( + None, + description='Description of the service. Should be human readable and provide information about the service.', + example='This service provides...', + ) + organization: Organization = Field( + ..., description='Organization providing the service' + ) + contactUrl: Optional[AnyUrl] = Field( + None, + description='URL of the contact for the provider of this service, e.g. a link to a contact form ' + '(RFC 3986 format), or an email (RFC 2368 format).', + example='mailto:support@example.com', + ) + documentationUrl: Optional[AnyUrl] = Field( + None, + description='URL of the documentation of this service (RFC 3986 format). This should help someone ' + 'learn how to use your service, including any specifics required to access data, ' + 'e.g. 
authentication.', + example='https://docs.myservice.example.com', + ) + createdAt: Optional[datetime] = Field( + None, + description='Timestamp describing when the service was first deployed and available (RFC 3339 format)', + example='2019-06-04T12:58:19Z', + ) + updatedAt: Optional[datetime] = Field( + None, + description='Timestamp describing when the service was last updated (RFC 3339 format)', + example='2019-06-04T12:58:19Z', + ) + environment: Optional[str] = Field( + None, + description='Environment the service is running in. Use this to distinguish between production, ' + 'development and testing/staging deployments. Suggested values are prod, test, dev, ' + 'staging. However this is advised and not enforced.', + example='test', + ) + version: str = Field( + ..., + description='Version of the service being described. Semantic versioning is recommended, but ' + 'other identifiers, such as dates or commit hashes, are also allowed. The version should ' + 'be changed whenever the service is updated.', + example='1.0.0', + ) + + +class TesServiceType(ServiceType): + artifact: Artifact = Field(..., example='tes') + + +class TesServiceInfo(Service): + storage: Optional[List[str]] = Field( + None, + description='Lists some, but not necessarily all, storage locations supported\nby the service.', + example=[ + 'file:///path/to/local/funnel-storage', + 's3://ohsu-compbio-funnel/storage', + ], + ) + tesResources_backend_parameters: Optional[List[str]] = Field( + None, + description='Lists all tesResources.backend_parameters keys supported\nby the service', + example=['VmSize'], + ) + type: TesServiceType = Field(...) + + +class TesTask(BaseModel): + id: Optional[str] = Field( + None, + description='Task identifier assigned by the server.', + example='job-0012345', + ) + state: Optional[TesState] = None + name: Optional[str] = Field(None, description='User-provided task name.') + description: Optional[str] = Field( + None, + description='Optional user-provided description of task for documentation purposes.', + ) + inputs: Optional[List[TesInput]] = Field( + None, + description='Input files that will be used by the task. Inputs will be downloaded\nand mounted into ' + 'the executor container as defined by the task request\ndocument.', + example=[{'url': 's3://my-object-store/file1', 'path': '/data/file1'}], + ) + outputs: Optional[List[TesOutput]] = Field( + None, + description='Output files.\nOutputs will be uploaded from the executor container to long-term storage.', + example=[ + { + 'path': '/data/outfile', + 'url': 's3://my-object-store/outfile-1', + 'type': 'FILE', + } + ], + ) + resources: Optional[TesResources] = None + executors: List[TesExecutor] = Field( + ..., + description='An array of executors to be run. Each of the executors will run one\nat a time sequentially. ' + 'Each executor is a different command that\nwill be run, and each can utilize a different ' + 'docker image. But each of\nthe executors will see the same mapped inputs and volumes ' + 'that are declared\nin the parent CreateTask message.\n\nExecution stops on the first error.', + ) + volumes: Optional[List[str]] = Field( + None, + description='Volumes are directories which may be used to share data between\nExecutors. 
Volumes are ' + 'initialized as empty directories by the\nsystem when the task starts and are mounted at the ' + 'same path\nin each Executor.\n\nFor example, given a volume defined at `/vol/A`,\nexecutor 1 ' + 'may write a file to `/vol/A/exec1.out.txt`, then\nexecutor 2 may read from that ' + 'file.\n\n(Essentially, this translates to a `docker run -v` flag where\nthe container path ' + 'is the same for each executor).', + example=['/vol/A/'], + ) + tags: Optional[Dict[str, str]] = Field( + None, + description='A key-value map of arbitrary tags. These can be used to store meta-data\nand annotations ' + 'about a task. Example:\n```\n{\n "tags" : {\n "WORKFLOW_ID" : "cwl-01234",\n ' + '"PROJECT_GROUP" : "alice-lab"\n }\n}\n```', + example={'WORKFLOW_ID': 'cwl-01234', 'PROJECT_GROUP': 'alice-lab'}, + ) + logs: Optional[List[TesTaskLog]] = Field( + None, + description='Task logging information.\nNormally, this will contain only one entry, but in the case ' + 'where\na task fails and is retried, an entry will be appended to this list.', + ) + creation_time: Optional[str] = Field( + None, + description='Date + time the task was created, in RFC 3339 format.\nThis is set by the system, not the client.', + example='2020-10-02T10:00:00-05:00', + ) + + +class TesListTasksResponse(BaseModel): + tasks: List[TesTask] = Field( + ..., + description='List of tasks. These tasks will be based on the original submitted\ntask document, but with ' + 'other fields, such as the job state and\nlogging info, added/changed as the job progresses.', + ) + next_page_token: Optional[str] = Field( + None, + description='Token used to return the next page of results. This value can be used\nin the `page_token` ' + 'field of the next ListTasks request.', + ) + + +# Extra models manually added for Minimal View +class TesTaskMinimal(BaseModel): + id: str = Field( + ..., + description='Task identifier assigned by the server.', + example='job-0012345', + ) + state: TesState = Field(..., example='UNKNOWN') + + +class TesListTasksResponseMinimal(BaseModel): + tasks: List[TesTaskMinimal] = Field( + ..., + description='List of tasks. 
These tasks will be based on the original submitted\ntask document, but with '
+                    'other fields, such as the job state and\nlogging info, added/changed as the job progresses.',
+    )
diff --git a/compliance_suite/test_runner.py b/compliance_suite/test_runner.py
index eb1b523..e6e8837 100644
--- a/compliance_suite/test_runner.py
+++ b/compliance_suite/test_runner.py
@@ -3,6 +3,7 @@
 This module contains class definition for Test Runner to run the individual jobs, validate them and store their result
 """
 
+import importlib
 import json
 from typing import (
     Any,
@@ -94,7 +95,10 @@
                                  description="Check if response matches the model schema")
 
         try:
-            ENDPOINT_TO_MODEL[endpoint_model](**json_data)
+            pydantic_module: Any = importlib.import_module(
+                "compliance_suite.models.v" + self.version.replace('.', '_') + "_specs")
+            pydantic_model_class: Any = getattr(pydantic_module, ENDPOINT_TO_MODEL[endpoint_model])
+            pydantic_model_class(**json_data)   # JSON validation against Pydantic Model
             logger.info(f'{message} Schema validation successful for '
                         f'{self.job_data["operation"]} {self.job_data["endpoint"]}')
             ReportUtility.case_pass(case=report_case_schema,
@@ -170,7 +174,7 @@
                                  description="Check if response status code is 200")
 
         if response.status_code == response_status:
-            logger.info(f'{self.job_data["operation"]} {self.job_data["endpoint"]} Successful Response status code')
+            logger.info(f'{self.job_data["operation"]} {self.job_data["endpoint"]} response status code matched')
             ReportUtility.case_pass(case=report_case_status,
                                     message=f'{self.job_data["operation"]} {self.job_data["endpoint"]} Successful '
                                             f'Response status code',
@@ -178,28 +182,29 @@
         else:
             ReportUtility.case_fail(case=report_case_status,
-                                    message=f'Unsuccessful Response status code for '
-                                            f'{self.job_data["operation"]} {self.job_data["endpoint"]}',
+                                    message=f'Response status code for {self.job_data["operation"]}'
+                                            f' {self.job_data["endpoint"]} did not match',
                                     log_message="")
 
             raise TestFailureException(name="Incorrect HTTP Response Status",
-                                       message=f'{self.job_data["operation"]} {self.job_data["endpoint"]} '
-                                               f'Response status code is not 200',
+                                       message=f'Response status code for {self.job_data["operation"]}'
+                                               f' {self.job_data["endpoint"]} did not match',
                                        details=None)
 
         # Logical Schema Validation
-        if not response.text:
-            response_json: Any = {}     # Handle the Cancel Task Endpoint empty response
-        else:
-            response_json: Any = response.json()
-
-        if self.job_data["name"] in ["list_tasks", "get_task"]:
-            view_query: List[str] = [item["view"] for item in self.job_data["query_parameters"]]
-            endpoint_model: str = self.job_data["name"] + "_" + view_query[0]
-        else:
-            endpoint_model: str = self.job_data["name"]
-
-        self.validate_logic(endpoint_model, response_json, "Response")
-        self.save_storage_vars(response_json)
+        if response_status == 200:      # Validate the response body only for successful responses
+            if not response.text:
+                response_json: Any = {}     # Handle the Cancel Task Endpoint empty response
+            else:
+                response_json: Any = response.json()
+
+            if self.job_data["name"] in ["list_tasks", "get_task"]:
+                view_query: List[str] = [item["view"] for item in self.job_data["query_parameters"]]
+                endpoint_model: str = self.job_data["name"] + "_" + view_query[0]
+            else:
+                endpoint_model: str = self.job_data["name"]
+
+            self.validate_logic(endpoint_model, response_json, "Response")
+            self.save_storage_vars(response_json)
 
     def save_storage_vars(self, json_data: Any) -> None:
         """ Extract the 
keys mentioned in the YAML job from the request/response and save them in the auxiliary space.
diff --git a/docs/endpoints.md b/docs/endpoints.md
index 3d8f30d..915f9b5 100644
--- a/docs/endpoints.md
+++ b/docs/endpoints.md
@@ -38,7 +38,7 @@ status from `["COMPLETE", "EXECUTOR_ERROR", "SYSTEM_ERROR"]`, then its successful.
 A task must be present in order to be canceled. Hence, creating a task beforehand is necessary. A simple logical
 test would be just sending the request to cancel the task. However, a more detailed validation will be GET this task
 by ID and monitor the task status inside the TES server. If the task returns an appropriate status
-from `["CANCELED"]`, then its successful.
+from `["CANCELED", "CANCELING"]`, then it's successful.
 
 1. Create a new task `POST /tasks`. The ID will be stored in auxiliary space.
 2. Cancel the task `POST /tasks/{id}:cancel`
diff --git a/tests/cancel_task.yml b/tests/cancel_task.yml
index c6e483d..aad46cd 100644
--- a/tests/cancel_task.yml
+++ b/tests/cancel_task.yml
@@ -3,6 +3,7 @@ description: Job to cancel a TES Task
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - cancel task
diff --git a/tests/cancel_task_functional.yml b/tests/cancel_task_functional.yml
index e2fc14b..c65c796 100644
--- a/tests/cancel_task_functional.yml
+++ b/tests/cancel_task_functional.yml
@@ -3,6 +3,7 @@ description: Job to cancel a TES Task
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - functional
   - cancel task
diff --git a/tests/create_task.yml b/tests/create_task.yml
index 55f1b0b..63e827f 100644
--- a/tests/create_task.yml
+++ b/tests/create_task.yml
@@ -3,6 +3,7 @@ description: Job to create a new TES Task
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - create task
@@ -19,21 +20,13 @@ jobs:
   - name: create_task
     description: Create a new TES task
     endpoint: /tasks
     operation: POST
     request_body: |
       {
         "name": "CompTest",
         "description": "CompTest",
         "executors": [
           {
-            "image": "ubuntu:20.04",
+            "image": "alpine",
             "command": [
-              "TestString1",
-              "TestString2"
-            ]
-          },
-          {
-            "image": "ubuntu:20.04",
-            "command": [
-              "TestString3",
-              "TestString4"
+              "echo",
+              "hello"
             ]
           }
         ]
       }
-
     response:
       200: {}
\ No newline at end of file
diff --git a/tests/create_task_backend_parameters.yml b/tests/create_task_backend_parameters.yml
new file mode 100644
index 0000000..32dc824
--- /dev/null
+++ b/tests/create_task_backend_parameters.yml
@@ -0,0 +1,38 @@
+name: Create Tes Task Job With Backend Parameters
+description: Job to create a new TES Task with backend parameters
+service: TES
+versions:
+  - 1.1.0
+tags:
+  - logical
+  - create task
+  - create task backend parameters test
+  - all
+jobs:
+  - name: create_task
+    description: Create a new TES task
+    endpoint: /tasks
+    operation: POST
+    request_body: |
+      {
+        "name": "CompTest",
+        "description": "CompTest",
+        "executors": [
+          {
+            "image": "alpine",
+            "command": [
+              "echo",
+              "hello"
+            ]
+          }
+        ],
+        "resources": {
+          "backend_parameters": {
+            "VmSize" : "Standard_D64_v3",
+            "Caching" : "ReadWrite"
+          },
+          "backend_parameters_strict": false
+        }
+      }
+    response:
+      200: {}
diff --git a/tests/create_task_backend_parameters_negative.yml b/tests/create_task_backend_parameters_negative.yml
new file mode 100644
index 0000000..49362af
--- /dev/null
+++ b/tests/create_task_backend_parameters_negative.yml
@@ -0,0 +1,39 @@
+name: Create Tes Task Job With Backend Parameters (Negative case)
+description: Job to create a new TES Task with backend parameters (Negative case)
+service: TES
+versions:
+  - 1.1.0
+tags:
+  - logical
+  - create task
+  - create task backend parameters negative test
+  - all
+jobs:
+  - name: create_task
+    description: Create a new TES task
+    
endpoint: /tasks + operation: POST + request_body: | + { + "name": "CompTest", + "description": "CompTest", + "executors": [ + { + "image": "alpine", + "command": [ + "echo", + "hello" + ] + } + ], + "resources": { + "backend_parameters": { + "INVALID" : "PARAMETER" + }, + "backend_parameters_strict": true + } + } + response: + # Keeping response code as 400 to test intentionally wrong backend parameters + # https://github.com/elixir-cloud-aai/tes-compliance-suite/pull/29#discussion_r1108893420 + 400: {} diff --git a/tests/create_task_functional.yml b/tests/create_task_functional.yml index 0eb240b..d068789 100644 --- a/tests/create_task_functional.yml +++ b/tests/create_task_functional.yml @@ -3,6 +3,7 @@ description: Job to create a new TES Task and test functionality service: TES versions: - 1.0.0 + - 1.1.0 tags: - functional - create task diff --git a/tests/create_task_inputs.yml b/tests/create_task_inputs.yml new file mode 100644 index 0000000..45c9bc0 --- /dev/null +++ b/tests/create_task_inputs.yml @@ -0,0 +1,39 @@ +name: Create Tes Task Job With Inputs +description: Job to create a new TES Task with inputs +service: TES +versions: + - 1.0.0 + - 1.1.0 +tags: + - logical + - create task + - create task inputs test + - all +jobs: + - name: create_task + description: Create a new TES task + endpoint: /tasks + operation: POST + request_body: | + { + "name": "CompTest", + "description": "CompTest", + "executors": [ + { + "image": "alpine", + "command": [ + "echo", + "hello" + ] + } + ], + "inputs": [ + { + "url": "s3://my-object-store/file-1", + "path": "/data/file1", + "type": "FILE" + } + ] + } + response: + 200: {} diff --git a/tests/create_task_optional_filetype.yml b/tests/create_task_optional_filetype.yml new file mode 100644 index 0000000..6dd76d0 --- /dev/null +++ b/tests/create_task_optional_filetype.yml @@ -0,0 +1,43 @@ +name: Create Tes Task Job Without Filetypes +description: Job to create a new TES Task intentionally without input and output filetypes +service: TES +versions: + - 1.1.0 +tags: + - logical + - create task + - create task optional filetypes test + - all +jobs: + - name: create_task + description: Create a new TES task + endpoint: /tasks + operation: POST + request_body: | + { + "name": "CompTest", + "description": "CompTest", + "executors": [ + { + "image": "alpine", + "command": [ + "echo", + "hello" + ] + } + ], + "inputs": [ + { + "url": "s3://my-object-store/file-1", + "path": "/data/file1" + } + ], + "outputs": [ + { + "url": "s3://my-object-store/outfile-1", + "path": "/data/outfile" + } + ] + } + response: + 200: {} diff --git a/tests/create_task_outputs.yml b/tests/create_task_outputs.yml new file mode 100644 index 0000000..8a5681e --- /dev/null +++ b/tests/create_task_outputs.yml @@ -0,0 +1,39 @@ +name: Create Tes Task Job With Outputs +description: Job to create a new TES Task with outputs +service: TES +versions: + - 1.0.0 + - 1.1.0 +tags: + - logical + - create task + - create task outputs test + - all +jobs: + - name: create_task + description: Create a new TES task + endpoint: /tasks + operation: POST + request_body: | + { + "name": "CompTest", + "description": "CompTest", + "executors": [ + { + "image": "alpine", + "command": [ + "echo", + "hello" + ] + } + ], + "outputs": [ + { + "url": "s3://my-object-store/outfile-1", + "path": "/data/outfile", + "type": "FILE" + } + ] + } + response: + 200: {} diff --git a/tests/create_task_streamable.yml b/tests/create_task_streamable.yml new file mode 100644 index 0000000..8bf1bd7 --- /dev/null +++ 
b/tests/create_task_streamable.yml
@@ -0,0 +1,40 @@
+name: Create Tes Task Job With Input Streamable Flag
+description: Job to create a new TES Task with streamable flag set in input
+service: TES
+versions:
+  - 1.1.0
+tags:
+  - logical
+  - create task
+  - create task streamable test
+  - all
+jobs:
+  - name: create_task
+    description: Create a new TES task
+    endpoint: /tasks
+    operation: POST
+    request_body: |
+      {
+        "name": "CompTest",
+        "description": "CompTest",
+        "executors": [
+          {
+            "image": "alpine",
+            "command": [
+              "echo",
+              "hello"
+            ]
+          }
+        ],
+        "inputs": [
+          {
+            "name": "access-file",
+            "description": "Access file resource via streaming",
+            "url": "s3://my-object-store/file1",
+            "path": "/data/file1",
+            "streamable": true
+          }
+        ]
+      }
+    response:
+      200: {}
diff --git a/tests/get_task_basic.yml b/tests/get_task_basic.yml
index 1e0702a..0e370a7 100644
--- a/tests/get_task_basic.yml
+++ b/tests/get_task_basic.yml
@@ -3,6 +3,7 @@ description: Job to retrieve the Basic view of TES Task
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - get task
diff --git a/tests/get_task_full.yml b/tests/get_task_full.yml
index 11595e7..40d0d28 100644
--- a/tests/get_task_full.yml
+++ b/tests/get_task_full.yml
@@ -3,6 +3,7 @@ description: Job to retrieve the Full view of TES Task
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - get task
diff --git a/tests/get_task_minimal.yml b/tests/get_task_minimal.yml
index f4baa41..7a5dc01 100644
--- a/tests/get_task_minimal.yml
+++ b/tests/get_task_minimal.yml
@@ -3,6 +3,7 @@ description: Job to retrieve the Minimal view of TES Task
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - get task
diff --git a/tests/list_tasks_basic.yml b/tests/list_tasks_basic.yml
index 356cd68..05be28e 100644
--- a/tests/list_tasks_basic.yml
+++ b/tests/list_tasks_basic.yml
@@ -3,6 +3,7 @@ description: Job to retrieve the list of Basic view of TES tasks
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - list tasks
diff --git a/tests/list_tasks_full.yml b/tests/list_tasks_full.yml
index 75838a8..4f6b668 100644
--- a/tests/list_tasks_full.yml
+++ b/tests/list_tasks_full.yml
@@ -3,6 +3,7 @@ description: Job to retrieve the list of Full view of TES tasks
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - list tasks
diff --git a/tests/list_tasks_minimal.yml b/tests/list_tasks_minimal.yml
index 50bb143..09920c9 100644
--- a/tests/list_tasks_minimal.yml
+++ b/tests/list_tasks_minimal.yml
@@ -3,6 +3,7 @@ description: Job to retrieve the list of Minimal view of TES tasks
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - list tasks
diff --git a/tests/service_info.yml b/tests/service_info.yml
index e9ecdee..d9fc3c9 100644
--- a/tests/service_info.yml
+++ b/tests/service_info.yml
@@ -3,6 +3,7 @@ description: Job to retrieve the service info
 service: TES
 versions:
   - 1.0.0
+  - 1.1.0
 tags:
   - logical
   - service info
diff --git a/unittests/data/__init__.py b/unittests/data/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/unittests/data/constants.py b/unittests/data/constants.py
new file mode 100644
index 0000000..879d917
--- /dev/null
+++ b/unittests/data/constants.py
@@ -0,0 +1,7 @@
+"""
+Mock data for unit testing
+"""
+
+TEST_SERVICE = "test"
+TEST_URL = "https://test.com/"
+TEST_VERSIONS = ["1.0.0", "1.1.0"]
diff --git a/unittests/test_cli.py b/unittests/test_cli.py
index 511002a..1c5cc7e 100644
--- a/unittests/test_cli.py
+++ b/unittests/test_cli.py
@@ -3,7 +3,6 @@
 This module is to test the entry point CLI functionality
 """
 
-import unittest from unittest.mock import ( MagicMock, mock_open, @@ -11,13 +10,18 @@ ) from click.testing import CliRunner +import pytest from compliance_suite.cli import main, report from compliance_suite.job_runner import JobRunner from compliance_suite.report_server import ReportServer +from unittests.data.constants import ( + TEST_URL, + TEST_VERSIONS +) -class TestJobRunner(unittest.TestCase): +class TestJobRunner: def test_main(self): """asserts that the 'main' method of cli module can be executed""" @@ -37,25 +41,27 @@ def test_report_no_version(self): """ asserts if the application raises Exception if no server is provided""" runner = CliRunner() - result = runner.invoke(report, ['--server', 'https://test.com/']) + result = runner.invoke(report, ['--server', TEST_URL]) assert result.exit_code == 1 + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(JobRunner, "generate_report") @patch.object(JobRunner, "run_jobs") - def test_report_no_tag(self, mock_run_jobs, mock_generate_reports): + def test_report_no_tag(self, mock_run_jobs, mock_generate_reports, version): """ asserts if the application is invoked if no tags provided""" with patch('builtins.open', mock_open()): mock_run_jobs.return_value = {} mock_generate_reports.return_value = '{"test": "test"}' runner = CliRunner() - result = runner.invoke(report, ['--server', 'https://test.com/', '--version', '1.0.0']) + result = runner.invoke(report, ['--server', TEST_URL, '--version', version]) assert result.exit_code == 0 + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(ReportServer, 'serve_thread') @patch.object(JobRunner, "generate_report") @patch.object(JobRunner, "run_jobs") - def test_report(self, mock_run_jobs, mock_generate_reports, mock_report_server): + def test_report(self, mock_run_jobs, mock_generate_reports, mock_report_server, version): """ asserts if the application is invoked if a tag is provided""" with patch('builtins.open', mock_open()): @@ -63,7 +69,7 @@ def test_report(self, mock_run_jobs, mock_generate_reports, mock_report_server): mock_generate_reports.return_value = '{"test": "test"}' mock_report_server.return_value = MagicMock() runner = CliRunner() - result = runner.invoke(report, ['--server', 'https://test.com/', '--version', '1.0.0', '--tag', 'All', + result = runner.invoke(report, ['--server', TEST_URL, '--version', version, '--tag', 'All', '--output_path', "path/to/output", '--serve', '--port', 9090, '--uptime', 1000]) assert result.exit_code == 0 diff --git a/unittests/test_functions.py b/unittests/test_functions.py index 32501a0..aa68235 100644 --- a/unittests/test_functions.py +++ b/unittests/test_functions.py @@ -124,6 +124,16 @@ def test_check_poll_cancel(self): assert client.check_poll(resp) is True + def test_check_poll_canceling(self): + """ Asserts the check poll function to be True for status code 200 and CANCELING state""" + + client = Client() + client.check_cancel = True + resp = MagicMock(status_code=200) + resp.json.return_value = {"state": "CANCELING"} + + assert client.check_poll(resp) is True + def test_check_poll_fail(self): """ Asserts the check poll function to be False for status code not equal to 200""" diff --git a/unittests/test_job_runner.py b/unittests/test_job_runner.py index 45182f2..5ce59d5 100644 --- a/unittests/test_job_runner.py +++ b/unittests/test_job_runner.py @@ -4,12 +4,12 @@ """ import os -import unittest from unittest.mock import ( MagicMock, patch ) +import pytest import yaml from compliance_suite.exceptions.compliance_exception 
import ( @@ -18,6 +18,10 @@ ) from compliance_suite.job_runner import JobRunner from compliance_suite.test_runner import TestRunner +from unittests.data.constants import ( + TEST_URL, + TEST_VERSIONS +) SCHEMA_PATH = os.path.join(os.getcwd(), "tests", "template", "test_template_schema.json") @@ -29,57 +33,71 @@ YAML_WRONG_SCHEMA = os.path.join(os.getcwd(), "unittests", "data", "tests", "wrong_schema_yaml.yml") -class TestJobRunner(unittest.TestCase): +class TestJobRunner: - def test_generate_summary(self): + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_generate_summary(self, version): """ Checks if generate summary functions runs successfully""" - job_runner_object = JobRunner('https://test.com/', 'v1.0', []) - job_runner_object.generate_summary() - assert True + job_runner_object = JobRunner(TEST_URL, version, []) + assert job_runner_object.generate_summary() is None - def test_generate_report(self): + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_generate_report(self, version): """ Checks if generate summary functions runs successfully""" - job_runner_object = JobRunner('https://test.com/', 'v1.0', []) + job_runner_object = JobRunner(TEST_URL, version, []) job_runner_object.set_report(MagicMock()) job_runner_object.generate_report() assert True - def test_tag_matcher_success(self): - job_runner_object = JobRunner('https://test.com/', 'v1.0', ["tag"]) + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_tag_matcher_success(self, version): + job_runner_object = JobRunner(TEST_URL, version, ["tag"]) assert job_runner_object.tag_matcher(["tag", "tag1", "tag2"]) is True - def test_tag_matcher_fail(self): - job_runner_object = JobRunner('https://test.com/', 'v1.0', ["NoMatch"]) + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_tag_matcher_fail(self, version): + job_runner_object = JobRunner(TEST_URL, version, ["NoMatch"]) assert job_runner_object.tag_matcher(["tag", "tag1", "tag2"]) is False + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_version_matcher_success(self, version): + job_runner_object = JobRunner(TEST_URL, version, ["tag"]) + assert job_runner_object.version_matcher(TEST_VERSIONS) is True + + def test_version_matcher_fail(self): + job_runner_object = JobRunner(TEST_URL, '0.0.0', ["tag"]) + assert job_runner_object.version_matcher(TEST_VERSIONS) is False + + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch("os.path.join", return_value=SCHEMA_PATH) - def test_validate_job_success(self, mock_os): + def test_validate_job_success(self, mock_os, version): """ Asserts validate job functions for proper YAML schema""" with open(YAML_TEST_PATH_SUCCESS, "r") as f: yaml_data = yaml.safe_load(f) - job_runner_object = JobRunner('https://test.com/', 'v1.0', []) - job_runner_object.validate_job(yaml_data, "success_01.yml") - assert True + job_runner_object = JobRunner(TEST_URL, version, []) + assert job_runner_object.validate_job(yaml_data, "success_01.yml") is None + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch('os.path.join', return_value=SCHEMA_PATH) - def test_validate_job_failure(self, mock_os): + def test_validate_job_failure(self, mock_os, version): """ Asserts validate_job() function for incorrect YAML schema""" with open(YAML_WRONG_SCHEMA, "r") as f: yaml_data = yaml.safe_load(f) - with self.assertRaises(JobValidationException): - job_runner_object = JobRunner('https://test.com/', 'v1.0', []) + with pytest.raises(JobValidationException): + job_runner_object = JobRunner(TEST_URL, version, []) 
job_runner_object.validate_job(yaml_data, "wrong_schema_yaml.yml") + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(JobRunner, 'validate_job') @patch.object(TestRunner, 'run_tests') @patch('os.path.join') - def test_run_jobs_success(self, mock_os, mock_run_tests, mock_validate_job): + def test_run_jobs_success(self, mock_os, mock_run_tests, mock_validate_job, version): """ Asserts run_jobs() for unit test YAML files""" mock_run_tests.side_effect = [TestRunnerException(name="test", message="test", details="test"), None, None] @@ -88,6 +106,5 @@ def test_run_jobs_success(self, mock_os, mock_run_tests, mock_validate_job): mock_os.side_effect = [YAML_TEST_PATH, YAML_TEST_PATH_FAIL, YAML_TEST_PATH_INVALID, YAML_TEST_PATH_SKIP, YAML_TEST_PATH_SUCCESS] tag = ["all"] - job_runner_object = JobRunner('https://test.com/', 'v1.0', tag) - job_runner_object.run_jobs() - assert True + job_runner_object = JobRunner(TEST_URL, version, tag) + assert job_runner_object.run_jobs() is None diff --git a/unittests/test_report_server.py b/unittests/test_report_server.py index ab254fa..e67d7b2 100644 --- a/unittests/test_report_server.py +++ b/unittests/test_report_server.py @@ -22,8 +22,7 @@ def test_render_html(self): """Asserts if successfully able to render HTML from Jinja2 templates""" report_server = ReportServer(web_dir=WEB_DIR) - report_server.render_html() - assert True + assert report_server.render_html() is None @patch('socketserver.TCPServer') @patch('webbrowser.open') @@ -34,8 +33,7 @@ def test_start_local_server(self, mock_web, mock_serve): mock_serve.return_value = MagicMock() report_server = ReportServer(WEB_DIR) - report_server.start_local_server(9090, 10) - assert True + assert report_server.start_local_server(9090, 10) is None @patch.object(ReportServer, 'render_html') @patch('threading.Thread') @@ -47,8 +45,7 @@ def test_serve_thread(self, mock_server, mock_render_html): report_server = ReportServer(web_dir=WEB_DIR) report_server.local_server = MagicMock() - report_server.serve_thread(9090, 1) - assert True + assert report_server.serve_thread(9090, 1) is None @patch.object(ReportServer, 'render_html') @patch('threading.Thread') @@ -60,6 +57,4 @@ def test_serve_thread_keyboard_interrupt(self, mock_server, mock_render_html): report_server = ReportServer(web_dir=WEB_DIR) report_server.local_server = MagicMock() - report_server.serve_thread(9090, 100) - - assert True + assert report_server.serve_thread(9090, 100) is None diff --git a/unittests/test_test_runner.py b/unittests/test_test_runner.py index ac5aa6b..60a79cf 100644 --- a/unittests/test_test_runner.py +++ b/unittests/test_test_runner.py @@ -3,26 +3,33 @@ This module is to test the Test Runner class and its methods """ -import unittest from unittest.mock import ( MagicMock, patch ) +import pytest + from compliance_suite.exceptions.compliance_exception import ( JobValidationException, TestFailureException ) from compliance_suite.functions.client import Client from compliance_suite.test_runner import TestRunner +from unittests.data.constants import ( + TEST_SERVICE, + TEST_URL, + TEST_VERSIONS +) -class TestTestRunner(unittest.TestCase): +class TestTestRunner: - def test_validate_logic_success(self): + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_validate_logic_success(self, version): """ Asserts validate_logic() function for successful schema validation to API Model""" - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) test_runner.set_job_data( { 
"operation": "test", @@ -43,14 +50,13 @@ def test_validate_logic_success(self): "version": "test" } - test_runner.validate_logic("service_info", service_info_response, "Response") - - assert True + assert test_runner.validate_logic("service_info", service_info_response, "Response") is None - def test_validate_logic_failure(self): + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_validate_logic_failure(self, version): """ Asserts validate_logic() function for unsuccessful schema validation to API Model""" - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) test_runner.set_job_data( { "operation": "test", @@ -58,16 +64,17 @@ def test_validate_logic_failure(self): } ) test_runner.report_test = MagicMock() - with self.assertRaises(TestFailureException): + with pytest.raises(TestFailureException): test_runner.validate_logic("service_info", {}, "Response") + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(TestRunner, "validate_logic") - def test_validate_request_body_success(self, mock_validate_job): + def test_validate_request_body_success(self, mock_validate_job, version): """ Asserts validate_request_body() function for successful JSON format and schema validation to API Model""" mock_validate_job.return_value = {} - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) test_runner.set_job_data( { "name": "test", @@ -76,13 +83,13 @@ def test_validate_request_body_success(self, mock_validate_job): } ) test_runner.report_test = MagicMock() - test_runner.validate_request_body("{}") - assert True + assert test_runner.validate_request_body("{}") is None - def test_validate_request_body_failure(self): + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_validate_request_body_failure(self, version): """ Asserts validate_request_body() function for unsuccessful JSON format""" - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) test_runner.set_job_data( { "operation": "test", @@ -90,16 +97,17 @@ def test_validate_request_body_failure(self): } ) test_runner.report_test = MagicMock() - with self.assertRaises(JobValidationException): + with pytest.raises(JobValidationException): test_runner.validate_request_body("{") + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(TestRunner, "validate_logic") - def test_validate_response_success_get(self, mock_validate_job): + def test_validate_response_success_get(self, mock_validate_job, version): """ Asserts validate_response() function for successful response and schema validation to API Model""" mock_validate_job.return_value = {} - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) test_runner.set_job_data( { "name": "list_tasks", @@ -112,16 +120,16 @@ def test_validate_response_success_get(self, mock_validate_job): test_runner.report_test = MagicMock() resp = MagicMock(status_code=200, text="") - test_runner.validate_response(resp) - assert True + assert test_runner.validate_response(resp) is None + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(TestRunner, "validate_logic") - def test_validate_response_success(self, mock_validate_job): + def test_validate_response_success(self, mock_validate_job, version): """ Asserts validate_response() function for successful response and schema validation to API Model""" mock_validate_job.return_value = {} - test_runner = 
TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) test_runner.set_job_data( { "name": "test", @@ -133,13 +141,13 @@ def test_validate_response_success(self, mock_validate_job): test_runner.report_test = MagicMock() resp = MagicMock(status_code=200) - test_runner.validate_response(resp) - assert True + assert test_runner.validate_response(resp) is None - def test_validate_response_failure(self): + @pytest.mark.parametrize("version", TEST_VERSIONS) + def test_validate_response_failure(self, version): """ Asserts validate_response() function for unsuccessful response""" - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) test_runner.set_job_data( { "operation": "test", @@ -150,18 +158,19 @@ def test_validate_response_failure(self): test_runner.report_test = MagicMock() resp = MagicMock(status_code=400) - with self.assertRaises(TestFailureException): + with pytest.raises(TestFailureException): test_runner.validate_response(resp) + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(Client, "poll_request") @patch.object(TestRunner, "validate_response") - def test_run_jobs_get_task(self, mock_validate_response, mock_client): + def test_run_jobs_get_task(self, mock_validate_response, mock_client, version): """Assert the run job method for get task to be successful""" mock_validate_response.return_value = {} mock_client.return_value = MagicMock() - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) job_data = { "name": "get_task", "description": "test", @@ -174,14 +183,13 @@ def test_run_jobs_get_task(self, mock_validate_response, mock_client): } } test_runner.set_auxiliary_space("id", "1234") - test_runner.run_tests(job_data, MagicMock()) - - assert True + assert test_runner.run_tests(job_data, MagicMock()) is None + @pytest.mark.parametrize("version", TEST_VERSIONS) @patch.object(Client, "send_request") @patch.object(TestRunner, "validate_request_body") @patch.object(TestRunner, "validate_logic") - def test_run_jobs_create_task(self, mock_validate_logic, mock_validate_request_body, mock_client): + def test_run_jobs_create_task(self, mock_validate_logic, mock_validate_request_body, mock_client, version): """Assert the run job method for create task to be successful""" mock_validate_logic.return_value = {} @@ -189,7 +197,7 @@ def test_run_jobs_create_task(self, mock_validate_logic, mock_validate_request_b resp = MagicMock(status_code=200, text='{"id": "1234"}') mock_client.return_value = resp - test_runner = TestRunner("test", "test", "v1.0") + test_runner = TestRunner(TEST_SERVICE, TEST_URL, version) job_data = { "name": "create_task", "description": "test", @@ -201,6 +209,4 @@ def test_run_jobs_create_task(self, mock_validate_logic, mock_validate_request_b }, "response": {"200": ""} } - test_runner.run_tests(job_data, MagicMock()) - - assert True + assert test_runner.run_tests(job_data, MagicMock()) is None
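Editor's note: below is a minimal, illustrative sketch (not part of the patch) of the version-aware model lookup this change set introduces. ENDPOINT_TO_MODEL now maps endpoints to model names as strings, and TestRunner.validate_logic() resolves those names at runtime against the spec-version-specific module (v1_0_0_specs or v1_1_0_specs), so supporting TES v1.1.0 only required adding a new models module. The `resolve_model` helper name is hypothetical, for illustration; the suite performs these steps inline in validate_logic().

```python
import importlib
from typing import Any

# Excerpt from compliance_suite/constants/constants.py after this patch:
# model class names are stored as strings, not imported classes.
ENDPOINT_TO_MODEL = {'service_info': 'TesServiceInfo'}


def resolve_model(endpoint_model: str, version: str) -> Any:
    """Return the pydantic model class for an endpoint and TES spec version.

    A version like "1.1.0" maps to compliance_suite.models.v1_1_0_specs,
    mirroring the importlib logic added to TestRunner.validate_logic().
    """
    pydantic_module = importlib.import_module(
        "compliance_suite.models.v" + version.replace('.', '_') + "_specs")
    return getattr(pydantic_module, ENDPOINT_TO_MODEL[endpoint_model])


# Usage sketch: pydantic raises ValidationError if the response body
# does not satisfy the schema of the requested spec version.
# model_class = resolve_model('service_info', '1.1.0')
# model_class(**response_json)
```

The same version string drives the new JobRunner.version_matcher(), so a YAML test runs only when its `versions` list contains the user-supplied spec version, and the CANCELING and PREEMPTED states accepted by check_poll() correspond to the TesState members added in the v1.1.0 models.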