From 4b3216f310f2c2f426e17ef3d1949851c052ba8a Mon Sep 17 00:00:00 2001
From: Marion Deveaud
Date: Sun, 6 Aug 2023 19:08:19 +0200
Subject: [PATCH] refactor(jobs): add new /all endpoint, increase timeout
 robustness

---
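Notes, not part of the commit message:

With this change list_jobs() hits GET /jobs by default and GET /jobs/all
when the new `all` flag is set. The /jobs/all route is restricted to
admin users; other tokens get HTTP 403, which now surfaces as a
FossologyApiError. A minimal usage sketch against this branch (the
server URL and token below are placeholders, and the two-argument
Fossology() constructor is an assumption):

    from fossology import Fossology
    from fossology.exceptions import FossologyApiError

    foss = Fossology("https://fossology.example.com/repo/api/v1", "<token>")
    try:
        # Admin-only: jobs from every user, fetching all result pages.
        jobs, pages = foss.list_jobs(all=True, all_pages=True)
    except FossologyApiError:
        # Raised with "Access denied to /jobs/all endpoint" on HTTP 403.
        jobs, pages = foss.list_jobs(all_pages=True)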
 fossology/jobs.py    | 52 +++++++++++++++++++++++++++++---------------
 fossology/uploads.py |  2 +-
 tests/conftest.py    |  2 +-
 tests/test_jobs.py   | 32 ++++++++++++++++++++++-----
 4 files changed, 63 insertions(+), 25 deletions(-)

diff --git a/fossology/jobs.py b/fossology/jobs.py
index e9901f1..8d48771 100644
--- a/fossology/jobs.py
+++ b/fossology/jobs.py
@@ -4,9 +4,10 @@
 import json
 import logging
 import time
+from typing import Optional
 
 from fossology.exceptions import AuthorizationError, FossologyApiError
-from fossology.obj import Job, get_options
+from fossology.obj import Folder, Group, Job, Upload, get_options
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
@@ -16,21 +17,28 @@ class Jobs:
     """Class dedicated to all "jobs" related endpoints"""
 
     def list_jobs(
-        self, upload=None, page_size=100, page=1, all_pages=False
-    ) -> tuple[list, int]:
-        """Get all available jobs
+        self,
+        upload: Optional[Upload] = None,
+        all: bool = False,
+        page_size: int = 100,
+        page: int = 1,
+        all_pages: bool = False,
+    ) -> tuple[list[Job], int]:
+        """Get all jobs created by the logged-in user
 
         API Endpoint: GET /jobs
 
         The answer is limited to the first page of 20 results by default
 
         :param upload: list only jobs of the given upload (default: None)
-        :param page_size: the maximum number of results per page
-        :param page: the number of pages to be retrieved
+        :param all: list all jobs created by all users, only available for admins (default: False)
+        :param page_size: the maximum number of results per page (default: 100)
+        :param page: the number of pages to be retrieved (default: 1)
         :param all_pages: get all jobs (default: False)
         :type upload: Upload
-        :type page_size: int (default: 100)
-        :type page: int (default: 1)
+        :type all: boolean
+        :type page_size: int
+        :type page: int
         :type all_pages: boolean
         :return: a tuple containing the list of jobs and the total number of pages
         :rtype: Tuple(list of Job, int)
@@ -42,6 +50,9 @@ def list_jobs(
             params["upload"] = upload.id
 
         jobs_list = list()
+        jobs_endpoint = f"{self.api}/jobs"
+        if all:
+            jobs_endpoint += "/all"
         if all_pages:
             # will be reset after the total number of pages has been retrieved from the API
             x_total_pages = 2
@@ -49,9 +60,7 @@ def list_jobs(
             x_total_pages = page
         while page <= x_total_pages:
             headers["page"] = str(page)
-            response = self.session.get(
-                f"{self.api}/jobs", params=params, headers=headers
-            )
+            response = self.session.get(jobs_endpoint, params=params, headers=headers)
             if response.status_code == 200:
                 for job in response.json():
                     jobs_list.append(Job.from_json(job))
@@ -62,23 +71,26 @@ def list_jobs(
                     )
                     return jobs_list, x_total_pages
                 page += 1
+            elif response.status_code == 403:
+                description = "Access denied to /jobs/all endpoint"
+                raise FossologyApiError(description, response)
             else:
                 description = f"Unable to retrieve the list of jobs from page {page}"
                 raise FossologyApiError(description, response)
         logger.info(f"Retrieved all {x_total_pages} pages of jobs")
         return jobs_list, x_total_pages
 
-    def detail_job(self, job_id, wait=False, timeout=30) -> Job:
+    def detail_job(self, job_id: int, wait: bool = False, timeout: int = 10) -> Job:
         """Get detailed information about a job
 
         API Endpoint: GET /jobs/{id}
 
         :param job_id: the id of the job
         :param wait: wait until the job is finished (default: False)
-        :param timeout: stop waiting after x seconds (default: 30)
+        :param timeout: stop waiting after x seconds (default: 10)
         :type: int
         :type wait: boolean
-        :type timeout: 30
+        :type timeout: int
         :return: the job data
         :rtype: Job
         :raises FossologyApiError: if the REST call failed
@@ -105,7 +117,13 @@ def detail_job(self, job_id, wait=False, timeout=30) -> Job:
             raise FossologyApiError(description, response)
 
     def schedule_jobs(
-        self, folder, upload, spec, group=None, wait=False, timeout=30
+        self,
+        folder: Folder,
+        upload: Upload,
+        spec: dict,
+        group: Optional[Group] = None,
+        wait: bool = False,
+        timeout: int = 10,
     ) -> Job:
         """Schedule jobs for a specific upload
 
@@ -149,13 +167,13 @@ def schedule_jobs(
         :param spec: the job specification
         :param group: the group name to choose while scheduling jobs (default: None)
        :param wait: wait for the scheduled job to finish (default: False)
-        :param timeout: stop waiting after x seconds (default: 30)
+        :param timeout: stop waiting after x seconds (default: 10)
         :type upload: Upload
         :type folder: Folder
         :type spec: dict
         :type group: string
         :type wait: boolean
-        :type timeout: 30
+        :type timeout: int
         :return: the job id
         :rtype: Job
         :raises FossologyApiError: if the REST call failed
diff --git a/fossology/uploads.py b/fossology/uploads.py
index b356280..5bea36b 100644
--- a/fossology/uploads.py
+++ b/fossology/uploads.py
@@ -439,7 +439,7 @@ def delete_upload(self, upload, group=None):
         if group:
             headers["groupName"] = group
         response = self.session.delete(
-            f"{self.api}/uploads/{upload.id}", headers=headers
+            f"{self.api}/uploads/{upload.id}", headers=headers, timeout=5
         )
 
         if response.status_code == 202:
diff --git a/tests/conftest.py b/tests/conftest.py
index 53e3b1b..34e9f4d 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -32,7 +32,7 @@ def jobs_lookup(foss: fossology.Fossology, upload: Upload):
             f"Waiting for job {job.name} with status {job.status} to complete (eta: {job.eta})"
         )
         job = foss.detail_job(job.id)
-        time.sleep(5)
+        time.sleep(10)
 
 
 @pytest.fixture(scope="session")
diff --git a/tests/test_jobs.py b/tests/test_jobs.py
index e0b3442..5cd6f22 100644
--- a/tests/test_jobs.py
+++ b/tests/test_jobs.py
@@ -3,6 +3,7 @@
 import secrets
 from typing import Dict
+from unittest.mock import Mock
 
 import pytest
 import responses
@@ -46,6 +47,17 @@ def test_schedule_jobs(
     assert jobs[0].id == job.id
 
 
+def test_detail_job_wait_completed(
+    foss: Fossology, upload_with_jobs: Upload, monkeypatch: pytest.MonkeyPatch
+):
+    mocked_logger = Mock()
+    monkeypatch.setattr("fossology.jobs.logger", mocked_logger)
+    jobs, _ = foss.list_jobs(upload=upload_with_jobs)
+    job = foss.detail_job(jobs[0].id, wait=True, timeout=10)
+    assert job.status == "Completed"
+    mocked_logger.debug.assert_called_once_with(f"Job {job.id} has completed")
+
+
 @responses.activate
 def test_schedule_job_error(foss_server: str, foss: Fossology):
     responses.add(responses.POST, f"{foss_server}/api/v1/jobs", status=404)
@@ -64,6 +76,14 @@ def test_list_jobs_error(foss_server: str, foss: Fossology):
     assert "Unable to retrieve the list of jobs from page 1" in str(excinfo.value)
 
 
+@responses.activate
+def test_list_all_jobs_access_denied(foss_server: str, foss: Fossology):
+    responses.add(responses.GET, f"{foss_server}/api/v1/jobs/all", status=403)
+    with pytest.raises(FossologyApiError) as excinfo:
+        foss.list_jobs(all=True)
+    assert "Access denied to /jobs/all endpoint" in str(excinfo.value)
+
+
 @responses.activate
 def test_detail_job_error(foss_server: str, foss: Fossology):
     job_id = secrets.randbelow(1000)
@@ -78,6 +98,12 @@ def test_detail_job_error(foss_server: str, foss: Fossology):
 
 
 def test_paginated_list_jobs(foss: Fossology, upload_with_jobs: Upload):
+    jobs, total_pages = foss.list_jobs(
+        upload=upload_with_jobs, page_size=1, all_pages=True
+    )
+    assert len(jobs) == 3
+    assert total_pages == 3
+
     jobs, total_pages = foss.list_jobs(upload=upload_with_jobs, page_size=1, page=1)
     assert len(jobs) == 1
     assert total_pages == 3
@@ -89,9 +115,3 @@ def test_paginated_list_jobs(foss: Fossology, upload_with_jobs: Upload):
     jobs, total_pages = foss.list_jobs(upload=upload_with_jobs, page_size=2, page=1)
     assert len(jobs) == 2
     assert total_pages == 2
-
-    jobs, total_pages = foss.list_jobs(
-        upload=upload_with_jobs, page_size=1, all_pages=True
-    )
-    assert len(jobs) == 3
-    assert total_pages == 3
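--
Usage note, not part of the patch: detail_job() and schedule_jobs() now
give up after 10 seconds by default instead of 30, so callers that need
the old behaviour must pass timeout explicitly. A hedged sketch (assumes
existing foss, folder and upload objects; the one-agent job spec is
illustrative, the full spec format is in the schedule_jobs() docstring):

    # Schedule an ojo scan and block until it finishes, keeping the
    # previous 30-second budget instead of the new 10-second default.
    spec = {"analysis": {"ojo": True}}
    job = foss.schedule_jobs(folder, upload, spec, wait=True, timeout=30)
    job = foss.detail_job(job.id, wait=True, timeout=30)
    print(job.status)  # "Completed" once the scheduler is done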