Merge 4b3216f into cc1c7d7
deveaud-m committed Aug 6, 2023
2 parents cc1c7d7 + 4b3216f commit c39cc09
Showing 4 changed files with 63 additions and 25 deletions.
52 changes: 35 additions & 17 deletions fossology/jobs.py
@@ -4,9 +4,10 @@
 import json
 import logging
 import time
+from typing import Optional

 from fossology.exceptions import AuthorizationError, FossologyApiError
-from fossology.obj import Job, get_options
+from fossology.obj import Folder, Group, Job, Upload, get_options

 logger = logging.getLogger(__name__)
 logger.setLevel(logging.DEBUG)
@@ -16,21 +17,28 @@ class Jobs:
     """Class dedicated to all "jobs" related endpoints"""

     def list_jobs(
-        self, upload=None, page_size=100, page=1, all_pages=False
-    ) -> tuple[list, int]:
-        """Get all available jobs
+        self,
+        upload: Optional[Upload] = None,
+        all: bool = False,
+        page_size: int = 100,
+        page: int = 1,
+        all_pages: bool = False,
+    ) -> tuple[list[Job], int]:
+        """Get all jobs created by the logged in user

         API Endpoint: GET /jobs

         The answer is limited to the first page of 20 results by default

         :param upload: list only jobs of the given upload (default: None)
-        :param page_size: the maximum number of results per page
-        :param page: the number of pages to be retrieved
+        :param all: list all jobs created by all users, only available for admins (default: False)
+        :param page_size: the maximum number of results per page (default: 100)
+        :param page: the number of pages to be retrieved (default: 1)
         :param all_pages: get all jobs (default: False)
         :type upload: Upload
-        :type page_size: int (default: 100)
-        :type page: int (default: 1)
+        :type all: boolean
+        :type page_size: int
+        :type page: int
         :type all_pages: boolean
         :return: a tuple containing the list of jobs and the total number of pages
         :rtype: Tuple(list of Job, int)
@@ -42,16 +50,17 @@ def list_jobs(
             params["upload"] = upload.id

         jobs_list = list()
+        jobs_endpoint = f"{self.api}/jobs"
+        if all:
+            jobs_endpoint += "/all"
         if all_pages:
             # will be reset after the total number of pages has been retrieved from the API
             x_total_pages = 2
         else:
             x_total_pages = page
         while page <= x_total_pages:
             headers["page"] = str(page)
-            response = self.session.get(
-                f"{self.api}/jobs", params=params, headers=headers
-            )
+            response = self.session.get(jobs_endpoint, params=params, headers=headers)
             if response.status_code == 200:
                 for job in response.json():
                     jobs_list.append(Job.from_json(job))
@@ -62,23 +71,26 @@
                     )
                     return jobs_list, x_total_pages
                 page += 1
+            elif response.status_code == 403:
+                description = "Access denied to /jobs/all endpoint"
+                raise FossologyApiError(description, response)
             else:
                 description = f"Unable to retrieve the list of jobs from page {page}"
                 raise FossologyApiError(description, response)
         logger.info(f"Retrieved all {x_total_pages} pages of jobs")
         return jobs_list, x_total_pages

-    def detail_job(self, job_id, wait=False, timeout=30) -> Job:
+    def detail_job(self, job_id: int, wait: bool = False, timeout: int = 10) -> Job:
         """Get detailed information about a job

         API Endpoint: GET /jobs/{id}

         :param job_id: the id of the job
         :param wait: wait until the job is finished (default: False)
-        :param timeout: stop waiting after x seconds (default: 30)
+        :param timeout: stop waiting after x seconds (default: 10)
         :type: int
         :type wait: boolean
-        :type timeout: 30
+        :type timeout: 10
         :return: the job data
         :rtype: Job
         :raises FossologyApiError: if the REST call failed
@@ -105,7 +117,13 @@ def detail_job(self, job_id, wait=False, timeout=30) -> Job:
             raise FossologyApiError(description, response)

     def schedule_jobs(
-        self, folder, upload, spec, group=None, wait=False, timeout=30
+        self,
+        folder: Folder,
+        upload: Upload,
+        spec: dict,
+        group: Group = None,
+        wait: bool = False,
+        timeout: int = 10,
     ) -> Job:
         """Schedule jobs for a specific upload
@@ -149,13 +167,13 @@ def schedule_jobs(
         :param spec: the job specification
         :param group: the group name to choose while scheduling jobs (default: None)
         :param wait: wait for the scheduled job to finish (default: False)
-        :param timeout: stop waiting after x seconds (default: 30)
+        :param timeout: stop waiting after x seconds (default: 10)
         :type upload: Upload
         :type folder: Folder
         :type spec: dict
         :type group: string
         :type wait: boolean
-        :type timeout: 30
+        :type timeout: 10
         :return: the job id
         :rtype: Job
         :raises FossologyApiError: if the REST call failed
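
The new `all` flag switches list_jobs to the admin-only GET /jobs/all endpoint, and `all_pages` keeps fetching until the X-TOTAL-PAGES header reported by the API is exhausted. A minimal usage sketch, assuming an already-instantiated client named `foss` (the client setup is not part of this change):

    from fossology.exceptions import FossologyApiError

    try:
        # all=True targets GET /jobs/all; all_pages=True follows the
        # pagination until every page has been fetched.
        jobs, pages = foss.list_jobs(all=True, all_pages=True)
        print(f"{len(jobs)} jobs across {pages} pages")
    except FossologyApiError:
        # Non-admin tokens get a 403, raised as
        # "Access denied to /jobs/all endpoint".
        print("admin rights are required for /jobs/all")
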
2 changes: 1 addition & 1 deletion fossology/uploads.py
@@ -439,7 +439,7 @@ def delete_upload(self, upload, group=None):
         if group:
             headers["groupName"] = group
         response = self.session.delete(
-            f"{self.api}/uploads/{upload.id}", headers=headers
+            f"{self.api}/uploads/{upload.id}", headers=headers, timeout=5
         )

         if response.status_code == 202:
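
The added `timeout=5` is the standard requests timeout: without it, session.delete can block indefinitely on an unresponsive server; with it, the call raises requests.exceptions.Timeout after 5 seconds of connect or read inactivity. A sketch of how a caller might guard against that (the retry handling is an assumption, not part of this diff):

    import requests

    try:
        foss.delete_upload(upload)
    except requests.exceptions.Timeout:
        # Propagated from session.delete(..., timeout=5) when the server
        # does not answer within 5 seconds.
        print(f"Deletion of upload {upload.id} timed out, retry later")
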
2 changes: 1 addition & 1 deletion tests/conftest.py
@@ -32,7 +32,7 @@ def jobs_lookup(foss: fossology.Fossology, upload: Upload):
             f"Waiting for job {job.name} with status {job.status} to complete (eta: {job.eta})"
         )
         job = foss.detail_job(job.id)
-        time.sleep(5)
+        time.sleep(10)


 @pytest.fixture(scope="session")
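
The fixture now polls every 10 seconds instead of 5. For reference, a bounded variant of the same polling loop (the helper name and poll budget are hypothetical; "Completed" is the status the new test asserts on):

    import time

    def wait_for_job(foss, job, poll_seconds=10, max_polls=30):
        # Re-fetch the job until it completes or the poll budget is spent.
        for _ in range(max_polls):
            if job.status == "Completed":
                return job
            time.sleep(poll_seconds)
            job = foss.detail_job(job.id)
        raise TimeoutError(f"Job {job.id} still {job.status} after {max_polls} polls")
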
32 changes: 26 additions & 6 deletions tests/test_jobs.py
@@ -3,6 +3,7 @@

 import secrets
 from typing import Dict
+from unittest.mock import Mock

 import pytest
 import responses
@@ -46,6 +47,17 @@ def test_schedule_jobs(
     assert jobs[0].id == job.id


+def test_detail_job_wait_completed(
+    foss: Fossology, upload_with_jobs: Upload, monkeypatch: pytest.MonkeyPatch
+):
+    mocked_logger = Mock()
+    monkeypatch.setattr("fossology.jobs.logger", mocked_logger)
+    jobs, _ = foss.list_jobs(upload=upload_with_jobs)
+    job = foss.detail_job(jobs[0].id, wait=10)
+    assert job.status == "Completed"
+    mocked_logger.debug.assert_called_once_with((f"Job {job.id} has completed"))
+
+
 @responses.activate
 def test_schedule_job_error(foss_server: str, foss: Fossology, upload: Upload):
     responses.add(responses.POST, f"{foss_server}/api/v1/jobs", status=404)
@@ -64,6 +76,14 @@ def test_list_jobs_error(foss_server: str, foss: Fossology):
     assert "Unable to retrieve the list of jobs from page 1" in str(excinfo.value)


+@responses.activate
+def test_list_all_jobs_access_denied(foss_server: str, foss: Fossology):
+    responses.add(responses.GET, f"{foss_server}/api/v1/jobs/all", status=403)
+    with pytest.raises(FossologyApiError) as excinfo:
+        foss.list_jobs(all=True)
+    assert "Access denied to /jobs/all endpoint" in str(excinfo.value)
+
+
 @responses.activate
 def test_detail_job_error(foss_server: str, foss: Fossology):
     job_id = secrets.randbelow(1000)
@@ -78,6 +98,12 @@ def test_detail_job_error(foss_server: str, foss: Fossology):


 def test_paginated_list_jobs(foss: Fossology, upload_with_jobs: Upload):
+    jobs, total_pages = foss.list_jobs(
+        upload=upload_with_jobs, page_size=1, all_pages=True
+    )
+    assert len(jobs) == 3
+    assert total_pages == 3
+
     jobs, total_pages = foss.list_jobs(upload=upload_with_jobs, page_size=1, page=1)
     assert len(jobs) == 1
     assert total_pages == 3
@@ -89,9 +115,3 @@ def test_paginated_list_jobs(foss: Fossology, upload_with_jobs: Upload):
     jobs, total_pages = foss.list_jobs(upload=upload_with_jobs, page_size=2, page=1)
     assert len(jobs) == 2
     assert total_pages == 2
-
-    jobs, total_pages = foss.list_jobs(
-        upload=upload_with_jobs, page_size=1, all_pages=True
-    )
-    assert len(jobs) == 3
-    assert total_pages == 3
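
Beyond pagination, the wait/timeout pair touched by this commit can be exercised end to end. A short sketch combining schedule_jobs and detail_job with the new 10-second default (the folder, upload and spec objects are assumed to exist):

    # Schedule jobs and block until they finish, giving up after 10 seconds;
    # both defaults changed from 30 to 10 in this commit.
    job = foss.schedule_jobs(folder, upload, spec, wait=True, timeout=10)
    job = foss.detail_job(job.id, wait=True, timeout=10)
    assert job.status == "Completed"
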
