Merge pull request #58 from fossology/feat/pagination
feat(pagination): get all_pages for jobs and uploads
deveaud-m authored Feb 10, 2021
2 parents 0af632d + 6557ba3 commit a6f9918
Showing 7 changed files with 157 additions and 61 deletions.
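
Note: this release changes the return type of `list_jobs` and `list_uploads` from a plain list to a `(results, total_pages)` tuple, as the test updates below show. A minimal before/after sketch (the client `foss` and the upload object `my_upload` are assumed placeholders, not part of this diff):

    # Before (<= 1.2.1): the list endpoints returned a plain list
    jobs = foss.list_jobs(upload=my_upload)

    # After (1.3.0): they return a tuple; unpack or discard the page count
    jobs, total_pages = foss.list_jobs(upload=my_upload)
    uploads, _ = foss.list_uploads()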
2 changes: 1 addition & 1 deletion docs-source/conf.py
@@ -22,7 +22,7 @@
 copyright = "2021, Siemens AG"

 # The full version, including major/minor/patch tags
-release = "1.2.1"
+release = "1.3.0"


 # -- General configuration ---------------------------------------------------
50 changes: 35 additions & 15 deletions fossology/jobs.py
@@ -15,36 +15,56 @@
 class Jobs:
     """Class dedicated to all "jobs" related endpoints"""

-    def list_jobs(self, page_size=20, page=1, upload=None):
+    def list_jobs(self, upload=None, page_size=100, page=1, all_pages=False):
         """Get all available jobs

         API Endpoint: GET /jobs

         The answer is limited to the first page of 20 results by default

+        :param upload: list only jobs of the given upload (default: None)
         :param page_size: the maximum number of results per page
         :param page: the number of the page to be retrieved
-        :param upload: list only jobs of the given upload (default: None)
-        :type page_size: int (default: 20)
-        :type page: int (default: 1)
+        :param all_pages: get all jobs (default: False)
         :type upload: Upload
-        :return: the jobs data
-        :rtype: list of Job
+        :type page_size: int (default: 100)
+        :type page: int (default: 1)
+        :type all_pages: boolean
+        :return: a tuple containing the list of jobs and the total number of pages
+        :rtype: Tuple(list of Job, int)
         :raises FossologyApiError: if the REST call failed
         """
         params = {}
-        headers = {"limit": str(page_size), "page": str(page)}
+        headers = {"limit": str(page_size)}
         if upload:
             params["upload"] = upload.id
-        response = self.session.get(f"{self.api}/jobs", params=params, headers=headers)
-        if response.status_code == 200:
-            jobs_list = list()
-            for job in response.json():
-                jobs_list.append(Job.from_json(job))
-            return jobs_list
+        jobs_list = list()
+        if all_pages:
+            # will be reset after the total number of pages has been retrieved from the API
+            x_total_pages = 2
         else:
-            description = "Getting the list of jobs failed"
-            raise FossologyApiError(description, response)
+            x_total_pages = page
+        while page <= x_total_pages:
+            headers["page"] = str(page)
+            response = self.session.get(
+                f"{self.api}/jobs", params=params, headers=headers
+            )
+            if response.status_code == 200:
+                for job in response.json():
+                    jobs_list.append(Job.from_json(job))
+                x_total_pages = int(response.headers.get("X-TOTAL-PAGES", 0))
+                if not all_pages or x_total_pages == 0:
+                    logger.info(
+                        f"Retrieved page {page} of jobs, {x_total_pages} pages available in total"
+                    )
+                    return jobs_list, x_total_pages
+                page += 1
+            else:
+                description = f"Unable to retrieve the list of jobs from page {page}"
+                raise FossologyApiError(description, response)
+        logger.info(f"Retrieved all {x_total_pages} pages of jobs")
+        return jobs_list, x_total_pages

     def detail_job(self, job_id, wait=False, timeout=30):
         """Get detailed information about a job
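
The reworked `list_jobs` above either fetches a single page or, with `all_pages=True`, keeps requesting pages until the count reported by the server is exhausted. A hedged usage sketch (assumes an authenticated `fossology.Fossology` client `foss` and an existing `Upload` object `my_upload`; neither is defined in this diff):

    from fossology.exceptions import FossologyApiError

    try:
        # Fetch a single page of at most 10 jobs for the given upload
        jobs, total_pages = foss.list_jobs(upload=my_upload, page_size=10, page=1)

        # Or let the client iterate over every page transparently
        all_jobs, total_pages = foss.list_jobs(upload=my_upload, all_pages=True)
    except FossologyApiError as error:
        # Raised as soon as any single page request fails
        print(error)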
65 changes: 41 additions & 24 deletions fossology/uploads.py
@@ -403,7 +403,13 @@ def delete_upload(self, upload, group=None):
         raise FossologyApiError(description, response)

     def list_uploads(
-        self, folder=None, group=None, recursive=True, page_size=20, page=1
+        self,
+        folder=None,
+        group=None,
+        recursive=True,
+        page_size=100,
+        page=1,
+        all_pages=False,
     ):
         """Get all uploads available to the registered user
@@ -412,49 +418,60 @@
         :param folder: only list uploads from the given folder
         :param group: list uploads from a specific group (not only your own uploads) (default: None)
         :param recursive: whether to list uploads from children folders or not (default: True)
-        :param page_size: limit the number of uploads per page (default: 20)
+        :param page_size: limit the number of uploads per page (default: 100)
         :param page: the number of the page to fetch uploads from (default: 1)
+        :param all_pages: get all uploads (default: False)
         :type folder: Folder
         :type group: string
         :type recursive: boolean
         :type page_size: int
         :type page: int
-        :return: a list of uploads
-        :rtype: list of Upload
+        :type all_pages: boolean
+        :return: a tuple containing the list of uploads and the total number of pages
+        :rtype: Tuple(list of Upload, int)
         :raises FossologyApiError: if the REST call failed
         :raises AuthorizationError: if the user can't access the group
         """
         params = {}
-        headers = {"limit": str(page_size), "page": str(page)}
+        headers = {"limit": str(page_size)}
         if group:
             headers["groupName"] = group
         if folder:
             params["folderId"] = folder.id
         if not recursive:
             params["recursive"] = "false"

-        response = self.session.get(
-            f"{self.api}/uploads", headers=headers, params=params
-        )
-
-        if response.status_code == 200:
-            uploads_list = list()
-            for upload in response.json():
-                uploads_list.append(Upload.from_json(upload))
-            logger.info(
-                f"Retrieved page {page} of uploads, {response.headers.get('X-TOTAL-PAGES', 'Unknown')} pages available in total"
-            )
-            return uploads_list
+        uploads_list = list()
+        if all_pages:
+            # will be reset after the total number of pages has been retrieved from the API
+            x_total_pages = 2
+        else:
+            x_total_pages = page
+        while page <= x_total_pages:
+            headers["page"] = str(page)
+            response = self.session.get(
+                f"{self.api}/uploads", headers=headers, params=params
+            )
+            if response.status_code == 200:
+                for upload in response.json():
+                    uploads_list.append(Upload.from_json(upload))
+                x_total_pages = int(response.headers.get("X-TOTAL-PAGES", 0))
+                if not all_pages or x_total_pages == 0:
+                    logger.info(
+                        f"Retrieved page {page} of uploads, {x_total_pages} pages available in total"
+                    )
+                    return uploads_list, x_total_pages
+                page += 1

-        elif response.status_code == 403:
-            description = (
-                f"Retrieving list of uploads {get_options(group, folder)}not authorized"
-            )
-            raise AuthorizationError(description, response)
+            elif response.status_code == 403:
+                description = f"Retrieving list of uploads {get_options(group, folder)}not authorized"
+                raise AuthorizationError(description, response)

-        else:
-            description = "Unable to retrieve the list of uploads"
-            raise FossologyApiError(description, response)
+            else:
+                description = f"Unable to retrieve the list of uploads from page {page}"
+                raise FossologyApiError(description, response)
+        logger.info(f"Retrieved all {x_total_pages} pages of uploads")
+        return uploads_list, x_total_pages

     def move_upload(self, upload, folder, group=None):
         """Move an upload to another folder
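
`list_uploads` follows the same pattern as `list_jobs`: with `all_pages` set, `x_total_pages` starts at the sentinel value 2 so the while loop runs at least once, then gets overwritten with the `X-TOTAL-PAGES` response header. A generic, self-contained sketch of that pattern (names are assumed for illustration, not part of the library):

    import requests

    def fetch_all_pages(session: requests.Session, url: str, page_size: int = 100):
        """Collect results from a paginated endpoint advertising X-TOTAL-PAGES."""
        items, page, total_pages = [], 1, 2  # sentinel > 1 forces a first request
        while page <= total_pages:
            response = session.get(
                url, headers={"limit": str(page_size), "page": str(page)}
            )
            response.raise_for_status()
            items.extend(response.json())
            # Trust the server's page count; 0 (header missing) ends the loop
            total_pages = int(response.headers.get("X-TOTAL-PAGES", 0))
            page += 1
        return items, total_pages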
24 changes: 12 additions & 12 deletions poetry.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "fossology"
-version = "1.2.1"
+version = "1.3.0"
 description = "A library to automate Fossology from Python scripts"
 authors = ["Marion Deveaud <[email protected]>"]
 license = "MIT License"
32 changes: 28 additions & 4 deletions tests/test_jobs.py
@@ -13,7 +13,7 @@


 def test_unpack_jobs(foss: Fossology, upload: Upload):
-    jobs = foss.list_jobs(upload=upload)
+    jobs, _ = foss.list_jobs(upload=upload)
     assert len(jobs) == 1


@@ -28,7 +28,7 @@ def test_schedule_jobs(foss: Fossology, upload: Upload, foss_schedule_agents: Di
     job = foss.schedule_jobs(foss.rootFolder, upload, foss_schedule_agents)
     assert job.name == upload.uploadname

-    jobs = foss.list_jobs(upload=upload)
+    jobs, _ = foss.list_jobs(upload=upload)
     assert len(jobs) == 2

     job = foss.detail_job(jobs[1].id, wait=True, timeout=30)
@@ -39,7 +39,7 @@ def test_schedule_jobs(foss: Fossology, upload: Upload, foss_schedule_agents: Di
     )

     # Use pagination
-    jobs = foss.list_jobs(upload=upload, page_size=1, page=2)
+    jobs, _ = foss.list_jobs(upload=upload, page_size=1, page=2)
     assert len(jobs) == 1
     assert jobs[0].id == job.id

@@ -59,7 +59,7 @@ def test_list_jobs_error(foss_server: str, foss: Fossology):
     responses.add(responses.GET, f"{foss_server}/api/v1/jobs", status=404)
     with pytest.raises(FossologyApiError) as excinfo:
         foss.list_jobs()
-    assert "Getting the list of jobs failed" in str(excinfo.value)
+    assert "Unable to retrieve the list of jobs from page 1" in str(excinfo.value)


@@ -73,3 +73,27 @@ def test_detail_job_error(foss_server: str, foss: Fossology):
     with pytest.raises(FossologyApiError) as excinfo:
         foss.detail_job(job_id)
     assert f"Error while getting details for job {job_id}" in str(excinfo.value)
+
+
+def test_paginated_list_jobs(foss: Fossology, scanned_upload: Upload):
+    jobs, total_pages = foss.list_jobs(upload=scanned_upload, page_size=1, page=1)
+    print(jobs, total_pages)
+    assert len(jobs) == 1
+    assert total_pages == 3
+
+    jobs, total_pages = foss.list_jobs(upload=scanned_upload, page_size=1, page=2)
+    print(jobs, total_pages)
+    assert len(jobs) == 1
+    assert total_pages == 3
+
+    jobs, total_pages = foss.list_jobs(upload=scanned_upload, page_size=2, page=1)
+    print(jobs, total_pages)
+    assert len(jobs) == 2
+    assert total_pages == 2
+
+    jobs, total_pages = foss.list_jobs(
+        upload=scanned_upload, page_size=1, all_pages=True
+    )
+    assert len(jobs) == 2
+    # FIXME: total number of pages should be 2
+    # assert total_pages == 3
43 changes: 39 additions & 4 deletions tests/test_uploads.py
@@ -2,6 +2,7 @@
 # SPDX-License-Identifier: MIT

 import secrets
+import time

 import pytest
 import responses
@@ -91,9 +92,9 @@ def test_get_uploads(foss: Fossology, upload_folder: Folder, test_file_path: str
     # Folder listing is still unstable in version 1.0.16
     # Let's skip it since it has been fixed in newest versions
     if versiontuple(foss.version) > versiontuple("1.0.16"):
-        assert len(foss.list_uploads(folder=upload_folder)) == 2
-        assert len(foss.list_uploads(folder=upload_folder, recursive=False)) == 1
-        assert len(foss.list_uploads(folder=upload_subfolder)) == 1
+        assert len(foss.list_uploads(folder=upload_folder)[0]) == 2
+        assert len(foss.list_uploads(folder=upload_folder, recursive=False)[0]) == 1
+        assert len(foss.list_uploads(folder=upload_subfolder)[0]) == 1


 def test_upload_from_vcs(foss: Fossology):
@@ -215,7 +216,7 @@ def test_move_copy_upload(foss: Fossology, upload: Upload, move_folder: Folder):
     assert moved_upload.folderid == move_folder.id

     foss.copy_upload(moved_upload, foss.rootFolder)
-    list_uploads = foss.list_uploads()
+    list_uploads, _ = foss.list_uploads()
     test_upload = None
     for upload in list_uploads:
         if upload.folderid == foss.rootFolder.id:
@@ -302,3 +303,37 @@ def test_delete_unknown_upload_unknown_group(foss: Fossology):
     assert f"Deleting upload {upload.id} for group test not authorized" in str(
         excinfo.value
     )
+
+
+def test_paginated_list_uploads(foss: Fossology, upload: Upload, test_file_path: str):
+    if versiontuple(foss.version) < versiontuple("1.1.1"):
+        # Upload pagination not available yet
+        return
+    # Add a second upload
+    second_upload = foss.upload_file(
+        foss.rootFolder,
+        file=test_file_path,
+        description="Test second upload via fossology-python lib",
+        access_level=AccessLevel.PUBLIC,
+    )
+    time.sleep(3)
+    uploads, _ = foss.list_uploads(page_size=1, page=1)
+    assert len(uploads) == 1
+
+    uploads, _ = foss.list_uploads(page_size=1, page=2)
+    assert len(uploads) == 1
+
+    uploads, _ = foss.list_uploads(page_size=2, page=1)
+    assert len(uploads) == 2
+
+    uploads, _ = foss.list_uploads(page_size=1, all_pages=True)
+    num_known_uploads = 0
+    for up in uploads:
+        if up.description in (
+            "Test upload via fossology-python lib",
+            "Test second upload via fossology-python lib",
+        ):
+            num_known_uploads += 1
+    assert num_known_uploads >= 2
+
+    foss.delete_upload(second_upload)