diff --git a/CHANGELOG.md b/CHANGELOG.md
index 660f3c5..d228cb8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
 # Zscaler Python SDK Changelog
 
+## 0.10.4 (January 9, 2024)
+
+### Notes
+
+- Python Versions: **v3.8, v3.9, v3.10, v3.11**
+
+### Bug Fix:
+
+* ([#237](https://github.com/zscaler/zscaler-sdk-python/pull/237)) - Fixed pagination parameters on ZIA `cloud_apps` resource. Cloud Apps use the following parameters during pagination: `limit` and `page_number`.
+
 ## 0.10.3 (January,8 2024)
 
 ### Notes
diff --git a/docsrc/conf.py b/docsrc/conf.py
index be99ff0..c98be6a 100644
--- a/docsrc/conf.py
+++ b/docsrc/conf.py
@@ -28,9 +28,9 @@
 html_title = ""
 
 # The short X.Y version
-version = "0.10.2"
+version = "0.10.4"
 # The full version, including alpha/beta/rc tags
-release = "0.10.2"
+release = "0.10.4"
 
 # -- General configuration ---------------------------------------------------
diff --git a/docsrc/zs/guides/release_notes.rst b/docsrc/zs/guides/release_notes.rst
index 9f46dcc..7dac681 100644
--- a/docsrc/zs/guides/release_notes.rst
+++ b/docsrc/zs/guides/release_notes.rst
@@ -6,6 +6,16 @@ Release Notes
 Zscaler Python SDK Changelog
 ----------------------------
 
+## 0.10.4 (January 9, 2024)
+
+### Notes
+
+- Python Versions: **v3.8, v3.9, v3.10, v3.11**
+
+### Bug Fix:
+
+* Fixed pagination parameters on ZIA `cloud_apps` resource. Cloud Apps use the following parameters during pagination: `limit` and `page_number`. (`237 <https://github.com/zscaler/zscaler-sdk-python/pull/237>`_).
+
 ## 0.10.3 (January,8 2024)
 
 ### Notes
diff --git a/pyproject.toml b/pyproject.toml
index 69f8400..b50b7ae 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "zscaler-sdk-python"
-version = "0.10.3"
+version = "0.10.4"
 description = "Official Python SDK for the Zscaler Products (Beta)"
 authors = ["Zscaler, Inc. "]
 license = "MIT"
diff --git a/zscaler/__init__.py b/zscaler/__init__.py
index e83db75..6961bf9 100644
--- a/zscaler/__init__.py
+++ b/zscaler/__init__.py
@@ -29,7 +29,7 @@ __contributors__ = [
     "William Guilherme",
 ]
 
-__version__ = "0.10.3"
+__version__ = "0.10.4"
 
 from zscaler.zdx import ZDXClientHelper  # noqa
 from zscaler.zia import ZIAClientHelper  # noqa
diff --git a/zscaler/zia/__init__.py b/zscaler/zia/__init__.py
index e24f01b..1924907 100644
--- a/zscaler/zia/__init__.py
+++ b/zscaler/zia/__init__.py
@@ -405,11 +405,15 @@ def get_paginated_data(
         self,
         path=None,
         expected_status_code=200,
+        limit=None,  # OPTIONAL PARAMETER
         page=None,
+        page_number=None,  # Snake_case for user input
         pagesize=None,
         search=None,
+        filter=None,
         max_items=None,  # Maximum number of items to retrieve across pages
         max_pages=None,  # Maximum number of pages to retrieve
+        app_segment=None,
         type=None,  # Specify type of VPN credentials (CN, IP, UFQDN, XAUTH)
         include_only_without_location=None,  # Include only VPN credentials not associated with any location
         location_id=None,  # VPN credentials for a specific location ID
@@ -421,22 +425,25 @@ def get_paginated_data(
 
         Args:
             path (str): The API endpoint path to send requests to.
-            expected_status_code (int): The expected HTTP status code for a successful request. Defaults to 200.
-            page (int): Specific page number to fetch. Defaults to 1 if not provided.
-            pagesize (int): Number of items per page, default is 100, with a maximum of 10000.
-            search (str): Search query to filter the results.
-            max_items (int): Maximum number of items to retrieve.
-            max_pages (int): Maximum number of pages to fetch.
-            type (str, optional): Type of VPN credentials (e.g., CN, IP, UFQDN, XAUTH).
-            include_only_without_location (bool, optional): Filter to include only VPN credentials not associated with a location.
-            location_id (int, optional): Retrieve VPN credentials for the specified location ID.
-            managed_by (int, optional): Retrieve VPN credentials managed by the specified partner.
-            prefix (int, optional): Retrieve VPN credentials managed by a given partner.
+            expected_status_code (int): The expected HTTP status code for a successful request.
+            filter (int, optional): Retrieves the list of PAC files without the PAC file content in the response
+            limit (int, optional): Number of items to retrieve (sent as 'limit' in the URL construct).
+            page (int, optional): Specific page number to fetch (sent as 'page').
+            page_number (int, optional): Alternative parameter for specifying a page number (sent as 'pageNumber').
+            pagesize (int, optional): Number of items per page (default=1000, sent as 'pageSize').
+            search (str, optional): Search query to filter results.
+            max_items (int, optional): Maximum number of items to retrieve.
+            max_pages (int, optional): Maximum number of pages to fetch.
+            type (str, optional): Type of VPN credentials (e.g., CN, IP).
+            include_only_without_location (bool, optional): Filter only items w/o location.
+            location_id (int, optional): ID of a specific location.
+            managed_by (int, optional): Items managed by a given partner.
+            prefix (int, optional): Items with partner prefix.
 
         Returns:
             tuple: A tuple containing:
-                - BoxList: A list of fetched items wrapped in a BoxList for easy access.
-                - str: An error message if any occurred during the data fetching process.
+                - BoxList: A list of fetched items (BoxList).
+                - str: An error message if any occurred, else None.
         """
 
         logger = logging.getLogger(__name__)
@@ -446,10 +453,21 @@ def get_paginated_data(
         }
 
         # Initialize parameters
-        params = {
-            "page": page if page is not None else 1,  # Start at page 1 if not specified
-            "pageSize": pagesize if pagesize is not None else 100,  # Allow any user-defined pagesize
-        }
+        params = {}
+
+        # Handle 'page' and 'page_number' parameters
+        if page is not None:
+            params["page"] = page
+        elif page_number is not None:
+            params["pageNumber"] = page_number  # Use camelCase in the URL construct
+        else:
+            params["page"] = 1  # Default to page 1 if neither is provided
+
+        # Handle 'limit' and 'pagesize' logic
+        if limit is not None:
+            params["limit"] = limit  # Explicitly use 'limit' in the URL construct
+        else:
+            params["pageSize"] = pagesize if pagesize is not None else 1000  # Default to 'pageSize'
 
         # Add optional filters to the params if provided
         if search:
@@ -458,33 +476,37 @@ def get_paginated_data(
             params["search"] = search
         if type:
             params["type"] = type
         if include_only_without_location is not None:
             params["includeOnlyWithoutLocation"] = include_only_without_location
+        if app_segment:
+            params["appSegment"] = app_segment
         if location_id:
             params["locationId"] = location_id
         if managed_by:
             params["managedBy"] = managed_by
         if prefix:
             params["prefix"] = prefix
+        if filter:
+            params["filter"] = filter
 
-        # If the user specifies a page, fetch only that page
-        if page is not None:
+        # If the user specifies a specific page, fetch only that page.
+        if page is not None or page_number is not None:
             response = self.send("GET", path=path, params=params)
             if response.status_code != expected_status_code:
                 error_msg = ERROR_MESSAGES["UNEXPECTED_STATUS"].format(
-                    status_code=response.status_code, page=params["page"]
+                    status_code=response.status_code, page=params.get("page", params.get("pageNumber"))
                 )
                 logger.error(error_msg)
                 return BoxList([]), error_msg
 
             response_data = response.json()
             if not isinstance(response_data, list):
-                error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params["page"])
-                logger.warn(error_msg)
+                error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params.get("page", params.get("pageNumber")))
+                logger.warning(error_msg)
                 return BoxList([]), error_msg
 
             data = convert_keys_to_snake(response_data)
             return BoxList(data), None
 
-        # If no page is specified, iterate through pages to fetch all items
+        # If no specific page is specified, iterate through pages to fetch all items
         ret_data = []
         total_collected = 0
         try:
@@ -499,7 +521,7 @@ def get_paginated_data(
                 # Check for unexpected status code
                 if response.status_code != expected_status_code:
                     error_msg = ERROR_MESSAGES["UNEXPECTED_STATUS"].format(
-                        status_code=response.status_code, page=params["page"]
+                        status_code=response.status_code, page=params.get("page", params.get("pageNumber"))
                     )
                     logger.error(error_msg)
                     return BoxList([]), error_msg
@@ -507,177 +529,46 @@ def get_paginated_data(
                 # Parse the response as a flat list of items
                 response_data = response.json()
                 if not isinstance(response_data, list):
-                    error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params["page"])
-                    logger.warn(error_msg)
+                    error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params.get("page", params.get("pageNumber")))
+                    logger.warning(error_msg)
                     return BoxList([]), error_msg
 
                 data = convert_keys_to_snake(response_data)
 
                 # Limit data collection based on max_items
                 if max_items is not None:
-                    data = data[: max_items - total_collected]  # Limit items on the current page
+                    data = data[: max_items - total_collected]
                 ret_data.extend(data)
                 total_collected += len(data)
 
                 # Check if we've reached max_items or max_pages limits
                 if (max_items is not None and total_collected >= max_items) or (
-                    max_pages is not None and params["page"] >= max_pages
+                    max_pages is not None and params.get("page", params.get("pageNumber")) >= max_pages
                 ):
                     break
 
-                # Stop if fewer items than pageSize are returned
-                if len(data) < params["pageSize"]:
+                # Stop if fewer items than pageSize or limit are returned
+                if "pageSize" in params and len(data) < params["pageSize"]:
+                    break
+                if "limit" in params and len(data) < params["limit"]:
                     break
 
                 # Move to the next page
-                params["page"] += 1
+                if "page" in params:
+                    params["page"] += 1
+                elif "pageNumber" in params:
+                    params["pageNumber"] += 1
 
         finally:
-            time.sleep(2)  # Ensure a delay between requests regardless of outcome
+            time.sleep(2)  # Ensure a delay between requests, regardless of outcome
 
         if not ret_data:
-            error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params["page"])
-            logger.warn(error_msg)
+            error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params.get("page", params.get("pageNumber")))
+            logger.warning(error_msg)
             return BoxList([]), error_msg
 
         return BoxList(ret_data), None
-
-    # def get_paginated_data(
-    #     self,
-    #     path=None,
-    #     expected_status_code=200,
-    #     page=None,
-    #     pagesize=None,
-    #     search=None,
-    #     max_items=None,  # Maximum number of items to retrieve across pages
-    #     max_pages=None,  # Maximum number of pages to retrieve
-    #     type=None,  # Specify type of VPN credentials (CN, IP, UFQDN, XAUTH)
-    #     include_only_without_location=None,  # Include only VPN credentials not associated with any location
-    #     location_id=None,  # VPN credentials for a specific location ID
-    #     managed_by=None,  # VPN credentials managed by a given partner
-    #     prefix=None,  # VPN credentials managed by a given partner
-    # ):
-    #     """
-    #     Fetches paginated data from the API based on specified parameters and handles pagination.
-
-    #     Args:
-    #         path (str): The API endpoint path to send requests to.
-    #         expected_status_code (int): The expected HTTP status code for a successful request. Defaults to 200.
-    #         page (int): Specific page number to fetch. Defaults to 1 if not provided.
-    #         pagesize (int): Number of items per page, default is 100, with a maximum of 1000.
-    #         search (str): Search query to filter the results.
-    #         max_items (int): Maximum number of items to retrieve.
-    #         max_pages (int): Maximum number of pages to fetch.
-    #         type (str, optional): Type of VPN credentials (e.g., CN, IP, UFQDN, XAUTH).
-    #         include_only_without_location (bool, optional): Filter to include only VPN credentials not associated with a location.
-    #         location_id (int, optional): Retrieve VPN credentials for the specified location ID.
-    #         managed_by (int, optional): Retrieve VPN credentials managed by the specified partner.
-    #         prefix (int, optional): Retrieve VPN credentials managed by the specified partner.
-
-    #     Returns:
-    #         tuple: A tuple containing:
-    #             - BoxList: A list of fetched items wrapped in a BoxList for easy access.
-    #             - str: An error message if any occurred during the data fetching process.
-    #     """
-    #     logger = logging.getLogger(__name__)
-
-    #     ERROR_MESSAGES = {
-    #         "UNEXPECTED_STATUS": "Unexpected status code {status_code} received for page {page}.",
-    #         "EMPTY_RESULTS": "No results found for page {page}.",
-    #     }
-
-    #     # Initialize pagination parameters
-    #     # params = {
-    #     #     "page": page if page is not None else 1,  # Start at page 1 if not specified
-    #     #     "pagesize": min(pagesize if pagesize is not None else 100, max_page_size),  # Apply max_page_size limit
-    #     # }
-
-    #     params = {
-    #         "page": page if page is not None else 1,  # Start at page 1 if not specified
-    #         "pagesize": max(100, min(pagesize or 100, 10000)),  # Ensure pagesize is within API limits
-    #     }
-
-    #     # Add optional filters to the params if provided
-    #     if search:
-    #         params["search"] = search
-    #     if type:
-    #         params["type"] = type
-    #     if include_only_without_location is not None:
-    #         params["includeOnlyWithoutLocation"] = include_only_without_location
-    #     if location_id:
-    #         params["locationId"] = location_id
-    #     if managed_by:
-    #         params["managedBy"] = managed_by
-    #     if prefix:
-    #         params["prefix"] = prefix
-
-    #     ret_data = []
-    #     total_collected = 0
-
-    #     try:
-    #         while True:
-    #             # Apply rate-limiting if necessary
-    #             should_wait, delay = self.rate_limiter.wait("GET")
-    #             if should_wait:
-    #                 time.sleep(delay)
-
-    #             # Send the request to the API
-    #             response = self.send("GET", path=path, params=params)
-
-    #             # Check for unexpected status code
-    #             if response.status_code != expected_status_code:
-    #                 error_msg = ERROR_MESSAGES["UNEXPECTED_STATUS"].format(
-    #                     status_code=response.status_code, page=params["page"]
-    #                 )
-    #                 logger.error(error_msg)
-    #                 return BoxList([]), error_msg
-
-    #             # Parse the response as a flat list of items
-    #             response_data = response.json()
-    #             if not isinstance(response_data, list):
-    #                 error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params["page"])
-    #                 logger.warn(error_msg)
-    #                 return BoxList([]), error_msg
-
-    #             data = convert_keys_to_snake(response_data)
-
-    #             # If searching for a specific item, stop if we find a match
-    #             if search:
-    #                 for item in data:
-    #                     if item.get("name") == search:
-    #                         ret_data.append(item)
-    #                         return BoxList(ret_data), None
-
-    #             # Limit data collection based on max_items
-    #             if max_items is not None:
-    #                 data = data[: max_items - total_collected]  # Limit items on the current page
-    #             ret_data.extend(data)
-    #             total_collected += len(data)
-
-    #             # Check if we've reached max_items or max_pages limits
-    #             if (max_items is not None and total_collected >= max_items) or (
-    #                 max_pages is not None and params["page"] >= max_pages
-    #             ):
-    #                 break
-
-    #             # Stop if we've processed all available pages (i.e., less than requested page size)
-    #             if len(data) < params["pagesize"]:
-    #                 break
-
-    #             # Move to the next page
-    #             params["page"] += 1
-
-    #     finally:
-    #         time.sleep(2)  # Ensure a delay between requests regardless of outcome
-
-    #     if not ret_data:
-    #         error_msg = ERROR_MESSAGES["EMPTY_RESULTS"].format(page=params["page"])
-    #         logger.warn(error_msg)
-    #         return BoxList([]), error_msg
-
-    #     return BoxList(ret_data), None
-
     @property
     def admin_and_role_management(self):
         """
diff --git a/zscaler/zia/client.py b/zscaler/zia/client.py
index e2313ce..4ab749a 100644
--- a/zscaler/zia/client.py
+++ b/zscaler/zia/client.py
@@ -34,7 +34,7 @@ def get_paginated_data(
         self,
         path: str = None,
         data_key_name: str = None,
-        data_per_page: int = 500,
+        data_per_page: int = 1000,
         expected_status_code=200,
     ):
         """
diff --git a/zscaler/zia/cloud_apps.py b/zscaler/zia/cloud_apps.py
index c96cc7a..688e0c8 100644
--- a/zscaler/zia/cloud_apps.py
+++ b/zscaler/zia/cloud_apps.py
@@ -367,10 +367,16 @@ def export_shadow_it_csv(self, application: str, entity: str, duration: str = "L
 
         return self.rest.post(f"shadowIT/applications/{entity}/exportCsv", json=payload).text
 
-    def list_apps(self):
+    def list_apps(self, **kwargs):
         """
         List all predefined and custom cloud applications by name and id.
 
+        Keyword Args:
+            **limit (int, optional):
+                Specifies the maximum number of cloud applications that must be retrieved in a page. The maximum size is 1000
+            **page_number (int, optional):
+                Specifies the page number. The numbering starts at 0.
+
         Returns:
             :obj:`BoxList` of :obj:`Box`: A list of cloud applications.
 
@@ -382,7 +388,8 @@ def list_apps(self, **kwargs):
         Examples:
             >>> for app in zia.cloud_apps.list_apps():
             ...     print(app.name)
         """
-        return self.rest.get("cloudApplications/lite")
+        list, _ = self.rest.get_paginated_data(path="/cloudApplications/lite", **kwargs)
+        return list
 
     def list_custom_tags(self):
         """
diff --git a/zscaler/zia/isolation_profile.py b/zscaler/zia/isolation_profile.py
index 7d772f0..f946244 100644
--- a/zscaler/zia/isolation_profile.py
+++ b/zscaler/zia/isolation_profile.py
@@ -17,7 +17,6 @@
 
 from box import BoxList
 
-from zscaler.utils import snake_to_camel
 from zscaler.zia.client import ZIAClient
 
 
@@ -25,7 +24,7 @@ class IsolationProfileAPI:
     def __init__(self, client: ZIAClient):
         self.rest = client
 
-    def list_isolation_profiles(self, **kwargs) -> BoxList:
+    def list_isolation_profiles(self) -> BoxList:
        """
        Returns a list of all profiles in the Isolation Profile field for URL Filtering rules and Cloud App Control rules.
 
diff --git a/zscaler/zia/labels.py b/zscaler/zia/labels.py
index 47166e1..2ee75c6 100644
--- a/zscaler/zia/labels.py
+++ b/zscaler/zia/labels.py
@@ -31,11 +31,9 @@ def list_labels(self, **kwargs) -> BoxList:
         Returns the list of ZIA Rule Labels.
 
         Keyword Args:
-            **max_items (int, optional):
-                The maximum number of items to request before stopping iteration.
-            **max_pages (int, optional):
-                The maximum number of pages to request before stopping iteration.
-            **page_size (int, optional):
+            **page (int, optional):
+                Specifies the page offset.
+            **pagesize (int, optional):
                 Specifies the page size. The default size is 100, but the maximum size is 1000.
 
         Returns:
diff --git a/zscaler/zia/locations.py b/zscaler/zia/locations.py
index 8cd0484..7a83cc0 100644
--- a/zscaler/zia/locations.py
+++ b/zscaler/zia/locations.py
@@ -35,11 +35,9 @@ def list_locations(self, **kwargs) -> BoxList:
                 Filter based on whether the Enforce Authentication setting is enabled or disabled for a location.
             **bw_enforced (bool, optional):
                 Filter based on whether Bandwith Control is being enforced for a location.
-            **max_items (int, optional):
-                The maximum number of items to request before stopping iteration.
-            **max_pages (int, optional):
-                The maximum number of pages to request before stopping iteration.
-            **page_size (int, optional):
+            **page (int, optional):
+                Specifies the page offset.
+            **pagesize (int, optional):
                 Specifies the page size. The default size is 100, but the maximum size is 1000.
             **search (str, optional):
                 The search string used to partially match against a location's name and port attributes.
@@ -60,7 +58,7 @@ def list_locations(self, **kwargs) -> BoxList:
 
         List locations, returning 200 items per page for a maximum of 2 pages:
 
-            >>> for location in zia.locations.list_locations(page_size=200, max_pages=2):
+            >>> for location in zia.locations.list_locations(pagesize=200, max_pages=2):
             ...    print(location)
 
         """
@@ -243,7 +241,7 @@ def list_sub_locations(self, location_id: str, **kwargs) -> BoxList:
                 The maximum number of items to request before stopping iteration.
             **max_pages (int, optional):
                 The maximum number of pages to request before stopping iteration.
-            **page_size (int, optional):
+            **pagesize (int, optional):
                 Specifies the page size. The default size is 100, but the maximum size is 1000.
             **search (str, optional):
                 The search string used to partially match against a location's name and port attributes.
@@ -274,7 +272,7 @@ def list_locations_lite(self, **kwargs) -> BoxList:
                 The maximum number of items to request before stopping iteration.
             **max_pages (int, optional):
                 The maximum number of pages to request before stopping iteration.
-            **page_size (int, optional):
+            **pagesize (int, optional):
                 Specifies the page size. The default size is 100, but the maximum size is 1000.
             **search (str, optional):
                 The search string used to partially match against a location's name and port attributes.
@@ -295,7 +293,7 @@ def list_locations_lite(self, **kwargs) -> BoxList:
 
         List locations, returning 200 items per page for a maximum of 2 pages:
 
-            >>> for location in zia.locations.list_locations_lite(page_size=200, max_pages=2):
+            >>> for location in zia.locations.list_locations_lite(pagesize=200, max_pages=2):
             ...    print(location)
 
         """
@@ -518,7 +516,7 @@ def get_location_group_by_id(self, group_id: int) -> Box:
         """
         return self.rest.get(f"locations/groups/{group_id}")
 
-    def list_location_groups_lite(self, page: int = 1, page_size: int = 100) -> BoxList:
+    def list_location_groups_lite(self, **kwargs) -> BoxList:
         """
         Returns a list of location groups (lite version) by their ID where only name and ID is returned in ZIA.
@@ -535,8 +533,8 @@ def list_location_groups_lite(self, page: int = 1, page_size: int = 100) -> BoxL
         Get a list of all configured location groups:
             >>> location = zia.locations.list_location_groups_lite()
         """
-        params = {"page": page, "pageSize": page_size}
-        return self.rest.get("locations/groups/lite", params=params)
+        list, _ = self.rest.get_paginated_data(path="/locations/groups/lite", **kwargs)
+        return list
 
     def get_location_group_lite_by_id(self, group_id: int) -> Box:
         """
@@ -633,12 +631,12 @@ def list_cities_by_name(self, **kwargs) -> BoxList:
             country, postal code, etc.
 
         Args:
-            **kwargs: Optional keyword arguments including 'prefix', 'page', and 'page_size'.
+            **kwargs: Optional keyword arguments including 'prefix', 'page', and 'pagesize'.
 
         Keyword Args:
             prefix (str): The prefix string to search for cities.
             page (int): The page number of the results.
-            page_size (int): The number of results per page.
+            pagesize (int): The number of results per page.
 
         Returns:
             :obj:`BoxList`: The list of cities (along with their geographical data) that match the prefix search.
diff --git a/zscaler/zia/pac_files.py b/zscaler/zia/pac_files.py
index e49ebbe..80d5624 100644
--- a/zscaler/zia/pac_files.py
+++ b/zscaler/zia/pac_files.py
@@ -31,12 +31,10 @@ def list_pac_files(self, **kwargs) -> BoxList:
         Returns the list of ZIA Pac Files.
 
         Keyword Args:
-            **max_items (int, optional):
-                The maximum number of items to request before stopping iteration.
-            **max_pages (int, optional):
-                The maximum number of pages to request before stopping iteration.
-            **page_size (int, optional):
-                Specifies the page size. The default size is 100, but the maximum size is 1000.
+            **filter (int, optional):
+                Retrieves the list of PAC files without the PAC file content in the response
+            **search (str, optional):
+                Returns PAC files with the names that match the search criteria
 
         Returns:
             :obj:`BoxList`: The list of PAC Files configured in ZIA.
diff --git a/zscaler/zia/traffic.py b/zscaler/zia/traffic.py
index 12a2500..fccd21a 100644
--- a/zscaler/zia/traffic.py
+++ b/zscaler/zia/traffic.py
@@ -58,7 +58,8 @@ def list_gre_tunnels(self, **kwargs) -> BoxList:
             ...    print(tunnel)
 
         """
-        return BoxList(Iterator(self.rest, "greTunnels", **kwargs))
+        list, _ = self.rest.get_paginated_data(path="/greTunnels", **kwargs)
+        return list
 
     def get_gre_tunnel(self, tunnel_id: str) -> Box:
         """
@@ -230,11 +231,9 @@ def list_vips(self, **kwargs) -> BoxList:
             **include (str, optional):
                 Include all, private, or public VIPs in the list. Available choices are `all`, `private`, `public`.
                 Defaults to `public`.
-            **max_items (int, optional):
-                The maximum number of items to request before stopping iteration.
-            **max_pages (int, optional):
-                The maximum number of pages to request before stopping iteration.
-            **page_size (int, optional):
+            **page (int, optional):
+                Specifies the page offset.
+            **pagesize (int, optional):
                 Specifies the page size. The default size is 100, but the maximum size is 1000.
             **region (str, optional):
                 Filter based on region.
@@ -259,7 +258,8 @@ def list_vips(self, **kwargs) -> BoxList:
             ...    print(vip)
 
         """
-        return BoxList(Iterator(self.rest, "vips", **kwargs))
+        list, _ = self.rest.get_paginated_data(path="/vips", **kwargs)
+        return list
 
     def add_gre_tunnel(
         self,
diff --git a/zscaler/zia/users.py b/zscaler/zia/users.py
index 5a0a726..c1d61e1 100644
--- a/zscaler/zia/users.py
+++ b/zscaler/zia/users.py
@@ -163,13 +163,11 @@ def list_users(self, **kwargs) -> BoxList:
                 Filters by department name. This is a `starts with` match.
             **group (str, optional):
                 Filters by group name. This is a `starts with` match.
-            **max_items (int, optional):
-                The maximum number of items to request before stopping iteration.
-            **max_pages (int, optional):
-                The maximum number of pages to request before stopping iteration.
             **name (str, optional):
                 Filters by user name. This is a `partial` match.
-            **page_size (int, optional):
+            **page (int, optional):
+                Specifies the page offset.
+            **pagesize (int, optional):
                 Specifies the page size. The default size is 100, but the maximum size is 10000.
             **sort_by (str):
                 The field name to sort by, supported values: id, name, creationTime or modifiedTime (default to name)
diff --git a/zscaler/zia/workload_groups.py b/zscaler/zia/workload_groups.py
index 334d7d3..f09627e 100644
--- a/zscaler/zia/workload_groups.py
+++ b/zscaler/zia/workload_groups.py
@@ -36,10 +36,8 @@ def list_groups(self, **kwargs) -> BoxList:
             ...    pprint(workloads)
 
         """
-        response = self.rest.get("/workloadGroups")
-        if isinstance(response, Response):
-            return None
-        return response
+        list, _ = self.rest.get_paginated_data(path="/workloadGroups", **kwargs)
+        return list
 
     # Search Workload Group By Name
     def get_group_by_name(self, name):
diff --git a/zscaler/zia/zpa_gateway.py b/zscaler/zia/zpa_gateway.py
index 863c4f3..47db9f3 100644
--- a/zscaler/zia/zpa_gateway.py
+++ b/zscaler/zia/zpa_gateway.py
@@ -29,8 +29,11 @@ def list_gateways(self, **kwargs) -> BoxList:
         """
         Returns a list of all ZPA Gateways.
 
-        Returns:
-            :obj:`BoxList`: The list of all ZPA Gateways Items
+        Keyword Args:
+            **app_segment (list, optional):
+                Filters the list by Application Segment
+            **search (str, optional):
+                The search string used to match against a ZPA gateway name or an associated Server Group name
 
         Returns:
             :obj:`BoxList`: The list of all ZPA Gateways Items
@@ -42,7 +45,8 @@ def list_gateways(self, **kwargs) -> BoxList:
         Examples:
            >>> results = zia.zpa_gateway.list_gateways()
            ... for item in results:
            ...     print(item)
         """
-        return self.rest.get("zpaGateways")
+        list, _ = self.rest.get_paginated_data(path="/zpaGateways", **kwargs)
+        return list
 
     def get_gateway(self, gateway_id: str) -> Box:
         """
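A minimal usage sketch of the Cloud Apps pagination fix introduced above, based on the `list_apps` docstring and the changelog entry. It assumes an authenticated `ZIAClientHelper` instance named `zia`; the constructor arguments shown are illustrative placeholders, not real credentials:

    from zscaler import ZIAClientHelper

    # Hypothetical credentials; replace with real tenant values.
    zia = ZIAClientHelper(
        username="admin@example.com",
        password="password",
        api_key="api_key",
        cloud="zscaler",
    )

    # Cloud Apps paginates with 'limit' (maximum 1000 per page) and 'page_number'
    # (numbering starts at 0); 'page_number' is sent to the API as 'pageNumber'.
    apps = zia.cloud_apps.list_apps(limit=1000, page_number=0)
    for app in apps:
        print(app.name)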