From 73a4a28f712cdb55081ce0757e9f83f48dd5dbfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mika=20H=C3=A4nninen?= Date: Thu, 14 Mar 2024 21:01:20 +0200 Subject: [PATCH] Improve `RPA.Cloud.Google` Python usage (#1157) --- docs/source/releasenotes.rst | 7 + packages/google/pyproject.toml | 2 +- .../google/src/RPA/Cloud/Google/__init__.py | 67 +++++-- .../RPA/Cloud/Google/keywords/apps_script.py | 17 +- .../src/RPA/Cloud/Google/keywords/base.py | 7 +- .../src/RPA/Cloud/Google/keywords/context.py | 55 ++---- .../RPA/Cloud/Google/keywords/document_ai.py | 26 +-- .../src/RPA/Cloud/Google/keywords/drive.py | 92 +++++---- .../src/RPA/Cloud/Google/keywords/gmail.py | 34 ++-- .../Cloud/Google/keywords/natural_language.py | 18 +- .../src/RPA/Cloud/Google/keywords/sheets.py | 187 ++++++++++++++++-- .../Cloud/Google/keywords/speech_to_text.py | 19 +- .../src/RPA/Cloud/Google/keywords/storage.py | 25 ++- .../Cloud/Google/keywords/text_to_speech.py | 21 +- .../RPA/Cloud/Google/keywords/translation.py | 14 +- .../Google/keywords/video_intelligence.py | 16 +- .../src/RPA/Cloud/Google/keywords/vision.py | 35 ++-- 17 files changed, 404 insertions(+), 238 deletions(-) diff --git a/docs/source/releasenotes.rst b/docs/source/releasenotes.rst index 02dedf2bd2..4db5435108 100644 --- a/docs/source/releasenotes.rst +++ b/docs/source/releasenotes.rst @@ -30,6 +30,13 @@ Latest versions - Add keyword ``Merge range`` for merging cells in a range. - Add keyword ``Unmerge range`` for unmerging cells in a range. +- Library **RPA.Cloud.Google** (:pr:`1157``, `rpaframework-google`` **9.0.0**): + + - Fix problem with method intellisense in Python development environments. + - Add keyword ``Detect Tables`` for detecting table-like structures in spreadsheet's sheets. + - Add keyword ``Get Sheet Formulas`` for getting formulas from a sheet. + - Add keyword ``To A1 notation`` for converting column number to A1 notation. 
+ 28.3.0 - 22 Feb 2024 -------------------- diff --git a/packages/google/pyproject.toml b/packages/google/pyproject.toml index cd25cc3e24..646fe513ef 100644 --- a/packages/google/pyproject.toml +++ b/packages/google/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "rpaframework-google" -version = "8.2.0" +version = "9.0.0" description = "Google library for RPA Framework" authors = ["RPA Framework "] license = "Apache-2.0" diff --git a/packages/google/src/RPA/Cloud/Google/__init__.py b/packages/google/src/RPA/Cloud/Google/__init__.py index 8d60076a65..08461b0798 100644 --- a/packages/google/src/RPA/Cloud/Google/__init__.py +++ b/packages/google/src/RPA/Cloud/Google/__init__.py @@ -1,9 +1,9 @@ import importlib import logging import os -from robotlibcore import DynamicCore +from .keywords.context import LibraryContext from .keywords import ( AppsScriptKeywords, BaseKeywords, @@ -30,7 +30,21 @@ def import_vault(): return None -class Google(DynamicCore): +class Google( + AppsScriptKeywords, + BaseKeywords, + DocumentAIKeywords, + DriveKeywords, + GmailKeywords, + NaturalLanguageKeywords, + SheetsKeywords, + SpeechToTextKeywords, + StorageKeywords, + TextToSpeechKeywords, + TranslationKeywords, + VideoIntelligenceKeywords, + VisionKeywords, +): # pylint: disable=too-many-ancestors """`Google` is a library for operating with Google API endpoints. 
**Installation** @@ -124,21 +138,34 @@ def __init__( if self.service_account_file is None: self.service_account_file = os.getenv("GOOGLE_APPLICATION_CREDENTIALS") self.secrets_library = import_vault() - - # Register keyword libraries to LibCore - libraries = [ - AppsScriptKeywords(self), - BaseKeywords(self), - DocumentAIKeywords(self), - DriveKeywords(self), - GmailKeywords(self), - NaturalLanguageKeywords(self), - SheetsKeywords(self), - SpeechToTextKeywords(self), - StorageKeywords(self), - TextToSpeechKeywords(self), - TranslationKeywords(self), - VideoIntelligenceKeywords(self), - VisionKeywords(self), - ] - super().__init__(libraries) + ctx = LibraryContext(self) + AppsScriptKeywords.__init__(self, ctx) + BaseKeywords.__init__(self, ctx) + DocumentAIKeywords.__init__(self, ctx) + DriveKeywords.__init__(self, ctx) + GmailKeywords.__init__(self, ctx) + NaturalLanguageKeywords.__init__(self, ctx) + SheetsKeywords.__init__(self, ctx) + SpeechToTextKeywords.__init__(self, ctx) + StorageKeywords.__init__(self, ctx) + TextToSpeechKeywords.__init__(self, ctx) + TranslationKeywords.__init__(self, ctx) + VideoIntelligenceKeywords.__init__(self, ctx) + VisionKeywords.__init__(self, ctx) + + +__all__ = [ + "AppsScriptKeywords", + "BaseKeywords", + "DocumentAIKeywords", + "DriveKeywords", + "GmailKeywords", + "NaturalLanguageKeywords", + "SheetsKeywords", + "SpeechToTextKeywords", + "StorageKeywords", + "TextToSpeechKeywords", + "TranslationKeywords", + "VideoIntelligenceKeywords", + "VisionKeywords", +] diff --git a/packages/google/src/RPA/Cloud/Google/keywords/apps_script.py b/packages/google/src/RPA/Cloud/Google/keywords/apps_script.py index b7142ca293..ebf8eb5af6 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/apps_script.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/apps_script.py @@ -1,12 +1,9 @@ from typing import Optional -from . import ( - LibraryContext, - keyword, -) +from . 
import keyword -class AppsScriptKeywords(LibraryContext): +class AppsScriptKeywords: """Class for Google Apps Script API For more information about Google Apps Script API link_. @@ -15,8 +12,8 @@ class AppsScriptKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.script_service = None @keyword(tags=["init", "apps script"]) def init_apps_script( @@ -38,7 +35,7 @@ def init_apps_script( apps_scopes = ["script.projects", "drive.scripts", "script.external_request"] if scopes: apps_scopes += scopes - self.service = self.init_service( + self.script_service = self.ctx.init_service( service_name="script", api_version="v1", scopes=apps_scopes, @@ -47,7 +44,7 @@ def init_apps_script( use_robocorp_vault=use_robocorp_vault, token_file=token_file, ) - return self.service + return self.script_service @keyword(tags=["apps script"]) def run_script( @@ -74,7 +71,7 @@ def run_script( if parameters: request["parameters"] = [parameters] response = ( - self.service.scripts() + self.script_service.scripts() .run( body=request, scriptId=script_id, diff --git a/packages/google/src/RPA/Cloud/Google/keywords/base.py b/packages/google/src/RPA/Cloud/Google/keywords/base.py index 5004cecceb..4af60704b4 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/base.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/base.py @@ -1,9 +1,12 @@ -from . import LibraryContext, keyword +from . 
import keyword -class BaseKeywords(LibraryContext): +class BaseKeywords: """Base keywords for the Google library""" + def __init__(self, ctx): + self.ctx = ctx + @keyword def set_robocorp_vault( self, diff --git a/packages/google/src/RPA/Cloud/Google/keywords/context.py b/packages/google/src/RPA/Cloud/Google/keywords/context.py index 3f6ce602ca..4061c2945a 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/context.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/context.py @@ -33,30 +33,7 @@ class LibraryContext: def __init__(self, ctx): self.ctx = ctx - - @property - def logger(self): - return self.ctx.logger - - @property - def robocorp_vault_name(self): - return self.ctx.robocorp_vault_name - - @property - def robocorp_vault_secret_key(self): - return self.ctx.robocorp_vault_secret_key - - @property - def use_robocorp_vault(self): - return self.ctx.use_robocorp_vault - - @property - def service_account_file(self): - return self.ctx.service_account_file - - @property - def cloud_auth_type(self): - return self.ctx.cloud_auth_type + self.logger = ctx.logger def get_secret_from_robocorp_vault(self, secret_type="serviceaccount"): if self.ctx.secrets_library is None: @@ -105,7 +82,7 @@ def init_service( """ service = None credentials = None - self.logger.debug("Init service with scopes: %s", scopes) + self.ctx.logger.debug("Init service with scopes: %s", scopes) scopes = [f"https://www.googleapis.com/auth/{scope}" for scope in scopes] if use_robocorp_vault is not None: use_cloud = bool(use_robocorp_vault) @@ -122,17 +99,17 @@ def init_service( save_token, ) elif service_account_file: - self.logger.info("Authenticating with service account file") + self.ctx.logger.info("Authenticating with service account file") credentials = oauth_service_account.Credentials.from_service_account_file( service_account_file, scopes=scopes ) elif token_file: - self.logger.info("Authenticating with oauth token file") + self.ctx.logger.info("Authenticating with oauth token 
file") credentials = self.get_credentials_with_oauth_token( use_cloud, token_file, credentials_file, scopes, save_token ) elif self.ctx.service_account_file: - self.logger.info("Authenticating with service account file") + self.ctx.logger.info("Authenticating with service account file") credentials = oauth_service_account.Credentials.from_service_account_file( self.ctx.service_account_file, scopes=scopes ) @@ -172,24 +149,24 @@ def init_service_with_object( client_object, cloud_auth_type, service_account_file, **kwargs ) elif service_account_file: - self.logger.info("Authenticating with service account file") + self.ctx.logger.info("Authenticating with service account file") service = client_object.from_service_account_json( service_account_file, **kwargs ) elif token_file: - self.logger.info("Authenticating with oauth token file") + self.ctx.logger.info("Authenticating with oauth token file") token_file_location = Path(token_file).absolute() if os.path.exists(token_file_location): with open(token_file_location, "rb") as token: credentials = pickle.loads(token) service = client_object(credentials=credentials, **kwargs) elif self.ctx.service_account_file: - self.logger.info("Authenticating with service account file") + self.ctx.logger.info("Authenticating with service account file") service = client_object.from_service_account_json( self.ctx.service_account_file, **kwargs ) else: - self.logger.info("Authenticating with default client object") + self.ctx.logger.info("Authenticating with default client object") service = client_object(**kwargs) if service is None: @@ -244,7 +221,7 @@ def get_credentials_with_oauth_token( pickle.dumps(credentials) ).decode("utf-8") self.ctx.secrets_library().set_secret(secrets) - self.logger.debug("Credentials refreshed") + self.ctx.logger.debug("Credentials refreshed") if not credentials: raise GoogleOAuthAuthenticationError( "Could not get Google OAuth credentials" @@ -262,7 +239,7 @@ def get_credentials_from_robocorp_vault( ): 
credentials = None if cloud_auth_type == "serviceaccount": - self.logger.info( + self.ctx.logger.info( "Authenticating with service account file from Robocorp Vault" ) service_account_file = self.get_secret_from_robocorp_vault("serviceaccount") @@ -270,7 +247,9 @@ def get_credentials_from_robocorp_vault( service_account_file, scopes=scopes ) else: - self.logger.info("Authenticating with oauth token file from Robocorp Vault") + self.ctx.logger.info( + "Authenticating with oauth token file from Robocorp Vault" + ) credentials = self.get_credentials_with_oauth_token( True, token_file, @@ -286,7 +265,7 @@ def get_service_from_robocorp_vault( service = None if cloud_auth_type == "serviceaccount": try: - self.logger.info( + self.ctx.logger.info( "Authenticating with service account file from Robocorp Vault" ) service_account_file = self.get_secret_from_robocorp_vault( @@ -299,7 +278,9 @@ def get_service_from_robocorp_vault( if service_account_file: os.remove(service_account_file) else: - self.logger.info("Authenticating with oauth token file from Robocorp Vault") + self.ctx.logger.info( + "Authenticating with oauth token file from Robocorp Vault" + ) token = self.get_secret_from_robocorp_vault("token") credentials = pickle.loads(base64.b64decode(token)) service = client_object(credentials=credentials, **kwargs) diff --git a/packages/google/src/RPA/Cloud/Google/keywords/document_ai.py b/packages/google/src/RPA/Cloud/Google/keywords/document_ai.py index 8f137365be..60ca3194e5 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/document_ai.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/document_ai.py @@ -5,10 +5,10 @@ from google.api_core.client_options import ClientOptions from google.cloud import documentai_v1 as documentai -from . import LibraryContext, keyword +from . import keyword -class DocumentAIKeywords(LibraryContext): +class DocumentAIKeywords: """Keywords for Google Cloud Document AI service. 
Added on **rpaframework-google** version: 6.1.1 @@ -25,8 +25,8 @@ class DocumentAIKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.ai_service = None @keyword(name="Init Document AI", tags=["init", "document ai"]) def init_document_ai( @@ -73,15 +73,15 @@ def init_document_ai( api_endpoint=f"{region.lower()}-documentai.googleapis.com" ) kwargs["client_options"] = opts - self.logger.info(f"Using Document AI from '{region.upper()}' region") - self.service = self.init_service_with_object( + self.ctx.logger.info(f"Using Document AI from '{region.upper()}' region") + self.ai_service = self.ctx.init_service_with_object( documentai.DocumentProcessorServiceClient, service_account, use_robocorp_vault, token_file, **kwargs, ) - return self.service + return self.ai_service @keyword(tags=["document ai"]) def process_document( @@ -141,21 +141,23 @@ def process_document( for lang in languages: print(lang) """ # noqa: E501 - name = self.service.processor_path(project_id, region, processor_id) + name = self.ai_service.processor_path(project_id, region, processor_id) # Read the file into memory with open(file_path, "rb") as binary: binary_content = binary.read() mime = mime_type or mimetypes.guess_type(file_path)[0] - self.logger.info(f"Processing document '{file_path}' with mimetype '{mime}'") + self.ctx.logger.info( + f"Processing document '{file_path}' with mimetype '{mime}'" + ) # Load Binary Data into Document AI RawDocument Object raw_document = documentai.RawDocument(content=binary_content, mime_type=mime) # Configure the process request request = documentai.ProcessRequest(name=name, raw_document=raw_document) - result = self.service.process_document(request=request) + result = self.ai_service.process_document(request=request) document = result.document return document @@ -314,10 +316,10 @@ def list_processors(self, project_id: str, region: str) -> List: print(f"Processor type: {p.type_}") 
print(f"Processor name: {p.display_name}") """ - parent_value = self.service.common_location_path(project_id, region) + parent_value = self.ai_service.common_location_path(project_id, region) # Initialize request argument(s) request = documentai.ListProcessorsRequest( parent=parent_value, ) - processor_list = self.service.list_processors(request=request) + processor_list = self.ai_service.list_processors(request=request) return processor_list diff --git a/packages/google/src/RPA/Cloud/Google/keywords/drive.py b/packages/google/src/RPA/Cloud/Google/keywords/drive.py index 39c88d1ebe..ca0d5be1e1 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/drive.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/drive.py @@ -8,7 +8,7 @@ from apiclient.errors import HttpError from apiclient.http import MediaFileUpload, MediaIoBaseDownload -from . import LibraryContext, keyword, UpdateAction +from . import keyword, UpdateAction from .enums import DriveRole, DriveType, to_drive_role, to_drive_type @@ -16,7 +16,7 @@ class GoogleDriveError(Exception): """Raised with errors in Drive API""" -class DriveKeywords(LibraryContext): +class DriveKeywords: """Class for Google Drive API For more information about Google Drive API link_. 
@@ -25,8 +25,8 @@ class DriveKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.drive_service = None @keyword(tags=["init", "drive"]) def init_drive( @@ -54,7 +54,7 @@ def init_drive( ] if scopes: drive_scopes += scopes - self.service = self.init_service( + self.drive_service = self.ctx.init_service( service_name="drive", api_version="v3", scopes=drive_scopes, @@ -63,7 +63,7 @@ def init_drive( use_robocorp_vault=use_robocorp_vault, token_file=token_file, ) - return self.service + return self.drive_service @keyword(tags=["drive"]) def upload_drive_file( @@ -107,7 +107,7 @@ def upload_drive_file( raise GoogleDriveError("Filename '%s' does not exist" % filename) query_string = f"name = '{filepath.name}' and '{folder_id}' in parents" - self.logger.debug("Upload query_string: '%s'" % query_string) + self.ctx.logger.debug("Upload query_string: '%s'" % query_string) target_file = self.search_drive_files(query=query_string, recurse=True) guess_mimetype = mimetypes.guess_type(str(filepath.absolute())) file_mimetype = guess_mimetype[0] if guess_mimetype else "*/*" @@ -119,15 +119,15 @@ def upload_drive_file( "parents": [folder_id], "mimeType": file_mimetype, } - self.logger.debug("Upload file_metadata: '%s'" % file_metadata) + self.ctx.logger.debug("Upload file_metadata: '%s'" % file_metadata) if len(target_file) == 1 and overwrite: - self.logger.info("Overwriting file '%s' with new content", filename) + self.ctx.logger.info("Overwriting file '%s' with new content", filename) return self._file_update(target_file, media) elif len(target_file) == 1 and not overwrite: - self.logger.warn("Not uploading new copy of file '%s'", filepath.name) + self.ctx.logger.warn("Not uploading new copy of file '%s'", filepath.name) return target_file[0]["id"] elif len(target_file) > 1: - self.logger.warn( + self.ctx.logger.warn( "Drive already contains '%s' copies of file '%s'. Not uploading again." 
% (len(target_file), filepath.name) ) @@ -138,7 +138,7 @@ def upload_drive_file( def _file_create(self, file_metadata, media): try: result = ( - self.service.files() + self.drive_service.files() .create( body=file_metadata, media_body=media, @@ -153,7 +153,7 @@ def _file_create(self, file_metadata, media): def _file_update(self, target_file, media): try: result = ( - self.service.files() + self.drive_service.files() .update(fileId=target_file[0]["id"], media_body=media, fields="id") .execute() ) @@ -163,7 +163,7 @@ def _file_update(self, target_file, media): def _download_with_fileobject(self, file_object): try: - request = self.service.files().get_media(fileId=file_object["id"]) + request = self.drive_service.files().get_media(fileId=file_object["id"]) except HttpError as err: raise GoogleDriveError(str(err)) from err fh = BytesIO() @@ -221,12 +221,12 @@ def download_drive_files( current_time = time.time() files_downloaded.append(f["name"]) if limit and len(files_downloaded) >= limit: - self.logger.info( + self.ctx.logger.info( "Drive download limit %s reached. Stopping the download.", limit ) break if timeout and (current_time - start_time) > float(timeout): - self.logger.info( + self.ctx.logger.info( "Drive download timeout %s seconds reached. 
" "Stopping the download.", timeout, @@ -323,10 +323,10 @@ def _drive_files_update(self, file_id: str, action: UpdateAction): else: # TODO: mypy should handle enum exhaustivity validation raise ValueError(f"Unsupported update action: {action}") - self.logger.debug(body) + self.ctx.logger.debug(body) try: updated_file = ( - self.service.files().update(fileId=file_id, body=body).execute() + self.drive_service.files().update(fileId=file_id, body=body).execute() ) except HttpError as err: raise GoogleDriveError(str(err)) from err @@ -368,10 +368,10 @@ def delete_drive_file( delete_count = 0 for tf in target_files: try: - self.service.files().delete(fileId=tf).execute() + self.drive_service.files().delete(fileId=tf).execute() except HttpError as err: if suppress_errors: - self.logger.warn(str(err)) + self.ctx.logger.warn(str(err)) else: raise GoogleDriveError(str(err)) from err delete_count += 1 @@ -403,7 +403,7 @@ def get_drive_folder_id( if folder is None: try: drive_file = ( - self.service.files().get(fileId="root", fields="id").execute() + self.drive_service.files().get(fileId="root", fields="id").execute() ) except HttpError as err: raise GoogleDriveError(str(err)) from err @@ -416,7 +416,7 @@ def get_drive_folder_id( if len(folders) == 1: drive_file = folders[0] # .get("id", None) else: - self.logger.info( + self.ctx.logger.info( "Found %s directories with name '%s'" % (len(folders), folder) ) if drive_file: @@ -470,11 +470,15 @@ def move_drive_file( "Unable to find target folder: '%s'" % (target if target else "root") ) for tf in target_files: - file = self.service.files().get(fileId=tf["id"], fields="parents").execute() + file = ( + self.drive_service.files() + .get(fileId=tf["id"], fields="parents") + .execute() + ) previous_parents = ",".join(file.get("parents")) try: result_file = ( - self.service.files() + self.drive_service.files() .update( fileId=tf["id"], addParents=target_parent, @@ -542,8 +546,8 @@ def search_drive_files( if page_token: 
parameters["pageToken"] = page_token try: - self.logger.debug("Searching with parameters: '%s'" % parameters) - response = self.service.files().list(**parameters).execute() + self.ctx.logger.debug("Searching with parameters: '%s'" % parameters) + response = self.drive_service.files().list(**parameters).execute() for file_details in response.get("files", []): file_dict = self._drive_file_details_into_file_dict(file_details) filelist.append(file_dict) @@ -581,13 +585,13 @@ def _drive_file_details_into_file_dict(self, details): kind = details.get("kind") mimetype = details.get("mimeType") is_folder = mimetype == "application/vnd.google-apps.folder" + # fmt: off folder_id = ( file_id if mimetype == "application/vnd.google-apps.folder" - else parents[0] - if parents and len(parents) > 0 - else None + else parents[0] if parents and len(parents) > 0 else None ) + # fmt: on file_link = ( None if mimetype == "application/vnd.google-apps.folder" @@ -642,7 +646,7 @@ def create_drive_directory( folder_id = self.get_drive_folder_id(folder, parent_folder=parent_folder) if folder_id: - self.logger.info( + self.ctx.logger.info( "Folder '%s' already exists. 
Not creating new one.", folder_id ) return self._folder_response(folder_id) @@ -657,7 +661,9 @@ def create_drive_directory( file_metadata["parents"] = [parent_folder_id] try: added_folder = ( - self.service.files().create(body=file_metadata, fields="id").execute() + self.drive_service.files() + .create(body=file_metadata, fields="id") + .execute() ) return self._folder_response(added_folder["id"]) except HttpError as err: @@ -700,7 +706,7 @@ def export_drive_file( if len(target_files) != 1: raise ValueError("Did not find the Google Drive file to export") try: - request = self.service.files().export( + request = self.drive_service.files().export( fileId=target_files[0], mimeType=mimetype ) except HttpError as err: @@ -812,7 +818,9 @@ def add_drive_share( request_parameters["emailMessage"] = notification_message try: - response = self.service.permissions().create(**request_parameters).execute() + response = ( + self.drive_service.permissions().create(**request_parameters).execute() + ) return {"file_id": target_file[0], "permission_id": response["id"]} except HttpError as err: raise GoogleDriveError(str(err)) from err @@ -861,20 +869,20 @@ def remove_drive_share_by_permission_id( if not target_file: raise GoogleDriveError("Did not find target file") - self.logger.info( + self.ctx.logger.info( "Removing permission id '%s' for file_id '%s'" % (permission_id, target_file[0]) ) response = None try: response = ( - self.service.permissions() + self.drive_service.permissions() .delete(fileId=target_file[0], permissionId=permission_id) .execute() ) except HttpError as err: if suppress_errors: - self.logger.warn(str(err)) + self.ctx.logger.warn(str(err)) else: raise GoogleDriveError(str(err)) from err return response @@ -939,7 +947,7 @@ def remove_drive_share_by_criteria( for tf in target_files: file_permissions_removed = [] if "permissions" in tf and tf["permissions"]: - self.logger.info( + self.ctx.logger.info( "Removing shares from file '%s' id '%s'" % (tf["name"], 
tf["id"]) ) for p in tf["permissions"]: @@ -976,13 +984,13 @@ def _remove_file_permission( self, drive_file, permission, permissions_removed, suppress_errors ): try: - self.service.permissions().delete( + self.drive_service.permissions().delete( fileId=drive_file["id"], permissionId=permission["id"] ).execute() permissions_removed.append(permission) except HttpError as err: if suppress_errors: - self.logger.warn(str(err)) + self.ctx.logger.warn(str(err)) else: raise GoogleDriveError(str(err)) from err @@ -1016,7 +1024,7 @@ def remove_all_drive_shares( permissions_removed = [] for tf in target_files: if "permissions" in tf and tf["permissions"]: - self.logger.info( + self.ctx.logger.info( "Removing shares from file '%s' id '%s'" % (tf["name"], tf["id"]) ) for p in tf["permissions"]: @@ -1047,12 +1055,12 @@ def get_drive_file_by_id(self, file_id: str, suppress_errors: bool = False) -> D response = None try: raw_response = ( - self.service.files().get(fileId=file_id, fields="*").execute() + self.drive_service.files().get(fileId=file_id, fields="*").execute() ) response = self._drive_file_details_into_file_dict(raw_response) except HttpError as err: if suppress_errors: - self.logger.warn(str(err)) + self.ctx.logger.warn(str(err)) else: raise GoogleDriveError(str(err)) from err return response diff --git a/packages/google/src/RPA/Cloud/Google/keywords/gmail.py b/packages/google/src/RPA/Cloud/Google/keywords/gmail.py index e042cae309..9f40091cd5 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/gmail.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/gmail.py @@ -13,7 +13,7 @@ from googleapiclient import errors -from . import LibraryContext, keyword +from . 
import keyword def get_size_format(b, factor=1024, suffix="B"): @@ -35,7 +35,7 @@ def clean(text): return "".join(c if c.isalnum() else "_" for c in text) -class GmailKeywords(LibraryContext): +class GmailKeywords: """Class for Google Gmail API **Note:** The Gmail API does not work with _service accounts_ @@ -46,8 +46,8 @@ class GmailKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.gmail_service = None @keyword(tags=["init", "gmail"]) def init_gmail( @@ -69,7 +69,7 @@ def init_gmail( gmail_scopes = ["gmail.send", "gmail.compose", "gmail.modify", "gmail.labels"] if scopes: gmail_scopes = scopes - self.service = self.init_service( + self.gmail_service = self.ctx.init_service( service_name="gmail", api_version="v1", scopes=gmail_scopes, @@ -78,7 +78,7 @@ def init_gmail( use_robocorp_vault=use_robocorp_vault, token_file=token_file, ) - return self.service + return self.gmail_service def create_message( self, @@ -116,7 +116,7 @@ def add_attachment_to_message(self, mimeMessage, attachment): if content_type is None or encoding is not None: content_type = "application/octet-stream" main_type, sub_type = content_type.split("/", 1) - self.logger.debug( + self.ctx.logger.debug( f"Adding attachment of main_type: {main_type} and sub_type: {sub_type}" ) mime_type_mapping = { @@ -167,20 +167,20 @@ def send_message( ... body of the message ... 
${attachments} """ - if not self.service: + if not self.gmail_service: raise AssertionError("Gmail service has not been initialized") attachments = attachments or [] message = self.create_message(to, subject, message_text, attachments, html) try: response = ( - self.service.users() + self.gmail_service.users() .messages() .send(userId=sender, body=message) .execute() ) - self.logger.debug("Message Id: %s" % response["id"]) + self.ctx.logger.debug("Message Id: %s" % response["id"]) except errors.HttpError as he: - self.logger.warning(str(he)) + self.ctx.logger.warning(str(he)) raise he return response @@ -238,13 +238,13 @@ def handle_mimetypes(self, parsed_parts, part, msg, folder_name): if "attachment" in part_header_value: # we get the attachment ID # and make another request to get the attachment itself - self.logger.info( + self.ctx.logger.info( "Saving the file: %s, size:%s" % (filename, get_size_format(filesize)) ) attachment_id = body.get("attachmentId") attachment = ( - self.service.users() + self.gmail_service.users() .messages() .attachments() .get( @@ -305,13 +305,15 @@ def list_messages( folder_name = Path(folder_name) if folder_name else Path().absolute() messages = [] try: - response = self.service.users().messages().list(**parameters).execute() + response = ( + self.gmail_service.users().messages().list(**parameters).execute() + ) message_ids = [ m["id"] for m in response["messages"] if "messages" in response.keys() ] for message_id in message_ids: response = ( - self.service.users() + self.gmail_service.users() .messages() .get(userId=user_id, id=message_id) .execute() @@ -329,7 +331,7 @@ def list_messages( message_dict["parts"] = parsed_parts messages.append(message_dict) except errors.HttpError as he: - self.logger.warning(str(he)) + self.ctx.logger.warning(str(he)) raise he return messages diff --git a/packages/google/src/RPA/Cloud/Google/keywords/natural_language.py b/packages/google/src/RPA/Cloud/Google/keywords/natural_language.py index 
142d3f390c..d7d5168a56 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/natural_language.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/natural_language.py @@ -2,15 +2,15 @@ from google.cloud import language_v1 -from . import LibraryContext, keyword, TextType, to_texttype +from . import keyword, TextType, to_texttype -class NaturalLanguageKeywords(LibraryContext): +class NaturalLanguageKeywords: """Keywords for Google Cloud Natural Language API""" def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.lang_service = None @keyword(tags=["init", "natural language"]) def init_natural_language( @@ -25,13 +25,13 @@ def init_natural_language( :param use_robocorp_vault: use credentials in `Robocorp Vault` :param token_file: file path to token file """ - self.service = self.init_service_with_object( + self.lang_service = self.ctx.init_service_with_object( language_v1.LanguageServiceClient, service_account, use_robocorp_vault, token_file, ) - return self.service + return self.lang_service @keyword(tags=["natural language"]) def analyze_sentiment( @@ -119,12 +119,12 @@ def _analyze_handler( document = language_v1.Document(**parameters) if analyze_method == "classify": - response = self.service.classify_text(document=document) + response = self.lang_service.classify_text(document=document) elif analyze_method == "sentiment": # Available values: NONE, UTF8, UTF16, UTF32 # encoding_type = enums.EncodingType.UTF8 - response = self.service.analyze_sentiment( + response = self.lang_service.analyze_sentiment( document=document, encoding_type="UTF8" ) - self.write_json(json_file, response) + self.ctx.write_json(json_file, response) return response diff --git a/packages/google/src/RPA/Cloud/Google/keywords/sheets.py b/packages/google/src/RPA/Cloud/Google/keywords/sheets.py index d1d909286e..d099b81134 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/sheets.py +++ 
b/packages/google/src/RPA/Cloud/Google/keywords/sheets.py @@ -1,17 +1,14 @@ from typing import Dict, List, Optional -from . import ( - LibraryContext, - keyword, -) +from . import keyword -class SheetsKeywords(LibraryContext): +class SheetsKeywords: """Keywords for Google Sheets operations""" def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.sheets_service = None @keyword(tags=["init", "sheets"]) def init_sheets( @@ -33,7 +30,7 @@ def init_sheets( sheets_scopes = ["drive", "drive.file", "spreadsheets"] if scopes: sheets_scopes += scopes - self.service = self.init_service( + self.sheets_service = self.ctx.init_service( service_name="sheets", api_version="v4", scopes=sheets_scopes, @@ -42,7 +39,7 @@ def init_sheets( use_robocorp_vault=use_robocorp_vault, token_file=token_file, ) - return self.service + return self.sheets_service @keyword(tags=["sheets"]) def create_spreadsheet(self, title: str) -> str: @@ -70,7 +67,7 @@ def create_spreadsheet(self, title: str) -> str: data = {"properties": {"title": title}} spreadsheet = ( - self.service.spreadsheets() + self.sheets_service.spreadsheets() .create(body=data, fields="spreadsheetId") .execute() ) @@ -114,7 +111,7 @@ def insert_sheet_values( """ resource = {"majorDimension": major_dimension, "values": values} return ( - self.service.spreadsheets() + self.sheets_service.spreadsheets() .values() .append( spreadsheetId=spreadsheet_id, @@ -171,7 +168,7 @@ def update_sheet_values( """ resource = {"majorDimension": major_dimension, "values": values} return ( - self.service.spreadsheets() + self.sheets_service.spreadsheets() .values() .update( spreadsheetId=spreadsheet_id, @@ -221,7 +218,7 @@ def get_sheet_values( "dateTimeRenderOption": datetime_render_option, } - return self.service.spreadsheets().values().get(**parameters).execute() + return self.sheets_service.spreadsheets().values().get(**parameters).execute() @keyword(tags=["sheets"]) def get_all_sheet_values( @@ -276,7 +273,7 
@@ def get_all_sheet_values( rows = found_sheet["rows"] parameters["range"] = f"{sheet_title}!A1:{target_column}{rows}" - return self.service.spreadsheets().values().get(**parameters).execute() + return self.sheets_service.spreadsheets().values().get(**parameters).execute() @keyword(tags=["sheets"]) def clear_sheet_values(self, spreadsheet_id: str, sheet_range: str) -> Dict: @@ -301,7 +298,7 @@ def clear_sheet_values(self, spreadsheet_id: str, sheet_range: str) -> Dict: ${result}= Clear Sheet Values ${SPREADSHEET_ID} A1:C1 """ return ( - self.service.spreadsheets() + self.sheets_service.spreadsheets() .values() .clear( spreadsheetId=spreadsheet_id, @@ -342,7 +339,7 @@ def copy_spreadsheet(self, spreadsheet_id: str, target_spreadsheet_id: str) -> D "destination_spreadsheet_id": target_spreadsheet_id, } return ( - self.service.spreadsheets() + self.sheets_service.spreadsheets() .sheets() .copyTo( spreadsheetId=spreadsheet_id, @@ -384,7 +381,11 @@ def get_spreadsheet_details(self, spreadsheet_id: str) -> Dict: :param spreadsheet_id: ID of the spreadsheet :return: operation result as an dictionary """ - return self.service.spreadsheets().get(spreadsheetId=spreadsheet_id).execute() + return ( + self.sheets_service.spreadsheets() + .get(spreadsheetId=spreadsheet_id) + .execute() + ) @keyword(tags=["sheets"]) def to_column_letter(self, number: int): @@ -653,10 +654,160 @@ def generic_spreadsheet_batch_update(self, spreadsheet_id: str, body: Dict): ${result}= Generic Spreadsheet Batch Update ${SPREADSHEET_ID} ${body} """ # noqa: E501 return ( - self.service.spreadsheets() + self.sheets_service.spreadsheets() .batchUpdate( spreadsheetId=spreadsheet_id, body=body, ) .execute() ) + + @keyword(tags=["sheets"]) + def detect_tables(self, spreadsheet_id: str, sheet_name: str = None): + """Detect tables in the sheet. 
+ + :param spreadsheet_id: id of the spreadsheet + :param sheet_name: name of the sheet, or leave None for all sheets + :return: tables arranged by sheets + """ + if sheet_name: + sheets = [sheet_name] + else: + result = self.get_spreadsheet_basic_information(spreadsheet_id) + sheets = [sheet["title"] for sheet in result["sheets"]] + + tables = {} + for sheet in sheets: + tables[sheet] = self._detect_tables_in_sheet(spreadsheet_id, sheet) + self.ctx.logger.info( + f"found {len(tables[sheet])} table(s) in sheet: {sheet}" + ) + + return tables + + def _detect_tables_in_sheet(self, spreadsheet_id: str, sheet_name: str): + result = self.get_all_sheet_values(spreadsheet_id, sheet_name) + rows = [] + if "values" in result.keys(): + rows = result["values"] + + # Identify header rows and their columns. + areas = self._identify_header_rows_and_columns(rows) + return self._combine_areas(areas) + + def _combine_areas(self, areas): + combined = [] + + for item in areas: + row = item["row"] + column = item["column"] + size = item["size"] + if ( + combined + and row == combined[-1]["row"] + and column == combined[-1]["column_end"] + 1 + ): + # If the current item is in the same row and adjacent column, merge it + combined[-1]["headers"].append(item["header"]) + combined[-1]["column_end"] = column + combined[-1][ + "range" + ] = f'{combined[-1]["start"]}:{self.to_A1_notation(column, row+size-1)}' + + else: + # Otherwise, start a new entry + start = self.to_A1_notation(column, row) + combined.append( + { + "start": start, + "range": f"{start}:{self.to_A1_notation(column, row+size-1)}", + "column_end": column, + "row": row, + "headers": [item["header"]], + "size": size, + } + ) + + return [ + { + k: v + for k, v in d.items() + if k not in ["column_end", "start", "end", "row"] + } + for d in combined + ] + + def _identify_header_rows_and_columns(self, rows): + areas = [] + header_indices = [] + for row_idx, row in enumerate(rows): + for col_idx, cell in enumerate(row): + if cell: # 
Found a non-empty cell, possibly a header.
+                    # Check if this cell is a new segment header.
+                    is_new_header = True
+                    for header_index in header_indices:
+                        if header_index[1] == col_idx:
+                            is_new_header = False
+                            break
+                    if is_new_header:
+                        header_indices.append((row_idx, col_idx))
+
+        # For each header, determine the segment size.
+        for header_idx, header_col in header_indices:
+            segment_size = 0
+            row_idx = header_idx
+            while row_idx < len(rows) and any(
+                rows[row_idx][header_col : header_col + 1]
+            ):
+                segment_size += 1
+                row_idx += 1
+            areas.append(
+                {
+                    "column": header_col + 1,
+                    "row": header_idx + 1,
+                    "header": rows[header_idx][header_col],
+                    "size": segment_size,
+                }
+            )
+        sorted_areas = sorted(areas, key=lambda x: (x["row"], x["column"]))
+        return sorted_areas
+
+    @keyword(tags=["sheets"])
+    def get_sheet_formulas(self, spreadsheet_id: str, sheet_name: str):
+        """Get formulas from the sheet.
+
+        :param spreadsheet_id: id of the spreadsheet
+        :param sheet_name: name of the sheet
+        :return: list of dictionaries, each containing the formula cell's A1 "range" and its "formula" string
+        """
+        result = self.get_all_sheet_values(
+            spreadsheet_id, sheet_name, value_render_option="FORMULA"
+        )
+        rows = []
+        if "values" in result.keys():
+            rows = result["values"]
+
+        formula_cells = [
+            (row_idx, col_idx)
+            for row_idx, row in enumerate(rows)
+            for col_idx, cell in enumerate(row)
+            if isinstance(cell, str) and cell.startswith("=")
+        ]
+        formula_cells_dict = [
+            {"range": self.to_A1_notation(col + 1, row + 1), "formula": rows[row][col]}
+            for row, col in formula_cells
+        ]
+        return formula_cells_dict
+
+    @keyword(tags=["sheets"])
+    def to_A1_notation(self, column_number: int, row_number: int):
+        """Convert a column number and a row number into a cell reference. 
+ + :param column_number: column number to convert + :param row_number: row number to convert + :return: cell reference string + """ + if row_number < 1 or column_number < 1: + raise ValueError("Number must be greater than 0") + + return f"{self.to_column_letter(column_number)}{row_number}" diff --git a/packages/google/src/RPA/Cloud/Google/keywords/speech_to_text.py b/packages/google/src/RPA/Cloud/Google/keywords/speech_to_text.py index dd7421ae29..273f3c0893 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/speech_to_text.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/speech_to_text.py @@ -2,10 +2,7 @@ from google.cloud import speech from google.cloud.speech_v1.types import RecognitionConfig, RecognitionAudio -from . import ( - LibraryContext, - keyword, -) +from . import keyword ENCODING = { "AMR": RecognitionConfig.AudioEncoding.AMR, @@ -19,7 +16,7 @@ } -class SpeechToTextKeywords(LibraryContext): +class SpeechToTextKeywords: """Class for Google Cloud Speech-To-Text API Possible input audio encodings: @@ -39,8 +36,8 @@ class SpeechToTextKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.speech_service = None @keyword(tags=["init", "speech to text"]) def init_speech_to_text( @@ -55,10 +52,10 @@ def init_speech_to_text( :param use_robocorp_vault: use credentials in `Robocorp Vault` :param token_file: file path to token file """ - self.service = self.init_service_with_object( + self.speech_service = self.ctx.init_service_with_object( speech.SpeechClient, service_account, use_robocorp_vault, token_file ) - return self.service + return self.speech_service @keyword(tags=["speech to text"]) def recognize_text_from_audio( @@ -90,7 +87,7 @@ def recognize_text_from_audio( """ audio = self.set_audio_type(audio_file, audio_uri) parameters = {"use_enhanced": True} - # audio_encoding = ENCODING["UNSPECIFIED"] + parameters["encoding"] = ENCODING["UNSPECIFIED"] if encoding and 
encoding.upper() in ENCODING.keys(): parameters["encoding"] = ENCODING[encoding.upper()] if sample_rate: @@ -100,7 +97,7 @@ def recognize_text_from_audio( if audio_channel_count: parameters["audio_channel_count"] = audio_channel_count config = RecognitionConfig(**parameters) # pylint: disable=E1101 - rec = self.service.recognize(config=config, audio=audio) + rec = self.speech_service.recognize(config=config, audio=audio) return rec.results def set_audio_type(self, audio_file, audio_uri): diff --git a/packages/google/src/RPA/Cloud/Google/keywords/storage.py b/packages/google/src/RPA/Cloud/Google/keywords/storage.py index f8b731aa46..093dc25f8c 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/storage.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/storage.py @@ -2,13 +2,10 @@ from google.cloud import storage -from . import ( - LibraryContext, - keyword, -) +from . import keyword -class StorageKeywords(LibraryContext): +class StorageKeywords: """Class for Google Cloud Storage API and Google Cloud Storage JSON API @@ -18,8 +15,8 @@ class StorageKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.storage_service = None @keyword(tags=["init", "storage"]) def init_storage( @@ -34,10 +31,10 @@ def init_storage( :param use_robocorp_vault: use credentials in `Robocorp Vault` :param token_file: file path to token file """ - self.service = self.init_service_with_object( + self.storage_service = self.ctx.init_service_with_object( storage.Client, service_account, use_robocorp_vault, token_file ) - return self.service + return self.storage_service @keyword(tags=["storage"]) def create_storage_bucket(self, bucket_name: str) -> Dict: @@ -54,7 +51,7 @@ def create_storage_bucket(self, bucket_name: str) -> Dict: ${result}= Create Storage Bucket visionfolder """ - bucket = self.service.create_bucket(bucket_name) + bucket = self.storage_service.create_bucket(bucket_name) return bucket 
@keyword(tags=["storage"]) @@ -94,7 +91,7 @@ def get_storage_bucket(self, bucket_name: str) -> Dict: ${result}= Get Bucket visionfolder """ - bucket = self.service.get_bucket(bucket_name) + bucket = self.storage_service.get_bucket(bucket_name) return bucket @keyword(tags=["storage"]) @@ -114,7 +111,7 @@ def list_storage_buckets(self) -> List: Log ${bucket} END """ - return list(self.service.list_buckets()) + return list(self.storage_service.list_buckets()) @keyword(tags=["storage"]) def delete_storage_files(self, bucket_name: str, files: Any) -> List: @@ -254,7 +251,7 @@ def download_storage_files(self, bucket_name: str, files: Any) -> List: if blob: with open(filename, "wb") as f: blob.download_to_file(f) - self.logger.info( + self.ctx.logger.info( "Downloaded object %s from Google to filepath %s", object_name, filename, @@ -268,7 +265,7 @@ def download_storage_files(self, bucket_name: str, files: Any) -> List: if blob: with open(filename, "wb") as f: blob.download_to_file(f) - self.logger.info( + self.ctx.logger.info( "Downloaded object %s from Google to filepath %s", filename, filename, diff --git a/packages/google/src/RPA/Cloud/Google/keywords/text_to_speech.py b/packages/google/src/RPA/Cloud/Google/keywords/text_to_speech.py index 42fb19579c..16a544890f 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/text_to_speech.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/text_to_speech.py @@ -7,13 +7,10 @@ SynthesisInput, ) -from . import ( - LibraryContext, - keyword, -) +from . import keyword -class TextToSpeechKeywords(LibraryContext): +class TextToSpeechKeywords: """Class for Google Cloud Text-to-Speech API Link to `Text To Speech PyPI`_ page. 
@@ -22,8 +19,8 @@ class TextToSpeechKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.text_service = None @keyword(tags=["init", "text to speech"]) def init_text_to_speech( @@ -38,13 +35,13 @@ def init_text_to_speech( :param use_robocorp_vault: use credentials in `Robocorp Vault` :param token_file: file path to token file """ - self.service = self.init_service_with_object( + self.text_service = self.ctx.init_service_with_object( texttospeech_v1.TextToSpeechClient, service_account, use_robocorp_vault, token_file, ) - return self.service + return self.text_service @keyword(tags=["text to speech"]) def list_supported_voices(self, language_code: str = None) -> List: @@ -62,9 +59,9 @@ def list_supported_voices(self, language_code: str = None) -> List: ${result}= List Supported Voices en-US """ if language_code: - voices = self.service.list_voices(language_code) + voices = self.text_service.list_voices(language_code) else: - voices = self.service.list_voices() + voices = self.text_service.list_voices() return voices.voices @keyword(tags=["text to speech"]) @@ -101,7 +98,7 @@ def synthesize_speech( language_code=language, name=name, ssml_gender=gender ) audio_config = AudioConfig(audio_encoding=encoding) - response = self.service.synthesize_speech( + response = self.text_service.synthesize_speech( request={ "input": synth_input, "voice": voice_selection, diff --git a/packages/google/src/RPA/Cloud/Google/keywords/translation.py b/packages/google/src/RPA/Cloud/Google/keywords/translation.py index 4a61d852dc..eb30293d73 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/translation.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/translation.py @@ -2,10 +2,10 @@ from google.cloud import translate_v3 -from . import LibraryContext, keyword, TextType, to_texttype +from . 
import keyword, TextType, to_texttype -class TranslationKeywords(LibraryContext): +class TranslationKeywords: """Class for Google Cloud Translation API Link to `Translation PyPI`_ page. @@ -14,8 +14,8 @@ class TranslationKeywords(LibraryContext): """ def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.translation_service = None self.project_id = None @keyword(tags=["init", "translation"]) @@ -34,13 +34,13 @@ def init_translation( :param token_file: file path to token file """ self.project_id = project_identifier - self.service = self.init_service_with_object( + self.translation_service = self.ctx.init_service_with_object( translate_v3.TranslationServiceClient, service_account, use_robocorp_vault, token_file, ) - return self.service + return self.translation_service @keyword(tags=["translation"]) def translate( @@ -79,5 +79,5 @@ def translate( if mime_type: mimetype = to_texttype(mime_type) parameters["mime_type"] = mimetype - response = self.service.translate_text(**parameters) + response = self.translation_service.translate_text(**parameters) return response diff --git a/packages/google/src/RPA/Cloud/Google/keywords/video_intelligence.py b/packages/google/src/RPA/Cloud/Google/keywords/video_intelligence.py index 0279375c7d..508e4f2861 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/video_intelligence.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/video_intelligence.py @@ -2,15 +2,15 @@ from google.cloud import videointelligence -from . import LibraryContext, keyword, to_feature +from . 
import keyword, to_feature -class VideoIntelligenceKeywords(LibraryContext): +class VideoIntelligenceKeywords: """Keywords for Google Video Intelligence API""" def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.video_service = None @keyword(tags=["init", "video intelligence"]) def init_video_intelligence( @@ -25,13 +25,13 @@ def init_video_intelligence( :param use_robocorp_vault: use credentials in `Robocorp Vault` :param token_file: file path to token file """ - self.service = self.init_service_with_object( + self.video_service = self.ctx.init_service_with_object( videointelligence.VideoIntelligenceServiceClient, service_account, use_robocorp_vault, token_file, ) - return self.service + return self.video_service @keyword(tags=["video intelligence"]) def annotate_video( @@ -97,7 +97,7 @@ def annotate_video( if output_uri: parameters["output_uri"] = output_uri - operation = self.service.annotate_video(request=parameters) + operation = self.video_service.annotate_video(request=parameters) result = operation.result(timeout=timeout) - self.write_json(json_file, result) + self.ctx.write_json(json_file, result) return result diff --git a/packages/google/src/RPA/Cloud/Google/keywords/vision.py b/packages/google/src/RPA/Cloud/Google/keywords/vision.py index f7d9bdb843..90703626d2 100644 --- a/packages/google/src/RPA/Cloud/Google/keywords/vision.py +++ b/packages/google/src/RPA/Cloud/Google/keywords/vision.py @@ -1,18 +1,15 @@ from typing import Dict, Optional from google.cloud import vision -from . import ( - LibraryContext, - keyword, -) +from . 
import keyword -class VisionKeywords(LibraryContext): +class VisionKeywords: """Keywords for Google Vision operations""" def __init__(self, ctx): - super().__init__(ctx) - self.service = None + self.ctx = ctx + self.vision_service = None @keyword(tags=["init", "vision"]) def init_vision( @@ -27,13 +24,13 @@ def init_vision( :param use_robocorp_vault: use credentials in `Robocorp Vault` :param token_file: file path to token file """ - self.service = self.init_service_with_object( + self.vision_service = self.ctx.init_service_with_object( vision.ImageAnnotatorClient, service_account, use_robocorp_vault, token_file, ) - return self.service + return self.vision_service def set_image_type(self, image_file: str = None, image_uri: str = None): if image_file: @@ -66,8 +63,8 @@ def detect_labels( ... json_file=${CURDIR}${/}result.json """ parameters = self.set_image_type(image_file, image_uri) - response = self.service.label_detection(**parameters) - self.write_json(json_file, response) + response = self.vision_service.label_detection(**parameters) + self.ctx.write_json(json_file, response) return response @keyword(tags=["vision"]) @@ -91,8 +88,8 @@ def detect_text( ... json_file=${CURDIR}${/}result.json """ parameters = self.set_image_type(image_file, image_uri) - response = self.service.text_detection(**parameters) - self.write_json(json_file, response) + response = self.vision_service.text_detection(**parameters) + self.ctx.write_json(json_file, response) return response @keyword(tags=["vision"]) @@ -116,8 +113,8 @@ def detect_document( ... json_file=${CURDIR}${/}result.json """ parameters = self.set_image_type(image_file, image_uri) - response = self.service.document_text_detection(**parameters) - self.write_json(json_file, response) + response = self.vision_service.document_text_detection(**parameters) + self.ctx.write_json(json_file, response) return response @keyword(tags=["vision"]) @@ -141,8 +138,8 @@ def annotate_image( ... 
json_file=${CURDIR}${/}result.json """ parameters = self.set_image_type(image_file, image_uri) - response = self.service.annotate_image(**parameters) - self.write_json(json_file, response) + response = self.vision_service.annotate_image(**parameters) + self.ctx.write_json(json_file, response) return response @keyword(tags=["vision"]) @@ -166,6 +163,6 @@ def face_detection( ... json_file=${CURDIR}${/}result.json """ parameters = self.set_image_type(image_file, image_uri) - response = self.service.face_detection(**parameters) - self.write_json(json_file, response) + response = self.vision_service.face_detection(**parameters) + self.ctx.write_json(json_file, response) return response