diff --git a/.flake8 b/.flake8 index 83d7461..59d72fb 100644 --- a/.flake8 +++ b/.flake8 @@ -10,6 +10,7 @@ exclude = examples per-file-ignores = api/api.py:E402 + api/testrun.py api/test/*.py:E402 db/models/init_db.py:E402,F401 api/tmttestrun.py:E402,F401 diff --git a/Dockerfile-api b/Dockerfile-api index 0075ab2..d36ee2f 100644 --- a/Dockerfile-api +++ b/Dockerfile-api @@ -18,7 +18,8 @@ ENV BASIL_ADMIN_PASSWORD=${ADMIN_PASSWORD} BASIL_API_PORT=${API_PORT} # Init the database and # Write permission to db -RUN mkdir -p /var/tmp && cd /BASIL-API/db/models && \ +RUN mkdir -p /var/tmp && \ + cd /BASIL-API/db/models && \ python3 init_db.py && \ chmod a+rw /BASIL-API/db @@ -33,6 +34,6 @@ ENV BASIL_ADMIN_PASSWORD= EXPOSE ${BASIL_API_PORT} CMD echo "BASIL_API_PORT: ${BASIL_API_PORT}" && cd api && \ - gunicorn --access-logfile /var/tmp/tc-gunicorn-access.log \ - --error-logfile /var/tmp/tc-gunicorn-error.log \ - --bind 0.0.0.0:${BASIL_API_PORT} api:app 2>&1 | tee /var/tmp/tc-error.log + gunicorn --access-logfile /var/tmp/gunicorn-access.log \ + --error-logfile /var/tmp/gunicorn-error.log \ + --bind 0.0.0.0:${BASIL_API_PORT} api:app 2>&1 | tee /var/tmp/basil-error.log diff --git a/api/api.py b/api/api.py index bfe7412..45ca284 100644 --- a/api/api.py +++ b/api/api.py @@ -1,19 +1,24 @@ import base64 -from flask_cors import CORS -from flask import Flask, request, send_file, send_from_directory -from flask_restful import Resource, Api, reqparse -import os import datetime +import json import logging import math +import os import shutil -from sqlalchemy import and_, or_ -from sqlalchemy.orm.exc import NoResultFound import sys import urllib from urllib.error import HTTPError, URLError from uuid import uuid4 +import gitlab +import yaml +from flask import Flask, request, send_file, send_from_directory +from flask_cors import CORS +from flask_restful import Api, Resource, reqparse +from sqlalchemy import and_, or_ +from sqlalchemy.orm.exc import NoResultFound +from testrun import TestRunner + logging.basicConfig() logging.getLogger('sqlalchemy.engine').setLevel(logging.WARNING) @@ -26,6 +31,7 @@ MAX_LOGIN_ATTEMPTS = 5 MAX_LOGIN_ATTEMPTS_TIMEOUT = 60 * 15 # 15 minutes SSH_KEYS_PATH = os.path.join(currentdir, 'ssh_keys') +TESTRUN_PRESET_FILEPATH = os.path.join(currentdir, "testrun_plugin_presets.yaml") TMT_LOGS_PATH = os.getenv('BASIL_TMT_WORKDIR_ROOT', '/var/tmp/tmt') if not os.path.exists(SSH_KEYS_PATH): @@ -40,6 +46,8 @@ USER_ROLES_MANAGE_PERMISSIONS = ['ADMIN', 'USER'] USER_ROLES_MANAGE_USERS = ['ADMIN', ] +OK_STATUS = 200 +CREATED_STATUS = 201 BAD_REQUEST_MESSAGE = 'Bad request' BAD_REQUEST_STATUS = 400 UNAUTHORIZED_MESSAGE = 'User not authorized' @@ -65,31 +73,29 @@ 'write_permissions'] from db import db_orm -from db.models.api_document import ApiDocumentModel, ApiDocumentHistoryModel -from db.models.api_justification import ApiJustificationModel, ApiJustificationHistoryModel -from db.models.api_sw_requirement import ApiSwRequirementModel, ApiSwRequirementHistoryModel -from db.models.api_test_case import ApiTestCaseModel, ApiTestCaseHistoryModel -from db.models.api_test_specification import ApiTestSpecificationModel -from db.models.api_test_specification import ApiTestSpecificationHistoryModel -from db.models.api import ApiModel, ApiHistoryModel +from db.models.api import ApiHistoryModel, ApiModel +from db.models.api_document import ApiDocumentHistoryModel, ApiDocumentModel +from db.models.api_justification import ApiJustificationHistoryModel, ApiJustificationModel +from db.models.api_sw_requirement 
import ApiSwRequirementHistoryModel, ApiSwRequirementModel +from db.models.api_test_case import ApiTestCaseHistoryModel, ApiTestCaseModel +from db.models.api_test_specification import ApiTestSpecificationHistoryModel, ApiTestSpecificationModel from db.models.comment import CommentModel -from db.models.document import DocumentModel, DocumentHistoryModel -from db.models.justification import JustificationModel, JustificationHistoryModel +from db.models.document import DocumentHistoryModel, DocumentModel +from db.models.justification import JustificationHistoryModel, JustificationModel from db.models.notification import NotificationModel from db.models.ssh_key import SshKeyModel -from db.models.sw_requirement_sw_requirement import SwRequirementSwRequirementModel -from db.models.sw_requirement_sw_requirement import SwRequirementSwRequirementHistoryModel -from db.models.sw_requirement_test_case import SwRequirementTestCaseModel -from db.models.sw_requirement_test_case import SwRequirementTestCaseHistoryModel -from db.models.sw_requirement_test_specification import SwRequirementTestSpecificationModel -from db.models.sw_requirement_test_specification import SwRequirementTestSpecificationHistoryModel -from db.models.sw_requirement import SwRequirementModel, SwRequirementHistoryModel -from db.models.test_case import TestCaseModel, TestCaseHistoryModel +from db.models.sw_requirement import SwRequirementHistoryModel, SwRequirementModel +from db.models.sw_requirement_sw_requirement import (SwRequirementSwRequirementHistoryModel, + SwRequirementSwRequirementModel) +from db.models.sw_requirement_test_case import SwRequirementTestCaseHistoryModel, SwRequirementTestCaseModel +from db.models.sw_requirement_test_specification import (SwRequirementTestSpecificationHistoryModel, + SwRequirementTestSpecificationModel) +from db.models.test_case import TestCaseHistoryModel, TestCaseModel from db.models.test_run import TestRunModel from db.models.test_run_config import TestRunConfigModel -from db.models.test_specification_test_case import TestSpecificationTestCaseModel -from db.models.test_specification_test_case import TestSpecificationTestCaseHistoryModel -from db.models.test_specification import TestSpecificationModel, TestSpecificationHistoryModel +from db.models.test_specification import TestSpecificationHistoryModel, TestSpecificationModel +from db.models.test_specification_test_case import (TestSpecificationTestCaseHistoryModel, + TestSpecificationTestCaseModel) from db.models.user import UserModel app = Flask("BASIL-API") @@ -614,11 +620,14 @@ def get_split_sections(_specification, _mapping, _work_item_types): return sorted(mapped_sections, key=lambda k: k['offset']) -def check_fields_in_request(fields, request): +def check_fields_in_request(fields, request, allow_empty_string=True): for field in fields: if field not in request.keys(): print(f'field: {field} not in request: {request.keys()}') return False + elif not allow_empty_string and not str(request[field]): + print(f'field {field} is empty') + return False return True @@ -629,8 +642,9 @@ def get_query_string_args(args): order_how = args.get("order_how", default="", type=str) permitted_keys = ["api-id", "artifact", "id", "library", "mapped_to_type", "mapped_to_id", - "mode", "parent_id", "parent_table", "relation_id", "search", - "token", "url", "user-id", "work_item_type", "page", "per_page"] + "mode", "parent_id", "parent_table", "plugin", "relation_id", "search", + "token", "url", "user-id", "work_item_type", "page", "per_page",
"preset", + "job", "stage", "ref", "params"] ret = {"db": db, "limit": limit, @@ -891,6 +905,117 @@ def filter(self, token): return False +def add_test_run_config(dbi, request_data, user): + mandatory_fields = ['environment_vars', 'git_repo_ref', 'id', 'plugin', 'plugin_preset', + 'title'] + tmt_mandatory_fields = ['context_vars', 'provision_guest', 'provision_guest_port', 'provision_type', + 'ssh_key'] + gitlab_ci_mandatory_fields = ["job", "private_token", "project_id", "stage", "trigger_token", "url"] + github_actions_mandatory_fields = ["job", "private_token", "url", "workflow_id"] + kernel_ci_mandatory_fields = [] + + if not check_fields_in_request(mandatory_fields, request_data): + return f"{BAD_REQUEST_MESSAGE} Miss mandatory fields.", BAD_REQUEST_STATUS + + if request_data["id"] not in ["", 0]: + if str(request_data["id"]).strip().isnumeric(): + testrun_config_id = int(str(request_data["id"])) + try: + existing_config = dbi.session.query(TestRunConfigModel).filter( + TestRunConfigModel.id == testrun_config_id + ).one() + return existing_config, OK_STATUS + except NoResultFound: + return f"{BAD_REQUEST_MESSAGE} Unable to find the Test Run Configuration.", BAD_REQUEST_STATUS + else: + return f"{BAD_REQUEST_MESSAGE} Test Run Configuration ID is not valid.", BAD_REQUEST_STATUS + + if request_data["plugin"] not in TestRunner.test_run_plugin_models.keys(): + if not check_fields_in_request(tmt_mandatory_fields, request_data): + return f"{BAD_REQUEST_MESSAGE} Plugin not supported.", BAD_REQUEST_STATUS + + if request_data["plugin"] == "tmt": + if not check_fields_in_request(tmt_mandatory_fields, request_data): + return f"{BAD_REQUEST_MESSAGE} tmt miss mandatory fields.", BAD_REQUEST_STATUS + + if request_data["plugin"] == "gitlab_ci": + if not check_fields_in_request(gitlab_ci_mandatory_fields, request_data): + return f"{BAD_REQUEST_MESSAGE} GitlabCI miss mandatory fields.", BAD_REQUEST_STATUS + + if request_data["plugin"] == "github_actions": + if not check_fields_in_request(github_actions_mandatory_fields, request_data): + return f"{BAD_REQUEST_MESSAGE} Github Actions miss mandatory fields.", BAD_REQUEST_STATUS + + if request_data["plugin"] == "kernel_ci": + if not check_fields_in_request(kernel_ci_mandatory_fields, request_data): + return f"{BAD_REQUEST_MESSAGE} KernelCI miss mandatory fields.", BAD_REQUEST_STATUS + + # Config + config_title = str(request_data['title']).strip() + environment_vars = str(request_data['environment_vars']).strip() + git_repo_ref = str(request_data['git_repo_ref']).strip() + plugin = str(request_data['plugin']).strip() + plugin_preset = str(request_data['plugin_preset']).strip() + plugin_vars = "" + context_vars = "" + provision_type = "" + provision_guest = "" + provision_guest_port = "" + ssh_key = None + + # Check mandatory fields + if config_title == '': + return f"{BAD_REQUEST_MESSAGE} Empty Configuration Title.", BAD_REQUEST_STATUS + + if plugin == 'tmt': + context_vars = str(request_data['context_vars']).strip() + provision_type = str(request_data['provision_type']).strip() + provision_guest = str(request_data['provision_guest']).strip() + provision_guest_port = str(request_data['provision_guest_port']).strip() + ssh_key_id = request_data['ssh_key'] + + if provision_type == '': + return f"{BAD_REQUEST_MESSAGE} tmt provision type not defined.", BAD_REQUEST_STATUS + + if provision_type == 'connect': + if provision_guest == '' or provision_guest_port == '' or ssh_key_id == '' or ssh_key_id == '0': + return f"{BAD_REQUEST_MESSAGE} tmt provision 
configuration is not correct.", BAD_REQUEST_STATUS + + try: + ssh_key = dbi.session.query(SshKeyModel).filter( + SshKeyModel.id == ssh_key_id, + SshKeyModel.created_by_id == user.id + ).one() + except NoResultFound: + return f"{BAD_REQUEST_MESSAGE} Unable to find the SSH Key.", BAD_REQUEST_STATUS + + elif plugin == 'gitlab_ci': + plugin_vars += ";".join([f"{field}={str(request_data[field]).strip()}" + for field in gitlab_ci_mandatory_fields]) + elif plugin == 'github_actions': + plugin_vars += ";".join([f"{field}={str(request_data[field]).strip()}" + for field in github_actions_mandatory_fields]) + elif plugin == 'kernel_ci': + plugin_vars += ";".join([f"{field}={str(request_data[field]).strip()}" + for field in kernel_ci_mandatory_fields]) + + test_config = TestRunConfigModel(plugin, + plugin_preset, + plugin_vars, + config_title, + git_repo_ref, + context_vars, + environment_vars, + provision_type, + provision_guest, + provision_guest_port, + ssh_key, + user) + dbi.session.add(test_config) + dbi.session.commit() + return test_config, CREATED_STATUS + + tokenManager = Token() @@ -5670,12 +5795,6 @@ def get(self): def post(self): request_data = request.get_json(force=True) - mandatory_fields = ['context_vars', 'environment_vars', - 'git_repo_ref', - 'provision_guest', 'provision_guest_port', 'provision_type', - 'ssh_key', 'title'] - if not check_fields_in_request(mandatory_fields, request_data): - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS dbi = db_orm.DbInterface(get_db()) @@ -5684,47 +5803,14 @@ def post(self): if not isinstance(user, UserModel): return UNAUTHORIZED_MESSAGE, UNAUTHORIZED_STATUS - # Config - config_title = str(request_data['title']).strip() - git_repo_ref = str(request_data['git_repo_ref']).strip() - context_vars = str(request_data['context_vars']).strip() - environment_vars = str(request_data['environment_vars']).strip() - provision_type = str(request_data['provision_type']).strip() - provision_guest = str(request_data['provision_guest']).strip() - provision_guest_port = str(request_data['provision_guest_port']).strip() - ssh_key_id = request_data['ssh_key'] - - # Check mandatory fields - if config_title == '' or provision_type == '': - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS - - if provision_type == 'connect': - if provision_guest == '' or provision_guest_port == '' or ssh_key_id == '' or ssh_key_id == '0': - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS + dbi.engine.dispose() - try: - ssh_key = dbi.session.query(SshKeyModel).filter( - SshKeyModel.id == ssh_key_id, - SshKeyModel.created_by_id == user.id - ).one() - except NoResultFound: - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS + # Test Run Configuration + test_config_ret, test_config_status = add_test_run_config(dbi, request_data, user) + if test_config_status not in [OK_STATUS, CREATED_STATUS]: + return test_config_ret, test_config_status else: - ssh_key = None - - test_config = TestRunConfigModel(config_title, - git_repo_ref, - context_vars, - environment_vars, - provision_type, - provision_guest, - provision_guest_port, - ssh_key, - user) - - dbi.session.add(test_config) - dbi.session.commit() - dbi.engine.dispose() + test_config = test_config_ret return test_config.as_dict() @@ -5772,8 +5858,10 @@ def get(self): TestRunModel.id.like(f'%{search}%'), TestRunModel.uid.like(f'%{search}%'), TestRunModel.title.like(f'%{search}%'), - TestRunModel.note.like(f'%{search}%'), + TestRunModel.notes.like(f'%{search}%'), TestRunModel.bugs.like(f'%{search}%'), + TestRunModel.fixes.like(f'%{search}%'), + 
TestRunModel.report.like(f'%{search}%'), TestRunModel.result.like(f'%{search}%'), TestRunModel.created_at.like(f'%{search}%'), TestRunConfigModel.title.like(f'%{search}%'), @@ -5793,17 +5881,10 @@ def get(self): def post(self): request_data = request.get_json(force=True) - mandatory_fields = ['api-id', 'title', 'note', 'test-run-config', 'mapped_to_type', 'mapped_to_id'] + mandatory_fields = ['api-id', 'title', 'notes', 'test-run-config', 'mapped_to_type', 'mapped_to_id'] if not check_fields_in_request(mandatory_fields, request_data): return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS - config_mandatory_fields = ['context_vars', 'environment_vars', - 'from_db', 'id', - 'title', 'git_repo_ref', 'ssh_key', - 'provision_type', 'provision_guest', 'provision_guest_port'] - if not check_fields_in_request(config_mandatory_fields, request_data['test-run-config']): - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS - dbi = db_orm.DbInterface(get_db()) # User @@ -5817,65 +5898,16 @@ def post(self): dbi.engine.dispose() return NOT_FOUND_MESSAGE, NOT_FOUND_STATUS - # Config - test_run_config_id = str(request_data['test-run-config']['id']).strip() - config_title = str(request_data['test-run-config']['title']).strip() - git_repo_ref = str(request_data['test-run-config']['git_repo_ref']).strip() - context_vars = str(request_data['test-run-config']['context_vars']).strip() - environment_vars = str(request_data['test-run-config']['environment_vars']).strip() - provision_type = str(request_data['test-run-config']['provision_type']).strip() - provision_guest = str(request_data['test-run-config']['provision_guest']).strip() - provision_guest_port = str(request_data['test-run-config']['provision_guest_port']).strip() - ssh_key_id = request_data['test-run-config']['ssh_key'] - - if test_run_config_id == '0': - # Check mandatory fields - if config_title == '' or provision_type == '': - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS - - if provision_type == 'connect': - if provision_guest == '' or provision_guest_port == '' or ssh_key_id == '' or ssh_key_id == '0': - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS - - try: - ssh_key = dbi.session.query(SshKeyModel).filter( - SshKeyModel.id == ssh_key_id, - SshKeyModel.created_by_id == user.id - ).one() - except NoResultFound: - return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS - else: - ssh_key = None - - test_config = TestRunConfigModel(config_title, - git_repo_ref, - context_vars, - environment_vars, - provision_type, - provision_guest, - provision_guest_port, - ssh_key, - user) - - dbi.session.add(test_config) - + # Test Run Configuration + test_config_ret, test_config_status = add_test_run_config(dbi, request_data['test-run-config'], user) + if test_config_status not in [OK_STATUS, CREATED_STATUS]: + return test_config_ret, test_config_status else: - # Reuse existing Test Config - try: - test_config = dbi.session.query(TestRunConfigModel).filter( - TestRunConfigModel.id == test_run_config_id - ).one() - except NoResultFound: - return NOT_FOUND_MESSAGE, NOT_FOUND_STATUS - - # Check the ssh_key still exists - if test_config.provision_type == 'connect': - if not test_config.ssh_key: - return NOT_FOUND_MESSAGE, NOT_FOUND_STATUS + test_config = test_config_ret # Test Run title = request_data['title'].strip() - note = request_data['note'].strip() + notes = request_data['notes'].strip() mapping_to = str(request_data['mapped_to_type']).strip() mapping_id = request_data['mapped_to_id'] @@ -5902,42 +5934,49 @@ def post(self): # Create the Test Config only if the Test Run data is 
consistent dbi.session.commit() - new_test_run = TestRunModel(api, title, - note, + notes, test_config, mapping_to, mapping_id, user) + if 'report' in request_data.keys(): + new_test_run.report = request_data['report'] + if 'result' in request_data.keys(): + new_test_run.result = request_data['result'] + if 'status' in request_data.keys(): + new_test_run.status = request_data['status'] + dbi.session.add(new_test_run) dbi.session.commit() # Start the detached process to run the test async - cmd = f"python3 {os.path.join(currentdir, 'tmttestrun.py')} --id {new_test_run.id} " \ - f"&> {TMT_LOGS_PATH}/{new_test_run.uid}.log &" - os.system(cmd) + if new_test_run.status == 'created': + cmd = f"python3 {os.path.join(currentdir, 'testrun.py')} --id {new_test_run.id} " \ + f"&> {TMT_LOGS_PATH}/{new_test_run.uid}.log &" + os.system(cmd) - # Notification - notification = f'{user.email} started a Test Run for Test Case ' \ - f'{mapping.test_case.title} as part of the sw component ' \ - f'{api.api}, library {api.library}' - notifications = NotificationModel(api, - 'info', - f'Test Run for {api.api} has been requested', - notification, - str(user.id), - f'/mapping/{api.id}') - dbi.session.add(notifications) - dbi.session.commit() + # Notification + notification = f'{user.email} started a Test Run for Test Case ' \ + f'{mapping.test_case.title} as part of the sw component ' \ + f'{api.api}, library {api.library}' + notifications = NotificationModel(api, + 'info', + f'Test Run for {api.api} has been requested', + notification, + str(user.id), + f'/mapping/{api.id}') + dbi.session.add(notifications) + dbi.session.commit() dbi.engine.dispose() return new_test_run.as_dict() def put(self): request_data = request.get_json(force=True) - mandatory_fields = ['api-id', 'id', 'bugs', 'note', 'mapped_to_type', 'mapped_to_id'] + mandatory_fields = ['api-id', 'id', 'bugs', 'fixes', 'notes', 'mapped_to_type', 'mapped_to_id'] if not check_fields_in_request(mandatory_fields, request_data): return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS @@ -5994,9 +6033,13 @@ def put(self): test_run_modified = True run.bugs = request_data['bugs'] - if run.note != request_data['note']: + if run.fixes != request_data['fixes']: test_run_modified = True - run.note = request_data['note'] + run.fixes = request_data['fixes'] + + if run.notes != request_data['notes']: + test_run_modified = True + run.notes = request_data['notes'] if test_run_modified: dbi.session.add(run) @@ -6135,16 +6178,7 @@ def get(self): except NoResultFound: return NOT_FOUND_MESSAGE, NOT_FOUND_STATUS - log_txt = '' log_exec = '' - log_txt_path = os.path.join(TMT_LOGS_PATH, run.uid, 'log.txt') - - if os.path.exists(log_txt_path): - f = open(log_txt_path, 'r') - log_txt = f.read() - f.close() - else: - log_txt = "File not found that mean there was an error in the execution. See the stdout/stderr section." 
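For orientation, a request body accepted by the reworked TestRun POST handler above might look like the sketch below (all values are hypothetical; `mapped_to_type` must be one of the mapping table names, and the `test-run-config` keys mirror the mandatory fields checked by `add_test_run_config`):

payload = {
    "api-id": 1,
    "title": "nightly gitlab run",
    "notes": "triggered from BASIL",
    "mapped_to_type": "sw_requirement_test_case",  # hypothetical table name
    "mapped_to_id": 42,
    "test-run-config": {
        "id": 0,  # 0 or "": create a new config; a numeric id reuses a stored one
        "title": "gitlab ci nightly",
        "plugin": "gitlab_ci",
        "plugin_preset": "",
        "git_repo_ref": "main",
        "environment_vars": "arch=x86_64;suite=ltp",
        "job": "unit-test-job",
        "stage": "test-stage",
        "project_id": "1000000",
        "url": "https://gitlab.com",
        "private_token": "xxx-yyy-zzz",
        "trigger_token": "aaaa-bbbb-cccc",
    },
    # Optional keys picked up by the handler when present:
    "report": "", "result": "", "status": "created",
}

Only a `status` of 'created' causes the detached testrun.py process to be launched; other statuses record an externally executed run.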
log_exec_path = os.path.join(TMT_LOGS_PATH, f'{run.uid}.log') if os.path.exists(log_exec_path): @@ -6159,10 +6193,10 @@ def get(self): if os.path.exists(os.path.join(TMT_LOGS_PATH, run.uid, 'api', 'tmt-plan', 'data')): artifacts = os.listdir(os.path.join(TMT_LOGS_PATH, run.uid, 'api', 'tmt-plan', 'data')) - return {'artifacts': artifacts, - 'log_txt': log_txt, - 'log_exec': log_exec, - 'stdout_stderr': run.log} + ret = run.as_dict() + ret['artifacts'] = artifacts + ret['log_exec'] = log_exec + return ret class TestRunArtifacts(Resource): @@ -6179,9 +6213,8 @@ def get(self): user = get_active_user_from_request(args, dbi.session) if isinstance(user, UserModel): user_id = user.id - return UNAUTHORIZED_MESSAGE, UNAUTHORIZED_STATUS else: - user_id = 0 + return UNAUTHORIZED_MESSAGE, UNAUTHORIZED_STATUS # Find api api = get_api_from_request(args, dbi.session) @@ -6212,6 +6245,250 @@ def get(self): return send_from_directory(artifacts_path, args['artifact']) +class TestRunPluginPresets(Resource): + + PLUGIN_PRESET_FILENAME = "testrun_plugin_presets.yaml" + + def get(self): + mandatory_fields = ['api-id', 'plugin'] + args = get_query_string_args(request.args) + if not check_fields_in_request(mandatory_fields, args): + return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS + + plugin = args["plugin"] + dbi = db_orm.DbInterface(get_db()) + + # User + user = get_active_user_from_request(args, dbi.session) + if isinstance(user, UserModel): + user_id = user.id + else: + return UNAUTHORIZED_MESSAGE, UNAUTHORIZED_STATUS + + # Find api + api = get_api_from_request(args, dbi.session) + if not api: + dbi.engine.dispose() + return NOT_FOUND_MESSAGE, NOT_FOUND_STATUS + + # Permissions + permissions = get_api_user_permissions(api, user_id, dbi.session) + if 'r' not in permissions: + dbi.engine.dispose() + return UNAUTHORIZED_MESSAGE, UNAUTHORIZED_STATUS + + presets_filepath = os.path.join(currentdir, self.PLUGIN_PRESET_FILENAME) + if os.path.exists(presets_filepath): + try: + presets_file = open(presets_filepath, "r") + presets = yaml.safe_load(presets_file) + presets_file.close() + if plugin in presets.keys(): + if isinstance(presets[plugin], list): + return [x["name"] for x in presets[plugin] if "name" in x.keys()] + except Exception: + print(f"Unable to read {presets_filepath}") + return [] + return [] + + +class ExternalTestRuns(Resource): + def get(self): + mandatory_fields = ["api-id", "plugin", "preset", "ref"] + + args = get_query_string_args(request.args) + if not check_fields_in_request(mandatory_fields, args): + return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS + + ret = [] + ret_pipelines = [] + all_pipelines = [] + filtered_pipelines = [] + plugin = args["plugin"].strip() + preset = args["preset"].strip() + params_strings = [] + params = {} + ref = args["ref"].strip() + + preset_config = None + dbi = db_orm.DbInterface(get_db()) + + # User + user = get_active_user_from_request(args, dbi.session) + if isinstance(user, UserModel): + user_id = user.id + else: + return UNAUTHORIZED_MESSAGE, UNAUTHORIZED_STATUS + + # Find api + api = get_api_from_request(args, dbi.session) + if not api: + dbi.engine.dispose() + return NOT_FOUND_MESSAGE, NOT_FOUND_STATUS + + # Permissions + permissions = get_api_user_permissions(api, user_id, dbi.session) + if 'r' not in permissions: + dbi.engine.dispose() + return UNAUTHORIZED_MESSAGE, UNAUTHORIZED_STATUS + + if preset: + presets_file = open(TESTRUN_PRESET_FILEPATH, "r") + presets = yaml.safe_load(presets_file) + presets_file.close() + + if plugin in presets.keys(): + tmp = [x for 
x in presets[plugin] if x["name"] == preset] + if tmp: + # Init the config with the preset + # Values from test_run_config will override preset values + preset_config = tmp[0] + + if "params" in args.keys(): + params_strings = args["params"].split(";") + params_strings = [x for x in params_strings if "=" in x] + for param_string in params_strings: + k = param_string.split("=")[0].strip() + v = param_string.split("=")[1].strip() + if k and v: + params[k] = v + + if preset_config: + if plugin == "gitlab_ci": + gitlab_ci_mandatory_fields = ["private_token", "project_id", "url"] + + # Skip pending pipelines from the list + gitlab_ci_valid_status = ["success", "failed"] + + if not check_fields_in_request(gitlab_ci_mandatory_fields, preset_config, allow_empty_string=False): + return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS + + gl = gitlab.Gitlab(url=preset_config["url"], + private_token=preset_config["private_token"]) + gl.auth() + project = gl.projects.get(id=preset_config["project_id"]) + + job = None + stage = None + if "job" in preset_config.keys(): + if preset_config["job"]: + job = preset_config["job"] + + if not ref: + if "git_repo_ref" in preset_config.keys(): + if preset_config["git_repo_ref"]: + ref = preset_config["git_repo_ref"] + + if "stage" in preset_config.keys(): + if preset_config["stage"]: + stage = preset_config["stage"] + + if ref: + all_pipelines = project.pipelines.list(ref=ref) + else: + all_pipelines = project.pipelines.list() + + # Filter + all_pipelines = [x for x in all_pipelines if x.status in gitlab_ci_valid_status] + param_pipelines = [] + + if params.keys(): + for i in range(len(all_pipelines)): + for k, v in params.items(): + for pipe_kv in all_pipelines[i].variables.list(): + if pipe_kv.key == k: + if v in pipe_kv.value: + param_pipelines.append(all_pipelines[i]) + else: + param_pipelines = all_pipelines + + if stage: + for pipeline in param_pipelines: + pipeline_jobs = pipeline.jobs.list() + for pipeline_job in pipeline_jobs: + if pipeline_job.__dict__["_attrs"]["stage"] == stage: + if job: + if pipeline_job.__dict__["_attrs"]["name"] == job: + filtered_pipelines.append(pipeline) + break + else: + filtered_pipelines.append(pipeline) + break + + ret_pipelines = filtered_pipelines + + for p in ret_pipelines: + ret.append({"created_at": p.created_at, + "id": p.id, + "project": project.name, + "ref": p.ref, + "status": "pass" if p.status == "success" else "fail", + "web_url": p.web_url}) + + if plugin == "github_actions": + github_actions_mandatory_fields = ["private_token", "url"] + + if not check_fields_in_request(github_actions_mandatory_fields, + preset_config, + allow_empty_string=False): + return BAD_REQUEST_MESSAGE, BAD_REQUEST_STATUS + + if preset_config['url'].endswith("/"): + preset_config['url'] = preset_config['url'][:-1] + if preset_config['url'].endswith(".git"): + preset_config['url'] = preset_config['url'][:-4] + + url_split = preset_config['url'].split('/') + if len(url_split) < 2: + return f"{BAD_REQUEST_MESSAGE} Github repository url is not valid", BAD_REQUEST_STATUS + + owner = url_split[-2] + repo = url_split[-1] + workflows_url = f"https://api.github.com/repos/{owner}/{repo}/actions/runs?" 
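The `params` query argument handled above uses the same `key=value;key=value` convention as the plugin variables; a minimal standalone sketch of that parsing, mirroring the split logic in the handler:

def parse_params(raw):
    # "arch=x86_64;suite=ltp" -> {"arch": "x86_64", "suite": "ltp"}
    params = {}
    for pair in [p for p in raw.split(";") if "=" in p]:
        k = pair.split("=")[0].strip()
        v = pair.split("=")[1].strip()
        if k and v:
            params[k] = v
    return params

assert parse_params("arch=x86_64;suite=ltp") == {"arch": "x86_64", "suite": "ltp"}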
+ if not ref: + if "ref" in preset_config.keys(): + ref = preset_config['ref'] + + if ref: + workflows_url += f"&branch={ref}" + + if "workflow_id" in preset_config.keys(): + workflows_url += f"&workflow_id={preset_config['workflow_id']}" + + if params_strings: + workflows_url += "&" + "&".join(params_strings) + + headers = { + "Accept": "application/vnd.github+json", + "Authorization": f"Bearer {preset_config['private_token']}", + "X-GitHub-Api-Version": "2022-11-28" + } + + try: + request_params = urllib.request.Request( + url=workflows_url, + headers=headers + ) + + response_data = urllib.request.urlopen(request_params).read() + content = json.loads(response_data.decode("utf-8")) + except Exception as e: + return f"{BAD_REQUEST_MESSAGE} Unable to read workflows {e}", BAD_REQUEST_STATUS + else: + ret_pipelines = content["workflow_runs"] + + for p in ret_pipelines: + ret.append({"created_at": p['created_at'], + "id": p['id'], + "project": f"{owner}/{repo}", + "ref": p['head_branch'], + "status": "pass" if p['conclusion'] == "success" else "fail", + "web_url": f"{preset_config['url']}/actions/runs/{p['id']}"}) + + return ret + + class Version(Resource): def get(self): @@ -6248,8 +6525,10 @@ def get(self): api.add_resource(ApiTestCasesMapping, '/mapping/api/test-cases') api.add_resource(TestRunConfig, '/mapping/api/test-run-configs') api.add_resource(TestRun, '/mapping/api/test-runs') +api.add_resource(ExternalTestRuns, '/mapping/api/test-runs/external') api.add_resource(TestRunLog, '/mapping/api/test-run/log') api.add_resource(TestRunArtifacts, '/mapping/api/test-run/artifacts') +api.add_resource(TestRunPluginPresets, '/mapping/api/test-run-plugin-presets') # - Indirect api.add_resource(SwRequirementSwRequirementsMapping, '/mapping/sw-requirement/sw-requirements') diff --git a/api/testrun.py b/api/testrun.py new file mode 100644 index 0000000..8308f52 --- /dev/null +++ b/api/testrun.py @@ -0,0 +1,279 @@ +#! /bin/python3 +import argparse +import os +import sys + +import yaml + +currentdir = os.path.dirname(os.path.realpath(__file__)) +sys.path.insert(1, os.path.dirname(currentdir)) + +from sqlalchemy.orm.exc import NoResultFound +from testrun_github_actions import TestRunnerGithubActionsPlugin +from testrun_gitlab_ci import TestRunnerGitlabCIPlugin +from testrun_tmt import TestRunnerTmtPlugin + +from db import db_orm +from db.models.api_test_case import ApiTestCaseModel +from db.models.notification import NotificationModel +from db.models.sw_requirement_test_case import SwRequirementTestCaseModel +from db.models.test_run import TestRunModel +from db.models.test_specification_test_case import TestSpecificationTestCaseModel + + +class TestRunner: + """ + The TestRunner class reads the test run request from the database + and runs the test using the desired plugin. + The default plugin is `tmt`, implemented in testrun_tmt.py: + that file provides a class named TestRunnerTmtPlugin that inherits from + TestRunnerBasePlugin and implements the run() method. + The goal of run() is to execute the test and provide values for the following + variables: + + log + + test_report + + test_result + + test_status + + TestRunner - Error numbers + - 1: Unable to find the Test Run in the db + - 2: Test Run has already been triggered + - 3: Unable to find the Model of the parent item in the mapping definition + - 4: Unable to find the Mapping in the db + - 5: The selected plugin is not supported yet + - 6: Exceptions + """ + RESULT_FAIL = 'fail' + RESULT_PASS = 'pass' + + STATUS_CREATED = 'created' + STATUS_ERROR = 'error' + STATUS_RUNNING = 'running' + STATUS_COMPLETED = 'completed' + + test_run_plugin_models = {'github_actions': TestRunnerGithubActionsPlugin, + 'gitlab_ci': TestRunnerGitlabCIPlugin, + 'kernel_ci': None, + 'tmt': TestRunnerTmtPlugin} + + runner_plugin = None + config = {} + + id = None + execution_result = '' + execution_return_code = -1 + test_result = '' + test_report = '' + ssh_keys_dir = os.path.join(currentdir, 'ssh_keys') # Same as SSH_KEYS_PATH defined in api.py + presets_filepath = os.path.join(currentdir, 'testrun_plugin_presets.yaml') + + dbi = None + db_test_run = None + db_test_case = None + mapping_model = None + mapping = None + DB_NAME = 'basil.db' + + def __del__(self): + if self.dbi: + self.dbi.engine.dispose() + + def __init__(self, id): + self.id = id + self.dbi = db_orm.DbInterface(self.DB_NAME) + + # Test Run + try: + self.db_test_run = self.dbi.session.query(TestRunModel).filter( + TestRunModel.id == self.id + ).one() + except NoResultFound: + print("ERROR: Unable to find the Test Run in the db") + sys.exit(1) + + if self.db_test_run.status != self.STATUS_CREATED: + print(f"ERROR: Test Run {id} has already been triggered, current status is `{self.db_test_run.status}`.") + sys.exit(2) + + # Test Case + if self.db_test_run.mapping_to == ApiTestCaseModel.__tablename__: + self.mapping_model = ApiTestCaseModel + elif self.db_test_run.mapping_to == SwRequirementTestCaseModel.__tablename__: + self.mapping_model = SwRequirementTestCaseModel + elif self.db_test_run.mapping_to == TestSpecificationTestCaseModel.__tablename__: + self.mapping_model = TestSpecificationTestCaseModel + else: + # TODO: Update db with the error info + print("Unable to find the Model of the parent item in the mapping definition") + sys.exit(3) + + try: + self.mapping = self.dbi.session.query(self.mapping_model).filter( + self.mapping_model.id == self.db_test_run.mapping_id + ).one() + except BaseException: + # TODO: Update db with the error info + print("ERROR: Unable to find the Mapping in the db") + sys.exit(4) + + db_config = self.db_test_run.test_run_config.as_dict() + + # Load preset configuration or explode the plugin_vars + preset = self.db_test_run.test_run_config.plugin_preset + if preset: + # Init config with preset if required + self.load_preset() + else: + plugin_vars = self.unpack_kv_str(db_config["plugin_vars"]) + for k, v in plugin_vars.items(): + db_config[k] = v + del db_config["plugin_vars"] + + # Override preset values with the test run configuration values; + # for dicts we merge instead, appending to the existing entries + db_config["env"] = self.unpack_kv_str(db_config["environment_vars"]) + + del db_config["environment_vars"] + + db_config["context"] = self.unpack_kv_str(db_config["context_vars"]) + del db_config["context_vars"] + + for k, v in db_config.items(): + if isinstance(v, dict): + if k not in self.config.keys(): + self.config[k] = {} + for kk, vv in v.items(): + self.config[k][kk] = vv + else: + if v:
self.config[k] = v + + self.config["uid"] = self.db_test_run.uid + self.config["env_str"] = "" + self.config["context_str"] = "" + + env_str = f'basil_test_case_id={self.mapping.test_case.id};' + env_str += f'basil_test_case_title={self.mapping.test_case.title};' + env_str += f'basil_api_api={self.mapping.api.api};' + env_str += f'basil_api_library={self.mapping.api.library};' + env_str += f'basil_api_library_version={self.mapping.api.library_version};' + env_str += f'basil_test_case_mapping_table={self.db_test_run.mapping_to};' + env_str += f'basil_test_case_mapping_id={self.db_test_run.mapping_id};' + env_str += f'basil_test_relative_path={self.mapping.test_case.relative_path};' + env_str += f'basil_test_repo_path={self.mapping.test_case.repository};' + env_str += f'basil_test_repo_url={self.mapping.test_case.repository};' + env_str += f'basil_test_repo_ref={self.config["git_repo_ref"]};' + env_str += f'basil_test_run_id={self.db_test_run.uid};' + env_str += f'basil_test_run_title={self.db_test_run.title};' + env_str += f'basil_test_run_config_id={self.config["id"]};' + env_str += f'basil_test_run_config_title={self.config["title"]};' + env_str += f'basil_user_email={self.db_test_run.created_by.email};' + env_str += self.pack_str_kv(self.config['env']) + # Merge the BASIL-provided variables into the plugin environment + self.config["env"] = self.unpack_kv_str(env_str) + + def load_preset(self): + plugin = self.db_test_run.test_run_config.plugin + preset = self.db_test_run.test_run_config.plugin_preset + + if preset: + presets_file = open(self.presets_filepath, "r") + presets = yaml.safe_load(presets_file) + presets_file.close() + + if plugin in presets.keys(): + tmp = [x for x in presets[plugin] if x["name"] == preset] + if tmp: + # Init the config with the preset + # Values from test_run_config will override preset values + self.config = tmp[0] + + def unpack_kv_str(self, _string): + # return a dict from a string formatted as + # key1=value1;key2=value2... + PAIRS_DIV = ';' + KV_DIV = '=' + ret = {} + pairs = _string.split(PAIRS_DIV) + for pair in pairs: + if KV_DIV in pair: + if pair.count(KV_DIV) == 1: + ret[pair.split(KV_DIV)[0].strip()] = pair.split(KV_DIV)[1].strip() + return ret + + def pack_str_kv(self, _dict): + # return a string formatted as follows + # key1=value1;key2=value2... + # from a flat key values dict + ret = "" + for k, v in _dict.items(): + ret += f"{k}={v};" + if ret.endswith(";"): + ret = ret[:-1] + return ret + + def notify(self): + # Notification + if self.test_result == self.RESULT_PASS: + variant = 'success' + else: + variant = 'danger' + + notification = f'Test Run for Test Case ' \ + f'{self.mapping.test_case.title} as part of the sw component ' \ + f'{self.db_test_run.api.api}, library {self.db_test_run.api.library} ' \ + f'completed with: {self.test_result.upper()}' + notifications = NotificationModel(self.db_test_run.api, + variant, + f'Test Run for {self.db_test_run.api.api} {self.test_result.upper()}', + notification, + '', + f'/mapping/{self.db_test_run.api.id}') + self.dbi.session.add(notifications) + self.dbi.session.commit() + + def publish(self): + """ + Update the database with the current version of the TestRunModel instance + """ + self.dbi.session.add(self.db_test_run) + self.dbi.session.commit() + + def run(self): + # Test Run Plugin + try: + if self.db_test_run.test_run_config.plugin in self.test_run_plugin_models: + self.runner_plugin = self.test_run_plugin_models[ + self.db_test_run.test_run_config.plugin](runner=self, + currentdir=currentdir) + self.runner_plugin.validate() + self.runner_plugin.run() + self.runner_plugin.cleanup() + self.publish() + else: + reason = "\nERROR: The selected plugin is not supported yet" + print(reason) + self.db_test_run.status = "error" + self.db_test_run.log += reason + self.publish() + sys.exit(5) + except Exception as e: + self.db_test_run.status = "error" + self.db_test_run.log += f"\n{e}" + self.publish() + sys.exit(6) + + +if __name__ == '__main__': + """ + This file is called by api.py via a terminal and + requires as argument an id of the TestRun table + """ + parser = argparse.ArgumentParser() + + parser.add_argument("--id", type=str, help="id of the TestRun table row to execute") + args = parser.parse_args() + + tr = TestRunner(id=args.id) + tr.run() + tr.notify()
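The `unpack_kv_str`/`pack_str_kv` helpers above round-trip flat dictionaries through the `key1=value1;key2=value2` wire format used for `environment_vars`, `context_vars` and `plugin_vars`. A standalone illustration (pairs without exactly one `=` are skipped on unpack, so the round trip holds only for well-formed input):

def unpack_kv_str(s):
    # mirrors TestRunner.unpack_kv_str
    return {p.split("=")[0].strip(): p.split("=")[1].strip()
            for p in s.split(";") if p.count("=") == 1}

def pack_str_kv(d):
    # mirrors TestRunner.pack_str_kv
    return ";".join(f"{k}={v}" for k, v in d.items())

assert unpack_kv_str("arch=x86_64;distro=fedora") == {"arch": "x86_64", "distro": "fedora"}
assert pack_str_kv({"arch": "x86_64"}) == "arch=x86_64"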
diff --git a/api/testrun_base.py b/api/testrun_base.py new file mode 100644 index 0000000..19c49c8 --- /dev/null +++ b/api/testrun_base.py @@ -0,0 +1,59 @@ +#! /bin/python3 +import datetime + + +class TestRunnerBasePlugin(): + + """ + Error numbers: + 7: validation issue + 8: execution issue + 9: monitor issue + """ + config = None + currentdir = None + execution_result = None + log = '' + runner = None + test_result = None + test_status = None + test_report = None + + def __init__(self, runner=None, currentdir=None, *args, **kwargs): + self.runner = runner + self.config = runner.config + self.currentdir = currentdir + self.test_status = runner.STATUS_CREATED + + def append_log(self, _log): + self.log += f"\n\n----------> {self.timestamp()}" + self.log += f"\n{_log.strip()}" + self.log += "\n<------------" + + def cleanup(self): + pass + + def get_result(self): + pass + + def run(self): + self.test_status = self.runner.STATUS_RUNNING + self.status_update() + + def timestamp(self): + TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S' + return f"{datetime.datetime.utcnow().strftime(TIMESTAMP_FORMAT)} UTC" + + def status_update(self): + """ + Propagate log, report, result and status to the db instance inside the runner + and update the database using the TestRunner publish method + """ + self.runner.db_test_run.log = self.log + self.runner.db_test_run.report = self.test_report + self.runner.db_test_run.result = self.test_result + self.runner.db_test_run.status = self.test_status + self.runner.publish() + + def validate(self): + pass
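TestRunnerBasePlugin above defines the contract the concrete plugins implement: validate() checks the configuration, run() flips the status to RUNNING, and status_update() propagates log, report, result and status to the TestRunModel row via TestRunner.publish(). A minimal hypothetical plugin illustrating the flow (to be selectable it would also need an entry in TestRunner.test_run_plugin_models):

from testrun_base import TestRunnerBasePlugin


class TestRunnerEchoPlugin(TestRunnerBasePlugin):
    # Hypothetical no-op plugin: executes nothing and always passes.

    def validate(self):
        pass  # no mandatory configuration fields

    def run(self):
        super().run()  # sets STATUS_RUNNING and persists it
        self.append_log("echo: nothing to execute")
        self.test_result = self.runner.RESULT_PASS
        self.test_status = self.runner.STATUS_COMPLETED
        self.status_update()  # persists log/report/result/status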
Status Code {jobs_response.status_code}") + sys.exit(9) + + if "jobs" not in jobs_response.json().keys(): + print("ERROR: jobs is not in the response") + sys.exit(9) + + if not jobs_response.json()["jobs"]: + print("ERROR: Job list is empty") + sys.exit(9) + + jobs = jobs_response.json()["jobs"] + self.execution_log += f"jobs: {' - '.join([x['name'] for x in jobs])}\n" + job_exists = False + + for job in jobs: + if job["name"] == self.config["job"]: + self.execution_log += f"target job {self.config['job']} status: {job['status']}\n" + job_exists = True + if job["status"] == "completed": + completed = True + overall_result = self.conclusion_map_result[job["conclusion"]] + break + + if not job_exists: + if workflow_run['conclusion']: + print("ERROR: Selected job is not part of the workflow") + sys.exit(9) + + if not completed: + self.execution_log += f"Not completed yet at iteration {iteration}\n" + iteration += 1 + time.sleep(self.WAIT_INTERVAL) + else: + self.execution_log += f"Completed at iteration {iteration}\n" + self.execution_log += f"Overall result: {overall_result}\n" + + self.append_log(self.execution_log) + self.execution_log = "" + self.status_update() # Update the log + + except Exception as e: + print(f"Exception: {e}") + + self.test_status = self.runner.STATUS_COMPLETED + self.test_result = overall_result + self.status_update() + + def cleanup(self): + pass + + def run(self): + super().run() + + data = {"ref": self.config["git_repo_ref"], + "inputs": self.config["env"]} + + # Add and override config inputs + if "inputs" in self.config.keys(): + for k, v in self.config["inputs"].items(): + data["inputs"][k] = v + + if "uuid" not in data["inputs"].keys(): + data["inputs"]["uuid"] = self.runner.config["uid"] + self.uuid = self.runner.config["uid"] + else: + self.uuid = data["inputs"]["uuid"] + + self.append_log(f"CI trigger payload: {data}") + + trigger_url = f"{self.api_base_url}/workflows/{self.workflow_id}/dispatches" + response = requests.post(url=trigger_url, + json=data, + headers=self.headers, + timeout=self.HTTP_REQUEST_TIMEOUT) + + if response.status_code == 204: + self.test_status = self.runner.STATUS_RUNNING + self.status_update() + time.sleep(self.SLEEP_DISPATCH) + self.get_result() + else: + self.test_status = self.runner.STATUS_ERROR + self.test_result = self.runner.RESULT_FAIL + self.status_update() + + def validate(self): + # Validate mandatory fields + for f in self.config_mandatory_fields: + if f not in self.config.keys(): + self.prepare_log += f"ERROR: Wrong configuration. Miss mandatory field {f}\n" + print(f"ERROR: Wrong configuration. Miss mandatory field {f}\n") + self.append_log(self.prepare_log) + self.status_update() + sys.exit(7) + else: + if not self.config[f]: + self.prepare_log += f"ERROR: Wrong configuration. Miss mandatory field {f}\n" + print(f"ERROR: Wrong configuration. 
Miss mandatory field {f}\n") + self.append_log(self.prepare_log) + self.status_update() + sys.exit(7) + + # Expected url format examples: + # - https://github.com/elisa-tech/BASIL + # - https://github.com/elisa-tech/BASIL/ + # - https://github.com/elisa-tech/BASIL.git + # - https://github.com/elisa-tech/BASIL.git/ + if self.config['url'].endswith("/"): + self.config['url'] = self.config['url'][:-1] + if self.config['url'].endswith(".git"): + self.config['url'] = self.config['url'][:-4] + + if 'workflow_id' in self.config.keys(): + if self.config['workflow_id']: + self.workflow_id = self.config['workflow_id'] + if not self.workflow_id: + print("ERROR: Github workflow_id is not valid") + sys.exit(7) + + url_split = self.config['url'].split('/') + if len(url_split) < 2: + print("ERROR: Github repository url is not valid") + sys.exit(7) + + self.owner = url_split[-2] + self.repo = url_split[-1] + self.api_base_url = f"https://api.github.com/repos/{self.owner}/{self.repo}/actions" + + if "job" in self.config.keys(): + if isinstance(self.config["job"], str): + if len(self.config["job"].strip()): + self.valid_job = True diff --git a/api/testrun_gitlab_ci.py b/api/testrun_gitlab_ci.py new file mode 100644 index 0000000..ffa4680 --- /dev/null +++ b/api/testrun_gitlab_ci.py @@ -0,0 +1,231 @@ +#! /bin/python3 +import json +import sys +import time + +import gitlab +import requests +from testrun_base import TestRunnerBasePlugin + + +class TestRunnerGitlabCIPlugin(TestRunnerBasePlugin): + + # Constants + config_mandatory_fields = ["private_token", "project_id", "trigger_token", "url"] + HTTP_REQUEST_TIMEOUT = 30 + WAIT_INTERVAL = 60 * 1 # seconds + + # Variables + status_map_result = {} + job_id = None + pipeline_id = None + project_pipeline_url = None + project_job_url = None + prepare_log = "PREPARATION\n" + execution_log = "EXECUTION\n" + local_status = None + + valid_job = False + valid_stage = False + + def __init__(self, runner=None, *args, **kwargs): + super().__init__(runner=runner, *args, **kwargs) + + if self.config['url'].endswith("/"): + self.config['url'] = self.config['url'][:-1] + self.project_pipeline_url = f"{self.config['url']}/api/v4/projects/" \ + f"{self.config['project_id']}/trigger/pipeline" + + self.status_map_result = {"failed": self.runner.RESULT_FAIL, + "warning": None, + "pending": None, + "running": None, + "manual": None, + "scheduled": None, + "canceled": self.runner.RESULT_FAIL, + "success": self.runner.RESULT_PASS, + "skipped": self.runner.RESULT_FAIL, + "created": None, } + + optional_fields = ["stage", "job"] + + # Log optional fields + for f in optional_fields: + if f in self.config.keys(): + if self.config[f]: + self.prepare_log += f"{f}: {self.config[f]}\n" + + self.prepare_log += f"pipeline_url: {self.project_pipeline_url}\n" + self.append_log(self.prepare_log) + self.status_update() + + def connect(self): + try: + gl = gitlab.Gitlab(url=self.config["url"], + private_token=self.config["private_token"]) + gl.auth() + project = gl.projects.get(id=self.config["project_id"]) + except Exception as e: + print(f"ERROR: Unable to connect to gitlab instance {e}") + sys.exit(9) + return project + + def log_pipeline_job(self, pipeline_job): + self.execution_log += f"\n-> Job {pipeline_job.id} {pipeline_job.name}: `{pipeline_job.status}`" + + def get_result(self): + """ + if the job name is not populated, all the jobs of the stage have to pass + if the stage name is not populated all the pipeline jobs have to pass + """ + + completed = False + self.execution_log += f"stage 
defined: {str(self.valid_stage)}\n" + self.execution_log += f"job defined: {str(self.valid_job)}\n" + self.append_log(self.execution_log) + + project = self.connect() + + iteration = 1 + while not completed: + try: + # Update the job status and log on each iteration + + if self.valid_stage: + if self.valid_job: + pipeline_job = None + if self.job_id: + # Already know the pipeline job id + pipeline_job = project.jobs.get(self.job_id) + self.local_status = pipeline_job.status + self.log_pipeline_job(pipeline_job) + else: + # First iteration, don't know the pipeline job id + pipeline = project.pipelines.get(self.pipeline_id) + pipeline_jobs = pipeline.jobs.list() + for pjob in pipeline_jobs: + if pjob.__dict__["_attrs"]["stage"] == self.config["stage"]: + if pjob.__dict__["_attrs"]["name"] == self.config["job"]: + pipeline_job = pjob + self.job_id = pipeline_job.id + self.log_pipeline_job(pipeline_job) + self.local_status = pipeline_job.status + self.test_report = pjob.__dict__["_attrs"]["web_url"] + break + if self.status_map_result.get(pipeline_job.status): + completed = True + else: + # All jobs of the stage have to pass + pipeline = project.pipelines.get(self.pipeline_id) + self.test_report = pipeline.__dict__["_attrs"]["web_url"] + pipeline_jobs = pipeline.jobs.list() + completed = True + self.local_status = "wait-all" + for pipeline_job in pipeline_jobs: + if pipeline_job.__dict__["_attrs"]["stage"] == self.config["stage"]: + + self.log_pipeline_job(pipeline_job) + + if self.status_map_result.get(pipeline_job.status) == self.runner.RESULT_FAIL: + self.local_status = pipeline_job.status + completed = True + break + elif not self.status_map_result.get(pipeline_job.status): + # A job is still running + self.local_status = None + completed = False + else: + # All jobs of all the stages have to pass + pipeline = project.pipelines.get(self.pipeline_id) + self.test_report = pipeline.__dict__["_attrs"]["web_url"] + pipeline_jobs = pipeline.jobs.list() + completed = True + self.local_status = "wait-all" + for pipeline_job in pipeline_jobs: + self.log_pipeline_job(pipeline_job) + + if self.status_map_result.get(pipeline_job.status) == self.runner.RESULT_FAIL: + self.local_status = pipeline_job.status + completed = True + break + elif not self.status_map_result.get(pipeline_job.status): + # A job is still running + self.local_status = None + completed = False + + if not completed: + self.execution_log = f"Not completed yet at iteration {iteration}" + iteration += 1 + time.sleep(self.WAIT_INTERVAL) + + self.append_log(self.execution_log) + self.execution_log = "" + self.status_update() # Update the log + + except Exception as e: + print(f"ERROR: Unable to connect to gitlab: {e}") + + self.test_status = self.local_status + if self.test_status == 'wait-all': + self.test_status = 'success' + + self.test_result = self.status_map_result.get(self.test_status) + self.status_update() + + def cleanup(self): + pass + + def run(self): + + super().run() + data = {"token": self.config["trigger_token"], + "ref": self.config["git_repo_ref"], + "variables": self.config["env"]} + + # Hide token in the log + data_log = data.copy() + data_log["token"] = "***" + self.append_log(f"CI trigger payload: {data_log}") + + response = requests.post(self.project_pipeline_url, + json=data, + timeout=self.HTTP_REQUEST_TIMEOUT) + + response_dict = json.loads(response.text) + + if response.status_code == 201 and "id" in response_dict.keys(): + self.pipeline_id = str(response_dict["id"]) + self.test_status = 
self.runner.STATUS_RUNNING + self.status_update() + self.get_result() + else: + self.test_status = self.runner.STATUS_ERROR + self.test_result = self.runner.RESULT_FAIL + self.status_update() + + def validate(self): + # Validate mandatory fields + for f in self.config_mandatory_fields: + if f not in self.config.keys(): + self.prepare_log += f"ERROR: Wrong configuration. Missing mandatory field {f}\n" + print(f"ERROR: Wrong configuration. Missing mandatory field {f}\n") + self.append_log(self.prepare_log) + self.status_update() + sys.exit(7) + else: + if not self.config[f]: + self.prepare_log += f"ERROR: Wrong configuration. Missing mandatory field {f}\n" + print(f"ERROR: Wrong configuration. Missing mandatory field {f}\n") + self.append_log(self.prepare_log) + self.status_update() + sys.exit(7) + + if "stage" in self.config.keys(): + if isinstance(self.config["stage"], str): + if len(self.config["stage"].strip()): + self.valid_stage = True + + if "job" in self.config.keys(): + if isinstance(self.config["job"], str): + if len(self.config["job"].strip()): + self.valid_job = True diff --git a/api/testrun_plugin_presets.yaml b/api/testrun_plugin_presets.yaml new file mode 100644 index 0000000..524d016 --- /dev/null +++ b/api/testrun_plugin_presets.yaml @@ -0,0 +1,32 @@ +--- +#gitlab_ci: +# - name: gitlab-ci-project-name +# url: https://www.gitlab.com/ +# git_repo_ref: main +# project_id: 1000000 +# trigger_token: aaaa-bbbb-cccc +# private_token: xxx-yyy-zzz +# stage: test-stage +# job: unit-test-job +# env: +# var1: value1 +# var2: value2 +#github_actions: +# - name: basil-test +# url: https://www.github.com/elisa-tech/BASIL +# git_repo_ref: main +# private_token: xxx-yyy-zzz +# workflow_id: build.yaml +# job: test +# inputs: +# uuid: XXX-YYY-ZZZ +#tmt: +# - name: ltp_syscall +# git_repo_ref: main +# provision_type: container +# context: +# var1: value1 +# var2: value2 +# env: +# var1: value1 +# var2: value2
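The presets file above ships fully commented out; once an entry is filled in, TestRunner.load_preset() simply safe-loads the YAML and picks the list entry whose `name` matches the configured `plugin_preset`. A self-contained sketch of that lookup, using values taken from the commented tmt example:

import yaml

presets = yaml.safe_load("""
tmt:
  - name: ltp_syscall
    git_repo_ref: main
    provision_type: container
    env:
      var1: value1
""")
preset = [x for x in presets["tmt"] if x["name"] == "ltp_syscall"][0]
assert preset["provision_type"] == "container"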
diff --git a/api/testrun_tmt.py b/api/testrun_tmt.py new file mode 100644 index 0000000..c4cff9f --- /dev/null +++ b/api/testrun_tmt.py @@ -0,0 +1,126 @@ +#! /bin/python3 +import datetime +import os +import subprocess +import yaml +from testrun_base import TestRunnerBasePlugin + + +class TestRunnerTmtPlugin(TestRunnerBasePlugin): + + # Constants + plan = 'tmt-plan' + root_dir = os.getenv('BASIL_TMT_WORKDIR_ROOT', '/var/tmp/tmt') # Same as TMT_LOGS_PATH defined in api.py + + def __init__(self, runner=None, *args, **kwargs): + super().__init__(runner=runner, *args, **kwargs) + + if not os.path.exists(self.root_dir): + os.mkdir(self.root_dir) + + if len(self.config["context"].keys()) > 0: + for k, v in self.config["context"].items(): + if len(v): + self.config["context_str"] += f"-c {k}='\"{v}\"' " + + if len(self.config["env"].keys()) > 0: + for k, v in self.config["env"].items(): + if len(v): + self.config["env_str"] += f"-e {k}='\"{v}\"' " + + def validate(self): + # Check that all the required config are set + pass + + def run(self): + super().run() + self.validate() + + provision_str = 'container --stop-time 30' + root_dir_var_str = '' + + if self.config["provision_type"] == 'connect': + if self.config["provision_guest"] != '' and self.config["ssh_key_id"] != '': + provision_str = f'connect --guest {self.config["provision_guest"]} ' + provision_str += f'--key {self.runner.ssh_keys_dir}/{self.config["ssh_key_id"]}' + if self.config["provision_guest_port"] != '': + provision_str += f' --port {self.config["provision_guest_port"]}' + + if self.root_dir != '': + root_dir_var_str = f'export TMT_WORKDIR_ROOT={self.root_dir}' + + # skip prepare, which can generate package manager errors on some systems + cmd = f'{root_dir_var_str} && cd {self.currentdir} &&' \ + f' tmt {self.config["context_str"]} run -vvv -a --id {self.runner.db_test_run.uid}' \ + f' {self.config["env_str"]}' \ + f' provision --how {provision_str} plan --name {self.plan}' + cmd = cmd.replace('  ', ' ')  # collapse double blanks left by empty flag strings + + process = subprocess.Popen(cmd, + shell=True, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + self.log += f'TEST RUN {self.runner.db_test_run.uid}\n' + self.log += '======================================\n' + self.log += f'STARTED AT {datetime.datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S")}\n' + self.log += '--------------------------------------\n' + self.status_update() + + out, err = process.communicate() + execution_return_code = process.returncode + self.execution_result = self.runner.RESULT_FAIL + + if execution_return_code in [0, 1]: + self.execution_result = self.runner.RESULT_PASS + + self.log += f'out: {out.decode("utf-8")}\n' + self.log += '--------------------------------------\n' + self.log += f'err: {err.decode("utf-8")}\n' + self.log += '\n\n' + self.log += '--------------------------------------\n' + self.log += f'EXECUTION RESULT: {self.execution_result}\n' + self.log += '======================================\n' + self.status_update() + + # Test Result Evaluation + if self.execution_result == self.runner.RESULT_PASS: + results_file_path = f'{self.root_dir}/{self.runner.db_test_run.uid}/api/{self.plan}/execute/results.yaml' + report_file_path = f'{self.root_dir}/{self.runner.db_test_run.uid}/api/{self.plan}/report/html-report' \ + f'/index.html' + + if not os.path.exists(results_file_path): + self.test_result = self.runner.RESULT_FAIL + else: + with open(results_file_path, 'r') as file: + result_yaml = yaml.safe_load(file) + if isinstance(result_yaml, list): + if 'result' in result_yaml[0].keys(): + self.test_result = result_yaml[0]['result'] + if 'log' in result_yaml[0].keys(): + if isinstance(result_yaml[0]['log'], list): + log_file
diff --git a/api/tmttestrun.py b/api/tmttestrun.py
deleted file mode 100644
index 17ee42d..0000000
--- a/api/tmttestrun.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#! /bin/python3
-import argparse
-import os
-import subprocess
-import sys
-import yaml
-
-currentdir = os.path.dirname(os.path.realpath(__file__))
-sys.path.insert(1, os.path.dirname(currentdir))
-
-from db.models.test_specification_test_case import TestSpecificationTestCaseModel
-from db.models.test_run import TestRunModel
-from db.models.sw_requirement_test_case import SwRequirementTestCaseModel
-from db.models.notification import NotificationModel
-from db.models.api_test_case import ApiTestCaseModel
-from db import db_orm
-from sqlalchemy.orm.exc import NoResultFound
-
-
-class TmtTestRunner():
-
-    RESULT_FAIL = 'fail'
-    RESULT_PASS = 'pass'
-
-    context = {}
-    env = {}
-    id = None
-    plan = 'tmt-plan'
-    execution_result = ''
-    execution_return_code = -1
-    test_result = ''
-    test_report = ''
-    root_dir = os.getenv('BASIL_TMT_WORKDIR_ROOT', '/var/tmp/tmt')  # Same as TMT_LOGS_PATH defined in api.py
-    ssh_keys_dir = os.path.join(currentdir, 'ssh_keys')  # Same as SSH_KEYS_PATH defined in api.py
-    dbi = None
-    db_test_run = None
-    db_test_case = None
-    DB_NAME = 'basil.db'
-
-    def __del__(self):
-        if self.dbi:
-            self.dbi.engine.dispose()
-
-    def __init__(self, id):
-        self.id = id
-        self.dbi = db_orm.DbInterface(self.DB_NAME)
-
-        if not os.path.exists(self.root_dir):
-            os.mkdir(self.root_dir)
-
-        # Test Run
-        try:
-            self.db_test_run = self.dbi.session.query(TestRunModel).filter(
-                TestRunModel.id == self.id
-            ).one()
-        except NoResultFound:
-            print("No Test Run")
-            sys.exit(1)
-
-        # Test Case
-        mapping_model = None
-        if self.db_test_run.mapping_to == ApiTestCaseModel.__tablename__:
-            mapping_model = ApiTestCaseModel
-        elif self.db_test_run.mapping_to == SwRequirementTestCaseModel.__tablename__:
-            mapping_model = SwRequirementTestCaseModel
-        elif self.db_test_run.mapping_to == TestSpecificationTestCaseModel.__tablename__:
-            mapping_model = TestSpecificationTestCaseModel
-        else:
-            # Update db with the error info
-            sys.exit(2)
-
-        try:
-            mapping = self.dbi.session.query(mapping_model).filter(
-                mapping_model.id == self.db_test_run.mapping_id
-            ).one()
-        except BaseException:
-            # Update db with the error info
-            sys.exit(3)
-
-        env_str = f'basil_test_case_id={mapping.test_case.id};'
-        env_str += f'basil_test_case_title={mapping.test_case.title};'
-        env_str += f'basil_api_api={mapping.api.api};'
-        env_str += f'basil_api_library={mapping.api.library};'
-        env_str += f'basil_api_library_version={mapping.api.library_version};'
-        env_str += f'basil_test_case_mapping_table={self.db_test_run.mapping_to};'
-        env_str += f'basil_test_case_mapping_id={self.db_test_run.mapping_id};'
-        env_str += f'basil_test_relative_path={mapping.test_case.relative_path};'
-        env_str += f'basil_test_repo_path={mapping.test_case.repository};'
-        env_str += f'basil_test_repo_url={mapping.test_case.repository};'
-        env_str += f'basil_test_repo_ref={self.db_test_run.test_run_config.git_repo_ref};'
-        env_str += f'basil_test_run_id={self.db_test_run.uid};'
-        env_str += f'basil_test_run_title={self.db_test_run.title};'
-        env_str += f'basil_test_run_config_id={self.db_test_run.test_run_config.id};'
-        env_str += f'basil_test_run_config_title={self.db_test_run.test_run_config.title};'
-        env_str += f'basil_user_email={self.db_test_run.created_by.email};'
-        env_str += self.db_test_run.test_run_config.environment_vars
-        self.env = self.unpack_kv_str(env_str)
-        self.context = self.unpack_kv_str(self.db_test_run.test_run_config.context_vars)
-
-    def unpack_kv_str(self, _string):
-        # return a dict froma string formatted as
-        # key1=value1;key2=value2...
-        PAIRS_DIV = ';'
-        KV_DIV = '='
-        ret = {}
-        pairs = _string.split(PAIRS_DIV)
-        for pair in pairs:
-            if KV_DIV in pair:
-                if pair.count(KV_DIV) == 1:
-                    ret[pair.split(KV_DIV)[0].strip()] = pair.split(KV_DIV)[1].strip()
-        return ret
-
-    def notify(self):
-        # Notification
-        if self.db_test_run.mapping_to == ApiTestCaseModel.__tablename__:
-            mapping_model = ApiTestCaseModel
-        elif self.db_test_run.mapping_to == SwRequirementTestCaseModel.__tablename__:
-            mapping_model = SwRequirementTestCaseModel
-        elif self.db_test_run.mapping_to == TestSpecificationTestCaseModel.__tablename__:
-            mapping_model = TestSpecificationTestCaseModel
-        else:
-            return False
-
-        try:
-            mapping = self.dbi.session.query(mapping_model).filter(
-                mapping_model.id == self.db_test_run.mapping_id
-            ).one()
-        except BaseException:
-            return False
-
-        if self.test_result == 'pass':
-            variant = 'success'
-        else:
-            variant = 'danger'
-
-        notification = f'Test Run for Test Case ' \
-                       f'{mapping.test_case.title} as part of the sw component ' \
-                       f'{self.db_test_run.api.api}, library {self.db_test_run.api.library} ' \
-                       f'completed with: {self.test_result.upper()}'
-        notifications = NotificationModel(self.db_test_run.api,
-                                          variant,
-                                          f'Test Run for {self.db_test_run.api.api} {self.test_result.upper()}',
-                                          notification,
-                                          '',
-                                          f'/mapping/{self.db_test_run.api.id}')
-        self.dbi.session.add(notifications)
-        self.dbi.session.commit()
-
-    def publish(self):
-        if self.execution_result == self.RESULT_PASS:
-            results_file_path = f'{self.root_dir}/{self.db_test_run.uid}/api/{self.plan}/execute/results.yaml'
-            report_file_path = f'{self.root_dir}/{self.db_test_run.uid}/api/{self.plan}/report/html-report/index.html'
-
-            if not os.path.exists(results_file_path):
-                self.execution_result = self.RESULT_FAIL
-                self.test_result = self.RESULT_FAIL
-            else:
-                with open(results_file_path, 'r') as file:
-                    result_yaml = yaml.safe_load(file)
-                    if isinstance(result_yaml, list):
-                        if 'result' in result_yaml[0].keys():
-                            self.test_result = result_yaml[0]['result']
-                        if 'log' in result_yaml[0].keys():
-                            if isinstance(result_yaml[0]['log'], list):
-                                log_file = result_yaml[0]['log'][0]
-                                if os.path.exists(log_file):
-                                    f = open(log_file, 'r')
-                                    self.log = f.read()
-                                    f.close()
-
-            if os.path.exists(report_file_path):
-                self.test_report = report_file_path
-
-        else:
-            self.test_result = 'not executed'
-
-        # Update db
-        try:
-            db_test_run = self.dbi.session.query(TestRunModel).filter(
-                TestRunModel.id == self.id
-            ).one()
-        except NoResultFound:
-            print("No Test Run")
-            sys.exit(1)
-
-        db_test_run.status = 'done'
-        db_test_run.result = self.test_result
-        db_test_run.log = self.log
-        self.dbi.session.add(db_test_run)
-        self.dbi.session.commit()
-
-    def run(self):
-        context_options_str = ''
-        env_options_str = ''
-        provision_str = 'container --stop-time 30'
-        root_dir_var_str = ''
-
-        if len(self.context.keys()) > 0:
-            for k, v in self.context.items():
-                if len(v):
-                    context_options_str += f"-c {k}='\"{v}\"' "
-
-        if len(self.env.keys()) > 0:
-            for k, v in self.env.items():
-                if len(v):
-                    env_options_str += f"-e {k}='\"{v}\"' "
-
-        if self.db_test_run.test_run_config.provision_type == 'connect':
-            if self.db_test_run.test_run_config.provision_guest != '' and \
-                    self.db_test_run.test_run_config.ssh_key_id != '':
-                provision_str = f'connect --guest {self.db_test_run.test_run_config.provision_guest} '
-                provision_str += f'--key {self.ssh_keys_dir}/{self.db_test_run.test_run_config.ssh_key_id}'
-                if self.db_test_run.test_run_config.provision_guest_port != '':
-                    provision_str += f' --port {self.db_test_run.test_run_config.provision_guest_port}'
-
-        if self.root_dir != '':
-            root_dir_var_str = f'export TMT_WORKDIR_ROOT={self.root_dir}'
-
-        # skip prepare that can generate package manager error on some systems
-        cmd = f'{root_dir_var_str} && cd {currentdir} && ' \
-              f'tmt {context_options_str} run -vvv -a --id {self.db_test_run.uid} {env_options_str} ' \
-              f'provision --how {provision_str} plan --name {self.plan}'
-        cmd = cmd.replace('  ', ' ')
-
-        process = subprocess.Popen(cmd,
-                                   shell=True,
-                                   stdin=subprocess.PIPE,
-                                   stdout=subprocess.PIPE,
-                                   stderr=subprocess.PIPE)
-
-        out, err = process.communicate()
-        self.execution_return_code = process.returncode
-        if self.execution_return_code in [0, 1]:
-            self.execution_result = self.RESULT_PASS
-        else:
-            self.execution_result = self.RESULT_FAIL
-
-        self.log = f'out: {out.decode("utf-8")}'
-        self.log += '\n--------------------------'
-        self.log += f'\nerr: {err.decode("utf-8")}'
-
-        self.publish()
-        self.notify()
-
-
-if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument("--id", type=str, help="TODO")
-    args = parser.parse_args()
-
-    tr = TmtTestRunner(id=args.id)
-    tr.run()
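Both the deleted runner (publish) and the new plugin (run) evaluate tmt results the same way: results.yaml holds a list of per-test dictionaries, and only the first entry's result and log keys are consulted. A minimal, self-contained sketch of that evaluation, assuming a typical results.yaml shape (the sample payload is illustrative; the key names come from the code above):

    import yaml

    # Illustrative payload; the real file is written by tmt under
    # <root_dir>/<uid>/api/tmt-plan/execute/results.yaml
    sample = '''
    - name: /some-test
      result: pass
      log:
        - /var/tmp/tmt/some-uid/output.txt
    '''

    result_yaml = yaml.safe_load(sample)
    test_result = 'fail'
    log_files = []
    if isinstance(result_yaml, list) and result_yaml:
        entry = result_yaml[0]
        if 'result' in entry:
            test_result = entry['result']      # e.g. 'pass' or 'fail'
        if isinstance(entry.get('log'), list):
            log_files = entry['log']           # the first file is appended to the run log
    print(test_result, log_files)

Note that only result_yaml[0] is read, so a plan with multiple tests reports the outcome of the first one; that behavior is unchanged between the deleted runner and the new plugin.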
diff --git a/app/src/app/Constants/constants.tsx b/app/src/app/Constants/constants.tsx
index 7e2a675..3ca2ad4 100644
--- a/app/src/app/Constants/constants.tsx
+++ b/app/src/app/Constants/constants.tsx
@@ -37,6 +37,13 @@ export const provision_type = [
   { value: 'connect', label: 'SSH', disabled: false }
 ]
 
+export const test_run_plugins = [
+  { value: 'tmt', label: 'tmt', disabled: false },
+  { value: 'github_actions', label: 'github actions', disabled: false },
+  { value: 'gitlab_ci', label: 'gitlab ci', disabled: false },
+  { value: 'kernel_ci', label: 'KernelCI', disabled: false }
+]
+
 export const spdx_relations = [
   { value: '', label: 'Select a value', disabled: false },
   { value: 'AFFECTS', label: 'AFFECTS', disabled: false },
@@ -118,6 +125,30 @@ export const capitalizeFirstWithoutHashes = (_string: string) => {
   return tmp.charAt(0).toUpperCase() + tmp.slice(1)
 }
 
+export const extend_config_with_plugin_vars = (config) => {
+  if (Object.keys(config).indexOf('plugin_vars') < 0) {
+    return config
+  }
+  let vars_str = config['plugin_vars']
+  let kv = vars_str.split(';')
+  let tmp
+  for (let i = 0; i < kv.length; i++) {
+    tmp = kv[i].split('=')
+    if (tmp.length == 2) {
+      config[tmp[0].trim()] = tmp[1].trim()
+    }
+  }
+  return config
+}
+
+export const get_config_plugin_var = (_config, _varname) => {
+  let tmp_config = extend_config_with_plugin_vars(_config)
+  if (Object.keys(tmp_config).indexOf(_varname) > -1) {
+    return tmp_config[_varname]
+  }
+  return ''
+}
+
 export const tcFormEmpty = {
   coverage: 0,
   description: '',
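extend_config_with_plugin_vars and get_config_plugin_var parse the same key1=value1;key2=value2 convention the backend uses (compare unpack_kv_str in the deleted runner above). A Python round-trip sketch of the convention; the helper names here are illustrative and belong to neither codebase:

    def pack_kv(d):
        # dict -> 'key1=value1;key2=value2'
        return ';'.join(f'{k}={v}' for k, v in d.items())

    def unpack_kv(s):
        # 'key1=value1;key2=value2' -> dict; pairs without exactly
        # one '=' are silently dropped, as in unpack_kv_str
        ret = {}
        for pair in s.split(';'):
            if pair.count('=') == 1:
                k, v = pair.split('=')
                ret[k.strip()] = v.strip()
        return ret

    vars_str = pack_kv({'job_id': '42', 'stage': 'test'})
    assert unpack_kv(vars_str) == {'job_id': '42', 'stage': 'test'}

One consequence of the single-'=' rule: a value that itself contains '=' (say, a base64 token) is dropped rather than parsed, on both the Python and the TypeScript side.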
diff --git a/app/src/app/Mapping/Form/TestRunBugForm.tsx b/app/src/app/Mapping/Form/TestRunBugForm.tsx
index 6355a7d..a6f1f37 100644
--- a/app/src/app/Mapping/Form/TestRunBugForm.tsx
+++ b/app/src/app/Mapping/Form/TestRunBugForm.tsx
@@ -18,20 +18,26 @@ export const TestRunBugForm: React.FunctionComponent<TestRunBugFormProps> = ({
 }: TestRunBugFormProps) => {
   const auth = useAuth()
   const [bugsValue, setBugsValue] = React.useState(modalTestRun.bugs)
-  const [noteValue, setNoteValue] = React.useState(modalTestRun.note)
+  const [fixesValue, setFixesValue] = React.useState(modalTestRun.fixes)
+  const [notesValue, setNotesValue] = React.useState(modalTestRun.notes)
   const [messageValue, setMessageValue] = React.useState('')
 
   const handleBugsValueChange = (value: string) => {
     setBugsValue(value)
   }
 
-  const handleNoteValueChange = (value: string) => {
-    setNoteValue(value)
+  const handleFixesValueChange = (value: string) => {
+    setFixesValue(value)
+  }
+
+  const handleNotesValueChange = (value: string) => {
+    setNotesValue(value)
   }
 
   React.useEffect(() => {
     setBugsValue(modalTestRun.bugs)
-    setNoteValue(modalTestRun.note)
+    setFixesValue(modalTestRun.fixes)
+    setNotesValue(modalTestRun.notes)
   }, [modalTestRun])
 
   const handleSubmit = () => {
@@ -51,7 +57,8 @@ export const TestRunBugForm: React.FunctionComponent<TestRunBugFormProps> = ({
       'api-id': api.id,
       id: modalTestRun.id,
       bugs: bugsValue.trim(),
-      note: noteValue.trim(),
+      fixes: fixesValue.trim(),
+      notes: notesValue.trim(),
       'user-id': auth.userId,
       token: auth.token,
       mapped_to_type: mapping_to,
@@ -80,26 +87,35 @@ export const TestRunBugForm: React.FunctionComponent<TestRunBugFormProps> = ({
   return (
-        <TextArea value={bugsValue} onChange={handleBugsValueChange} />
+        <TextArea value={bugsValue} onChange={(_event, value) => handleBugsValueChange(value)} />
-        <TextArea value={noteValue} onChange={handleNoteValueChange} />
+        <FormGroup label='Fixes'>
+          <TextArea value={fixesValue} onChange={(_event, value) => handleFixesValueChange(value)}
+          />
+        </FormGroup>
+        <FormGroup label='Notes'>
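For reference, after this change the form submits fixes and notes alongside bugs. The request body assembled in handleSubmit has roughly the following shape; this is a sketch of the payload only, with example values, since the endpoint URL is outside this hunk:

    # Hypothetical reconstruction of the body sent by handleSubmit;
    # the field names come from the hunk above, the values are examples.
    payload = {
        'api-id': 1,                    # api.id
        'id': 10,                       # modalTestRun.id
        'bugs': 'BZ#123',               # bugsValue.trim()
        'fixes': 'commit abc123',       # fixesValue.trim()
        'notes': 'flaky on aarch64',    # notesValue.trim()
        'user-id': 2,                   # auth.userId
        'token': '...',                 # auth.token
        'mapped_to_type': 'api-test-case',  # mapping_to (illustrative)
    }

The rename from a single note field to separate fixes and notes matches the state and useEffect changes earlier in the same file.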