diff --git a/conf/docker.yaml.template b/conf/docker.yaml.template index 81fc6a84dd0..4176b38ee8a 100644 --- a/conf/docker.yaml.template +++ b/conf/docker.yaml.template @@ -9,3 +9,23 @@ DOCKER: PRIVATE_REGISTRY_USERNAME: # Private docker registry password PRIVATE_REGISTRY_PASSWORD: + # Image Pass Registry + IMAGE_REGISTRY: + # image repository URL + URL: + # Pull a non-namespace image using the image pass registry proxy + NON_NAMESPACE: + # Proxy for the non-namespace image + PROXY: + # Username for the non-namespace image pass registry proxy + USERNAME: + # Password for the non-namespace image pass registry proxy + PASSWORD: + # Pull a namespace image using the image pass registry proxy + NAMESPACE: + # proxy for the namespace image + PROXY: + # Username for the namespace image pass registry proxy + USERNAME: + # Password for the namespace image pass registry proxy + PASSWORD: diff --git a/pytest_fixtures/component/domain.py b/pytest_fixtures/component/domain.py index c62b45f54d5..57a1c265b30 100644 --- a/pytest_fixtures/component/domain.py +++ b/pytest_fixtures/component/domain.py @@ -1,15 +1,15 @@ # Domain Fixtures -from nailgun import entities import pytest @pytest.fixture(scope='session') def default_domain(session_target_sat, default_smart_proxy): domain_name = session_target_sat.hostname.partition('.')[-1] - dom = entities.Domain().search(query={'search': f'name={domain_name}'})[0] - dom.dns = default_smart_proxy - dom.update(['dns']) - return entities.Domain(id=dom.id).read() + dom = session_target_sat.api.Domain().search(query={'search': f'name={domain_name}'})[0] + if 'dns' in session_target_sat.get_features(): + dom.dns = default_smart_proxy + dom.update(['dns']) + return session_target_sat.api.Domain(id=dom.id).read() @pytest.fixture(scope='module') diff --git a/pytest_fixtures/component/provision_pxe.py b/pytest_fixtures/component/provision_pxe.py index d4f835a60a9..0b800778eb8 100644 --- a/pytest_fixtures/component/provision_pxe.py +++ 
b/pytest_fixtures/component/provision_pxe.py @@ -231,6 +231,33 @@ def provisioning_host(module_ssh_key_file, pxe_loader): prov_host.blank = getattr(prov_host, 'blank', False) +@pytest.fixture +def provision_multiple_hosts(module_ssh_key_file, pxe_loader, request): + """Fixture to check out two blank VMs""" + vlan_id = settings.provisioning.vlan_id + cd_iso = ( + "" # TODO: Make this an optional fixture parameter (update vm_firmware when adding this) + ) + # Keeping the default value to 2 + count = request.param if request.param is not None else 2 + + with Broker( + workflow="deploy-configure-pxe-provisioning-host-rhv", + host_class=ContentHost, + _count=count, + target_vlan_id=vlan_id, + target_vm_firmware=pxe_loader.vm_firmware, + target_vm_cd_iso=cd_iso, + blank=True, + target_memory='6GiB', + auth=module_ssh_key_file, + ) as hosts: + yield hosts + + for prov_host in hosts: + prov_host.blank = getattr(prov_host, 'blank', False) + + @pytest.fixture def provisioning_hostgroup( module_provisioning_sat, diff --git a/pytest_plugins/auto_vault.py b/pytest_plugins/auto_vault.py index e63fc7f0835..cb9e1f0c10a 100644 --- a/pytest_plugins/auto_vault.py +++ b/pytest_plugins/auto_vault.py @@ -1,5 +1,4 @@ """Plugin enables pytest to notify and update the requirements""" -import subprocess from robottelo.utils.vault import Vault @@ -7,4 +6,4 @@ def pytest_addoption(parser): """Options to allow user to update the requirements""" with Vault() as vclient: - vclient.login(stdout=subprocess.PIPE, stderr=subprocess.PIPE) + vclient.login() diff --git a/pytest_plugins/fixture_collection.py b/pytest_plugins/fixture_collection.py index 934efa5f56d..6f61f2b3360 100644 --- a/pytest_plugins/fixture_collection.py +++ b/pytest_plugins/fixture_collection.py @@ -13,7 +13,7 @@ def pytest_addoption(parser): example: pytest tests/foreman --uses-fixtures target_sat module_target_sat ''' - parser.addoption("--uses-fixtures", nargs='+', help=help_text) + parser.addoption("--uses-fixtures", 
nargs='?', help=help_text) def pytest_collection_modifyitems(items, config): @@ -22,17 +22,18 @@ def pytest_collection_modifyitems(items, config): return filter_fixtures = config.getvalue('uses_fixtures') + fixtures_list = filter_fixtures.split(',') if ',' in filter_fixtures else [filter_fixtures] selected = [] deselected = [] for item in items: - if set(item.fixturenames).intersection(set(filter_fixtures)): + if set(item.fixturenames).intersection(set(fixtures_list)): selected.append(item) else: deselected.append(item) logger.debug( f'Selected {len(selected)} and deselected {len(deselected)} ' - f'tests based on given fixtures {filter_fixtures} used by tests' + f'tests based on given fixtures {fixtures_list} used by tests' ) config.hook.pytest_deselected(items=deselected) items[:] = selected diff --git a/requirements.txt b/requirements.txt index a0c2d3518b8..aa4168050a1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,9 +2,9 @@ betelgeuse==1.10.0 broker[docker]==0.4.1 -cryptography==41.0.4 -deepdiff==6.6.0 -dynaconf[vault]==3.2.3 +cryptography==41.0.5 +deepdiff==6.6.1 +dynaconf[vault]==3.2.4 fauxfactory==3.1.0 jinja2==3.1.2 manifester==0.0.14 @@ -12,10 +12,10 @@ navmazing==1.1.6 productmd==1.37 pyotp==2.9.0 python-box==7.1.1 -pytest==7.4.2 +pytest==7.4.3 pytest-services==2.2.1 -pytest-mock==3.11.1 -pytest-reportportal==5.2.2 +pytest-mock==3.12.0 +pytest-reportportal==5.3.0 pytest-xdist==3.3.1 pytest-ibutsu==2.2.4 PyYAML==6.0.1 @@ -23,7 +23,7 @@ requests==2.31.0 tenacity==8.2.3 testimony==2.3.0 wait-for==1.2.0 -wrapanapi==3.5.18 +wrapanapi==3.6.0 # Get airgun, nailgun and upgrade from 6.13.z git+https://github.com/SatelliteQE/airgun.git@6.13.z#egg=airgun diff --git a/robottelo/cli/sm_packages.py b/robottelo/cli/sm_packages.py index ece44a773e0..d4674279172 100644 --- a/robottelo/cli/sm_packages.py +++ b/robottelo/cli/sm_packages.py @@ -30,6 +30,7 @@ class Packages(Base): def lock(cls, options=None): """Build satellite-maintain packages lock""" 
cls.command_sub = 'lock' + cls.command_end = None options = options or {} return cls.sm_execute(cls._construct_command(options)) @@ -37,6 +38,7 @@ def lock(cls, options=None): def unlock(cls, options=None): """Build satellite-maintain packages unlock""" cls.command_sub = 'unlock' + cls.command_end = None options = options or {} return cls.sm_execute(cls._construct_command(options)) @@ -44,6 +46,7 @@ def unlock(cls, options=None): def is_locked(cls, options=None): """Build satellite-maintain packages is-locked""" cls.command_sub = 'is-locked' + cls.command_end = None options = options or {} return cls.sm_execute(cls._construct_command(options)) @@ -75,5 +78,6 @@ def update(cls, packages='', options=None): def check_update(cls, options=None): """Build satellite-maintain packages check-update""" cls.command_sub = 'check-update' + cls.command_end = None options = options or {} return cls.sm_execute(cls._construct_command(options)) diff --git a/robottelo/constants/__init__.py b/robottelo/constants/__init__.py index 107fa56f4cf..a51263410c3 100644 --- a/robottelo/constants/__init__.py +++ b/robottelo/constants/__init__.py @@ -287,9 +287,9 @@ class Colored(Box): 'rhsc7': 'Red Hat Satellite Capsule 6.11 (for RHEL 7 Server) (RPMs)', 'rhsc8': 'Red Hat Satellite Capsule 6.13 for RHEL 8 x86_64 (RPMs)', 'rhsc7_iso': 'Red Hat Satellite Capsule 6.4 (for RHEL 7 Server) (ISOs)', - 'rhsclient7': 'Red Hat Satellite Client 6 for RHEL 7 Server RPMs x86_64', - 'rhsclient8': 'Red Hat Satellite Client 6 for RHEL 8 x86_64 RPMs', - 'rhsclient9': 'Red Hat Satellite Client 6 for RHEL 9 x86_64 RPMs', + 'rhsclient7': 'Red Hat Satellite Client 6 (for RHEL 7 Server) (RPMs)', + 'rhsclient8': 'Red Hat Satellite Client 6 for RHEL 8 x86_64 (RPMs)', + 'rhsclient9': 'Red Hat Satellite Client 6 for RHEL 9 x86_64 (RPMs)', 'rhst7': 'Red Hat Satellite Tools 6.9 (for RHEL 7 Server) (RPMs)', 'rhst7_610': 'Red Hat Satellite Tools 6.10 (for RHEL 7 Server) (RPMs)', 'rhst6': 'Red Hat Satellite Tools 6.9 (for 
RHEL 6 Server) (RPMs)', @@ -406,7 +406,7 @@ class Colored(Box): 'name': ('Red Hat Satellite Client 6 for RHEL 8 x86_64 RPMs'), 'version': '6', 'reposet': REPOSET['rhsclient8'], - 'product': PRDS['rhel'], + 'product': PRDS['rhel8'], 'distro': 'rhel8', 'key': PRODUCT_KEY_SAT_CLIENT, }, @@ -415,7 +415,7 @@ class Colored(Box): 'name': ('Red Hat Satellite Client 6 for RHEL 9 x86_64 RPMs'), 'version': '6', 'reposet': REPOSET['rhsclient9'], - 'product': PRDS['rhel'], + 'product': PRDS['rhel9'], 'distro': 'rhel9', 'key': PRODUCT_KEY_SAT_CLIENT, }, @@ -1729,7 +1729,7 @@ class Colored(Box): VMWARE_CONSTANTS = { 'folder': 'vm', - 'guest_os': 'Red Hat Enterprise Linux 8 (64-bit)', + 'guest_os': 'Red Hat Enterprise Linux 8 (64 bit)', 'scsicontroller': 'LSI Logic Parallel', 'virtualhw_version': 'Default', 'pool': 'Resources', diff --git a/robottelo/utils/vault.py b/robottelo/utils/vault.py index a4b5d48adb4..d447331ac15 100644 --- a/robottelo/utils/vault.py +++ b/robottelo/utils/vault.py @@ -19,25 +19,32 @@ class Vault: def __init__(self, env_file='.env'): self.env_path = robottelo_root_dir.joinpath(env_file) + self.envdata = None + self.vault_enabled = None def setup(self): - self.export_vault_addr() + if self.env_path.exists(): + self.envdata = self.env_path.read_text() + is_enabled = re.findall('^(?:.*\n)*VAULT_ENABLED_FOR_DYNACONF=(.*)', self.envdata) + if is_enabled: + self.vault_enabled = is_enabled[0] + self.export_vault_addr() def teardown(self): - del os.environ['VAULT_ADDR'] + if os.environ.get('VAULT_ADDR') is not None: + del os.environ['VAULT_ADDR'] def export_vault_addr(self): - envdata = self.env_path.read_text() - vaulturl = re.findall('VAULT_URL_FOR_DYNACONF=(.*)', envdata)[0] + vaulturl = re.findall('VAULT_URL_FOR_DYNACONF=(.*)', self.envdata)[0] # Set Vault CLI Env Var os.environ['VAULT_ADDR'] = vaulturl # Dynaconf Vault Env Vars - if re.findall('VAULT_ENABLED_FOR_DYNACONF=(.*)', envdata)[0] == 'true': + if self.vault_enabled and self.vault_enabled in ['True', 
'true']: if 'localhost:8200' in vaulturl: raise InvalidVaultURLForOIDC( - f"{vaulturl} doesnt supports OIDC login," + f"{vaulturl} doesn't support OIDC login," "please change url to corp vault in env file!" ) @@ -47,7 +54,7 @@ def exec_vault_command(self, command: str, **kwargs): :param comamnd str: The vault CLI command :param kwargs dict: Arguments to the subprocess run command to customize the run behavior """ - vcommand = subprocess.run(command, shell=True, **kwargs) # capture_output=True + vcommand = subprocess.run(command, shell=True, capture_output=True, **kwargs) if vcommand.returncode != 0: verror = str(vcommand.stderr) if vcommand.returncode == 127: @@ -57,15 +64,19 @@ def exec_vault_command(self, command: str, **kwargs): if 'Error revoking token' in verror: logger.info("Token is alredy revoked!") elif 'Error looking up token' in verror: - logger.warning("Warning! Vault not logged in!") + logger.info("Vault is not logged in!") else: logger.error(f"Error! {verror}") return vcommand def login(self, **kwargs): - if 'VAULT_SECRET_ID_FOR_DYNACONF' not in os.environ: + if ( + self.vault_enabled + and self.vault_enabled in ['True', 'true'] + and 'VAULT_SECRET_ID_FOR_DYNACONF' not in os.environ + ): if self.status(**kwargs).returncode != 0: - logger.warning( + logger.info( "Warning! The browser is about to open for vault OIDC login, " "close the tab once the sign-in is done!" ) @@ -76,29 +87,28 @@ def login(self, **kwargs): self.exec_vault_command(command="vault token renew -i 10h", **kwargs) logger.info("Success! 
Vault OIDC Logged-In and extended for 10 hours!") # Fetching tokens - token = self.exec_vault_command( - "vault token lookup --format json", capture_output=True - ).stdout + token = self.exec_vault_command("vault token lookup --format json").stdout token = json.loads(str(token.decode('UTF-8')))['data']['id'] # Setting new token in env file - envdata = self.env_path.read_text() - envdata = re.sub( - '.*VAULT_TOKEN_FOR_DYNACONF=.*', f"VAULT_TOKEN_FOR_DYNACONF={token}", envdata + _envdata = re.sub( + '.*VAULT_TOKEN_FOR_DYNACONF=.*', + f"VAULT_TOKEN_FOR_DYNACONF={token}", + self.envdata, ) - self.env_path.write_text(envdata) + self.env_path.write_text(_envdata) logger.info( "Success! New OIDC token added to .env file to access secrets from vault!" ) def logout(self): # Teardown - Setting dymmy token in env file - envdata = self.env_path.read_text() - envdata = re.sub( - '.*VAULT_TOKEN_FOR_DYNACONF=.*', "# VAULT_TOKEN_FOR_DYNACONF=myroot", envdata + _envdata = re.sub( + '.*VAULT_TOKEN_FOR_DYNACONF=.*', "# VAULT_TOKEN_FOR_DYNACONF=myroot", self.envdata ) - self.env_path.write_text(envdata) - self.exec_vault_command('vault token revoke -self') - logger.info("Success! OIDC token removed from Env file successfully!") + self.env_path.write_text(_envdata) + vstatus = self.exec_vault_command('vault token revoke -self') + if vstatus.returncode == 0: + logger.info("Success! OIDC token removed from Env file successfully!") def status(self, **kwargs): vstatus = self.exec_vault_command('vault token lookup', **kwargs) diff --git a/tests/foreman/api/test_capsulecontent.py b/tests/foreman/api/test_capsulecontent.py index bced65e6853..ed436479831 100644 --- a/tests/foreman/api/test_capsulecontent.py +++ b/tests/foreman/api/test_capsulecontent.py @@ -1355,3 +1355,72 @@ def test_positive_remove_capsule_orphans( 'ls /var/lib/pulp/media/artifact/*/* | xargs file | grep RPM' ) assert result.status, 'RPM artifacts are still present. They should be gone.' 
+ + @pytest.mark.skip_if_not_set('capsule') + def test_positive_capsule_sync_openstack_container_repos( + self, + module_target_sat, + module_capsule_configured, + function_org, + function_product, + function_lce, + ): + """Synchronize openstack container repositories to capsule + + :id: 23e64385-7f34-4ab9-bd63-72306e5a4de0 + + :setup: + 1. A blank external capsule that has not been synced yet. + + :steps: + 1. Enable and sync openstack container repos. + + :expectedresults: + 1. container repos should sync on capsule. + + :customerscenario: true + + :BZ: 2154734 + + """ + upstream_names = [ + 'rhosp13/openstack-cinder-api', + 'rhosp13/openstack-neutron-server', + 'rhosp13/openstack-neutron-dhcp-agent', + 'rhosp13/openstack-nova-api', + ] + repos = [] + + for ups_name in upstream_names: + repo = module_target_sat.api.Repository( + content_type='docker', + docker_upstream_name=ups_name, + product=function_product, + url=constants.RH_CONTAINER_REGISTRY_HUB, + upstream_username=settings.subscription.rhn_username, + upstream_password=settings.subscription.rhn_password, + ).create() + repo.sync(timeout=1800) + repos.append(repo) + + # Associate LCE with the capsule + module_capsule_configured.nailgun_capsule.content_add_lifecycle_environment( + data={'environment_id': function_lce.id} + ) + result = module_capsule_configured.nailgun_capsule.content_lifecycle_environments() + assert len(result['results']) + assert function_lce.id in [capsule_lce['id'] for capsule_lce in result['results']] + + # Create and publish a content view with all repositories + cv = module_target_sat.api.ContentView(organization=function_org, repository=repos).create() + cv.publish() + cv = cv.read() + assert len(cv.version) == 1 + + # Promote the latest CV version into capsule's LCE + cvv = cv.version[-1].read() + cvv.promote(data={'environment_ids': function_lce.id}) + cvv = cvv.read() + assert len(cvv.environment) == 2 + + module_capsule_configured.wait_for_sync() diff --git 
a/tests/foreman/api/test_convert2rhel.py b/tests/foreman/api/test_convert2rhel.py index a8a450b3809..ce2e78d85b2 100644 --- a/tests/foreman/api/test_convert2rhel.py +++ b/tests/foreman/api/test_convert2rhel.py @@ -47,6 +47,12 @@ def create_activation_key(sat, org, lce, cv, subscription_id): environment=lce, ).create() act_key.add_subscriptions(data={'subscription_id': subscription_id}) + content = sat.cli.ActivationKey.product_content({'id': act_key.id, 'organization-id': org.id}) + act_key.content_override( + data={'content_overrides': [{'content_label': content[0]['label'], 'value': '1'}]} + ) + ak_subscriptions = act_key.product_content()['results'] + ak_subscriptions[0]['enabled'] = True return act_key @@ -59,11 +65,11 @@ def update_cv(sat, cv, lce, repos): return cv -def register_host(sat, act_key, module_org, module_loc, host, ubi=None): +def register_host(sat, act_key, org, module_loc, host, ubi=None): """Register host to satellite""" # generate registration command command = sat.api.RegistrationCommand( - organization=module_org, + organization=org, activation_keys=[act_key.name], location=module_loc, insecure=True, @@ -83,13 +89,21 @@ def ssl_cert(module_target_sat, module_org): @pytest.fixture -def activation_key_rhel(target_sat, module_org, module_lce, module_promoted_cv, version): +def activation_key_rhel( + module_target_sat, module_entitlement_manifest_org, module_lce, module_promoted_cv, version +): """Create activation key that will be used after conversion for registration""" - subs = target_sat.api.Subscription(organization=module_org).search( - query={'search': f'{DEFAULT_SUBSCRIPTION_NAME}'} - ) + subs = module_target_sat.api.Subscription( + organization=module_entitlement_manifest_org.id + ).search(query={'search': f'{DEFAULT_SUBSCRIPTION_NAME}'}) assert subs - return create_activation_key(target_sat, module_org, module_lce, module_promoted_cv, subs[0].id) + return create_activation_key( + module_target_sat, + module_entitlement_manifest_org, + 
module_lce, + module_promoted_cv, + subs[0].id, + ) @pytest.fixture(scope='module') @@ -123,6 +137,8 @@ def enable_rhel_subscriptions(module_target_sat, module_entitlement_manifest_org module_target_sat.wait_for_tasks( search_query=(f'id = {task["id"]}'), poll_timeout=2500, + search_rate=20, + max_tries=10, ) task_status = module_target_sat.api.ForemanTask(id=task['id']).poll() assert task_status['result'] == 'success' @@ -131,9 +147,9 @@ def enable_rhel_subscriptions(module_target_sat, module_entitlement_manifest_org @pytest.fixture def centos( - target_sat, + module_target_sat, centos_host, - module_org, + module_entitlement_manifest_org, smart_proxy_location, module_promoted_cv, module_lce, @@ -144,15 +160,28 @@ def centos( # updating centos packages on CentOS 8 is necessary for conversion major = version.split('.')[0] if major == '8': - centos_host.execute("yum update -y centos-*") + centos_host.execute('yum -y update centos-*') repo_url = settings.repos.convert2rhel.convert_to_rhel_repo.format(major) - repo = create_repo(target_sat, module_org, repo_url) - cv = update_cv(target_sat, module_promoted_cv, module_lce, enable_rhel_subscriptions + [repo]) - c2r_sub = target_sat.api.Subscription(organization=module_org, name=repo.product.name).search()[ - 0 - ] - act_key = create_activation_key(target_sat, module_org, module_lce, cv, c2r_sub.id) - register_host(target_sat, act_key, module_org, smart_proxy_location, centos_host) + repo = create_repo(module_target_sat, module_entitlement_manifest_org, repo_url) + cv = update_cv( + module_target_sat, module_promoted_cv, module_lce, enable_rhel_subscriptions + [repo] + ) + c2r_sub = module_target_sat.api.Subscription( + organization=module_entitlement_manifest_org.id, name=repo.product.name + ).search()[0] + act_key = create_activation_key( + module_target_sat, module_entitlement_manifest_org, module_lce, cv, c2r_sub.id + ) + register_host( + module_target_sat, + act_key, + module_entitlement_manifest_org, + 
smart_proxy_location, + centos_host, + ) + centos_host.execute('yum -y update kernel*') + if centos_host.execute('needs-restarting -r').status == 1: + centos_host.power_control(state='reboot') yield centos_host # close ssh session before teardown, because of reboot in conversion it may cause problems centos_host.close() @@ -160,9 +189,9 @@ def centos( @pytest.fixture def oracle( - target_sat, + module_target_sat, oracle_host, - module_org, + module_entitlement_manifest_org, smart_proxy_location, module_promoted_cv, module_lce, @@ -183,16 +212,27 @@ def oracle( oracle_host.power_control(state='reboot') major = version.split('.')[0] repo_url = settings.repos.convert2rhel.convert_to_rhel_repo.format(major) - repo = create_repo(target_sat, module_org, repo_url, ssl_cert) - cv = update_cv(target_sat, module_promoted_cv, module_lce, enable_rhel_subscriptions + [repo]) - c2r_sub = target_sat.api.Subscription(organization=module_org, name=repo.product.name).search()[ - 0 - ] - act_key = create_activation_key(target_sat, module_org, module_lce, cv, c2r_sub.id) + repo = create_repo(module_target_sat, module_entitlement_manifest_org, repo_url, ssl_cert) + cv = update_cv( + module_target_sat, module_promoted_cv, module_lce, enable_rhel_subscriptions + [repo] + ) + c2r_sub = module_target_sat.api.Subscription( + organization=module_entitlement_manifest_org, name=repo.product.name + ).search()[0] + act_key = create_activation_key( + module_target_sat, module_entitlement_manifest_org, module_lce, cv, c2r_sub.id + ) ubi_url = settings.repos.convert2rhel.ubi7 if major == '7' else settings.repos.convert2rhel.ubi8 - ubi = create_repo(target_sat, module_org, ubi_url) + ubi = create_repo(module_target_sat, module_entitlement_manifest_org, ubi_url) ubi_repo = ubi.full_path.replace('https', 'http') - register_host(target_sat, act_key, module_org, smart_proxy_location, oracle_host, ubi_repo) + register_host( + module_target_sat, + act_key, + module_entitlement_manifest_org, + 
smart_proxy_location, + oracle_host, + ubi_repo, + ) yield oracle_host # close ssh session before teardown, because of reboot in conversion it may cause problems oracle_host.close() @@ -201,7 +241,7 @@ def oracle( @pytest.fixture(scope='module') def version(request): """Version of converted OS""" - return settings.content_host.get(request.param).vm.release + return settings.content_host.get(request.param).vm.deploy_rhel_version @pytest.mark.e2e @@ -210,7 +250,7 @@ def version(request): ['oracle7', 'oracle8'], indirect=True, ) -def test_convert2rhel_oracle(target_sat, oracle, activation_key_rhel, version): +def test_convert2rhel_oracle(module_target_sat, oracle, activation_key_rhel, version): """Convert Oracle linux to RHEL :id: 7fd393f0-551a-4de0-acdd-7f026b485f79 @@ -227,46 +267,42 @@ def test_convert2rhel_oracle(target_sat, oracle, activation_key_rhel, version): :CaseImportance: Medium """ - host_content = target_sat.api.Host(id=oracle.hostname).read_json() + host_content = module_target_sat.api.Host(id=oracle.hostname).read_json() assert host_content['operatingsystem_name'] == f"OracleLinux {version}" # execute job 'Convert 2 RHEL' on host template_id = ( - target_sat.api.JobTemplate().search(query={'search': 'name="Convert to RHEL"'})[0].id + module_target_sat.api.JobTemplate().search(query={'search': 'name="Convert to RHEL"'})[0].id ) - job = target_sat.api.JobInvocation().run( + job = module_target_sat.api.JobInvocation().run( synchronous=False, data={ 'job_template_id': template_id, 'inputs': { 'Activation Key': activation_key_rhel.id, 'Restart': 'yes', + 'Data telemetry': 'yes', }, 'targeting_type': 'static_query', 'search_query': f'name = {oracle.hostname}', }, ) # wait for job to complete - target_sat.wait_for_tasks( - f'resource_type = JobInvocation and resource_id = {job["id"]}', poll_timeout=1000 + module_target_sat.wait_for_tasks( + f'resource_type = JobInvocation and resource_id = {job["id"]}', poll_timeout=2500 ) - result = 
target_sat.api.JobInvocation(id=job['id']).read() + result = module_target_sat.api.JobInvocation(id=job['id']).read() assert result.succeeded == 1 # check facts: correct os and valid subscription status - host_content = target_sat.api.Host(id=oracle.hostname).read_json() - # workaround for BZ 2080347 - assert ( - host_content['operatingsystem_name'].startswith(f"RHEL Server {version}") - or host_content['operatingsystem_name'].startswith(f"RedHat {version}") - or host_content['operatingsystem_name'].startswith(f"RHEL {version}") - ) + host_content = module_target_sat.api.Host(id=oracle.hostname).read_json() + assert host_content['subscription_status'] == 0 @pytest.mark.e2e -@pytest.mark.parametrize("version", ['centos7', 'centos8'], indirect=True) -def test_convert2rhel_centos(target_sat, centos, activation_key_rhel, version): +@pytest.mark.parametrize('version', ['centos7', 'centos8'], indirect=True) +def test_convert2rhel_centos(module_target_sat, centos, activation_key_rhel, version): """Convert Centos linux to RHEL :id: 6f698440-7d85-4deb-8dd9-363ea9003b92 @@ -283,39 +319,41 @@ def test_convert2rhel_centos(target_sat, centos, activation_key_rhel, version): :CaseImportance: Medium """ - host_content = target_sat.api.Host(id=centos.hostname).read_json() + host_content = module_target_sat.api.Host(id=centos.hostname).read_json() major = version.split('.')[0] - assert host_content['operatingsystem_name'] == f"CentOS {major}" - + assert host_content['operatingsystem_name'] == f'CentOS {major}' # execute job 'Convert 2 RHEL' on host template_id = ( - target_sat.api.JobTemplate().search(query={'search': 'name="Convert to RHEL"'})[0].id + module_target_sat.api.JobTemplate().search(query={'search': 'name="Convert to RHEL"'})[0].id ) - job = target_sat.api.JobInvocation().run( + job = module_target_sat.api.JobInvocation().run( synchronous=False, data={ 'job_template_id': template_id, 'inputs': { 'Activation Key': activation_key_rhel.id, 'Restart': 'yes', + 'Data 
telemetry': 'yes', }, 'targeting_type': 'static_query', 'search_query': f'name = {centos.hostname}', }, ) # wait for job to complete - target_sat.wait_for_tasks( - f'resource_type = JobInvocation and resource_id = {job["id"]}', poll_timeout=1000 + module_target_sat.wait_for_tasks( + f'resource_type = JobInvocation and resource_id = {job["id"]}', + poll_timeout=2500, + search_rate=20, ) - result = target_sat.api.JobInvocation(id=job['id']).read() + result = module_target_sat.api.JobInvocation(id=job['id']).read() assert result.succeeded == 1 # check facts: correct os and valid subscription status - host_content = target_sat.api.Host(id=centos.hostname).read_json() + host_content = module_target_sat.api.Host(id=centos.hostname).read_json() # workaround for BZ 2080347 assert ( - host_content['operatingsystem_name'].startswith(f"RHEL Server {version}") - or host_content['operatingsystem_name'].startswith(f"RedHat {version}") - or host_content['operatingsystem_name'].startswith(f"RHEL {version}") + host_content['operatingsystem_name'].startswith(f'RHEL Server {version}') + or host_content['operatingsystem_name'].startswith(f'RedHat {version}') + or host_content['operatingsystem_name'].startswith(f'RHEL {version}') ) assert host_content['subscription_status'] == 0 diff --git a/tests/foreman/api/test_discoveredhost.py b/tests/foreman/api/test_discoveredhost.py index 0db7c486bcf..e61bf069b0a 100644 --- a/tests/foreman/api/test_discoveredhost.py +++ b/tests/foreman/api/test_discoveredhost.py @@ -339,7 +339,7 @@ def test_positive_auto_provision_all( @pytest.mark.stubbed @pytest.mark.tier3 - def test_positive_refresh_facts_pxe_host(self): + def test_positive_refresh_facts_pxe_host(self, module_target_sat): """Refresh the facts of pxe based discovered hosts by adding a new NIC :id: 413fb608-cd5c-441d-af86-fd2d40346d96 @@ -354,14 +354,12 @@ def test_positive_refresh_facts_pxe_host(self): :expectedresults: Added Fact should be displayed on refreshing the facts - :CaseAutomation: 
NotAutomated - :CaseImportance: High """ @pytest.mark.on_premises_provisioning @pytest.mark.parametrize('module_provisioning_sat', ['discovery'], indirect=True) - @pytest.mark.parametrize('pxe_loader', ['bios', 'uefi'], indirect=True) + @pytest.mark.parametrize('pxe_loader', ['uefi'], indirect=True) @pytest.mark.rhel_ver_match('9') @pytest.mark.tier3 def test_positive_reboot_pxe_host( @@ -394,6 +392,7 @@ def test_positive_reboot_pxe_host( timeout=240, delay=20, ) + discovered_host = sat.api.DiscoveredHost().search(query={'mac': mac})[0] discovered_host.hostgroup = provisioning_hostgroup discovered_host.location = provisioning_hostgroup.location[0] @@ -402,25 +401,51 @@ def test_positive_reboot_pxe_host( result = sat.api.DiscoveredHost(id=discovered_host.id).reboot() assert 'Unable to perform reboot' not in result - @pytest.mark.stubbed + @pytest.mark.on_premises_provisioning + @pytest.mark.parametrize('module_provisioning_sat', ['discovery'], indirect=True) + @pytest.mark.parametrize('pxe_loader', ['bios'], indirect=True) + @pytest.mark.rhel_ver_match('9') + @pytest.mark.parametrize('provision_multiple_hosts', [2]) @pytest.mark.tier3 - def test_positive_reboot_all_pxe_hosts(self): + def test_positive_reboot_all_pxe_hosts( + self, + module_provisioning_rhel_content, + module_discovery_sat, + provision_multiple_hosts, + provisioning_hostgroup, + pxe_loader, + count, + ): """Rebooting all pxe-based discovered hosts :id: 69c807f8-5646-4aa6-8b3c-5ecdb69560ed :parametrized: yes - :Setup: Provisioning should be configured and a hosts should be discovered via PXE boot. + :Setup: Provisioning should be configured and hosts should be discovered via PXE boot. 
:Steps: PUT /api/v2/discovered_hosts/reboot_all - :expectedresults: All disdcovered host should be rebooted successfully - - :CaseAutomation: Automated + :expectedresults: All discovered hosts should be rebooted successfully :CaseImportance: Medium """ + sat = module_discovery_sat.sat + for host in provision_multiple_hosts: + host.power_control(ensure=False) + mac = host._broker_args['provisioning_nic_mac_addr'] + wait_for( + lambda: sat.api.DiscoveredHost().search(query={'mac': mac}) != [], + timeout=240, + delay=20, + ) + discovered_host = sat.api.DiscoveredHost().search(query={'mac': mac})[0] + discovered_host.hostgroup = provisioning_hostgroup + discovered_host.location = provisioning_hostgroup.location[0] + discovered_host.organization = provisioning_hostgroup.organization[0] + discovered_host.build = True + result = sat.api.DiscoveredHost().reboot_all() + assert 'Discovered hosts are rebooting now' in result['message'] class TestFakeDiscoveryTests: diff --git a/tests/foreman/api/test_discoveryrule.py b/tests/foreman/api/test_discoveryrule.py index f55a287d675..51ac5fec162 100644 --- a/tests/foreman/api/test_discoveryrule.py +++ b/tests/foreman/api/test_discoveryrule.py @@ -16,36 +16,16 @@ :Upstream: No """ -from fauxfactory import gen_choice, gen_integer, gen_string -from nailgun import entities import pytest from requests.exceptions import HTTPError from robottelo.utils.datafactory import valid_data_list -@pytest.fixture(scope="module") -def module_hostgroup(module_org): - module_hostgroup = entities.HostGroup(organization=[module_org]).create() - yield module_hostgroup - module_hostgroup.delete() - - -@pytest.fixture(scope="module") -def module_location(module_location): - yield module_location - module_location.delete() - - -@pytest.fixture(scope="module") -def module_org(module_org): - yield module_org - module_org.delete() - - @pytest.mark.tier1 @pytest.mark.e2e -def 
test_positive_end_to_end_crud(module_org, module_location, module_hostgroup): +def test_positive_end_to_end_crud(module_org, module_location, module_hostgroup, target_sat): """Create a new discovery rule with several attributes, update them and delete the rule itself. @@ -67,7 +47,7 @@ def test_positive_end_to_end_crud(module_org, module_location, module_hostgroup) name = gen_choice(list(valid_data_list().values())) search = gen_choice(searches) hostname = 'myhost-<%= rand(99999) %>' - discovery_rule = entities.DiscoveryRule( + discovery_rule = target_sat.api.DiscoveryRule( name=name, search_=search, hostname=hostname, @@ -103,23 +83,10 @@ def test_positive_end_to_end_crud(module_org, module_location, module_hostgroup) discovery_rule.read() -@pytest.mark.tier1 -def test_negative_create_with_invalid_host_limit_and_priority(): - """Create a discovery rule with invalid host limit and priority - - :id: e3c7acb1-ac56-496b-ac04-2a83f66ec290 - - :expectedresults: Validation error should be raised - """ - with pytest.raises(HTTPError): - entities.DiscoveryRule(max_count=gen_string('alpha')).create() - with pytest.raises(HTTPError): - entities.DiscoveryRule(priority=gen_string('alpha')).create() - - -@pytest.mark.stubbed @pytest.mark.tier3 -def test_positive_provision_with_rule_priority(): +def test_positive_update_and_provision_with_rule_priority( + module_target_sat, module_discovery_hostgroup, discovery_location, discovery_org +): """Create multiple discovery rules with different priority and check rule with highest priority executed first @@ -130,44 +97,67 @@ def test_positive_provision_with_rule_priority(): :expectedresults: Host with lower count have higher priority and that rule should be executed first - :CaseAutomation: NotAutomated - :CaseImportance: High """ + discovered_host = module_target_sat.api_factory.create_discovered_host() + + prio_rule = module_target_sat.api.DiscoveryRule( + max_count=5, + hostgroup=module_discovery_hostgroup, + search_=f'name = 
{discovered_host["name"]}', + location=[discovery_location], + organization=[discovery_org], + priority=1, + ).create() + rule = module_target_sat.api.DiscoveryRule( + max_count=5, + hostgroup=module_discovery_hostgroup, + search_=f'name = {discovered_host["name"]}', + location=[discovery_location], + organization=[discovery_org], + priority=10, + ).create() -@pytest.mark.stubbed -@pytest.mark.tier3 -def test_positive_multi_provision_with_rule_limit(): - """Create a discovery rule (CPU_COUNT = 2) with host limit 1 and - provision more than 2 hosts with same rule - - :id: 553c8ebf-d1c1-4ac2-7948-d3664a5b450b - - :Setup: Hosts with two CPUs should already be discovered - - :expectedresults: Rule should only be applied to 2 discovered hosts - and the rule should already be skipped for the 3rd one. - - :CaseAutomation: NotAutomated + result = module_target_sat.api.DiscoveredHost(id=discovered_host['id']).auto_provision() + assert f'provisioned with rule {prio_rule.name}' in result['message'] - :CaseImportance: High - """ + # Delete discovery rule + for _ in rule, prio_rule: + _.delete() + with pytest.raises(HTTPError): + _.read() -@pytest.mark.stubbed @pytest.mark.tier3 -def test_positive_provision_with_updated_discovery_rule(): - """Update an existing rule and provision a host with it. 
+def test_positive_multi_provision_with_rule_limit( + module_target_sat, module_discovery_hostgroup, discovery_location, discovery_org +): + """Create a discovery rule with certain host limit and try to provision more than the passed limit - :id: 3fb20f0f-02e9-4158-9744-f583308c4e89 - - :Setup: Host should already be discovered + :id: 553c8ebf-d1c1-4ac2-7948-d3664a5b450b - :expectedresults: User should be able to update the rule and it should - be applied on discovered host + :Setup: Hosts should already be discovered - :CaseAutomation: NotAutomated + :expectedresults: Rule should only be applied to the number of the hosts passed as limit in the rule :CaseImportance: High """ + for _ in range(2): + discovered_host = module_target_sat.api_factory.create_discovered_host() + + rule = module_target_sat.api.DiscoveryRule( + max_count=1, + hostgroup=module_discovery_hostgroup, + search_=f'name = {discovered_host["name"]}', + location=[discovery_location], + organization=[discovery_org], + priority=1000, + ).create() + result = module_target_sat.api.DiscoveredHost().auto_provision_all() + assert '1 discovered hosts were provisioned' in result['message'] + + # Delete discovery rule + rule.delete() + with pytest.raises(HTTPError): + rule.read() diff --git a/tests/foreman/api/test_notifications.py b/tests/foreman/api/test_notifications.py deleted file mode 100644 index a8b4c24ebf8..00000000000 --- a/tests/foreman/api/test_notifications.py +++ /dev/null @@ -1,227 +0,0 @@ -"""Test class for Notifications API - -:Requirement: Notifications - -:CaseAutomation: Automated - -:CaseLevel: Acceptance - -:CaseComponent: Notifications - -:Team: Endeavour - -:TestType: Functional - -:CaseImportance: High - -:Upstream: No -""" -from mailbox import mbox -from re import findall -from tempfile import mkstemp - -from fauxfactory import gen_string -import pytest -from wait_for import TimedOutError, wait_for - -from robottelo.config import settings -from robottelo.constants import DEFAULT_LOC, 
DEFAULT_ORG -from robottelo.utils.issue_handlers import is_open - - -@pytest.fixture -def admin_user_with_localhost_email(target_sat): - """Admin user with e-mail set to `root@localhost`.""" - user = target_sat.api.User( - admin=True, - default_organization=DEFAULT_ORG, - default_location=DEFAULT_LOC, - description='created by nailgun', - login=gen_string("alphanumeric"), - password=gen_string("alphanumeric"), - mail='root@localhost', - ).create() - user.mail_enabled = True - user.update() - - yield user - - user.delete() - - -@pytest.fixture -def reschedule_long_running_tasks_notification(target_sat): - """Reschedule long-running tasks checker from midnight (default) to every minute. - Reset it back after the test. - """ - default_cron_schedule = '0 0 * * *' - every_minute_cron_schedule = '* * * * *' - - assert ( - target_sat.execute( - f"FOREMAN_TASKS_CHECK_LONG_RUNNING_TASKS_CRONLINE='{every_minute_cron_schedule}' " - "foreman-rake foreman_tasks:reschedule_long_running_tasks_checker" - ).status - == 0 - ) - - yield - - assert ( - target_sat.execute( - f"FOREMAN_TASKS_CHECK_LONG_RUNNING_TASKS_CRONLINE='{default_cron_schedule}' " - "foreman-rake foreman_tasks:reschedule_long_running_tasks_checker" - ).status - == 0 - ) - - -@pytest.fixture -def start_postfix_service(target_sat): - """Start postfix service (disabled by default).""" - assert target_sat.execute('systemctl start postfix').status == 0 - - -@pytest.fixture -def clean_root_mailbox(target_sat): - """Backup & purge local mailbox of the Satellite's root@localhost user. - Restore it afterwards. 
- """ - root_mailbox = '/var/spool/mail/root' - root_mailbox_backup = f'{root_mailbox}-{gen_string("alphanumeric")}.bak' - target_sat.execute(f'cp -f {root_mailbox} {root_mailbox_backup}') - target_sat.execute(f'truncate -s 0 {root_mailbox}') - - yield root_mailbox - - target_sat.execute(f'mv -f {root_mailbox_backup} {root_mailbox}') - - -@pytest.fixture -def wait_for_long_running_task_mail(target_sat, clean_root_mailbox, long_running_task): - """Wait until the long-running task ID is found in the Satellite's mbox file.""" - timeout = 300 - try: - wait_for( - func=target_sat.execute, - func_args=[f'grep --quiet {long_running_task["task"]["id"]} {clean_root_mailbox}'], - fail_condition=lambda res: res.status == 0, - timeout=timeout, - delay=5, - ) - except TimedOutError: - raise AssertionError( - f'No notification e-mail with long-running task ID {long_running_task["task"]["id"]} ' - f'has arrived to {clean_root_mailbox} after {timeout} seconds.' - ) - return True - - -@pytest.fixture -def root_mailbox_copy(target_sat, clean_root_mailbox, wait_for_long_running_task_mail): - """Parsed local system copy of the Satellite's root user mailbox. - - :returns: :class:`mailbox.mbox` instance - """ - assert wait_for_long_running_task_mail - result = target_sat.execute(f'cat {clean_root_mailbox}') - assert result.status == 0, f'Could not read mailbox {clean_root_mailbox} on Satellite host.' - mbox_content = result.stdout - _, local_mbox_file = mkstemp() - with open(local_mbox_file, 'w') as fh: - fh.writelines(mbox_content) - return mbox(path=local_mbox_file) - - -@pytest.fixture -def long_running_task(target_sat): - """Create an async task and set its start time and last report time to two days ago. - After the test finishes, the task is cancelled. 
- """ - template_id = ( - target_sat.api.JobTemplate() - .search(query={'search': 'name="Run Command - Script Default"'})[0] - .id - ) - job = target_sat.api.JobInvocation().run( - synchronous=False, - data={ - 'job_template_id': template_id, - 'organization': DEFAULT_ORG, - 'location': DEFAULT_LOC, - 'inputs': { - 'command': 'sleep 300', - }, - 'targeting_type': 'static_query', - 'search_query': f'name = {target_sat.hostname}', - 'password': settings.server.ssh_password, - }, - ) - sql_date_2_days_ago = "now() - INTERVAL \'2 days\'" - result = target_sat.execute( - "su - postgres -c \"psql foreman postgres <