@require_context
@pick_context_manager_reader_allow_async
def instance_get_host_by_tag(context, tag, filters=None):
    """Return the (host, count) row for the host running the most
    non-deleted instances tagged with ``tag``, or None if no instance
    matches.

    :param context: request context (supplies the DB session)
    :param tag: exact tag value to match against ``models.Tag.tag``
    :param filters: optional dict understood by
        ``_handle_instance_host_query_filters`` (keys: ``hv_type``,
        ``availability_zone``)
    :returns: a ``(host, count)`` result row or None
    """
    count_label = func.count('*').label('count')
    # Select only the host column so the result row has the same
    # (host, count) shape as instance_get_host_by_metadata(); querying
    # the full Instance entity here would return an (Instance, count)
    # row and break consumers that expect a hostname.
    query = context.session.query(models.Instance.host, count_label). \
        join(models.Tag, models.Tag.resource_id == models.Instance.uuid)
    query = _handle_instance_host_query_filters(query, filters)
    query = query.filter(models.Instance.deleted == 0,
                         models.Tag.tag == tag)

    query = query.group_by(models.Instance.host). \
        order_by(sql.desc(count_label)).limit(1)

    result = query.all()
    if result:
        return result[0]
    return None


@require_context
@pick_context_manager_reader_allow_async
def instance_get_host_by_metadata(context, meta_key, meta_value,
                                  filters=None):
    """Return the (host, count) row for the host running the most
    non-deleted instances whose metadata has ``meta_key`` equal to
    ``meta_value``, or None if no instance matches.

    :param context: request context (supplies the DB session)
    :param meta_key: instance-metadata key to match
    :param meta_value: instance-metadata value to match
    :param filters: optional dict understood by
        ``_handle_instance_host_query_filters`` (keys: ``hv_type``,
        ``availability_zone``)
    :returns: a ``(host, count)`` result row or None
    """
    count_label = func.count('*').label('count')
    query = context.session.query(models.Instance.host, count_label). \
        join(models.InstanceMetadata,
             models.InstanceMetadata.instance_uuid == models.Instance.uuid)
    query = _handle_instance_host_query_filters(query, filters)
    query = query.filter(models.Instance.deleted == 0,
                         models.InstanceMetadata.deleted == 0,
                         models.InstanceMetadata.key == meta_key,
                         models.InstanceMetadata.value == meta_value)
    query = query.group_by(models.Instance.host). \
        order_by(sql.desc(count_label)). \
        limit(1)

    result = query.all()
    if result:
        return result[0]
    return None


def _handle_instance_host_query_filters(query, filters=None):
    """Apply optional host-affinity filters to an Instance-based query.

    Supported filter keys:
      * ``hv_type``: restrict to instances whose compute node has the
        given hypervisor type (joins ComputeNode on the instance's node).
      * ``availability_zone``: restrict to instances in the given AZ.

    :param query: SQLAlchemy query over ``models.Instance``
    :param filters: optional dict with the keys above
    :returns: the (possibly) narrowed query
    """
    if not filters:
        return query
    hv_type = filters.get('hv_type')
    if hv_type:
        # Join first; the hypervisor_type predicate is added after the
        # AZ filter below so the AZ filter stays on the Instance table.
        query = query.join(
            models.ComputeNode,
            models.Instance.node == models.ComputeNode.hypervisor_hostname)

    availability_zone = filters.get('availability_zone')
    if availability_zone:
        query = query.filter(
            models.Instance.availability_zone == availability_zone)
    if hv_type:
        query = query.filter(models.ComputeNode.deleted == 0,
                             models.ComputeNode.hypervisor_type == hv_type)
    return query
nova.objects.aggregate import AggregateList +from nova.objects.build_request import BuildRequest from nova.scheduler import filters from nova.scheduler import utils from nova import utils as nova_utils @@ -28,6 +32,10 @@ CONF = nova.conf.CONF _SERVICE_AUTH = None +GARDENER_PREFIX = "kubernetes.io-cluster-" +KKS_PREFIX = "kubernikus:kluster" +HANA_PREFIX = "hana_" +VMWARE_HV_TYPE = 'VMware vCenter Server' class ShardFilter(filters.BaseHostFilter): @@ -37,6 +45,8 @@ class ShardFilter(filters.BaseHostFilter): Alternatively the project may have the "sharding_enabled" tag set, which enables the project for hosts in all shards. + + Implements `filter_all` directly instead of `host_passes` """ _PROJECT_SHARD_CACHE = {} @@ -114,11 +124,63 @@ def _get_shards(self, project_id): return self._PROJECT_SHARD_CACHE.get(project_id) - def host_passes(self, host_state, spec_obj): + def _get_k8s_shard(self, spec_obj): + if (spec_obj.flavor.name.startswith(HANA_PREFIX) or + utils.request_is_resize(spec_obj)): + return None + elevated = nova_context.get_admin_context() + build_request = BuildRequest.get_by_instance_uuid( + elevated, spec_obj.instance_uuid) + + kks_tag = next((t.tag for t in build_request.tags + if t.tag.startswith(KKS_PREFIX)), None) + gardener_meta = None + if not kks_tag: + gardener_meta = \ + {k: v for k, v in build_request.instance.metadata.items() + if k.startswith(GARDENER_PREFIX)} + + if not kks_tag and not gardener_meta: + return None + + q_filters = {'hv_type': VMWARE_HV_TYPE} + if spec_obj.availability_zone: + q_filters['availability_zone'] = spec_obj.availability_zone + + k8s_host = None + if kks_tag: + k8s_host = nova_context.scatter_gather_skip_cell0( + elevated, main_db_api.instance_get_host_by_tag, + kks_tag, filters=q_filters) + elif gardener_meta: + (meta_key, meta_value) = next( + (k, v) for k, v in gardener_meta.items()) + k8s_host = nova_context.scatter_gather_skip_cell0( + elevated, main_db_api.instance_get_host_by_metadata, + meta_key, meta_value, 
filters=q_filters) + + if not k8s_host: + return None + + aggrs = [aggr.name for aggr in + AggregateList.get_by_host(elevated, k8s_host) + if aggr.name.startswith(self._SHARD_PREFIX)] + if aggrs: + return aggrs[0] + else: + return None + + def filter_all(self, filter_obj_list, spec_obj): # Only VMware if utils.is_non_vmware_spec(spec_obj): - return True + return filter_obj_list + + k8s_shard = self._get_k8s_shard(spec_obj) + + return [host_state for host_state in filter_obj_list + if self._host_passes(host_state, spec_obj, k8s_shard)] + def _host_passes(self, host_state, spec_obj, k8s_shard): host_shard_aggrs = [aggr for aggr in host_state.aggregates if aggr.name.startswith(self._SHARD_PREFIX)] @@ -148,14 +210,12 @@ def host_passes(self, host_state, spec_obj): if self._ALL_SHARDS in shards: LOG.debug('project enabled for all shards %(project_shards)s.', {'project_shards': shards}) - return True elif host_shard_names & set(shards): LOG.debug('%(host_state)s shard %(host_shard)s found in project ' 'shards %(project_shards)s.', {'host_state': host_state, 'host_shard': host_shard_names, 'project_shards': shards}) - return True else: LOG.debug('%(host_state)s shard %(host_shard)s not found in ' 'project shards %(project_shards)s.', @@ -163,3 +223,17 @@ def host_passes(self, host_state, spec_obj): 'host_shard': host_shard_names, 'project_shards': shards}) return False + + if k8s_shard: + return any(host_shard == k8s_shard + for host_shard in host_shard_names) + + return True + + def _host_passes_k8s(self, host_shard_aggrs, k8s_hosts): + """Instances of a K8S cluster must end up on the same shard. + The K8S cluster is identified by the metadata or tags set + by the orchestrator (Gardener or Kubernikus). 
+ """ + return any(set(aggr.hosts) & k8s_hosts + for aggr in host_shard_aggrs) diff --git a/nova/tests/unit/scheduler/filters/test_shard_filter.py b/nova/tests/unit/scheduler/filters/test_shard_filter.py index 7acdcdf69e3..d3e713dc252 100644 --- a/nova/tests/unit/scheduler/filters/test_shard_filter.py +++ b/nova/tests/unit/scheduler/filters/test_shard_filter.py @@ -16,9 +16,12 @@ import mock +from nova.db.main import api as main_db_api from nova import objects from nova.scheduler.filters import shard_filter from nova import test +from nova.tests.unit import fake_flavor +from nova.tests.unit import fake_instance from nova.tests.unit.scheduler import fakes @@ -31,6 +34,13 @@ def setUp(self): 'foo': ['vc-a-0', 'vc-b-0'], 'last_modified': time.time() } + instance = fake_instance.fake_instance_obj( + mock.sentinel.ctx, expected_attrs=['metadata', 'tags']) + build_req = objects.BuildRequest() + build_req.instance_uuid = instance.uuid + build_req.tags = objects.TagList(objects=[]) + build_req.instance = instance + self.fake_build_req = build_req @mock.patch('nova.scheduler.filters.shard_filter.' 
'ShardFilter._update_cache') @@ -63,93 +73,130 @@ def set_cache(): ['vc-a-1', 'vc-b-0']) mock_update_cache.assert_called_once() + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') @mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host') - def test_shard_baremetal_passes(self, agg_mock): + def test_shard_baremetal_passes(self, agg_mock, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), objects.Aggregate(id=1, name='vc-a-0', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) extra_specs = {'capabilities:cpu_arch': 'x86_64'} spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs=extra_specs)) - self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'], + extra_specs=extra_specs)) + self._assert_passes(host, spec_obj, True) + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') @mock.patch('nova.scheduler.filters.shard_filter.' 
'ShardFilter._update_cache') @mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host') - def test_shard_project_not_found(self, agg_mock, mock_update_cache): + def test_shard_project_not_found(self, agg_mock, mock_update_cache, + get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), objects.Aggregate(id=1, name='vc-a-0', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='bar', - flavor=objects.Flavor(extra_specs={})) - self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) + self._assert_passes(host, spec_obj, False) + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') @mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host') - def test_shard_project_no_shards(self, agg_mock): + def test_shard_project_no_shards(self, agg_mock, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), objects.Aggregate(id=1, name='vc-a-0', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={})) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) self.filt_cls._PROJECT_SHARD_CACHE['foo'] = [] - self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, False) + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') @mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host') - def test_shard_host_no_shard_aggregate(self, agg_mock): + def 
test_shard_host_no_shard_aggregate(self, agg_mock, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req host = fakes.FakeHostState('host1', 'compute', {}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={})) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) agg_mock.return_value = {} - self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, False) - def test_shard_host_no_shards_in_aggregate(self): + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def test_shard_host_no_shards_in_aggregate(self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={})) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) - self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, False) - def test_shard_project_shard_match_host_shard(self): + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def test_shard_project_shard_match_host_shard(self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), objects.Aggregate(id=1, name='vc-a-0', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={})) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) - 
self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, True) - def test_shard_project_shard_do_not_match_host_shard(self): + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def test_shard_project_shard_do_not_match_host_shard(self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), objects.Aggregate(id=1, name='vc-a-1', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={})) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) - self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, False) - def test_shard_project_has_multiple_shards_per_az(self): + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def test_shard_project_has_multiple_shards_per_az(self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), objects.Aggregate(id=1, name='vc-a-1', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={})) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) self.filt_cls._PROJECT_SHARD_CACHE['foo'] = ['vc-a-0', 'vc-a-1', 'vc-b-0'] - self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, True) - def test_shard_project_has_multiple_shards_per_az_resize_same_shard(self): + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def 
test_shard_project_has_multiple_shards_per_az_resize_same_shard( + self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1', 'host2']), objects.Aggregate(id=1, name='vc-a-1', hosts=['host1', @@ -157,40 +204,55 @@ def test_shard_project_has_multiple_shards_per_az_resize_same_shard(self): host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={}), + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs']), scheduler_hints=dict(_nova_check_type=['resize'], source_host=['host2'])) self.filt_cls._PROJECT_SHARD_CACHE['foo'] = ['vc-a-0', 'vc-a-1', 'vc-b-0'] - self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, True) - def test_shard_project_has_multiple_shards_per_az_resize_other_shard(self): + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def test_shard_project_has_multiple_shards_per_az_resize_other_shard( + self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1', 'host2']), objects.Aggregate(id=1, name='vc-a-1', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - flavor=objects.Flavor(extra_specs={}), + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs']), + instance_uuid=self.fake_build_req.instance_uuid, scheduler_hints=dict(_nova_check_type=['resize'], source_host=['host2'])) self.filt_cls._PROJECT_SHARD_CACHE['foo'] = ['vc-a-0', 'vc-a-1', 'vc-b-0'] - self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, True) - def 
test_shard_project_has_sharding_enabled_any_host_passes(self): + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def test_shard_project_has_sharding_enabled_any_host_passes( + self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req self.filt_cls._PROJECT_SHARD_CACHE['baz'] = ['sharding_enabled'] aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), objects.Aggregate(id=1, name='vc-a-0', hosts=['host1'])] host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='baz', - flavor=objects.Flavor(extra_specs={})) - self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) - - def test_shard_project_has_sharding_enabled_and_single_shards(self): + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) + self._assert_passes(host, spec_obj, True) + + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + def test_shard_project_has_sharding_enabled_and_single_shards( + self, get_by_uuid): + get_by_uuid.return_value = self.fake_build_req self.filt_cls._PROJECT_SHARD_CACHE['baz'] = ['sharding_enabled', 'vc-a-1'] aggs = [objects.Aggregate(id=1, name='some-az-a', hosts=['host1']), @@ -198,16 +260,179 @@ def test_shard_project_has_sharding_enabled_and_single_shards(self): host = fakes.FakeHostState('host1', 'compute', {'aggregates': aggs}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='baz', - flavor=objects.Flavor(extra_specs={})) - self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) + self._assert_passes(host, spec_obj, True) + + @mock.patch('nova.objects.AggregateList.get_by_host') + @mock.patch('nova.context.scatter_gather_skip_cell0') + 
@mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + @mock.patch('nova.context.get_admin_context') + def test_same_shard_for_kubernikus_cluster(self, get_context, + get_by_uuid, + gather_host, + get_aggrs): + kks_cluster = 'kubernikus:kluster-example' + build_req = objects.BuildRequest() + build_req.tags = objects.TagList(objects=[ + objects.Tag(tag=kks_cluster) + ]) + + result = self._filter_k8s_hosts(build_req, get_context, + get_by_uuid, gather_host, + get_aggrs) + + gather_host.assert_called_once_with( + get_context.return_value, + main_db_api.instance_get_host_by_tag, + 'kubernikus:kluster-example', + filters={'hv_type': 'VMware vCenter Server', + 'availability_zone': 'az-2'}) + + self.assertEqual(2, len(result)) + self.assertEqual(result[0].host, 'host4') + self.assertEqual(result[1].host, 'host5') + + @mock.patch('nova.objects.AggregateList.get_by_host') + @mock.patch('nova.context.scatter_gather_skip_cell0') + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + @mock.patch('nova.context.get_admin_context') + def test_same_shard_for_gardener_cluster(self, get_context, + get_by_uuid, + gather_host, + get_aggrs): + gardener_cluster = 'kubernetes.io-cluster-shoot--garden--testCluster' + new_instance = fake_instance.fake_instance_obj( + get_context.return_value, + expected_attrs=['metadata'], + metadata={gardener_cluster: '1'}) + build_req = objects.BuildRequest() + build_req.instance = new_instance + build_req.tags = objects.TagList() + + result = self._filter_k8s_hosts(build_req, get_context, + get_by_uuid, gather_host, + get_aggrs) + + gather_host.assert_called_once_with( + get_context.return_value, + main_db_api.instance_get_host_by_metadata, + gardener_cluster, '1', + filters={'hv_type': 'VMware vCenter Server', + 'availability_zone': 'az-2'}) + + self.assertEqual(2, len(result)) + self.assertEqual(result[0].host, 'host4') + self.assertEqual(result[1].host, 'host5') + + def _filter_k8s_hosts(self, build_req, get_context, get_by_uuid, 
+ gather_host, get_aggrs): + """Given a K8S cluster that spans across 3 shards + (vc-a-0, vc-b-0, vc-b-1) and 2 availability zones (az-1, az-2) + where the most k8s hosts are in the vc-b-1 shard. When there is + a RequestSpec for 'az-2', then the hosts in 'vc-b-1' shard must + be returned, since it's the dominant shard. + """ + get_by_uuid.return_value = build_req + gather_host.return_value = 'host5' + + self.filt_cls._PROJECT_SHARD_CACHE['foo'] = ['sharding_enabled', + 'vc-a-1'] + agg1 = objects.Aggregate(id=1, name='vc-a-0', hosts=['host1']) + agg2 = objects.Aggregate(id=2, name='vc-b-0', hosts=['host2', 'host3']) + agg3 = objects.Aggregate(id=3, name='vc-b-1', hosts=['host4', 'host5']) + + get_aggrs.return_value = [agg3] + + host1 = fakes.FakeHostState('host1', 'compute', + {'aggregates': [agg1]}) + host2 = fakes.FakeHostState('host2', 'compute', + {'aggregates': [agg2]}) + host3 = fakes.FakeHostState('host3', 'compute', + {'aggregates': [agg2]}) + host4 = fakes.FakeHostState('host4', 'compute', + {'aggregates': [agg3]}) + host5 = fakes.FakeHostState('host5', 'compute', + {'aggregates': [agg3]}) + + spec_obj = objects.RequestSpec( + context=get_context.return_value, project_id='foo', + availability_zone='az-2', + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'], + name='m1')) + + return list(self.filt_cls.filter_all( + [host1, host2, host3, host4, host5], spec_obj)) + + @mock.patch('nova.objects.AggregateList.get_by_host') + @mock.patch('nova.context.scatter_gather_skip_cell0') + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') + @mock.patch('nova.context.get_admin_context') + def test_k8s_bypass_hana_flavors(self, get_context, + get_by_uuid, + gather_host, + get_aggrs): + gardener_cluster = 'kubernetes.io-cluster-shoot--garden--testCluster' + hana_flavor = fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'], + id=1, 
name='hana_flavor1', memory_mb=256, vcpus=1, root_gb=1) + new_instance = fake_instance.fake_instance_obj( + get_context.return_value, + flavor=hana_flavor, + expected_attrs=['metadata'], + metadata={gardener_cluster: '1'}) + build_req = objects.BuildRequest() + build_req.instance = new_instance + build_req.tags = objects.TagList() + + get_by_uuid.return_value = build_req + + self.filt_cls._PROJECT_SHARD_CACHE['baz'] = ['sharding_enabled', + 'vc-a-1'] + agg1 = objects.Aggregate(id=1, name='vc-a-0', hosts=['host1']) + hana_agg = objects.Aggregate(id=1, name='vc-b-0', + hosts=['host2', 'host3']) + + host1 = fakes.FakeHostState('host1', 'compute', + {'aggregates': [agg1]}) + host2 = fakes.FakeHostState('host2', 'compute', + {'aggregates': [hana_agg]}) + host3 = fakes.FakeHostState('host3', 'compute', + {'aggregates': [hana_agg]}) + get_aggrs.return_value = [agg1, hana_agg] + spec_obj = objects.RequestSpec( + context=get_context.return_value, project_id='foo', + availability_zone='az-1', + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'], + name='hana_flavor1')) + + result = list(self.filt_cls.filter_all([host1, host2, host3], + spec_obj)) + + gather_host.assert_not_called() + self.assertEqual(3, len(result)) + self.assertEqual(result[0].host, 'host1') + self.assertEqual(result[1].host, 'host2') + self.assertEqual(result[2].host, 'host3') + + @mock.patch('nova.objects.BuildRequest.get_by_instance_uuid') @mock.patch('nova.scheduler.filters.shard_filter.LOG') @mock.patch('nova.scheduler.filters.utils.aggregate_metadata_get_by_host') - def test_log_level_for_missing_vc_aggregate(self, agg_mock, log_mock): + def test_log_level_for_missing_vc_aggregate(self, agg_mock, log_mock, + get_by_uuid): + get_by_uuid.return_value = self.fake_build_req host = fakes.FakeHostState('host1', 'compute', {}) spec_obj = objects.RequestSpec( context=mock.sentinel.ctx, project_id='foo', - 
flavor=objects.Flavor(extra_specs={})) + instance_uuid=self.fake_build_req.instance_uuid, + flavor=fake_flavor.fake_flavor_obj( + mock.sentinel.ctx, expected_attrs=['extra_specs'])) agg_mock.return_value = {} @@ -215,7 +440,7 @@ def test_log_level_for_missing_vc_aggregate(self, agg_mock, log_mock): log_mock.debug = mock.Mock() log_mock.error = mock.Mock() host.hypervisor_type = 'ironic' - self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, False) log_mock.debug.assert_called_once_with(mock.ANY, mock.ANY) log_mock.error.assert_not_called() @@ -223,14 +448,21 @@ def test_log_level_for_missing_vc_aggregate(self, agg_mock, log_mock): log_mock.debug = mock.Mock() log_mock.error = mock.Mock() host.hypervisor_type = 'Some HV' - self.assertFalse(self.filt_cls.host_passes(host, spec_obj)) + self._assert_passes(host, spec_obj, False) log_mock.error.assert_called_once_with(mock.ANY, mock.ANY) log_mock.debug.assert_not_called() @mock.patch('nova.scheduler.utils.is_non_vmware_spec', return_value=True) def test_non_vmware_spec(self, mock_is_non_vmware_spec): - host = mock.sentinel.host + host1 = mock.sentinel.host1 + host2 = mock.sentinel.host2 spec_obj = mock.sentinel.spec_obj - self.assertTrue(self.filt_cls.host_passes(host, spec_obj)) + result = list(self.filt_cls.filter_all([host1, host2], spec_obj)) + + self.assertEqual([host1, host2], result) mock_is_non_vmware_spec.assert_called_once_with(spec_obj) + + def _assert_passes(self, host, spec_obj, passes): + result = bool(list(self.filt_cls.filter_all([host], spec_obj))) + self.assertEqual(passes, result)