diff --git a/cluster-autoscaler/context/autoscaling_context.go b/cluster-autoscaler/context/autoscaling_context.go index 1743e8c443c9..31e2af8f4fce 100644 --- a/cluster-autoscaler/context/autoscaling_context.go +++ b/cluster-autoscaler/context/autoscaling_context.go @@ -27,7 +27,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/expander" processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/client-go/informers" kube_client "k8s.io/client-go/kubernetes" @@ -44,9 +44,8 @@ type AutoscalingContext struct { AutoscalingKubeClients // CloudProvider used in CA. CloudProvider cloudprovider.CloudProvider - // TODO(kgolab) - move away too as it's not config - // PredicateChecker to check if a pod can fit into a node. - PredicateChecker predicatechecker.PredicateChecker + // FrameworkHandle can be used to interact with the scheduler framework. + FrameworkHandle *framework.Handle // ClusterSnapshot denotes cluster snapshot used for predicate checking. ClusterSnapshot clustersnapshot.ClusterSnapshot // ExpanderStrategy is the strategy used to choose which node group to expand when scaling up @@ -100,7 +99,7 @@ func NewResourceLimiterFromAutoscalingOptions(options config.AutoscalingOptions) // NewAutoscalingContext returns an autoscaling context from all the necessary parameters passed via arguments func NewAutoscalingContext( options config.AutoscalingOptions, - predicateChecker predicatechecker.PredicateChecker, + fwHandle *framework.Handle, clusterSnapshot clustersnapshot.ClusterSnapshot, autoscalingKubeClients *AutoscalingKubeClients, cloudProvider cloudprovider.CloudProvider, @@ -114,7 +113,7 @@ func NewAutoscalingContext( AutoscalingOptions: options, CloudProvider: cloudProvider, AutoscalingKubeClients: *autoscalingKubeClients, - PredicateChecker: predicateChecker, + FrameworkHandle: fwHandle, ClusterSnapshot: clusterSnapshot, ExpanderStrategy: expanderStrategy, ProcessorCallbacks: processorCallbacks, diff --git a/cluster-autoscaler/core/autoscaler.go b/cluster-autoscaler/core/autoscaler.go index 0e63a3c85011..1665ea4ebe4a 100644 --- a/cluster-autoscaler/core/autoscaler.go +++ b/cluster-autoscaler/core/autoscaler.go @@ -33,9 +33,11 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/observers/loopstart" ca_processors "k8s.io/autoscaler/cluster-autoscaler/processors" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/predicate" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/client-go/informers" @@ -49,7 +51,7 @@ type AutoscalerOptions struct { InformerFactory informers.SharedInformerFactory AutoscalingKubeClients *context.AutoscalingKubeClients CloudProvider cloudprovider.CloudProvider - PredicateChecker predicatechecker.PredicateChecker + FrameworkHandle *framework.Handle ClusterSnapshot clustersnapshot.ClusterSnapshot ExpanderStrategy 
expander.Strategy EstimatorBuilder estimator.EstimatorBuilder @@ -86,7 +88,7 @@ func NewAutoscaler(opts AutoscalerOptions, informerFactory informers.SharedInfor } return NewStaticAutoscaler( opts.AutoscalingOptions, - opts.PredicateChecker, + opts.FrameworkHandle, opts.ClusterSnapshot, opts.AutoscalingKubeClients, opts.Processors, @@ -114,8 +116,15 @@ func initializeDefaultOptions(opts *AutoscalerOptions, informerFactory informers if opts.AutoscalingKubeClients == nil { opts.AutoscalingKubeClients = context.NewAutoscalingKubeClients(opts.AutoscalingOptions, opts.KubeClient, opts.InformerFactory) } + if opts.FrameworkHandle == nil { + fwHandle, err := framework.NewHandle(opts.InformerFactory, opts.SchedulerConfig) + if err != nil { + return err + } + opts.FrameworkHandle = fwHandle + } if opts.ClusterSnapshot == nil { - opts.ClusterSnapshot = clustersnapshot.NewBasicClusterSnapshot() + opts.ClusterSnapshot = predicate.NewPredicateSnapshot(base.NewBasicSnapshotBase(), opts.FrameworkHandle) } if opts.RemainingPdbTracker == nil { opts.RemainingPdbTracker = pdb.NewBasicRemainingPdbTracker() diff --git a/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes_test.go b/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes_test.go index 0aebcbd4d671..ae41f5127c54 100644 --- a/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes_test.go +++ b/cluster-autoscaler/core/podlistprocessor/currently_drained_nodes_test.go @@ -26,6 +26,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/core/scaledown" "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" ) @@ -194,7 +195,7 @@ func TestCurrentlyDrainedNodesPodListProcessor(t *testing.T) { name: "single node, non-recreatable pods filtered out", drainedNodes: []string{"n"}, nodes: []*apiv1.Node{ - BuildTestNode("n", 1000, 10), + BuildTestNode("n", 2000, 10), }, pods: []*apiv1.Pod{ BuildScheduledTestPod("p1", 100, 1, "n"), @@ -229,11 +230,11 @@ func TestCurrentlyDrainedNodesPodListProcessor(t *testing.T) { name: "everything works together", drainedNodes: []string{"n1", "n3", "n5"}, nodes: []*apiv1.Node{ - BuildTestNode("n1", 1000, 10), - BuildTestNode("n2", 1000, 10), - BuildTestNode("n3", 1000, 10), - BuildTestNode("n4", 1000, 10), - BuildTestNode("n5", 1000, 10), + BuildTestNode("n1", 3000, 10), + BuildTestNode("n2", 3000, 10), + BuildTestNode("n3", 3000, 10), + BuildTestNode("n4", 3000, 10), + BuildTestNode("n5", 3000, 10), }, pods: []*apiv1.Pod{ BuildScheduledTestPod("p1", 100, 1, "n1"), @@ -267,7 +268,7 @@ func TestCurrentlyDrainedNodesPodListProcessor(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ctx := context.AutoscalingContext{ ScaleDownActuator: &mockActuator{&mockActuationStatus{tc.drainedNodes}}, - ClusterSnapshot: clustersnapshot.NewBasicClusterSnapshot(), + ClusterSnapshot: testsnapshot.NewTestSnapshotOrDie(t), } clustersnapshot.InitializeClusterSnapshotOrDie(t, ctx.ClusterSnapshot, tc.nodes, tc.pods) diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable.go b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable.go index 0ec929814a1a..faba2261ba60 100644 --- a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable.go +++ b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable.go @@ -23,7 +23,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/context" core_utils "k8s.io/autoscaler/cluster-autoscaler/core/utils" caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - klog "k8s.io/klog/v2" + "k8s.io/klog/v2" ) type filterOutExpendable struct { @@ -56,7 +56,8 @@ func (p *filterOutExpendable) Process(context *context.AutoscalingContext, pods // CA logic from before migration to scheduler framework. So let's keep it for now func (p *filterOutExpendable) addPreemptingPodsToSnapshot(pods []*apiv1.Pod, ctx *context.AutoscalingContext) error { for _, p := range pods { - if err := ctx.ClusterSnapshot.AddPod(p, p.Status.NominatedNodeName); err != nil { + // TODO(DRA): Figure out if/how to use the predicate-checking SchedulePod() here instead - otherwise this doesn't work with DRA pods. 
+ if err := ctx.ClusterSnapshot.ForceAddPod(p, p.Status.NominatedNodeName); err != nil { klog.Errorf("Failed to update snapshot with pod %s/%s waiting for preemption: %v", p.Namespace, p.Name, err) return caerrors.ToAutoscalerError(caerrors.InternalError, err) } diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go index 458f633c7152..280b53768636 100644 --- a/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go +++ b/cluster-autoscaler/core/podlistprocessor/filter_out_expendable_test.go @@ -21,10 +21,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/context" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/test" ) @@ -108,8 +109,9 @@ func TestFilterOutExpendable(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { processor := NewFilterOutExpendablePodListProcessor() - snapshot := clustersnapshot.NewBasicClusterSnapshot() - snapshot.AddNodes(tc.nodes) + snapshot := testsnapshot.NewTestSnapshotOrDie(t) + err := snapshot.SetClusterState(tc.nodes, nil) + assert.NoError(t, err) pods, err := processor.Process(&context.AutoscalingContext{ ClusterSnapshot: snapshot, diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go index f56fb19d98c0..3283cdeda2d8 100644 --- a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go +++ b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable.go @@ -26,7 +26,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/metrics" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" corev1helpers "k8s.io/component-helpers/scheduling/corev1" klog "k8s.io/klog/v2" @@ -38,9 +37,9 @@ type filterOutSchedulablePodListProcessor struct { } // NewFilterOutSchedulablePodListProcessor creates a PodListProcessor filtering out schedulable pods -func NewFilterOutSchedulablePodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *filterOutSchedulablePodListProcessor { +func NewFilterOutSchedulablePodListProcessor(nodeFilter func(*framework.NodeInfo) bool) *filterOutSchedulablePodListProcessor { return &filterOutSchedulablePodListProcessor{ - schedulingSimulator: scheduling.NewHintingSimulator(predicateChecker), + schedulingSimulator: scheduling.NewHintingSimulator(), nodeFilter: nodeFilter, } } diff --git a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go index e02e0b9c0bb6..d57eb18b3dc6 100644 --- a/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go +++ b/cluster-autoscaler/core/podlistprocessor/filter_out_schedulable_test.go @@ -22,18 +22,17 @@ import ( "time" "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + 
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) func TestFilterOutSchedulable(t *testing.T) { - schedulermetrics.Register() - node := buildReadyTestNode("node", 2000, 100) matchesAllNodes := func(*framework.NodeInfo) bool { return true } matchesNoNodes := func(*framework.NodeInfo) bool { return false } @@ -175,29 +174,23 @@ func TestFilterOutSchedulable(t *testing.T) { for tn, tc := range testCases { t.Run(tn, func(t *testing.T) { - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - predicateChecker, err := predicatechecker.NewTestPredicateChecker() - assert.NoError(t, err) + clusterSnapshot := testsnapshot.NewTestSnapshotOrDie(t) var allExpectedScheduledPods []*apiv1.Pod allExpectedScheduledPods = append(allExpectedScheduledPods, tc.expectedScheduledPods...) for node, pods := range tc.nodesWithPods { - err := clusterSnapshot.AddNode(node) - assert.NoError(t, err) - for _, pod := range pods { pod.Spec.NodeName = node.Name - err = clusterSnapshot.AddPod(pod, node.Name) - assert.NoError(t, err) - allExpectedScheduledPods = append(allExpectedScheduledPods, pod) } + err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node, pods...)) + assert.NoError(t, err) } clusterSnapshot.Fork() - processor := NewFilterOutSchedulablePodListProcessor(predicateChecker, tc.nodeFilter) + processor := NewFilterOutSchedulablePodListProcessor(tc.nodeFilter) unschedulablePods, err := processor.filterOutSchedulableByPacking(tc.unschedulableCandidates, clusterSnapshot) assert.NoError(t, err) @@ -256,8 +249,12 @@ func BenchmarkFilterOutSchedulable(b *testing.B) { }, } snapshots := map[string]func() clustersnapshot.ClusterSnapshot{ - "basic": func() clustersnapshot.ClusterSnapshot { return clustersnapshot.NewBasicClusterSnapshot() }, - "delta": func() clustersnapshot.ClusterSnapshot { return clustersnapshot.NewDeltaClusterSnapshot() }, + "basic": func() clustersnapshot.ClusterSnapshot { + return testsnapshot.NewCustomTestSnapshotOrDie(b, base.NewBasicSnapshotBase()) + }, + "delta": func() clustersnapshot.ClusterSnapshot { + return testsnapshot.NewCustomTestSnapshotOrDie(b, base.NewDeltaSnapshotBase()) + }, } for snapshotName, snapshotFactory := range snapshots { for _, tc := range tests { @@ -282,23 +279,15 @@ func BenchmarkFilterOutSchedulable(b *testing.B) { } } - predicateChecker, err := predicatechecker.NewTestPredicateChecker() - assert.NoError(b, err) - clusterSnapshot := snapshotFactory() - if err := clusterSnapshot.AddNodes(nodes); err != nil { + if err := clusterSnapshot.SetClusterState(nodes, scheduledPods); err != nil { assert.NoError(b, err) } - for _, pod := range scheduledPods { - if err := clusterSnapshot.AddPod(pod, pod.Spec.NodeName); err != nil { - assert.NoError(b, err) - } - } b.ResetTimer() for i := 0; i < b.N; i++ { - processor := NewFilterOutSchedulablePodListProcessor(predicateChecker, scheduling.ScheduleAnywhere) + processor := NewFilterOutSchedulablePodListProcessor(scheduling.ScheduleAnywhere) if stillPending, err := processor.filterOutSchedulableByPacking(pendingPods, clusterSnapshot); err != nil { assert.NoError(b, err) } else if len(stillPending) < tc.pendingPods { diff 
--git a/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go b/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go index 9557b134c2cc..cff1f6587d04 100644 --- a/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go +++ b/cluster-autoscaler/core/podlistprocessor/pod_list_processor.go @@ -19,17 +19,16 @@ package podlistprocessor import ( "k8s.io/autoscaler/cluster-autoscaler/processors/pods" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" ) // NewDefaultPodListProcessor returns a default implementation of the pod list // processor, which wraps and sequentially runs other sub-processors. -func NewDefaultPodListProcessor(predicateChecker predicatechecker.PredicateChecker, nodeFilter func(*framework.NodeInfo) bool) *pods.CombinedPodListProcessor { +func NewDefaultPodListProcessor(nodeFilter func(*framework.NodeInfo) bool) *pods.CombinedPodListProcessor { return pods.NewCombinedPodListProcessor([]pods.PodListProcessor{ NewClearTPURequestsPodListProcessor(), NewFilterOutExpendablePodListProcessor(), NewCurrentlyDrainedNodesPodListProcessor(), - NewFilterOutSchedulablePodListProcessor(predicateChecker, nodeFilter), + NewFilterOutSchedulablePodListProcessor(nodeFilter), NewFilterOutDaemonSetPodListProcessor(), }) } diff --git a/cluster-autoscaler/core/scaledown/actuation/actuator.go b/cluster-autoscaler/core/scaledown/actuation/actuator.go index b02e2016aaab..33be236918ab 100644 --- a/cluster-autoscaler/core/scaledown/actuation/actuator.go +++ b/cluster-autoscaler/core/scaledown/actuation/actuator.go @@ -33,6 +33,8 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/observers/nodegroupchange" "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/predicate" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" "k8s.io/autoscaler/cluster-autoscaler/simulator/utilization" @@ -356,8 +358,7 @@ func (a *Actuator) taintNode(node *apiv1.Node) error { } func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterSnapshot, error) { - knownNodes := make(map[string]bool) - snapshot := clustersnapshot.NewBasicClusterSnapshot() + snapshot := predicate.NewPredicateSnapshot(base.NewBasicSnapshotBase(), a.ctx.FrameworkHandle) pods, err := a.ctx.AllPodLister().List() if err != nil { return nil, err @@ -366,22 +367,10 @@ func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterS scheduledPods := kube_util.ScheduledPods(pods) nonExpendableScheduledPods := utils.FilterOutExpendablePods(scheduledPods, a.ctx.ExpendablePodsPriorityCutoff) - for _, node := range nodes { - if err := snapshot.AddNode(node); err != nil { - return nil, err - } - - knownNodes[node.Name] = true - } - - for _, pod := range nonExpendableScheduledPods { - if knownNodes[pod.Spec.NodeName] { - if err := snapshot.AddPod(pod, pod.Spec.NodeName); err != nil { - return nil, err - } - } + err = snapshot.SetClusterState(nodes, nonExpendableScheduledPods) + if err != nil { + return nil, err } - return snapshot, nil } diff --git a/cluster-autoscaler/core/scaledown/actuation/actuator_test.go b/cluster-autoscaler/core/scaledown/actuation/actuator_test.go index 2f48e498c8ee..9a278f4f6eb0 100644 --- 
a/cluster-autoscaler/core/scaledown/actuation/actuator_test.go +++ b/cluster-autoscaler/core/scaledown/actuation/actuator_test.go @@ -24,6 +24,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" + appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -43,13 +44,13 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/observers/nodegroupchange" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/utilization" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) type nodeGroupViewInfo struct { @@ -999,8 +1000,6 @@ func getStartDeletionTestCases(ignoreDaemonSetsUtilization bool, suffix string) } func TestStartDeletion(t *testing.T) { - schedulermetrics.Register() - testSets := []map[string]startDeletionTestCase{ // IgnoreDaemonSetsUtilization is false getStartDeletionTestCases(false, "testNg1"), @@ -1159,7 +1158,7 @@ func TestStartDeletion(t *testing.T) { csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker()) for _, bucket := range emptyNodeGroupViews { for _, node := range bucket.Nodes { - err := ctx.ClusterSnapshot.AddNodeWithPods(node, tc.pods[node.Name]) + err := ctx.ClusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node, tc.pods[node.Name]...)) if err != nil { t.Fatalf("Couldn't add node %q to snapshot: %v", node.Name, err) } @@ -1171,7 +1170,7 @@ func TestStartDeletion(t *testing.T) { if !found { t.Fatalf("Drain node %q doesn't have pods defined in the test case.", node.Name) } - err := ctx.ClusterSnapshot.AddNodeWithPods(node, pods) + err := ctx.ClusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node, pods...)) if err != nil { t.Fatalf("Couldn't add node %q to snapshot: %v", node.Name, err) } diff --git a/cluster-autoscaler/core/scaledown/actuation/drain_test.go b/cluster-autoscaler/core/scaledown/actuation/drain_test.go index 7205afe4a975..962a707990dc 100644 --- a/cluster-autoscaler/core/scaledown/actuation/drain_test.go +++ b/cluster-autoscaler/core/scaledown/actuation/drain_test.go @@ -26,6 +26,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -37,6 +38,8 @@ import ( . "k8s.io/autoscaler/cluster-autoscaler/core/test" "k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" @@ -610,9 +613,9 @@ func TestPodsToEvict(t *testing.T) { }, } { t.Run(tn, func(t *testing.T) { - snapshot := clustersnapshot.NewBasicClusterSnapshot() + snapshot := testsnapshot.NewTestSnapshotOrDie(t) node := BuildTestNode("test-node", 1000, 1000) - err := snapshot.AddNodeWithPods(node, tc.pods) + err := snapshot.AddNodeInfo(framework.NewTestNodeInfo(node, tc.pods...)) if err != nil { t.Errorf("AddNodeWithPods unexpected error: %v", err) } diff --git a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go index 23397ab4327e..b585d5951517 100644 --- a/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go +++ b/cluster-autoscaler/core/scaledown/eligibility/eligibility_test.go @@ -21,6 +21,9 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + + apiv1 "k8s.io/api/core/v1" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/config" "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable" @@ -29,10 +32,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - "github.com/stretchr/testify/assert" - apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/fake" ) @@ -149,8 +148,6 @@ func getTestCases(ignoreDaemonSetsUtilization bool, suffix string, now time.Time } func TestFilterOutUnremovable(t *testing.T) { - schedulermetrics.Register() - now := time.Now() for _, tc := range append(getTestCases(false, "IgnoreDaemonSetUtilization=false", now), getTestCases(true, "IgnoreDaemonsetUtilization=true", now)...) { diff --git a/cluster-autoscaler/core/scaledown/planner/planner.go b/cluster-autoscaler/core/scaledown/planner/planner.go index 2898e240cb05..32be506ca7ab 100644 --- a/cluster-autoscaler/core/scaledown/planner/planner.go +++ b/cluster-autoscaler/core/scaledown/planner/planner.go @@ -89,8 +89,8 @@ func New(context *context.AutoscalingContext, processors *processors.Autoscaling context: context, unremovableNodes: unremovable.NewNodes(), unneededNodes: unneeded.NewNodes(processors.NodeGroupConfigProcessor, resourceLimitsFinder), - rs: simulator.NewRemovalSimulator(context.ListerRegistry, context.ClusterSnapshot, context.PredicateChecker, deleteOptions, drainabilityRules, true), - actuationInjector: scheduling.NewHintingSimulator(context.PredicateChecker), + rs: simulator.NewRemovalSimulator(context.ListerRegistry, context.ClusterSnapshot, deleteOptions, drainabilityRules, true), + actuationInjector: scheduling.NewHintingSimulator(), eligibilityChecker: eligibility.NewChecker(processors.NodeGroupConfigProcessor), nodeUtilizationMap: make(map[string]utilization.Info), resourceLimitsFinder: resourceLimitsFinder, diff --git a/cluster-autoscaler/core/scaledown/planner/planner_test.go b/cluster-autoscaler/core/scaledown/planner/planner_test.go index 0aa4881a9613..fc3fe854be23 100644 --- a/cluster-autoscaler/core/scaledown/planner/planner_test.go +++ b/cluster-autoscaler/core/scaledown/planner/planner_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,12 +45,9 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/client-go/kubernetes/fake" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) func TestUpdateClusterState(t *testing.T) { - schedulermetrics.Register() - testCases := []struct { name string nodes []*apiv1.Node diff --git a/cluster-autoscaler/core/scaledown/unneeded/nodes_test.go b/cluster-autoscaler/core/scaledown/unneeded/nodes_test.go index 51048eb57df5..05f450150fe3 100644 --- a/cluster-autoscaler/core/scaledown/unneeded/nodes_test.go +++ b/cluster-autoscaler/core/scaledown/unneeded/nodes_test.go @@ -21,6 +21,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" @@ -33,9 +35,6 @@ import ( kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/client-go/kubernetes/fake" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - "github.com/stretchr/testify/assert" ) func TestUpdate(t *testing.T) { @@ -129,8 +128,6 @@ func version(n simulator.NodeToBeRemoved) string { } func TestRemovableAt(t *testing.T) { - schedulermetrics.Register() - testCases := []struct { name string numEmpty int diff --git a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go index de2dabf600bc..a8e82b87e7ba 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/async_initializer.go @@ -25,10 +25,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/context" - "k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" @@ -110,7 +110,7 @@ func (s *AsyncNodeGroupInitializer) InitializeNodeGroup(result nodegroups.AsyncN mainCreatedNodeGroup := result.CreationResult.MainCreatedNodeGroup // If possible replace candidate node-info with node info based on crated node group. The latter // one should be more in line with nodes which will be created by node group. - nodeInfo, aErr := utils.GetNodeInfoFromTemplate(mainCreatedNodeGroup, s.daemonSets, s.taintConfig) + nodeInfo, aErr := simulator.TemplateNodeInfoFromNodeGroupTemplate(mainCreatedNodeGroup, s.daemonSets, s.taintConfig) if aErr != nil { klog.Warningf("Cannot build node info for newly created main node group %s. Using fallback. 
Error: %v", mainCreatedNodeGroup.Id(), aErr) nodeInfo = s.nodeInfo diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go index dd4b53a241ac..73161a658e7f 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator.go @@ -27,7 +27,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/context" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/equivalence" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/resource" - "k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/expander" "k8s.io/autoscaler/cluster-autoscaler/metrics" @@ -35,6 +34,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/processors/status" + "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/klogx" @@ -465,7 +465,6 @@ func (o *ScaleUpOrchestrator) ComputeExpansionOption( estimateStart := time.Now() expansionEstimator := o.estimatorBuilder( - o.autoscalingContext.PredicateChecker, o.autoscalingContext.ClusterSnapshot, estimator.NewEstimationContext(o.autoscalingContext.MaxNodesTotal, option.SimilarNodeGroups, currentNodeCount), ) @@ -527,7 +526,7 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup( // If possible replace candidate node-info with node info based on crated node group. The latter // one should be more in line with nodes which will be created by node group. - mainCreatedNodeInfo, aErr := utils.GetNodeInfoFromTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, o.taintConfig) + mainCreatedNodeInfo, aErr := simulator.TemplateNodeInfoFromNodeGroupTemplate(createNodeGroupResult.MainCreatedNodeGroup, daemonSets, o.taintConfig) if aErr == nil { nodeInfos[createNodeGroupResult.MainCreatedNodeGroup.Id()] = mainCreatedNodeInfo schedulablePodGroups[createNodeGroupResult.MainCreatedNodeGroup.Id()] = o.SchedulablePodGroups(podEquivalenceGroups, createNodeGroupResult.MainCreatedNodeGroup, mainCreatedNodeInfo) @@ -542,7 +541,7 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup( delete(schedulablePodGroups, oldId) } for _, nodeGroup := range createNodeGroupResult.ExtraCreatedNodeGroups { - nodeInfo, aErr := utils.GetNodeInfoFromTemplate(nodeGroup, daemonSets, o.taintConfig) + nodeInfo, aErr := simulator.TemplateNodeInfoFromNodeGroupTemplate(nodeGroup, daemonSets, o.taintConfig) if aErr != nil { klog.Warningf("Cannot build node info for newly created extra node group %v; balancing similar node groups will not work; err=%v", nodeGroup.Id(), aErr) continue @@ -569,11 +568,7 @@ func (o *ScaleUpOrchestrator) SchedulablePodGroups( defer o.autoscalingContext.ClusterSnapshot.Revert() // Add test node to snapshot. 
- var allPods []*apiv1.Pod - for _, podInfo := range nodeInfo.Pods() { - allPods = append(allPods, podInfo.Pod) - } - if err := o.autoscalingContext.ClusterSnapshot.AddNodeWithPods(nodeInfo.Node(), allPods); err != nil { + if err := o.autoscalingContext.ClusterSnapshot.AddNodeInfo(nodeInfo); err != nil { klog.Errorf("Error while adding test Node: %v", err) return []estimator.PodEquivalenceGroup{} } @@ -581,7 +576,7 @@ func (o *ScaleUpOrchestrator) SchedulablePodGroups( var schedulablePodGroups []estimator.PodEquivalenceGroup for _, eg := range podEquivalenceGroups { samplePod := eg.Pods[0] - if err := o.autoscalingContext.PredicateChecker.CheckPredicates(o.autoscalingContext.ClusterSnapshot, samplePod, nodeInfo.Node().Name); err == nil { + if err := o.autoscalingContext.ClusterSnapshot.CheckPredicates(samplePod, nodeInfo.Node().Name); err == nil { // Add pods to option. schedulablePodGroups = append(schedulablePodGroups, estimator.PodEquivalenceGroup{ Pods: eg.Pods, @@ -590,7 +585,7 @@ func (o *ScaleUpOrchestrator) SchedulablePodGroups( eg.Schedulable = true eg.SchedulableGroups = append(eg.SchedulableGroups, nodeGroup.Id()) } else { - klog.V(2).Infof("Pod %s/%s can't be scheduled on %s, predicate checking error: %v", samplePod.Namespace, samplePod.Name, nodeGroup.Id(), err.VerboseMessage()) + klog.V(2).Infof("Pod %s/%s can't be scheduled on %s, predicate checking error: %v", samplePod.Namespace, samplePod.Name, nodeGroup.Id(), err) if podCount := len(eg.Pods); podCount > 1 { klog.V(2).Infof("%d other pods similar to %s can't be scheduled on %s", podCount-1, samplePod.Name, nodeGroup.Id()) } diff --git a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go index a1cdd15eba91..b3d6448322b3 100644 --- a/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go +++ b/cluster-autoscaler/core/scaleup/orchestrator/orchestrator_test.go @@ -26,13 +26,9 @@ import ( "testing" "time" - "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig" - "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups" - "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - kube_record "k8s.io/client-go/tools/record" - "k8s.io/component-base/metrics/legacyregistry" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" @@ -44,20 +40,21 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/estimator" "k8s.io/autoscaler/cluster-autoscaler/metrics" "k8s.io/autoscaler/cluster-autoscaler/processors" + "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig" + "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupset" "k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider" "k8s.io/autoscaler/cluster-autoscaler/processors/status" processorstest "k8s.io/autoscaler/cluster-autoscaler/processors/test" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/autoscaler/cluster-autoscaler/utils/units" - - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes/fake" + kube_record "k8s.io/client-go/tools/record" + "k8s.io/component-base/metrics/legacyregistry" "github.com/stretchr/testify/assert" ) @@ -72,8 +69,6 @@ var defaultOptions = config.AutoscalingOptions{ // Scale up scenarios. func TestScaleUpOK(t *testing.T) { - schedulermetrics.Register() - config := &ScaleUpTestConfig{ Nodes: []NodeConfig{ {Name: "n1", Cpu: 100, Memory: 100, Gpu: 0, Ready: true, Group: "ng1"}, @@ -1049,6 +1044,8 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR // build orchestrator context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil) assert.NoError(t, err) + err = context.ClusterSnapshot.SetClusterState(nodes, kube_util.ScheduledPods(pods)) + assert.NoError(t, err) nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false). Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) @@ -1130,13 +1127,15 @@ func TestScaleUpUnhealthy(t *testing.T) { SetNodeReadyState(n1, true, someTimeAgo) n2 := BuildTestNode("n2", 1000, 1000) SetNodeReadyState(n2, true, someTimeAgo) + nodes := []*apiv1.Node{n1, n2} p1 := BuildTestPod("p1", 80, 0) p2 := BuildTestPod("p2", 800, 0) p1.Spec.NodeName = "n1" p2.Spec.NodeName = "n2" + pods := []*apiv1.Pod{p1, p2} - podLister := kube_util.NewTestPodLister([]*apiv1.Pod{p1, p2}) + podLister := kube_util.NewTestPodLister(pods) listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil) provider := testprovider.NewTestCloudProvider(func(nodeGroup string, increase int) error { @@ -1155,8 +1154,8 @@ func TestScaleUpUnhealthy(t *testing.T) { } context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil) assert.NoError(t, err) - - nodes := []*apiv1.Node{n1, n2} + err = context.ClusterSnapshot.SetClusterState(nodes, pods) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker()) clusterState.UpdateNodes(nodes, nodeInfos, time.Now()) @@ -1198,7 +1197,8 @@ func TestBinpackingLimiter(t *testing.T) { context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil) assert.NoError(t, err) - + err = context.ClusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(t, err) nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false). 
Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) @@ -1233,11 +1233,13 @@ func TestScaleUpNoHelp(t *testing.T) { n1 := BuildTestNode("n1", 100, 1000) now := time.Now() SetNodeReadyState(n1, true, now.Add(-2*time.Minute)) + nodes := []*apiv1.Node{n1} p1 := BuildTestPod("p1", 80, 0) p1.Spec.NodeName = "n1" + pods := []*apiv1.Pod{p1} - podLister := kube_util.NewTestPodLister([]*apiv1.Pod{p1}) + podLister := kube_util.NewTestPodLister(pods) listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil) provider := testprovider.NewTestCloudProvider(func(nodeGroup string, increase int) error { @@ -1255,8 +1257,8 @@ func TestScaleUpNoHelp(t *testing.T) { } context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil) assert.NoError(t, err) - - nodes := []*apiv1.Node{n1} + err = context.ClusterSnapshot.SetClusterState(nodes, pods) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker()) clusterState.UpdateNodes(nodes, nodeInfos, time.Now()) @@ -1410,7 +1412,8 @@ func TestComputeSimilarNodeGroups(t *testing.T) { listers := kube_util.NewListerRegistry(nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil) ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{BalanceSimilarNodeGroups: tc.balancingEnabled}, &fake.Clientset{}, listers, provider, nil, nil) assert.NoError(t, err) - + err = ctx.ClusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker()) assert.NoError(t, clusterState.UpdateNodes(nodes, nodeInfos, time.Now())) @@ -1474,7 +1477,8 @@ func TestScaleUpBalanceGroups(t *testing.T) { } context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil) assert.NoError(t, err) - + err = context.ClusterSnapshot.SetClusterState(nodes, podList) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker()) clusterState.UpdateNodes(nodes, nodeInfos, time.Now()) @@ -1650,6 +1654,8 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) { assert.NoError(t, err) nodes := []*apiv1.Node{n1, n2} + err = context.ClusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(t, err) 
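The surrounding test hunks all repeat the same new setup step: the context's snapshot is seeded explicitly with `SetClusterState(nodes, pods)` before template node infos are derived. A condensed sketch of that sequence, built only from calls visible in this diff; the helper name `seedSnapshotForScaleUpTest` is hypothetical:

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	appsv1 "k8s.io/api/apps/v1"
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/context"
	"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
	"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
)

// seedSnapshotForScaleUpTest mirrors the setup these hunks add to each test:
// populate the snapshot up front, then compute template node infos from it.
func seedSnapshotForScaleUpTest(t *testing.T, ctx *context.AutoscalingContext, nodes []*apiv1.Node, pods []*apiv1.Pod) {
	err := ctx.ClusterSnapshot.SetClusterState(nodes, pods)
	assert.NoError(t, err)

	// Node infos are derived afterwards, as in the surrounding tests.
	nodeInfos, typedErr := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
		Process(ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
	assert.NoError(t, typedErr)
	_ = nodeInfos
}
```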
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now()) processors := processorstest.NewTestProcessors(&context) clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker()) diff --git a/cluster-autoscaler/core/scaleup/resource/manager_test.go b/cluster-autoscaler/core/scaleup/resource/manager_test.go index ac1204be1b55..4c7d64ad3930 100644 --- a/cluster-autoscaler/core/scaleup/resource/manager_test.go +++ b/cluster-autoscaler/core/scaleup/resource/manager_test.go @@ -22,6 +22,7 @@ import ( "time" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" @@ -35,7 +36,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/taints" utils_test "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/client-go/kubernetes/fake" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) type nodeGroupConfig struct { @@ -53,8 +53,6 @@ type deltaForNodeTestCase struct { } func TestDeltaForNode(t *testing.T) { - schedulermetrics.Register() - testCases := []deltaForNodeTestCase{ { nodeGroupConfig: nodeGroupConfig{Name: "ng1", Min: 3, Max: 10, Size: 5, CPU: 8, Mem: 16}, @@ -73,6 +71,8 @@ func TestDeltaForNode(t *testing.T) { ng := testCase.nodeGroupConfig group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem) + err := ctx.ClusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now()) rm := NewManager(processors.CustomResourcesProcessor) @@ -114,6 +114,8 @@ func TestResourcesLeft(t *testing.T) { ng := testCase.nodeGroupConfig _, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem) + err := ctx.ClusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now()) rm := NewManager(processors.CustomResourcesProcessor) @@ -165,6 +167,8 @@ func TestApplyLimits(t *testing.T) { ng := testCase.nodeGroupConfig group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem) + err := ctx.ClusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now()) rm := NewManager(processors.CustomResourcesProcessor) @@ -230,6 +234,8 @@ func TestResourceManagerWithGpuResource(t *testing.T) { assert.NoError(t, err) nodes := []*corev1.Node{n1} + err = context.ClusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(t, err) nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now()) rm := NewManager(processors.CustomResourcesProcessor) diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go index f14e8db7f681..7c86ea222f9d 100644 --- 
a/cluster-autoscaler/core/static_autoscaler.go +++ b/cluster-autoscaler/core/static_autoscaler.go @@ -34,7 +34,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/planner" scaledownstatus "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup" - orchestrator "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/orchestrator" + "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/orchestrator" core_utils "k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot" "k8s.io/autoscaler/cluster-autoscaler/estimator" @@ -48,17 +48,15 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" caerrors "k8s.io/autoscaler/cluster-autoscaler/utils/errors" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" - scheduler_utils "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" "k8s.io/utils/integer" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - klog "k8s.io/klog/v2" + "k8s.io/klog/v2" ) const ( @@ -132,7 +130,7 @@ func (callbacks *staticAutoscalerProcessorCallbacks) reset() { // NewStaticAutoscaler creates an instance of Autoscaler filled with provided parameters func NewStaticAutoscaler( opts config.AutoscalingOptions, - predicateChecker predicatechecker.PredicateChecker, + fwHandle *framework.Handle, clusterSnapshot clustersnapshot.ClusterSnapshot, autoscalingKubeClients *context.AutoscalingKubeClients, processors *ca_processors.AutoscalingProcessors, @@ -155,7 +153,7 @@ func NewStaticAutoscaler( processorCallbacks := newStaticAutoscalerProcessorCallbacks() autoscalingContext := context.NewAutoscalingContext( opts, - predicateChecker, + fwHandle, clusterSnapshot, autoscalingKubeClients, cloudProvider, @@ -242,28 +240,6 @@ func (a *StaticAutoscaler) cleanUpIfRequired() { a.initialized = true } -func (a *StaticAutoscaler) initializeClusterSnapshot(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod) caerrors.AutoscalerError { - a.ClusterSnapshot.Clear() - - knownNodes := make(map[string]bool) - for _, node := range nodes { - if err := a.ClusterSnapshot.AddNode(node); err != nil { - klog.Errorf("Failed to add node %s to cluster snapshot: %v", node.Name, err) - return caerrors.ToAutoscalerError(caerrors.InternalError, err) - } - knownNodes[node.Name] = true - } - for _, pod := range scheduledPods { - if knownNodes[pod.Spec.NodeName] { - if err := a.ClusterSnapshot.AddPod(pod, pod.Spec.NodeName); err != nil { - klog.Errorf("Failed to add pod %s scheduled to node %s to cluster snapshot: %v", pod.Name, pod.Spec.NodeName, err) - return caerrors.ToAutoscalerError(caerrors.InternalError, err) - } - } - } - return nil -} - func (a *StaticAutoscaler) initializeRemainingPdbTracker() caerrors.AutoscalerError { a.RemainingPdbTracker.Clear() @@ -361,8 +337,8 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr } nonExpendableScheduledPods := core_utils.FilterOutExpendablePods(originalScheduledPods, a.ExpendablePodsPriorityCutoff) // Initialize cluster state to ClusterSnapshot - if typedErr := a.initializeClusterSnapshot(allNodes, nonExpendableScheduledPods); typedErr != nil { - return typedErr.AddPrefix("failed to initialize ClusterSnapshot: 
") + if err := a.ClusterSnapshot.SetClusterState(allNodes, nonExpendableScheduledPods); err != nil { + return caerrors.ToAutoscalerError(caerrors.InternalError, err).AddPrefix("failed to initialize ClusterSnapshot: ") } // Initialize Pod Disruption Budget tracking if typedErr := a.initializeRemainingPdbTracker(); typedErr != nil { @@ -486,7 +462,7 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) caerrors.AutoscalerErr allNodes = subtractNodesByName(allNodes, allRegisteredUpcoming) // Remove the nodes from the snapshot as well so that the state is consistent. for _, notStartedNodeName := range allRegisteredUpcoming { - err := a.ClusterSnapshot.RemoveNode(notStartedNodeName) + err := a.ClusterSnapshot.RemoveNodeInfo(notStartedNodeName) if err != nil { klog.Errorf("Failed to remove NotStarted node %s from cluster snapshot: %v", notStartedNodeName, err) // ErrNodeNotFound shouldn't happen (so it needs to be logged above if it does), but what we care about here is that the @@ -682,20 +658,16 @@ func (a *StaticAutoscaler) addUpcomingNodesToClusterSnapshot(upcomingCounts map[ nodeGroups := a.nodeGroupsById() upcomingNodeGroups := make(map[string]int) upcomingNodesFromUpcomingNodeGroups := 0 - for nodeGroupName, upcomingNodes := range getUpcomingNodeInfos(upcomingCounts, nodeInfosForGroups) { + for nodeGroupName, upcomingNodeInfos := range getUpcomingNodeInfos(upcomingCounts, nodeInfosForGroups) { nodeGroup := nodeGroups[nodeGroupName] if nodeGroup == nil { return fmt.Errorf("failed to find node group: %s", nodeGroupName) } isUpcomingNodeGroup := a.processors.AsyncNodeGroupStateChecker.IsUpcoming(nodeGroup) - for _, upcomingNode := range upcomingNodes { - var pods []*apiv1.Pod - for _, podInfo := range upcomingNode.Pods() { - pods = append(pods, podInfo.Pod) - } - err := a.ClusterSnapshot.AddNodeWithPods(upcomingNode.Node(), pods) + for _, upcomingNodeInfo := range upcomingNodeInfos { + err := a.ClusterSnapshot.AddNodeInfo(upcomingNodeInfo) if err != nil { - return fmt.Errorf("Failed to add upcoming node %s to cluster snapshot: %w", upcomingNode.Node().Name, err) + return fmt.Errorf("failed to add upcoming node %s to cluster snapshot: %w", upcomingNodeInfo.Node().Name, err) } if isUpcomingNodeGroup { upcomingNodesFromUpcomingNodeGroups++ @@ -1054,7 +1026,7 @@ func getUpcomingNodeInfos(upcomingCounts map[string]int, nodeInfos map[string]*f // Ensure new nodes have different names because nodeName // will be used as a map key. Also deep copy pods (daemonsets & // any pods added by cloud provider on template). 
- nodes = append(nodes, scheduler_utils.DeepCopyTemplateNode(nodeTemplate, fmt.Sprintf("upcoming-%d", i))) + nodes = append(nodes, simulator.FreshNodeInfoFromTemplateNodeInfo(nodeTemplate, fmt.Sprintf("upcoming-%d", i))) } upcomingNodes[nodeGroup] = nodes } diff --git a/cluster-autoscaler/core/static_autoscaler_test.go b/cluster-autoscaler/core/static_autoscaler_test.go index 2df10d4b7355..ec0b084a752b 100644 --- a/cluster-autoscaler/core/static_autoscaler_test.go +++ b/cluster-autoscaler/core/static_autoscaler_test.go @@ -27,6 +27,16 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" + policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" mockprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/mocks" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" @@ -63,22 +73,10 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - kube_record "k8s.io/client-go/tools/record" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" - policyv1 "k8s.io/api/policy/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes/fake" v1appslister "k8s.io/client-go/listers/apps/v1" - - "github.com/google/go-cmp/cmp" - "github.com/google/go-cmp/cmp/cmpopts" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - klog "k8s.io/klog/v2" + kube_record "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ) type podListerMock struct { @@ -313,8 +311,6 @@ func setupAutoscaler(config *autoscalerSetupConfig) (*StaticAutoscaler, error) { // TODO: Refactor tests to use setupAutoscaler func TestStaticAutoscalerRunOnce(t *testing.T) { - schedulermetrics.Register() - readyNodeLister := kubernetes.NewTestNodeLister(nil) allNodeLister := kubernetes.NewTestNodeLister(nil) allPodListerMock := &podListerMock{} @@ -406,7 +402,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // MaxNodesTotal reached. readyNodeLister.SetNodes([]*apiv1.Node{n1}) allNodeLister.SetNodes([]*apiv1.Node{n1}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -417,7 +413,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // Scale up. readyNodeLister.SetNodes([]*apiv1.Node{n1}) allNodeLister.SetNodes([]*apiv1.Node{n1}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() onScaleUpMock.On("ScaleUp", "ng1", 1).Return(nil).Once() @@ -431,7 +427,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // Mark unneeded nodes. 
readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -446,7 +442,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // Scale down. readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Times(3) + allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() onScaleDownMock.On("ScaleDown", "ng1", "n2").Return(nil).Once() @@ -460,7 +456,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // Mark unregistered nodes. readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -475,7 +471,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // Remove unregistered nodes. readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() onScaleDownMock.On("ScaleDown", "ng2", "n3").Return(nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -489,7 +485,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) { // Scale up to node group min size. readyNodeLister.SetNodes([]*apiv1.Node{n4}) allNodeLister.SetNodes([]*apiv1.Node{n4}) - allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil) podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil) onScaleUpMock.On("ScaleUp", "ng3", 2).Return(nil).Once() // 2 new nodes are supposed to be scaled up. @@ -689,7 +685,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) { // Mark unneeded nodes. 
readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -701,7 +697,7 @@ func TestStaticAutoscalerRunOnceWithScaleDownDelayPerNG(t *testing.T) { // Scale down nodegroup readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Times(3) + allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil) onScaleDownMock.On("ScaleDown", tc.expectedScaleDownNG, tc.expectedScaleDownNode).Return(nil).Once() @@ -828,7 +824,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { // Scale up. readyNodeLister.SetNodes([]*apiv1.Node{n1}) allNodeLister.SetNodes([]*apiv1.Node{n1}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() onNodeGroupCreateMock.On("Create", "autoprovisioned-TN2").Return(nil).Once() @@ -845,7 +841,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { // Remove autoprovisioned node group and mark unneeded nodes. readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() onNodeGroupDeleteMock.On("Delete", "autoprovisioned-TN1").Return(nil).Once() @@ -861,7 +857,7 @@ func TestStaticAutoscalerRunOnceWithAutoprovisionedEnabled(t *testing.T) { // Scale down. readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Times(3) + allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() onNodeGroupDeleteMock.On("Delete", "autoprovisioned-"+ @@ -984,7 +980,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) { // Scale up. 
readyNodeLister.SetNodes(nodes) allNodeLister.SetNodes(nodes) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() onScaleUpMock.On("ScaleUp", "ng1", 1).Return(nil).Once() @@ -1002,7 +998,7 @@ func TestStaticAutoscalerRunOnceWithALongUnregisteredNode(t *testing.T) { // Remove broken node readyNodeLister.SetNodes(nodes) allNodeLister.SetNodes(nodes) - allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Once() onScaleDownMock.On("ScaleDown", "ng1", "broken").Return(nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -1137,7 +1133,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) { // Scale up readyNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5, p6}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5, p6}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() onScaleUpMock.On("ScaleUp", "ng2", 1).Return(nil).Once() @@ -1150,7 +1146,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) { // Mark unneeded nodes. readyNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -1164,7 +1160,7 @@ func TestStaticAutoscalerRunOncePodsWithPriorities(t *testing.T) { // Scale down. 
readyNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2, n3}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Times(3) + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3, p4, p5}, nil).Twice() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() onScaleDownMock.On("ScaleDown", "ng1", "n1").Return(nil).Once() @@ -1266,7 +1262,7 @@ func TestStaticAutoscalerRunOnceWithFilteringOnBinPackingEstimator(t *testing.T) // Scale up readyNodeLister.SetNodes([]*apiv1.Node{n1, n2}) allNodeLister.SetNodes([]*apiv1.Node{n1, n2}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p3, p4}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p3, p4}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -1365,7 +1361,7 @@ func TestStaticAutoscalerRunOnceWithFilteringOnUpcomingNodesEnabledNoScaleUp(t * // Scale up readyNodeLister.SetNodes([]*apiv1.Node{n2, n3}) allNodeLister.SetNodes([]*apiv1.Node{n2, n3}) - allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3}, nil).Twice() + allPodListerMock.On("List").Return([]*apiv1.Pod{p1, p2, p3}, nil).Once() daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() @@ -1566,7 +1562,7 @@ func TestStaticAutoscalerRunOnceWithBypassedSchedulers(t *testing.T) { tc.setupConfig.mocks.readyNodeLister.SetNodes([]*apiv1.Node{n1}) tc.setupConfig.mocks.allNodeLister.SetNodes([]*apiv1.Node{n1}) - tc.setupConfig.mocks.allPodLister.On("List").Return(tc.pods, nil).Twice() + tc.setupConfig.mocks.allPodLister.On("List").Return(tc.pods, nil).Once() tc.setupConfig.mocks.daemonSetLister.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil).Once() tc.setupConfig.mocks.podDisruptionBudgetLister.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil).Once() if tc.expectedScaleUp != nil { diff --git a/cluster-autoscaler/core/test/common.go b/cluster-autoscaler/core/test/common.go index 7dfc57b765ae..5abeffa27e85 100644 --- a/cluster-autoscaler/core/test/common.go +++ b/cluster-autoscaler/core/test/common.go @@ -36,9 +36,8 @@ import ( processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks" "k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups" "k8s.io/autoscaler/cluster-autoscaler/processors/status" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/backoff" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" @@ -175,15 +174,14 @@ func NewScaleTestAutoscalingContext( if err != nil { return context.AutoscalingContext{}, err } - predicateChecker, err := predicatechecker.NewTestPredicateChecker() - if err != nil { - return context.AutoscalingContext{}, err - } remainingPdbTracker := pdb.NewBasicRemainingPdbTracker() if debuggingSnapshotter == nil { debuggingSnapshotter = 
debuggingsnapshot.NewDebuggingSnapshotter(false) } - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() + clusterSnapshot, err := testsnapshot.NewTestSnapshot() + if err != nil { + return context.AutoscalingContext{}, err + } return context.AutoscalingContext{ AutoscalingOptions: options, AutoscalingKubeClients: context.AutoscalingKubeClients{ @@ -193,7 +191,6 @@ func NewScaleTestAutoscalingContext( ListerRegistry: listers, }, CloudProvider: provider, - PredicateChecker: predicateChecker, ClusterSnapshot: clusterSnapshot, ExpanderStrategy: random.NewStrategy(), ProcessorCallbacks: processorCallbacks, diff --git a/cluster-autoscaler/core/utils/utils.go b/cluster-autoscaler/core/utils/utils.go index c25db2ef8453..1b493b783cfc 100644 --- a/cluster-autoscaler/core/utils/utils.go +++ b/cluster-autoscaler/core/utils/utils.go @@ -17,52 +17,17 @@ limitations under the License. package utils import ( - "fmt" - "math/rand" "reflect" "time" - appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/clusterstate" "k8s.io/autoscaler/cluster-autoscaler/metrics" - "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" "k8s.io/autoscaler/cluster-autoscaler/utils/gpu" - "k8s.io/autoscaler/cluster-autoscaler/utils/labels" - "k8s.io/autoscaler/cluster-autoscaler/utils/taints" ) -// GetNodeInfoFromTemplate returns NodeInfo object built base on TemplateNodeInfo returned by NodeGroup.TemplateNodeInfo(). -func GetNodeInfoFromTemplate(nodeGroup cloudprovider.NodeGroup, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) { - id := nodeGroup.Id() - baseNodeInfo, err := nodeGroup.TemplateNodeInfo() - if err != nil { - return nil, errors.ToAutoscalerError(errors.CloudProviderError, err) - } - - labels.UpdateDeprecatedLabels(baseNodeInfo.Node().ObjectMeta.Labels) - - sanitizedNode, typedErr := SanitizeNode(baseNodeInfo.Node(), id, taintConfig) - if err != nil { - return nil, typedErr - } - baseNodeInfo.SetNode(sanitizedNode) - - pods, err := daemonset.GetDaemonSetPodsForNode(baseNodeInfo, daemonsets) - if err != nil { - return nil, errors.ToAutoscalerError(errors.InternalError, err) - } - for _, podInfo := range baseNodeInfo.Pods() { - pods = append(pods, &framework.PodInfo{Pod: podInfo.Pod}) - } - - sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, SanitizePods(pods, sanitizedNode)...) - return sanitizedNodeInfo, nil -} - // isVirtualNode determines if the node is created by virtual kubelet func isVirtualNode(node *apiv1.Node) bool { return node.ObjectMeta.Labels["type"] == "virtual-kubelet" @@ -89,48 +54,6 @@ func FilterOutNodesFromNotAutoscaledGroups(nodes []*apiv1.Node, cloudProvider cl return result, nil } -// DeepCopyNodeInfo clones the provided nodeInfo -func DeepCopyNodeInfo(nodeInfo *framework.NodeInfo) *framework.NodeInfo { - newPods := make([]*framework.PodInfo, 0) - for _, podInfo := range nodeInfo.Pods() { - newPods = append(newPods, &framework.PodInfo{Pod: podInfo.Pod.DeepCopy()}) - } - - // Build a new node info. - newNodeInfo := framework.NewNodeInfo(nodeInfo.Node().DeepCopy(), nil, newPods...) 
- return newNodeInfo -} - -// SanitizeNode cleans up nodes used for node group templates -func SanitizeNode(node *apiv1.Node, nodeGroup string, taintConfig taints.TaintConfig) (*apiv1.Node, errors.AutoscalerError) { - newNode := node.DeepCopy() - nodeName := fmt.Sprintf("template-node-for-%s-%d", nodeGroup, rand.Int63()) - newNode.Labels = make(map[string]string, len(node.Labels)) - for k, v := range node.Labels { - if k != apiv1.LabelHostname { - newNode.Labels[k] = v - } else { - newNode.Labels[k] = nodeName - } - } - newNode.Name = nodeName - newNode.Spec.Taints = taints.SanitizeTaints(newNode.Spec.Taints, taintConfig) - return newNode, nil -} - -// SanitizePods cleans up pods used for node group templates -func SanitizePods(pods []*framework.PodInfo, sanitizedNode *apiv1.Node) []*framework.PodInfo { - // Update node name in pods. - sanitizedPods := make([]*framework.PodInfo, 0) - for _, pod := range pods { - sanitizedPod := pod.Pod.DeepCopy() - sanitizedPod.Spec.NodeName = sanitizedNode.Name - sanitizedPods = append(sanitizedPods, &framework.PodInfo{Pod: sanitizedPod}) - } - - return sanitizedPods -} - func hasHardInterPodAffinity(affinity *apiv1.Affinity) bool { if affinity == nil { return false diff --git a/cluster-autoscaler/core/utils/utils_test.go b/cluster-autoscaler/core/utils/utils_test.go index b63badbcc834..2613b0419a13 100644 --- a/cluster-autoscaler/core/utils/utils_test.go +++ b/cluster-autoscaler/core/utils/utils_test.go @@ -20,8 +20,6 @@ import ( "testing" "time" - "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "github.com/stretchr/testify/assert" @@ -29,33 +27,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func TestSanitizePods(t *testing.T) { - pod := BuildTestPod("p1", 80, 0) - pod.Spec.NodeName = "n1" - pods := []*framework.PodInfo{{Pod: pod}} - - node := BuildTestNode("node", 1000, 1000) - - resNode, err := SanitizeNode(node, "test-group", taints.TaintConfig{}) - assert.NoError(t, err) - res := SanitizePods(pods, resNode) - assert.Equal(t, 1, len(res)) -} - -func TestSanitizeLabels(t *testing.T) { - oldNode := BuildTestNode("ng1-1", 1000, 1000) - oldNode.Labels = map[string]string{ - apiv1.LabelHostname: "abc", - "x": "y", - } - node, err := SanitizeNode(oldNode, "bzium", taints.TaintConfig{}) - assert.NoError(t, err) - assert.NotEqual(t, node.Labels[apiv1.LabelHostname], "abc", nil) - assert.Equal(t, node.Labels["x"], "y") - assert.NotEqual(t, node.Name, oldNode.Name) - assert.Equal(t, node.Labels[apiv1.LabelHostname], node.Name) -} - func TestGetNodeResource(t *testing.T) { node := BuildTestNode("n1", 1000, 2*MiB) diff --git a/cluster-autoscaler/estimator/binpacking_estimator.go b/cluster-autoscaler/estimator/binpacking_estimator.go index 6ffad3800df6..313155d9c2d1 100644 --- a/cluster-autoscaler/estimator/binpacking_estimator.go +++ b/cluster-autoscaler/estimator/binpacking_estimator.go @@ -21,16 +21,14 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" + core_utils "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" - "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" - klog "k8s.io/klog/v2" + "k8s.io/klog/v2" ) // BinpackingNodeEstimator estimates the number of needed nodes to handle the given amount 
of pods. type BinpackingNodeEstimator struct { - predicateChecker predicatechecker.PredicateChecker clusterSnapshot clustersnapshot.ClusterSnapshot limiter EstimationLimiter podOrderer EstimationPodOrderer @@ -48,9 +46,13 @@ type estimationState struct { newNodesWithPods map[string]bool } +func (s *estimationState) trackScheduledPod(pod *apiv1.Pod, nodeName string) { + s.newNodesWithPods[nodeName] = true + s.scheduledPods = append(s.scheduledPods, pod) +} + // NewBinpackingNodeEstimator builds a new BinpackingNodeEstimator. func NewBinpackingNodeEstimator( - predicateChecker predicatechecker.PredicateChecker, clusterSnapshot clustersnapshot.ClusterSnapshot, limiter EstimationLimiter, podOrderer EstimationPodOrderer, @@ -58,7 +60,6 @@ func NewBinpackingNodeEstimator( estimationAnalyserFunc EstimationAnalyserFunc, ) *BinpackingNodeEstimator { return &BinpackingNodeEstimator{ - predicateChecker: predicateChecker, clusterSnapshot: clusterSnapshot, limiter: limiter, podOrderer: podOrderer, @@ -136,16 +137,16 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnExistingNodes( pod := pods[index] // Check schedulability on all nodes created during simulation - nodeName, err := e.predicateChecker.FitsAnyNodeMatching(e.clusterSnapshot, pod, func(nodeInfo *framework.NodeInfo) bool { + nodeName, err := e.clusterSnapshot.SchedulePodOnAnyNodeMatching(pod, func(nodeInfo *framework.NodeInfo) bool { return estimationState.newNodeNames[nodeInfo.Node().Name] }) - if err != nil { + if err != nil && err.Type() == clustersnapshot.NoNodesPassingPredicatesFoundError { break - } - - if err := e.tryToAddNode(estimationState, pod, nodeName); err != nil { + } else if err != nil { + // Unexpected error. return nil, err } + estimationState.trackScheduledPod(pod, nodeName) } return pods[index:], nil } @@ -160,11 +161,12 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes( if estimationState.lastNodeName != "" { // Check schedulability on only newly created node - if err := e.predicateChecker.CheckPredicates(e.clusterSnapshot, pod, estimationState.lastNodeName); err == nil { + if err := e.clusterSnapshot.SchedulePod(pod, estimationState.lastNodeName); err == nil { found = true - if err := e.tryToAddNode(estimationState, pod, estimationState.lastNodeName); err != nil { - return err - } + estimationState.trackScheduledPod(pod, estimationState.lastNodeName) + } else if err.Type() != clustersnapshot.FailingPredicateError { + // Unexpected error. + return err } } @@ -195,12 +197,13 @@ func (e *BinpackingNodeEstimator) tryToScheduleOnNewNodes( // Note that this may still fail (ex. if topology spreading with zonal topologyKey is used); // in this case we can't help the pending pod. We keep the node in clusterSnapshot to avoid // adding and removing node to snapshot for each such pod. - if err := e.predicateChecker.CheckPredicates(e.clusterSnapshot, pod, estimationState.lastNodeName); err != nil { + if err := e.clusterSnapshot.SchedulePod(pod, estimationState.lastNodeName); err != nil && err.Type() == clustersnapshot.FailingPredicateError { break - } - if err := e.tryToAddNode(estimationState, pod, estimationState.lastNodeName); err != nil { + } else if err != nil { + // Unexpected error. 
return err } + estimationState.trackScheduledPod(pod, estimationState.lastNodeName) } } return nil @@ -210,12 +213,8 @@ func (e *BinpackingNodeEstimator) addNewNodeToSnapshot( estimationState *estimationState, template *framework.NodeInfo, ) error { - newNodeInfo := scheduler.DeepCopyTemplateNode(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex)) - var pods []*apiv1.Pod - for _, podInfo := range newNodeInfo.Pods() { - pods = append(pods, podInfo.Pod) - } - if err := e.clusterSnapshot.AddNodeWithPods(newNodeInfo.Node(), pods); err != nil { + newNodeInfo := core_utils.FreshNodeInfoFromTemplateNodeInfo(template, fmt.Sprintf("e-%d", estimationState.newNodeNameIndex)) + if err := e.clusterSnapshot.AddNodeInfo(newNodeInfo); err != nil { return err } estimationState.newNodeNameIndex++ @@ -223,16 +222,3 @@ func (e *BinpackingNodeEstimator) addNewNodeToSnapshot( estimationState.newNodeNames[estimationState.lastNodeName] = true return nil } - -func (e *BinpackingNodeEstimator) tryToAddNode( - estimationState *estimationState, - pod *apiv1.Pod, - nodeName string, -) error { - if err := e.clusterSnapshot.AddPod(pod, nodeName); err != nil { - return fmt.Errorf("Error adding pod %v.%v to node %v in ClusterSnapshot; %v", pod.Namespace, pod.Name, nodeName, err) - } - estimationState.newNodesWithPods[nodeName] = true - estimationState.scheduledPods = append(estimationState.scheduledPods, pod) - return nil -} diff --git a/cluster-autoscaler/estimator/binpacking_estimator_test.go b/cluster-autoscaler/estimator/binpacking_estimator_test.go index e0fa48aeda10..ac205f16ba46 100644 --- a/cluster-autoscaler/estimator/binpacking_estimator_test.go +++ b/cluster-autoscaler/estimator/binpacking_estimator_test.go @@ -20,17 +20,15 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/autoscaler/cluster-autoscaler/utils/units" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - "github.com/stretchr/testify/assert" ) func makePodEquivalenceGroup(pod *apiv1.Pod, podCount int) PodEquivalenceGroup { @@ -66,8 +64,6 @@ func makeNode(cpu, mem, podCount int64, name string, zone string) *apiv1.Node { } func TestBinpackingEstimate(t *testing.T) { - schedulermetrics.Register() - highResourcePodGroup := makePodEquivalenceGroup( BuildTestPod( "estimatee", @@ -212,15 +208,14 @@ func TestBinpackingEstimate(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() + clusterSnapshot := testsnapshot.NewTestSnapshotOrDie(t) // Add one node in different zone to trigger topology spread constraints - clusterSnapshot.AddNode(makeNode(100, 100, 10, "oldnode", "zone-jupiter")) - - predicateChecker, err := predicatechecker.NewTestPredicateChecker() + err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(makeNode(100, 100, 10, "oldnode", "zone-jupiter"))) assert.NoError(t, err) + limiter := NewThresholdBasedEstimationLimiter([]Threshold{NewStaticThreshold(tc.maxNodes, time.Duration(0))}) processor := NewDecreasingPodOrderer() - estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */) + estimator := NewBinpackingNodeEstimator(clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */) node := makeNode(tc.millicores, tc.memory, 10, "template", "zone-mars") nodeInfo := framework.NewTestNodeInfo(node) @@ -267,14 +262,13 @@ func BenchmarkBinpackingEstimate(b *testing.B) { } for i := 0; i < b.N; i++ { - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - clusterSnapshot.AddNode(makeNode(100, 100, 10, "oldnode", "zone-jupiter")) - - predicateChecker, err := predicatechecker.NewTestPredicateChecker() + clusterSnapshot := testsnapshot.NewTestSnapshotOrDie(b) + err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(makeNode(100, 100, 10, "oldnode", "zone-jupiter"))) assert.NoError(b, err) + limiter := NewThresholdBasedEstimationLimiter([]Threshold{NewStaticThreshold(maxNodes, time.Duration(0))}) processor := NewDecreasingPodOrderer() - estimator := NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */) + estimator := NewBinpackingNodeEstimator(clusterSnapshot, limiter, processor, nil /* EstimationContext */, nil /* EstimationAnalyserFunc */) node := makeNode(millicores, memory, podsPerNode, "template", "zone-mars") nodeInfo := framework.NewTestNodeInfo(node) diff --git a/cluster-autoscaler/estimator/estimator.go b/cluster-autoscaler/estimator/estimator.go index b8e3db070349..19752115a3e6 100644 --- a/cluster-autoscaler/estimator/estimator.go +++ b/cluster-autoscaler/estimator/estimator.go @@ -23,7 +23,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" ) const ( @@ -57,7 +56,7 @@ type Estimator interface { } // EstimatorBuilder creates a new estimator object. 
-type EstimatorBuilder func(predicatechecker.PredicateChecker, clustersnapshot.ClusterSnapshot, EstimationContext) Estimator +type EstimatorBuilder func(clustersnapshot.ClusterSnapshot, EstimationContext) Estimator // EstimationAnalyserFunc to be run at the end of the estimation logic. type EstimationAnalyserFunc func(clustersnapshot.ClusterSnapshot, cloudprovider.NodeGroup, map[string]bool) @@ -67,10 +66,9 @@ func NewEstimatorBuilder(name string, limiter EstimationLimiter, orderer Estimat switch name { case BinpackingEstimatorName: return func( - predicateChecker predicatechecker.PredicateChecker, clusterSnapshot clustersnapshot.ClusterSnapshot, context EstimationContext) Estimator { - return NewBinpackingNodeEstimator(predicateChecker, clusterSnapshot, limiter, orderer, context, estimationAnalyserFunc) + return NewBinpackingNodeEstimator(clusterSnapshot, limiter, orderer, context, estimationAnalyserFunc) }, nil } return nil, fmt.Errorf("unknown estimator: %s", name) diff --git a/cluster-autoscaler/go.mod b/cluster-autoscaler/go.mod index eae107a99bee..6a8a4ee693ee 100644 --- a/cluster-autoscaler/go.mod +++ b/cluster-autoscaler/go.mod @@ -233,64 +233,4 @@ replace github.com/digitalocean/godo => github.com/digitalocean/godo v1.27.0 replace github.com/rancher/go-rancher => github.com/rancher/go-rancher v0.1.0 -replace k8s.io/api => k8s.io/api v0.32.0-alpha.3 - -replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.0-alpha.3 - -replace k8s.io/apimachinery => k8s.io/apimachinery v0.32.0-alpha.3 - -replace k8s.io/apiserver => k8s.io/apiserver v0.32.0-alpha.3 - -replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.0-alpha.3 - -replace k8s.io/client-go => k8s.io/client-go v0.32.0-alpha.3 - -replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.32.0-alpha.3 - -replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.32.0-alpha.3 - -replace k8s.io/code-generator => k8s.io/code-generator v0.32.0-alpha.3 - -replace k8s.io/component-base => k8s.io/component-base v0.32.0-alpha.3 - -replace k8s.io/component-helpers => k8s.io/component-helpers v0.32.0-alpha.3 - -replace k8s.io/controller-manager => k8s.io/controller-manager v0.32.0-alpha.3 - -replace k8s.io/cri-api => k8s.io/cri-api v0.32.0-alpha.3 - -replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.32.0-alpha.3 - -replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.32.0-alpha.3 - -replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.32.0-alpha.3 - -replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.32.0-alpha.3 - -replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.32.0-alpha.3 - -replace k8s.io/kubectl => k8s.io/kubectl v0.32.0-alpha.3 - -replace k8s.io/kubelet => k8s.io/kubelet v0.32.0-alpha.3 - -replace k8s.io/metrics => k8s.io/metrics v0.32.0-alpha.3 - -replace k8s.io/mount-utils => k8s.io/mount-utils v0.32.0-alpha.3 - -replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.32.0-alpha.3 - -replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.32.0-alpha.3 - -replace k8s.io/sample-controller => k8s.io/sample-controller v0.32.0-alpha.3 - -replace k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.32.0-alpha.3 - -replace k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.32.0-alpha.3 - -replace k8s.io/kms => k8s.io/kms v0.32.0-alpha.3 - -replace k8s.io/endpointslice => k8s.io/endpointslice v0.32.0-alpha.3 - replace k8s.io/autoscaler/cluster-autoscaler/apis => ./apis - -replace k8s.io/cri-client => 
k8s.io/cri-client v0.32.0-alpha.3 diff --git a/cluster-autoscaler/main.go b/cluster-autoscaler/main.go index cf3ca31ccbe5..f24dcf44b9b0 100644 --- a/cluster-autoscaler/main.go +++ b/cluster-autoscaler/main.go @@ -28,6 +28,7 @@ import ( "syscall" "time" + "github.com/spf13/pflag" "k8s.io/autoscaler/cluster-autoscaler/core/scaledown/actuation" "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/orchestrator" "k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot" @@ -35,12 +36,12 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/besteffortatomic" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/checkcapacity" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/predicate" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config" - "github.com/spf13/pflag" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apiserver/pkg/server/mux" @@ -68,7 +69,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/processors/scaledowncandidates/previouscandidates" "k8s.io/autoscaler/cluster-autoscaler/processors/status" provreqorchestrator "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/orchestrator" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" @@ -88,7 +88,6 @@ import ( "k8s.io/component-base/metrics/legacyregistry" "k8s.io/klog/v2" scheduler_config "k8s.io/kubernetes/pkg/scheduler/apis/config" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) // MultiStringFlag is a flag for passing multiple parameters using same flag @@ -494,7 +493,7 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot } informerFactory := informers.NewSharedInformerFactoryWithOptions(kubeClient, 0, informers.WithTransform(trim)) - predicateChecker, err := predicatechecker.NewSchedulerBasedPredicateChecker(informerFactory, autoscalingOptions.SchedulerConfig) + fwHandle, err := framework.NewHandle(informerFactory, autoscalingOptions.SchedulerConfig) if err != nil { return nil, nil, err } @@ -503,11 +502,11 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot opts := core.AutoscalerOptions{ AutoscalingOptions: autoscalingOptions, - ClusterSnapshot: clustersnapshot.NewDeltaClusterSnapshot(), + FrameworkHandle: fwHandle, + ClusterSnapshot: predicate.NewPredicateSnapshot(base.NewDeltaSnapshotBase(), fwHandle), KubeClient: kubeClient, InformerFactory: informerFactory, DebuggingSnapshotter: debuggingSnapshotter, - PredicateChecker: predicateChecker, DeleteOptions: deleteOptions, DrainabilityRules: drainabilityRules, ScaleUpOrchestrator: orchestrator.New(), @@ -515,7 +514,7 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot opts.Processors = ca_processors.DefaultProcessors(autoscalingOptions) opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nodeInfoCacheExpireTime, *forceDaemonSets) - podListProcessor := podlistprocessor.NewDefaultPodListProcessor(opts.PredicateChecker, 
scheduling.ScheduleAnywhere) + podListProcessor := podlistprocessor.NewDefaultPodListProcessor(scheduling.ScheduleAnywhere) var ProvisioningRequestInjector *provreq.ProvisioningRequestPodsInjector if autoscalingOptions.ProvisioningRequestEnabled { @@ -546,7 +545,7 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot scaleUpOrchestrator := provreqorchestrator.NewWrapperOrchestrator(provreqOrchestrator) opts.ScaleUpOrchestrator = scaleUpOrchestrator - provreqProcesor := provreq.NewProvReqProcessor(client, opts.PredicateChecker) + provreqProcesor := provreq.NewProvReqProcessor(client) opts.LoopStartNotifier = loopstart.NewObserversList([]loopstart.Observer{provreqProcesor}) podListProcessor.AddProcessor(provreqProcesor) @@ -632,7 +631,6 @@ func buildAutoscaler(context ctx.Context, debuggingSnapshotter debuggingsnapshot } func run(healthCheck *metrics.HealthCheck, debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter) { - schedulermetrics.Register() metrics.RegisterAll(*emitPerNodeGroupMetrics) context, cancel := ctx.WithCancel(ctx.Background()) defer cancel() diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go index 8b0ebd58571a..34f486392099 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor.go @@ -24,7 +24,6 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/cloudprovider" "k8s.io/autoscaler/cluster-autoscaler/context" - "k8s.io/autoscaler/cluster-autoscaler/core/utils" "k8s.io/autoscaler/cluster-autoscaler/simulator" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/errors" @@ -78,11 +77,6 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, result := make(map[string]*framework.NodeInfo) seenGroups := make(map[string]bool) - podsForNodes, err := getPodsForNodes(ctx.ListerRegistry) - if err != nil { - return map[string]*framework.NodeInfo{}, err - } - // processNode returns information whether the nodeTemplate was generated and if there was an error. processNode := func(node *apiv1.Node) (bool, string, errors.AutoscalerError) { nodeGroup, err := ctx.CloudProvider.NodeGroupForNode(node) @@ -94,22 +88,15 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, } id := nodeGroup.Id() if _, found := result[id]; !found { - // Build nodeInfo. - sanitizedNode, err := utils.SanitizeNode(node, id, taintConfig) + nodeInfo, err := ctx.ClusterSnapshot.GetNodeInfo(node.Name) if err != nil { - return false, "", err + return false, "", errors.NewAutoscalerError(errors.InternalError, "error while retrieving node %s from cluster snapshot - this shouldn't happen: %v", node.Name, err) } - nodeInfo, err := simulator.BuildNodeInfoForNode(sanitizedNode, podsForNodes[node.Name], daemonsets, p.forceDaemonSets) + templateNodeInfo, caErr := simulator.TemplateNodeInfoFromExampleNodeInfo(nodeInfo, id, daemonsets, p.forceDaemonSets, taintConfig) - if err != nil { + if caErr != nil { - return false, "", err - } - - var pods []*apiv1.Pod - for _, podInfo := range nodeInfo.Pods() { - pods = append(pods, podInfo.Pod) + return false, "", caErr } - sanitizedNodeInfo := framework.NewNodeInfo(sanitizedNode, nil, utils.SanitizePods(nodeInfo.Pods(), sanitizedNode)...)
- result[id] = sanitizedNodeInfo + result[id] = templateNodeInfo return true, id, nil } return false, "", nil @@ -125,7 +112,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, return map[string]*framework.NodeInfo{}, typedErr } if added && p.nodeInfoCache != nil { - nodeInfoCopy := utils.DeepCopyNodeInfo(result[id]) + nodeInfoCopy := result[id].DeepCopy() p.nodeInfoCache[id] = cacheItem{NodeInfo: nodeInfoCopy, added: time.Now()} } } @@ -142,7 +129,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, if p.isCacheItemExpired(cacheItem.added) { delete(p.nodeInfoCache, id) } else { - result[id] = utils.DeepCopyNodeInfo(cacheItem.NodeInfo) + result[id] = cacheItem.NodeInfo.DeepCopy() continue } } @@ -150,7 +137,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, // No good template, trying to generate one. This is called only if there are no // working nodes in the node groups. By default CA tries to use a real-world example. - nodeInfo, err := utils.GetNodeInfoFromTemplate(nodeGroup, daemonsets, taintConfig) + nodeInfo, err := simulator.TemplateNodeInfoFromNodeGroupTemplate(nodeGroup, daemonsets, taintConfig) if err != nil { if err == cloudprovider.ErrNotImplemented { continue @@ -192,19 +179,6 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext, return result, nil } -func getPodsForNodes(listers kube_util.ListerRegistry) (map[string][]*apiv1.Pod, errors.AutoscalerError) { - pods, err := listers.AllPodLister().List() - if err != nil { - return nil, errors.ToAutoscalerError(errors.ApiCallError, err) - } - scheduledPods := kube_util.ScheduledPods(pods) - podsForNodes := map[string][]*apiv1.Pod{} - for _, p := range scheduledPods { - podsForNodes[p.Spec.NodeName] = append(podsForNodes[p.Spec.NodeName], p) - } - return podsForNodes, nil -} - func isNodeGoodTemplateCandidate(node *apiv1.Node, now time.Time) bool { ready, lastTransitionTime, _ := kube_util.GetReadinessState(node) stable := lastTransitionTime.Add(stabilizationDelay).Before(now) diff --git a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go index 68e04752a8dc..172b756e6875 100644 --- a/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go +++ b/cluster-autoscaler/processors/nodeinfosprovider/mixed_nodeinfos_processor_test.go @@ -20,18 +20,17 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/context" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/taints" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" ) var ( @@ -39,8 +38,6 @@ var ( ) func TestGetNodeInfosForGroups(t *testing.T) { - schedulermetrics.Register() - now := time.Now() ready1 := BuildTestNode("n1", 1000, 1000) SetNodeReadyState(ready1, true, now.Add(-2*time.Minute)) @@ -78,17 +75,19 @@ func TestGetNodeInfosForGroups(t *testing.T) { podLister := kube_util.NewTestPodLister([]*apiv1.Pod{}) registry := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil) - predicateChecker, err := predicatechecker.NewTestPredicateChecker() + nodes := []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1} + snapshot := testsnapshot.NewTestSnapshotOrDie(t) + err := snapshot.SetClusterState(nodes, nil) assert.NoError(t, err) ctx := context.AutoscalingContext{ - CloudProvider: provider1, - PredicateChecker: predicateChecker, + CloudProvider: provider1, + ClusterSnapshot: snapshot, AutoscalingKubeClients: context.AutoscalingKubeClients{ ListerRegistry: registry, }, } - res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) + res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) assert.Equal(t, 5, len(res)) info, found := res["ng1"] @@ -109,8 +108,8 @@ func TestGetNodeInfosForGroups(t *testing.T) { // Test for a nodegroup without nodes and TemplateNodeInfo not implemented by cloud proivder ctx = context.AutoscalingContext{ - CloudProvider: provider2, - PredicateChecker: predicateChecker, + CloudProvider: provider2, + ClusterSnapshot: testsnapshot.NewTestSnapshotOrDie(t), AutoscalingKubeClients: context.AutoscalingKubeClients{ ListerRegistry: registry, }, @@ -162,19 +161,21 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) { podLister := kube_util.NewTestPodLister([]*apiv1.Pod{}) registry := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil) - predicateChecker, err := predicatechecker.NewTestPredicateChecker() + nodes := []*apiv1.Node{unready4, unready3, ready2, ready1} + snapshot := testsnapshot.NewTestSnapshotOrDie(t) + err := snapshot.SetClusterState(nodes, nil) assert.NoError(t, err) // Fill cache ctx := context.AutoscalingContext{ - CloudProvider: provider1, - PredicateChecker: predicateChecker, + CloudProvider: provider1, + ClusterSnapshot: snapshot, AutoscalingKubeClients: context.AutoscalingKubeClients{ ListerRegistry: registry, }, } niProcessor := NewMixedTemplateNodeInfoProvider(&cacheTtl, false) - res, err := niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) + res, err := niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) // Check results assert.Equal(t, 4, len(res)) @@ -208,7 +209,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) { assert.Equal(t, "ng3", lastDeletedGroup) // Check cache with all nodes removed - res, err = niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) + res, err = niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) // Check results assert.Equal(t, 2, 
len(res)) @@ -229,7 +230,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) { // Fill cache manually infoNg4Node6 := framework.NewTestNodeInfo(ready6.DeepCopy()) niProcessor.nodeInfoCache = map[string]cacheItem{"ng4": {NodeInfo: infoNg4Node6, added: now}} - res, err = niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) + res, err = niProcessor.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) // Check if cache was used assert.NoError(t, err) assert.Equal(t, 2, len(res)) @@ -250,12 +251,15 @@ func TestGetNodeInfosCacheExpired(t *testing.T) { provider := testprovider.NewTestAutoprovisioningCloudProvider(nil, nil, nil, nil, nil, nil) podLister := kube_util.NewTestPodLister([]*apiv1.Pod{}) registry := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil) - predicateChecker, err := predicatechecker.NewTestPredicateChecker() + + nodes := []*apiv1.Node{ready1} + snapshot := testsnapshot.NewTestSnapshotOrDie(t) + err := snapshot.SetClusterState(nodes, nil) assert.NoError(t, err) ctx := context.AutoscalingContext{ - CloudProvider: provider, - PredicateChecker: predicateChecker, + CloudProvider: provider, + ClusterSnapshot: snapshot, AutoscalingKubeClients: context.AutoscalingKubeClients{ ListerRegistry: registry, }, @@ -272,7 +276,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) { provider.AddNode("ng1", ready1) assert.Equal(t, 2, len(niProcessor1.nodeInfoCache)) - _, err = niProcessor1.Process(&ctx, []*apiv1.Node{ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) + _, err = niProcessor1.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) assert.Equal(t, 1, len(niProcessor1.nodeInfoCache)) @@ -283,7 +287,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) { "ng2": {NodeInfo: tni, added: now.Add(-2 * time.Second)}, } assert.Equal(t, 2, len(niProcessor2.nodeInfoCache)) - _, err = niProcessor1.Process(&ctx, []*apiv1.Node{ready1}, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) + _, err = niProcessor1.Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now) assert.NoError(t, err) assert.Equal(t, 2, len(niProcessor2.nodeInfoCache)) diff --git a/cluster-autoscaler/processors/nodes/scale_down_set_processor_test.go b/cluster-autoscaler/processors/nodes/scale_down_set_processor_test.go index dc43792c7183..af18b0ef072e 100644 --- a/cluster-autoscaler/processors/nodes/scale_down_set_processor_test.go +++ b/cluster-autoscaler/processors/nodes/scale_down_set_processor_test.go @@ -20,17 +20,16 @@ import ( "testing" "github.com/stretchr/testify/assert" + testprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test" "k8s.io/autoscaler/cluster-autoscaler/config" . "k8s.io/autoscaler/cluster-autoscaler/core/test" "k8s.io/autoscaler/cluster-autoscaler/simulator" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/client-go/kubernetes/fake" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) func TestAtomicResizeFilterUnremovableNodes(t *testing.T) { - schedulermetrics.Register() testCases := []struct { name string nodeGroups []struct { diff --git a/cluster-autoscaler/processors/podinjection/pod_injection_processor_test.go b/cluster-autoscaler/processors/podinjection/pod_injection_processor_test.go index d2f96b244585..426da16c68d1 100644 --- a/cluster-autoscaler/processors/podinjection/pod_injection_processor_test.go +++ b/cluster-autoscaler/processors/podinjection/pod_injection_processor_test.go @@ -28,7 +28,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/autoscaler/cluster-autoscaler/context" podinjectionbackoff "k8s.io/autoscaler/cluster-autoscaler/processors/podinjection/backoff" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" ) @@ -111,11 +113,9 @@ func TestTargetCountInjectionPodListProcessor(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { p := NewPodInjectionPodListProcessor(podinjectionbackoff.NewFakePodControllerRegistry()) - clusterSnapshot := clustersnapshot.NewDeltaClusterSnapshot() - clusterSnapshot.AddNode(node) - for _, pod := range tc.scheduledPods { - clusterSnapshot.AddPod(pod, node.Name) - } + clusterSnapshot := testsnapshot.NewCustomTestSnapshotOrDie(t, base.NewDeltaSnapshotBase()) + err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node, tc.scheduledPods...)) + assert.NoError(t, err) ctx := context.AutoscalingContext{ AutoscalingKubeClients: context.AutoscalingKubeClients{ ListerRegistry: kubernetes.NewListerRegistry(nil, nil, nil, nil, nil, nil, jobLister, replicaSetLister, statefulsetLister), diff --git a/cluster-autoscaler/processors/provreq/processor.go b/cluster-autoscaler/processors/provreq/processor.go index 56f52257547c..1463b1e9f6db 100644 --- a/cluster-autoscaler/processors/provreq/processor.go +++ b/cluster-autoscaler/processors/provreq/processor.go @@ -32,7 +32,6 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" "k8s.io/autoscaler/cluster-autoscaler/utils/klogx" "k8s.io/klog/v2" @@ -58,8 +57,8 @@ type provReqProcessor struct { } // NewProvReqProcessor return ProvisioningRequestProcessor. 
-func NewProvReqProcessor(client *provreqclient.ProvisioningRequestClient, predicateChecker predicatechecker.PredicateChecker) *provReqProcessor { - return &provReqProcessor{now: time.Now, maxUpdated: defaultMaxUpdated, client: client, injector: scheduling.NewHintingSimulator(predicateChecker)} +func NewProvReqProcessor(client *provreqclient.ProvisioningRequestClient) *provReqProcessor { + return &provReqProcessor{now: time.Now, maxUpdated: defaultMaxUpdated, client: client, injector: scheduling.NewHintingSimulator()} } // Refresh implements loop.Observer interface and will be run at the start diff --git a/cluster-autoscaler/processors/provreq/processor_test.go b/cluster-autoscaler/processors/provreq/processor_test.go index 20eaa0e1ffba..11daf0aa2590 100644 --- a/cluster-autoscaler/processors/provreq/processor_test.go +++ b/cluster-autoscaler/processors/provreq/processor_test.go @@ -22,11 +22,9 @@ import ( "time" "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - v1 "k8s.io/autoscaler/cluster-autoscaler/apis/provisioningrequest/autoscaling.x-k8s.io/v1" "k8s.io/autoscaler/cluster-autoscaler/config" . "k8s.io/autoscaler/cluster-autoscaler/core/test" @@ -34,6 +32,7 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient" "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqwrapper" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" ) @@ -239,8 +238,6 @@ func (f *fakeInjector) TrySchedulePods(clusterSnapshot clustersnapshot.ClusterSn } func TestBookCapacity(t *testing.T) { - schedulermetrics.Register() - testCases := []struct { name string conditions []string diff --git a/cluster-autoscaler/processors/test/common.go b/cluster-autoscaler/processors/test/common.go index dd45d6569f91..07f69c16a346 100644 --- a/cluster-autoscaler/processors/test/common.go +++ b/cluster-autoscaler/processors/test/common.go @@ -39,7 +39,7 @@ import ( // NewTestProcessors returns a set of simple processors for use in tests. 
func NewTestProcessors(context *context.AutoscalingContext) *processors.AutoscalingProcessors { return &processors.AutoscalingProcessors{ - PodListProcessor: podlistprocessor.NewDefaultPodListProcessor(context.PredicateChecker, scheduling.ScheduleAnywhere), + PodListProcessor: podlistprocessor.NewDefaultPodListProcessor(scheduling.ScheduleAnywhere), NodeGroupListProcessor: &nodegroups.NoOpNodeGroupListProcessor{}, BinpackingLimiter: binpacking.NewTimeLimiter(context.MaxNodeGroupBinpackingDuration), NodeGroupSetProcessor: nodegroupset.NewDefaultNodeGroupSetProcessor([]string{}, config.NodeGroupDifferenceRatios{}), diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go index 4008b5992246..6174749c46d7 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator.go @@ -69,7 +69,7 @@ func (o *provReqOrchestrator) Initialize( ) { o.initialized = true o.context = autoscalingContext - o.injector = scheduling.NewHintingSimulator(autoscalingContext.PredicateChecker) + o.injector = scheduling.NewHintingSimulator() for _, mode := range o.provisioningClasses { mode.Initialize(autoscalingContext, processors, clusterStateRegistry, estimatorBuilder, taintConfig, o.injector) } diff --git a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go index ba6a8e684a17..414cd43c16a7 100644 --- a/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go +++ b/cluster-autoscaler/provisioningrequest/orchestrator/orchestrator_test.go @@ -49,13 +49,9 @@ import ( . "k8s.io/autoscaler/cluster-autoscaler/utils/test" "k8s.io/client-go/kubernetes/fake" clocktesting "k8s.io/utils/clock/testing" - - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" ) func TestScaleUp(t *testing.T) { - schedulermetrics.Register() - // Set up a cluster with 200 nodes: // - 100 nodes with high cpu, low memory in autoscaled group with max 150 // - 100 nodes with high memory, low cpu not in autoscaled group diff --git a/cluster-autoscaler/provisioningrequest/pods/pods_test.go b/cluster-autoscaler/provisioningrequest/pods/pods_test.go index 48bdf0c78c6c..9e348958d3c0 100644 --- a/cluster-autoscaler/provisioningrequest/pods/pods_test.go +++ b/cluster-autoscaler/provisioningrequest/pods/pods_test.go @@ -330,6 +330,10 @@ func TestPodsForProvisioningRequest(t *testing.T) { corev1.ResourceMemory: resource.MustParse("8G"), }, }, + ResizePolicy: []corev1.ContainerResizePolicy{ + {ResourceName: corev1.ResourceCPU, RestartPolicy: corev1.NotRequired}, + {ResourceName: corev1.ResourceMemory, RestartPolicy: corev1.NotRequired}, + }, }, }, EnableServiceLinks: ptr.To(true), @@ -345,7 +349,7 @@ func TestPodsForProvisioningRequest(t *testing.T) { t.Errorf("PodsForProvisioningRequest() error = %v, wantErr %v", err, tc.wantErr) return } - if diff := cmp.Diff(got, tc.want); diff != "" { + if diff := cmp.Diff(tc.want, got); diff != "" { t.Errorf("unexpected response from PodsForProvisioningRequest(), diff (-want +got): %v", diff) } }) diff --git a/cluster-autoscaler/simulator/cluster.go b/cluster-autoscaler/simulator/cluster.go index 6855ae5efb95..36fa10ff3be9 100644 --- a/cluster-autoscaler/simulator/cluster.go +++ b/cluster-autoscaler/simulator/cluster.go @@ -26,13 +26,12 @@ import ( "k8s.io/autoscaler/cluster-autoscaler/simulator/drainability/rules" 
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" "k8s.io/autoscaler/cluster-autoscaler/utils/tpu" - klog "k8s.io/klog/v2" + "k8s.io/klog/v2" ) // NodeToBeRemoved contain information about a node that can be removed. @@ -105,15 +104,14 @@ type RemovalSimulator struct { } // NewRemovalSimulator returns a new RemovalSimulator. -func NewRemovalSimulator(listers kube_util.ListerRegistry, clusterSnapshot clustersnapshot.ClusterSnapshot, predicateChecker predicatechecker.PredicateChecker, - deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, persistSuccessfulSimulations bool) *RemovalSimulator { +func NewRemovalSimulator(listers kube_util.ListerRegistry, clusterSnapshot clustersnapshot.ClusterSnapshot, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, persistSuccessfulSimulations bool) *RemovalSimulator { return &RemovalSimulator{ listers: listers, clusterSnapshot: clusterSnapshot, canPersist: persistSuccessfulSimulations, deleteOptions: deleteOptions, drainabilityRules: drainabilityRules, - schedulingSimulator: scheduling.NewHintingSimulator(predicateChecker), + schedulingSimulator: scheduling.NewHintingSimulator(), } } @@ -223,7 +221,7 @@ func (r *RemovalSimulator) findPlaceFor(removedNode string, pods []*apiv1.Pod, n // remove pods from clusterSnapshot first for _, pod := range pods { - if err := r.clusterSnapshot.RemovePod(pod.Namespace, pod.Name, removedNode); err != nil { + if err := r.clusterSnapshot.UnschedulePod(pod.Namespace, pod.Name, removedNode); err != nil { // just log error klog.Errorf("Simulating removal of %s/%s return error; %v", pod.Namespace, pod.Name, err) } diff --git a/cluster-autoscaler/simulator/cluster_test.go b/cluster-autoscaler/simulator/cluster_test.go index e493a6672b02..2fbf1f55973f 100644 --- a/cluster-autoscaler/simulator/cluster_test.go +++ b/cluster-autoscaler/simulator/cluster_test.go @@ -21,19 +21,18 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/autoscaler/cluster-autoscaler/simulator/options" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/drain" kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -57,10 +56,10 @@ func TestFindEmptyNodes(t *testing.T) { types.ConfigMirrorAnnotationKey: "", } - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() + clusterSnapshot := testsnapshot.NewTestSnapshotOrDie(t) clustersnapshot.InitializeClusterSnapshotOrDie(t, clusterSnapshot, []*apiv1.Node{nodes[0], nodes[1], nodes[2], nodes[3]}, []*apiv1.Pod{pod1, pod2}) testTime := time.Date(2020, time.December, 18, 17, 0, 0, 0, time.UTC) - r := NewRemovalSimulator(nil, clusterSnapshot, nil, testDeleteOptions(), nil, false) + r := NewRemovalSimulator(nil, clusterSnapshot, testDeleteOptions(), nil, false) emptyNodes := r.FindEmptyNodesToRemove(nodeNames, testTime) assert.Equal(t, []string{nodeNames[0], nodeNames[2], nodeNames[3]}, emptyNodes) } @@ -75,8 +74,6 @@ type findNodesToRemoveTestConfig struct { } func TestFindNodesToRemove(t *testing.T) { - schedulermetrics.Register() - emptyNode := BuildTestNode("n1", 1000, 2000000) // two small pods backed by ReplicaSet @@ -141,9 +138,7 @@ func TestFindNodesToRemove(t *testing.T) { PodsToReschedule: []*apiv1.Pod{pod1, pod2}, } - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - predicateChecker, err := predicatechecker.NewTestPredicateChecker() - assert.NoError(t, err) + clusterSnapshot := testsnapshot.NewTestSnapshotOrDie(t) tests := []findNodesToRemoveTestConfig{ { @@ -190,7 +185,7 @@ func TestFindNodesToRemove(t *testing.T) { destinations = append(destinations, node.Name) } clustersnapshot.InitializeClusterSnapshotOrDie(t, clusterSnapshot, test.allNodes, test.pods) - r := NewRemovalSimulator(registry, clusterSnapshot, predicateChecker, testDeleteOptions(), nil, false) + r := NewRemovalSimulator(registry, clusterSnapshot, testDeleteOptions(), nil, false) toRemove, unremovable := r.FindNodesToRemove(test.candidates, destinations, time.Now(), nil) fmt.Printf("Test scenario: %s, found len(toRemove)=%v, expected len(test.toRemove)=%v\n", test.name, len(toRemove), len(test.toRemove)) assert.Equal(t, test.toRemove, toRemove) diff --git a/cluster-autoscaler/simulator/clustersnapshot/basic.go b/cluster-autoscaler/simulator/clustersnapshot/base/basic.go similarity index 72% rename from cluster-autoscaler/simulator/clustersnapshot/basic.go rename to cluster-autoscaler/simulator/clustersnapshot/base/basic.go index ce083894f64e..f6933e34365a 100644 --- a/cluster-autoscaler/simulator/clustersnapshot/basic.go +++ b/cluster-autoscaler/simulator/clustersnapshot/base/basic.go @@ -14,20 +14,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clustersnapshot +package base import ( "fmt" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) -// BasicClusterSnapshot is simple, reference implementation of ClusterSnapshot. +// BasicSnapshotBase is simple, reference implementation of SnapshotBase. // It is inefficient. But hopefully bug-free and good for initial testing. 
-type BasicClusterSnapshot struct { +type BasicSnapshotBase struct { data []*internalBasicSnapshotData } @@ -70,7 +71,7 @@ func (data *internalBasicSnapshotData) getNodeInfo(nodeName string) (*schedulerf if v, ok := data.nodeInfoMap[nodeName]; ok { return v, nil } - return nil, ErrNodeNotFound + return nil, clustersnapshot.ErrNodeNotFound } func (data *internalBasicSnapshotData) isPVCUsedByPods(key string) bool { @@ -153,18 +154,9 @@ func (data *internalBasicSnapshotData) addNode(node *apiv1.Node) error { return nil } -func (data *internalBasicSnapshotData) addNodes(nodes []*apiv1.Node) error { - for _, node := range nodes { - if err := data.addNode(node); err != nil { - return err - } - } - return nil -} - -func (data *internalBasicSnapshotData) removeNode(nodeName string) error { +func (data *internalBasicSnapshotData) removeNodeInfo(nodeName string) error { if _, found := data.nodeInfoMap[nodeName]; !found { - return ErrNodeNotFound + return clustersnapshot.ErrNodeNotFound } for _, pod := range data.nodeInfoMap[nodeName].Pods { data.removePvcUsedByPod(pod.Pod) @@ -175,7 +167,7 @@ func (data *internalBasicSnapshotData) removeNode(nodeName string) error { func (data *internalBasicSnapshotData) addPod(pod *apiv1.Pod, nodeName string) error { if _, found := data.nodeInfoMap[nodeName]; !found { - return ErrNodeNotFound + return clustersnapshot.ErrNodeNotFound } data.nodeInfoMap[nodeName].AddPod(pod) data.addPvcUsedByPod(pod) @@ -185,7 +177,7 @@ func (data *internalBasicSnapshotData) addPod(pod *apiv1.Pod, nodeName string) e func (data *internalBasicSnapshotData) removePod(namespace, podName, nodeName string) error { nodeInfo, found := data.nodeInfoMap[nodeName] if !found { - return ErrNodeNotFound + return clustersnapshot.ErrNodeNotFound } logger := klog.Background() for _, podInfo := range nodeInfo.Pods { @@ -202,19 +194,19 @@ func (data *internalBasicSnapshotData) removePod(namespace, podName, nodeName st return fmt.Errorf("pod %s/%s not in snapshot", namespace, podName) } -// NewBasicClusterSnapshot creates instances of BasicClusterSnapshot. -func NewBasicClusterSnapshot() *BasicClusterSnapshot { - snapshot := &BasicClusterSnapshot{} - snapshot.Clear() +// NewBasicSnapshotBase creates instances of BasicSnapshotBase. +func NewBasicSnapshotBase() *BasicSnapshotBase { + snapshot := &BasicSnapshotBase{} + snapshot.clear() return snapshot } -func (snapshot *BasicClusterSnapshot) getInternalData() *internalBasicSnapshotData { +func (snapshot *BasicSnapshotBase) getInternalData() *internalBasicSnapshotData { return snapshot.data[len(snapshot.data)-1] } // GetNodeInfo gets a NodeInfo. -func (snapshot *BasicClusterSnapshot) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) { +func (snapshot *BasicSnapshotBase) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) { schedNodeInfo, err := snapshot.getInternalData().getNodeInfo(nodeName) if err != nil { return nil, err @@ -223,13 +215,13 @@ func (snapshot *BasicClusterSnapshot) GetNodeInfo(nodeName string) (*framework.N } // ListNodeInfos lists NodeInfos. -func (snapshot *BasicClusterSnapshot) ListNodeInfos() ([]*framework.NodeInfo, error) { +func (snapshot *BasicSnapshotBase) ListNodeInfos() ([]*framework.NodeInfo, error) { schedNodeInfos := snapshot.getInternalData().listNodeInfos() return framework.WrapSchedulerNodeInfos(schedNodeInfos), nil } // AddNodeInfo adds a NodeInfo. 
-func (snapshot *BasicClusterSnapshot) AddNodeInfo(nodeInfo *framework.NodeInfo) error { +func (snapshot *BasicSnapshotBase) AddNodeInfo(nodeInfo *framework.NodeInfo) error { if err := snapshot.getInternalData().addNode(nodeInfo.Node()); err != nil { return err } @@ -241,57 +233,55 @@ func (snapshot *BasicClusterSnapshot) AddNodeInfo(nodeInfo *framework.NodeInfo) return nil } -// AddNode adds node to the snapshot. -func (snapshot *BasicClusterSnapshot) AddNode(node *apiv1.Node) error { - return snapshot.getInternalData().addNode(node) -} - -// AddNodes adds nodes in batch to the snapshot. -func (snapshot *BasicClusterSnapshot) AddNodes(nodes []*apiv1.Node) error { - return snapshot.getInternalData().addNodes(nodes) -} +// SetClusterState sets the cluster state. +func (snapshot *BasicSnapshotBase) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod) error { + snapshot.clear() -// AddNodeWithPods adds a node and set of pods to be scheduled to this node to the snapshot. -func (snapshot *BasicClusterSnapshot) AddNodeWithPods(node *apiv1.Node, pods []*apiv1.Pod) error { - if err := snapshot.AddNode(node); err != nil { - return err - } - for _, pod := range pods { - if err := snapshot.AddPod(pod, node.Name); err != nil { + knownNodes := make(map[string]bool) + for _, node := range nodes { + if err := snapshot.getInternalData().addNode(node); err != nil { return err } + knownNodes[node.Name] = true + } + for _, pod := range scheduledPods { + if knownNodes[pod.Spec.NodeName] { + if err := snapshot.getInternalData().addPod(pod, pod.Spec.NodeName); err != nil { + return err + } + } } return nil } -// RemoveNode removes nodes (and pods scheduled to it) from the snapshot. -func (snapshot *BasicClusterSnapshot) RemoveNode(nodeName string) error { - return snapshot.getInternalData().removeNode(nodeName) +// RemoveNodeInfo removes nodes (and pods scheduled to it) from the snapshot. +func (snapshot *BasicSnapshotBase) RemoveNodeInfo(nodeName string) error { + return snapshot.getInternalData().removeNodeInfo(nodeName) } -// AddPod adds pod to the snapshot and schedules it to given node. -func (snapshot *BasicClusterSnapshot) AddPod(pod *apiv1.Pod, nodeName string) error { +// ForceAddPod adds pod to the snapshot and schedules it to given node. +func (snapshot *BasicSnapshotBase) ForceAddPod(pod *apiv1.Pod, nodeName string) error { return snapshot.getInternalData().addPod(pod, nodeName) } -// RemovePod removes pod from the snapshot. -func (snapshot *BasicClusterSnapshot) RemovePod(namespace, podName, nodeName string) error { +// ForceRemovePod removes pod from the snapshot. +func (snapshot *BasicSnapshotBase) ForceRemovePod(namespace, podName, nodeName string) error { return snapshot.getInternalData().removePod(namespace, podName, nodeName) } // IsPVCUsedByPods returns if the pvc is used by any pod -func (snapshot *BasicClusterSnapshot) IsPVCUsedByPods(key string) bool { +func (snapshot *BasicSnapshotBase) IsPVCUsedByPods(key string) bool { return snapshot.getInternalData().isPVCUsedByPods(key) } // Fork creates a fork of snapshot state. All modifications can later be reverted to moment of forking via Revert() -func (snapshot *BasicClusterSnapshot) Fork() { +func (snapshot *BasicSnapshotBase) Fork() { forkData := snapshot.getInternalData().clone() snapshot.data = append(snapshot.data, forkData) } // Revert reverts snapshot state to moment of forking. 
-func (snapshot *BasicClusterSnapshot) Revert() { +func (snapshot *BasicSnapshotBase) Revert() { if len(snapshot.data) == 1 { return } @@ -299,7 +289,7 @@ func (snapshot *BasicClusterSnapshot) Revert() { } // Commit commits changes done after forking. -func (snapshot *BasicClusterSnapshot) Commit() error { +func (snapshot *BasicSnapshotBase) Commit() error { if len(snapshot.data) <= 1 { // do nothing return nil @@ -308,48 +298,48 @@ func (snapshot *BasicClusterSnapshot) Commit() error { return nil } -// Clear reset cluster snapshot to empty, unforked state -func (snapshot *BasicClusterSnapshot) Clear() { +// clear reset cluster snapshot to empty, unforked state +func (snapshot *BasicSnapshotBase) clear() { baseData := newInternalBasicSnapshotData() snapshot.data = []*internalBasicSnapshotData{baseData} } // implementation of SharedLister interface -type basicClusterSnapshotNodeLister BasicClusterSnapshot -type basicClusterSnapshotStorageLister BasicClusterSnapshot +type basicClusterSnapshotNodeLister BasicSnapshotBase +type basicClusterSnapshotStorageLister BasicSnapshotBase // NodeInfos exposes snapshot as NodeInfoLister. -func (snapshot *BasicClusterSnapshot) NodeInfos() schedulerframework.NodeInfoLister { +func (snapshot *BasicSnapshotBase) NodeInfos() schedulerframework.NodeInfoLister { return (*basicClusterSnapshotNodeLister)(snapshot) } // StorageInfos exposes snapshot as StorageInfoLister. -func (snapshot *BasicClusterSnapshot) StorageInfos() schedulerframework.StorageInfoLister { +func (snapshot *BasicSnapshotBase) StorageInfos() schedulerframework.StorageInfoLister { return (*basicClusterSnapshotStorageLister)(snapshot) } // List returns the list of nodes in the snapshot. func (snapshot *basicClusterSnapshotNodeLister) List() ([]*schedulerframework.NodeInfo, error) { - return (*BasicClusterSnapshot)(snapshot).getInternalData().listNodeInfos(), nil + return (*BasicSnapshotBase)(snapshot).getInternalData().listNodeInfos(), nil } // HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity func (snapshot *basicClusterSnapshotNodeLister) HavePodsWithAffinityList() ([]*schedulerframework.NodeInfo, error) { - return (*BasicClusterSnapshot)(snapshot).getInternalData().listNodeInfosThatHavePodsWithAffinityList() + return (*BasicSnapshotBase)(snapshot).getInternalData().listNodeInfosThatHavePodsWithAffinityList() } // HavePodsWithRequiredAntiAffinityList returns the list of NodeInfos of nodes with pods with required anti-affinity terms. func (snapshot *basicClusterSnapshotNodeLister) HavePodsWithRequiredAntiAffinityList() ([]*schedulerframework.NodeInfo, error) { - return (*BasicClusterSnapshot)(snapshot).getInternalData().listNodeInfosThatHavePodsWithRequiredAntiAffinityList() + return (*BasicSnapshotBase)(snapshot).getInternalData().listNodeInfosThatHavePodsWithRequiredAntiAffinityList() } // Returns the NodeInfo of the given node name. func (snapshot *basicClusterSnapshotNodeLister) Get(nodeName string) (*schedulerframework.NodeInfo, error) { - return (*BasicClusterSnapshot)(snapshot).getInternalData().getNodeInfo(nodeName) + return (*BasicSnapshotBase)(snapshot).getInternalData().getNodeInfo(nodeName) } // Returns the IsPVCUsedByPods in a given key. 
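// A minimal usage sketch of the BasicSnapshotBase API above (illustrative only: this demo
// function and the node/pod names and sizes are invented, not part of the patch). SetClusterState
// seeds the whole snapshot in one call, ForceAddPod/ForceRemovePod mutate it without running any
// scheduler predicates, and Fork/Revert bracket speculative changes.

package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base"
	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

func demoBasicSnapshotBase() error {
	snapshot := base.NewBasicSnapshotBase()

	// Seed the snapshot; scheduled pods are matched to nodes via pod.Spec.NodeName.
	node := BuildTestNode("demo-node", 1000, 2000000)
	pod := BuildTestPod("demo-pod", 500, 500000)
	pod.Spec.NodeName = node.Name
	if err := snapshot.SetClusterState([]*apiv1.Node{node}, []*apiv1.Pod{pod}); err != nil {
		return err
	}

	// Speculative change: fork, force-add a pod bypassing predicates, then throw it away.
	snapshot.Fork()
	extra := BuildTestPod("demo-extra-pod", 100, 100000)
	if err := snapshot.ForceAddPod(extra, node.Name); err != nil {
		return err
	}
	snapshot.Revert()

	// After Revert the snapshot is back to the state established by SetClusterState.
	_, err := snapshot.GetNodeInfo(node.Name)
	return err
}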
func (snapshot *basicClusterSnapshotStorageLister) IsPVCUsedByPods(key string) bool { - return (*BasicClusterSnapshot)(snapshot).getInternalData().isPVCUsedByPods(key) + return (*BasicSnapshotBase)(snapshot).getInternalData().isPVCUsedByPods(key) } diff --git a/cluster-autoscaler/simulator/clustersnapshot/delta.go b/cluster-autoscaler/simulator/clustersnapshot/base/delta.go similarity index 80% rename from cluster-autoscaler/simulator/clustersnapshot/delta.go rename to cluster-autoscaler/simulator/clustersnapshot/base/delta.go index 9559b43dbb78..12c022f461b6 100644 --- a/cluster-autoscaler/simulator/clustersnapshot/delta.go +++ b/cluster-autoscaler/simulator/clustersnapshot/base/delta.go @@ -14,18 +14,19 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clustersnapshot +package base import ( "fmt" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/klog/v2" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" ) -// DeltaClusterSnapshot is an implementation of ClusterSnapshot optimized for typical Cluster Autoscaler usage - (fork, add stuff, revert), repeated many times per loop. +// DeltaSnapshotBase is an implementation of SnapshotBase optimized for typical Cluster Autoscaler usage - (fork, add stuff, revert), repeated many times per loop. // // Complexity of some notable operations: // @@ -42,12 +43,12 @@ import ( // (when forked affects delta, but not base.) // pod affinity - causes scheduler framework to list pods with non-empty selector, // so basic caching doesn't help. -type DeltaClusterSnapshot struct { +type DeltaSnapshotBase struct { data *internalDeltaSnapshotData } -type deltaSnapshotNodeLister DeltaClusterSnapshot -type deltaSnapshotStorageLister DeltaClusterSnapshot +type deltaSnapshotNodeLister DeltaSnapshotBase +type deltaSnapshotStorageLister DeltaSnapshotBase type internalDeltaSnapshotData struct { baseData *internalDeltaSnapshotData @@ -136,16 +137,6 @@ func (data *internalDeltaSnapshotData) buildNodeInfoList() []*schedulerframework return nodeInfoList } -// Convenience method to avoid writing loop for adding nodes. -func (data *internalDeltaSnapshotData) addNodes(nodes []*apiv1.Node) error { - for _, node := range nodes { - if err := data.addNode(node); err != nil { - return err - } - } - return nil -} - func (data *internalDeltaSnapshotData) addNode(node *apiv1.Node) error { nodeInfo := schedulerframework.NewNodeInfo() nodeInfo.SetNode(node) @@ -187,7 +178,7 @@ func (data *internalDeltaSnapshotData) clearPodCaches() { data.pvcNamespaceMap = nil } -func (data *internalDeltaSnapshotData) removeNode(nodeName string) error { +func (data *internalDeltaSnapshotData) removeNodeInfo(nodeName string) error { _, foundInDelta := data.addedNodeInfoMap[nodeName] if foundInDelta { // If node was added within this delta, delete this change. @@ -201,7 +192,7 @@ func (data *internalDeltaSnapshotData) removeNode(nodeName string) error { if _, deleted := data.deletedNodeInfos[nodeName]; deleted { // If node was deleted within this delta, fail with error. - return ErrNodeNotFound + return clustersnapshot.ErrNodeNotFound } _, foundInBase := data.baseData.getNodeInfo(nodeName) @@ -212,7 +203,7 @@ func (data *internalDeltaSnapshotData) removeNode(nodeName string) error { if !foundInBase && !foundInDelta { // Node not found in the chain. 
- return ErrNodeNotFound + return clustersnapshot.ErrNodeNotFound } // Maybe consider deleting from the lists instead. Maybe not. @@ -240,7 +231,7 @@ func (data *internalDeltaSnapshotData) nodeInfoToModify(nodeName string) (*sched func (data *internalDeltaSnapshotData) addPod(pod *apiv1.Pod, nodeName string) error { ni, found := data.nodeInfoToModify(nodeName) if !found { - return ErrNodeNotFound + return clustersnapshot.ErrNodeNotFound } ni.AddPod(pod) @@ -256,7 +247,7 @@ func (data *internalDeltaSnapshotData) removePod(namespace, name, nodeName strin // probably means things are very bad anyway. ni, found := data.nodeInfoToModify(nodeName) if !found { - return ErrNodeNotFound + return clustersnapshot.ErrNodeNotFound } podFound := false @@ -306,12 +297,12 @@ func (data *internalDeltaSnapshotData) commit() (*internalDeltaSnapshotData, err return data, nil } for node := range data.deletedNodeInfos { - if err := data.baseData.removeNode(node); err != nil { + if err := data.baseData.removeNodeInfo(node); err != nil { return nil, err } } for _, node := range data.modifiedNodeInfoMap { - if err := data.baseData.removeNode(node.Node().Name); err != nil { + if err := data.baseData.removeNodeInfo(node.Node().Name); err != nil { return nil, err } if err := data.baseData.addNodeInfo(node); err != nil { @@ -369,42 +360,42 @@ func (snapshot *deltaSnapshotNodeLister) HavePodsWithRequiredAntiAffinityList() // Get returns node info by node name. func (snapshot *deltaSnapshotNodeLister) Get(nodeName string) (*schedulerframework.NodeInfo, error) { - return (*DeltaClusterSnapshot)(snapshot).getNodeInfo(nodeName) + return (*DeltaSnapshotBase)(snapshot).getNodeInfo(nodeName) } // IsPVCUsedByPods returns if PVC is used by pods func (snapshot *deltaSnapshotStorageLister) IsPVCUsedByPods(key string) bool { - return (*DeltaClusterSnapshot)(snapshot).IsPVCUsedByPods(key) + return (*DeltaSnapshotBase)(snapshot).IsPVCUsedByPods(key) } -func (snapshot *DeltaClusterSnapshot) getNodeInfo(nodeName string) (*schedulerframework.NodeInfo, error) { +func (snapshot *DeltaSnapshotBase) getNodeInfo(nodeName string) (*schedulerframework.NodeInfo, error) { data := snapshot.data node, found := data.getNodeInfo(nodeName) if !found { - return nil, ErrNodeNotFound + return nil, clustersnapshot.ErrNodeNotFound } return node, nil } // NodeInfos returns node lister. -func (snapshot *DeltaClusterSnapshot) NodeInfos() schedulerframework.NodeInfoLister { +func (snapshot *DeltaSnapshotBase) NodeInfos() schedulerframework.NodeInfoLister { return (*deltaSnapshotNodeLister)(snapshot) } // StorageInfos returns storage lister -func (snapshot *DeltaClusterSnapshot) StorageInfos() schedulerframework.StorageInfoLister { +func (snapshot *DeltaSnapshotBase) StorageInfos() schedulerframework.StorageInfoLister { return (*deltaSnapshotStorageLister)(snapshot) } -// NewDeltaClusterSnapshot creates instances of DeltaClusterSnapshot. -func NewDeltaClusterSnapshot() *DeltaClusterSnapshot { - snapshot := &DeltaClusterSnapshot{} - snapshot.Clear() +// NewDeltaSnapshotBase creates instances of DeltaSnapshotBase. +func NewDeltaSnapshotBase() *DeltaSnapshotBase { + snapshot := &DeltaSnapshotBase{} + snapshot.clear() return snapshot } // GetNodeInfo gets a NodeInfo. 
-func (snapshot *DeltaClusterSnapshot) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) { +func (snapshot *DeltaSnapshotBase) GetNodeInfo(nodeName string) (*framework.NodeInfo, error) { schedNodeInfo, err := snapshot.getNodeInfo(nodeName) if err != nil { return nil, err @@ -413,13 +404,13 @@ func (snapshot *DeltaClusterSnapshot) GetNodeInfo(nodeName string) (*framework.N } // ListNodeInfos lists NodeInfos. -func (snapshot *DeltaClusterSnapshot) ListNodeInfos() ([]*framework.NodeInfo, error) { +func (snapshot *DeltaSnapshotBase) ListNodeInfos() ([]*framework.NodeInfo, error) { schedNodeInfos := snapshot.data.getNodeInfoList() return framework.WrapSchedulerNodeInfos(schedNodeInfos), nil } // AddNodeInfo adds a NodeInfo. -func (snapshot *DeltaClusterSnapshot) AddNodeInfo(nodeInfo *framework.NodeInfo) error { +func (snapshot *DeltaSnapshotBase) AddNodeInfo(nodeInfo *framework.NodeInfo) error { if err := snapshot.data.addNode(nodeInfo.Node()); err != nil { return err } @@ -431,58 +422,56 @@ func (snapshot *DeltaClusterSnapshot) AddNodeInfo(nodeInfo *framework.NodeInfo) return nil } -// AddNode adds node to the snapshot. -func (snapshot *DeltaClusterSnapshot) AddNode(node *apiv1.Node) error { - return snapshot.data.addNode(node) -} - -// AddNodes adds nodes in batch to the snapshot. -func (snapshot *DeltaClusterSnapshot) AddNodes(nodes []*apiv1.Node) error { - return snapshot.data.addNodes(nodes) -} +// SetClusterState sets the cluster state. +func (snapshot *DeltaSnapshotBase) SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod) error { + snapshot.clear() -// AddNodeWithPods adds a node and set of pods to be scheduled to this node to the snapshot. -func (snapshot *DeltaClusterSnapshot) AddNodeWithPods(node *apiv1.Node, pods []*apiv1.Pod) error { - if err := snapshot.AddNode(node); err != nil { - return err - } - for _, pod := range pods { - if err := snapshot.AddPod(pod, node.Name); err != nil { + knownNodes := make(map[string]bool) + for _, node := range nodes { + if err := snapshot.data.addNode(node); err != nil { return err } + knownNodes[node.Name] = true + } + for _, pod := range scheduledPods { + if knownNodes[pod.Spec.NodeName] { + if err := snapshot.data.addPod(pod, pod.Spec.NodeName); err != nil { + return err + } + } } return nil } -// RemoveNode removes nodes (and pods scheduled to it) from the snapshot. -func (snapshot *DeltaClusterSnapshot) RemoveNode(nodeName string) error { - return snapshot.data.removeNode(nodeName) +// RemoveNodeInfo removes nodes (and pods scheduled to it) from the snapshot. +func (snapshot *DeltaSnapshotBase) RemoveNodeInfo(nodeName string) error { + return snapshot.data.removeNodeInfo(nodeName) } -// AddPod adds pod to the snapshot and schedules it to given node. -func (snapshot *DeltaClusterSnapshot) AddPod(pod *apiv1.Pod, nodeName string) error { +// ForceAddPod adds pod to the snapshot and schedules it to given node. +func (snapshot *DeltaSnapshotBase) ForceAddPod(pod *apiv1.Pod, nodeName string) error { return snapshot.data.addPod(pod, nodeName) } -// RemovePod removes pod from the snapshot. -func (snapshot *DeltaClusterSnapshot) RemovePod(namespace, podName, nodeName string) error { +// ForceRemovePod removes pod from the snapshot. 
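// An illustrative sketch (this demo function is invented, not part of the patch) of the same
// SnapshotBase surface on DeltaSnapshotBase, the fork-optimized implementation. Note the
// SetClusterState contract visible above: it resets any forks and only attaches scheduled pods
// whose Spec.NodeName points at one of the nodes passed in the same call; other pods are skipped.

package example

import (
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base"
)

func demoDeltaSnapshotBase() error {
	snapshot := base.NewDeltaSnapshotBase()

	// Test helpers from the clustersnapshot package pair pods with nodes via Spec.NodeName.
	nodes := clustersnapshot.CreateTestNodes(3)
	pods := clustersnapshot.CreateTestPods(6)
	clustersnapshot.AssignTestPodsToNodes(pods, nodes)
	if err := snapshot.SetClusterState(nodes, pods); err != nil {
		return err
	}

	// Fork and Revert are cheap here, which is what the repeated simulate-and-roll-back loops rely on.
	snapshot.Fork()
	if err := snapshot.RemoveNodeInfo(nodes[0].Name); err != nil {
		return err
	}
	snapshot.Revert()
	return nil
}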
+func (snapshot *DeltaSnapshotBase) ForceRemovePod(namespace, podName, nodeName string) error { return snapshot.data.removePod(namespace, podName, nodeName) } // IsPVCUsedByPods returns if the pvc is used by any pod -func (snapshot *DeltaClusterSnapshot) IsPVCUsedByPods(key string) bool { +func (snapshot *DeltaSnapshotBase) IsPVCUsedByPods(key string) bool { return snapshot.data.isPVCUsedByPods(key) } // Fork creates a fork of snapshot state. All modifications can later be reverted to moment of forking via Revert() // Time: O(1) -func (snapshot *DeltaClusterSnapshot) Fork() { +func (snapshot *DeltaSnapshotBase) Fork() { snapshot.data = snapshot.data.fork() } // Revert reverts snapshot state to moment of forking. // Time: O(1) -func (snapshot *DeltaClusterSnapshot) Revert() { +func (snapshot *DeltaSnapshotBase) Revert() { if snapshot.data.baseData != nil { snapshot.data = snapshot.data.baseData } @@ -490,7 +479,7 @@ func (snapshot *DeltaClusterSnapshot) Revert() { // Commit commits changes done after forking. // Time: O(n), where n = size of delta (number of nodes added, modified or deleted since forking) -func (snapshot *DeltaClusterSnapshot) Commit() error { +func (snapshot *DeltaSnapshotBase) Commit() error { newData, err := snapshot.data.commit() if err != nil { return err @@ -501,6 +490,6 @@ func (snapshot *DeltaClusterSnapshot) Commit() error { // Clear reset cluster snapshot to empty, unforked state // Time: O(1) -func (snapshot *DeltaClusterSnapshot) Clear() { +func (snapshot *DeltaSnapshotBase) clear() { snapshot.data = newInternalDeltaSnapshotData() } diff --git a/cluster-autoscaler/simulator/clustersnapshot/base/delta_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/base/delta_benchmark_test.go new file mode 100644 index 000000000000..2a527a5417df --- /dev/null +++ b/cluster-autoscaler/simulator/clustersnapshot/base/delta_benchmark_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package base + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" +) + +func BenchmarkBuildNodeInfoList(b *testing.B) { + testCases := []struct { + nodeCount int + }{ + { + nodeCount: 1000, + }, + { + nodeCount: 5000, + }, + { + nodeCount: 15000, + }, + { + nodeCount: 100000, + }, + } + + for _, tc := range testCases { + b.Run(fmt.Sprintf("fork add 1000 to %d", tc.nodeCount), func(b *testing.B) { + nodes := clustersnapshot.CreateTestNodes(tc.nodeCount + 1000) + clusterSnapshot := NewDeltaSnapshotBase() + if err := clusterSnapshot.SetClusterState(nodes[:tc.nodeCount], nil); err != nil { + assert.NoError(b, err) + } + clusterSnapshot.Fork() + for _, node := range nodes[tc.nodeCount:] { + if err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node)); err != nil { + assert.NoError(b, err) + } + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + list := clusterSnapshot.data.buildNodeInfoList() + if len(list) != tc.nodeCount+1000 { + assert.Equal(b, len(list), tc.nodeCount+1000) + } + } + }) + } + for _, tc := range testCases { + b.Run(fmt.Sprintf("base %d", tc.nodeCount), func(b *testing.B) { + nodes := clustersnapshot.CreateTestNodes(tc.nodeCount) + clusterSnapshot := NewDeltaSnapshotBase() + if err := clusterSnapshot.SetClusterState(nodes, nil); err != nil { + assert.NoError(b, err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + list := clusterSnapshot.data.buildNodeInfoList() + if len(list) != tc.nodeCount { + assert.Equal(b, len(list), tc.nodeCount) + } + } + }) + } +} diff --git a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go index a80c85c22d22..b80335edcc9b 100644 --- a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go +++ b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot.go @@ -28,25 +28,50 @@ import ( // ClusterSnapshot is abstraction of cluster state used for predicate simulations. // It exposes mutation methods and can be viewed as scheduler's SharedLister. type ClusterSnapshot interface { + SnapshotBase + + // SchedulePod tries to schedule the given Pod on the Node with the given name inside the snapshot, + // checking scheduling predicates. The pod is only scheduled if the predicates pass. If the pod is scheduled, + // all relevant DRA objects are modified to reflect that. Returns nil if the pod got scheduled, and a non-nil + // error explaining why not otherwise. The error type can be checked to distinguish failing predicates + // from unexpected errors. + SchedulePod(pod *apiv1.Pod, nodeName string) SchedulingError + // SchedulePodOnAnyNodeMatching tries to schedule the given Pod on any Node for which nodeMatches returns + // true. Scheduling predicates are checked, and the pod is scheduled only if there is a matching Node with passing + // predicates. If the pod is scheduled, all relevant DRA objects are modified to reflect that, and the name of the + // Node its scheduled on and nil are returned. If the pod can't be scheduled on any Node, an empty string and a non-nil + // error explaining why are returned. The error type can be checked to distinguish failing predicates from unexpected errors. 
+ SchedulePodOnAnyNodeMatching(pod *apiv1.Pod, nodeMatches func(*framework.NodeInfo) bool) (matchingNode string, err SchedulingError) + // UnschedulePod removes the given Pod from the given Node inside the snapshot, and modifies all relevant DRA objects + // to reflect the removal. The pod can then be scheduled on another Node in the snapshot using the Schedule methods. + UnschedulePod(namespace string, podName string, nodeName string) error + + // CheckPredicates runs scheduler predicates to check if the given Pod would be able to schedule on the Node with the given + // name. Returns nil if predicates pass, or a non-nil error specifying why they didn't otherwise. The error type can be checked + // to distinguish failing predicates from unexpected errors. Doesn't mutate the snapshot. + CheckPredicates(pod *apiv1.Pod, nodeName string) SchedulingError +} + +// SnapshotBase is the "low-level" part of ClusterSnapshot. Mutation methods modify the snapshot state directly, without going +// through scheduler predicates. +type SnapshotBase interface { schedulerframework.SharedLister - // AddNode adds node to the snapshot. - AddNode(node *apiv1.Node) error - // AddNodes adds nodes to the snapshot. - AddNodes(nodes []*apiv1.Node) error - // RemoveNode removes nodes (and pods scheduled to it) from the snapshot. - RemoveNode(nodeName string) error - // AddPod adds pod to the snapshot and schedules it to given node. - AddPod(pod *apiv1.Pod, nodeName string) error - // RemovePod removes pod from the snapshot. - RemovePod(namespace string, podName string, nodeName string) error - // AddNodeWithPods adds a node and set of pods to be scheduled to this node to the snapshot. - AddNodeWithPods(node *apiv1.Node, pods []*apiv1.Pod) error - // IsPVCUsedByPods returns if the pvc is used by any pod, key = / - IsPVCUsedByPods(key string) bool - - // AddNodeInfo adds the given NodeInfo to the snapshot. The Node and the Pods are added, as well as - // any DRA objects passed along them. + + // SetClusterState resets the snapshot to an unforked state and replaces the contents of the snapshot + // with the provided data. scheduledPods are correlated to their Nodes based on spec.NodeName. + SetClusterState(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod) error + + // ForceAddPod adds the given Pod to the Node with the given nodeName inside the snapshot. + ForceAddPod(pod *apiv1.Pod, nodeName string) error + // ForceRemovePod removes the given Pod (and all DRA objects it owns) from the snapshot. + ForceRemovePod(namespace string, podName string, nodeName string) error + + // AddNodeInfo adds the given NodeInfo to the snapshot without checking scheduler predicates. The Node and the Pods are added, + // as well as any DRA objects passed along them. AddNodeInfo(nodeInfo *framework.NodeInfo) error + // RemoveNodeInfo removes the given NodeInfo from the snapshot The Node and the Pods are removed, as well as + // any DRA objects owned by them. + RemoveNodeInfo(nodeName string) error // GetNodeInfo returns an internal NodeInfo for a given Node - all information about the Node tracked in the snapshot. // This means the Node itself, its scheduled Pods, as well as all relevant DRA objects. The internal NodeInfos // obtained via this method should always be used in CA code instead of directly using *schedulerframework.NodeInfo. @@ -61,8 +86,6 @@ type ClusterSnapshot interface { Revert() // Commit commits changes done after forking. Commit() error - // Clear reset cluster snapshot to empty, unforked state. 
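// An illustrative sketch of how the interface split above is meant to be consumed (the function,
// the "n1" node name and the accept-everything callback are invented for the example): the
// ClusterSnapshot methods go through scheduler predicates and report SchedulingError, while the
// SnapshotBase methods such as ForceAddPod bypass them.

package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

func demoScheduling(snapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod) error {
	// Non-mutating check against one specific node.
	if checkErr := snapshot.CheckPredicates(pod, "n1"); checkErr == nil {
		// Predicates pass on "n1"; SchedulePod re-checks them and records the pod there.
		return snapshot.SchedulePod(pod, "n1")
	}

	// Otherwise let the snapshot pick any node that both matches the callback and passes predicates.
	nodeName, schedErr := snapshot.SchedulePodOnAnyNodeMatching(pod, func(ni *framework.NodeInfo) bool {
		return true // accept every node in this sketch
	})
	if schedErr != nil {
		return schedErr
	}

	// The pod can be moved later: UnschedulePod removes it so another Schedule* call can place it again.
	return snapshot.UnschedulePod(pod.Namespace, pod.Name, nodeName)
}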
- Clear() } // ErrNodeNotFound means that a node wasn't found in the snapshot. diff --git a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_benchmark_test.go deleted file mode 100644 index cf851773537e..000000000000 --- a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_benchmark_test.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clustersnapshot - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - . "k8s.io/autoscaler/cluster-autoscaler/utils/test" - - apiv1 "k8s.io/api/core/v1" -) - -func createTestNodesWithPrefix(prefix string, n int) []*apiv1.Node { - nodes := make([]*apiv1.Node, n, n) - for i := 0; i < n; i++ { - nodes[i] = BuildTestNode(fmt.Sprintf("%s-%d", prefix, i), 2000, 2000000) - SetNodeReadyState(nodes[i], true, time.Time{}) - } - return nodes -} - -func createTestNodes(n int) []*apiv1.Node { - return createTestNodesWithPrefix("n", n) -} - -func createTestPodsWithPrefix(prefix string, n int) []*apiv1.Pod { - pods := make([]*apiv1.Pod, n, n) - for i := 0; i < n; i++ { - pods[i] = BuildTestPod(fmt.Sprintf("%s-%d", prefix, i), 1000, 2000000) - } - return pods -} - -func createTestPods(n int) []*apiv1.Pod { - return createTestPodsWithPrefix("p", n) -} - -func assignPodsToNodes(pods []*apiv1.Pod, nodes []*apiv1.Node) { - if len(nodes) == 0 { - return - } - - j := 0 - for i := 0; i < len(pods); i++ { - if j >= len(nodes) { - j = 0 - } - pods[i].Spec.NodeName = nodes[j].Name - j++ - } -} - -func BenchmarkAddNodes(b *testing.B) { - testCases := []int{1, 10, 100, 1000, 5000, 15000, 100000} - - for snapshotName, snapshotFactory := range snapshots { - for _, tc := range testCases { - nodes := createTestNodes(tc) - clusterSnapshot := snapshotFactory() - b.ResetTimer() - b.Run(fmt.Sprintf("%s: AddNode() %d", snapshotName, tc), func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - clusterSnapshot.Clear() - b.StartTimer() - for _, node := range nodes { - err := clusterSnapshot.AddNode(node) - if err != nil { - assert.NoError(b, err) - } - } - } - }) - } - } - for snapshotName, snapshotFactory := range snapshots { - for _, tc := range testCases { - nodes := createTestNodes(tc) - clusterSnapshot := snapshotFactory() - b.ResetTimer() - b.Run(fmt.Sprintf("%s: AddNodes() %d", snapshotName, tc), func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - clusterSnapshot.Clear() - b.StartTimer() - err := clusterSnapshot.AddNodes(nodes) - if err != nil { - assert.NoError(b, err) - } - } - }) - } - } -} - -func BenchmarkListNodeInfos(b *testing.B) { - testCases := []int{1, 10, 100, 1000, 5000, 15000, 100000} - - for snapshotName, snapshotFactory := range snapshots { - for _, tc := range testCases { - nodes := createTestNodes(tc) - clusterSnapshot := snapshotFactory() - err := clusterSnapshot.AddNodes(nodes) - if err != nil { - assert.NoError(b, err) - } - b.ResetTimer() - 
b.Run(fmt.Sprintf("%s: List() %d", snapshotName, tc), func(b *testing.B) { - for i := 0; i < b.N; i++ { - nodeInfos, err := clusterSnapshot.NodeInfos().List() - if err != nil { - assert.NoError(b, err) - } - if len(nodeInfos) != tc { - assert.Equal(b, len(nodeInfos), tc) - } - } - }) - } - } -} - -func BenchmarkAddPods(b *testing.B) { - testCases := []int{1, 10, 100, 1000, 5000, 15000} - - for snapshotName, snapshotFactory := range snapshots { - for _, tc := range testCases { - clusterSnapshot := snapshotFactory() - nodes := createTestNodes(tc) - err := clusterSnapshot.AddNodes(nodes) - assert.NoError(b, err) - pods := createTestPods(tc * 30) - assignPodsToNodes(pods, nodes) - b.ResetTimer() - b.Run(fmt.Sprintf("%s: AddPod() 30*%d", snapshotName, tc), func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - clusterSnapshot.Clear() - - err = clusterSnapshot.AddNodes(nodes) - if err != nil { - assert.NoError(b, err) - } - b.StartTimer() - for _, pod := range pods { - err = clusterSnapshot.AddPod(pod, pod.Spec.NodeName) - if err != nil { - assert.NoError(b, err) - } - } - } - }) - } - } -} - -func BenchmarkForkAddRevert(b *testing.B) { - nodeTestCases := []int{1, 10, 100, 1000, 5000, 15000, 100000} - podTestCases := []int{0, 1, 30} - - for snapshotName, snapshotFactory := range snapshots { - for _, ntc := range nodeTestCases { - nodes := createTestNodes(ntc) - for _, ptc := range podTestCases { - pods := createTestPods(ntc * ptc) - assignPodsToNodes(pods, nodes) - clusterSnapshot := snapshotFactory() - err := clusterSnapshot.AddNodes(nodes) - assert.NoError(b, err) - for _, pod := range pods { - err = clusterSnapshot.AddPod(pod, pod.Spec.NodeName) - assert.NoError(b, err) - } - tmpNode1 := BuildTestNode("tmp-1", 2000, 2000000) - tmpNode2 := BuildTestNode("tmp-2", 2000, 2000000) - b.ResetTimer() - b.Run(fmt.Sprintf("%s: ForkAddRevert (%d nodes, %d pods)", snapshotName, ntc, ptc), func(b *testing.B) { - for i := 0; i < b.N; i++ { - clusterSnapshot.Fork() - err = clusterSnapshot.AddNode(tmpNode1) - if err != nil { - assert.NoError(b, err) - } - clusterSnapshot.Fork() - err = clusterSnapshot.AddNode(tmpNode2) - if err != nil { - assert.NoError(b, err) - } - clusterSnapshot.Revert() - clusterSnapshot.Revert() - } - }) - } - } - } -} - -func BenchmarkBuildNodeInfoList(b *testing.B) { - testCases := []struct { - nodeCount int - }{ - { - nodeCount: 1000, - }, - { - nodeCount: 5000, - }, - { - nodeCount: 15000, - }, - { - nodeCount: 100000, - }, - } - - for _, tc := range testCases { - b.Run(fmt.Sprintf("fork add 1000 to %d", tc.nodeCount), func(b *testing.B) { - nodes := createTestNodes(tc.nodeCount + 1000) - snapshot := NewDeltaClusterSnapshot() - if err := snapshot.AddNodes(nodes[:tc.nodeCount]); err != nil { - assert.NoError(b, err) - } - snapshot.Fork() - if err := snapshot.AddNodes(nodes[tc.nodeCount:]); err != nil { - assert.NoError(b, err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - list := snapshot.data.buildNodeInfoList() - if len(list) != tc.nodeCount+1000 { - assert.Equal(b, len(list), tc.nodeCount+1000) - } - } - }) - } - for _, tc := range testCases { - b.Run(fmt.Sprintf("base %d", tc.nodeCount), func(b *testing.B) { - nodes := createTestNodes(tc.nodeCount) - snapshot := NewDeltaClusterSnapshot() - if err := snapshot.AddNodes(nodes); err != nil { - assert.NoError(b, err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - list := snapshot.data.buildNodeInfoList() - if len(list) != tc.nodeCount { - assert.Equal(b, len(list), tc.nodeCount) - } - } - }) - } -} diff --git 
a/cluster-autoscaler/simulator/clustersnapshot/error.go b/cluster-autoscaler/simulator/clustersnapshot/error.go new file mode 100644 index 000000000000..87b988c5bf6c --- /dev/null +++ b/cluster-autoscaler/simulator/clustersnapshot/error.go @@ -0,0 +1,149 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clustersnapshot + +import ( + "fmt" + "strings" + + apiv1 "k8s.io/api/core/v1" +) + +// SchedulingErrorType represents different possible schedulingError types. +type SchedulingErrorType int + +const ( + // SchedulingInternalError denotes internal unexpected error while trying to schedule a pod + SchedulingInternalError SchedulingErrorType = iota + // FailingPredicateError means that a pod couldn't be scheduled on a particular node because of a failing scheduler predicate + FailingPredicateError + // NoNodesPassingPredicatesFoundError means that a pod couldn't be scheduled on any Node because of failing scheduler predicates + NoNodesPassingPredicatesFoundError +) + +// SchedulingError represents an error encountered while trying to schedule a Pod inside ClusterSnapshot. +// An interface is exported instead of the concrete type to avoid the dreaded https://go.dev/doc/faq#nil_error. +type SchedulingError interface { + error + + // Type can be used to distinguish between different SchedulingError types. + Type() SchedulingErrorType + // Reasons provides a list of human-readable reasons explaining the error. + Reasons() []string + + // FailingPredicateName returns the name of the predicate that failed. Only applicable to the FailingPredicateError type. + FailingPredicateName() string + // FailingPredicateReasons returns a list of human-readable reasons explaining why the predicate failed. Only applicable to the FailingPredicateError type. + FailingPredicateReasons() []string +} + +type schedulingError struct { + errorType SchedulingErrorType + pod *apiv1.Pod + + // Only applicable to SchedulingInternalError: + internalErrorMsg string + + // Only applicable to FailingPredicateError: + failingPredicateName string + failingPredicateReasons []string + failingPredicateUnexpectedErrMsg string + // debugInfo contains additional info that predicate doesn't include, + // but may be useful for debugging (e.g. taints on node blocking scale-up) + failingPredicateDebugInfo string +} + +// Type returns if error was internal of names predicate failure. +func (se *schedulingError) Type() SchedulingErrorType { + return se.errorType +} + +// Error satisfies the builtin error interface. 
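// An illustrative sketch (this helper is invented, not part of the patch) of how callers are
// expected to branch on a SchedulingError instead of string-matching error messages:

package example

import (
	"k8s.io/klog/v2"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
)

func logSchedulingError(schedErr clustersnapshot.SchedulingError) {
	switch schedErr.Type() {
	case clustersnapshot.FailingPredicateError:
		// A concrete predicate rejected the pod on the node that was checked.
		klog.Infof("predicate %q failed: %v", schedErr.FailingPredicateName(), schedErr.FailingPredicateReasons())
	case clustersnapshot.NoNodesPassingPredicatesFoundError:
		// No node in the snapshot passed the filters for this pod.
		klog.Infof("no fitting node: %v", schedErr.Reasons())
	case clustersnapshot.SchedulingInternalError:
		// Unexpected failure inside the simulation itself.
		klog.Errorf("internal scheduling error: %v", schedErr)
	}
}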
+func (se *schedulingError) Error() string { + msg := "" + + switch se.errorType { + case SchedulingInternalError: + msg = fmt.Sprintf("unexpected error: %s", se.internalErrorMsg) + case FailingPredicateError: + details := []string{ + fmt.Sprintf("predicateReasons=[%s]", strings.Join(se.FailingPredicateReasons(), ", ")), + } + if se.failingPredicateDebugInfo != "" { + details = append(details, fmt.Sprintf("debugInfo=%s", se.failingPredicateDebugInfo)) + } + if se.failingPredicateUnexpectedErrMsg != "" { + details = append(details, fmt.Sprintf("unexpectedError=%s", se.failingPredicateUnexpectedErrMsg)) + } + msg = fmt.Sprintf("predicate %q didn't pass (%s)", se.FailingPredicateName(), strings.Join(details, "; ")) + case NoNodesPassingPredicatesFoundError: + msg = fmt.Sprintf("couldn't find a matching Node with passing predicates") + default: + msg = fmt.Sprintf("SchedulingErrorType type %q unknown - this shouldn't happen", se.errorType) + } + + return fmt.Sprintf("can't schedule pod %s/%s: %s", se.pod.Namespace, se.pod.Name, msg) +} + +// Reasons returns a list of human-readable reasons for the error. +func (se *schedulingError) Reasons() []string { + switch se.errorType { + case FailingPredicateError: + return se.FailingPredicateReasons() + default: + return []string{se.Error()} + } +} + +// FailingPredicateName returns the name of the predicate which failed. +func (se *schedulingError) FailingPredicateName() string { + return se.failingPredicateName +} + +// FailingPredicateReasons returns the failure reasons from the failed predicate as a slice of strings. +func (se *schedulingError) FailingPredicateReasons() []string { + return se.failingPredicateReasons +} + +// NewSchedulingInternalError creates a new schedulingError with SchedulingInternalError type. +func NewSchedulingInternalError(pod *apiv1.Pod, errMsg string) SchedulingError { + return &schedulingError{ + errorType: SchedulingInternalError, + pod: pod, + internalErrorMsg: errMsg, + } +} + +// NewFailingPredicateError creates a new schedulingError with FailingPredicateError type. +func NewFailingPredicateError(pod *apiv1.Pod, predicateName string, predicateReasons []string, unexpectedErrMsg string, debugInfo string) SchedulingError { + return &schedulingError{ + errorType: FailingPredicateError, + pod: pod, + failingPredicateName: predicateName, + failingPredicateReasons: predicateReasons, + failingPredicateUnexpectedErrMsg: unexpectedErrMsg, + failingPredicateDebugInfo: debugInfo, + } +} + +// NewNoNodesPassingPredicatesFoundError creates a new schedulingError with NoNodesPassingPredicatesFoundError type. +func NewNoNodesPassingPredicatesFoundError(pod *apiv1.Pod) SchedulingError { + return &schedulingError{ + errorType: NoNodesPassingPredicatesFoundError, + pod: pod, + } +} diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/plugin_runner.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/plugin_runner.go new file mode 100644 index 000000000000..82b49b54670d --- /dev/null +++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/plugin_runner.go @@ -0,0 +1,126 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + "context" + "fmt" + "strings" + + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + + apiv1 "k8s.io/api/core/v1" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" +) + +// SchedulerPluginRunner can be used to run various phases of scheduler plugins through the scheduler framework. +type SchedulerPluginRunner struct { + fwHandle *framework.Handle + snapshotBase clustersnapshot.SnapshotBase + lastIndex int +} + +// NewSchedulerPluginRunner builds a SchedulerPluginRunner. +func NewSchedulerPluginRunner(fwHandle *framework.Handle, snapshotBase clustersnapshot.SnapshotBase) *SchedulerPluginRunner { + return &SchedulerPluginRunner{fwHandle: fwHandle, snapshotBase: snapshotBase} +} + +// RunFiltersUntilPassingNode runs the scheduler framework PreFilter phase once, and then keeps running the Filter phase for all nodes in the cluster that match the provided +// function - until a Node where the filters pass is found. Filters are only run for matching Nodes. If no matching node with passing filters is found, an error is returned. +// +// The node iteration always starts from the next Node from the last Node that was found by this method. TODO: Extract the iteration strategy out of SchedulerPluginRunner. +func (p *SchedulerPluginRunner) RunFiltersUntilPassingNode(pod *apiv1.Pod, nodeMatches func(*framework.NodeInfo) bool) (string, clustersnapshot.SchedulingError) { + nodeInfosList, err := p.snapshotBase.ListNodeInfos() + if err != nil { + return "", clustersnapshot.NewSchedulingInternalError(pod, "ClusterSnapshot not provided") + } + + p.fwHandle.DelegatingLister.UpdateDelegate(p.snapshotBase) + defer p.fwHandle.DelegatingLister.ResetDelegate() + + state := schedulerframework.NewCycleState() + preFilterResult, preFilterStatus, _ := p.fwHandle.Framework.RunPreFilterPlugins(context.TODO(), state, pod) + if !preFilterStatus.IsSuccess() { + return "", clustersnapshot.NewFailingPredicateError(pod, preFilterStatus.Plugin(), preFilterStatus.Reasons(), "PreFilter failed", "") + } + + for i := range nodeInfosList { + nodeInfo := nodeInfosList[(p.lastIndex+i)%len(nodeInfosList)] + if !nodeMatches(nodeInfo) { + continue + } + + if !preFilterResult.AllNodes() && !preFilterResult.NodeNames.Has(nodeInfo.Node().Name) { + continue + } + + // Be sure that the node is schedulable. + if nodeInfo.Node().Spec.Unschedulable { + continue + } + + filterStatus := p.fwHandle.Framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo.ToScheduler()) + if filterStatus.IsSuccess() { + p.lastIndex = (p.lastIndex + i + 1) % len(nodeInfosList) + return nodeInfo.Node().Name, nil + } + } + return "", clustersnapshot.NewNoNodesPassingPredicatesFoundError(pod) +} + +// RunFiltersOnNode runs the scheduler framework PreFilter and Filter phases to check if the given pod can be scheduled on the given node. 
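// An illustrative sketch of driving RunFiltersUntilPassingNode directly (the function, the
// "example.com/node-group" label key and the ForceAddPod follow-up are invented for the example;
// PredicateSnapshot later in this patch wraps the same calls). PreFilter runs once per pod,
// Filter runs only on nodes accepted by the callback, and iteration resumes after the last
// passing node, so consecutive pods spread out instead of piling onto the first node that fits.

package example

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/predicate"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

func scheduleOntoGroup(runner *predicate.SchedulerPluginRunner, snapshotBase clustersnapshot.SnapshotBase, pod *apiv1.Pod, group string) error {
	nodeName, schedErr := runner.RunFiltersUntilPassingNode(pod, func(ni *framework.NodeInfo) bool {
		// Only consider nodes carrying a (hypothetical) node-group label.
		return ni.Node().Labels["example.com/node-group"] == group
	})
	if schedErr != nil {
		return schedErr
	}
	// The runner only checks predicates; recording the placement is the caller's job.
	return snapshotBase.ForceAddPod(pod, nodeName)
}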
+func (p *SchedulerPluginRunner) RunFiltersOnNode(pod *apiv1.Pod, nodeName string) clustersnapshot.SchedulingError { + nodeInfo, err := p.snapshotBase.GetNodeInfo(nodeName) + if err != nil { + return clustersnapshot.NewSchedulingInternalError(pod, fmt.Sprintf("error obtaining NodeInfo for name %q: %v", nodeName, err)) + } + + p.fwHandle.DelegatingLister.UpdateDelegate(p.snapshotBase) + defer p.fwHandle.DelegatingLister.ResetDelegate() + + state := schedulerframework.NewCycleState() + _, preFilterStatus, _ := p.fwHandle.Framework.RunPreFilterPlugins(context.TODO(), state, pod) + if !preFilterStatus.IsSuccess() { + return clustersnapshot.NewFailingPredicateError(pod, preFilterStatus.Plugin(), preFilterStatus.Reasons(), "PreFilter failed", "") + } + + filterStatus := p.fwHandle.Framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo.ToScheduler()) + + if !filterStatus.IsSuccess() { + filterName := filterStatus.Plugin() + filterReasons := filterStatus.Reasons() + unexpectedErrMsg := "" + if !filterStatus.IsRejected() { + unexpectedErrMsg = fmt.Sprintf("unexpected filter status %q", filterStatus.Code().String()) + } + return clustersnapshot.NewFailingPredicateError(pod, filterName, filterReasons, unexpectedErrMsg, p.failingFilterDebugInfo(filterName, nodeInfo)) + } + + return nil +} + +func (p *SchedulerPluginRunner) failingFilterDebugInfo(filterName string, nodeInfo *framework.NodeInfo) string { + infoParts := []string{fmt.Sprintf("nodeName: %q", nodeInfo.Node().Name)} + + switch filterName { + case "TaintToleration": + infoParts = append(infoParts, fmt.Sprintf("nodeTaints: %#v", nodeInfo.Node().Spec.Taints)) + } + + return strings.Join(infoParts, ", ") +} diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/plugin_runner_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/plugin_runner_test.go new file mode 100644 index 000000000000..8f2a770e9ce4 --- /dev/null +++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/plugin_runner_test.go @@ -0,0 +1,328 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "k8s.io/client-go/informers" + clientsetfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/scheduler/apis/config" + scheduler_config_latest "k8s.io/kubernetes/pkg/scheduler/apis/config/latest" + + testconfig "k8s.io/autoscaler/cluster-autoscaler/config/test" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" + . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" + + apiv1 "k8s.io/api/core/v1" +) + +func TestRunFiltersOnNode(t *testing.T) { + p450 := BuildTestPod("p450", 450, 500000) + p600 := BuildTestPod("p600", 600, 500000) + p8000 := BuildTestPod("p8000", 8000, 0) + p500 := BuildTestPod("p500", 500, 500000) + + n1000 := BuildTestNode("n1000", 1000, 2000000) + SetNodeReadyState(n1000, true, time.Time{}) + n1000Unschedulable := BuildTestNode("n1000", 1000, 2000000) + SetNodeReadyState(n1000Unschedulable, true, time.Time{}) + + // temp dir + tmpDir, err := os.MkdirTemp("", "scheduler-configs") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + customConfigFile := filepath.Join(tmpDir, "custom_config.yaml") + if err := os.WriteFile(customConfigFile, + []byte(testconfig.SchedulerConfigNodeResourcesFitDisabled), + os.FileMode(0600)); err != nil { + t.Fatal(err) + } + customConfig, err := scheduler.ConfigFromPath(customConfigFile) + assert.NoError(t, err) + + tests := []struct { + name string + customConfig *config.KubeSchedulerConfiguration + node *apiv1.Node + scheduledPods []*apiv1.Pod + testPod *apiv1.Pod + expectError bool + }{ + // default predicate checker test cases + { + name: "default - other pod - insuficient cpu", + node: n1000, + scheduledPods: []*apiv1.Pod{p450}, + testPod: p600, + expectError: true, + }, + { + name: "default - other pod - ok", + node: n1000, + scheduledPods: []*apiv1.Pod{p450}, + testPod: p500, + expectError: false, + }, + { + name: "default - empty - insuficient cpu", + node: n1000, + scheduledPods: []*apiv1.Pod{}, + testPod: p8000, + expectError: true, + }, + { + name: "default - empty - ok", + node: n1000, + scheduledPods: []*apiv1.Pod{}, + testPod: p600, + expectError: false, + }, + // custom predicate checker test cases + { + name: "custom - other pod - ok", + node: n1000, + scheduledPods: []*apiv1.Pod{p450}, + testPod: p600, + expectError: false, + customConfig: customConfig, + }, + { + name: "custom -other pod - ok", + node: n1000, + scheduledPods: []*apiv1.Pod{p450}, + testPod: p500, + expectError: false, + customConfig: customConfig, + }, + { + name: "custom -empty - ok", + node: n1000, + scheduledPods: []*apiv1.Pod{}, + testPod: p8000, + expectError: false, + customConfig: customConfig, + }, + { + name: "custom -empty - ok", + node: n1000, + scheduledPods: []*apiv1.Pod{}, + testPod: p600, + expectError: false, + customConfig: customConfig, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + snapshotBase := base.NewBasicSnapshotBase() + err := snapshotBase.AddNodeInfo(framework.NewTestNodeInfo(tt.node, tt.scheduledPods...)) + assert.NoError(t, err) + + pluginRunner, err := newTestPluginRunner(snapshotBase, tt.customConfig) + assert.NoError(t, err) + + predicateError := pluginRunner.RunFiltersOnNode(tt.testPod, tt.node.Name) + if tt.expectError { + assert.NotNil(t, predicateError) + assert.Equal(t, clustersnapshot.FailingPredicateError, predicateError.Type()) + assert.Equal(t, "NodeResourcesFit", predicateError.FailingPredicateName()) + assert.Equal(t, []string{"Insufficient cpu"}, predicateError.FailingPredicateReasons()) + assert.Contains(t, predicateError.Error(), "NodeResourcesFit") + assert.Contains(t, predicateError.Error(), "Insufficient cpu") + } else { + assert.Nil(t, predicateError) + } + }) + } +} + +func TestRunFilterUntilPassingNode(t *testing.T) { + p900 := BuildTestPod("p900", 900, 1000) + p1900 := BuildTestPod("p1900", 1900, 1000) + p2100 := BuildTestPod("p2100", 2100, 1000) + + n1000 := 
BuildTestNode("n1000", 1000, 2000000) + n2000 := BuildTestNode("n2000", 2000, 2000000) + + // temp dir + tmpDir, err := os.MkdirTemp("", "scheduler-configs") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + customConfigFile := filepath.Join(tmpDir, "custom_config.yaml") + if err := os.WriteFile(customConfigFile, + []byte(testconfig.SchedulerConfigNodeResourcesFitDisabled), + os.FileMode(0600)); err != nil { + t.Fatal(err) + } + customConfig, err := scheduler.ConfigFromPath(customConfigFile) + assert.NoError(t, err) + + testCases := []struct { + name string + customConfig *config.KubeSchedulerConfiguration + pod *apiv1.Pod + expectedNodes []string + expectError bool + }{ + // default predicate checker test cases + { + name: "default - small pod - no error", + pod: p900, + expectedNodes: []string{"n1000", "n2000"}, + expectError: false, + }, + { + name: "default - medium pod - no error", + pod: p1900, + expectedNodes: []string{"n2000"}, + expectError: false, + }, + { + name: "default - large pod - insufficient cpu", + pod: p2100, + expectError: true, + }, + + // custom predicate checker test cases + { + name: "custom - small pod - no error", + customConfig: customConfig, + pod: p900, + expectedNodes: []string{"n1000", "n2000"}, + expectError: false, + }, + { + name: "custom - medium pod - no error", + customConfig: customConfig, + pod: p1900, + expectedNodes: []string{"n1000", "n2000"}, + expectError: false, + }, + { + name: "custom - large pod - insufficient cpu", + customConfig: customConfig, + pod: p2100, + expectedNodes: []string{"n1000", "n2000"}, + expectError: false, + }, + } + + snapshotBase := base.NewBasicSnapshotBase() + err = snapshotBase.AddNodeInfo(framework.NewTestNodeInfo(n1000)) + assert.NoError(t, err) + err = snapshotBase.AddNodeInfo(framework.NewTestNodeInfo(n2000)) + assert.NoError(t, err) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + pluginRunner, err := newTestPluginRunner(snapshotBase, tc.customConfig) + assert.NoError(t, err) + + nodeName, err := pluginRunner.RunFiltersUntilPassingNode(tc.pod, func(info *framework.NodeInfo) bool { return true }) + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Contains(t, tc.expectedNodes, nodeName) + } + }) + } +} + +func TestDebugInfo(t *testing.T) { + p1 := BuildTestPod("p1", 0, 0) + node1 := BuildTestNode("n1", 1000, 2000000) + node1.Spec.Taints = []apiv1.Taint{ + { + Key: "SomeTaint", + Value: "WhyNot?", + Effect: apiv1.TaintEffectNoSchedule, + }, + { + Key: "RandomTaint", + Value: "JustBecause", + Effect: apiv1.TaintEffectNoExecute, + }, + } + SetNodeReadyState(node1, true, time.Time{}) + + clusterSnapshot := base.NewBasicSnapshotBase() + err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node1)) + assert.NoError(t, err) + + // with default predicate checker + defaultPluginnRunner, err := newTestPluginRunner(clusterSnapshot, nil) + assert.NoError(t, err) + predicateErr := defaultPluginnRunner.RunFiltersOnNode(p1, "n1") + assert.NotNil(t, predicateErr) + assert.Contains(t, predicateErr.FailingPredicateReasons(), "node(s) had untolerated taint {SomeTaint: WhyNot?}") + assert.Contains(t, predicateErr.Error(), "node(s) had untolerated taint {SomeTaint: WhyNot?}") + assert.Contains(t, predicateErr.Error(), "RandomTaint") + + // with custom predicate checker + + // temp dir + tmpDir, err := os.MkdirTemp("", "scheduler-configs") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + customConfigFile := 
filepath.Join(tmpDir, "custom_config.yaml") + if err := os.WriteFile(customConfigFile, + []byte(testconfig.SchedulerConfigTaintTolerationDisabled), + os.FileMode(0600)); err != nil { + t.Fatal(err) + } + + customConfig, err := scheduler.ConfigFromPath(customConfigFile) + assert.NoError(t, err) + customPluginnRunner, err := newTestPluginRunner(clusterSnapshot, customConfig) + assert.NoError(t, err) + predicateErr = customPluginnRunner.RunFiltersOnNode(p1, "n1") + assert.Nil(t, predicateErr) +} + +// newTestPluginRunner builds test version of SchedulerPluginRunner. +func newTestPluginRunner(snapshotBase clustersnapshot.SnapshotBase, schedConfig *config.KubeSchedulerConfiguration) (*SchedulerPluginRunner, error) { + if schedConfig == nil { + defaultConfig, err := scheduler_config_latest.Default() + if err != nil { + return nil, err + } + schedConfig = defaultConfig + } + + fwHandle, err := framework.NewHandle(informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0), schedConfig) + if err != nil { + return nil, err + } + return NewSchedulerPluginRunner(fwHandle, snapshotBase), nil +} diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go new file mode 100644 index 000000000000..3e1156b58dae --- /dev/null +++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot.go @@ -0,0 +1,71 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" +) + +// PredicateSnapshot implements ClusterSnapshot on top of a SnapshotBase by using +// SchedulerBasedPredicateChecker to check scheduler predicates. +type PredicateSnapshot struct { + clustersnapshot.SnapshotBase + pluginRunner *SchedulerPluginRunner +} + +// NewPredicateSnapshot builds a PredicateSnapshot. +func NewPredicateSnapshot(snapshotBase clustersnapshot.SnapshotBase, fwHandle *framework.Handle) *PredicateSnapshot { + return &PredicateSnapshot{ + SnapshotBase: snapshotBase, + pluginRunner: NewSchedulerPluginRunner(fwHandle, snapshotBase), + } +} + +// SchedulePod adds pod to the snapshot and schedules it to given node. +func (s *PredicateSnapshot) SchedulePod(pod *apiv1.Pod, nodeName string) clustersnapshot.SchedulingError { + if schedErr := s.pluginRunner.RunFiltersOnNode(pod, nodeName); schedErr != nil { + return schedErr + } + if err := s.ForceAddPod(pod, nodeName); err != nil { + return clustersnapshot.NewSchedulingInternalError(pod, err.Error()) + } + return nil +} + +// SchedulePodOnAnyNodeMatching adds pod to the snapshot and schedules it to any node matching the provided function. 
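// An illustrative end-to-end construction (the function name is invented; the wiring mirrors the
// newTestPluginRunner helper above): a scheduler framework Handle plus a low-level snapshot base
// yield a PredicateSnapshot, the predicate-aware ClusterSnapshot.

package example

import (
	"k8s.io/client-go/informers"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	scheduler_config_latest "k8s.io/kubernetes/pkg/scheduler/apis/config/latest"

	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/predicate"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
)

func newInMemoryPredicateSnapshot() (clustersnapshot.ClusterSnapshot, error) {
	schedConfig, err := scheduler_config_latest.Default()
	if err != nil {
		return nil, err
	}
	// A fake client is enough here: the snapshot, not the API server, backs the framework's listers.
	informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0)
	fwHandle, err := framework.NewHandle(informerFactory, schedConfig)
	if err != nil {
		return nil, err
	}
	// base.NewDeltaSnapshotBase() plugs in the same way; Basic is the simpler reference implementation.
	return predicate.NewPredicateSnapshot(base.NewBasicSnapshotBase(), fwHandle), nil
}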
+func (s *PredicateSnapshot) SchedulePodOnAnyNodeMatching(pod *apiv1.Pod, anyNodeMatching func(*framework.NodeInfo) bool) (string, clustersnapshot.SchedulingError) { + nodeName, schedErr := s.pluginRunner.RunFiltersUntilPassingNode(pod, anyNodeMatching) + if schedErr != nil { + return "", schedErr + } + if err := s.ForceAddPod(pod, nodeName); err != nil { + return "", clustersnapshot.NewSchedulingInternalError(pod, err.Error()) + } + return nodeName, nil +} + +// UnschedulePod removes the given Pod from the given Node inside the snapshot. +func (s *PredicateSnapshot) UnschedulePod(namespace string, podName string, nodeName string) error { + return s.ForceRemovePod(namespace, podName, nodeName) +} + +// CheckPredicates checks whether scheduler predicates pass for the given pod on the given node. +func (s *PredicateSnapshot) CheckPredicates(pod *apiv1.Pod, nodeName string) clustersnapshot.SchedulingError { + return s.pluginRunner.RunFiltersOnNode(pod, nodeName) +} diff --git a/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go new file mode 100644 index 000000000000..05712bac3a3e --- /dev/null +++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_benchmark_test.go @@ -0,0 +1,154 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" +) + +func BenchmarkAddNodeInfo(b *testing.B) { + testCases := []int{1, 10, 100, 1000, 5000, 15000, 100000} + + for snapshotName, snapshotFactory := range snapshots { + for _, tc := range testCases { + nodes := clustersnapshot.CreateTestNodes(tc) + clusterSnapshot, err := snapshotFactory() + assert.NoError(b, err) + b.ResetTimer() + b.Run(fmt.Sprintf("%s: AddNodeInfo() %d", snapshotName, tc), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + assert.NoError(b, clusterSnapshot.SetClusterState(nil, nil)) + b.StartTimer() + for _, node := range nodes { + err := clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) + if err != nil { + assert.NoError(b, err) + } + } + } + }) + } + } +} + +func BenchmarkListNodeInfos(b *testing.B) { + testCases := []int{1, 10, 100, 1000, 5000, 15000, 100000} + + for snapshotName, snapshotFactory := range snapshots { + for _, tc := range testCases { + nodes := clustersnapshot.CreateTestNodes(tc) + clusterSnapshot, err := snapshotFactory() + assert.NoError(b, err) + err = clusterSnapshot.SetClusterState(nodes, nil) + if err != nil { + assert.NoError(b, err) + } + b.ResetTimer() + b.Run(fmt.Sprintf("%s: List() %d", snapshotName, tc), func(b *testing.B) { + for i := 0; i < b.N; i++ { + nodeInfos, err := clusterSnapshot.NodeInfos().List() + if err != nil { + assert.NoError(b, err) + } + if len(nodeInfos) != tc { + assert.Equal(b, len(nodeInfos), tc) + } + } + }) + } + } +} + +func BenchmarkAddPods(b *testing.B) { + testCases := []int{1, 10, 100, 1000, 5000, 15000} + + for snapshotName, snapshotFactory := range snapshots { + for _, tc := range testCases { + nodes := clustersnapshot.CreateTestNodes(tc) + pods := clustersnapshot.CreateTestPods(tc * 30) + clustersnapshot.AssignTestPodsToNodes(pods, nodes) + clusterSnapshot, err := snapshotFactory() + assert.NoError(b, err) + err = clusterSnapshot.SetClusterState(nodes, nil) + assert.NoError(b, err) + b.ResetTimer() + b.Run(fmt.Sprintf("%s: ForceAddPod() 30*%d", snapshotName, tc), func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + + err = clusterSnapshot.SetClusterState(nodes, nil) + if err != nil { + assert.NoError(b, err) + } + b.StartTimer() + for _, pod := range pods { + err = clusterSnapshot.ForceAddPod(pod, pod.Spec.NodeName) + if err != nil { + assert.NoError(b, err) + } + } + } + }) + } + } +} + +func BenchmarkForkAddRevert(b *testing.B) { + nodeTestCases := []int{1, 10, 100, 1000, 5000, 15000, 100000} + podTestCases := []int{0, 1, 30} + + for snapshotName, snapshotFactory := range snapshots { + for _, ntc := range nodeTestCases { + nodes := clustersnapshot.CreateTestNodes(ntc) + for _, ptc := range podTestCases { + pods := clustersnapshot.CreateTestPods(ntc * ptc) + clustersnapshot.AssignTestPodsToNodes(pods, nodes) + clusterSnapshot, err := snapshotFactory() + assert.NoError(b, err) + err = clusterSnapshot.SetClusterState(nodes, pods) + assert.NoError(b, err) + tmpNode1 := BuildTestNode("tmp-1", 2000, 2000000) + tmpNode2 := BuildTestNode("tmp-2", 2000, 2000000) + b.ResetTimer() + b.Run(fmt.Sprintf("%s: ForkAddRevert (%d nodes, %d pods)", snapshotName, ntc, ptc), func(b *testing.B) { + for i := 0; i < b.N; i++ { + clusterSnapshot.Fork() + err = clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(tmpNode1)) + if err != nil { + assert.NoError(b, err) + } + clusterSnapshot.Fork() + err = clusterSnapshot.AddNodeInfo(framework.NewTestNodeInfo(tmpNode2)) + if err != nil { + assert.NoError(b, err) + } + clusterSnapshot.Revert() 
+ clusterSnapshot.Revert() + } + }) + } + } + } +} diff --git a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_test.go b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go similarity index 70% rename from cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_test.go rename to cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go index f9ce65162580..d73d50fc91b8 100644 --- a/cluster-autoscaler/simulator/clustersnapshot/clustersnapshot_test.go +++ b/cluster-autoscaler/simulator/clustersnapshot/predicate/predicate_snapshot_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package clustersnapshot +package predicate import ( "fmt" @@ -23,6 +23,8 @@ import ( "time" apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" . "k8s.io/autoscaler/cluster-autoscaler/utils/test" schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" @@ -30,9 +32,21 @@ import ( "github.com/stretchr/testify/assert" ) -var snapshots = map[string]func() ClusterSnapshot{ - "basic": func() ClusterSnapshot { return NewBasicClusterSnapshot() }, - "delta": func() ClusterSnapshot { return NewDeltaClusterSnapshot() }, +var snapshots = map[string]func() (clustersnapshot.ClusterSnapshot, error){ + "basic": func() (clustersnapshot.ClusterSnapshot, error) { + fwHandle, err := framework.NewTestFrameworkHandle() + if err != nil { + return nil, err + } + return NewPredicateSnapshot(base.NewBasicSnapshotBase(), fwHandle), nil + }, + "delta": func() (clustersnapshot.ClusterSnapshot, error) { + fwHandle, err := framework.NewTestFrameworkHandle() + if err != nil { + return nil, err + } + return NewPredicateSnapshot(base.NewDeltaSnapshotBase(), fwHandle), nil + }, } func nodeNames(nodes []*apiv1.Node) []string { @@ -61,7 +75,7 @@ func compareStates(t *testing.T, a, b snapshotState) { assert.ElementsMatch(t, a.pods, b.pods) } -func getSnapshotState(t *testing.T, snapshot ClusterSnapshot) snapshotState { +func getSnapshotState(t *testing.T, snapshot clustersnapshot.ClusterSnapshot) snapshotState { nodes, err := snapshot.ListNodeInfos() assert.NoError(t, err) var pods []*apiv1.Pod @@ -73,20 +87,17 @@ func getSnapshotState(t *testing.T, snapshot ClusterSnapshot) snapshotState { return snapshotState{extractNodes(nodes), pods} } -func startSnapshot(t *testing.T, snapshotFactory func() ClusterSnapshot, state snapshotState) ClusterSnapshot { - snapshot := snapshotFactory() - err := snapshot.AddNodes(state.nodes) +func startSnapshot(t *testing.T, snapshotFactory func() (clustersnapshot.ClusterSnapshot, error), state snapshotState) clustersnapshot.ClusterSnapshot { + snapshot, err := snapshotFactory() + assert.NoError(t, err) + err = snapshot.SetClusterState(state.nodes, state.pods) assert.NoError(t, err) - for _, pod := range state.pods { - err := snapshot.AddPod(pod, pod.Spec.NodeName) - assert.NoError(t, err) - } return snapshot } type modificationTestCase struct { name string - op func(ClusterSnapshot) + op func(clustersnapshot.ClusterSnapshot) state snapshotState modifiedState snapshotState } @@ -98,9 +109,9 @@ func validTestCases(t *testing.T) []modificationTestCase { testCases := []modificationTestCase{ { - name: "add node", - op: func(snapshot ClusterSnapshot) { - err := snapshot.AddNode(node) + name: "add empty 
nodeInfo", + op: func(snapshot clustersnapshot.ClusterSnapshot) { + err := snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) }, modifiedState: snapshotState{ @@ -108,9 +119,9 @@ func validTestCases(t *testing.T) []modificationTestCase { }, }, { - name: "add node with pods", - op: func(snapshot ClusterSnapshot) { - err := snapshot.AddNodeWithPods(node, []*apiv1.Pod{pod}) + name: "add nodeInfo", + op: func(snapshot clustersnapshot.ClusterSnapshot) { + err := snapshot.AddNodeInfo(framework.NewTestNodeInfo(node, pod)) assert.NoError(t, err) }, modifiedState: snapshotState{ @@ -119,25 +130,25 @@ func validTestCases(t *testing.T) []modificationTestCase { }, }, { - name: "remove node", + name: "remove nodeInfo", state: snapshotState{ nodes: []*apiv1.Node{node}, }, - op: func(snapshot ClusterSnapshot) { - err := snapshot.RemoveNode(node.Name) + op: func(snapshot clustersnapshot.ClusterSnapshot) { + err := snapshot.RemoveNodeInfo(node.Name) assert.NoError(t, err) }, }, { - name: "remove node, then add it back", + name: "remove nodeInfo, then add it back", state: snapshotState{ nodes: []*apiv1.Node{node}, }, - op: func(snapshot ClusterSnapshot) { - err := snapshot.RemoveNode(node.Name) + op: func(snapshot clustersnapshot.ClusterSnapshot) { + err := snapshot.RemoveNodeInfo(node.Name) assert.NoError(t, err) - err = snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) }, modifiedState: snapshotState{ @@ -145,14 +156,14 @@ func validTestCases(t *testing.T) []modificationTestCase { }, }, { - name: "add pod, then remove node", + name: "add pod, then remove nodeInfo", state: snapshotState{ nodes: []*apiv1.Node{node}, }, - op: func(snapshot ClusterSnapshot) { - err := snapshot.AddPod(pod, node.Name) - assert.NoError(t, err) - err = snapshot.RemoveNode(node.Name) + op: func(snapshot clustersnapshot.ClusterSnapshot) { + schedErr := snapshot.ForceAddPod(pod, node.Name) + assert.NoError(t, schedErr) + err := snapshot.RemoveNodeInfo(node.Name) assert.NoError(t, err) }, }, @@ -203,7 +214,7 @@ func TestForking(t *testing.T) { tc.op(snapshot) snapshot.Fork() - snapshot.AddNode(node) + snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) snapshot.Revert() snapshot.Revert() @@ -247,7 +258,7 @@ func TestForking(t *testing.T) { snapshot.Fork() tc.op(snapshot) snapshot.Fork() - snapshot.AddNode(node) + snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) snapshot.Revert() err := snapshot.Commit() assert.NoError(t, err) @@ -279,7 +290,7 @@ func TestForking(t *testing.T) { } } -func TestClear(t *testing.T) { +func TestSetClusterState(t *testing.T) { // Run with -count=1 to avoid caching. 
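// Editorial sketch, not part of the change: SetClusterState(nodes, pods) replaces the removed
// Clear()/AddNode()/AddPod() initialization sequence, and passing nil for both arguments resets the
// snapshot to an empty state; "nodes" and "scheduledPods" are assumed test fixtures.
//
//	if err := snapshot.SetClusterState(nil, nil); err != nil {
//		t.Fatalf("couldn't clear the snapshot: %v", err)
//	}
//	if err := snapshot.SetClusterState(nodes, scheduledPods); err != nil {
//		t.Fatalf("couldn't initialize the snapshot: %v", err)
//	}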
localRand := rand.New(rand.NewSource(time.Now().Unix())) @@ -288,20 +299,20 @@ func TestClear(t *testing.T) { extraNodeCount := localRand.Intn(100) extraPodCount := localRand.Intn(1000) - nodes := createTestNodes(nodeCount) - pods := createTestPods(podCount) - assignPodsToNodes(pods, nodes) + nodes := clustersnapshot.CreateTestNodes(nodeCount) + pods := clustersnapshot.CreateTestPods(podCount) + clustersnapshot.AssignTestPodsToNodes(pods, nodes) state := snapshotState{nodes, pods} - extraNodes := createTestNodesWithPrefix("extra", extraNodeCount) + extraNodes := clustersnapshot.CreateTestNodesWithPrefix("extra", extraNodeCount) allNodes := make([]*apiv1.Node, len(nodes)+len(extraNodes), len(nodes)+len(extraNodes)) copy(allNodes, nodes) copy(allNodes[len(nodes):], extraNodes) - extraPods := createTestPodsWithPrefix("extra", extraPodCount) - assignPodsToNodes(extraPods, allNodes) + extraPods := clustersnapshot.CreateTestPodsWithPrefix("extra", extraPodCount) + clustersnapshot.AssignTestPodsToNodes(extraPods, allNodes) allPods := make([]*apiv1.Pod, len(pods)+len(extraPods), len(pods)+len(extraPods)) copy(allPods, pods) @@ -313,10 +324,21 @@ func TestClear(t *testing.T) { snapshot := startSnapshot(t, snapshotFactory, state) compareStates(t, state, getSnapshotState(t, snapshot)) - snapshot.Clear() + assert.NoError(t, snapshot.SetClusterState(nil, nil)) compareStates(t, snapshotState{}, getSnapshotState(t, snapshot)) }) + t.Run(fmt.Sprintf("%s: clear base %d nodes %d pods and set a new state", name, nodeCount, podCount), + func(t *testing.T) { + snapshot := startSnapshot(t, snapshotFactory, state) + compareStates(t, state, getSnapshotState(t, snapshot)) + + newNodes, newPods := clustersnapshot.CreateTestNodes(13), clustersnapshot.CreateTestPods(37) + clustersnapshot.AssignTestPodsToNodes(newPods, newNodes) + assert.NoError(t, snapshot.SetClusterState(newNodes, newPods)) + + compareStates(t, snapshotState{nodes: newNodes, pods: newPods}, getSnapshotState(t, snapshot)) + }) t.Run(fmt.Sprintf("%s: clear fork %d nodes %d pods %d extra nodes %d extra pods", name, nodeCount, podCount, extraNodeCount, extraPodCount), func(t *testing.T) { snapshot := startSnapshot(t, snapshotFactory, state) @@ -324,23 +346,24 @@ func TestClear(t *testing.T) { snapshot.Fork() - err := snapshot.AddNodes(extraNodes) - assert.NoError(t, err) + for _, node := range extraNodes { + err := snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) + assert.NoError(t, err) + } for _, pod := range extraPods { - err := snapshot.AddPod(pod, pod.Spec.NodeName) + err := snapshot.ForceAddPod(pod, pod.Spec.NodeName) assert.NoError(t, err) } compareStates(t, snapshotState{allNodes, allPods}, getSnapshotState(t, snapshot)) - snapshot.Clear() + assert.NoError(t, snapshot.SetClusterState(nil, nil)) compareStates(t, snapshotState{}, getSnapshotState(t, snapshot)) - // Clear() should break out of forked state. + // SetClusterState() should break out of forked state. snapshot.Fork() - assert.NoError(t, err) }) } } @@ -349,20 +372,20 @@ func TestNode404(t *testing.T) { // Anything and everything that returns errNodeNotFound should be tested here. 
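// Editorial sketch, not part of the change: with the renamed API, the expected "node not found"
// behavior looks like this ("snapshot" is any ClusterSnapshot under test):
//
//	if err := snapshot.ForceAddPod(BuildTestPod("p", 0, 0), "no-such-node"); err == nil {
//		t.Error("expected an error for a missing node")
//	}
//	if err := snapshot.RemoveNodeInfo("no-such-node"); err == nil {
//		t.Error("expected an error for a missing node")
//	}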
ops := []struct { name string - op func(ClusterSnapshot) error + op func(clustersnapshot.ClusterSnapshot) error }{ - {"add pod", func(snapshot ClusterSnapshot) error { - return snapshot.AddPod(BuildTestPod("p1", 0, 0), "node") + {"add pod", func(snapshot clustersnapshot.ClusterSnapshot) error { + return snapshot.ForceAddPod(BuildTestPod("p1", 0, 0), "node") }}, - {"remove pod", func(snapshot ClusterSnapshot) error { - return snapshot.RemovePod("default", "p1", "node") + {"remove pod", func(snapshot clustersnapshot.ClusterSnapshot) error { + return snapshot.ForceRemovePod("default", "p1", "node") }}, - {"get node", func(snapshot ClusterSnapshot) error { + {"get node", func(snapshot clustersnapshot.ClusterSnapshot) error { _, err := snapshot.NodeInfos().Get("node") return err }}, - {"remove node", func(snapshot ClusterSnapshot) error { - return snapshot.RemoveNode("node") + {"remove nodeInfo", func(snapshot clustersnapshot.ClusterSnapshot) error { + return snapshot.RemoveNodeInfo("node") }}, } @@ -370,25 +393,27 @@ func TestNode404(t *testing.T) { for _, op := range ops { t.Run(fmt.Sprintf("%s: %s empty", name, op.name), func(t *testing.T) { - snapshot := snapshotFactory() + snapshot, err := snapshotFactory() + assert.NoError(t, err) // Empty snapshot - shouldn't be able to operate on nodes that are not here. - err := op.op(snapshot) + err = op.op(snapshot) assert.Error(t, err) }) t.Run(fmt.Sprintf("%s: %s fork", name, op.name), func(t *testing.T) { - snapshot := snapshotFactory() + snapshot, err := snapshotFactory() + assert.NoError(t, err) node := BuildTestNode("node", 10, 100) - err := snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) snapshot.Fork() assert.NoError(t, err) - err = snapshot.RemoveNode("node") + err = snapshot.RemoveNodeInfo("node") assert.NoError(t, err) // Node deleted after fork - shouldn't be able to operate on it. @@ -405,13 +430,14 @@ func TestNode404(t *testing.T) { t.Run(fmt.Sprintf("%s: %s base", name, op.name), func(t *testing.T) { - snapshot := snapshotFactory() + snapshot, err := snapshotFactory() + assert.NoError(t, err) node := BuildTestNode("node", 10, 100) - err := snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) - err = snapshot.RemoveNode("node") + err = snapshot.RemoveNodeInfo("node") assert.NoError(t, err) // Node deleted from base - shouldn't be able to operate on it. @@ -429,13 +455,10 @@ func TestNodeAlreadyExists(t *testing.T) { ops := []struct { name string - op func(ClusterSnapshot) error + op func(clustersnapshot.ClusterSnapshot) error }{ - {"add node", func(snapshot ClusterSnapshot) error { - return snapshot.AddNode(node) - }}, - {"add node with pod", func(snapshot ClusterSnapshot) error { - return snapshot.AddNodeWithPods(node, []*apiv1.Pod{pod}) + {"add nodeInfo", func(snapshot clustersnapshot.ClusterSnapshot) error { + return snapshot.AddNodeInfo(framework.NewTestNodeInfo(node, pod)) }}, } @@ -443,9 +466,10 @@ func TestNodeAlreadyExists(t *testing.T) { for _, op := range ops { t.Run(fmt.Sprintf("%s: %s base", name, op.name), func(t *testing.T) { - snapshot := snapshotFactory() + snapshot, err := snapshotFactory() + assert.NoError(t, err) - err := snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) // Node already in base. 
@@ -455,9 +479,10 @@ func TestNodeAlreadyExists(t *testing.T) { t.Run(fmt.Sprintf("%s: %s base, forked", name, op.name), func(t *testing.T) { - snapshot := snapshotFactory() + snapshot, err := snapshotFactory() + assert.NoError(t, err) - err := snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) snapshot.Fork() @@ -470,11 +495,12 @@ func TestNodeAlreadyExists(t *testing.T) { t.Run(fmt.Sprintf("%s: %s fork", name, op.name), func(t *testing.T) { - snapshot := snapshotFactory() + snapshot, err := snapshotFactory() + assert.NoError(t, err) snapshot.Fork() - err := snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) // Node already in fork. @@ -483,11 +509,12 @@ func TestNodeAlreadyExists(t *testing.T) { }) t.Run(fmt.Sprintf("%s: %s committed", name, op.name), func(t *testing.T) { - snapshot := snapshotFactory() + snapshot, err := snapshotFactory() + assert.NoError(t, err) snapshot.Fork() - err := snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err) err = snapshot.Commit() @@ -623,18 +650,19 @@ func TestPVCUsedByPods(t *testing.T) { for snapshotName, snapshotFactory := range snapshots { for _, tc := range testcase { t.Run(fmt.Sprintf("%s with snapshot (%s)", tc.desc, snapshotName), func(t *testing.T) { - snapshot := snapshotFactory() - err := snapshot.AddNodeWithPods(tc.node, tc.pods) + snapshot, err := snapshotFactory() + assert.NoError(t, err) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(tc.node, tc.pods...)) assert.NoError(t, err) - volumeExists := snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", tc.claimName)) + volumeExists := snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", tc.claimName)) assert.Equal(t, tc.exists, volumeExists) if tc.removePod != "" { - err = snapshot.RemovePod("default", tc.removePod, "node") + err = snapshot.ForceRemovePod("default", tc.removePod, "node") assert.NoError(t, err) - volumeExists = snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", tc.claimName)) + volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", tc.claimName)) assert.Equal(t, tc.existsAfterRemove, volumeExists) } }) @@ -693,39 +721,41 @@ func TestPVCClearAndFork(t *testing.T) { for snapshotName, snapshotFactory := range snapshots { t.Run(fmt.Sprintf("fork and revert snapshot with pvc pods with snapshot: %s", snapshotName), func(t *testing.T) { - snapshot := snapshotFactory() - err := snapshot.AddNodeWithPods(node, []*apiv1.Pod{pod1}) + snapshot, err := snapshotFactory() + assert.NoError(t, err) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node, pod1)) assert.NoError(t, err) - volumeExists := snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) + volumeExists := snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) assert.Equal(t, true, volumeExists) snapshot.Fork() assert.NoError(t, err) - volumeExists = snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) + volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) assert.Equal(t, true, volumeExists) - err = snapshot.AddPod(pod2, "node") + err = snapshot.ForceAddPod(pod2, "node") assert.NoError(t, err) - volumeExists = 
snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim2")) + volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim2")) assert.Equal(t, true, volumeExists) snapshot.Revert() - volumeExists = snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim2")) + volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim2")) assert.Equal(t, false, volumeExists) }) t.Run(fmt.Sprintf("clear snapshot with pvc pods with snapshot: %s", snapshotName), func(t *testing.T) { - snapshot := snapshotFactory() - err := snapshot.AddNodeWithPods(node, []*apiv1.Pod{pod1}) + snapshot, err := snapshotFactory() + assert.NoError(t, err) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node, pod1)) assert.NoError(t, err) - volumeExists := snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) + volumeExists := snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) assert.Equal(t, true, volumeExists) - snapshot.Clear() - volumeExists = snapshot.IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) + assert.NoError(t, snapshot.SetClusterState(nil, nil)) + volumeExists = snapshot.StorageInfos().IsPVCUsedByPods(schedulerframework.GetNamespacedName("default", "claim1")) assert.Equal(t, false, volumeExists) }) @@ -747,7 +777,7 @@ func TestWithForkedSnapshot(t *testing.T) { return false, err } t.Run(fmt.Sprintf("%s: %s WithForkedSnapshot for failed function", name, tc.name), func(t *testing.T) { - err1, err2 := WithForkedSnapshot(snapshot, failedFunc) + err1, err2 := clustersnapshot.WithForkedSnapshot(snapshot, failedFunc) assert.Error(t, err1) assert.NoError(t, err2) @@ -755,7 +785,7 @@ func TestWithForkedSnapshot(t *testing.T) { compareStates(t, tc.state, getSnapshotState(t, snapshot)) }) t.Run(fmt.Sprintf("%s: %s WithForkedSnapshot for success function", name, tc.name), func(t *testing.T) { - err1, err2 := WithForkedSnapshot(snapshot, successFunc) + err1, err2 := clustersnapshot.WithForkedSnapshot(snapshot, successFunc) assert.Error(t, err1) assert.NoError(t, err2) diff --git a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go index 501756fe2438..5ebb699bb98a 100644 --- a/cluster-autoscaler/simulator/clustersnapshot/test_utils.go +++ b/cluster-autoscaler/simulator/clustersnapshot/test_utils.go @@ -17,10 +17,16 @@ limitations under the License. package clustersnapshot import ( + "fmt" + "math" "testing" + "time" "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + "k8s.io/autoscaler/cluster-autoscaler/utils/test" ) // InitializeClusterSnapshotOrDie clears cluster snapshot and then initializes it with given set of nodes and pods. 
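// Editorial sketch, not part of the change: in tests this helper is typically paired with the new
// testsnapshot package introduced later in this diff; "nodes" and "pods" are assumed fixtures.
//
//	snapshot := testsnapshot.NewTestSnapshotOrDie(t)
//	clustersnapshot.InitializeClusterSnapshotOrDie(t, snapshot, nodes, pods)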
@@ -32,22 +38,67 @@ func InitializeClusterSnapshotOrDie( pods []*apiv1.Pod) { var err error - snapshot.Clear() + assert.NoError(t, snapshot.SetClusterState(nil, nil)) for _, node := range nodes { - err = snapshot.AddNode(node) + err = snapshot.AddNodeInfo(framework.NewTestNodeInfo(node)) assert.NoError(t, err, "error while adding node %s", node.Name) } for _, pod := range pods { if pod.Spec.NodeName != "" { - err = snapshot.AddPod(pod, pod.Spec.NodeName) + err = snapshot.ForceAddPod(pod, pod.Spec.NodeName) assert.NoError(t, err, "error while adding pod %s/%s to node %s", pod.Namespace, pod.Name, pod.Spec.NodeName) } else if pod.Status.NominatedNodeName != "" { - err = snapshot.AddPod(pod, pod.Status.NominatedNodeName) + err = snapshot.ForceAddPod(pod, pod.Status.NominatedNodeName) assert.NoError(t, err, "error while adding pod %s/%s to nominated node %s", pod.Namespace, pod.Name, pod.Status.NominatedNodeName) } else { assert.Fail(t, "pod %s/%s does not have Spec.NodeName nor Status.NominatedNodeName set", pod.Namespace, pod.Name) } } } + +// CreateTestNodesWithPrefix creates n test Nodes with the given name prefix. +func CreateTestNodesWithPrefix(prefix string, n int) []*apiv1.Node { + nodes := make([]*apiv1.Node, n, n) + for i := 0; i < n; i++ { + nodes[i] = test.BuildTestNode(fmt.Sprintf("%s-%d", prefix, i), math.MaxInt, math.MaxInt) + test.SetNodeReadyState(nodes[i], true, time.Time{}) + } + return nodes +} + +// CreateTestNodes creates n test Nodes. +func CreateTestNodes(n int) []*apiv1.Node { + return CreateTestNodesWithPrefix("n", n) +} + +// CreateTestPodsWithPrefix creates n test Pods with the given name prefix. +func CreateTestPodsWithPrefix(prefix string, n int) []*apiv1.Pod { + pods := make([]*apiv1.Pod, n, n) + for i := 0; i < n; i++ { + pods[i] = test.BuildTestPod(fmt.Sprintf("%s-%d", prefix, i), 1, 1) + } + return pods +} + +// CreateTestPods creates n test Pods. +func CreateTestPods(n int) []*apiv1.Pod { + return CreateTestPodsWithPrefix("p", n) +} + +// AssignTestPodsToNodes assigns test pods to test nodes based on their index position. +func AssignTestPodsToNodes(pods []*apiv1.Pod, nodes []*apiv1.Node) { + if len(nodes) == 0 { + return + } + + j := 0 + for i := 0; i < len(pods); i++ { + if j >= len(nodes) { + j = 0 + } + pods[i].Spec.NodeName = nodes[j].Name + j++ + } +} diff --git a/cluster-autoscaler/simulator/clustersnapshot/testsnapshot/test_snapshot.go b/cluster-autoscaler/simulator/clustersnapshot/testsnapshot/test_snapshot.go new file mode 100644 index 000000000000..f72e8b346bd1 --- /dev/null +++ b/cluster-autoscaler/simulator/clustersnapshot/testsnapshot/test_snapshot.go @@ -0,0 +1,65 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testsnapshot + +import ( + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/base" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/predicate" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" +) + +// testFailer is an abstraction that covers both *testing.T and *testing.B. +type testFailer interface { + Fatalf(format string, args ...any) +} + +// NewTestSnapshot returns an instance of ClusterSnapshot that can be used in tests. +func NewTestSnapshot() (clustersnapshot.ClusterSnapshot, error) { + testFwHandle, err := framework.NewTestFrameworkHandle() + if err != nil { + return nil, err + } + return predicate.NewPredicateSnapshot(base.NewBasicSnapshotBase(), testFwHandle), nil +} + +// NewTestSnapshotOrDie returns an instance of ClusterSnapshot that can be used in tests. +func NewTestSnapshotOrDie(t testFailer) clustersnapshot.ClusterSnapshot { + snapshot, err := NewTestSnapshot() + if err != nil { + t.Fatalf("NewTestSnapshotOrDie: couldn't create test ClusterSnapshot: %v", err) + } + return snapshot +} + +// NewCustomTestSnapshot returns an instance of ClusterSnapshot with a specific SnapshotBase that can be used in tests. +func NewCustomTestSnapshot(snapshot clustersnapshot.SnapshotBase) (clustersnapshot.ClusterSnapshot, error) { + testFwHandle, err := framework.NewTestFrameworkHandle() + if err != nil { + return nil, err + } + return predicate.NewPredicateSnapshot(snapshot, testFwHandle), nil +} + +// NewCustomTestSnapshotOrDie returns an instance of ClusterSnapshot with a specific SnapshotBase that can be used in tests. +func NewCustomTestSnapshotOrDie(t testFailer, snapshot clustersnapshot.SnapshotBase) clustersnapshot.ClusterSnapshot { + result, err := NewCustomTestSnapshot(snapshot) + if err != nil { + t.Fatalf("NewCustomTestSnapshotOrDie: couldn't create test ClusterSnapshot: %v", err) + } + return result +} diff --git a/cluster-autoscaler/simulator/predicatechecker/delegating_shared_lister.go b/cluster-autoscaler/simulator/framework/delegating_shared_lister.go similarity index 99% rename from cluster-autoscaler/simulator/predicatechecker/delegating_shared_lister.go rename to cluster-autoscaler/simulator/framework/delegating_shared_lister.go index be66bb8bd326..413f517be435 100644 --- a/cluster-autoscaler/simulator/predicatechecker/delegating_shared_lister.go +++ b/cluster-autoscaler/simulator/framework/delegating_shared_lister.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package predicatechecker +package framework import ( "fmt" diff --git a/cluster-autoscaler/simulator/framework/handle.go b/cluster-autoscaler/simulator/framework/handle.go new file mode 100644 index 000000000000..ca229d7f9963 --- /dev/null +++ b/cluster-autoscaler/simulator/framework/handle.go @@ -0,0 +1,70 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + "context" + "fmt" + + "k8s.io/client-go/informers" + "k8s.io/kubernetes/pkg/scheduler/apis/config" + scheduler_config "k8s.io/kubernetes/pkg/scheduler/apis/config/latest" + schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" + scheduler_plugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins" + schedulerframeworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime" + schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" +) + +// Handle is meant for interacting with the scheduler framework. +type Handle struct { + Framework schedulerframework.Framework + DelegatingLister *DelegatingSchedulerSharedLister +} + +// NewHandle builds a framework Handle based on the provided informers and scheduler config. +func NewHandle(informerFactory informers.SharedInformerFactory, schedConfig *config.KubeSchedulerConfiguration) (*Handle, error) { + if schedConfig == nil { + var err error + schedConfig, err = scheduler_config.Default() + if err != nil { + return nil, fmt.Errorf("couldn't create scheduler config: %v", err) + } + } + + if len(schedConfig.Profiles) != 1 { + return nil, fmt.Errorf("unexpected scheduler config: expected one scheduler profile only (found %d profiles)", len(schedConfig.Profiles)) + } + sharedLister := NewDelegatingSchedulerSharedLister() + + schedulermetrics.InitMetrics() + framework, err := schedulerframeworkruntime.NewFramework( + context.TODO(), + scheduler_plugins.NewInTreeRegistry(), + &schedConfig.Profiles[0], + schedulerframeworkruntime.WithInformerFactory(informerFactory), + schedulerframeworkruntime.WithSnapshotSharedLister(sharedLister), + ) + + if err != nil { + return nil, fmt.Errorf("couldn't create scheduler framework; %v", err) + } + + return &Handle{ + Framework: framework, + DelegatingLister: sharedLister, + }, nil +} diff --git a/cluster-autoscaler/simulator/framework/infos.go b/cluster-autoscaler/simulator/framework/infos.go index c3af45d08258..c9062d170f8e 100644 --- a/cluster-autoscaler/simulator/framework/infos.go +++ b/cluster-autoscaler/simulator/framework/infos.go @@ -93,6 +93,23 @@ func (n *NodeInfo) ToScheduler() *schedulerframework.NodeInfo { return n.schedNodeInfo } +// DeepCopy clones the NodeInfo. +func (n *NodeInfo) DeepCopy() *NodeInfo { + var newPods []*PodInfo + for _, podInfo := range n.Pods() { + var newClaims []*resourceapi.ResourceClaim + for _, claim := range podInfo.NeededResourceClaims { + newClaims = append(newClaims, claim.DeepCopy()) + } + newPods = append(newPods, &PodInfo{Pod: podInfo.Pod.DeepCopy(), NeededResourceClaims: newClaims}) + } + var newSlices []*resourceapi.ResourceSlice + for _, slice := range n.LocalResourceSlices { + newSlices = append(newSlices, slice.DeepCopy()) + } + return NewNodeInfo(n.Node().DeepCopy(), newSlices, newPods...) +} + // NewNodeInfo returns a new internal NodeInfo from the provided data. 
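// Editorial sketch, not part of the change: the DeepCopy method added above returns a fully
// independent clone, so callers (e.g. the sanitization helpers later in this diff) can mutate the
// copy without affecting the original template; the rename below is purely illustrative.
//
//	nodeInfoCopy := nodeInfo.DeepCopy()
//	nodeInfoCopy.Node().Name = "fresh-node-1"
//	// nodeInfo.Node().Name is unchanged.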
func NewNodeInfo(node *apiv1.Node, slices []*resourceapi.ResourceSlice, pods ...*PodInfo) *NodeInfo { result := &NodeInfo{ diff --git a/cluster-autoscaler/simulator/framework/infos_test.go b/cluster-autoscaler/simulator/framework/infos_test.go index e6f997129253..be92e6f762ea 100644 --- a/cluster-autoscaler/simulator/framework/infos_test.go +++ b/cluster-autoscaler/simulator/framework/infos_test.go @@ -208,6 +208,64 @@ func TestNodeInfo(t *testing.T) { } } +func TestDeepCopyNodeInfo(t *testing.T) { + node := test.BuildTestNode("node", 1000, 1000) + pods := []*PodInfo{ + {Pod: test.BuildTestPod("p1", 80, 0, test.WithNodeName(node.Name))}, + { + Pod: test.BuildTestPod("p2", 80, 0, test.WithNodeName(node.Name)), + NeededResourceClaims: []*resourceapi.ResourceClaim{ + {ObjectMeta: v1.ObjectMeta{Name: "claim1"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req1"}}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "claim2"}, Spec: resourceapi.ResourceClaimSpec{Devices: resourceapi.DeviceClaim{Requests: []resourceapi.DeviceRequest{{Name: "req2"}}}}}, + }, + }, + } + slices := []*resourceapi.ResourceSlice{ + {ObjectMeta: v1.ObjectMeta{Name: "slice1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node"}}, + {ObjectMeta: v1.ObjectMeta{Name: "slice2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node"}}, + } + nodeInfo := NewNodeInfo(node, slices, pods...) + + // Verify that the contents are identical after copying. + nodeInfoCopy := nodeInfo.DeepCopy() + if diff := cmp.Diff(nodeInfo, nodeInfoCopy, + cmp.AllowUnexported(schedulerframework.NodeInfo{}, NodeInfo{}, PodInfo{}, podExtraInfo{}), + // We don't care about this field staying the same, and it differs because it's a global counter bumped + // on every AddPod. + cmpopts.IgnoreFields(schedulerframework.NodeInfo{}, "Generation"), + ); diff != "" { + t.Errorf("nodeInfo differs after DeepCopyNodeInfo, diff (-want +got): %s", diff) + } + + // Verify that the object addresses changed in the copy. 
+ if nodeInfo == nodeInfoCopy { + t.Error("nodeInfo address identical after DeepCopyNodeInfo") + } + if nodeInfo.ToScheduler() == nodeInfoCopy.ToScheduler() { + t.Error("schedulerframework.NodeInfo address identical after DeepCopyNodeInfo") + } + for i := range len(nodeInfo.LocalResourceSlices) { + if nodeInfo.LocalResourceSlices[i] == nodeInfoCopy.LocalResourceSlices[i] { + t.Errorf("%d-th LocalResourceSlice address identical after DeepCopyNodeInfo", i) + } + } + for podIndex := range len(pods) { + oldPodInfo := nodeInfo.Pods()[podIndex] + newPodInfo := nodeInfoCopy.Pods()[podIndex] + if oldPodInfo == newPodInfo { + t.Errorf("%d-th PodInfo address identical after DeepCopyNodeInfo", podIndex) + } + if oldPodInfo.Pod == newPodInfo.Pod { + t.Errorf("%d-th PodInfo.Pod address identical after DeepCopyNodeInfo", podIndex) + } + for claimIndex := range len(newPodInfo.NeededResourceClaims) { + if oldPodInfo.NeededResourceClaims[claimIndex] == newPodInfo.NeededResourceClaims[claimIndex] { + t.Errorf("%d-th PodInfo - %d-th NeededResourceClaim address identical after DeepCopyNodeInfo", podIndex, claimIndex) + } + } + } +} + func testPodInfos(pods []*apiv1.Pod, addClaims bool) []*PodInfo { var result []*PodInfo for _, pod := range pods { diff --git a/cluster-autoscaler/simulator/framework/test_utils.go b/cluster-autoscaler/simulator/framework/test_utils.go index 9cdfd45eccd1..e637d2e03eae 100644 --- a/cluster-autoscaler/simulator/framework/test_utils.go +++ b/cluster-autoscaler/simulator/framework/test_utils.go @@ -18,8 +18,16 @@ package framework import ( apiv1 "k8s.io/api/core/v1" + "k8s.io/client-go/informers" + clientsetfake "k8s.io/client-go/kubernetes/fake" + scheduler_config_latest "k8s.io/kubernetes/pkg/scheduler/apis/config/latest" ) +// testFailer is an abstraction that covers both *testing.T and *testing.B. +type testFailer interface { + Fatalf(format string, args ...any) +} + // NewTestNodeInfo returns a new NodeInfo without any DRA information - only to be used in test code. // Production code should always take DRA objects into account. func NewTestNodeInfo(node *apiv1.Node, pods ...*apiv1.Pod) *NodeInfo { @@ -29,3 +37,25 @@ } return nodeInfo } + +// NewTestFrameworkHandle creates a Handle that can be used in tests. +func NewTestFrameworkHandle() (*Handle, error) { + defaultConfig, err := scheduler_config_latest.Default() + if err != nil { + return nil, err + } + fwHandle, err := NewHandle(informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0), defaultConfig) + if err != nil { + return nil, err + } + return fwHandle, nil +} + +// NewTestFrameworkHandleOrDie creates a Handle that can be used in tests. +func NewTestFrameworkHandleOrDie(t testFailer) *Handle { + handle, err := NewTestFrameworkHandle() + if err != nil { + t.Fatalf("NewTestFrameworkHandleOrDie: couldn't create test framework handle: %v", err) + } + return handle +} diff --git a/cluster-autoscaler/simulator/node_info_utils.go b/cluster-autoscaler/simulator/node_info_utils.go new file mode 100644 index 000000000000..943893db67a3 --- /dev/null +++ b/cluster-autoscaler/simulator/node_info_utils.go @@ -0,0 +1,153 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "fmt" + "math/rand" + + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/autoscaler/cluster-autoscaler/utils/labels" + pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" + "k8s.io/autoscaler/cluster-autoscaler/utils/taints" +) + +type nodeGroupTemplateNodeInfoGetter interface { + Id() string + TemplateNodeInfo() (*framework.NodeInfo, error) +} + +// TemplateNodeInfoFromNodeGroupTemplate returns a template NodeInfo object based on NodeGroup.TemplateNodeInfo(). The template is sanitized, and only +// contains the pods that should appear on a new Node from the same node group (e.g. DaemonSet pods). +func TemplateNodeInfoFromNodeGroupTemplate(nodeGroup nodeGroupTemplateNodeInfoGetter, daemonsets []*appsv1.DaemonSet, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) { + id := nodeGroup.Id() + baseNodeInfo, err := nodeGroup.TemplateNodeInfo() + if err != nil { + return nil, errors.ToAutoscalerError(errors.CloudProviderError, err) + } + labels.UpdateDeprecatedLabels(baseNodeInfo.Node().ObjectMeta.Labels) + + return TemplateNodeInfoFromExampleNodeInfo(baseNodeInfo, id, daemonsets, true, taintConfig) +} + +// TemplateNodeInfoFromExampleNodeInfo returns a template NodeInfo object based on a real example NodeInfo from the cluster. The template is sanitized, and only +// contains the pods that should appear on a new Node from the same node group (e.g. DaemonSet pods). +func TemplateNodeInfoFromExampleNodeInfo(example *framework.NodeInfo, nodeGroupId string, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool, taintConfig taints.TaintConfig) (*framework.NodeInfo, errors.AutoscalerError) { + randSuffix := fmt.Sprintf("%d", rand.Int63()) + newNodeNameBase := fmt.Sprintf("template-node-for-%s", nodeGroupId) + + // We need to sanitize the example before determining the DS pods, since taints are checked there, and + // we might need to filter some out during sanitization. + sanitizedExample := sanitizeNodeInfo(example, newNodeNameBase, randSuffix, &taintConfig) + expectedPods, err := podsExpectedOnFreshNode(sanitizedExample, daemonsets, forceDaemonSets, randSuffix) + if err != nil { + return nil, err + } + // No need to sanitize the expected pods again - they either come from sanitizedExample and were sanitized above, + // or were added by podsExpectedOnFreshNode and sanitized there. + return framework.NewNodeInfo(sanitizedExample.Node(), nil, expectedPods...), nil +} + +// FreshNodeInfoFromTemplateNodeInfo duplicates the provided template NodeInfo, returning a fresh NodeInfo that can be injected into the cluster snapshot. +// The NodeInfo is sanitized (names, UIDs are changed, etc.), so that it can be injected along other copies created from the same template. 
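// Editorial sketch, not part of the change: stamping out several distinct in-memory nodes from one
// sanitized template before injecting them into a snapshot; "template", "count", "snapshot" and the
// suffix format are assumed for illustration.
//
//	for i := 0; i < count; i++ {
//		freshNodeInfo := FreshNodeInfoFromTemplateNodeInfo(template, fmt.Sprintf("upcoming-%d", i))
//		if err := snapshot.AddNodeInfo(freshNodeInfo); err != nil {
//			return err
//		}
//	}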
+func FreshNodeInfoFromTemplateNodeInfo(template *framework.NodeInfo, suffix string) *framework.NodeInfo { + // Template node infos should already have taints and pods filtered, so not setting these parameters. + return sanitizeNodeInfo(template, template.Node().Name, suffix, nil) +} + +func sanitizeNodeInfo(nodeInfo *framework.NodeInfo, newNodeNameBase string, namesSuffix string, taintConfig *taints.TaintConfig) *framework.NodeInfo { + freshNodeName := fmt.Sprintf("%s-%s", newNodeNameBase, namesSuffix) + freshNode := sanitizeNode(nodeInfo.Node(), freshNodeName, taintConfig) + result := framework.NewNodeInfo(freshNode, nil) + + for _, podInfo := range nodeInfo.Pods() { + freshPod := sanitizePod(podInfo.Pod, freshNode.Name, namesSuffix) + result.AddPod(&framework.PodInfo{Pod: freshPod}) + } + return result +} + +func sanitizeNode(node *apiv1.Node, newName string, taintConfig *taints.TaintConfig) *apiv1.Node { + newNode := node.DeepCopy() + newNode.UID = uuid.NewUUID() + + newNode.Name = newName + if newNode.Labels == nil { + newNode.Labels = make(map[string]string) + } + newNode.Labels[apiv1.LabelHostname] = newName + + if taintConfig != nil { + newNode.Spec.Taints = taints.SanitizeTaints(newNode.Spec.Taints, *taintConfig) + } + return newNode +} + +func sanitizePod(pod *apiv1.Pod, nodeName, nameSuffix string) *apiv1.Pod { + sanitizedPod := pod.DeepCopy() + sanitizedPod.UID = uuid.NewUUID() + sanitizedPod.Name = fmt.Sprintf("%s-%s", pod.Name, nameSuffix) + sanitizedPod.Spec.NodeName = nodeName + return sanitizedPod +} + +func podsExpectedOnFreshNode(sanitizedExampleNodeInfo *framework.NodeInfo, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool, nameSuffix string) ([]*framework.PodInfo, errors.AutoscalerError) { + var result []*framework.PodInfo + runningDS := make(map[types.UID]bool) + for _, pod := range sanitizedExampleNodeInfo.Pods() { + // Ignore scheduled pods in deletion phase + if pod.DeletionTimestamp != nil { + continue + } + // Add scheduled mirror and DS pods + if pod_util.IsMirrorPod(pod.Pod) || pod_util.IsDaemonSetPod(pod.Pod) { + result = append(result, pod) + } + // Mark DS pods as running + controllerRef := metav1.GetControllerOf(pod) + if controllerRef != nil && controllerRef.Kind == "DaemonSet" { + runningDS[controllerRef.UID] = true + } + } + // Add all pending DS pods if force scheduling DS + if forceDaemonSets { + var pendingDS []*appsv1.DaemonSet + for _, ds := range daemonsets { + if !runningDS[ds.UID] { + pendingDS = append(pendingDS, ds) + } + } + // The provided nodeInfo has to have taints properly sanitized, or this won't work correctly. + daemonPods, err := daemonset.GetDaemonSetPodsForNode(sanitizedExampleNodeInfo, pendingDS) + if err != nil { + return nil, errors.ToAutoscalerError(errors.InternalError, err) + } + for _, pod := range daemonPods { + // There's technically no need to sanitize these pods since they're created from scratch, but + // it's nice to have the same suffix for all names in one sanitized NodeInfo when debugging. + result = append(result, &framework.PodInfo{Pod: sanitizePod(pod.Pod, sanitizedExampleNodeInfo.Node().Name, nameSuffix)}) + } + } + return result, nil +} diff --git a/cluster-autoscaler/simulator/node_info_utils_test.go b/cluster-autoscaler/simulator/node_info_utils_test.go new file mode 100644 index 000000000000..00350eb36947 --- /dev/null +++ b/cluster-autoscaler/simulator/node_info_utils_test.go @@ -0,0 +1,510 @@ +/* +Copyright 2024 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "fmt" + "math/rand" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/autoscaler/cluster-autoscaler/config" + "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" + "k8s.io/autoscaler/cluster-autoscaler/utils/errors" + "k8s.io/autoscaler/cluster-autoscaler/utils/taints" + . "k8s.io/autoscaler/cluster-autoscaler/utils/test" + "k8s.io/kubernetes/pkg/controller/daemon" +) + +var ( + ds1 = &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ds1", + Namespace: "ds1-namespace", + UID: types.UID("ds1"), + }, + } + ds2 = &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ds2", + Namespace: "ds2-namespace", + UID: types.UID("ds2"), + }, + } + ds3 = &appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ds3", + Namespace: "ds3-namespace", + UID: types.UID("ds3"), + }, + Spec: appsv1.DaemonSetSpec{ + Template: apiv1.PodTemplateSpec{ + Spec: apiv1.PodSpec{ + NodeSelector: map[string]string{"key": "value"}, + }, + }, + }, + } + testDaemonSets = []*appsv1.DaemonSet{ds1, ds2, ds3} +) + +func TestTemplateNodeInfoFromNodeGroupTemplate(t *testing.T) { + exampleNode := BuildTestNode("n", 1000, 10) + exampleNode.Spec.Taints = []apiv1.Taint{ + {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule}, + } + + for _, tc := range []struct { + testName string + nodeGroup *fakeNodeGroup + + wantPods []*apiv1.Pod + wantCpError bool + }{ + { + testName: "node group error results in an error", + nodeGroup: &fakeNodeGroup{templateNodeInfoErr: fmt.Errorf("test error")}, + wantCpError: true, + }, + { + testName: "simple template with no pods", + nodeGroup: &fakeNodeGroup{ + templateNodeInfoResult: framework.NewNodeInfo(exampleNode, nil), + }, + wantPods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + buildDSPod(ds2, "n"), + }, + }, + { + testName: "template with all kinds of pods", + nodeGroup: &fakeNodeGroup{ + templateNodeInfoResult: framework.NewNodeInfo(exampleNode, nil, + &framework.PodInfo{Pod: BuildScheduledTestPod("p1", 100, 1, "n")}, + &framework.PodInfo{Pod: BuildScheduledTestPod("p2", 100, 1, "n")}, + &framework.PodInfo{Pod: SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n"))}, + &framework.PodInfo{Pod: setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p4", 100, 1, "n")))}, + &framework.PodInfo{Pod: buildDSPod(ds1, "n")}, + &framework.PodInfo{Pod: setDeletionTimestamp(buildDSPod(ds2, "n"))}, + ), + }, + wantPods: []*apiv1.Pod{ + SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")), + buildDSPod(ds1, "n"), + buildDSPod(ds2, "n"), + }, + }, + } { + t.Run(tc.testName, func(t *testing.T) { + templateNodeInfo, err := TemplateNodeInfoFromNodeGroupTemplate(tc.nodeGroup, testDaemonSets, taints.TaintConfig{}) + if tc.wantCpError { + if err == nil || err.Type() != 
errors.CloudProviderError { + t.Fatalf("TemplateNodeInfoFromNodeGroupTemplate(): want CloudProviderError, but got: %v (%T)", err, err) + } else { + return + } + } + if err != nil { + t.Fatalf("TemplateNodeInfoFromNodeGroupTemplate(): expected no error, but got %v", err) + } + + // Verify that the taints are correctly sanitized. + // Verify that the NodeInfo is sanitized using the node group id as base. + // Pass empty string as nameSuffix so that it's auto-determined from the sanitized templateNodeInfo, because + // TemplateNodeInfoFromNodeGroupTemplate randomizes the suffix. + // Pass non-empty expectedPods to verify that the set of pods is changed as expected (e.g. DS pods added, non-DS/deleted pods removed). + if err := verifyNodeInfoSanitization(tc.nodeGroup.templateNodeInfoResult, templateNodeInfo, tc.wantPods, "template-node-for-"+tc.nodeGroup.id, "", nil); err != nil { + t.Fatalf("TemplateNodeInfoFromExampleNodeInfo(): NodeInfo wasn't properly sanitized: %v", err) + } + }) + } +} + +func TestTemplateNodeInfoFromExampleNodeInfo(t *testing.T) { + exampleNode := BuildTestNode("n", 1000, 10) + exampleNode.Spec.Taints = []apiv1.Taint{ + {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule}, + } + + testCases := []struct { + name string + pods []*apiv1.Pod + daemonSets []*appsv1.DaemonSet + forceDS bool + + wantPods []*apiv1.Pod + wantError bool + }{ + { + name: "node without any pods", + }, + { + name: "node with non-DS/mirror pods", + pods: []*apiv1.Pod{ + BuildScheduledTestPod("p1", 100, 1, "n"), + BuildScheduledTestPod("p2", 100, 1, "n"), + }, + wantPods: []*apiv1.Pod{}, + }, + { + name: "node with a mirror pod", + pods: []*apiv1.Pod{ + SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")), + }, + wantPods: []*apiv1.Pod{ + SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")), + }, + }, + { + name: "node with a deleted mirror pod", + pods: []*apiv1.Pod{ + SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")), + setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p2", 100, 1, "n"))), + }, + wantPods: []*apiv1.Pod{ + SetMirrorPodSpec(BuildScheduledTestPod("p1", 100, 1, "n")), + }, + }, + { + name: "node with DS pods [forceDS=false, no daemon sets]", + pods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + setDeletionTimestamp(buildDSPod(ds2, "n")), + }, + wantPods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + }, + }, + { + name: "node with DS pods [forceDS=false, some daemon sets]", + pods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + setDeletionTimestamp(buildDSPod(ds2, "n")), + }, + daemonSets: testDaemonSets, + wantPods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + }, + }, + { + name: "node with a DS pod [forceDS=true, no daemon sets]", + pods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + setDeletionTimestamp(buildDSPod(ds2, "n")), + }, + wantPods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + }, + forceDS: true, + }, + { + name: "node with a DS pod [forceDS=true, some daemon sets]", + pods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + setDeletionTimestamp(buildDSPod(ds2, "n")), + }, + daemonSets: testDaemonSets, + forceDS: true, + wantPods: []*apiv1.Pod{ + buildDSPod(ds1, "n"), + buildDSPod(ds2, "n"), + }, + }, + { + name: "everything together [forceDS=false]", + pods: []*apiv1.Pod{ + BuildScheduledTestPod("p1", 100, 1, "n"), + BuildScheduledTestPod("p2", 100, 1, "n"), + SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")), + setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p4", 100, 1, "n"))), + buildDSPod(ds1, "n"), + 
setDeletionTimestamp(buildDSPod(ds2, "n")), + }, + daemonSets: testDaemonSets, + wantPods: []*apiv1.Pod{ + SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")), + buildDSPod(ds1, "n"), + }, + }, + { + name: "everything together [forceDS=true]", + pods: []*apiv1.Pod{ + BuildScheduledTestPod("p1", 100, 1, "n"), + BuildScheduledTestPod("p2", 100, 1, "n"), + SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")), + setDeletionTimestamp(SetMirrorPodSpec(BuildScheduledTestPod("p4", 100, 1, "n"))), + buildDSPod(ds1, "n"), + setDeletionTimestamp(buildDSPod(ds2, "n")), + }, + daemonSets: testDaemonSets, + forceDS: true, + wantPods: []*apiv1.Pod{ + SetMirrorPodSpec(BuildScheduledTestPod("p3", 100, 1, "n")), + buildDSPod(ds1, "n"), + buildDSPod(ds2, "n"), + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + nodeGroupId := "nodeGroupId" + exampleNodeInfo := framework.NewNodeInfo(exampleNode, nil) + for _, pod := range tc.pods { + exampleNodeInfo.AddPod(&framework.PodInfo{Pod: pod}) + } + + templateNodeInfo, err := TemplateNodeInfoFromExampleNodeInfo(exampleNodeInfo, nodeGroupId, tc.daemonSets, tc.forceDS, taints.TaintConfig{}) + if tc.wantError { + if err == nil { + t.Fatal("TemplateNodeInfoFromExampleNodeInfo(): want error, but got nil") + } else { + return + } + } + if err != nil { + t.Fatalf("TemplateNodeInfoFromExampleNodeInfo(): expected no error, but got %v", err) + } + + // Verify that the taints are correctly sanitized. + // Verify that the NodeInfo is sanitized using the node group id as base. + // Pass empty string as nameSuffix so that it's auto-determined from the sanitized templateNodeInfo, because + // TemplateNodeInfoFromExampleNodeInfo randomizes the suffix. + // Pass non-empty expectedPods to verify that the set of pods is changed as expected (e.g. DS pods added, non-DS/deleted pods removed). + if err := verifyNodeInfoSanitization(exampleNodeInfo, templateNodeInfo, tc.wantPods, "template-node-for-"+nodeGroupId, "", nil); err != nil { + t.Fatalf("TemplateNodeInfoFromExampleNodeInfo(): NodeInfo wasn't properly sanitized: %v", err) + } + }) + } +} + +func TestFreshNodeInfoFromTemplateNodeInfo(t *testing.T) { + nodeName := "template-node" + templateNode := BuildTestNode(nodeName, 1000, 1000) + templateNode.Spec.Taints = []apiv1.Taint{ + {Key: "startup-taint", Value: "true", Effect: apiv1.TaintEffectNoSchedule}, + {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule}, + {Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule}, + } + pods := []*framework.PodInfo{ + {Pod: BuildTestPod("p1", 80, 0, WithNodeName(nodeName))}, + {Pod: BuildTestPod("p2", 80, 0, WithNodeName(nodeName))}, + } + templateNodeInfo := framework.NewNodeInfo(templateNode, nil, pods...) + + suffix := "abc" + freshNodeInfo := FreshNodeInfoFromTemplateNodeInfo(templateNodeInfo, suffix) + // Verify that the taints are not sanitized (they should be sanitized in the template already). + // Verify that the NodeInfo is sanitized using the template Node name as base. 
+ initialTaints := templateNodeInfo.Node().Spec.Taints + if err := verifyNodeInfoSanitization(templateNodeInfo, freshNodeInfo, nil, templateNodeInfo.Node().Name, suffix, initialTaints); err != nil { + t.Fatalf("FreshNodeInfoFromTemplateNodeInfo(): NodeInfo wasn't properly sanitized: %v", err) + } +} + +func TestSanitizeNodeInfo(t *testing.T) { + oldNodeName := "old-node" + basicNode := BuildTestNode(oldNodeName, 1000, 1000) + + labelsNode := basicNode.DeepCopy() + labelsNode.Labels = map[string]string{ + apiv1.LabelHostname: oldNodeName, + "a": "b", + "x": "y", + } + + taintsNode := basicNode.DeepCopy() + taintsNode.Spec.Taints = []apiv1.Taint{ + {Key: "startup-taint", Value: "true", Effect: apiv1.TaintEffectNoSchedule}, + {Key: taints.ToBeDeletedTaint, Value: "2312532423", Effect: apiv1.TaintEffectNoSchedule}, + {Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule}, + } + taintConfig := taints.NewTaintConfig(config.AutoscalingOptions{StartupTaints: []string{"startup-taint"}}) + + taintsLabelsNode := labelsNode.DeepCopy() + taintsLabelsNode.Spec.Taints = taintsNode.Spec.Taints + + pods := []*framework.PodInfo{ + {Pod: BuildTestPod("p1", 80, 0, WithNodeName(oldNodeName))}, + {Pod: BuildTestPod("p2", 80, 0, WithNodeName(oldNodeName))}, + } + + for _, tc := range []struct { + testName string + + nodeInfo *framework.NodeInfo + taintConfig *taints.TaintConfig + + wantTaints []apiv1.Taint + }{ + { + testName: "sanitize node", + nodeInfo: framework.NewTestNodeInfo(basicNode), + }, + { + testName: "sanitize node labels", + nodeInfo: framework.NewTestNodeInfo(labelsNode), + }, + { + testName: "sanitize node taints - disabled", + nodeInfo: framework.NewTestNodeInfo(taintsNode), + taintConfig: nil, + wantTaints: taintsNode.Spec.Taints, + }, + { + testName: "sanitize node taints - enabled", + nodeInfo: framework.NewTestNodeInfo(taintsNode), + taintConfig: &taintConfig, + wantTaints: []apiv1.Taint{{Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule}}, + }, + { + testName: "sanitize pods", + nodeInfo: framework.NewNodeInfo(basicNode, nil, pods...), + }, + { + testName: "sanitize everything", + nodeInfo: framework.NewNodeInfo(taintsLabelsNode, nil, pods...), + taintConfig: &taintConfig, + wantTaints: []apiv1.Taint{{Key: "a", Value: "b", Effect: apiv1.TaintEffectNoSchedule}}, + }, + } { + t.Run(tc.testName, func(t *testing.T) { + newNameBase := "node" + suffix := "abc" + sanitizedNodeInfo := sanitizeNodeInfo(tc.nodeInfo, newNameBase, suffix, tc.taintConfig) + if err := verifyNodeInfoSanitization(tc.nodeInfo, sanitizedNodeInfo, nil, newNameBase, suffix, tc.wantTaints); err != nil { + t.Fatalf("sanitizeNodeInfo(): NodeInfo wasn't properly sanitized: %v", err) + } + }) + } +} + +// verifyNodeInfoSanitization verifies whether sanitizedNodeInfo was correctly sanitized starting from initialNodeInfo, with the provided +// nameBase and nameSuffix. The expected taints aren't auto-determined, so wantTaints should always be provided. +// +// If nameSuffix is an empty string, the suffix will be determined from sanitizedNodeInfo. This is useful if +// the test doesn't know/control the name suffix (e.g. because it's randomized by the tested function). +// +// If expectedPods is nil, the set of pods is expected not to change between initialNodeInfo and sanitizedNodeInfo. If the sanitization is +// expected to change the set of pods, the expected set should be passed to expectedPods. 
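// A minimal illustrative sketch of how the helper below is meant to be called (the base/suffix values are
// made up, mirroring TestSanitizeNodeInfo above): with taint sanitization disabled (nil TaintConfig) the
// original taints should survive, so they are passed through as wantTaints:
//
//	sanitized := sanitizeNodeInfo(nodeInfo, "node", "abc", nil)
//	if err := verifyNodeInfoSanitization(nodeInfo, sanitized, nil, "node", "abc", nodeInfo.Node().Spec.Taints); err != nil {
//		t.Fatalf("sanitization mismatch: %v", err)
//	}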
+func verifyNodeInfoSanitization(initialNodeInfo, sanitizedNodeInfo *framework.NodeInfo, expectedPods []*apiv1.Pod, nameBase, nameSuffix string, wantTaints []apiv1.Taint) error { + if nameSuffix == "" { + // Determine the suffix from the provided sanitized NodeInfo - it should be the last part of a dash-separated name. + nameParts := strings.Split(sanitizedNodeInfo.Node().Name, "-") + if len(nameParts) < 2 { + return fmt.Errorf("sanitized NodeInfo name unexpected: want format -, got %q", sanitizedNodeInfo.Node().Name) + } + nameSuffix = nameParts[len(nameParts)-1] + } + if expectedPods != nil { + // If the sanitization is expected to change the set of pods, hack the initial NodeInfo to have the expected pods. + // Then we can just compare things pod-by-pod as if the set didn't change. + initialNodeInfo = framework.NewNodeInfo(initialNodeInfo.Node(), nil) + for _, pod := range expectedPods { + initialNodeInfo.AddPod(&framework.PodInfo{Pod: pod}) + } + } + + // Verification below assumes the same set of pods between initialNodeInfo and sanitizedNodeInfo. + wantNodeName := fmt.Sprintf("%s-%s", nameBase, nameSuffix) + if gotName := sanitizedNodeInfo.Node().Name; gotName != wantNodeName { + return fmt.Errorf("want sanitized Node name %q, got %q", wantNodeName, gotName) + } + if gotUid, oldUid := sanitizedNodeInfo.Node().UID, initialNodeInfo.Node().UID; gotUid == "" || gotUid == oldUid { + return fmt.Errorf("sanitized Node UID wasn't randomized - got %q, old UID was %q", gotUid, oldUid) + } + wantLabels := make(map[string]string) + for k, v := range initialNodeInfo.Node().Labels { + wantLabels[k] = v + } + wantLabels[apiv1.LabelHostname] = wantNodeName + if diff := cmp.Diff(wantLabels, sanitizedNodeInfo.Node().Labels); diff != "" { + return fmt.Errorf("sanitized Node labels unexpected, diff (-want +got): %s", diff) + } + if diff := cmp.Diff(wantTaints, sanitizedNodeInfo.Node().Spec.Taints); diff != "" { + return fmt.Errorf("sanitized Node taints unexpected, diff (-want +got): %s", diff) + } + if diff := cmp.Diff(initialNodeInfo.Node(), sanitizedNodeInfo.Node(), + cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name", "Labels", "UID"), + cmpopts.IgnoreFields(apiv1.NodeSpec{}, "Taints"), + ); diff != "" { + return fmt.Errorf("sanitized Node unexpected diff (-want +got): %s", diff) + } + + oldPods := initialNodeInfo.Pods() + newPods := sanitizedNodeInfo.Pods() + if len(oldPods) != len(newPods) { + return fmt.Errorf("want %d pods in sanitized NodeInfo, got %d", len(oldPods), len(newPods)) + } + for i, newPod := range newPods { + oldPod := oldPods[i] + + if newPod.Name == oldPod.Name || !strings.HasSuffix(newPod.Name, nameSuffix) { + return fmt.Errorf("sanitized Pod name unexpected: want (different than %q, ending in %q), got %q", oldPod.Name, nameSuffix, newPod.Name) + } + if gotUid, oldUid := newPod.UID, oldPod.UID; gotUid == "" || gotUid == oldUid { + return fmt.Errorf("sanitized Pod UID wasn't randomized - got %q, old UID was %q", gotUid, oldUid) + } + if gotNodeName := newPod.Spec.NodeName; gotNodeName != wantNodeName { + return fmt.Errorf("want sanitized Pod.Spec.NodeName %q, got %q", wantNodeName, gotNodeName) + } + if diff := cmp.Diff(oldPod, newPod, + cmpopts.IgnoreFields(metav1.ObjectMeta{}, "Name", "UID"), + cmpopts.IgnoreFields(apiv1.PodSpec{}, "NodeName"), + ); diff != "" { + return fmt.Errorf("sanitized Pod unexpected diff (-want +got): %s", diff) + } + } + return nil +} + +func buildDSPod(ds *appsv1.DaemonSet, nodeName string) *apiv1.Pod { + pod := daemon.NewPod(ds, nodeName) + pod.Name = 
fmt.Sprintf("%s-pod-%d", ds.Name, rand.Int63()) + ptrVal := true + pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ + {Kind: "DaemonSet", UID: ds.UID, Name: ds.Name, Controller: &ptrVal}, + } + return pod +} + +func setDeletionTimestamp(pod *apiv1.Pod) *apiv1.Pod { + now := metav1.NewTime(time.Now()) + pod.DeletionTimestamp = &now + return pod +} + +type fakeNodeGroup struct { + id string + templateNodeInfoResult *framework.NodeInfo + templateNodeInfoErr error +} + +func (f *fakeNodeGroup) Id() string { + return f.id +} + +func (f *fakeNodeGroup) TemplateNodeInfo() (*framework.NodeInfo, error) { + return f.templateNodeInfoResult, f.templateNodeInfoErr +} diff --git a/cluster-autoscaler/simulator/nodes.go b/cluster-autoscaler/simulator/nodes.go deleted file mode 100644 index c80fe0cbe092..000000000000 --- a/cluster-autoscaler/simulator/nodes.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package simulator - -import ( - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset" - "k8s.io/autoscaler/cluster-autoscaler/utils/errors" - - pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod" -) - -// BuildNodeInfoForNode build a NodeInfo structure for the given node as if the node was just created. 
-func BuildNodeInfoForNode(node *apiv1.Node, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*framework.NodeInfo, errors.AutoscalerError) { - nodeInfo := framework.NewNodeInfo(node, nil) - return addExpectedPods(nodeInfo, scheduledPods, daemonsets, forceDaemonSets) -} - -func addExpectedPods(nodeInfo *framework.NodeInfo, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*framework.NodeInfo, errors.AutoscalerError) { - runningDS := make(map[types.UID]bool) - for _, pod := range scheduledPods { - // Ignore scheduled pods in deletion phase - if pod.DeletionTimestamp != nil { - continue - } - // Add scheduled mirror and DS pods - if pod_util.IsMirrorPod(pod) || pod_util.IsDaemonSetPod(pod) { - nodeInfo.AddPod(&framework.PodInfo{Pod: pod}) - } - // Mark DS pods as running - controllerRef := metav1.GetControllerOf(pod) - if controllerRef != nil && controllerRef.Kind == "DaemonSet" { - runningDS[controllerRef.UID] = true - } - } - // Add all pending DS pods if force scheduling DS - if forceDaemonSets { - var pendingDS []*appsv1.DaemonSet - for _, ds := range daemonsets { - if !runningDS[ds.UID] { - pendingDS = append(pendingDS, ds) - } - } - daemonPods, err := daemonset.GetDaemonSetPodsForNode(nodeInfo, pendingDS) - if err != nil { - return nil, errors.ToAutoscalerError(errors.InternalError, err) - } - for _, pod := range daemonPods { - nodeInfo.AddPod(pod) - } - } - return nodeInfo, nil -} diff --git a/cluster-autoscaler/simulator/nodes_test.go b/cluster-autoscaler/simulator/nodes_test.go deleted file mode 100644 index b931979de6cb..000000000000 --- a/cluster-autoscaler/simulator/nodes_test.go +++ /dev/null @@ -1,239 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package simulator - -import ( - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/autoscaler/cluster-autoscaler/utils/test" - "k8s.io/kubernetes/pkg/controller/daemon" -) - -func TestBuildNodeInfoForNode(t *testing.T) { - ds1 := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ds1", - Namespace: "ds1-namespace", - UID: types.UID("ds1"), - }, - } - - ds2 := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ds2", - Namespace: "ds2-namespace", - UID: types.UID("ds2"), - }, - } - - ds3 := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: "ds3", - Namespace: "ds3-namespace", - UID: types.UID("ds3"), - }, - Spec: appsv1.DaemonSetSpec{ - Template: apiv1.PodTemplateSpec{ - Spec: apiv1.PodSpec{ - NodeSelector: map[string]string{"key": "value"}, - }, - }, - }, - } - - testCases := []struct { - name string - node *apiv1.Node - pods []*apiv1.Pod - daemonSets []*appsv1.DaemonSet - forceDS bool - - wantPods []*apiv1.Pod - wantError bool - }{ - { - name: "node without any pods", - node: test.BuildTestNode("n", 1000, 10), - }, - { - name: "node with non-DS/mirror pods", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - test.BuildScheduledTestPod("p1", 100, 1, "n"), - test.BuildScheduledTestPod("p2", 100, 1, "n"), - }, - }, - { - name: "node with a mirror pod", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")), - }, - wantPods: []*apiv1.Pod{ - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")), - }, - }, - { - name: "node with a deleted mirror pod", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")), - setDeletionTimestamp(test.SetMirrorPodSpec(test.BuildScheduledTestPod("p2", 100, 1, "n"))), - }, - wantPods: []*apiv1.Pod{ - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p1", 100, 1, "n")), - }, - }, - { - name: "node with DS pods [forceDS=false, no daemon sets]", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - setDeletionTimestamp(buildDSPod(ds2, "n")), - }, - wantPods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - }, - }, - { - name: "node with DS pods [forceDS=false, some daemon sets]", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - setDeletionTimestamp(buildDSPod(ds2, "n")), - }, - daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3}, - wantPods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - }, - }, - { - name: "node with a DS pod [forceDS=true, no daemon sets]", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - setDeletionTimestamp(buildDSPod(ds2, "n")), - }, - wantPods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - }, - forceDS: true, - }, - { - name: "node with a DS pod [forceDS=true, some daemon sets]", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - setDeletionTimestamp(buildDSPod(ds2, "n")), - }, - daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3}, - forceDS: true, - wantPods: []*apiv1.Pod{ - buildDSPod(ds1, "n"), - buildDSPod(ds2, "n"), - }, - }, - { - name: "everything together [forceDS=false]", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - test.BuildScheduledTestPod("p1", 100, 1, "n"), - 
test.BuildScheduledTestPod("p2", 100, 1, "n"), - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")), - setDeletionTimestamp(test.SetMirrorPodSpec(test.BuildScheduledTestPod("p4", 100, 1, "n"))), - buildDSPod(ds1, "n"), - setDeletionTimestamp(buildDSPod(ds2, "n")), - }, - daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3}, - wantPods: []*apiv1.Pod{ - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")), - buildDSPod(ds1, "n"), - }, - }, - { - name: "everything together [forceDS=true]", - node: test.BuildTestNode("n", 1000, 10), - pods: []*apiv1.Pod{ - test.BuildScheduledTestPod("p1", 100, 1, "n"), - test.BuildScheduledTestPod("p2", 100, 1, "n"), - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")), - setDeletionTimestamp(test.SetMirrorPodSpec(test.BuildScheduledTestPod("p4", 100, 1, "n"))), - buildDSPod(ds1, "n"), - setDeletionTimestamp(buildDSPod(ds2, "n")), - }, - daemonSets: []*appsv1.DaemonSet{ds1, ds2, ds3}, - forceDS: true, - wantPods: []*apiv1.Pod{ - test.SetMirrorPodSpec(test.BuildScheduledTestPod("p3", 100, 1, "n")), - buildDSPod(ds1, "n"), - buildDSPod(ds2, "n"), - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - nodeInfo, err := BuildNodeInfoForNode(tc.node, tc.pods, tc.daemonSets, tc.forceDS) - - if tc.wantError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Equal(t, nodeInfo.Node(), tc.node) - - // clean pod metadata for comparison purposes - var wantPods, pods []*apiv1.Pod - for _, pod := range tc.wantPods { - wantPods = append(wantPods, cleanPodMetadata(pod)) - } - for _, podInfo := range nodeInfo.Pods() { - pods = append(pods, cleanPodMetadata(podInfo.Pod)) - } - assert.ElementsMatch(t, tc.wantPods, pods) - } - }) - } -} - -func cleanPodMetadata(pod *apiv1.Pod) *apiv1.Pod { - pod.Name = strings.Split(pod.Name, "-")[0] - pod.OwnerReferences = nil - return pod -} - -func buildDSPod(ds *appsv1.DaemonSet, nodeName string) *apiv1.Pod { - pod := daemon.NewPod(ds, nodeName) - pod.Name = ds.Name - ptrVal := true - pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ - {Kind: "DaemonSet", UID: ds.UID, Controller: &ptrVal}, - } - return pod -} - -func setDeletionTimestamp(pod *apiv1.Pod) *apiv1.Pod { - now := metav1.NewTime(time.Now()) - pod.DeletionTimestamp = &now - return pod -} diff --git a/cluster-autoscaler/simulator/predicatechecker/error.go b/cluster-autoscaler/simulator/predicatechecker/error.go deleted file mode 100644 index 9e4d6de29d71..000000000000 --- a/cluster-autoscaler/simulator/predicatechecker/error.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package predicatechecker - -import ( - "fmt" - "strings" -) - -// PredicateErrorType is type of predicate error -type PredicateErrorType int - -const ( - // NotSchedulablePredicateError means that one of the filters returned that pod does not fit a node - NotSchedulablePredicateError PredicateErrorType = iota - // InternalPredicateError denotes internal unexpected error while calling PredicateChecker - InternalPredicateError -) - -// PredicateError is a structure representing error returned from predicate checking simulation. -type PredicateError struct { - errorType PredicateErrorType - predicateName string - errorMessage string - reasons []string - // debugInfo contains additional info that predicate doesn't include, - // but may be useful for debugging (e.g. taints on node blocking scale-up) - debugInfo func() string -} - -// ErrorType returns if error was internal of names predicate failure. -func (pe *PredicateError) ErrorType() PredicateErrorType { - return pe.errorType -} - -// PredicateName return name of predicate which failed. -func (pe *PredicateError) PredicateName() string { - return pe.predicateName -} - -// Message returns error message. -func (pe *PredicateError) Message() string { - if pe.errorMessage == "" { - return "unknown error" - } - return pe.errorMessage -} - -// VerboseMessage generates verbose error message. Building verbose message may be expensive so number of calls should be -// limited. -func (pe *PredicateError) VerboseMessage() string { - return fmt.Sprintf( - "%s; predicateName=%s; reasons: %s; debugInfo=%s", - pe.Message(), - pe.predicateName, - strings.Join(pe.reasons, ", "), - pe.debugInfo()) -} - -// Reasons returns failure reasons from failed predicate as a slice of strings. -func (pe *PredicateError) Reasons() []string { - return pe.reasons -} - -// NewPredicateError creates a new predicate error from error and reasons. -func NewPredicateError( - errorType PredicateErrorType, - predicateName string, - errorMessage string, - reasons []string, - debugInfo func() string, -) *PredicateError { - return &PredicateError{ - errorType: errorType, - predicateName: predicateName, - errorMessage: errorMessage, - reasons: reasons, - debugInfo: debugInfo, - } -} - -// GenericPredicateError return a generic instance of PredicateError to be used in context where predicate name is not -// know. -func GenericPredicateError() *PredicateError { - return &PredicateError{ - errorType: NotSchedulablePredicateError, - errorMessage: "generic predicate failure", - } -} - -func emptyString() string { - return "" -} diff --git a/cluster-autoscaler/simulator/predicatechecker/interface.go b/cluster-autoscaler/simulator/predicatechecker/interface.go deleted file mode 100644 index 2d35b779172c..000000000000 --- a/cluster-autoscaler/simulator/predicatechecker/interface.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package predicatechecker - -import ( - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" - "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - - apiv1 "k8s.io/api/core/v1" -) - -// PredicateChecker checks whether all required predicates pass for given Pod and Node. -type PredicateChecker interface { - FitsAnyNode(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod) (string, error) - FitsAnyNodeMatching(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeMatches func(*framework.NodeInfo) bool) (string, error) - CheckPredicates(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeName string) *PredicateError -} diff --git a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go b/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go deleted file mode 100644 index 4e37e97528a2..000000000000 --- a/cluster-autoscaler/simulator/predicatechecker/schedulerbased.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package predicatechecker - -import ( - "context" - "fmt" - - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" - "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/client-go/informers" - v1listers "k8s.io/client-go/listers/core/v1" - klog "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - scheduler_config "k8s.io/kubernetes/pkg/scheduler/apis/config/latest" - schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework" - scheduler_plugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins" - schedulerframeworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime" -) - -// SchedulerBasedPredicateChecker checks whether all required predicates pass for given Pod and Node. -// The verification is done by calling out to scheduler code. -type SchedulerBasedPredicateChecker struct { - framework schedulerframework.Framework - delegatingSharedLister *DelegatingSchedulerSharedLister - nodeLister v1listers.NodeLister - podLister v1listers.PodLister - lastIndex int -} - -// NewSchedulerBasedPredicateChecker builds scheduler based PredicateChecker. 
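For reference, the per-node check this type used to provide now appears to live on the ClusterSnapshot itself, as used by the reworked HintingSimulator further down in this diff. A rough sketch; the snapshot, pod and node variables are illustrative:

	// Try scheduling the pod on one concrete node in the snapshot.
	if err := clusterSnapshot.SchedulePod(pod, nodeName); err != nil {
		if err.Type() == clustersnapshot.FailingPredicateError {
			// The pod doesn't pass the scheduler predicates on this node.
		} else {
			// Any other error type is unexpected.
		}
	}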
-func NewSchedulerBasedPredicateChecker(informerFactory informers.SharedInformerFactory, schedConfig *config.KubeSchedulerConfiguration) (*SchedulerBasedPredicateChecker, error) { - if schedConfig == nil { - var err error - schedConfig, err = scheduler_config.Default() - if err != nil { - return nil, fmt.Errorf("couldn't create scheduler config: %v", err) - } - } - - if len(schedConfig.Profiles) != 1 { - return nil, fmt.Errorf("unexpected scheduler config: expected one scheduler profile only (found %d profiles)", len(schedConfig.Profiles)) - } - sharedLister := NewDelegatingSchedulerSharedLister() - - framework, err := schedulerframeworkruntime.NewFramework( - context.TODO(), - scheduler_plugins.NewInTreeRegistry(), - &schedConfig.Profiles[0], - schedulerframeworkruntime.WithInformerFactory(informerFactory), - schedulerframeworkruntime.WithSnapshotSharedLister(sharedLister), - ) - - if err != nil { - return nil, fmt.Errorf("couldn't create scheduler framework; %v", err) - } - - checker := &SchedulerBasedPredicateChecker{ - framework: framework, - delegatingSharedLister: sharedLister, - } - - return checker, nil -} - -// FitsAnyNode checks if the given pod can be placed on any of the given nodes. -func (p *SchedulerBasedPredicateChecker) FitsAnyNode(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod) (string, error) { - return p.FitsAnyNodeMatching(clusterSnapshot, pod, func(*framework.NodeInfo) bool { - return true - }) -} - -// FitsAnyNodeMatching checks if the given pod can be placed on any of the given nodes matching the provided function. -func (p *SchedulerBasedPredicateChecker) FitsAnyNodeMatching(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeMatches func(*framework.NodeInfo) bool) (string, error) { - if clusterSnapshot == nil { - return "", fmt.Errorf("ClusterSnapshot not provided") - } - - nodeInfosList, err := clusterSnapshot.ListNodeInfos() - if err != nil { - // This should never happen. - // - // Scheduler requires interface returning error, but no implementation - // of ClusterSnapshot ever does it. - klog.Errorf("Error obtaining nodeInfos from schedulerLister") - return "", fmt.Errorf("error obtaining nodeInfos from schedulerLister") - } - - p.delegatingSharedLister.UpdateDelegate(clusterSnapshot) - defer p.delegatingSharedLister.ResetDelegate() - - state := schedulerframework.NewCycleState() - preFilterResult, preFilterStatus, _ := p.framework.RunPreFilterPlugins(context.TODO(), state, pod) - if !preFilterStatus.IsSuccess() { - return "", fmt.Errorf("error running pre filter plugins for pod %s; %s", pod.Name, preFilterStatus.Message()) - } - - for i := range nodeInfosList { - nodeInfo := nodeInfosList[(p.lastIndex+i)%len(nodeInfosList)] - if !nodeMatches(nodeInfo) { - continue - } - - if !preFilterResult.AllNodes() && !preFilterResult.NodeNames.Has(nodeInfo.Node().Name) { - continue - } - - // Be sure that the node is schedulable. - if nodeInfo.Node().Spec.Unschedulable { - continue - } - - filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo.ToScheduler()) - if filterStatus.IsSuccess() { - p.lastIndex = (p.lastIndex + i + 1) % len(nodeInfosList) - return nodeInfo.Node().Name, nil - } - } - return "", fmt.Errorf("cannot put pod %s on any node", pod.Name) -} - -// CheckPredicates checks if the given pod can be placed on the given node. 
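Similarly, the any-node lookup appears to be replaced by SchedulePodOnAnyNodeMatching, with the snapshot's error type distinguishing "fits nowhere" from unexpected failures. A sketch with an accept-all filter; all names are illustrative:

	nodeName, err := clusterSnapshot.SchedulePodOnAnyNodeMatching(pod, func(*framework.NodeInfo) bool { return true })
	switch {
	case err == nil:
		// The pod was scheduled on nodeName within the snapshot.
		_ = nodeName
	case err.Type() == clustersnapshot.NoNodesPassingPredicatesFoundError:
		// No node in the snapshot passes the predicates for this pod.
	default:
		// Unexpected error.
	}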
-func (p *SchedulerBasedPredicateChecker) CheckPredicates(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, nodeName string) *PredicateError { - if clusterSnapshot == nil { - return NewPredicateError(InternalPredicateError, "", "ClusterSnapshot not provided", nil, emptyString) - } - nodeInfo, err := clusterSnapshot.GetNodeInfo(nodeName) - if err != nil { - errorMessage := fmt.Sprintf("Error obtaining NodeInfo for name %s; %v", nodeName, err) - return NewPredicateError(InternalPredicateError, "", errorMessage, nil, emptyString) - } - - p.delegatingSharedLister.UpdateDelegate(clusterSnapshot) - defer p.delegatingSharedLister.ResetDelegate() - - state := schedulerframework.NewCycleState() - _, preFilterStatus, _ := p.framework.RunPreFilterPlugins(context.TODO(), state, pod) - if !preFilterStatus.IsSuccess() { - return NewPredicateError( - InternalPredicateError, - "", - preFilterStatus.Message(), - preFilterStatus.Reasons(), - emptyString) - } - - filterStatus := p.framework.RunFilterPlugins(context.TODO(), state, pod, nodeInfo.ToScheduler()) - - if !filterStatus.IsSuccess() { - filterName := filterStatus.Plugin() - filterMessage := filterStatus.Message() - filterReasons := filterStatus.Reasons() - if filterStatus.IsRejected() { - return NewPredicateError( - NotSchedulablePredicateError, - filterName, - filterMessage, - filterReasons, - p.buildDebugInfo(filterName, nodeInfo)) - } - return NewPredicateError( - InternalPredicateError, - filterName, - filterMessage, - filterReasons, - p.buildDebugInfo(filterName, nodeInfo)) - } - - return nil -} - -func (p *SchedulerBasedPredicateChecker) buildDebugInfo(filterName string, nodeInfo *framework.NodeInfo) func() string { - switch filterName { - case "TaintToleration": - taints := nodeInfo.Node().Spec.Taints - return func() string { - return fmt.Sprintf("taints on node: %#v", taints) - } - default: - return emptyString - } -} diff --git a/cluster-autoscaler/simulator/predicatechecker/schedulerbased_test.go b/cluster-autoscaler/simulator/predicatechecker/schedulerbased_test.go deleted file mode 100644 index 44b7ebf60d00..000000000000 --- a/cluster-autoscaler/simulator/predicatechecker/schedulerbased_test.go +++ /dev/null @@ -1,321 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package predicatechecker - -import ( - "os" - "path/filepath" - "testing" - "time" - - testconfig "k8s.io/autoscaler/cluster-autoscaler/config/test" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" - scheduler "k8s.io/autoscaler/cluster-autoscaler/utils/scheduler" - . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - "github.com/stretchr/testify/assert" - - apiv1 "k8s.io/api/core/v1" -) - -func TestCheckPredicate(t *testing.T) { - schedulermetrics.Register() - - p450 := BuildTestPod("p450", 450, 500000) - p600 := BuildTestPod("p600", 600, 500000) - p8000 := BuildTestPod("p8000", 8000, 0) - p500 := BuildTestPod("p500", 500, 500000) - - n1000 := BuildTestNode("n1000", 1000, 2000000) - SetNodeReadyState(n1000, true, time.Time{}) - n1000Unschedulable := BuildTestNode("n1000", 1000, 2000000) - SetNodeReadyState(n1000Unschedulable, true, time.Time{}) - - defaultPredicateChecker, err := NewTestPredicateChecker() - assert.NoError(t, err) - - // temp dir - tmpDir, err := os.MkdirTemp("", "scheduler-configs") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - customConfigFile := filepath.Join(tmpDir, "custom_config.yaml") - if err := os.WriteFile(customConfigFile, - []byte(testconfig.SchedulerConfigNodeResourcesFitDisabled), - os.FileMode(0600)); err != nil { - t.Fatal(err) - } - - customConfig, err := scheduler.ConfigFromPath(customConfigFile) - assert.NoError(t, err) - customPredicateChecker, err := NewTestPredicateCheckerWithCustomConfig(customConfig) - assert.NoError(t, err) - - tests := []struct { - name string - node *apiv1.Node - scheduledPods []*apiv1.Pod - testPod *apiv1.Pod - predicateChecker PredicateChecker - expectError bool - }{ - // default predicate checker test cases - { - name: "default - other pod - insuficient cpu", - node: n1000, - scheduledPods: []*apiv1.Pod{p450}, - testPod: p600, - expectError: true, - predicateChecker: defaultPredicateChecker, - }, - { - name: "default - other pod - ok", - node: n1000, - scheduledPods: []*apiv1.Pod{p450}, - testPod: p500, - expectError: false, - predicateChecker: defaultPredicateChecker, - }, - { - name: "default - empty - insuficient cpu", - node: n1000, - scheduledPods: []*apiv1.Pod{}, - testPod: p8000, - expectError: true, - predicateChecker: defaultPredicateChecker, - }, - { - name: "default - empty - ok", - node: n1000, - scheduledPods: []*apiv1.Pod{}, - testPod: p600, - expectError: false, - predicateChecker: defaultPredicateChecker, - }, - // custom predicate checker test cases - { - name: "custom - other pod - ok", - node: n1000, - scheduledPods: []*apiv1.Pod{p450}, - testPod: p600, - expectError: false, - predicateChecker: customPredicateChecker, - }, - { - name: "custom -other pod - ok", - node: n1000, - scheduledPods: []*apiv1.Pod{p450}, - testPod: p500, - expectError: false, - predicateChecker: customPredicateChecker, - }, - { - name: "custom -empty - ok", - node: n1000, - scheduledPods: []*apiv1.Pod{}, - testPod: p8000, - expectError: false, - predicateChecker: customPredicateChecker, - }, - { - name: "custom -empty - ok", - node: n1000, - scheduledPods: []*apiv1.Pod{}, - testPod: p600, - expectError: false, - predicateChecker: customPredicateChecker, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var err error - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - err = clusterSnapshot.AddNodeWithPods(tt.node, tt.scheduledPods) - assert.NoError(t, err) - - predicateError := tt.predicateChecker.CheckPredicates(clusterSnapshot, tt.testPod, tt.node.Name) - if tt.expectError { - assert.NotNil(t, predicateError) - assert.Equal(t, NotSchedulablePredicateError, predicateError.ErrorType()) - assert.Equal(t, "Insufficient cpu", predicateError.Message()) - assert.Contains(t, 
predicateError.VerboseMessage(), "Insufficient cpu; predicateName=NodeResourcesFit") - } else { - assert.Nil(t, predicateError) - } - }) - } -} - -func TestFitsAnyNode(t *testing.T) { - p900 := BuildTestPod("p900", 900, 1000) - p1900 := BuildTestPod("p1900", 1900, 1000) - p2100 := BuildTestPod("p2100", 2100, 1000) - - n1000 := BuildTestNode("n1000", 1000, 2000000) - n2000 := BuildTestNode("n2000", 2000, 2000000) - - defaultPredicateChecker, err := NewTestPredicateChecker() - assert.NoError(t, err) - - // temp dir - tmpDir, err := os.MkdirTemp("", "scheduler-configs") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - customConfigFile := filepath.Join(tmpDir, "custom_config.yaml") - if err := os.WriteFile(customConfigFile, - []byte(testconfig.SchedulerConfigNodeResourcesFitDisabled), - os.FileMode(0600)); err != nil { - t.Fatal(err) - } - - customConfig, err := scheduler.ConfigFromPath(customConfigFile) - assert.NoError(t, err) - customPredicateChecker, err := NewTestPredicateCheckerWithCustomConfig(customConfig) - assert.NoError(t, err) - - testCases := []struct { - name string - predicateChecker PredicateChecker - pod *apiv1.Pod - expectedNodes []string - expectError bool - }{ - // default predicate checker test cases - { - name: "default - small pod - no error", - predicateChecker: defaultPredicateChecker, - pod: p900, - expectedNodes: []string{"n1000", "n2000"}, - expectError: false, - }, - { - name: "default - medium pod - no error", - predicateChecker: defaultPredicateChecker, - pod: p1900, - expectedNodes: []string{"n2000"}, - expectError: false, - }, - { - name: "default - large pod - insufficient cpu", - predicateChecker: defaultPredicateChecker, - pod: p2100, - expectError: true, - }, - - // custom predicate checker test cases - { - name: "custom - small pod - no error", - predicateChecker: customPredicateChecker, - pod: p900, - expectedNodes: []string{"n1000", "n2000"}, - expectError: false, - }, - { - name: "custom - medium pod - no error", - predicateChecker: customPredicateChecker, - pod: p1900, - expectedNodes: []string{"n1000", "n2000"}, - expectError: false, - }, - { - name: "custom - large pod - insufficient cpu", - predicateChecker: customPredicateChecker, - pod: p2100, - expectedNodes: []string{"n1000", "n2000"}, - expectError: false, - }, - } - - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - err = clusterSnapshot.AddNode(n1000) - assert.NoError(t, err) - err = clusterSnapshot.AddNode(n2000) - assert.NoError(t, err) - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - nodeName, err := tc.predicateChecker.FitsAnyNode(clusterSnapshot, tc.pod) - if tc.expectError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.Contains(t, tc.expectedNodes, nodeName) - } - }) - } - -} - -func TestDebugInfo(t *testing.T) { - p1 := BuildTestPod("p1", 0, 0) - node1 := BuildTestNode("n1", 1000, 2000000) - node1.Spec.Taints = []apiv1.Taint{ - { - Key: "SomeTaint", - Value: "WhyNot?", - Effect: apiv1.TaintEffectNoSchedule, - }, - { - Key: "RandomTaint", - Value: "JustBecause", - Effect: apiv1.TaintEffectNoExecute, - }, - } - SetNodeReadyState(node1, true, time.Time{}) - - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - - err := clusterSnapshot.AddNode(node1) - assert.NoError(t, err) - - // with default predicate checker - defaultPredicateChecker, err := NewTestPredicateChecker() - assert.NoError(t, err) - predicateErr := defaultPredicateChecker.CheckPredicates(clusterSnapshot, p1, "n1") - 
assert.NotNil(t, predicateErr) - assert.Equal(t, "node(s) had untolerated taint {SomeTaint: WhyNot?}", predicateErr.Message()) - assert.Contains(t, predicateErr.VerboseMessage(), "RandomTaint") - - // with custom predicate checker - - // temp dir - tmpDir, err := os.MkdirTemp("", "scheduler-configs") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - customConfigFile := filepath.Join(tmpDir, "custom_config.yaml") - if err := os.WriteFile(customConfigFile, - []byte(testconfig.SchedulerConfigTaintTolerationDisabled), - os.FileMode(0600)); err != nil { - t.Fatal(err) - } - - customConfig, err := scheduler.ConfigFromPath(customConfigFile) - assert.NoError(t, err) - customPredicateChecker, err := NewTestPredicateCheckerWithCustomConfig(customConfig) - assert.NoError(t, err) - predicateErr = customPredicateChecker.CheckPredicates(clusterSnapshot, p1, "n1") - assert.Nil(t, predicateErr) -} diff --git a/cluster-autoscaler/simulator/predicatechecker/testchecker.go b/cluster-autoscaler/simulator/predicatechecker/testchecker.go deleted file mode 100644 index dd9e1745acac..000000000000 --- a/cluster-autoscaler/simulator/predicatechecker/testchecker.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package predicatechecker - -import ( - "k8s.io/client-go/informers" - clientsetfake "k8s.io/client-go/kubernetes/fake" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - scheduler_config_latest "k8s.io/kubernetes/pkg/scheduler/apis/config/latest" -) - -// NewTestPredicateChecker builds test version of PredicateChecker. -func NewTestPredicateChecker() (PredicateChecker, error) { - schedConfig, err := scheduler_config_latest.Default() - if err != nil { - return nil, err - } - - // just call out to NewSchedulerBasedPredicateChecker but use fake kubeClient - return NewSchedulerBasedPredicateChecker(informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0), schedConfig) -} - -// NewTestPredicateCheckerWithCustomConfig builds test version of PredicateChecker with custom scheduler config. -func NewTestPredicateCheckerWithCustomConfig(schedConfig *config.KubeSchedulerConfiguration) (PredicateChecker, error) { - if schedConfig != nil { - // just call out to NewSchedulerBasedPredicateChecker but use fake kubeClient - return NewSchedulerBasedPredicateChecker(informers.NewSharedInformerFactory(clientsetfake.NewSimpleClientset(), 0), schedConfig) - } - - return NewTestPredicateChecker() -} diff --git a/cluster-autoscaler/simulator/scheduling/hinting_simulator.go b/cluster-autoscaler/simulator/scheduling/hinting_simulator.go index 2287d28810e4..0bbf77966e0f 100644 --- a/cluster-autoscaler/simulator/scheduling/hinting_simulator.go +++ b/cluster-autoscaler/simulator/scheduling/hinting_simulator.go @@ -17,11 +17,8 @@ limitations under the License. 
package scheduling import ( - "fmt" - "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" "k8s.io/autoscaler/cluster-autoscaler/utils/klogx" apiv1 "k8s.io/api/core/v1" @@ -35,15 +32,13 @@ type Status struct { // HintingSimulator is a helper object for simulating scheduler behavior. type HintingSimulator struct { - predicateChecker predicatechecker.PredicateChecker - hints *Hints + hints *Hints } // NewHintingSimulator returns a new HintingSimulator. -func NewHintingSimulator(predicateChecker predicatechecker.PredicateChecker) *HintingSimulator { +func NewHintingSimulator() *HintingSimulator { return &HintingSimulator{ - predicateChecker: predicateChecker, - hints: NewHints(), + hints: NewHints(), } } @@ -62,20 +57,20 @@ func (s *HintingSimulator) TrySchedulePods(clusterSnapshot clustersnapshot.Clust loggingQuota := klogx.PodsLoggingQuota() for _, pod := range pods { klogx.V(5).UpTo(loggingQuota).Infof("Looking for place for %s/%s", pod.Namespace, pod.Name) - nodeName, err := s.findNodeWithHints(clusterSnapshot, pod, isNodeAcceptable) + nodeName, err := s.tryScheduleUsingHints(clusterSnapshot, pod, isNodeAcceptable) if err != nil { return nil, 0, err } if nodeName == "" { - nodeName = s.findNode(similarPods, clusterSnapshot, pod, loggingQuota, isNodeAcceptable) + nodeName, err = s.trySchedule(similarPods, clusterSnapshot, pod, loggingQuota, isNodeAcceptable) + if err != nil { + return nil, 0, err + } } if nodeName != "" { klogx.V(4).UpTo(loggingQuota).Infof("Pod %s/%s can be moved to %s", pod.Namespace, pod.Name, nodeName) - if err := clusterSnapshot.AddPod(pod, nodeName); err != nil { - return nil, 0, fmt.Errorf("simulating scheduling of %s/%s to %s return error; %v", pod.Namespace, pod.Name, nodeName, err) - } statuses = append(statuses, Status{Pod: pod, NodeName: nodeName}) } else if breakOnFailure { break @@ -85,40 +80,45 @@ func (s *HintingSimulator) TrySchedulePods(clusterSnapshot clustersnapshot.Clust return statuses, similarPods.OverflowingControllerCount(), nil } -func (s *HintingSimulator) findNodeWithHints(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, isNodeAcceptable func(*framework.NodeInfo) bool) (string, error) { +func (s *HintingSimulator) tryScheduleUsingHints(clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, isNodeAcceptable func(*framework.NodeInfo) bool) (string, error) { hk := HintKeyFromPod(pod) if hintedNode, hasHint := s.hints.Get(hk); hasHint { - if err := s.predicateChecker.CheckPredicates(clusterSnapshot, pod, hintedNode); err == nil { + nodeInfo, err := clusterSnapshot.GetNodeInfo(hintedNode) + if err != nil { + return "", err + } + if !isNodeAcceptable(nodeInfo) { + return "", nil + } + if err := clusterSnapshot.SchedulePod(pod, hintedNode); err == nil { s.hints.Set(hk, hintedNode) - - nodeInfo, err := clusterSnapshot.GetNodeInfo(hintedNode) - if err != nil { - return "", err - } - - if isNodeAcceptable(nodeInfo) { - return hintedNode, nil - } + return hintedNode, nil + } else if err != nil && err.Type() != clustersnapshot.FailingPredicateError { + // Unexpected error. 
+ return "", err } } return "", nil } -func (s *HintingSimulator) findNode(similarPods *SimilarPodsScheduling, clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, loggingQuota *klogx.Quota, isNodeAcceptable func(*framework.NodeInfo) bool) string { +func (s *HintingSimulator) trySchedule(similarPods *SimilarPodsScheduling, clusterSnapshot clustersnapshot.ClusterSnapshot, pod *apiv1.Pod, loggingQuota *klogx.Quota, isNodeAcceptable func(*framework.NodeInfo) bool) (string, error) { if similarPods.IsSimilarUnschedulable(pod) { klogx.V(4).UpTo(loggingQuota).Infof("failed to find place for %s/%s based on similar pods scheduling", pod.Namespace, pod.Name) - return "" + return "", nil } - newNodeName, err := s.predicateChecker.FitsAnyNodeMatching(clusterSnapshot, pod, isNodeAcceptable) - if err != nil { + newNodeName, err := clusterSnapshot.SchedulePodOnAnyNodeMatching(pod, isNodeAcceptable) + if err != nil && err.Type() == clustersnapshot.NoNodesPassingPredicatesFoundError { klogx.V(4).UpTo(loggingQuota).Infof("failed to find place for %s/%s: %v", pod.Namespace, pod.Name, err) similarPods.SetUnschedulable(pod) - return "" + return "", nil + } else if err != nil { + // Unexpected error. + return "", err } s.hints.Set(HintKeyFromPod(pod), newNodeName) - return newNodeName + return newNodeName, nil } // DropOldHints drops old scheduling hints. diff --git a/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go b/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go index 7e3ec8cb3d11..d4b86d256862 100644 --- a/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go +++ b/cluster-autoscaler/simulator/scheduling/hinting_simulator_test.go @@ -20,19 +20,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + + apiv1 "k8s.io/api/core/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot" + "k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot/testsnapshot" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" - "k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker" . 
"k8s.io/autoscaler/cluster-autoscaler/utils/test" - schedulermetrics "k8s.io/kubernetes/pkg/scheduler/metrics" - - "github.com/stretchr/testify/assert" - apiv1 "k8s.io/api/core/v1" ) func TestTrySchedulePods(t *testing.T) { - schedulermetrics.Register() - testCases := []struct { desc string nodes []*apiv1.Node @@ -136,11 +133,9 @@ func TestTrySchedulePods(t *testing.T) { tc := tc t.Run(tc.desc, func(t *testing.T) { t.Parallel() - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - predicateChecker, err := predicatechecker.NewTestPredicateChecker() - assert.NoError(t, err) + clusterSnapshot := testsnapshot.NewTestSnapshotOrDie(t) clustersnapshot.InitializeClusterSnapshotOrDie(t, clusterSnapshot, tc.nodes, tc.pods) - s := NewHintingSimulator(predicateChecker) + s := NewHintingSimulator() statuses, _, err := s.TrySchedulePods(clusterSnapshot, tc.newPods, tc.acceptableNodes, false) if tc.wantErr { assert.Error(t, err) @@ -213,16 +208,14 @@ func TestPodSchedulesOnHintedNode(t *testing.T) { tc := tc t.Run(tc.desc, func(t *testing.T) { t.Parallel() - clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot() - predicateChecker, err := predicatechecker.NewTestPredicateChecker() - assert.NoError(t, err) + clusterSnapshot := testsnapshot.NewTestSnapshotOrDie(t) nodes := make([]*apiv1.Node, 0, len(tc.nodeNames)) for _, n := range tc.nodeNames { nodes = append(nodes, buildReadyNode(n, 9999, 9999)) } clustersnapshot.InitializeClusterSnapshotOrDie(t, clusterSnapshot, nodes, []*apiv1.Pod{}) pods := make([]*apiv1.Pod, 0, len(tc.podNodes)) - s := NewHintingSimulator(predicateChecker) + s := NewHintingSimulator() var expectedStatuses []Status for p, n := range tc.podNodes { pod := BuildTestPod(p, 1, 1) diff --git a/cluster-autoscaler/utils/daemonset/daemonset.go b/cluster-autoscaler/utils/daemonset/daemonset.go index 06236ae2443c..dbeab83ad96b 100644 --- a/cluster-autoscaler/utils/daemonset/daemonset.go +++ b/cluster-autoscaler/utils/daemonset/daemonset.go @@ -22,6 +22,7 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" "k8s.io/kubernetes/pkg/controller/daemon" ) @@ -40,6 +41,10 @@ func GetDaemonSetPodsForNode(nodeInfo *framework.NodeInfo, daemonsets []*appsv1. if shouldRun { pod := daemon.NewPod(ds, nodeInfo.Node().Name) pod.Name = fmt.Sprintf("%s-pod-%d", ds.Name, rand.Int63()) + ptrVal := true + pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ + {Kind: "DaemonSet", UID: ds.UID, Name: ds.Name, Controller: &ptrVal}, + } result = append(result, &framework.PodInfo{Pod: pod}) } } diff --git a/cluster-autoscaler/utils/scheduler/scheduler.go b/cluster-autoscaler/utils/scheduler/scheduler.go index 04a6e99e7af7..c63da3cbf437 100644 --- a/cluster-autoscaler/utils/scheduler/scheduler.go +++ b/cluster-autoscaler/utils/scheduler/scheduler.go @@ -23,7 +23,6 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/autoscaler/cluster-autoscaler/simulator/framework" scheduler_config "k8s.io/kubernetes/pkg/scheduler/apis/config" scheduler_scheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" @@ -79,27 +78,6 @@ func isHugePageResourceName(name apiv1.ResourceName) bool { return strings.HasPrefix(string(name), apiv1.ResourceHugePagesPrefix) } -// DeepCopyTemplateNode copies NodeInfo object used as a template. 
It changes -// names of UIDs of both node and pods running on it, so that copies can be used -// to represent multiple nodes. -func DeepCopyTemplateNode(nodeTemplate *framework.NodeInfo, suffix string) *framework.NodeInfo { - node := nodeTemplate.Node().DeepCopy() - node.Name = fmt.Sprintf("%s-%s", node.Name, suffix) - node.UID = uuid.NewUUID() - if node.Labels == nil { - node.Labels = make(map[string]string) - } - node.Labels["kubernetes.io/hostname"] = node.Name - nodeInfo := framework.NewNodeInfo(node, nil) - for _, podInfo := range nodeTemplate.Pods() { - pod := podInfo.Pod.DeepCopy() - pod.Name = fmt.Sprintf("%s-%s", podInfo.Pod.Name, suffix) - pod.UID = uuid.NewUUID() - nodeInfo.AddPod(&framework.PodInfo{Pod: pod}) - } - return nodeInfo -} - // ResourceToResourceList returns a resource list of the resource. func ResourceToResourceList(r *schedulerframework.Resource) apiv1.ResourceList { result := apiv1.ResourceList{