Skip to content

Commit

Permalink
clean-up: remove unused functions (#4292)
Browse files Browse the repository at this point in the history
* Removed unused code

Signed-off-by: Omer Aplatony <[email protected]>

* Remove unused imports in jindofsx/worker.go

Signed-off-by: Omer Aplatony <[email protected]>

---------

Signed-off-by: Omer Aplatony <[email protected]>
  • Loading branch information
omerap12 authored Aug 27, 2024
1 parent 6ff32f9 commit 6e33346
Show file tree
Hide file tree
Showing 4 changed files with 0 additions and 728 deletions.
113 changes: 0 additions & 113 deletions pkg/ddc/jindocache/worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,6 @@ limitations under the License.
package jindocache

import (
"context"

"github.com/fluid-cloudnative/fluid/pkg/common"
"github.com/fluid-cloudnative/fluid/pkg/ctrl"
fluiderrs "github.com/fluid-cloudnative/fluid/pkg/errors"
Expand All @@ -27,8 +25,6 @@ import (

datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
"github.com/fluid-cloudnative/fluid/pkg/utils"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/retry"
)

Expand Down Expand Up @@ -152,112 +148,3 @@ func (e *JindoCacheEngine) getWorkerSelectors() string {
}
return selectorValue
}

// buildWorkersAffinity populates the worker StatefulSet's scheduling affinity
// when none has been set yet, persists the change through the API server, and
// returns the (deep-copied, possibly updated) StatefulSet.
//
// TODO: for now, runtime affinity can't be set by user, so we can assume the
// affinity is nil the first time. We need to enhance it in future.
func (e *JindoCacheEngine) buildWorkersAffinity(workers *v1.StatefulSet) (workersToUpdate *v1.StatefulSet, err error) {
	workersToUpdate = workers.DeepCopy()
	podSpec := &workersToUpdate.Spec.Template.Spec

	// An affinity is already present: nothing to build, hand back the copy.
	if podSpec.Affinity != nil {
		return workersToUpdate, nil
	}
	podSpec.Affinity = &corev1.Affinity{}

	dataset, err := utils.GetDataset(e.Client, e.name, e.namespace)
	if err != nil {
		return workersToUpdate, err
	}

	// Term matching any pod that belongs to a dataset, keyed per node.
	sameDatasetTerm := corev1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{
				{
					Key:      "fluid.io/dataset",
					Operator: metav1.LabelSelectorOpExists,
				},
			},
		},
		TopologyKey: "kubernetes.io/hostname",
	}

	// 1. Pod anti-affinity (required) for the same dataset: scheduling
	// currently relies on port conflicts for this, so nothing to do.

	// 2. Pod anti-affinity toward other datasets.
	if dataset.IsExclusiveMode() {
		// Exclusive placement: hard-avoid every node that already hosts
		// any dataset's pods.
		podSpec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{sameDatasetTerm},
		}
	} else {
		// Shared placement: softly avoid other dataset pods, but
		// hard-avoid nodes claimed by exclusive-mode datasets.
		podSpec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
				{
					// The default weight is 50
					Weight:          50,
					PodAffinityTerm: sameDatasetTerm,
				},
			},
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchExpressions: []metav1.LabelSelectorRequirement{
							{
								Key:      "fluid.io/dataset-placement",
								Operator: metav1.LabelSelectorOpIn,
								Values:   []string{string(datav1alpha1.ExclusiveMode)},
							},
						},
					},
					TopologyKey: "kubernetes.io/hostname",
				},
			},
		}
	}

	// 3. Prefer nodes that already carry this runtime's fuse label.
	if podSpec.Affinity.NodeAffinity == nil {
		podSpec.Affinity.NodeAffinity = &corev1.NodeAffinity{}
	}
	nodeAffinity := podSpec.Affinity.NodeAffinity
	if len(nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution) == 0 {
		nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []corev1.PreferredSchedulingTerm{}
	}
	nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
		nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
		corev1.PreferredSchedulingTerm{
			Weight: 100,
			Preference: corev1.NodeSelectorTerm{
				MatchExpressions: []corev1.NodeSelectorRequirement{
					{
						Key:      e.getFuseLabelname(),
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"true"},
					},
				},
			},
		})

	// 4. Honor a required node affinity declared on the dataset, if any.
	if dataset.Spec.NodeAffinity != nil && dataset.Spec.NodeAffinity.Required != nil {
		nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = dataset.Spec.NodeAffinity.Required
	}

	// Persist the rebuilt affinity on the StatefulSet.
	if err = e.Client.Update(context.TODO(), workersToUpdate); err != nil {
		return workersToUpdate, err
	}

	return workersToUpdate, nil
}
251 changes: 0 additions & 251 deletions pkg/ddc/jindocache/worker_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ limitations under the License.
package jindocache

import (
"reflect"
"testing"

datav1alpha1 "github.com/fluid-cloudnative/fluid/api/v1alpha1"
Expand Down Expand Up @@ -581,253 +580,3 @@ func TestGetWorkerSelectors(t *testing.T) {
})
}
}

// TestBuildWorkersAffinity verifies that buildWorkersAffinity fills in the
// expected pod anti-affinity and node affinity for three placement scenarios:
// exclusive mode, share mode, and a dataset declaring its own required node
// affinity. Fix: the outer test-table struct previously declared an unused
// `want *v1.Affinity` field (the expectation lives in fields.want); it has
// been removed.
func TestBuildWorkersAffinity(t *testing.T) {
	type fields struct {
		dataset *datav1alpha1.Dataset
		worker  *appsv1.StatefulSet
		want    *v1.Affinity
	}
	tests := []struct {
		name   string
		fields fields
	}{
		{name: "exlusive",
			fields: fields{
				dataset: &datav1alpha1.Dataset{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test1",
						Namespace: "big-data",
					},
					Spec: datav1alpha1.DatasetSpec{
						PlacementMode: datav1alpha1.ExclusiveMode,
					},
				},
				worker: &appsv1.StatefulSet{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test1-jindofs-worker",
						Namespace: "big-data",
					},
					Spec: appsv1.StatefulSetSpec{
						Replicas: ptr.To[int32](1),
					},
				},
				// Exclusive mode: hard anti-affinity against any dataset pod,
				// plus a soft preference for the fuse-labeled node.
				want: &v1.Affinity{
					PodAntiAffinity: &v1.PodAntiAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
							{
								LabelSelector: &metav1.LabelSelector{
									MatchExpressions: []metav1.LabelSelectorRequirement{
										{
											Key:      "fluid.io/dataset",
											Operator: metav1.LabelSelectorOpExists,
										},
									},
								},
								TopologyKey: "kubernetes.io/hostname",
							},
						},
					},
					NodeAffinity: &v1.NodeAffinity{
						PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
							{
								Weight: 100,
								Preference: v1.NodeSelectorTerm{
									MatchExpressions: []v1.NodeSelectorRequirement{
										{
											Key:      "fluid.io/f-big-data-test1",
											Operator: v1.NodeSelectorOpIn,
											Values:   []string{"true"},
										},
									},
								},
							},
						},
					},
				},
			},
		}, {name: "shared",
			fields: fields{
				dataset: &datav1alpha1.Dataset{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test2",
						Namespace: "big-data",
					},
					Spec: datav1alpha1.DatasetSpec{
						PlacementMode: datav1alpha1.ShareMode,
					},
				},
				worker: &appsv1.StatefulSet{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test2-jindofs-worker",
						Namespace: "big-data",
					},
					Spec: appsv1.StatefulSetSpec{
						Replicas: ptr.To[int32](1),
					},
				},
				// Share mode: soft anti-affinity against other dataset pods,
				// hard anti-affinity against exclusive-mode placements.
				want: &v1.Affinity{
					PodAntiAffinity: &v1.PodAntiAffinity{
						PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
							{
								// The default weight is 50
								Weight: 50,
								PodAffinityTerm: v1.PodAffinityTerm{
									LabelSelector: &metav1.LabelSelector{
										MatchExpressions: []metav1.LabelSelectorRequirement{
											{
												Key:      "fluid.io/dataset",
												Operator: metav1.LabelSelectorOpExists,
											},
										},
									},
									TopologyKey: "kubernetes.io/hostname",
								},
							},
						},
						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
							{
								LabelSelector: &metav1.LabelSelector{
									MatchExpressions: []metav1.LabelSelectorRequirement{
										{
											Key:      "fluid.io/dataset-placement",
											Operator: metav1.LabelSelectorOpIn,
											Values:   []string{"Exclusive"},
										},
									},
								},
								TopologyKey: "kubernetes.io/hostname",
							},
						},
					},
					NodeAffinity: &v1.NodeAffinity{
						PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
							{
								Weight: 100,
								Preference: v1.NodeSelectorTerm{
									MatchExpressions: []v1.NodeSelectorRequirement{
										{
											Key:      "fluid.io/f-big-data-test2",
											Operator: v1.NodeSelectorOpIn,
											Values:   []string{"true"},
										},
									},
								},
							},
						},
					},
				},
			},
		}, {name: "dataset-with-affinity",
			fields: fields{
				dataset: &datav1alpha1.Dataset{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test3",
						Namespace: "big-data",
					},
					Spec: datav1alpha1.DatasetSpec{
						NodeAffinity: &datav1alpha1.CacheableNodeAffinity{
							Required: &v1.NodeSelector{
								NodeSelectorTerms: []v1.NodeSelectorTerm{
									{
										MatchExpressions: []v1.NodeSelectorRequirement{
											{
												Key:      "nodeA",
												Operator: v1.NodeSelectorOpIn,
												Values:   []string{"true"},
											},
										},
									},
								},
							},
						},
					},
				},
				worker: &appsv1.StatefulSet{
					ObjectMeta: metav1.ObjectMeta{
						Name:      "test3-jindofs-worker",
						Namespace: "big-data",
					},
					Spec: appsv1.StatefulSetSpec{
						Replicas: ptr.To[int32](1),
					},
				},
				// Dataset-declared node affinity must be copied into the
				// worker's required node affinity verbatim.
				want: &v1.Affinity{
					PodAntiAffinity: &v1.PodAntiAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
							{
								LabelSelector: &metav1.LabelSelector{
									MatchExpressions: []metav1.LabelSelectorRequirement{
										{
											Key:      "fluid.io/dataset",
											Operator: metav1.LabelSelectorOpExists,
										},
									},
								},
								TopologyKey: "kubernetes.io/hostname",
							},
						},
					},
					NodeAffinity: &v1.NodeAffinity{
						RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
							NodeSelectorTerms: []v1.NodeSelectorTerm{
								{
									MatchExpressions: []v1.NodeSelectorRequirement{
										{
											Key:      "nodeA",
											Operator: v1.NodeSelectorOpIn,
											Values:   []string{"true"},
										},
									},
								},
							},
						},
						PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
							{
								Weight: 100,
								Preference: v1.NodeSelectorTerm{
									MatchExpressions: []v1.NodeSelectorRequirement{
										{
											Key:      "fluid.io/f-big-data-test3",
											Operator: v1.NodeSelectorOpIn,
											Values:   []string{"true"},
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Build a fake client seeded with the dataset and worker objects
			// so buildWorkersAffinity can both read and update them.
			s := runtime.NewScheme()
			s.AddKnownTypes(datav1alpha1.GroupVersion, tt.fields.dataset)
			s.AddKnownTypes(appsv1.SchemeGroupVersion, tt.fields.worker)
			_ = v1.AddToScheme(s)
			runtimeObjs := []runtime.Object{}
			runtimeObjs = append(runtimeObjs, tt.fields.dataset)
			runtimeObjs = append(runtimeObjs, tt.fields.worker)
			mockClient := fake.NewFakeClientWithScheme(s, runtimeObjs...)
			e := &JindoCacheEngine{
				name:      tt.fields.dataset.Name,
				namespace: tt.fields.dataset.Namespace,
				Client:    mockClient,
			}

			want := tt.fields.want
			worker, err := e.buildWorkersAffinity(tt.fields.worker)
			if err != nil {
				t.Errorf("JindoCacheEngine.buildWorkersAffinity() = %v", err)
			}

			if !reflect.DeepEqual(worker.Spec.Template.Spec.Affinity, want) {
				t.Errorf("Test case %s JindoCacheEngine.buildWorkersAffinity() = %v, want %v", tt.name, worker.Spec.Template.Spec.Affinity, tt.fields.want)
			}
		})
	}
}
Loading

0 comments on commit 6e33346

Please sign in to comment.