Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
304 changes: 131 additions & 173 deletions pkg/ddc/goosefs/node_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,9 @@ package goosefs
import (
"context"
"fmt"
"reflect"
"testing"

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"

"github.com/fluid-cloudnative/fluid/api/v1alpha1"
"github.com/fluid-cloudnative/fluid/pkg/common"
Expand Down Expand Up @@ -62,183 +63,140 @@ func getTestGooseFSEngineNode(c client.Client, name string, namespace string, wi
return engine
}

func TestSyncScheduleInfoToCacheNodes(t *testing.T) {
type fields struct {
worker *appsv1.StatefulSet
pods []*v1.Pod
nodes []*v1.Node
name string
namespace string
}
testcases := []struct {
name string
fields fields
nodeNames []string
}{}

testcaseCnt := 0
makeDatasetResourcesFn := func(dsName string, dsNamespace string, stsPodNodeNames []string) fields {
testcaseCnt++
ret := fields{
name: dsName,
namespace: dsNamespace,
worker: &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: testNodeKindSts,
APIVersion: testNodeAPIVersion,
},
ObjectMeta: metav1.ObjectMeta{
Name: dsName + "-worker",
Namespace: dsNamespace,
UID: types.UID(fmt.Sprintf("uid%d", testcaseCnt)),
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": testNodeLabelApp,
"role": testNodeLabelRole,
"release": dsName,
},
},
},
},
pods: []*v1.Pod{},
var _ = Describe("GooseFSEngine", func() {
Describe("SyncScheduleInfoToCacheNodes", func() {
type fields struct {
worker *appsv1.StatefulSet
pods []*v1.Pod
nodes []*v1.Node
name string
namespace string
}

for idx, nodeName := range stsPodNodeNames {
ret.pods = append(ret.pods, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-worker-%d", dsName, idx),
Namespace: dsNamespace,
OwnerReferences: []metav1.OwnerReference{{
testcaseCnt := 0
makeDatasetResources := func(dsName string, dsNamespace string, stsPodNodeNames []string) fields {
testcaseCnt++
ret := fields{
name: dsName,
namespace: dsNamespace,
worker: &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: testNodeKindSts,
APIVersion: testNodeAPIVersion,
Name: dsName + "-worker",
UID: types.UID(fmt.Sprintf("uid%d", testcaseCnt)),
Controller: ptr.To(true),
}},
Labels: map[string]string{
"app": testNodeLabelApp,
"role": testNodeLabelRole,
"release": dsName,
testNodeLabelDataset: fmt.Sprintf("%s-%s", dsNamespace, dsName),
},
ObjectMeta: metav1.ObjectMeta{
Name: dsName + "-worker",
Namespace: dsNamespace,
UID: types.UID(fmt.Sprintf("uid%d", testcaseCnt)),
},
Spec: appsv1.StatefulSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": testNodeLabelApp,
"role": testNodeLabelRole,
"release": dsName,
},
},
},
},
Spec: v1.PodSpec{
NodeName: nodeName,
},
})
}

return ret
}

fields1 := makeDatasetResourcesFn("spark", testNodeNamespace, []string{"node1"})
fields1.nodes = append(fields1.nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}})
testcases = append(testcases, struct {
name string
fields fields
nodeNames []string
}{
name: "create",
fields: fields1,
nodeNames: []string{"node1"},
})

fields2 := makeDatasetResourcesFn("hbase", testNodeNamespace, []string{"node2", "node3"})
fields2.nodes = append(fields2.nodes,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node3"}},
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"fluid.io/s-big-data-hbase": "true"}}},
)
testcases = append(testcases, struct {
name string
fields fields
nodeNames []string
}{
name: "add",
fields: fields2,
nodeNames: []string{"node2", "node3"},
})

fields3 := makeDatasetResourcesFn("hbase-a", testNodeNamespace, []string{"node4", "node5"})
fields3.pods[1].OwnerReferences = []metav1.OwnerReference{}
fields3.nodes = append(fields3.nodes,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node5"}},
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: map[string]string{"fluid.io/s-big-data-hbase-a": "true"}}},
)
testcases = append(testcases, struct {
name string
fields fields
nodeNames []string
}{
name: "noController",
fields: fields3,
nodeNames: []string{"node4"},
})

fields4 := makeDatasetResourcesFn("hbase-b", testNodeNamespace, []string{})
fields4.nodes = append(fields4.nodes,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node6", Labels: map[string]string{"fluid.io/s-big-data-hbase-b": "true", "fluid.io/s-goosefs-big-data-hbase-b": "true"}}},
)
testcases = append(testcases, struct {
name string
fields fields
nodeNames []string
}{
name: "remove",
fields: fields4,
nodeNames: []string{},
})

runtimeObjs := []runtime.Object{}

for _, testcase := range testcases {
runtimeObjs = append(runtimeObjs, testcase.fields.worker)

for _, pod := range testcase.fields.pods {
runtimeObjs = append(runtimeObjs, pod)
}

for _, node := range testcase.fields.nodes {
runtimeObjs = append(runtimeObjs, node)
}
}
c := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...)

for _, testcase := range testcases {
engine := getTestGooseFSEngineNode(c, testcase.fields.name, testcase.fields.namespace, true)
err := engine.SyncScheduleInfoToCacheNodes()
if err != nil {
t.Errorf("testcase %s: Got error %v", testcase.name, err)
continue
}

nodeList := &v1.NodeList{}
datasetLabels, parseErr := labels.Parse(fmt.Sprintf(testNodeLabelSelector, engine.runtimeInfo.GetCommonLabelName()))
if parseErr != nil {
t.Fatalf("testcase %s: Got error parsing labels: %v", testcase.name, parseErr)
}

listErr := c.List(context.TODO(), nodeList, &client.ListOptions{
LabelSelector: datasetLabels,
})

if listErr != nil {
t.Errorf("testcase %s: Got error listing nodes: %v", testcase.name, listErr)
continue
}

nodeNames := []string{}
for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name)
}
pods: []*v1.Pod{},
}

for idx, nodeName := range stsPodNodeNames {
ret.pods = append(ret.pods, &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-worker-%d", dsName, idx),
Namespace: dsNamespace,
OwnerReferences: []metav1.OwnerReference{{
Kind: testNodeKindSts,
APIVersion: testNodeAPIVersion,
Name: dsName + "-worker",
UID: types.UID(fmt.Sprintf("uid%d", testcaseCnt)),
Controller: ptr.To(true),
}},
Labels: map[string]string{
"app": testNodeLabelApp,
"role": testNodeLabelRole,
"release": dsName,
testNodeLabelDataset: fmt.Sprintf("%s-%s", dsNamespace, dsName),
},
},
Spec: v1.PodSpec{
NodeName: nodeName,
},
})
}

if len(testcase.nodeNames) == 0 && len(nodeNames) == 0 {
continue
return ret
}

if !reflect.DeepEqual(testcase.nodeNames, nodeNames) {
t.Errorf("test case %v fail to sync node labels, wanted %v, got %v", testcase.name, testcase.nodeNames, nodeNames)
}
}
}
fields1 := makeDatasetResources("spark", testNodeNamespace, []string{"node1"})
fields1.nodes = append(fields1.nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}})

fields2 := makeDatasetResources("hbase", testNodeNamespace, []string{"node2", "node3"})
fields2.nodes = append(fields2.nodes,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node3"}},
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node2", Labels: map[string]string{"fluid.io/s-big-data-hbase": "true"}}},
)

fields3 := makeDatasetResources("hbase-a", testNodeNamespace, []string{"node4", "node5"})
fields3.pods[1].OwnerReferences = []metav1.OwnerReference{}
fields3.nodes = append(fields3.nodes,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node5"}},
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node4", Labels: map[string]string{"fluid.io/s-big-data-hbase-a": "true"}}},
)

fields4 := makeDatasetResources("hbase-b", testNodeNamespace, []string{})
fields4.nodes = append(fields4.nodes,
&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node6", Labels: map[string]string{
"fluid.io/s-big-data-hbase-b": "true",
"fluid.io/s-goosefs-big-data-hbase-b": "true",
}}},
)

DescribeTable("should sync schedule info to cache nodes correctly",
func(f fields, expectedNodeNames []string) {
runtimeObjs := []runtime.Object{}
runtimeObjs = append(runtimeObjs, f.worker)

for _, pod := range f.pods {
runtimeObjs = append(runtimeObjs, pod)
}
for _, node := range f.nodes {
runtimeObjs = append(runtimeObjs, node)
}

c := fake.NewFakeClientWithScheme(testScheme, runtimeObjs...)
engine := getTestGooseFSEngineNode(c, f.name, f.namespace, true)

err := engine.SyncScheduleInfoToCacheNodes()
Expect(err).NotTo(HaveOccurred())

nodeList := &v1.NodeList{}
datasetLabels, err := labels.Parse(fmt.Sprintf(testNodeLabelSelector, engine.runtimeInfo.GetCommonLabelName()))
Expect(err).NotTo(HaveOccurred())

err = c.List(context.TODO(), nodeList, &client.ListOptions{
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

medium

For clarity and adherence to best practices, prefer context.Background() as the root context in tests instead of context.TODO(). The context package documentation states that context.Background() is intended for use by the main function, initialization, and tests, whereas context.TODO() is a placeholder for call sites where it is not yet clear which context to use — which is not the case here.

Suggested change
err = c.List(context.TODO(), nodeList, &client.ListOptions{
err = c.List(context.Background(), nodeList, &client.ListOptions{

LabelSelector: datasetLabels,
})
Expect(err).NotTo(HaveOccurred())

nodeNames := []string{}
for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name)
}

if len(expectedNodeNames) == 0 && len(nodeNames) == 0 {
return
}

Expect(nodeNames).To(Equal(expectedNodeNames),
fmt.Sprintf("wanted %v, got %v", expectedNodeNames, nodeNames))
},
Entry("create", fields1, []string{"node1"}),
Entry("add", fields2, []string{"node2", "node3"}),
Entry("noController", fields3, []string{"node4"}),
Entry("remove", fields4, []string{}),
)
})
})
Loading