From 5c422aac08fd9788bd6fb84408eb794635ba9ab9 Mon Sep 17 00:00:00 2001
From: "dom.bozzuto"
Date: Fri, 6 Sep 2024 09:22:15 -0400
Subject: [PATCH] Revert "scheduler volumebinding: leverage PreFilterResult"

This reverts commit 380c7f248e4ecc2a104b11652b6171ab65501cba.

datadog:patch
---
 .../framework/plugins/volumebinding/binder.go | 64 +---
 .../plugins/volumebinding/binder_test.go | 141 --------
 .../plugins/volumebinding/fake_binder.go | 6 -
 .../plugins/volumebinding/volume_binding.go | 9 +-
 .../volumebinding/volume_binding_test.go | 43 +--
 pkg/volume/util/util.go | 38 ---
 pkg/volume/util/util_test.go | 302 ------------------
 .../volumescheduling/volume_binding_test.go | 6 +-
 8 files changed, 6 insertions(+), 603 deletions(-)

diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go
index e255604a5e5a9..bf537da446db6 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go
@@ -48,7 +48,6 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
 	"k8s.io/kubernetes/pkg/scheduler/util/assumecache"
-	"k8s.io/kubernetes/pkg/volume/util"
 )
 
 // ConflictReason is used for the special strings which explain why
@@ -130,8 +129,6 @@ type InTreeToCSITranslator interface {
 // 1. The scheduler takes a Pod off the scheduler queue and processes it serially:
 // a. Invokes all pre-filter plugins for the pod. GetPodVolumeClaims() is invoked
 // here, pod volume information will be saved in current scheduling cycle state for later use.
-// If pod has bound immediate PVCs, GetEligibleNodes() is invoked to potentially reduce
-// down the list of eligible nodes based on the bound PV's NodeAffinity (if any).
 // b. Invokes all filter plugins, parallelized across nodes. FindPodVolumes() is invoked here.
 // c. Invokes all score plugins. Future/TBD
 // d. Selects the best node for the Pod.
@@ -154,14 +151,6 @@ type SchedulerVolumeBinder interface {
 	// unbound with immediate binding (including prebound) and PVs that belong to storage classes of unbound PVCs with delayed binding.
 	GetPodVolumeClaims(logger klog.Logger, pod *v1.Pod) (podVolumeClaims *PodVolumeClaims, err error)
 
-	// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
-	// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
-	// in subsequent scheduling stages.
-	//
-	// If eligibleNodes is 'nil', then it indicates that such eligible node reduction cannot be made
-	// and all nodes should be considered.
-	GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string])
-
 	// FindPodVolumes checks if all of a Pod's PVCs can be satisfied by the
 	// node and returns pod's volumes information.
 	//
@@ -233,8 +222,6 @@ type volumeBinder struct {
 	csiStorageCapacityLister storagelisters.CSIStorageCapacityLister
 }
 
-var _ SchedulerVolumeBinder = &volumeBinder{}
-
 // CapacityCheck contains additional parameters for NewVolumeBinder that
 // are only needed when checking volume sizes against available storage
 // capacity is desired.
@@ -276,7 +263,7 @@ func NewVolumeBinder(
 }
 
 // FindPodVolumes finds the matching PVs for PVCs and nodes to provision PVs
-// for the given pod and node. If the node does not fit, conflict reasons are
+// for the given pod and node. If the node does not fit, conflict reasons are
 // returned.
 func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolumeClaims *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) {
 	podVolumes = &PodVolumes{}
@@ -384,55 +371,6 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
 	return
 }
 
-// GetEligibleNodes checks the existing bound claims of the pod to determine if the list of nodes can be
-// potentially reduced down to a subset of eligible nodes based on the bound claims which then can be used
-// in subsequent scheduling stages.
-//
-// Returning 'nil' for eligibleNodes indicates that such eligible node reduction cannot be made and all nodes
-// should be considered.
-func (b *volumeBinder) GetEligibleNodes(logger klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) {
-	if len(boundClaims) == 0 {
-		return
-	}
-
-	var errs []error
-	for _, pvc := range boundClaims {
-		pvName := pvc.Spec.VolumeName
-		pv, err := b.pvCache.GetPV(pvName)
-		if err != nil {
-			errs = append(errs, err)
-			continue
-		}
-
-		// if the PersistentVolume is local and has node affinity matching specific node(s),
-		// add them to the eligible nodes
-		nodeNames := util.GetLocalPersistentVolumeNodeNames(pv)
-		if len(nodeNames) != 0 {
-			// on the first found list of eligible nodes for the local PersistentVolume,
-			// insert to the eligible node set.
-			if eligibleNodes == nil {
-				eligibleNodes = sets.New(nodeNames...)
-			} else {
-				// for subsequent finding of eligible nodes for the local PersistentVolume,
-				// take the intersection of the nodes with the existing eligible nodes
-				// for cases if PV1 has node affinity to node1 and PV2 has node affinity to node2,
-				// then the eligible node list should be empty.
-				eligibleNodes = eligibleNodes.Intersection(sets.New(nodeNames...))
-			}
-		}
-	}
-
-	if len(errs) > 0 {
-		logger.V(4).Info("GetEligibleNodes: one or more error occurred finding eligible nodes", "error", errs)
-		return nil
-	}
-
-	if eligibleNodes != nil {
-		logger.V(4).Info("GetEligibleNodes: reduced down eligible nodes", "nodes", eligibleNodes)
-	}
-	return
-}
-
 // AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
 // volume information for the chosen node, and:
 // 1. Update the pvCache with the new prebound PV.
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go index 60ebdc4598426..b2ae07836bfee 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "os" - "reflect" "sort" "testing" "time" @@ -32,7 +31,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/informers" @@ -63,9 +61,6 @@ var ( boundPVCNode1a = makeTestPVC("unbound-pvc", "1G", "", pvcBound, "pv-node1a", "1", &waitClass) immediateUnboundPVC = makeTestPVC("immediate-unbound-pvc", "1G", "", pvcUnbound, "", "1", &immediateClass) immediateBoundPVC = makeTestPVC("immediate-bound-pvc", "1G", "", pvcBound, "pv-bound-immediate", "1", &immediateClass) - localPreboundPVC1a = makeTestPVC("local-prebound-pvc-1a", "1G", "", pvcPrebound, "local-pv-node1a", "1", &waitClass) - localPreboundPVC1b = makeTestPVC("local-prebound-pvc-1b", "1G", "", pvcPrebound, "local-pv-node1b", "1", &waitClass) - localPreboundPVC2a = makeTestPVC("local-prebound-pvc-2a", "1G", "", pvcPrebound, "local-pv-node2a", "1", &waitClass) // PVCs for dynamic provisioning provisionedPVC = makeTestPVC("provisioned-pvc", "1Gi", "", pvcUnbound, "", "1", &waitClassWithProvisioner) @@ -97,9 +92,6 @@ var ( pvNode1bBoundHigherVersion = makeTestPV("pv-node1b", "node1", "10G", "2", unboundPVC2, waitClass) pvBoundImmediate = makeTestPV("pv-bound-immediate", "node1", "1G", "1", immediateBoundPVC, immediateClass) pvBoundImmediateNode2 = makeTestPV("pv-bound-immediate", "node2", "1G", "1", immediateBoundPVC, immediateClass) - localPVNode1a = makeLocalPV("local-pv-node1a", "node1", "5G", "1", nil, waitClass) - localPVNode1b = makeLocalPV("local-pv-node1b", "node1", "10G", "1", nil, waitClass) - localPVNode2a = makeLocalPV("local-pv-node2a", "node2", "5G", "1", nil, waitClass) // PVs for CSI migration migrationPVBound = makeTestPVForCSIMigration(zone1Labels, boundMigrationPVC, true) @@ -709,12 +701,6 @@ func makeTestPVForCSIMigration(labels map[string]string, pvc *v1.PersistentVolum return pv } -func makeLocalPV(name, node, capacity, version string, boundToPVC *v1.PersistentVolumeClaim, className string) *v1.PersistentVolume { - pv := makeTestPV(name, node, capacity, version, boundToPVC, className) - pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key = v1.LabelHostname - return pv -} - func pvcSetSelectedNode(pvc *v1.PersistentVolumeClaim, node string) *v1.PersistentVolumeClaim { newPVC := pvc.DeepCopy() metav1.SetMetaDataAnnotation(&newPVC.ObjectMeta, volume.AnnSelectedNode, node) @@ -2326,130 +2312,3 @@ func TestCapacity(t *testing.T) { }) } } - -func TestGetEligibleNodes(t *testing.T) { - type scenarioType struct { - // Inputs - pvcs []*v1.PersistentVolumeClaim - pvs []*v1.PersistentVolume - nodes []*v1.Node - - // Expected return values - eligibleNodes sets.Set[string] - } - - scenarios := map[string]scenarioType{ - "no-bound-claims": {}, - "no-nodes-found": { - pvcs: []*v1.PersistentVolumeClaim{ - preboundPVC, - preboundPVCNode1a, - }, - }, - "pv-not-found": { - pvcs: []*v1.PersistentVolumeClaim{ - preboundPVC, - preboundPVCNode1a, - }, - nodes: []*v1.Node{ - node1, - }, - }, - "node-affinity-mismatch": { - pvcs: []*v1.PersistentVolumeClaim{ - preboundPVC, - 
preboundPVCNode1a, - }, - pvs: []*v1.PersistentVolume{ - pvNode1a, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - }, - "local-pv-with-node-affinity": { - pvcs: []*v1.PersistentVolumeClaim{ - localPreboundPVC1a, - localPreboundPVC1b, - }, - pvs: []*v1.PersistentVolume{ - localPVNode1a, - localPVNode1b, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - eligibleNodes: sets.New("node1"), - }, - "multi-local-pv-with-different-nodes": { - pvcs: []*v1.PersistentVolumeClaim{ - localPreboundPVC1a, - localPreboundPVC1b, - localPreboundPVC2a, - }, - pvs: []*v1.PersistentVolume{ - localPVNode1a, - localPVNode1b, - localPVNode2a, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - eligibleNodes: sets.New[string](), - }, - "local-and-non-local-pv": { - pvcs: []*v1.PersistentVolumeClaim{ - localPreboundPVC1a, - localPreboundPVC1b, - preboundPVC, - immediateBoundPVC, - }, - pvs: []*v1.PersistentVolume{ - localPVNode1a, - localPVNode1b, - pvNode1a, - pvBoundImmediate, - pvBoundImmediateNode2, - }, - nodes: []*v1.Node{ - node1, - node2, - }, - eligibleNodes: sets.New("node1"), - }, - } - - run := func(t *testing.T, scenario scenarioType) { - logger, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Setup - testEnv := newTestBinder(t, ctx) - testEnv.initVolumes(scenario.pvs, scenario.pvs) - - testEnv.initNodes(scenario.nodes) - testEnv.initClaims(scenario.pvcs, scenario.pvcs) - - // Execute - eligibleNodes := testEnv.binder.GetEligibleNodes(logger, scenario.pvcs) - - // Validate - if reflect.DeepEqual(scenario.eligibleNodes, eligibleNodes) { - fmt.Println("foo") - } - - if compDiff := cmp.Diff(scenario.eligibleNodes, eligibleNodes, cmp.Comparer(func(a, b sets.Set[string]) bool { - return reflect.DeepEqual(a, b) - })); compDiff != "" { - t.Errorf("Unexpected eligible nodes (-want +got):\n%s", compDiff) - } - } - - for name, scenario := range scenarios { - t.Run(name, func(t *testing.T) { run(t, scenario) }) - } -} diff --git a/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go b/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go index 667669c65b44c..f563c3c756372 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go +++ b/pkg/scheduler/framework/plugins/volumebinding/fake_binder.go @@ -20,7 +20,6 @@ import ( "context" v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" ) @@ -55,11 +54,6 @@ func (b *FakeVolumeBinder) GetPodVolumeClaims(_ klog.Logger, pod *v1.Pod) (podVo return &PodVolumeClaims{}, nil } -// GetEligibleNodes implements SchedulerVolumeBinder.GetEligibleNodes. -func (b *FakeVolumeBinder) GetEligibleNodes(_ klog.Logger, boundClaims []*v1.PersistentVolumeClaim) (eligibleNodes sets.Set[string]) { - return nil -} - // FindPodVolumes implements SchedulerVolumeBinder.FindPodVolumes. 
func (b *FakeVolumeBinder) FindPodVolumes(_ klog.Logger, pod *v1.Pod, _ *PodVolumeClaims, node *v1.Node) (podVolumes *PodVolumes, reasons ConflictReasons, err error) { return nil, b.config.FindReasons, b.config.FindErr diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go index 2dcb72fdccb0d..764aa6080d1f6 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go @@ -340,13 +340,6 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt status.AppendReason("pod has unbound immediate PersistentVolumeClaims") return nil, status } - // Attempt to reduce down the number of nodes to consider in subsequent scheduling stages if pod has bound claims. - var result *framework.PreFilterResult - if eligibleNodes := pl.Binder.GetEligibleNodes(logger, podVolumeClaims.boundClaims); eligibleNodes != nil { - result = &framework.PreFilterResult{ - NodeNames: eligibleNodes, - } - } state.Write(stateKey, &stateData{ podVolumesByNode: make(map[string]*PodVolumes), @@ -356,7 +349,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt unboundVolumesDelayBinding: podVolumeClaims.unboundVolumesDelayBinding, }, }) - return result, nil + return nil, nil } // PreFilterExtensions returns prefilter extensions, pod add and remove. diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go index 78ad84a8e5746..9697ebf17045e 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go @@ -27,7 +27,6 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/klog/v2/ktesting" @@ -81,7 +80,6 @@ func TestVolumeBinding(t *testing.T) { pvs []*v1.PersistentVolume fts feature.Features args *config.VolumeBindingArgs - wantPreFilterResult *framework.PreFilterResult wantPreFilterStatus *framework.Status wantStateAfterPreFilter *stateData wantFilterStatus []*framework.Status @@ -127,43 +125,6 @@ func TestVolumeBinding(t *testing.T) { }, wantPreScoreStatus: framework.NewStatus(framework.Skip), }, - { - name: "all bound with local volumes", - pod: makePod("pod-a").withPVCVolume("pvc-a", "volume-a").withPVCVolume("pvc-b", "volume-b").Pod, - nodes: []*v1.Node{ - makeNode("node-a").Node, - }, - pvcs: []*v1.PersistentVolumeClaim{ - makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim, - makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim, - }, - pvs: []*v1.PersistentVolume{ - makePV("pv-a", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{ - v1.LabelHostname: {"node-a"}, - }).PersistentVolume, - makePV("pv-b", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{ - v1.LabelHostname: {"node-a"}, - }).PersistentVolume, - }, - wantPreFilterResult: &framework.PreFilterResult{ - NodeNames: sets.New("node-a"), - }, - wantStateAfterPreFilter: &stateData{ - podVolumeClaims: &PodVolumeClaims{ - boundClaims: []*v1.PersistentVolumeClaim{ - makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim, - makePVC("pvc-b", 
waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim, - }, - unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{}, - unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{}, - }, - podVolumesByNode: map[string]*PodVolumes{}, - }, - wantFilterStatus: []*framework.Status{ - nil, - }, - wantPreScoreStatus: framework.NewStatus(framework.Skip), - }, { name: "PVC does not exist", pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod, @@ -833,10 +794,8 @@ func TestVolumeBinding(t *testing.T) { state := framework.NewCycleState() t.Logf("Verify: call PreFilter and check status") - gotPreFilterResult, gotPreFilterStatus := p.PreFilter(ctx, state, item.pod) + _, gotPreFilterStatus := p.PreFilter(ctx, state, item.pod) assert.Equal(t, item.wantPreFilterStatus, gotPreFilterStatus) - assert.Equal(t, item.wantPreFilterResult, gotPreFilterResult) - if !gotPreFilterStatus.IsSuccess() { // scheduler framework will skip Filter if PreFilter fails return diff --git a/pkg/volume/util/util.go b/pkg/volume/util/util.go index 4af8f7975e15a..9c50c7d41e7cc 100644 --- a/pkg/volume/util/util.go +++ b/pkg/volume/util/util.go @@ -511,44 +511,6 @@ func IsLocalEphemeralVolume(volume v1.Volume) bool { volume.ConfigMap != nil } -// GetLocalPersistentVolumeNodeNames returns the node affinity node name(s) for -// local PersistentVolumes. nil is returned if the PV does not have any -// specific node affinity node selector terms and match expressions. -// PersistentVolume with node affinity has select and match expressions -// in the form of: -// -// nodeAffinity: -// required: -// nodeSelectorTerms: -// - matchExpressions: -// - key: kubernetes.io/hostname -// operator: In -// values: -// - -// - -func GetLocalPersistentVolumeNodeNames(pv *v1.PersistentVolume) []string { - if pv == nil || pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil { - return nil - } - - var result sets.Set[string] - for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { - var nodes sets.Set[string] - for _, matchExpr := range term.MatchExpressions { - if matchExpr.Key == v1.LabelHostname && matchExpr.Operator == v1.NodeSelectorOpIn { - if nodes == nil { - nodes = sets.New(matchExpr.Values...) - } else { - nodes = nodes.Intersection(sets.New(matchExpr.Values...)) - } - } - } - result = result.Union(nodes) - } - - return sets.List(result) -} - // GetPodVolumeNames returns names of volumes that are used in a pod, // either as filesystem mount or raw block device, together with list // of all SELinux contexts of all containers that use the volumes. 
diff --git a/pkg/volume/util/util_test.go b/pkg/volume/util/util_test.go index b00f95c9a449e..455283922a6cf 100644 --- a/pkg/volume/util/util_test.go +++ b/pkg/volume/util/util_test.go @@ -22,7 +22,6 @@ import ( "runtime" "testing" - "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -962,304 +961,3 @@ func TestGetPodVolumeNames(t *testing.T) { }) } } - -func TestGetPersistentVolumeNodeNames(t *testing.T) { - tests := []struct { - name string - pv *v1.PersistentVolume - expectedNodeNames []string - }{ - { - name: "nil PV", - pv: nil, - }, - { - name: "PV missing node affinity", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - }, - }, - { - name: "PV node affinity missing required", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{}, - }, - }, - }, - { - name: "PV node affinity required zero selector terms", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{}, - }, - }, - }, - }, - expectedNodeNames: []string{}, - }, - { - name: "PV node affinity required zero selector terms", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{}, - }, - }, - }, - }, - expectedNodeNames: []string{}, - }, - { - name: "PV node affinity required zero match expressions", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{}, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{}, - }, - { - name: "PV node affinity required multiple match expressions", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: v1.NodeSelectorOpIn, - }, - { - Key: "bar", - Operator: v1.NodeSelectorOpIn, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{}, - }, - { - name: "PV node affinity required single match expression with no values", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelHostname, - Operator: v1.NodeSelectorOpIn, - Values: []string{}, - }, - }, - }, - }, - }, - }, - }, - }, - expectedNodeNames: []string{}, - }, - { - name: "PV node affinity required single match expression with single node", - pv: &v1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1.PersistentVolumeSpec{ - NodeAffinity: &v1.VolumeNodeAffinity{ - Required: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: v1.LabelHostname, 
-											Operator: v1.NodeSelectorOpIn,
-											Values: []string{
-												"node1",
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			expectedNodeNames: []string{
-				"node1",
-			},
-		},
-		{
-			name: "PV node affinity required single match expression with multiple nodes",
-			pv: &v1.PersistentVolume{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "foo",
-				},
-				Spec: v1.PersistentVolumeSpec{
-					NodeAffinity: &v1.VolumeNodeAffinity{
-						Required: &v1.NodeSelector{
-							NodeSelectorTerms: []v1.NodeSelectorTerm{
-								{
-									MatchExpressions: []v1.NodeSelectorRequirement{
-										{
-											Key: v1.LabelHostname,
-											Operator: v1.NodeSelectorOpIn,
-											Values: []string{
-												"node1",
-												"node2",
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			expectedNodeNames: []string{
-				"node1",
-				"node2",
-			},
-		},
-		{
-			name: "PV node affinity required multiple match expressions with multiple nodes",
-			pv: &v1.PersistentVolume{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "foo",
-				},
-				Spec: v1.PersistentVolumeSpec{
-					NodeAffinity: &v1.VolumeNodeAffinity{
-						Required: &v1.NodeSelector{
-							NodeSelectorTerms: []v1.NodeSelectorTerm{
-								{
-									MatchExpressions: []v1.NodeSelectorRequirement{
-										{
-											Key: "bar",
-											Operator: v1.NodeSelectorOpIn,
-											Values: []string{
-												"node1",
-												"node2",
-											},
-										},
-										{
-											Key: v1.LabelHostname,
-											Operator: v1.NodeSelectorOpIn,
-											Values: []string{
-												"node3",
-												"node4",
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			expectedNodeNames: []string{
-				"node3",
-				"node4",
-			},
-		},
-		{
-			name: "PV node affinity required multiple node selectors multiple match expressions with multiple nodes",
-			pv: &v1.PersistentVolume{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: "foo",
-				},
-				Spec: v1.PersistentVolumeSpec{
-					NodeAffinity: &v1.VolumeNodeAffinity{
-						Required: &v1.NodeSelector{
-							NodeSelectorTerms: []v1.NodeSelectorTerm{
-								{
-									MatchExpressions: []v1.NodeSelectorRequirement{
-										{
-											Key: v1.LabelHostname,
-											Operator: v1.NodeSelectorOpIn,
-											Values: []string{
-												"node1",
-												"node2",
-											},
-										},
-										{
-											Key: v1.LabelHostname,
-											Operator: v1.NodeSelectorOpIn,
-											Values: []string{
-												"node2",
-												"node3",
-											},
-										},
-									},
-								},
-								{
-									MatchExpressions: []v1.NodeSelectorRequirement{
-										{
-											Key: v1.LabelHostname,
-											Operator: v1.NodeSelectorOpIn,
-											Values: []string{
-												"node1",
-											},
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-			expectedNodeNames: []string{
-				"node1",
-				"node2",
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			nodeNames := GetLocalPersistentVolumeNodeNames(test.pv)
-			if diff := cmp.Diff(test.expectedNodeNames, nodeNames); diff != "" {
-				t.Errorf("Unexpected nodeNames (-want, +got):\n%s", diff)
-			}
-		})
-	}
-}
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index 2c880ad987af4..5cad5444db84e 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -671,7 +671,7 @@ func TestPVAffinityConflict(t *testing.T) {
 	if _, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
 		t.Fatalf("Failed to create Pod %q: %v", pod.Name, err)
 	}
-	// Give time to scheduler to attempt to schedule pod
+	// Give time to scheduler to attempt to schedule pod
 	if err := waitForPodUnschedulable(config.client, pod); err != nil {
 		t.Errorf("Failed as Pod %s was not unschedulable: %v", pod.Name, err)
 	}
@@ -686,8 +686,8 @@ func TestPVAffinityConflict(t *testing.T) {
 	if strings.Compare(p.Status.Conditions[0].Reason, "Unschedulable") != 0 {
 		t.Fatalf("Failed as Pod %s reason was: %s but expected: Unschedulable", podName, p.Status.Conditions[0].Reason)
 	}
-	if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match Pod's node affinity") {
-		t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match Pod's node affinity. Got message %q", podName, p.Status.Conditions[0].Message)
+	if !strings.Contains(p.Status.Conditions[0].Message, "node(s) didn't match Pod's node affinity") || !strings.Contains(p.Status.Conditions[0].Message, "node(s) had volume node affinity conflict") {
+		t.Fatalf("Failed as Pod's %s failure message does not contain expected message: node(s) didn't match Pod's node affinity, node(s) had volume node affinity conflict. Got message %q", podName, p.Status.Conditions[0].Message)
 	}
 	// Deleting test pod
 	if err := config.client.CoreV1().Pods(config.ns).Delete(context.TODO(), podName, metav1.DeleteOptions{}); err != nil {