diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 7746458001108..815fea5cd88fc 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -3049,25 +3049,19 @@ func TestSyncLabels(t *testing.T) { func waitForVolumeUnmount( volumeManager kubeletvolume.VolumeManager, pod *v1.Pod) error { - var podVolumes kubecontainer.VolumeMap err := retryWithExponentialBackOff( time.Duration(50*time.Millisecond), func() (bool, error) { // Verify volumes detached - podVolumes = volumeManager.GetMountedVolumesForPod( + hasVolumes := volumeManager.HasPossiblyMountedVolumesForPod( util.GetUniquePodName(pod)) - - if len(podVolumes) != 0 { - return false, nil - } - - return true, nil + return !hasVolumes, nil }, ) if err != nil { return fmt.Errorf( - "Expected volumes to be unmounted. But some volumes are still mounted: %#v", podVolumes) + "Expected volumes to be unmounted. But some volumes are still mounted") } return nil diff --git a/pkg/kubelet/kubelet_volumes.go b/pkg/kubelet/kubelet_volumes.go index 16a9daaddc719..40320dcdf55af 100644 --- a/pkg/kubelet/kubelet_volumes.go +++ b/pkg/kubelet/kubelet_volumes.go @@ -76,9 +76,7 @@ func (kl *Kubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.B // podVolumesExist checks with the volume manager and returns true any of the // pods for the specified volume are mounted or are uncertain. func (kl *Kubelet) podVolumesExist(podUID types.UID) bool { - if mountedVolumes := - kl.volumeManager.GetPossiblyMountedVolumesForPod( - volumetypes.UniquePodName(podUID)); len(mountedVolumes) > 0 { + if kl.volumeManager.HasPossiblyMountedVolumesForPod(volumetypes.UniquePodName(podUID)) { return true } // TODO: This checks pod volume paths and whether they are mounted. If checking returns error, podVolumesExist will return true diff --git a/pkg/kubelet/kubelet_volumes_test.go b/pkg/kubelet/kubelet_volumes_test.go index 9ba12c2f31a25..4f8bf7c45ef9e 100644 --- a/pkg/kubelet/kubelet_volumes_test.go +++ b/pkg/kubelet/kubelet_volumes_test.go @@ -470,16 +470,12 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { kubelet.podManager.SetPods([]*v1.Pod{}) assert.NoError(t, kubelet.volumeManager.WaitForUnmount(context.Background(), pod)) - if actual := kubelet.volumeManager.GetMountedVolumesForPod(util.GetUniquePodName(pod)); len(actual) > 0 { - t.Fatalf("expected volume unmount to wait for no volumes: %v", actual) - } // Verify volumes unmounted - podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( + hasMountedVolumes := kubelet.volumeManager.HasPossiblyMountedVolumesForPod( util.GetUniquePodName(pod)) - assert.Empty(t, podVolumes, - "Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes) + assert.False(t, hasMountedVolumes, "Expected volumes to be unmounted and detached. 
But some volumes are still mounted") assert.NoError(t, volumetest.VerifyTearDownCallCount( 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) @@ -557,9 +553,9 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) { podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( util.GetUniquePodName(pod)) - allPodVolumes := kubelet.volumeManager.GetPossiblyMountedVolumesForPod( + hasVolumes := kubelet.volumeManager.HasPossiblyMountedVolumesForPod( util.GetUniquePodName(pod)) - assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes") + assert.True(t, hasVolumes, "HasPossiblyMountedVolumesForPod should return true") expectedPodVolumes := []string{"vol1"} assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) @@ -644,9 +640,9 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { podVolumes := kubelet.volumeManager.GetMountedVolumesForPod( util.GetUniquePodName(pod)) - allPodVolumes := kubelet.volumeManager.GetPossiblyMountedVolumesForPod( + hasVolumes := kubelet.volumeManager.HasPossiblyMountedVolumesForPod( util.GetUniquePodName(pod)) - assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes") + assert.True(t, hasVolumes, "HasPossiblyMountedVolumesForPod should return true") expectedPodVolumes := []string{"vol1"} assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod) @@ -672,9 +668,9 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { // Verify volumes unmounted podVolumes = kubelet.volumeManager.GetMountedVolumesForPod( util.GetUniquePodName(pod)) - allPodVolumes = kubelet.volumeManager.GetPossiblyMountedVolumesForPod( + hasVolumes = kubelet.volumeManager.HasPossiblyMountedVolumesForPod( util.GetUniquePodName(pod)) - assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes") + assert.False(t, hasVolumes, "HasPossiblyMountedVolumesForPod should return false") assert.Empty(t, podVolumes, "Expected volumes to be unmounted and detached. But some volumes are still mounted") diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index ea5cf573da388..899d87da6ef1b 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -149,9 +149,9 @@ type ActualStateOfWorld interface { // current actual state of the world. GetMountedVolumesForPod(podName volumetypes.UniquePodName) []MountedVolume - // GetMountedVolumeForPodByOuterVolumeSpecName returns the volume and true if - // the given outerVolumeSpecName is mounted on the given pod. - GetMountedVolumeForPodByOuterVolumeSpecName(podName volumetypes.UniquePodName, outerVolumeSpecName string) (MountedVolume, bool) + // GetMountedVolumeForPod returns the volume and true if + // the given name is mounted on the given pod. 
+ GetMountedVolumeForPod(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (MountedVolume, bool) // GetPossiblyMountedVolumesForPod generates and returns a list of volumes for // the specified pod that either are attached and mounted or are "uncertain", @@ -356,12 +356,6 @@ type mountedPod struct { // /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/ volumeSpec *volume.Spec - // outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced - // directly in the pod. If the volume was referenced through a persistent - // volume claim, this contains the volume.Spec.Name() of the persistent - // volume claim - outerVolumeSpecName string - // remountRequired indicates the underlying volume has been successfully // mounted to this pod but it should be remounted to reflect changes in the // referencing pod. @@ -484,7 +478,6 @@ func (asw *actualStateOfWorld) CheckAndMarkVolumeAsUncertainViaReconstruction(op volumeName := opts.VolumeName mounter := opts.Mounter blockVolumeMapper := opts.BlockVolumeMapper - outerVolumeSpecName := opts.OuterVolumeSpecName volumeGIDValue := opts.VolumeGIDVolume volumeSpec := opts.VolumeSpec @@ -493,7 +486,6 @@ func (asw *actualStateOfWorld) CheckAndMarkVolumeAsUncertainViaReconstruction(op podUID: podUID, mounter: mounter, blockVolumeMapper: blockVolumeMapper, - outerVolumeSpecName: outerVolumeSpecName, volumeGIDValue: volumeGIDValue, volumeSpec: volumeSpec, remountRequired: false, @@ -731,7 +723,6 @@ func (asw *actualStateOfWorld) AddPodToVolume(markVolumeOpts operationexecutor.M volumeName := markVolumeOpts.VolumeName mounter := markVolumeOpts.Mounter blockVolumeMapper := markVolumeOpts.BlockVolumeMapper - outerVolumeSpecName := markVolumeOpts.OuterVolumeSpecName volumeGIDValue := markVolumeOpts.VolumeGIDVolume volumeSpec := markVolumeOpts.VolumeSpec asw.Lock() @@ -760,7 +751,6 @@ func (asw *actualStateOfWorld) AddPodToVolume(markVolumeOpts operationexecutor.M podUID: podUID, mounter: mounter, blockVolumeMapper: blockVolumeMapper, - outerVolumeSpecName: outerVolumeSpecName, volumeGIDValue: volumeGIDValue, volumeSpec: volumeSpec, volumeMountStateForPod: markVolumeOpts.VolumeMountState, @@ -1102,15 +1092,14 @@ func (asw *actualStateOfWorld) GetMountedVolumesForPod( return mountedVolume } -func (asw *actualStateOfWorld) GetMountedVolumeForPodByOuterVolumeSpecName( - podName volumetypes.UniquePodName, outerVolumeSpecName string) (MountedVolume, bool) { +func (asw *actualStateOfWorld) GetMountedVolumeForPod( + podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) (MountedVolume, bool) { asw.RLock() defer asw.RUnlock() - for _, volumeObj := range asw.attachedVolumes { - if podObj, hasPod := volumeObj.mountedPods[podName]; hasPod { - if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted && podObj.outerVolumeSpecName == outerVolumeSpecName { - return getMountedVolume(&podObj, &volumeObj), true - } + volumeObj := asw.attachedVolumes[volumeName] + if podObj, hasPod := volumeObj.mountedPods[podName]; hasPod { + if podObj.volumeMountStateForPod == operationexecutor.VolumeMounted { + return getMountedVolume(&podObj, &volumeObj), true } } @@ -1308,7 +1297,6 @@ func getMountedVolume( PodName: mountedPod.podName, VolumeName: attachedVolume.volumeName, InnerVolumeSpecName: mountedPod.volumeSpec.Name(), - OuterVolumeSpecName: mountedPod.outerVolumeSpecName, PluginName: attachedVolume.pluginName, PodUID: mountedPod.podUID, Mounter: mountedPod.mounter, diff --git 
a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go index f7ccaded3ce8c..1b4f7fb7e0c8d 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go @@ -232,13 +232,12 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) { // Act markVolumeOpts := operationexecutor.MarkVolumeOpts{ - PodName: podName, - PodUID: pod.UID, - VolumeName: generatedVolumeName, - Mounter: mounter, - BlockVolumeMapper: mapper, - OuterVolumeSpecName: volumeSpec.Name(), - VolumeSpec: volumeSpec, + PodName: podName, + PodUID: pod.UID, + VolumeName: generatedVolumeName, + Mounter: mounter, + BlockVolumeMapper: mapper, + VolumeSpec: volumeSpec, } err = asw.AddPodToVolume(markVolumeOpts) // Assert @@ -307,13 +306,12 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) { } markVolumeOpts := operationexecutor.MarkVolumeOpts{ - PodName: podName, - PodUID: pod.UID, - VolumeName: generatedVolumeName, - Mounter: mounter, - BlockVolumeMapper: mapper, - OuterVolumeSpecName: volumeSpec.Name(), - VolumeSpec: volumeSpec, + PodName: podName, + PodUID: pod.UID, + VolumeName: generatedVolumeName, + Mounter: mounter, + BlockVolumeMapper: mapper, + VolumeSpec: volumeSpec, } err = asw.AddPodToVolume(markVolumeOpts) if err != nil { @@ -415,13 +413,12 @@ func Test_AddTwoPodsToVolume_Positive(t *testing.T) { } markVolumeOpts1 := operationexecutor.MarkVolumeOpts{ - PodName: podName1, - PodUID: pod1.UID, - VolumeName: generatedVolumeName1, - Mounter: mounter1, - BlockVolumeMapper: mapper1, - OuterVolumeSpecName: volumeSpec1.Name(), - VolumeSpec: volumeSpec1, + PodName: podName1, + PodUID: pod1.UID, + VolumeName: generatedVolumeName1, + Mounter: mounter1, + BlockVolumeMapper: mapper1, + VolumeSpec: volumeSpec1, } err = asw.AddPodToVolume(markVolumeOpts1) if err != nil { @@ -441,13 +438,12 @@ func Test_AddTwoPodsToVolume_Positive(t *testing.T) { } markVolumeOpts2 := operationexecutor.MarkVolumeOpts{ - PodName: podName2, - PodUID: pod2.UID, - VolumeName: generatedVolumeName1, - Mounter: mounter2, - BlockVolumeMapper: mapper2, - OuterVolumeSpecName: volumeSpec2.Name(), - VolumeSpec: volumeSpec2, + PodName: podName2, + PodUID: pod2.UID, + VolumeName: generatedVolumeName1, + Mounter: mounter2, + BlockVolumeMapper: mapper2, + VolumeSpec: volumeSpec2, } err = asw.AddPodToVolume(markVolumeOpts2) if err != nil { @@ -603,14 +599,13 @@ func TestActualStateOfWorld_FoundDuringReconstruction(t *testing.T) { } markVolumeOpts1 := operationexecutor.MarkVolumeOpts{ - PodName: podName1, - PodUID: pod1.UID, - VolumeName: generatedVolumeName1, - Mounter: mounter1, - BlockVolumeMapper: mapper1, - OuterVolumeSpecName: volumeSpec1.Name(), - VolumeSpec: volumeSpec1, - VolumeMountState: operationexecutor.VolumeMountUncertain, + PodName: podName1, + PodUID: pod1.UID, + VolumeName: generatedVolumeName1, + Mounter: mounter1, + BlockVolumeMapper: mapper1, + VolumeSpec: volumeSpec1, + VolumeMountState: operationexecutor.VolumeMountUncertain, } _, err = asw.CheckAndMarkVolumeAsUncertainViaReconstruction(markVolumeOpts1) if err != nil { @@ -687,13 +682,12 @@ func Test_MarkVolumeAsDetached_Negative_PodInVolume(t *testing.T) { } markVolumeOpts := operationexecutor.MarkVolumeOpts{ - PodName: podName, - PodUID: pod.UID, - VolumeName: generatedVolumeName, - Mounter: mounter, - BlockVolumeMapper: mapper, - OuterVolumeSpecName: volumeSpec.Name(), - VolumeSpec: volumeSpec, + PodName: 
podName, + PodUID: pod.UID, + VolumeName: generatedVolumeName, + Mounter: mounter, + BlockVolumeMapper: mapper, + VolumeSpec: volumeSpec, } err = asw.AddPodToVolume(markVolumeOpts) if err != nil { @@ -794,13 +788,12 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) { // Act markVolumeOpts := operationexecutor.MarkVolumeOpts{ - PodName: podName, - PodUID: pod.UID, - VolumeName: volumeName, - Mounter: mounter, - BlockVolumeMapper: mapper, - OuterVolumeSpecName: volumeSpec.Name(), - VolumeSpec: volumeSpec, + PodName: podName, + PodUID: pod.UID, + VolumeName: volumeName, + Mounter: mounter, + BlockVolumeMapper: mapper, + VolumeSpec: volumeSpec, } err = asw.AddPodToVolume(markVolumeOpts) // Assert @@ -930,7 +923,6 @@ func Test_AddPodToVolume_Positive_SELinux(t *testing.T) { VolumeName: generatedVolumeName, Mounter: mounter, BlockVolumeMapper: mapper, - OuterVolumeSpecName: volumeSpec.Name(), VolumeSpec: volumeSpec, SELinuxMountContext: "system_u:object_r:container_file_t:s0:c0,c1", VolumeMountState: operationexecutor.VolumeMounted, @@ -1044,13 +1036,12 @@ func TestUncertainVolumeMounts(t *testing.T) { } markVolumeOpts1 := operationexecutor.MarkVolumeOpts{ - PodName: podName1, - PodUID: pod1.UID, - VolumeName: generatedVolumeName1, - Mounter: mounter1, - OuterVolumeSpecName: volumeSpec1.Name(), - VolumeSpec: volumeSpec1, - VolumeMountState: operationexecutor.VolumeMountUncertain, + PodName: podName1, + PodUID: pod1.UID, + VolumeName: generatedVolumeName1, + Mounter: mounter1, + VolumeSpec: volumeSpec1, + VolumeMountState: operationexecutor.VolumeMountUncertain, } err = asw.AddPodToVolume(markVolumeOpts1) if err != nil { diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index f09c4bec21b2e..0dfc390070e73 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -22,6 +22,7 @@ package cache import ( "fmt" + "slices" "sync" "time" @@ -99,6 +100,9 @@ type DesiredStateOfWorld interface { // attached volumes, false is returned. PodExistsInVolume(podName types.UniquePodName, volumeName v1.UniqueVolumeName, seLinuxMountContext string) bool + // GetVolumeName returns the UniqueVolumeName for the given pod, indexed by outerVolumeSpecName. + GetVolumeNamesForPod(podName types.UniquePodName) map[string]v1.UniqueVolumeName + // GetVolumesToMount generates and returns a list of volumes that should be // attached to this node and the pods they should be mounted to based on the // current desired state of the world. @@ -242,11 +246,8 @@ type podToMount struct { // PVC volumes it is from the dereferenced PV object. volumeSpec *volume.Spec - // outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced - // directly in the pod. If the volume was referenced through a persistent - // volume claim, this contains the volume.Spec.Name() of the persistent - // volume claim - outerVolumeSpecName string + // outerVolumeSpecNames are the podSpec.Volume[x].Name of the volume. 
+ outerVolumeSpecNames []string // mountRequestTime stores time at which mount was requested mountRequestTime time.Time } @@ -356,8 +357,15 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( oldPodMount, ok := dsw.volumesToMount[volumeName].podsToMount[podName] mountRequestTime := time.Now() - if ok && !volumePlugin.RequiresRemount(volumeSpec) { - mountRequestTime = oldPodMount.mountRequestTime + var outerVolumeSpecNames []string + if ok { + if !volumePlugin.RequiresRemount(volumeSpec) { + mountRequestTime = oldPodMount.mountRequestTime + } + outerVolumeSpecNames = oldPodMount.outerVolumeSpecNames + } + if !slices.Contains(outerVolumeSpecNames, outerVolumeSpecName) { + outerVolumeSpecNames = append(outerVolumeSpecNames, outerVolumeSpecName) } if !ok { @@ -385,11 +393,11 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( // updated values (this is required for volumes that require remounting on // pod update, like Downward API volumes). dsw.volumesToMount[volumeName].podsToMount[podName] = podToMount{ - podName: podName, - pod: pod, - volumeSpec: volumeSpec, - outerVolumeSpecName: outerVolumeSpecName, - mountRequestTime: mountRequestTime, + podName: podName, + pod: pod, + volumeSpec: volumeSpec, + outerVolumeSpecNames: outerVolumeSpecNames, + mountRequestTime: mountRequestTime, } return volumeName, nil } @@ -562,6 +570,19 @@ func (dsw *desiredStateOfWorld) GetPods() map[types.UniquePodName]bool { return podList } +func (dsw *desiredStateOfWorld) GetVolumeNamesForPod(podName types.UniquePodName) map[string]v1.UniqueVolumeName { + dsw.RLock() + defer dsw.RUnlock() + + volumeNames := make(map[string]v1.UniqueVolumeName) + for volumeName, volumeObj := range dsw.volumesToMount { + for _, outerVolumeSpecName := range volumeObj.podsToMount[podName].outerVolumeSpecNames { + volumeNames[outerVolumeSpecName] = volumeName + } + } + return volumeNames +} + func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount { dsw.RLock() defer dsw.RUnlock() @@ -577,7 +598,7 @@ func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount { VolumeSpec: podObj.volumeSpec, PluginIsAttachable: volumeObj.pluginIsAttachable, PluginIsDeviceMountable: volumeObj.pluginIsDeviceMountable, - OuterVolumeSpecName: podObj.outerVolumeSpecName, + OuterVolumeSpecNames: podObj.outerVolumeSpecNames, VolumeGIDValue: volumeObj.volumeGIDValue, ReportedInUse: volumeObj.reportedInUse, MountRequestTime: podObj.mountRequestTime, diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go index 33b21e7a50343..b8269c8b7e651 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world_test.go @@ -17,6 +17,8 @@ limitations under the License. 
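The two changes above — the outerVolumeSpecNames slice on podToMount and the new GetVolumeNamesForPod — are easiest to see in isolation. A minimal, self-contained sketch follows; the types are hypothetical, simplified stand-ins for the cache structs and only model the outer-name bookkeeping:

package main

import (
	"fmt"
	"slices"
)

// uniqueVolumeName and podToMount are simplified stand-ins for the cache types
// edited above; they only model the outer-name bookkeeping.
type uniqueVolumeName string

type podToMount struct {
	outerVolumeSpecNames []string // every podSpec.Volume[x].Name mapped to this volume
}

// addOuterName mirrors the slices.Contains guard added to AddPodToVolume:
// each outer name is recorded at most once per pod/volume pair.
func (p *podToMount) addOuterName(name string) {
	if !slices.Contains(p.outerVolumeSpecNames, name) {
		p.outerVolumeSpecNames = append(p.outerVolumeSpecNames, name)
	}
}

// volumeNamesForPod mirrors GetVolumeNamesForPod: it indexes the unique volume
// name by each outer spec name, so two pod volumes backed by the same device
// or claim resolve to the same entry.
func volumeNamesForPod(volumes map[uniqueVolumeName]*podToMount) map[string]uniqueVolumeName {
	out := make(map[string]uniqueVolumeName)
	for volumeName, pod := range volumes {
		for _, outerName := range pod.outerVolumeSpecNames {
			out[outerName] = volumeName
		}
	}
	return out
}

func main() {
	pod := &podToMount{}
	pod.addOuterName("volume-name-1")
	pod.addOuterName("volume-name-2")
	pod.addOuterName("volume-name-1") // duplicate add is a no-op

	volumes := map[uniqueVolumeName]*podToMount{"fake-plugin/fake-device1": pod}
	fmt.Println(volumeNamesForPod(volumes))
	// map[volume-name-1:fake-plugin/fake-device1 volume-name-2:fake-plugin/fake-device1]
}

With this shape, two pod volumes that resolve to the same attachable device share one UniqueVolumeName while both outer names remain addressable.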
package cache import ( + "maps" + "slices" "testing" v1 "k8s.io/api/core/v1" @@ -76,7 +78,7 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) { verifyVolumeExistsDsw(t, generatedVolumeName, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolumeName, false /* expectReportedInUse */, dsw) + t, generatedVolumeName, []string{"volume-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, "" /* SELinuxContext */, dsw) verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw) } @@ -130,14 +132,14 @@ func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) { } verifyVolumeExistsDsw(t, generatedVolumeName, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolumeName, false /* expectReportedInUse */, dsw) + t, generatedVolumeName, []string{"volume-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, "" /* SELinuxContext */, dsw) verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw) } // Call AddPodToVolume() on different pods for different kinds of volumes -// Verities generated names are same for different pods if volume is device mountable or attachable -// Verities generated names are different for different pods if volume is not device mountble and attachable +// Verifies generated names are same for different pods if volume is device mountable or attachable +// Verifies generated names are different for different pods if volume is not device mountble and attachable func Test_AddPodToVolume_Positive_NamesForDifferentPodsAndDifferentVolumes(t *testing.T) { logger, _ := ktesting.NewTestContext(t) // Arrange @@ -299,6 +301,59 @@ func Test_AddPodToVolume_Positive_NamesForDifferentPodsAndDifferentVolumes(t *te } +func Test_AddPodToVolume_Positive_MultiOuterNames(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) + // Arrange + volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t) + seLinuxTranslator := util.NewFakeSELinuxLabelTranslator() + dsw := NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator) + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod5", + UID: "pod5uid", + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume-name-1", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device1", + }, + }, + }, { + Name: "volume-name-2", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device1", + }, + }, + }, + }, + }, + } + + volumeSpec1 := &volume.Spec{Volume: &pod.Spec.Volumes[0]} + volumeSpec2 := &volume.Spec{Volume: &pod.Spec.Volumes[1]} + podName := util.GetUniquePodName(pod) + + // Act + _, err := dsw.AddPodToVolume( + logger, podName, pod, volumeSpec1, volumeSpec1.Name(), "" /* volumeGIDValue */, nil /* seLinuxContainerContexts */) + if err != nil { + t.Fatalf("AddPodToVolume failed. Expected: Actual: <%v>", err) + } + generatedVolumeName, err := dsw.AddPodToVolume( + logger, podName, pod, volumeSpec2, volumeSpec2.Name(), "" /* volumeGIDValue */, nil /* seLinuxContainerContexts */) + if err != nil { + t.Fatalf("AddPodToVolume failed. 
Expected: Actual: <%v>", err) + } + + // Assert + verifyVolumeExistsInVolumesToMount( + t, generatedVolumeName, []string{"volume-name-1", "volume-name-2"}, false /* expectReportedInUse */, dsw) +} + // Populates data struct with a new volume/pod // Calls DeletePodFromVolume() to removes the pod // Verifies newly added pod/volume are deleted @@ -336,7 +391,7 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) { } verifyVolumeExistsDsw(t, generatedVolumeName, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolumeName, false /* expectReportedInUse */, dsw) + t, generatedVolumeName, []string{"volume-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, "" /* SELinuxContext */, dsw) // Act @@ -454,15 +509,15 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { // Assert verifyVolumeExistsDsw(t, generatedVolume1Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolume1Name, false /* expectReportedInUse */, dsw) + t, generatedVolume1Name, []string{"volume1-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, pod1Name, generatedVolume1Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsDsw(t, generatedVolume2Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolume2Name, true /* expectReportedInUse */, dsw) + t, generatedVolume2Name, []string{"volume2-name"}, true /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, pod2Name, generatedVolume2Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsDsw(t, generatedVolume3Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolume3Name, false /* expectReportedInUse */, dsw) + t, generatedVolume3Name, []string{"volume3-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, pod3Name, generatedVolume3Name, "" /* SELinuxContext */, dsw) // Act @@ -472,15 +527,15 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) { // Assert verifyVolumeExistsDsw(t, generatedVolume1Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolume1Name, false /* expectReportedInUse */, dsw) + t, generatedVolume1Name, []string{"volume1-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, pod1Name, generatedVolume1Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsDsw(t, generatedVolume2Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolume2Name, false /* expectReportedInUse */, dsw) + t, generatedVolume2Name, []string{"volume2-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, pod2Name, generatedVolume2Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsDsw(t, generatedVolume3Name, "" /* SELinuxContext */, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolume3Name, true /* expectReportedInUse */, dsw) + t, generatedVolume3Name, []string{"volume3-name"}, true /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, pod3Name, generatedVolume3Name, "" /* SELinuxContext */, dsw) } @@ -880,7 +935,7 @@ func Test_AddPodToVolume_SELinuxSinglePod(t *testing.T) { // Act generatedVolumeName, err := dsw.AddPodToVolume( - logger, podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGIDValue */, seLinuxContainerContexts) + logger, podName, pod, volumeSpec, pod.Spec.Volumes[0].Name, "" /* volumeGIDValue */, seLinuxContainerContexts) // Assert 
if tc.expectError { @@ -895,7 +950,7 @@ func Test_AddPodToVolume_SELinuxSinglePod(t *testing.T) { verifyVolumeExistsDsw(t, generatedVolumeName, tc.expectedSELinuxLabel, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolumeName, false /* expectReportedInUse */, dsw) + t, generatedVolumeName, []string{"volume-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, tc.expectedSELinuxLabel, dsw) verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw) }) @@ -1230,7 +1285,7 @@ func Test_AddPodToVolume_SELinux_MultiplePods(t *testing.T) { // Act generatedVolumeName, err := dsw.AddPodToVolume( - logger, podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGIDValue */, seLinuxContainerContexts) + logger, podName, pod, volumeSpec, pod.Spec.Volumes[0].Name, "" /* volumeGIDValue */, seLinuxContainerContexts) // Assert if err != nil { @@ -1239,7 +1294,7 @@ func Test_AddPodToVolume_SELinux_MultiplePods(t *testing.T) { verifyVolumeExistsDsw(t, generatedVolumeName, tc.expectedSELinuxLabel, dsw) verifyVolumeExistsInVolumesToMount( - t, generatedVolumeName, false /* expectReportedInUse */, dsw) + t, generatedVolumeName, []string{"volume-name"}, false /* expectReportedInUse */, dsw) verifyPodExistsInVolumeDsw(t, podName, generatedVolumeName, tc.expectedSELinuxLabel, dsw) verifyVolumeExistsWithSpecNameInVolumeDsw(t, podName, volumeSpec.Name(), dsw) @@ -1254,7 +1309,7 @@ func Test_AddPodToVolume_SELinux_MultiplePods(t *testing.T) { // Act generatedVolumeName2, err := dsw.AddPodToVolume( - logger, pod2Name, pod2, volumeSpec, volumeSpec.Name(), "" /* volumeGIDValue */, seLinuxContainerContexts) + logger, pod2Name, pod2, volumeSpec, pod2.Spec.Volumes[0].Name, "" /* volumeGIDValue */, seLinuxContainerContexts) // Assert if tc.expectError { if err == nil { @@ -1275,6 +1330,102 @@ func Test_AddPodToVolume_SELinux_MultiplePods(t *testing.T) { } } +func TestGetVolumeNamesForPod(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) + volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t) + seLinuxTranslator := util.NewFakeSELinuxLabelTranslator() + dsw := NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator) + + pod1 := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + UID: "pod1-uid", + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume1-name", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device1", + }, + }, + }, + { + Name: "volume1-dup", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device1", + }, + }, + }, + { + Name: "volume3-name", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device3", + }, + }, + }, + }, + }, + } + pod2 := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + UID: "pod2-uid", + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume2-name", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device2", + }, + }, + }, + }, + }, + } + + addVolume := func(pod *v1.Pod, index int) { + volumeSpec := &volume.Spec{ + Volume: &pod.Spec.Volumes[index], + } + podName := util.GetUniquePodName(pod) + _, err := dsw.AddPodToVolume( + logger, podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGIDValue */, nil /* seLinuxContainerContexts */) + if err != nil { + t.Fatalf("AddPodToVolume failed. 
Expected: Actual: <%v>", err) + } + } + + addVolume(pod1, 0) + addVolume(pod1, 1) + addVolume(pod1, 2) + addVolume(pod2, 0) + + verifyVolumeNames := func(pod *v1.Pod, expectedVolumeNames map[string]v1.UniqueVolumeName) { + t.Run(pod.Name, func(t *testing.T) { + podName := util.GetUniquePodName(pod) + actualVolumeNames := dsw.GetVolumeNamesForPod(podName) + if !maps.Equal(expectedVolumeNames, actualVolumeNames) { + t.Errorf("GetVolumeNamesForPod returned incorrect value. Expected: <%v> Actual: <%v>", + expectedVolumeNames, actualVolumeNames) + } + }) + } + verifyVolumeNames(pod1, map[string]v1.UniqueVolumeName{ + "volume1-name": "fake-plugin/fake-device1", + "volume1-dup": "fake-plugin/fake-device1", + "volume3-name": "fake-plugin/fake-device3", + }) + verifyVolumeNames(pod2, map[string]v1.UniqueVolumeName{ + "volume2-name": "fake-plugin/fake-device2", + }) +} + func verifyVolumeExistsDsw( t *testing.T, expectedVolumeName v1.UniqueVolumeName, expectedSELinuxContext string, dsw DesiredStateOfWorld) { volumeExists := dsw.VolumeExists(expectedVolumeName, expectedSELinuxContext) @@ -1300,6 +1451,7 @@ func verifyVolumeDoesntExist( func verifyVolumeExistsInVolumesToMount( t *testing.T, expectedVolumeName v1.UniqueVolumeName, + expectedOuterNames []string, expectReportedInUse bool, dsw DesiredStateOfWorld) { volumesToMount := dsw.GetVolumesToMount() @@ -1313,6 +1465,12 @@ func verifyVolumeExistsInVolumesToMount( volume.ReportedInUse) } + names := volume.OuterVolumeSpecNames + slices.Sort(names) + if !slices.Equal(names, expectedOuterNames) { + t.Fatalf("Expected outer volume spec names to be %v, got %v", expectedOuterNames, names) + } + return } } diff --git a/pkg/kubelet/volumemanager/metrics/metrics_test.go b/pkg/kubelet/volumemanager/metrics/metrics_test.go index d2df2e3a66e63..d819b1ea8a2b6 100644 --- a/pkg/kubelet/volumemanager/metrics/metrics_test.go +++ b/pkg/kubelet/volumemanager/metrics/metrics_test.go @@ -83,14 +83,13 @@ func TestMetricCollection(t *testing.T) { } markVolumeOpts := operationexecutor.MarkVolumeOpts{ - PodName: podName, - PodUID: pod.UID, - VolumeName: generatedVolumeName, - Mounter: mounter, - BlockVolumeMapper: mapper, - OuterVolumeSpecName: volumeSpec.Name(), - VolumeSpec: volumeSpec, - VolumeMountState: operationexecutor.VolumeMounted, + PodName: podName, + PodUID: pod.UID, + VolumeName: generatedVolumeName, + Mounter: mounter, + BlockVolumeMapper: mapper, + VolumeSpec: volumeSpec, + VolumeMountState: operationexecutor.VolumeMounted, } err = asw.AddPodToVolume(markVolumeOpts) if err != nil { diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go index f3404b0c43fff..005c3bf71c1b9 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go @@ -406,13 +406,12 @@ func TestFindAndRemoveDeletedPodsWithUncertain(t *testing.T) { // Mark the volume as uncertain opts := operationexecutor.MarkVolumeOpts{ - PodName: util.GetUniquePodName(pod), - PodUID: pod.UID, - VolumeName: expectedVolumeName, - OuterVolumeSpecName: "dswp-test-volume-name", - VolumeGIDVolume: "", - VolumeSpec: volume.NewSpecFromPersistentVolume(pv, false), - VolumeMountState: operationexecutor.VolumeMountUncertain, + PodName: util.GetUniquePodName(pod), + PodUID: pod.UID, + VolumeName: expectedVolumeName, + VolumeGIDVolume: "", + VolumeSpec: volume.NewSpecFromPersistentVolume(pv, false), 
+ VolumeMountState: operationexecutor.VolumeMountUncertain, } err := dswp.actualStateOfWorld.MarkVolumeMountAsUncertain(opts) if err != nil { @@ -1394,13 +1393,12 @@ func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t t.Fatalf("Unexpected error when MarkVolumeAsAttached: %v", err) } markVolumeOpts := operationexecutor.MarkVolumeOpts{ - PodName: volumeToMount.PodName, - PodUID: volumeToMount.Pod.UID, - VolumeName: volumeToMount.VolumeName, - OuterVolumeSpecName: volumeToMount.OuterVolumeSpecName, - VolumeGIDVolume: volumeToMount.VolumeGIDValue, - VolumeSpec: volumeToMount.VolumeSpec, - VolumeMountState: operationexecutor.VolumeMounted, + PodName: volumeToMount.PodName, + PodUID: volumeToMount.Pod.UID, + VolumeName: volumeToMount.VolumeName, + VolumeGIDVolume: volumeToMount.VolumeGIDValue, + VolumeSpec: volumeToMount.VolumeSpec, + VolumeMountState: operationexecutor.VolumeMounted, } err = asw.MarkVolumeAsMounted(markVolumeOpts) if err != nil { diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct.go b/pkg/kubelet/volumemanager/reconciler/reconstruct.go index 44bbf6cd517e5..8ac331328f1dc 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconstruct.go +++ b/pkg/kubelet/volumemanager/reconciler/reconstruct.go @@ -114,7 +114,6 @@ func (rc *reconciler) updateStates(logger klog.Logger, reconstructedVolumes map[ VolumeName: volume.volumeName, Mounter: volume.mounter, BlockVolumeMapper: volume.blockVolumeMapper, - OuterVolumeSpecName: volume.outerVolumeSpecName, VolumeGIDVolume: volume.volumeGIDValue, VolumeSpec: volume.volumeSpec, VolumeMountState: operationexecutor.VolumeMountUncertain, diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go b/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go index d685a3de00439..9d4f14bdb4c3b 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go +++ b/pkg/kubelet/volumemanager/reconciler/reconstruct_common.go @@ -72,7 +72,6 @@ type reconstructedVolume struct { volumeName v1.UniqueVolumeName podName volumetypes.UniquePodName volumeSpec *volumepkg.Spec - outerVolumeSpecName string pod *v1.Pod volumeGIDValue string devicePath string @@ -87,7 +86,6 @@ func (rv reconstructedVolume) MarshalLog() interface{} { VolumeName string `json:"volumeName"` PodName string `json:"podName"` VolumeSpecName string `json:"volumeSpecName"` - OuterVolumeSpecName string `json:"outerVolumeSpecName"` PodUID string `json:"podUID"` VolumeGIDValue string `json:"volumeGIDValue"` DevicePath string `json:"devicePath"` @@ -96,7 +94,6 @@ func (rv reconstructedVolume) MarshalLog() interface{} { VolumeName: string(rv.volumeName), PodName: string(rv.podName), VolumeSpecName: rv.volumeSpec.Name(), - OuterVolumeSpecName: rv.outerVolumeSpecName, PodUID: string(rv.pod.UID), VolumeGIDValue: rv.volumeGIDValue, DevicePath: rv.devicePath, @@ -376,17 +373,12 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (rvolume *reconstructe } reconstructedVolume := &reconstructedVolume{ - volumeName: uniqueVolumeName, - podName: volume.podName, - volumeSpec: volumeSpec, - // volume.volumeSpecName is actually InnerVolumeSpecName. It will not be used - // for volume cleanup. - // in case reconciler calls mountOrAttachVolumes, outerVolumeSpecName will - // be updated from dsw information in ASW.MarkVolumeAsMounted(). 
- outerVolumeSpecName: volume.volumeSpecName, - pod: pod, - deviceMounter: deviceMounter, - volumeGIDValue: "", + volumeName: uniqueVolumeName, + podName: volume.podName, + volumeSpec: volumeSpec, + pod: pod, + deviceMounter: deviceMounter, + volumeGIDValue: "", // devicePath is updated during updateStates() by checking node status's VolumesAttached data. // TODO: get device path directly from the volume mount path. devicePath: "", diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index b23a840ae2d77..f0c58f3bad1d2 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -120,19 +120,17 @@ type VolumeManager interface { WaitForAllPodsUnmount(ctx context.Context, pods []*v1.Pod) error // GetMountedVolumesForPod returns a VolumeMap containing the volumes - // referenced by the specified pod that are successfully attached and + // referenced by the specified pod that are desired and actually attached and // mounted. The key in the map is the OuterVolumeSpecName (i.e. // pod.Spec.Volumes[x].Name). It returns an empty VolumeMap if pod has no // volumes. GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap - // GetPossiblyMountedVolumesForPod returns a VolumeMap containing the volumes - // referenced by the specified pod that are either successfully attached + // HasPossiblyMountedVolumesForPod returns whether the pod has + // any volumes that are either successfully attached // and mounted or are "uncertain", i.e. a volume plugin may be mounting - // them right now. The key in the map is the OuterVolumeSpecName (i.e. - // pod.Spec.Volumes[x].Name). It returns an empty VolumeMap if pod has no - // volumes. - GetPossiblyMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap + // them right now. + HasPossiblyMountedVolumesForPod(podName types.UniquePodName) bool // GetExtraSupplementalGroupsForPod returns a list of the extra // supplemental groups for the Pod. 
These extra supplemental groups come @@ -321,8 +319,8 @@ func (vm *volumeManager) Run(ctx context.Context, sourcesReady config.SourcesRea func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap { podVolumes := make(container.VolumeMap) - for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) { - podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{ + for name, mountedVolume := range vm.getMountedVolumes(podName) { + podVolumes[name] = container.VolumeInfo{ Mounter: mountedVolume.Mounter, BlockVolumeMapper: mountedVolume.BlockVolumeMapper, ReadOnly: mountedVolume.VolumeSpec.ReadOnly, @@ -332,17 +330,8 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co return podVolumes } -func (vm *volumeManager) GetPossiblyMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap { - podVolumes := make(container.VolumeMap) - for _, mountedVolume := range vm.actualStateOfWorld.GetPossiblyMountedVolumesForPod(podName) { - podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{ - Mounter: mountedVolume.Mounter, - BlockVolumeMapper: mountedVolume.BlockVolumeMapper, - ReadOnly: mountedVolume.VolumeSpec.ReadOnly, - InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName, - } - } - return podVolumes +func (vm *volumeManager) HasPossiblyMountedVolumesForPod(podName types.UniquePodName) bool { + return len(vm.actualStateOfWorld.GetPossiblyMountedVolumesForPod(podName)) > 0 } func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 { @@ -448,7 +437,7 @@ func (vm *volumeManager) WaitForAttachAndMount(ctx context.Context, pod *v1.Pod) unattachedVolumes := []string{} for _, volumeToMount := range unattachedVolumeMounts { - unattachedVolumes = append(unattachedVolumes, volumeToMount.OuterVolumeSpecName) + unattachedVolumes = append(unattachedVolumes, volumeToMount.OuterVolumeSpecNames...) } slices.Sort(unattachedVolumes) @@ -502,9 +491,9 @@ func (vm *volumeManager) WaitForUnmount(ctx context.Context, pod *v1.Pod) error vm.verifyVolumesUnmountedFunc(uniquePodName)) if err != nil { - var mountedVolumes []string + var mountedVolumes []v1.UniqueVolumeName for _, v := range vm.actualStateOfWorld.GetMountedVolumesForPod(uniquePodName) { - mountedVolumes = append(mountedVolumes, v.OuterVolumeSpecName) + mountedVolumes = append(mountedVolumes, v.VolumeName) } if len(mountedVolumes) == 0 { return nil @@ -544,7 +533,7 @@ func (vm *volumeManager) getVolumesNotInDSW(uniquePodName types.UniquePodName, e for _, volumeToMount := range vm.desiredStateOfWorld.GetVolumesToMount() { if volumeToMount.PodName == uniquePodName { - volumesNotInDSW.Delete(volumeToMount.OuterVolumeSpecName) + volumesNotInDSW.Delete(volumeToMount.OuterVolumeSpecNames...) 
} } @@ -572,13 +561,7 @@ func (vm *volumeManager) verifyVolumesMountedFunc(podName types.UniquePodName, e if errs := vm.desiredStateOfWorld.PopPodErrors(podName); len(errs) > 0 { return true, errors.New(strings.Join(errs, "; ")) } - for _, expectedVolume := range expectedVolumes { - _, found := vm.actualStateOfWorld.GetMountedVolumeForPodByOuterVolumeSpecName(podName, expectedVolume) - if !found { - return false, nil - } - } - return true, nil + return len(vm.getUnmountedVolumes(podName, expectedVolumes)) == 0, nil } } @@ -593,25 +576,42 @@ func (vm *volumeManager) verifyVolumesUnmountedFunc(podName types.UniquePodName) } } -// getUnmountedVolumes fetches the current list of mounted volumes from -// the actual state of the world, and uses it to process the list of -// expectedVolumes. It returns a list of unmounted volumes. -// The list also includes volume that may be mounted in uncertain state. -func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expectedVolumes []string) []string { - mountedVolumes := sets.New[string]() - for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) { - mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName) +// getMountedVolumes returns volumes that are desired and actually mounted, +// indexed by the outer volume spec name. +func (vm *volumeManager) getMountedVolumes(podName types.UniquePodName) map[string]*cache.MountedVolume { + volumes := vm.actualStateOfWorld.GetMountedVolumesForPod(podName) + volumesByName := make(map[v1.UniqueVolumeName]*cache.MountedVolume, len(volumes)) + for i, mountedVolume := range volumes { + volumesByName[mountedVolume.VolumeName] = &volumes[i] + } + + volumeNames := vm.desiredStateOfWorld.GetVolumeNamesForPod(podName) + volumesByOuterName := make(map[string]*cache.MountedVolume, len(volumeNames)) + for outerName, volumeName := range volumeNames { + mountedVolume, ok := volumesByName[volumeName] + if ok { + volumesByOuterName[outerName] = mountedVolume + } } - return filterUnmountedVolumes(mountedVolumes, expectedVolumes) + + return volumesByOuterName } -// filterUnmountedVolumes adds each element of expectedVolumes that is not in -// mountedVolumes to a list of unmountedVolumes and returns it. -func filterUnmountedVolumes(mountedVolumes sets.Set[string], expectedVolumes []string) []string { +// getUnmountedVolumes returns a list of unmounted volumes. +// This includes the volumes in expectedVolumes, but not in one of DSW/ASW. +// The list also includes volume that may be mounted in uncertain state. 
+func (vm *volumeManager) getUnmountedVolumes(podName types.UniquePodName, expectedVolumes []string) []string { unmountedVolumes := []string{} - for _, expectedVolume := range expectedVolumes { - if !mountedVolumes.Has(expectedVolume) { - unmountedVolumes = append(unmountedVolumes, expectedVolume) + volumeNames := vm.desiredStateOfWorld.GetVolumeNamesForPod(podName) + for _, outerName := range expectedVolumes { + volumeName, ok := volumeNames[outerName] + if !ok { + unmountedVolumes = append(unmountedVolumes, outerName) + continue + } + _, ok = vm.actualStateOfWorld.GetMountedVolumeForPod(podName, volumeName) + if !ok { + unmountedVolumes = append(unmountedVolumes, outerName) } } slices.Sort(unmountedVolumes) diff --git a/pkg/kubelet/volumemanager/volume_manager_fake.go b/pkg/kubelet/volumemanager/volume_manager_fake.go index 02c424c380154..b57f01779e9fd 100644 --- a/pkg/kubelet/volumemanager/volume_manager_fake.go +++ b/pkg/kubelet/volumemanager/volume_manager_fake.go @@ -78,9 +78,9 @@ func (f *FakeVolumeManager) GetMountedVolumesForPod(podName types.UniquePodName) return nil } -// GetPossiblyMountedVolumesForPod is not implemented -func (f *FakeVolumeManager) GetPossiblyMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap { - return nil +// HasPossiblyMountedVolumesForPod is not implemented +func (f *FakeVolumeManager) HasPossiblyMountedVolumesForPod(podName types.UniquePodName) bool { + return false } // GetExtraSupplementalGroupsForPod is not implemented diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index 08492d8891acd..c9946e7b95caf 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -656,7 +656,6 @@ func delayClaimBecomesBound( } func TestWaitForAllPodsUnmount(t *testing.T) { - tCtx := ktesting.Init(t) tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest") require.NoError(t, err, "Failed to create temp directory") defer func() { @@ -684,6 +683,7 @@ func TestWaitForAllPodsUnmount(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + var ctx context.Context = ktesting.Init(t) podManager := kubepod.NewBasicPodManager() node, pod, pv, claim := createObjects(test.podMode, test.podMode) @@ -691,8 +691,6 @@ func TestWaitForAllPodsUnmount(t *testing.T) { manager := newTestVolumeManager(t, tmpDir, podManager, kubeClient, node) - ctx, cancel := context.WithTimeout(tCtx, 1*time.Second) - defer cancel() sourcesReady := config.NewSourcesReady(func(_ sets.Set[string]) bool { return true }) go manager.Run(ctx, sourcesReady) @@ -706,11 +704,12 @@ func TestWaitForAllPodsUnmount(t *testing.T) { err := manager.WaitForAttachAndMount(ctx, pod) require.NoError(t, err, "Failed to wait for attach and mount") + ctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() err = manager.WaitForAllPodsUnmount(ctx, []*v1.Pod{pod}) if test.expectedError { - require.Error(t, err, "Expected error due to timeout") - require.Contains(t, err.Error(), "context deadline exceeded", "Expected deadline exceeded error") + require.ErrorIs(t, err, context.DeadlineExceeded, "Expected error due to timeout") } else { require.NoError(t, err, "Expected no error") } diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 462aafd00ed70..faa604e96a516 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ 
b/pkg/volume/util/operationexecutor/operation_executor.go @@ -169,7 +169,6 @@ type MarkVolumeOpts struct { VolumeName v1.UniqueVolumeName Mounter volume.Mounter BlockVolumeMapper volume.BlockVolumeMapper - OuterVolumeSpecName string VolumeGIDVolume string VolumeSpec *volume.Spec VolumeMountState VolumeMountState @@ -422,10 +421,8 @@ type VolumeToMount struct { // InnerVolumeSpecName. VolumeSpec *volume.Spec - // outerVolumeSpecName is the podSpec.Volume[x].Name of the volume. If the - // volume was referenced through a persistent volume claim, this contains - // the podSpec.Volume[x].Name of the persistent volume claim. - OuterVolumeSpecName string + // outerVolumeSpecNames are the podSpec.Volume[x].Name of the volume. + OuterVolumeSpecNames []string // Pod to mount the volume to. Used to create NewMounter. Pod *v1.Pod @@ -679,44 +676,6 @@ type MountedVolume struct { // fsType: ext4 InnerVolumeSpecName string - // outerVolumeSpecName is the podSpec.Volume[x].Name of the volume. If the - // volume was referenced through a persistent volume claim, this contains - // the podSpec.Volume[x].Name of the persistent volume claim. - // PVC example: - // kind: Pod - // apiVersion: v1 - // metadata: - // name: mypod - // spec: - // containers: - // - name: myfrontend - // image: dockerfile/nginx - // volumeMounts: - // - mountPath: "/var/www/html" - // name: mypd - // volumes: - // - name: mypd <- OuterVolumeSpecName - // persistentVolumeClaim: - // claimName: myclaim - // Non-PVC example: - // apiVersion: v1 - // kind: Pod - // metadata: - // name: test-pd - // spec: - // containers: - // - image: registry.k8s.io/test-webserver - // name: test-container - // volumeMounts: - // - mountPath: /test-pd - // name: test-volume - // volumes: - // - name: test-volume <- OuterVolumeSpecName - // gcePersistentDisk: - // pdName: my-data-disk - // fsType: ext4 - OuterVolumeSpecName string - // PluginName is the "Unescaped Qualified" name of the volume plugin used to // mount and unmount this volume. It can be used to fetch the volume plugin // to unmount with, on demand. 
It is also the name that plugins use, though @@ -758,13 +717,13 @@ type MountedVolume struct { // GenerateMsgDetailed returns detailed msgs for mounted volumes func (volume *MountedVolume) GenerateMsgDetailed(prefixMsg, suffixMsg string) (detailedMsg string) { detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.PodName, volume.PodUID) - return generateVolumeMsgDetailed(prefixMsg, suffixMsg, volume.OuterVolumeSpecName, detailedStr) + return generateVolumeMsgDetailed(prefixMsg, suffixMsg, string(volume.VolumeName), detailedStr) } // GenerateMsg returns simple and detailed msgs for mounted volumes func (volume *MountedVolume) GenerateMsg(prefixMsg, suffixMsg string) (simpleMsg, detailedMsg string) { detailedStr := fmt.Sprintf("(UniqueName: %q) pod %q (UID: %q)", volume.VolumeName, volume.PodName, volume.PodUID) - return generateVolumeMsg(prefixMsg, suffixMsg, volume.OuterVolumeSpecName, detailedStr) + return generateVolumeMsg(prefixMsg, suffixMsg, string(volume.VolumeName), detailedStr) } // GenerateErrorDetailed returns simple and detailed errors for mounted volumes diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index a79e6cb628b1a..4cd9c7cfe6939 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -593,7 +593,6 @@ func (og *operationGenerator) GenerateMountVolumeFunc( PodUID: volumeToMount.Pod.UID, VolumeName: volumeToMount.VolumeName, Mounter: volumeMounter, - OuterVolumeSpecName: volumeToMount.OuterVolumeSpecName, VolumeGIDVolume: volumeToMount.VolumeGIDValue, VolumeSpec: volumeToMount.VolumeSpec, VolumeMountState: VolumeMounted, @@ -759,13 +758,12 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( if unmountErr != nil { // Mark the volume as uncertain, so SetUp is called for new pods. Teardown may be already in progress. opts := MarkVolumeOpts{ - PodName: volumeToUnmount.PodName, - PodUID: volumeToUnmount.PodUID, - VolumeName: volumeToUnmount.VolumeName, - OuterVolumeSpecName: volumeToUnmount.OuterVolumeSpecName, - VolumeGIDVolume: volumeToUnmount.VolumeGIDValue, - VolumeSpec: volumeToUnmount.VolumeSpec, - VolumeMountState: VolumeMountUncertain, + PodName: volumeToUnmount.PodName, + PodUID: volumeToUnmount.PodUID, + VolumeName: volumeToUnmount.VolumeName, + VolumeGIDVolume: volumeToUnmount.VolumeGIDValue, + VolumeSpec: volumeToUnmount.VolumeSpec, + VolumeMountState: VolumeMountUncertain, } markMountUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(opts) if markMountUncertainErr != nil { @@ -779,9 +777,8 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc( } klog.Infof( - "UnmountVolume.TearDown succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGIDValue %q", + "UnmountVolume.TearDown succeeded for volume %q pod %q (UID: %q). InnerVolumeSpecName %q. 
PluginName %q, VolumeGIDValue %q", volumeToUnmount.VolumeName, - volumeToUnmount.OuterVolumeSpecName, volumeToUnmount.PodName, volumeToUnmount.PodUID, volumeToUnmount.InnerVolumeSpecName, @@ -1025,14 +1022,13 @@ func (og *operationGenerator) GenerateMapVolumeFunc( } markVolumeOpts := MarkVolumeOpts{ - PodName: volumeToMount.PodName, - PodUID: volumeToMount.Pod.UID, - VolumeName: volumeToMount.VolumeName, - BlockVolumeMapper: blockVolumeMapper, - OuterVolumeSpecName: volumeToMount.OuterVolumeSpecName, - VolumeGIDVolume: volumeToMount.VolumeGIDValue, - VolumeSpec: volumeToMount.VolumeSpec, - VolumeMountState: VolumeMounted, + PodName: volumeToMount.PodName, + PodUID: volumeToMount.Pod.UID, + VolumeName: volumeToMount.VolumeName, + BlockVolumeMapper: blockVolumeMapper, + VolumeGIDVolume: volumeToMount.VolumeGIDValue, + VolumeSpec: volumeToMount.VolumeSpec, + VolumeMountState: VolumeMounted, } // Call MapPodDevice if blockVolumeMapper implements CustomBlockVolumeMapper @@ -1199,13 +1195,12 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( // cases below. The volume is marked as fully un-mapped at the end of this function, when everything // succeeds. markVolumeOpts := MarkVolumeOpts{ - PodName: volumeToUnmount.PodName, - PodUID: volumeToUnmount.PodUID, - VolumeName: volumeToUnmount.VolumeName, - OuterVolumeSpecName: volumeToUnmount.OuterVolumeSpecName, - VolumeGIDVolume: volumeToUnmount.VolumeGIDValue, - VolumeSpec: volumeToUnmount.VolumeSpec, - VolumeMountState: VolumeMountUncertain, + PodName: volumeToUnmount.PodName, + PodUID: volumeToUnmount.PodUID, + VolumeName: volumeToUnmount.VolumeName, + VolumeGIDVolume: volumeToUnmount.VolumeGIDValue, + VolumeSpec: volumeToUnmount.VolumeSpec, + VolumeMountState: VolumeMountUncertain, } markVolumeUncertainErr := actualStateOfWorld.MarkVolumeMountAsUncertain(markVolumeOpts) if markVolumeUncertainErr != nil { @@ -1234,9 +1229,8 @@ func (og *operationGenerator) GenerateUnmapVolumeFunc( } klog.Infof( - "UnmapVolume succeeded for volume %q (OuterVolumeSpecName: %q) pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGIDValue %q", + "UnmapVolume succeeded for volume %q pod %q (UID: %q). InnerVolumeSpecName %q. PluginName %q, VolumeGIDValue %q", volumeToUnmount.VolumeName, - volumeToUnmount.OuterVolumeSpecName, volumeToUnmount.PodName, volumeToUnmount.PodUID, volumeToUnmount.InnerVolumeSpecName, diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 7686a0b6fac2a..61f837d8896fb 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -211,6 +211,25 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { completeTest(ctx, f, c, ns, pv, pvc) }) + // The same as above, but with multiple volumes reference the same PVC in the pod. 
+ ginkgo.It("create a PVC and use it multiple times in a single pod", func(ctx context.Context) { + pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true) + framework.ExpectNoError(err) + + framework.Logf("Creating nfs test pod") + pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc, pvc}, admissionapi.LevelPrivileged, + "touch /mnt/volume1/SUCCESS && cat /mnt/volume2/SUCCESS") + runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) + framework.ExpectNoError(err) + defer func() { + err := e2epod.DeletePodWithWait(ctx, c, runPod) + framework.ExpectNoError(err) + }() + + err = testPodSuccessOrFail(ctx, c, f.Timeouts, ns, runPod) + framework.ExpectNoError(err) + }) + // Create new PV without claim, verify it's in Available state and LastPhaseTransitionTime is set. f.It("create a PV: test phase transition timestamp is set and phase is Available", func(ctx context.Context) { pvObj := e2epv.MakePersistentVolume(pvConfig)