diff --git a/images/virtualization-artifact/pkg/controller/service/disk_service.go b/images/virtualization-artifact/pkg/controller/service/disk_service.go index 4865cf173f..5f2f49f009 100644 --- a/images/virtualization-artifact/pkg/controller/service/disk_service.go +++ b/images/virtualization-artifact/pkg/controller/service/disk_service.go @@ -17,7 +17,6 @@ limitations under the License. package service import ( - "cmp" "context" "errors" "fmt" @@ -43,6 +42,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/pointer" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" + "github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/dvcr" "github.com/deckhouse/virtualization/api/core/v1alpha2" @@ -53,6 +53,8 @@ type DiskService struct { dvcrSettings *dvcr.Settings protection *ProtectionService controllerName string + + volumeAndAccessModesGetter volumemode.VolumeAndAccessModesGetter } func NewDiskService( @@ -62,10 +64,11 @@ func NewDiskService( controllerName string, ) *DiskService { return &DiskService{ - client: client, - dvcrSettings: dvcrSettings, - protection: protection, - controllerName: controllerName, + client: client, + dvcrSettings: dvcrSettings, + protection: protection, + controllerName: controllerName, + volumeAndAccessModesGetter: volumemode.NewVolumeAndAccessModesGetter(client, nil), } } @@ -125,54 +128,7 @@ func (s DiskService) Start( } func (s DiskService) GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { - if obj == nil { - return "", "", errors.New("object is nil") - } - if sc == nil { - return "", "", errors.New("storage class is nil") - } - - // Priority: object > storage class > storage profile. - - // 1. Get modes from annotations on the object. - accessMode, _ := s.parseAccessMode(obj) - volumeMode, _ := s.parseVolumeMode(obj) - - if accessMode != "" && volumeMode != "" { - return volumeMode, accessMode, nil - } - - // 2. Get modes from annotations on the storage class. - if m, exists := s.parseAccessMode(sc); accessMode == "" && exists { - accessMode = m - } - if m, exists := s.parseVolumeMode(sc); volumeMode == "" && exists { - volumeMode = m - } - - if accessMode != "" && volumeMode != "" { - return volumeMode, accessMode, nil - } - - // 3. Get modes from storage profile. 
- storageProfile, err := s.GetStorageProfile(ctx, sc.Name) - if err != nil { - return "", "", fmt.Errorf("get storage profile: %w", err) - } - - if storageProfile == nil { - return "", "", fmt.Errorf("storage profile %q not found: %w", sc.Name, ErrStorageProfileNotFound) - } - - storageCaps := s.parseStorageCapabilities(storageProfile.Status) - if accessMode == "" && storageCaps.AccessMode != "" { - accessMode = storageCaps.AccessMode - } - if volumeMode == "" && storageCaps.VolumeMode != "" { - volumeMode = storageCaps.VolumeMode - } - - return volumeMode, accessMode, nil + return s.volumeAndAccessModesGetter.GetVolumeAndAccessModes(ctx, obj, sc) } func (s DiskService) StartImmediate( @@ -446,67 +402,6 @@ func (s DiskService) GetStorageProfile(ctx context.Context, name string) (*cdiv1 return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &cdiv1.StorageProfile{}) } -type StorageCapabilities struct { - AccessMode corev1.PersistentVolumeAccessMode - VolumeMode corev1.PersistentVolumeMode -} - -func (cp StorageCapabilities) IsEmpty() bool { - return cp.AccessMode == "" && cp.VolumeMode == "" -} - -var accessModeWeights = map[corev1.PersistentVolumeAccessMode]int{ - corev1.ReadOnlyMany: 0, - corev1.ReadWriteOncePod: 1, - corev1.ReadWriteOnce: 2, - corev1.ReadWriteMany: 3, -} - -var volumeModeWeights = map[corev1.PersistentVolumeMode]int{ - corev1.PersistentVolumeFilesystem: 0, - corev1.PersistentVolumeBlock: 1, -} - -func getAccessModeMax(modes []corev1.PersistentVolumeAccessMode) corev1.PersistentVolumeAccessMode { - weight := -1 - var m corev1.PersistentVolumeAccessMode - for _, mode := range modes { - if accessModeWeights[mode] > weight { - weight = accessModeWeights[mode] - m = mode - } - } - return m -} - -func (s DiskService) parseVolumeMode(obj client.Object) (corev1.PersistentVolumeMode, bool) { - if obj == nil { - return "", false - } - switch obj.GetAnnotations()[annotations.AnnVirtualDiskVolumeMode] { - case string(corev1.PersistentVolumeBlock): - return corev1.PersistentVolumeBlock, true - case string(corev1.PersistentVolumeFilesystem): - return corev1.PersistentVolumeFilesystem, true - default: - return "", false - } -} - -func (s DiskService) parseAccessMode(obj client.Object) (corev1.PersistentVolumeAccessMode, bool) { - if obj == nil { - return "", false - } - switch obj.GetAnnotations()[annotations.AnnVirtualDiskAccessMode] { - case string(corev1.ReadWriteOnce): - return corev1.ReadWriteOnce, true - case string(corev1.ReadWriteMany): - return corev1.ReadWriteMany, true - default: - return "", false - } -} - func (s DiskService) isImmediateBindingMode(sc *storagev1.StorageClass) bool { if sc == nil { return false @@ -514,37 +409,6 @@ func (s DiskService) isImmediateBindingMode(sc *storagev1.StorageClass) bool { return sc.GetAnnotations()[annotations.AnnVirtualDiskBindingMode] == string(storagev1.VolumeBindingImmediate) } -func (s DiskService) parseStorageCapabilities(status cdiv1.StorageProfileStatus) StorageCapabilities { - var storageCapabilities []StorageCapabilities - for _, cp := range status.ClaimPropertySets { - var mode corev1.PersistentVolumeMode - if cp.VolumeMode == nil || *cp.VolumeMode == "" { - mode = corev1.PersistentVolumeFilesystem - } else { - mode = *cp.VolumeMode - } - storageCapabilities = append(storageCapabilities, StorageCapabilities{ - AccessMode: getAccessModeMax(cp.AccessModes), - VolumeMode: mode, - }) - } - slices.SortFunc(storageCapabilities, func(a, b StorageCapabilities) int { - if c := 
cmp.Compare(accessModeWeights[a.AccessMode], accessModeWeights[b.AccessMode]); c != 0 { - return c - } - return cmp.Compare(volumeModeWeights[a.VolumeMode], volumeModeWeights[b.VolumeMode]) - }) - - if len(storageCapabilities) == 0 { - return StorageCapabilities{ - AccessMode: corev1.ReadWriteOnce, - VolumeMode: corev1.PersistentVolumeFilesystem, - } - } - - return storageCapabilities[len(storageCapabilities)-1] -} - func (s DiskService) GetStorageClass(ctx context.Context, scName string) (*storagev1.StorageClass, error) { return object.FetchObject(ctx, types.NamespacedName{Name: scName}, s.client, &storagev1.StorageClass{}) } diff --git a/images/virtualization-artifact/pkg/controller/service/errors.go b/images/virtualization-artifact/pkg/controller/service/errors.go index bae4dfe549..7842a7f31d 100644 --- a/images/virtualization-artifact/pkg/controller/service/errors.go +++ b/images/virtualization-artifact/pkg/controller/service/errors.go @@ -22,7 +22,6 @@ import ( ) var ( - ErrStorageProfileNotFound = errors.New("storage profile not found") ErrDefaultStorageClassNotFound = errors.New("default storage class not found") ErrDataVolumeNotRunning = errors.New("pvc importer is not running") ErrDataVolumeProvisionerUnschedulable = errors.New("provisioner unschedulable") diff --git a/images/virtualization-artifact/pkg/controller/service/volumemode/getter.go b/images/virtualization-artifact/pkg/controller/service/volumemode/getter.go new file mode 100644 index 0000000000..72c1733324 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/service/volumemode/getter.go @@ -0,0 +1,204 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package volumemode + +import ( + "cmp" + "context" + "errors" + "fmt" + "slices" + + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/types" + cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/deckhouse/virtualization-controller/pkg/common/annotations" + "github.com/deckhouse/virtualization-controller/pkg/common/object" +) + +var ErrStorageProfileNotFound = errors.New("storage profile not found") + +//go:generate go tool moq -rm -out mock.go . 
VolumeAndAccessModesGetter +type VolumeAndAccessModesGetter interface { + GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) +} + +func NewVolumeAndAccessModesGetter(client client.Client, storageProfileGetter func(ctx context.Context, name string) (*cdiv1.StorageProfile, error)) VolumeAndAccessModesGetter { + getter := &volumeAndAccessModesGetter{ + client: client, + storageProfileGetter: storageProfileGetter, + } + if getter.storageProfileGetter == nil { + getter.storageProfileGetter = getter.getStorageProfile + } + return getter +} + +type volumeAndAccessModesGetter struct { + client client.Client + storageProfileGetter func(ctx context.Context, name string) (*cdiv1.StorageProfile, error) +} + +func (s volumeAndAccessModesGetter) GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { + if obj == nil { + return "", "", errors.New("object is nil") + } + if sc == nil { + return "", "", errors.New("storage class is nil") + } + + // Priority: object > storage class > storage profile. + + // 1. Get modes from annotations on the object. + accessMode, _ := s.parseAccessMode(obj) + volumeMode, _ := s.parseVolumeMode(obj) + + if accessMode != "" && volumeMode != "" { + return volumeMode, accessMode, nil + } + + // 2. Get modes from annotations on the storage class. + if m, exists := s.parseAccessMode(sc); accessMode == "" && exists { + accessMode = m + } + if m, exists := s.parseVolumeMode(sc); volumeMode == "" && exists { + volumeMode = m + } + + if accessMode != "" && volumeMode != "" { + return volumeMode, accessMode, nil + } + + // 3. Get modes from storage profile. 
+ storageProfile, err := s.storageProfileGetter(ctx, sc.Name) + if err != nil { + return "", "", fmt.Errorf("get storage profile: %w", err) + } + + if storageProfile == nil { + return "", "", fmt.Errorf("storage profile %q not found: %w", sc.Name, ErrStorageProfileNotFound) + } + + storageCaps := s.parseStorageCapabilities(storageProfile.Status) + if accessMode == "" && storageCaps.AccessMode != "" { + accessMode = storageCaps.AccessMode + } + if volumeMode == "" && storageCaps.VolumeMode != "" { + volumeMode = storageCaps.VolumeMode + } + + return volumeMode, accessMode, nil +} + +var accessModeWeights = map[corev1.PersistentVolumeAccessMode]int{ + corev1.ReadOnlyMany: 0, + corev1.ReadWriteOncePod: 1, + corev1.ReadWriteOnce: 2, + corev1.ReadWriteMany: 3, +} + +var volumeModeWeights = map[corev1.PersistentVolumeMode]int{ + corev1.PersistentVolumeFilesystem: 0, + corev1.PersistentVolumeBlock: 1, +} + +func getAccessModeMax(modes []corev1.PersistentVolumeAccessMode) corev1.PersistentVolumeAccessMode { + weight := -1 + var m corev1.PersistentVolumeAccessMode + for _, mode := range modes { + if accessModeWeights[mode] > weight { + weight = accessModeWeights[mode] + m = mode + } + } + return m +} + +func (s volumeAndAccessModesGetter) parseVolumeMode(obj client.Object) (corev1.PersistentVolumeMode, bool) { + if obj == nil { + return "", false + } + switch obj.GetAnnotations()[annotations.AnnVirtualDiskVolumeMode] { + case string(corev1.PersistentVolumeBlock): + return corev1.PersistentVolumeBlock, true + case string(corev1.PersistentVolumeFilesystem): + return corev1.PersistentVolumeFilesystem, true + default: + return "", false + } +} + +func (s volumeAndAccessModesGetter) parseAccessMode(obj client.Object) (corev1.PersistentVolumeAccessMode, bool) { + if obj == nil { + return "", false + } + switch obj.GetAnnotations()[annotations.AnnVirtualDiskAccessMode] { + case string(corev1.ReadWriteOnce): + return corev1.ReadWriteOnce, true + case string(corev1.ReadWriteMany): + return corev1.ReadWriteMany, true + default: + return "", false + } +} + +func (s volumeAndAccessModesGetter) parseStorageCapabilities(status cdiv1.StorageProfileStatus) StorageCapabilities { + var storageCapabilities []StorageCapabilities + for _, cp := range status.ClaimPropertySets { + var mode corev1.PersistentVolumeMode + if cp.VolumeMode == nil || *cp.VolumeMode == "" { + mode = corev1.PersistentVolumeFilesystem + } else { + mode = *cp.VolumeMode + } + storageCapabilities = append(storageCapabilities, StorageCapabilities{ + AccessMode: getAccessModeMax(cp.AccessModes), + VolumeMode: mode, + }) + } + slices.SortFunc(storageCapabilities, func(a, b StorageCapabilities) int { + if c := cmp.Compare(accessModeWeights[a.AccessMode], accessModeWeights[b.AccessMode]); c != 0 { + return c + } + return cmp.Compare(volumeModeWeights[a.VolumeMode], volumeModeWeights[b.VolumeMode]) + }) + + if len(storageCapabilities) == 0 { + return StorageCapabilities{ + AccessMode: corev1.ReadWriteOnce, + VolumeMode: corev1.PersistentVolumeFilesystem, + } + } + + return storageCapabilities[len(storageCapabilities)-1] +} + +func (s volumeAndAccessModesGetter) getStorageProfile(ctx context.Context, name string) (*cdiv1.StorageProfile, error) { + return object.FetchObject(ctx, types.NamespacedName{Name: name}, s.client, &cdiv1.StorageProfile{}) +} + +type StorageCapabilities struct { + AccessMode corev1.PersistentVolumeAccessMode + VolumeMode corev1.PersistentVolumeMode +} + +func (cp StorageCapabilities) IsEmpty() bool { + return cp.AccessMode == "" && 
cp.VolumeMode == "" +} diff --git a/images/virtualization-artifact/pkg/controller/service/volumemode/mock.go b/images/virtualization-artifact/pkg/controller/service/volumemode/mock.go new file mode 100644 index 0000000000..036a1550d0 --- /dev/null +++ b/images/virtualization-artifact/pkg/controller/service/volumemode/mock.go @@ -0,0 +1,90 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package volumemode + +import ( + "context" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sync" +) + +// Ensure, that VolumeAndAccessModesGetterMock does implement VolumeAndAccessModesGetter. +// If this is not the case, regenerate this file with moq. +var _ VolumeAndAccessModesGetter = &VolumeAndAccessModesGetterMock{} + +// VolumeAndAccessModesGetterMock is a mock implementation of VolumeAndAccessModesGetter. +// +// func TestSomethingThatUsesVolumeAndAccessModesGetter(t *testing.T) { +// +// // make and configure a mocked VolumeAndAccessModesGetter +// mockedVolumeAndAccessModesGetter := &VolumeAndAccessModesGetterMock{ +// GetVolumeAndAccessModesFunc: func(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { +// panic("mock out the GetVolumeAndAccessModes method") +// }, +// } +// +// // use mockedVolumeAndAccessModesGetter in code that requires VolumeAndAccessModesGetter +// // and then make assertions. +// +// } +type VolumeAndAccessModesGetterMock struct { + // GetVolumeAndAccessModesFunc mocks the GetVolumeAndAccessModes method. + GetVolumeAndAccessModesFunc func(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) + + // calls tracks calls to the methods. + calls struct { + // GetVolumeAndAccessModes holds details about calls to the GetVolumeAndAccessModes method. + GetVolumeAndAccessModes []struct { + // Ctx is the ctx argument value. + Ctx context.Context + // Obj is the obj argument value. + Obj client.Object + // Sc is the sc argument value. + Sc *storagev1.StorageClass + } + } + lockGetVolumeAndAccessModes sync.RWMutex +} + +// GetVolumeAndAccessModes calls GetVolumeAndAccessModesFunc. +func (mock *VolumeAndAccessModesGetterMock) GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) { + if mock.GetVolumeAndAccessModesFunc == nil { + panic("VolumeAndAccessModesGetterMock.GetVolumeAndAccessModesFunc: method is nil but VolumeAndAccessModesGetter.GetVolumeAndAccessModes was just called") + } + callInfo := struct { + Ctx context.Context + Obj client.Object + Sc *storagev1.StorageClass + }{ + Ctx: ctx, + Obj: obj, + Sc: sc, + } + mock.lockGetVolumeAndAccessModes.Lock() + mock.calls.GetVolumeAndAccessModes = append(mock.calls.GetVolumeAndAccessModes, callInfo) + mock.lockGetVolumeAndAccessModes.Unlock() + return mock.GetVolumeAndAccessModesFunc(ctx, obj, sc) +} + +// GetVolumeAndAccessModesCalls gets all the calls that were made to GetVolumeAndAccessModes. 
+// Check the length with: +// +// len(mockedVolumeAndAccessModesGetter.GetVolumeAndAccessModesCalls()) +func (mock *VolumeAndAccessModesGetterMock) GetVolumeAndAccessModesCalls() []struct { + Ctx context.Context + Obj client.Object + Sc *storagev1.StorageClass +} { + var calls []struct { + Ctx context.Context + Obj client.Object + Sc *storagev1.StorageClass + } + mock.lockGetVolumeAndAccessModes.RLock() + calls = mock.calls.GetVolumeAndAccessModes + mock.lockGetVolumeAndAccessModes.RUnlock() + return calls +} diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go index 2d8e501a80..7b3378d897 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/migration.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/migration.go @@ -41,6 +41,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/kvbuilder" "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization-controller/pkg/logger" "github.com/deckhouse/virtualization/api/core/v1alpha2" @@ -55,18 +56,14 @@ type storageClassValidator interface { IsStorageClassDeprecated(sc *storagev1.StorageClass) bool } -type volumeAndAccessModesGetter interface { - GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) -} - type MigrationHandler struct { client client.Client scValidator storageClassValidator - modeGetter volumeAndAccessModesGetter + modeGetter volumemode.VolumeAndAccessModesGetter gate featuregate.FeatureGate } -func NewMigrationHandler(client client.Client, storageClassValidator storageClassValidator, modeGetter volumeAndAccessModesGetter, gate featuregate.FeatureGate) *MigrationHandler { +func NewMigrationHandler(client client.Client, storageClassValidator storageClassValidator, modeGetter volumemode.VolumeAndAccessModesGetter, gate featuregate.FeatureGate) *MigrationHandler { return &MigrationHandler{ client: client, scValidator: storageClassValidator, diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go index aeb6ecc3ee..14ffe3cbd4 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/sources.go @@ -34,6 +34,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/source/step" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" @@ -142,7 +143,7 @@ func setPhaseConditionFromStorageError(err error, vd *v1alpha2.VirtualDisk, cb * switch { case err == nil: return false, nil - case errors.Is(err, 
service.ErrStorageProfileNotFound): + case errors.Is(err, volumemode.ErrStorageProfileNotFound): vd.Status.Phase = v1alpha2.DiskPending cb. Status(metav1.ConditionFalse). diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go index 5099eb1c5e..62e2866c44 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/source/step/create_dv_step.go @@ -33,6 +33,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/provisioner" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" vdsupplements "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/supplements" "github.com/deckhouse/virtualization/api/core/v1alpha2" @@ -94,7 +95,7 @@ func (s CreateDataVolumeStep) Take(ctx context.Context, vd *v1alpha2.VirtualDisk switch { case err == nil: // OK. - case errors.Is(err, service.ErrStorageProfileNotFound): + case errors.Is(err, volumemode.ErrStorageProfileNotFound): vd.Status.Phase = v1alpha2.DiskFailed s.cb. Status(metav1.ConditionFalse). diff --git a/images/virtualization-artifact/pkg/controller/vd/internal/validator/storage_class_validator.go b/images/virtualization-artifact/pkg/controller/vd/internal/validator/storage_class_validator.go index 70b8346715..50a0c3f853 100644 --- a/images/virtualization-artifact/pkg/controller/vd/internal/validator/storage_class_validator.go +++ b/images/virtualization-artifact/pkg/controller/vd/internal/validator/storage_class_validator.go @@ -20,29 +20,25 @@ import ( "context" "fmt" - corev1 "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/equality" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" commonvd "github.com/deckhouse/virtualization-controller/pkg/common/vd" + "github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode" intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service" "github.com/deckhouse/virtualization-controller/pkg/featuregates" "github.com/deckhouse/virtualization-controller/pkg/version" "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -type volumeAndAccessModesGetter interface { - GetVolumeAndAccessModes(ctx context.Context, obj client.Object, sc *storagev1.StorageClass) (corev1.PersistentVolumeMode, corev1.PersistentVolumeAccessMode, error) -} type StorageClassValidator struct { client client.Client scService *intsvc.VirtualDiskStorageClassService - modeGetter volumeAndAccessModesGetter + modeGetter volumemode.VolumeAndAccessModesGetter } -func NewMigrationStorageClassValidator(client client.Client, scService *intsvc.VirtualDiskStorageClassService, modeGetter volumeAndAccessModesGetter) *StorageClassValidator { +func NewMigrationStorageClassValidator(client client.Client, scService *intsvc.VirtualDiskStorageClassService, modeGetter volumemode.VolumeAndAccessModesGetter) *StorageClassValidator { return &StorageClassValidator{ client: client, scService: scService, diff --git a/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go 
b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go index 3b7bb19603..b677c3fc83 100644 --- a/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go +++ b/images/virtualization-artifact/pkg/controller/vd/vd_webhook.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode" intsvc "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/service" "github.com/deckhouse/virtualization-controller/pkg/controller/vd/internal/validator" "github.com/deckhouse/virtualization-controller/pkg/logger" @@ -40,14 +40,14 @@ type Validator struct { validators []VirtualDiskValidator } -func NewValidator(client client.Client, scService *intsvc.VirtualDiskStorageClassService, diskService *service.DiskService) *Validator { +func NewValidator(client client.Client, scService *intsvc.VirtualDiskStorageClassService, modeGetter volumemode.VolumeAndAccessModesGetter) *Validator { return &Validator{ validators: []VirtualDiskValidator{ validator.NewPVCSizeValidator(client), validator.NewSpecChangesValidator(scService), validator.NewISOSourceValidator(client), validator.NewNameValidator(), - validator.NewMigrationStorageClassValidator(client, scService, diskService), + validator.NewMigrationStorageClassValidator(client, scService, modeGetter), }, } } diff --git a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go index 8075929648..5dac3f1e2d 100644 --- a/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go +++ b/images/virtualization-artifact/pkg/controller/vi/internal/source/sources.go @@ -30,6 +30,7 @@ import ( "github.com/deckhouse/virtualization-controller/pkg/common/object" "github.com/deckhouse/virtualization-controller/pkg/controller/conditions" "github.com/deckhouse/virtualization-controller/pkg/controller/service" + "github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode" "github.com/deckhouse/virtualization-controller/pkg/controller/supplements" "github.com/deckhouse/virtualization/api/core/v1alpha2" "github.com/deckhouse/virtualization/api/core/v1alpha2/vicondition" @@ -209,7 +210,7 @@ func setPhaseConditionFromStorageError(err error, vi *v1alpha2.VirtualImage, cb switch { case err == nil: return false, nil - case errors.Is(err, service.ErrStorageProfileNotFound): + case errors.Is(err, volumemode.ErrStorageProfileNotFound): vi.Status.Phase = v1alpha2.ImageFailed cb. Status(metav1.ConditionFalse). 
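Reviewer note: a minimal sketch (not part of this change) of how the extracted `volumemode` package can be exercised without a live cluster, by passing a stubbed `storageProfileGetter` into `NewVolumeAndAccessModesGetter`. The profile contents and the storage class name below are made up for illustration; consumers that only need canned results can use the generated `VolumeAndAccessModesGetterMock` instead.

```go
package volumemode_test

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"

	"github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode"
)

func Example_getVolumeAndAccessModes() {
	// Stubbed storage profile getter: no API calls, fixed capabilities (Block + RWX).
	blockMode := corev1.PersistentVolumeBlock
	profileGetter := func(_ context.Context, name string) (*cdiv1.StorageProfile, error) {
		return &cdiv1.StorageProfile{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Status: cdiv1.StorageProfileStatus{
				ClaimPropertySets: []cdiv1.ClaimPropertySet{{
					AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
					VolumeMode:  &blockMode,
				}},
			},
		}, nil
	}

	// The client may be nil here because the stubbed getter never touches the API.
	getter := volumemode.NewVolumeAndAccessModesGetter(nil, profileGetter)

	// Neither the object nor the storage class carries mode annotations, so the
	// getter falls through to the storage profile
	// (priority: object > storage class > storage profile).
	pvc := &corev1.PersistentVolumeClaim{}
	sc := &storagev1.StorageClass{ObjectMeta: metav1.ObjectMeta{Name: "replicated"}}

	volumeMode, accessMode, err := getter.GetVolumeAndAccessModes(context.Background(), pvc, sc)
	fmt.Println(volumeMode, accessMode, err)
	// Output: Block ReadWriteMany <nil>
}
```

This is the same wiring the e2e suite uses further down, where `getStorageProfile(f)` replaces the stub to read profiles through the rewritten internal API group.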
diff --git a/test/e2e/internal/framework/client.go b/test/e2e/internal/framework/client.go index 398ffeb274..a65ce332c1 100644 --- a/test/e2e/internal/framework/client.go +++ b/test/e2e/internal/framework/client.go @@ -33,6 +33,7 @@ import ( "github.com/deckhouse/virtualization/test/e2e/internal/d8" gt "github.com/deckhouse/virtualization/test/e2e/internal/git" "github.com/deckhouse/virtualization/test/e2e/internal/kubectl" + "github.com/deckhouse/virtualization/test/e2e/internal/rewrite" ) var clients = Clients{} @@ -48,6 +49,7 @@ type Clients struct { d8virtualization d8.D8Virtualization client client.Client dynamic dynamic.Interface + rewriteClient rewrite.Client git gt.Git } @@ -68,6 +70,10 @@ func (c Clients) DynamicClient() dynamic.Interface { return c.dynamic } +func (c Clients) RewriteClient() rewrite.Client { + return c.rewriteClient +} + func (c Clients) Kubectl() kubectl.Kubectl { return c.kubectl } @@ -99,6 +105,7 @@ func init() { if err != nil { panic(err) } + clients.rewriteClient = rewrite.NewRewriteClient(clients.dynamic) clients.kubectl, err = kubectl.NewKubectl(kubectl.KubectlConf(conf.ClusterTransport)) if err != nil { panic(err) diff --git a/test/e2e/internal/framework/dump.go b/test/e2e/internal/framework/dump.go index 6ffbe67e18..0b36c83b87 100644 --- a/test/e2e/internal/framework/dump.go +++ b/test/e2e/internal/framework/dump.go @@ -89,6 +89,7 @@ func (f *Framework) saveTestCaseResources(testCaseFullText, dumpPath string) { }) if result.Error() != nil { GinkgoWriter.Printf("Get resources error:\n%s\n%w\n%s\n", result.GetCmd(), result.Error(), result.StdErr()) + return } // Stdout may present even if error is occurred. @@ -104,10 +105,12 @@ func (f *Framework) savePodAdditionalInfo(testCaseFullText, dumpPath string) { pods, err := f.Clients.kubeClient.CoreV1().Pods(f.Namespace().Name).List(context.Background(), metav1.ListOptions{}) if err != nil { GinkgoWriter.Printf("Failed to get PodList:\n%s\n", err) + return } if len(pods.Items) == 0 { GinkgoWriter.Println("The list of pods is empty; nothing to dump.") + return } for _, pod := range pods.Items { @@ -121,6 +124,7 @@ func (f *Framework) saveIntvirtvmDescriptions(testCaseFullText, dumpPath string) describeCmd := f.Clients.Kubectl().RawCommand(fmt.Sprintf("describe intvirtvm --namespace %s", f.Namespace().Name), ShortTimeout) if describeCmd.Error() != nil { GinkgoWriter.Printf("Failed to describe InternalVirtualizationVirtualMachine:\nError: %s\n", describeCmd.StdErr()) + return } fileName := fmt.Sprintf("%s/e2e_failed__%s__intvirtvm_describe", dumpPath, testCaseFullText) @@ -134,12 +138,14 @@ func (f *Framework) writePodLogs(name, namespace, filePath, testCaseFullText str podLogs, err := f.Clients.KubeClient().CoreV1().Pods(namespace).GetLogs(name, &corev1.PodLogOptions{}).Stream(context.Background()) if err != nil { GinkgoWriter.Printf("Failed to get logs:\nPodName: %s\nError: %w\n", name, err) + return } defer podLogs.Close() logs, err := io.ReadAll(podLogs) if err != nil { GinkgoWriter.Printf("Failed to read logs:\nPodName: %s\nError: %w\n", name, err) + return } fileName := fmt.Sprintf("%s/e2e_failed__%s__%s__logs.json", filePath, testCaseFullText, name) @@ -153,6 +159,7 @@ func (f *Framework) writePodDescription(name, namespace, filePath, testCaseFullT describeCmd := f.Clients.Kubectl().RawCommand(fmt.Sprintf("describe pod %s --namespace %s", name, namespace), ShortTimeout) if describeCmd.Error() != nil { GinkgoWriter.Printf("Failed to describe pod:\nPodName: %s\nError: %s\n", name, describeCmd.StdErr()) + 
return } fileName := fmt.Sprintf("%s/e2e_failed__%s__%s__describe", filePath, testCaseFullText, name) @@ -168,6 +175,7 @@ func (f *Framework) writeVirtualMachineGuestInfo(pod corev1.Pod, filePath, testC vlctlGuestInfoCmd := f.Clients.Kubectl().RawCommand(fmt.Sprintf("exec --stdin=true --tty=true %s --namespace %s -- vlctl guest info", pod.Name, pod.Namespace), ShortTimeout) if vlctlGuestInfoCmd.Error() != nil { GinkgoWriter.Printf("Failed to get pod guest info:\nPodName: %s\nError: %s\n", pod.Name, vlctlGuestInfoCmd.StdErr()) + return } fileName := fmt.Sprintf("%s/e2e_failed__%s__%s__vlctl_guest_info", filePath, testCaseFullText, pod.Name) diff --git a/test/e2e/internal/framework/framework.go b/test/e2e/internal/framework/framework.go index ca701dfd12..03cad61800 100644 --- a/test/e2e/internal/framework/framework.go +++ b/test/e2e/internal/framework/framework.go @@ -180,9 +180,8 @@ func (f *Framework) CreateWithDeferredDeletion(ctx context.Context, objs ...clie if err != nil { return err } + f.DeferDelete(obj) } - f.DeferDelete(objs...) - return nil } diff --git a/test/e2e/internal/rewrite/client.go b/test/e2e/internal/rewrite/client.go new file mode 100644 index 0000000000..5c80fd9d91 --- /dev/null +++ b/test/e2e/internal/rewrite/client.go @@ -0,0 +1,91 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rewrite + +import ( + "context" + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +type Client interface { + Get(ctx context.Context, name string, obj Object, opts ...Option) error +} + +func NewRewriteClient(dynamicClient dynamic.Interface) Client { + return rewriteClient{ + dynamicClient: dynamicClient, + } +} + +type rewriteClient struct { + dynamicClient dynamic.Interface +} + +type Object interface { + GVR() schema.GroupVersionResource +} + +type options struct { + namespace string +} + +type Option func(*options) + +func InNamespace(namespace string) Option { + return func(o *options) { + o.namespace = namespace + } +} + +func makeOptions(opts ...Option) options { + o := options{} + for _, opt := range opts { + opt(&o) + } + return o +} + +func (r rewriteClient) Get(ctx context.Context, name string, obj Object, opts ...Option) error { + o := makeOptions(opts...) 
+ + var ( + u *unstructured.Unstructured + err error + ) + + if o.namespace != "" { + u, err = r.dynamicClient.Resource(obj.GVR()).Namespace(o.namespace).Get(ctx, name, metav1.GetOptions{}) + } else { + u, err = r.dynamicClient.Resource(obj.GVR()).Get(ctx, name, metav1.GetOptions{}) + } + + if err != nil { + return err + } + + bytes, err := json.Marshal(u) + if err != nil { + return err + } + + return json.Unmarshal(bytes, obj) +} diff --git a/test/e2e/internal/rewrite/types.go b/test/e2e/internal/rewrite/types.go new file mode 100644 index 0000000000..a18459d0d8 --- /dev/null +++ b/test/e2e/internal/rewrite/types.go @@ -0,0 +1,43 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rewrite + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" +) + +func rewriteCDIV1beta1(resource string) schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: "cdi.internal.virtualization.deckhouse.io", + Version: "v1beta1", + Resource: resource, + } +} + +func rewriteInternalVirtualizationResource(resource string) string { + return "internalvirtualization" + resource +} + +type StorageProfile struct { + *cdiv1beta1.StorageProfile `json:",inline"` +} + +func (StorageProfile) GVR() schema.GroupVersionResource { + resource := rewriteInternalVirtualizationResource("storageprofiles") + return rewriteCDIV1beta1(resource) +} diff --git a/test/e2e/vm/volume_migration_local_disks.go b/test/e2e/vm/volume_migration_local_disks.go index 03bfdaa461..3d87213597 100644 --- a/test/e2e/vm/volume_migration_local_disks.go +++ b/test/e2e/vm/volume_migration_local_disks.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "maps" + "os" "strconv" "time" @@ -43,7 +44,16 @@ import ( "github.com/deckhouse/virtualization/test/e2e/internal/util" ) -var _ = Describe("LocalVirtualDiskMigration", Ordered, ContinueOnFailure, func() { +func decoratorsForVolumeMigrations() []interface{} { + if os.Getenv("PARALLEL_VOLUME_MIGRATIONS") == "true" { + return nil + } + return []interface{}{Ordered, ContinueOnFailure} +} + +// Ordered is required due to concurrent migration limitations in the cluster to prevent test interference. +// ContinueOnFailure ensures all independent tests run even if one fails. +var _ = FDescribe("LocalVirtualDiskMigration", decoratorsForVolumeMigrations(), func() { var ( f = framework.NewFramework("volume-migration-local-disks") storageClass *storagev1.StorageClass @@ -51,9 +61,6 @@ var _ = Describe("LocalVirtualDiskMigration", Ordered, ContinueOnFailure, func() ) BeforeEach(func() { - // TODO: Remove Skip after fixing the issue. - Skip("This test case is not working everytime. 
Should be fixed.") - storageClass = framework.GetConfig().StorageClass.TemplateStorageClass if storageClass == nil { Skip("TemplateStorageClass is not set.") @@ -210,7 +217,10 @@ var _ = Describe("LocalVirtualDiskMigration", Ordered, ContinueOnFailure, func() } }) - It("should be reverted first and completed second", func() { + // TODO: need v1.6.2 kubevirt to fix this test + FIt("should be reverted first and completed second", func() { + Skip("TODO: need v1.6.2 kubevirt to fix this test") + ns := f.Namespace().Name vm, vds := localMigrationRootAndAdditionalBuild() @@ -240,7 +250,7 @@ var _ = Describe("LocalVirtualDiskMigration", Ordered, ContinueOnFailure, func() untilVirtualDisksMigrationsFailed(f) - By("The second failed migration") + By("The second completed migration") const vmopName2 = "local-disks-migration-2" By("Starting migrations for virtual machines") @@ -250,7 +260,8 @@ var _ = Describe("LocalVirtualDiskMigration", Ordered, ContinueOnFailure, func() vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName2, metav1.GetOptions{}) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(vmop.Status.Phase).To(Equal(v1alpha2.VMOPPhaseCompleted)) + completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) + g.Expect(completed.Status).To(Equal(metav1.ConditionTrue), "Reason: %s, Message: %s", completed.Reason, completed.Message) }).WithTimeout(framework.MaxTimeout).WithPolling(time.Second).Should(Succeed()) vm, err = f.VirtClient().VirtualMachines(ns).Get(context.Background(), vm.GetName(), metav1.GetOptions{}) @@ -475,18 +486,14 @@ var _ = Describe("LocalVirtualDiskMigration", Ordered, ContinueOnFailure, func() const vmopName = "local-disks-migration-with-rwo-vmbda" By("Starting migrations for virtual machines") - util.MigrateVirtualMachine(f, vm, vmopbuilder.WithName(vmopName)) - - By("Waiting for migration failed") - Eventually(func(g Gomega) { - vmop, err := f.VirtClient().VirtualMachineOperations(ns).Get(context.Background(), vmopName, metav1.GetOptions{}) - g.Expect(err).NotTo(HaveOccurred()) - - g.Expect(vmop.Status.Phase).To(Equal(v1alpha2.VMOPPhaseFailed)) - completed, _ := conditions.GetCondition(vmopcondition.TypeCompleted, vmop.Status.Conditions) - g.Expect(completed.Status).To(Equal(metav1.ConditionFalse)) - g.Expect(completed.Reason).To(Equal(vmopcondition.ReasonHotplugDisksNotShared.String())) - }).WithTimeout(framework.MiddleTimeout).WithPolling(time.Second).Should(Succeed()) + vmop := vmopbuilder.New([]vmopbuilder.Option{ + vmopbuilder.WithName(vmopName), + vmopbuilder.WithNamespace(vm.Namespace), + vmopbuilder.WithType(v1alpha2.VMOPTypeEvict), + vmopbuilder.WithVirtualMachine(vm.Name), + }...) + err = f.CreateWithDeferredDeletion(context.Background(), vmop) + Expect(err).To(MatchError(ContainSubstring("migration of the rwo virtual disk is not allowed if the virtual machine has hot-plugged block devices"))) }) }) @@ -496,7 +503,8 @@ func ExecStressNGInVirtualMachine(f *framework.Framework, vm *v1alpha2.VirtualMa cmd := "sudo nohup stress-ng --vm 1 --vm-bytes 100% --timeout 300s &>/dev/null &" By(fmt.Sprintf("Exec StressNG command for virtualmachine %s/%s", vm.Namespace, vm.Name)) - Expect(f.SSHCommand(vm.Name, vm.Namespace, cmd, options...)).To(Succeed()) + _, err := f.SSHCommand(vm.Name, vm.Namespace, cmd, options...) 
+	Expect(err).NotTo(HaveOccurred())
 
 	By("Wait until stress-ng loads the memory more heavily")
 	time.Sleep(20 * time.Second)
diff --git a/test/e2e/vm/volume_migration_storage_class_changed.go b/test/e2e/vm/volume_migration_storage_class_changed.go
index 6638dcceeb..fd940de8d5 100644
--- a/test/e2e/vm/volume_migration_storage_class_changed.go
+++ b/test/e2e/vm/volume_migration_storage_class_changed.go
@@ -26,34 +26,43 @@ import (
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	cdiv1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"
 
 	"github.com/deckhouse/virtualization-controller/pkg/common/patch"
+	"github.com/deckhouse/virtualization-controller/pkg/controller/service/volumemode"
 	"github.com/deckhouse/virtualization/api/core/v1alpha2"
 	"github.com/deckhouse/virtualization/test/e2e/internal/framework"
 	"github.com/deckhouse/virtualization/test/e2e/internal/object"
+	"github.com/deckhouse/virtualization/test/e2e/internal/rewrite"
 	"github.com/deckhouse/virtualization/test/e2e/internal/util"
 )
 
-var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() {
+// Ordered is required due to concurrent migration limitations in the cluster to prevent test interference.
+// ContinueOnFailure ensures all independent tests run even if one fails.
+var _ = FDescribe("StorageClassMigration", decoratorsForVolumeMigrations(), func() {
 	var (
 		f                = framework.NewFramework("volume-migration-storage-class-changed")
+		modeGetter       = volumemode.NewVolumeAndAccessModesGetter(f.Clients.GenericClient(), getStorageProfile(f))
 		storageClass     *storagev1.StorageClass
 		vi               *v1alpha2.VirtualImage
 		nextStorageClass string
 	)
 
 	BeforeEach(func() {
-		// TODO: Remove Skip after fixing the issue.
-		Skip("This test case is not working everytime. Should be fixed.")
-
 		storageClass = framework.GetConfig().StorageClass.TemplateStorageClass
 		if storageClass == nil {
 			Skip("TemplateStorageClass is not set.")
 		}
 
+		// Dirty hack to get the volume mode: GetVolumeAndAccessModes does not accept a nil object.
+		notEmptyVD := &v1alpha2.VirtualDisk{}
+		volumeMode, _, err := modeGetter.GetVolumeAndAccessModes(context.Background(), notEmptyVD, storageClass)
+		Expect(err).NotTo(HaveOccurred())
+
 		scList, err := f.KubeClient().StorageV1().StorageClasses().List(context.Background(), metav1.ListOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
@@ -61,7 +70,20 @@ var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() {
 		if sc.Name == storageClass.Name {
 			continue
 		}
-		if sc.VolumeBindingMode != nil && *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
+		// TODO: Add support for storage classes using the local volume provisioner.
+		// Temporarily disabled because the storage layer itself has stability problems.
+ if sc.Provisioner == "local.csi.storage.deckhouse.io" { + GinkgoWriter.Printf("Skipping local storage class %s\n", sc.Name) + continue + } + + nextVolumeMode, _, err := modeGetter.GetVolumeAndAccessModes(context.Background(), notEmptyVD, &sc) + if err != nil { + GinkgoWriter.Printf("Skipping storage class %s: cannot get volume mode: %s\n", sc.Name, err) + continue + } + + if volumeMode == nextVolumeMode { nextStorageClass = sc.Name break } @@ -136,7 +157,7 @@ var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() { util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) By("Patch VD with new storage class") - err = PatchStorageClassName(context.Background(), f, nextStorageClass, vdsForMigration...) + err = patchStorageClassName(context.Background(), f, nextStorageClass, vdsForMigration...) Expect(err).NotTo(HaveOccurred()) By("Wait until VM migration succeeded") @@ -185,7 +206,7 @@ var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() { util.UntilVMAgentReady(crclient.ObjectKeyFromObject(vm), framework.LongTimeout) By("Patch VD with new storage class") - err = PatchStorageClassName(context.Background(), f, nextStorageClass, vdsForMigration...) + err = patchStorageClassName(context.Background(), f, nextStorageClass, vdsForMigration...) Expect(err).NotTo(HaveOccurred()) Eventually(func() error { @@ -201,7 +222,7 @@ var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() { } // revert migration - err = PatchStorageClassName(context.Background(), f, storageClass.Name, vdsForMigration...) + err = patchStorageClassName(context.Background(), f, storageClass.Name, vdsForMigration...) Expect(err).NotTo(HaveOccurred()) return nil @@ -211,7 +232,7 @@ var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() { }, Entry("when only root disk changed storage class", storageClassMigrationRootOnlyBuild, vdRootName), Entry("when root disk changed storage class and one local additional disk", storageClassMigrationRootAndLocalAdditionalBuild, vdRootName), - Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), // TODO:fixme + Entry("when root disk changed storage class and one additional disk", storageClassMigrationRootAndAdditionalBuild, vdRootName, vdAdditionalName), Entry("when only additional disk changed storage class", storageClassMigrationAdditionalOnlyBuild, vdAdditionalName), ) @@ -239,7 +260,7 @@ var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() { for _, sc := range toStorageClasses { By(fmt.Sprintf("Patch VD %s with new storage class %s", vdForMigration.Name, sc)) - err = PatchStorageClassName(context.Background(), f, sc, vdForMigration) + err = patchStorageClassName(context.Background(), f, sc, vdForMigration) Expect(err).NotTo(HaveOccurred()) Eventually(func() error { @@ -288,7 +309,7 @@ var _ = Describe("StorageClassMigration", Ordered, ContinueOnFailure, func() { }) }) -func PatchStorageClassName(ctx context.Context, f *framework.Framework, scName string, vds ...*v1alpha2.VirtualDisk) error { +func patchStorageClassName(ctx context.Context, f *framework.Framework, scName string, vds ...*v1alpha2.VirtualDisk) error { patchBytes, err := patch.NewJSONPatch(patch.WithReplace("/spec/persistentVolumeClaim/storageClassName", scName)).Bytes() if err != nil { return fmt.Errorf("new json patch: %w", err) @@ -303,3 +324,17 @@ func PatchStorageClassName(ctx context.Context, f 
*framework.Framework, scName s return nil } + +func getStorageProfile(f *framework.Framework) func(ctx context.Context, name string) (*cdiv1.StorageProfile, error) { + return func(ctx context.Context, name string) (*cdiv1.StorageProfile, error) { + obj := &rewrite.StorageProfile{} + err := f.RewriteClient().Get(ctx, name, obj) + if err != nil { + if k8serrors.IsNotFound(err) { + return nil, nil + } + return nil, err + } + return obj.StorageProfile, nil + } +}
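For reference, the `types.go` wrapper pattern extends to other rewritten CDI resources. Below is a hypothetical sketch for `DataVolume`; the rewritten resource name is an assumption based on the `internalvirtualization` prefix rule above and should be verified against the cluster's API before relying on it.

```go
package rewrite

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1"
)

// DataVolume wraps the upstream CDI type so it can be fetched through the
// rewritten internal API group used by the module.
type DataVolume struct {
	*cdiv1beta1.DataVolume `json:",inline"`
}

func (DataVolume) GVR() schema.GroupVersionResource {
	// Assumed to follow the same prefix rule as StorageProfile above.
	resource := rewriteInternalVirtualizationResource("datavolumes")
	return rewriteCDIV1beta1(resource)
}
```

A namespaced wrapper like this would be fetched with `f.RewriteClient().Get(ctx, name, &rewrite.DataVolume{}, rewrite.InNamespace(ns))`, using the `InNamespace` option defined in `client.go`.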