1 change: 1 addition & 0 deletions api/v1alpha1/zz_generated.conversion.go

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions api/v1alpha2/zz_generated.conversion.go

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions api/v1alpha3/zz_generated.conversion.go

Some generated files are not rendered by default.

1 change: 1 addition & 0 deletions api/v1alpha4/zz_generated.conversion.go

Some generated files are not rendered by default.

7 changes: 7 additions & 0 deletions api/v1alpha5/virtualmachine_storage_types.go
@@ -295,6 +295,13 @@ type VirtualMachineVolumeStatus struct {

// +optional

// ProvisioningMode describes the volume's observed provisioning mode.
// This indicates whether the disk is thin-provisioned, thick-provisioned,
// or thick-provisioned with eager zeroing.
ProvisioningMode VolumeProvisioningMode `json:"provisioningMode,omitempty"`

// +optional

// Crypto describes the volume's encryption status.
Crypto *VirtualMachineVolumeCryptoStatus `json:"crypto,omitempty"`

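For context, the new field's type is VolumeProvisioningMode. Judging from the constants referenced in update_status.go below and the CRD enum values (Thin, Thick, ThickEagerZero), it is presumably a string-backed enum roughly like the following sketch (not the exact definition from the v1alpha5 API package):

// VolumeProvisioningMode describes how a volume's backing disk is provisioned.
// Sketch inferred from the CRD enum and the constants used in this PR.
type VolumeProvisioningMode string

const (
	// VolumeProvisioningModeThin indicates a thin-provisioned disk.
	VolumeProvisioningModeThin VolumeProvisioningMode = "Thin"

	// VolumeProvisioningModeThick indicates a thick-provisioned (lazy-zeroed) disk.
	VolumeProvisioningModeThick VolumeProvisioningMode = "Thick"

	// VolumeProvisioningModeThickEagerZero indicates a thick-provisioned,
	// eagerly zeroed disk.
	VolumeProvisioningModeThickEagerZero VolumeProvisioningMode = "ThickEagerZero"
)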
10 changes: 10 additions & 0 deletions config/crd/bases/vmoperator.vmware.com_virtualmachines.yaml
@@ -13430,6 +13430,16 @@ spec:
name:
description: Name describes the name of the volume.
type: string
provisioningMode:
description: |-
ProvisioningMode describes the volume's observed provisioning mode.
This indicates whether the disk is thin-provisioned, thick-provisioned,
or thick-provisioned with eager zeroing.
enum:
- Thin
- Thick
- ThickEagerZero
type: string
requested:
anyOf:
- type: integer
@@ -648,6 +648,7 @@ func (r *Reconciler) getVMVolStatusesFromBatchAttachment(
vmVolStatus.UnitNumber = existingVol.UnitNumber
vmVolStatus.DiskMode = existingVol.DiskMode
vmVolStatus.SharingMode = existingVol.SharingMode
vmVolStatus.ProvisioningMode = existingVol.ProvisioningMode

// Add PVC capacity information
if err := r.updateVolumeStatusWithPVCInfo(
@@ -1018,6 +1019,7 @@ func (r *Reconciler) getVMVolStatusesFromLegacyAttachments(
vmVolStatus.UnitNumber = existingVol.UnitNumber
vmVolStatus.DiskMode = existingVol.DiskMode
vmVolStatus.SharingMode = existingVol.SharingMode
vmVolStatus.ProvisioningMode = existingVol.ProvisioningMode

// Add PVC capacity information
if err := r.updateVolumeStatusWithPVCInfo(
@@ -61,21 +61,21 @@ func intgTestsReconcile() {
dummyDiskUUID1 = uuid.New().String()
dummyDiskUUID2 = uuid.New().String()

vmVolume1 = vmopv1.VirtualMachineVolume{
Name: "cns-volume-1",
VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{
PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-volume-1",
vmVolume1 = vmopv1.VirtualMachineVolume{
Name: "cns-volume-1",
VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{
PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-volume-1",
},
},
},
},
ControllerType: vmopv1.VirtualControllerTypeSCSI,
ControllerBusNumber: ptr.To(int32(0)),
UnitNumber: ptr.To(int32(0)),
DiskMode: vmopv1.VolumeDiskModePersistent,
SharingMode: vmopv1.VolumeSharingModeNone,
}
ControllerType: vmopv1.VirtualControllerTypeSCSI,
ControllerBusNumber: ptr.To(int32(0)),
UnitNumber: ptr.To(int32(0)),
DiskMode: vmopv1.VolumeDiskModePersistent,
SharingMode: vmopv1.VolumeSharingModeNone,
}

pvc1 = &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
@@ -94,21 +94,21 @@ func intgTestsReconcile() {
},
}

vmVolume2 = vmopv1.VirtualMachineVolume{
Name: "cns-volume-2",
VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{
PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-volume-2",
vmVolume2 = vmopv1.VirtualMachineVolume{
Name: "cns-volume-2",
VirtualMachineVolumeSource: vmopv1.VirtualMachineVolumeSource{
PersistentVolumeClaim: &vmopv1.PersistentVolumeClaimVolumeSource{
PersistentVolumeClaimVolumeSource: corev1.PersistentVolumeClaimVolumeSource{
ClaimName: "pvc-volume-2",
},
},
},
},
ControllerType: vmopv1.VirtualControllerTypeSCSI,
ControllerBusNumber: ptr.To(int32(0)),
UnitNumber: ptr.To(int32(1)),
DiskMode: vmopv1.VolumeDiskModePersistent,
SharingMode: vmopv1.VolumeSharingModeNone,
}
ControllerType: vmopv1.VirtualControllerTypeSCSI,
ControllerBusNumber: ptr.To(int32(0)),
UnitNumber: ptr.To(int32(1)),
DiskMode: vmopv1.VolumeDiskModePersistent,
SharingMode: vmopv1.VolumeSharingModeNone,
}

pvc2 = &corev1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
92 changes: 92 additions & 0 deletions pkg/providers/vsphere/vmlifecycle/update_status.go
@@ -1283,6 +1283,55 @@ func updateStorageUsage(vmCtx pkgctx.VirtualMachineContext) []error {
return errs
}

// convertDiskMode converts vSphere disk mode string to vmopv1.VolumeDiskMode.
func convertDiskMode(diskMode string) vmopv1.VolumeDiskMode {
switch diskMode {
case string(vimtypes.VirtualDiskModeIndependent_persistent):
return vmopv1.VolumeDiskModeIndependentPersistent
case string(vimtypes.VirtualDiskModeIndependent_nonpersistent):
return vmopv1.VolumeDiskModeIndependentNonPersistent
case string(vimtypes.VirtualDiskModeNonpersistent):
return vmopv1.VolumeDiskModeNonPersistent
case string(vimtypes.VirtualDiskModePersistent):
return vmopv1.VolumeDiskModePersistent
default:
// Default to persistent if unknown or empty.
return vmopv1.VolumeDiskModePersistent
}
}

// convertSharingMode converts vSphere sharing mode to vmopv1.VolumeSharingMode.
func convertSharingMode(sharing vimtypes.VirtualDiskSharing) vmopv1.VolumeSharingMode {
switch sharing {
case vimtypes.VirtualDiskSharingSharingMultiWriter:
return vmopv1.VolumeSharingModeMultiWriter
case vimtypes.VirtualDiskSharingSharingNone:
return vmopv1.VolumeSharingModeNone
default:
// Default to None if unknown or empty.
return vmopv1.VolumeSharingModeNone
}
}

// convertProvisioningMode determines the provisioning mode from ThinProvisioned
// and EagerlyScrub flags.
func convertProvisioningMode(thinProvisioned, eagerlyScrub *bool) vmopv1.VolumeProvisioningMode {
// If ThinProvisioned is true, it's thin provisioned.
if thinProvisioned != nil && *thinProvisioned {
return vmopv1.VolumeProvisioningModeThin
}
// If ThinProvisioned is false or nil, check EagerlyScrub for thick variants.
if eagerlyScrub != nil && *eagerlyScrub {
return vmopv1.VolumeProvisioningModeThickEagerZero
}
// Default to thick (lazy zeroed) if thinProvisioned is false.
if thinProvisioned != nil && !*thinProvisioned {
return vmopv1.VolumeProvisioningModeThick
}
// If both are nil, return empty string (unknown/unset).
return ""
}
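
To make the mapping in convertProvisioningMode explicit, here is a small hypothetical table-driven check (not part of the PR; it assumes placement in the same package, with ptr being k8s.io/utils/ptr and fmt imported):

// exampleProvisioningModeMapping documents how the two vSphere backing flags
// map to the API's provisioning modes. Hypothetical, for illustration only.
func exampleProvisioningModeMapping() {
	cases := []struct {
		thin, eager *bool
		want        vmopv1.VolumeProvisioningMode
	}{
		{ptr.To(true), nil, vmopv1.VolumeProvisioningModeThin}, // thin wins regardless of eagerlyScrub
		{ptr.To(false), ptr.To(true), vmopv1.VolumeProvisioningModeThickEagerZero},
		{nil, ptr.To(true), vmopv1.VolumeProvisioningModeThickEagerZero},
		{ptr.To(false), nil, vmopv1.VolumeProvisioningModeThick}, // thick, lazy zeroed
		{nil, nil, ""}, // both flags unset: mode left unknown/unset
	}
	for _, c := range cases {
		if got := convertProvisioningMode(c.thin, c.eager); got != c.want {
			panic(fmt.Sprintf("got %q, want %q", got, c.want))
		}
	}
}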

func updateVolumeStatus(vmCtx pkgctx.VirtualMachineContext) {
var (
moVM = vmCtx.MoVM
@@ -1356,6 +1405,8 @@ func updateVolumeStatus(vmCtx pkgctx.VirtualMachineContext) {
KeyID: ddi.CryptoKey.KeyID,
}
}
// Note: DiskMode, SharingMode, and ProvisioningMode are set later
// in a single pass for all volumes (both classic and managed).
// This is for a rare case when VM is upgraded from v1alpha3 to
// v1alpha4+. Since vm.status.volume.requested was introduced in
// v1alpha4. So we need to patch it if it's missing from status for
@@ -1420,6 +1471,8 @@ func updateVolumeStatus(vmCtx pkgctx.VirtualMachineContext) {
KeyID: ddi.CryptoKey.KeyID,
}
}
// Note: DiskMode, SharingMode, and ProvisioningMode are set later
// in a single pass for all volumes (both classic and managed).
vm.Status.Volumes = append(vm.Status.Volumes, volStatus)
}
}
@@ -1437,6 +1490,45 @@ func updateVolumeStatus(vmCtx pkgctx.VirtualMachineContext) {
}
})

// Update disk attachment properties for ALL volumes (classic and managed).
// This is done in a single pass at the end to ensure consistency.
@bryanv (Contributor), Dec 3, 2025:
What do you mean by consistency here? And why not in the earlier loop?

Author (Collaborator):
The earlier loop has separate handling for classic/unmanaged disks and PVCs, so handling it there would mean duplicating the logic in both the if and else blocks. "Consistency" may have been the wrong choice of word.

// For managed volumes, the volume controllers populate status from CnsNodeVmAttachment,
@lubronzhan (Contributor), Nov 25, 2025:
This got me thinking: we are also patching status.volume with the detaching suffix here, so we should also add these properties for detaching disks in volumeController. You don't have to include this as part of this PR.

// which doesn't include DiskMode, SharingMode, or ProvisioningMode.
// We populate these properties here by matching DiskUUID with vSphere disk info.
diskInfoByUUID := make(map[string]pkgvol.VirtualDiskInfo)
for _, di := range info.Disks {
if di.UUID != "" {
diskInfoByUUID[di.UUID] = di
}
}

for i := range vm.Status.Volumes {
vol := &vm.Status.Volumes[i]
if vol.DiskUUID == "" {
continue
}

di, ok := diskInfoByUUID[vol.DiskUUID]
if !ok {
vmCtx.Logger.V(4).Info("No disk info found for volume",
"volumeName", vol.Name,
"volumeType", vol.Type,
"diskUUID", vol.DiskUUID)
continue
}

// Populate disk attachment properties from vSphere disk info.
if di.DiskMode != "" {
Contributor:
If these are "", should the status field be cleared?

Author (Collaborator):
I didn't handle that, because a diskMode going from non-empty to empty is not a valid scenario unless the disk's backing is changed to one of the two specific backings that have no disk mode:

  • VirtualDiskRawDiskVer2BackingInfo
  • VirtualDiskPartitionedRawDiskVer2BackingInfo

which are very rare.

https://github.com/vmware-tanzu/vm-operator/blob/main/pkg/util/devices.go#L288-L335

Also, for regular disks, diskMode is an immutable property, so there is no case of a disk's mode changing once it is non-empty.

LMK if this makes sense.

vol.DiskMode = convertDiskMode(di.DiskMode)
}
if di.Sharing != "" {
vol.SharingMode = convertSharingMode(di.Sharing)
}
if provMode := convertProvisioningMode(di.ThinProvisioned, di.EagerlyScrub); provMode != "" {
vol.ProvisioningMode = provMode
}
}

// This sort order is consistent with the logic from the volumes controller.
vmopv1.SortVirtualMachineVolumeStatuses(vm.Status.Volumes)
}
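
On the review thread above about empty disk modes: DiskMode, Sharing, ThinProvisioned, and EagerlyScrub all come from the virtual disk's backing object, and only the raw-device backings named in that thread carry no disk mode. A rough sketch of that kind of backing-aware extraction, purely illustrative and not taken from pkg/util/devices.go:

// extractDiskBackingInfo is a hypothetical helper showing where the per-disk
// fields consumed above would come from; the real logic lives in
// pkg/util/devices.go.
func extractDiskBackingInfo(disk *vimtypes.VirtualDisk) (diskMode, sharing string, thin, eager *bool) {
	switch b := disk.Backing.(type) {
	case *vimtypes.VirtualDiskFlatVer2BackingInfo:
		// The common file-backed disk: carries all four properties.
		return b.DiskMode, b.Sharing, b.ThinProvisioned, b.EagerlyScrub
	case *vimtypes.VirtualDiskRawDiskMappingVer1BackingInfo:
		// RDM disks report a disk mode and sharing, but no thin/eager flags.
		return b.DiskMode, b.Sharing, nil, nil
	case *vimtypes.VirtualDiskRawDiskVer2BackingInfo,
		*vimtypes.VirtualDiskPartitionedRawDiskVer2BackingInfo:
		// The two raw-device backings from the review thread: no disk mode
		// (and no thin/eager flags) to report.
		return "", "", nil, nil
	default:
		return "", "", nil, nil
	}
}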