From ceb16c20dfa221533258c7542e2776dfc6845bba Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 2 Oct 2020 02:46:44 +0530 Subject: [PATCH 01/31] vmware: migration improvements Signed-off-by: Abhishek Kumar --- .../com/cloud/hypervisor/HypervisorGuru.java | 3 +- .../main/java/com/cloud/vm/UserVmService.java | 2 +- .../api/command/admin/vm/MigrateVMCmd.java | 2 +- .../MigrateVirtualMachineWithVolumeCmd.java | 33 +- .../agent/api/MigrateVmToPoolCommand.java | 23 +- .../api/storage/MigrateVolumeCommand.java | 10 + .../cloudstack/storage/to/VolumeObjectTO.java | 12 +- .../com/cloud/vm/VirtualMachineManager.java | 2 +- .../service/VolumeOrchestrationService.java | 2 +- .../cloud/vm/VirtualMachineManagerImpl.java | 271 +++++++--- .../com/cloud/vm/VmWorkStorageMigration.java | 10 +- .../orchestration/VolumeOrchestrator.java | 84 +++- .../vm/VirtualMachineManagerImplTest.java | 69 +-- .../com/cloud/hypervisor/guru/VMwareGuru.java | 83 +++- .../vmware/resource/VmwareResource.java | 387 +++++++++++++-- .../motion/VmwareStorageMotionStrategy.java | 67 ++- .../cloud/hypervisor/HypervisorGuruBase.java | 3 +- .../cloud/server/ManagementServerImpl.java | 20 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 72 ++- .../component/test_interpod_migration.py | 464 ++++++++++++++++++ 20 files changed, 1363 insertions(+), 256 deletions(-) create mode 100644 test/integration/component/test_interpod_migration.py diff --git a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java index 8a109649e969..81befda005fb 100644 --- a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java +++ b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java @@ -27,6 +27,7 @@ import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.storage.StoragePool; +import com.cloud.storage.Volume; import com.cloud.utils.Pair; import com.cloud.utils.component.Adapter; import com.cloud.vm.NicProfile; @@ -99,5 +100,5 @@ boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backu * @param destination the primary storage pool to migrate to * @return a list of commands to perform for a successful migration */ - List finalizeMigrate(VirtualMachine vm, StoragePool destination); + List finalizeMigrate(VirtualMachine vm, StoragePool destination, Map volumeToPool); } diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index 56a6dfd25a27..5e0e176ed51d 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -486,7 +486,7 @@ VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinationHost, UserVm moveVMToUser(AssignVMCmd moveUserVMCmd) throws ResourceAllocationException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; - VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool); + VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool, Map volumeToPool); UserVm restoreVM(RestoreVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java index 9f73ae586a08..e79f6bc008cd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java @@ -172,7 +172,7 @@ public void execute() { if (getHostId() != null) { migratedVm = _userVmService.migrateVirtualMachine(getVirtualMachineId(), destinationHost); } else if (getStoragePoolId() != null) { - migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool); + migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool, null); } if (migratedVm != null) { UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm) migratedVm).get(0); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java index 65d71cc1300a..de1ce2b86271 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java @@ -21,8 +21,6 @@ import java.util.Iterator; import java.util.Map; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiErrorCode; @@ -32,6 +30,8 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.HostResponse; import org.apache.cloudstack.api.response.UserVmResponse; +import org.apache.commons.collections.MapUtils; +import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -61,7 +61,6 @@ public class MigrateVirtualMachineWithVolumeCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, - required = true, description = "Destination Host ID to migrate VM to.") private Long hostId; @@ -97,7 +96,7 @@ public Long getVirtualMachineId() { public Map getVolumeToPool() { Map volumeToPoolMap = new HashMap(); - if (migrateVolumeTo != null && !migrateVolumeTo.isEmpty()) { + if (MapUtils.isNotEmpty(migrateVolumeTo)) { Collection allValues = migrateVolumeTo.values(); Iterator iter = allValues.iterator(); while (iter.hasNext()) { @@ -141,19 +140,35 @@ public String getEventDescription() { @Override public void execute() { + if (hostId == null && MapUtils.isEmpty(migrateVolumeTo)) { + throw new InvalidParameterValueException(String.format("Either %s or %s must be passed for migrating the VM", ApiConstants.HOST_ID, ApiConstants.MIGRATE_TO)); + } + UserVm userVm = _userVmService.getUserVm(getVirtualMachineId()); if (userVm == null) { throw new InvalidParameterValueException("Unable to find the VM by id=" + getVirtualMachineId()); } - Host destinationHost = _resourceService.getHost(getHostId()); - // OfflineVmwareMigration: destination host would have to not be a required parameter for stopped VMs - if (destinationHost == null) { - throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id =" + getHostId()); + if (!VirtualMachine.State.Running.equals(userVm.getState()) && hostId != null) { + throw new InvalidParameterValueException(String.format("VM ID: %s is not in Running state to migrate it to new host", userVm.getUuid())); + } + + if (!VirtualMachine.State.Stopped.equals(userVm.getState()) && hostId == null) { + throw new InvalidParameterValueException(String.format("VM ID: %s is not in Stopped state 
to migrate, use %s parameter to migrate it to a new host", userVm.getUuid(), ApiConstants.HOST_ID)); } try { - VirtualMachine migratedVm = _userVmService.migrateVirtualMachineWithVolume(getVirtualMachineId(), destinationHost, getVolumeToPool()); + VirtualMachine migratedVm = null; + if (hostId != null) { + Host destinationHost = _resourceService.getHost(getHostId()); + // OfflineVmwareMigration: destination host would have to not be a required parameter for stopped VMs + if (destinationHost == null) { + throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id =" + getHostId()); + } + migratedVm = _userVmService.migrateVirtualMachineWithVolume(getVirtualMachineId(), destinationHost, getVolumeToPool()); + } else if (MapUtils.isNotEmpty(migrateVolumeTo)) { + migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), null, getVolumeToPool()); + } if (migratedVm != null) { UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm)migratedVm).get(0); response.setResponseName(getCommandName()); diff --git a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java index 91a911d7c181..ad4a47022807 100644 --- a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java +++ b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java @@ -18,9 +18,12 @@ // package com.cloud.agent.api; -import com.cloud.agent.api.to.VolumeTO; - import java.util.Collection; +import java.util.List; + +import com.cloud.agent.api.to.StorageFilerTO; +import com.cloud.agent.api.to.VolumeTO; +import com.cloud.utils.Pair; /** * used to tell the agent to migrate a vm to a different primary storage pool. 
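[Editor's illustration, not part of the patch] The validation added to MigrateVirtualMachineWithVolumeCmd above reduces to a three-way rule. A minimal, self-contained sketch of that rule, condensed to two VM states, with hypothetical inputs (hostId, hasVolumeMap, running) standing in for the command's fields:

    // Mirrors MigrateVirtualMachineWithVolumeCmd#execute(): a running VM migrates to a new host
    // (optionally with volumes); a stopped VM with only a volume-to-pool map takes the
    // storage-only path; every other combination is rejected before any work is queued.
    static String choosePath(Long hostId, boolean hasVolumeMap, boolean running) {
        if (hostId == null && !hasVolumeMap) {
            throw new IllegalArgumentException("either hostid or migrateto must be passed");
        }
        if (hostId != null && !running) {
            throw new IllegalStateException("VM must be Running to migrate it to a new host");
        }
        if (hostId == null && running) {
            throw new IllegalStateException("VM must be Stopped for a storage-only migration");
        }
        return hostId != null ? "migrateVirtualMachineWithVolume" : "vmStorageMigration";
    }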
@@ -32,6 +35,8 @@ public class MigrateVmToPoolCommand extends Command { private String vmName; private String destinationPool; private boolean executeInSequence = false; + private List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerAsList; + private String targetClusterHost; protected MigrateVmToPoolCommand() { } @@ -43,10 +48,14 @@ protected MigrateVmToPoolCommand() { * @param destinationPool the primary storage pool to migrate the VM to * @param executeInSequence */ - public MigrateVmToPoolCommand(String vmName, Collection<VolumeTO> volumes, String destinationPool, boolean executeInSequence) { + public MigrateVmToPoolCommand(String vmName, Collection<VolumeTO> volumes, String destinationPool, + List<Pair<VolumeTO, StorageFilerTO>> volumeToFilerto, String targetHost, + boolean executeInSequence) { this.vmName = vmName; this.volumes = volumes; this.destinationPool = destinationPool; + this.targetClusterHost = targetHost; + this.volumeToFilerAsList = volumeToFilerto; this.executeInSequence = executeInSequence; } @@ -62,6 +71,14 @@ public String getVmName() { return vmName; } + public List<Pair<VolumeTO, StorageFilerTO>> getVolumeToFilerAsList() { + return volumeToFilerAsList; + } + + public String getTargetClusterHost() { + return targetClusterHost; + } + @Override public boolean executeInSequence() { return executeInSequence; diff --git a/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java b/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java index 9902a86fb893..3f2c425a103b 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java +++ b/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java @@ -34,6 +34,7 @@ public class MigrateVolumeCommand extends Command { StorageFilerTO sourcePool; String attachedVmName; Volume.Type volumeType; + String targetClusterHost; private DataTO srcData; private DataTO destData; @@ -68,6 +69,11 @@ public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map setWait(timeout); } + public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool, String targetClusterHost) { + this(volumeId, volumePath, sourcePool, targetPool); + this.targetClusterHost = targetClusterHost; + } + @Override public boolean executeInSequence() { return true; } @@ -125,6 +131,10 @@ public Map getDestDetails() { return destDetails; } + public String getTargetClusterHost() { + return targetClusterHost; + } + public int getWaitInMillSeconds() { return getWait() * 1000; } diff --git a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java index e47d13ed6693..7230a3045b42 100644 --- a/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java +++ b/core/src/main/java/org/apache/cloudstack/storage/to/VolumeObjectTO.java @@ -19,7 +19,6 @@ package org.apache.cloudstack.storage.to; -import com.cloud.storage.MigrationOptions; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import com.cloud.agent.api.to.DataObjectType; @@ -27,6 +26,7 @@ import com.cloud.agent.api.to.DataTO; import com.cloud.hypervisor.Hypervisor; import com.cloud.offering.DiskOffering.DiskCacheMode; +import com.cloud.storage.MigrationOptions; import com.cloud.storage.Storage; import com.cloud.storage.Volume; @@ -62,6 +62,7 @@ public class VolumeObjectTO implements DataTO { private Hypervisor.HypervisorType hypervisorType; private MigrationOptions migrationOptions; private boolean directDownload; + private String dataStoreUuid; public VolumeObjectTO() { @@ -313,4 +314,13 @@ public 
MigrationOptions getMigrationOptions() { public boolean isDirectDownload() { return directDownload; } + + public String getDataStoreUuid() { + return dataStoreUuid; + } + + public void setDataStoreUuid(String dataStoreUuid) { + this.dataStoreUuid = dataStoreUuid; + } + } diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index dc54f543c324..09a754f8fd8e 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -151,7 +151,7 @@ void advanceReboot(String vmUuid, Map param VirtualMachine findById(long vmId); - void storageMigration(String vmUuid, StoragePool storagePoolId); + void storageMigration(String vmUuid, StoragePool storagePoolId, Map volumeToPool); /** * @param vmInstance diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 9458de763538..7831c6bde25d 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -109,7 +109,7 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHost, Host destHost, Map volumeToPool); - boolean storageMigration(VirtualMachineProfile vm, StoragePool destPool) throws StorageUnavailableException; + boolean storageMigration(VirtualMachineProfile vm, StoragePool destPool, Map volumeToPool) throws StorageUnavailableException; void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest); diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index b8342b4a60e3..8b0385528c9e 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2124,7 +2124,7 @@ protected boolean checkVmOnHost(final VirtualMachine vm, final long hostId) thro } @Override - public void storageMigration(final String vmUuid, final StoragePool destPool) { + public void storageMigration(final String vmUuid, final StoragePool destPool, final Map volumeToPool) { final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance @@ -2132,14 +2132,14 @@ public void storageMigration(final String vmUuid, final StoragePool destPool) { final VirtualMachine vm = _vmDao.findByUuid(vmUuid); placeHolder = createPlaceHolderWork(vm.getId()); try { - orchestrateStorageMigration(vmUuid, destPool); + orchestrateStorageMigration(vmUuid, destPool, volumeToPool); } finally { if (placeHolder != null) { _workJobDao.expunge(placeHolder.getId()); } } } else { - final Outcome outcome = migrateVmStorageThroughJobQueue(vmUuid, destPool); + final Outcome outcome = migrateVmStorageThroughJobQueue(vmUuid, destPool, volumeToPool); try { final VirtualMachine vm = outcome.get(); @@ -2160,10 +2160,10 @@ public void storageMigration(final String vmUuid, final StoragePool destPool) { } } - private void 
orchestrateStorageMigration(final String vmUuid, final StoragePool destPool) { + private void orchestrateStorageMigration(final String vmUuid, final StoragePool destPool, final Map volumeToPool) { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); - preStorageMigrationStateCheck(destPool, vm); + Map volumeToPoolMap = prepareVmStorageMigration(vm, destPool, volumeToPool); try { if(s_logger.isDebugEnabled()) { @@ -2172,7 +2172,7 @@ private void orchestrateStorageMigration(final String vmUuid, final StoragePool vm.getInstanceName())); } - migrateThroughHypervisorOrStorage(destPool, vm); + migrateThroughHypervisorOrStorage(destPool, volumeToPoolMap, vm); } catch (ConcurrentOperationException | InsufficientCapacityException // possibly InsufficientVirtualNetworkCapacityException or InsufficientAddressCapacityException @@ -2191,24 +2191,18 @@ private void orchestrateStorageMigration(final String vmUuid, final StoragePool } } - private Answer[] attemptHypervisorMigration(StoragePool destPool, VMInstanceVO vm) { + private Answer[] attemptHypervisorMigration(VMInstanceVO vm, StoragePool destPool, Map volumeToPool, Long hostId) { + if (hostId == null) { + return null; + } final HypervisorGuru hvGuru = _hvGuruMgr.getGuru(vm.getHypervisorType()); // OfflineVmwareMigration: in case of vmware call vcenter to do it for us. // OfflineVmwareMigration: should we check the proximity of source and destination // OfflineVmwareMigration: if we are in the same cluster/datacentre/pool or whatever? // OfflineVmwareMigration: we are checking on success to optionally delete an old vm if we are not - List commandsToSend = hvGuru.finalizeMigrate(vm, destPool); - - Long hostId = vm.getHostId(); - // OfflineVmwareMigration: probably this is null when vm is stopped - if(hostId == null) { - hostId = vm.getLastHostId(); - if (s_logger.isDebugEnabled()) { - s_logger.debug(String.format("host id is null, using last host id %d", hostId) ); - } - } + List commandsToSend = hvGuru.finalizeMigrate(vm, destPool, volumeToPool); - if(CollectionUtils.isNotEmpty(commandsToSend)) { + if (CollectionUtils.isNotEmpty(commandsToSend)) { Commands commandsContainer = new Commands(Command.OnError.Stop); commandsContainer.addCommands(commandsToSend); try { @@ -2222,19 +2216,31 @@ private Answer[] attemptHypervisorMigration(StoragePool destPool, VMInstanceVO v return null; } - private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { + private void afterHypervisorMigrationCleanup(StoragePool destPool, Map volumeToPool, VMInstanceVO vm, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { boolean isDebugEnabled = s_logger.isDebugEnabled(); if(isDebugEnabled) { - String msg = String.format("cleaning up after hypervisor pool migration volumes for VM %s(%s) to pool %s(%s)", vm.getInstanceName(), vm.getUuid(), destPool.getName(), destPool.getUuid()); + String msg = String.format("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid()); + if (destPool != null) { + msg += String.format(" to pool %s(%s)", destPool.getName(), destPool.getUuid()); + } s_logger.debug(msg); } - setDestinationPoolAndReallocateNetwork(destPool, vm); + StoragePool rootVolumePool = destPool; + if (rootVolumePool == null && MapUtils.isNotEmpty(volumeToPool)) { + for (Map.Entry entry : volumeToPool.entrySet()) { + if (Type.ROOT.equals(entry.getKey().getVolumeType())) { + 
rootVolumePool = entry.getValue(); + break; + } + } + } + setDestinationPoolAndReallocateNetwork(rootVolumePool, vm); // OfflineVmwareMigration: don't set this to null or have another way to address the command; twice migrating will lead to an NPE - Long destPodId = destPool.getPodId(); + Long destPodId = rootVolumePool != null ? rootVolumePool.getPodId() : null; Long vmPodId = vm.getPodIdToDeployIn(); if (destPodId == null || ! destPodId.equals(vmPodId)) { if(isDebugEnabled) { - String msg = String.format("resetting lasHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId); + String msg = String.format("Resetting lastHost for VM %s(%s) as pod (%s) is no good.", vm.getInstanceName(), vm.getUuid(), destPodId); s_logger.debug(msg); } @@ -2242,66 +2248,141 @@ private void afterHypervisorMigrationCleanup(StoragePool destPool, VMInstanceVO vm.setPodIdToDeployIn(destPodId); // OfflineVmwareMigration: a consecutive migration will fail probably (no host not pod) }// else keep last host set for this vm - markVolumesInPool(vm,destPool, hypervisorMigrationResults); + markVolumesInPool(vm, hypervisorMigrationResults); // OfflineVmwareMigration: deal with answers, if (hypervisorMigrationResults.length > 0) // OfflineVmwareMigration: iterate over the volumes for data updates } - private void markVolumesInPool(VMInstanceVO vm, StoragePool destPool, Answer[] hypervisorMigrationResults) { + private void markVolumesInPool(VMInstanceVO vm, Answer[] hypervisorMigrationResults) { MigrateVmToPoolAnswer relevantAnswer = null; + if (hypervisorMigrationResults.length == 1 && !hypervisorMigrationResults[0].getResult()) { + throw new CloudRuntimeException(String.format("VM ID: %s migration failed. %s", vm.getUuid(), hypervisorMigrationResults[0].getDetails())); + } for (Answer answer : hypervisorMigrationResults) { if (s_logger.isTraceEnabled()) { - s_logger.trace(String.format("received an %s: %s", answer.getClass().getSimpleName(), answer)); + s_logger.trace(String.format("Received an %s: %s", answer.getClass().getSimpleName(), answer)); } if (answer instanceof MigrateVmToPoolAnswer) { relevantAnswer = (MigrateVmToPoolAnswer) answer; } } if (relevantAnswer == null) { - throw new CloudRuntimeException("no relevant migration results found"); + throw new CloudRuntimeException("No relevant migration results found"); + } + List results = relevantAnswer.getVolumeTos(); + if (results == null) { + results = new ArrayList<>(); } List volumes = _volsDao.findUsableVolumesForInstance(vm.getId()); if(s_logger.isDebugEnabled()) { - String msg = String.format("found %d volumes for VM %s(uuid:%s, id:%d)", volumes.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); + String msg = String.format("Found %d volumes for VM %s(uuid:%s, id:%d)", results.size(), vm.getInstanceName(), vm.getUuid(), vm.getId()); s_logger.debug(msg); } - for (VolumeObjectTO result : relevantAnswer.getVolumeTos() ) { + for (VolumeObjectTO result : results ) { if(s_logger.isDebugEnabled()) { - s_logger.debug(String.format("updating volume (%d) with path '%s' on pool '%d'", result.getId(), result.getPath(), destPool.getId())); + s_logger.debug(String.format("Updating volume (%d) with path '%s' on pool '%s'", result.getId(), result.getPath(), result.getDataStoreUuid())); } VolumeVO volume = _volsDao.findById(result.getId()); + StoragePool pool = _storagePoolDao.findPoolByUUID(result.getDataStoreUuid()); + if (volume == null || pool == null) { + continue; + } volume.setPath(result.getPath()); - 
volume.setPoolId(destPool.getId()); + volume.setPoolId(pool.getId()); _volsDao.update(volume.getId(), volume); } } - private void migrateThroughHypervisorOrStorage(StoragePool destPool, VMInstanceVO vm) throws StorageUnavailableException, InsufficientCapacityException { + private Pair<Long, Long> findClusterAndHostIdForVm(VMInstanceVO vm) { + Long hostId = vm.getHostId(); + Long clusterId = null; + // OfflineVmwareMigration: probably this is null when vm is stopped + if(hostId == null) { + hostId = vm.getLastHostId(); + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("host id is null, using last host id %d", hostId)); + } + } + if (hostId == null) { + List<VolumeVO> volumes = _volsDao.findByInstanceAndType(vm.getId(), Type.ROOT); + if (CollectionUtils.isNotEmpty(volumes)) { + VolumeVO rootVolume = volumes.get(0); + if (rootVolume.getPoolId() != null) { + StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId()); + if (pool != null && pool.getClusterId() != null) { + clusterId = pool.getClusterId(); + List<HostVO> hosts = _hostDao.findHypervisorHostInCluster(pool.getClusterId()); + if (CollectionUtils.isNotEmpty(hosts)) { + hostId = hosts.get(0).getId(); + } + } + } + } + } + if (clusterId == null && hostId != null) { + HostVO host = _hostDao.findById(hostId); + if (host != null) { + clusterId = host.getClusterId(); + } + } + return new Pair<>(clusterId, hostId); + } + + private void migrateThroughHypervisorOrStorage(StoragePool destPool, Map volumeToPool, VMInstanceVO vm) throws StorageUnavailableException, InsufficientCapacityException { final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); - final Long srchostId = vm.getHostId() != null ? vm.getHostId() : vm.getLastHostId(); - final HostVO srcHost = _hostDao.findById(srchostId); - final Long srcClusterId = srcHost.getClusterId(); - Answer[] hypervisorMigrationResults = attemptHypervisorMigration(destPool, vm); + Pair<Long, Long> vmClusterAndHost = findClusterAndHostIdForVm(vm); + final Long sourceClusterId = vmClusterAndHost.first(); + final Long sourceHostId = vmClusterAndHost.second(); + Answer[] hypervisorMigrationResults = attemptHypervisorMigration(vm, destPool, volumeToPool, sourceHostId); boolean migrationResult = false; if (hypervisorMigrationResults == null) { // OfflineVmwareMigration: if the HypervisorGuru can't do it, let the volume manager take care of it. 
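[Editor's illustration, not part of the patch] attemptHypervisorMigration above now returns null both when no source host id can be resolved and when the HypervisorGuru yields no commands, so the caller treats a null answer array uniformly as "fall back to per-volume storage migration". A condensed sketch of that contract with hypothetical stand-in types:

    interface HypervisorStep { Object[] tryMigrate(); }     // null: guru declined or no host id
    interface StorageStep { boolean migrateVolumeByVolume(); }

    // Mirrors migrateThroughHypervisorOrStorage(): hypervisor-assisted migration first,
    // volume-by-volume storage migration only when the hypervisor path is unavailable.
    static boolean migrate(HypervisorStep hypervisor, StorageStep storage) {
        Object[] answers = hypervisor.tryMigrate();          // attemptHypervisorMigration(...)
        if (answers == null) {
            return storage.migrateVolumeByVolume();          // volumeMgr.storageMigration(...)
        }
        return true;                                         // afterHypervisorMigrationCleanup(...)
    }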
- migrationResult = volumeMgr.storageMigration(profile, destPool); + migrationResult = volumeMgr.storageMigration(profile, destPool, volumeToPool); if (migrationResult) { - afterStorageMigrationCleanup(destPool, vm, srcHost, srcClusterId); + afterStorageMigrationCleanup(destPool, volumeToPool, vm, _hostDao.findById(sourceHostId), sourceClusterId); } else { s_logger.debug("Storage migration failed"); } } else { - afterHypervisorMigrationCleanup(destPool, vm, srcHost, srcClusterId, hypervisorMigrationResults); + afterHypervisorMigrationCleanup(destPool, volumeToPool, vm, hypervisorMigrationResults); } } - private void preStorageMigrationStateCheck(StoragePool destPool, VMInstanceVO vm) { - if (destPool == null) { - throw new CloudRuntimeException("Unable to migrate vm: missing destination storage pool"); + private Map prepareVmStorageMigration(VMInstanceVO vm, StoragePool destPool, Map volumeToPool) { + Map volumeToPoolMap = new HashMap<>(); + if (destPool == null && MapUtils.isEmpty(volumeToPool)) { + throw new CloudRuntimeException("Unable to migrate vm: missing both destination storage pool and volume to pool mapping"); + } + if (destPool != null) { + checkDestinationForTags(destPool, vm); + } else if (MapUtils.isNotEmpty(volumeToPool)) { + Cluster cluster = null; + Long dataCenterId = null; + for (Map.Entry entry: volumeToPool.entrySet()) { + StoragePool pool = _storagePoolDao.findById(entry.getValue()); + if (pool.getClusterId() != null) { + cluster = _clusterDao.findById(pool.getClusterId()); + break; + } + dataCenterId = pool.getDataCenterId(); + } + Long podId = null; + Long clusterId = null; + if (cluster != null) { + dataCenterId = cluster.getDataCenterId(); + podId = cluster.getPodId(); + clusterId = cluster.getId(); + } + if (dataCenterId == null) { + String msg = "Unable to migrate vm: failed to create deployment destination with given volume to pool map"; + s_logger.debug(msg); + throw new CloudRuntimeException(msg); + } + final DataCenterDeployment destination = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); + // Create a map of which volume should go in which storage pool. 
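[Editor's illustration, not part of the patch] The loop above derives a deployment destination purely from the requested target pools: the first cluster-scoped pool pins zone, pod and cluster (via its cluster record), while zone-wide pools contribute only the zone. A simplified, self-contained sketch of that derivation, with TargetPool as a hypothetical stand-in that carries its own zone and pod ids:

    static final class TargetPool {
        final Long clusterId; final Long podId; final Long zoneId;
        TargetPool(Long clusterId, Long podId, Long zoneId) {
            this.clusterId = clusterId; this.podId = podId; this.zoneId = zoneId;
        }
    }

    // Returns {zoneId, podId, clusterId}; pod and cluster stay null when every pool is zone-wide.
    static Long[] derivePlan(java.util.List<TargetPool> pools) {
        Long zone = null, pod = null, cluster = null;
        for (TargetPool p : pools) {
            if (p.clusterId != null) { zone = p.zoneId; pod = p.podId; cluster = p.clusterId; break; }
            zone = p.zoneId;
        }
        if (zone == null) {
            throw new IllegalStateException("failed to create deployment destination with given volume to pool map");
        }
        return new Long[] { zone, pod, cluster };
    }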
+ final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + volumeToPoolMap = createMappingVolumeAndStoragePool(profile, destination, volumeToPool); } - - checkDestinationForTags(destPool, vm); try { stateTransitTo(vm, Event.StorageMigrationRequested, null); } catch (final NoTransitionException e) { @@ -2309,6 +2390,7 @@ private void preStorageMigrationStateCheck(StoragePool destPool, VMInstanceVO vm s_logger.debug(msg); throw new CloudRuntimeException(msg, e); } + return volumeToPoolMap; } private void checkDestinationForTags(StoragePool destPool, VMInstanceVO vm) { @@ -2347,12 +2429,22 @@ static boolean matches(List volumeTags, List storagePoolTags) { } - private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException { + private void afterStorageMigrationCleanup(StoragePool destPool, Map volumeToPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException { + if (destPool == null && MapUtils.isNotEmpty(volumeToPool)) { + for (Map.Entry entry : volumeToPool.entrySet()) { + if (Type.ROOT.equals(entry.getKey().getVolumeType())) { + destPool = entry.getValue(); + break; + } + } + } setDestinationPoolAndReallocateNetwork(destPool, vm); //when starting the vm next time, don't look at last_host_id, only choose the host based on volume/storage pool vm.setLastHostId(null); - vm.setPodIdToDeployIn(destPool.getPodId()); + if (destPool != null) { + vm.setPodIdToDeployIn(destPool.getPodId()); + } // If VM was cold migrated between clusters belonging to two different VMware DCs, // unregister the VM from the source host and cleanup the associated VM files. @@ -2364,7 +2456,7 @@ private void afterStorageMigrationCleanup(StoragePool destPool, VMInstanceVO vm, private void setDestinationPoolAndReallocateNetwork(StoragePool destPool, VMInstanceVO vm) throws InsufficientCapacityException { //if the vm is migrated to different pod in basic mode, need to reallocate ip - if (destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) { + if (destPool != null && destPool.getPodId() != null && !destPool.getPodId().equals(vm.getPodIdToDeployIn())) { if (s_logger.isDebugEnabled()) { String msg = String.format("as the pod for vm %s has changed we are reallocating its network", vm.getInstanceName()); s_logger.debug(msg); @@ -2378,7 +2470,7 @@ private void setDestinationPoolAndReallocateNetwork(StoragePool destPool, VMInst private void afterStorageMigrationVmwareVMcleanup(StoragePool destPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) { // OfflineVmwareMigration: this should only happen on storage migration, else the guru would already have issued the command final Long destClusterId = destPool.getClusterId(); - if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId)) { + if (srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId) && srcHost != null) { final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId); final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId); if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) { @@ -2642,13 +2734,23 @@ protected void migrate(final VMInstanceVO vm, final long srcHostId, final Deploy /** * We create the mapping of volumes and storage pool to migrate the VMs according to the information sent by the user. 
- * If the user did not enter a complete mapping, the volumes that were left behind will be auto mapped using {@link #createStoragePoolMappingsForVolumes(VirtualMachineProfile, Host, Map, List)} + * If the user did not enter a complete mapping, the volumes that were left behind will be auto mapped using {@link #createStoragePoolMappingsForVolumes(VirtualMachineProfile, DataCenterDeployment, Map, List)} */ protected Map createMappingVolumeAndStoragePool(VirtualMachineProfile profile, Host targetHost, Map userDefinedMapOfVolumesAndStoragePools) { + return createMappingVolumeAndStoragePool(profile, + new DataCenterDeployment(targetHost.getDataCenterId(), targetHost.getPodId(), targetHost.getClusterId(), targetHost.getId(), null, null), + userDefinedMapOfVolumesAndStoragePools); + } + + private Map createMappingVolumeAndStoragePool(final VirtualMachineProfile profile, final DataCenterDeployment plan, final Map userDefinedMapOfVolumesAndStoragePools) { + Host targetHost = null; + if (plan.getHostId() != null) { + targetHost = _hostDao.findById(plan.getHostId()); + } Map volumeToPoolObjectMap = buildMapUsingUserInformation(profile, targetHost, userDefinedMapOfVolumesAndStoragePools); List volumesNotMapped = findVolumesThatWereNotMappedByTheUser(profile, volumeToPoolObjectMap); - createStoragePoolMappingsForVolumes(profile, targetHost, volumeToPoolObjectMap, volumesNotMapped); + createStoragePoolMappingsForVolumes(profile, plan, volumeToPoolObjectMap, volumesNotMapped); return volumeToPoolObjectMap; } @@ -2683,7 +2785,7 @@ protected Map buildMapUsingUserInformation(VirtualMachinePr StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId()); executeManagedStorageChecksWhenTargetStoragePoolProvided(currentPool, volume, targetPool); - if (_poolHostDao.findByPoolHost(targetPool.getId(), targetHost.getId()) == null) { + if (targetHost != null && _poolHostDao.findByPoolHost(targetPool.getId(), targetHost.getId()) == null) { throw new CloudRuntimeException( String.format("Cannot migrate the volume [%s] to the storage pool [%s] while migrating VM [%s] to target host [%s]. The host does not have access to the storage pool entered.", volume.getUuid(), targetPool.getUuid(), profile.getUuid(), targetHost.getUuid())); @@ -2691,6 +2793,17 @@ protected Map buildMapUsingUserInformation(VirtualMachinePr if (currentPool.getId() == targetPool.getId()) { s_logger.info(String.format("The volume [%s] is already allocated in storage pool [%s].", volume.getUuid(), targetPool.getUuid())); } + final DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); + List storageTags = storageMgr.getStoragePoolTagList(targetPool.getId()); + if(!matches(StringUtils.csvTagsToList(diskOffering.getTags()), storageTags)) { + String msg = String.format("Cannot migrate volume [%s] with disk offering tags '%s' to storage pool [%s] with tags '%s'", + volume.getName(), + diskOffering.getTags(), + targetPool.getName(), + StringUtils.listToCsvTags(storageTags)); + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } volumeToPoolObjectMap.put(volume, targetPool); } return volumeToPoolObjectMap; @@ -2718,13 +2831,17 @@ protected void executeManagedStorageChecksWhenTargetStoragePoolProvided(StorageP * For each one of the volumes we will map it to a storage pool that is available via the target host. * An exception is thrown if we cannot find a storage pool that is accessible in the target host to migrate the volume to. 
*/ - protected void createStoragePoolMappingsForVolumes(VirtualMachineProfile profile, Host targetHost, Map volumeToPoolObjectMap, List allVolumes) { + protected void createStoragePoolMappingsForVolumes(VirtualMachineProfile profile, DataCenterDeployment plan, Map volumeToPoolObjectMap, List allVolumes) { for (Volume volume : allVolumes) { StoragePoolVO currentPool = _storagePoolDao.findById(volume.getPoolId()); + Host targetHost = null; + if (plan.getHostId() != null) { + targetHost = _hostDao.findById(plan.getHostId()); + } executeManagedStorageChecksWhenTargetStoragePoolNotProvided(targetHost, currentPool, volume); - if (ScopeType.HOST.equals(currentPool.getScope()) || isStorageCrossClusterMigration(targetHost, currentPool)) { - createVolumeToStoragePoolMappingIfPossible(profile, targetHost, volumeToPoolObjectMap, volume, currentPool); + if (ScopeType.HOST.equals(currentPool.getScope()) || isStorageCrossClusterMigration(plan.getClusterId(), currentPool)) { + createVolumeToStoragePoolMappingIfPossible(profile, plan, volumeToPoolObjectMap, volume, currentPool); } else { volumeToPoolObjectMap.put(volume, currentPool); } @@ -2742,17 +2859,17 @@ protected void executeManagedStorageChecksWhenTargetStoragePoolNotProvided(Host if (!currentPool.isManaged()) { return; } - if (_poolHostDao.findByPoolHost(currentPool.getId(), targetHost.getId()) == null) { + if (targetHost != null && _poolHostDao.findByPoolHost(currentPool.getId(), targetHost.getId()) == null) { throw new CloudRuntimeException(String.format("The target host does not have access to the volume's managed storage pool. [volumeId=%s, storageId=%s, targetHostId=%s].", volume.getUuid(), currentPool.getUuid(), targetHost.getUuid())); } } /** - * Return true if the VM migration is a cross cluster migration. To execute that, we check if the volume current storage pool cluster is different from the target host cluster. + * Return true if the VM migration is a cross cluster migration. To determine this, we check whether the volume's current storage pool cluster differs from the target cluster. */ - protected boolean isStorageCrossClusterMigration(Host targetHost, StoragePoolVO currentPool) { - return ScopeType.CLUSTER.equals(currentPool.getScope()) && currentPool.getClusterId() != targetHost.getClusterId(); + protected boolean isStorageCrossClusterMigration(Long clusterId, StoragePoolVO currentPool) { - return clusterId != null && ScopeType.CLUSTER.equals(currentPool.getScope()) && !currentPool.getClusterId().equals(clusterId); + return clusterId != null && ScopeType.CLUSTER.equals(currentPool.getScope()) && !currentPool.getClusterId().equals(clusterId); } /** @@ -2764,37 +2881,44 @@ protected boolean isStorageCrossClusterMigration(Host targetHost, StoragePoolVO * * Side note: this method should only be called if the volume is on local storage or if we are executing a cross cluster migration. 
*/ - protected void createVolumeToStoragePoolMappingIfPossible(VirtualMachineProfile profile, Host targetHost, Map volumeToPoolObjectMap, Volume volume, + protected void createVolumeToStoragePoolMappingIfPossible(VirtualMachineProfile profile, DataCenterDeployment plan, Map volumeToPoolObjectMap, Volume volume, StoragePoolVO currentPool) { - List storagePoolList = getCandidateStoragePoolsToMigrateLocalVolume(profile, targetHost, volume); + List storagePoolList = getCandidateStoragePoolsToMigrateLocalVolume(profile, plan, volume); if (CollectionUtils.isEmpty(storagePoolList)) { - throw new CloudRuntimeException(String.format("There is not storage pools available at the target host [%s] to migrate volume [%s]", targetHost.getUuid(), volume.getUuid())); + String msg; + if (plan.getHostId() != null) { + Host targetHost = _hostDao.findById(plan.getHostId()); + msg = String.format("There are no storage pools available at the target host [%s] to migrate volume [%s]", targetHost.getUuid(), volume.getUuid()); + } else { + Cluster targetCluster = _clusterDao.findById(plan.getClusterId()); + msg = String.format("There are no storage pools available in the target cluster [%s] to migrate volume [%s]", targetCluster.getUuid(), volume.getUuid()); + } + throw new CloudRuntimeException(msg); } Collections.shuffle(storagePoolList); - boolean canTargetHostAccessVolumeCurrentStoragePool = false; + boolean candidatePoolsListContainsVolumeCurrentStoragePool = false; for (StoragePool storagePool : storagePoolList) { if (storagePool.getId() == currentPool.getId()) { - canTargetHostAccessVolumeCurrentStoragePool = true; + candidatePoolsListContainsVolumeCurrentStoragePool = true; break; } } - if (!canTargetHostAccessVolumeCurrentStoragePool) { + if (!candidatePoolsListContainsVolumeCurrentStoragePool) { volumeToPoolObjectMap.put(volume, _storagePoolDao.findByUuid(storagePoolList.get(0).getUuid())); } } /** - * We use {@link StoragePoolAllocator} objects to find storage pools connected to the targetHost where we would be able to allocate the given volume. + * We use {@link StoragePoolAllocator} objects to find storage pools for given DataCenterDeployment where we would be able to allocate the given volume. 
*/ - protected List getCandidateStoragePoolsToMigrateLocalVolume(VirtualMachineProfile profile, Host targetHost, Volume volume) { + protected List getCandidateStoragePoolsToMigrateLocalVolume(VirtualMachineProfile profile, DataCenterDeployment plan, Volume volume) { List poolList = new ArrayList<>(); DiskOfferingVO diskOffering = _diskOfferingDao.findById(volume.getDiskOfferingId()); DiskProfile diskProfile = new DiskProfile(volume, diskOffering, profile.getHypervisorType()); - DataCenterDeployment plan = new DataCenterDeployment(targetHost.getDataCenterId(), targetHost.getPodId(), targetHost.getClusterId(), targetHost.getId(), null, null); ExcludeList avoid = new ExcludeList(); StoragePoolVO volumeStoragePool = _storagePoolDao.findById(volume.getPoolId()); @@ -2807,7 +2931,7 @@ protected List getCandidateStoragePoolsToMigrateLocalVolume(Virtual continue; } for (StoragePool pool : poolListFromAllocator) { - if (pool.isLocal() || isStorageCrossClusterMigration(targetHost, volumeStoragePool)) { + if (pool.isLocal() || isStorageCrossClusterMigration(plan.getClusterId(), volumeStoragePool)) { poolList.add(pool); } } @@ -5222,7 +5346,7 @@ private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPoo } public Outcome migrateVmStorageThroughJobQueue( - final String vmUuid, final StoragePool destPool) { + final String vmUuid, final StoragePool destPool, final Map volumeToPool) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); @@ -5253,7 +5377,7 @@ public Outcome migrateVmStorageThroughJobQueue( // save work context info (there are some duplications) final VmWorkStorageMigration workInfo = new VmWorkStorageMigration(user.getId(), account.getId(), vm.getId(), - VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, destPool.getId()); + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, destPool != null ? destPool.getId() : null, volumeToPool); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); @@ -5599,8 +5723,11 @@ private Pair orchestrateStorageMigration(final VmWorkSto s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; - final StoragePool pool = (PrimaryDataStoreInfo)dataStoreMgr.getPrimaryDataStore(work.getDestStoragePoolId()); - orchestrateStorageMigration(vm.getUuid(), pool); + StoragePool pool = null; + if (work.getDestStoragePoolId() != null) { + pool = (PrimaryDataStoreInfo) dataStoreMgr.getPrimaryDataStore(work.getDestStoragePoolId()); + } + orchestrateStorageMigration(vm.getUuid(), pool, work.getVolumeToPool()); return new Pair(JobInfo.Status.SUCCEEDED, null); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java index 1d7d55ec171e..7254bdac7c67 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java @@ -16,18 +16,26 @@ // under the License. 
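[Editor's illustration, not part of the patch] The work item defined below now carries a nullable destination pool id next to the volume-to-pool map, and migrateVmStorageThroughJobQueue above packs it into the async job payload via VmWorkSerializer.serialize(workInfo). A minimal sketch of that null-safe packaging, using plain ids and Gson as an assumed stand-in for the serializer:

    import java.util.HashMap;
    import java.util.Map;
    import com.google.gson.Gson;

    final class WorkPayloadSketch {
        static String serialize(Long destPoolIdOrNull, Map<Long, Long> volumeIdToPoolId) {
            Map<String, Object> work = new HashMap<>();
            work.put("destPoolId", destPoolIdOrNull);  // stays null for a purely map-driven migration
            work.put("volumeToPool", volumeIdToPoolId);
            return new Gson().toJson(work);            // e.g. {"volumeToPool":{"42":7}}
        }
    }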
package com.cloud.vm; +import java.util.Map; + public class VmWorkStorageMigration extends VmWork { private static final long serialVersionUID = -8677979691741157474L; Long destPoolId; + Map volumeToPool; - public VmWorkStorageMigration(long userId, long accountId, long vmId, String handlerName, Long destPoolId) { + public VmWorkStorageMigration(long userId, long accountId, long vmId, String handlerName, Long destPoolId, Map volumeToPool) { super(userId, accountId, vmId, handlerName); this.destPoolId = destPoolId; + this.volumeToPool = volumeToPool; } public Long getDestStoragePoolId() { return destPoolId; } + + public Map getVolumeToPool() { + return volumeToPool; + } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 3e68d3a4ab05..d515800221ad 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -18,6 +18,8 @@ */ package org.apache.cloudstack.engine.orchestration; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -68,6 +70,7 @@ import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.commons.collections.MapUtils; import org.apache.log4j.Logger; import com.cloud.agent.api.to.DataTO; @@ -139,7 +142,6 @@ import com.cloud.vm.VmWorkTakeVolumeSnapshot; import com.cloud.vm.dao.UserVmCloneSettingDao; import com.cloud.vm.dao.UserVmDao; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable { @@ -1065,37 +1067,69 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos } @Override - public boolean storageMigration(VirtualMachineProfile vm, StoragePool destPool) throws StorageUnavailableException { - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - List volumesNeedToMigrate = new ArrayList(); + public boolean storageMigration(VirtualMachineProfile vm, StoragePool destPool, Map volumeToPool) throws StorageUnavailableException { + if (destPool != null) { + List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); + List volumesNeedToMigrate = new ArrayList(); + + for (VolumeVO volume : vols) { + if (volume.getState() != Volume.State.Ready) { + s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); + throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); + } + + if (volume.getPoolId() == destPool.getId()) { + s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId()); + continue; + } - for (VolumeVO volume : vols) { - if (volume.getState() != Volume.State.Ready) { - s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); - throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); + volumesNeedToMigrate.add(volume); } - if (volume.getPoolId() == destPool.getId()) { - s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId()); - continue; + if (volumesNeedToMigrate.isEmpty()) { + s_logger.debug("No volumes need to be migrated"); + return true; } - volumesNeedToMigrate.add(volume); - } + // OfflineVmwareMigration: in case we can (vmware?) don't iterate over volumes but tell the hypervisor to do the thing + if (s_logger.isDebugEnabled()) { + s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here."); + } + for (Volume vol : volumesNeedToMigrate) { + Volume result = migrateVolume(vol, destPool); + if (result == null) { + return false; + } + } + } else if (MapUtils.isNotEmpty(volumeToPool)) { + Map volumeStoragePoolMap = new HashMap<>(); + for (Map.Entry entry : volumeToPool.entrySet()) { + Volume volume = entry.getKey(); + StoragePool pool = entry.getValue(); + if (volume.getState() != Volume.State.Ready) { + s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); + throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); } - if (volumesNeedToMigrate.isEmpty()) { - s_logger.debug("No volume need to be migrated"); - return true; - } + if (volume.getPoolId() == pool.getId()) { + s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + pool.getId()); + continue; + } + volumeStoragePoolMap.put(volume, pool); } - // OfflineVmwareMigration: in case we can (vmware?) don't itterate over volumes but tell the hypervisor to do the thing - if (s_logger.isDebugEnabled()) { - s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here."); - } - for (Volume vol : volumesNeedToMigrate) { - Volume result = migrateVolume(vol, destPool); - if (result == null) { - return false; + if (MapUtils.isEmpty(volumeStoragePoolMap)) { + s_logger.debug("No volumes need to be migrated"); + return true; + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here."); + } + for (Map.Entry entry : volumeStoragePoolMap.entrySet()) { + Volume result = migrateVolume(entry.getKey(), entry.getValue()); + if (result == null) { + return false; + } } } return true; diff --git a/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java b/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java index 1725a413145c..d2d5fc89819e 100644 --- a/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java +++ b/engine/orchestration/src/test/java/com/cloud/vm/VirtualMachineManagerImplTest.java @@ -49,6 +49,7 @@ import com.cloud.agent.api.Command; import com.cloud.agent.api.StopAnswer; import com.cloud.agent.api.StopCommand; +import com.cloud.deploy.DataCenterDeployment; import com.cloud.deploy.DeploymentPlan; import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; @@ -96,6 +97,8 @@ public class VirtualMachineManagerImplTest { private long hostMockId = 1L; @Mock private HostVO hostMock; + @Mock + private DataCenterDeployment dataCenterDeploymentMock; @Mock private VirtualMachineProfile virtualMachineProfileMock; @@ -127,6 +130,7 @@ public void setup() { when(vmInstanceMock.getHostId()).thenReturn(2L); when(vmInstanceMock.getType()).thenReturn(VirtualMachine.Type.User); when(hostMock.getId()).thenReturn(hostMockId); + when(dataCenterDeploymentMock.getHostId()).thenReturn(hostMockId); 
Mockito.doReturn(vmInstanceVoMockId).when(virtualMachineProfileMock).getId(); @@ -227,33 +231,30 @@ public void testCheckIfCanUpgrade() throws Exception { @Test public void isStorageCrossClusterMigrationTestStorageTypeEqualsCluster() { - Mockito.doReturn(1L).when(hostMock).getClusterId(); Mockito.doReturn(2L).when(storagePoolVoMock).getClusterId(); Mockito.doReturn(ScopeType.CLUSTER).when(storagePoolVoMock).getScope(); - boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock); + boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(1L, storagePoolVoMock); Assert.assertTrue(returnedValue); } @Test public void isStorageCrossClusterMigrationTestStorageSameCluster() { - Mockito.doReturn(1L).when(hostMock).getClusterId(); Mockito.doReturn(1L).when(storagePoolVoMock).getClusterId(); Mockito.doReturn(ScopeType.CLUSTER).when(storagePoolVoMock).getScope(); - boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock); + boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(1L, storagePoolVoMock); assertFalse(returnedValue); } @Test public void isStorageCrossClusterMigrationTestStorageTypeEqualsZone() { - Mockito.doReturn(1L).when(hostMock).getClusterId(); Mockito.doReturn(2L).when(storagePoolVoMock).getClusterId(); Mockito.doReturn(ScopeType.ZONE).when(storagePoolVoMock).getScope(); - boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(hostMock, storagePoolVoMock); + boolean returnedValue = virtualMachineManagerImpl.isStorageCrossClusterMigration(1L, storagePoolVoMock); assertFalse(returnedValue); } @@ -384,7 +385,7 @@ public void getCandidateStoragePoolsToMigrateLocalVolumeTestLocalVolume() { Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class), Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL)); - List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, hostMock, volumeVoMock); + List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock); Assert.assertEquals(1, poolList.size()); Assert.assertEquals(storagePoolVoMock, poolList.get(0)); @@ -402,8 +403,8 @@ public void getCandidateStoragePoolsToMigrateLocalVolumeTestCrossClusterMigratio Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class), Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL)); - Mockito.doReturn(true).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMock, storagePoolVoMock); - List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, hostMock, volumeVoMock); + Mockito.doReturn(true).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMockId, storagePoolVoMock); + List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock); Assert.assertEquals(1, poolList.size()); Assert.assertEquals(storagePoolVoMock, poolList.get(0)); @@ -421,8 +422,8 @@ public void 
getCandidateStoragePoolsToMigrateLocalVolumeTestWithinClusterMigrati Mockito.doReturn(poolListMock).when(storagePoolAllocatorMock).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class), Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL)); - Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMock, storagePoolVoMock); - List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, hostMock, volumeVoMock); + Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMockId, storagePoolVoMock); + List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock); Assert.assertTrue(poolList.isEmpty()); } @@ -455,8 +456,8 @@ public void getCandidateStoragePoolsToMigrateLocalVolumeTestMoreThanOneAllocator Mockito.doReturn(new ArrayList<>()).when(storagePoolAllocatorMock3).allocateToPool(Mockito.any(DiskProfile.class), Mockito.any(VirtualMachineProfile.class), Mockito.any(DeploymentPlan.class), Mockito.any(ExcludeList.class), Mockito.eq(StoragePoolAllocator.RETURN_UPTO_ALL)); - Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMock, storagePoolVoMock); - List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, hostMock, volumeVoMock); + Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMockId, storagePoolVoMock); + List poolList = virtualMachineManagerImpl.getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock); Assert.assertTrue(poolList.isEmpty()); @@ -470,9 +471,9 @@ public void getCandidateStoragePoolsToMigrateLocalVolumeTestMoreThanOneAllocator @Test(expected = CloudRuntimeException.class) public void createVolumeToStoragePoolMappingIfPossibleTestNotStoragePoolsAvailable() { - Mockito.doReturn(null).when(virtualMachineManagerImpl).getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, hostMock, volumeVoMock); + Mockito.doReturn(null).when(virtualMachineManagerImpl).getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock); - virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, new HashMap<>(), volumeVoMock, storagePoolVoMock); + virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, new HashMap<>(), volumeVoMock, storagePoolVoMock); } @Test @@ -480,10 +481,10 @@ public void createVolumeToStoragePoolMappingIfPossibleTestTargetHostAccessCurren List storagePoolList = new ArrayList<>(); storagePoolList.add(storagePoolVoMock); - Mockito.doReturn(storagePoolList).when(virtualMachineManagerImpl).getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, hostMock, volumeVoMock); + Mockito.doReturn(storagePoolList).when(virtualMachineManagerImpl).getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock); HashMap volumeToPoolObjectMap = new HashMap<>(); - virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); + 
virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); Assert.assertTrue(volumeToPoolObjectMap.isEmpty()); } @@ -498,10 +499,10 @@ public void createVolumeToStoragePoolMappingIfPossibleTestTargetHostDoesNotAcces List storagePoolList = new ArrayList<>(); storagePoolList.add(storagePoolMockOther); - Mockito.doReturn(storagePoolList).when(virtualMachineManagerImpl).getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, hostMock, volumeVoMock); + Mockito.doReturn(storagePoolList).when(virtualMachineManagerImpl).getCandidateStoragePoolsToMigrateLocalVolume(virtualMachineProfileMock, dataCenterDeploymentMock, volumeVoMock); HashMap volumeToPoolObjectMap = new HashMap<>(); - virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); + virtualMachineManagerImpl.createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); assertFalse(volumeToPoolObjectMap.isEmpty()); Assert.assertEquals(storagePoolMockOther, volumeToPoolObjectMap.get(volumeVoMock)); @@ -516,14 +517,14 @@ public void createStoragePoolMappingsForVolumesTestLocalStoragevolume() { Mockito.doReturn(ScopeType.HOST).when(storagePoolVoMock).getScope(); Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock); - Mockito.doNothing().when(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, + Mockito.doNothing().when(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); - virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, allVolumes); + virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, allVolumes); Assert.assertTrue(volumeToPoolObjectMap.isEmpty()); Mockito.verify(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock); - Mockito.verify(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); + Mockito.verify(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); } @Test @@ -535,15 +536,15 @@ public void createStoragePoolMappingsForVolumesTestCrossCluterMigration() { Mockito.doReturn(ScopeType.CLUSTER).when(storagePoolVoMock).getScope(); Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock); - Mockito.doNothing().when(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); - Mockito.doReturn(true).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMock, storagePoolVoMock); + 
Mockito.doNothing().when(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); + Mockito.doReturn(true).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMockId, storagePoolVoMock); - virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, allVolumes); + virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, allVolumes); Assert.assertTrue(volumeToPoolObjectMap.isEmpty()); Mockito.verify(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock); - Mockito.verify(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); - Mockito.verify(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMock, storagePoolVoMock); + Mockito.verify(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); + Mockito.verify(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMockId, storagePoolVoMock); } @Test @@ -555,17 +556,17 @@ public void createStoragePoolMappingsForVolumesTestNotCrossCluterMigrationWithCl Mockito.doReturn(ScopeType.CLUSTER).when(storagePoolVoMock).getScope(); Mockito.doNothing().when(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock); - Mockito.doNothing().when(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); - Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMock, storagePoolVoMock); + Mockito.doNothing().when(virtualMachineManagerImpl).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); + Mockito.doReturn(false).when(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMockId, storagePoolVoMock); - virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, allVolumes); + virtualMachineManagerImpl.createStoragePoolMappingsForVolumes(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, allVolumes); assertFalse(volumeToPoolObjectMap.isEmpty()); Assert.assertEquals(storagePoolVoMock, volumeToPoolObjectMap.get(volumeVoMock)); Mockito.verify(virtualMachineManagerImpl).executeManagedStorageChecksWhenTargetStoragePoolNotProvided(hostMock, storagePoolVoMock, volumeVoMock); - Mockito.verify(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMock, storagePoolVoMock); - Mockito.verify(virtualMachineManagerImpl, Mockito.times(0)).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumeVoMock, + Mockito.verify(virtualMachineManagerImpl).isStorageCrossClusterMigration(hostMockId, storagePoolVoMock); + Mockito.verify(virtualMachineManagerImpl, Mockito.times(0)).createVolumeToStoragePoolMappingIfPossible(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumeVoMock, storagePoolVoMock); } @@ 
-578,7 +579,7 @@ public void createMappingVolumeAndStoragePoolTest() { Mockito.anyMapOf(Long.class, Long.class)); Mockito.doReturn(volumesNotMapped).when(virtualMachineManagerImpl).findVolumesThatWereNotMappedByTheUser(virtualMachineProfileMock, volumeToPoolObjectMap); - Mockito.doNothing().when(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumesNotMapped); + Mockito.doNothing().when(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumesNotMapped); Map mappingVolumeAndStoragePool = virtualMachineManagerImpl.createMappingVolumeAndStoragePool(virtualMachineProfileMock, hostMock, new HashMap<>()); @@ -587,7 +588,7 @@ public void createMappingVolumeAndStoragePoolTest() { InOrder inOrder = Mockito.inOrder(virtualMachineManagerImpl); inOrder.verify(virtualMachineManagerImpl).buildMapUsingUserInformation(Mockito.eq(virtualMachineProfileMock), Mockito.eq(hostMock), Mockito.anyMapOf(Long.class, Long.class)); inOrder.verify(virtualMachineManagerImpl).findVolumesThatWereNotMappedByTheUser(virtualMachineProfileMock, volumeToPoolObjectMap); - inOrder.verify(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(virtualMachineProfileMock, hostMock, volumeToPoolObjectMap, volumesNotMapped); + inOrder.verify(virtualMachineManagerImpl).createStoragePoolMappingsForVolumes(virtualMachineProfileMock, dataCenterDeploymentMock, volumeToPoolObjectMap, volumesNotMapped); } @Test diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index 740b66844b54..032323f55504 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -16,6 +16,8 @@ // under the License. 
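
Note on the test changes above: they track a refactor in VirtualMachineManagerImpl where the candidate-pool lookup now receives a DeploymentPlan (dataCenterDeploymentMock) instead of a Host, and the cross-cluster check receives a host id instead of a Host object. The check these tests stub boils down to comparing a cluster-scoped pool's cluster with the destination host's cluster. A minimal sketch of that logic, assuming an injected HostDao; this helper is illustrative, not the code in this patch:

    // Illustrative sketch: a storage migration is cross-cluster when the pool is
    // cluster-scoped and belongs to a different cluster than the destination host.
    boolean isStorageCrossClusterMigration(Long destHostId, StoragePoolVO destPool) {
        HostVO host = _hostDao.findById(destHostId); // _hostDao assumed injected
        return host != null
                && ScopeType.CLUSTER.equals(destPool.getScope())
                && !java.util.Objects.equals(host.getClusterId(), destPool.getClusterId());
    }
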
package com.cloud.hypervisor.guru; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.util.ArrayList; import java.util.Date; import java.util.HashMap; @@ -42,6 +44,7 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; @@ -62,6 +65,7 @@ import com.cloud.agent.api.to.DataStoreTO; import com.cloud.agent.api.to.DataTO; import com.cloud.agent.api.to.DiskTO; +import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; import com.cloud.cluster.ClusterManager; @@ -149,8 +153,6 @@ import com.vmware.vim25.VirtualMachineConfigSummary; import com.vmware.vim25.VirtualMachineRuntimeInfo; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; - public class VMwareGuru extends HypervisorGuruBase implements HypervisorGuru, Configurable { private static final Logger s_logger = Logger.getLogger(VMwareGuru.class); @@ -209,8 +211,8 @@ protected VMwareGuru() { return vmwareVmImplementer.implement(vm, toVirtualMachineTO(vm), getClusterId(vm.getId())); } - long getClusterId(long vmId) { - long clusterId; + Long getClusterId(long vmId) { + Long clusterId = null; Long hostId; hostId = _vmDao.findById(vmId).getHostId(); @@ -218,7 +220,20 @@ long getClusterId(long vmId) { // If VM is in stopped state then hostId would be undefined. Hence read last host's Id instead. hostId = _vmDao.findById(vmId).getLastHostId(); } - clusterId = _hostDao.findById(hostId).getClusterId(); + if (hostId != null) { + clusterId = _hostDao.findById(hostId).getClusterId(); + } else { + List volumes = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT); + if (CollectionUtils.isNotEmpty(volumes)) { + VolumeVO rootVolume = volumes.get(0); + if (rootVolume.getPoolId() != null) { + StoragePoolVO pool = _storagePoolDao.findById(rootVolume.getPoolId()); + if (pool != null && pool.getClusterId() != null) { + clusterId = pool.getClusterId(); + } + } + } + } return clusterId; } @@ -418,9 +433,11 @@ private static String resolveNameInGuid(String guid) { @Override public Map getClusterSettings(long vmId) { Map details = new HashMap(); - long clusterId = getClusterId(vmId); - details.put(VmwareReserveCpu.key(), VmwareReserveCpu.valueIn(clusterId).toString()); - details.put(VmwareReserveMemory.key(), VmwareReserveMemory.valueIn(clusterId).toString()); + Long clusterId = getClusterId(vmId); + if (clusterId != null) { + details.put(VmwareReserveCpu.key(), VmwareReserveCpu.valueIn(clusterId).toString()); + details.put(VmwareReserveMemory.key(), VmwareReserveMemory.valueIn(clusterId).toString()); + } return details; } @@ -1056,24 +1073,54 @@ private VirtualDisk getAttachedDisk(VirtualMachineMO vmMo, String diskPath) thro return null; } - @Override public List finalizeMigrate(VirtualMachine vm, StoragePool destination) { + @Override public List finalizeMigrate(VirtualMachine vm, StoragePool destination, Map volumeToPool) { List commands = new ArrayList(); // OfflineVmwareMigration: specialised migration command - List volumes = _volumeDao.findByInstance(vm.getId()); List vols = new ArrayList<>(); - for (Volume volume : volumes) { - VolumeTO vol = new VolumeTO(volume, destination); - vols.add(vol); + List> volumeToFilerto = new ArrayList>(); + Long 
poolClusterId = null; + Host clusterHost = null; + if (destination != null) { + List volumes = _volumeDao.findByInstance(vm.getId()); + for (Volume volume : volumes) { + VolumeTO vol = new VolumeTO(volume, destination); + vols.add(vol); + } + } else if (MapUtils.isNotEmpty(volumeToPool)) { + for (Map.Entry entry : volumeToPool.entrySet()) { + Volume volume = entry.getKey(); + StoragePool pool = entry.getValue(); + VolumeTO volumeTo = new VolumeTO(volume, _storagePoolDao.findById(pool.getId())); + StorageFilerTO filerTo = new StorageFilerTO(pool); + if (pool.getClusterId() != null) { + poolClusterId = pool.getClusterId(); + } + volumeToFilerto.add(new Pair(volumeTo, filerTo)); + vols.add(volumeTo); + } + } + final Long destClusterId = destination != null ? destination.getClusterId() : poolClusterId; + final Long srcClusterId = getClusterId(vm.getId()); + final boolean isInterClusterMigration = srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId); + if (isInterClusterMigration) { + // Without host vMotion might fail between non-shared storages with error similar to, + // https://kb.vmware.com/s/article/1003795 + // As this is offline migration VM won't be started on this host + List hosts = _hostDao.findHypervisorHostInCluster(destClusterId); + if (CollectionUtils.isNotEmpty(hosts)) { + clusterHost = hosts.get(0); + } + if (clusterHost == null) { + throw new CloudRuntimeException("Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different clusters without shared storages"); + } } - MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, destination.getUuid(), true); + MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, + destination == null ? null : destination.getUuid(), volumeToFilerto, clusterHost == null ? null : clusterHost.getGuid(), true); commands.add(migrateVmToPoolCommand); // OfflineVmwareMigration: cleanup if needed - final Long destClusterId = destination.getClusterId(); - final Long srcClusterId = getClusterId(vm.getId()); - - if (srcClusterId != null && destClusterId != null && !srcClusterId.equals(destClusterId)) { + if (isInterClusterMigration) { final String srcDcName = _clusterDetailsDao.getVmwareDcName(srcClusterId); final String destDcName = _clusterDetailsDao.getVmwareDcName(destClusterId); if (srcDcName != null && destDcName != null && !srcDcName.equals(destDcName)) { diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index c79c023dac72..9b4c20d10e6b 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -16,6 +16,9 @@ // under the License. 
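
Note on finalizeMigrate above: it now emits MigrateVmToPoolCommand in two shapes, either a single destination pool for the whole VM, or a per-volume list of VolumeTO/StorageFilerTO pairs plus the GUID of a host in the target cluster (needed because, per the referenced KB article, vMotion between non-shared datastores can fail without a host). A hedged sketch of the two constructions, reusing only constructors that appear in this patch; the local variables are assumptions:

    // Whole-VM form: every volume follows the single destination pool.
    MigrateVmToPoolCommand wholeVm = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, destination.getUuid(), null, null, true);

    // Per-volume form: each volume carries its own target filer, and a host GUID in
    // the destination cluster is passed along for inter-cluster vMotion.
    List<Pair<VolumeTO, StorageFilerTO>> volumeToFiler = new ArrayList<>();
    for (Map.Entry<Volume, StoragePool> e : volumeToPool.entrySet()) {
        volumeToFiler.add(new Pair<>(new VolumeTO(e.getKey(), e.getValue()), new StorageFilerTO(e.getValue())));
    }
    MigrateVmToPoolCommand perVolume = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, null, volumeToFiler, clusterHost.getGuid(), true);
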
package com.cloud.hypervisor.vmware.resource; +import static com.cloud.utils.HumanReadableJson.getHumanReadableBytesJson; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.io.File; import java.io.IOException; import java.io.UnsupportedEncodingException; @@ -55,6 +58,7 @@ import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import org.apache.cloudstack.vm.UnmanagedInstanceTO; import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.math.NumberUtils; import org.apache.log4j.Logger; @@ -318,8 +322,8 @@ import com.vmware.vim25.VirtualEthernetCardNetworkBackingInfo; import com.vmware.vim25.VirtualEthernetCardOpaqueNetworkBackingInfo; import com.vmware.vim25.VirtualIDEController; -import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineBootOptions; +import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineFileInfo; import com.vmware.vim25.VirtualMachineFileLayoutEx; import com.vmware.vim25.VirtualMachineFileLayoutExFileInfo; @@ -341,9 +345,6 @@ import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; -import static com.cloud.utils.HumanReadableJson.getHumanReadableBytesJson; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; - public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer { private static final Logger s_logger = Logger.getLogger(VmwareResource.class); public static final String VMDK_EXTENSION = ".vmdk"; @@ -4117,7 +4118,7 @@ protected Answer execute(PrepareForMigrationCommand cmd) { protected Answer execute(MigrateVmToPoolCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("excuting MigrateVmToPoolCommand %s -> %s", cmd.getVmName(), cmd.getDestinationPool())); + s_logger.info(String.format("Executing MigrateVmToPoolCommand %s -> %s", cmd.getVmName(), cmd.getDestinationPool())); if (s_logger.isDebugEnabled()) { s_logger.debug("MigrateVmToPoolCommand: " + _gson.toJson(cmd)); } @@ -4129,13 +4130,17 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { try { VirtualMachineMO vmMo = getVirtualMachineMO(vmName, hyperHost); if (vmMo == null) { - String msg = "VM " + vmName + " does not exist in VMware datacenter"; - s_logger.error(msg); - throw new CloudRuntimeException(msg); + s_logger.info("VM " + vmName + " was not found in the cluster of host " + hyperHost.getHyperHostName() + ". 
Looking for the VM in datacenter."); + ManagedObjectReference dcMor = hyperHost.getHyperHostDatacenter(); + DatacenterMO dcMo = new DatacenterMO(hyperHost.getContext(), dcMor); + vmMo = dcMo.findVm(vmName); + if (vmMo == null) { + String msg = "VM " + vmName + " does not exist in VMware datacenter"; + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } } - - String poolUuid = cmd.getDestinationPool(); - return migrateAndAnswer(vmMo, poolUuid, hyperHost, cmd); + return migrateAndAnswer(vmMo, cmd.getDestinationPool(), hyperHost, cmd); } catch (Throwable e) { // hopefully only CloudRuntimeException :/ if (e instanceof Exception) { return new Answer(cmd, (Exception) e); @@ -4149,37 +4154,46 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { } private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception { - ManagedObjectReference morDs = getTargetDatastoreMOReference(poolUuid, hyperHost); + String targetClusterHostName = null; + + VmwareHypervisorHost targetClusterHost = null; + List> volToFiler = new ArrayList<>(); + if (cmd instanceof MigrateVmToPoolCommand) { + MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand)cmd; + targetClusterHostName = mcmd.getTargetClusterHost(); + volToFiler = mcmd.getVolumeToFilerAsList(); + } else if (cmd instanceof MigrateVolumeCommand) { + targetClusterHostName = ((MigrateVolumeCommand)cmd).getTargetClusterHost(); + } + if (StringUtils.isNotBlank(targetClusterHostName)) { + String targetClusterHostMorInfo = targetClusterHostName.split("@")[0]; + ManagedObjectReference morTgtClusterHost = new ManagedObjectReference(); + morTgtClusterHost.setType(targetClusterHostMorInfo.split(":")[0]); + morTgtClusterHost.setValue(targetClusterHostMorInfo.split(":")[1]); + targetClusterHost = new HostMO(getServiceContext(), morTgtClusterHost); + } try { // OfflineVmwareMigration: getVolumesFromCommand(cmd); - Map volumeDeviceKey = getVolumesFromCommand(vmMo, cmd); - if (s_logger.isTraceEnabled()) { - for (Integer diskId : volumeDeviceKey.keySet()) { - s_logger.trace(String.format("disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId))); - } - } - if (vmMo.changeDatastore(morDs)) { - // OfflineVmwareMigration: create target specification to include in answer - // Consolidate VM disks after successful VM migration - // In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies. - if (!vmMo.consolidateVmDisks()) { - s_logger.warn("VM disk consolidation failed after storage migration. 
Yet proceeding with VM migration."); - } else { - s_logger.debug("Successfully consolidated disks of VM " + vmMo.getVmName() + "."); + Map volumeDeviceKey = new HashMap<>(); + if (CollectionUtils.isEmpty(volToFiler)) { // Else device keys will be found in relocateVirtualMachine + volumeDeviceKey = getVolumesFromCommand(vmMo, cmd); + if (s_logger.isTraceEnabled()) { + for (Integer diskId: volumeDeviceKey.keySet()) { + s_logger.trace(String.format("Disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId))); + } } - return createAnswerForCmd(vmMo, poolUuid, cmd, volumeDeviceKey); - } else { - return new Answer(cmd, false, "failed to changes data store for VM" + vmMo.getVmName()); } + List volumeToList = relocateVirtualMachine(hyperHost, vmMo.getName(), null, null, targetClusterHost, poolUuid, volToFiler); + return createAnswerForCmd(vmMo, poolUuid, volumeToList, cmd, volumeDeviceKey); } catch (Exception e) { - String msg = "change data store for VM " + vmMo.getVmName() + " failed"; + String msg = "Change data store for VM " + vmMo.getVmName() + " failed"; s_logger.error(msg + ": " + e.getLocalizedMessage()); throw new CloudRuntimeException(msg, e); } } - Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, Command cmd, Map volumeDeviceKey) throws Exception { + Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, List volumeObjectToList, Command cmd, Map volumeDeviceKey) throws Exception { List volumeToList = new ArrayList<>(); VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); VirtualDisk[] disks = vmMo.getAllDiskDevice(); @@ -4194,16 +4208,22 @@ Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, Command cmd, M } throw new CloudRuntimeException("not expecting more then one disk after migrate volume command"); } else if (cmd instanceof MigrateVmToPoolCommand) { - for (VirtualDisk disk : disks) { - VolumeObjectTO newVol = new VolumeObjectTO(); - String newPath = vmMo.getVmdkFileBaseName(disk); - VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolUuid); - newVol.setId(volumeDeviceKey.get(disk.getKey())); - newVol.setPath(newPath); - newVol.setChainInfo(_gson.toJson(diskInfo)); - volumeToList.add(newVol); + if (poolUuid != null) { + for (VirtualDisk disk : disks) { + VolumeObjectTO newVol = new VolumeObjectTO(); + String newPath = vmMo.getVmdkFileBaseName(disk); + String poolName = poolUuid.replace("-", ""); + VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolName); + newVol.setId(volumeDeviceKey.get(disk.getKey())); + newVol.setPath(newPath); + newVol.setChainInfo(_gson.toJson(diskInfo)); + newVol.setDataStoreUuid(poolUuid); + volumeToList.add(newVol); + } + } else if (CollectionUtils.isNotEmpty(volumeObjectToList)) { + volumeToList = volumeObjectToList; } - return new MigrateVmToPoolAnswer((MigrateVmToPoolCommand) cmd, volumeToList); + return new MigrateVmToPoolAnswer((MigrateVmToPoolCommand)cmd, volumeToList); } return new Answer(cmd, false, null); } @@ -7185,4 +7205,291 @@ private Answer execute(PrepareUnmanageVMInstanceCommand cmd) { return new PrepareUnmanageVMInstanceAnswer(cmd, true, "OK"); } + + /* + * Method to relocate a virtual machine. This migrates VM and its volumes to given host, datastores. + * It is used for MigrateVolumeCommand (detached volume case), MigrateVmToPoolCommand and MigrateVmWithStorageCommand. 
+ */ + + private List relocateVirtualMachine(final VmwareHypervisorHost hypervisorHost, + final String name, final VirtualMachineTO vmTo, + final String targetHost, final VmwareHypervisorHost targetClusterHost, + final String poolUuid, final List> volToFiler) throws Exception { + String vmName = name; + if (vmName == null && vmTo != null) { + vmName = vmTo.getName(); + } + VmwareHypervisorHost sourceHyperHost = hypervisorHost; + VmwareHypervisorHost targetHyperHost = targetClusterHost; + VirtualMachineMO vmMo = null; + ManagedObjectReference morSourceHostDc = null; + ManagedObjectReference morTargetHostDc = null; + ManagedObjectReference morTargetHost = new ManagedObjectReference(); + VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); + List diskLocators = new ArrayList(); + Set mountedDatastoresAtSource = new HashSet(); + List volumeToList = new ArrayList<>(); + Map volumeDeviceKey = new HashMap(); + if (StringUtils.isNotBlank(targetHost)) { + String targetHostMorInfo = targetHost.split("@")[0]; + morTargetHost.setType(targetHostMorInfo.split(":")[0]); + morTargetHost.setValue(targetHostMorInfo.split(":")[1]); + } + + try { + if (sourceHyperHost == null) { + sourceHyperHost = getHyperHost(getServiceContext()); + } + if (targetHyperHost == null && StringUtils.isNotBlank(targetHost)) { + targetHyperHost = new HostMO(getServiceContext(), morTargetHost); + } + morSourceHostDc = sourceHyperHost.getHyperHostDatacenter(); + DatacenterMO dcMo = new DatacenterMO(sourceHyperHost.getContext(), morSourceHostDc); + if (targetHyperHost != null) { + morTargetHostDc = targetHyperHost.getHyperHostDatacenter(); + if (!morSourceHostDc.getValue().equalsIgnoreCase(morTargetHostDc.getValue())) { + String msg = "VM " + vmName + " cannot be migrated between different datacenter"; + throw new CloudRuntimeException(msg); + } + } + + // find VM through source host (VM is not at the target host yet) + vmMo = sourceHyperHost.findVmOnHyperHost(vmName); + if (vmMo == null) { + String msg = "VM " + vmName + " does not exist on host: " + sourceHyperHost.getHyperHostName(); + s_logger.warn(msg); + // find VM through source host (VM is not at the target host yet) + vmMo = dcMo.findVm(vmName); + if (vmMo == null) { + msg = "VM " + vmName + " does not exist on datacenter: " + dcMo.getName(); + s_logger.error(msg); + throw new Exception(msg); + } + // VM host has changed + sourceHyperHost = vmMo.getRunningHost(); + } + + vmName = vmMo.getName(); + String srcHostApiVersion = ((HostMO)sourceHyperHost).getHostAboutInfo().getApiVersion(); + + if (StringUtils.isNotBlank(poolUuid)) { + String dsName = poolUuid.replace("-", ""); + ManagedObjectReference morDatastore = null; + String msg; + if (targetClusterHost != null) { + morDatastore = dcMo.findDatastore(dsName); + } else { + morDatastore = getTargetDatastoreMOReference(dsName, sourceHyperHost); + } + if (morDatastore == null) { + msg = "Unable to find the target datastore: " + dsName + + (targetClusterHost != null ? 
"in datacenter: " + dcMo.getName() : " on host: " + sourceHyperHost.getHyperHostName()) + + " to execute migration"; + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + relocateSpec.setDatastore(morDatastore); + } else if (CollectionUtils.isNotEmpty(volToFiler)) { + // Specify destination datastore location for each volume + for (Pair entry : volToFiler) { + VolumeTO volume = entry.first(); + StorageFilerTO filerTo = entry.second(); + if (s_logger.isDebugEnabled()) { + s_logger.debug(String.format("Preparing spec for volume: %s to migrate it to datastore: %s", volume.getName(), filerTo.getUuid())); + } + String dsName = filerTo.getUuid().replace("-", ""); + ManagedObjectReference morVolumeDatastore = dcMo.findDatastore(dsName); + if (morVolumeDatastore == null) { + String msg = "Unable to find the target datastore: " + dsName + " in datacenter: " + dcMo.getName() + " to execute migration"; + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + + String mountedDs = getMountedDatastoreName(sourceHyperHost, srcHostApiVersion, filerTo); + if (mountedDs != null) { + mountedDatastoresAtSource.add(mountedDs); + } + + if (volume.getType() == Volume.Type.ROOT) { + relocateSpec.setDatastore(morVolumeDatastore); + } + VirtualMachineRelocateSpecDiskLocator diskLocator = new VirtualMachineRelocateSpecDiskLocator(); + diskLocator.setDatastore(morVolumeDatastore); + Pair diskInfo = getVirtualDiskInfo(vmMo, volume.getPath() + VMDK_EXTENSION); + String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first()); + if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) { + vmMo.updateAdapterTypeIfRequired(vmdkAbsFile); + } + int diskId = diskInfo.first().getKey(); + diskLocator.setDiskId(diskId); + + diskLocators.add(diskLocator); + volumeDeviceKey.put(volume.getId(), diskId); + } + // If a target datastore is provided for the VM, then by default all volumes associated with the VM will be migrated to that target datastore. + // Hence set the existing datastore as target datastore for volumes that are not to be migrated. 
+ List> diskDatastores = vmMo.getAllDiskDatastores(); + for (Pair diskDatastore : diskDatastores) { + if (!volumeDeviceKey.containsValue(diskDatastore.first().intValue())) { + VirtualMachineRelocateSpecDiskLocator diskLocator = new VirtualMachineRelocateSpecDiskLocator(); + diskLocator.setDiskId(diskDatastore.first().intValue()); + diskLocator.setDatastore(diskDatastore.second()); + diskLocators.add(diskLocator); + } + } + + relocateSpec.getDisk().addAll(diskLocators); + } + + // Specific section for MigrateVmWithStorageCommand + if (vmTo != null) { + // Prepare network at target before migration + NicTO[] nics = vmTo.getNics(); + for (NicTO nic : nics) { + // prepare network on the host + prepareNetworkFromNicInfo(new HostMO(getServiceContext(), morTargetHost), nic, false, vmTo.getType()); + } + // Ensure secondary storage mounted on target host + VmwareManager mgr = targetHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); + Pair secStoreUrlAndId = mgr.getSecondaryStorageStoreUrlAndId(Long.parseLong(_dcId)); + String secStoreUrl = secStoreUrlAndId.first(); + Long secStoreId = secStoreUrlAndId.second(); + if (secStoreUrl == null) { + String msg = "secondary storage for dc " + _dcId + " is not ready yet?"; + throw new Exception(msg); + } + mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId); + ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnSpecificHost(secStoreUrl, targetHyperHost); + if (morSecDs == null) { + String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl; + throw new Exception(msg); + } + } + + if (srcHostApiVersion.compareTo("5.1") < 0) { + // Migrate VM's volumes to target datastore(s). + if (!vmMo.changeDatastore(relocateSpec)) { + throw new Exception("Change datastore operation failed during storage migration"); + } else { + s_logger.debug("Successfully migrated storage of VM " + vmName + " to target datastore(s)"); + } + // Migrate VM to target host. + if (targetHyperHost != null) { + ManagedObjectReference morPool = targetHyperHost.getHyperHostOwnerResourcePool(); + if (!vmMo.migrate(morPool, targetHyperHost.getMor())) { + throw new Exception("VM migration to target host failed during storage migration"); + } else { + s_logger.debug("Successfully migrated VM " + vmName + " from " + sourceHyperHost.getHyperHostName() + " to " + targetHyperHost.getHyperHostName()); + } + } + } else { + // Add target host to relocate spec + if (targetHyperHost != null) { + relocateSpec.setHost(targetHyperHost.getMor()); + relocateSpec.setPool(targetHyperHost.getHyperHostOwnerResourcePool()); + } + if (!vmMo.changeDatastore(relocateSpec)) { + throw new Exception("Change datastore operation failed during storage migration"); + } else { + s_logger.debug("Successfully migrated VM " + vmName + + (targetClusterHost != null ? " from " + sourceHyperHost.getHyperHostName() + " to " + targetHyperHost.getHyperHostName() + " and " : " with ") + + "its storage to target datastore(s)"); + } + } + + // Consolidate VM disks. + // In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies. + if (!vmMo.consolidateVmDisks()) { + s_logger.warn("VM disk consolidation failed after storage migration. 
Yet proceeding with VM migration."); + } else { + s_logger.debug("Successfully consolidated disks of VM " + vmName + "."); + } + + if (MapUtils.isNotEmpty(volumeDeviceKey)) { + // Update and return volume path and chain info for every disk because that could have changed after migration + VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); + for (Pair entry : volToFiler) { + final VolumeTO volume = entry.first(); + final long volumeId = volume.getId(); + VirtualDisk[] disks = vmMo.getAllDiskDevice(); + for (VirtualDisk disk : disks) { + if (volumeDeviceKey.get(volumeId) == disk.getKey()) { + VolumeObjectTO newVol = new VolumeObjectTO(); + newVol.setDataStoreUuid(entry.second().getUuid()); + String newPath = vmMo.getVmdkFileBaseName(disk); + String poolName = entry.second().getUuid().replace("-", ""); + VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolName); + newVol.setId(volumeId); + newVol.setPath(newPath); + newVol.setChainInfo(_gson.toJson(diskInfo)); + volumeToList.add(newVol); + break; + } + } + } + } + } catch (Throwable e) { + if (e instanceof RemoteException) { + s_logger.warn("Encountered remote exception at vCenter, invalidating VMware session context"); + invalidateServiceContext(); + } + throw e; + } finally { + // Cleanup datastores mounted on source host + for (String mountedDatastore : mountedDatastoresAtSource) { + s_logger.debug("Attempting to unmount datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName()); + try { + sourceHyperHost.unmountDatastore(mountedDatastore); + s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName()); + } catch (Exception unmountEx) { + s_logger.warn("Failed to unmount datastore " + mountedDatastore + " at " + sourceHyperHost.getHyperHostName() + ". Seems the datastore is still being used by " + sourceHyperHost.getHyperHostName() + + ". Please unmount manually to clean up."); + } + } + } + + // A populated list of VolumeObjectTO is returned only when volToFiler is not empty; otherwise the list is empty + return volumeToList; + } + + private String getMountedDatastoreName(VmwareHypervisorHost sourceHyperHost, String sourceHostApiVersion, StorageFilerTO filerTo) throws Exception { + String mountedDatastoreName = null; + // If host version is below 5.1 then simultaneous change of VM's datastore and host is not supported. + // So since only the datastore will be changed first, ensure the target datastore is mounted on source host. + if (sourceHostApiVersion.compareTo("5.1") < 0) { + s_logger.debug(String.format("Host: %s version is %s, vMotion without shared storage cannot be done. Check source host has target datastore mounted or can be mounted", sourceHyperHost.getHyperHostName(), sourceHostApiVersion)); + ManagedObjectReference morVolumeDatastoreAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(sourceHyperHost, filerTo.getUuid()); + String volumeDatastoreName = filerTo.getUuid().replace("-", ""); + String volumeDatastoreHost = filerTo.getHost(); + String volumeDatastorePath = filerTo.getPath(); + int volumeDatastorePort = filerTo.getPort(); + + // If datastore is NFS and target datastore is not already mounted on source host then mount the datastore.
+ if (filerTo.getType().equals(StoragePoolType.NetworkFilesystem)) { + if (morVolumeDatastoreAtSource == null) { + morVolumeDatastoreAtSource = sourceHyperHost.mountDatastore(false, volumeDatastoreHost, volumeDatastorePort, volumeDatastorePath, volumeDatastoreName); + if (morVolumeDatastoreAtSource == null) { + throw new Exception("Unable to mount NFS datastore " + volumeDatastoreHost + ":/" + volumeDatastorePath + " on host: " + sourceHyperHost.getHyperHostName()); + } + mountedDatastoreName = volumeDatastoreName; + s_logger.debug("Mounted NFS datastore " + volumeDatastoreHost + ":/" + volumeDatastorePath + " on host: " + sourceHyperHost.getHyperHostName()); + } + } + + // If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration. + if (filerTo.getType().equals(StoragePoolType.VMFS)) { + if (morVolumeDatastoreAtSource == null) { + s_logger.warn("Host: " + sourceHyperHost.getHyperHostName() + " version is below 5.1, target VMFS datastore(s) need to be manually mounted on host for successful storage migration."); + throw new Exception("Target VMFS datastore: " + volumeDatastorePath + " is not mounted on host: " + sourceHyperHost.getHyperHostName()); + } + DatastoreMO dsAtSourceMo = new DatastoreMO(getServiceContext(), morVolumeDatastoreAtSource); + String srcHostValue = sourceHyperHost.getMor().getValue(); + if (!dsAtSourceMo.isAccessibleToHost(srcHostValue)) { + s_logger.warn("Host " + sourceHyperHost.getHyperHostName() + " version is below 5.1, target VMFS datastore(s) need to be accessible to host for a successful storage migration."); + throw new Exception("Target VMFS datastore: " + volumeDatastorePath + " is not accessible on host: " + sourceHyperHost.getHyperHostName()); + } + } + } + return mountedDatastoreName; + } } diff --git a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java index 2463e75c01d3..806376c75b85 100644 --- a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java +++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java @@ -26,6 +26,20 @@ import javax.inject.Inject; +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; +import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; + import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.MigrateWithStorageAnswer; @@ -53,18 +67,6 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.vm.VMInstanceVO; import com.cloud.vm.dao.VMInstanceDao; -import 
org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; -import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; -import org.apache.cloudstack.engine.subsystem.api.storage.StrategyPriority; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; -import org.apache.cloudstack.framework.async.AsyncCompletionCallback; -import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; -import org.apache.cloudstack.storage.to.VolumeObjectTO; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; @Component public class VmwareStorageMotionStrategy implements DataMotionStrategy { @@ -88,9 +90,7 @@ public StrategyPriority canHandle(DataObject srcData, DataObject destData) { if (isOnVmware(srcData, destData) && isOnPrimary(srcData, destData) && isVolumesOnly(srcData, destData) - && isDettached(srcData) - && isIntraCluster(srcData, destData) - && isStoreScopeEqual(srcData, destData)) { + && isDettached(srcData)) { if (s_logger.isDebugEnabled()) { String msg = String.format("%s can handle the request because %d(%s) and %d(%s) share the VMware cluster %s (== %s)" , this.getClass() @@ -185,20 +185,41 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As throw new UnsupportedOperationException(); } StoragePool sourcePool = (StoragePool) srcData.getDataStore(); + ScopeType sourceScopeType = srcData.getDataStore().getScope().getScopeType(); StoragePool targetPool = (StoragePool) destData.getDataStore(); + ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType(); + Long hostId = null; + String targetClusterHostGuid = null; + // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM + if (ScopeType.CLUSTER.equals(sourceScopeType) && ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) { + // Without host vMotion might fail between non-shared storages with error similar to, + // https://kb.vmware.com/s/article/1003795 + List hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId()); + if (CollectionUtils.isNotEmpty(hosts)) { + targetClusterHostGuid = hosts.get(0).getGuid(); + } + if (targetClusterHostGuid == null) { + throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages"); + } + } else if (ScopeType.CLUSTER.equals(sourceScopeType)) { + hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId()); + if (hostId == null) { + throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName()); + } + } else if (ScopeType.CLUSTER.equals(targetScopeType)) { + hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId()); + if (hostId == null) { + throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + targetPool.getName()); + } + } MigrateVolumeCommand cmd = new MigrateVolumeCommand(srcData.getId() , srcData.getTO().getPath() , sourcePool - , targetPool); + , targetPool + , targetClusterHostGuid); // 
OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding Answer answer; - ScopeType scopeType = srcData.getDataStore().getScope().getScopeType(); - if (ScopeType.CLUSTER == scopeType) { - // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM - Long hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId()); - if (hostId == null) { - throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in cluster: " + sourcePool.getName()); - } + if (hostId != null) { answer = agentMgr.easySend(hostId, cmd); } else { answer = agentMgr.sendTo(sourcePool.getDataCenterId(), HypervisorType.VMware, cmd); diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index 2ae35fc08ad7..ce48d8519b81 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -47,6 +47,7 @@ import com.cloud.service.dao.ServiceOfferingDao; import com.cloud.service.dao.ServiceOfferingDetailsDao; import com.cloud.storage.StoragePool; +import com.cloud.storage.Volume; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.component.AdapterBase; @@ -297,7 +298,7 @@ public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location return false; } - public List finalizeMigrate(VirtualMachine vm, StoragePool destination) { + public List finalizeMigrate(VirtualMachine vm, StoragePool destination, Map volumeToPool) { return null; } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 80b54c09dfa2..98524b6ea692 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -1279,7 +1279,7 @@ public Ternary, Integer>, List, Map, Integer>, List, Map hypervisorCapabilitiesList = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); + if (hypervisorCapabilitiesList != null) { + for (HypervisorCapabilitiesVO hypervisorCapabilities : hypervisorCapabilitiesList) { + if (hypervisorCapabilities.isStorageMotionSupported()) { + hostCapabilities = hypervisorCapabilities; + break; + } + } + } + } + hostSupportsStorageMigration = hostCapabilities != null && hostCapabilities.isStorageMotionSupported(); + } + if (hostSupportsStorageMigration && hasSuitablePoolsForVolume(volume, host, vmProfile)) { requiresStorageMotion.put(host, true); } else { iterator.remove(); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index f99a0663e298..e21f2c5a6d7a 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -16,6 +16,8 @@ // under the License. 
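
Note on the capability lookups: the KVM fallback above in ManagementServerImpl is repeated below in UserVmManagerImpl for both the source and the destination host. A sketch of how the shared lookup could be factored out, assuming only the DAO methods already used in these hunks; the helper itself is hypothetical:

    private HypervisorCapabilitiesVO findStorageMotionCapabilities(HypervisorType type, String version) {
        HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(type, version);
        if (capabilities == null && HypervisorType.KVM.equals(type)) {
            // KVM capability rows are not always present per version, so fall back to
            // any KVM row that reports storage motion support.
            List<HypervisorCapabilitiesVO> allKvm = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM);
            if (allKvm != null) {
                for (HypervisorCapabilitiesVO c : allKvm) {
                    if (c.isStorageMotionSupported()) {
                        return c;
                    }
                }
            }
        }
        return capabilities;
    }
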
package com.cloud.vm; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.io.IOException; import java.io.StringReader; import java.io.UnsupportedEncodingException; @@ -47,8 +49,6 @@ import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; -import com.cloud.exception.UnsupportedServiceException; -import com.cloud.hypervisor.Hypervisor; import org.apache.cloudstack.acl.ControlledEntity.ACLType; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.affinity.AffinityGroupService; @@ -188,6 +188,7 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.StorageUnavailableException; +import com.cloud.exception.UnsupportedServiceException; import com.cloud.exception.VirtualMachineMigrationException; import com.cloud.gpu.GPU; import com.cloud.ha.HighAvailabilityManager; @@ -195,6 +196,7 @@ import com.cloud.host.HostVO; import com.cloud.host.Status; import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; import com.cloud.hypervisor.HypervisorCapabilitiesVO; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; @@ -328,8 +330,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; - public class UserVmManagerImpl extends ManagerBase implements UserVmManager, VirtualMachineGuru, UserVmService, Configurable { private static final Logger s_logger = Logger.getLogger(UserVmManagerImpl.class); @@ -5494,7 +5494,7 @@ public UserVm getUserVm(long vmId) { } @Override - public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { + public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool, Map volumeToPool) { // access check - only root admin can migrate VM Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { @@ -5530,14 +5530,34 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { } } + Map volumeToPoolIds = new HashMap<>(); + + if (destPool != null) { + checkDestinationHypervisorType(destPool, vm); + } else if (MapUtils.isNotEmpty(volumeToPool)) { + Long poolClusterId = null; + for (Map.Entry entry : volumeToPool.entrySet()) { + Volume volume = _volsDao.findByUuid(entry.getKey()); + StoragePoolVO pool = _storagePoolDao.findPoolByUUID(entry.getValue()); + if (poolClusterId != null && + !(ScopeType.CLUSTER.equals(pool.getScope()) || ScopeType.HOST.equals(pool.getScope())) && + !poolClusterId.equals(pool.getClusterId())) { + throw new InvalidParameterValueException("VM's disk cannot be migrated, input destination storage pools belong to different clusters"); + } + if (pool.getClusterId() != null) { + poolClusterId = pool.getClusterId(); + } + checkDestinationHypervisorType(pool, vm); + volumeToPoolIds.put(volume.getId(), pool.getId()); + } + } + // Check that Vm does not have VM Snapshots if (_vmSnapshotDao.findByVm(vmId).size() > 0) { throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); } - checkDestinationHypervisorType(destPool, vm); - - _itMgr.storageMigration(vm.getUuid(), destPool); + _itMgr.storageMigration(vm.getUuid(), destPool, volumeToPoolIds); return _vmDao.findById(vm.getId()); } @@ -5998,30 +6018,38 @@ public VirtualMachine 
migrateVirtualMachineWithVolume(Long vmId, Host destinatio } } - if (!srcHostVersion.equals(destinationHostVersion)) { - throw new CloudRuntimeException("The source and destination hosts are not of the same type and version. Source hypervisor type and version: " + - srcHost.getHypervisorType().toString() + " " + srcHost.getHypervisorVersion() + ", Destination hypervisor type and version: " + - destinationHost.getHypervisorType().toString() + " " + destinationHost.getHypervisorVersion()); - } - - HypervisorCapabilitiesVO capabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion()); - - if (capabilities == null && HypervisorType.KVM.equals(srcHost.getHypervisorType())) { + HypervisorCapabilitiesVO sourceCapabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(srcHost.getHypervisorType(), srcHost.getHypervisorVersion()); + if (sourceCapabilities == null && HypervisorType.KVM.equals(srcHost.getHypervisorType())) { List lstHypervisorCapabilities = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); - if (lstHypervisorCapabilities != null) { for (HypervisorCapabilitiesVO hypervisorCapabilities : lstHypervisorCapabilities) { if (hypervisorCapabilities.isStorageMotionSupported()) { - capabilities = hypervisorCapabilities; - + sourceCapabilities = hypervisorCapabilities; break; } } } } + if (sourceCapabilities == null || !sourceCapabilities.isStorageMotionSupported()) { + throw new CloudRuntimeException("Migration with storage isn't supported for source host ID: " + srcHost.getUuid() + " on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion()); + } - if (!capabilities.isStorageMotionSupported()) { - throw new CloudRuntimeException("Migration with storage isn't supported on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion()); + if (!srcHostVersion.equals(destinationHostVersion)) { + HypervisorCapabilitiesVO destinationCapabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(destinationHost.getHypervisorType(), destinationHostVersion); + if (destinationCapabilities == null && HypervisorType.KVM.equals(destinationHost.getHypervisorType())) { + List lstHypervisorCapabilities = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); + if (lstHypervisorCapabilities != null) { + for (HypervisorCapabilitiesVO hypervisorCapabilities : lstHypervisorCapabilities) { + if (hypervisorCapabilities.isStorageMotionSupported()) { + destinationCapabilities = hypervisorCapabilities; + break; + } + } + } + } + if (destinationCapabilities == null || !destinationCapabilities.isStorageMotionSupported()) { + throw new CloudRuntimeException("Migration with storage isn't supported for target host ID: " + destinationHost.getUuid() + " on hypervisor " + destinationHost.getHypervisorType() + " of version " + destinationHostVersion); + } } // Check if destination host is up. diff --git a/test/integration/component/test_interpod_migration.py b/test/integration/component/test_interpod_migration.py new file mode 100644 index 000000000000..b8b2e9759753 --- /dev/null +++ b/test/integration/component/test_interpod_migration.py @@ -0,0 +1,464 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" BVT tests for Virtual Machine Life Cycle +""" +# Import Local Modules +from marvin.cloudstackTestCase import cloudstackTestCase +from marvin.cloudstackAPI import (attachVolume, + detachVolume, + deleteVolume, + attachIso, + detachIso, + deleteIso, + startVirtualMachine, + stopVirtualMachine, + migrateVirtualMachineWithVolume) +from marvin.lib.utils import (cleanup_resources) +from marvin.lib.base import (Account, + Host, + Pod, + StoragePool, + ServiceOffering, + DiskOffering, + VirtualMachine, + Iso, + Volume) +from marvin.lib.common import (get_domain, + get_zone, + get_template) +from marvin.lib.decoratorGenerators import skipTestIf +from marvin.codes import FAILED, PASS +from nose.plugins.attrib import attr +# Import System modules +import time + +_multiprocess_shared_ = True + + +class TestVMMigration(cloudstackTestCase): + + @classmethod + def setUpClass(cls): + testClient = super(TestVMMigration, cls).getClsTestClient() + cls.apiclient = testClient.getApiClient() + cls.services = testClient.getParsedTestDataConfig() + + # Get Zone, Domain and templates + cls.domain = get_domain(cls.apiclient) + cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests()) + cls.services['mode'] = cls.zone.networktype + + cls.cleanup = [] + cls.hypervisorNotSupported = False + cls.hypervisor = cls.testClient.getHypervisorInfo() + if cls.hypervisor.lower() not in ['vmware']: + cls.hypervisorNotSupported = True + + if cls.hypervisorNotSupported == False: + cls.pods = Pod.list(cls.apiclient, zoneid=cls.zone.id, listall=True) + if len(cls.pods) < 2: + assert False, "Not enough pods found: %d" % len(cls.pods) + cls.computeOfferingStorageTags = None + cls.diskOfferingStorageTags = None + + for pod in cls.pods: + podStoragePools = StoragePool.list( + cls.apiclient, + scope='CLUSTER', + podid=pod.id) + if len(podStoragePools) < 1: + assert False, "Not enough CLUSTER scope storage pools found for pod: %s" % pod.id + taggedPool = [] + for pool in podStoragePools: + if pool.tags != None and len(pool.tags) > 0: + taggedPool.append(pool) + if len(taggedPool) < 2: + assert False, "No CLUSTER scope, tagged storage pools found for pod: %s" % pod.id + if cls.computeOfferingStorageTags == None: + cls.computeOfferingStorageTags = taggedPool[0].tags + if cls.diskOfferingStorageTags == None: + cls.diskOfferingStorageTags = taggedPool[1].tags + + template = get_template( + cls.apiclient, + cls.zone.id, + cls.services["ostype"]) + if template == FAILED: + assert False, "get_template() failed to return template with description %s" % cls.services["ostype"] + + # Set Zones and disk offerings + cls.services["small"]["zoneid"] = cls.zone.id + cls.services["small"]["template"] = template.id + + cls.services["iso"]["zoneid"] = cls.zone.id + + cls.account = Account.create( + cls.apiclient, + cls.services["account"], + domainid=cls.domain.id) + cls.debug(cls.account.id) + + compute_offering_service = cls.services["service_offerings"]["tiny"].copy() + 
compute_offering_service["tags"] = cls.computeOfferingStorageTags + cls.service_offering = ServiceOffering.create( + cls.apiclient, + compute_offering_service) + disk_offering_service = cls.services["disk_offering"].copy() + disk_offering_service["disksize"] = 1 + cls.untagged_disk_offering = DiskOffering.create( + cls.apiclient, + disk_offering_service) + disk_offering_service["tags"] = cls.diskOfferingStorageTags + cls.tagged_disk_offering = DiskOffering.create( + cls.apiclient, + disk_offering_service) + cls.hostId = None + host = cls.getOldestHost(cls.pods[0].id, cls.pods[1].id) + if host != None: + cls.hostId = host.id + + cls.cleanup = [ + cls.service_offering, + cls.untagged_disk_offering, + cls.tagged_disk_offering, + cls.account + ] + + @classmethod + def tearDownClass(cls): + try: + cleanup_resources(cls.apiclient, cls.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + def setUp(self): + self.apiclient = self.testClient.getApiClient() + self.dbclient = self.testClient.getDbConnection() + self.virtual_machine = None + if self.hypervisorNotSupported == False: + self.virtual_machine = VirtualMachine.create( + self.apiclient, + self.services["small"], + accountid=self.account.name, + domainid=self.account.domainid, + serviceofferingid=self.service_offering.id, + mode=self.services['mode'], + hostid=self.hostId + ) + + self.cleanup = [] + + def tearDown(self): + try: + if self.virtual_machine != None: + self.virtual_machine.delete(self.apiclient, expunge=True) + # Clean up, terminate the created accounts, domains etc + cleanup_resources(self.apiclient, self.cleanup) + except Exception as e: + raise Exception("Warning: Exception during cleanup : %s" % e) + + return + + @classmethod + def getOldestHost(cls, pod1_id, pod2_id): + selectedHost = None + hosts = Host.list(cls.apiclient, type='Routing', podid=pod1_id) + morehosts = Host.list(cls.apiclient, type='Routing', podid=pod2_id) + if isinstance(morehosts, list) and len(morehosts)>0: + if isinstance(hosts, list) and len(hosts)>0: + hosts.extend(morehosts) + if isinstance(hosts, list) and len(hosts)>0: + selectedHost = hosts[0] + # Very basic way to get lowest version host + for host in hosts: + if int(host.hypervisorversion.replace(".", "")) < int(selectedHost.hypervisorversion.replace(".", "")): + selectedHost = host + return selectedHost + + @skipTestIf("hypervisorNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_01_migrate_running_vm(self): + """Test Running Virtual Machine Migration Without DATA disk or ISO + """ + # Validate the following: + # 1. Start VM if not running + # 2. 
Migrate VM to a different pod multiple times + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id) + if vmResponse.state != 'Running': + self.startVm(vmResponse.id) + migrationCount = 1 + while migrationCount > 0: + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id, 'Running') + hostId = self.getDifferentPodHost(vmResponse.id, vmResponse.hostid).id + self.debug("#%d migration, current host ID: %s, new host ID: %s" % ((2-migrationCount), vmResponse.hostid, hostId)) + self.migrateVmWithVolume(vmResponse.id, hostId) + migrationCount = migrationCount - 1 + if migrationCount > 0: + time.sleep(self.services["sleep"]) + return + + @skipTestIf("hypervisorNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_02_migrate_running_vm_with_disk_and_iso(self): + """Test Running Virtual Machine Migration With DATA disks or ISO + """ + # Validate the following: + # 1. Start VM if not running + # 2. Add disks and ISO to the VM + # 3. Migrate VM to a different pod multiple times + # 4. Remove disks and ISO from the VM + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id) + if vmResponse.state != 'Running': + self.startVm(vmResponse.id) + vol1 = self.addVolumeToVm(vmResponse.id, self.tagged_disk_offering) + vol2 = self.addVolumeToVm(vmResponse.id, self.untagged_disk_offering) + # self.addIsoToVm(vmResponse.id) + migrationCount = 1 + while migrationCount > 0: + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id, 'Running') + hostId = self.getDifferentPodHost(vmResponse.id, vmResponse.hostid).id + self.debug("#%d migration, current host ID: %s, new host ID: %s" % ((2-migrationCount), vmResponse.hostid, hostId)) + self.migrateVmWithVolume(vmResponse.id, hostId) + migrationCount = migrationCount - 1 + if migrationCount > 0: + time.sleep(self.services["sleep"]) + self.removeVolumeFromVm(vol1.id) + self.removeVolumeFromVm(vol2.id) + # self.removeIsoFromVm(vmResponse.id, vmResponse.isoid) + return + + @skipTestIf("hypervisorNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_03_migrate_stopped_vm(self): + """Test Stopped Virtual Machine Migration Without DATA disk or ISO + """ + # Validate the following: + # 1. Stop VM if not already stopped + # 2. Migrate VM to a different pod multiple times with volume to pool mapping + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id) + if vmResponse.state != 'Stopped': + self.stopVm(vmResponse.id) + migrationCount = 3 + while migrationCount > 0: + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id, 'Stopped') + migrateTo = self.getDifferentPodVolumeStoragePoolMapping(vmResponse.id) + self.debug("#%d migration, mapping: %s" % ((4-migrationCount), migrateTo)) + self.migrateVmWithVolume(vmResponse.id, None, migrateTo) + migrationCount = migrationCount - 1 + if migrationCount > 0: + time.sleep(self.services["sleep"]) + return + + @skipTestIf("hypervisorNotSupported") + @attr(tags=["devcloud", "advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") + def test_04_migrate_stopped_vm_with_disk_and_iso(self): + """Test Stopped Virtual Machine Migration With DATA disk or ISO + """ + # Validate the following: + # 1. Start VM if not running + # 2. Add disks and ISO to the VM + # 3. Stop the VM + # 4. Migrate VM to a different pod multiple times with volume to pool mapping + # 5. 
Start VM and remove disks and ISO from the VM + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id) + if vmResponse.state != 'Running': + self.startVm(vmResponse.id) + vol1 = self.addVolumeToVm(vmResponse.id, self.tagged_disk_offering) + vol2 = self.addVolumeToVm(vmResponse.id, self.untagged_disk_offering) + # self.addIsoToVm(vmResponse.id) + self.stopVm(vmResponse.id) + migrationCount = 3 + while migrationCount > 0: + vmResponse = self.getVmVerifiedResponse(self.virtual_machine.id, 'Stopped') + migrateTo = self.getDifferentPodVolumeStoragePoolMapping(vmResponse.id) + self.debug("#%d migration, mapping: %s" % ((4-migrationCount), migrateTo)) + self.migrateVmWithVolume(vmResponse.id, None, migrateTo) + migrationCount = migrationCount - 1 + if migrationCount > 0: + time.sleep(self.services["sleep"]) + self.removeVolumeFromVm(vol1.id) + self.removeVolumeFromVm(vol2.id) + # self.removeIsoFromVm(vmResponse.id, vmResponse.isoid) + return + + def startVm(self, vm_id): + startVirtualMachineCmd = startVirtualMachine.startVirtualMachineCmd() + startVirtualMachineCmd.id = vm_id + self.apiclient.startVirtualMachine(startVirtualMachineCmd) + + def stopVm(self, vm_id): + stopVirtualMachineCmd = stopVirtualMachine.stopVirtualMachineCmd() + stopVirtualMachineCmd.id = vm_id + self.apiclient.stopVirtualMachine(stopVirtualMachineCmd) + + def addVolumeToVm(self, vm_id, disk_offering): + volume = Volume.create( + self.apiclient, + self.services["volume"], + zoneid=self.zone.id, + diskofferingid=disk_offering.id, + account=self.account.name, + domainid=self.account.domainid) + cmd = attachVolume.attachVolumeCmd() + cmd.id = volume.id + cmd.virtualmachineid = vm_id + attachedVolume = self.apiclient.attachVolume(cmd) + return attachedVolume + + def removeVolumeFromVm(self, volume_id): + cmd = detachVolume.detachVolumeCmd() + cmd.id = volume_id + detachedVolume = self.apiclient.detachVolume(cmd) + cmd = deleteVolume.deleteVolumeCmd() + cmd.id = volume_id + self.apiclient.deleteVolume(cmd) + return + + def addIsoToVm(self, vm_id): + iso = Iso.create( + self.apiclient, + self.services["iso"], + account=self.account.name, + domainid=self.account.domainid) + cmd = attachIso.attachIsoCmd() + cmd.id = iso.id + cmd.virtualmachineid = vm_id + attachedIso = self.apiclient.attachIso(cmd) + return + + def removeIsoFromVm(self, vm_id, iso_id): + cmd = detachIso.detachIsoCmd() + cmd.virtualmachineid = vm_id + self.apiclient.detachIso(cmd) + cmd = deleteIso.deleteIsoCmd() + cmd.id = iso_id + self.apiclient.deleteIso(cmd) + return + + def getVmVerifiedResponse(self, vm_id, state=None): + list_vm_response = VirtualMachine.list( + self.apiclient, + id=self.virtual_machine.id) + self.debug( + "Verify listVirtualMachines response for virtual machine: %s" \ + % self.virtual_machine.id) + self.assertEqual( + isinstance(list_vm_response, list), + True, + "Check list response returns a valid list") + self.assertNotEqual( + len(list_vm_response), + 0, + "Check VM available in List Virtual Machines") + vmResponse = list_vm_response[0] + if state != None: + self.assertEqual( + vmResponse.state, + state, + "VM not in state: %s" % state) + return vmResponse + + def getDifferentPodHost(self, vm_id, host_id): + host = None + currentHost = Host.list(self.apiclient, id=host_id) + self.assertEqual( + isinstance(currentHost, list), + True, + "Check host list response returns a valid list") + self.assertNotEqual( + len(currentHost), + 0, + "Check current host for VM ID: %s available in List Hosts" % vm_id) + currentHost = 
currentHost[0] + hosts = Host.listForMigration(self.apiclient, virtualmachineid=vm_id) + self.assertEqual( + isinstance(hosts, list), + True, + "Check host list response returns a valid list") + self.assertNotEqual( + len(hosts), + 0, + "Hosts suitable for migration for VM ID: %s not found" % vm_id) + for hostForMigration in hosts: + if hostForMigration.podid != currentHost.podid: + host = hostForMigration + break + self.assertNotEqual( + host, + None, + "Host suitable for migration for VM ID: %s in a different pod not found" % vm_id) + return host + + def getPodStoragePoolWithTags(self, pod_id, tags=None): + pool = None + storage_pools = StoragePool.list( + self.apiclient, + podid=pod_id, + listall=True) + if isinstance(storage_pools, list) and len(storage_pools) > 0: + if tags != None: + for storage_pool in storage_pools: + if storage_pool.tags == tags: + pool = storage_pool + break + else: + pool = storage_pools[0] + return pool + + def getDifferentPodVolumeStoragePoolMapping(self, vm_id): + rootVolume = Volume.list(self.apiclient, virtualmachineid=vm_id, listall=True, type='ROOT') + self.assertEqual( + isinstance(rootVolume, list), + True, + "Check VM volumes list response returns a valid list") + self.assertNotEqual( + len(rootVolume), + 0, + "Check VM ROOT volume available in List Volumes") + rootVolume = rootVolume[0] + volumeStoragePool = StoragePool.list( + self.apiclient, + id=rootVolume.storageid) + self.assertEqual( + isinstance(volumeStoragePool, list), + True, + "Check VM ROOT Volume storage list response returns a valid list") + self.assertNotEqual( + len(volumeStoragePool), + 0, + "Check VM ROOT Volume storage available in List Storage Pools") + volumeStoragePool = volumeStoragePool[0] + podId = self.pods[0].id + if volumeStoragePool.podid == podId: + podId = self.pods[1].id + pool = self.getPodStoragePoolWithTags(podId, self.computeOfferingStorageTags) + self.assertNotEqual( + pool, + None, + "Target storage pool mapping for VM ID: %s failed" % vm_id) + migrateTo = { "volume": rootVolume.id, "pool": pool.id} + return [migrateTo] + + def migrateVmWithVolume(self, vm_id, host_id, migrate_to=None): + migrateVirtualMachineWithVolumeCmd = migrateVirtualMachineWithVolume.migrateVirtualMachineWithVolumeCmd() + migrateVirtualMachineWithVolumeCmd.virtualmachineid = vm_id + if host_id != None: + migrateVirtualMachineWithVolumeCmd.hostid = host_id + if migrate_to != None: + migrateVirtualMachineWithVolumeCmd.migrateto = migrate_to + response = self.apiclient.migrateVirtualMachineWithVolume(migrateVirtualMachineWithVolumeCmd) + return response From 1fa0c79750038673a72b1c75e0d67b72f10b5847 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Mon, 5 Oct 2020 13:51:39 +0530 Subject: [PATCH 02/31] ui changes Signed-off-by: Abhishek Kumar --- ui/scripts/instances.js | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index 3a272d9cd243..312d4187a4e2 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -2582,12 +2582,32 @@ } }, action: function(args) { + var rootVolume = {}; $.ajax({ - url: createURL("migrateVirtualMachine&storageid=" + args.data.storageId + "&virtualmachineid=" + args.context.instances[0].id), + url: createURL("listVolumes&virtualmachineid=" + args.context.instances[0].id), dataType: "json", + async: false, + success: function(json) { + var volumes = json.listvolumesresponse.volume; + $(volumes).each(function() { + if (this.type == 'ROOT') { + rootVolume = this; + return
false; + } + }); + } + }); + var data = { + 'virtualmachineid': args.context.instances[0].id, + 'migrateto[0].volume': rootVolume.id, + 'migrateto[0].pool': args.data.storageId + } + $.ajax({ + url: createURL("migrateVirtualMachineWithVolume"), + data: data, async: true, success: function(json) { - var jid = json.migratevirtualmachineresponse.jobid; + var jid = json.migratevirtualmachinewithvolumeresponse.jobid; args.response.success({ _custom: { jobId: jid, From c41d5fba6a0e6c0bfda77ebf0465445a4cb86986 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Mon, 5 Oct 2020 15:38:26 +0530 Subject: [PATCH 03/31] null hypervisor check Signed-off-by: Abhishek Kumar --- .../java/com/cloud/server/ManagementServerImpl.java | 10 +++++++++- .../src/main/java/com/cloud/vm/UserVmManagerImpl.java | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 98524b6ea692..f96129ae5045 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -1273,6 +1273,10 @@ public Ternary, Integer>, List, Map, Integer> allHostsPair = null; List allHosts = null; final Map requiresStorageMotion = new HashMap(); @@ -1289,6 +1293,10 @@ public Ternary, Integer>, List, Map iterator = allHosts.iterator(); iterator.hasNext();) { final Host host = iterator.next(); + String hostHypervisorVersion = host.getHypervisorVersion(); + if (HypervisorType.KVM.equals(host.getHypervisorType()) && hostHypervisorVersion == null) { + hostHypervisorVersion = ""; + } if (volClusterId != null) { if (storagePool.isLocal() || !host.getClusterId().equals(volClusterId) || usesLocal) { @@ -1301,7 +1309,7 @@ public Ternary, Integer>, List, Map hypervisorCapabilitiesList = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index e21f2c5a6d7a..6519ce87d1d8 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -6034,7 +6034,7 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio throw new CloudRuntimeException("Migration with storage isn't supported for source host ID: " + srcHost.getUuid() + " on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion()); } - if (!srcHostVersion.equals(destinationHostVersion)) { + if (srcHostVersion != null && !srcHostVersion.equals(destinationHostVersion)) { HypervisorCapabilitiesVO destinationCapabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(destinationHost.getHypervisorType(), destinationHostVersion); if (destinationCapabilities == null && HypervisorType.KVM.equals(destinationHost.getHypervisorType())) { List lstHypervisorCapabilities = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); From 003af0e77d26be14e1571fd8dd3e145073ed3da4 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 6 Oct 2020 12:45:28 +0530 Subject: [PATCH 04/31] review changes Signed-off-by: Abhishek Kumar --- .../com/cloud/hypervisor/HypervisorGuru.java | 2 +- .../agent/api/MigrateVmToPoolCommand.java | 22 +- .../api/storage/MigrateVolumeCommand.java | 10 +- .../com/cloud/vm/VirtualMachineManager.java | 3 +- .../service/VolumeOrchestrationService.java | 2 +- 
.../cloud/vm/VirtualMachineManagerImpl.java | 131 ++++--- .../com/cloud/vm/VmWorkStorageMigration.java | 8 +- .../orchestration/VolumeOrchestrator.java | 83 ++-- .../dao/HypervisorCapabilitiesDao.java | 2 + .../dao/HypervisorCapabilitiesDaoImpl.java | 17 + .../com/cloud/hypervisor/guru/VMwareGuru.java | 57 ++- .../vmware/resource/VmwareResource.java | 360 +++--------------- .../motion/VmwareStorageMotionStrategy.java | 8 +- .../cloud/hypervisor/HypervisorGuruBase.java | 2 +- .../cloud/server/ManagementServerImpl.java | 38 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 51 +-- 16 files changed, 246 insertions(+), 550 deletions(-) diff --git a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java index 81befda005fb..96518ac17693 100644 --- a/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java +++ b/api/src/main/java/com/cloud/hypervisor/HypervisorGuru.java @@ -100,5 +100,5 @@ boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location, Backu * @param destination the primary storage pool to migrate to * @return a list of commands to perform for a successful migration */ - List finalizeMigrate(VirtualMachine vm, StoragePool destination, Map volumeToPool); + List finalizeMigrate(VirtualMachine vm, Map volumeToPool); } diff --git a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java index ad4a47022807..066afb218230 100644 --- a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java +++ b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java @@ -33,10 +33,9 @@ public class MigrateVmToPoolCommand extends Command { private Collection volumes; private String vmName; - private String destinationPool; private boolean executeInSequence = false; private List> volumeToFilerAsList; - private String targetClusterHost; + private String hostGuidInTargetCluster; protected MigrateVmToPoolCommand() { } @@ -45,17 +44,16 @@ protected MigrateVmToPoolCommand() { * * @param vmName the name of the VM to migrate * @param volumes used to supply feedback on vmware generated names - * @param destinationPool the primary storage pool to migrate the VM to + * @param volumeToFilerTo the volume to primary storage pool map to migrate the VM to * @param executeInSequence */ - public MigrateVmToPoolCommand(String vmName, Collection volumes, String destinationPool, - List>volumeToFilerto, String targetHost, + public MigrateVmToPoolCommand(String vmName, Collection volumes, + List>volumeToFilerTo, String hostGuidInTargetCluster, boolean executeInSequence) { this.vmName = vmName; this.volumes = volumes; - this.destinationPool = destinationPool; - this.targetClusterHost = targetHost; - this.volumeToFilerAsList = volumeToFilerto; + this.hostGuidInTargetCluster = hostGuidInTargetCluster; + this.volumeToFilerAsList = volumeToFilerTo; this.executeInSequence = executeInSequence; } @@ -63,10 +61,6 @@ public Collection getVolumes() { return volumes; } - public String getDestinationPool() { - return destinationPool; - } - public String getVmName() { return vmName; } @@ -75,8 +69,8 @@ public List> getVolumeToFilerAsList() { return volumeToFilerAsList; } - public String getTargetClusterHost() { - return targetClusterHost; + public String getHostGuidInTargetCluster() { + return hostGuidInTargetCluster; } @Override diff --git a/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java 
b/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java index 3f2c425a103b..f3ca63b4cd3a 100644 --- a/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java +++ b/core/src/main/java/com/cloud/agent/api/storage/MigrateVolumeCommand.java @@ -34,7 +34,7 @@ public class MigrateVolumeCommand extends Command { StorageFilerTO sourcePool; String attachedVmName; Volume.Type volumeType; - String targetClusterHost; + String hostGuidInTargetCluster; private DataTO srcData; private DataTO destData; @@ -71,7 +71,7 @@ public MigrateVolumeCommand(DataTO srcData, DataTO destData, Map public MigrateVolumeCommand(long volumeId, String volumePath, StoragePool sourcePool, StoragePool targetPool, String targetClusterHost) { this(volumeId, volumePath, sourcePool, targetPool); - this.targetClusterHost = targetClusterHost; + this.hostGuidInTargetCluster = targetClusterHost; } @Override @@ -131,11 +131,11 @@ public Map getDestDetails() { return destDetails; } - public String getTargetClusterHost() { - return targetClusterHost; + public String getHostGuidInTargetCluster() { + return hostGuidInTargetCluster; } public int getWaitInMillSeconds() { return getWait() * 1000; } -} \ No newline at end of file +} diff --git a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java index 09a754f8fd8e..2f1a0418f393 100644 --- a/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java +++ b/engine/api/src/main/java/com/cloud/vm/VirtualMachineManager.java @@ -40,7 +40,6 @@ import com.cloud.offering.DiskOffering; import com.cloud.offering.DiskOfferingInfo; import com.cloud.offering.ServiceOffering; -import com.cloud.storage.StoragePool; import com.cloud.template.VirtualMachineTemplate; import com.cloud.user.Account; import com.cloud.uservm.UserVm; @@ -151,7 +150,7 @@ void advanceReboot(String vmUuid, Map param VirtualMachine findById(long vmId); - void storageMigration(String vmUuid, StoragePool storagePoolId, Map volumeToPool); + void storageMigration(String vmUuid, Map volumeToPool); /** * @param vmInstance diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 7831c6bde25d..a9d7e0caf35a 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -109,7 +109,7 @@ DiskProfile allocateRawVolume(Type type, String name, DiskOffering offering, Lon void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHost, Host destHost, Map volumeToPool); - boolean storageMigration(VirtualMachineProfile vm, StoragePool destPool, Map volumeToPool) throws StorageUnavailableException; + boolean storageMigration(VirtualMachineProfile vm, Map volumeToPool) throws StorageUnavailableException; void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest); diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 8b0385528c9e..95d9f1f14ae4 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -23,14 +23,17 @@ 
import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import java.util.TimeZone; import java.util.UUID; import java.util.concurrent.Executors; @@ -50,7 +53,6 @@ import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.cloudstack.engine.orchestration.service.VolumeOrchestrationService; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.PrimaryDataStoreInfo; import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; import org.apache.cloudstack.framework.ca.Certificate; import org.apache.cloudstack.framework.config.ConfigKey; @@ -925,7 +927,7 @@ public void advanceStart(final String vmUuid, final Map volumeToPool) { + public void storageMigration(final String vmUuid, final Map volumeToPool) { final AsyncJobExecutionContext jobContext = AsyncJobExecutionContext.getCurrentExecutionContext(); if (jobContext.isJobDispatchedBy(VmWorkConstants.VM_WORK_JOB_DISPATCHER)) { // avoid re-entrance @@ -2132,14 +2134,14 @@ public void storageMigration(final String vmUuid, final StoragePool destPool, fi final VirtualMachine vm = _vmDao.findByUuid(vmUuid); placeHolder = createPlaceHolderWork(vm.getId()); try { - orchestrateStorageMigration(vmUuid, destPool, volumeToPool); + orchestrateStorageMigration(vmUuid, volumeToPool); } finally { if (placeHolder != null) { _workJobDao.expunge(placeHolder.getId()); } } } else { - final Outcome outcome = migrateVmStorageThroughJobQueue(vmUuid, destPool, volumeToPool); + final Outcome outcome = migrateVmStorageThroughJobQueue(vmUuid, volumeToPool); try { final VirtualMachine vm = outcome.get(); @@ -2160,10 +2162,10 @@ public void storageMigration(final String vmUuid, final StoragePool destPool, fi } } - private void orchestrateStorageMigration(final String vmUuid, final StoragePool destPool, final Map volumeToPool) { + private void orchestrateStorageMigration(final String vmUuid, final Map volumeToPool) { final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); - Map volumeToPoolMap = prepareVmStorageMigration(vm, destPool, volumeToPool); + Map volumeToPoolMap = prepareVmStorageMigration(vm, volumeToPool); try { if(s_logger.isDebugEnabled()) { @@ -2172,7 +2174,7 @@ private void orchestrateStorageMigration(final String vmUuid, final StoragePool vm.getInstanceName())); } - migrateThroughHypervisorOrStorage(destPool, volumeToPoolMap, vm); + migrateThroughHypervisorOrStorage(vm, volumeToPoolMap); } catch (ConcurrentOperationException | InsufficientCapacityException // possibly InsufficientVirtualNetworkCapacityException or InsufficientAddressCapacityException @@ -2191,7 +2193,7 @@ private void orchestrateStorageMigration(final String vmUuid, final StoragePool } } - private Answer[] attemptHypervisorMigration(VMInstanceVO vm, StoragePool destPool, Map volumeToPool, Long hostId) { + private Answer[] attemptHypervisorMigration(VMInstanceVO vm, Map volumeToPool, Long hostId) { if (hostId == null) { return null; } @@ -2200,7 +2202,7 @@ private Answer[] attemptHypervisorMigration(VMInstanceVO vm, StoragePool destPoo // OfflineVmwareMigration: should we check the proximity of source and destination // 
OfflineVmwareMigration: if we are in the same cluster/datacentre/pool or whatever? // OfflineVmwareMigration: we are checking on success to optionally delete an old vm if we are not - List commandsToSend = hvGuru.finalizeMigrate(vm, destPool, volumeToPool); + List commandsToSend = hvGuru.finalizeMigrate(vm, volumeToPool); if (CollectionUtils.isNotEmpty(commandsToSend)) { Commands commandsContainer = new Commands(Command.OnError.Stop); @@ -2216,16 +2218,13 @@ private Answer[] attemptHypervisorMigration(VMInstanceVO vm, StoragePool destPoo return null; } - private void afterHypervisorMigrationCleanup(StoragePool destPool, Map volumeToPool, VMInstanceVO vm, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { + private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map volumeToPool, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { boolean isDebugEnabled = s_logger.isDebugEnabled(); if(isDebugEnabled) { String msg = String.format("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid()); - if (destPool != null) { - msg += String.format(" to pool %s(%s)", destPool.getName(), destPool.getUuid()); - } s_logger.debug(msg); } - StoragePool rootVolumePool = destPool; + StoragePool rootVolumePool = null; if (rootVolumePool == null && MapUtils.isNotEmpty(volumeToPool)) { for (Map.Entry entry : volumeToPool.entrySet()) { if (Type.ROOT.equals(entry.getKey().getVolumeType())) { @@ -2328,61 +2327,57 @@ private Pair findClusterAndHostIdForVm(VMInstanceVO vm) { return new Pair<>(clusterId, hostId); } - private void migrateThroughHypervisorOrStorage(StoragePool destPool, Map volumeToPool, VMInstanceVO vm) throws StorageUnavailableException, InsufficientCapacityException { + private void migrateThroughHypervisorOrStorage(VMInstanceVO vm, Map volumeToPool) throws StorageUnavailableException, InsufficientCapacityException { final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); Pair vmClusterAndHost = findClusterAndHostIdForVm(vm); final Long sourceClusterId = vmClusterAndHost.first(); final Long sourceHostId = vmClusterAndHost.second(); - Answer[] hypervisorMigrationResults = attemptHypervisorMigration(vm, destPool, volumeToPool, sourceHostId); + Answer[] hypervisorMigrationResults = attemptHypervisorMigration(vm, volumeToPool, sourceHostId); boolean migrationResult = false; if (hypervisorMigrationResults == null) { // OfflineVmwareMigration: if the HypervisorGuru can't do it, let the volume manager take care of it. 
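// A minimal sketch of the decision flow at this point, using this class's own
// names (illustrative only, not part of the patch hunks): a null answer array
// from attemptHypervisorMigration is the signal that the guru produced no
// commands, so the method falls back to per-volume migration via the volume manager.
//   Answer[] results = attemptHypervisorMigration(vm, volumeToPool, sourceHostId);
//   if (results == null) {
//       migrationResult = volumeMgr.storageMigration(profile, volumeToPool); // per-volume fallback
//   } else {
//       afterHypervisorMigrationCleanup(vm, volumeToPool, results);
//   }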
- migrationResult = volumeMgr.storageMigration(profile, destPool, volumeToPool); + migrationResult = volumeMgr.storageMigration(profile, volumeToPool); if (migrationResult) { - afterStorageMigrationCleanup(destPool, volumeToPool, vm, _hostDao.findById(sourceHostId), sourceClusterId); + postStorageMigrationCleanup(vm, volumeToPool, _hostDao.findById(sourceHostId), sourceClusterId); } else { s_logger.debug("Storage migration failed"); } } else { - afterHypervisorMigrationCleanup(destPool, volumeToPool, vm, hypervisorMigrationResults); + afterHypervisorMigrationCleanup(vm, volumeToPool, hypervisorMigrationResults); } } - private Map prepareVmStorageMigration(VMInstanceVO vm, StoragePool destPool, Map volumeToPool) { + private Map prepareVmStorageMigration(VMInstanceVO vm, Map volumeToPool) { Map volumeToPoolMap = new HashMap<>(); - if (destPool == null && MapUtils.isEmpty(volumeToPool)) { - throw new CloudRuntimeException("Unable to migrate vm: missing both destination storage pool and volume to pool mapping"); - } - if (destPool != null) { - checkDestinationForTags(destPool, vm); - } else if (MapUtils.isNotEmpty(volumeToPool)) { - Cluster cluster = null; - Long dataCenterId = null; - for (Map.Entry entry: volumeToPool.entrySet()) { - StoragePool pool = _storagePoolDao.findById(entry.getValue()); - if (pool.getClusterId() != null) { - cluster = _clusterDao.findById(pool.getClusterId()); - break; - } - dataCenterId = pool.getDataCenterId(); - } - Long podId = null; - Long clusterId = null; - if (cluster != null) { - dataCenterId = cluster.getDataCenterId(); - podId = cluster.getPodId(); - clusterId = cluster.getId(); - } - if (dataCenterId == null) { - String msg = "Unable to migrate vm: failed to create deployment destination with given volume to pool map"; - s_logger.debug(msg); - throw new CloudRuntimeException(msg); + if (MapUtils.isEmpty(volumeToPool)) { + throw new CloudRuntimeException("Unable to migrate vm: missing volume to pool mapping"); + } + Cluster cluster = null; + Long dataCenterId = null; + for (Map.Entry entry: volumeToPool.entrySet()) { + StoragePool pool = _storagePoolDao.findById(entry.getValue()); + if (pool.getClusterId() != null) { + cluster = _clusterDao.findById(pool.getClusterId()); + break; } - final DataCenterDeployment destination = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); - // Create a map of which volume should go in which storage pool. - final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); - volumeToPoolMap = createMappingVolumeAndStoragePool(profile, destination, volumeToPool); + dataCenterId = pool.getDataCenterId(); + } + Long podId = null; + Long clusterId = null; + if (cluster != null) { + dataCenterId = cluster.getDataCenterId(); + podId = cluster.getPodId(); + clusterId = cluster.getId(); + } + if (dataCenterId == null) { + String msg = "Unable to migrate vm: failed to create deployment destination with given volume to pool map"; + s_logger.debug(msg); + throw new CloudRuntimeException(msg); } + final DataCenterDeployment destination = new DataCenterDeployment(dataCenterId, podId, clusterId, null, null, null); + // Create a map of which volume should go in which storage pool. 
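// Illustrative shape of that map (hypothetical IDs): one entry per usable volume,
// each paired with the pool the allocator picked for the deployment destination, e.g.
//   { ROOT-42 -> pool-in-target-cluster, DATA-43 -> pool-matching-its-tags }
// The ROOT volume's pool later determines the pod the VM is deployed in.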
+ final VirtualMachineProfile profile = new VirtualMachineProfileImpl(vm); + volumeToPoolMap = createMappingVolumeAndStoragePool(profile, destination, volumeToPool); try { stateTransitTo(vm, Event.StorageMigrationRequested, null); } catch (final NoTransitionException e) { @@ -2429,27 +2424,28 @@ static boolean matches(List volumeTags, List storagePoolTags) { } - private void afterStorageMigrationCleanup(StoragePool destPool, Map volumeToPool, VMInstanceVO vm, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException { - if (destPool == null && MapUtils.isNotEmpty(volumeToPool)) { + private void postStorageMigrationCleanup(VMInstanceVO vm, Map volumeToPool, HostVO srcHost, Long srcClusterId) throws InsufficientCapacityException { + StoragePool rootVolumePool = null; + if (MapUtils.isNotEmpty(volumeToPool)) { for (Map.Entry entry : volumeToPool.entrySet()) { if (Type.ROOT.equals(entry.getKey().getVolumeType())) { - destPool = entry.getValue(); + rootVolumePool = entry.getValue(); break; } } } - setDestinationPoolAndReallocateNetwork(destPool, vm); + setDestinationPoolAndReallocateNetwork(rootVolumePool, vm); //when starting the vm next time, don't look at last_host_id, only choose the host based on volume/storage pool vm.setLastHostId(null); - if (destPool != null) { - vm.setPodIdToDeployIn(destPool.getPodId()); + if (rootVolumePool != null) { + vm.setPodIdToDeployIn(rootVolumePool.getPodId()); } // If VM was cold migrated between clusters belonging to two different VMware DCs, // unregister the VM from the source host and cleanup the associated VM files. if (vm.getHypervisorType().equals(HypervisorType.VMware)) { - afterStorageMigrationVmwareVMcleanup(destPool, vm, srcHost, srcClusterId); + afterStorageMigrationVmwareVMcleanup(rootVolumePool, vm, srcHost, srcClusterId); } } @@ -5346,12 +5342,19 @@ private void checkConcurrentJobsPerDatastoreThreshhold(final StoragePool destPoo } public Outcome migrateVmStorageThroughJobQueue( - final String vmUuid, final StoragePool destPool, final Map volumeToPool) { + final String vmUuid, final Map volumeToPool) { final CallContext context = CallContext.current(); final User user = context.getCallingUser(); final Account account = context.getCallingAccount(); + Collection poolIds = volumeToPool.values(); + Set uniquePoolIds = new HashSet<>(poolIds); + for (Long poolId : uniquePoolIds) { + StoragePoolVO pool = _storagePoolDao.findById(poolId); + checkConcurrentJobsPerDatastoreThreshhold(pool); + } + final VMInstanceVO vm = _vmDao.findByUuid(vmUuid); final List pendingWorkJobs = _workJobDao.listPendingWorkJobs( @@ -5377,7 +5380,7 @@ public Outcome migrateVmStorageThroughJobQueue( // save work context info (there are some duplications) final VmWorkStorageMigration workInfo = new VmWorkStorageMigration(user.getId(), account.getId(), vm.getId(), - VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, destPool != null ?
destPool.getId() : null, volumeToPool); + VirtualMachineManagerImpl.VM_WORK_JOB_HANDLER, volumeToPool); workJob.setCmdInfo(VmWorkSerializer.serialize(workInfo)); _jobMgr.submitAsyncJob(workJob, VmWorkConstants.VM_WORK_QUEUE, vm.getId()); @@ -5723,11 +5726,7 @@ private Pair orchestrateStorageMigration(final VmWorkSto s_logger.info("Unable to find vm " + work.getVmId()); } assert vm != null; - StoragePool pool = null; - if (work.getDestStoragePoolId() != null) { - pool = (PrimaryDataStoreInfo) dataStoreMgr.getPrimaryDataStore(work.getDestStoragePoolId()); - } - orchestrateStorageMigration(vm.getUuid(), pool, work.getVolumeToPool()); + orchestrateStorageMigration(vm.getUuid(), work.getVolumeToPool()); return new Pair(JobInfo.Status.SUCCEEDED, null); } diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java index 7254bdac7c67..07e8549d2246 100644 --- a/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VmWorkStorageMigration.java @@ -21,20 +21,14 @@ public class VmWorkStorageMigration extends VmWork { private static final long serialVersionUID = -8677979691741157474L; - Long destPoolId; Map volumeToPool; - public VmWorkStorageMigration(long userId, long accountId, long vmId, String handlerName, Long destPoolId, Map volumeToPool) { + public VmWorkStorageMigration(long userId, long accountId, long vmId, String handlerName, Map volumeToPool) { super(userId, accountId, vmId, handlerName); - this.destPoolId = destPoolId; this.volumeToPool = volumeToPool; } - public Long getDestStoragePoolId() { - return destPoolId; - } - public Map getVolumeToPool() { return volumeToPool; } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index d515800221ad..9461e302ed64 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1067,69 +1067,34 @@ public void migrateVolumes(VirtualMachine vm, VirtualMachineTO vmTo, Host srcHos } @Override - public boolean storageMigration(VirtualMachineProfile vm, StoragePool destPool, Map volumeToPool) throws StorageUnavailableException { - if (destPool != null) { - List vols = _volsDao.findUsableVolumesForInstance(vm.getId()); - List volumesNeedToMigrate = new ArrayList(); - - for (VolumeVO volume : vols) { - if (volume.getState() != Volume.State.Ready) { - s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); - throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); - } - - if (volume.getPoolId() == destPool.getId()) { - s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + destPool.getId()); - continue; - } - - volumesNeedToMigrate.add(volume); - } - - if (volumesNeedToMigrate.isEmpty()) { - s_logger.debug("No volume need to be migrated"); - return true; + public boolean storageMigration(VirtualMachineProfile vm, Map volumeToPool) throws StorageUnavailableException { + Map volumeStoragePoolMap = new HashMap<>(); + for (Map.Entry entry : volumeToPool.entrySet()) { + Volume volume = entry.getKey(); + StoragePool pool = entry.getValue(); + if 
(volume.getState() != Volume.State.Ready) { + s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); + throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); } - // OfflineVmwareMigration: in case we can (vmware?) don't itterate over volumes but tell the hypervisor to do the thing - if (s_logger.isDebugEnabled()) { - s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here."); - } - for (Volume vol : volumesNeedToMigrate) { - Volume result = migrateVolume(vol, destPool); - if (result == null) { - return false; - } - } - } else if (MapUtils.isNotEmpty(volumeToPool)) { - Map volumeStoragePoolMap = new HashMap<>(); - for (Map.Entry entry : volumeToPool.entrySet()) { - Volume volume = entry.getKey(); - StoragePool pool = entry.getValue(); - if (volume.getState() != Volume.State.Ready) { - s_logger.debug("volume: " + volume.getId() + " is in " + volume.getState() + " state"); - throw new CloudRuntimeException("volume: " + volume.getId() + " is in " + volume.getState() + " state"); - } - - if (volume.getPoolId() == pool.getId()) { - s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + pool.getId()); - continue; - } - volumeStoragePoolMap.put(volume, volumeToPool.get(volume)); + if (volume.getPoolId() == pool.getId()) { + s_logger.debug("volume: " + volume.getId() + " is on the same storage pool: " + pool.getId()); + continue; } + volumeStoragePoolMap.put(volume, volumeToPool.get(volume)); + } - if (MapUtils.isEmpty(volumeStoragePoolMap)) { - s_logger.debug("No volume need to be migrated"); - return true; - } - if (s_logger.isDebugEnabled()) { - s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here."); - } - for (Map.Entry entry : volumeStoragePoolMap.entrySet()) { - Volume result = migrateVolume(entry.getKey(), entry.getValue()); - if (result == null) { - return false; - } + if (MapUtils.isEmpty(volumeStoragePoolMap)) { + s_logger.debug("No volume need to be migrated"); + return true; + } + if (s_logger.isDebugEnabled()) { + s_logger.debug("Offline vm migration was not done up the stack in VirtualMachineManager so trying here."); + } + for (Map.Entry entry : volumeStoragePoolMap.entrySet()) { + Volume result = migrateVolume(entry.getKey(), entry.getValue()); + if (result == null) { + return false; } } return true; diff --git a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java index 83c32b1c2efd..45c88060496b 100644 --- a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java +++ b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDao.java @@ -37,4 +37,6 @@ public interface HypervisorCapabilitiesDao extends GenericDao getHypervisorsWithDefaultEntries(); + + Boolean isStorageMotionSupported(HypervisorType hypervisorType, String hypervisorVersion); } diff --git a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java index 5cecff2af95f..09b39749ec29 100644 --- a/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/hypervisor/dao/HypervisorCapabilitiesDaoImpl.java @@ -119,4 +119,21 @@ public List 
getHypervisorsWithDefaultEntries() { } return hvs; } + + @Override + public Boolean isStorageMotionSupported(HypervisorType hypervisorType, String hypervisorVersion) { + HypervisorCapabilitiesVO hostCapabilities = findByHypervisorTypeAndVersion(hypervisorType, hypervisorVersion); + if (hostCapabilities == null && HypervisorType.KVM.equals(hypervisorType)) { + List hypervisorCapabilitiesList = listAllByHypervisorType(HypervisorType.KVM); + if (hypervisorCapabilitiesList != null) { + for (HypervisorCapabilitiesVO hypervisorCapabilities : hypervisorCapabilitiesList) { + if (hypervisorCapabilities.isStorageMotionSupported()) { + hostCapabilities = hypervisorCapabilities; + break; + } + } + } + } + return hostCapabilities != null && hostCapabilities.isStorageMotionSupported(); + } } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index 032323f55504..c833ed402756 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -44,7 +44,6 @@ import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; @@ -213,15 +212,21 @@ protected VMwareGuru() { Long getClusterId(long vmId) { Long clusterId = null; - Long hostId; - - hostId = _vmDao.findById(vmId).getHostId(); - if (hostId == null) { + Long hostId = null; + VMInstanceVO vm = _vmDao.findById(vmId); + if (vm != null) { + hostId = _vmDao.findById(vmId).getHostId(); + } + if (vm != null && hostId == null) { // If VM is in stopped state then hostId would be undefined. Hence read last host's Id instead. 
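// Resolution order sketch for this cluster lookup (descriptive note): a running
// VM resolves through its current host; a stopped VM through its last host; if
// neither host is known, the fallback below uses the cluster of the primary
// storage pool that holds the VM's ROOT volume.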
hostId = _vmDao.findById(vmId).getLastHostId(); } + HostVO host = null; if (hostId != null) { - clusterId = _hostDao.findById(hostId).getClusterId(); + host = _hostDao.findById(hostId); + } + if (host != null) { + clusterId = host.getClusterId(); } else { List volumes = _volumeDao.findByInstanceAndType(vmId, Volume.Type.ROOT); if (CollectionUtils.isNotEmpty(volumes)) { @@ -1073,34 +1078,26 @@ private VirtualDisk getAttachedDisk(VirtualMachineMO vmMo, String diskPath) thro return null; } - @Override public List finalizeMigrate(VirtualMachine vm, StoragePool destination, Map volumeToPool) { + @Override public List finalizeMigrate(VirtualMachine vm, Map volumeToPool) { List commands = new ArrayList(); // OfflineVmwareMigration: specialised migration command List vols = new ArrayList<>(); - List> volumeToFilerto = new ArrayList>(); + List> volumeToFilerTo = new ArrayList>(); Long poolClusterId = null; - Host clusterHost = null; - if (destination != null) { - List volumes = _volumeDao.findByInstance(vm.getId()); - for (Volume volume : volumes) { - VolumeTO vol = new VolumeTO(volume, destination); - vols.add(vol); - } - } else if (MapUtils.isNotEmpty(volumeToPool)) { - for (Map.Entry entry : volumeToPool.entrySet()) { - Volume volume = entry.getKey(); - StoragePool pool = entry.getValue(); - VolumeTO volumeTo = new VolumeTO(volume, _storagePoolDao.findById(pool.getId())); - StorageFilerTO filerTo = new StorageFilerTO(pool); - if (pool.getClusterId() != null) { - poolClusterId = pool.getClusterId(); - } - volumeToFilerto.add(new Pair(volumeTo, filerTo)); - vols.add(volumeTo); + Host hostInTargetCluster = null; + for (Map.Entry entry : volumeToPool.entrySet()) { + Volume volume = entry.getKey(); + StoragePool pool = entry.getValue(); + VolumeTO volumeTo = new VolumeTO(volume, _storagePoolDao.findById(pool.getId())); + StorageFilerTO filerTo = new StorageFilerTO(pool); + if (pool.getClusterId() != null) { + poolClusterId = pool.getClusterId(); } + volumeToFilerTo.add(new Pair(volumeTo, filerTo)); + vols.add(volumeTo); } - final Long destClusterId = destination != null ? destination.getClusterId() : poolClusterId; + final Long destClusterId = poolClusterId; final Long srcClusterId = getClusterId(vm.getId()); final boolean isInterClusterMigration = srcClusterId != null && destClusterId != null && ! srcClusterId.equals(destClusterId); if (isInterClusterMigration) { @@ -1109,14 +1106,14 @@ private VirtualDisk getAttachedDisk(VirtualMachineMO vmMo, String diskPath) thro // As this is offline migration VM won't be started on this host List hosts = _hostDao.findHypervisorHostInCluster(destClusterId); if (CollectionUtils.isNotEmpty(hosts)) { - clusterHost = hosts.get(0); + hostInTargetCluster = hosts.get(0); } - if (clusterHost == null) { + if (hostInTargetCluster == null) { throw new CloudRuntimeException("Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different clusters without shared storages"); } } MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, - destination == null ? null : destination.getUuid(), volumeToFilerto, clusterHost == null ? null : clusterHost.getGuid(), true); + volumeToFilerTo, hostInTargetCluster == null ? 
null : hostInTargetCluster.getGuid(), true); commands.add(migrateVmToPoolCommand); // OfflineVmwareMigration: cleanup if needed diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 9b4c20d10e6b..3a064f115f44 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4118,7 +4118,7 @@ protected Answer execute(PrepareForMigrationCommand cmd) { protected Answer execute(MigrateVmToPoolCommand cmd) { if (s_logger.isInfoEnabled()) { - s_logger.info(String.format("Executing MigrateVmToPoolCommand %s -> %s", cmd.getVmName(), cmd.getDestinationPool())); + s_logger.info(String.format("Executing MigrateVmToPoolCommand %s", cmd.getVmName())); if (s_logger.isDebugEnabled()) { s_logger.debug("MigrateVmToPoolCommand: " + _gson.toJson(cmd)); } @@ -4140,7 +4140,7 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { throw new CloudRuntimeException(msg); } } - return migrateAndAnswer(vmMo, cmd.getDestinationPool(), hyperHost, cmd); + return migrateAndAnswer(vmMo, null, hyperHost, cmd); } catch (Throwable e) { // hopefully only CloudRuntimeException :/ if (e instanceof Exception) { return new Answer(cmd, (Exception) e); @@ -4154,23 +4154,22 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { } private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception { - String targetClusterHostName = null; - - VmwareHypervisorHost targetClusterHost = null; + String hostNameInTargetCluster = null; + VmwareHypervisorHost hostInTargetCluster = null; List> volToFiler = new ArrayList<>(); if (cmd instanceof MigrateVmToPoolCommand) { MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand)cmd; - targetClusterHostName = mcmd.getTargetClusterHost(); + hostNameInTargetCluster = mcmd.getHostGuidInTargetCluster(); volToFiler = mcmd.getVolumeToFilerAsList(); } else if (cmd instanceof MigrateVolumeCommand) { - targetClusterHostName = ((MigrateVolumeCommand)cmd).getTargetClusterHost(); + hostNameInTargetCluster = ((MigrateVolumeCommand)cmd).getHostGuidInTargetCluster(); } - if (StringUtils.isNotBlank(targetClusterHostName)) { - String targetClusterHostMorInfo = targetClusterHostName.split("@")[0]; - ManagedObjectReference morTgtClusterHost = new ManagedObjectReference(); - morTgtClusterHost.setType(targetClusterHostMorInfo.split(":")[0]); - morTgtClusterHost.setValue(targetClusterHostMorInfo.split(":")[1]); - targetClusterHost = new HostMO(getServiceContext(), morTgtClusterHost); + if (StringUtils.isNotBlank(hostNameInTargetCluster)) { + String hostInTargetClusterMorInfo = hostNameInTargetCluster.split("@")[0]; + ManagedObjectReference morHostInTargetCluster = new ManagedObjectReference(); + morHostInTargetCluster.setType(hostInTargetClusterMorInfo.split(":")[0]); + morHostInTargetCluster.setValue(hostInTargetClusterMorInfo.split(":")[1]); + hostInTargetCluster = new HostMO(getServiceContext(), morHostInTargetCluster); } try { @@ -4184,7 +4183,7 @@ private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHy } } } - List volumeToList = relocateVirtualMachine(hyperHost, vmMo.getName(), null, null, targetClusterHost, poolUuid, volToFiler); + List volumeToList = relocateVirtualMachine(hyperHost, vmMo.getName(), null, 
null, hostInTargetCluster, poolUuid, volToFiler); return createAnswerForCmd(vmMo, poolUuid, volumeToList, cmd, volumeDeviceKey); } catch (Exception e) { String msg = "Change data store for VM " + vmMo.getVmName() + " failed"; @@ -4208,21 +4207,7 @@ Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, List diskLocators = new ArrayList(); - VirtualMachineRelocateSpecDiskLocator diskLocator = null; - - String tgtDsName = ""; - String tgtDsHost; - String tgtDsPath; - int tgtDsPort; - VolumeTO volume; - StorageFilerTO filerTo; - Set mountedDatastoresAtSource = new HashSet(); - List volumeToList = new ArrayList(); - Map volumeDeviceKey = new HashMap(); - - List> volToFiler = cmd.getVolumeToFilerAsList(); - String tgtHost = cmd.getTargetHost(); - String tgtHostMorInfo = tgtHost.split("@")[0]; - morTgtHost.setType(tgtHostMorInfo.split(":")[0]); - morTgtHost.setValue(tgtHostMorInfo.split(":")[1]); + final VirtualMachineTO vmTo = cmd.getVirtualMachine(); + final List> volToFiler = cmd.getVolumeToFilerAsList(); + final String targetHost = cmd.getTargetHost(); try { - srcHyperHost = getHyperHost(getServiceContext()); - tgtHyperHost = new HostMO(getServiceContext(), morTgtHost); - morDc = srcHyperHost.getHyperHostDatacenter(); - morDcOfTargetHost = tgtHyperHost.getHyperHostDatacenter(); - if (!morDc.getValue().equalsIgnoreCase(morDcOfTargetHost.getValue())) { - String msg = "Source host & target host are in different datacentesr"; - throw new CloudRuntimeException(msg); - } - VmwareManager mgr = tgtHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); - String srcHostApiVersion = ((HostMO) srcHyperHost).getHostAboutInfo().getApiVersion(); - - // find VM through datacenter (VM is not at the target host yet) - vmMo = srcHyperHost.findVmOnPeerHyperHost(vmName); - if (vmMo == null) { - String msg = "VM " + vmName + " does not exist in VMware datacenter " + morDc.getValue(); - s_logger.error(msg); - throw new Exception(msg); - } - vmName = vmMo.getName(); - - // Specify destination datastore location for each volume - for (Pair entry : volToFiler) { - volume = entry.first(); - filerTo = entry.second(); - - s_logger.debug("Preparing spec for volume : " + volume.getName()); - morDsAtTarget = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(tgtHyperHost, filerTo.getUuid()); - morDsAtSource = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(srcHyperHost, filerTo.getUuid()); - if (morDsAtTarget == null) { - String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " on target host: " + tgtHyperHost.getHyperHostName() - + " to execute MigrateWithStorageCommand"; - s_logger.error(msg); - throw new Exception(msg); - } - morTgtDatastore = morDsAtTarget; - - // If host version is below 5.1 then simultaneous change of VM's datastore and host is not supported. - // So since only the datastore will be changed first, ensure the target datastore is mounted on source host. - if (srcHostApiVersion.compareTo("5.1") < 0) { - tgtDsName = filerTo.getUuid().replace("-", ""); - tgtDsHost = filerTo.getHost(); - tgtDsPath = filerTo.getPath(); - tgtDsPort = filerTo.getPort(); - - // If datastore is NFS and target datastore is not already mounted on source host then mount the datastore. 
- if (filerTo.getType().equals(StoragePoolType.NetworkFilesystem)) { - if (morDsAtSource == null) { - morDsAtSource = srcHyperHost.mountDatastore(false, tgtDsHost, tgtDsPort, tgtDsPath, tgtDsName); - if (morDsAtSource == null) { - throw new Exception("Unable to mount NFS datastore " + tgtDsHost + ":/" + tgtDsPath + " on " + _hostName); - } - mountedDatastoresAtSource.add(tgtDsName); - s_logger.debug("Mounted datastore " + tgtDsHost + ":/" + tgtDsPath + " on " + _hostName); - } - } - - // If datastore is VMFS and target datastore is not mounted or accessible to source host then fail migration. - if (filerTo.getType().equals(StoragePoolType.VMFS)) { - if (morDsAtSource == null) { - s_logger.warn( - "If host version is below 5.1, then target VMFS datastore(s) need to manually mounted on source host for a successful live storage migration."); - throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not mounted on source host: " + _hostName); - } - DatastoreMO dsAtSourceMo = new DatastoreMO(getServiceContext(), morDsAtSource); - String srcHostValue = srcHyperHost.getMor().getValue(); - if (!dsAtSourceMo.isAccessibleToHost(srcHostValue)) { - s_logger.warn("If host version is below 5.1, then target VMFS datastore(s) need to accessible to source host for a successful live storage migration."); - throw new Exception("Target VMFS datastore: " + tgtDsPath + " is not accessible on source host: " + _hostName); - } - } - morTgtDatastore = morDsAtSource; - } - - if (volume.getType() == Volume.Type.ROOT) { - relocateSpec.setDatastore(morTgtDatastore); - } - diskLocator = new VirtualMachineRelocateSpecDiskLocator(); - diskLocator.setDatastore(morDsAtSource); - Pair diskInfo = getVirtualDiskInfo(vmMo, appendFileType(volume.getPath(), VMDK_EXTENSION)); - String vmdkAbsFile = getAbsoluteVmdkFile(diskInfo.first()); - if (vmdkAbsFile != null && !vmdkAbsFile.isEmpty()) { - vmMo.updateAdapterTypeIfRequired(vmdkAbsFile); - } - int diskId = diskInfo.first().getKey(); - diskLocator.setDiskId(diskId); - - diskLocators.add(diskLocator); - volumeDeviceKey.put(volume.getId(), diskId); - } - // If a target datastore is provided for the VM, then by default all volumes associated with the VM will be migrated to that target datastore. - // Hence set the existing datastore as target datastore for volumes that are not to be migrated. 
- List> diskDatastores = vmMo.getAllDiskDatastores(); - for (Pair diskDatastore : diskDatastores) { - if (!volumeDeviceKey.containsValue(diskDatastore.first().intValue())) { - diskLocator = new VirtualMachineRelocateSpecDiskLocator(); - diskLocator.setDiskId(diskDatastore.first().intValue()); - diskLocator.setDatastore(diskDatastore.second()); - diskLocators.add(diskLocator); - } - } - - relocateSpec.getDisk().addAll(diskLocators); - - // Prepare network at target before migration - NicTO[] nics = vmTo.getNics(); - for (NicTO nic : nics) { - // prepare network on the host - prepareNetworkFromNicInfo(new HostMO(getServiceContext(), morTgtHost), nic, false, vmTo.getType()); - } - - // Ensure all secondary storage mounted on target host - List> secStoreUrlAndIdList = mgr.getSecondaryStorageStoresUrlAndIdList(Long.parseLong(_dcId)); - for (Pair secStoreUrlAndId : secStoreUrlAndIdList) { - String secStoreUrl = secStoreUrlAndId.first(); - Long secStoreId = secStoreUrlAndId.second(); - if (secStoreUrl == null) { - String msg = String.format("Secondary storage for dc %s is not ready yet?", _dcId); - throw new Exception(msg); - } - - if (vmTo.getType() != VirtualMachine.Type.User) { - mgr.prepareSecondaryStorageStore(secStoreUrl, secStoreId); - } - - ManagedObjectReference morSecDs = prepareSecondaryDatastoreOnSpecificHost(secStoreUrl, tgtHyperHost); - if (morSecDs == null) { - String msg = "Failed to prepare secondary storage on host, secondary store url: " + secStoreUrl; - throw new Exception(msg); - } - } - - if (srcHostApiVersion.compareTo("5.1") < 0) { - // Migrate VM's volumes to target datastore(s). - if (!vmMo.changeDatastore(relocateSpec)) { - throw new Exception("Change datastore operation failed during storage migration"); - } else { - s_logger.debug("Successfully migrated storage of VM " + vmName + " to target datastore(s)"); - } - - // Migrate VM to target host. - ManagedObjectReference morPool = tgtHyperHost.getHyperHostOwnerResourcePool(); - if (!vmMo.migrate(morPool, tgtHyperHost.getMor())) { - throw new Exception("VM migration to target host failed during storage migration"); - } else { - s_logger.debug("Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName()); - } - } else { - // Simultaneously migrate VM's volumes to target datastore and VM to target host. - relocateSpec.setHost(tgtHyperHost.getMor()); - relocateSpec.setPool(tgtHyperHost.getHyperHostOwnerResourcePool()); - if (!vmMo.changeDatastore(relocateSpec)) { - throw new Exception("Change datastore operation failed during storage migration"); - } else { - s_logger.debug( - "Successfully migrated VM " + vmName + " from " + _hostName + " to " + tgtHyperHost.getHyperHostName() + " and its storage to target datastore(s)"); - } - } - - // Consolidate VM disks. - // In case of a linked clone VM, if VM's disks are not consolidated, further VM operations such as volume snapshot, VM snapshot etc. will result in DB inconsistencies. - if (!vmMo.consolidateVmDisks()) { - s_logger.warn("VM disk consolidation failed after storage migration. 
Yet proceeding with VM migration."); - } else { - s_logger.debug("Successfully consolidated disks of VM " + vmName + "."); - } - - // Update and return volume path and chain info for every disk because that could have changed after migration - VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); - for (Pair entry : volToFiler) { - volume = entry.first(); - long volumeId = volume.getId(); - VirtualDisk[] disks = vmMo.getAllDiskDevice(); - for (VirtualDisk disk : disks) { - if (volumeDeviceKey.get(volumeId) == disk.getKey()) { - VolumeObjectTO newVol = new VolumeObjectTO(); - String newPath = vmMo.getVmdkFileBaseName(disk); - String poolName = entry.second().getUuid().replace("-", ""); - VirtualMachineDiskInfo diskInfo = diskInfoBuilder.getDiskInfoByBackingFileBaseName(newPath, poolName); - newVol.setId(volumeId); - newVol.setPath(newPath); - newVol.setChainInfo(_gson.toJson(diskInfo)); - volumeToList.add(newVol); - break; - } - } - } - + List volumeToList = relocateVirtualMachine(null, null, vmTo, targetHost, null, null, volToFiler); return new MigrateWithStorageAnswer(cmd, volumeToList); } catch (Throwable e) { - if (e instanceof RemoteException) { - s_logger.warn("Encountered remote exception at vCenter, invalidating VMware session context"); - invalidateServiceContext(); - } - - String msg = "MigrationCommand failed due to " + VmwareHelper.getExceptionMessage(e); + String msg = "MigrateWithStorageCommand failed due to " + VmwareHelper.getExceptionMessage(e); s_logger.warn(msg, e); - return new MigrateWithStorageAnswer(cmd, (Exception) e); - } finally { - // Cleanup datastores mounted on source host - for (String mountedDatastore : mountedDatastoresAtSource) { - s_logger.debug("Attempting to unmount datastore " + mountedDatastore + " at " + _hostName); - try { - srcHyperHost.unmountDatastore(mountedDatastore); - } catch (Exception unmountEx) { - s_logger.debug("Failed to unmount datastore " + mountedDatastore + " at " + _hostName + ". Seems the datastore is still being used by " + _hostName - + ". Please unmount manually to cleanup."); - } - s_logger.debug("Successfully unmounted datastore " + mountedDatastore + " at " + _hostName); - } + return new MigrateWithStorageAnswer(cmd, (Exception)e); } } @@ -4595,7 +4347,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { VmwareHypervisorHost hyperHost = getHyperHost(getServiceContext()); VirtualMachineMO vmMo = null; - DatastoreMO dsMo = null; + DatastoreMO targetDsMo = null; ManagedObjectReference morSourceDS = null; String vmdkDataStorePath = null; @@ -4610,12 +4362,29 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { // OfflineVmwareMigration: 1. 
find data(store) // OfflineVmwareMigration: more robust would be to find the store given the volume as it might have been moved out of band or due to error // example: DatastoreMO existingVmDsMo = new DatastoreMO(dcMo.getContext(), dcMo.findDatastore(fileInDatastore.getDatastoreName())); - - morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid()); - dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS); + ManagedObjectReference morSourceDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid()); + DatastoreMO sourceDsMo = new DatastoreMO(hyperHost.getContext(), morSourceDs); + String targetDsName = cmd.getTargetPool().getUuid(); + String hostNameInTargetCluster = cmd.getHostGuidInTargetCluster(); + VmwareHypervisorHost hostInTargetCluster = null; + if (StringUtils.isNotBlank(hostNameInTargetCluster)) { + String hostInTargetClusterMorInfo = hostNameInTargetCluster.split("@")[0]; + ManagedObjectReference morHostInTargetCluster = new ManagedObjectReference(); + morHostInTargetCluster.setType(hostInTargetClusterMorInfo.split(":")[0]); + morHostInTargetCluster.setValue(hostInTargetClusterMorInfo.split(":")[1]); + hostInTargetCluster = new HostMO(getServiceContext(), morHostInTargetCluster); + } + VmwareHypervisorHost dsHost = hostInTargetCluster == null ? hyperHost : hostInTargetCluster; + ManagedObjectReference morTargetDS = getTargetDatastoreMOReference(targetDsName, hyperHost); + if(morTargetDS == null) { + String msg = "Unable to find the target datastore: " + targetDsName + " on host: " + dsHost.getHyperHostName(); + s_logger.error(msg); + throw new CloudRuntimeException(msg); + } + targetDsMo = new DatastoreMO(dsHost.getContext(), morTargetDS); s_logger.info("Create worker VM " + vmName); // OfflineVmwareMigration: 2. create the worker with access to the data(store) - vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, dsMo, vmName); + vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName); if (vmMo == null) { // OfflineVmwareMigration: don't throw a general Exception but think of a specific one throw new CloudRuntimeException("Unable to create a worker VM for volume operation"); @@ -4624,18 +4393,18 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { synchronized (this) { // OfflineVmwareMigration: 3. 
attach the disk to the worker String vmdkFileName = path + VMDK_EXTENSION; - vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName); - if (!dsMo.fileExists(vmdkDataStorePath)) { + vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(sourceDsMo, vmdkFileName); + if (!targetDsMo.fileExists(vmdkDataStorePath)) { if (s_logger.isDebugEnabled()) { s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, path)); } - vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, path, vmdkFileName); + vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(sourceDsMo, path, vmdkFileName); } - if (!dsMo.fileExists(vmdkDataStorePath)) { + if (!targetDsMo.fileExists(vmdkDataStorePath)) { if (s_logger.isDebugEnabled()) { s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, vmName)); } - vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(dsMo, vmName, vmdkFileName); + vmdkDataStorePath = VmwareStorageLayoutHelper.getVmwareDatastorePathFromVmdkFileName(sourceDsMo, vmName, vmdkFileName); } if (s_logger.isDebugEnabled()) { s_logger.debug(String.format("attaching %s to %s for migration", vmdkDataStorePath, vmMo.getVmName())); @@ -4685,7 +4454,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { // OfflineVmwareMigration: worker *may* have been renamed vmName = vmMo.getVmName(); morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getTargetPool().getUuid()); - dsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS); + targetDsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS); s_logger.info("Dettaching disks before destroying worker VM '" + vmName + "' after volume migration"); VirtualDisk[] disks = vmMo.getAllDiskDevice(); String format = "disk %d was migrated to %s"; @@ -4693,7 +4462,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { if (s_logger.isTraceEnabled()) { s_logger.trace(String.format(format, disk.getKey(), vmMo.getVmdkFileBaseName(disk))); } - vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmMo.getVmdkFileBaseName(disk) + VMDK_EXTENSION); + vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(targetDsMo, vmMo.getVmdkFileBaseName(disk) + VMDK_EXTENSION); vmMo.detachDisk(vmdkDataStorePath, false); } s_logger.info("Destroy worker VM '" + vmName + "' after volume migration"); @@ -4706,10 +4475,10 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { String newPath = ((MigrateVolumeAnswer) answer).getVolumePath(); String vmdkFileName = newPath + VMDK_EXTENSION; try { - VmwareStorageLayoutHelper.syncVolumeToRootFolder(dsMo.getOwnerDatacenter().first(), dsMo, newPath, vmName); - vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(dsMo, vmdkFileName); + VmwareStorageLayoutHelper.syncVolumeToRootFolder(targetDsMo.getOwnerDatacenter().first(), targetDsMo, newPath, vmName); + vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(targetDsMo, vmdkFileName); - if (!dsMo.fileExists(vmdkDataStorePath)) { + if (!targetDsMo.fileExists(vmdkDataStorePath)) { String msg = String.format("Migration of volume '%s' failed; file (%s) not found as path '%s'", cmd.getVolumePath(), vmdkFileName, vmdkDataStorePath); s_logger.error(msg); answer = new Answer(cmd, false, msg); @@ -7213,14 
+6982,14 @@ private Answer execute(PrepareUnmanageVMInstanceCommand cmd) { private List relocateVirtualMachine(final VmwareHypervisorHost hypervisorHost, final String name, final VirtualMachineTO vmTo, - final String targetHost, final VmwareHypervisorHost targetClusterHost, + final String targetHost, final VmwareHypervisorHost hostInTargetCluster, final String poolUuid, final List> volToFiler) throws Exception { String vmName = name; if (vmName == null && vmTo != null) { vmName = vmTo.getName(); } VmwareHypervisorHost sourceHyperHost = hypervisorHost; - VmwareHypervisorHost targetHyperHost = targetClusterHost; + VmwareHypervisorHost targetHyperHost = hostInTargetCluster; VirtualMachineMO vmMo = null; ManagedObjectReference morSourceHostDc = null; ManagedObjectReference morTargetHostDc = null; @@ -7273,17 +7042,12 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h String srcHostApiVersion = ((HostMO)sourceHyperHost).getHostAboutInfo().getApiVersion(); if (StringUtils.isNotBlank(poolUuid)) { - String dsName = poolUuid.replace("-", ""); + VmwareHypervisorHost dsHost = targetHyperHost == null ? sourceHyperHost : targetHyperHost; ManagedObjectReference morDatastore = null; String msg; - if (targetClusterHost != null) { - morDatastore = dcMo.findDatastore(dsName); - } else { - morDatastore = getTargetDatastoreMOReference(dsName, sourceHyperHost); - } + morDatastore = getTargetDatastoreMOReference(poolUuid, dsHost); if (morDatastore == null) { - msg = "Unable to find the target datastore: " + dsName + - (targetClusterHost != null ? "in datacenter: " + dcMo.getName() : " on host: " + sourceHyperHost.getHyperHostName()) + + msg = "Unable to find the target datastore: " + poolUuid + " on host: " + dsHost.getHyperHostName() + " to execute migration"; s_logger.error(msg); throw new CloudRuntimeException(msg); @@ -7291,16 +7055,16 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h relocateSpec.setDatastore(morDatastore); } else if (CollectionUtils.isNotEmpty(volToFiler)) { // Specify destination datastore location for each volume + VmwareHypervisorHost dsHost = targetHyperHost == null ? sourceHyperHost : targetHyperHost; for (Pair entry : volToFiler) { VolumeTO volume = entry.first(); StorageFilerTO filerTo = entry.second(); if (s_logger.isDebugEnabled()) { s_logger.debug(String.format("Preparing spec for volume: %s to migrate it to datastore: %s", volume.getName(), filerTo.getUuid())); } - String dsName = filerTo.getUuid().replace("-", ""); - ManagedObjectReference morVolumeDatastore = dcMo.findDatastore(dsName); + ManagedObjectReference morVolumeDatastore = getTargetDatastoreMOReference(filerTo.getUuid(), dsHost); if (morVolumeDatastore == null) { - String msg = "Unable to find the target datastore: " + dsName + " in datacenter: " + dcMo.getName() + " to execute migration"; + String msg = "Unable to find the target datastore: " + filerTo.getUuid() + " in datacenter: " + dcMo.getName() + " to execute migration"; s_logger.error(msg); throw new CloudRuntimeException(msg); } @@ -7392,7 +7156,7 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h throw new Exception("Change datastore operation failed during storage migration"); } else { s_logger.debug("Successfully migrated VM " + vmName + - (targetClusterHost != null ? " from " + sourceHyperHost.getHyperHostName() + " to " + targetHyperHost.getHyperHostName() + " and " : " with ") + + (hostInTargetCluster != null ? 
" from " + sourceHyperHost.getHyperHostName() + " to " + targetHyperHost.getHyperHostName() + " and " : " with ") + "its storage to target datastore(s)"); } } diff --git a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java index 806376c75b85..758d94588045 100644 --- a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java +++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java @@ -189,16 +189,16 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As StoragePool targetPool = (StoragePool) destData.getDataStore(); ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType(); Long hostId = null; - String targetClusterHostGuid = null; + String hostGuidInTargetCluster = null; // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM if (ScopeType.CLUSTER.equals(sourceScopeType) && ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) { // Without host vMotion might fail between non-shared storages with error similar to, // https://kb.vmware.com/s/article/1003795 List hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId()); if (CollectionUtils.isNotEmpty(hosts)) { - targetClusterHostGuid = hosts.get(0).getGuid(); + hostGuidInTargetCluster = hosts.get(0).getGuid(); } - if (targetClusterHostGuid == null) { + if (hostGuidInTargetCluster == null) { throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages"); } } else if (ScopeType.CLUSTER.equals(sourceScopeType)) { @@ -216,7 +216,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As , srcData.getTO().getPath() , sourcePool , targetPool - , targetClusterHostGuid); + , hostGuidInTargetCluster); // OfflineVmwareMigration: should be ((StoragePool)srcData.getDataStore()).getHypervisor() but that is NULL, so hardcoding Answer answer; if (hostId != null) { diff --git a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java index ce48d8519b81..c320a7a55377 100644 --- a/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java +++ b/server/src/main/java/com/cloud/hypervisor/HypervisorGuruBase.java @@ -298,7 +298,7 @@ public boolean attachRestoredVolumeToVirtualMachine(long zoneId, String location return false; } - public List finalizeMigrate(VirtualMachine vm, StoragePool destination, Map volumeToPool) { + public List finalizeMigrate(VirtualMachine vm, Map volumeToPool) { return null; } } diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index f96129ae5045..d85ae0e2b625 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -1244,15 +1244,16 @@ public Ternary, Integer>, List, Map, Integer>, List, Map, Integer> allHostsPair = null; List allHosts = null; final Map requiresStorageMotion = new HashMap(); @@ -1293,9 +1290,9 @@ public Ternary, Integer>, List, Map iterator = allHosts.iterator(); 
iterator.hasNext();) { final Host host = iterator.next(); - String hostHypervisorVersion = host.getHypervisorVersion(); - if (HypervisorType.KVM.equals(host.getHypervisorType()) && hostHypervisorVersion == null) { - hostHypervisorVersion = ""; + String hostVersion = host.getHypervisorVersion(); + if (HypervisorType.KVM.equals(host.getHypervisorType()) && hostVersion == null) { + hostVersion = ""; } if (volClusterId != null) { @@ -1308,21 +1305,10 @@ public Ternary, Integer>, List, Map hypervisorCapabilitiesList = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); - if (hypervisorCapabilitiesList != null) { - for (HypervisorCapabilitiesVO hypervisorCapabilities : hypervisorCapabilitiesList) { - if (hypervisorCapabilities.isStorageMotionSupported()) { - hostCapabilities = hypervisorCapabilities; - break; - } - } - } - } - hostSupportsStorageMigration = hostCapabilities != null && hostCapabilities.isStorageMotionSupported(); + boolean hostSupportsStorageMigration = false; + if ((srcHostVersion != null && srcHostVersion.equals(hostVersion)) || + Boolean.TRUE.equals(_hypervisorCapabilitiesDao.isStorageMotionSupported(host.getHypervisorType(), hostVersion))) { + hostSupportsStorageMigration = true; } if (hostSupportsStorageMigration && hasSuitablePoolsForVolume(volume, host, vmProfile)) { requiresStorageMotion.put(host, true); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 6519ce87d1d8..7ba399eae1ca 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -198,7 +198,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.hypervisor.Hypervisor; import com.cloud.hypervisor.Hypervisor.HypervisorType; -import com.cloud.hypervisor.HypervisorCapabilitiesVO; import com.cloud.hypervisor.dao.HypervisorCapabilitiesDao; import com.cloud.hypervisor.kvm.dpdk.DpdkHelper; import com.cloud.network.IpAddressManager; @@ -5534,6 +5533,10 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool, Map volumes = _volsDao.findByInstance(vm.getId()); + for (VolumeVO volume : volumes) { + volumeToPoolIds.put(volume.getId(), destPool.getId()); + } } else if (MapUtils.isNotEmpty(volumeToPool)) { Long poolClusterId = null; for (Map.Entry entry : volumeToPool.entrySet()) { @@ -5557,7 +5560,7 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool, Map lstHypervisorCapabilities = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); - if (lstHypervisorCapabilities != null) { - for (HypervisorCapabilitiesVO hypervisorCapabilities : lstHypervisorCapabilities) { - if (hypervisorCapabilities.isStorageMotionSupported()) { - sourceCapabilities = hypervisorCapabilities; - break; - } - } - } - } - if (sourceCapabilities == null || !sourceCapabilities.isStorageMotionSupported()) { + if (!Boolean.TRUE.equals(_hypervisorCapabilitiesDao.isStorageMotionSupported(srcHost.getHypervisorType(), srcHostVersion))) { throw new CloudRuntimeException("Migration with storage isn't supported for source host ID: " + srcHost.getUuid() + " on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion()); } - if (srcHostVersion != null && !srcHostVersion.equals(destinationHostVersion)) { - HypervisorCapabilitiesVO destinationCapabilities = _hypervisorCapabilitiesDao.findByHypervisorTypeAndVersion(destinationHost.getHypervisorType(), destinationHostVersion); - if 
(destinationCapabilities == null && HypervisorType.KVM.equals(destinationHost.getHypervisorType())) { - List lstHypervisorCapabilities = _hypervisorCapabilitiesDao.listAllByHypervisorType(HypervisorType.KVM); - if (lstHypervisorCapabilities != null) { - for (HypervisorCapabilitiesVO hypervisorCapabilities : lstHypervisorCapabilities) { - if (hypervisorCapabilities.isStorageMotionSupported()) { - destinationCapabilities = hypervisorCapabilities; - break; - } - } - } - } - if (destinationCapabilities == null || !destinationCapabilities.isStorageMotionSupported()) { + if (srcHostVersion == null || !srcHostVersion.equals(destHostVersion)) { + if (!Boolean.TRUE.equals(_hypervisorCapabilitiesDao.isStorageMotionSupported(destinationHost.getHypervisorType(), destHostVersion))) { throw new CloudRuntimeException("Migration with storage isn't supported for target host ID: " + srcHost.getUuid() + " on hypervisor " + srcHost.getHypervisorType() + " of version " + srcHost.getHypervisorVersion()); } } From 6aad7a7600c8264d61a623ef5f4f5d68818131ba Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 6 Oct 2020 14:44:53 +0530 Subject: [PATCH 05/31] review changes Signed-off-by: Abhishek Kumar --- .../main/java/com/cloud/vm/UserVmService.java | 4 +- .../api/command/admin/vm/MigrateVMCmd.java | 2 +- .../MigrateVirtualMachineWithVolumeCmd.java | 2 +- .../agent/api/MigrateVmToPoolCommand.java | 14 +---- .../vmware/resource/VmwareResource.java | 23 ++----- .../java/com/cloud/vm/UserVmManagerImpl.java | 63 ++++++++++--------- 6 files changed, 48 insertions(+), 60 deletions(-) diff --git a/api/src/main/java/com/cloud/vm/UserVmService.java b/api/src/main/java/com/cloud/vm/UserVmService.java index 5e0e176ed51d..eab9c736a377 100644 --- a/api/src/main/java/com/cloud/vm/UserVmService.java +++ b/api/src/main/java/com/cloud/vm/UserVmService.java @@ -486,7 +486,9 @@ VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinationHost, UserVm moveVMToUser(AssignVMCmd moveUserVMCmd) throws ResourceAllocationException, ConcurrentOperationException, ResourceUnavailableException, InsufficientCapacityException; - VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool, Map volumeToPool); + VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool); + + VirtualMachine vmStorageMigration(Long vmId, Map volumeToPool); UserVm restoreVM(RestoreVMCmd cmd) throws InsufficientCapacityException, ResourceUnavailableException; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java index e79f6bc008cd..9f73ae586a08 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVMCmd.java @@ -172,7 +172,7 @@ public void execute() { if (getHostId() != null) { migratedVm = _userVmService.migrateVirtualMachine(getVirtualMachineId(), destinationHost); } else if (getStoragePoolId() != null) { - migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool, null); + migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool); } if (migratedVm != null) { UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm) migratedVm).get(0); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java index de1ce2b86271..e4fa4f10c341 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java @@ -167,7 +167,7 @@ public void execute() { } migratedVm = _userVmService.migrateVirtualMachineWithVolume(getVirtualMachineId(), destinationHost, getVolumeToPool()); } else if (MapUtils.isNotEmpty(migrateVolumeTo)) { - migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), null, getVolumeToPool()); + migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), getVolumeToPool()); } if (migratedVm != null) { UserVmResponse response = _responseGenerator.createUserVmResponse(ResponseView.Full, "virtualmachine", (UserVm)migratedVm).get(0); diff --git a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java index 066afb218230..16e25338d474 100644 --- a/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java +++ b/core/src/main/java/com/cloud/agent/api/MigrateVmToPoolCommand.java @@ -18,7 +18,6 @@ // package com.cloud.agent.api; -import java.util.Collection; import java.util.List; import com.cloud.agent.api.to.StorageFilerTO; @@ -31,7 +30,6 @@ * */ public class MigrateVmToPoolCommand extends Command { - private Collection volumes; private String vmName; private boolean executeInSequence = false; private List> volumeToFilerAsList; @@ -43,24 +41,18 @@ protected MigrateVmToPoolCommand() { /** * * @param vmName the name of the VM to migrate - * @param volumes used to supply feedback on vmware generated names * @param volumeToFilerTo the volume to primary storage pool map to migrate the VM to + * @param hostGuidInTargetCluster GUID of host in target cluster when migrating across clusters * @param executeInSequence */ - public MigrateVmToPoolCommand(String vmName, Collection volumes, - List>volumeToFilerTo, String hostGuidInTargetCluster, - boolean executeInSequence) { + public MigrateVmToPoolCommand(String vmName, List> volumeToFilerTo, + String hostGuidInTargetCluster, boolean executeInSequence) { this.vmName = vmName; - this.volumes = volumes; this.hostGuidInTargetCluster = hostGuidInTargetCluster; this.volumeToFilerAsList = volumeToFilerTo; this.executeInSequence = executeInSequence; } - public Collection getVolumes() { - return volumes; - } - public String getVmName() { return vmName; } diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 3a064f115f44..46395668218b 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4175,8 +4175,9 @@ private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHy try { // OfflineVmwareMigration: getVolumesFromCommand(cmd); Map volumeDeviceKey = new HashMap<>(); - if (CollectionUtils.isEmpty(volToFiler)) { // Else device keys will be found in relocateVirtualMachine - volumeDeviceKey = getVolumesFromCommand(vmMo, cmd); + if (cmd instanceof MigrateVolumeCommand) { // Else device keys will be found in relocateVirtualMachine + MigrateVolumeCommand mcmd = 
(MigrateVolumeCommand) cmd; + addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId()); if (s_logger.isTraceEnabled()) { for (Integer diskId: volumeDeviceKey.keySet()) { s_logger.trace(String.format("Disk to migrate has disk id %d and volumeId %d", diskId, volumeDeviceKey.get(diskId))); @@ -4184,7 +4185,7 @@ private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHy } } List volumeToList = relocateVirtualMachine(hyperHost, vmMo.getName(), null, null, hostInTargetCluster, poolUuid, volToFiler); - return createAnswerForCmd(vmMo, poolUuid, volumeToList, cmd, volumeDeviceKey); + return createAnswerForCmd(vmMo, volumeToList, cmd, volumeDeviceKey); } catch (Exception e) { String msg = "Change data store for VM " + vmMo.getVmName() + " failed"; s_logger.error(msg + ": " + e.getLocalizedMessage()); @@ -4192,7 +4193,7 @@ private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHy } } - Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, List volumeObjectToList, Command cmd, Map volumeDeviceKey) throws Exception { + Answer createAnswerForCmd(VirtualMachineMO vmMo, List volumeObjectToList, Command cmd, Map volumeDeviceKey) throws Exception { List volumeToList = new ArrayList<>(); VirtualMachineDiskInfoBuilder diskInfoBuilder = vmMo.getDiskInfoBuilder(); VirtualDisk[] disks = vmMo.getAllDiskDevice(); @@ -4213,20 +4214,6 @@ Answer createAnswerForCmd(VirtualMachineMO vmMo, String poolUuid, List getVolumesFromCommand(VirtualMachineMO vmMo, Command cmd) throws Exception { - Map volumeDeviceKey = new HashMap(); - if (cmd instanceof MigrateVmToPoolCommand) { - MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand) cmd; - for (VolumeTO volume : mcmd.getVolumes()) { - addVolumeDiskmapping(vmMo, volumeDeviceKey, volume.getPath(), volume.getId()); - } - } else if (cmd instanceof MigrateVolumeCommand) { - MigrateVolumeCommand mcmd = (MigrateVolumeCommand) cmd; - addVolumeDiskmapping(vmMo, volumeDeviceKey, mcmd.getVolumePath(), mcmd.getVolumeId()); - } - return volumeDeviceKey; - } - private void addVolumeDiskmapping(VirtualMachineMO vmMo, Map volumeDeviceKey, String volumePath, long volumeId) throws Exception { if (s_logger.isDebugEnabled()) { s_logger.debug(String.format("locating disk for volume (%d) using path %s", volumeId, volumePath)); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 7ba399eae1ca..ac73b1170afb 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -5492,8 +5492,7 @@ public UserVm getUserVm(long vmId) { return _vmDao.findById(vmId); } - @Override - public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool, Map volumeToPool) { + private VMInstanceVO preVmStorageMigrationCheck(Long vmId) { // access check - only root admin can migrate VM Account caller = CallContext.current().getCallingAccount(); if (!_accountMgr.isRootAdmin(caller.getId())) { @@ -5529,40 +5528,48 @@ public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool, Map volumeToPoolIds = new HashMap<>(); - - if (destPool != null) { - checkDestinationHypervisorType(destPool, vm); - List volumes = _volsDao.findByInstance(vm.getId()); - for (VolumeVO volume : volumes) { - volumeToPoolIds.put(volume.getId(), destPool.getId()); - } - } else if (MapUtils.isNotEmpty(volumeToPool)) { - Long poolClusterId = null; - for (Map.Entry entry : volumeToPool.entrySet()) { 
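            // Context for the block being removed here (a hedged reading, grounded in
            // the overloads added at the end of this hunk): the loop is moved, not
            // dropped. It resolves the caller's volume-UUID -> pool-UUID map into
            // volume-id -> pool-id pairs and enforces that every HOST- or
            // CLUSTER-scoped destination pool lies in one and the same cluster, while
            // zone-wide pools are exempt. For example, {vol-1 -> pool-in-cluster-10,
            // vol-2 -> pool-in-cluster-11} should be rejected, whereas
            // {vol-1 -> pool-in-cluster-10, vol-2 -> zone-wide-pool} should pass
            // (volume and pool names are illustrative, not from this series). Note
            // that the scope test as written below is inverted; PATCH 07/31 later in
            // this series flips it to the form described here.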
- Volume volume = _volsDao.findByUuid(entry.getKey()); - StoragePoolVO pool = _storagePoolDao.findPoolByUUID(entry.getValue()); - if (poolClusterId != null && - !(ScopeType.CLUSTER.equals(pool.getScope()) || ScopeType.HOST.equals(pool.getScope())) && - !poolClusterId.equals(pool.getClusterId())) { - throw new InvalidParameterValueException("VM's disk cannot be migrated, input destination storage pools belong to different clusters"); - } - if (pool.getClusterId() != null) { - poolClusterId = pool.getClusterId(); - } - checkDestinationHypervisorType(pool, vm); - volumeToPoolIds.put(volume.getId(), pool.getId()); - } - } - // Check that Vm does not have VM Snapshots if (_vmSnapshotDao.findByVm(vmId).size() > 0) { throw new InvalidParameterValueException("VM's disk cannot be migrated, please remove all the VM Snapshots for this VM"); } + return vm; + } + + @Override + public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { + VMInstanceVO vm = preVmStorageMigrationCheck(vmId); + Map volumeToPoolIds = new HashMap<>(); + checkDestinationHypervisorType(destPool, vm); + List volumes = _volsDao.findByInstance(vm.getId()); + for (VolumeVO volume : volumes) { + volumeToPoolIds.put(volume.getId(), destPool.getId()); + } _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); return _vmDao.findById(vm.getId()); + } + @Override + public VirtualMachine vmStorageMigration(Long vmId, Map volumeToPool) { + VMInstanceVO vm = preVmStorageMigrationCheck(vmId); + Map volumeToPoolIds = new HashMap<>(); + Long poolClusterId = null; + for (Map.Entry entry : volumeToPool.entrySet()) { + Volume volume = _volsDao.findByUuid(entry.getKey()); + StoragePoolVO pool = _storagePoolDao.findPoolByUUID(entry.getValue()); + if (poolClusterId != null && + !(ScopeType.CLUSTER.equals(pool.getScope()) || ScopeType.HOST.equals(pool.getScope())) && + !poolClusterId.equals(pool.getClusterId())) { + throw new InvalidParameterValueException("VM's disk cannot be migrated, input destination storage pools belong to different clusters"); + } + if (pool.getClusterId() != null) { + poolClusterId = pool.getClusterId(); + } + checkDestinationHypervisorType(pool, vm); + volumeToPoolIds.put(volume.getId(), pool.getId()); + } + _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); + return _vmDao.findById(vm.getId()); } private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) { From 341a69fe389e5925fcd2d23dbf73c175d5502a1c Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 6 Oct 2020 16:52:36 +0530 Subject: [PATCH 06/31] fix Signed-off-by: Abhishek Kumar --- .../src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index c833ed402756..2f03aebbbdcb 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -1112,7 +1112,7 @@ private VirtualDisk getAttachedDisk(VirtualMachineMO vmMo, String diskPath) thro throw new CloudRuntimeException("Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different clusters without shared storages"); } } - MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), vols, + MigrateVmToPoolCommand 
migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), volumeToFilerTo, hostInTargetCluster == null ? null : hostInTargetCluster.getGuid(), true); commands.add(migrateVmToPoolCommand); From 503ae44e18fca578d4733bdbc50a9db789fa15e3 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 7 Oct 2020 18:08:27 +0530 Subject: [PATCH 07/31] fixes Signed-off-by: Abhishek Kumar --- .../vmware/resource/VmwareResource.java | 38 +++++-------------- .../motion/VmwareStorageMotionStrategy.java | 25 ++++++------ .../java/com/cloud/vm/UserVmManagerImpl.java | 2 +- .../hypervisor/vmware/util/VmwareHelper.java | 14 +++++++ 4 files changed, 35 insertions(+), 44 deletions(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 46395668218b..0819eb539c3b 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4155,7 +4155,6 @@ protected Answer execute(MigrateVmToPoolCommand cmd) { private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHypervisorHost hyperHost, Command cmd) throws Exception { String hostNameInTargetCluster = null; - VmwareHypervisorHost hostInTargetCluster = null; List> volToFiler = new ArrayList<>(); if (cmd instanceof MigrateVmToPoolCommand) { MigrateVmToPoolCommand mcmd = (MigrateVmToPoolCommand)cmd; @@ -4164,13 +4163,8 @@ private Answer migrateAndAnswer(VirtualMachineMO vmMo, String poolUuid, VmwareHy } else if (cmd instanceof MigrateVolumeCommand) { hostNameInTargetCluster = ((MigrateVolumeCommand)cmd).getHostGuidInTargetCluster(); } - if (StringUtils.isNotBlank(hostNameInTargetCluster)) { - String hostInTargetClusterMorInfo = hostNameInTargetCluster.split("@")[0]; - ManagedObjectReference morHostInTargetCluster = new ManagedObjectReference(); - morHostInTargetCluster.setType(hostInTargetClusterMorInfo.split(":")[0]); - morHostInTargetCluster.setValue(hostInTargetClusterMorInfo.split(":")[1]); - hostInTargetCluster = new HostMO(getServiceContext(), morHostInTargetCluster); - } + VmwareHypervisorHost hostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(), + hostNameInTargetCluster); try { // OfflineVmwareMigration: getVolumesFromCommand(cmd); @@ -4352,17 +4346,10 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { ManagedObjectReference morSourceDs = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getSourcePool().getUuid()); DatastoreMO sourceDsMo = new DatastoreMO(hyperHost.getContext(), morSourceDs); String targetDsName = cmd.getTargetPool().getUuid(); - String hostNameInTargetCluster = cmd.getHostGuidInTargetCluster(); - VmwareHypervisorHost hostInTargetCluster = null; - if (StringUtils.isNotBlank(hostNameInTargetCluster)) { - String hostInTargetClusterMorInfo = hostNameInTargetCluster.split("@")[0]; - ManagedObjectReference morHostInTargetCluster = new ManagedObjectReference(); - morHostInTargetCluster.setType(hostInTargetClusterMorInfo.split(":")[0]); - morHostInTargetCluster.setValue(hostInTargetClusterMorInfo.split(":")[1]); - hostInTargetCluster = new HostMO(getServiceContext(), morHostInTargetCluster); - } + VmwareHypervisorHost hostInTargetCluster = VmwareHelper.getHostMOFromHostName(getServiceContext(), + cmd.getHostGuidInTargetCluster()); VmwareHypervisorHost 
dsHost = hostInTargetCluster == null ? hyperHost : hostInTargetCluster; - ManagedObjectReference morTargetDS = getTargetDatastoreMOReference(targetDsName, hyperHost); + ManagedObjectReference morTargetDS = getTargetDatastoreMOReference(targetDsName, dsHost); if(morTargetDS == null) { String msg = "Unable to find the target datastore: " + targetDsName + " on host: " + dsHost.getHyperHostName(); s_logger.error(msg); @@ -6979,30 +6966,23 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h VmwareHypervisorHost targetHyperHost = hostInTargetCluster; VirtualMachineMO vmMo = null; ManagedObjectReference morSourceHostDc = null; - ManagedObjectReference morTargetHostDc = null; - ManagedObjectReference morTargetHost = new ManagedObjectReference(); VirtualMachineRelocateSpec relocateSpec = new VirtualMachineRelocateSpec(); List diskLocators = new ArrayList(); Set mountedDatastoresAtSource = new HashSet(); List volumeToList = new ArrayList<>(); Map volumeDeviceKey = new HashMap(); - if (StringUtils.isNotBlank(targetHost)) { - String targetHostMorInfo = targetHost.split("@")[0]; - morTargetHost.setType(targetHostMorInfo.split(":")[0]); - morTargetHost.setValue(targetHostMorInfo.split(":")[1]); - } try { if (sourceHyperHost == null) { sourceHyperHost = getHyperHost(getServiceContext()); } if (targetHyperHost == null && StringUtils.isNotBlank(targetHost)) { - targetHyperHost = new HostMO(getServiceContext(), morTargetHost); + targetHyperHost = VmwareHelper.getHostMOFromHostName(getServiceContext(), targetHost); } morSourceHostDc = sourceHyperHost.getHyperHostDatacenter(); DatacenterMO dcMo = new DatacenterMO(sourceHyperHost.getContext(), morSourceHostDc); if (targetHyperHost != null) { - morTargetHostDc = targetHyperHost.getHyperHostDatacenter(); + ManagedObjectReference morTargetHostDc = targetHyperHost.getHyperHostDatacenter(); if (!morSourceHostDc.getValue().equalsIgnoreCase(morTargetHostDc.getValue())) { String msg = "VM " + vmName + " cannot be migrated between different datacenter"; throw new CloudRuntimeException(msg); @@ -7093,12 +7073,12 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h } // Specific section for MigrateVmWithStorageCommand - if (vmTo != null) { + if (vmTo != null && targetHyperHost != null) { // Prepare network at target before migration NicTO[] nics = vmTo.getNics(); for (NicTO nic : nics) { // prepare network on the host - prepareNetworkFromNicInfo(new HostMO(getServiceContext(), morTargetHost), nic, false, vmTo.getType()); + prepareNetworkFromNicInfo((HostMO)targetHyperHost, nic, false, vmTo.getType()); } // Ensure secondary storage mounted on target host VmwareManager mgr = targetHyperHost.getContext().getStockObject(VmwareManager.CONTEXT_STOCK_NAME); diff --git a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java index 758d94588045..51264d9a09a3 100644 --- a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java +++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java @@ -191,20 +191,17 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As Long hostId = null; String hostGuidInTargetCluster = null; // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM - if 
(ScopeType.CLUSTER.equals(sourceScopeType) && ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) { - // Without host vMotion might fail between non-shared storages with error similar to, - // https://kb.vmware.com/s/article/1003795 - List hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId()); - if (CollectionUtils.isNotEmpty(hosts)) { - hostGuidInTargetCluster = hosts.get(0).getGuid(); - } - if (hostGuidInTargetCluster == null) { - throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages"); - } - } else if (ScopeType.CLUSTER.equals(sourceScopeType)) { - hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId()); - if (hostId == null) { - throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName()); + if (ScopeType.CLUSTER.equals(sourceScopeType)) { + if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) { + // Without host vMotion might fail between non-shared storages with error similar to, + // https://kb.vmware.com/s/article/1003795 + List hosts = hostDao.findHypervisorHostInCluster(targetPool.getClusterId()); + if (CollectionUtils.isNotEmpty(hosts)) { + hostGuidInTargetCluster = hosts.get(0).getGuid(); + } + if (hostGuidInTargetCluster == null) { + throw new CloudRuntimeException("Offline Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different cluster without shared storages"); + } } } else if (ScopeType.CLUSTER.equals(targetScopeType)) { hostId = findSuitableHostIdForWorkerVmPlacement(targetPool.getClusterId()); diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index ac73b1170afb..1cc8f8518d90 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -5558,7 +5558,7 @@ public VirtualMachine vmStorageMigration(Long vmId, Map volumeTo Volume volume = _volsDao.findByUuid(entry.getKey()); StoragePoolVO pool = _storagePoolDao.findPoolByUUID(entry.getValue()); if (poolClusterId != null && - !(ScopeType.CLUSTER.equals(pool.getScope()) || ScopeType.HOST.equals(pool.getScope())) && + (ScopeType.CLUSTER.equals(pool.getScope()) || ScopeType.HOST.equals(pool.getScope())) && !poolClusterId.equals(pool.getClusterId())) { throw new InvalidParameterValueException("VM's disk cannot be migrated, input destination storage pools belong to different clusters"); } diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java index 181b2ef183f6..0f476ecb2f13 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/util/VmwareHelper.java @@ -812,4 +812,18 @@ public static XMLGregorianCalendar getXMLGregorianCalendar(final Date date, fina return DatatypeFactory.newInstance().newXMLGregorianCalendar(gregorianCalendar); } + public static HostMO getHostMOFromHostName(final VmwareContext context, final String hostName) { + HostMO host = null; + if (com.cloud.utils.StringUtils.isNotBlank(hostName) && 
hostName.contains("@")) { + String hostMorInfo = hostName.split("@")[0]; + if (hostMorInfo.contains(":")) { + ManagedObjectReference morHost = new ManagedObjectReference(); + morHost.setType(hostMorInfo.split(":")[0]); + morHost.setValue(hostMorInfo.split(":")[1]); + host = new HostMO(context, morHost); + } + } + return host; + } + } From 45ad746bdc749ea508a970893a5ee965ad659773 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 13 Nov 2020 15:04:38 +0530 Subject: [PATCH 08/31] fix volume migration Signed-off-by: Abhishek Kumar --- .../vmware/resource/VmwareResource.java | 28 +++++++++---------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 5b311028c36c..03408e523921 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -48,11 +48,12 @@ import javax.naming.ConfigurationException; import javax.xml.datatype.XMLGregorianCalendar; -import com.cloud.agent.api.to.DataTO; -import com.cloud.agent.api.to.DeployAsIsInfoTO; -import com.cloud.agent.api.ValidateVcenterDetailsCommand; import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.storage.command.CopyCommand; +import org.apache.cloudstack.storage.command.StorageSubSystemCommand; import org.apache.cloudstack.storage.configdrive.ConfigDrive; +import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; +import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; import org.apache.cloudstack.storage.to.TemplateObjectTO; import org.apache.cloudstack.storage.to.VolumeObjectTO; import org.apache.cloudstack.utils.volume.VirtualMachineDiskInfo; @@ -166,6 +167,7 @@ import com.cloud.agent.api.UpgradeSnapshotCommand; import com.cloud.agent.api.ValidateSnapshotAnswer; import com.cloud.agent.api.ValidateSnapshotCommand; +import com.cloud.agent.api.ValidateVcenterDetailsCommand; import com.cloud.agent.api.VmDiskStatsEntry; import com.cloud.agent.api.VmStatsEntry; import com.cloud.agent.api.VolumeStatsEntry; @@ -182,12 +184,13 @@ import com.cloud.agent.api.storage.DestroyCommand; import com.cloud.agent.api.storage.MigrateVolumeAnswer; import com.cloud.agent.api.storage.MigrateVolumeCommand; -import com.cloud.agent.api.to.deployasis.OVFPropertyTO; import com.cloud.agent.api.storage.PrimaryStorageDownloadAnswer; import com.cloud.agent.api.storage.PrimaryStorageDownloadCommand; import com.cloud.agent.api.storage.ResizeVolumeAnswer; import com.cloud.agent.api.storage.ResizeVolumeCommand; import com.cloud.agent.api.to.DataStoreTO; +import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DeployAsIsInfoTO; import com.cloud.agent.api.to.DiskTO; import com.cloud.agent.api.to.IpAddressTO; import com.cloud.agent.api.to.NfsTO; @@ -195,6 +198,7 @@ import com.cloud.agent.api.to.StorageFilerTO; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.agent.api.to.VolumeTO; +import com.cloud.agent.api.to.deployasis.OVFPropertyTO; import com.cloud.agent.resource.virtualnetwork.VRScripts; import com.cloud.agent.resource.virtualnetwork.VirtualRouterDeployer; import com.cloud.agent.resource.virtualnetwork.VirtualRoutingResource; @@ -223,8 +227,8 @@ import com.cloud.hypervisor.vmware.mo.HypervisorHostHelper; import 
com.cloud.hypervisor.vmware.mo.NetworkDetails; import com.cloud.hypervisor.vmware.mo.PbmProfileManagerMO; -import com.cloud.hypervisor.vmware.mo.TaskMO; import com.cloud.hypervisor.vmware.mo.StoragepodMO; +import com.cloud.hypervisor.vmware.mo.TaskMO; import com.cloud.hypervisor.vmware.mo.VirtualEthernetCardType; import com.cloud.hypervisor.vmware.mo.VirtualMachineDiskInfoBuilder; import com.cloud.hypervisor.vmware.mo.VirtualMachineMO; @@ -293,7 +297,6 @@ import com.vmware.vim25.HostPortGroupSpec; import com.vmware.vim25.ManagedObjectReference; import com.vmware.vim25.NasDatastoreInfo; -import com.vmware.vim25.VirtualMachineDefinedProfileSpec; import com.vmware.vim25.ObjectContent; import com.vmware.vim25.OptionValue; import com.vmware.vim25.PerfCounterInfo; @@ -328,6 +331,7 @@ import com.vmware.vim25.VirtualIDEController; import com.vmware.vim25.VirtualMachineBootOptions; import com.vmware.vim25.VirtualMachineConfigSpec; +import com.vmware.vim25.VirtualMachineDefinedProfileSpec; import com.vmware.vim25.VirtualMachineFileInfo; import com.vmware.vim25.VirtualMachineFileLayoutEx; import com.vmware.vim25.VirtualMachineFileLayoutExFileInfo; @@ -347,10 +351,6 @@ import com.vmware.vim25.VmConfigSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; -import org.apache.cloudstack.storage.command.CopyCommand; -import org.apache.cloudstack.storage.command.StorageSubSystemCommand; -import org.apache.cloudstack.storage.resource.NfsSecondaryStorageResource; -import org.apache.cloudstack.storage.to.PrimaryDataStoreTO; public class VmwareResource implements StoragePoolResource, ServerResource, VmwareHostService, VirtualRouterDeployer { private static final Logger s_logger = Logger.getLogger(VmwareResource.class); @@ -4636,7 +4636,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { // OfflineVmwareMigration: 3. 
attach the disk to the worker String vmdkFileName = path + VMDK_EXTENSION; vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(sourceDsMo, vmdkFileName); - if (!destinationDsMo.fileExists(vmdkDataStorePath)) { + if (!sourceDsMo.fileExists(vmdkDataStorePath)) { if (s_logger.isDebugEnabled()) { s_logger.debug(String.format("path not found (%s), trying under '%s'", vmdkFileName, path)); } @@ -4699,8 +4699,6 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { try { // OfflineVmwareMigration: worker *may* have been renamed vmName = vmMo.getVmName(); - morSourceDS = HypervisorHostHelper.findDatastoreWithBackwardsCompatibility(hyperHost, cmd.getTargetPool().getUuid()); - destinationDsMo = new DatastoreMO(hyperHost.getContext(), morSourceDS); s_logger.info("Dettaching disks before destroying worker VM '" + vmName + "' after volume migration"); VirtualDisk[] disks = vmMo.getAllDiskDevice(); String format = "disk %d was migrated to %s"; @@ -4722,8 +4720,8 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { String newPath = ((MigrateVolumeAnswer) answer).getVolumePath(); String vmdkFileName = newPath + VMDK_EXTENSION; try { - VmwareStorageLayoutHelper.syncVolumeToRootFolder(sourceDsMo.getOwnerDatacenter().first(), sourceDsMo, newPath, vmName); - vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(sourceDsMo, vmdkFileName); + VmwareStorageLayoutHelper.syncVolumeToRootFolder(destinationDsMo.getOwnerDatacenter().first(), destinationDsMo, newPath, vmName); + vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(destinationDsMo, vmdkFileName); if (!sourceDsMo.fileExists(vmdkDataStorePath)) { String msg = String.format("Migration of volume '%s' failed; file (%s) not found as path '%s'", cmd.getVolumePath(), vmdkFileName, vmdkDataStorePath); From 39683d8998c106a8b640d64021ec2a47cfdfe7d5 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 13 Nov 2020 15:05:47 +0530 Subject: [PATCH 09/31] fix Signed-off-by: Abhishek Kumar --- .../com/cloud/hypervisor/vmware/resource/VmwareResource.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 03408e523921..79a9b31182cb 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4723,7 +4723,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { VmwareStorageLayoutHelper.syncVolumeToRootFolder(destinationDsMo.getOwnerDatacenter().first(), destinationDsMo, newPath, vmName); vmdkDataStorePath = VmwareStorageLayoutHelper.getLegacyDatastorePathFromVmdkFileName(destinationDsMo, vmdkFileName); - if (!sourceDsMo.fileExists(vmdkDataStorePath)) { + if (!destinationDsMo.fileExists(vmdkDataStorePath)) { String msg = String.format("Migration of volume '%s' failed; file (%s) not found as path '%s'", cmd.getVolumePath(), vmdkFileName, vmdkDataStorePath); s_logger.error(msg); answer = new Answer(cmd, false, msg); From 74f85ba25e7daf1803fee090b3b73aa8ed203732 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 13 Nov 2020 17:15:02 +0530 Subject: [PATCH 10/31] fix datastore type getter Signed-off-by: Abhishek Kumar --- .../cloud/hypervisor/vmware/mo/DatastoreMO.java | 15 
++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java index 804af6286d10..432a1de55085 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/DatastoreMO.java @@ -16,6 +16,13 @@ // under the License. package com.cloud.hypervisor.vmware.mo; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.log4j.Logger; + import com.cloud.exception.CloudException; import com.cloud.hypervisor.vmware.util.VmwareContext; import com.cloud.utils.Pair; @@ -34,12 +41,6 @@ import com.vmware.vim25.PropertySpec; import com.vmware.vim25.SelectionSpec; import com.vmware.vim25.TraversalSpec; -import org.apache.log4j.Logger; - -import java.util.ArrayList; -import java.util.List; - -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; public class DatastoreMO extends BaseMO { private static final Logger s_logger = Logger.getLogger(DatastoreMO.class); @@ -459,6 +460,6 @@ public boolean isDatastoreStoragePolicyComplaint(String storagePolicyId) throws public String getDatastoreType() throws Exception { DatastoreSummary summary = _context.getVimClient().getDynamicProperty(getMor(), "summary"); - return summary.getType(); + return summary.getType() == null ? "" : summary.getType(); } } From 9b1e0f67bd5e6de1ef424c924378c9a18cdff0e6 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 17 Nov 2020 11:57:05 +0530 Subject: [PATCH 11/31] fix Signed-off-by: Abhishek Kumar --- .../cloud/hypervisor/vmware/resource/VmwareResource.java | 2 +- .../storage/motion/VmwareStorageMotionStrategy.java | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 79a9b31182cb..b4267ca05331 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -7399,7 +7399,7 @@ private List relocateVirtualMachine(final VmwareHypervisorHost h } // Specific section for MigrateVmWithStorageCommand - if (vmTo != null && targetHyperHost != null) { + if (vmTo != null) { // Prepare network at target before migration NicTO[] nics = vmTo.getNics(); for (NicTO nic : nics) { diff --git a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java index 5163493416d9..8d08c388823f 100644 --- a/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java +++ b/plugins/hypervisors/vmware/src/main/java/org/apache/cloudstack/storage/motion/VmwareStorageMotionStrategy.java @@ -193,8 +193,12 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As ScopeType targetScopeType = destData.getDataStore().getScope().getScopeType(); Long hostId = null; String hostGuidInTargetCluster = null; - // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM if 
(ScopeType.CLUSTER.equals(sourceScopeType)) { + // Find Volume source cluster and select any Vmware hypervisor host to attach worker VM + hostId = findSuitableHostIdForWorkerVmPlacement(sourcePool.getClusterId()); + if (hostId == null) { + throw new CloudRuntimeException("Offline Migration failed, unable to find suitable host for worker VM placement in the cluster of storage pool: " + sourcePool.getName()); + } if (ScopeType.CLUSTER.equals(targetScopeType) && !sourcePool.getClusterId().equals(targetPool.getClusterId())) { // Without host vMotion might fail between non-shared storages with error similar to, // https://kb.vmware.com/s/article/1003795 From d409b3b97f5a97f185da2b10d3db792b4a0f7a40 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 18 Nov 2020 14:54:02 +0530 Subject: [PATCH 12/31] log Signed-off-by: Abhishek Kumar --- .../com/cloud/hypervisor/vmware/resource/VmwareResource.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index b4267ca05331..92ebc4bd44ea 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4627,6 +4627,10 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { s_logger.info("Create worker VM " + vmName); // OfflineVmwareMigration: 2. create the worker with access to the data(store) vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName, null); + HostMO h = new HostMO(dsHost.getContext(), dsHost.getMor()); + s_logger.info("Log4321 - Created worker VM " + vmMo.getVirtualHardwareVersion() + + " host: " + vmMo.getRunningHost().getHostAboutInfo().getVersion() + " host API: " + vmMo.getRunningHost().getHostAboutInfo().getApiVersion() + + " DShost: " + h.getHostAboutInfo().getVersion() + " DShost API: " + h.getHostAboutInfo().getApiVersion()); if (vmMo == null) { // OfflineVmwareMigration: don't throw a general Exception but think of a specific one throw new CloudRuntimeException("Unable to create a worker VM for volume operation"); From 2039b5589c25766f702212ed290bf12536a73a46 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 18 Nov 2020 16:24:04 +0530 Subject: [PATCH 13/31] get host hardware version Signed-off-by: Abhishek Kumar --- .../vmware/resource/VmwareResource.java | 14 ++-- .../vmware/mo/HypervisorHostHelper.java | 68 +++++++++++++------ 2 files changed, 57 insertions(+), 25 deletions(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java index 92ebc4bd44ea..6019d72dbc74 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/vmware/resource/VmwareResource.java @@ -4618,6 +4618,14 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { isvVolsInvolved = true; vmName = getWorkerName(getServiceContext(), cmd, 0, destinationDsMo); } + String hardwareVersion = null; + if (hostInTargetCluster != null) { + Integer sourceHardwareVersion = HypervisorHostHelper.getHostHardwareVersion(hyperHost); + Integer destinationHardwareVersion = HypervisorHostHelper.getHostHardwareVersion(dsHost); 
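            // Rationale for the Math.min() in the lines that follow (a hedged
            // reading, consistent with VMware virtual hardware compatibility rules):
            // virtual hardware is only forward-compatible, so a worker VM created for
            // a cross-cluster volume migration must not use a hardware version newer
            // than the older of the two hosts supports. Pinning to the minimum, e.g.
            // min(13, 15) -> "13", keeps the worker runnable on both the source and
            // destination hosts (the concrete version numbers are illustrative).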
+ if (sourceHardwareVersion != null && destinationHardwareVersion != null && !sourceHardwareVersion.equals(destinationHardwareVersion)) { + hardwareVersion = String.valueOf(Math.min(sourceHardwareVersion, destinationHardwareVersion)); + } + } // OfflineVmwareMigration: refactor for re-use // OfflineVmwareMigration: 1. find data(store) @@ -4626,11 +4634,7 @@ private Answer migrateVolume(MigrateVolumeCommand cmd) { s_logger.info("Create worker VM " + vmName); // OfflineVmwareMigration: 2. create the worker with access to the data(store) - vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName, null); - HostMO h = new HostMO(dsHost.getContext(), dsHost.getMor()); - s_logger.info("Log4321 - Created worker VM " + vmMo.getVirtualHardwareVersion() + - " host: " + vmMo.getRunningHost().getHostAboutInfo().getVersion() + " host API: " + vmMo.getRunningHost().getHostAboutInfo().getApiVersion() + - " DShost: " + h.getHostAboutInfo().getVersion() + " DShost API: " + h.getHostAboutInfo().getApiVersion()); + vmMo = HypervisorHostHelper.createWorkerVM(hyperHost, sourceDsMo, vmName, hardwareVersion); if (vmMo == null) { // OfflineVmwareMigration: don't throw a general Exception but think of a specific one throw new CloudRuntimeException("Unable to create a worker VM for volume operation"); diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 37d97aeaf313..006ef79745a5 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -18,6 +18,7 @@ import java.io.ByteArrayInputStream; import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.io.StringWriter; import java.net.URI; @@ -28,6 +29,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.UUID; import javax.xml.parsers.DocumentBuilderFactory; import javax.xml.parsers.ParserConfigurationException; @@ -37,17 +39,6 @@ import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; -import com.vmware.vim25.ConcurrentAccessFaultMsg; -import com.vmware.vim25.DuplicateNameFaultMsg; -import com.vmware.vim25.FileFaultFaultMsg; -import com.vmware.vim25.InsufficientResourcesFaultFaultMsg; -import com.vmware.vim25.InvalidDatastoreFaultMsg; -import com.vmware.vim25.InvalidNameFaultMsg; -import com.vmware.vim25.InvalidStateFaultMsg; -import com.vmware.vim25.OutOfBoundsFaultMsg; -import com.vmware.vim25.RuntimeFaultFaultMsg; -import com.vmware.vim25.TaskInProgressFaultMsg; -import com.vmware.vim25.VmConfigFaultFaultMsg; import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; @@ -80,19 +71,20 @@ import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.net.NetUtils; import com.cloud.utils.nicira.nvp.plugin.NiciraNvpApiVersion; -import com.vmware.vim25.OvfCreateDescriptorParams; -import com.vmware.vim25.OvfCreateDescriptorResult; import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.BoolPolicy; -import com.vmware.vim25.CustomFieldStringValue; import com.vmware.vim25.ClusterConfigInfoEx; -import com.vmware.vim25.DatacenterConfigInfo; +import com.vmware.vim25.ConcurrentAccessFaultMsg; +import com.vmware.vim25.CustomFieldStringValue; import 
com.vmware.vim25.DVPortSetting; import com.vmware.vim25.DVPortgroupConfigInfo; import com.vmware.vim25.DVPortgroupConfigSpec; import com.vmware.vim25.DVSSecurityPolicy; import com.vmware.vim25.DVSTrafficShapingPolicy; +import com.vmware.vim25.DatacenterConfigInfo; +import com.vmware.vim25.DuplicateNameFaultMsg; import com.vmware.vim25.DynamicProperty; +import com.vmware.vim25.FileFaultFaultMsg; import com.vmware.vim25.HostNetworkSecurityPolicy; import com.vmware.vim25.HostNetworkTrafficShapingPolicy; import com.vmware.vim25.HostPortGroup; @@ -101,6 +93,10 @@ import com.vmware.vim25.HttpNfcLeaseDeviceUrl; import com.vmware.vim25.HttpNfcLeaseInfo; import com.vmware.vim25.HttpNfcLeaseState; +import com.vmware.vim25.InsufficientResourcesFaultFaultMsg; +import com.vmware.vim25.InvalidDatastoreFaultMsg; +import com.vmware.vim25.InvalidNameFaultMsg; +import com.vmware.vim25.InvalidStateFaultMsg; import com.vmware.vim25.LocalizedMethodFault; import com.vmware.vim25.LongPolicy; import com.vmware.vim25.ManagedObjectReference; @@ -108,11 +104,16 @@ import com.vmware.vim25.NumericRange; import com.vmware.vim25.ObjectContent; import com.vmware.vim25.OptionValue; +import com.vmware.vim25.OutOfBoundsFaultMsg; +import com.vmware.vim25.OvfCreateDescriptorParams; +import com.vmware.vim25.OvfCreateDescriptorResult; import com.vmware.vim25.OvfCreateImportSpecParams; import com.vmware.vim25.OvfCreateImportSpecResult; -import com.vmware.vim25.OvfFileItem; import com.vmware.vim25.OvfFile; +import com.vmware.vim25.OvfFileItem; import com.vmware.vim25.ParaVirtualSCSIController; +import com.vmware.vim25.RuntimeFaultFaultMsg; +import com.vmware.vim25.TaskInProgressFaultMsg; import com.vmware.vim25.VMwareDVSConfigSpec; import com.vmware.vim25.VMwareDVSPortSetting; import com.vmware.vim25.VMwareDVSPortgroupPolicy; @@ -121,25 +122,24 @@ import com.vmware.vim25.VirtualBusLogicController; import com.vmware.vim25.VirtualController; import com.vmware.vim25.VirtualDevice; -import com.vmware.vim25.VirtualDisk; import com.vmware.vim25.VirtualDeviceConfigSpec; import com.vmware.vim25.VirtualDeviceConfigSpecOperation; +import com.vmware.vim25.VirtualDisk; import com.vmware.vim25.VirtualIDEController; import com.vmware.vim25.VirtualLsiLogicController; import com.vmware.vim25.VirtualLsiLogicSASController; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualMachineFileInfo; import com.vmware.vim25.VirtualMachineGuestOsIdentifier; +import com.vmware.vim25.VirtualMachineImportSpec; import com.vmware.vim25.VirtualMachineVideoCard; import com.vmware.vim25.VirtualSCSIController; import com.vmware.vim25.VirtualSCSISharing; -import com.vmware.vim25.VirtualMachineImportSpec; +import com.vmware.vim25.VmConfigFaultFaultMsg; import com.vmware.vim25.VmwareDistributedVirtualSwitchPvlanSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchTrunkVlanSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanIdSpec; import com.vmware.vim25.VmwareDistributedVirtualSwitchVlanSpec; -import java.io.FileWriter; -import java.util.UUID; public class HypervisorHostHelper { private static final Logger s_logger = Logger.getLogger(HypervisorHostHelper.class); @@ -2207,4 +2207,32 @@ public static void createBaseFolderInDatastore(DatastoreMO dsMo, VmwareHyperviso dsMo.makeDirectory(hiddenFolderPath, hyperHost.getHyperHostDatacenter()); } } + + public static Integer getHostHardwareVersion(VmwareHypervisorHost host) { + Integer version = null; + HostMO hostMo = new HostMO(host.getContext(), host.getMor()); + String 
hostApiVersion = ""; + try { + hostApiVersion = hostMo.getHostAboutInfo().getApiVersion(); + } catch (Exception ignored) {} + if (hostApiVersion == null) { + hostApiVersion = ""; + } + if (hostApiVersion.equalsIgnoreCase("6.5")) { + version = 13; + } else if (hostApiVersion.equalsIgnoreCase("6.0")) { + version = 11; + } else if (hostApiVersion.equalsIgnoreCase("5.5")) { + version = 10; + } else if (hostApiVersion.equalsIgnoreCase("5.1")) { + version = 9; + } else if (hostApiVersion.equalsIgnoreCase("5.0")) { + version = 8; + } else if (hostApiVersion.startsWith("4.")) { + version = 7; + } else if (hostApiVersion.equalsIgnoreCase("3.5")) { + version = 4; + } + return version; + } } From aee302b79b9e2c96c0f4dfbdf309eb4b2d569e3f Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 18 Nov 2020 17:50:41 +0530 Subject: [PATCH 14/31] add newer version Signed-off-by: Abhishek Kumar --- .../cloud/hypervisor/vmware/mo/HypervisorHostHelper.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 006ef79745a5..1c995d3bac3c 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -2218,7 +2218,11 @@ public static Integer getHostHardwareVersion(VmwareHypervisorHost host) { if (hostApiVersion == null) { hostApiVersion = ""; } - if (hostApiVersion.equalsIgnoreCase("6.5")) { + if (hostApiVersion.equalsIgnoreCase("7.0")) { + version = 17; + } else if (hostApiVersion.equalsIgnoreCase("6.7")) { + version = 14; + } else if (hostApiVersion.equalsIgnoreCase("6.5")) { version = 13; } else if (hostApiVersion.equalsIgnoreCase("6.0")) { version = 11; From 98246835a7dd03ba11d03974548ed75b85ffef8a Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Wed, 18 Nov 2020 20:20:09 +0530 Subject: [PATCH 15/31] retrieve hardwareVersion from sdk Signed-off-by: Abhishek Kumar --- .../cloud/hypervisor/vmware/mo/HostMO.java | 11 ++++++-- .../vmware/mo/HypervisorHostHelper.java | 28 ++++--------------- 2 files changed, 13 insertions(+), 26 deletions(-) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java index 0457039293a3..020e92ae1d0c 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java @@ -28,10 +28,14 @@ import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.cloud.hypervisor.vmware.util.VmwareHelper; +import com.cloud.utils.Pair; import com.google.gson.Gson; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.ClusterDasConfigInfo; +import com.vmware.vim25.ComputeResourceConfigInfo; import com.vmware.vim25.ComputeResourceSummary; import com.vmware.vim25.CustomFieldStringValue; import com.vmware.vim25.DatastoreSummary; @@ -66,9 +70,6 @@ import com.vmware.vim25.TraversalSpec; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualNicManagerNetConfig; -import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.cloud.hypervisor.vmware.util.VmwareHelper; -import com.cloud.utils.Pair; public class HostMO extends BaseMO implements 
VmwareHypervisorHost { private static final Logger s_logger = Logger.getLogger(HostMO.class); @@ -278,6 +279,10 @@ public AboutInfo getHostAboutInfo() throws Exception { return (AboutInfo)_context.getVimClient().getDynamicProperty(_mor, "config.product"); } + public ComputeResourceConfigInfo getHostConfigInfo() throws Exception { + return (ComputeResourceConfigInfo)_context.getVimClient().getDynamicProperty(_mor, "configurationEx"); + } + public VmwareHostType getHostType() throws Exception { AboutInfo aboutInfo = getHostAboutInfo(); if ("VMware ESXi".equals(aboutInfo.getName())) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 1c995d3bac3c..77f34bfd00b0 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -74,6 +74,7 @@ import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.BoolPolicy; import com.vmware.vim25.ClusterConfigInfoEx; +import com.vmware.vim25.ComputeResourceConfigInfo; import com.vmware.vim25.ConcurrentAccessFaultMsg; import com.vmware.vim25.CustomFieldStringValue; import com.vmware.vim25.DVPortSetting; @@ -2211,31 +2212,12 @@ public static void createBaseFolderInDatastore(DatastoreMO dsMo, VmwareHyperviso public static Integer getHostHardwareVersion(VmwareHypervisorHost host) { Integer version = null; HostMO hostMo = new HostMO(host.getContext(), host.getMor()); - String hostApiVersion = ""; + ComputeResourceConfigInfo info = null; try { - hostApiVersion = hostMo.getHostAboutInfo().getApiVersion(); + info = hostMo.getHostConfigInfo(); } catch (Exception ignored) {} - if (hostApiVersion == null) { - hostApiVersion = ""; - } - if (hostApiVersion.equalsIgnoreCase("7.0")) { - version = 17; - } else if (hostApiVersion.equalsIgnoreCase("6.7")) { - version = 14; - } else if (hostApiVersion.equalsIgnoreCase("6.5")) { - version = 13; - } else if (hostApiVersion.equalsIgnoreCase("6.0")) { - version = 11; - } else if (hostApiVersion.equalsIgnoreCase("5.5")) { - version = 10; - } else if (hostApiVersion.equalsIgnoreCase("5.1")) { - version = 9; - } else if (hostApiVersion.equalsIgnoreCase("5.0")) { - version = 8; - } else if (hostApiVersion.startsWith("4.")) { - version = 7; - } else if (hostApiVersion.equalsIgnoreCase("3.5")) { - version = 4; + if (info != null) { + version = Integer.valueOf(info.getDefaultHardwareVersionKey()); } return version; } From 4419ce6758a2fcd35aa38e4c654520a9070f8dbd Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 19 Nov 2020 10:21:58 +0530 Subject: [PATCH 16/31] revert to hardwareversion mapping Signed-off-by: Abhishek Kumar --- .../vmware/mo/HypervisorHostHelper.java | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 77f34bfd00b0..7d750fa07d33 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -2218,6 +2218,33 @@ public static Integer getHostHardwareVersion(VmwareHypervisorHost host) { } catch (Exception ignored) {} if (info != null) { version = Integer.valueOf(info.getDefaultHardwareVersionKey()); + } else { + String hostApiVersion = ""; + 
try { + hostApiVersion = hostMo.getHostAboutInfo().getApiVersion(); + } catch (Exception ignored) {} + if (hostApiVersion == null) { + hostApiVersion = ""; + } + if (hostApiVersion.equalsIgnoreCase("7.0")) { + version = 17; + } else if (hostApiVersion.equalsIgnoreCase("6.7")) { + version = 14; + } else if (hostApiVersion.equalsIgnoreCase("6.5")) { + version = 13; + } else if (hostApiVersion.equalsIgnoreCase("6.0")) { + version = 11; + } else if (hostApiVersion.equalsIgnoreCase("5.5")) { + version = 10; + } else if (hostApiVersion.equalsIgnoreCase("5.1")) { + version = 9; + } else if (hostApiVersion.equalsIgnoreCase("5.0")) { + version = 8; + } else if (hostApiVersion.startsWith("4.")) { + version = 7; + } else if (hostApiVersion.equalsIgnoreCase("3.5")) { + version = 4; + } } return version; } From 4085a14b8c2417e4ad2494e9ff92088ec9cb53af Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 19 Nov 2020 13:00:32 +0530 Subject: [PATCH 17/31] fix ui Signed-off-by: Abhishek Kumar --- ui/scripts/instances.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index 312d4187a4e2..f58492d7a2b2 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -2583,9 +2583,14 @@ }, action: function(args) { var rootVolume = {}; + var data = { + virtualmachineid: args.context.instances[0].id, + listAll: true + } $.ajax({ url: createURL("listVolumes&virtualmachineid=" + args.context.instances[0].id), dataType: "json", + data: data, async: false, success: function(json) { var volumes = json.listvolumesresponse.volume; @@ -2597,7 +2602,7 @@ }); } }); - var data = { + data = { 'virtualmachineid': args.context.instances[0].id, 'migrateto[0].volume': rootVolume.id, 'migrateto[0].pool': args.data.storageId From dd7825f2a7765ab308f7c900f134124259b7b7c5 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Thu, 19 Nov 2020 13:16:06 +0530 Subject: [PATCH 18/31] fix Signed-off-by: Abhishek Kumar --- ui/scripts/instances.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/scripts/instances.js b/ui/scripts/instances.js index f58492d7a2b2..4992e41c7bb2 100644 --- a/ui/scripts/instances.js +++ b/ui/scripts/instances.js @@ -2588,7 +2588,7 @@ listAll: true } $.ajax({ - url: createURL("listVolumes&virtualmachineid=" + args.context.instances[0].id), + url: createURL("listVolumes"), dataType: "json", data: data, async: false, From 9ca75f04c6164bbdde05a7712e6c4933a456e683 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 20 Nov 2020 10:37:46 +0530 Subject: [PATCH 19/31] changes for system vm Signed-off-by: Abhishek Kumar --- .../admin/systemvm/MigrateSystemVMCmd.java | 55 +- .../main/java/com/cloud/api/ApiDBUtils.java | 13 +- .../java/com/cloud/api/ApiResponseHelper.java | 9 +- .../cloud/server/ManagementServerImpl.java | 9 +- .../java/com/cloud/vm/UserVmManagerImpl.java | 60 +- ui/l10n/en.js | 6 + ui/scripts/system.js | 684 +++++++++++++++++- 7 files changed, 779 insertions(+), 57 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java index ab0018b9e803..7fc804f27841 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/systemvm/MigrateSystemVMCmd.java @@ -16,7 +16,7 @@ // under the License. 
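// Contract of the reworked command: exactly one of hostId and storageId must be
// supplied. hostId performs a live migration to another Routing-type host, while
// storageId (since 4.16.0) performs an offline migration of the system VM's
// volumes to another primary storage pool; passing both, or neither, is rejected
// with an InvalidParameterValueException in execute() below.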
package org.apache.cloudstack.api.command.admin.systemvm; -import org.apache.log4j.Logger; +import java.util.HashMap; import org.apache.cloudstack.acl.SecurityChecker.AccessType; import org.apache.cloudstack.api.ACL; @@ -27,8 +27,10 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.StoragePoolResponse; import org.apache.cloudstack.api.response.SystemVmResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; import com.cloud.event.EventTypes; import com.cloud.exception.ConcurrentOperationException; @@ -37,6 +39,7 @@ import com.cloud.exception.ResourceUnavailableException; import com.cloud.exception.VirtualMachineMigrationException; import com.cloud.host.Host; +import com.cloud.storage.StoragePool; import com.cloud.user.Account; import com.cloud.vm.VirtualMachine; @@ -54,7 +57,6 @@ public class MigrateSystemVMCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.HOST_ID, type = CommandType.UUID, entityType = HostResponse.class, - required = true, description = "destination Host ID to migrate VM to") private Long hostId; @@ -66,6 +68,13 @@ public class MigrateSystemVMCmd extends BaseAsyncCmd { description = "the ID of the virtual machine") private Long virtualMachineId; + @Parameter(name = ApiConstants.STORAGE_ID, + since = "4.16.0", + type = CommandType.UUID, + entityType = StoragePoolResponse.class, + description = "Destination storage pool ID to migrate VM volumes to. Required for migrating the root disk volume") + private Long storageId; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -78,6 +87,10 @@ public Long getVirtualMachineId() { return virtualMachineId; } + public Long getStorageId() { + return storageId; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -109,15 +122,43 @@ public String getEventDescription() { @Override public void execute() { + if (getHostId() == null && getStorageId() == null) { + throw new InvalidParameterValueException("Either hostId or storageId must be specified"); + } - Host destinationHost = _resourceService.getHost(getHostId()); - if (destinationHost == null) { - throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id=" + getHostId()); + if (getHostId() != null && getStorageId() != null) { + throw new InvalidParameterValueException("Only one of hostId and storageId can be specified"); + } + + Host destinationHost = null; + if (getHostId() != null) { + destinationHost = _resourceService.getHost(getHostId()); + if (destinationHost == null) { + throw new InvalidParameterValueException("Unable to find the host to migrate the VM, host id=" + getHostId()); + } + if (destinationHost.getType() != Host.Type.Routing) { + throw new InvalidParameterValueException("The specified host(" + destinationHost.getName() + ") is not suitable to migrate the VM, please specify another one"); + } + CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to host Id: " + getHostId()); + } + + // OfflineMigration performed when this parameter is specified + StoragePool destStoragePool = null; + if (getStorageId() != null) { + destStoragePool = _storageService.getStoragePool(getStorageId()); + if 
(destStoragePool == null) { + throw new InvalidParameterValueException("Unable to find the storage pool to migrate the VM"); + } + CallContext.current().setEventDetails("VM Id: " + getVirtualMachineId() + " to storage pool Id: " + getStorageId()); } try { - CallContext.current().setEventDetails("VM Id: " + this._uuidMgr.getUuid(VirtualMachine.class, getVirtualMachineId()) + " to host Id: " + this._uuidMgr.getUuid(Host.class, getHostId())); //FIXME : Should not be calling UserVmService to migrate all types of VMs - need a generic VM layer - VirtualMachine migratedVm = _userVmService.migrateVirtualMachine(getVirtualMachineId(), destinationHost); + VirtualMachine migratedVm = null; + if (getHostId() != null) { + migratedVm = _userVmService.migrateVirtualMachineWithVolume(getVirtualMachineId(), destinationHost, new HashMap()); + } else if (getStorageId() != null) { + migratedVm = _userVmService.vmStorageMigration(getVirtualMachineId(), destStoragePool); + } if (migratedVm != null) { // return the generic system VM instance response SystemVmResponse response = _responseGenerator.createSystemVmResponse(migratedVm); diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index a2433ab634d6..9f4c9f2db213 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java +++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -311,6 +311,7 @@ import com.cloud.uservm.UserVm; import com.cloud.utils.EnumUtils; import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; import com.cloud.vm.ConsoleProxyVO; import com.cloud.vm.DomainRouterVO; import com.cloud.vm.InstanceGroup; @@ -1732,7 +1733,17 @@ public static String findJobInstanceUuid(AsyncJob job) { /////////////////////////////////////////////////////////////////////// public static DomainRouterResponse newDomainRouterResponse(DomainRouterJoinVO vr, Account caller) { - return s_domainRouterJoinDao.newDomainRouterResponse(vr, caller); + DomainRouterResponse response = s_domainRouterJoinDao.newDomainRouterResponse(vr, caller); + if (StringUtils.isBlank(response.getHypervisor())) { + VMInstanceVO vm = ApiDBUtils.findVMInstanceById(vr.getId()); + if (vm.getLastHostId() != null) { + HostVO host = ApiDBUtils.findHostById(vm.getLastHostId()); + if (host != null) { + response.setHypervisor(host.getHypervisorType().toString()); + } + } + } + return response; } public static DomainRouterResponse fillRouterDetails(DomainRouterResponse vrData, DomainRouterJoinVO vr) { diff --git a/server/src/main/java/com/cloud/api/ApiResponseHelper.java b/server/src/main/java/com/cloud/api/ApiResponseHelper.java index baac70711e70..c537ba10cdc5 100644 --- a/server/src/main/java/com/cloud/api/ApiResponseHelper.java +++ b/server/src/main/java/com/cloud/api/ApiResponseHelper.java @@ -16,6 +16,8 @@ // under the License. 
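// Hypervisor fallback for stopped VMs: a stopped router or system VM has no
// hostId, so its API response would otherwise carry an empty hypervisor field.
// The router response above and the system VM response below therefore fall
// back to the hypervisor type of lastHostId, which is what lets the UI offer
// the VMware-only "migrate to another primary storage" action on stopped VMs.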
package com.cloud.api; +import static com.cloud.utils.NumbersUtil.toHumanReadableSize; + import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Calendar; @@ -350,8 +352,6 @@ import com.cloud.vm.snapshot.VMSnapshotVO; import com.cloud.vm.snapshot.dao.VMSnapshotDao; -import static com.cloud.utils.NumbersUtil.toHumanReadableSize; - public class ApiResponseHelper implements ResponseGenerator { private static final Logger s_logger = Logger.getLogger(ApiResponseHelper.class); @@ -1399,6 +1399,11 @@ public SystemVmResponse createSystemVmResponse(VirtualMachine vm) { vmResponse.setHostName(host.getName()); vmResponse.setHypervisor(host.getHypervisorType().toString()); } + } else if (vm.getLastHostId() != null) { + Host lastHost = ApiDBUtils.findHostById(vm.getLastHostId()); + if (lastHost != null) { + vmResponse.setHypervisor(lastHost.getHypervisorType().toString()); + } } if (vm.getType() == Type.SecondaryStorageVm || vm.getType() == Type.ConsoleProxy) { diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index 94e4b2aa1345..fb3263549f89 100644 --- a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -40,7 +40,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.storage.Storage; import org.apache.cloudstack.acl.ControlledEntity; import org.apache.cloudstack.affinity.AffinityGroupProcessor; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; @@ -653,6 +652,7 @@ import com.cloud.storage.GuestOSVO; import com.cloud.storage.GuestOsCategory; import com.cloud.storage.ScopeType; +import com.cloud.storage.Storage; import com.cloud.storage.StorageManager; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; @@ -1257,7 +1257,7 @@ public Ternary, Integer>, List, Map, Integer>, List, Map requiresStorageMotion = new HashMap(); DataCenterDeployment plan = null; if (canMigrateWithStorage) { - allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), null, null, null, keyword, + Long podId = !VirtualMachine.Type.User.equals(vm.getType()) ? srcHost.getPodId() : null; + allHostsPair = searchForServers(startIndex, pageSize, null, hostType, null, srcHost.getDataCenterId(), podId, null, null, keyword, null, null, srcHost.getHypervisorType(), null, srcHost.getId()); allHosts = allHostsPair.first(); hostsForMigrationWithStorage = new ArrayList<>(allHosts); @@ -1336,7 +1337,7 @@ public Ternary, Integer>, List, Map vols = _volsDao.findByInstance(vm.getId()); @@ -5689,17 +5688,36 @@ private VMInstanceVO preVmStorageMigrationCheck(Long vmId) { return vm; } + private VirtualMachine findMigratedVm(long vmId, VirtualMachine.Type vmType) { + if (VirtualMachine.Type.User.equals(vmType)) { + return _vmDao.findById(vmId); + } + return _vmInstanceDao.findById(vmId); + } + @Override public VirtualMachine vmStorageMigration(Long vmId, StoragePool destPool) { VMInstanceVO vm = preVmStorageMigrationCheck(vmId); Map volumeToPoolIds = new HashMap<>(); checkDestinationHypervisorType(destPool, vm); List volumes = _volsDao.findByInstance(vm.getId()); + StoragePoolVO destinationPoolVo = _storagePoolDao.findById(destPool.getId()); + Long destPoolPodId = ScopeType.CLUSTER.equals(destinationPoolVo.getScope()) || ScopeType.HOST.equals(destinationPoolVo.getScope()) ? 
+ destinationPoolVo.getPodId() : null; for (VolumeVO volume : volumes) { + if (!VirtualMachine.Type.User.equals(vm.getType())) { + // Migrate within same pod as source storage and same cluster for all disks only. Hypervisor check already done + StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId()); + if (destPoolPodId != null && + (ScopeType.CLUSTER.equals(pool.getScope()) || ScopeType.HOST.equals(pool.getScope())) && + !destPoolPodId.equals(pool.getPodId())) { + throw new InvalidParameterValueException("Storage migration of non-user VMs cannot be done between storage pools of different pods"); + } + } volumeToPoolIds.put(volume.getId(), destPool.getId()); } _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); - return _vmDao.findById(vm.getId()); + return findMigratedVm(vm.getId(), vm.getType()); } @Override @@ -5722,7 +5740,7 @@ public VirtualMachine vmStorageMigration(Long vmId, Map volumeTo volumeToPoolIds.put(volume.getId(), pool.getId()); } _itMgr.storageMigration(vm.getUuid(), volumeToPoolIds); - return _vmDao.findById(vm.getId()); + return findMigratedVm(vm.getId(), vm.getType()); } private void checkDestinationHypervisorType(StoragePool destPool, VMInstanceVO vm) { @@ -5866,12 +5884,7 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr collectVmNetworkStatistics(uservm); } _itMgr.migrate(vm.getUuid(), srcHostId, dest); - VMInstanceVO vmInstance = _vmInstanceDao.findById(vmId); - if (vmInstance.getType().equals(VirtualMachine.Type.User)) { - return _vmDao.findById(vmId); - } else { - return vmInstance; - } + return findMigratedVm(vm.getId(), vm.getType()); } private boolean isOnSupportedHypevisorForMigration(VMInstanceVO vm) { @@ -6171,6 +6184,13 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio destinationHost.getHypervisorType().toString() + " " + destHostVersion); } + if (!VirtualMachine.Type.User.equals(vm.getType())) { + // for System VMs check that the destination host is within the same pod + if (srcHost.getPodId() != null && !srcHost.getPodId().equals(destinationHost.getPodId())) { + throw new InvalidParameterValueException("Cannot migrate the VM, destination host is not in the same pod as current host of the VM"); + } + } + if (HypervisorType.KVM.equals(srcHost.getHypervisorType())) { if (srcHostVersion == null) { srcHostVersion = ""; @@ -6204,16 +6224,18 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio List vmVolumes = _volsDao.findUsableVolumesForInstance(vm.getId()); Map volToPoolObjectMap = new HashMap(); - if (!isVMUsingLocalStorage(vm) && destinationHost.getClusterId().equals(srcHost.getClusterId())) { - if (volumeToPool.isEmpty()) { - // If the destination host is in the same cluster and volumes do not have to be migrated across pools - // then fail the call. migrateVirtualMachine api should have been used. 
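// The unconditional failure below is narrowed: when the destination host is in
// the same cluster and no volume map was passed, system VMs are delegated to
// migrateVirtualMachine instead, so one API call covers both cases; only user
// VMs still receive the InvalidParameterValueException directing callers to
// the plain migrateVirtualMachine API.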
- throw new InvalidParameterValueException("Migration of the vm " + vm + "from host " + srcHost + " to destination host " + destinationHost - + " doesn't involve migrating the volumes."); + if (!isVMUsingLocalStorage(vm) && destinationHost.getClusterId().equals(srcHost.getClusterId()) + && MapUtils.isEmpty(volumeToPool)) { + // If the destination host is in the same cluster and volumes do not have to be migrated across pools + // then call migrateVirtualMachine for non-user VMs and exception for user VMs + if (!VirtualMachine.Type.User.equals(vm.getType())) { + return migrateVirtualMachine(vmId, destinationHost); } + throw new InvalidParameterValueException("Migration of the vm " + vm + "from host " + srcHost + " to destination host " + destinationHost + + " doesn't involve migrating the volumes."); } - if (!volumeToPool.isEmpty()) { + if (MapUtils.isNotEmpty(volumeToPool)) { // Check if all the volumes and pools passed as parameters are valid. for (Map.Entry entry : volumeToPool.entrySet()) { VolumeVO volume = _volsDao.findByUuid(entry.getKey()); @@ -6263,7 +6285,7 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio checkHostsDedication(vm, srcHostId, destinationHost.getId()); _itMgr.migrateWithStorage(vm.getUuid(), srcHostId, destinationHost.getId(), volToPoolObjectMap); - return _vmDao.findById(vm.getId()); + return findMigratedVm(vm.getId(), vm.getType()); } @DB diff --git a/ui/l10n/en.js b/ui/l10n/en.js index 8e25c54b04cd..1eddca8130a8 100644 --- a/ui/l10n/en.js +++ b/ui/l10n/en.js @@ -257,8 +257,10 @@ var dictionary = { "label.action.migrate.instance.processing":"Migrating Instance....", "label.action.migrate.router":"Migrate Router", "label.action.migrate.router.processing":"Migrating Router....", +"label.action.migrate.router.to.ps":"Migrate router to another primary storage", "label.action.migrate.systemvm":"Migrate System VM", "label.action.migrate.systemvm.processing":"Migrating System VM....", +"label.action.migrate.systemvm.to.ps":"Migrate system VM to another primary storage", "label.action.reboot.instance":"Reboot Instance", "label.action.reboot.instance.processing":"Rebooting Instance....", "label.action.reboot.router":"Reboot Router", @@ -1178,6 +1180,7 @@ var dictionary = { "label.migrate.instance.to.host":"Migrate instance to another host", "label.migrate.instance.to.ps":"Migrate instance to another primary storage", "label.migrate.lb.vm":"Migrate LB VM", +"label.migrate.lb.vm.to.ps":"Migrate LB VM to another primary storage", "label.migrate.router.to":"Migrate Router to", "label.migrate.systemvm.to":"Migrate System VM to", "label.migrate.to.host":"Migrate to host", @@ -2310,6 +2313,9 @@ var dictionary = { "message.migrate.instance.to.host":"Please confirm that you want to migrate instance to another host.", "message.migrate.instance.select.host":"Please select a host for migration", "message.migrate.instance.to.ps":"Please confirm that you want to migrate instance to another primary storage.", +"message.migrate.lb.vm.to.ps":"Please confirm that you want to migrate LB VM to another primary storage.", +"message.migrate.router.to.ps":"Please confirm that you want to migrate router to another primary storage.", +"message.migrate.system.vm.to.ps":"Please confirm that you want to migrate system VM to another primary storage.", "message.migrate.router.confirm":"Please confirm the host you wish to migrate the router to:", "message.migrate.systemvm.confirm":"Please confirm the host you wish to migrate the system VM to:", 
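// Each "*.to.ps" label/message pair above backs a migrateToAnotherStorage
// action added to ui/scripts/system.js below. The pattern is the same for
// routers, internal LB VMs and system VMs: list the storage pools in the VM's
// zone, call migrateSystemVm with virtualmachineid + storageid (instead of
// hostid), then poll the async job and refresh the entry via listRouters,
// listInternalLoadBalancerVMs or listSystemVms.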
"message.migrate.volume":"Please confirm that you want to migrate volume to another primary storage.", diff --git a/ui/scripts/system.js b/ui/scripts/system.js index ca0eb7e3aeb7..136cb0ed60c6 100755 --- a/ui/scripts/system.js +++ b/ui/scripts/system.js @@ -2710,10 +2710,17 @@ var hostObjs = json.findhostsformigrationresponse.host; var items =[]; $(hostObjs).each(function () { - items.push({ - id: this.id, - description: (this.name + " (" + (this.suitableformigration ? "Suitable": "Not Suitable") + ")") - }); + if (this.requiresStorageMotion == true) { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable, " : "Not Suitable, ") + "Storage migration required)") + }); + } else { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable" : "Not Suitable") + ")") + }); + } }); args.response.success({ data: items @@ -2770,6 +2777,89 @@ } }, + migrateToAnotherStorage: { + label: 'label.action.migrate.router.to.ps', + compactLabel: 'label.migrate.to.storage', + messages: { + confirm: function(args) { + return 'message.migrate.router.to.ps'; + }, + notification: function(args) { + return 'label.action.migrate.router.to.ps'; + } + }, + createForm: { + title: 'label.action.migrate.router.to.ps', + desc: '', + fields: { + storageId: { + label: 'label.primary.storage', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listStoragePools&zoneid=" + args.context.routers[0].zoneid), + dataType: "json", + async: true, + success: function(json) { + var pools = json.liststoragepoolsresponse.storagepool; + var items = []; + $(pools).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + args.response.success({ + data: items + }); + } + }); + } + } + } + }, + action: function(args) { + var data = { + 'virtualmachineid': args.context.routers[0].id, + 'storageid': args.data.storageId + } + $.ajax({ + url: createURL("migrateSystemVm"), + data: data, + async: true, + success: function(json) { + var jid = json.migratesystemvmresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function (json) { + $.ajax({ + url: createURL("listRouters&id=" + json.queryasyncjobresultresponse.jobresult.systemvm.id), + dataType: "json", + async: false, + success: function (json) { + var items = json.listroutersresponse.router; + if (items != null && items.length > 0) { + return items[0]; + } + } + }); + }, + getActionFilter: function() { + return routerActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + viewConsole: { label: 'label.view.console', action: { @@ -3254,10 +3344,17 @@ var hostObjs = json.findhostsformigrationresponse.host; var items =[]; $(hostObjs).each(function () { - items.push({ - id: this.id, - description: (this.name + " (" + (this.suitableformigration ? "Suitable": "Not Suitable") + ")") - }); + if (this.requiresStorageMotion == true) { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable, " : "Not Suitable, ") + "Storage migration required)") + }); + } else { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? 
"Suitable" : "Not Suitable") + ")") + }); + } }); args.response.success({ data: items @@ -3314,6 +3411,89 @@ } }, + migrateToAnotherStorage: { + label: 'label.migrate.lb.vm.to.ps', + compactLabel: 'label.migrate.to.storage', + messages: { + confirm: function(args) { + return 'message.migrate.lb.vm.to.ps'; + }, + notification: function(args) { + return 'label.migrate.lb.vm.to.ps'; + } + }, + createForm: { + title: 'label.migrate.lb.vm.to.ps', + desc: '', + fields: { + storageId: { + label: 'label.primary.storage', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listStoragePools&zoneid=" + args.context.internallbinstances[0].zoneid), + dataType: "json", + async: true, + success: function(json) { + var pools = json.liststoragepoolsresponse.storagepool; + var items = []; + $(pools).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + args.response.success({ + data: items + }); + } + }); + } + } + } + }, + action: function(args) { + var data = { + 'virtualmachineid': args.context.internallbinstances[0].id, + 'storageid': args.data.storageId + } + $.ajax({ + url: createURL("migrateSystemVm"), + data: data, + async: true, + success: function(json) { + var jid = json.migratesystemvmresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function (json) { + $.ajax({ + url: createURL("listInternalLoadBalancerVMs&id=" + json.queryasyncjobresultresponse.jobresult.systemvm.id), + dataType: "json", + async: false, + success: function (json) { + var items = json.listinternallbvmssresponse.internalloadbalancervm; + if (items != null && items.length > 0) { + return items[0]; + } + } + }); + }, + getActionFilter: function() { + return internallbinstanceActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + viewConsole: { label: 'label.view.console', action: { @@ -3882,10 +4062,17 @@ var hostObjs = json.findhostsformigrationresponse.host; var items =[]; $(hostObjs).each(function () { - items.push({ - id: this.id, - description: (this.name + " (" + (this.suitableformigration ? "Suitable": "Not Suitable") + ")") - }); + if (this.requiresStorageMotion == true) { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable, " : "Not Suitable, ") + "Storage migration required)") + }); + } else { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? 
"Suitable" : "Not Suitable") + ")") + }); + } }); args.response.success({ data: items @@ -3942,6 +4129,89 @@ } }, + migrateToAnotherStorage: { + label: 'label.action.migrate.router.to.ps', + compactLabel: 'label.migrate.to.storage', + messages: { + confirm: function(args) { + return 'message.migrate.router.to.ps'; + }, + notification: function(args) { + return 'label.action.migrate.router.to.ps'; + } + }, + createForm: { + title: 'label.action.migrate.router.to.ps', + desc: '', + fields: { + storageId: { + label: 'label.primary.storage', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listStoragePools&zoneid=" + args.context.routers[0].zoneid), + dataType: "json", + async: true, + success: function(json) { + var pools = json.liststoragepoolsresponse.storagepool; + var items = []; + $(pools).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + args.response.success({ + data: items + }); + } + }); + } + } + } + }, + action: function(args) { + var data = { + 'virtualmachineid': args.context.routers[0].id, + 'storageid': args.data.storageId + } + $.ajax({ + url: createURL("migrateSystemVm"), + data: data, + async: true, + success: function(json) { + var jid = json.migratesystemvmresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function (json) { + $.ajax({ + url: createURL("listRouters&id=" + json.queryasyncjobresultresponse.jobresult.systemvm.id), + dataType: "json", + async: false, + success: function (json) { + var items = json.listroutersresponse.router; + if (items != null && items.length > 0) { + return items[0]; + } + } + }); + }, + getActionFilter: function() { + return routerActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + diagnostics: { label: 'label.action.run.diagnostics', messages: { @@ -6823,10 +7093,17 @@ var hostObjs = json.findhostsformigrationresponse.host; var items =[]; $(hostObjs).each(function () { - items.push({ - id: this.id, - description: (this.name + " (" + (this.suitableformigration ? "Suitable": "Not Suitable") + ")") - }); + if (this.requiresStorageMotion == true) { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable, " : "Not Suitable, ") + "Storage migration required)") + }); + } else { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? 
"Suitable" : "Not Suitable") + ")") + }); + } }); args.response.success({ data: items @@ -6883,6 +7160,89 @@ } }, + migrateToAnotherStorage: { + label: 'label.action.migrate.router.to.ps', + compactLabel: 'label.migrate.to.storage', + messages: { + confirm: function(args) { + return 'message.migrate.router.to.ps'; + }, + notification: function(args) { + return 'label.action.migrate.router.to.ps'; + } + }, + createForm: { + title: 'label.action.migrate.router.to.ps', + desc: '', + fields: { + storageId: { + label: 'label.primary.storage', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listStoragePools&zoneid=" + args.context.routers[0].zoneid), + dataType: "json", + async: true, + success: function(json) { + var pools = json.liststoragepoolsresponse.storagepool; + var items = []; + $(pools).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + args.response.success({ + data: items + }); + } + }); + } + } + } + }, + action: function(args) { + var data = { + 'virtualmachineid': args.context.routers[0].id, + 'storageid': args.data.storageId + } + $.ajax({ + url: createURL("migrateSystemVm"), + data: data, + async: true, + success: function(json) { + var jid = json.migratesystemvmresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function (json) { + $.ajax({ + url: createURL("listRouters&id=" + json.queryasyncjobresultresponse.jobresult.systemvm.id), + dataType: "json", + async: false, + success: function (json) { + var items = json.listroutersresponse.router; + if (items != null && items.length > 0) { + return items[0]; + } + } + }); + }, + getActionFilter: function() { + return routerActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + viewConsole: { label: 'label.view.console', action: { @@ -8903,10 +9263,15 @@ var hostObjs = json.findhostsformigrationresponse.host; var items =[]; $(hostObjs).each(function () { - if (this.requiresStorageMotion == false) { + if (this.requiresStorageMotion == true) { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable, " : "Not Suitable, ") + "Storage migration required)") + }); + } else { items.push({ id: this.id, - description: (this.name + " (" + (this.suitableformigration ? "Suitable": "Not Suitable") + ")") + description: (this.name + " (" + (this.suitableformigration ? 
"Suitable" : "Not Suitable") + ")") }); } }); @@ -8960,6 +9325,89 @@ } }, + migrateToAnotherStorage: { + label: 'label.action.migrate.system.vm.to.ps', + compactLabel: 'label.migrate.to.storage', + messages: { + confirm: function(args) { + return 'message.migrate.system.vm.to.ps'; + }, + notification: function(args) { + return 'label.action.migrate.system.vm.to.ps'; + } + }, + createForm: { + title: 'label.action.migrate.system.vm.to.ps', + desc: '', + fields: { + storageId: { + label: 'label.primary.storage', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listStoragePools&zoneid=" + args.context.systemVMs[0].zoneid), + dataType: "json", + async: true, + success: function(json) { + var pools = json.liststoragepoolsresponse.storagepool; + var items = []; + $(pools).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + args.response.success({ + data: items + }); + } + }); + } + } + } + }, + action: function(args) { + var data = { + 'virtualmachineid': args.context.systemVMs[0].id, + 'storageid': args.data.storageId + } + $.ajax({ + url: createURL("migrateSystemVm"), + data: data, + async: true, + success: function(json) { + var jid = json.migratesystemvmresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function (json) { + $.ajax({ + url: createURL("listSystemVms&id=" + json.queryasyncjobresultresponse.jobresult.systemvm.id), + dataType: "json", + async: false, + success: function (json) { + var items = json.listsystemvmsresponse.systemvm; + if (items != null && items.length > 0) { + return items[0]; + } + } + }); + }, + getActionFilter: function() { + return systemvmActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + diagnostics: { label: 'label.action.run.diagnostics', messages: { @@ -10394,10 +10842,17 @@ var hostObjs = json.findhostsformigrationresponse.host; var items =[]; $(hostObjs).each(function () { - items.push({ - id: this.id, - description: (this.name + " (" + (this.suitableformigration ? "Suitable": "Not Suitable") + ")") - }); + if (this.requiresStorageMotion == true) { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable, " : "Not Suitable, ") + "Storage migration required)") + }); + } else { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? 
"Suitable" : "Not Suitable") + ")") + }); + } }); args.response.success({ data: items @@ -10454,6 +10909,89 @@ } }, + migrateToAnotherStorage: { + label: 'label.action.migrate.router.to.ps', + compactLabel: 'label.migrate.to.storage', + messages: { + confirm: function(args) { + return 'message.migrate.router.to.ps'; + }, + notification: function(args) { + return 'label.action.migrate.router.to.ps'; + } + }, + createForm: { + title: 'label.action.migrate.router.to.ps', + desc: '', + fields: { + storageId: { + label: 'label.primary.storage', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listStoragePools&zoneid=" + args.context.routers[0].zoneid), + dataType: "json", + async: true, + success: function(json) { + var pools = json.liststoragepoolsresponse.storagepool; + var items = []; + $(pools).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + args.response.success({ + data: items + }); + } + }); + } + } + } + }, + action: function(args) { + var data = { + 'virtualmachineid': args.context.routers[0].id, + 'storageid': args.data.storageId + } + $.ajax({ + url: createURL("migrateSystemVm"), + data: data, + async: true, + success: function(json) { + var jid = json.migratesystemvmresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function (json) { + $.ajax({ + url: createURL("listRouters&id=" + json.queryasyncjobresultresponse.jobresult.systemvm.id), + dataType: "json", + async: false, + success: function (json) { + var items = json.listroutersresponse.router; + if (items != null && items.length > 0) { + return items[0]; + } + } + }); + }, + getActionFilter: function() { + return routerActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + // VR Diagnostics diagnostics: { label: 'label.action.run.diagnostics', @@ -11920,10 +12458,15 @@ var hostObjs = json.findhostsformigrationresponse.host; var items =[]; $(hostObjs).each(function () { - if (this.requiresStorageMotion == false) { + if (this.requiresStorageMotion == true) { + items.push({ + id: this.id, + description: (this.name + " (" + (this.suitableformigration ? "Suitable, " : "Not Suitable, ") + "Storage migration required)") + }); + } else { items.push({ id: this.id, - description: (this.name + " (" + (this.suitableformigration ? "Suitable": "Not Suitable") + ")") + description: (this.name + " (" + (this.suitableformigration ? 
"Suitable" : "Not Suitable") + ")") }); } }); @@ -11977,6 +12520,89 @@ } }, + migrateToAnotherStorage: { + label: 'label.action.migrate.system.vm.to.ps', + compactLabel: 'label.migrate.to.storage', + messages: { + confirm: function(args) { + return 'message.migrate.system.vm.to.ps'; + }, + notification: function(args) { + return 'label.action.migrate.system.vm.to.ps'; + } + }, + createForm: { + title: 'label.action.migrate.system.vm.to.ps', + desc: '', + fields: { + storageId: { + label: 'label.primary.storage', + validation: { + required: true + }, + select: function(args) { + $.ajax({ + url: createURL("listStoragePools&zoneid=" + args.context.systemVMs[0].zoneid), + dataType: "json", + async: true, + success: function(json) { + var pools = json.liststoragepoolsresponse.storagepool; + var items = []; + $(pools).each(function() { + items.push({ + id: this.id, + description: this.name + }); + }); + args.response.success({ + data: items + }); + } + }); + } + } + } + }, + action: function(args) { + var data = { + 'virtualmachineid': args.context.systemVMs[0].id, + 'storageid': args.data.storageId + } + $.ajax({ + url: createURL("migrateSystemVm"), + data: data, + async: true, + success: function(json) { + var jid = json.migratesystemvmresponse.jobid; + args.response.success({ + _custom: { + jobId: jid, + getUpdatedItem: function (json) { + $.ajax({ + url: createURL("listSystemVms&id=" + json.queryasyncjobresultresponse.jobresult.systemvm.id), + dataType: "json", + async: false, + success: function (json) { + var items = json.listsystemvmsresponse.systemvm; + if (items != null && items.length > 0) { + return items[0]; + } + } + }); + }, + getActionFilter: function() { + return systemvmActionfilter; + } + } + }); + } + }); + }, + notification: { + poll: pollAsyncJobResult + } + }, + diagnostics: { label: 'label.action.run.diagnostics', messages: { @@ -22729,6 +23355,9 @@ //when router is Stopped, all hypervisors support scaleUp(change service offering) allowedActions.push("scaleUp"); + if (isAdmin() && jsonObj.hypervisor == "VMware") { + allowedActions.push("migrateToAnotherStorage"); + } allowedActions.push("remove"); } if (jsonObj.state == 'Starting' || jsonObj.state == 'Stopping' || jsonObj.state == 'Migrating') { @@ -22748,6 +23377,10 @@ allowedActions.push("migrate"); } else if (jsonObj.state == 'Stopped') { allowedActions.push("start"); + + if (isAdmin() && jsonObj.hypervisor == "VMware") { + allowedActions.push("migrateToAnotherStorage"); + } } if (jsonObj.state == 'Starting' || jsonObj.state == 'Stopping' || jsonObj.state == 'Migrating') { allowedActions.push("viewConsole"); @@ -22781,6 +23414,9 @@ //when systemvm is Stopped, all hypervisors support scaleUp(change service offering) allowedActions.push("scaleUp"); + if (isAdmin() && jsonObj.hypervisor == "VMware") { + allowedActions.push("migrateToAnotherStorage"); + } allowedActions.push("remove"); } else if (jsonObj.state == 'Error') { allowedActions.push("remove"); From 919c5dd8f25db8ba510115386a790298301b95b0 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 20 Nov 2020 10:52:09 +0530 Subject: [PATCH 20/31] fix Signed-off-by: Abhishek Kumar --- server/src/main/java/com/cloud/api/ApiDBUtils.java | 6 +++--- server/src/main/java/com/cloud/vm/UserVmManagerImpl.java | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/com/cloud/api/ApiDBUtils.java b/server/src/main/java/com/cloud/api/ApiDBUtils.java index 9f4c9f2db213..928d839d41ed 100644 --- a/server/src/main/java/com/cloud/api/ApiDBUtils.java 
+++ b/server/src/main/java/com/cloud/api/ApiDBUtils.java @@ -1737,9 +1737,9 @@ public static DomainRouterResponse newDomainRouterResponse(DomainRouterJoinVO vr if (StringUtils.isBlank(response.getHypervisor())) { VMInstanceVO vm = ApiDBUtils.findVMInstanceById(vr.getId()); if (vm.getLastHostId() != null) { - HostVO host = ApiDBUtils.findHostById(vm.getLastHostId()); - if (host != null) { - response.setHypervisor(host.getHypervisorType().toString()); + HostVO lastHost = ApiDBUtils.findHostById(vm.getLastHostId()); + if (lastHost != null) { + response.setHypervisor(lastHost.getHypervisorType().toString()); } } } diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 7313ac9677c2..28071fba087e 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -5667,7 +5667,7 @@ private VMInstanceVO preVmStorageMigrationCheck(Long vmId) { } if (vm.getType() != VirtualMachine.Type.User && !HypervisorType.VMware.equals(vm.getHypervisorType())) { - throw new InvalidParameterValueException("can only do storage migration on user vm for hypervisor: " + vm.getHypervisorType().toString()); + throw new InvalidParameterValueException("cannot do storage migration on non-user vm for hypervisor: " + vm.getHypervisorType().toString() + ", only supported for VMware"); } List vols = _volsDao.findByInstance(vm.getId()); From 8f6b1d16b4b44336cfe70d0eb6979742fb314172 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 20 Nov 2020 16:12:04 +0530 Subject: [PATCH 21/31] fix npe on defaultnic Signed-off-by: Abhishek Kumar --- .../src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 9250f317f63a..2656dbc1332c 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -43,7 +43,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao; import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.command.admin.vm.MigrateVMCmd; @@ -144,6 +143,7 @@ import com.cloud.deploy.DeploymentPlanner; import com.cloud.deploy.DeploymentPlanner.ExcludeList; import com.cloud.deploy.DeploymentPlanningManager; +import com.cloud.deployasis.dao.UserVmDeployAsIsDetailsDao; import com.cloud.event.EventTypes; import com.cloud.event.UsageEventUtils; import com.cloud.event.UsageEventVO; @@ -3081,7 +3081,7 @@ private void orchestrateMigrateWithStorage(final String vmUuid, final long srcHo Nic defaultNic = _networkModel.getDefaultNic(vm.getId()); - if (defaultNic != null) { + if (defaultNic != null && VirtualMachine.Type.User.equals(vm.getType())) { UserVmVO userVm = _userVmDao.findById(vm.getId()); Map details = userVmDetailsDao.listDetailsKeyPairs(vm.getId()); userVm.setDetails(details); From c765b87556bd8c8fd4f25ee5b66d6a325d74ca65 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Fri, 20 Nov 2020 18:25:42 +0530 Subject: [PATCH 22/31] revert SDK configurationEx property changes Signed-off-by: Abhishek Kumar --- .../cloud/hypervisor/vmware/mo/HostMO.java | 11 +--- 
.../vmware/mo/HypervisorHostHelper.java | 55 ++++++++----------- 2 files changed, 26 insertions(+), 40 deletions(-) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java index 020e92ae1d0c..0457039293a3 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HostMO.java @@ -28,14 +28,10 @@ import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; -import com.cloud.hypervisor.vmware.util.VmwareContext; -import com.cloud.hypervisor.vmware.util.VmwareHelper; -import com.cloud.utils.Pair; import com.google.gson.Gson; import com.vmware.vim25.AboutInfo; import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.ClusterDasConfigInfo; -import com.vmware.vim25.ComputeResourceConfigInfo; import com.vmware.vim25.ComputeResourceSummary; import com.vmware.vim25.CustomFieldStringValue; import com.vmware.vim25.DatastoreSummary; @@ -70,6 +66,9 @@ import com.vmware.vim25.TraversalSpec; import com.vmware.vim25.VirtualMachineConfigSpec; import com.vmware.vim25.VirtualNicManagerNetConfig; +import com.cloud.hypervisor.vmware.util.VmwareContext; +import com.cloud.hypervisor.vmware.util.VmwareHelper; +import com.cloud.utils.Pair; public class HostMO extends BaseMO implements VmwareHypervisorHost { private static final Logger s_logger = Logger.getLogger(HostMO.class); @@ -279,10 +278,6 @@ public AboutInfo getHostAboutInfo() throws Exception { return (AboutInfo)_context.getVimClient().getDynamicProperty(_mor, "config.product"); } - public ComputeResourceConfigInfo getHostConfigInfo() throws Exception { - return (ComputeResourceConfigInfo)_context.getVimClient().getDynamicProperty(_mor, "configurationEx"); - } - public VmwareHostType getHostType() throws Exception { AboutInfo aboutInfo = getHostAboutInfo(); if ("VMware ESXi".equals(aboutInfo.getName())) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 7d750fa07d33..1c995d3bac3c 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -74,7 +74,6 @@ import com.vmware.vim25.AlreadyExistsFaultMsg; import com.vmware.vim25.BoolPolicy; import com.vmware.vim25.ClusterConfigInfoEx; -import com.vmware.vim25.ComputeResourceConfigInfo; import com.vmware.vim25.ConcurrentAccessFaultMsg; import com.vmware.vim25.CustomFieldStringValue; import com.vmware.vim25.DVPortSetting; @@ -2212,39 +2211,31 @@ public static void createBaseFolderInDatastore(DatastoreMO dsMo, VmwareHyperviso public static Integer getHostHardwareVersion(VmwareHypervisorHost host) { Integer version = null; HostMO hostMo = new HostMO(host.getContext(), host.getMor()); - ComputeResourceConfigInfo info = null; + String hostApiVersion = ""; try { - info = hostMo.getHostConfigInfo(); + hostApiVersion = hostMo.getHostAboutInfo().getApiVersion(); } catch (Exception ignored) {} - if (info != null) { - version = Integer.valueOf(info.getDefaultHardwareVersionKey()); - } else { - String hostApiVersion = ""; - try { - hostApiVersion = hostMo.getHostAboutInfo().getApiVersion(); - } catch (Exception ignored) {} - if (hostApiVersion == null) { - hostApiVersion = ""; - } - if (hostApiVersion.equalsIgnoreCase("7.0")) { - version = 17; - } else if 
(hostApiVersion.equalsIgnoreCase("6.7")) { - version = 14; - } else if (hostApiVersion.equalsIgnoreCase("6.5")) { - version = 13; - } else if (hostApiVersion.equalsIgnoreCase("6.0")) { - version = 11; - } else if (hostApiVersion.equalsIgnoreCase("5.5")) { - version = 10; - } else if (hostApiVersion.equalsIgnoreCase("5.1")) { - version = 9; - } else if (hostApiVersion.equalsIgnoreCase("5.0")) { - version = 8; - } else if (hostApiVersion.startsWith("4.")) { - version = 7; - } else if (hostApiVersion.equalsIgnoreCase("3.5")) { - version = 4; - } + if (hostApiVersion == null) { + hostApiVersion = ""; + } + if (hostApiVersion.equalsIgnoreCase("7.0")) { + version = 17; + } else if (hostApiVersion.equalsIgnoreCase("6.7")) { + version = 14; + } else if (hostApiVersion.equalsIgnoreCase("6.5")) { + version = 13; + } else if (hostApiVersion.equalsIgnoreCase("6.0")) { + version = 11; + } else if (hostApiVersion.equalsIgnoreCase("5.5")) { + version = 10; + } else if (hostApiVersion.equalsIgnoreCase("5.1")) { + version = 9; + } else if (hostApiVersion.equalsIgnoreCase("5.0")) { + version = 8; + } else if (hostApiVersion.startsWith("4.")) { + version = 7; + } else if (hostApiVersion.equalsIgnoreCase("3.5")) { + version = 4; } return version; } From fa661f3e5fcbc127a700b663a84b774d134f2af1 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Mon, 7 Dec 2020 17:24:13 +0530 Subject: [PATCH 23/31] use static mapping for api version to hardware version Signed-off-by: Abhishek Kumar --- .../vmware/mo/HypervisorHostHelper.java | 62 +++++++++++++------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java index 1c995d3bac3c..0535e1f3dd3d 100644 --- a/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java +++ b/vmware-base/src/main/java/com/cloud/hypervisor/vmware/mo/HypervisorHostHelper.java @@ -153,6 +153,48 @@ public class HypervisorHostHelper { public static final String VSPHERE_DATASTORE_BASE_FOLDER = "fcd"; public static final String VSPHERE_DATASTORE_HIDDEN_FOLDER = ".hidden"; + protected final static Map apiVersionHardwareVersionMap; + + static { + apiVersionHardwareVersionMap = new HashMap(); + apiVersionHardwareVersionMap.put("3.5", 4); + apiVersionHardwareVersionMap.put("3.6", 4); + apiVersionHardwareVersionMap.put("3.7", 4); + apiVersionHardwareVersionMap.put("3.8", 4); + apiVersionHardwareVersionMap.put("3.9", 4); + apiVersionHardwareVersionMap.put("4.0", 7); + apiVersionHardwareVersionMap.put("4.1", 7); + apiVersionHardwareVersionMap.put("4.2", 7); + apiVersionHardwareVersionMap.put("4.3", 7); + apiVersionHardwareVersionMap.put("4.4", 7); + apiVersionHardwareVersionMap.put("4.5", 7); + apiVersionHardwareVersionMap.put("4.6", 7); + apiVersionHardwareVersionMap.put("4.7", 7); + apiVersionHardwareVersionMap.put("4.8", 7); + apiVersionHardwareVersionMap.put("4.9", 7); + apiVersionHardwareVersionMap.put("5.0", 8); + apiVersionHardwareVersionMap.put("5.1", 9); + apiVersionHardwareVersionMap.put("5.2", 9); + apiVersionHardwareVersionMap.put("5.3", 9); + apiVersionHardwareVersionMap.put("5.4", 9); + apiVersionHardwareVersionMap.put("5.5", 10); + apiVersionHardwareVersionMap.put("5.6", 10); + apiVersionHardwareVersionMap.put("5.7", 10); + apiVersionHardwareVersionMap.put("5.8", 10); + apiVersionHardwareVersionMap.put("5.9", 10); + apiVersionHardwareVersionMap.put("6.0", 11); + 
apiVersionHardwareVersionMap.put("6.1", 11); + apiVersionHardwareVersionMap.put("6.2", 11); + apiVersionHardwareVersionMap.put("6.3", 11); + apiVersionHardwareVersionMap.put("6.4", 11); + apiVersionHardwareVersionMap.put("6.5", 13); + apiVersionHardwareVersionMap.put("6.6", 13); + apiVersionHardwareVersionMap.put("6.7", 14); + apiVersionHardwareVersionMap.put("6.8", 14); + apiVersionHardwareVersionMap.put("6.9", 14); + apiVersionHardwareVersionMap.put("7.0", 17); + } + public static VirtualMachineMO findVmFromObjectContent(VmwareContext context, ObjectContent[] ocs, String name, String instanceNameCustomField) { if (ocs != null && ocs.length > 0) { @@ -2218,25 +2260,7 @@ public static Integer getHostHardwareVersion(VmwareHypervisorHost host) { if (hostApiVersion == null) { hostApiVersion = ""; } - if (hostApiVersion.equalsIgnoreCase("7.0")) { - version = 17; - } else if (hostApiVersion.equalsIgnoreCase("6.7")) { - version = 14; - } else if (hostApiVersion.equalsIgnoreCase("6.5")) { - version = 13; - } else if (hostApiVersion.equalsIgnoreCase("6.0")) { - version = 11; - } else if (hostApiVersion.equalsIgnoreCase("5.5")) { - version = 10; - } else if (hostApiVersion.equalsIgnoreCase("5.1")) { - version = 9; - } else if (hostApiVersion.equalsIgnoreCase("5.0")) { - version = 8; - } else if (hostApiVersion.startsWith("4.")) { - version = 7; - } else if (hostApiVersion.equalsIgnoreCase("3.5")) { - version = 4; - } + version = apiVersionHardwareVersionMap.get(hostApiVersion); return version; } } From 131b5e06a7886b045d3089b20189742aa5a710c1 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 29 Dec 2020 12:30:01 +0530 Subject: [PATCH 24/31] fix systemvm migration with volume on zone-wide store Signed-off-by: Abhishek Kumar --- .../java/com/cloud/vm/UserVmManagerImpl.java | 47 +++++++++++++------ 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index dbca34da7ece..126b6da5136d 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -5829,6 +5829,12 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr // check if migrating to same host long srcHostId = vm.getHostId(); + Host srcHost = _resourceMgr.getHost(srcHostId); + if (srcHost == null) { + throw new InvalidParameterValueException("Cannot migrate VM, host with id: " + srcHostId + " for VM not found"); + } + + if (destinationHost.getId() == srcHostId) { throw new InvalidParameterValueException("Cannot migrate VM, VM is already present on this host, please specify valid destination host to migrate the VM"); } @@ -5840,13 +5846,9 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr } if (vm.getType() != VirtualMachine.Type.User) { - // for System VMs check that the destination host is within the same - // cluster - HostVO srcHost = _hostDao.findById(srcHostId); - if (srcHost != null && srcHost.getClusterId() != null && destinationHost.getClusterId() != null) { - if (srcHost.getClusterId().longValue() != destinationHost.getClusterId().longValue()) { - throw new InvalidParameterValueException("Cannot migrate the VM, destination host is not in the same cluster as current host of the VM"); - } + // for System VMs check that the destination host is within the same pod + if (srcHost.getPodId() != null && !srcHost.getPodId().equals(destinationHost.getPodId())) { + throw new 
InvalidParameterValueException("Cannot migrate the VM, destination host is not in the same pod as current host of the VM") } } @@ -6123,6 +6125,23 @@ private boolean isImplicitPlannerUsedByOffering(long offeringId) { return implicitPlannerUsed; } + private boolean isVmVolumesOnZoneWideStore(VMInstanceVO vm) { + final List volumes = _volsDao.findByInstance(vm.getId()); + if (CollectionUtils.isEmpty(volumes)) { + return false; + } + for (Volume volume : volumes) { + if (volume == null || volume.getPoolId() == null) { + return false; + } + StoragePoolVO pool = _storagePoolDao.findById(volume.getPoolId()); + if (pool == null || !ScopeType.ZONE.equals(pool.getScope())) { + return false; + } + } + return true; + } + @Override @ActionEvent(eventType = EventTypes.EVENT_VM_MIGRATE, eventDescription = "migrating VM", async = true) public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinationHost, Map volumeToPool) throws ResourceUnavailableException, @@ -6166,11 +6185,11 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio long srcHostId = vm.getHostId(); Host srcHost = _resourceMgr.getHost(srcHostId); - if(srcHost == null ){ - throw new InvalidParameterValueException("Cannot migrate VM, there is not Host with id: " + srcHostId); + if (srcHost == null) { + throw new InvalidParameterValueException("Cannot migrate VM, host with id: " + srcHostId + " for VM not found"); } - // Check if src and destination hosts are valid and migrating to same host + // Check if source and destination hosts are valid and migrating to same host if (destinationHost.getId() == srcHostId) { throw new InvalidParameterValueException("Cannot migrate VM, VM is already present on this host, please" + " specify valid destination host to migrate the VM"); } @@ -6225,10 +6244,10 @@ public VirtualMachine migrateVirtualMachineWithVolume(Long vmId, Host destinatio List vmVolumes = _volsDao.findUsableVolumesForInstance(vm.getId()); Map volToPoolObjectMap = new HashMap(); - if (!isVMUsingLocalStorage(vm) && destinationHost.getClusterId().equals(srcHost.getClusterId()) - && MapUtils.isEmpty(volumeToPool)) { - // If the destination host is in the same cluster and volumes do not have to be migrated across pools - // then call migrateVirtualMachine for non-user VMs and exception for user VMs + if (!isVMUsingLocalStorage(vm) && MapUtils.isEmpty(volumeToPool) + && (destinationHost.getClusterId().equals(srcHost.getClusterId()) || isVmVolumesOnZoneWideStore(vm))){ + // If volumes do not have to be migrated + // call migrateVirtualMachine for non-user VMs else throw exception if (!VirtualMachine.Type.User.equals(vm.getType())) { return migrateVirtualMachine(vmId, destinationHost); } From 102ceea0f47be25e04ad3492fed3906c1d0b92a0 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 29 Dec 2020 13:23:44 +0530 Subject: [PATCH 25/31] fix Signed-off-by: Abhishek Kumar --- server/src/main/java/com/cloud/vm/UserVmManagerImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 126b6da5136d..77531ce97d22 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -5848,7 +5848,7 @@ public VirtualMachine migrateVirtualMachine(Long vmId, Host destinationHost) thr if (vm.getType() != VirtualMachine.Type.User) { // for System VMs check that the destination host is within the same pod if 
(srcHost.getPodId() != null && !srcHost.getPodId().equals(destinationHost.getPodId())) { - throw new InvalidParameterValueException("Cannot migrate the VM, destination host is not in the same pod as current host of the VM") + throw new InvalidParameterValueException("Cannot migrate the VM, destination host is not in the same pod as current host of the VM"); } } From 892cdeb7857670f8e0c5357572fbc59f4c37b998 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Mon, 4 Jan 2021 16:24:42 +0530 Subject: [PATCH 26/31] fixes Signed-off-by: Abhishek Kumar --- .../src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java | 2 +- server/src/main/java/com/cloud/vm/UserVmManagerImpl.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java index 1c258dc6bdde..a5921264895a 100644 --- a/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java +++ b/plugins/hypervisors/vmware/src/main/java/com/cloud/hypervisor/guru/VMwareGuru.java @@ -1109,7 +1109,7 @@ private VirtualDisk getAttachedDisk(VirtualMachineMO vmMo, String diskPath) thro hostInTargetCluster = hosts.get(0); } if (hostInTargetCluster == null) { - throw new CloudRuntimeException("Migration failed, unable to find suitable target host for worker VM placement while migrating between storage pools of different clusters without shared storages"); + throw new CloudRuntimeException("Migration failed, unable to find suitable target host for VM placement while migrating between storage pools of different clusters without shared storages"); } } MigrateVmToPoolCommand migrateVmToPoolCommand = new MigrateVmToPoolCommand(vm.getInstanceName(), diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java index 77531ce97d22..950e6493d600 100644 --- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java +++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java @@ -6126,7 +6126,7 @@ private boolean isImplicitPlannerUsedByOffering(long offeringId) { } private boolean isVmVolumesOnZoneWideStore(VMInstanceVO vm) { - final List volumes = _volsDao.findByInstance(vm.getId()); + final List volumes = _volsDao.findCreatedByInstance(vm.getId()); if (CollectionUtils.isEmpty(volumes)) { return false; } From da7de1fe93a695e5e1ffb08431e7a15d82a3ea90 Mon Sep 17 00:00:00 2001 From: Abhishek Kumar Date: Tue, 12 Jan 2021 16:22:39 +0530 Subject: [PATCH 27/31] fix post hypervisor storage migration cleanup Signed-off-by: Abhishek Kumar --- .../cloud/vm/VirtualMachineManagerImpl.java | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 05e6033af48d..5d0132b113a2 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2237,14 +2237,14 @@ private Answer[] attemptHypervisorMigration(VMInstanceVO vm, Map volumeToPool, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { + private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map volumeToPool, Long sourceClusterId, Answer[] hypervisorMigrationResults) throws InsufficientCapacityException { boolean isDebugEnabled = 
s_logger.isDebugEnabled(); if(isDebugEnabled) { String msg = String.format("Cleaning up after hypervisor pool migration volumes for VM %s(%s)", vm.getInstanceName(), vm.getUuid()); s_logger.debug(msg); } StoragePool rootVolumePool = null; - if (rootVolumePool == null && MapUtils.isNotEmpty(volumeToPool)) { + if (MapUtils.isNotEmpty(volumeToPool)) { for (Map.Entry entry : volumeToPool.entrySet()) { if (Type.ROOT.equals(entry.getKey().getVolumeType())) { rootVolumePool = entry.getValue(); @@ -2253,17 +2253,15 @@ private void afterHypervisorMigrationCleanup(VMInstanceVO vm, Map findClusterAndHostIdForVm(VMInstanceVO vm) { if (clusterId == null && hostId != null) { HostVO host = _hostDao.findById(hostId); if (host != null) { - clusterId = host.getId(); + clusterId = host.getClusterId(); } } return new Pair<>(clusterId, hostId); @@ -2362,7 +2360,7 @@ private void migrateThroughHypervisorOrStorage(VMInstanceVO vm, Map Date: Fri, 22 Jan 2021 16:12:49 +0530 Subject: [PATCH 28/31] ui: moved over new ui changes from old repo Signed-off-by: Abhishek Kumar --- ui/public/locales/en.json | 8 +++++ ui/src/config/section/compute.js | 12 ++----- ui/src/config/section/infra/ilbvms.js | 19 ++++++---- ui/src/config/section/infra/routers.js | 23 +++++++------ ui/src/config/section/infra/systemVms.js | 23 +++++++------ ui/src/views/compute/MigrateWizard.vue | 44 ++++++++++++++++++++---- 6 files changed, 83 insertions(+), 46 deletions(-) diff --git a/ui/public/locales/en.json b/ui/public/locales/en.json index 5b1cc6a95178..f14878c1115c 100644 --- a/ui/public/locales/en.json +++ b/ui/public/locales/en.json @@ -213,8 +213,10 @@ "label.action.migrate.instance.processing": "Migrating Instance....", "label.action.migrate.router": "Migrate Router", "label.action.migrate.router.processing": "Migrating Router....", +"label.action.migrate.router.to.ps": "Migrate router to another primary storage", "label.action.migrate.systemvm": "Migrate System VM", "label.action.migrate.systemvm.processing": "Migrating System VM....", +"label.action.migrate.systemvm.to.ps": "Migrate system VM to another primary storage", "label.action.project.add.account": "Add Account to Project", "label.action.project.add.user": "Add User to Project", "label.action.reboot.instance": "Reboot Instance", @@ -1358,6 +1360,7 @@ "label.migrate.instance.to.host": "Migrate instance to another host", "label.migrate.instance.to.ps": "Migrate instance to another primary storage", "label.migrate.lb.vm": "Migrate LB VM", +"label.migrate.lb.vm.to.ps": "Migrate LB VM to another primary storage", "label.migrate.router.to": "Migrate Router to", "label.migrate.systemvm.to": "Migrate System VM to", "label.migrate.to.host": "Migrate to host", @@ -2030,6 +2033,7 @@ "label.storage.tags": "Storage Tags", "label.storage.traffic": "Storage Traffic", "label.storageid": "Primary Storage", +"label.storage.migration.required": "Storage Migration Required", "label.storagemotionenabled": "Storage Motion Enabled", "label.storagepolicy": "Storage policy", "label.storagepool": "Storage Pool", @@ -2931,12 +2935,16 @@ "message.migrate.instance.to.ps": "Please confirm that you want to migrate instance to another primary storage.", "message.migrate.router.confirm": "Please confirm the host you wish to migrate the router to:", "message.migrate.systemvm.confirm": "Please confirm the host you wish to migrate the system VM to:", +"message.migrate.lb.vm.to.ps": "Please confirm that you want to migrate LB VM to another primary storage.", +"message.migrate.router.to.ps": "Please confirm that 
you want to migrate router to another primary storage.", +"message.migrate.system.vm.to.ps": "Please confirm that you want to migrate system VM to another primary storage.", "message.migrate.volume": "Please confirm that you want to migrate volume to another primary storage.", "message.migrate.volume.failed": "Migrating volume failed", "message.migrate.volume.processing": "Migrating volume...", "message.migrating.failed": "Migration failed", "message.migrating.processing": "Migration in progress for", "message.migrating.vm.to.host.failed": "Failed to migrate VM to host", +"message.migrating.vm.to.storage.failed": "Failed to migrate VM to storage", "message.move.acl.order": "Move ACL rule order", "message.move.acl.order.failed": "Failed to move ACL rule", "message.move.acl.order.processing": "Moving ACL rule...", diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index 89501406503f..e55e51684fe5 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -299,16 +299,8 @@ export default { docHelp: 'adminguide/virtual_machines.html#moving-vms-between-hosts-manual-live-migration', dataView: true, show: (record, store) => { return ['Stopped'].includes(record.state) && ['Admin'].includes(store.userInfo.roletype) }, - args: ['storageid', 'virtualmachineid'], - mapping: { - storageid: { - api: 'listStoragePools', - params: (record) => { return { zoneid: record.zoneid } } - }, - virtualmachineid: { - value: (record) => { return record.id } - } - } + component: () => import('@/views/compute/MigrateVMStorage'), + popup: true }, { api: 'resetPasswordForVirtualMachine', diff --git a/ui/src/config/section/infra/ilbvms.js b/ui/src/config/section/infra/ilbvms.js index 8b2434e3c40f..393a769604eb 100644 --- a/ui/src/config/section/infra/ilbvms.js +++ b/ui/src/config/section/infra/ilbvms.js @@ -45,13 +45,18 @@ export default { icon: 'drag', label: 'label.action.migrate.router', dataView: true, - show: (record) => { return record.state === 'Running' }, - args: ['virtualmachineid', 'hostid'], - mapping: { - virtualmachineid: { - value: (record) => { return record.id } - } - } + show: (record, store) => { return record.state === 'Running' && ['Admin'].includes(store.userInfo.roletype) }, + component: () => import('@/views/compute/MigrateWizard'), + popup: true + }, + { + api: 'migrateSystemVm', + icon: 'drag', + label: 'label.action.migrate.systemvm.to.ps', + dataView: true, + show: (record, store) => { return ['Stopped'].includes(record.state) && ['VMware'].includes(record.hypervisor) }, + component: () => import('@/views/compute/MigrateVMStorage'), + popup: true } ] } diff --git a/ui/src/config/section/infra/routers.js b/ui/src/config/section/infra/routers.js index b10991409c62..6ffa4680a803 100644 --- a/ui/src/config/section/infra/routers.js +++ b/ui/src/config/section/infra/routers.js @@ -104,17 +104,18 @@ export default { icon: 'drag', label: 'label.action.migrate.router', dataView: true, - show: (record, store) => { return ['Running'].includes(record.state) && ['Admin'].includes(store.userInfo.roletype) }, - args: ['virtualmachineid', 'hostid'], - mapping: { - virtualmachineid: { - value: (record) => { return record.id } - }, - hostid: { - api: 'findHostsForMigration', - params: (record) => { return { virtualmachineid: record.id } } - } - } + show: (record, store) => { return record.state === 'Running' && ['Admin'].includes(store.userInfo.roletype) }, + component: () => import('@/views/compute/MigrateWizard'), + popup: true + }, + { + api: 
'migrateSystemVm', + icon: 'drag', + label: 'label.action.migrate.systemvm.to.ps', + dataView: true, + show: (record, store) => { return ['Stopped'].includes(record.state) && ['VMware'].includes(record.hypervisor) }, + component: () => import('@/views/compute/MigrateVMStorage'), + popup: true }, { api: 'runDiagnostics', diff --git a/ui/src/config/section/infra/systemVms.js b/ui/src/config/section/infra/systemVms.js index 8b3c66afd358..bc20b904d6c5 100644 --- a/ui/src/config/section/infra/systemVms.js +++ b/ui/src/config/section/infra/systemVms.js @@ -69,17 +69,18 @@ export default { icon: 'drag', label: 'label.action.migrate.systemvm', dataView: true, - show: (record) => { return record.state === 'Running' }, - args: ['virtualmachineid', 'hostid'], - mapping: { - virtualmachineid: { - value: (record) => { return record.id } - }, - hostid: { - api: 'findHostsForMigration', - params: (record) => { return { virtualmachineid: record.id } } - } - } + show: (record, store) => { return record.state === 'Running' && ['Admin'].includes(store.userInfo.roletype) }, + component: () => import('@/views/compute/MigrateWizard'), + popup: true + }, + { + api: 'migrateSystemVm', + icon: 'drag', + label: 'label.action.migrate.systemvm.to.ps', + dataView: true, + show: (record, store) => { return ['Stopped'].includes(record.state) && ['VMware'].includes(record.hypervisor) }, + component: () => import('@/views/compute/MigrateVMStorage'), + popup: true }, { api: 'runDiagnostics', diff --git a/ui/src/views/compute/MigrateWizard.vue b/ui/src/views/compute/MigrateWizard.vue index 46346e941a1a..f5082e9a6963 100644 --- a/ui/src/views/compute/MigrateWizard.vue +++ b/ui/src/views/compute/MigrateWizard.vue @@ -47,6 +47,15 @@
{{ record.memoryused | byteToGigabyte }} GB
+ {{ record.clustername }} +
+ {{ record.podname }} +
+ {{ record.requiresStorageMotion ? $t('label.yes') : $t('label.no') }} +
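A note on the static mapping introduced in PATCH 23: HypervisorHostHelper.getHostHardwareVersion() now returns whatever apiVersionHardwareVersionMap.get(hostApiVersion) yields, so any API version absent from the map (for example a future release string) produces a null hardware version that callers must tolerate. A minimal standalone sketch of that lookup behaviour, with the map abbreviated to three entries (the class and method names here are illustrative, not part of the patch):

    import java.util.HashMap;
    import java.util.Map;

    public class ApiVersionLookupSketch {
        // Abbreviated stand-in for apiVersionHardwareVersionMap from PATCH 23.
        private static final Map<String, Integer> API_TO_HW_VERSION = new HashMap<>();
        static {
            API_TO_HW_VERSION.put("6.5", 13);
            API_TO_HW_VERSION.put("6.7", 14);
            API_TO_HW_VERSION.put("7.0", 17);
        }

        // Mirrors getHostHardwareVersion(): a null API version is normalised
        // to "" and an unmapped version yields null, not a default.
        static Integer hardwareVersionFor(String hostApiVersion) {
            if (hostApiVersion == null) {
                hostApiVersion = "";
            }
            return API_TO_HW_VERSION.get(hostApiVersion);
        }

        public static void main(String[] args) {
            System.out.println(hardwareVersionFor("6.7")); // 14
            System.out.println(hardwareVersionFor(null));  // null ("" is unmapped)
            System.out.println(hardwareVersionFor("8.0")); // null (not in the map)
        }
    }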
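Similarly, the zone-wide storage check added in PATCH 24 is what allows a VM whose volumes all live on zone-wide primary storage to be migrated across clusters without storage motion: migrateVirtualMachineWithVolume falls back to plain migrateVirtualMachine when no per-volume pool map is given and either the hosts share a cluster or this predicate holds. A rough rendering of the predicate under simplified stand-in types (VolumeStub and PoolStub replace the CloudStack VOs and DAO lookups the patch actually uses):

    import java.util.List;

    public class ZoneWideStoreCheckSketch {
        enum ScopeType { HOST, CLUSTER, ZONE }

        static class PoolStub {
            final ScopeType scope;
            PoolStub(ScopeType scope) { this.scope = scope; }
        }

        static class VolumeStub {
            final PoolStub pool; // null models a volume with no pool assigned
            VolumeStub(PoolStub pool) { this.pool = pool; }
        }

        // Mirrors isVmVolumesOnZoneWideStore(): an empty volume list, a missing
        // pool, or any non-ZONE-scoped pool makes the whole check fail.
        static boolean allVolumesOnZoneWideStore(List<VolumeStub> volumes) {
            if (volumes == null || volumes.isEmpty()) {
                return false;
            }
            for (VolumeStub volume : volumes) {
                if (volume == null || volume.pool == null || volume.pool.scope != ScopeType.ZONE) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            PoolStub zonePool = new PoolStub(ScopeType.ZONE);
            PoolStub clusterPool = new PoolStub(ScopeType.CLUSTER);
            System.out.println(allVolumesOnZoneWideStore(List.of(new VolumeStub(zonePool))));    // true
            System.out.println(allVolumesOnZoneWideStore(List.of(new VolumeStub(clusterPool)))); // false
        }
    }

This pairs with the same patch's relaxation of the system VM constraint from same-cluster to same-pod: a cross-cluster move is only delegated to the plain migration path when the check proves no volume copy is required.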