Merged · 17 commits
@@ -26,15 +26,18 @@

import com.cloud.agent.api.to.NicTO;
import com.cloud.agent.api.to.VirtualMachineTO;
import com.cloud.deploy.DataCenterDeployment;
import com.cloud.deploy.DeployDestination;
import com.cloud.deploy.DeploymentPlan;
import com.cloud.deploy.DeploymentPlanner;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.exception.AgentUnavailableException;
import com.cloud.exception.ConcurrentOperationException;
import com.cloud.exception.InsufficientCapacityException;
import com.cloud.exception.InsufficientServerCapacityException;
import com.cloud.exception.OperationTimedoutException;
import com.cloud.exception.ResourceUnavailableException;
import com.cloud.host.Host;
import com.cloud.hypervisor.Hypervisor.HypervisorType;
import com.cloud.network.Network;
import com.cloud.offering.DiskOffering;
@@ -246,6 +249,10 @@ static String getHypervisorHostname(String name) {

UserVm restoreVirtualMachine(long vmId, Long newTemplateId) throws ResourceUnavailableException, InsufficientCapacityException;

boolean checkIfVmHasClusterWideVolumes(Long vmId);

DataCenterDeployment getMigrationDeployment(VirtualMachine vm, Host host, Long poolId, ExcludeList excludes);

/**
* Returns true if the VM's Root volume is allocated at a local storage pool
*/
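The two new interface methods give migration code a storage-awareness check and a plan builder in one place. A minimal caller sketch, assuming an injected VirtualMachineManager (the MigrationPlanExample class and planFor helper are illustrative, not part of this PR):

import com.cloud.deploy.DataCenterDeployment;
import com.cloud.deploy.DeploymentPlanner.ExcludeList;
import com.cloud.host.Host;
import com.cloud.vm.VirtualMachine;
import com.cloud.vm.VirtualMachineManager;

public class MigrationPlanExample {
    // Hypothetical helper: build a migration plan that never picks the source host.
    static DataCenterDeployment planFor(VirtualMachineManager vmManager,
            VirtualMachine vm, Host sourceHost, Long poolId) {
        ExcludeList excludes = new ExcludeList();
        excludes.addHost(sourceHost.getId()); // exclude the host being migrated away from
        // getMigrationDeployment() widens the search to the whole zone only when
        // migrate.vm.across.clusters is enabled and cluster-wide volumes do not pin the VM.
        return vmManager.getMigrationDeployment(vm, sourceHost, poolId, excludes);
    }
}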
@@ -135,6 +135,7 @@
import com.cloud.configuration.Resource.ResourceType;
import com.cloud.dc.ClusterDetailsDao;
import com.cloud.dc.ClusterDetailsVO;
import com.cloud.dc.ClusterVO;
import com.cloud.dc.DataCenter;
import com.cloud.dc.DataCenterVO;
import com.cloud.dc.HostPodVO;
@@ -250,6 +251,8 @@
import com.cloud.vm.snapshot.dao.VMSnapshotDao;
import com.google.common.base.Strings;

import static com.cloud.configuration.ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS;

public class VirtualMachineManagerImpl extends ManagerBase implements VirtualMachineManager, VmWorkJobHandler, Listener, Configurable {
private static final Logger s_logger = Logger.getLogger(VirtualMachineManagerImpl.class);

@@ -3368,9 +3371,9 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f
}
}

final DataCenterDeployment plan = new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, poolId, null);
final ExcludeList excludes = new ExcludeList();
excludes.addHost(hostId);
DataCenterDeployment plan = getMigrationDeployment(vm, host, poolId, excludes);

DeployDestination dest = null;
while (true) {
@@ -3382,16 +3385,12 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f
s_logger.warn("Unable to create deployment, affinity rules associted to the VM conflict", e2);
throw new CloudRuntimeException("Unable to create deployment, affinity rules associted to the VM conflict");
}

if (dest != null) {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found destination " + dest + " for migrating to.");
}
} else {
if (s_logger.isDebugEnabled()) {
s_logger.debug("Unable to find destination for migrating the vm " + profile);
}
throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", host.getClusterId());
if (dest == null) {
s_logger.warn("Unable to find destination for migrating the vm " + profile);
throw new InsufficientServerCapacityException("Unable to find a server to migrate to.", DataCenter.class, host.getDataCenterId());
}
if (s_logger.isDebugEnabled()) {
s_logger.debug("Found destination " + dest + " for migrating to.");
}

excludes.addHost(dest.getHost().getId());
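The surrounding while (true) loop is a simple retry: every destination that fails is added to the ExcludeList so the planner never proposes it again, until the migration succeeds or capacity runs out. Schematically (a paraphrase with hypothetical findDestination/tryMigrate helpers, not the PR's exact code):

// Retry skeleton: accumulate rejected hosts in the ExcludeList.
while (true) {
    DeployDestination dest = findDestination(profile, plan, excludes); // hypothetical planning call
    if (dest == null) {
        throw new InsufficientServerCapacityException("Unable to find a server to migrate to.",
                DataCenter.class, host.getDataCenterId());
    }
    excludes.addHost(dest.getHost().getId()); // do not retry this destination
    if (tryMigrate(vm, dest)) { // hypothetical helper wrapping the migrate call
        break; // migrated successfully
    }
    // otherwise loop: the failed destination is now excluded, plan again
}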
@@ -3420,6 +3419,41 @@ private void orchestrateMigrateAway(final String vmUuid, final long srcHostId, f
}
}

/**
* Checks whether the virtual machine has any volume on a cluster-wide storage pool.
* @param vmId id of the virtual machine
* @return true if any volume is on a cluster-wide pool, false otherwise
*/
@Override
public boolean checkIfVmHasClusterWideVolumes(Long vmId) {
final List<VolumeVO> volumesList = _volsDao.findCreatedByInstance(vmId);

return volumesList.parallelStream()
.anyMatch(vol -> _storagePoolDao.findById(vol.getPoolId()).getScope().equals(ScopeType.CLUSTER));

}

@Override
public DataCenterDeployment getMigrationDeployment(final VirtualMachine vm, final Host host, final Long poolId, final ExcludeList excludes) {
if (MIGRATE_VM_ACROSS_CLUSTERS.valueIn(host.getDataCenterId()) &&
(HypervisorType.VMware.equals(host.getHypervisorType()) || !checkIfVmHasClusterWideVolumes(vm.getId()))) {
s_logger.info("Searching for hosts in the zone for vm migration");
List<Long> clustersToExclude = _clusterDao.listAllClusters(host.getDataCenterId());
List<ClusterVO> clusterList = _clusterDao.listByDcHyType(host.getDataCenterId(), host.getHypervisorType().toString());
for (ClusterVO cluster : clusterList) {
clustersToExclude.remove(cluster.getId());
}
for (Long clusterId : clustersToExclude) {
excludes.addCluster(clusterId);
}
if (VirtualMachine.systemVMs.contains(vm.getType())) {
return new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), null, null, poolId, null);
}
return new DataCenterDeployment(host.getDataCenterId(), null, null, null, poolId, null);
}
return new DataCenterDeployment(host.getDataCenterId(), host.getPodId(), host.getClusterId(), null, poolId, null);
}

protected class CleanupTask extends ManagedContextRunnable {
@Override
protected void runInContext() {
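getMigrationDeployment() yields one of three plan shapes. A sketch with made-up ids (zone 1, pod 2, cluster 3, storage pool 4) to show which constraints each branch drops:

import com.cloud.deploy.DataCenterDeployment;

class MigrationScopeExample {
    // Cross-cluster migration enabled, user VM: zone-wide search, no pod or cluster pin.
    static DataCenterDeployment zoneWide() {
        return new DataCenterDeployment(1L, null, null, null, 4L, null);
    }
    // Cross-cluster migration enabled, system VM: the pod constraint is kept
    // (presumably so system VMs stay close to their pod-level resources).
    static DataCenterDeployment podScoped() {
        return new DataCenterDeployment(1L, 2L, null, null, 4L, null);
    }
    // Default: the original behavior, pinned to the source host's cluster.
    static DataCenterDeployment clusterScoped() {
        return new DataCenterDeployment(1L, 2L, 3L, null, 4L, null);
    }
}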
@@ -27,7 +27,7 @@
public interface CapacityDao extends GenericDao<CapacityVO, Long> {
CapacityVO findByHostIdType(Long hostId, short capacityType);

List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone);
List<Long> listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone);

List<Long> listHostsWithEnoughCapacity(int requiredCpu, long requiredRam, Long clusterId, String hostType);

@@ -37,7 +37,7 @@ public interface CapacityDao extends GenericDao<CapacityVO, Long> {

List<SummedCapacity> findNonSharedStorageForClusterPodZone(Long zoneId, Long podId, Long clusterId);

Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityType, boolean isZone);
Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, long vmId, short capacityType, boolean isZone);

List<SummedCapacity> findCapacityBy(Integer capacityType, Long zoneId, Long podId, Long clusterId);

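Both queries gain a vmId parameter so the capacity SQL can honor explicit dedication for the VM being placed. A hedged call sketch (the CapacityQueryExample class, zone id 1, VM id 12, and the CPU/RAM figures are illustrative):

import java.util.List;
import java.util.Map;

import com.cloud.capacity.Capacity;
import com.cloud.capacity.dao.CapacityDao;
import com.cloud.utils.Pair;

class CapacityQueryExample {
    // List clusters in zone 1 that can fit a 2 x 1000 MHz, 2 GiB VM (id 12),
    // then order them by aggregate CPU usage; isZone=true scopes the query by zone id.
    static List<Long> candidateClusters(CapacityDao capacityDao) {
        List<Long> fitting = capacityDao.listClustersInZoneOrPodByHostCapacities(
                1L, 12L, 2 * 1000, 2048L * 1024 * 1024, Capacity.CAPACITY_TYPE_CPU, true);
        Pair<List<Long>, Map<Long, Double>> ordered =
                capacityDao.orderClustersByAggregateCapacity(1L, 12L, Capacity.CAPACITY_TYPE_CPU, true);
        List<Long> result = ordered.first();
        result.retainAll(fitting); // keep only clusters with enough per-host capacity
        return result;
    }
}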
@@ -75,13 +75,24 @@ public class CapacityDaoImpl extends GenericDaoBase<CapacityVO, Long> implements
+ " AND host_capacity.host_id IN (SELECT capacity.host_id FROM `cloud`.`op_host_capacity` capacity JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id= cluster_details.cluster_id) where capacity_type='0' AND cluster_details.name='memoryOvercommitRatio' AND ((total_capacity* cluster_details.value) - used_capacity ) >= ?)) ";

private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART1 =
"SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity ) FROM `cloud`.`op_host_capacity` capacity WHERE ";
"SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity ) FROM `cloud`.`op_host_capacity` capacity ";

private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2 =
" AND capacity_type = ? AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";
" AND capacity_type = ? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity) ASC";

private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_JOIN_1 =
"JOIN host ON capacity.host_id = host.id " +
"LEFT JOIN (SELECT affinity_group.id, agvm.instance_id FROM affinity_group_vm_map agvm JOIN affinity_group ON agvm.affinity_group_id = affinity_group.id AND affinity_group.type='ExplicitDedication') AS ag ON ag.instance_id = ? " +
"LEFT JOIN dedicated_resources dr_pod ON dr_pod.pod_id IS NOT NULL AND dr_pod.pod_id = host.pod_id " +
"LEFT JOIN dedicated_resources dr_cluster ON dr_cluster.cluster_id IS NOT NULL AND dr_cluster.cluster_id = host.cluster_id " +
"LEFT JOIN dedicated_resources dr_host ON dr_host.host_id IS NOT NULL AND dr_host.host_id = host.id ";

private static final String ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_JOIN_2 =
" AND ((ag.id IS NULL AND dr_pod.pod_id IS NULL AND dr_cluster.cluster_id IS NULL AND dr_host.host_id IS NULL) OR " +
"(dr_pod.affinity_group_id = ag.id OR dr_cluster.affinity_group_id = ag.id OR dr_host.affinity_group_id = ag.id))";

private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1 =
"SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) WHERE ";
"SELECT capacity.cluster_id, SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) FROM `cloud`.`op_host_capacity` capacity INNER JOIN `cloud`.`cluster_details` cluster_details ON (capacity.cluster_id = cluster_details.cluster_id) ";

private static final String ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART2 =
" AND capacity_type = ? AND cluster_details.name =? GROUP BY capacity.cluster_id ORDER BY SUM(used_capacity+reserved_capacity)/SUM(total_capacity * cluster_details.value) ASC";
@@ -572,7 +583,7 @@ public CapacityVO findByHostIdType(Long hostId, short capacityType) {
}

@Override
public List<Long> listClustersInZoneOrPodByHostCapacities(long id, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone) {
public List<Long> listClustersInZoneOrPodByHostCapacities(long id, long vmId, int requiredCpu, long requiredRam, short capacityTypeForOrdering, boolean isZone) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
@@ -854,7 +865,7 @@ public boolean removeBy(Short capacityType, Long zoneId, Long podId, Long cluste
}

@Override
public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, short capacityTypeForOrdering, boolean isZone) {
public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long id, long vmId, short capacityTypeForOrdering, boolean isZone) {
TransactionLegacy txn = TransactionLegacy.currentTxn();
PreparedStatement pstmt = null;
List<Long> result = new ArrayList<Long>();
@@ -866,11 +877,14 @@ public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_OVERCOMMIT_CAPACITY_PART1);
}

sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_JOIN_1);
if (isZone) {
sql.append(" data_center_id = ?");
sql.append("WHERE capacity.capacity_state = 'Enabled' AND capacity.data_center_id = ?");
} else {
sql.append(" pod_id = ?");
sql.append("WHERE capacity.capacity_state = 'Enabled' AND capacity.pod_id = ?");
}
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_JOIN_2);

if (capacityTypeForOrdering != Capacity.CAPACITY_TYPE_CPU && capacityTypeForOrdering != Capacity.CAPACITY_TYPE_MEMORY) {
sql.append(ORDER_CLUSTERS_BY_AGGREGATE_CAPACITY_PART2);
} else {
@@ -879,13 +893,14 @@ public Pair<List<Long>, Map<Long, Double>> orderClustersByAggregateCapacity(long

try {
pstmt = txn.prepareAutoCloseStatement(sql.toString());
pstmt.setLong(1, id);
pstmt.setShort(2, capacityTypeForOrdering);
pstmt.setLong(1, vmId);
pstmt.setLong(2, id);
pstmt.setShort(3, capacityTypeForOrdering);

if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_CPU) {
pstmt.setString(3, "cpuOvercommitRatio");
pstmt.setString(4, "cpuOvercommitRatio");
} else if (capacityTypeForOrdering == Capacity.CAPACITY_TYPE_MEMORY) {
pstmt.setString(3, "memoryOvercommitRatio");
pstmt.setString(4, "memoryOvercommitRatio");
}

ResultSet rs = pstmt.executeQuery();
@@ -382,15 +382,15 @@ private void initializeForTest(VirtualMachineProfileImpl vmProfile, DataCenterDe
clustersWithEnoughCapacity.add(2L);
clustersWithEnoughCapacity.add(3L);
when(
capacityDao.listClustersInZoneOrPodByHostCapacities(dataCenterId, noOfCpusInOffering * cpuSpeedInOffering, ramInOffering * 1024L * 1024L,
capacityDao.listClustersInZoneOrPodByHostCapacities(dataCenterId, 12L, noOfCpusInOffering * cpuSpeedInOffering, ramInOffering * 1024L * 1024L,
Capacity.CAPACITY_TYPE_CPU, true)).thenReturn(clustersWithEnoughCapacity);

Map<Long, Double> clusterCapacityMap = new HashMap<Long, Double>();
clusterCapacityMap.put(1L, 2048D);
clusterCapacityMap.put(2L, 2048D);
clusterCapacityMap.put(3L, 2048D);
Pair<List<Long>, Map<Long, Double>> clustersOrderedByCapacity = new Pair<List<Long>, Map<Long, Double>>(clustersWithEnoughCapacity, clusterCapacityMap);
when(capacityDao.orderClustersByAggregateCapacity(dataCenterId, Capacity.CAPACITY_TYPE_CPU, true)).thenReturn(clustersOrderedByCapacity);
when(capacityDao.orderClustersByAggregateCapacity(dataCenterId, 12L, Capacity.CAPACITY_TYPE_CPU, true)).thenReturn(clustersOrderedByCapacity);

List<Long> disabledClusters = new ArrayList<Long>();
List<Long> clustersWithDisabledPods = new ArrayList<Long>();
@@ -439,6 +439,8 @@ public class ConfigurationManagerImpl extends ManagerBase implements Configurati

public static final ConfigKey<Integer> VM_USERDATA_MAX_LENGTH = new ConfigKey<Integer>("Advanced", Integer.class, VM_USERDATA_MAX_LENGTH_STRING, "32768",
"Max length of vm userdata after base64 decoding. Default is 32768 and maximum is 1048576", true);
public static final ConfigKey<Boolean> MIGRATE_VM_ACROSS_CLUSTERS = new ConfigKey<Boolean>(Boolean.class, "migrate.vm.across.clusters", "Advanced", "false",
"Indicates whether the VM can be migrated to a different cluster if no host is found in the same cluster", true, ConfigKey.Scope.Zone, null);

private static final String IOPS_READ_RATE = "IOPS Read";
private static final String IOPS_WRITE_RATE = "IOPS Write";
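MIGRATE_VM_ACROSS_CLUSTERS is zone-scoped, so callers resolve it per zone rather than reading a single global value; this is the lookup getMigrationDeployment() performs. A minimal sketch (the wrapper class is illustrative):

import static com.cloud.configuration.ConfigurationManagerImpl.MIGRATE_VM_ACROSS_CLUSTERS;

class MigrateAcrossClustersExample {
    // Returns the zone's override if set, otherwise the global default ("false").
    static boolean allowCrossClusterMigration(long zoneId) {
        return MIGRATE_VM_ACROSS_CLUSTERS.valueIn(zoneId);
    }
}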
@@ -6535,6 +6537,6 @@ public String getConfigComponentName() {
public ConfigKey<?>[] getConfigKeys() {
return new ConfigKey<?>[] {SystemVMUseLocalStorage, IOPS_MAX_READ_LENGTH, IOPS_MAX_WRITE_LENGTH,
BYTES_MAX_READ_LENGTH, BYTES_MAX_WRITE_LENGTH, ADD_HOST_ON_SERVICE_RESTART_KVM, SET_HOST_DOWN_TO_MAINTENANCE, VM_SERVICE_OFFERING_MAX_CPU_CORES,
VM_SERVICE_OFFERING_MAX_RAM_SIZE, VM_USERDATA_MAX_LENGTH};
VM_SERVICE_OFFERING_MAX_RAM_SIZE, VM_USERDATA_MAX_LENGTH, MIGRATE_VM_ACROSS_CLUSTERS};
}
}
8 changes: 4 additions & 4 deletions server/src/main/java/com/cloud/deploy/FirstFitPlanner.java
@@ -383,7 +383,7 @@ private List<Long> scanClustersForDestinationInZoneOrPod(long id, boolean isZone
long requiredRam = offering.getRamSize() * 1024L * 1024L;

//list clusters under this zone by cpu and ram capacity
Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo = listClustersByCapacity(id, requiredCpu, requiredRam, avoid, isZone);
Pair<List<Long>, Map<Long, Double>> clusterCapacityInfo = listClustersByCapacity(id, vmProfile.getId(), requiredCpu, requiredRam, avoid, isZone);
List<Long> prioritizedClusterIds = clusterCapacityInfo.first();
if (!prioritizedClusterIds.isEmpty()) {
if (avoid.getClustersToAvoid() != null) {
@@ -441,7 +441,7 @@ protected List<Long> reorderPods(Pair<List<Long>, Map<Long, Double>> podCapacity
return podIdsByCapacity;
}

protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone) {
protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, long vmId, int requiredCpu, long requiredRam, ExcludeList avoid, boolean isZone) {
//look at the aggregate available CPU and RAM per cluster
//although an aggregate value may be a false indicator that a cluster can host a VM, it will at least eliminate those clusters which definitely cannot

@@ -456,11 +456,11 @@ protected Pair<List<Long>, Map<Long, Double>> listClustersByCapacity(long id, in
capacityType = Capacity.CAPACITY_TYPE_MEMORY;
}

List<Long> clusterIdswithEnoughCapacity = capacityDao.listClustersInZoneOrPodByHostCapacities(id, requiredCpu, requiredRam, capacityType, isZone);
List<Long> clusterIdswithEnoughCapacity = capacityDao.listClustersInZoneOrPodByHostCapacities(id, vmId, requiredCpu, requiredRam, capacityType, isZone);
if (s_logger.isTraceEnabled()) {
s_logger.trace("ClusterId List having enough CPU and RAM capacity: " + clusterIdswithEnoughCapacity);
}
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderClustersByAggregateCapacity(id, capacityType, isZone);
Pair<List<Long>, Map<Long, Double>> result = capacityDao.orderClustersByAggregateCapacity(id, vmId, capacityType, isZone);
List<Long> clusterIdsOrderedByAggregateCapacity = result.first();
//only keep the clusters that have enough capacity to host this VM
if (s_logger.isTraceEnabled()) {