@@ -18,12 +18,18 @@
package org.apache.cloudstack.engine.orchestration.service;

import java.util.List;
import java.util.concurrent.Future;

import org.apache.cloudstack.api.response.MigrationResponse;
import org.apache.cloudstack.engine.subsystem.api.storage.DataStore;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo;
import org.apache.cloudstack.engine.subsystem.api.storage.TemplateService.TemplateApiResult;
import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy;

public interface StorageOrchestrationService {
MigrationResponse migrateData(Long srcDataStoreId, List<Long> destDatastores, MigrationPolicy migrationPolicy);

MigrationResponse migrateResources(Long srcImgStoreId, Long destImgStoreId, List<Long> templateIdList, List<Long> snapshotIdList);

Future<TemplateApiResult> orchestrateTemplateCopyToImageStore(TemplateInfo source, DataStore destStore);
}
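A minimal sketch of how a caller might consume the Future returned by the new orchestrateTemplateCopyToImageStore method; the orchestrator reference, variable names, and error handling are assumptions for illustration, not part of this change.

// Illustrative only: block on the orchestration result and inspect it.
// storageOrchestrator, template and destStore are assumed names.
Future<TemplateApiResult> copyFuture = storageOrchestrator.orchestrateTemplateCopyToImageStore(template, destStore);
TemplateApiResult result = copyFuture.get(); // may throw InterruptedException / ExecutionException
if (!result.isSuccess()) {
    logger.warn("Copy of template {} to image store failed: {}", template.getId(), result.getResult());
}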
@@ -78,4 +78,6 @@ public TemplateInfo getTemplate() {
AsyncCallFuture<TemplateApiResult> createDatadiskTemplateAsync(TemplateInfo parentTemplate, TemplateInfo dataDiskTemplate, String path, String diskId, long fileSize, boolean bootable);

List<DatadiskTO> getTemplateDatadisksOnImageStore(TemplateInfo templateInfo, String configurationId);

AsyncCallFuture<TemplateApiResult> copyTemplateToImageStore(DataObject source, DataStore destStore);
}
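The new copyTemplateToImageStore mirrors the other asynchronous operations on this interface; a rough usage sketch, with the waiting and exception handling assumed rather than taken from this change.

// Illustrative only: AsyncCallFuture behaves like a regular Future here.
// templateService, srcTemplate and destStore are assumed names.
AsyncCallFuture<TemplateApiResult> future = templateService.copyTemplateToImageStore(srcTemplate, destStore);
TemplateApiResult result = future.get();
if (!result.isSuccess()) {
    throw new CloudRuntimeException("Failed to copy template to image store: " + result.getResult());
}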
@@ -220,6 +220,10 @@ public interface StorageManager extends StorageService {
"storage.pool.host.connect.workers", "1",
"Number of worker threads to be used to connect hosts to a primary storage", true);

ConfigKey<Boolean> COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES = new ConfigKey<>(Boolean.class, "copy.public.templates.from.other.storages",
"Storage", "true", "Allow SSVMs to try copying public templates from one secondary storage to another instead of downloading them from the source.",
true, ConfigKey.Scope.Zone, null);
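Since the key is zone-scoped, callers would typically resolve it per zone; a minimal sketch, assuming a zoneId is already at hand.

// Illustrative only: zone-scoped ConfigKeys are resolved with valueIn(scopeId).
boolean copyFromPeers = StorageManager.COPY_PUBLIC_TEMPLATES_FROM_OTHER_STORAGES.valueIn(zoneId);
if (!copyFromPeers) {
    // fall back to downloading the template from its original source
}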

/**
* Should commands that do not involve any storage be executed in sequence?
* @return true if commands should execute in sequence
@@ -22,10 +22,12 @@
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import javax.inject.Inject;

@@ -206,12 +208,22 @@ public int compare(DataObject o1, DataObject o2) {

protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore, Map<DataObject, Pair<List<TemplateInfo>, Long>> childTemplates, List<TemplateDataStoreVO> templates) {
List<TemplateInfo> files = new LinkedList<>();
Set<Long> idsForMigration = new HashSet<>();

for (TemplateDataStoreVO template : templates) {
VMTemplateVO templateVO = templateDao.findById(template.getTemplateId());
if (shouldMigrateTemplate(template, templateVO)) {
files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore));
long templateId = template.getTemplateId();
if (idsForMigration.contains(templateId)) {
logger.warn("Template store reference [{}] is duplicated; not considering it for migration.", template);
continue;
}
VMTemplateVO templateVO = templateDao.findById(templateId);
if (!shouldMigrateTemplate(template, templateVO)) {
continue;
}
files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore));
idsForMigration.add(templateId);
}

for (TemplateInfo template: files) {
List<VMTemplateVO> children = templateDao.listByParentTemplatetId(template.getId());
List<TemplateInfo> temps = new ArrayList<>();
@@ -221,6 +233,7 @@ protected List<DataObject> getAllReadyTemplates(DataStore srcDataStore, Map<Data
}
childTemplates.put(template, new Pair<>(temps, getTotalChainSize(temps)));
}

return (List<DataObject>) (List<?>) files;
}
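For reference, a sketch of how the childTemplates map built above could be consumed by the migration planner; the iteration and logging are illustrative assumptions, not code from this change.

// Illustrative only: Pair.first() carries the child templates, Pair.second() the total chain size.
for (Map.Entry<DataObject, Pair<List<TemplateInfo>, Long>> entry : childTemplates.entrySet()) {
    long chainSize = entry.getValue().second();
    logger.debug("Template [{}] migrates together with {} child template(s), {} bytes in total.",
            entry.getKey(), entry.getValue().first().size(), chainSize);
}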

@@ -263,16 +276,37 @@ protected boolean shouldMigrateTemplate(TemplateDataStoreVO template, VMTemplate
*/
protected List<DataObject> getAllReadySnapshotsAndChains(DataStore srcDataStore, Map<DataObject, Pair<List<SnapshotInfo>, Long>> snapshotChains, List<SnapshotDataStoreVO> snapshots) {
List<SnapshotInfo> files = new LinkedList<>();
Set<Long> idsForMigration = new HashSet<>();

for (SnapshotDataStoreVO snapshot : snapshots) {
SnapshotVO snapshotVO = snapshotDao.findById(snapshot.getSnapshotId());
if (snapshot.getState() == ObjectInDataStoreStateMachine.State.Ready &&
snapshotVO != null && snapshotVO.getHypervisorType() != Hypervisor.HypervisorType.Simulator
&& snapshot.getParentSnapshotId() == 0 ) {
SnapshotInfo snap = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), snapshot.getDataStoreId(), snapshot.getRole());
if (snap != null) {
files.add(snap);
}
long snapshotId = snapshot.getSnapshotId();
if (idsForMigration.contains(snapshotId)) {
logger.warn("Snapshot store reference [{}] is duplicated; not considering it for migration.", snapshot);
continue;
}
if (snapshot.getState() != ObjectInDataStoreStateMachine.State.Ready) {
logger.warn("Not migrating snapshot [{}] because its state is not ready.", snapshot);
continue;
}
SnapshotVO snapshotVO = snapshotDao.findById(snapshotId);
if (snapshotVO == null) {
logger.debug("Not migrating snapshot [{}] because we could not find its database entry.", snapshot);
continue;
}
if (snapshotVO.getHypervisorType() == Hypervisor.HypervisorType.Simulator) {
logger.debug("Not migrating snapshot [{}] because its hypervisor type is simulator.", snapshot);
continue;
}
if (snapshot.getParentSnapshotId() != 0) {
continue; // The child snapshot will be migrated in the for loop below.
}
SnapshotInfo snap = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), snapshot.getDataStoreId(), snapshot.getRole());
if (snap == null) {
logger.debug("Not migrating snapshot [{}] because we could not get its information.", snapshot);
continue;
}
files.add(snap);
idsForMigration.add(snapshotId);
}

for (SnapshotInfo parent : files) {
@@ -285,7 +319,7 @@ protected List<DataObject> getAllReadySnapshotsAndChains(DataStore srcDataStore,
chain.addAll(children);
}
}
snapshotChains.put(parent, new Pair<List<SnapshotInfo>, Long>(chain, getTotalChainSize(chain)));
snapshotChains.put(parent, new Pair<>(chain, getTotalChainSize(chain)));
}

return (List<DataObject>) (List<?>) files;
@@ -306,14 +340,31 @@ protected Long getTotalChainSize(List<? extends DataObject> chain) {

protected List<DataObject> getAllReadyVolumes(DataStore srcDataStore, List<VolumeDataStoreVO> volumes) {
List<DataObject> files = new LinkedList<>();
Set<Long> idsForMigration = new HashSet<>();

for (VolumeDataStoreVO volume : volumes) {
if (volume.getState() == ObjectInDataStoreStateMachine.State.Ready) {
VolumeInfo volumeInfo = volumeFactory.getVolume(volume.getVolumeId(), srcDataStore);
if (volumeInfo != null && volumeInfo.getHypervisorType() != Hypervisor.HypervisorType.Simulator) {
files.add(volumeInfo);
}
long volumeId = volume.getVolumeId();
if (idsForMigration.contains(volumeId)) {
logger.warn("Volume store reference [{}] is duplicated; not considering it for migration.", volume);
continue;
}
if (volume.getState() != ObjectInDataStoreStateMachine.State.Ready) {
logger.debug("Not migrating volume [{}] because its state is not ready.", volume);
continue;
}
VolumeInfo volumeInfo = volumeFactory.getVolume(volume.getVolumeId(), srcDataStore);
if (volumeInfo == null) {
logger.debug("Not migrating volume [{}] because we could not get its information.", volume);
continue;
}
if (volumeInfo.getHypervisorType() == Hypervisor.HypervisorType.Simulator) {
logger.debug("Not migrating volume [{}] because its hypervisor type is simulator.", volume);
continue;
}
files.add(volumeInfo);
idsForMigration.add(volumeId);
}

return files;
}
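The same duplicate-id guard now appears in all three getAllReady* methods; a hypothetical helper that captures it, shown only to illustrate the shared pattern and not part of this change.

// Hypothetical extraction of the duplicate-reference check used by the template,
// snapshot and volume loops above; the class logger is assumed to be available.
private boolean isDuplicatedStoreRef(Set<Long> idsForMigration, long id, Object storeRef) {
    if (idsForMigration.contains(id)) {
        logger.warn("Store reference [{}] is duplicated; not considering it for migration.", storeRef);
        return true;
    }
    return false;
}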

@@ -325,10 +376,9 @@ protected List<DataObject> getAllReadyVolumes(DataStore srcDataStore) {
/** Returns the count of active SSVMs (SSVMs whose agents are in the connected state), used to dynamically
* increase the thread pool size when SSVMs scale.
*/
protected int activeSSVMCount(DataStore dataStore) {
long datacenterId = dataStore.getScope().getScopeId();
protected int activeSSVMCount(Long zoneId) {
List<SecondaryStorageVmVO> ssvms =
secStorageVmDao.getSecStorageVmListInStates(null, datacenterId, VirtualMachine.State.Running, VirtualMachine.State.Migrating);
secStorageVmDao.getSecStorageVmListInStates(null, zoneId, VirtualMachine.State.Running, VirtualMachine.State.Migrating);
int activeSSVMs = 0;
for (SecondaryStorageVmVO vm : ssvms) {
String name = "s-"+vm.getId()+"-VM";
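As the Javadoc on activeSSVMCount notes, the count is meant to drive dynamic thread-pool sizing; a minimal sketch of that idea, where the executor and the per-SSVM worker constant are assumptions, not code from this change.

// Illustrative only: size a copy-worker pool from the number of running/migrating SSVMs in the zone.
// WORKERS_PER_SSVM is a hypothetical constant; ExecutorService/Executors come from java.util.concurrent.
int ssvmCount = activeSSVMCount(zoneId);
int poolSize = Math.max(1, ssvmCount) * WORKERS_PER_SSVM;
ExecutorService copyExecutor = Executors.newFixedThreadPool(poolSize);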