diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml
index 4d901f10bb40..03964642e6b8 100644
--- a/engine/schema/pom.xml
+++ b/engine/schema/pom.xml
@@ -106,7 +106,7 @@
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-xen")
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-ovm")
templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-hyperv")
- File file = new File("./engine/schema/dist/systemvm-templates/md5sum.txt")
+ File file = new File("./engine/schema/dist/systemvm-templates/sha512sum.txt")
def lines = file.readLines()
for (template in templateList) {
def data = lines.findAll { it.contains(template) }
@@ -135,7 +135,7 @@
wget
- ${project.systemvm.template.location}/${cs.version}/md5sum.txt
+ ${project.systemvm.template.location}/${cs.version}/sha512sum.txt
${basedir}/dist/systemvm-templates/
true
true
@@ -205,7 +205,7 @@
true
${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-kvm.qcow2.bz2
${basedir}/dist/systemvm-templates/
- ${kvm.checksum}
+ ${kvm.checksum}
@@ -241,7 +241,7 @@
true
${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-vmware.ova
${basedir}/dist/systemvm-templates/
- ${vmware.checksum}
+ ${vmware.checksum}
@@ -277,7 +277,7 @@
true
${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-xen.vhd.bz2
${basedir}/dist/systemvm-templates/
- ${xen.checksum}
+ ${xen.checksum}
@@ -313,7 +313,7 @@
true
${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-ovm.raw.bz2
${basedir}/dist/systemvm-templates/
- ${ovm.checksum}
+ ${ovm.checksum}
@@ -349,7 +349,7 @@
true
${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-hyperv.vhd.zip
${basedir}/dist/systemvm-templates/
- ${hyperv.checksum}
+ ${hyperv.checksum}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/AsyncPhysicalDiskConnectorDecorator.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/AsyncPhysicalDiskConnectorDecorator.java
new file mode 100644
index 000000000000..148ddb6cae1d
--- /dev/null
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/AsyncPhysicalDiskConnectorDecorator.java
@@ -0,0 +1,53 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+package com.cloud.hypervisor.kvm.storage;
+
+import java.util.Map;
+
+/**
+ * Decorator for StorageAdapters that implement asynchronous physical disk connections to improve
+ * performance on VM starts with large numbers of disks.
+ */
+public interface AsyncPhysicalDiskConnectorDecorator {
+ /**
+ * Initiates a connection attempt (may or may not complete it depending on implementation)
+ * @param path
+ * @param pool
+ * @param details
+ * @return
+ */
+ public boolean startConnectPhysicalDisk(String path, KVMStoragePool pool, Map details);
+
+ /**
+ * Tests if the physical disk is connected
+ * @param path
+ * @param pool
+ * @param details
+ * @return
+ */
+ public boolean isConnected(String path, KVMStoragePool pool, Map details);
+
+ /**
+ * Completes a connection attempt after isConnected returns true.
+ * @param path
+ * @param pool
+ * @param details
+ * @return
+ * @throws Exception
+ */
+ public boolean finishConnectPhysicalDisk(String path, KVMStoragePool pool, Map details) throws Exception;
+}
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
index 0198cc5bbde5..73cdf7879fe9 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStoragePoolManager.java
@@ -20,6 +20,7 @@
import java.lang.reflect.Modifier;
import java.net.URI;
import java.net.URISyntaxException;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
@@ -42,9 +43,11 @@
import com.cloud.hypervisor.kvm.resource.KVMHABase.PoolType;
import com.cloud.hypervisor.kvm.resource.KVMHAMonitor;
import com.cloud.storage.Storage;
+import com.cloud.storage.StorageManager;
import com.cloud.storage.Storage.StoragePoolType;
import com.cloud.storage.StorageLayer;
import com.cloud.storage.Volume;
+import com.cloud.utils.StringUtils;
import com.cloud.utils.Pair;
import com.cloud.utils.Ternary;
import com.cloud.utils.exception.CloudRuntimeException;
@@ -164,6 +167,19 @@ public boolean connectPhysicalDisk(StoragePoolType type, String poolUuid, String
return adaptor.connectPhysicalDisk(volPath, pool, details, false);
}
+ private static class ConnectingDiskInfo {
+ ConnectingDiskInfo(VolumeObjectTO volume, StorageAdaptor adaptor, KVMStoragePool pool, Map details) {
+ this.volume = volume;
+ this.adapter = adaptor;
+ this.pool = pool;
+ this.details = details;
+ }
+ VolumeObjectTO volume;
+ KVMStoragePool pool = null;
+ StorageAdaptor adapter = null;
+ Map details = null;
+ }
+
public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec, boolean isVMMigrate) {
boolean result = false;
@@ -171,6 +187,10 @@ public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec, boolean is
List disks = Arrays.asList(vmSpec.getDisks());
+
+ // disks that connect in background
+ List connectingDisks = new ArrayList<>();
+
for (DiskTO disk : disks) {
if (disk.getType() == Volume.Type.ISO) {
result = true;
@@ -187,17 +207,79 @@ public boolean connectPhysicalDisksViaVmSpec(VirtualMachineTO vmSpec, boolean is
KVMStoragePool pool = getStoragePool(store.getPoolType(), store.getUuid());
StorageAdaptor adaptor = getStorageAdaptor(pool.getType());
- result = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails(), isVMMigrate);
+ if (adaptor instanceof AsyncPhysicalDiskConnectorDecorator) {
+ // If the adaptor supports async disk connection, we can start the connection
+ // and return immediately, allowing the connection to complete in the background.
+ result = ((AsyncPhysicalDiskConnectorDecorator) adaptor).startConnectPhysicalDisk(vol.getPath(), pool, disk.getDetails());
+ if (!result) {
+ logger.error("Failed to start connecting disks via vm spec for vm: " + vmName + " volume:" + vol.toString());
+ return false;
+ }
- if (!result) {
- logger.error("Failed to connect disks via Instance spec for Instance: " + vmName + " volume:" + vol.toString());
- return result;
+ // add disk to list of disks to check later
+ connectingDisks.add(new ConnectingDiskInfo(vol, adaptor, pool, disk.getDetails()));
+ } else {
+ result = adaptor.connectPhysicalDisk(vol.getPath(), pool, disk.getDetails(), isVMMigrate);
+
+ if (!result) {
+ logger.error("Failed to connect disks via vm spec for vm: " + vmName + " volume:" + vol.toString());
+ return result;
+ }
+ }
+ }
+
+ // if we have any connecting disks to check, wait for them to connect or timeout
+ if (!connectingDisks.isEmpty()) {
+ for (ConnectingDiskInfo connectingDisk : connectingDisks) {
+ StorageAdaptor adaptor = connectingDisk.adapter;
+ KVMStoragePool pool = connectingDisk.pool;
+ VolumeObjectTO volume = connectingDisk.volume;
+ Map details = connectingDisk.details;
+ long diskWaitTimeMillis = getDiskWaitTimeMillis(details);
+
+ // wait for the disk to connect
+ long startTime = System.currentTimeMillis();
+ while (System.currentTimeMillis() - startTime < diskWaitTimeMillis) {
+ if (((AsyncPhysicalDiskConnectorDecorator) adaptor).isConnected(volume.getPath(), pool, details)) {
+ logger.debug(String.format("Disk %s connected successfully for VM %s", volume.getPath(), vmName));
+ break;
+ }
+
+ sleep(1000); // wait for 1 second before checking again
+ }
}
}
return result;
}
+ private long getDiskWaitTimeMillis(Map details) {
+ int waitTimeInSec = 60; // default wait time in seconds
+ if (details != null && details.containsKey(StorageManager.STORAGE_POOL_DISK_WAIT.toString())) {
+ String waitTime = details.get(StorageManager.STORAGE_POOL_DISK_WAIT.toString());
+ if (StringUtils.isNotEmpty(waitTime)) {
+ waitTimeInSec = Integer.valueOf(waitTime).intValue();
+ logger.debug(String.format("%s set to %s", StorageManager.STORAGE_POOL_DISK_WAIT.toString(), waitTimeInSec));
+ }
+ } else {
+ // wait at least 60 seconds even if input was lower -- NOTE(review): dead branch: waitTimeInSec is still its 60s default here (details lacked the key), so this check never fires, and values below 60 supplied via details are NOT raised; the minimum-60 enforcement likely belongs after the details lookup.
+ if (waitTimeInSec < 60) {
+ logger.debug(String.format("%s was less than 60s. Increasing to 60s default.", StorageManager.STORAGE_POOL_DISK_WAIT.toString()));
+ waitTimeInSec = 60;
+ }
+ }
+ return waitTimeInSec * 1000; // convert to milliseconds
+ }
+
+ private boolean sleep(long millis) {
+ try {
+ Thread.sleep(millis);
+ return true;
+ } catch (InterruptedException e) {
+ return false;
+ }
+ }
+
public boolean disconnectPhysicalDisk(Map volumeToDisconnect) {
logger.debug(String.format("Disconnect physical disks using volume map: %s", volumeToDisconnect.toString()));
if (MapUtils.isEmpty(volumeToDisconnect)) {
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
index 33bd41ee6ba3..edaf08440d51 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/KVMStorageProcessor.java
@@ -1607,8 +1607,20 @@ public Answer dettachVolume(final DettachCommand cmd) {
storagePoolMgr.disconnectPhysicalDisk(primaryStore.getPoolType(), primaryStore.getUuid(), vol.getPath());
return new DettachAnswer(disk);
- } catch (final LibvirtException | InternalErrorException | CloudRuntimeException e) {
- logger.debug(String.format("Failed to detach volume [id: %d, uuid: %s, name: %s, path: %s], due to ", vol.getId(), vol.getUuid(), vol.getName(), vol.getPath()), e);
+ } catch (final LibvirtException e) {
+ // check if the error was related to an already unplugged event - we can safely ignore
+ if (e.getMessage() != null && e.getMessage().contains("is already in the process of unplug")) {
+ logger.debug("Volume: " + vol.getPath() + " is already unplugged, ignoring the error");
+ return new DettachAnswer(disk);
+ } else {
+ logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
+ return new DettachAnswer(e.toString());
+ }
+ } catch (final InternalErrorException e) {
+ logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
+ return new DettachAnswer(e.toString());
+ } catch (final CloudRuntimeException e) {
+ logger.debug("Failed to detach volume: " + vol.getPath() + ", due to ", e);
return new DettachAnswer(e.toString());
} finally {
vol.clearPassphrase();
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java
index 7ba29ffc26ea..7f5e387d2b13 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/storage/MultipathSCSIAdapterBase.java
@@ -65,10 +65,12 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
* Property keys and defaults
*/
static final Property CLEANUP_FREQUENCY_SECS = new Property("multimap.cleanup.frequency.secs", 60);
- static final Property CLEANUP_TIMEOUT_SECS = new Property("multimap.cleanup.timeout.secs", 4);
+ static final Property CLEANUP_TIMEOUT_SECS = new Property("multimap.cleanup.timeout.secs", 600);
static final Property CLEANUP_ENABLED = new Property("multimap.cleanup.enabled", true);
static final Property CLEANUP_SCRIPT = new Property("multimap.cleanup.script", "cleanStaleMaps.sh");
static final Property CONNECT_SCRIPT = new Property("multimap.connect.script", "connectVolume.sh");
+ static final Property START_CONNECT_SCRIPT = new Property("multimap.startconnect.script", "startConnectVolume.sh");
+ static final Property FINISH_CONNECT_SCRIPT = new Property("multimap.finishconnect.script", "finishConnectVolume.sh");
static final Property COPY_SCRIPT = new Property("multimap.copy.script", "copyVolume.sh");
static final Property DISCONNECT_SCRIPT = new Property("multimap.disconnect.script", "disconnectVolume.sh");
static final Property RESIZE_SCRIPT = new Property("multimap.resize.script", "resizeVolume.sh");
@@ -78,6 +80,8 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
static Timer cleanupTimer = new Timer();
private static int cleanupTimeoutSecs = CLEANUP_TIMEOUT_SECS.getFinalValue();
private static String connectScript = CONNECT_SCRIPT.getFinalValue();
+ private static String startConnectScript = START_CONNECT_SCRIPT.getFinalValue();
+ private static String finishConnectScript = FINISH_CONNECT_SCRIPT.getFinalValue();
private static String disconnectScript = DISCONNECT_SCRIPT.getFinalValue();
private static String cleanupScript = CLEANUP_SCRIPT.getFinalValue();
private static String resizeScript = RESIZE_SCRIPT.getFinalValue();
@@ -98,6 +102,16 @@ public abstract class MultipathSCSIAdapterBase implements StorageAdaptor {
throw new Error("Unable to find the connectVolume.sh script");
}
+ startConnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), startConnectScript);
+ if (startConnectScript == null) {
+ throw new Error("Unable to find the startConnectVolume.sh script");
+ }
+
+ finishConnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), finishConnectScript);
+ if (finishConnectScript == null) {
+ throw new Error("Unable to find the finishConnectVolume.sh script");
+ }
+
disconnectScript = Script.findScript(STORAGE_SCRIPTS_DIR.getFinalValue(), disconnectScript);
if (disconnectScript == null) {
throw new Error("Unable to find the disconnectVolume.sh script");
@@ -164,9 +178,11 @@ private KVMPhysicalDisk getPhysicalDisk(AddressInfo address, KVMStoragePool pool
// validate we have a connection, if not we need to connect first.
if (!isConnected(address.getPath())) {
- if (!connectPhysicalDisk(address, pool, null)) {
- throw new CloudRuntimeException("Unable to connect to volume " + address.getPath());
- }
+ LOGGER.warn("Physical disk " + address.getPath() + " is not connected, a request to connectPhysicalDisk must be made before it can be used.");
+ return null;
+ } else {
+ LOGGER.debug("Physical disk " + address.getPath() + " is connected, proceeding to get its size.");
+
}
long diskSize = getPhysicalDiskSize(address.getPath());
@@ -222,8 +238,91 @@ private boolean connectPhysicalDisk(AddressInfo address, KVMStoragePool pool, Ma
if (StringUtils.isNotEmpty(waitTime)) {
waitTimeInSec = Integer.valueOf(waitTime).intValue();
}
+ } else {
+ // wait at least 60 seconds even if input was lower -- NOTE(review): this minimum-60 check runs only when details lacks the wait key, so user-supplied values below 60 are not raised to 60; confirm that is intended.
+ if (waitTimeInSec < 60) {
+ LOGGER.debug(String.format("multimap.disk.wait.secs was less than 60. Increasing to 60"));
+ waitTimeInSec = 60;
+ }
+ }
+
+ if (!startConnect(address, pool, waitTimeInSec)) {
+ LOGGER.error("Failed to trigger connect for address [" + address.getPath() + "] of the storage pool: " + pool.getUuid());
+ return false;
+ }
+
+ LOGGER.debug("Waiting for disk to become available after connect for address [" + address.getPath() + "] of the storage pool: " + pool.getUuid());
+
+ // loop through and call isConnected() until true or the waitTimeInSec is exceeded
+ long startTime = System.currentTimeMillis();
+ while (System.currentTimeMillis() - startTime < TimeUnit.SECONDS.toMillis(waitTimeInSec)) {
+ if (isConnected(address.getPath())) {
+ LOGGER.info("Disk " + address.getPath() + " of the storage pool: " + pool.getUuid() + " is connected");
+ return true;
+ }
+ try {
+ Thread.sleep(1000); // wait 1 second before checking again
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ LOGGER.error("Disk " + address.getPath() + " of the storage pool: " + pool.getUuid() + " is not connected after waiting for " + waitTimeInSec + " seconds");
+ return false;
+ }
+
+ public boolean startConnectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) {
+ LOGGER.info("startConnectPhysicalDisk called for [" + volumePath + "]");
+
+ if (StringUtils.isEmpty(volumePath)) {
+ LOGGER.error("Unable to connect physical disk due to insufficient data - volume path is undefined");
+ return false;
+ }
+
+ if (pool == null) {
+ LOGGER.error("Unable to connect physical disk due to insufficient data - pool is not set");
+ return false;
+ }
+
+ // we expect WWN values in the volumePath so need to convert it to an actual physical path
+ AddressInfo address = this.parseAndValidatePath(volumePath);
+
+ return startConnect(address, pool, diskWaitTimeSecs);
+ }
+
+ public boolean finishConnectPhysicalDisk(String volumePath, KVMStoragePool pool, Map details) throws Exception {
+ LOGGER.info("finishConnectPhysicalDisk called for [" + volumePath + "]");
+
+ if (StringUtils.isEmpty(volumePath)) {
+ LOGGER.error("Unable to finish connect physical disk due to insufficient data - volume path is undefined");
+ return false;
+ }
+
+ if (pool == null) {
+ LOGGER.error("Unable to finish connect physical disk due to insufficient data - pool is not set");
+ return false;
+ }
+
+ // we expect WWN values in the volumePath so need to convert it to an actual physical path
+ AddressInfo address = this.parseAndValidatePath(volumePath);
+
+ return finishConnect(address, pool, diskWaitTimeSecs);
+ }
+
+
+ /**
+ * Tests if the physical disk is connected
+ */
+ public boolean isConnected(String path, KVMStoragePool pool, Map details) {
+ AddressInfo address = this.parseAndValidatePath(path);
+ if (address.getAddress() == null) {
+ LOGGER.debug(String.format("isConnected(path,pool) returning FALSE, volume path has no address field: %s", path));
+ return false;
}
- return waitForDiskToBecomeAvailable(address, pool, waitTimeInSec);
+ if (isConnected(address.getPath())) {
+ return true;
+ }
+ return false;
}
@Override
@@ -441,24 +540,74 @@ String getExtractCommandForDownloadedFile(String downloadedTemplateFile, String
}
}
- boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
- LOGGER.debug("Waiting for the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid() + " to become available for " + waitTimeInSec + " secs");
+ /**
+ * Trigger (but does not wait for success) a LUN connect operation for the given address and storage pool.
+ * @param address
+ * @param pool
+ * @param waitTimeInSec
+ * @return
+ */
+ boolean startConnect(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
+ LOGGER.debug("Triggering connect for : " + address.getPath() + " of the storage pool: " + pool.getUuid());
+ long scriptTimeoutSecs = waitTimeInSec - 1; // how long to wait for each script execution to run
+ Process p = null;
+ try {
+ String lun;
+ if (address.getConnectionId() == null) {
+ lun = "-";
+ } else {
+ lun = address.getConnectionId();
+ }
- long scriptTimeoutSecs = 30; // how long to wait for each script execution to run
- long maxTries = 10; // how many max retries to attempt the script
- long waitTimeInMillis = waitTimeInSec * 1000; // how long overall to wait
- int timeBetweenTries = 1000; // how long to sleep between tries
- // wait at least 60 seconds even if input was lower
- if (waitTimeInSec < 60) {
- waitTimeInSec = 60;
+ ProcessBuilder builder = new ProcessBuilder(startConnectScript, lun, address.getAddress());
+ p = builder.start();
+ if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
+ int rc = p.exitValue();
+ StringBuffer output = new StringBuffer();
+ if (rc == 0) {
+ BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
+ String line = null;
+ while ((line = input.readLine()) != null) {
+ output.append(line);
+ output.append(" ");
+ }
+ LOGGER.debug("LUN discovery triggered for " + address.getPath() + " of the storage pool: " + pool.getUuid() + ", output: " + output.toString());
+ } else {
+ LOGGER.warn("Failure triggering LUN discovery via " + startConnectScript);
+ BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
+ String line = null;
+ while ((line = error.readLine()) != null) {
+ LOGGER.warn("error --> " + line);
+ }
+ }
+ } else {
+ LOGGER.debug(String.format("Timeout [%s] waiting for %s to complete", scriptTimeoutSecs, startConnectScript));
+ return false;
+ }
+ } catch (IOException | InterruptedException | IllegalThreadStateException e) {
+ LOGGER.warn("Problem performing LUN discovery for " + address.getPath() + " of the storage pool: " + pool.getUuid(), e);
+ return false;
+ } finally {
+ if (p != null && p.isAlive()) {
+ p.destroyForcibly();
+ }
}
- KVMPhysicalDisk physicalDisk = null;
- // Rescan before checking for the physical disk
- int tries = 0;
- while (waitTimeInMillis > 0 && tries < maxTries) {
- tries++;
- long start = System.currentTimeMillis();
+ return true;
+ }
+
+ /**
+ * Completes a previously started LUN connect operation for the given address and storage pool (counterpart to startConnect).
+ * @param address
+ * @param pool
+ * @param waitTimeInSec
+ * @return
+ */
+ boolean finishConnect(AddressInfo address, KVMStoragePool pool, long waitTimeInSec) {
+ LOGGER.debug("Triggering connect for : " + address.getPath() + " of the storage pool: " + pool.getUuid());
+ long scriptTimeoutSecs = waitTimeInSec - 1; // how long to wait for each script execution to run
+ Process p = null;
+ try {
String lun;
if (address.getConnectionId() == null) {
lun = "-";
@@ -466,59 +615,41 @@ boolean waitForDiskToBecomeAvailable(AddressInfo address, KVMStoragePool pool, l
lun = address.getConnectionId();
}
- Process p = null;
- try {
- ProcessBuilder builder = new ProcessBuilder(connectScript, lun, address.getAddress());
- p = builder.start();
- if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
- int rc = p.exitValue();
- StringBuffer output = new StringBuffer();
- if (rc == 0) {
- BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
- String line = null;
- while ((line = input.readLine()) != null) {
- output.append(line);
- output.append(" ");
- }
-
- physicalDisk = getPhysicalDisk(address, pool);
- if (physicalDisk != null && physicalDisk.getSize() > 0) {
- LOGGER.debug("Found the volume using id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
- return true;
- }
-
- break;
- } else {
- LOGGER.warn("Failure discovering LUN via " + connectScript);
- BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
- String line = null;
- while ((line = error.readLine()) != null) {
- LOGGER.warn("error --> " + line);
- }
+ ProcessBuilder builder = new ProcessBuilder(finishConnectScript, lun, address.getAddress());
+ p = builder.start();
+ if (p.waitFor(scriptTimeoutSecs, TimeUnit.SECONDS)) {
+ int rc = p.exitValue();
+ StringBuffer output = new StringBuffer();
+ if (rc == 0) {
+ BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));
+ String line = null;
+ while ((line = input.readLine()) != null) {
+ output.append(line);
+ output.append(" ");
}
+ LOGGER.debug("LUN discovery triggered for " + address.getPath() + " of the storage pool: " + pool.getUuid() + ", output: " + output.toString());
} else {
- LOGGER.debug("Timeout waiting for " + connectScript + " to complete - try " + tries);
- }
- } catch (IOException | InterruptedException | IllegalThreadStateException e) {
- LOGGER.warn("Problem performing scan on SCSI hosts - try " + tries, e);
- } finally {
- if (p != null && p.isAlive()) {
- p.destroyForcibly();
+ LOGGER.warn("Failure triggering LUN discovery via " + finishConnectScript);
+ BufferedReader error = new BufferedReader(new InputStreamReader(p.getErrorStream()));
+ String line = null;
+ while ((line = error.readLine()) != null) {
+ LOGGER.warn("error --> " + line);
+ }
}
+ } else {
+ LOGGER.debug(String.format("Timeout [%s] waiting for %s to complete", scriptTimeoutSecs, finishConnectScript));
+ return false;
}
-
- long elapsed = System.currentTimeMillis() - start;
- waitTimeInMillis = waitTimeInMillis - elapsed;
-
- try {
- Thread.sleep(timeBetweenTries);
- } catch (Exception ex) {
- // don't do anything
+ } catch (IOException | InterruptedException | IllegalThreadStateException e) {
+ LOGGER.warn("Problem performing LUN discovery for " + address.getPath() + " of the storage pool: " + pool.getUuid(), e);
+ return false;
+ } finally {
+ if (p != null && p.isAlive()) {
+ p.destroyForcibly();
}
}
- LOGGER.debug("Unable to find the volume with id: " + address.getPath() + " of the storage pool: " + pool.getUuid());
- return false;
+ return true;
}
boolean isConnected(String path) {
diff --git a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java
index e573f453a6c3..7efbc186876c 100644
--- a/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java
+++ b/plugins/storage/volume/adaptive/src/main/java/org/apache/cloudstack/storage/datastore/driver/AdaptiveDataStoreDriverImpl.java
@@ -269,7 +269,7 @@ public void createAsync(DataStore dataStore, DataObject dataObject,
dataIn.setExternalUuid(volume.getExternalUuid());
// update the cloudstack metadata about the volume
- persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null);
+ persistVolumeOrTemplateData(storagePool, details, dataObject, volume, null, volume.getAllocatedSizeInBytes());
result = new CreateCmdResult(dataObject.getUuid(), new Answer(null));
result.setSuccess(true);
@@ -346,14 +346,17 @@ public void copyAsync(DataObject srcdata, DataObject destdata,
// if we copied from one volume to another, the target volume's disk offering or user input may be of a larger size
// we won't, however, shrink a volume if its smaller.
+ long size = destdata.getSize();
if (outVolume.getAllocatedSizeInBytes() < destdata.getSize()) {
- logger.info("Resizing volume {} to requested target volume size of {}", destdata, destdata.getSize());
+ logger.info("Resizing volume {} to requested target volume size of {}", destdata.getUuid(), destdata.getSize());
api.resize(context, destIn, destdata.getSize());
+ } else if (outVolume.getAllocatedSizeInBytes() > destdata.getSize()) {
+ size = outVolume.getAllocatedSizeInBytes();
}
// initial volume info does not have connection map yet. That is added when grantAccess is called later.
String finalPath = generatePathInfo(outVolume, null);
- persistVolumeData(storagePool, details, destdata, outVolume, null);
+ persistVolumeData(storagePool, details, destdata, outVolume, null, size);
logger.info("Copy completed from [{}] to [{}]", srcdata, destdata);
VolumeObjectTO voto = new VolumeObjectTO();
@@ -384,15 +387,11 @@ public boolean canCopy(DataObject srcData, DataObject destData) {
logger.debug("canCopy: Checking srcData [{}:{}:{} AND destData [{}:{}:{}]",
srcData, srcData.getType(), srcData.getDataStore(), destData, destData.getType(), destData.getDataStore());
try {
- if (!isSameProvider(srcData)) {
+ if (!srcData.getDataStore().getUuid().equals(destData.getDataStore().getUuid())) {
logger.debug("canCopy: No we can't -- the source provider is NOT the correct type for this driver!");
return false;
}
- if (!isSameProvider(destData)) {
- logger.debug("canCopy: No we can't -- the destination provider is NOT the correct type for this driver!");
- return false;
- }
logger.debug(
"canCopy: Source and destination are the same so we can copy via storage endpoint, checking that the source actually exists");
StoragePoolVO poolVO = _storagePoolDao.findById(srcData.getDataStore().getId());
@@ -500,7 +499,7 @@ public boolean grantAccess(DataObject dataObject, Host host, DataStore dataStore
ProviderVolume vol = api.getVolume(context, sourceIn);
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
Map connIdMap = api.getConnectionIdMap(dataIn);
- persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
+ persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap, null);
logger.info("Granted host {} access to volume {}", host, dataObject);
@@ -534,7 +533,7 @@ public void revokeAccess(DataObject dataObject, Host host, DataStore dataStore)
ProviderVolume vol = api.getVolume(context, sourceIn);
ProviderAdapterDataObject dataIn = newManagedDataObject(dataObject, storagePool);
Map connIdMap = api.getConnectionIdMap(dataIn);
- persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap);
+ persistVolumeOrTemplateData(storagePool, details, dataObject, vol, connIdMap, null);
logger.info("Revoked access for host {} to volume {}", host, dataObject);
} catch (Throwable e) {
@@ -725,6 +724,7 @@ public Map getCapabilities() {
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_SNAPSHOT.toString(), Boolean.TRUE.toString());
mapCapabilities.put(DataStoreCapabilities.CAN_CREATE_VOLUME_FROM_VOLUME.toString(), Boolean.TRUE.toString()); // set to false because it causes weird behavior when copying templates to root volumes
mapCapabilities.put(DataStoreCapabilities.CAN_REVERT_VOLUME_TO_SNAPSHOT.toString(), Boolean.TRUE.toString());
+ mapCapabilities.put("CAN_CLONE_VOLUME_FROM_TEMPLATE", Boolean.TRUE.toString());
ProviderAdapterFactory factory = _adapterFactoryMap.getFactory(this.getProviderName());
if (factory != null) {
mapCapabilities.put("CAN_DIRECT_ATTACH_SNAPSHOT", factory.canDirectAttachSnapshot().toString());
@@ -840,55 +840,96 @@ public boolean canHostAccessStoragePool(Host host, StoragePool pool) {
}
void persistVolumeOrTemplateData(StoragePoolVO storagePool, Map storagePoolDetails,
- DataObject dataObject, ProviderVolume volume, Map connIdMap) {
+ DataObject dataObject, ProviderVolume volume, Map connIdMap, Long size) {
if (dataObject.getType() == DataObjectType.VOLUME) {
- persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
+ persistVolumeData(storagePool, storagePoolDetails, dataObject, volume, connIdMap, size);
} else if (dataObject.getType() == DataObjectType.TEMPLATE) {
- persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap);
+ persistTemplateData(storagePool, storagePoolDetails, dataObject, volume, connIdMap, size);
}
}
void persistVolumeData(StoragePoolVO storagePool, Map details, DataObject dataObject,
- ProviderVolume managedVolume, Map connIdMap) {
+ ProviderVolume managedVolume, Map connIdMap, Long size) {
+
+ // Get the volume by dataObject id
VolumeVO volumeVO = _volumeDao.findById(dataObject.getId());
+ long volumeId = volumeVO.getId();
+ // Generate path for volume and details
String finalPath = generatePathInfo(managedVolume, connIdMap);
- volumeVO.setPath(finalPath);
- volumeVO.setFormat(ImageFormat.RAW);
- volumeVO.setPoolId(storagePool.getId());
- volumeVO.setExternalUuid(managedVolume.getExternalUuid());
- volumeVO.setDisplay(true);
- volumeVO.setDisplayVolume(true);
- _volumeDao.update(volumeVO.getId(), volumeVO);
- volumeVO = _volumeDao.findById(volumeVO.getId());
+ try {
+ if (finalPath != null) {
+ volumeVO.setPath(finalPath);
+ }
+ volumeVO.setFormat(ImageFormat.RAW);
+ volumeVO.setPoolId(storagePool.getId());
+ volumeVO.setExternalUuid(managedVolume.getExternalUuid());
+ volumeVO.setDisplay(true);
+ volumeVO.setDisplayVolume(true);
+ // the size may have been adjusted by the storage provider
+ if (size != null) {
+ volumeVO.setSize(size);
+ }
+ _volumeDao.update(volumeVO.getId(), volumeVO);
+ } catch (Throwable e) {
+ logger.error("Failed to persist volume record", e);
+ throw e;
+ }
- VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
- DiskTO.PATH, finalPath, true);
- _volumeDetailsDao.persist(volumeDetailVO);
+ // PATH
+ try {
+ // If volume_detail exist
+ _volumeDetailsDao.removeDetail(volumeId, DiskTO.PATH);
+ VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, DiskTO.PATH, finalPath, true);
+ _volumeDetailsDao.persist(volumeDetailVO);
+ } catch (Exception e) {
+ logger.error("Failed to persist volume path", e);
+ throw e;
+ }
- volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
- ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
- _volumeDetailsDao.persist(volumeDetailVO);
+ // EXTERNAL_NAME
+ try {
+ _volumeDetailsDao.removeDetail(volumeId, ProviderAdapterConstants.EXTERNAL_NAME);
+ VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, ProviderAdapterConstants.EXTERNAL_NAME, managedVolume.getExternalName(), true);
+ _volumeDetailsDao.persist(volumeDetailVO);
+ } catch (Exception e) {
+ logger.error("Failed to persist volume external name", e);
+ throw e;
+ }
- volumeDetailVO = new VolumeDetailVO(volumeVO.getId(),
- ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
- _volumeDetailsDao.persist(volumeDetailVO);
+ // EXTERNAL_UUID
+ try {
+ _volumeDetailsDao.removeDetail(volumeId, ProviderAdapterConstants.EXTERNAL_UUID);
+ VolumeDetailVO volumeDetailVO = new VolumeDetailVO(volumeId, ProviderAdapterConstants.EXTERNAL_UUID, managedVolume.getExternalUuid(), true);
+ _volumeDetailsDao.persist(volumeDetailVO);
+ } catch (Exception e) {
+ logger.error("Failed to persist volume external uuid", e);
+ throw e;
+ }
}
void persistTemplateData(StoragePoolVO storagePool, Map details, DataObject dataObject,
- ProviderVolume volume, Map connIdMap) {
+ ProviderVolume volume, Map connIdMap, Long size) {
TemplateInfo templateInfo = (TemplateInfo) dataObject;
VMTemplateStoragePoolVO templatePoolRef = _vmTemplatePoolDao.findByPoolTemplate(storagePool.getId(),
templateInfo.getId(), null);
templatePoolRef.setInstallPath(generatePathInfo(volume, connIdMap));
templatePoolRef.setLocalDownloadPath(volume.getExternalName());
- templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
+ if (size == null) {
+ templatePoolRef.setTemplateSize(volume.getAllocatedSizeInBytes());
+ } else {
+ templatePoolRef.setTemplateSize(size);
+ }
_vmTemplatePoolDao.update(templatePoolRef.getId(), templatePoolRef);
}
String generatePathInfo(ProviderVolume volume, Map connIdMap) {
+ if (volume == null) {
+ return null;
+ }
+
String finalPath = String.format("type=%s; address=%s; providerName=%s; providerID=%s;",
volume.getAddressType().toString(), volume.getAddress().toLowerCase(), volume.getExternalName(), volume.getExternalUuid());
@@ -938,15 +979,6 @@ ProviderAdapterContext newManagedVolumeContext(DataObject obj) {
return ctx;
}
- boolean isSameProvider(DataObject obj) {
- StoragePoolVO storagePool = this._storagePoolDao.findById(obj.getDataStore().getId());
- if (storagePool != null && storagePool.getStorageProviderName().equals(this.getProviderName())) {
- return true;
- } else {
- return false;
- }
- }
-
ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool storagePool) {
ProviderAdapterDataObject dataIn = new ProviderAdapterDataObject();
if (data instanceof VolumeInfo) {
@@ -1002,4 +1034,8 @@ ProviderAdapterDataObject newManagedDataObject(DataObject data, StoragePool stor
public boolean volumesRequireGrantAccessWhenUsed() {
return true;
}
+
+ public boolean zoneWideVolumesAvailableWithoutClusterMotion() {
+ return true;
+ }
}
diff --git a/plugins/storage/volume/storpool/pom.xml b/plugins/storage/volume/storpool/pom.xml
index 4341347082c1..881e62aeefb8 100644
--- a/plugins/storage/volume/storpool/pom.xml
+++ b/plugins/storage/volume/storpool/pom.xml
@@ -80,6 +80,7 @@
+
set-properties
validate
diff --git a/plugins/user-authenticators/ldap/pom.xml b/plugins/user-authenticators/ldap/pom.xml
index 32f699aabd6e..f9522ef1696c 100644
--- a/plugins/user-authenticators/ldap/pom.xml
+++ b/plugins/user-authenticators/ldap/pom.xml
@@ -52,6 +52,7 @@
+
compile
compileTests
@@ -114,11 +115,10 @@
${groovy.version}
test
-
-
+
cglib
cglib-nodep
- test
+ ${cs.cglib.version}
org.zapodot
diff --git a/scripts/storage/multipath/connectVolume.sh b/scripts/storage/multipath/connectVolume.sh
index fb8387ece473..b25b13572307 100755
--- a/scripts/storage/multipath/connectVolume.sh
+++ b/scripts/storage/multipath/connectVolume.sh
@@ -29,103 +29,40 @@ WWID=${2:?"WWID required"}
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
-systemctl is-active multipathd || systemctl restart multipathd || {
- echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
- logger -t "CS_SCSI_VOL_FIND" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
+START_CONNECT=$(dirname $0)/startConnectVolume.sh
+if [ -x "${START_CONNECT}" ]; then
+ echo "$(date): Starting connect process for ${WWID} on lun ${LUN}"
+ ${START_CONNECT} ${LUN} ${WWID}
+ if [ $? -ne 0 ]; then
+ echo "$(date): Failed to start connect process for ${WWID} on lun ${LUN}"
+ logger -t "CS_SCSI_VOL_FIND" "${WWID} failed to start connect process on lun ${LUN}"
+ exit 1
+ fi
+else
+ echo "$(date): Unable to find startConnectVolume.sh script!"
exit 1
-}
-
-echo "$(date): Looking for ${WWID} on lun ${LUN}"
-
-# get vendor OUI. we will only delete a device on the designated lun if it matches the
-# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
-# host on different fiber channel hosts with the same LUN
-INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
-echo "$(date): Incoming OUI: ${INCOMING_OUI}"
-
-# first we need to check if any stray references are left from a previous use of this lun
-for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do
- lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g')
+fi
- if [ ! -z "${lingering_devs}" ]; then
- for dev in ${lingering_devs}; do
- LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g')
- FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8)
- if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then
- continue;
- fi
- dev=$(echo $dev | awk -F: '{ print $1}')
- logger -t "CS_SCSI_VOL_FIND" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up"
- MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}')
- MP_WWID=${MP_WWID:1} # strip first character (3) off
- # don't do this if the WWID passed in matches the WWID from multipath
- if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then
- # run full removal again so all devices and multimap are cleared
- $(dirname $0)/disconnectVolume.sh ${MP_WWID}
- # we don't have a multimap but we may still have some stranded devices to clean up
- elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then
- echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete
- fi
- done
- sleep 3
- fi
+# wait for the device path to show up
+while [ ! -e /dev/mapper/3${WWID} ]; do
+ echo "$(date): Waiting for /dev/mapper/3${WWID} to appear"
+ sleep 1
done
-logger -t "CS_SCSI_VOL_FIND" "${WWID} awaiting disk path at /dev/mapper/3${WWID}"
-
-# wait for multipath to map the new lun to the WWID
-echo "$(date): Waiting for multipath entry to show up for the WWID"
-while true; do
- ls /dev/mapper/3${WWID} >/dev/null 2>&1
- if [ $? == 0 ]; then
- break
- fi
-
- logger -t "CS_SCSI_VOL_FIND" "${WWID} not available yet, triggering scan"
-
- # instruct bus to scan for new lun
- for fchost in $(ls /sys/class/fc_host); do
- echo " --> Scanning ${fchost}"
- echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan
- done
-
- multipath -v2 2>/dev/null
-
- ls /dev/mapper/3${WWID} >/dev/null 2>&1
- if [ $? == 0 ]; then
- break
+FINISH_CONNECT=$(dirname $0)/finishConnectVolume.sh
+if [ -x "${FINISH_CONNECT}" ]; then
+ echo "$(date): Starting post-connect validation for ${WWID} on lun ${LUN}"
+ ${FINISH_CONNECT} ${LUN} ${WWID}
+ if [ $? -ne 0 ]; then
+ echo "$(date): Failed to finish connect process for ${WWID} on lun ${LUN}"
+ logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} failed to finish connect process on lun ${LUN}"
+ exit 1
fi
-
- sleep 5
-done
-
-echo "$(date): Doing a recan to make sure we have proper current size locally"
-for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
- echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
-done
-
-sleep 3
-
-multipathd reconfigure
-
-sleep 3
-
-# cleanup any old/faulty paths
-delete_needed=false
-multipath -l 3${WWID}
-for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do
- logger -t "CS_SCSI_VOL_FIND" "${WWID} multipath contains faulty path ${dev}, removing"
- echo 1 > /sys/block/${dev}/device/delete;
- delete_needed=true
-done
-
-if [ "${delete_needed}" == "true" ]; then
- sleep 10
- multipath -v2 >/dev/null
+else
+ echo "$(date): Unable to find finishConnectVolume.sh script!"
+ exit 1
fi
-multipath -l 3${WWID}
-
logger -t "CS_SCSI_VOL_FIND" "${WWID} successfully discovered and available"
echo "$(date): Complete - found mapped LUN at /dev/mapper/3${WWID}"
diff --git a/scripts/storage/multipath/disconnectVolume.sh b/scripts/storage/multipath/disconnectVolume.sh
index f894076927f1..b07272c22e1d 100755
--- a/scripts/storage/multipath/disconnectVolume.sh
+++ b/scripts/storage/multipath/disconnectVolume.sh
@@ -26,6 +26,14 @@
#########################################################################################
WWID=${1:?"WWID required"}
+BACKGROUND="${2}"
+
+# move the script to run in the background, no need to block other flows for this to complete
+if [ -z "${BACKGROUND}" ]; then
+ nohup "$0" "${WWID}" --background &
+ exit 0
+fi
+
WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
echo "$(date): Removing ${WWID}"
@@ -36,6 +44,9 @@ systemctl is-active multipathd || systemctl restart multipathd || {
exit 1
}
+# Remove any active IO on the device so it can be removed.
+multipathd disablequeueing map 3${WWID}
+
# first get dm- name
DM_NAME=$(ls -lrt /dev/mapper/3${WWID} | awk '{ print $NF }' | awk -F'/' '{print $NF}')
SLAVE_DEVS=""
@@ -66,9 +77,6 @@ fi
logger -t CS_SCSI_VOL_REMOVE "${WWID} successfully purged from multipath along with slave devices"
-# Added to give time for the event to be fired to the server
-sleep 10
-
echo "$(date): ${WWID} removed"
exit 0
diff --git a/scripts/storage/multipath/finishConnectVolume.sh b/scripts/storage/multipath/finishConnectVolume.sh
new file mode 100755
index 000000000000..0ed8376589d2
--- /dev/null
+++ b/scripts/storage/multipath/finishConnectVolume.sh
@@ -0,0 +1,79 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#####################################################################################
+#
+# Given a lun # and a WWID for a volume provisioned externally, validate the
+# multipath device after connection: rescan devices for current size,
+# reconfigure multipathd, and clean up any faulty paths
+#
+#####################################################################################
+
+
+LUN=${1:?"LUN required"}
+WWID=${2:?"WWID required"}
+
+WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
+
+systemctl is-active multipathd || systemctl restart multipathd || {
+ echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
+ logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
+ exit 1
+}
+
+echo "$(date): Doing post-connect validation for ${WWID} on lun ${LUN}"
+
+# get vendor OUI. we will only delete a device on the designated lun if it matches the
+# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
+# host on different fiber channel hosts with the same LUN
+INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
+echo "$(date): Incoming OUI: ${INCOMING_OUI}"
+
+logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} looking for disk path at /dev/mapper/3${WWID}"
+
+echo "$(date): Doing a recan to make sure we have proper current size locally"
+for device in $(multipath -ll 3${WWID} | egrep '^ ' | awk '{print $2}'); do
+ echo "1" > /sys/bus/scsi/drivers/sd/${device}/rescan;
+done
+
+sleep 3
+
+multipathd reconfigure
+
+sleep 3
+
+# cleanup any old/faulty paths
+delete_needed=false
+multipath -l 3${WWID}
+for dev in $(multipath -l 3${WWID} 2>/dev/null| grep failed | awk '{print $3}' ); do
+ logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} multipath contains faulty path ${dev}, removing"
+ echo 1 > /sys/block/${dev}/device/delete;
+ delete_needed=true
+done
+
+if [ "${delete_needed}" == "true" ]; then
+ sleep 10
+ multipath -v2 >/dev/null
+fi
+
+multipath -l 3${WWID}
+
+logger -t "CS_SCSI_VOL_CONN_FINISH" "${WWID} successfully discovered and available"
+
+echo "$(date): Complete - found mapped LUN at /dev/mapper/3${WWID}"
+
+exit 0
diff --git a/scripts/storage/multipath/startConnectVolume.sh b/scripts/storage/multipath/startConnectVolume.sh
new file mode 100755
index 000000000000..0e85d910c58a
--- /dev/null
+++ b/scripts/storage/multipath/startConnectVolume.sh
@@ -0,0 +1,101 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+#####################################################################################
+#
+# Given a lun # and a WWID for a volume provisioned externally, clean up any
+# lingering devices from a previous use of the lun and trigger SCSI bus
+# discovery so the volume becomes visible via multipath
+#
+#####################################################################################
+
+
+LUN=${1:?"LUN required"}
+WWID=${2:?"WWID required"}
+
+WWID=$(echo $WWID | tr '[:upper:]' '[:lower:]')
+
+systemctl is-active multipathd || systemctl restart multipathd || {
+ echo "$(date): Multipathd is NOT running and cannot be started. This must be corrected before this host can access this storage volume."
+ logger -t "CS_SCSI_VOL_CONN_START" "${WWID} cannot be mapped to this host because multipathd is not currently running and cannot be started"
+ exit 1
+}
+
+echo "$(date): Looking for ${WWID} on lun ${LUN}"
+
+# get vendor OUI. we will only delete a device on the designated lun if it matches the
+# incoming WWN OUI value. This is because multiple storage arrays may be mapped to the
+# host on different fiber channel hosts with the same LUN
+INCOMING_OUI=$(echo ${WWID} | cut -c2-7)
+echo "$(date): Incoming OUI: ${INCOMING_OUI}"
+
+# first we need to check if any stray references are left from a previous use of this lun
+for fchost in $(ls /sys/class/fc_host | sed -e 's/host//g'); do
+ lingering_devs=$(lsscsi -w "${fchost}:*:*:${LUN}" | grep /dev | awk '{if (NF > 6) { printf("%s:%s ", $NF, $(NF-1));} }' | sed -e 's/0x/3/g')
+
+ if [ ! -z "${lingering_devs}" ]; then
+ for dev in ${lingering_devs}; do
+ LSSCSI_WWID=$(echo $dev | awk -F: '{print $2}' | sed -e 's/0x/3/g')
+ FOUND_OUI=$(echo ${LSSCSI_WWID} | cut -c3-8)
+ if [ "${INCOMING_OUI}" != "${FOUND_OUI}" ]; then
+ continue;
+ fi
+ dev=$(echo $dev | awk -F: '{ print $1}')
+ logger -t "CS_SCSI_VOL_CONN_START" "${WWID} processing identified a lingering device ${dev} from previous lun use, attempting to clean up"
+ MP_WWID=$(multipath -l ${dev} | head -1 | awk '{print $1}')
+ MP_WWID=${MP_WWID:1} # strip first character (3) off
+ # don't do this if the WWID passed in matches the WWID from multipath
+ if [ ! -z "${MP_WWID}" ] && [ "${MP_WWID}" != "${WWID}" ]; then
+ # run full removal again so all devices and multimap are cleared
+ $(dirname $0)/disconnectVolume.sh ${MP_WWID}
+ # we don't have a multimap but we may still have some stranded devices to clean up
+ elif [ "${LSSCSI_WWID}" != "${WWID}" ]; then
+ echo "1" > /sys/block/$(echo ${dev} | awk -F'/' '{print $NF}')/device/delete
+ fi
+ done
+ sleep 3
+ fi
+done
+
+logger -t "CS_SCSI_VOL_CONN_START" "${WWID} awaiting disk path at /dev/mapper/3${WWID}"
+
+# wait for multipath to map the new lun to the WWID
+echo "$(date): Triggering discovery for multipath WWID ${WWID} on LUN ${LUN}"
+ls /dev/mapper/3${WWID} >/dev/null 2>&1
+if [ $? == 0 ]; then
+ logger -t "CS_SCSI_VOL_CONN_START" "${WWID} already available at /dev/mapper/3${WWID}, no need to trigger a scan"
+ exit 0
+fi
+
+# instruct bus to scan for new lun
+for fchost in $(ls /sys/class/fc_host); do
+ echo " --> Scanning ${fchost}"
+ echo "- - ${LUN}" > /sys/class/scsi_host/${fchost}/scan
+done
+
+multipath -v2 2>/dev/null
+
+ls /dev/mapper/3${WWID} >/dev/null 2>&1
+if [ $? == 0 ]; then
+ logger -t "CS_SCSI_VOL_CONN_START" "${WWID} scan triggered and device immediately became visible at /dev/mapper/3${WWID}"
+fi
+
+logger -t "CS_SCSI_VOL_CONN_START" "${WWID} successfully triggered discovery"
+
+echo "$(date): Complete - Triggered discovery of ${WWID}, watch for device at /dev/mapper/3${WWID}"
+
+exit 0
diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java
index 88a9fd34bd13..1ca6f4e629f8 100644
--- a/server/src/main/java/com/cloud/network/as/AutoScaleManager.java
+++ b/server/src/main/java/com/cloud/network/as/AutoScaleManager.java
@@ -39,6 +39,12 @@ public interface AutoScaleManager extends AutoScaleService {
"The Number of worker threads to scan the autoscale vm groups.",
false);
+ ConfigKey UseAutoscaleVmHostnamePrefixEnabled = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Boolean.class,
+ "autoscale.vm.hostname.prefixenabled",
+ "true",
+ "If true, the auto scale vm group name will be used as a prefix for the auto scale vm hostnames.",
+ true);
+
ConfigKey AutoScaleErroredInstanceThreshold = new ConfigKey<>(ConfigKey.CATEGORY_ADVANCED, Integer.class,
"autoscale.errored.instance.threshold",
"10",
diff --git a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java
index 050b256f27dc..4e58d1af5a43 100644
--- a/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java
+++ b/server/src/main/java/com/cloud/network/as/AutoScaleManagerImpl.java
@@ -294,6 +294,7 @@ public class AutoScaleManagerImpl extends ManagerBase implements AutoScaleManage
PARAM_OVERRIDE_DISK_OFFERING_ID, PARAM_SSH_KEYPAIRS, PARAM_AFFINITY_GROUP_IDS, PARAM_NETWORK_IDS);
protected static final String VM_HOSTNAME_PREFIX = "autoScaleVm-";
+
protected static final int VM_HOSTNAME_RANDOM_SUFFIX_LENGTH = 6;
private static final Long DEFAULT_HOST_ID = -1L;
@@ -1952,6 +1953,19 @@ public void updateVmDetails(Map deployParams, Map