3 changes: 2 additions & 1 deletion chv.nix
@@ -17,7 +17,8 @@ let
commonArgs = {
meta = cloud-hypervisor-meta;

src = craneLib'.cleanCargoSource cloud-hypervisor-src;
# src = craneLib'.cleanCargoSource cloud-hypervisor-src;
src = cloud-hypervisor-src;

patches =
let
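
crane's cleanCargoSource normally filters the source tree down to the files a Cargo build actually needs, which keeps the store path stable across unrelated changes; passing the unfiltered source, as this change does, puts the whole checkout into the build input. A minimal annotated sketch of the trade-off (lines as in the diff above; the motivation for the switch is not stated in this PR):

src = craneLib'.cleanCargoSource cloud-hypervisor-src; # filtered: Cargo-relevant files only
src = cloud-hypervisor-src; # unfiltered: every file in the checkout
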
24 changes: 12 additions & 12 deletions flake.lock

(generated lockfile; diff not rendered)

10 changes: 5 additions & 5 deletions flake.nix
@@ -8,14 +8,14 @@
# A local path can be used for developing or testing local changes. Make
# sure the submodules in a local libvirt checkout are populated.
libvirt-src = {
# url = "git+file:<path/to/libvirt>?submodules=1";
url = "git+https://github.com/cyberus-technology/libvirt?ref=gardenlinux&submodules=1";
# url = "git+ssh://git@gitlab.cyberus-technology.de/cyberus/cloud/libvirt?ref=managedsave-fix&submodules=1";
# url = "git+file:/home/skober/repos/libvirt?submodules=1";
url = "git+https://github.com/hertrste/libvirt?ref=cpu-profiles&submodules=1";
# url = "git+ssh://git@gitlab.cyberus-technology.de/shertrampf/libvirt.git?ref=cpu-profiles&submodules=1";
flake = false;
};
cloud-hypervisor-src = {
# url = "git+file:<path/to/cloud-hypervisor>";
url = "github:cyberus-technology/cloud-hypervisor?ref=gardenlinux";
# url = "git+file::<path/to/cloud-hypervisor>";
url = "github:cyberus-technology/cloud-hypervisor?ref=cyberus-fork-cpu-profiles-cpuid";
flake = false;
};
edk2-src = {
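
For the local-path workflow mentioned in the libvirt-src comment above, the submodules of the local checkout typically need to be populated first; this is standard git usage, not specific to this repository:

git -C <path/to/libvirt> submodule update --init --recursive
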
18 changes: 18 additions & 0 deletions tests/common.nix
@@ -22,6 +22,7 @@ let
numa ? false,
hugepages ? false,
prefault ? false,
cpuModel ? "",
serial ? "pty",
}:
''
@@ -79,6 +80,16 @@
''
else
''
${
if cpuModel == "" then
""
else
''
<cpu mode='custom' match='exact' check='full'>
<model fallback='forbid'>${cpuModel}</model>
</cpu>
''
}
<vcpu placement='static'>2</vcpu>
${
if hugepages then
@@ -421,6 +432,13 @@
})}";
};
};
"/etc/domain-chv-cpu-sapphire-rapid.xml" = {
"C+" = {
argument = "${pkgs.writeText "cirros-sapphire-rapid.xml" (virsh_ch_xml {
cpuModel = "sapphire-rapids";
})}";
};
};
"/etc/new_interface.xml" = {
"C+" = {
argument = "${pkgs.writeText "new_interface.xml" new_interface}";
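
For reference, with cpuModel = "sapphire-rapids" the conditional above renders the following CPU element into the domain XML (reconstructed directly from the template; indentation is illustrative):

<cpu mode='custom' match='exact' check='full'>
  <model fallback='forbid'>sapphire-rapids</model>
</cpu>
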
10 changes: 10 additions & 0 deletions tests/default.nix
@@ -25,4 +25,14 @@
;
testScriptFile = ./testscript_long_migration_with_load.py;
};

cpu_profiles = import ./libvirt-test.nix {
inherit
pkgs
libvirt-src
nixos-image
chv-ovmf
;
testScriptFile = ./testscript_cpu_profiles.py;
};
}
22 changes: 22 additions & 0 deletions tests/testscript.py
@@ -1278,10 +1278,32 @@ def test_live_migration_with_vcpu_pinning(self):
assert int(taskset_vcpu0_controller, 16) == int(taskset_vcpu0_compute, 16)
assert int(taskset_vcpu2_controller, 16) == int(taskset_vcpu2_compute, 16)

def test_cpu_models(self):
"""
This tests checks that cpu-models API call is implemented and returns
at least a sapphire-rapids model.
Further, we check that the domcapabilities API call returns the
expected CPU profile as usable.
Both is required to be able to use the specific CPU profile.
While the 'virsh cpu-models' call only lists the CPU profiles the VMM
supports, the 'virsh domcapabilities' call takes into account the hosts
architecture. Thus, the latter reports what CPU profile actually can be
used in the current environment.
"""
out = controllerVM.succeed("virsh cpu-models x86_64")
self.assertIn("sapphire-rapids", out)

out = controllerVM.succeed("virsh domcapabilities")
self.assertIn(
"<model usable='yes' vendor='Intel' canonical='sapphire-rapids'>sapphire-rapids</model>",
out,
)


def suite():
# Test cases in alphabetical order
testcases = [
LibvirtTests.test_cpu_models,
LibvirtTests.test_disk_is_locked,
LibvirtTests.test_disk_resize_qcow2,
LibvirtTests.test_disk_resize_raw,
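
The two queries driven by test_cpu_models can also be issued by hand on a host running the Cloud Hypervisor libvirt driver; a minimal sketch mirroring the assertions above (same default connection as in the test):

virsh cpu-models x86_64 | grep sapphire-rapids
virsh domcapabilities | grep "canonical='sapphire-rapids'"
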
133 changes: 133 additions & 0 deletions tests/testscript_cpu_profiles.py
@@ -0,0 +1,133 @@
import time
import unittest

# The following is required to allow proper linting of the Python code in
# IDEs. Because certain functions like start_all() and certain objects like
# computeVM or other machines are injected by Nix, we need to provide stub
# objects so that the IDE can lint the Python code successfully.
if "start_all" not in globals():
from nixos_test_stubs import start_all, computeVM, controllerVM # type: ignore


class LibvirtTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
start_all()
controllerVM.wait_for_unit("multi-user.target")
computeVM.wait_for_unit("multi-user.target")
controllerVM.succeed("cp /etc/nixos.img /nfs-root/")
controllerVM.succeed("chmod 0666 /nfs-root/nixos.img")

controllerVM.succeed(
'virt-admin -c virtchd:///system daemon-log-outputs "2:journald 1:file:/var/log/libvirt/libvirtd.log"'
)
controllerVM.succeed(
"virt-admin -c virtchd:///system daemon-timeout --timeout 0"
)

computeVM.succeed(
'virt-admin -c virtchd:///system daemon-log-outputs "2:journald 1:file:/var/log/libvirt/libvirtd.log"'
)
computeVM.succeed("virt-admin -c virtchd:///system daemon-timeout --timeout 0")

controllerVM.succeed("mkdir -p /var/lib/libvirt/storage-pools/nfs-share")
computeVM.succeed("mkdir -p /var/lib/libvirt/storage-pools/nfs-share")

controllerVM.succeed("ssh -o StrictHostKeyChecking=no computeVM echo")
computeVM.succeed("ssh -o StrictHostKeyChecking=no controllerVM echo")

controllerVM.succeed(
'virsh pool-define-as --name "nfs-share" --type netfs --source-host "localhost" --source-path "nfs-root" --source-format "nfs" --target "/var/lib/libvirt/storage-pools/nfs-share"'
)
controllerVM.succeed("virsh pool-start nfs-share")

computeVM.succeed(
'virsh pool-define-as --name "nfs-share" --type netfs --source-host "controllerVM" --source-path "nfs-root" --source-format "nfs" --target "/var/lib/libvirt/storage-pools/nfs-share"'
)
computeVM.succeed("virsh pool-start nfs-share")

def setUp(self):
print(f"\n\nRunning test: {self._testMethodName}\n\n")

def tearDown(self):
# Destroy and undefine all running and persistent domains
controllerVM.execute(
'virsh list --name | while read domain; do [[ -n "$domain" ]] && virsh destroy "$domain"; done'
)
controllerVM.execute(
'virsh list --all --name | while read domain; do [[ -n "$domain" ]] && virsh undefine "$domain"; done'
)
computeVM.execute(
'virsh list --name | while read domain; do [[ -n "$domain" ]] && virsh destroy "$domain"; done'
)
computeVM.execute(
'virsh list --all --name | while read domain; do [[ -n "$domain" ]] && virsh undefine "$domain"; done'
)

# After undefining and destroying all domains, there should not be any
# .xml files left. Any files left here indicate that we do not clean up
# properly.
controllerVM.fail("find /run/libvirt/ch -name '*.xml' | grep .")
controllerVM.fail("find /var/lib/libvirt/ch -name '*.xml' | grep .")
computeVM.fail("find /run/libvirt/ch -name '*.xml' | grep .")
computeVM.fail("find /var/lib/libvirt/ch -name '*.xml' | grep .")

# Destroy any remaining huge page allocations.
controllerVM.succeed("echo 0 > /proc/sys/vm/nr_hugepages")
computeVM.succeed("echo 0 > /proc/sys/vm/nr_hugepages")

# Remove any remaining vm logs.
controllerVM.succeed("rm -f /tmp/*.log")
computeVM.succeed("rm -f /tmp/*.log")

def test_live_cpu_profile_sapphire_rapids(self):
"""
Test using the Sapphire Rapids CPU profile for live migration works as expected.
"""

controllerVM.succeed("virsh define /etc/domain-chv-cpu-sapphire-rapid.xml")
controllerVM.succeed("virsh start testvm")

assert wait_for_ssh(controllerVM)

run_loops = 500
for i in range(run_loops):
print(f"Run {i + 1}/{run_loops}")

controllerVM.succeed(
"virsh migrate --domain testvm --desturi ch+tcp://computeVM/session --persistent --live --p2p"
)
assert wait_for_ssh(computeVM)

computeVM.succeed(
"virsh migrate --domain testvm --desturi ch+tcp://controllerVM/session --persistent --live --p2p"
)
assert wait_for_ssh(controllerVM)


def suite():
suite = unittest.TestSuite()
suite.addTest(LibvirtTests("test_live_cpu_profile_sapphire_rapids"))
return suite


def wait_for_ssh(machine, user="root", password="root", ip="192.168.1.2"):
retries = 100
for i in range(retries):
print(f"Wait for ssh {i}/{retries}")
status, _ = ssh(machine, "echo hello", user, password, ip="192.168.1.2")
if status == 0:
return True
time.sleep(1)
return False


def ssh(machine, cmd, user="root", password="root", ip="192.168.1.2"):
status, out = machine.execute(
f"sshpass -p {password} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {user}@{ip} {cmd}"
)
return status, out


runner = unittest.TextTestRunner()
if not runner.run(suite()).wasSuccessful():
raise Exception("Test run unsuccessful")
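
Since the test driver objects are injected by Nix at runtime, the nixos_test_stubs module imported at the top of this file only needs to satisfy the linter. A hypothetical sketch of such a stub module, limited to exactly the names and methods this script uses (the real implementations come from the NixOS test driver):

# nixos_test_stubs.py -- linting stubs only; never executed by the test driver.
class _Machine:
    def succeed(self, *cmds) -> str: ...
    def fail(self, *cmds) -> str: ...
    def execute(self, cmd) -> tuple[int, str]: ...
    def wait_for_unit(self, unit) -> None: ...

def start_all() -> None: ...

controllerVM = _Machine()
computeVM = _Machine()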