diff --git a/chv.nix b/chv.nix
index c905510..1b70e4b 100644
--- a/chv.nix
+++ b/chv.nix
@@ -17,7 +17,8 @@ let
 
   commonArgs = {
     meta = cloud-hypervisor-meta;
-    src = craneLib'.cleanCargoSource cloud-hypervisor-src;
+    # src = craneLib'.cleanCargoSource cloud-hypervisor-src;
+    src = cloud-hypervisor-src;
 
     patches =
       let
diff --git a/flake.lock b/flake.lock
index e6b1af5..519e20c 100644
--- a/flake.lock
+++ b/flake.lock
@@ -3,16 +3,16 @@
     "cloud-hypervisor-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1763989972,
-        "narHash": "sha256-icCOV+oG4jppP4MVS5GW7phtiSUNGqrxYpLwjlOwsoc=",
+        "lastModified": 1762791932,
+        "narHash": "sha256-DMo2ylmaTMhen73HEv4OZmv7e/u5MHBM9qDh7P76cLw=",
         "owner": "cyberus-technology",
         "repo": "cloud-hypervisor",
-        "rev": "dc905d98b7c9903a86f78a31754effe9548d335b",
+        "rev": "035da3aaae0f701dabd89ef5a10f83a48cf9c0e7",
         "type": "github"
       },
       "original": {
         "owner": "cyberus-technology",
-        "ref": "gardenlinux",
+        "ref": "cyberus-fork-cpu-profiles-cpuid",
         "repo": "cloud-hypervisor",
         "type": "github"
       }
@@ -90,20 +90,20 @@
     "libvirt-src": {
       "flake": false,
       "locked": {
-        "lastModified": 1764087192,
-        "narHash": "sha256-IqpK4GPW00E7pKtcWWn+6z5XSz7HUcLTOYZ+hTor57g=",
-        "ref": "gardenlinux",
-        "rev": "6e38797ab4c34ec6afe286d9455193b6b775dffc",
-        "revCount": 52379,
+        "lastModified": 1764747048,
+        "narHash": "sha256-YZKdV96kMxB0kVcCIJ72RaU4ZDG9s7mzYb/XCmo8h04=",
+        "ref": "cpu-profiles",
+        "rev": "6fcf89ae3da239fd1d4ec6d984f6feb97ee2f3e1",
+        "revCount": 52399,
         "submodules": true,
         "type": "git",
-        "url": "https://github.com/cyberus-technology/libvirt"
+        "url": "https://github.com/hertrste/libvirt"
       },
       "original": {
-        "ref": "gardenlinux",
+        "ref": "cpu-profiles",
         "submodules": true,
         "type": "git",
-        "url": "https://github.com/cyberus-technology/libvirt"
+        "url": "https://github.com/hertrste/libvirt"
       }
     },
     "nixpkgs": {
diff --git a/flake.nix b/flake.nix
index 5021f33..7dcde3b 100644
--- a/flake.nix
+++ b/flake.nix
@@ -8,14 +8,14 @@
     # A local path can be used for developing or testing local changes. Make
     # sure the submodules in a local libvirt checkout are populated.
     libvirt-src = {
-      # url = "git+file:?submodules=1";
-      url = "git+https://github.com/cyberus-technology/libvirt?ref=gardenlinux&submodules=1";
-      # url = "git+ssh://git@gitlab.cyberus-technology.de/cyberus/cloud/libvirt?ref=managedsave-fix&submodules=1";
+      # url = "git+file:/home/skober/repos/libvirt?submodules=1";
+      url = "git+https://github.com/hertrste/libvirt?ref=cpu-profiles&submodules=1";
+      # url = "git+ssh://git@gitlab.cyberus-technology.de/shertrampf/libvirt.git?ref=cpu-profiles&submodules=1";
       flake = false;
     };
     cloud-hypervisor-src = {
-      # url = "git+file:";
-      url = "github:cyberus-technology/cloud-hypervisor?ref=gardenlinux";
+      # url = "git+file::";
+      url = "github:cyberus-technology/cloud-hypervisor?ref=cyberus-fork-cpu-profiles-cpuid";
       flake = false;
     };
     edk2-src = {
diff --git a/tests/common.nix b/tests/common.nix
index 434febd..ab5b66a 100644
--- a/tests/common.nix
+++ b/tests/common.nix
@@ -22,6 +22,7 @@ let
       numa ? false,
       hugepages ? false,
       prefault ? false,
+      cpuModel ? "",
"pty", }: '' @@ -79,6 +80,16 @@ let '' else '' + ${ + if cpuModel == "" then + "" + else + '' + + ${cpuModel} + + '' + } 2 ${ if hugepages then @@ -421,6 +432,13 @@ in })}"; }; }; + "/etc/domain-chv-cpu-sapphire-rapid.xml" = { + "C+" = { + argument = "${pkgs.writeText "cirros-sapphire-rapid.xml" (virsh_ch_xml { + cpuModel = "sapphire-rapids"; + })}"; + }; + }; "/etc/new_interface.xml" = { "C+" = { argument = "${pkgs.writeText "new_interface.xml" new_interface}"; diff --git a/tests/default.nix b/tests/default.nix index 7d297fd..c95a03b 100644 --- a/tests/default.nix +++ b/tests/default.nix @@ -25,4 +25,14 @@ ; testScriptFile = ./testscript_long_migration_with_load.py; }; + + cpu_profiles = import ./libvirt-test.nix { + inherit + pkgs + libvirt-src + nixos-image + chv-ovmf + ; + testScriptFile = ./testscript_cpu_profiles.py; + }; } diff --git a/tests/testscript.py b/tests/testscript.py index 63b00d5..2ebcec0 100644 --- a/tests/testscript.py +++ b/tests/testscript.py @@ -1278,10 +1278,32 @@ def test_live_migration_with_vcpu_pinning(self): assert int(taskset_vcpu0_controller, 16) == int(taskset_vcpu0_compute, 16) assert int(taskset_vcpu2_controller, 16) == int(taskset_vcpu2_compute, 16) + def test_cpu_models(self): + """ + This tests checks that cpu-models API call is implemented and returns + at least a sapphire-rapids model. + Further, we check that the domcapabilities API call returns the + expected CPU profile as usable. + Both is required to be able to use the specific CPU profile. + While the 'virsh cpu-models' call only lists the CPU profiles the VMM + supports, the 'virsh domcapabilities' call takes into account the hosts + architecture. Thus, the latter reports what CPU profile actually can be + used in the current environment. + """ + out = controllerVM.succeed("virsh cpu-models x86_64") + self.assertIn("sapphire-rapids", out) + + out = controllerVM.succeed("virsh domcapabilities") + self.assertIn( + "sapphire-rapids", + out, + ) + def suite(): # Test cases in alphabetical order testcases = [ + LibvirtTests.test_cpu_models, LibvirtTests.test_disk_is_locked, LibvirtTests.test_disk_resize_qcow2, LibvirtTests.test_disk_resize_raw, diff --git a/tests/testscript_cpu_profiles.py b/tests/testscript_cpu_profiles.py new file mode 100644 index 0000000..7de7eeb --- /dev/null +++ b/tests/testscript_cpu_profiles.py @@ -0,0 +1,133 @@ +import time +import unittest + +# Following is required to allow proper linting of the python code in IDEs. +# Because certain functions like start_all() and certain objects like computeVM +# or other machines are added by Nix, we need to provide certain stub objects +# in order to allow the IDE to lint the python code successfully. 
+if "start_all" not in globals(): + from nixos_test_stubs import start_all, computeVM, controllerVM # type: ignore + + +class LibvirtTests(unittest.TestCase): + @classmethod + def setUpClass(cls): + start_all() + controllerVM.wait_for_unit("multi-user.target") + computeVM.wait_for_unit("multi-user.target") + controllerVM.succeed("cp /etc/nixos.img /nfs-root/") + controllerVM.succeed("chmod 0666 /nfs-root/nixos.img") + + controllerVM.succeed( + 'virt-admin -c virtchd:///system daemon-log-outputs "2:journald 1:file:/var/log/libvirt/libvirtd.log"' + ) + controllerVM.succeed( + "virt-admin -c virtchd:///system daemon-timeout --timeout 0" + ) + + computeVM.succeed( + 'virt-admin -c virtchd:///system daemon-log-outputs "2:journald 1:file:/var/log/libvirt/libvirtd.log"' + ) + computeVM.succeed("virt-admin -c virtchd:///system daemon-timeout --timeout 0") + + controllerVM.succeed("mkdir -p /var/lib/libvirt/storage-pools/nfs-share") + computeVM.succeed("mkdir -p /var/lib/libvirt/storage-pools/nfs-share") + + controllerVM.succeed("ssh -o StrictHostKeyChecking=no computeVM echo") + computeVM.succeed("ssh -o StrictHostKeyChecking=no controllerVM echo") + + controllerVM.succeed( + 'virsh pool-define-as --name "nfs-share" --type netfs --source-host "localhost" --source-path "nfs-root" --source-format "nfs" --target "/var/lib/libvirt/storage-pools/nfs-share"' + ) + controllerVM.succeed("virsh pool-start nfs-share") + + computeVM.succeed( + 'virsh pool-define-as --name "nfs-share" --type netfs --source-host "controllerVM" --source-path "nfs-root" --source-format "nfs" --target "/var/lib/libvirt/storage-pools/nfs-share"' + ) + computeVM.succeed("virsh pool-start nfs-share") + + def setUp(self): + print(f"\n\nRunning test: {self._testMethodName}\n\n") + + def tearDown(self): + # Destroy and undefine all running and persistent domains + controllerVM.execute( + 'virsh list --name | while read domain; do [[ -n "$domain" ]] && virsh destroy "$domain"; done' + ) + controllerVM.execute( + 'virsh list --all --name | while read domain; do [[ -n "$domain" ]] && virsh undefine "$domain"; done' + ) + computeVM.execute( + 'virsh list --name | while read domain; do [[ -n "$domain" ]] && virsh destroy "$domain"; done' + ) + computeVM.execute( + 'virsh list --all --name | while read domain; do [[ -n "$domain" ]] && virsh undefine "$domain"; done' + ) + + # After undefining and destroying all domains, there should not be any .xml files left + # Any files left here, indicate that we do not clean up properly + controllerVM.fail("find /run/libvirt/ch -name *.xml | grep .") + controllerVM.fail("find /var/lib/libvirt/ch -name *.xml | grep .") + computeVM.fail("find /run/libvirt/ch -name *.xml | grep .") + computeVM.fail("find /var/lib/libvirt/ch -name *.xml | grep .") + + # Destroy any remaining huge page allocations. + controllerVM.succeed("echo 0 > /proc/sys/vm/nr_hugepages") + computeVM.succeed("echo 0 > /proc/sys/vm/nr_hugepages") + + # Remove any remaining vm logs. + controllerVM.succeed("rm -f /tmp/*.log") + computeVM.succeed("rm -f /tmp/*.log") + + def test_live_cpu_profile_sapphire_rapids(self): + """ + Test using the Sapphire Rapids CPU profile for live migration works as expected. 
+ """ + + controllerVM.succeed("virsh define /etc/domain-chv-cpu-sapphire-rapid.xml") + controllerVM.succeed("virsh start testvm") + + assert wait_for_ssh(controllerVM) + + run_loops = 500 + for i in range(run_loops): + print(f"Run {i + 1}/{run_loops}") + + controllerVM.succeed( + "virsh migrate --domain testvm --desturi ch+tcp://computeVM/session --persistent --live --p2p" + ) + assert wait_for_ssh(computeVM) + + computeVM.succeed( + "virsh migrate --domain testvm --desturi ch+tcp://controllerVM/session --persistent --live --p2p" + ) + assert wait_for_ssh(controllerVM) + + +def suite(): + suite = unittest.TestSuite() + suite.addTest(LibvirtTests("test_live_cpu_profile_sapphire_rapids")) + return suite + + +def wait_for_ssh(machine, user="root", password="root", ip="192.168.1.2"): + retries = 100 + for i in range(retries): + print(f"Wait for ssh {i}/{retries}") + status, _ = ssh(machine, "echo hello", user, password, ip="192.168.1.2") + if status == 0: + return True + time.sleep(1) + return False + + +def ssh(machine, cmd, user="root", password="root", ip="192.168.1.2"): + status, out = machine.execute( + f"sshpass -p {password} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {user}@{ip} {cmd}" + ) + return status, out + + +runner = unittest.TextTestRunner() +if not runner.run(suite()).wasSuccessful(): + raise Exception("Test Run unsuccessful")