diff --git a/libvirt/tests/cfg/virtual_network/qemu/netperf.cfg b/libvirt/tests/cfg/virtual_network/qemu/netperf.cfg
new file mode 100644
index 0000000000..a7b0d1c7a5
--- /dev/null
+++ b/libvirt/tests/cfg/virtual_network/qemu/netperf.cfg
@@ -0,0 +1,148 @@
+- virtual_network.qemu_test.netperf: image_copy
+    create_vm_libvirt = yes
+    master_images_clone = img1
+    remove_image_image1 = yes
+    main_vm = vm1
+    vms = ${main_vm}
+    kill_vm_libvirt = yes
+    ovmf:
+        kill_vm_libvirt_options = --nvram
+    virt_test_type = libvirt
+    type = netperf
+    not_preprocess = yes
+    image_snapshot = yes
+    setup_ksm = no
+    take_regular_screendumps = no
+    store_vm_register = no
+    # Sometimes, due to VM performance issues or a misconfigured number of
+    # sessions that the processors cannot handle at the same time, not all
+    # clients may come up together; modify the 'tries' parameter to repeat
+    # the sub-tests and increase the probability of success.
+    tries = 5
+    # Please update the commented params below when you need a special cfg
+    # for your test NIC cards.
+    # nic1 is for control, nic2 is for data connection
+    nics += ' nic2'
+    # queues = 4
+    enable_msix_vectors = yes
+    # Configure different types of network adapters.
+    nic_model_nic1 = virtio
+    nic_model_nic2 = virtio
+    i386, x86_64:
+        nic_model_nic2 = e1000
+    netdst_nic1 = private
+    netdst_nic2 = switch
+    # Fix the MAC for nic2 here if needed; this can be empty.
+    # nic_mac_nic2 =
+    # Add the physical NIC you want to attach to your private bridge;
+    # this can be empty.
+    # physical_nic =
+    # bridge_force_create=yes
+    # bridge_nic1 =
+    # NUMA configuration
+    netperf_with_numa = yes
+    vdpa_ovs_add_flows = yes
+    # Configure netperf test parameters. It takes some seconds to wait for
+    # all the clients to start working; this wait time should be less than
+    # 0.5 * l and grows with the number of threads, so empirically l should
+    # be no less than 60.
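+    # For example, with l = 60 the clients get at most 30 seconds (0.5 * l) to start.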
+    l = 60
+    # Test protocol and test data configuration
+    protocols = "TCP_STREAM TCP_MAERTS TCP_RR"
+    sessions = "1 2 4 8"
+    sessions_rr = "1 25 50"
+    sizes = "64 256 1024 4096 16384 65535"
+    sizes_rr = "64 256 4096"
+    # Client configuration
+    client = localhost
+    username_client = root
+    password_client = 123456
+    shell_client_client = ssh
+    shell_port_client = 22
+    shell_prompt_client = \[root@.{0,50}][\#\$]
+    # Host configuration
+    shell_port_host = 22
+    password_host = 123456
+    username_host = root
+    os_type_client = linux
+    os_type_host = linux
+    shell_prompt_host = \[root@.{0,50}][\#\$]
+    # Test base env configuration
+    ver_cmd = rpm -q qemu-kvm
+    netperf_version = 2.7.1
+    netperf_pkg = netperf/netperf-2.7.1.tar.bz2
+    setup_cmd = "cd /tmp && rm -rf netperf-2.7.1 && tar xvfj netperf-2.7.1.tar.bz2 && cd netperf-2.7.1 && sh autogen.sh && CFLAGS=-Wno-implicit-function-declaration ./configure --enable-burst --enable-demo=yes && make"
+    ppc64:
+        setup_cmd = "cd /tmp && rm -rf netperf-2.7.1 && tar xvfj netperf-2.7.1.tar.bz2 && cd netperf-2.7.1 && sh autogen.sh && CFLAGS=-Wno-implicit-function-declaration ./configure --build=ppc64 --enable-burst --enable-demo=yes && make"
+    ppc64le:
+        setup_cmd = "cd /tmp && rm -rf netperf-2.7.1 && tar xvfj netperf-2.7.1.tar.bz2 && cd netperf-2.7.1 && sh autogen.sh && CFLAGS=-Wno-implicit-function-declaration ./configure --build=ppc64le --enable-burst --enable-demo=yes && make"
+    log_hostinfo_script = scripts/rh_perf_log_hostinfo_script.sh
+    host_tuned_profile = "tuned-adm profile virtual-host"
+    client_tuned_profile = "tuned-adm profile virtual-host"
+    client_kill_linux = "killall netperf"
+    client_kill_windows = "taskkill /F /IM netperf*"
+    # The get-status functions are currently implemented for RHEL and Fedora
+    # guests only. They are not tested with other guests, so set this
+    # according to your guest OS environment.
+    RHEL, Fedora:
+        get_status_in_guest = yes
+    Linux:
+        # log_guestinfo_script = scripts/rh_perf_log_guestinfo_script.sh
+        # log_guestinfo_exec = bash
+        # log_guestinfo_path = /tmp/log_guestinfo.sh
+        server_tuned_profile = "tuned-adm profile virtual-guest"
+        server_mtu_cmd = "ifconfig %s mtu %s"
+    Windows:
+        # log_guestinfo_script = scripts/rh_perf_log_guestinfo_script.bat
+        # log_guestinfo_exec = cmd /c
+        # log_guestinfo_path = C:\log_guestinfo.bat
+        server_mtu_cmd = "netsh interface ipv4 set interface "%s" mtu=%s"
+        i386, x86_64:
+            cpu_model_flags = ",hv_time,hv_relaxed,hv_vapic,hv_spinlocks=0xfff"
+        windows_disable_firewall = "netsh advfirewall set allprofiles state off"
+        devcon_dirname = "win7_"
+        i386:
+            devcon_dirname += "x86"
+        x86_64:
+            devcon_dirname += "amd64"
+    client_mtu_cmd = "ifconfig %s mtu %s"
+    host_mtu_cmd = "ifconfig %s mtu %s"
+    # FIXME: this is a workaround; a better way to disable all the firewall
+    # rules is needed.
+    env_setup_cmd = "systemctl stop firewalld.service ; service iptables stop ; iptables -F ; nft flush ruleset;"
+    env_setup_cmd += " echo 2 > /proc/sys/net/ipv4/conf/all/arp_ignore;"
+    env_setup_cmd += " echo 0 > /sys/kernel/mm/ksm/run;"
+    env_setup_cmd += " echo 0 > /proc/sys/kernel/watchdog;"
+    env_setup_cmd += " echo 0 > /proc/sys/kernel/nmi_watchdog;"
+    env_setup_cmd += " setenforce 1"
+    variants:
+        - host_guest:
+            Windows:
+                netserv_start_cmd = "start /b %s:\netserver-2.6.0.exe"
+                guest_ver_cmd = "ver"
+                use_cygwin = no
+    variants:
+        - @default_setting:
+            Linux:
+                no Jeos
+                # to test exthost <-> guest:
+                # client =
+                # client_public_ip =
+                # server_private_ip =
+            Windows:
+                # client =
+                config_cmds = driver_verifier_query
+                driver_verifier_query = "verifier /querysettings"
+                reboot_after_config = yes
+                install_vioprot_cmd = "netcfg -v -l %s -c p -i VIOPROT"
+                cdroms += " virtio winutils"
+                virtio_win_media_type = iso
+                driver_name = netkvm VIOPROT
+                devcon_path = "WIN_UTILS:\devcon\${devcon_dirname}\devcon.exe"
+                device_name = "Red Hat VirtIO Ethernet Adapter"
+                device_hwid = '"PCI\VEN_1AF4&DEV_1000" "PCI\VEN_1AF4&DEV_1041"'
+    variants:
+        - with_jumbo:
+            mtu = 9000
+            # Configure the client's physical NIC name for the jumbo frame
+            # case by uncommenting it.
+            # client_physical_nic =
+        - default:
diff --git a/libvirt/tests/src/virtual_network/qemu/netperf.py b/libvirt/tests/src/virtual_network/qemu/netperf.py
new file mode 100644
index 0000000000..413977f08a
--- /dev/null
+++ b/libvirt/tests/src/virtual_network/qemu/netperf.py
@@ -0,0 +1,814 @@
+import logging
+import os
+import re
+import threading
+import time
+
+from avocado.utils import process
+from virttest import (
+    env_process,
+    error_context,
+    remote,
+    utils_misc,
+    utils_net,
+    utils_test,
+    virt_vm,
+)
+
+from provider.virtual_network import netperf_base, network_base, vdpa_utils
+from provider.windows import win_driver_utils
+
+LOG_JOB = logging.getLogger("avocado.test")
+
+_netserver_started = False
+
+
+def start_netserver_win(session, start_cmd, test):
+    check_reg = re.compile(r"NETSERVER.*EXE", re.I)
+    if not check_reg.findall(session.cmd_output("tasklist")):
+        session.sendline(start_cmd)
+        if not utils_misc.wait_for(
+            lambda: check_reg.findall(session.cmd_output("tasklist")),
+            30,
+            5,
+            1,
+            "Wait netserver start",
+        ):
+            msg = "Cannot start netserver with command %s" % start_cmd
+            test.fail(msg)
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    Network stress test with netperf.
+
+    1) Boot up VM(s), setup SSH authorization between host
+       and guest(s)/external host
+    2) Prepare the test environment in server/client/host
+    3) Execute netperf tests, collect and analyze the results
+
+    :param test: QEMU test object.
+    :param params: Dictionary with the test parameters.
+    :param env: Dictionary with test environment.
+    """
+
+    def mtu_set(mtu):
+        """
+        Set the MTU of server/client/host
+
+        :param mtu: MTU value to be set
+        """
+
+        server_mtu_cmd = params.get("server_mtu_cmd")
+        client_mtu_cmd = params.get("client_mtu_cmd")
+        host_mtu_cmd = params.get("host_mtu_cmd")
+        client_physical_nic = params.get("client_physical_nic")
+        error_context.context("Changing the MTU of guest", test.log.info)
+        if params.get("os_type") == "linux":
+            ethname = utils_net.get_linux_ifname(server_ctl, mac)
+            netperf_base.ssh_cmd(server_ctl, server_mtu_cmd % (ethname, mtu))
+        elif params.get("os_type") == "windows":
+            connection_id = utils_net.get_windows_nic_attribute(
+                server_ctl, "macaddress", mac, "netconnectionid"
+            )
+            netperf_base.ssh_cmd(server_ctl, server_mtu_cmd % (connection_id, mtu))
+
+        error_context.context("Changing the MTU of client", test.log.info)
+        if client_physical_nic:
+            netperf_base.ssh_cmd(client, client_mtu_cmd % (client_physical_nic, mtu))
+
+        netdst = params.get("netdst", "switch")
+        host_bridges = utils_net.Bridge()
+        br_in_use = host_bridges.list_br()
+        target_ifaces = []
+        if netdst in br_in_use:
+            ifaces_in_use = host_bridges.list_iface()
+            target_ifaces = list(ifaces_in_use + br_in_use)
+
+        add_flows = params.get("vdpa_ovs_add_flows", "yes") == "yes"
+        ovs_handler = vdpa_utils.OVSHandler(vm)
+        target_ifaces.extend(
+            ovs_handler.get_vdpa_ovs_info(add_flows=add_flows, return_ports=True)
+        )
+
+        if vm.virtnet[0].nettype == "macvtap":
+            target_ifaces.extend([vm.virtnet[0].netdst, vm.get_ifname(0)])
+        error_context.context("Change all bridge NICs' MTU to %s" % mtu, test.log.info)
+        for iface in target_ifaces:
+            try:
+                process.run(
+                    host_mtu_cmd % (iface, mtu), ignore_status=False, shell=True
+                )
+            except process.CmdError as err:
+                if "SIOCSIFMTU" in err.result.stderr.decode():
+                    test.cancel("The ethernet device does not support jumbo frames, cancelling test")
+
+    pinned_node = 0
+    host_numa = utils_misc.NumaInfo()
+    node_list = host_numa.online_nodes_withcpumem
+    mem = int(params["mem"])
+    mem_kb = mem * 1024
+    try:
+        for node in node_list:
+            node_mem_free = int(host_numa.read_from_node_meminfo(node, "MemFree"))
+            if node_mem_free > mem_kb:
+                pinned_node = node
+                break
+        params["qemu_command_prefix"] = f"numactl -m {pinned_node}"
+        vm_name = params["main_vm"]
+        env_process.process(
+            test, params, env, env_process.preprocess_image, env_process.preprocess_vm
+        )
+        vm = env.get_vm(vm_name)
+        vm.verify_alive()
+    except virt_vm.VMCreateError as e:
+        if f"node argument {pinned_node} is out of range" in str(e):
+            test.cancel(f"The node: {pinned_node} used for VM pinning is not valid")
+        test.error(e)
+
+    login_timeout = int(params.get("login_timeout", 360))
+
+    config_cmds = params.get("config_cmds")
+    if config_cmds:
+        for config_cmd in config_cmds.split(","):
+            cmd = params.get(config_cmd.strip())
+            session = vm.wait_for_serial_login(timeout=login_timeout)
+            if cmd:
+                s, o = session.cmd_status_output(cmd)
+                test.log.info(o)
+                if "querysettings" in cmd:
+                    if ".sys" in o:
+                        verifier_clear_cmd = "verifier /reset"
+                        status, output = session.cmd_status_output(verifier_clear_cmd)
+                        test.log.info(output)
+                        if ".sys" in output:
+                            msg = "%s does not work correctly" % verifier_clear_cmd
+                            test.error(msg)
+                elif s != 0:
+                    msg = "Config command %s failed. Output: %s" % (cmd, o)
+                    test.error(msg)
+            session.close()
+        if params.get("reboot_after_config", "yes") == "yes":
+            vm.reboot(method="system_reset", serial=True)
+
+    failover_exist = False
+    for i in params.get("nics").split():
+        nic_params = params.object_params(i)
+        if nic_params.get("failover_pair_id"):
+            failover_exist = True
+            break
+    if failover_exist:
+        if params.get("os_type") == "linux":
+            session = vm.wait_for_serial_login(timeout=login_timeout)
+            ifname = utils_net.get_linux_ifname(session)
+            for i in ifname:
+                cmd = "ethtool -i %s |grep driver| awk -F': ' '{print $2}'" % i
+                driver = session.cmd_output(cmd).strip()
+                if driver == "net_failover":
+                    session.cmd_output("dhclient -r && dhclient %s" % i)
+                    break
+        if params.get("os_type") == "windows" and params.get("install_vioprot_cmd"):
+            media_type = params["virtio_win_media_type"]
+            driver_name = params["driver_name"]
+            session = vm.wait_for_login(nic_index=2, timeout=login_timeout)
+            for drv_name in driver_name.split():
+                inf_path = win_driver_utils.get_driver_inf_path(
+                    session, test, media_type, drv_name
+                )
+                if drv_name == "netkvm":
+                    device_name = params.get("device_name")
+                    device_hwid = params.get("device_hwid")
+                    devcon_path = utils_misc.set_winutils_letter(
+                        session, params.get("devcon_path")
+                    )
+                    status, output = session.cmd_status_output("dir %s" % devcon_path)
+                    if status:
+                        test.error("devcon.exe not found, details: %s" % output)
+
+                    error_context.context(
+                        "Uninstall %s driver" % drv_name, test.log.info
+                    )
+                    win_driver_utils.uninstall_driver(
+                        session,
+                        test,
+                        devcon_path,
+                        drv_name,
+                        device_name,
+                        device_hwid,
+                    )
+                    for hwid in device_hwid.split():
+                        install_driver_cmd = "%s install %s %s" % (
+                            devcon_path,
+                            inf_path,
+                            hwid,
+                        )
+                        status, output = session.cmd_status_output(
+                            install_driver_cmd, timeout=login_timeout
+                        )
+                        if status:
+                            test.fail(
+                                "Failed to install driver '%s', "
+                                "details:\n%s" % (drv_name, output)
+                            )
+                if drv_name == "VIOPROT":
+                    test.log.info("Will install inf file found at '%s'", inf_path)
+                    install_cmd = params.get("install_vioprot_cmd") % inf_path
+                    status, output = session.cmd_status_output(install_cmd)
+                    if status:
+                        test.error("Install inf file failed, output=%s" % output)
+            session.cmd_output_safe("ipconfig /renew", timeout=login_timeout)
+            session.close()
+        else:
+            try:
+                vm.wait_for_serial_login(
+                    timeout=login_timeout, restart_network=True
+                ).close()
+            except virt_vm.VMIPAddressMissingError:
+                pass
+
+    if len(params.get("nics", "").split()) > 1:
+        session = vm.wait_for_login(nic_index=1, timeout=login_timeout)
+    else:
+        session = vm.wait_for_login(timeout=login_timeout)
+
+    mac = vm.get_mac_address(0)
+    if params.get("os_type") == "linux":
+        ethname = utils_net.get_linux_ifname(session, mac)
+    queues = int(params.get("queues", 1))
+    if queues > 1:
+        if params.get("os_type") == "linux":
+            session.cmd_status_output("ethtool -L %s combined %s" % (ethname, queues))
+        else:
+            test.log.info("FIXME: add support for enabling MQ for Windows guests!")
+
+    if params.get("server_private_ip") and params.get("os_type") == "linux":
+        server_ip = params.get("server_private_ip")
+        cmd = "systemctl stop NetworkManager.service"
+        cmd += " && ifconfig %s %s up" % (ethname, server_ip)
+        session.cmd_output(cmd)
+    else:
+        server_ip = vm.wait_for_get_address(0, timeout=90)
+
+    if len(params.get("nics", "").split()) > 1:
+        server_ctl = vm.wait_for_login(nic_index=1, timeout=login_timeout)
+        server_ctl_ip = vm.wait_for_get_address(1, timeout=90)
+    else:
+        server_ctl = vm.wait_for_login(timeout=login_timeout)
+        server_ctl_ip = server_ip
+
+    if params.get("rh_perf_envsetup_script"):
+        utils_test.service_setup(vm, session, test.virtdir)
+    session.close()
+
+    test.log.debug(
+        process.system_output(
+            "numactl --hardware", verbose=False, ignore_status=True, shell=True
+        ).decode()
+    )
+    test.log.debug(
+        process.system_output(
+            "numactl --show", verbose=False, ignore_status=True, shell=True
+        ).decode()
+    )
+    # Pin guest vCPUs and vhost threads.
+    # Use the specified NUMA node if provided; otherwise, pin sequentially to host CPUs.
+    network_base.pin_vcpu_vhost_threads(vm, params.get("numa_node"))
+
+    host = params.get("host", "localhost")
+    host_ip = host
+    if host != "localhost":
+        params_host = params.object_params("host")
+        host = remote.wait_for_login(
+            params_host.get("shell_client"),
+            host_ip,
+            params_host.get("shell_port"),
+            params_host.get("username"),
+            params_host.get("password"),
+            params_host.get("shell_prompt"),
+        )
+
+    client = params.get("client", "localhost")
+    client_ip = client
+    clients = []
+    client_pub_ip = None
+    # client session 1 for control, session 2 for data communication
+    for i in range(2):
+        if client != "localhost" and params.get("os_type_client") == "linux":
+            client_pub_ip = params.get("client_public_ip")
+            tmp = remote.wait_for_login(
+                params.get("shell_client_client"),
+                client_pub_ip,
+                params.get("shell_port_client"),
+                params.get("username_client"),
+                params.get("password_client"),
+                params.get("shell_prompt_client"),
+            )
+            cmd = "ifconfig %s %s up" % (params.get("client_physical_nic"), client_ip)
+            netperf_base.ssh_cmd(tmp, cmd)
+        else:
+            tmp = "localhost"
+        clients.append(tmp)
+    client = clients[0]
+
+    error_context.context("Prepare env of server/client/host", test.log.info)
+    prepare_list = set([server_ctl, client, host])
+    tag_dict = {server_ctl: "server", client: "client", host: "host"}
+    if client_pub_ip:
+        ip_dict = {server_ctl: server_ctl_ip, client: client_pub_ip, host: host_ip}
+    else:
+        ip_dict = {server_ctl: server_ctl_ip, client: client_ip, host: host_ip}
+    for i in prepare_list:
+        params_tmp = params.object_params(tag_dict[i])
+        if params_tmp.get("os_type") == "linux":
+            shell_port = int(params_tmp["shell_port"])
+            password = params_tmp["password"]
+            username = params_tmp["username"]
+            netperf_base.env_setup(
+                test, params, i, ip_dict[i], username, shell_port, password
+            )
+        elif params_tmp.get("os_type") == "windows":
+            windows_disable_firewall = params.get("windows_disable_firewall")
+            netperf_base.ssh_cmd(i, windows_disable_firewall)
+    netperf_base.tweak_tuned_profile(params, server_ctl, client, host)
+    mtu = int(params.get("mtu", "1500"))
+    mtu_set(mtu)
+
+    env.stop_ip_sniffing()
+
+    try:
+        error_context.context("Start netperf testing", test.log.info)
+        start_test(
+            server_ip,
+            server_ctl,
+            host,
+            clients,
+            test.resultsdir,
+            test_duration=int(params.get("l")),
+            sessions_rr=params.get("sessions_rr"),
+            sessions=params.get("sessions"),
+            sizes_rr=params.get("sizes_rr"),
+            sizes=params.get("sizes"),
+            protocols=params.get("protocols"),
+            netserver_port=params.get("netserver_port", "12865"),
+            params=params,
+            test=test,
+        )
+
+        if params.get("log_hostinfo_script"):
+            src = os.path.join(test.virtdir, params.get("log_hostinfo_script"))
+            path = os.path.join(test.resultsdir, "systeminfo")
+            process.system_output(
+                "bash %s %s &> %s" % (src, test.resultsdir, path), shell=True
+            )
+
+        if params.get("log_guestinfo_script") and params.get("log_guestinfo_exec"):
params.get("log_guestinfo_exec"): + src = os.path.join(test.virtdir, params.get("log_guestinfo_script")) + path = os.path.join(test.resultsdir, "systeminfo") + destpath = params.get("log_guestinfo_path", "/tmp/log_guestinfo.sh") + vm.copy_files_to(src, destpath, nic_index=1) + logexec = params.get("log_guestinfo_exec", "bash") + output = server_ctl.cmd_output("%s %s" % (logexec, destpath)) + logfile = open(path, "a+") + logfile.write(output) + logfile.close() + finally: + if mtu != 1500: + mtu_default = 1500 + error_context.context( + "Change back server, client and host's mtu to %s" % mtu_default + ) + mtu_set(mtu_default) + if ( + params.get("client_physical_nic") + and params.get("os_type_client") == "linux" + ): + cmd = "ifconfig %s 0.0.0.0" % params.get("client_physical_nic") + netperf_base.ssh_cmd(client, cmd) + + +# FIXME: `test` should be a mandatory argument here +@error_context.context_aware +def start_test( + server, + server_ctl, + host, + clients, + resultsdir, + test_duration=60, + sessions_rr="50 100 250 500", + sessions="1 2 4", + sizes_rr="64 256 512 1024 2048", + sizes="64 256 512 1024 2048 4096", + protocols="TCP_STREAM TCP_MAERTS TCP_RR TCP_CRR", + netserver_port=None, + params=None, + test=None, +): + """ + Start to test with different kind of configurations + + :param server: netperf server ip for data connection + :param server_ctl: ip to control netperf server + :param host: localhost ip + :param clients: netperf clients' ip + :param resultsdir: directory to restore the results + :param test_duration: test duration + :param sessions_rr: sessions number list for RR test + :param sessions: sessions number list + :param sizes_rr: request/response sizes (TCP_RR, UDP_RR) + :param sizes: send size (TCP_STREAM, UDP_STREAM) + :param protocols: test type + :param netserver_port: netserver listen port + :param params: Dictionary with the test parameters. 
+ """ + if params is None: + params = {} + + fd = open("%s/netperf-result.%s.RHS" % (resultsdir, time.time()), "w") + netperf_base.record_env_version(test, params, host, server_ctl, fd, test_duration) + + record_list = [ + "size", + "sessions", + "throughput", + "trans.rate", + "CPU", + "thr_per_CPU", + "rx_pkts", + "tx_pkts", + "rx_byts", + "tx_byts", + "re_pkts", + "exits", + "tpkt_per_exit", + ] + + for i in range(int(params.get("queues", 0))): + record_list.append("rx_intr_%s" % i) + record_list.append("rx_intr_sum") + for i in range(int(params.get("queues", 0))): + record_list.append("tx_intr_%s" % i) + record_list.append("tx_intr_sum") + base = params.get("format_base", "12") + fbase = params.get("format_fbase", "2") + + output = netperf_base.ssh_cmd(host, "mpstat 1 1 |grep CPU") + mpstat_head = re.findall(r"CPU\s+.*", output)[0].split() + mpstat_key = params.get("mpstat_key", "%idle") + if mpstat_key in mpstat_head: + mpstat_index = mpstat_head.index(mpstat_key) + 1 + else: + mpstat_index = 0 + + for protocol in protocols.split(): + error_context.context("Testing %s protocol" % protocol, test.log.info) + protocol_log = "" + if protocol in ("TCP_RR", "TCP_CRR"): + sessions_test = sessions_rr.split() + sizes_test = sizes_rr.split() + protocol_log = protocol + else: + sessions_test = sessions.split() + sizes_test = sizes.split() + if protocol == "TCP_STREAM": + protocol_log = protocol + " (RX)" + elif protocol == "TCP_MAERTS": + protocol_log = protocol + " (TX)" + fd.write("Category:" + protocol_log + "\n") + + record_header = True + for i in sizes_test: + for j in sessions_test: + if protocol in ("TCP_RR", "TCP_CRR"): + nf_args = "-t %s -v 1 -- -r %s,%s" % (protocol, i, i) + elif protocol == "TCP_MAERTS": + nf_args = "-C -c -t %s -- -m ,%s" % (protocol, i) + else: + nf_args = "-C -c -t %s -- -m %s" % (protocol, i) + + ret = launch_client( + j, + server, + server_ctl, + host, + clients, + test_duration, + nf_args, + netserver_port, + params, + test, + ) + if ret: + thu = float(ret["thu"]) + cpu = 100 - float(ret["mpstat"].split()[mpstat_index]) + normal = thu / cpu + if ret.get("tx_pkt") and ret.get("exits"): + ret["tpkt_per_exit"] = float(ret["tx_pkts"]) / float( + ret["exits"] + ) + + ret["size"] = int(i) + ret["sessions"] = int(j) + if protocol in ("TCP_RR", "TCP_CRR"): + ret["trans.rate"] = thu + else: + ret["throughput"] = thu + ret["CPU"] = cpu + ret["thr_per_CPU"] = normal + row, key_list = netperf_base.netperf_record( + ret, record_list, header=record_header, base=base, fbase=fbase + ) + category = "" + if record_header: + record_header = False + category = row.split("\n")[0] + + test.write_test_keyval({"category": category}) + prefix = "%s--%s--%s" % (protocol, i, j) + for key in key_list: + test.write_test_keyval({"%s--%s" % (prefix, key): ret[key]}) + + test.log.info(row) + fd.write(row + "\n") + + fd.flush() + + test.log.debug("Remove temporary files") + process.system_output( + "rm -f /tmp/netperf.%s.nf" % ret["pid"], + verbose=False, + ignore_status=True, + shell=True, + ) + test.log.info("Netperf thread completed successfully") + else: + test.log.debug( + "Not all netperf clients start to work, please enlarge" + " '%s' number or skip this tests", + int(j), + ) + continue + fd.close() + + +@error_context.context_aware +def launch_client( + sessions, + server, + server_ctl, + host, + clients, + l, + nf_args, + port, + params, + test, +): + """Launch netperf clients""" + + netperf_version = params.get("netperf_version", "2.6.0") + client_path = "/tmp/netperf-%s/src/netperf" 
+    server_path = "/tmp/netperf-%s/src/netserver" % netperf_version
+    get_status_flag = params.get("get_status_in_guest", "no") == "yes"
+    global _netserver_started
+    # Start netserver
+    if _netserver_started:
+        test.log.debug("Netserver already started.")
+    else:
+        error_context.context("Start Netserver on guest", test.log.info)
+        if params.get("os_type") == "windows":
+            timeout = float(params.get("timeout", "240"))
+            cdrom_drv = utils_misc.get_winutils_vol(server_ctl)
+            start_session = server_ctl
+            netserv_start_cmd = params.get("netserv_start_cmd") % cdrom_drv
+            test.log.info(
+                "Start netserver on windows guest, cmd is: %s", netserv_start_cmd
+            )
+            start_netserver_win(start_session, netserv_start_cmd, test)
+        else:
+            test.log.info("Netserver start cmd is '%s'", server_path)
+            netperf_base.ssh_cmd(server_ctl, "pidof netserver || %s" % server_path)
+            ncpu = netperf_base.ssh_cmd(
+                server_ctl, "cat /proc/cpuinfo |grep processor |wc -l"
+            )
+            ncpu = re.findall(r"\d+", ncpu)[-1]
+
+        test.log.info("Netserver started successfully")
+
+    def count_interrupt(name):
+        """
+        Get a list of interrupt numbers for each queue
+
+        :param name: the name of the interrupt, such as "virtio0-input"
+        """
+        sum = 0
+        intr = []
+        stat = netperf_base.ssh_cmd(server_ctl, "grep %s /proc/interrupts" % name)
+        for i in stat.strip().split("\n"):
+            for cpu in range(int(ncpu)):
+                sum += int(i.split()[cpu + 1])
+            intr.append(sum)
+            sum = 0
+        return intr
+
+    def get_state():
+        ifname = None
+        for i in netperf_base.ssh_cmd(server_ctl, "ifconfig").split("\n\n"):
+            if server in i:
+                ifname = re.findall(r"(\w+\d+)[:\s]", i)[0]
+        if ifname is None:
+            raise RuntimeError(f"no available iface associated with {server}")
+
+        path = "find /sys/devices|grep net/%s/statistics" % ifname
+        cmd = (
+            "%s/rx_packets|xargs cat;%s/tx_packets|xargs cat;"
+            "%s/rx_bytes|xargs cat;%s/tx_bytes|xargs cat" % (path, path, path, path)
+        )
+        output = netperf_base.ssh_cmd(server_ctl, cmd).split()[-4:]
+
+        nrx = int(output[0])
+        ntx = int(output[1])
+        nrxb = int(output[2])
+        ntxb = int(output[3])
+
+        nre = int(
+            netperf_base.ssh_cmd(server_ctl, "grep Tcp /proc/net/snmp|tail -1").split()[
+                12
+            ]
+        )
+        state_list = [
+            "rx_pkts",
+            nrx,
+            "tx_pkts",
+            ntx,
+            "rx_byts",
+            nrxb,
+            "tx_byts",
+            ntxb,
+            "re_pkts",
+            nre,
+        ]
+        try:
+            nrx_intr = count_interrupt("virtio.-input")
+            ntx_intr = count_interrupt("virtio.-output")
+            sum = 0
+            for i in range(len(nrx_intr)):
+                state_list.append("rx_intr_%s" % i)
+                state_list.append(nrx_intr[i])
+                sum += nrx_intr[i]
+            state_list.append("rx_intr_sum")
+            state_list.append(sum)
+
+            sum = 0
+            for i in range(len(ntx_intr)):
+                state_list.append("tx_intr_%s" % i)
+                state_list.append(ntx_intr[i])
+                sum += ntx_intr[i]
+            state_list.append("tx_intr_sum")
+            state_list.append(sum)
+
+        except IndexError:
+            ninit = count_interrupt("virtio.")
+            state_list.append("intr")
+            state_list.append(ninit)
+
+        exits = int(netperf_base.ssh_cmd(host, "cat /sys/kernel/debug/kvm/exits"))
+        state_list.append("exits")
+        state_list.append(exits)
+
+        return state_list
+
+    def thread_cmd(params, i, numa_enable, client_s, timeout):
+        fname = "/tmp/netperf.%s.nf" % pid
+        option = "`command -v python python3 | head -1 ` "
+        option += "/tmp/netperf_agent.py %d %s -D 1 -H %s -l %s %s" % (
+            i,
+            client_path,
+            server,
+            int(l) * 1.5,
+            nf_args,
+        )
+        option += " >> %s" % fname
+        netperf_base.netperf_thread(params, numa_enable, client_s, option, fname)
+
+    def all_clients_up():
+        try:
+            content = netperf_base.ssh_cmd(clients[-1], "cat %s" % fname)
+        except Exception:
+            return False
+        if int(sessions) == len(re.findall("MIGRATE", content)):
+            return True
+        return False
+
+    def stop_netperf_clients():
+        if params.get("os_type_client") == "linux":
+            netperf_base.ssh_cmd(
+                clients[-1], params.get("client_kill_linux"), ignore_status=True
+            )
+        else:
+            netperf_base.ssh_cmd(
+                clients[-1], params.get("client_kill_windows"), ignore_status=True
+            )
+
+    def parse_demo_result(fname, sessions):
+        """
+        Process the demo result, remove the noise from the head,
+        and compute the final throughput.
+
+        :param fname: result file name
+        :param sessions: sessions' number
+        """
+        fd = open(fname)
+        lines = fd.readlines()
+        fd.close()
+
+        for i in range(1, len(lines) + 1):
+            if "AF_INET" in lines[-i]:
+                break
+        nresult = i - 1
+        if nresult < int(sessions):
+            test.error(
+                "We couldn't expect this parallelism, expected %s got %s"
+                % (sessions, nresult)
+            )
+
+        niteration = nresult // sessions
+        result = 0.0
+        for this in lines[-sessions * niteration :]:
+            if "Interim" in this:
+                result += float(re.findall(r"Interim result: *(\S+)", this)[0])
+        result = result / niteration
+        test.log.debug("niteration: %s", niteration)
+        return result
+
+    tries = int(params.get("tries", 1))
+    while tries > 0:
+        error_context.context("Start netperf client threads", test.log.info)
+        pid = str(os.getpid())
+        fname = "/tmp/netperf.%s.nf" % pid
+        netperf_base.ssh_cmd(clients[-1], "rm -f %s" % fname)
+        numa_enable = params.get("netperf_with_numa", "yes") == "yes"
+        timeout_netperf_start = int(l) * 0.5
+        client_thread = threading.Thread(
+            target=thread_cmd,
+            kwargs={
+                "params": params,
+                "i": int(sessions),
+                "numa_enable": numa_enable,
+                "client_s": clients[0],
+                "timeout": timeout_netperf_start,
+            },
+        )
+        client_thread.start()
+
+        ret = {}
+        ret["pid"] = pid
+
+        if utils_misc.wait_for(
+            all_clients_up,
+            timeout_netperf_start,
+            0.0,
+            0.2,
+            "Wait until all netperf clients start to work",
+        ):
+            test.log.debug("All netperf clients start to work.")
+
+            # real & effective test starts
+            if get_status_flag:
+                start_state = get_state()
+            ret["mpstat"] = netperf_base.ssh_cmd(
+                host, "mpstat 1 %d |tail -n 1" % (l - 1)
+            )
+            finished_result = netperf_base.ssh_cmd(clients[-1], "cat %s" % fname)
+
+            # stop netperf clients
+            stop_netperf_clients()
+
+            # real & effective test ends
+            if get_status_flag:
+                end_state = get_state()
+                if len(start_state) != len(end_state):
+                    msg = "Initial state does not match end state:\n"
+                    msg += "  start state: %s\n" % start_state
+                    msg += "  end state: %s\n" % end_state
+                    test.log.warning(msg)
+                else:
+                    for i in range(len(end_state) // 2):
+                        ret[end_state[i * 2]] = (
+                            end_state[i * 2 + 1] - start_state[i * 2 + 1]
+                        )
+
+            client_thread.join()
+
+            error_context.context("Testing Results Treatment and Report", test.log.info)
+            f = open(fname, "w")
+            f.write(finished_result)
+            f.close()
+            ret["thu"] = parse_demo_result(fname, int(sessions))
+            return ret
+        else:
+            stop_netperf_clients()
+            tries = tries - 1
+            test.log.debug("left %s times", tries)
diff --git a/provider/virtual_network/netperf_base.py b/provider/virtual_network/netperf_base.py
new file mode 100644
index 0000000000..15df464e1c
--- /dev/null
+++ b/provider/virtual_network/netperf_base.py
@@ -0,0 +1,147 @@
+import logging
+import os
+
+import six
+from avocado.utils import process
+from virttest import data_dir, error_context, remote, utils_misc, utils_test
+
+LOG_JOB = logging.getLogger("avocado.test")
+
+
+@error_context.context_aware
+def record_env_version(test, params, host, server_ctl, fd, test_duration):
+    """
+    Get host kernel/qemu/guest kernel version
+
+    """
+    ver_cmd = params.get("ver_cmd", "rpm -q qemu-kvm")
+    guest_ver_cmd = params.get("guest_ver_cmd", "uname -r")
+
+    test.write_test_keyval({"kvm-userspace-ver": ssh_cmd(host, ver_cmd).strip()})
+    test.write_test_keyval(
+        {"guest-kernel-ver": ssh_cmd(server_ctl, guest_ver_cmd).strip()}
+    )
+    test.write_test_keyval({"session-length": test_duration})
+    fd.write("### kvm-userspace-ver : %s\n" % ssh_cmd(host, ver_cmd).strip())
+    fd.write("### guest-kernel-ver : %s\n" % ssh_cmd(server_ctl, guest_ver_cmd).strip())
+    fd.write("### kvm_version : %s\n" % os.uname()[2])
+    fd.write("### session-length : %s\n" % test_duration)
+
+
+def env_setup(test, params, session, ip, username, shell_port, password):
+    """
+    Prepare the test environment in server/client/host
+
+    """
+    error_context.context("Setup env for %s" % ip)
+    if params.get("env_setup_cmd"):
+        ssh_cmd(session, params.get("env_setup_cmd"), ignore_status=True)
+
+    pkg = params["netperf_pkg"]
+    pkg = os.path.join(data_dir.get_deps_dir(), pkg)
+    remote.scp_to_remote(ip, shell_port, username, password, pkg, "/tmp")
+    ssh_cmd(session, params.get("setup_cmd"))
+
+    agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
+    remote.scp_to_remote(ip, shell_port, username, password, agent_path, "/tmp")
+
+
+def tweak_tuned_profile(params, server_ctl, client, host):
+    """
+    Tweak the configuration with a tuned profile
+
+    """
+    client_tuned_profile = params.get("client_tuned_profile")
+    server_tuned_profile = params.get("server_tuned_profile")
+    host_tuned_profile = params.get("host_tuned_profile")
+    error_context.context("Changing tuned profile of guest", LOG_JOB.info)
+    if server_tuned_profile:
+        ssh_cmd(server_ctl, server_tuned_profile)
+
+    error_context.context("Changing tuned profile of client/host", LOG_JOB.info)
+    if client_tuned_profile:
+        ssh_cmd(client, client_tuned_profile)
+    if host_tuned_profile:
+        ssh_cmd(host, host_tuned_profile)
+
+
+def ssh_cmd(session, cmd, timeout=120, ignore_status=False):
+    """
+    Execute a remote command and return the output
+
+    :param session: a remote shell session or tag for localhost
+    :param cmd: executed command
+    :param timeout: timeout for the command
+    :param ignore_status: whether to ignore the command's exit status
+    """
+    if session == "localhost":
+        o = process.system_output(
+            cmd, timeout=timeout, ignore_status=ignore_status, shell=True
+        ).decode()
+    else:
+        o = session.cmd(cmd, timeout=timeout, ignore_all_errors=ignore_status)
+    return o
+
+
+def netperf_thread(params, numa_enable, client_s, option, fname):
+    """
+    Start a netperf thread on the client
+
+    """
+    cmd = ""
+    if numa_enable:
+        n = abs(int(params.get("numa_node"))) - 1
+        cmd += "numactl --cpunodebind=%s --membind=%s " % (n, n)
+    cmd += option
+    cmd += " >> %s" % fname
+    LOG_JOB.info("Start netperf thread by cmd '%s'", cmd)
+    ssh_cmd(client_s, cmd)
+
+
+def format_result(result, base="17", fbase="2"):
+    """
+    Format the result to a fixed-length string.
+
+    :param result: result to convert
+    :param base: the width of the converted string
+    :param fbase: the number of decimal digits for floats
+    """
+    if isinstance(result, six.string_types):
+        value = "%" + base + "s"
+    elif isinstance(result, int):
+        value = "%" + base + "d"
+    elif isinstance(result, float):
+        value = "%" + base + "." + fbase + "f"
+    else:
+        raise TypeError(f"unexpected result type: {type(result).__name__}")
+    return value % result
+
+
+def netperf_record(results, filter_list, header=False, base="17", fbase="2"):
+    """
+    Record the results in a certain format.
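+
+    Each value is rendered as a fixed-width column (see format_result) and
+    the columns are joined with '|'.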
+
+    :param results: a dict that includes the results for the variables
+    :param filter_list: list of variables to be shown in the record file;
+                        it also fixes the order of the variables
+    :param header: whether to record the variable names as a header line
+                   before the results
+    :param base: the width of a variable
+    :param fbase: the number of decimal digits for floats
+    """
+    key_list = []
+    for key in filter_list:
+        if key in results:
+            key_list.append(key)
+
+    record = ""
+    if header:
+        for key in key_list:
+            record += "%s|" % format_result(key, base=base, fbase=fbase)
+        record = record.rstrip("|")
+        record += "\n"
+    for key in key_list:
+        record += "%s|" % format_result(results[key], base=base, fbase=fbase)
+    record = record.rstrip("|")
+    return record, key_list
diff --git a/provider/virtual_network/network_base.py b/provider/virtual_network/network_base.py
index 39c54c2ae5..12bdb0ad8b 100644
--- a/provider/virtual_network/network_base.py
+++ b/provider/virtual_network/network_base.py
@@ -575,3 +575,100 @@ def set_guest_iface_mtu(test, vm_session, iface_name='', mtu=1500):
     status, output = vm_session.cmd_status_output(cmd)
     if status:
         test.fail("Fail to set mtu on guest interface: %s." % output)
+
+
+def pin_vcpus(vm, cpus):
+    """
+    Pin all vcpus of a VM to the given CPU list.
+
+    :param vm: libvirt vm object
+    :param cpus: sorted list of host cpu ids in the NUMA node
+    :return: number of vcpus pinned
+    """
+    dominfo = vm.dominfo()
+    vcpu_count = int(dominfo.get("CPU(s)", 0))
+
+    for vcpu in range(vcpu_count):
+        cpu = cpus[vcpu]
+        vm.vcpupin(vcpu, str(cpu))
+        LOG.info("Pinned vcpu %d of vm %s to host cpu %s", vcpu, vm.name, cpu)
+    return vcpu_count
+
+
+def get_vhost_tids(vm):
+    """
+    Get vhost thread IDs for a VM.
+
+    Compatible with both environments:
+      - Old: vhost threads are qemu user threads
+      - New: vhost threads are kernel threads named 'vhost-<qemu_pid>'
+
+    :param vm: libvirt vm object
+    :return: list of vhost thread ids
+    """
+    pid = vm.get_pid()
+    if not pid:
+        LOG.warning("Cannot get QEMU PID for VM %s", vm.name)
+        return []
+
+    ps_cmd = (
+        "(ps -L -p {} -o tid,comm | awk '$2~/^vhost/{{print $1; found=1}} END{{exit !found}}') || "
+        "(ps -eLo pid,comm | awk '$2==\"vhost-{}\"{{print $1}}')"
+    ).format(pid, pid)
+
+    out = process.system_output(ps_cmd, shell=True, ignore_status=True).decode()
+    tids = [int(t) for t in out.splitlines() if t.isdigit()]
+    LOG.info("Detected vhost threads for VM %s (pid %s): %s", vm.name, pid, tids or "none")
+    return tids
+
+
+def pin_vhost_threads(vm, vhost_tids, cpus):
+    """
+    Pin vhost threads to the specified host cpus.
+
+    :param vm: libvirt vm object
+    :param vhost_tids: list of vhost thread ids
+    :param cpus: list of host cpu ids
+    """
+    for tid, cpu in zip(vhost_tids, cpus):
+        cmd = "taskset -pc %s %s" % (cpu, tid)
+        process.run(cmd, shell=True)
+        LOG.info("Pinned vhost thread %s of vm %s to cpu %s", tid, vm.name, cpu)
+
+
+def pin_vcpu_vhost_threads(vm, node=None):
+    """
+    Pin VM vCPUs and vhost threads to host CPUs.
+
+    If a NUMA node is provided, pin to CPUs in that node.
+    If node is None, pin sequentially to all available CPUs.
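+
+    vCPUs are pinned to the first CPUs of the list; vhost threads are then
+    pinned to the CPUs that follow them, and vhost pinning is skipped when
+    there are not enough CPUs left.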
+
+    :param vm: libvirt VM object
+    :param node: NUMA node ID or None
+    """
+    try:
+        if node is not None:
+            node = utils_misc.NumaNode(int(node))
+            cpus = sorted(node.cpus)
+        else:
+            cpus = list(range(os.cpu_count()))
+        # pin vCPUs
+        vcpu_count = pin_vcpus(vm, cpus)
+
+        # handle vhost pinning
+        vhost_tids = get_vhost_tids(vm)
+        if not vhost_tids:
+            LOG.info("No vhost threads detected for VM %s, skip vhost pinning", vm.name)
+        elif vcpu_count + len(vhost_tids) > len(cpus):
+            LOG.info("Skip vhost pinning for VM %s: insufficient CPUs", vm.name)
+        else:
+            vhost_cpus = cpus[vcpu_count : vcpu_count + len(vhost_tids)]
+            pin_vhost_threads(vm, vhost_tids, vhost_cpus)
+    except Exception as e:
+        raise exceptions.TestError("Failed to pin VM threads: %s" % e)
diff --git a/provider/virtual_network/vdpa_utils.py b/provider/virtual_network/vdpa_utils.py
new file mode 100644
index 0000000000..6cc222854e
--- /dev/null
+++ b/provider/virtual_network/vdpa_utils.py
@@ -0,0 +1,132 @@
+import logging
+
+from avocado.utils import process
+from virttest import openvswitch, utils_net
+
+LOG_JOB = logging.getLogger("avocado.test")
+
+
+def check_ovs_status():
+    """
+    Check if ovs-vsctl and the openvswitch service are installed and running.
+
+    :return: True if both are available and running, otherwise False
+    :rtype: bool
+    """
+    cmd = "which ovs-vsctl && systemctl status openvswitch.service"
+    return process.system(cmd, ignore_status=True, shell=True) == 0
+
+
+def get_vf_pci_address(nic_netdst):
+    """
+    Get the VF PCI address from a given network destination.
+
+    :param nic_netdst: network destination address
+    :type nic_netdst: str
+
+    :return: VF PCI address
+    :rtype: str
+    """
+    cmd = (
+        "vdpa dev show | grep {0} | grep -o 'pci/[^[:space:]]*' | "
+        "awk -F/ '{{print $2}}'"
+    ).format(nic_netdst)
+    return process.system_output(cmd, shell=True).decode().strip()
+
+
+def get_pf_pci_address(vf_pci):
+    """
+    Get the PF PCI address using the VF PCI address.
+
+    :param vf_pci: VF PCI address
+    :type vf_pci: str
+
+    :return: PF PCI address
+    :rtype: str
+    """
+    cmd = (
+        "grep PCI_SLOT_NAME /sys/bus/pci/devices/{0}/physfn/uevent | cut -d'=' -f2"
+    ).format(vf_pci)
+    return process.system_output(cmd, shell=True).decode().strip()
+
+
+def get_pf_port(pf_pci):
+    """
+    Get the port for the PF PCI address.
+
+    :param pf_pci: PF PCI address
+    :type pf_pci: str
+
+    :return: port name
+    :rtype: str
+    """
+    cmd = "ls /sys/bus/pci/devices/{0}/net/ | head -n 1".format(pf_pci)
+    return process.system_output(cmd, shell=True).decode().strip()
+
+
+def add_flows_to_ovs_bridge(bridge, ovs):
+    """
+    Add flow rules to the given OVS bridge.
+
+    :param bridge: OVS bridge name
+    :type bridge: str
+    :param ovs: OVS instance
+    :type ovs: OpenVSwitch
+    """
+    utils_net.openflow_manager(
+        bridge, "add-flow", flow_options="in_port=1,idle_timeout=0,actions=output:2"
+    )
+    utils_net.openflow_manager(
+        bridge, "add-flow", flow_options="in_port=2,idle_timeout=0,actions=output:1"
+    )
+    utils_net.openflow_manager(bridge, "dump-flows")
+
+
+class OVSHandler:
+    def __init__(self, vm):
+        self.vm = vm
+        if check_ovs_status():
+            self.ovs = openvswitch.OpenVSwitchControl()
+        else:
+            self.ovs = None
+
+    def get_vdpa_ovs_info(self, add_flows=True, return_ports=True):
+        """
+        Get OVS bridge and port information.
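+
+        For vdpa NICs the PF port is resolved from the VF PCI address before
+        looking up its bridge; for other NICs the bridge of nic.netdst is used.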
+
+        :param add_flows: whether to add flow rules to the OVS bridge
+        :type add_flows: bool
+        :param return_ports: whether to return port names
+        :type return_ports: bool
+
+        :return: list of target interfaces (bridges and ports) if return_ports
+                 is True, else an empty list
+        :rtype: list
+        """
+        if not self.ovs:
+            LOG_JOB.error("Could not find an existing Open vSwitch service")
+            return []
+
+        target_ifaces = []
+
+        for nic in self.vm.virtnet:
+            ovs_br = None
+            if nic.nettype == "vdpa":
+                vf_pci = get_vf_pci_address(nic.netdst)
+                pf_pci = get_pf_pci_address(vf_pci)
+                port = get_pf_port(pf_pci)
+                manager, ovs_br = utils_net.find_current_bridge(port)
+            else:
+                try:
+                    manager, ovs_br = utils_net.find_current_bridge(nic.netdst)
+                except NotImplementedError:
+                    ovs_br = None
+            if ovs_br:
+                if add_flows:
+                    add_flows_to_ovs_bridge(ovs_br, self.ovs)
+                if return_ports:
+                    if manager:
+                        ports = set(manager.list_ports(ovs_br))
+                        target_ifaces.extend(ports)
+                    target_ifaces.append(ovs_br)
+
+        return target_ifaces
diff --git a/provider/windows/win_driver_utils.py b/provider/windows/win_driver_utils.py
new file mode 100644
index 0000000000..7d6e4441ee
--- /dev/null
+++ b/provider/windows/win_driver_utils.py
@@ -0,0 +1,458 @@
+"""
+Windows driver utility functions.
+
+:copyright: Red Hat Inc.
+"""
+
+import logging
+import os
+import re
+import time
+
+import aexpect
+from virttest import data_dir, error_context, utils_misc, utils_test
+from virttest.utils_version import VersionInterval
+from virttest.utils_windows import system, virtio_win, wmic
+
+LOG_JOB = logging.getLogger("avocado.test")
+
+
+QUERY_TIMEOUT = 360
+INSTALL_TIMEOUT = 360
+OPERATION_TIMEOUT = 120
+
+driver_info_dict = {
+    "netkvm": {
+        "hwid": '"PCI\\VEN_1AF4&DEV_1000" "PCI\\VEN_1AF4&DEV_1041"',
+        "device_name": "Red Hat VirtIO Ethernet Adapter",
+    },
+    "viorng": {
+        "hwid": '"PCI\\VEN_1AF4&DEV_1005" "PCI\\VEN_1AF4&DEV_1044"',
+        "device_name": "VirtIO RNG Device",
+    },
+    "vioser": {
+        "hwid": '"PCI\\VEN_1AF4&DEV_1003" "PCI\\VEN_1AF4&DEV_1043"',
+        "device_name": "VirtIO Serial Driver",
+    },
+    "balloon": {
+        "hwid": '"PCI\\VEN_1AF4&DEV_1002" "PCI\\VEN_1AF4&DEV_1045"',
+        "device_name": "VirtIO Balloon Driver",
+    },
+    "pvpanic": {"hwid": '"ACPI\\QEMU0001"', "device_name": "QEMU PVPanic Device"},
+    "vioinput": {
+        "hwid": '"PCI\\VEN_1AF4&DEV_1052"',
+        "device_name": "VirtIO Input Driver",
+    },
+    "viofs": {"hwid": '"PCI\\VEN_1AF4&DEV_105A"', "device_name": "VirtIO FS Device"},
+    "viostor": {
+        "hwid": '"PCI\\VEN_1AF4&DEV_1001" "PCI\\VEN_1AF4&DEV_1042"',
+        "device_name": "Red Hat VirtIO SCSI controller",
+    },
+    "vioscsi": {
+        "hwid": '"PCI\\VEN_1AF4&DEV_1004" "PCI\\VEN_1AF4&DEV_1048"',
+        "device_name": "Red Hat VirtIO SCSI pass-through controller",
+    },
+    "fwcfg": {
+        "hwid": '"ACPI\\VEN_QEMU&DEV_0002" "ACPI\\QEMU0002"',
+        "device_name": "QEMU FWCfg Device",
+    },
+    "viomem": {
+        "hwid": r'"PCI\VEN_1AF4&DEV_1002" "PCI\VEN_1AF4&DEV_1058"',
+        "device_name": "VirtIO Viomem Driver",
+    },
+}
+
+
+def _pnpdrv_info(session, name_pattern, props=None):
+    """Get the driver props, e.g. InfName"""
+    cmd = wmic.make_query(
+        "path win32_pnpsigneddriver",
+        "DeviceName like '%s'" % name_pattern,
+        props=props,
+        get_swch=wmic.FMT_TYPE_LIST,
+    )
+    return wmic.parse_list(session.cmd(cmd, timeout=QUERY_TIMEOUT))
+
+
+def uninstall_driver(session, test, devcon_path, driver_name, device_name, device_hwid):
+    """
+    Uninstall driver.
+
+    :param session: The guest session object.
+    :param test: kvm test object
+    :param devcon_path: devcon.exe path.
+    :param driver_name: driver name.
+    :param device_name: device name.
+    :param device_hwid: device hardware id.
+    """
+    devcon_path = utils_misc.set_winutils_letter(session, devcon_path)
+    status, output = session.cmd_status_output(
+        "dir %s" % devcon_path, timeout=OPERATION_TIMEOUT
+    )
+    if status:
+        test.error("devcon.exe not found, details: %s" % output)
+    LOG_JOB.info("Uninstalling previously installed driver")
+    # find the inf name and remove the repeated one
+    inf_list_all = _pnpdrv_info(session, device_name, ["InfName"])
+    inf_list = list(set(inf_list_all))
+
+    # pnputil flags are available starting in Windows 10,
+    # version 1607, build 14393 and later
+    build_ver = system.version(session).split(".")[2]
+    if int(build_ver) > 14393:
+        uninst_store_cmd = "pnputil /delete-driver %s /uninstall /force" % inf_list[0]
+    else:
+        uninst_store_cmd = "pnputil /f /d %s" % inf_list[0]
+    status, output = session.cmd_status_output(uninst_store_cmd, INSTALL_TIMEOUT)
+    # for viostor and vioscsi, a system reboot is needed
+    # acceptable status: OK(0), REBOOT(3010)
+    if status not in (0, 3010):
+        test.error(
+            "Failed to uninstall driver '%s' from store, "
+            "details:\n%s" % (driver_name, output)
+        )
+    uninst_cmd = "%s remove %s" % (devcon_path, device_hwid)
+    status, output = session.cmd_status_output(uninst_cmd, INSTALL_TIMEOUT)
+    # acceptable status: OK(0), REBOOT(1)
+    if status > 1:
+        test.error(
+            "Failed to uninstall driver '%s', details:\n%s" % (driver_name, output)
+        )
+
+
+def get_driver_inf_path(session, test, media_type, driver_name):
+    """
+    Get the driver inf path from the virtio-win iso, such as E:\viofs\2k19\amd64.
+
+    :param session: The guest session object.
+    :param test: kvm test object.
+    :param media_type: media type.
+    :param driver_name: driver name.
+    """
+    try:
+        get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type)
+        get_product_dirname = getattr(virtio_win, "product_dirname_%s" % media_type)
+        get_arch_dirname = getattr(virtio_win, "arch_dirname_%s" % media_type)
+    except AttributeError:
+        test.error("Not supported virtio win media type '%s'" % media_type)
+    viowin_ltr = get_drive_letter(session)
+    if not viowin_ltr:
+        test.error("Could not find virtio-win drive in guest")
+    guest_name = get_product_dirname(session)
+    if not guest_name:
+        test.error("Could not get product dirname of the vm")
+    guest_arch = get_arch_dirname(session)
+    if not guest_arch:
+        test.error("Could not get architecture dirname of the vm")
+
+    inf_middle_path = (
+        "{name}\\{arch}" if media_type == "iso" else "{arch}\\{name}"
+    ).format(name=guest_name, arch=guest_arch)
+    inf_find_cmd = 'dir /b /s %s\\%s.inf | findstr "\\%s\\\\"'
+    inf_find_cmd %= (viowin_ltr, driver_name, inf_middle_path)
+    inf_path = session.cmd(inf_find_cmd, timeout=OPERATION_TIMEOUT).strip()
+    LOG_JOB.info("Found inf file '%s'", inf_path)
+    return inf_path
+
+
+@error_context.context_aware
+def install_driver_by_virtio_media(
+    session, test, devcon_path, media_type, driver_name, device_hwid
+):
+    """
+    Install a driver from the virtio-win media.
+
+    :param session: The guest session object.
+    :param test: kvm test object
+    :param devcon_path: devcon.exe path.
+    :param media_type: media type.
+    :param driver_name: driver name.
+    :param device_hwid: device hardware id.
+ """ + devcon_path = utils_misc.set_winutils_letter(session, devcon_path) + status, output = session.cmd_status_output( + "dir %s" % devcon_path, timeout=OPERATION_TIMEOUT + ) + if status: + test.error("Not found devcon.exe, details: %s" % output) + error_context.context("Installing target driver", LOG_JOB.info) + installed_any = False + for hwid in device_hwid.split(): + output = session.cmd_output("%s find %s" % (devcon_path, hwid)) + if re.search("No matching devices found", output, re.I): + continue + inf_path = get_driver_inf_path(session, test, media_type, driver_name) + inst_cmd = "%s updateni %s %s" % (devcon_path, inf_path, hwid) + status, output = session.cmd_status_output(inst_cmd, INSTALL_TIMEOUT) + # acceptable status: OK(0), REBOOT(1) + if status > 1: + test.fail( + "Failed to install driver '%s', details:\n%s" % (driver_name, output) + ) + installed_any |= True + if not installed_any: + test.error("Failed to find target devices by hwids: '%s'" % device_hwid) + + +def autoit_installer_check(params, session): + """ + Check if AUTOIT3.EXE is running. + :param params: the dict used for parameters + :param session: The guest session object. + :return: True if it is running. + """ + autoit_check_cmd = params.get( + "autoit_check_cmd", "tasklist |findstr /i autoit3_.*exe" + ) + try: + return session.cmd_status(autoit_check_cmd) == 0 + except ( + aexpect.ShellTimeoutError, + aexpect.ShellProcessTerminatedError, + aexpect.ShellStatusError, + ): + LOG_JOB.info("VM is rebooting...") + return False + + +def run_installer(vm, session, test, params, run_installer_cmd): + """ + Install/uninstall/repair virtio-win drivers and qxl,spice and + qemu-ga-win by installer. + If installer(virtio-win) version is in [1.9.37, 1.9.40] + then installer itself will restart vm for installation and + uninstallation function; otherwise there is no need to reboot guest. + While for repair function, installer itself always restart vm by + itself; + + :param vm: vm object. + :param session: The guest session object. + :param test: kvm test object + :param params: the dict used for parameters + :param run_installer_cmd: install/uninstall/repair cmd. + :return session: a new session after restart of installer + """ + cdrom_virtio = params["cdrom_virtio"] + installer_restart_version = params.get( + "installer_restart_version", "[1.9.37.0, 1.9.40.0]" + ) + cdrom_virtio_path = os.path.basename( + utils_misc.get_path(data_dir.get_data_dir(), cdrom_virtio) + ) + match = re.search(r"virtio-win-(\d+\.\d+(?:\.\d+)?-\d+)", cdrom_virtio_path) + cdrom_virtio_version = re.sub("-", ".", match.group(1)) + # run installer cmd + run_installer_cmd = utils_misc.set_winutils_letter(session, run_installer_cmd) + session.cmd(run_installer_cmd) + + if not utils_misc.wait_for( + lambda: not autoit_installer_check(params, session), 240, 2, 2 + ): + test.fail("Autoit exe stop there for 240s, please have a check.") + restart_con_ver = cdrom_virtio_version in VersionInterval(installer_restart_version) + restart_con_repair = "repair" in run_installer_cmd + if restart_con_ver or restart_con_repair: + # Wait for vm re-start by installer itself + if not utils_misc.wait_for(lambda: not session.is_responsive(), 120, 5, 5): + test.fail( + "The previous session still exists,seems that the vm doesn't restart." + ) + session = vm.wait_for_login(timeout=360) + # for the early virtio-win instller, rebooting is needed. 
+    if cdrom_virtio_version in VersionInterval("(,1.9.37.0)"):
+        session = vm.reboot(session)
+    return session
+
+
+def remove_driver_by_msi(session, vm, params):
+    """
+    Remove virtio-win drivers by msi.
+
+    :param session: The guest session object
+    :param vm: vm object
+    :param params: the dict used for parameters
+    :return: a new session after the restart of the os
+    """
+    media_type = params.get("virtio_win_media_type", "iso")
+    get_drive_letter = getattr(virtio_win, "drive_letter_%s" % media_type)
+    drive_letter = get_drive_letter(session)
+    msi_path = drive_letter + "\\" + params["msi_name"]
+    msi_uninstall_cmd = params["msi_uninstall_cmd"] % msi_path
+    vm.send_key("meta_l-d")
+    # msi uninstall cmd will restart the os.
+    session.cmd(msi_uninstall_cmd)
+    time.sleep(15)
+    return vm.wait_for_login(timeout=360)
+
+
+def copy_file_to_samepath(session, test, params):
+    """
+    Copy autoit scripts and the installer tool to the same path.
+
+    :param session: The guest session object.
+    :param test: kvm test object
+    :param params: the dict used for parameters
+    """
+    LOG_JOB.info("Copy autoit scripts and virtio-win-guest-tools.exe to the same path.")
+    dst_path = r"C:\\"
+    vol_virtio_key = "VolumeName like '%virtio-win%'"
+    vol_virtio = utils_misc.get_win_disk_vol(session, vol_virtio_key)
+
+    installer_path = r"%s:\%s" % (vol_virtio, "virtio-win-guest-tools.exe")
+    install_script_path = utils_misc.set_winutils_letter(
+        session, params["install_script_path"]
+    )
+    repair_script_path = utils_misc.set_winutils_letter(
+        session, params["repair_script_path"]
+    )
+    uninstall_script_path = utils_misc.set_winutils_letter(
+        session, params["uninstall_script_path"]
+    )
+    src_files = [
+        installer_path,
+        install_script_path,
+        repair_script_path,
+        uninstall_script_path,
+    ]
+    if params.get("msi_name"):
+        msi_path = r"%s:\%s" % (vol_virtio, params["msi_name"])
+        uninstall_msi_script_path = utils_misc.set_winutils_letter(
+            session, params["uninstall_msi_script_path"]
+        )
+        src_files.extend([msi_path, uninstall_msi_script_path])
+
+    for src_file in src_files:
+        copy_cmd = "xcopy %s %s /Y" % (src_file, dst_path)
+        status, output = session.cmd_status_output(copy_cmd)
+        if status != 0:
+            test.error("Copy file error, the detailed info:\n%s." % output)
+
+
+def enable_driver(session, test, cmd):
+    """
+    Enable driver.
+
+    :param session: The guest session object
+    :param test: kvm test object
+    :param cmd: Driver enable cmd
+    """
+    cmd = utils_misc.set_winutils_letter(session, cmd)
+    status, output = session.cmd_status_output(cmd)
+    if status != 0:
+        test.fail("Failed to enable driver, %s" % output)
+
+
+def disable_driver(session, vm, test, cmd):
+    """
+    Disable driver.
+
+    :param session: The guest session object
+    :param vm: vm object
+    :param test: kvm test object
+    :param cmd: Driver disable command
+    """
+    cmd = utils_misc.set_winutils_letter(session, cmd)
+    status, output = session.cmd_status_output(cmd)
+    if status != 0:
+        if "reboot" in output:
+            session = vm.reboot(session)
+        else:
+            test.fail("Failed to disable driver, %s" % output)
+    return session
+
+
+def get_device_id(session, test, driver_name):
+    """
+    Get the device id from the guest.
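+
+    The DeviceID reported by WMI is cross-checked against the known
+    hardware IDs of the driver before it is returned.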
+
+    :param session: The guest session object
+    :param test: kvm test object
+    :param driver_name: Driver name
+    """
+    device_name = driver_info_dict[driver_name]["device_name"]
+    device_hwid = driver_info_dict[driver_name]["hwid"]
+
+    output = _pnpdrv_info(session, device_name, ["DeviceID"])
+    # workaround for viostor/vioscsi to get the data device id
+    device_id = output[0]
+    device_id = "&".join(device_id.split("&"))
+    find_devices = False
+    for hwid in device_hwid.split():
+        hwid = hwid.split('"')[1]
+        if hwid in device_id:
+            find_devices = True
+    if not find_devices:
+        test.fail("Didn't find driver info from guest %s" % output)
+    return device_id
+
+
+def load_driver(session, test, params, load_method="enable"):
+    """
+    Load driver.
+
+    :param session: The guest session object
+    :param test: kvm test object
+    :param params: the dict used for parameters
+    :param load_method: Load driver method
+    """
+    driver_name = params["driver_name"]
+    devcon_path = params["devcon_path"]
+    if load_method != "enable":
+        media_type = params.get("virtio_win_media_type", "iso")
+        device_hwid = driver_info_dict[driver_name]["hwid"]
+        install_driver_by_virtio_media(
+            session, test, devcon_path, media_type, driver_name, device_hwid
+        )
+    else:
+        device_id = get_device_id(session, test, driver_name)
+        cmd = '%s enable "@%s"' % (devcon_path, device_id)
+        enable_driver(session, test, cmd)
+    utils_test.qemu.windrv_verify_running(session, test, driver_name)
+
+
+def unload_driver(session, vm, test, params, load_method="enable"):
+    """
+    Unload driver.
+
+    :param session: The guest session object
+    :param vm: vm object
+    :param test: kvm test object
+    :param params: the dict used for parameters
+    :param load_method: Load driver method
+    """
+    driver_name = params["driver_name"]
+    devcon_path = params["devcon_path"]
+    if load_method != "enable":
+        device_name = driver_info_dict[driver_name]["device_name"]
+        device_hwid = driver_info_dict[driver_name]["hwid"]
+        uninstall_driver(
+            session, test, devcon_path, driver_name, device_name, device_hwid
+        )
+    else:
+        device_id = get_device_id(session, test, driver_name)
+        cmd = '%s disable "@%s"' % (devcon_path, device_id)
+        session = disable_driver(session, vm, test, cmd)
+    return session
+
+
+def memory_leak_check(vm, test, params, load_method="enable"):
+    """
+    In order to let the driver verifier catch memory leaks, the driver
+    should be unloaded after the driver function. Note that if you want to
+    use this function, the driver verifier should be enabled before the
+    driver function.
+
+    :param vm: vm object
+    :param test: kvm test object
+    :param params: the dict used for parameters
+    :param load_method: Load driver method
+    """
+    session = vm.wait_for_login()
+    session = unload_driver(session, vm, test, params, load_method)
+    time.sleep(10)
+    if vm.is_alive() is False:
+        test.fail(
+            "VM is not alive after uninstalling the driver; please check if it is a memory leak"
+        )
+    if load_method != "enable":
+        session = vm.reboot(session)
+    load_driver(session, test, params, load_method)
+    session.close()