diff --git a/qemu/tests/cfg/hotplug_mem_migration.cfg b/qemu/tests/cfg/hotplug_mem_migration.cfg
index 3c6b652df1..34fd6bbe21 100644
--- a/qemu/tests/cfg/hotplug_mem_migration.cfg
+++ b/qemu/tests/cfg/hotplug_mem_migration.cfg
@@ -32,6 +32,17 @@
     numa_test = 'numactl -m %s dd if=/dev/urandom of=/tmp/numa_test/test '
     numa_test += 'bs=1k count=%d && rm -rf /tmp/numa_test/'
     stress_args = '--cpu 4 --io 4 --vm 2 --vm-bytes 4096M'
+    take_regular_screendumps = no # FIXME:
+    store_vm_info = no # FIXME:
+    nodes = node1 node2
+    node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
+    node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
+    pools = p1
+    vm_node = node1
+    mig_dest_node = node2
+    pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}]
+    image_pool_name_image1 = p1
+    image_name_image1 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2
     variants with_cache:
         - @default:
         - enable_dirty_ring:
diff --git a/qemu/tests/cfg/migrate.cfg b/qemu/tests/cfg/migrate.cfg
index 9699251ecd..4bc6a20e9f 100644
--- a/qemu/tests/cfg/migrate.cfg
+++ b/qemu/tests/cfg/migrate.cfg
@@ -14,6 +14,17 @@
     # you can uncomment the following line to enable the state
     # check
     # vmstate_check = yes
+    take_regular_screendumps = no # FIXME:
+    store_vm_info = no # FIXME:
+    nodes = node1 node2
+    node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
+    node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
+    pools = p1
+    vm_node = node1
+    mig_dest_node = node2
+    pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}]
+    image_pool_name_image1 = p1
+    image_name_image1 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2
     variants:
         - @default:
         - with_filter_off:
diff --git a/qemu/tests/cfg/multi_host_basic.cfg b/qemu/tests/cfg/multi_host_basic.cfg
new file mode 100644
index 0000000000..7db80b6c2a
--- /dev/null
+++ b/qemu/tests/cfg/multi_host_basic.cfg
@@ -0,0 +1,70 @@
+- multi_host_basic:
+    virt_test_type = qemu
+    type = multi_host_basic
+    restart_vm = yes
+    kill_vm = yes
+    login_timeout = 240
+    check_vm_needs_restart = no # FIXME: Work around for it
+    take_regular_screendumps = no
+    store_vm_info = no
+    variants:
+        - @default:
+        - with_hotplug:
+            variants:
+                - with_block:
+                    hotplug_images = "stg0"
+                    images += " ${hotplug_images}"
+                    boot_drive_stg0 = no
+                    image_name_stg0 = /mnt/images/storage0
+                    image_size_stg0 = 1G
+                    remove_image_stg0 = yes
+                    force_create_image_stg0 = yes
+
+        - with_unplug:
+            variants:
+                - with_block:
+                    unplug_images = "stg0"
+                    images += " ${unplug_images}"
+                    image_name_stg0 = /mnt/images/storage0
+                    image_size_stg0 = 1G
+                    remove_image_stg0 = yes
+                    force_create_image_stg0 = yes
+
+    variants:
+        - @default:
+            reboot = yes
+            nodes = node1 node2
+            vms = vm1 vm2
+            node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
+            node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}]
{"key": "hostname", "operator": "contains", "values": "redhat.com"}] + vm_node_vm1 = node1 + vm_node_vm2 = node2 + - with_pool: + reboot = yes + nodes = node1 node2 + vms = vm1 vm2 + node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}] + node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}] + vm_node_vm1 = node1 + vm_node_vm2 = node2 + image_format = qcow2 + + pools = p1 p2 p3 + pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "filesystem"}, {"key": "access.nodes", "operator": "==", "values": ['node1']}] + pool_selectors_p2 = [{"key": "type", "operator": "==", "values": "filesystem"}, {"key": "access.nodes", "operator": "==", "values": ['node2']}] + pool_selectors_p3 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": "node2"}] + image_pool_name_image1_vm1 = p1 + image_pool_name_image1_vm2 = p2 + variants: + - @default: + - with_migration: + node_selectors_node1 = [{"key": "cpu_model_name", "operator": "eq", "values": "Intel(R) Xeon(R) Silver 4116 CPU @ 2.10GHz"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}] + node_selectors_node2 = [{"key": "cpu_model_name", "operator": "==", "values": "Intel(R) Xeon(R) Silver 4116 CPU @ 2.10GHz"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}] + reboot = no + vms = vm4 + vm_node_vm4 = node1 + mig_dest_node_vm4 = node2 + pool_selectors_p3 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}] + image_pool_name_image1_vm4 = p3 + image_name_image1_vm4 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2 + image_format_image1_vm4 = qcow2 diff --git a/qemu/tests/cfg/nic_hotplug.cfg b/qemu/tests/cfg/nic_hotplug.cfg index d62ecd8399..9a4d41ec66 100644 --- a/qemu/tests/cfg/nic_hotplug.cfg +++ b/qemu/tests/cfg/nic_hotplug.cfg @@ -87,6 +87,17 @@ requires_root = yes type = migration_after_nichotplug kill_vm = yes + take_regular_screendumps = no # FIXME: + store_vm_info = no # FIXME: + nodes = node1 node2 + node_selectors_node1 = [{"key": "cpu_vendor_id", "operator": "eq", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}] + node_selectors_node2 = [{"key": "cpu_vendor_id", "operator": "==", "values": "GenuineIntel"}, {"key": "hostname", "operator": "contains", "values": "redhat.com"}] + pools = p1 + vm_node = node1 + mig_dest_node = node2 + pool_selectors_p1 = [{"key": "type", "operator": "==", "values": "nfs"}, {"key": "access.nodes", "operator": "contains", "values": ["node1", "node2"]}] + image_pool_name_image1 = p1 + image_name_image1 = /mnt/images/rhel950-64-virtio-scsi-ovmf.qcow2 variants: - after_nichotplug: with_unplug = no diff --git a/qemu/tests/hotplug_mem_migration.py b/qemu/tests/hotplug_mem_migration.py index 12f7bb4f92..2b5bc620de 100644 --- a/qemu/tests/hotplug_mem_migration.py +++ b/qemu/tests/hotplug_mem_migration.py @@ -79,7 +79,8 @@ def _compare_mem_size(online_mem, expect_mem_size): # do migration mig_timeout = params.get_numeric("mig_timeout", 1200, float) mig_protocol = params.get("migration_protocol", "tcp") - vm.migrate(mig_timeout, mig_protocol, env=env) + dst_node = params.get("mig_dest_node") + vm.migrate(mig_timeout, mig_protocol, dest_host=dst_node, env=env) for target_mem in 
         hotplug_test.unplug_memory(vm, target_mem)
         hotplug_test.check_memory(vm)
diff --git a/qemu/tests/migration.py b/qemu/tests/migration.py
index eb9aebd42b..bd6b6b9e6b 100644
--- a/qemu/tests/migration.py
+++ b/qemu/tests/migration.py
@@ -229,8 +229,10 @@ def guest_stress_deamon():
             for func in pre_migrate:
                 func(vm, params, test)
             if i % 2 == 0:
+                dst_node = params.get("mig_dest_node")
                 test.log.info("Round %s ping...", str(i / 2))
             else:
+                dst_node = params.get("vm_node")
                 test.log.info("Round %s pong...", str(i / 2))
             try:
                 vm.migrate(
@@ -239,6 +241,7 @@
                     mig_cancel_delay,
                     offline,
                     check,
+                    dest_host=dst_node,
                     migration_exec_cmd_src=mig_exec_cmd_src,
                     migration_exec_cmd_dst=mig_exec_cmd_dst,
                     migrate_capabilities=capabilities,
diff --git a/qemu/tests/migration_after_nichotplug.py b/qemu/tests/migration_after_nichotplug.py
index 64607fb896..a26da4dc7a 100644
--- a/qemu/tests/migration_after_nichotplug.py
+++ b/qemu/tests/migration_after_nichotplug.py
@@ -131,7 +131,8 @@ def check_nic_is_empty():
         set_link(nic_name, up=True)
 
     error_context.context("Migrate from source VM to Destination VM", test.log.info)
-    vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env)
+    dst_node = params.get("mig_dest_node")
+    vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, dest_host=dst_node, env=env)
 
     if with_unplug:
         error_context.context(
diff --git a/qemu/tests/migration_with_file_transfer.py b/qemu/tests/migration_with_file_transfer.py
index 279a6b6b98..9379a59ef5 100644
--- a/qemu/tests/migration_with_file_transfer.py
+++ b/qemu/tests/migration_with_file_transfer.py
@@ -32,17 +32,18 @@ def run(test, params, env):
     host_path = "/tmp/file-%s" % utils_misc.generate_random_string(6)
     host_path_returned = "%s-returned" % host_path
     guest_path = params.get("guest_path", "/tmp/file")
-    file_size = params.get("file_size", "500")
+    file_size = params.get("file_size", "1000")
     transfer_timeout = int(params.get("transfer_timeout", "240"))
     if mig_protocol == "exec":
         mig_file = os.path.join(
             test.tmpdir, "tmp-%s" % utils_misc.generate_random_string(8)
         )
+    cnt = 0
 
     try:
-        process.run("dd if=/dev/urandom of=%s bs=1M count=%s" % (host_path, file_size))
+        process.run("dd if=/dev/zero of=%s bs=1M count=%s" % (host_path, file_size))
 
-        def run_and_migrate(bg):
+        def run_and_migrate(bg, cnt):
             bg.start()
             try:
                 while bg.is_alive():
@@ -54,14 +55,20 @@ def run_and_migrate(bg):
                     if mig_protocol == "exec" and migration_exec_cmd_src:
                         migration_exec_cmd_src %= mig_file  # pylint: disable=E0606
                         migration_exec_cmd_dst %= mig_file
+                    if cnt % 2 == 0:
+                        dest_host = params.get("mig_dest_node")
+                    else:
+                        dest_host = params.get("vm_node")
                     vm.migrate(
                         mig_timeout,
                         mig_protocol,
                         mig_cancel_delay,
+                        dest_host=dest_host,
                         env=env,
                         migration_exec_cmd_src=migration_exec_cmd_src,
                         migration_exec_cmd_dst=migration_exec_cmd_dst,
                     )
+                    cnt += 1
             except Exception:
                 # If something bad happened in the main thread, ignore
                 # exceptions raised in the background thread
@@ -69,6 +76,7 @@ def run_and_migrate(bg):
                 raise
             else:
                 bg.join()
+            return cnt
 
         error_context.context(
             "transferring file to guest while migrating", test.log.info
@@ -78,7 +86,7 @@ def run_and_migrate(bg):
             (host_path, guest_path),
             dict(verbose=True, timeout=transfer_timeout),
        )
-        run_and_migrate(bg)
+        cnt = run_and_migrate(bg, cnt)
 
         error_context.context(
            "transferring file back to host while migrating", test.log.info
@@ -88,7 +96,7 @@ def run_and_migrate(bg):
             (guest_path, host_path_returned),
             dict(verbose=True, timeout=transfer_timeout),
         )
-        run_and_migrate(bg)
+        run_and_migrate(bg, cnt)
 
         # Make sure the returned file is identical to the original one
         error_context.context("comparing hashes", test.log.info)
diff --git a/qemu/tests/migration_with_netperf.py b/qemu/tests/migration_with_netperf.py
index f952052825..6cf734821b 100644
--- a/qemu/tests/migration_with_netperf.py
+++ b/qemu/tests/migration_with_netperf.py
@@ -124,10 +124,20 @@
         m_count = 0
         while netperf_client_h.is_netperf_running():
             m_count += 1
+            if m_count % 2 == 0:
+                dest_host = params.get("vm_node")
+            else:
+                dest_host = params.get("mig_dest_node")
             error_context.context(
                 "Start migration iterations: %s " % m_count, test.log.info
             )
-            vm.migrate(mig_timeout, mig_protocol, mig_cancel_delay, env=env)
+            vm.migrate(
+                mig_timeout,
+                mig_protocol,
+                mig_cancel_delay,
+                dest_host=dest_host,
+                env=env,
+            )
     finally:
         if netperf_server_g:
             if netperf_server_g.is_server_running():
diff --git a/qemu/tests/migration_with_reboot.py b/qemu/tests/migration_with_reboot.py
index 47d5342ca9..0f0a1fe889 100644
--- a/qemu/tests/migration_with_reboot.py
+++ b/qemu/tests/migration_with_reboot.py
@@ -48,15 +48,22 @@ def run(test, params, env):
     bg = utils_misc.InterruptedThread(
         vm.reboot, kwargs={"session": session, "timeout": login_timeout}
     )
+    bg.daemon = True
     bg.start()
     try:
+        cnt = 0
         while bg.is_alive():
             for func in pre_migrate:
                 func(vm, params, test)
+            if cnt % 2 == 0:
+                dest_host = params.get("mig_dest_node")
+            else:
+                dest_host = params.get("vm_node")
             vm.migrate(
                 mig_timeout,
                 mig_protocol,
                 mig_cancel_delay,
+                dest_host=dest_host,
                 env=env,
                 migration_exec_cmd_src=migration_exec_cmd_src,
                 migration_exec_cmd_dst=migration_exec_cmd_dst,
@@ -64,6 +71,7 @@
             # run some functions after migrate finish.
             for func in post_migrate:
                 func(vm, params, test)
+            cnt += 1
     except Exception:
         # If something bad happened in the main thread, ignore exceptions
         # raised in the background thread
diff --git a/qemu/tests/migration_with_speed_measurement.py b/qemu/tests/migration_with_speed_measurement.py
index 1cd06c1891..d4e8c6b14c 100644
--- a/qemu/tests/migration_with_speed_measurement.py
+++ b/qemu/tests/migration_with_speed_measurement.py
@@ -141,7 +141,10 @@ def get_migration_statistic(vm):
     time.sleep(2)
 
     clonevm = vm.migrate(
-        mig_timeout, mig_protocol, not_wait_for_migration=True, env=env
+        mig_timeout, mig_protocol,
+        not_wait_for_migration=True,
+        dest_host=params.get("mig_dest_node"),
+        env=env
     )
 
     mig_speed = int(float(utils_misc.normalize_data_size(mig_speed, "M")))
diff --git a/qemu/tests/multi_host_basic.py b/qemu/tests/multi_host_basic.py
new file mode 100644
index 0000000000..4222783ce1
--- /dev/null
+++ b/qemu/tests/multi_host_basic.py
@@ -0,0 +1,127 @@
+import logging
+
+from virttest import data_dir, error_context
+from virttest.vt_vmm.api import vmm
+from virttest.vt_vmm.utils.instance_spec import qemu_spec
+
+LOG = logging.getLogger("avocado." + __name__)
+
+
+@error_context.context_aware
+def run(test, params, env):
+    """
+    :param test: QEMU test object
+    :param params: Dictionary with the test parameters
+    :param env: Dictionary with test environment.
+    """
+
+    def block_hotplug(vm, image_name):
+        """
+        Hotplug a disk device and verify it via QMP and lsblk in the guest.
+
+        :param vm: VM object to hotplug the disk into
+        :param image_name: Image name of the hotplug disk
+ """ + LOG.info("Hotplug the image: %s", image_name) + vm_params = vm.params + node = vmm.get_instance_node(vm.instance_id) + disk_spec = qemu_spec.define_disk_device_spec( + vm.name, vm_params, node.tag, image_name + ) + vmm.attach_instance_device(vm.instance_id, disk_spec) + blocks_info = vm.monitor.info("block") + LOG.info("After hotplug block, the qmp info of %s: %s", vm.name, blocks_info) + blk_info = session.cmd_output("lsblk") + LOG.info("After hotplug block, the block info of %s: %s", vm.name, blk_info) + + def block_unplug(vm, image_name): + """ + Hotplug disks and verify it in qtree. + + :param image_name: Image name of hotplug disk + :return: List of objects for hotplug disk. + """ + LOG.info("Unplug the image: %s", image_name) + vm_params = vm.params + node = vmm.get_instance_node(vm.instance_id) + disk_spec = qemu_spec.define_disk_device_spec( + vm.name, vm_params, node.tag, image_name + ) + vmm.detach_instance_device(vm.instance_id, disk_spec) + blocks_info = vm.monitor.info("block") + LOG.info("After unplug block, the qmp info of %s: %s", vm.name, blocks_info) + blk_info = session.cmd_output("lsblk") + LOG.info("After unplug block, the block info of %s: %s", vm.name, blk_info) + + timeout = float(params.get("login_timeout", 240)) + vms = env.get_all_vms() + for vm in vms: + error_context.context("Try to log into guest '%s'." % vm.name, test.log.info) + session = vm.wait_for_login(timeout=timeout, status_check=False) + vm_ver = session.cmd_output("cat /proc/version") + LOG.info("Version of %s: %s", vm.name, vm_ver) + cpus_info = vm.monitor.info("cpus", debug=False) + LOG.info("CPU info of %s: %s", vm.name, cpus_info) + blocks_info = vm.monitor.info("block") + LOG.info("Block info of %s: %s", vm.name, blocks_info) + + hotplug_images = params.get("hotplug_images") + if hotplug_images: + for img in hotplug_images.split(): + block_hotplug(vm, img) + + unplug_images = params.get("unplug_images") + if unplug_images: + for img in unplug_images.split(): + block_unplug(vm, img) + + if params.get("reboot") == "yes": + reboot_method = params.get("reboot_method", "shell") + session = vm.reboot(session, reboot_method, 0, timeout, True) + vm_info = session.cmd_output("uname -a") + LOG.info("Info %s: %s", vm.name, vm_info) + blocks_info = vm.monitor.info("block") + LOG.info("Block info of %s: %s", vm.name, blocks_info) + vm.pause() + vm.resume() + session.close() + + for vm in vms: + vm_params = params.object_params(vm.name) + src_node = vm_params.get("vm_node") + dst_node = vm_params.get("mig_dest_node") + if dst_node: + error_context.context( + f"Migrating the guest {vm.name} from {src_node} to {dst_node}", + test.log.info, + ) + vm.migrate( + timeout=3600, + protocol="tcp", + cancel_delay=None, + offline=False, + stable_check=False, + clean=True, + save_path=data_dir.get_tmp_dir(), + dest_host=dst_node, + remote_port=None, + not_wait_for_migration=False, + fd_src=None, + fd_dst=None, + migration_exec_cmd_src=None, + migration_exec_cmd_dst=None, + env=None, + migrate_capabilities=None, + mig_inner_funcs=None, + migrate_parameters=(None, None), + ) + + error_context.context( + "Try to log into guest '%s'." % vm.name, test.log.info + ) + session = vm.wait_for_login(timeout=timeout) + vm_ver = session.cmd_output("cat /proc/version") + LOG.info("Version of %s: %s", vm.name, vm_ver) + + cpus_info = vm.monitor.info("cpus", debug=False) + LOG.info("CPU info of %s: %s", vm.name, cpus_info)