qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [PATCH v3 20/29] tests/functional: extend test_aarch64_virt with vul


From: Thomas Huth
Subject: Re: [PATCH v3 20/29] tests/functional: extend test_aarch64_virt with vulkan test
Date: Wed, 8 Jan 2025 08:03:28 +0100
User-agent: Mozilla Thunderbird

On 07/01/2025 17.51, Alex Bennée wrote:
Now we have virtio-gpu Vulkan support lets add a test for it.

I'm not a native speaker, but maybe rather:

Now that we have virtio-gpu Vulkan support, let's add a test for it.

?

...
diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/test_aarch64_virt.py
index 2d9995a95d..bb68048537 100755
--- a/tests/functional/test_aarch64_virt.py
+++ b/tests/functional/test_aarch64_virt.py
@@ -13,11 +13,14 @@
  import logging
  from subprocess import check_call, DEVNULL
+from qemu.machine.machine import VMLaunchFailure
+
+from qemu_test import BUILD_DIR
  from qemu_test import QemuSystemTest, Asset
-from qemu_test import exec_command_and_wait_for_pattern
+from qemu_test import exec_command, exec_command_and_wait_for_pattern
  from qemu_test import wait_for_console_pattern
  from qemu_test import get_qemu_img
-
+from qemu_test import skipIfMissingCommands, get_qemu_img

Daniel recently tried to standardize two empty lines between the import and class statements, so could you please keep the empty line here?

  class Aarch64VirtMachine(QemuSystemTest):
      KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 '
@@ -73,15 +76,16 @@ def common_aarch64_virt(self, machine):
          Common code to launch basic virt machine with kernel+initrd
          and a scratch disk.
          """
+        self.set_machine('virt')
+        self.require_accelerator("tcg")

Thanks for moving this to the beginning of the function!

          logger = logging.getLogger('aarch64_virt')
          kernel_path = self.ASSET_KERNEL.fetch()
-        self.set_machine('virt')
          self.vm.set_console()
          kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
                                 'console=ttyAMA0')
-        self.require_accelerator("tcg")
          self.vm.add_args('-cpu', 'max,pauth-impdef=on',
                           '-machine', machine,
                           '-accel', 'tcg',
@@ -102,7 +106,9 @@ def common_aarch64_virt(self, machine):
# Add the device
          self.vm.add_args('-blockdev',
-                         f"driver=qcow2,file.driver=file,file.filename={image_path},node-name=scratch")
+                         "driver=qcow2,file."
+                         "driver=file,file."

The line breaks look weird here. I'd rather start a new line after the comma, not after the dot...? (also the changes look rather unrelated to the patch subject?)

+                         f"filename={image_path},node-name=scratch")
          self.vm.add_args('-device',
                           'virtio-blk-device,drive=scratch')
@@ -131,5 +137,73 @@ def test_aarch64_virt_gicv2(self):
          self.common_aarch64_virt("virt,gic-version=2")
+    ASSET_VIRT_GPU_KERNEL = Asset(
+        'https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+        'download?path=%2F&files='
+        'Image',
+        '89e5099d26166204cc5ca4bb6d1a11b92c217e1f82ec67e3ba363d09157462f6')
+
+    ASSET_VIRT_GPU_ROOTFS = Asset(
+        'https://fileserver.linaro.org/s/ce5jXBFinPxtEdx/'
+        'download?path=%2F&files='
+        'rootfs.ext4.zstd',
+        '792da7573f5dc2913ddb7c638151d4a6b2d028a4cb2afb38add513c1924bdad4')
+
+    @skipIfMissingCommands('zstd')
+    def test_aarch64_virt_with_gpu(self):
+        # This tests boots with a buildroot test image that contains
+        # vkmark and other GPU exercising tools. We run a headless
+        # weston that nevertheless still exercises the virtio-gpu
+        # backend.
+
+        self.set_machine('virt')
+        self.require_accelerator("tcg")
+
+        kernel_path = self.ASSET_VIRT_GPU_KERNEL.fetch()
+        image_path = self.uncompress(self.ASSET_VIRT_GPU_ROOTFS, format="zstd")
+
+        self.vm.set_console()
+        kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
+                               'console=ttyAMA0 root=/dev/vda')
+
+        self.vm.add_args("-accel", "tcg")
+        self.vm.add_args("-cpu", "neoverse-v1,pauth-impdef=on")
+        self.vm.add_args("-machine", "virt,gic-version=max",
+                         '-kernel', kernel_path,
+                         '-append', kernel_command_line)
+        self.vm.add_args("-smp", "2", "-m", "2048")
+        self.vm.add_args("-device",
+                         "virtio-gpu-gl-pci,hostmem=4G,blob=on,venus=on")
+        self.vm.add_args("-display", "egl-headless")
+        self.vm.add_args("-display", "dbus,gl=on")
+        self.vm.add_args("-device", "virtio-blk-device,drive=hd0")
+        self.vm.add_args("-blockdev",
+                         "driver=raw,file.driver=file,"
+                         "node-name=hd0,read-only=on,"
+                         f"file.filename={image_path}")
+        self.vm.add_args("-snapshot")
+
+        try:
+            self.vm.launch()
+        except VMLaunchFailure as excp:
+            if "old virglrenderer, blob resources unsupported" in excp.output:
+                self.skipTest("No blob support for virtio-gpu")
+            elif "old virglrenderer, venus unsupported" in excp.output:
+                self.skipTest("No venus support for virtio-gpu")
+            else:
+                self.log.info("unhandled launch failure: {excp.output}")
+                raise excp
+
+        self.wait_for_console_pattern('buildroot login:')
+        exec_command(self, 'root')
+        exec_command(self, 'export XDG_RUNTIME_DIR=/tmp')
+        exec_command_and_wait_for_pattern(self,
+                                          "weston -B headless "
+                                          "--renderer gl "
+                                          "--shell kiosk "
+                                          "-- vkmark -b:duration=1.0",
+                                          "vkmark Score")

The new test looks fine to me!

 Thomas




reply via email to

[Prev in Thread] Current Thread [Next in Thread]