2021-02-24 10:59:14

by Sumera Priyadarsini

Subject: [RFC PATCH] drm/vkms: Add support for virtual hardware mode

Add a virtual hardware, or vblank-less, mode to enable VKMS to
emulate virtual graphics drivers. This mode can be enabled by setting
the module parameter enable_virtual_hw=1 when loading VKMS.

A new function, vkms_crtc_composer(), has been added to bypass the
vblank mode; it is called directly from the atomic hook
vkms_crtc_atomic_flush(). However, some crc captures still use vblanks,
which causes the crc-based igt tests to crash. Currently, I am unsure
how to approach a one-shot implementation of crc reads, so I am
still working on that.

This patch has been tested with the igt tests kms_writeback, kms_atomic,
kms_lease, kms_flip and kms_pipe_get_crc; it preserves their results except
for subtests related to crc reads, and it skips tests that rely on vertical
blanking. This patch must be tested after incorporating the
igt-tests patch: https://lists.freedesktop.org/archives/igt-dev/2021-February/029355.html .

The patch is based on Rodrigo Siqueira's
patch (https://patchwork.freedesktop.org/patch/316851/?series=48469&rev=3)
and the ensuing review.

Signed-off-by: Sumera Priyadarsini <[email protected]>
---
drivers/gpu/drm/vkms/vkms_composer.c | 46 ++++++++++++++++++++++++++++
drivers/gpu/drm/vkms/vkms_crtc.c | 17 ++++++++--
drivers/gpu/drm/vkms/vkms_drv.c | 18 ++++++++---
drivers/gpu/drm/vkms/vkms_drv.h | 2 ++
4 files changed, 75 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 66c6842d70db..7a8aaf5c5555 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -247,6 +247,52 @@ void vkms_composer_worker(struct work_struct *work)
drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}

+void vkms_crtc_composer(struct vkms_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ struct vkms_composer *primary_composer = NULL;
+ struct vkms_composer *cursor_composer = NULL;
+ void *vaddr_out = NULL;
+ u32 crc32 = 0;
+ int ret;
+ bool wb_pending;
+
+ wb_pending = crtc_state->wb_pending;
+
+ if (crtc_state->num_active_planes >= 1)
+ primary_composer = crtc_state->active_planes[0]->composer;
+
+ if (crtc_state->num_active_planes == 2)
+ cursor_composer = crtc_state->active_planes[1]->composer;
+
+ if (!primary_composer)
+ return;
+
+ if (wb_pending)
+ vaddr_out = crtc_state->active_writeback;
+
+ ret = compose_planes(&vaddr_out, primary_composer, cursor_composer);
+ if (ret) {
+ if (ret == -EINVAL && !wb_pending)
+ kfree(vaddr_out);
+ return;
+ }
+
+ crc32 = compute_crc(vaddr_out, primary_composer);
+
+ if (wb_pending) {
+ drm_writeback_signal_completion(&out->wb_connector, 0);
+ spin_lock_irq(&out->composer_lock);
+ crtc_state->wb_pending = false;
+ spin_unlock_irq(&out->composer_lock);
+ } else {
+ kfree(vaddr_out);
+ }
+
+ drm_crtc_add_crc_entry(crtc, true, 0, &crc32);
+}
+
static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 6164349cdf11..38de791a4882 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -224,13 +224,19 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- drm_crtc_vblank_on(crtc);
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);
+
+ if (!vkmsdev->config->virtual_hw)
+ drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- drm_crtc_vblank_off(crtc);
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);
+
+ if (!vkmsdev->config->virtual_hw)
+ drm_crtc_vblank_off(crtc);
}

static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -248,8 +254,13 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc->state);
+ struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);
+
+ if (vkmsdev->config->virtual_hw)
+ vkms_crtc_composer(vkms_state);

- if (crtc->state->event) {
+ if (crtc->state->event && !vkmsdev->config->virtual_hw) {
spin_lock(&crtc->dev->event_lock);

if (drm_crtc_vblank_get(crtc) != 0)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 2173b82606f6..945c4495d62a 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -44,6 +44,11 @@ static bool enable_writeback = true;
module_param_named(enable_writeback, enable_writeback, bool, 0444);
MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");

+static bool enable_virtual_hw = false;
+module_param_named(enable_virtual_hw, enable_virtual_hw, bool, 0444);
+MODULE_PARM_DESC(enable_virtual_hw, "Enable/Disable virtual hardware mode(virtual \
+hardware mode disables vblank interrupts)");
+
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);

static void vkms_release(struct drm_device *dev)
@@ -159,12 +164,14 @@ static int vkms_create(struct vkms_config *config)
goto out_devres;
}

- vkms_device->drm.irq_enabled = true;
+ vkms_device->drm.irq_enabled = !vkms_device->config->virtual_hw;

- ret = drm_vblank_init(&vkms_device->drm, 1);
- if (ret) {
- DRM_ERROR("Failed to vblank\n");
- goto out_devres;
+ if (!vkms_device->config->virtual_hw) {
+ ret = drm_vblank_init(&vkms_device->drm, 1);
+ if (ret) {
+ DRM_ERROR("Failed to vblank\n");
+ goto out_devres;
+ }
}

ret = vkms_modeset_init(vkms_device);
@@ -198,6 +205,7 @@ static int __init vkms_init(void)

config->cursor = enable_cursor;
config->writeback = enable_writeback;
+ config->virtual_hw = enable_virtual_hw;

return vkms_create(config);
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 35540c7c4416..d4a45518639c 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -85,6 +85,7 @@ struct vkms_device;
struct vkms_config {
bool writeback;
bool cursor;
+ bool virtual_hw;
/* only set when instantiated */
struct vkms_device *dev;
};
@@ -127,6 +128,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
/* Composer Support */
void vkms_composer_worker(struct work_struct *work);
void vkms_set_composer(struct vkms_output *out, bool enabled);
+void vkms_crtc_composer(struct vkms_crtc_state *crtc_state);

/* Writeback */
int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
--
2.25.1


2021-02-25 09:59:07

by Daniel Vetter

Subject: Re: [RFC PATCH] drm/vkms: Add support for virtual hardware mode

On Wed, Feb 24, 2021 at 11:55 AM Sumera Priyadarsini
<[email protected]> wrote:
>
> Add a virtual hardware or vblank-less mode as a module to enable
> VKMS to emulate virtual graphic drivers. This mode can be enabled
> by setting enable_virtual_hw=1 at the time of loading VKMS.
>
> A new function vkms_crtc_composer() has been added to bypass the
> vblank mode and is called directly in the atomic hook in
> vkms_atomic_begin(). However, some crc captures still use vblanks
> which causes the crc-based igt tests to crash. Currently, I am unsure
> about how to approach one-shot implementation of crc reads so I am
> still working on that.

Gerd, Zack: For virtual hw like virtio-gpu or vmwgfx that does
one-shot upload and damage tracking, what do you think is the best way
to capture crc for validation? Assuming that's even on the plans
anywhere ...

Ideally it'd be a crc that the host side captures, so that we really
have end-to-end validation, including the damage uploads and all that.

For vkms we're going for now with one-shot crc generation after each
atomic flip (or DIRTYFB ioctl call). Will need a pile of igt changes,
but seems like the most fitting model.
Other option would be that we'd wire up something on the kernel side
that generates a crc on-demand every time igt reads a new crc value
(maybe with some rate limiting). But that's not really how virtual hw
works when everything is pushed explicitly to the host side.

Thoughts?

> This patch has been tested with the igt tests, kms_writeback, kms_atomic,
> kms_lease, kms_flip, kms_pipe_get_crc and preserves results except for
> subtests related to crc reads and skips tests that rely on vertical
> blanking. This patch must be tested after incorporating the
> igt-tests patch: https://lists.freedesktop.org/archives/igt-dev/2021-February/029355.html .
>
> The patch is based on Rodrigo Siqueira's
> patch(https://patchwork.freedesktop.org/patch/316851/?series=48469&rev=3)
> and the ensuing review.
>
> Signed-off-by: Sumera Priyadarsini <[email protected]>

I think the logic in the patch looks good, some comments below about polishing.
-Daniel

> ---
> drivers/gpu/drm/vkms/vkms_composer.c | 46 ++++++++++++++++++++++++++++
> drivers/gpu/drm/vkms/vkms_crtc.c | 17 ++++++++--
> drivers/gpu/drm/vkms/vkms_drv.c | 18 ++++++++---
> drivers/gpu/drm/vkms/vkms_drv.h | 2 ++
> 4 files changed, 75 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
> index 66c6842d70db..7a8aaf5c5555 100644
> --- a/drivers/gpu/drm/vkms/vkms_composer.c
> +++ b/drivers/gpu/drm/vkms/vkms_composer.c
> @@ -247,6 +247,52 @@ void vkms_composer_worker(struct work_struct *work)
> drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
> }
>
> +void vkms_crtc_composer(struct vkms_crtc_state *crtc_state)
> +{
> + struct drm_crtc *crtc = crtc_state->base.crtc;
> + struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
> + struct vkms_composer *primary_composer = NULL;
> + struct vkms_composer *cursor_composer = NULL;
> + void *vaddr_out = NULL;
> + u32 crc32 = 0;
> + int ret;
> + bool wb_pending;
> +
> + wb_pending = crtc_state->wb_pending;
> +
> + if (crtc_state->num_active_planes >= 1)
> + primary_composer = crtc_state->active_planes[0]->composer;
> +
> + if (crtc_state->num_active_planes == 2)
> + cursor_composer = crtc_state->active_planes[1]->composer;
> +
> + if (!primary_composer)
> + return;
> +
> + if (wb_pending)
> + vaddr_out = crtc_state->active_writeback;
> +
> + ret = compose_planes(&vaddr_out, primary_composer, cursor_composer);
> + if (ret) {
> + if (ret == -EINVAL && !wb_pending)
> + kfree(vaddr_out);
> + return;
> + }
> +
> + crc32 = compute_crc(vaddr_out, primary_composer);
> +
> + if (wb_pending) {
> + drm_writeback_signal_completion(&out->wb_connector, 0);
> + spin_lock_irq(&out->composer_lock);
> + crtc_state->wb_pending = false;
> + spin_unlock_irq(&out->composer_lock);
> + } else {
> + kfree(vaddr_out);
> + }
> +
> + drm_crtc_add_crc_entry(crtc, true, 0, &crc32);
> +}

This duplicates code that we also have in vkms_composer_worker(). It
would be good if we could share this code (either by having
composer_worker() call this function you add here, or by having a
composer_common() function which does all the common parts).
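
For illustration only, a rough sketch of the composer_common variant;
the helper name, its signature and the exact error handling are made up
here and would need to be reconciled with what vkms_composer_worker()
needs:

/*
 * Hypothetical sketch: a common helper shared by the vblank worker and
 * the vblank-less path, so the plane blending + crc logic lives in one
 * place. Returns 0 on success and fills *crc32.
 */
static int vkms_composer_common(struct vkms_crtc_state *crtc_state,
                                struct vkms_output *out, bool wb_pending,
                                u32 *crc32)
{
        struct vkms_composer *primary_composer = NULL;
        struct vkms_composer *cursor_composer = NULL;
        void *vaddr_out = NULL;
        int ret;

        if (crtc_state->num_active_planes >= 1)
                primary_composer = crtc_state->active_planes[0]->composer;
        if (crtc_state->num_active_planes == 2)
                cursor_composer = crtc_state->active_planes[1]->composer;
        if (!primary_composer)
                return -EINVAL;

        if (wb_pending)
                vaddr_out = crtc_state->active_writeback;

        ret = compose_planes(&vaddr_out, primary_composer, cursor_composer);
        if (ret) {
                if (ret == -EINVAL && !wb_pending)
                        kfree(vaddr_out);
                return ret;
        }

        *crc32 = compute_crc(vaddr_out, primary_composer);

        if (wb_pending) {
                drm_writeback_signal_completion(&out->wb_connector, 0);
                spin_lock_irq(&out->composer_lock);
                crtc_state->wb_pending = false;
                spin_unlock_irq(&out->composer_lock);
        } else {
                kfree(vaddr_out);
        }

        return 0;
}

void vkms_crtc_composer(struct vkms_crtc_state *crtc_state)
{
        struct drm_crtc *crtc = crtc_state->base.crtc;
        struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
        u32 crc32 = 0;

        if (vkms_composer_common(crtc_state, out, crtc_state->wb_pending, &crc32))
                return;

        drm_crtc_add_crc_entry(crtc, true, 0, &crc32);
}

vkms_composer_worker() could then call the same helper and keep using the
frame counter it already tracks for its drm_crtc_add_crc_entry() calls.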

I also realized that we have a problem now with the kzalloc in
compose_planes, which would need to be fixed up somehow. But that can be
done in a follow-up patch, and it's a bit tricky to explain exactly
why; the short summary is: we're not allowed to allocate memory from
the crtc commit functions, only from the crtc_check callback.

Also, since this is run while you hold a spinlock, the kernel should
complain in dmesg, at least if the basic debug options are enabled;
CONFIG_DEBUG_ATOMIC_SLEEP is the Kconfig option you need. It would be
good to check that.

> +
> static const char * const pipe_crc_sources[] = {"auto"};
>
> const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
> diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
> index 6164349cdf11..38de791a4882 100644
> --- a/drivers/gpu/drm/vkms/vkms_crtc.c
> +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
> @@ -224,13 +224,19 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
> static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
> struct drm_atomic_state *state)
> {
> - drm_crtc_vblank_on(crtc);
> + struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);
> +
> + if (!vkmsdev->config->virtual_hw)
> + drm_crtc_vblank_on(crtc);
> }
>
> static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
> struct drm_atomic_state *state)
> {
> - drm_crtc_vblank_off(crtc);
> + struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);
> +
> + if (!vkmsdev->config->virtual_hw)
> + drm_crtc_vblank_off(crtc);
> }
>
> static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
> @@ -248,8 +254,13 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
> struct drm_atomic_state *state)
> {
> struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
> + struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc->state);
> + struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);
> +
> + if (vkmsdev->config->virtual_hw)
> + vkms_crtc_composer(vkms_state);

We're holding the vkms_output->lock spinlock while doing this fairly
expensive computation, which is not good. That spinlock was only
there to protect against concurrent access from the vblank hrtimer, so
I think we don't really need it here, and we can make taking it
conditional on !virtual_hw too.
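
A minimal sketch of that conditional-lock variant, assuming the
spin_lock_irq()/spin_unlock_irq() pair in today's
vkms_crtc_atomic_begin()/vkms_crtc_atomic_flush() otherwise stays as it is:

static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_atomic_state *state)
{
        struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
        struct vkms_device *vkmsdev = drm_device_to_vkms_device(crtc->dev);

        /* The lock only exists to keep the vblank hrtimer's composer
         * worker from running mid-commit; with virtual hw there is no
         * timer to race against, so skip it. The unlock at the end of
         * atomic_flush needs the same check.
         */
        if (!vkmsdev->config->virtual_hw)
                spin_lock_irq(&vkms_output->lock);
}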

At that point all our crtc functions have conditions for virtual hw
(except atomic_check), so I think it'd be cleaner to duplicate the
functions into vkms_vblank_crtc_* and vkms_virtual_crtc_* variants, with
two struct drm_crtc_helper_funcs, and do the if only once when we set up
the crtc in vkms_crtc_init.
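
Purely as a sketch of that split (the vkms_vblank_*/vkms_virtual_* names
and the virtual flush helper are made up for illustration):

static const struct drm_crtc_helper_funcs vkms_vblank_crtc_helper_funcs = {
        .atomic_check   = vkms_crtc_atomic_check,
        .atomic_begin   = vkms_crtc_atomic_begin,
        .atomic_flush   = vkms_crtc_atomic_flush,
        .atomic_enable  = vkms_crtc_atomic_enable,
        .atomic_disable = vkms_crtc_atomic_disable,
};

/* No vblank hooks at all: this flush would call vkms_crtc_composer()
 * and complete crtc->state->event directly.
 */
static const struct drm_crtc_helper_funcs vkms_virtual_crtc_helper_funcs = {
        .atomic_check   = vkms_crtc_atomic_check,
        .atomic_flush   = vkms_virtual_crtc_atomic_flush,
};

/* ...and in vkms_crtc_init(), pick the helpers once instead of
 * branching in every hook (dev being the drm_device passed in):
 */
        struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);

        if (vkmsdev->config->virtual_hw)
                drm_crtc_helper_add(crtc, &vkms_virtual_crtc_helper_funcs);
        else
                drm_crtc_helper_add(crtc, &vkms_vblank_crtc_helper_funcs);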

>
> - if (crtc->state->event) {
> + if (crtc->state->event && !vkmsdev->config->virtual_hw) {
> spin_lock(&crtc->dev->event_lock);
>
> if (drm_crtc_vblank_get(crtc) != 0)
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
> index 2173b82606f6..945c4495d62a 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.c
> +++ b/drivers/gpu/drm/vkms/vkms_drv.c
> @@ -44,6 +44,11 @@ static bool enable_writeback = true;
> module_param_named(enable_writeback, enable_writeback, bool, 0444);
> MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");
>
> +static bool enable_virtual_hw = false;
> +module_param_named(enable_virtual_hw, enable_virtual_hw, bool, 0444);
> +MODULE_PARM_DESC(enable_virtual_hw, "Enable/Disable virtual hardware mode(virtual \
> +hardware mode disables vblank interrupts)");
> +
> DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
>
> static void vkms_release(struct drm_device *dev)
> @@ -159,12 +164,14 @@ static int vkms_create(struct vkms_config *config)
> goto out_devres;
> }
>
> - vkms_device->drm.irq_enabled = true;
> + vkms_device->drm.irq_enabled = !vkms_device->config->virtual_hw;
>
> - ret = drm_vblank_init(&vkms_device->drm, 1);
> - if (ret) {
> - DRM_ERROR("Failed to vblank\n");
> - goto out_devres;
> + if (!vkms_device->config->virtual_hw) {
> + ret = drm_vblank_init(&vkms_device->drm, 1);
> + if (ret) {
> + DRM_ERROR("Failed to vblank\n");
> + goto out_devres;
> + }
> }
>
> ret = vkms_modeset_init(vkms_device);
> @@ -198,6 +205,7 @@ static int __init vkms_init(void)
>
> config->cursor = enable_cursor;
> config->writeback = enable_writeback;
> + config->virtual_hw = enable_virtual_hw;
>
> return vkms_create(config);
> }
> diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
> index 35540c7c4416..d4a45518639c 100644
> --- a/drivers/gpu/drm/vkms/vkms_drv.h
> +++ b/drivers/gpu/drm/vkms/vkms_drv.h
> @@ -85,6 +85,7 @@ struct vkms_device;
> struct vkms_config {
> bool writeback;
> bool cursor;
> + bool virtual_hw;
> /* only set when instantiated */
> struct vkms_device *dev;
> };
> @@ -127,6 +128,7 @@ int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
> /* Composer Support */
> void vkms_composer_worker(struct work_struct *work);
> void vkms_set_composer(struct vkms_output *out, bool enabled);
> +void vkms_crtc_composer(struct vkms_crtc_state *crtc_state);
>
> /* Writeback */
> int vkms_enable_writeback_connector(struct vkms_device *vkmsdev);
> --
> 2.25.1
>


--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

2021-02-25 10:49:11

by Gerd Hoffmann

Subject: Re: [RFC PATCH] drm/vkms: Add support for virtual hardware mode

On Thu, Feb 25, 2021 at 10:09:42AM +0100, Daniel Vetter wrote:
> On Wed, Feb 24, 2021 at 11:55 AM Sumera Priyadarsini
> <[email protected]> wrote:
> >
> > Add a virtual hardware or vblank-less mode as a module to enable
> > VKMS to emulate virtual graphic drivers. This mode can be enabled
> > by setting enable_virtual_hw=1 at the time of loading VKMS.
> >
> > A new function vkms_crtc_composer() has been added to bypass the
> > vblank mode and is called directly in the atomic hook in
> > vkms_atomic_begin(). However, some crc captures still use vblanks
> > which causes the crc-based igt tests to crash. Currently, I am unsure
> > about how to approach one-shot implementation of crc reads so I am
> > still working on that.
>
> Gerd, Zack: For virtual hw like virtio-gpu or vmwgfx that does
> one-shot upload and damage tracking, what do you think is the best way
> to capture crc for validation? Assuming that's even on the plans
> anywhere ...
>
> Ideally it'd be a crc that the host side captures, so that we really
> have end-to-end validation, including the damage uploads and all that.

Disclaimer: Not knowing much about the crc thing beside having noticed
it exists and seems to be used for display content checking.

> For vkms we're going for now with one-shot crc generation after each
> atomic flip (or DIRTYFB ioctl call). Will need a pile of igt changes,
> but seems like the most fitting model.
> Other option would be that we'd wire up something on the kernel side
> that generates a crc on-demand every time igt reads a new crc value
> (maybe with some rate limiting). But that's not really how virtual hw
> works when everything is pushed explicitly to the host side.

igt runs inside the guest, right?

You can ask qemu to write out a screen dump. Reading that and calculating
a crc from it should be easy. But the tricky part is how to coordinate
guest and host then. qemu autotesting typically runs on the host,
connected to qemu (monitor) and the guest (ssh or serial console), so it can
control both the host and guest side.

Another option to access the screen would be vnc. With user networking
and a guest forwarding rule it should be possible to allow the guest to
access the qemu vnc server for its own display. It would be more effort to
capture the display, but it would for the most part take the host out of
the picture. The guest could then coordinate everything on its own.

On-demand crc via debugfs or ioctl would work too, but yes that wouldn't
verify the host-side. At least not without virtio protocol extensions.
We could add a new command asking the host to crc the display and return
the result for on-demand crc. Or add a crc request flag to an existing
command (VIRTIO_GPU_CMD_RESOURCE_FLUSH probably).

take care,
Gerd

2021-02-25 10:52:36

by Daniel Vetter

Subject: Re: [RFC PATCH] drm/vkms: Add support for virtual hardware mode

On Thu, Feb 25, 2021 at 11:25:20AM +0100, Gerd Hoffmann wrote:
> On Thu, Feb 25, 2021 at 10:09:42AM +0100, Daniel Vetter wrote:
> > On Wed, Feb 24, 2021 at 11:55 AM Sumera Priyadarsini
> > <[email protected]> wrote:
> > >
> > > Add a virtual hardware or vblank-less mode as a module to enable
> > > VKMS to emulate virtual graphic drivers. This mode can be enabled
> > > by setting enable_virtual_hw=1 at the time of loading VKMS.
> > >
> > > A new function vkms_crtc_composer() has been added to bypass the
> > > vblank mode and is called directly in the atomic hook in
> > > vkms_atomic_begin(). However, some crc captures still use vblanks
> > > which causes the crc-based igt tests to crash. Currently, I am unsure
> > > about how to approach one-shot implementation of crc reads so I am
> > > still working on that.
> >
> > Gerd, Zack: For virtual hw like virtio-gpu or vmwgfx that does
> > one-shot upload and damage tracking, what do you think is the best way
> > to capture crc for validation? Assuming that's even on the plans
> > anywhere ...
> >
> > Ideally it'd be a crc that the host side captures, so that we really
> > have end-to-end validation, including the damage uploads and all that.
>
> Disclaimer: Not knowing much about the crc thing beside having noticed
> it exists and seems to be used for display content checking.
>
> > For vkms we're going for now with one-shot crc generation after each
> > atomic flip (or DIRTYFB ioctl call). Will need a pile of igt changes,
> > but seems like the most fitting model.
> > Other option would be that we'd wire up something on the kernel side
> > that generates a crc on-demand every time igt reads a new crc value
> > (maybe with some rate limiting). But that's not really how virtual hw
> > works when everything is pushed explicitly to the host side.
>
> igt runs inside the guest, right?

Yup. There are some debugfs files for capturing the crc of a specific CRTC. So
supporting this would mean some virtio-gpu revision so that you could ask the
host side for a crc when you do a screen update, and the host side would
send that back to you on a virtio channel as some kind of message.

> You can ask qemu to write out a screen dump. Reading that and calculate
> a crc from it should be easy. But the tricky part is how to coordinate
> guest and host then. qemu autotesting typically runs on the host,
> connected to qemu (monitor) and guest (ssh or serial console) so it can
> control both host and guest side.
>
> Another option to access the screen would be vnc. With user networking
> and a guest forwarding rule it should be possible to allow the guest
> access the qemu vnc server for its own display. Would be more effort to
> capture the display, but it would for the most part take the host out of
> the picture. The guest could coordinate everything on its own then.
>
> On-demand crc via debugfs or ioctl would work too, but yes that wouldn't
> verify the host-side. At least not without virtio protocol extensions.
> We could add a new command asking the host to crc the display and return
> the result for on-demand crc. Or add a crc request flag to an existing
> command (VIRTIO_GPU_CMD_RESOURCE_FLUSH probably).

Yup, I think that's what would be needed. The question here is, what do
you think would be the most natural way for virtio host side stack to
support this? If it ever happens ofc.

igt does support some other ways to capture crcs (chamelium boards acting
as a display sink). The nice thing about having this in the kernel driver as
part of the "hardware" is that you can guarantee that the frames are a
match. That's maybe more relevant for real hw with vblank, where it's also
interesting to know whether the screen did update on the right frame.
-Daniel
--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

2021-02-25 13:33:45

by Gerd Hoffmann

Subject: Re: [RFC PATCH] drm/vkms: Add support for virtual hardware mode

On Thu, Feb 25, 2021 at 11:32:08AM +0100, Daniel Vetter wrote:
> On Thu, Feb 25, 2021 at 11:25:20AM +0100, Gerd Hoffmann wrote:
> > On Thu, Feb 25, 2021 at 10:09:42AM +0100, Daniel Vetter wrote:
> > > On Wed, Feb 24, 2021 at 11:55 AM Sumera Priyadarsini
> > > <[email protected]> wrote:
> > > >
> > > > Add a virtual hardware or vblank-less mode as a module to enable
> > > > VKMS to emulate virtual graphic drivers. This mode can be enabled
> > > > by setting enable_virtual_hw=1 at the time of loading VKMS.
> > > >
> > > > A new function vkms_crtc_composer() has been added to bypass the
> > > > vblank mode and is called directly in the atomic hook in
> > > > vkms_atomic_begin(). However, some crc captures still use vblanks
> > > > which causes the crc-based igt tests to crash. Currently, I am unsure
> > > > about how to approach one-shot implementation of crc reads so I am
> > > > still working on that.
> > >
> > > Gerd, Zack: For virtual hw like virtio-gpu or vmwgfx that does
> > > one-shot upload and damage tracking, what do you think is the best way
> > > to capture crc for validation? Assuming that's even on the plans
> > > anywhere ...
> > >
> > > Ideally it'd be a crc that the host side captures, so that we really
> > > have end-to-end validation, including the damage uploads and all that.
> >
> > Disclaimer: Not knowing much about the crc thing beside having noticed
> > it exists and seems to be used for display content checking.
> >
> > > For vkms we're going for now with one-shot crc generation after each
> > > atomic flip (or DIRTYFB ioctl call). Will need a pile of igt changes,
> > > but seems like the most fitting model.
> > > Other option would be that we'd wire up something on the kernel side
> > > that generates a crc on-demand every time igt reads a new crc value
> > > (maybe with some rate limiting). But that's not really how virtual hw
> > > works when everything is pushed explicitly to the host side.
> >
> > igt runs inside the guest, right?
>
> Yup. There's some debugfs files for capture crc on a specific CRTC. So
> supporting this would mean some virtio-gpu revision so you could ask the
> host side for a crc when you do a screen update, and the host side would
> send that back to you on a virtio channel as some kind of message.

Waded through the source code a bit. So, the vkms crc code merges all
planes (specifically the cursor plane) before calculating the crc.
Which is a bit of a problem: we try to avoid that and rarely actually
merge the planes anywhere in the virtualization stack. Instead we
prefer to pass through the cursor plane separately, so we can -- for
example -- use that to simply set the cursor sprite of the qemu gtk
window. It's much more snappy because moving+rendering the pointer
doesn't need a round-trip to the guest then.

So, it would be quite some effort on the host side; we would have to
merge the planes just for crc calculation.

> > You can ask qemu to write out a screen dump.

Hmm, the (hardware) cursor is not in the screen dump either.

A software cursor (when using for example cirrus which has no cursor
plane) would be there.

> > Another option to access the screen would be vnc.

vnc clients can get the cursor sprite. They can't get the position
though, only set it (it's a one-way ticket in the vnc protocol).
Typically not a problem because desktops set the position in response
to the pointer events so guest + host position match nevertheless.
But for test cases which don't look at input events and set the cursor
to a fixed place this is a problem ...

> > On-demand crc via debugfs or ioctl would work too, but yes that wouldn't
> > verify the host-side. At least not without virtio protocol extensions.
> > We could add a new command asking the host to crc the display and return
> > the result for on-demand crc. Or add a crc request flag to an existing
> > command (VIRTIO_GPU_CMD_RESOURCE_FLUSH probably).
>
> Yup, I think that's what would be needed. The question here is, what do
> you think would be the most natural way for virtio host side stack to
> support this?

virtio has feature flags, so we can easily introduce an extension in a
backward compatible way. Each command sends a reply, with optional
payload, so it would make sense to send the crc with the
VIRTIO_GPU_CMD_RESOURCE_FLUSH reply.
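
Purely hypothetical, just to make the idea concrete; none of the names or
values below exist in the virtio-gpu spec, and the real extension would
have to be specified there first:

/* Illustrative only: a negotiated feature bit plus a crc carried in the
 * VIRTIO_GPU_CMD_RESOURCE_FLUSH reply.
 */
#define VIRTIO_GPU_F_FLUSH_CRC 5        /* feature bit number picked arbitrarily */

struct virtio_gpu_resp_flush_crc {
        struct virtio_gpu_ctrl_hdr hdr;
        __le32 crc32;           /* crc of the flushed scanout, computed host-side */
        __le32 padding;
};

On the guest side the virtio-gpu KMS driver could then feed that crc32
into drm_crtc_add_crc_entry(), so igt's debugfs-based crc capture would
cover the full damage-upload path.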

Alternatively introduce a communication channel independent of the gpu,
using for example virtio-serial or vsock, let the guest send crc
requests to qemu that way. Which would work with all qemu display
devices, not only virtio-gpu.

take care,
Gerd

2021-02-25 15:08:04

by Daniel Vetter

Subject: Re: [RFC PATCH] drm/vkms: Add support for virtual hardware mode

On Thu, Feb 25, 2021 at 2:27 PM Gerd Hoffmann <[email protected]> wrote:
>
> On Thu, Feb 25, 2021 at 11:32:08AM +0100, Daniel Vetter wrote:
> > On Thu, Feb 25, 2021 at 11:25:20AM +0100, Gerd Hoffmann wrote:
> > > On Thu, Feb 25, 2021 at 10:09:42AM +0100, Daniel Vetter wrote:
> > > > On Wed, Feb 24, 2021 at 11:55 AM Sumera Priyadarsini
> > > > <[email protected]> wrote:
> > > > >
> > > > > Add a virtual hardware or vblank-less mode as a module to enable
> > > > > VKMS to emulate virtual graphic drivers. This mode can be enabled
> > > > > by setting enable_virtual_hw=1 at the time of loading VKMS.
> > > > >
> > > > > A new function vkms_crtc_composer() has been added to bypass the
> > > > > vblank mode and is called directly in the atomic hook in
> > > > > vkms_atomic_begin(). However, some crc captures still use vblanks
> > > > > which causes the crc-based igt tests to crash. Currently, I am unsure
> > > > > about how to approach one-shot implementation of crc reads so I am
> > > > > still working on that.
> > > >
> > > > Gerd, Zack: For virtual hw like virtio-gpu or vmwgfx that does
> > > > one-shot upload and damage tracking, what do you think is the best way
> > > > to capture crc for validation? Assuming that's even on the plans
> > > > anywhere ...
> > > >
> > > > Ideally it'd be a crc that the host side captures, so that we really
> > > > have end-to-end validation, including the damage uploads and all that.
> > >
> > > Disclaimer: Not knowing much about the crc thing beside having noticed
> > > it exists and seems to be used for display content checking.
> > >
> > > > For vkms we're going for now with one-shot crc generation after each
> > > > atomic flip (or DIRTYFB ioctl call). Will need a pile of igt changes,
> > > > but seems like the most fitting model.
> > > > Other option would be that we'd wire up something on the kernel side
> > > > that generates a crc on-demand every time igt reads a new crc value
> > > > (maybe with some rate limiting). But that's not really how virtual hw
> > > > works when everything is pushed explicitly to the host side.
> > >
> > > igt runs inside the guest, right?
> >
> > Yup. There's some debugfs files for capture crc on a specific CRTC. So
> > supporting this would mean some virtio-gpu revision so you could ask the
> > host side for a crc when you do a screen update, and the host side would
> > send that back to you on a virtio channel as some kind of message.
>
> Waded through the source code a bit. So, the vkms crc code merges all
> planes (specifically the cursor plane) before calculating the crc.
> Which is a bit of a problem, we try to avoid that and rarely actually
> merge the planes anywhere in the virtualization stack. Instead we
> prefer to pass through the cursor plane separately, so we can -- for
> example -- use that to simply set the cursor sprite of the qemu gtk
> window. It's much more snappy because moving+rendering the pointer
> doesn't need a round-trip to the guest then.
>
> So, it would be quite some effort on the host side, we would have to
> merge planes just for crc calculation.
>
> > > You can ask qemu to write out a screen dump.
>
> Hmm, the (hardware) cursor is not in the screen dump either.
>
> A software cursor (when using for example cirrus which has no cursor
> plane) would be there.
>
> > > Another option to access the screen would be vnc.
>
> vnc clients can get the cursor sprite. They can't get the position
> though, only set it (it's a one-way ticket in the vnc protocol).
> Typically not a problem because desktops set the position in response
> to the pointer events so guest + host position match nevertheless.
> But for test cases which don't look at input events and set the cursor
> to a fixed place this is a problem ...

Hm yeah that sounds like a bit too much to wire through, and it kinda
defeats end-to-end testing if qemu takes a separate path for crc
generation.
-Daniel

> > > On-demand crc via debugfs or ioctl would work too, but yes that wouldn't
> > > verify the host-side. At least not without virtio protocol extensions.
> > > We could add a new command asking the host to crc the display and return
> > > the result for on-demand crc. Or add a crc request flag to an existing
> > > command (VIRTIO_GPU_CMD_RESOURCE_FLUSH probably).
> >
> > Yup, I think that's what would be needed. The question here is, what do
> > you think would be the most natural way for virtio host side stack to
> > support this?
>
> virtio has feature flags, so we can easily introduce an extension in a
> backward compatible way. Each command sends a reply, with optional
> payload, so it would make sense to send the crc with the
> VIRTIO_GPU_CMD_RESOURCE_FLUSH reply.
>
> Alternatively introduce a communication channel independent of the gpu,
> using for example virtio-serial or vsock, let the guest send crc
> requests to qemu that way. Which would work with all qemu display
> devices, not only virtio-gpu.
>
> take care,
> Gerd
>



--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

2021-02-25 16:29:18

by Zack Rusin

Subject: Re: [RFC PATCH] drm/vkms: Add support for virtual hardware mode

On 2/25/21 4:09 AM, Daniel Vetter wrote:
> On Wed, Feb 24, 2021 at 11:55 AM Sumera Priyadarsini
> <[email protected]> wrote:
>>
>> Add a virtual hardware or vblank-less mode as a module to enable
>> VKMS to emulate virtual graphic drivers. This mode can be enabled
>> by setting enable_virtual_hw=1 at the time of loading VKMS.
>>
>> A new function vkms_crtc_composer() has been added to bypass the
>> vblank mode and is called directly in the atomic hook in
>> vkms_atomic_begin(). However, some crc captures still use vblanks
>> which causes the crc-based igt tests to crash. Currently, I am unsure
>> about how to approach one-shot implementation of crc reads so I am
>> still working on that.
>
> Gerd, Zack: For virtual hw like virtio-gpu or vmwgfx that does
> one-shot upload and damage tracking, what do you think is the best way
> to capture crc for validation? Assuming that's even on the plans
> anywhere ...
>
> Ideally it'd be a crc that the host side captures, so that we really
> have end-to-end validation, including the damage uploads and all that.
>
> For vkms we're going for now with one-shot crc generation after each
> atomic flip (or DIRTYFB ioctl call). Will need a pile of igt changes,
> but seems like the most fitting model.
> Other option would be that we'd wire up something on the kernel side
> that generates a crc on-demand every time igt reads a new crc value
> (maybe with some rate limiting). But that's not really how virtual hw
> works when everything is pushed explicitly to the host side.

Well, this is a bit tricky. With virtual GPUs the presentation is not
necessarily well defined. Technically the presentation might not even
happen (i.e. someone disconnected the screen from a running vm), or it
could happen on a completely different machine (i.e. someone is remotely
running a vm), etc. With recent vmwgfx the guest owns the presentation
surface (i.e. screen targets), so it's not a big deal to set software
cursors and get the host to generate a CRC, but it's not too different from
doing it inside the guest. Realistically we could provide anything:
generating CRCs out of some block of memory is trivial, and so is
putting the result in either some register or any dedicated guest memory. It's
more about what kind of guarantees we could reasonably provide, or more
precisely "what kind of presentation testing could we do for a
GL/Vulkan/DX12 app that could potentially be running over vnc".

For us the response to that has basically been "if the screen target
memory in the guest matches what the app thought it should be, then we're
done", but one of my pet peeves in our stack has been the difficulty of
fully testing presentation, so I'd be very excited to hear other ideas.

My guess is that without GL/Vulkan/DX12 extensions which actually expose
the underlying CRC info from the display engines, we can't really provide
very strong guarantees, and we won't be able to provide much better
support than just sharing the CRC of the in-guest screen target memory.
With that model in mind, the design you mention above is basically as
good as it gets for what we have.

z