2018-08-29 12:22:17

by Gerd Hoffmann

[permalink] [raw]
Subject: [PATCH 2/2] drm/virtio: add iommu support.

Use the dma mapping api and properly add iommu mappings for
objects, unless virtio is in iommu quirk mode.

Signed-off-by: Gerd Hoffmann <[email protected]>
---
drivers/gpu/drm/virtio/virtgpu_drv.h | 1 +
drivers/gpu/drm/virtio/virtgpu_vq.c | 46 +++++++++++++++++++++++++++++-------
2 files changed, 38 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index cbbff01077..ec9a38f995 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -57,6 +57,7 @@ struct virtio_gpu_object {
uint32_t hw_res_handle;

struct sg_table *pages;
+ uint32_t mapped;
void *vmap;
bool dumb;
struct ttm_place placement_code;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index af24e91267..bf631d32d4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -424,7 +424,8 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+ uint32_t resource_id,
+ struct virtio_gpu_fence **fence)
{
struct virtio_gpu_resource_detach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -435,7 +436,7 @@ static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgde
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
cmd_p->resource_id = cpu_to_le32(resource_id);

- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -848,9 +849,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_fence **fence)
{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si;
+ int si, nents;

if (!obj->pages) {
int ret;
@@ -860,23 +862,33 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
return ret;
}

+ if (use_dma_api) {
+ obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->pages->nents,
+ DMA_TO_DEVICE);
+ nents = obj->mapped;
+ } else {
+ nents = obj->pages->nents;
+ }
+
/* gets freed when the ring has consumed it */
- ents = kmalloc_array(obj->pages->nents,
- sizeof(struct virtio_gpu_mem_entry),
+ ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
GFP_KERNEL);
if (!ents) {
DRM_ERROR("failed to allocate ent list\n");
return -ENOMEM;
}

- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
- ents[si].addr = cpu_to_le64(sg_phys(sg));
+ for_each_sg(obj->pages->sgl, sg, nents, si) {
+ ents[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
ents[si].length = cpu_to_le32(sg->length);
ents[si].padding = 0;
}

virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
- ents, obj->pages->nents,
+ ents, nents,
fence);
obj->hw_res_handle = resource_id;
return 0;
@@ -885,7 +897,23 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj)
{
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle);
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_fence *fence;
+
+ if (use_dma_api && obj->mapped) {
+ /* detach backing and wait for the host process it ... */
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+ dma_fence_wait(&fence->f, true);
+ dma_fence_put(&fence->f);
+
+ /* ... then tear down iommu mappings */
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->mapped,
+ DMA_TO_DEVICE);
+ obj->mapped = 0;
+ } else {
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+ }
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
--
2.9.3



2018-09-03 23:52:16

by Dave Airlie

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.

For the series,

Reviewed-by: Dave Airlie <[email protected]>
On Wed, 29 Aug 2018 at 22:20, Gerd Hoffmann <[email protected]> wrote:
>
> Use the dma mapping api and properly add iommu mappings for
> objects, unless virtio is in iommu quirk mode.
>
> Signed-off-by: Gerd Hoffmann <[email protected]>
> ---
> drivers/gpu/drm/virtio/virtgpu_drv.h | 1 +
> drivers/gpu/drm/virtio/virtgpu_vq.c | 46 +++++++++++++++++++++++++++++-------
> 2 files changed, 38 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
> index cbbff01077..ec9a38f995 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
> @@ -57,6 +57,7 @@ struct virtio_gpu_object {
> uint32_t hw_res_handle;
>
> struct sg_table *pages;
> + uint32_t mapped;
> void *vmap;
> bool dumb;
> struct ttm_place placement_code;
> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
> index af24e91267..bf631d32d4 100644
> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
> @@ -424,7 +424,8 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
> }
>
> static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
> - uint32_t resource_id)
> + uint32_t resource_id,
> + struct virtio_gpu_fence **fence)
> {
> struct virtio_gpu_resource_detach_backing *cmd_p;
> struct virtio_gpu_vbuffer *vbuf;
> @@ -435,7 +436,7 @@ static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgde
> cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
> cmd_p->resource_id = cpu_to_le32(resource_id);
>
> - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
> + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
> }
>
> void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
> @@ -848,9 +849,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
> uint32_t resource_id,
> struct virtio_gpu_fence **fence)
> {
> + bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
> struct virtio_gpu_mem_entry *ents;
> struct scatterlist *sg;
> - int si;
> + int si, nents;
>
> if (!obj->pages) {
> int ret;
> @@ -860,23 +862,33 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
> return ret;
> }
>
> + if (use_dma_api) {
> + obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
> + obj->pages->sgl, obj->pages->nents,
> + DMA_TO_DEVICE);
> + nents = obj->mapped;
> + } else {
> + nents = obj->pages->nents;
> + }
> +
> /* gets freed when the ring has consumed it */
> - ents = kmalloc_array(obj->pages->nents,
> - sizeof(struct virtio_gpu_mem_entry),
> + ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
> GFP_KERNEL);
> if (!ents) {
> DRM_ERROR("failed to allocate ent list\n");
> return -ENOMEM;
> }
>
> - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
> - ents[si].addr = cpu_to_le64(sg_phys(sg));
> + for_each_sg(obj->pages->sgl, sg, nents, si) {
> + ents[si].addr = cpu_to_le64(use_dma_api
> + ? sg_dma_address(sg)
> + : sg_phys(sg));
> ents[si].length = cpu_to_le32(sg->length);
> ents[si].padding = 0;
> }
>
> virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
> - ents, obj->pages->nents,
> + ents, nents,
> fence);
> obj->hw_res_handle = resource_id;
> return 0;
> @@ -885,7 +897,23 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
> void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
> struct virtio_gpu_object *obj)
> {
> - virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle);
> + bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
> + struct virtio_gpu_fence *fence;
> +
> + if (use_dma_api && obj->mapped) {
> + /* detach backing and wait for the host process it ... */
> + virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
> + dma_fence_wait(&fence->f, true);
> + dma_fence_put(&fence->f);
> +
> + /* ... then tear down iommu mappings */
> + dma_unmap_sg(vgdev->vdev->dev.parent,
> + obj->pages->sgl, obj->mapped,
> + DMA_TO_DEVICE);
> + obj->mapped = 0;
> + } else {
> + virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
> + }
> }
>
> void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
> --
> 2.9.3
>
>
> ---------------------------------------------------------------------
> To unsubscribe, e-mail: [email protected]
> For additional commands, e-mail: [email protected]
>

2018-09-12 06:52:58

by Jiandi An

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.


I tested this with Gerd's qemu side fix with in the sirius/virtio-gpu-iommu
branch https://git.kraxel.org/cgit/qemu/commit/?h=sirius/virtio-gpu-iommu&id=86f9d30e4a44ca47f007e51ab5744f87e79fb83e

While this resolves the issue where iommu_platform attribute can be specified
for virtio-gpu device in qemu and the virtqueue vring goes through dma-api
swiotlb bounce buffer which is what we want for AMD SEV since DMA bounce
buffer is set as decrypted, I still observe the issue where for AMD SEV,
the guest graphical display is all garbage. I see that the frame buffer
ttm-pages from the guest go through the dma map api in this patch, but it looks like
the other path — where virtio_gpufb_create() calls virtio_gpu_vmap_fb() after
virtio_gpu_alloc_object() creates the ttm->pages — does not.

The virtio_gpu_vmap_fb() calls down to ttm_bo_kmap and eventually vmap() to get
a guest virtual address. It is in the PTE of this vmap mapping that I need
to call set_memory_decrypted() to get the graphical display working in the guest
without seeing garbage on the display.

virtio_gpu_alloc_object()
virtio_gpu_object_create()
struct virtio_gpu_object bo kzalloc()
ttm_bo_init()
...
ttm_tt_create()
bo->ttm = bdev->driver->ttm_tt_create()
virtio_gpu_ttm_tt_create()
...
ttm_tt_populate()
ttm_pool_populate()
ttm_get_pages(ttm->pages, ttm->num_pages)

virtio_gpu_vmap_fb()
virtio_gpu_object_kmap(obj, NULL)
ttm_bo_kmap
ttm_mem_io_reserve()
ttm_bo_kmap_ttm()
vmap()
struct virtio_gpu_object bo->vmap =
ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);


There is a separate userspace path, for example when the display manager
kicks in:
virtio_gpu_alloc_object() followed by virtio_gpu_object_attach() are
called through
the ioctl virtio_gpu_resource_create_ioctl(). The mapping of the
pages created in this
path is handled in the do_page_fault() path in ttm_bo_vm_ops's
ttm_bo_vm_fault() handler.

do_page_fault()
handle_mm_fault()
__do_page_fault()
ttm_bo_vm_fault()
ttm_bo_reserve()
__ttm_bo_reserve()
ttm_mem_io_reserve_vm()
ttm_mem_io_reserve()
bdev->driver->io_mem_reserve()
virtio_gpu_ttm_io_mem_reserve()
mem->bus.is_iomem = false
mem->mem_type == TTM_PL_TT

The prot for the page mapping here also needs a fix to mark it as decrypted.

With these two spot fixes and with this patch and the QEMU side fix, able to
boot guest with graphical display with AMD SEV without seeing garbage on the
display.

I attempted to fix it in the ttm layer and here is the discussion
https://lore.kernel.org/lkml/[email protected]/

The ttm maintainer Christian is suggesting to map and set ttm->pages as decrypted
right after ttm->pages are allocated.

Just checking with you guys maybe there is a better way to handle this in
the virtio gpu layer instead of the common ttm layer. Could you guys shine some
light on this? Thanks.

- Jiandi

On 09/03/2018 06:50 PM, Dave Airlie wrote:
> For the series,
>
> Reviewed-by: Dave Airlie <[email protected]>
> On Wed, 29 Aug 2018 at 22:20, Gerd Hoffmann <[email protected]> wrote:
>>
>> Use the dma mapping api and properly add iommu mappings for
>> objects, unless virtio is in iommu quirk mode.
>>
>> Signed-off-by: Gerd Hoffmann <[email protected]>
>> ---
>> drivers/gpu/drm/virtio/virtgpu_drv.h | 1 +
>> drivers/gpu/drm/virtio/virtgpu_vq.c | 46 +++++++++++++++++++++++++++++-------
>> 2 files changed, 38 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
>> index cbbff01077..ec9a38f995 100644
>> --- a/drivers/gpu/drm/virtio/virtgpu_drv.h
>> +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
>> @@ -57,6 +57,7 @@ struct virtio_gpu_object {
>> uint32_t hw_res_handle;
>>
>> struct sg_table *pages;
>> + uint32_t mapped;
>> void *vmap;
>> bool dumb;
>> struct ttm_place placement_code;
>> diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
>> index af24e91267..bf631d32d4 100644
>> --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
>> +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
>> @@ -424,7 +424,8 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
>> }
>>
>> static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
>> - uint32_t resource_id)
>> + uint32_t resource_id,
>> + struct virtio_gpu_fence **fence)
>> {
>> struct virtio_gpu_resource_detach_backing *cmd_p;
>> struct virtio_gpu_vbuffer *vbuf;
>> @@ -435,7 +436,7 @@ static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgde
>> cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
>> cmd_p->resource_id = cpu_to_le32(resource_id);
>>
>> - virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
>> + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
>> }
>>
>> void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
>> @@ -848,9 +849,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
>> uint32_t resource_id,
>> struct virtio_gpu_fence **fence)
>> {
>> + bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
>> struct virtio_gpu_mem_entry *ents;
>> struct scatterlist *sg;
>> - int si;
>> + int si, nents;
>>
>> if (!obj->pages) {
>> int ret;
>> @@ -860,23 +862,33 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
>> return ret;
>> }
>>
>> + if (use_dma_api) {
>> + obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
>> + obj->pages->sgl, obj->pages->nents,
>> + DMA_TO_DEVICE);
>> + nents = obj->mapped;
>> + } else {
>> + nents = obj->pages->nents;
>> + }
>> +
>> /* gets freed when the ring has consumed it */
>> - ents = kmalloc_array(obj->pages->nents,
>> - sizeof(struct virtio_gpu_mem_entry),
>> + ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
>> GFP_KERNEL);
>> if (!ents) {
>> DRM_ERROR("failed to allocate ent list\n");
>> return -ENOMEM;
>> }
>>
>> - for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
>> - ents[si].addr = cpu_to_le64(sg_phys(sg));
>> + for_each_sg(obj->pages->sgl, sg, nents, si) {
>> + ents[si].addr = cpu_to_le64(use_dma_api
>> + ? sg_dma_address(sg)
>> + : sg_phys(sg));
>> ents[si].length = cpu_to_le32(sg->length);
>> ents[si].padding = 0;
>> }
>>
>> virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
>> - ents, obj->pages->nents,
>> + ents, nents,
>> fence);
>> obj->hw_res_handle = resource_id;
>> return 0;
>> @@ -885,7 +897,23 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
>> void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
>> struct virtio_gpu_object *obj)
>> {
>> - virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle);
>> + bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
>> + struct virtio_gpu_fence *fence;
>> +
>> + if (use_dma_api && obj->mapped) {
>> + /* detach backing and wait for the host process it ... */
>> + virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
>> + dma_fence_wait(&fence->f, true);
>> + dma_fence_put(&fence->f);
>> +
>> + /* ... then tear down iommu mappings */
>> + dma_unmap_sg(vgdev->vdev->dev.parent,
>> + obj->pages->sgl, obj->mapped,
>> + DMA_TO_DEVICE);
>> + obj->mapped = 0;
>> + } else {
>> + virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
>> + }
>> }
>>
>> void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
>> --
>> 2.9.3
>>
>>
>> ---------------------------------------------------------------------
>> To unsubscribe, e-mail: [email protected]
>> For additional commands, e-mail: [email protected]
>>
>
>
>

2018-09-12 07:26:14

by Gerd Hoffmann

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.

Hi,

> I attempted to fix it in the ttm layer and here is the discussion
> https://lore.kernel.org/lkml/[email protected]/
>
> The ttm maintainer Christian is suggesting to map and set ttm->pages as decrypted
> right after ttm->pages are allocated.
>
> Just checking with you guys maybe there is a better way to handle this in
> the virtio gpu layer instead of the common ttm layer. Could you guys shine some
> light on this? Thanks.

I think the ttm layer is the right place to fix this. virtio just calls
down to ttm for mappings. I think virtio should just hint to ttm using
a flag in some struct, probably ttm_buffer_object or
ttm_mem_type_manager, that the objects need decrypted mappings.

cheers,
Gerd


2018-09-18 17:54:58

by Jiandi An

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.



On 09/12/2018 02:25 AM, Gerd Hoffmann wrote:
> Hi,
>
>> I attempted to fix it in the ttm layer and here is the discussion
>> https://lore.kernel.org/lkml/[email protected]/
>>
>> The ttm maintainer Christian is suggesting to map and set ttm->pages as decrypted
>> right after ttm->pages are allocated.
>>
>> Just checking with you guys maybe there is a better way to handle this in
>> the virtio gpu layer instead of the common ttm layer. Could you guys shine some
>> light on this? Thanks.
>
> I think the ttm layer is the right place to fix this. virtio just calls
> down to ttm for mappings. I think virtio should just hint to ttm using
> a flag in some struct, probably ttm_buffer_object or
> ttm_mem_type_manager, that the objects need decrypted mappings.
>
> cheers,
> Gerd
>

I tested this patch with a non-SEV guest. It gives a blank black screen if booting
with swiotlb=force. dma_sync is missing if the dma op uses swiotlb as a bounce
buffer. I tried to put a dma_sync_sg_for_device() on virtio_gpu_object obj->pages->sgl
before VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D is sent. This fixes the kernel console path.
Once the display manager is kicked off for example (sudo systemctl start lightdm.service) and
resource id 3 gets created from user space down, it still gives a blank black screen.

In addition, I added dma_sync_sg_for_device() before sending VIRTIO_GPU_CMD_RESOURCE_FLUSH
and VIRTIO_GPU_CMD_SET_SCANOUT, still blank black screen after the display manager is kicked off.

Do you know which path I'm still missing as far as VIRTIO_GPU_CMD goes?

Thanks
- Jiandi

2018-09-19 04:46:24

by Gerd Hoffmann

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.

Hi,

> buffer. I tried to put a dma_sync_sg_for_device() on virtio_gpu_object obj->pages-sgl
> before VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D is sent. This fixes the kernel console path.

That should be the right place.

> Once display manger is kicked off for example (sudo systemctl start lightdm.service) and
> resource id 3 gets created from user space down, it still gives a blank black screen.

Hmm. I'd suspect there is simply a code path missing. Can you send the
patch you have?

cheers,
Gerd


2018-09-19 07:15:32

by Jiandi An

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.



On 09/18/2018 11:46 PM, Gerd Hoffmann wrote:
> Hi,
>
>> buffer. I tried to put a dma_sync_sg_for_device() on virtio_gpu_object obj->pages-sgl
>> before VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D is sent. This fixes the kernel console path.
>
> That should be the right place.
>
>> Once display manger is kicked off for example (sudo systemctl start lightdm.service) and
>> resource id 3 gets created from user space down, it still gives a blank black screen.
>
> Hmm. I'd suspect there is simply a code path missing. Can you send the
> patch you have?
>
> cheers,
> Gerd
>

I sent the patch. For now it does dma_sync_sg on the pages in
TRANSFER_TO_HOST_2D/3D when use_dma_api is true.

https://lore.kernel.org/lkml/[email protected]/

- Jiandi

2018-09-19 11:39:22

by Gerd Hoffmann

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.

> >> Once display manger is kicked off for example (sudo systemctl start lightdm.service) and
> >> resource id 3 gets created from user space down, it still gives a blank black screen.
> >
> > Hmm. I'd suspect there is simply a code path missing. Can you send the
> > patch you have?
> >
> > cheers,
> > Gerd
> >
>
> I sent the patch. For now it does dma_sync_sg on the pages in
> TRANSFER_TO_HOST_2D/3D when use_dma_api is true.
>
> https://lore.kernel.org/lkml/[email protected]/

Hmm, the way it is hooked up it should not miss any resource update.
So not sure why it isn't working.
Pushed the patch nevertheless as it is clearly a step in the right
direction.

cheers,
Gerd


2018-09-19 22:08:57

by Jiandi An

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.



On 09/19/2018 06:38 AM, Gerd Hoffmann wrote:
>>>> Once display manger is kicked off for example (sudo systemctl start lightdm.service) and
>>>> resource id 3 gets created from user space down, it still gives a blank black screen.
>>>
>>> Hmm. I'd suspect there is simply a code path missing. Can you send the
>>> patch you have?
>>>
>>> cheers,
>>> Gerd
>>>
>>
>> I sent the patch. For now it does dma_sync_sg on the pages in
>> TRANSFER_TO_HOST_2D/3D when use_dma_api is true.
>>
>> https://lore.kernel.org/lkml/[email protected]/
>
> Hmm, the way it is hooked up it should not miss any resource update.
> So not sure why it isn't working.
> Pushed the patch nevertheless as it is clearly a step into the right
> direction.
>
> cheers,
> Gerd
>


I did more tracing and digging. The ttm-pages that need to be dma_synced
are inside virtio_gpu_object.

There are 4 VIRTIO_GPU_CMD_RESOURCE_CREATE_2D/_ATTACH_BACKING coming down
the driver creating virtio_gpu_objects with resource_id = 1, 2, 3, 4 respectively.

resource_id = 1 is created during driver load in the path of
virtio_gpu_probe()
virtio_gpu_fbdev_init()
virtio_gpufb_create()

In this path virtio_gpu_framebuffer_init() is called where it ties
virtio_gpu_object -> drm_gem_object into
virtio_gpu_framebuffer -> drm_framebuffer -> drm_gem_object obj[0]

So later with given drm_gem_object, gem_to_virtio_gpu_obj() can get to
the virtio_gpu_object that contains it.

When kicking off display manager such as lightdm
resource_id = 2, 3, and 4 are created in the drm_mode_create_dumb ioctl path
drm_mode_create_dumb()
drm_ioctl()
drm_mode_create_dumb_ioctl()
virtio_gpu_mode_dumb_create()

In this path a different virtio_gpu_object is created for each resource_id.
The virtio_gpu_object or the underlying drm_gem_object is not tied to
virtio_gpu_framebuffer. It is published to userspace through drm_file which
registers the gem_handle for the drm_gem_object.

After the display manager is kicked off, the VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D
command is acting on the virtio_gpu_object resource created for resource_id = 3.

In virtio_gpu_cmd_transfer_to_host(), it only has access to struct virtio_gpu_device,
so the virtio_gpu_object it accesses through virtio_gpu_device is resource_id = 1's
virtio_gpu_object.

void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint32_t resource_id, uint64_t offset,
...
struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);


In struct drm_framebuffer, there is struct drm_gem_object *obj[4], only first
element is being used here. Can the virtio_gpu_object created from user space
for resource_id 3 be saved here to tie it to virtio_gpu_framebuffer?

Is there better way to get to the virtio_gpu_object created in the
virtio_gpu_mode_dumb_create() path from virtio_gpu_device or somehow from drm_file
via gem_handle down at the layer of virtio_gpu_cmd_transfer_to_host()?

Thanks
- Jiandi

2018-09-20 06:32:48

by Gerd Hoffmann

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.

Hi,

> void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
> uint32_t resource_id, uint64_t offset,
> ...
> struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
> struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
> struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);

Ah, right. Should have noticed this on review. You sync the fbcon
framebuffer unconditionally ...

> Is there better way to get to the virtio_gpu_object created in the
> virtio_gpu_mode_dumb_create() path from virtio_gpu_device or somehow from drm_file
> via gem_handle down at the layer of virtio_gpu_cmd_transfer_to_host()?

Just pass it down, the call sites all know it (see patch just sent).

cheers,
Gerd


2018-09-21 04:58:44

by Jiandi An

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.



On 09/20/2018 01:32 AM, Gerd Hoffmann wrote:
> Hi,
>
>> void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
>> uint32_t resource_id, uint64_t offset,
>> ...
>> struct virtio_gpu_fbdev *vgfbdev = vgdev->vgfbdev;
>> struct virtio_gpu_framebuffer *fb = &vgfbdev->vgfb;
>> struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->base.obj[0]);
>
> Ah, right. Should have noticed this on review. You sync the fbcon
> framebuffer unconditionally ...
>
>> Is there better way to get to the virtio_gpu_object created in the
>> virtio_gpu_mode_dumb_create() path from virtio_gpu_device or somehow from drm_file
>> via gem_handle down at the layer of virtio_gpu_cmd_transfer_to_host()?
>
> Just pass it down, the call sites all know it (see patch just sent).

Tested that patch you sent. Together with this patch it also resolves
the virtio gpu graphical display issue for SEV guest.

Is there a way to optimize the dma_sync_sg to only sync on the pages
being updated instead of all the pages in the obj sgl list every time?

Thanks.
- Jiandi

>
> cheers,
> Gerd
>

2018-09-21 05:48:34

by Gerd Hoffmann

[permalink] [raw]
Subject: Re: [virtio-dev] [PATCH 2/2] drm/virtio: add iommu support.

Hi,

> > Just pass it down, the call sites all know it (see patch just sent).
>
> Tested that patch you sent. Together with this patch it also resolves
> the virtio gpu graphical display issue for SEV guest.

Cool. Can you ack or review the patch so I can commit it?

> Is there a way to optimize the dma_sync_sg to only sync on the pages
> being updated instead of all the pages in the obj sgl list every time?

Well, the rectangle is passed to virtio_gpu_cmd_transfer_to_host_2d, so
it should be possible to figure which pages are affected and trim the
scatter list accordingly.

cheers,
Gerd