Per Google's user guide, userspace that needs to clean an ION
buffer's cache should call ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync).
We found that ion_dma_buf_begin_cpu_access/ion_dma_buf_end_cpu_access
also map the buffer into the kernel (map_kernel), which is not
necessary for cache maintenance. Worse, if userspace only needs to
clean the cache, it calls ion_dma_buf_end_cpu_access through the
dma-buf ioctl without a matching begin, so the buffer's kmap_cnt
underflows to -1 and drivers can no longer get a valid kernel vaddr
from dma_buf_kmap.

Move the kernel mapping out of begin/end_cpu_access and into the
dma_buf kmap/kunmap callbacks, where it is taken and released per
call.
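
For reference, the userspace flow described above looks roughly like
the sketch below. Only the DMA_BUF_IOCTL_SYNC usage comes from the
dma-buf UAPI; sync_cpu_access() and the surrounding error handling
are illustrative:

  #include <sys/ioctl.h>
  #include <linux/dma-buf.h>

  /* Bracket a CPU access to the buffer behind fd with cache
   * maintenance. fd is assumed to be a dma-buf fd exported by ION. */
  static int sync_cpu_access(int fd)
  {
          struct dma_buf_sync sync = { 0 };

          /* begin: make the buffer coherent before the CPU access */
          sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW;
          if (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
                  return -1;

          /* ... CPU reads/writes through an mmap() of fd ... */

          /* end: clean caches so the device sees the CPU's writes */
          sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
          return ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
  }

A caller that only wants to clean the cache issues just the
DMA_BUF_SYNC_END half; that is the path that drives kmap_cnt negative
with the current code.
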
Signed-off-by: Qing Xia <[email protected]>
---
drivers/staging/android/ion/ion.c | 46 ++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 25 deletions(-)

diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 9907332..f7e2812 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -303,45 +303,47 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;

- return buffer->vaddr + offset * PAGE_SIZE;
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ if (IS_ERR(vaddr))
+ return vaddr;
+
+ return vaddr + offset * PAGE_SIZE;
+ }
+
+ return NULL;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
void *ptr)
{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+ }
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
- void *vaddr;
struct ion_dma_buf_attachment *a;
- int ret = 0;
-
- /*
- * TODO: Move this elsewhere because we don't always need a vaddr
- */
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto unlock;
- }
- mutex_unlock(&buffer->lock);
- }

mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
direction);
}
-
-unlock:
mutex_unlock(&buffer->lock);
- return ret;
+
+ return 0;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
@@ -350,12 +352,6 @@ static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
struct ion_buffer *buffer = dmabuf->priv;
struct ion_dma_buf_attachment *a;

- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
- }
-
mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
--
2.7.3

On 12/24/18 12:19 AM, Qing Xia wrote:
> Per Google's user guide, userspace that needs to clean an ION
> buffer's cache should call ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync).
> We found that ion_dma_buf_begin_cpu_access/ion_dma_buf_end_cpu_access
> also map the buffer into the kernel (map_kernel), which is not
> necessary for cache maintenance. Worse, if userspace only needs to
> clean the cache, it calls ion_dma_buf_end_cpu_access through the
> dma-buf ioctl without a matching begin, so the buffer's kmap_cnt
> underflows to -1 and drivers can no longer get a valid kernel vaddr
> from dma_buf_kmap.
>
> Move the kernel mapping out of begin/end_cpu_access and into the
> dma_buf kmap/kunmap callbacks, where it is taken and released per
> call.
>
The problem is that this subtly violates how dma_buf is supposed
to work: all setup is supposed to happen in begin_cpu_access.
I agree that calling map_kernel each time isn't great, but I think
this needs to be worked out with dma_buf.
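
For illustration, the calling pattern the dma-buf core expects from
an in-kernel importer is roughly the following (a sketch against the
dma-buf API of this era; example_cpu_read() itself is hypothetical):

  #include <linux/dma-buf.h>
  #include <linux/dma-direction.h>

  /* Hypothetical importer-side CPU read: all setup belongs in
   * begin_cpu_access; kmap only exposes one page of a buffer that
   * begin_cpu_access has already made CPU-visible. */
  static int example_cpu_read(struct dma_buf *dmabuf, unsigned long pgoff)
  {
          void *vaddr;
          int ret;

          ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
          if (ret)
                  return ret;

          vaddr = dma_buf_kmap(dmabuf, pgoff);
          if (vaddr) {
                  /* ... read the page at vaddr ... */
                  dma_buf_kunmap(dmabuf, pgoff, vaddr);
          }

          return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
  }
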
Thanks,
Laura
On 2019/1/3 6:36, Laura Abbott wrote:
> On 12/24/18 12:19 AM, Qing Xia wrote:
>> [...]
>
> The problem is that this subtly violates how dma_buf is supposed
> to work: all setup is supposed to happen in begin_cpu_access.
> I agree that calling map_kernel each time isn't great, but I think
> this needs to be worked out with dma_buf.
>
> Thanks,
> Laura
>
Thanks for your explanation.