2021-07-12 12:19:27

by Yunsheng Lin

Subject: [PATCH rfc v3 2/4] page_pool: add interface for getting and setting pagecnt_bias

As suggested by Alexander, "A DMA mapping should be page
aligned anyway so the lower 12 bits would be reserved 0",
so it might make more sense to repurpose the lower 12 bits
of the dma address to store the pagecnt_bias for frag page
support in page pool.

As the newly added page_pool_get_pagecnt_bias() may be called
outside of the softirq context, annotate the access to
page->dma_addr[0] with READ_ONCE() and WRITE_ONCE().

And page_pool_pagecnt_bias_ptr() is added to allow atomically
updating the pagecnt_bias when a page is passed to the user.

The other three interfaces using page->dma_addr[0] are only
called in the softirq context during normal rx processing;
hopefully the barriers in rx processing will ensure the correct
ordering between getting and setting the pagecnt_bias.

Signed-off-by: Yunsheng Lin <[email protected]>
---
include/net/page_pool.h | 29 +++++++++++++++++++++++++++--
net/core/page_pool.c | 8 +++++++-
2 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 8d7744d..84cd972 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -200,17 +200,42 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
- dma_addr_t ret = page->dma_addr[0];
+ dma_addr_t ret = READ_ONCE(page->dma_addr[0]) & PAGE_MASK;
if (sizeof(dma_addr_t) > sizeof(unsigned long))
ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
return ret;
}

-static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
+static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
+ if (WARN_ON(addr & ~PAGE_MASK))
+ return false;
+
page->dma_addr[0] = addr;
if (sizeof(dma_addr_t) > sizeof(unsigned long))
page->dma_addr[1] = upper_32_bits(addr);
+
+ return true;
+}
+
+static inline int page_pool_get_pagecnt_bias(struct page *page)
+{
+ return READ_ONCE(page->dma_addr[0]) & ~PAGE_MASK;
+}
+
+static inline unsigned long *page_pool_pagecnt_bias_ptr(struct page *page)
+{
+ return page->dma_addr;
+}
+
+static inline void page_pool_set_pagecnt_bias(struct page *page, int bias)
+{
+ unsigned long dma_addr_0 = READ_ONCE(page->dma_addr[0]);
+
+ dma_addr_0 &= PAGE_MASK;
+ dma_addr_0 |= bias;
+
+ WRITE_ONCE(page->dma_addr[0], dma_addr_0);
}

static inline bool is_page_pool_compiled_in(void)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 78838c6..1abefc6 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -198,7 +198,13 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
if (dma_mapping_error(pool->p.dev, dma))
return false;

- page_pool_set_dma_addr(page, dma);
+ if (unlikely(!page_pool_set_dma_addr(page, dma))) {
+ dma_unmap_page_attrs(pool->p.dev, dma,
+ PAGE_SIZE << pool->p.order,
+ pool->p.dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ return false;
+ }

if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
--
2.7.4
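
A minimal standalone sketch of the packing scheme the commit message
describes, assuming 4K pages: a page-aligned DMA address has its low
12 bits clear, so the pagecnt_bias can be stored there. The helper
names below are illustrative only, not the kernel's API.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Pack a page-aligned address and a small bias into one word. */
static unsigned long pack(unsigned long dma_addr, int bias)
{
	assert((dma_addr & ~PAGE_MASK) == 0);		/* page aligned */
	assert(bias >= 0 && bias < (int)PAGE_SIZE);	/* fits in 12 bits */
	return dma_addr | (unsigned long)bias;
}

static unsigned long unpack_addr(unsigned long word)
{
	return word & PAGE_MASK;	/* mask off the bias bits */
}

static int unpack_bias(unsigned long word)
{
	return (int)(word & ~PAGE_MASK);	/* low 12 bits */
}

int main(void)
{
	unsigned long word = pack(0x7f32a000UL, 128);

	printf("addr=0x%lx bias=%d\n", unpack_addr(word), unpack_bias(word));
	return 0;
}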


2021-07-12 16:04:33

by Alexander Duyck

Subject: Re: [PATCH rfc v3 2/4] page_pool: add interface for getting and setting pagecnt_bias

On Mon, Jul 12, 2021 at 5:17 AM Yunsheng Lin <[email protected]> wrote:
>
> As suggested by Alexander, "A DMA mapping should be page
> aligned anyway so the lower 12 bits would be reserved 0",
> so it might make more sense to repurpose the lower 12 bits
> of the dma address to store the pagecnt_bias for frag page
> support in page pool.
>
> As the newly added page_pool_get_pagecnt_bias() may be called
> outside of the softirq context, annotate the access to
> page->dma_addr[0] with READ_ONCE() and WRITE_ONCE().
>
> And page_pool_pagecnt_bias_ptr() is added to allow atomically
> updating the pagecnt_bias when a page is passed to the user.
>
> The other three interfaces using page->dma_addr[0] are only
> called in the softirq context during normal rx processing;
> hopefully the barriers in rx processing will ensure the correct
> ordering between getting and setting the pagecnt_bias.
>
> Signed-off-by: Yunsheng Lin <[email protected]>
> ---
> include/net/page_pool.h | 29 +++++++++++++++++++++++++++--
> net/core/page_pool.c | 8 +++++++-
> 2 files changed, 34 insertions(+), 3 deletions(-)
>
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 8d7744d..84cd972 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -200,17 +200,42 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
>
> static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
> {
> - dma_addr_t ret = page->dma_addr[0];
> + dma_addr_t ret = READ_ONCE(page->dma_addr[0]) & PAGE_MASK;
> if (sizeof(dma_addr_t) > sizeof(unsigned long))
> ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
> return ret;
> }
>
> -static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> {
> + if (WARN_ON(addr & ~PAGE_MASK))
> + return false;
> +
> page->dma_addr[0] = addr;
> if (sizeof(dma_addr_t) > sizeof(unsigned long))
> page->dma_addr[1] = upper_32_bits(addr);
> +
> + return true;
> +}
> +

Rather than making this a part of the check here it might make more
sense to pull this out and perform the WARN_ON after the check for
dma_mapping_error.

Also it occurs to me that we only really have to do this in the case
where dma_addr_t is larger than the size of a long. Otherwise we could
just have the code split things so that dma_addr[0] is the dma_addr
and dma_addr[1] is our pagecnt_bias value in which case we could
probably just skip the check.
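
A rough sketch of that split (hypothetical, not part of the posted
patch; the config guards are my guess at how to express "dma_addr_t
fits in an unsigned long"):

/* Everything except 32-bit systems with 64-bit DMA addressing. */
#if !defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_64BIT)
static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
{
	page->dma_addr[0] = addr;	/* full address, no packing needed */
}

static inline int page_pool_get_pagecnt_bias(struct page *page)
{
	return (int)READ_ONCE(page->dma_addr[1]);
}

static inline void page_pool_set_pagecnt_bias(struct page *page, int bias)
{
	WRITE_ONCE(page->dma_addr[1], (unsigned long)bias);
}
#endif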

> +static inline int page_pool_get_pagecnt_bias(struct page *page)
> +{
> + return READ_ONCE(page->dma_addr[0]) & ~PAGE_MASK;
> +}
> +
> +static inline unsigned long *page_pool_pagecnt_bias_ptr(struct page *page)
> +{
> + return page->dma_addr;
> +}
> +
> +static inline void page_pool_set_pagecnt_bias(struct page *page, int bias)
> +{
> + unsigned long dma_addr_0 = READ_ONCE(page->dma_addr[0]);
> +
> + dma_addr_0 &= PAGE_MASK;
> + dma_addr_0 |= bias;
> +
> + WRITE_ONCE(page->dma_addr[0], dma_addr_0);
> }
>
> static inline bool is_page_pool_compiled_in(void)
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 78838c6..1abefc6 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -198,7 +198,13 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
> if (dma_mapping_error(pool->p.dev, dma))
> return false;
>

So instead of adding to the function below you could just add your
WARN_ON check here with the unmapping call.
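
I.e. something along these lines in page_pool_dma_map(), keeping
page_pool_set_dma_addr() a plain setter (sketch only):

	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	/* alignment check pulled out of page_pool_set_dma_addr() */
	if (WARN_ON(dma & ~PAGE_MASK)) {
		dma_unmap_page_attrs(pool->p.dev, dma,
				     PAGE_SIZE << pool->p.order,
				     pool->p.dma_dir,
				     DMA_ATTR_SKIP_CPU_SYNC);
		return false;
	}

	page_pool_set_dma_addr(page, dma);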

> - page_pool_set_dma_addr(page, dma);
> + if (unlikely(!page_pool_set_dma_addr(page, dma))) {
> + dma_unmap_page_attrs(pool->p.dev, dma,
> + PAGE_SIZE << pool->p.order,
> + pool->p.dma_dir,
> + DMA_ATTR_SKIP_CPU_SYNC);
> + return false;
> + }
>
> if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
> --
> 2.7.4
>

2021-07-12 17:22:53

by Jesper Dangaard Brouer

Subject: Re: [PATCH rfc v3 2/4] page_pool: add interface for getting and setting pagecnt_bias



On 12/07/2021 18.02, Alexander Duyck wrote:
> On Mon, Jul 12, 2021 at 5:17 AM Yunsheng Lin <[email protected]> wrote:
>>
>> As suggested by Alexander, "A DMA mapping should be page
>> aligned anyway so the lower 12 bits would be reserved 0",
>> so it might make more sense to repurpose the lower 12 bits
>> of the dma address to store the pagecnt_bias for frag page
>> support in page pool.
>>
>> As the newly added page_pool_get_pagecnt_bias() may be called
>> outside of the softirq context, annotate the access to
>> page->dma_addr[0] with READ_ONCE() and WRITE_ONCE().
>>
>> And page_pool_pagecnt_bias_ptr() is added to allow atomically
>> updating the pagecnt_bias when a page is passed to the user.
>>
>> The other three interfaces using page->dma_addr[0] are only
>> called in the softirq context during normal rx processing;
>> hopefully the barriers in rx processing will ensure the correct
>> ordering between getting and setting the pagecnt_bias.
>>
>> Signed-off-by: Yunsheng Lin <[email protected]>
>> ---
>> include/net/page_pool.h | 29 +++++++++++++++++++++++++++--
>> net/core/page_pool.c | 8 +++++++-
>> 2 files changed, 34 insertions(+), 3 deletions(-)
>>
>> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
>> index 8d7744d..84cd972 100644
>> --- a/include/net/page_pool.h
>> +++ b/include/net/page_pool.h
>> @@ -200,17 +200,42 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
>>
>> static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
>> {
>> - dma_addr_t ret = page->dma_addr[0];
>> + dma_addr_t ret = READ_ONCE(page->dma_addr[0]) & PAGE_MASK;
>> if (sizeof(dma_addr_t) > sizeof(unsigned long))
>> ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;
>> return ret;
>> }
>>
>> -static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
>> +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
>> {
>> + if (WARN_ON(addr & ~PAGE_MASK))
>> + return false;
>> +
>> page->dma_addr[0] = addr;
>> if (sizeof(dma_addr_t) > sizeof(unsigned long))
>> page->dma_addr[1] = upper_32_bits(addr);
>> +
>> + return true;
>> +}
>> +
>
> Rather than making this a part of the check here it might make more
> sense to pull this out and perform the WARN_ON after the check for
> dma_mapping_error.

I need to point out that I don't like WARN_ON and BUG_ON code in
fast-path code, because the compiler adds 'ud2' assembler instructions
that influence the instruction-cache fetching in the CPU. Yes, I have
seen a measurable impact from this before.


> Also it occurs to me that we only really have to do this in the case
> where dma_addr_t is larger than the size of a long. Otherwise we could
> just have the code split things so that dma_addr[0] is the dma_addr
> and dma_addr[1] is our pagecnt_bias value in which case we could
> probably just skip the check.

The dance to get 64-bit DMA addr on 32-bit systems is rather ugly and
confusing, sadly. We could take advantage of this, I just hope this
will not make it uglier.


>> +static inline int page_pool_get_pagecnt_bias(struct page *page)
>> +{
>> + return READ_ONCE(page->dma_addr[0]) & ~PAGE_MASK;
>> +}
>> +
>> +static inline unsigned long *page_pool_pagecnt_bias_ptr(struct page *page)
>> +{
>> + return page->dma_addr;
>> +}
>> +
>> +static inline void page_pool_set_pagecnt_bias(struct page *page, int bias)
>> +{
>> + unsigned long dma_addr_0 = READ_ONCE(page->dma_addr[0]);
>> +
>> + dma_addr_0 &= PAGE_MASK;
>> + dma_addr_0 |= bias;
>> +
>> + WRITE_ONCE(page->dma_addr[0], dma_addr_0);
>> }
>>
>> static inline bool is_page_pool_compiled_in(void)
>> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
>> index 78838c6..1abefc6 100644
>> --- a/net/core/page_pool.c
>> +++ b/net/core/page_pool.c
>> @@ -198,7 +198,13 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
>> if (dma_mapping_error(pool->p.dev, dma))
>> return false;
>>
>
> So instead of adding to the function below you could just add your
> WARN_ON check here with the unmapping call.
>
>> - page_pool_set_dma_addr(page, dma);
>> + if (unlikely(!page_pool_set_dma_addr(page, dma))) {
>> + dma_unmap_page_attrs(pool->p.dev, dma,
>> + PAGE_SIZE << pool->p.order,
>> + pool->p.dma_dir,
>> + DMA_ATTR_SKIP_CPU_SYNC);
>> + return false;
>> + }
>>
>> if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
>> page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
>> --
>> 2.7.4
>>
>

2021-07-12 18:09:17

by Ilias Apalodimas

Subject: Re: [PATCH rfc v3 2/4] page_pool: add interface for getting and setting pagecnt_bias

[...]
> > > +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> > > {
> > > + if (WARN_ON(addr & ~PAGE_MASK))
> > > + return false;
> > > +
> > > page->dma_addr[0] = addr;
> > > if (sizeof(dma_addr_t) > sizeof(unsigned long))
> > > page->dma_addr[1] = upper_32_bits(addr);
> > > +
> > > + return true;
> > > +}
> > > +
> >
> > Rather than making this a part of the check here it might make more
> > sense to pull this out and perform the WARN_ON after the check for
> > dma_mapping_error.
>
> I need to point out that I don't like WARN_ON and BUG_ON code in fast-path
> code, because the compiler adds 'ud2' assembler instructions that influence
> the instruction-cache fetching in the CPU. Yes, I have seen a measurable
> impact from this before.
>
>
> > Also it occurs to me that we only really have to do this in the case
> > where dma_addr_t is larger than the size of a long. Otherwise we could
> > just have the code split things so that dma_addr[0] is the dma_addr
> > and dma_addr[1] is our pagecnt_bias value in which case we could
> > probably just skip the check.
>
> The dance to get 64-bit DMA addr on 32-bit systems is rather ugly and
> confusing, sadly. We could take advantage of this, I just hope this will
> not make it uglier.

Note here that we can only use this because dma_addr is not aliased to
compound page anymore (after the initial page_pool recycling patchset).
We must keep this in mind if we ever restructure struct page.

Can we do something more radical for this? The 64/32 bit dance is only
there for 32 bit systems with 64 bit dma. Since the last time we asked
about this no one seemed to care about these, and I really doubt we'll get
an ethernet driver for them (that needs recycling....), can we *only* support
frag allocation and recycling for 'normal' systems? We could always just
re-purpose dma_addr[1] for those.

Regards
/Ilias

>
>
> > > +static inline int page_pool_get_pagecnt_bias(struct page *page)
> > > +{
> > > + return READ_ONCE(page->dma_addr[0]) & ~PAGE_MASK;
> > > +}
> > > +
> > > +static inline unsigned long *page_pool_pagecnt_bias_ptr(struct page *page)
> > > +{
> > > + return page->dma_addr;
> > > +}
> > > +
> > > +static inline void page_pool_set_pagecnt_bias(struct page *page, int bias)
> > > +{
> > > + unsigned long dma_addr_0 = READ_ONCE(page->dma_addr[0]);
> > > +
> > > + dma_addr_0 &= PAGE_MASK;
> > > + dma_addr_0 |= bias;
> > > +
> > > + WRITE_ONCE(page->dma_addr[0], dma_addr_0);
> > > }
> > >
> > > static inline bool is_page_pool_compiled_in(void)
> > > diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> > > index 78838c6..1abefc6 100644
> > > --- a/net/core/page_pool.c
> > > +++ b/net/core/page_pool.c
> > > @@ -198,7 +198,13 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
> > > if (dma_mapping_error(pool->p.dev, dma))
> > > return false;
> > >
> >
> > So instead of adding to the function below you could just add your
> > WARN_ON check here with the unmapping call.
> >
> > > - page_pool_set_dma_addr(page, dma);
> > > + if (unlikely(!page_pool_set_dma_addr(page, dma))) {
> > > + dma_unmap_page_attrs(pool->p.dev, dma,
> > > + PAGE_SIZE << pool->p.order,
> > > + pool->p.dma_dir,
> > > + DMA_ATTR_SKIP_CPU_SYNC);
> > > + return false;
> > > + }
> > >
> > > if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
> > > page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
> > > --
> > > 2.7.4
> > >
> >
>

2021-07-13 06:40:11

by Yunsheng Lin

Subject: Re: [PATCH rfc v3 2/4] page_pool: add interface for getting and setting pagecnt_bias

On 2021/7/13 2:08, Ilias Apalodimas wrote:
> [...]
>>>> +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
>>>> {
>>>> + if (WARN_ON(addr & ~PAGE_MASK))
>>>> + return false;
>>>> +
>>>> page->dma_addr[0] = addr;
>>>> if (sizeof(dma_addr_t) > sizeof(unsigned long))
>>>> page->dma_addr[1] = upper_32_bits(addr);
>>>> +
>>>> + return true;
>>>> +}
>>>> +
>>>
>>> Rather than making this a part of the check here it might make more
>>> sense to pull this out and perform the WARN_ON after the check for
>>> dma_mapping_error.
>>
>> I need to point out that I don't like WARN_ON and BUG_ON code in fast-path
>> code, because the compiler adds 'ud2' assembler instructions that influence
>> the instruction-cache fetching in the CPU. Yes, I have seen a measurable
>> impact from this before.
>>
>>
>>> Also it occurs to me that we only really have to do this in the case
>>> where dma_addr_t is larger than the size of a long. Otherwise we could
>>> just have the code split things so that dma_addr[0] is the dma_addr
>>> and dma_addr[1] is our pagecnt_bias value in which case we could
>>> probably just skip the check.
>>
>> The dance to get 64-bit DMA addr on 32-bit systems is rather ugly and
>> confusing, sadly. We could take advantage of this, I just hope this will
>> not make it uglier.
>
> Note here that we can only use this because dma_addr is not aliased to
> compound page anymore (after the initial page_pool recycling patchset).
> We must keep this in mind if we ever restructure struct page.
>
> Can we do something more radical for this? The 64/32 bit dance is only
> there for 32 bit systems with 64 bit dma. Since the last time we asked
> about this no one seemed to care about these, and I really doubt we'll get
> an ethernet driver for them (that needs recycling....), can we *only* support
> frag allocation and recycling for 'normal' systems? We could always just
> re-purpose dma_addr[1] for those.

Will define a macro for "sizeof(dma_addr_t) > sizeof(unsigned long)"
to decide whether to use dma_addr[1]; hopefully the compiler will
optimize out the unused code on a given system.
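
Something like this, with the name still up for discussion:

/* Illustrative name; a compile-time constant expression lets the
 * compiler discard the dead branch on systems that don't need
 * dma_addr[1] for the upper 32 bits. */
#define PAGE_POOL_DMA_ADDR_NEED_UPPER \
	(sizeof(dma_addr_t) > sizeof(unsigned long))

static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
{
	dma_addr_t ret = READ_ONCE(page->dma_addr[0]) & PAGE_MASK;

	if (PAGE_POOL_DMA_ADDR_NEED_UPPER)
		ret |= (dma_addr_t)page->dma_addr[1] << 16 << 16;

	return ret;
}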

>
> Regards
> /Ilias
>
>>
>>
>>>> +static inline int page_pool_get_pagecnt_bias(struct page *page)
>>>> +{
>>>> + return READ_ONCE(page->dma_addr[0]) & ~PAGE_MASK;
>>>> +}
>>>> +
>>>> +static inline unsigned long *page_pool_pagecnt_bias_ptr(struct page *page)
>>>> +{
>>>> + return page->dma_addr;
>>>> +}
>>>> +
>>>> +static inline void page_pool_set_pagecnt_bias(struct page *page, int bias)
>>>> +{
>>>> + unsigned long dma_addr_0 = READ_ONCE(page->dma_addr[0]);
>>>> +
>>>> + dma_addr_0 &= PAGE_MASK;
>>>> + dma_addr_0 |= bias;
>>>> +
>>>> + WRITE_ONCE(page->dma_addr[0], dma_addr_0);
>>>> }
>>>>
>>>> static inline bool is_page_pool_compiled_in(void)
>>>> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
>>>> index 78838c6..1abefc6 100644
>>>> --- a/net/core/page_pool.c
>>>> +++ b/net/core/page_pool.c
>>>> @@ -198,7 +198,13 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
>>>> if (dma_mapping_error(pool->p.dev, dma))
>>>> return false;
>>>>
>>>
>>> So instead of adding to the function below you could just add your
>>> WARN_ON check here with the unmapping call.

Ok.

>>>
>>>> - page_pool_set_dma_addr(page, dma);
>>>> + if (unlikely(!page_pool_set_dma_addr(page, dma))) {
>>>> + dma_unmap_page_attrs(pool->p.dev, dma,
>>>> + PAGE_SIZE << pool->p.order,
>>>> + pool->p.dma_dir,
>>>> + DMA_ATTR_SKIP_CPU_SYNC);
>>>> + return false;
>>>> + }
>>>>
>>>> if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
>>>> page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
>>>> --
>>>> 2.7.4
>>>>
>>>
>>
> .
>