Hi all,

Today's linux-next merge of the net-next tree got a conflict in:

  net/core/page_pool.c

between commit:

  4321de4497b2 ("page_pool: check for DMA sync shortcut earlier")

from the dma-mapping tree and commit:

  ef9226cd56b7 ("page_pool: constify some read-only function arguments")

from the net-next tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your tree
is submitted for merging. You may also want to consider cooperating
with the maintainer of the conflicting tree to minimise any particularly
complex conflicts.

--
Cheers,
Stephen Rothwell

diff --cc net/core/page_pool.c
index 4f9d1bd7f4d1,8bcc7014a61a..000000000000
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@@ -398,26 -384,16 +399,26 @@@ static struct page *__page_pool_get_cac
  	return page;
  }
  
 -static void page_pool_dma_sync_for_device(const struct page_pool *pool,
 -					  const struct page *page,
 -					  unsigned int dma_sync_size)
 +static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
- 					    struct page *page,
++					    const struct page *page,
 +					    u32 dma_sync_size)
  {
 +#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
  	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
  
  	dma_sync_size = min(dma_sync_size, pool->p.max_len);
 -	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
 -					 pool->p.offset, dma_sync_size,
 -					 pool->p.dma_dir);
 +	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
 +				     dma_sync_size, pool->p.dma_dir);
 +#endif
 +}
 +
 +static __always_inline void
 +page_pool_dma_sync_for_device(const struct page_pool *pool,
- 			      struct page *page,
++			      const struct page *page,
 +			      u32 dma_sync_size)
 +{
 +	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
 +		__page_pool_dma_sync_for_device(pool, page, dma_sync_size);
  }
  
  static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
@@@ -708,10 -688,11 +710,9 @@@ __page_pool_put_page(struct page_pool *
  	if (likely(__page_pool_page_can_be_recycled(page))) {
  		/* Read barrier done in page_ref_count / READ_ONCE */
  
 -		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
 -			page_pool_dma_sync_for_device(pool, page,
 -						      dma_sync_size);
 +		page_pool_dma_sync_for_device(pool, page, dma_sync_size);
  
- 		if (allow_direct && in_softirq() &&
- 		    page_pool_recycle_in_cache(page, pool))
+ 		if (allow_direct && page_pool_recycle_in_cache(page, pool))
  			return NULL;
  
  		/* Page found as candidate for recycling */
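
For anyone who finds the three-way diff --cc notation above hard to read,
the resolution nets out as below. This is a sketch reconstructed from the
result column of the diff, not an authoritative copy of the merged
net/core/page_pool.c: it keeps the dma-mapping tree's split into an
#ifdef'd helper plus an __always_inline wrapper, applies net-next's const
qualifier to the struct page argument of both functions, and moves the
sync decision into the wrapper.

/* Sketch of the merged result, reconstructed from the diff above
 * (kernel-code excerpt, not a standalone program).
 */
static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
					    const struct page *page,
					    u32 dma_sync_size)
{
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	/* Never sync beyond what the pool was configured for. */
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
				     dma_sync_size, pool->p.dma_dir);
#endif
}

static __always_inline void
page_pool_dma_sync_for_device(const struct page_pool *pool,
			      const struct page *page,
			      u32 dma_sync_size)
{
	/* The "DMA sync shortcut" of 4321de4497b2: only sync when the
	 * pool requests it and the device actually needs it.
	 */
	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
		__page_pool_dma_sync_for_device(pool, page, dma_sync_size);
}

At the __page_pool_put_page() call site, an unconditional call to the
wrapper then replaces the old PP_FLAG_DMA_SYNC_DEV test, since the
shortcut decision now lives inside the wrapper.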
Hi all,

On Thu, 9 May 2024 11:53:07 +1000 Stephen Rothwell <[email protected]> wrote:
>
> Today's linux-next merge of the net-next tree got a conflict in:
>
>   net/core/page_pool.c
>
> between commit:
>
>   4321de4497b2 ("page_pool: check for DMA sync shortcut earlier")
>
> from the dma-mapping tree and commit:
>
>   ef9226cd56b7 ("page_pool: constify some read-only function arguments")
>
> from the net-next tree.
>
> I fixed it up (see below) and can carry the fix as necessary. This
> is now fixed as far as linux-next is concerned, but any non-trivial
> conflicts should be mentioned to your upstream maintainer when your tree
> is submitted for merging. You may also want to consider cooperating
> with the maintainer of the conflicting tree to minimise any particularly
> complex conflicts.
>
>
> diff --cc net/core/page_pool.c
> index 4f9d1bd7f4d1,8bcc7014a61a..000000000000
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@@ -398,26 -384,16 +399,26 @@@ static struct page *__page_pool_get_cac
>   	return page;
>   }
>   
>  -static void page_pool_dma_sync_for_device(const struct page_pool *pool,
>  -					  const struct page *page,
>  -					  unsigned int dma_sync_size)
>  +static void __page_pool_dma_sync_for_device(const struct page_pool *pool,
> - 					    struct page *page,
> ++					    const struct page *page,
>  +					    u32 dma_sync_size)
>   {
>  +#if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)
>   	dma_addr_t dma_addr = page_pool_get_dma_addr(page);
>   
>   	dma_sync_size = min(dma_sync_size, pool->p.max_len);
>  -	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
>  -					 pool->p.offset, dma_sync_size,
>  -					 pool->p.dma_dir);
>  +	__dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset,
>  +				     dma_sync_size, pool->p.dma_dir);
>  +#endif
>  +}
>  +
>  +static __always_inline void
>  +page_pool_dma_sync_for_device(const struct page_pool *pool,
> - 			      struct page *page,
> ++			      const struct page *page,
>  +			      u32 dma_sync_size)
>  +{
>  +	if (pool->dma_sync && dma_dev_need_sync(pool->p.dev))
>  +		__page_pool_dma_sync_for_device(pool, page, dma_sync_size);
>   }
>   
>   static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
> @@@ -708,10 -688,11 +710,9 @@@ __page_pool_put_page(struct page_pool *
>   	if (likely(__page_pool_page_can_be_recycled(page))) {
>   		/* Read barrier done in page_ref_count / READ_ONCE */
>   
>  -		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
>  -			page_pool_dma_sync_for_device(pool, page,
>  -						      dma_sync_size);
>  +		page_pool_dma_sync_for_device(pool, page, dma_sync_size);
>   
> - 		if (allow_direct && in_softirq() &&
> - 		    page_pool_recycle_in_cache(page, pool))
> + 		if (allow_direct && page_pool_recycle_in_cache(page, pool))
>   			return NULL;
>   
>   		/* Page found as candidate for recycling */

This is now a conflict between the dma-mapping tree and Linus' tree.

--
Cheers,
Stephen Rothwell