Currently, in kfree_rcu_shrink_scan(), drain_page_cache() is executed
before kfree_rcu_monitor() in order to drain the page cache. However,
if a bnode's ->gp_snap grace period has already elapsed,
kvfree_rcu_bulk() will refill the page cache from within
kfree_rcu_monitor(), undoing the drain. This commit adds a check of
krcp->backoff_page_cache_fill to put_cached_bnode(): if that flag is
set, the page cache is prevented from growing. It also makes
fill_page_cache_func() start its loop at krcp->nr_bkv_objs, so that
when krcp->nr_bkv_objs is already greater than or equal to nr_pages,
no page is allocated, avoiding unnecessary __get_free_page() calls.
Signed-off-by: Zqiang <[email protected]>
---
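
For review convenience, a sketch of the helper after this change (the
llist_add()/WRITE_ONCE() tail is the pre-existing body from
kernel/rcu/tree.c at this baseline and is not modified here):

static inline bool
put_cached_bnode(struct kfree_rcu_cpu *krcp,
		 struct kvfree_rcu_bulk_data *bnode)
{
	// New: the shrinker has signaled memory pressure, so refuse
	// to grow the per-CPU page cache any further.
	if (atomic_read(&krcp->backoff_page_cache_fill))
		return false;

	// Check the limit.
	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
		return false;

	// Pre-existing path: cache the page and bump the count.
	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
	return true;
}
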
kernel/rcu/tree.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
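
And the refill side, abbreviated (the allocation and push logic is the
pre-existing loop body from kernel/rcu/tree.c, shown only to
illustrate the new start index):

static void fill_page_cache_func(struct work_struct *work)
{
	...
	nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
		1 : rcu_min_cached_objs;

	// New: start from the number of pages already cached, so a
	// cache already holding nr_pages or more pages makes no
	// __get_free_page() calls at all.
	for (i = krcp->nr_bkv_objs; i < nr_pages; i++) {
		bnode = (struct kvfree_rcu_bulk_data *)
			__get_free_page(GFP_KERNEL | __GFP_NORETRY |
					__GFP_NOMEMALLOC | __GFP_NOWARN);
		if (!bnode)
			break;

		raw_spin_lock_irqsave(&krcp->lock, flags);
		pushed = put_cached_bnode(krcp, bnode);
		raw_spin_unlock_irqrestore(&krcp->lock, flags);

		if (!pushed) {
			free_page((unsigned long) bnode);
			break;
		}
	}
	...
}
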
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 41daae3239b5..d5639025bd1a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2908,6 +2908,8 @@ static inline bool
put_cached_bnode(struct kfree_rcu_cpu *krcp,
struct kvfree_rcu_bulk_data *bnode)
{
+ if (atomic_read(&krcp->backoff_page_cache_fill))
+ return false;
// Check the limit.
if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
return false;
@@ -3223,7 +3225,7 @@ static void fill_page_cache_func(struct work_struct *work)
nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
1 : rcu_min_cached_objs;
- for (i = 0; i < nr_pages; i++) {
+ for (i = krcp->nr_bkv_objs; i < nr_pages; i++) {
bnode = (struct kvfree_rcu_bulk_data *)
__get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
--
2.32.0
On Tue, Apr 11, 2023 at 10:27:33PM +0800, Zqiang wrote:
> Currently, in kfree_rcu_shrink_scan(), drain_page_cache() is executed
> before kfree_rcu_monitor() in order to drain the page cache. However,
> if a bnode's ->gp_snap grace period has already elapsed,
> kvfree_rcu_bulk() will refill the page cache from within
> kfree_rcu_monitor(), undoing the drain. This commit adds a check of
> krcp->backoff_page_cache_fill to put_cached_bnode(): if that flag is
> set, the page cache is prevented from growing. It also makes
> fill_page_cache_func() start its loop at krcp->nr_bkv_objs, so that
> when krcp->nr_bkv_objs is already greater than or equal to nr_pages,
> no page is allocated, avoiding unnecessary __get_free_page() calls.
>
> Signed-off-by: Zqiang <[email protected]>
> ---
> kernel/rcu/tree.c | 4 +++-
> 1 file changed, 3 insertions(+), 1 deletion(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 41daae3239b5..d5639025bd1a 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -2908,6 +2908,8 @@ static inline bool
> put_cached_bnode(struct kfree_rcu_cpu *krcp,
> struct kvfree_rcu_bulk_data *bnode)
> {
> + if (atomic_read(&krcp->backoff_page_cache_fill))
> + return false;
This one is still missing the logic that allows that cache to hold
one page during low memory? Or is that being handled somewhere else?
Thanx, Paul
> // Check the limit.
> if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
> return false;
> @@ -3223,7 +3225,7 @@ static void fill_page_cache_func(struct work_struct *work)
> nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
> 1 : rcu_min_cached_objs;
>
> - for (i = 0; i < nr_pages; i++) {
> + for (i = krcp->nr_bkv_objs; i < nr_pages; i++) {
> bnode = (struct kvfree_rcu_bulk_data *)
> __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
>
> --
> 2.32.0

> > @@ -2908,6 +2908,8 @@ static inline bool
> > put_cached_bnode(struct kfree_rcu_cpu *krcp,
> > struct kvfree_rcu_bulk_data *bnode)
> > {
> > + if (atomic_read(&krcp->backoff_page_cache_fill))
> > + return false;
> This one is still missing the logic that allows that cache to hold
> one page during low memory? Or is that being handled somewhere else?
>
Sorry, please ignore this patch; I will resend as suggested by Ulad.
Thanks for the review.
>
> Thanx, Paul