From: Dave Chinner <[email protected]>
Start to clean up the shrinker code by factoring out the calculation
that determines how much work to do. This separates the calculation
from clamping and other adjustments that are done before the
shrinker work is run. Document the scan batch size calculation
better while we are there.
Also convert the calculation for the amount of work to be done to
use 64 bit logic so we don't have to keep jumping through hoops to
keep calculations within 32 bits on 32 bit systems.
Signed-off-by: Dave Chinner <[email protected]>
---
mm/vmscan.c | 97 ++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 70 insertions(+), 27 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a215d71d9d4b..2d39ec37c04d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -459,13 +459,68 @@ EXPORT_SYMBOL(unregister_shrinker);
#define SHRINK_BATCH 128
+/*
+ * Calculate the number of new objects to scan this time around. Return
+ * the work to be done. If there are freeable objects, return that number in
+ * @freeable_objects.
+ */
+static int64_t shrink_scan_count(struct shrink_control *shrinkctl,
+ struct shrinker *shrinker, int priority,
+ int64_t *freeable_objects)
+{
+ int64_t delta;
+ int64_t freeable;
+
+ freeable = shrinker->count_objects(shrinker, shrinkctl);
+ if (freeable == 0 || freeable == SHRINK_EMPTY)
+ return freeable;
+
+ if (shrinker->seeks) {
+ /*
+ * shrinker->seeks is a measure of how much IO is required to
+ * reinstantiate the object in memory. The default value is 2
+ * which is typical for a cold inode requiring a directory read
+ * and an inode read to re-instantiate.
+ *
+ * The scan batch size is defined by the shrinker priority, but
+ * to be able to bias the reclaim we increase the default batch
+ * size by 4. Hence we end up with a scan batch multiplier that
+ * scales like so:
+ *
+ * ->seeks scan batch multiplier
+ * 1 4.00x
+ * 2 2.00x
+ * 3 1.33x
+ * 4 1.00x
+ * 8 0.50x
+ *
+ * IOWs, the more seeks it takes to pull the item into cache,
+ * the smaller the reclaim scan batch. Hence we put more reclaim
+ * pressure on caches that are fast to repopulate and to keep a
+ * rough balance between caches that have different costs.
+ */
+ delta = freeable >> (priority - 2);
+ do_div(delta, shrinker->seeks);
+ } else {
+ /*
+ * These objects don't require any IO to create. Trim them
+ * aggressively under memory pressure to keep them from causing
+ * refetches in the IO caches.
+ */
+ delta = freeable / 2;
+ }
+
+ *freeable_objects = freeable;
+ return delta > 0 ? delta : 0;
+}
+
static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
struct shrinker *shrinker, int priority)
{
unsigned long freed = 0;
- unsigned long long delta;
long total_scan;
- long freeable;
+ int64_t freeable_objects = 0;
+ int64_t scan_count;
long nr;
long new_nr;
int nid = shrinkctl->nid;
@@ -476,9 +531,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
nid = 0;
- freeable = shrinker->count_objects(shrinker, shrinkctl);
- if (freeable == 0 || freeable == SHRINK_EMPTY)
- return freeable;
+ scan_count = shrink_scan_count(shrinkctl, shrinker, priority,
+ &freeable_objects);
+ if (scan_count == 0 || scan_count == SHRINK_EMPTY)
+ return scan_count;
/*
* copy the current shrinker scan count into a local variable
@@ -487,25 +543,11 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
*/
nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
- total_scan = nr;
- if (shrinker->seeks) {
- delta = freeable >> priority;
- delta *= 4;
- do_div(delta, shrinker->seeks);
- } else {
- /*
- * These objects don't require any IO to create. Trim
- * them aggressively under memory pressure to keep
- * them from causing refetches in the IO caches.
- */
- delta = freeable / 2;
- }
-
- total_scan += delta;
+ total_scan = nr + scan_count;
if (total_scan < 0) {
pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
shrinker->scan_objects, total_scan);
- total_scan = freeable;
+ total_scan = scan_count;
next_deferred = nr;
} else
next_deferred = total_scan;
@@ -522,19 +564,20 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
* Hence only allow the shrinker to scan the entire cache when
* a large delta change is calculated directly.
*/
- if (delta < freeable / 4)
- total_scan = min(total_scan, freeable / 2);
+ if (scan_count < freeable_objects / 4)
+ total_scan = min_t(long, total_scan, freeable_objects / 2);
/*
* Avoid risking looping forever due to too large nr value:
* never try to free more than twice the estimate number of
* freeable entries.
*/
- if (total_scan > freeable * 2)
- total_scan = freeable * 2;
+ if (total_scan > freeable_objects * 2)
+ total_scan = freeable_objects * 2;
trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
- freeable, delta, total_scan, priority);
+ freeable_objects, scan_count,
+ total_scan, priority);
/*
* If the shrinker can't run (e.g. due to gfp_mask constraints), then
@@ -559,7 +602,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
* possible.
*/
while (total_scan >= batch_size ||
- total_scan >= freeable) {
+ total_scan >= freeable_objects) {
unsigned long ret;
unsigned long nr_to_scan = min(batch_size, total_scan);
--
2.24.0.rc0
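As a quick sanity check on the refactoring above, the old and new delta
expressions agree for typical inputs; the new form just keeps two extra
low bits of freeable before dividing. A minimal userspace sketch
(illustration only, assuming DEF_PRIORITY == 12 and DEFAULT_SEEKS == 2;
not part of the patch):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		int64_t freeable = 100000;
		int priority = 12;	/* DEF_PRIORITY */
		int seeks = 2;		/* DEFAULT_SEEKS */

		/* old: delta = (freeable >> priority) * 4, then / seeks */
		int64_t old_delta = ((freeable >> priority) * 4) / seeks;

		/* new: delta = freeable >> (priority - 2), then / seeks */
		int64_t new_delta = (freeable >> (priority - 2)) / seeks;

		assert(old_delta == 48 && new_delta == 48);
		return 0;
	}

Both forms give 48 here; they can only differ by the rounding of those
two extra low bits.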
Hi Dave,
Thank you for the patch! Perhaps something to improve:
[auto build test WARNING on xfs-linux/for-next]
[also build test WARNING on v5.4-rc5 next-20191031]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]
url: https://github.com/0day-ci/linux/commits/Dave-Chinner/xfs-Lower-CIL-flush-limit-for-large-logs/20191102-153137
base: https://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git for-next
config: parisc-c3000_defconfig (attached as .config)
compiler: hppa-linux-gcc (GCC) 7.4.0
reproduce:
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# save the attached .config to linux build tree
GCC_VERSION=7.4.0 make.cross ARCH=parisc
If you fix the issue, kindly add following tag
Reported-by: kbuild test robot <[email protected]>
All warnings (new ones prefixed by >>):
In file included from ./arch/parisc/include/generated/asm/div64.h:1:0,
from include/linux/kernel.h:18,
from arch/parisc/include/asm/bug.h:5,
from include/linux/bug.h:5,
from include/linux/mmdebug.h:5,
from include/linux/mm.h:9,
from mm/vmscan.c:17:
mm/vmscan.c: In function 'shrink_scan_count':
include/asm-generic/div64.h:226:28: warning: comparison of distinct pointer types lacks a cast
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
^
>> mm/vmscan.c:502:3: note: in expansion of macro 'do_div'
do_div(delta, shrinker->seeks);
^~~~~~
vim +/do_div +502 mm/vmscan.c
460
461 /*
462 * Calculate the number of new objects to scan this time around. Return
463 * the work to be done. If there are freeable objects, return that number in
464 * @freeable_objects.
465 */
466 static int64_t shrink_scan_count(struct shrink_control *shrinkctl,
467 struct shrinker *shrinker, int priority,
468 int64_t *freeable_objects)
469 {
470 int64_t delta;
471 int64_t freeable;
472
473 freeable = shrinker->count_objects(shrinker, shrinkctl);
474 if (freeable == 0 || freeable == SHRINK_EMPTY)
475 return freeable;
476
477 if (shrinker->seeks) {
478 /*
479 * shrinker->seeks is a measure of how much IO is required to
480 * reinstantiate the object in memory. The default value is 2
481 * which is typical for a cold inode requiring a directory read
482 * and an inode read to re-instantiate.
483 *
484 * The scan batch size is defined by the shrinker priority, but
485 * to be able to bias the reclaim we increase the default batch
486 * size by 4. Hence we end up with a scan batch multiplier that
487 * scales like so:
488 *
489 * ->seeks scan batch multiplier
490 * 1 4.00x
491 * 2 2.00x
492 * 3 1.33x
493 * 4 1.00x
494 * 8 0.50x
495 *
496 * IOWs, the more seeks it takes to pull the item into cache,
497 * the smaller the reclaim scan batch. Hence we put more reclaim
498 * pressure on caches that are fast to repopulate and to keep a
499 * rough balance between caches that have different costs.
500 */
501 delta = freeable >> (priority - 2);
> 502 do_div(delta, shrinker->seeks);
503 } else {
504 /*
505 * These objects don't require any IO to create. Trim them
506 * aggressively under memory pressure to keep them from causing
507 * refetches in the IO caches.
508 */
509 delta = freeable / 2;
510 }
511
512 *freeable_objects = freeable;
513 return delta > 0 ? delta : 0;
514 }
515
---
0-DAY kernel test infrastructure Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all Intel Corporation
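For context on the warning itself: do_div() from
include/asm-generic/div64.h modifies its dividend in place and expects
it to be an unsigned 64-bit lvalue, while delta above is int64_t, hence
the distinct-pointer-types complaint on 32-bit parisc. One way it could
be resolved is with the signed 64-bit helper from <linux/math64.h> - a
sketch of a possible fix, not the change that was actually posted:

	/* hypothetical variant of the ->seeks branch in shrink_scan_count() */
	if (shrinker->seeks) {
		delta = freeable >> (priority - 2);
		/* div64_s64() takes s64 operands, avoiding the do_div() type check */
		delta = div64_s64(delta, shrinker->seeks);
	} else {
		delta = freeable / 2;
	}

(mm/vmscan.c would also need #include <linux/math64.h> if it does not
already pull it in.)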
On Fri, Nov 01, 2019 at 10:46:01AM +1100, Dave Chinner wrote:
> From: Dave Chinner <[email protected]>
>
> Start to clean up the shrinker code by factoring out the calculation
> that determines how much work to do. This separates the calculation
> from clamping and other adjustments that are done before the
> shrinker work is run. Document the scan batch size calculation
> better while we are there.
>
> Also convert the calculation for the amount of work to be done to
> use 64 bit logic so we don't have to keep jumping through hoops to
> keep calculations within 32 bits on 32 bit systems.
>
> Signed-off-by: Dave Chinner <[email protected]>
> ---
I assume the kbuild warning thing will be fixed up...
> mm/vmscan.c | 97 ++++++++++++++++++++++++++++++++++++++---------------
> 1 file changed, 70 insertions(+), 27 deletions(-)
>
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index a215d71d9d4b..2d39ec37c04d 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -459,13 +459,68 @@ EXPORT_SYMBOL(unregister_shrinker);
>
> #define SHRINK_BATCH 128
>
> +/*
> + * Calculate the number of new objects to scan this time around. Return
> + * the work to be done. If there are freeable objects, return that number in
> + * @freeable_objects.
> + */
> +static int64_t shrink_scan_count(struct shrink_control *shrinkctl,
> + struct shrinker *shrinker, int priority,
> + int64_t *freeable_objects)
> +{
> + int64_t delta;
> + int64_t freeable;
> +
> + freeable = shrinker->count_objects(shrinker, shrinkctl);
> + if (freeable == 0 || freeable == SHRINK_EMPTY)
> + return freeable;
> +
> + if (shrinker->seeks) {
> + /*
> + * shrinker->seeks is a measure of how much IO is required to
> + * reinstantiate the object in memory. The default value is 2
> + * which is typical for a cold inode requiring a directory read
> + * and an inode read to re-instantiate.
> + *
> + * The scan batch size is defined by the shrinker priority, but
> + * to be able to bias the reclaim we increase the default batch
> + * size by 4. Hence we end up with a scan batch multiplier that
> + * scales like so:
> + *
> + * ->seeks scan batch multiplier
> + * 1 4.00x
> + * 2 2.00x
> + * 3 1.33x
> + * 4 1.00x
> + * 8 0.50x
> + *
> + * IOWs, the more seeks it takes to pull the item into cache,
> + * the smaller the reclaim scan batch. Hence we put more reclaim
> + * pressure on caches that are fast to repopulate and to keep a
> + * rough balance between caches that have different costs.
> + */
> + delta = freeable >> (priority - 2);
Does anything prevent priority < 2 here?
> + do_div(delta, shrinker->seeks);
> + } else {
> + /*
> + * These objects don't require any IO to create. Trim them
> + * aggressively under memory pressure to keep them from causing
> + * refetches in the IO caches.
> + */
> + delta = freeable / 2;
> + }
> +
> + *freeable_objects = freeable;
> + return delta > 0 ? delta : 0;
> +}
> +
> static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> struct shrinker *shrinker, int priority)
> {
> unsigned long freed = 0;
> - unsigned long long delta;
> long total_scan;
> - long freeable;
> + int64_t freeable_objects = 0;
> + int64_t scan_count;
> long nr;
> long new_nr;
> int nid = shrinkctl->nid;
...
> @@ -487,25 +543,11 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> */
> nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
>
> - total_scan = nr;
> - if (shrinker->seeks) {
> - delta = freeable >> priority;
> - delta *= 4;
> - do_div(delta, shrinker->seeks);
> - } else {
> - /*
> - * These objects don't require any IO to create. Trim
> - * them aggressively under memory pressure to keep
> - * them from causing refetches in the IO caches.
> - */
> - delta = freeable / 2;
> - }
> -
> - total_scan += delta;
> + total_scan = nr + scan_count;
> if (total_scan < 0) {
> pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
> shrinker->scan_objects, total_scan);
> - total_scan = freeable;
> + total_scan = scan_count;
Same question as before: why the change in assignment? freeable was the
->count_objects() return value, which is now stored in freeable_objects.
FWIW, the change seems to make sense in that it just factors out the
deferred count, but it's not clear if it's intentional...
Brian
> next_deferred = nr;
> } else
> next_deferred = total_scan;
> @@ -522,19 +564,20 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> * Hence only allow the shrinker to scan the entire cache when
> * a large delta change is calculated directly.
> */
> - if (delta < freeable / 4)
> - total_scan = min(total_scan, freeable / 2);
> + if (scan_count < freeable_objects / 4)
> + total_scan = min_t(long, total_scan, freeable_objects / 2);
>
> /*
> * Avoid risking looping forever due to too large nr value:
> * never try to free more than twice the estimate number of
> * freeable entries.
> */
> - if (total_scan > freeable * 2)
> - total_scan = freeable * 2;
> + if (total_scan > freeable_objects * 2)
> + total_scan = freeable_objects * 2;
>
> trace_mm_shrink_slab_start(shrinker, shrinkctl, nr,
> - freeable, delta, total_scan, priority);
> + freeable_objects, scan_count,
> + total_scan, priority);
>
> /*
> * If the shrinker can't run (e.g. due to gfp_mask constraints), then
> @@ -559,7 +602,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
> * possible.
> */
> while (total_scan >= batch_size ||
> - total_scan >= freeable) {
> + total_scan >= freeable_objects) {
> unsigned long ret;
> unsigned long nr_to_scan = min(batch_size, total_scan);
>
> --
> 2.24.0.rc0
>
On Mon, Nov 04, 2019 at 10:29:39AM -0500, Brian Foster wrote:
> On Fri, Nov 01, 2019 at 10:46:01AM +1100, Dave Chinner wrote:
> > From: Dave Chinner <[email protected]>
> >
> > Start to clean up the shrinker code by factoring out the calculation
> > that determines how much work to do. This separates the calculation
> > from clamping and other adjustments that are done before the
> > shrinker work is run. Document the scan batch size calculation
> > better while we are there.
> >
> > Also convert the calculation for the amount of work to be done to
> > use 64 bit logic so we don't have to keep jumping through hoops to
> > keep calculations within 32 bits on 32 bit systems.
> >
> > Signed-off-by: Dave Chinner <[email protected]>
> > ---
>
> I assume the kbuild warning thing will be fixed up...
>
> > mm/vmscan.c | 97 ++++++++++++++++++++++++++++++++++++++---------------
> > 1 file changed, 70 insertions(+), 27 deletions(-)
> >
> > diff --git a/mm/vmscan.c b/mm/vmscan.c
> > index a215d71d9d4b..2d39ec37c04d 100644
> > --- a/mm/vmscan.c
> > +++ b/mm/vmscan.c
> > @@ -459,13 +459,68 @@ EXPORT_SYMBOL(unregister_shrinker);
> >
> > #define SHRINK_BATCH 128
> >
> > +/*
> > + * Calculate the number of new objects to scan this time around. Return
> > + * the work to be done. If there are freeable objects, return that number in
> > + * @freeable_objects.
> > + */
> > +static int64_t shrink_scan_count(struct shrink_control *shrinkctl,
> > + struct shrinker *shrinker, int priority,
> > + int64_t *freeable_objects)
> > +{
> > + int64_t delta;
> > + int64_t freeable;
> > +
> > + freeable = shrinker->count_objects(shrinker, shrinkctl);
> > + if (freeable == 0 || freeable == SHRINK_EMPTY)
> > + return freeable;
> > +
> > + if (shrinker->seeks) {
> > + /*
> > + * shrinker->seeks is a measure of how much IO is required to
> > + * reinstantiate the object in memory. The default value is 2
> > + * which is typical for a cold inode requiring a directory read
> > + * and an inode read to re-instantiate.
> > + *
> > + * The scan batch size is defined by the shrinker priority, but
> > + * to be able to bias the reclaim we increase the default batch
> > > + * size by 4. Hence we end up with a scan batch multiplier that
> > + * scales like so:
> > + *
> > + * ->seeks scan batch multiplier
> > + * 1 4.00x
> > + * 2 2.00x
> > + * 3 1.33x
> > + * 4 1.00x
> > + * 8 0.50x
> > + *
> > + * IOWs, the more seeks it takes to pull the item into cache,
> > + * the smaller the reclaim scan batch. Hence we put more reclaim
> > + * pressure on caches that are fast to repopulate and to keep a
> > + * rough balance between caches that have different costs.
> > + */
> > + delta = freeable >> (priority - 2);
>
> Does anything prevent priority < 2 here?
Nope. I regularly see priority 1 here when the OOM killer is about
to strike. Doesn't appear to have caused any problems - the scan
counts have all come out correct (i.e. ends up as a >> 0) according
to the tracing, but I'll fix this up to avoid hitting this.
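A minimal guard along those lines might look like the following - a
sketch of the kind of clamp being alluded to, not code from this
series:

	/*
	 * Hypothetical clamp: never shift by a negative amount when the
	 * reclaim priority drops below 2; priority 0 and 1 then simply
	 * scan from the full freeable count.
	 */
	delta = freeable >> (priority >= 2 ? priority - 2 : 0);
	do_div(delta, shrinker->seeks);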
>
> > - delta = freeable >> priority;
> > - delta *= 4;
> > - do_div(delta, shrinker->seeks);
> > - } else {
> > - /*
> > - * These objects don't require any IO to create. Trim
> > - * them aggressively under memory pressure to keep
> > - * them from causing refetches in the IO caches.
> > - */
> > - delta = freeable / 2;
> > - }
> > -
> > - total_scan += delta;
> > + total_scan = nr + scan_count;
> > if (total_scan < 0) {
> > pr_err("shrink_slab: %pS negative objects to delete nr=%ld\n",
> > shrinker->scan_objects, total_scan);
> > - total_scan = freeable;
> > + total_scan = scan_count;
>
> Same question as before: why the change in assignment? freeable was the
> ->count_objects() return value, which is now stored in freeable_objects.
we don't want to try to free the entire cache on a 64-bit integer
overflow. scan_count is the work we calculated we need to do this
shrinker invocation, so if we overflow because of other factors then
we should just do the work we need to do in this scan.
> FWIW, the change seems to make sense in that it just factors out the
> deferred count, but it's not clear if it's intentional...
It was intentional.
-Dave.
--
Dave Chinner
[email protected]