When using LZ4 multi compression streams for zram swap, we found
page allocation failure messages while running system tests.
This happened more than once (2 - 5 times per test), and some of
the failures kept recurring for order-3 allocations.

To set up the private data for parallel compression, we have to
call kzalloc() with order 2/3 at runtime (lzo/lz4). But if no
order 2/3 memory is available at that moment, the page allocation
fails. This patch falls back to vmalloc() when kmalloc() fails,
which prevents the page allocation failure warning.

With this change we no longer see the warning message during the
tests, and it also reduces process startup latency by about
60-120ms in each case.
For reference a call trace :
Binder_1: page allocation failure: order:3, mode:0x10c0d0
CPU: 0 PID: 424 Comm: Binder_1 Tainted: GW 3.10.49-perf-g991d02b-dirty #20
Call trace:
[<ffffffc0002069c8>] dump_backtrace+0x0/0x270
[<ffffffc000206c48>] show_stack+0x10/0x1c
[<ffffffc000cb51c8>] dump_stack+0x1c/0x28
[<ffffffc0002bbfc8>] warn_alloc_failed+0xfc/0x11c
[<ffffffc0002bf518>] __alloc_pages_nodemask+0x724/0x7f0
[<ffffffc0002bf5f8>] __get_free_pages+0x14/0x5c
[<ffffffc0002ed6a4>] kmalloc_order_trace+0x38/0xd8
[<ffffffc0005d9738>] zcomp_lz4_create+0x2c/0x38
[<ffffffc0005d78f0>] zcomp_strm_alloc+0x34/0x78
[<ffffffc0005d7a58>] zcomp_strm_multi_find+0x124/0x1ec
[<ffffffc0005d7c14>] zcomp_strm_find+0xc/0x18
[<ffffffc0005d8fa0>] zram_bvec_rw+0x2fc/0x780
[<ffffffc0005d9680>] zram_make_request+0x25c/0x2d4
[<ffffffc00040f8ac>] generic_make_request+0x80/0xbc
[<ffffffc00040f98c>] submit_bio+0xa4/0x15c
[<ffffffc0002e8bb0>] __swap_writepage+0x218/0x230
[<ffffffc0002e8c04>] swap_writepage+0x3c/0x4c
[<ffffffc0002c7384>] shrink_page_list+0x51c/0x8d0
[<ffffffc0002c7e88>] shrink_inactive_list+0x3f8/0x60c
[<ffffffc0002c86c8>] shrink_lruvec+0x33c/0x4cc
[<ffffffc0002c8894>] shrink_zone+0x3c/0x100
[<ffffffc0002c8c10>] try_to_free_pages+0x2b8/0x54c
[<ffffffc0002bf308>] __alloc_pages_nodemask+0x514/0x7f0
[<ffffffc0002bf5f8>] __get_free_pages+0x14/0x5c
[<ffffffc0003446cc>] proc_info_read+0x50/0xe4
[<ffffffc0002f5204>] vfs_read+0xa0/0x12c
[<ffffffc0002f59c8>] SyS_read+0x44/0x74
DMA: 3397*4kB (MC) 26*8kB (RC) 0*16kB 0*32kB 0*64kB 0*128kB 0*256kB
0*512kB 0*1024kB 0*2048kB 0*4096kB = 13796kB
Signed-off-by: Kyeongdon Kim <[email protected]>
---
drivers/block/zram/zcomp_lz4.c | 12 ++++++++++--
drivers/block/zram/zcomp_lzo.c | 12 ++++++++++--
2 files changed, 20 insertions(+), 4 deletions(-)
diff --git a/drivers/block/zram/zcomp_lz4.c b/drivers/block/zram/zcomp_lz4.c
index f2afb7e..0cc4799 100644
--- a/drivers/block/zram/zcomp_lz4.c
+++ b/drivers/block/zram/zcomp_lz4.c
@@ -10,17 +10,25 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lz4.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lz4.h"
static void *zcomp_lz4_create(void)
{
- return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ ret = kzalloc(LZ4_MEM_COMPRESS,
+ __GFP_NORETRY|__GFP_NOWARN|__GFP_NOMEMALLOC);
+ if (!ret)
+ ret = vzalloc(LZ4_MEM_COMPRESS);
+ return ret;
}
static void zcomp_lz4_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
diff --git a/drivers/block/zram/zcomp_lzo.c b/drivers/block/zram/zcomp_lzo.c
index da1bc47..59b8aa4 100644
--- a/drivers/block/zram/zcomp_lzo.c
+++ b/drivers/block/zram/zcomp_lzo.c
@@ -10,17 +10,25 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lzo.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
#include "zcomp_lzo.h"
static void *lzo_create(void)
{
- return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ void *ret;
+
+ ret = kzalloc(LZO1X_MEM_COMPRESS,
+ __GFP_NORETRY|__GFP_NOWARN|__GFP_NOMEMALLOC);
+ if (!ret)
+ ret = vzalloc(LZO1X_MEM_COMPRESS);
+ return ret;
}
static void lzo_destroy(void *private)
{
- kfree(private);
+ kvfree(private);
}
static int lzo_compress(const unsigned char *src, unsigned char *dst,
--
1.8.2
On Mon, Nov 23, 2015 at 03:21:15PM +0900, Kyeongdon Kim wrote:
> When using LZ4 multi compression streams for zram swap, we found
> page allocation failure messages while running system tests.
> This happened more than once (2 - 5 times per test), and some of
> the failures kept recurring for order-3 allocations.
>
> To set up the private data for parallel compression, we have to
> call kzalloc() with order 2/3 at runtime (lzo/lz4). But if no
> order 2/3 memory is available at that moment, the page allocation
> fails. This patch falls back to vmalloc() when kmalloc() fails,
> which prevents the page allocation failure warning.
>
> With this change we no longer see the warning message during the
> tests, and it also reduces process startup latency by about
> 60-120ms in each case.
>
> For reference a call trace :
>
> Binder_1: page allocation failure: order:3, mode:0x10c0d0
> CPU: 0 PID: 424 Comm: Binder_1 Tainted: GW 3.10.49-perf-g991d02b-dirty #20
> Call trace:
> [<ffffffc0002069c8>] dump_backtrace+0x0/0x270
> [<ffffffc000206c48>] show_stack+0x10/0x1c
> [<ffffffc000cb51c8>] dump_stack+0x1c/0x28
> [<ffffffc0002bbfc8>] warn_alloc_failed+0xfc/0x11c
> [<ffffffc0002bf518>] __alloc_pages_nodemask+0x724/0x7f0
> [<ffffffc0002bf5f8>] __get_free_pages+0x14/0x5c
> [<ffffffc0002ed6a4>] kmalloc_order_trace+0x38/0xd8
> [<ffffffc0005d9738>] zcomp_lz4_create+0x2c/0x38
> [<ffffffc0005d78f0>] zcomp_strm_alloc+0x34/0x78
> [<ffffffc0005d7a58>] zcomp_strm_multi_find+0x124/0x1ec
> [<ffffffc0005d7c14>] zcomp_strm_find+0xc/0x18
> [<ffffffc0005d8fa0>] zram_bvec_rw+0x2fc/0x780
> [<ffffffc0005d9680>] zram_make_request+0x25c/0x2d4
> [<ffffffc00040f8ac>] generic_make_request+0x80/0xbc
> [<ffffffc00040f98c>] submit_bio+0xa4/0x15c
> [<ffffffc0002e8bb0>] __swap_writepage+0x218/0x230
> [<ffffffc0002e8c04>] swap_writepage+0x3c/0x4c
> [<ffffffc0002c7384>] shrink_page_list+0x51c/0x8d0
> [<ffffffc0002c7e88>] shrink_inactive_list+0x3f8/0x60c
> [<ffffffc0002c86c8>] shrink_lruvec+0x33c/0x4cc
> [<ffffffc0002c8894>] shrink_zone+0x3c/0x100
> [<ffffffc0002c8c10>] try_to_free_pages+0x2b8/0x54c
> [<ffffffc0002bf308>] __alloc_pages_nodemask+0x514/0x7f0
> [<ffffffc0002bf5f8>] __get_free_pages+0x14/0x5c
> [<ffffffc0003446cc>] proc_info_read+0x50/0xe4
> [<ffffffc0002f5204>] vfs_read+0xa0/0x12c
> [<ffffffc0002f59c8>] SyS_read+0x44/0x74
> DMA: 3397*4kB (MC) 26*8kB (RC) 0*16kB 0*32kB 0*64kB 0*128kB 0*256kB
> 0*512kB 0*1024kB 0*2048kB 0*4096kB = 13796kB
>
> Signed-off-by: Kyeongdon Kim <[email protected]>
Acked-by: Minchan Kim <[email protected]>
On (11/23/15 15:35), Minchan Kim wrote:
[..]
> >
> > Signed-off-by: Kyeongdon Kim <[email protected]>
> Acked-by: Minchan Kim <[email protected]>
Acked-by: Sergey Senozhatsky <[email protected]>
-ss
On Mon, 23 Nov 2015 15:21:15 +0900 Kyeongdon Kim <[email protected]> wrote:
> When using LZ4 multi compression streams for zram swap, we found
> page allocation failure messages while running system tests.
> This happened more than once (2 - 5 times per test), and some of
> the failures kept recurring for order-3 allocations.
>
> To set up the private data for parallel compression, we have to
> call kzalloc() with order 2/3 at runtime (lzo/lz4). But if no
> order 2/3 memory is available at that moment, the page allocation
> fails. This patch falls back to vmalloc() when kmalloc() fails,
> which prevents the page allocation failure warning.
>
> With this change we no longer see the warning message during the
> tests, and it also reduces process startup latency by about
> 60-120ms in each case.
>
> ...
>
> --- a/drivers/block/zram/zcomp_lz4.c
> +++ b/drivers/block/zram/zcomp_lz4.c
> @@ -10,17 +10,25 @@
> #include <linux/kernel.h>
> #include <linux/slab.h>
> #include <linux/lz4.h>
> +#include <linux/vmalloc.h>
> +#include <linux/mm.h>
>
> #include "zcomp_lz4.h"
>
> static void *zcomp_lz4_create(void)
> {
> - return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
> + void *ret;
> +
> + ret = kzalloc(LZ4_MEM_COMPRESS,
> + __GFP_NORETRY|__GFP_NOWARN|__GFP_NOMEMALLOC);
> + if (!ret)
> + ret = vzalloc(LZ4_MEM_COMPRESS);
> + return ret;
> }
What's the reasoning behind the modification to the gfp flags?
It clears __GFP_FS, __GFP_IO and even __GFP_WAIT. I suspect the latter
two (at least) can be retained. And given that vmalloc() uses
GFP_KERNEL, what's the point in clearing those flags for the kmalloc()
case?
If this change (or something like it) remains in place, it should have
a comment which fully explains the reasons, please.
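For reference, what the two flag sets being compared here amount to,
roughly annotated (this assumes the pre-4.4 <linux/gfp.h> definitions,
and the variable names below are made up purely for illustration; it is
not part of the patch):

#include <linux/gfp.h>

/*
 * What the patch passes to kzalloc(): an opportunistic attempt.
 * Without __GFP_WAIT/__GFP_IO/__GFP_FS it cannot sleep or enter
 * reclaim; __GFP_NORETRY gives up early, __GFP_NOWARN suppresses
 * the allocation-failure warning, and __GFP_NOMEMALLOC keeps it
 * away from the emergency reserves.
 */
static const gfp_t zcomp_kmalloc_gfp =
	__GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC;

/*
 * What vzalloc() uses internally: GFP_KERNEL, i.e.
 * __GFP_WAIT | __GFP_IO | __GFP_FS on these kernels, so the
 * fallback may sleep, start IO and call back into filesystems
 * during reclaim.
 */
static const gfp_t zcomp_vmalloc_gfp = GFP_KERNEL;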
Hello Andrew,
On Mon, Nov 23, 2015 at 02:52:26PM -0800, Andrew Morton wrote:
> On Mon, 23 Nov 2015 15:21:15 +0900 Kyeongdon Kim <[email protected]> wrote:
>
> > When using LZ4 multi compression streams for zram swap, we found
> > page allocation failure messages while running system tests.
> > This happened more than once (2 - 5 times per test), and some of
> > the failures kept recurring for order-3 allocations.
> >
> > To set up the private data for parallel compression, we have to
> > call kzalloc() with order 2/3 at runtime (lzo/lz4). But if no
> > order 2/3 memory is available at that moment, the page allocation
> > fails. This patch falls back to vmalloc() when kmalloc() fails,
> > which prevents the page allocation failure warning.
> >
> > With this change we no longer see the warning message during the
> > tests, and it also reduces process startup latency by about
> > 60-120ms in each case.
> >
> > ...
> >
> > --- a/drivers/block/zram/zcomp_lz4.c
> > +++ b/drivers/block/zram/zcomp_lz4.c
> > @@ -10,17 +10,25 @@
> > #include <linux/kernel.h>
> > #include <linux/slab.h>
> > #include <linux/lz4.h>
> > +#include <linux/vmalloc.h>
> > +#include <linux/mm.h>
> >
> > #include "zcomp_lz4.h"
> >
> > static void *zcomp_lz4_create(void)
> > {
> > - return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
> > + void *ret;
> > +
> > + ret = kzalloc(LZ4_MEM_COMPRESS,
> > + __GFP_NORETRY|__GFP_NOWARN|__GFP_NOMEMALLOC);
> > + if (!ret)
> > + ret = vzalloc(LZ4_MEM_COMPRESS);
> > + return ret;
> > }
>
> What's the reasoning behind the modification to the gfp flags?
>
> It clears __GFP_FS, __GFP_IO and even __GFP_WAIT. I suspect the latter
> two (at least) can be retained. And given that vmalloc() uses
This function is used in the swapout and fs write paths, so we can't
use those flags.
> GFP_KERNEL, what's the point in clearing those flags for the kmalloc()
> case?
When I reviewed this patch, I wanted to fix it with another patch
because we should handle other places in zcomp as well, and Sergey
sent that today. But I think Sergey's patch is stable material, so
I hope Kyeongdon resends this patch with the vmalloc part fixed.
>
> If this change (or something like it) remains in place, it should have
> a comment which fully explains the reasons, please.
Kyeongdon, could you resend this patch with the vzalloc part fixed
and a comment added?
>
--
Kind regards,
Minchan Kim
On Tue, 24 Nov 2015 08:28:57 +0900 Minchan Kim <[email protected]> wrote:
> > What's the reasoning behind the modification to the gfp flags?
> >
> > It clears __GFP_FS, __GFP_IO and even __GFP_WAIT. I suspect the latter
> > two (at least) can be retained. And given that vmalloc() uses
>
> This function is used in the swapout and fs write paths, so we can't
> use those flags.
We can use __GFP_RECLAIM (used to be __GFP_WAIT). That permits the
allocation to wait for in-flight IO to complete and to reclaim clean
pagecache.
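A minimal sketch of what retaining __GFP_RECLAIM in the kmalloc()
attempt could look like, using the zcomp_lz4_create() form from the
patch above (illustration only, with the same includes as the patch;
this is not the submitted code):

static void *zcomp_lz4_create(void)
{
	void *ret;

	/*
	 * __GFP_RECLAIM lets the allocation sleep, wait for in-flight
	 * IO and reclaim clean pagecache, but there is still no IO/FS
	 * recursion, no costly retry loop, no failure warning and no
	 * use of the emergency reserves.
	 */
	ret = kzalloc(LZ4_MEM_COMPRESS,
		      __GFP_RECLAIM | __GFP_NORETRY | __GFP_NOWARN |
		      __GFP_NOMEMALLOC);
	if (!ret)
		ret = vzalloc(LZ4_MEM_COMPRESS);
	return ret;
}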
On Mon, Nov 23, 2015 at 03:40:29PM -0800, Andrew Morton wrote:
> On Tue, 24 Nov 2015 08:28:57 +0900 Minchan Kim <[email protected]> wrote:
>
> > > What's the reasoning behind the modification to the gfp flags?
> > >
> > > It clears __GFP_FS, __GFP_IO and even __GFP_WAIT. I suspect the latter
> > > two (at least) can be retained. And given that vmalloc() uses
> >
> > This function is used in the swapout and fs write paths, so we can't
> > use those flags.
>
> We can use __GFP_RECLAIM (used to be __GFP_WAIT). That permits the
> allocation to wait for in-flight IO to complete and to reclaim clean
> pagecache.
Generally you're right, but in the case of zram it would be unfortunate.

It would be a no-op *most of the time*, because the allocation happens
in reclaim context, and the reclaim path bails out without trying to
reclaim, to avoid direct reclaim recursion (PF_MEMALLOC).
However, the reason I said *most of the time* is that there is another
context from which the function can be called:

"disksize_store"->zcomp_create

In that place we should make sure the allocation succeeds so that zram
works at all, so that path should use another gfp.
I will work on that.
Thanks, Andrew,
>
--
Kind regards,
Minchan Kim
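A rough sketch of the direction described above, passing the gfp flags
in from the caller so that the reclaim path and disksize_store() can use
different ones (the signature and call sites below are hypothetical,
just to illustrate the idea; the actual patches are posted separately):

/* Backend takes the flags from the caller instead of hard-coding them. */
static void *zcomp_lz4_create(gfp_t flags)
{
	void *ret;

	ret = kzalloc(LZ4_MEM_COMPRESS, flags | __GFP_NOWARN);
	if (!ret)
		ret = __vmalloc(LZ4_MEM_COMPRESS,
				flags | __GFP_NOWARN | __GFP_ZERO | __GFP_HIGHMEM,
				PAGE_KERNEL);
	return ret;
}

/*
 * Possible call sites:
 *  - swapout/fs-write (reclaim) path:
 *      comp->backend->create(GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC);
 *    must not recurse into IO/FS and is allowed to fail.
 *  - disksize_store() -> zcomp_create():
 *      comp->backend->create(GFP_KERNEL);
 *    the initial stream must be allocated for zram to work at all.
 */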
On (11/24/15 09:35), Minchan Kim wrote:
[..]
> > We can use __GFP_RECLAIM (used to be __GFP_WAIT). That permits the
> > allocation to wait for in-flight IO to complete and to reclaim clean
> > pagecache.
>
> Generally you're right, but in the case of zram it would be unfortunate.
>
> It would be a no-op *most of the time*, because the allocation happens
> in reclaim context, and the reclaim path bails out without trying to
> reclaim, to avoid direct reclaim recursion (PF_MEMALLOC).
> However, the reason I said *most of the time* is that there is another
> context from which the function can be called:
>
> "disksize_store"->zcomp_create
>
> In that place we should make sure the allocation succeeds so that zram
> works at all, so that path should use another gfp.
> I will work on that.
Hm... is it really worth it? passing a bool to zcomp_strm_alloc() (so
it can decide what gfp flags to use) or gfp flags directly is just a
bit complicated. what's the problem with GFP_NOIO (__GFP_RECLAIM) in
the first place (sorry if I'm missing something terribly obvious)?
alternatively, we can just remove the 'dynamic' streams allocation part
and allocate all of the streams via sysfs store path only.
-ss
On Tue, Nov 24, 2015 at 10:06:22AM +0900, Sergey Senozhatsky wrote:
> On (11/24/15 09:35), Minchan Kim wrote:
> [..]
> > > We can use __GFP_RECLAIM (used to be __GFP_WAIT). That permits the
> > > allocation to wait for in-flight IO to complete and to reclaim clean
> > > pagecache.
> >
> > Generally you're right, but in the case of zram it would be unfortunate.
> >
> > It would be a no-op *most of the time*, because the allocation happens
> > in reclaim context, and the reclaim path bails out without trying to
> > reclaim, to avoid direct reclaim recursion (PF_MEMALLOC).
> > However, the reason I said *most of the time* is that there is another
> > context from which the function can be called:
> >
> > "disksize_store"->zcomp_create
> >
> > In that place we should make sure the allocation succeeds so that zram
> > works at all, so that path should use another gfp.
> > I will work on that.
>
> Hm... is it really worth it? passing a bool to zcomp_strm_alloc() (so
> it can decide what gfp flags to use) or gfp flags directly is just a
> bit complicated. what's the problem with GFP_NOIO (__GFP_RECLAIM) in
> the first place (sorry if I'm missing something terribly obvious)?
No, you didn't miss anything, and your question is a fair one.
Actually, I was on the same page with you, but when I thought about it
more, I figured it makes the code cleaner and is the right way to
structure it, IMO. So I have coded it now and am pleased with it.
I hope you are on the same page when you look at the new patchset. :)
>
> alternatively, we can just remove the 'dynamic' streams allocation part
> and allocate all of the streams via sysfs store path only.
Hmm, I don't think it's a troublesome part that we can't fix easily,
so let's stay with it!
>
> -ss
--
Kind regards,
Minchan Kim
Hello Andrew,
On 2015-11-24 7:52 AM, Andrew Morton wrote:
> On Mon, 23 Nov 2015 15:21:15 +0900 Kyeongdon Kim <[email protected]>
> wrote:
>
>> When using LZ4 multi compression streams for zram swap, we found
>> page allocation failure messages while running system tests.
>> This happened more than once (2 - 5 times per test), and some of
>> the failures kept recurring for order-3 allocations.
>>
>> To set up the private data for parallel compression, we have to
>> call kzalloc() with order 2/3 at runtime (lzo/lz4). But if no
>> order 2/3 memory is available at that moment, the page allocation
>> fails. This patch falls back to vmalloc() when kmalloc() fails,
>> which prevents the page allocation failure warning.
>>
>> With this change we no longer see the warning message during the
>> tests, and it also reduces process startup latency by about
>> 60-120ms in each case.
>>
>> ...
>>
>> --- a/drivers/block/zram/zcomp_lz4.c
>> +++ b/drivers/block/zram/zcomp_lz4.c
>> @@ -10,17 +10,25 @@
>> #include <linux/kernel.h>
>> #include <linux/slab.h>
>> #include <linux/lz4.h>
>> +#include <linux/vmalloc.h>
>> +#include <linux/mm.h>
>>
>> #include "zcomp_lz4.h"
>>
>> static void *zcomp_lz4_create(void)
>> {
>> - return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
>> + void *ret;
>> +
>> + ret = kzalloc(LZ4_MEM_COMPRESS,
>> + __GFP_NORETRY|__GFP_NOWARN|__GFP_NOMEMALLOC);
>> + if (!ret)
>> + ret = vzalloc(LZ4_MEM_COMPRESS);
>> + return ret;
>> }
>
> What's the reasoning behind the modification to the gfp flags?
>
> It clears __GFP_FS, __GFP_IO and even __GFP_WAIT. I suspect the latter
> two (at least) can be retained. And given that vmalloc() uses
> GFP_KERNEL, what's the point in clearing those flags for the kmalloc()
> case?
>
> If this change (or something like it) remains in place, it should have
> a comment which fully explains the reasons, please.
Sorry for the delay in replying,
I just tried to remove that warning message. If there are more suitable
gfp flags (like the code in Minchan's patch), we can use them.
Thanks,
On 2015-11-24 8:28 AM, Minchan Kim wrote:
> Hello Andrew,
>
> On Mon, Nov 23, 2015 at 02:52:26PM -0800, Andrew Morton wrote:
>> On Mon, 23 Nov 2015 15:21:15 +0900 Kyeongdon Kim
> <[email protected]> wrote:
>>
>> > When using LZ4 multi compression streams for zram swap, we found
>> > page allocation failure messages while running system tests.
>> > This happened more than once (2 - 5 times per test), and some of
>> > the failures kept recurring for order-3 allocations.
>> >
>> > To set up the private data for parallel compression, we have to
>> > call kzalloc() with order 2/3 at runtime (lzo/lz4). But if no
>> > order 2/3 memory is available at that moment, the page allocation
>> > fails. This patch falls back to vmalloc() when kmalloc() fails,
>> > which prevents the page allocation failure warning.
>> >
>> > With this change we no longer see the warning message during the
>> > tests, and it also reduces process startup latency by about
>> > 60-120ms in each case.
>> >
>> > ...
>> >
>> > --- a/drivers/block/zram/zcomp_lz4.c
>> > +++ b/drivers/block/zram/zcomp_lz4.c
>> > @@ -10,17 +10,25 @@
>> > #include <linux/kernel.h>
>> > #include <linux/slab.h>
>> > #include <linux/lz4.h>
>> > +#include <linux/vmalloc.h>
>> > +#include <linux/mm.h>
>> >
>> > #include "zcomp_lz4.h"
>> >
>> > static void *zcomp_lz4_create(void)
>> > {
>> > - return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
>> > + void *ret;
>> > +
>> > + ret = kzalloc(LZ4_MEM_COMPRESS,
>> > + __GFP_NORETRY|__GFP_NOWARN|__GFP_NOMEMALLOC);
>> > + if (!ret)
>> > + ret = vzalloc(LZ4_MEM_COMPRESS);
>> > + return ret;
>> > }
>>
>> What's the reasoning behind the modification to the gfp flags?
>>
>> It clears __GFP_FS, __GFP_IO and even __GFP_WAIT. I suspect the latter
>> two (at least) can be retained. And given that vmalloc() uses
>
> This function is used in the swapout and fs write paths, so we can't
> use those flags.
>
>> GFP_KERNEL, what's the point in clearing those flags for the kmalloc()
>> case?
>
> When I reviewed this patch, I wanted to fix it with another patch
> because we should handle other places in zcomp as well, and Sergey
> sent that today. But I think Sergey's patch is stable material, so
> I hope Kyeongdon resends this patch with the vmalloc part fixed.
>
>>
>> If this change (or something like it) remains in place, it should have
>> a comment which fully explains the reasons, please.
>
> Kyeongdon, could you resend this patch with the vzalloc part fixed
> and a comment added?
>
Sorry for the delay in replying,
I just checked your comments and patch set.
[PATCH 1/3] zram/zcomp: use GFP_NOIO to allocate stream
[PATCH 2/3] zram: try vmalloc() after kmalloc()
[RFC 3/3] zram: pass gfp from zcomp frontend to backend
First of all, thanks for your summary and the organized patch set.
If Sergey agrees with it, I have no questions about patch 2/3.
>>
>
> --
> Kind regards,
> Minchan Kim
On Tue, Nov 24, 2015 at 05:09:06PM +0900, Kyeongdon Kim wrote:
> On 2015-11-24 8:28 AM, Minchan Kim wrote:
> > Hello Andrew,
> >
> > On Mon, Nov 23, 2015 at 02:52:26PM -0800, Andrew Morton wrote:
> >> On Mon, 23 Nov 2015 15:21:15 +0900 Kyeongdon Kim
> > <[email protected]> wrote:
> >>
> >> > When using LZ4 multi compression streams for zram swap, we found
> >> > page allocation failure messages while running system tests.
> >> > This happened more than once (2 - 5 times per test), and some of
> >> > the failures kept recurring for order-3 allocations.
> >> >
> >> > To set up the private data for parallel compression, we have to
> >> > call kzalloc() with order 2/3 at runtime (lzo/lz4). But if no
> >> > order 2/3 memory is available at that moment, the page allocation
> >> > fails. This patch falls back to vmalloc() when kmalloc() fails,
> >> > which prevents the page allocation failure warning.
> >> >
> >> > With this change we no longer see the warning message during the
> >> > tests, and it also reduces process startup latency by about
> >> > 60-120ms in each case.
> >> >
> >> > ...
> >> >
> >> > --- a/drivers/block/zram/zcomp_lz4.c
> >> > +++ b/drivers/block/zram/zcomp_lz4.c
> >> > @@ -10,17 +10,25 @@
> >> > #include <linux/kernel.h>
> >> > #include <linux/slab.h>
> >> > #include <linux/lz4.h>
> >> > +#include <linux/vmalloc.h>
> >> > +#include <linux/mm.h>
> >> >
> >> > #include "zcomp_lz4.h"
> >> >
> >> > static void *zcomp_lz4_create(void)
> >> > {
> >> > - return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
> >> > + void *ret;
> >> > +
> >> > + ret = kzalloc(LZ4_MEM_COMPRESS,
> >> > + __GFP_NORETRY|__GFP_NOWARN|__GFP_NOMEMALLOC);
> >> > + if (!ret)
> >> > + ret = vzalloc(LZ4_MEM_COMPRESS);
> >> > + return ret;
> >> > }
> >>
> >> What's the reasoning behind the modification to the gfp flags?
> >>
> >> It clears __GFP_FS, __GFP_IO and even __GFP_WAIT. I suspect the latter
> >> two (at least) can be retained. And given that vmalloc() uses
> >
> > This function is used in the swapout and fs write paths, so we can't
> > use those flags.
> >
> >> GFP_KERNEL, what's the point in clearing those flags for the kmalloc()
> >> case?
> >
> > When I reviewed this patch, I wanted to fix it with another patch
> > because we should handle other places in zcomp as well, and Sergey
> > sent that today. But I think Sergey's patch is stable material, so
> > I hope Kyeongdon resends this patch with the vmalloc part fixed.
> >
> >>
> >> If this change (or something like it) remains in place, it should have
> >> a comment which fully explains the reasons, please.
> >
> > Kyeongdon, could you resend this patch with the vzalloc part fixed
> > and a comment added?
> >
> Sorry for the delay in replying,
> I just checked your comments and patch set.
>
> [PATCH 1/3] zram/zcomp: use GFP_NOIO to allocate stream
> [PATCH 2/3] zram: try vmalloc() after kmalloc()
> [RFC 3/3] zram: pass gfp from zcomp frontend to backend
>
> First of all, thanks for your summary and the organized patch set.
> If Sergey agrees with it, I have no questions about patch 2/3.
Thanks. I will send a revised version tomorrow.
>
> >>
> >
> > --
> > Kind regards,
> > Minchan Kim
>
--
Kind regards,
Minchan Kim