kunmap_atomic() should be passed the virtual address returned by
kmap_atomic(). However, some code in zsmalloc passes kunmap_atomic()
a modified address, not the one it got from kmap_atomic().

This happens to work because zsmalloc only moves the address within
the PAGE_SIZE boundary, which the current kmap_atomic() implementation
tolerates. But it is fragile against future changes to kmap_atomic(),
so let's correct it.
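A condensed illustration of the pattern (not literal lines from the
diff below):

	/* before: a modified pointer goes back to kunmap_atomic() */
	link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
			+ f_offset);
	...
	kunmap_atomic(link);

	/* after: keep and hand back exactly what kmap_atomic() returned */
	vaddr = kmap_atomic(f_page);
	link = (struct link_free *)(vaddr + f_offset);
	...
	kunmap_atomic(vaddr);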
Signed-off-by: Minchan Kim <[email protected]>
---
mm/zsmalloc.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b3b57ef85830..85e14f584048 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -629,6 +629,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
struct page *next_page;
struct link_free *link;
unsigned int i = 1;
+ void *vaddr;
/*
* page->index stores offset of first object starting
@@ -639,8 +640,8 @@ static void init_zspage(struct page *first_page, struct size_class *class)
if (page != first_page)
page->index = off;
- link = (struct link_free *)kmap_atomic(page) +
- off / sizeof(*link);
+ vaddr = kmap_atomic(page);
+ link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
link->next = obj_location_to_handle(page, i++);
@@ -654,7 +655,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
*/
next_page = get_next_page(page);
link->next = obj_location_to_handle(next_page, 0);
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
page = next_page;
off %= PAGE_SIZE;
}
@@ -1055,6 +1056,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
unsigned long obj;
struct link_free *link;
struct size_class *class;
+ void *vaddr;
struct page *first_page, *m_page;
unsigned long m_objidx, m_offset;
@@ -1083,11 +1085,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
obj_handle_to_location(obj, &m_page, &m_objidx);
m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
- link = (struct link_free *)kmap_atomic(m_page) +
- m_offset / sizeof(*link);
+ vaddr = kmap_atomic(m_page);
+ link = (struct link_free *)vaddr + m_offset / sizeof(*link);
first_page->freelist = link->next;
memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->inuse++;
/* Now move the zspage to another fullness group, if required */
@@ -1103,6 +1105,7 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
struct link_free *link;
struct page *first_page, *f_page;
unsigned long f_objidx, f_offset;
+ void *vaddr;
int class_idx;
struct size_class *class;
@@ -1121,10 +1124,10 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
spin_lock(&class->lock);
/* Insert this object in containing zspage's freelist */
- link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
- + f_offset);
+ vaddr = kmap_atomic(f_page);
+ link = (struct link_free *)(vaddr + f_offset);
link->next = first_page->freelist;
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->freelist = (void *)obj;
first_page->inuse--;
--
2.0.0
On Fri, Nov 14, 2014 at 10:11:01AM +0900, Minchan Kim wrote:
> kunmap_atomic() should be passed the virtual address returned by
> kmap_atomic(). However, some code in zsmalloc passes kunmap_atomic()
> a modified address, not the one it got from kmap_atomic().
>
> This happens to work because zsmalloc only moves the address within
> the PAGE_SIZE boundary, which the current kmap_atomic() implementation
> tolerates. But it is fragile against future changes to kmap_atomic(),
> so let's correct it.
Seems like you could just use PAGE_MASK to get the base page address
from link like this:
---
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b3b57ef..d6ca05a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -654,7 +654,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
*/
next_page = get_next_page(page);
link->next = obj_location_to_handle(next_page, 0);
- kunmap_atomic(link);
+ kunmap_atomic((void *)((unsigned long)link & PAGE_MASK));
page = next_page;
off %= PAGE_SIZE;
}
@@ -1087,7 +1087,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
m_offset / sizeof(*link);
first_page->freelist = link->next;
memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(link);
+ kunmap_atomic((void *)((unsigned long)link & PAGE_MASK));
first_page->inuse++;
/* Now move the zspage to another fullness group, if required */
@@ -1124,7 +1124,7 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
+ f_offset);
link->next = first_page->freelist;
- kunmap_atomic(link);
+ kunmap_atomic((void *)((unsigned long)link & PAGE_MASK));
first_page->freelist = (void *)obj;
first_page->inuse--;
---
This seems cleaner, but, at the same time, it isn't obvious that we are
passing the same value to kunmap_atomic() that we got from
kmap_atomic(). Just a thought.
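For the record, the masking works because kmap_atomic() returns a
page-aligned address and link never leaves that page; a minimal
illustration, assuming 0 <= f_offset < PAGE_SIZE:

	void *vaddr = kmap_atomic(f_page);	/* page-aligned */
	struct link_free *link = (struct link_free *)(vaddr + f_offset);

	/* link rounds back down to the mapping base */
	BUG_ON((void *)((unsigned long)link & PAGE_MASK) != vaddr);
	kunmap_atomic(vaddr);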
Either way:
Reviewed-by: Seth Jennings <[email protected]>
On (11/14/14 09:07), Seth Jennings wrote:
> On Fri, Nov 14, 2014 at 10:11:01AM +0900, Minchan Kim wrote:
> > kunmap_atomic() should be passed the virtual address returned by
> > kmap_atomic(). However, some code in zsmalloc passes kunmap_atomic()
> > a modified address, not the one it got from kmap_atomic().
> >
> > This happens to work because zsmalloc only moves the address within
> > the PAGE_SIZE boundary, which the current kmap_atomic() implementation
> > tolerates. But it is fragile against future changes to kmap_atomic(),
> > so let's correct it.
>
> Seems like you could just use PAGE_MASK to get the base page address
> from link like this:
>
Both solutions look good to me:
Acked-by: Sergey Senozhatsky <[email protected]>
-ss
Hello
2014-11-14 9:11 GMT+08:00 Minchan Kim <[email protected]>:
> kunmap_atomic() should be passed the virtual address returned by
> kmap_atomic(). However, some code in zsmalloc passes kunmap_atomic()
> a modified address, not the one it got from kmap_atomic().
>
> This happens to work because zsmalloc only moves the address within
> the PAGE_SIZE boundary, which the current kmap_atomic() implementation
> tolerates. But it is fragile against future changes to kmap_atomic(),
> so let's correct it.
>
> Signed-off-by: Minchan Kim <[email protected]>
> ---
> mm/zsmalloc.c | 21 ++++++++++++---------
> 1 file changed, 12 insertions(+), 9 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index b3b57ef85830..85e14f584048 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -629,6 +629,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
> struct page *next_page;
> struct link_free *link;
> unsigned int i = 1;
> + void *vaddr;
>
> /*
> * page->index stores offset of first object starting
> @@ -639,8 +640,8 @@ static void init_zspage(struct page *first_page, struct size_class *class)
> if (page != first_page)
> page->index = off;
>
> - link = (struct link_free *)kmap_atomic(page) +
> - off / sizeof(*link);
> + vaddr = kmap_atomic(page);
> + link = (struct link_free *)vaddr + off / sizeof(*link);
Just a nitpick: I think

link = (struct link_free *)(vaddr + off);

is better, since it uses the same method as in zs_free():

link = (struct link_free *)(vaddr + f_offset);
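For what it's worth, both spellings should pick the same object here,
assuming off stays a multiple of sizeof(*link), which zsmalloc's
ZS_ALIGN rounding of class sizes appears to guarantee:

/* illustrative only: equivalent when off % sizeof(*link) == 0 */
link = (struct link_free *)vaddr + off / sizeof(*link);
link = (struct link_free *)(vaddr + off);

The second form just avoids the division.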
>
> while ((off += class->size) < PAGE_SIZE) {
> link->next = obj_location_to_handle(page, i++);
> @@ -654,7 +655,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
> */
> next_page = get_next_page(page);
> link->next = obj_location_to_handle(next_page, 0);
> - kunmap_atomic(link);
> + kunmap_atomic(vaddr);
> page = next_page;
> off %= PAGE_SIZE;
> }
> @@ -1055,6 +1056,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
> unsigned long obj;
> struct link_free *link;
> struct size_class *class;
> + void *vaddr;
>
> struct page *first_page, *m_page;
> unsigned long m_objidx, m_offset;
> @@ -1083,11 +1085,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
> obj_handle_to_location(obj, &m_page, &m_objidx);
> m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
>
> - link = (struct link_free *)kmap_atomic(m_page) +
> - m_offset / sizeof(*link);
> + vaddr = kmap_atomic(m_page);
> + link = (struct link_free *)vaddr + m_offset / sizeof(*link);
link = (struct link_free *)(vaddr + m_offset);
> first_page->freelist = link->next;
> memset(link, POISON_INUSE, sizeof(*link));
> - kunmap_atomic(link);
> + kunmap_atomic(vaddr);
>
> first_page->inuse++;
> /* Now move the zspage to another fullness group, if required */
> @@ -1103,6 +1105,7 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
> struct link_free *link;
> struct page *first_page, *f_page;
> unsigned long f_objidx, f_offset;
> + void *vaddr;
>
> int class_idx;
> struct size_class *class;
> @@ -1121,10 +1124,10 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
> spin_lock(&class->lock);
>
> /* Insert this object in containing zspage's freelist */
> - link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
> - + f_offset);
> + vaddr = kmap_atomic(f_page);
> + link = (struct link_free *)(vaddr + f_offset);
Yes, I think this way of calculating *link* is better. (:
Thanks
> link->next = first_page->freelist;
> - kunmap_atomic(link);
> + kunmap_atomic(vaddr);
> first_page->freelist = (void *)obj;
>
> first_page->inuse--;
On Fri, 14 Nov 2014 09:07:32 -0600 Seth Jennings <[email protected]> wrote:
> On Fri, Nov 14, 2014 at 10:11:01AM +0900, Minchan Kim wrote:
> > kunmap_atomic() should be passed the virtual address returned by
> > kmap_atomic(). However, some code in zsmalloc passes kunmap_atomic()
> > a modified address, not the one it got from kmap_atomic().
> >
> > This happens to work because zsmalloc only moves the address within
> > the PAGE_SIZE boundary, which the current kmap_atomic() implementation
> > tolerates. But it is fragile against future changes to kmap_atomic(),
> > so let's correct it.
It is a bit alarming, but I've seen code elsewhere in which a modified
pointer is passed to kunmap_atomic(). So the kunmap_atomic() interface
is "kvaddr should point somewhere into the page" and that won't be
changing without a big effort.
> Seems like you could just use PAGE_MASK to get the base page address
> from link like this:
I think Minchan's approach is better: it explicitly retains the
kmap_atomic() return value for passing to kunmap_atomic(). That's
nicer than modifying it and then setting it back again.
I mean, a cleaner way of implementing your suggestion would be
	void kunmap_atomic_unaligned(void *p)
	{
		kunmap_atomic((void *)((unsigned long)p & PAGE_MASK));
	}
but then one looks at
	void __kunmap_atomic(void *kvaddr)
	{
		unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
and asks "what the heck".
So I dunno. We could leave the code as-is. I have no strong feelings
either way. Minchan's patch has no effect on zsmalloc.o section sizes
with my compiler.
Hello Andrew,
On Tue, Nov 18, 2014 at 03:01:38PM -0800, Andrew Morton wrote:
> On Fri, 14 Nov 2014 09:07:32 -0600 Seth Jennings <[email protected]> wrote:
>
> > On Fri, Nov 14, 2014 at 10:11:01AM +0900, Minchan Kim wrote:
> > > kunmap_atomic() should be passed the virtual address returned by
> > > kmap_atomic(). However, some code in zsmalloc passes kunmap_atomic()
> > > a modified address, not the one it got from kmap_atomic().
> > >
> > > This happens to work because zsmalloc only moves the address within
> > > the PAGE_SIZE boundary, which the current kmap_atomic() implementation
> > > tolerates. But it is fragile against future changes to kmap_atomic(),
> > > so let's correct it.
>
> It is a bit alarming, but I've seen code elsewhere in which a modified
> pointer is passed to kunmap_atomic(). So the kunmap_atomic() interface
> is "kvaddr should point somewhere into the page" and that won't be
> changing without a big effort.
>
> > Seems like you could just use PAGE_MASK to get the base page address
> > from link like this:
>
> I think Minchan's approach is better: it explicitly retains the
> kmap_atomic() return value for passing to kunmap_atomic(). That's
> nicer than modifying it and then setting it back again.
>
> I mean, a cleaner way of implementing your suggestion would be
>
> 	void kunmap_atomic_unaligned(void *p)
> 	{
> 		kunmap_atomic((void *)((unsigned long)p & PAGE_MASK));
> 	}
>
> but then one looks at
>
> 	void __kunmap_atomic(void *kvaddr)
> 	{
> 		unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
>
> and asks "what the heck".
>
>
> So I dunno. We could leave the code as-is. I have no strong feelings
> either way. Minchan's patch has no effect on zsmalloc.o section sizes
> with my compiler.
I hope to merge my patch.

The main reason I sent it is that I hit a subtle bug while implementing
a new zsmalloc feature (ie, compaction), caused by my own mishandling
of link (ie, link crossed a page boundary by my fault).

Although it was entirely my mistake, it took a while to find the root
cause, because an unpredictable kmapped address was being unmapped, so
it crashed almost at random.

IOW, it's fragile to depend on kunmap_atomic's internals, and at the
very least I wanted to make the zsmalloc code more robust.

Thanks.
--
Kind regards,
Minchan Kim
On Wed, 19 Nov 2014 08:21:39 +0900 Minchan Kim <[email protected]> wrote:
> The main reason I sent it is that I hit a subtle bug while implementing
> a new zsmalloc feature (ie, compaction), caused by my own mishandling
> of link (ie, link crossed a page boundary by my fault).
>
> Although it was entirely my mistake, it took a while to find the root
> cause, because an unpredictable kmapped address was being unmapped, so
> it crashed almost at random.
Fair enough.
That's pretty rude behaviour from kunmap_atomic(). Unfortunately it
just doesn't have anything with which to check the address - we'd need
to create a special per-cpu array[KM_TYPE_NR] just for the purpose.
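Roughly, such a check might look like the sketch below (purely
illustrative: the *_checked wrappers and kmap_orig_addr are made-up
names, and it assumes a kmap_atomic_idx()-style helper exposing the
top of the per-cpu kmap stack):

	/* shadow what kmap_atomic() returned, per cpu and per slot */
	static DEFINE_PER_CPU(void *, kmap_orig_addr[KM_TYPE_NR]);

	void *kmap_atomic_checked(struct page *page)
	{
		void *vaddr = kmap_atomic(page);

		/* remember the exact return value for this mapping slot */
		this_cpu_write(kmap_orig_addr[kmap_atomic_idx()], vaddr);
		return vaddr;
	}

	void kunmap_atomic_checked(void *kvaddr)
	{
		/* catch callers handing back a modified pointer */
		WARN_ON(kvaddr != this_cpu_read(kmap_orig_addr[kmap_atomic_idx()]));
		kunmap_atomic(kvaddr);
	}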