Hi All,
We've posted a v4 patchset introducing a new "prefer-many" memory policy
(https://lore.kernel.org/lkml/[email protected]/),
for which Michal Hocko gave many comments and pointed out some
problems. We also found some semantic confusion between the 'prefer'
and 'local' policies, as well as some duplicated code. This patchset
tries to address these issues. Please help to review, thanks!
- Feng
Feng Tang (4):
mm/mempolicy: skip nodemask intersect check for 'interleave' when oom
mm/mempolicy: unify the preprocessing for mbind and set_mempolicy
mm/mempolicy: don't handle MPOL_LOCAL like a fake MPOL_PREFERRED
policy
mm/mempolicy: kill MPOL_F_LOCAL bit
include/uapi/linux/mempolicy.h | 1 +
mm/mempolicy.c | 205 +++++++++++++++++++----------------------
2 files changed, 98 insertions(+), 108 deletions(-)
--
2.7.4
MPOL_LOCAL policy has been defined as a real policy, but it is still
handled like a fake MPOL_PREFERRED policy with the internal
MPOL_F_LOCAL flag bit set, and many places have to distinguish
the real 'prefer' policy from the 'local' one, which is quite
confusing.
In the current code, there are four cases where MPOL_LOCAL is used:
* user specifies 'local' policy
* user specifies 'prefer' policy, but with empty nodemask
* system 'default' policy is used
* 'prefer' policy + a valid 'preferred' node with the MPOL_F_STATIC_NODES
  flag set: when it is rebound to a nodemask which doesn't contain
  the 'preferred' node, it will set the MPOL_F_LOCAL bit and perform
  as a 'local' policy. If it is later rebound to a nodemask that does
  contain the 'preferred' node, the policy will be restored back to 'prefer'
So for the first three cases, make 'local' a real policy
instead of a fake 'prefer' one; this reduces confusion when
reading the code.
The next (optional) patch will kill the 'MPOL_F_LOCAL' bit.
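To illustrate, here is a rough sketch (not verbatim kernel code) of
the kind of check needed before and after this change:

	/* before: 'local' hides behind MPOL_PREFERRED + MPOL_F_LOCAL */
	if (pol->mode == MPOL_PREFERRED) {
		if (pol->flags & MPOL_F_LOCAL)
			nid = numa_node_id();		/* actually 'local' */
		else
			nid = pol->v.preferred_node;	/* real 'prefer' */
	}

	/* after: 'local' is a first-class mode */
	switch (pol->mode) {
	case MPOL_PREFERRED:
		nid = pol->v.preferred_node;
		break;
	case MPOL_LOCAL:
		nid = numa_node_id();
		break;
	}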
Signed-off-by: Feng Tang <[email protected]>
---
mm/mempolicy.c | 60 ++++++++++++++++++++++++++++++++--------------------------
1 file changed, 33 insertions(+), 27 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0f5bf60..833ed2d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -121,8 +121,7 @@ enum zone_type policy_zone = 0;
*/
static struct mempolicy default_policy = {
.refcnt = ATOMIC_INIT(1), /* never free it */
- .mode = MPOL_PREFERRED,
- .flags = MPOL_F_LOCAL,
+ .mode = MPOL_LOCAL,
};
static struct mempolicy preferred_node_policy[MAX_NUMNODES];
@@ -200,12 +199,9 @@ static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
{
- if (!nodes)
- pol->flags |= MPOL_F_LOCAL; /* local allocation */
- else if (nodes_empty(*nodes))
- return -EINVAL; /* no allowed nodes */
- else
- pol->v.preferred_node = first_node(*nodes);
+ if (nodes_empty(*nodes))
+ return -EINVAL;
+ pol->v.preferred_node = first_node(*nodes);
return 0;
}
@@ -239,25 +235,19 @@ static int mpol_set_nodemask(struct mempolicy *pol,
cpuset_current_mems_allowed, node_states[N_MEMORY]);
VM_BUG_ON(!nodes);
- if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
- nodes = NULL; /* explicit local allocation */
- else {
- if (pol->flags & MPOL_F_RELATIVE_NODES)
- mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
- else
- nodes_and(nsc->mask2, *nodes, nsc->mask1);
- if (mpol_store_user_nodemask(pol))
- pol->w.user_nodemask = *nodes;
- else
- pol->w.cpuset_mems_allowed =
- cpuset_current_mems_allowed;
- }
+ if (pol->flags & MPOL_F_RELATIVE_NODES)
+ mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
+ else
+ nodes_and(nsc->mask2, *nodes, nsc->mask1);
- if (nodes)
- ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
+ if (mpol_store_user_nodemask(pol))
+ pol->w.user_nodemask = *nodes;
else
- ret = mpol_ops[pol->mode].create(pol, NULL);
+ pol->w.cpuset_mems_allowed =
+ cpuset_current_mems_allowed;
+
+ ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
return ret;
}
@@ -290,13 +280,14 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
if (((flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES)))
return ERR_PTR(-EINVAL);
+
+ mode = MPOL_LOCAL;
}
} else if (mode == MPOL_LOCAL) {
if (!nodes_empty(*nodes) ||
(flags & MPOL_F_STATIC_NODES) ||
(flags & MPOL_F_RELATIVE_NODES))
return ERR_PTR(-EINVAL);
- mode = MPOL_PREFERRED;
} else if (nodes_empty(*nodes))
return ERR_PTR(-EINVAL);
policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -427,6 +418,9 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
.create = mpol_new_bind,
.rebind = mpol_rebind_nodemask,
},
+ [MPOL_LOCAL] = {
+ .rebind = mpol_rebind_default,
+ },
};
static int migrate_page_add(struct page *page, struct list_head *pagelist,
@@ -1952,6 +1946,8 @@ unsigned int mempolicy_slab_node(void)
&policy->v.nodes);
return z->zone ? zone_to_nid(z->zone) : node;
}
+ case MPOL_LOCAL:
+ return node;
default:
BUG();
@@ -2076,6 +2072,11 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
*mask = mempolicy->v.nodes;
break;
+ case MPOL_LOCAL:
+ nid = numa_node_id();
+ init_nodemask_of_node(mask, nid);
+ break;
+
default:
BUG();
}
@@ -2320,6 +2321,8 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
if (a->flags & MPOL_F_LOCAL)
return true;
return a->v.preferred_node == b->v.preferred_node;
+ case MPOL_LOCAL:
+ return true;
default:
BUG();
return false;
@@ -2463,6 +2466,10 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
polnid = pol->v.preferred_node;
break;
+ case MPOL_LOCAL:
+ polnid = numa_node_id();
+ break;
+
case MPOL_BIND:
/* Optimize placement among multiple nodes via NUMA balancing */
if (pol->flags & MPOL_F_MORON) {
@@ -2907,7 +2914,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
*/
if (nodelist)
goto out;
- mode = MPOL_PREFERRED;
break;
case MPOL_DEFAULT:
/*
@@ -2951,7 +2957,7 @@ int mpol_parse_str(char *str, struct mempolicy **mpol)
else if (nodelist)
new->v.preferred_node = first_node(nodes);
else
- new->flags |= MPOL_F_LOCAL;
+ new->mode = MPOL_LOCAL;
/*
* Save nodes for contextualization: this will be used to "clone"
--
2.7.4
Currently kernel_mbind() and kernel_set_mempolicy() do almost
the same parameter sanity checking and preprocessing.
Add a macro to unify the code, reduce the redundancy, and make
it easier to change the preprocessing code in the future.
Signed-off-by: Feng Tang <[email protected]>
---
mm/mempolicy.c | 46 +++++++++++++++++++---------------------------
1 file changed, 19 insertions(+), 27 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1964cca..0f5bf60 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1460,25 +1460,29 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
+#define MPOL_PRE_PROCESS() \
+ \
+ nodemask_t nodes; \
+ int err; \
+ unsigned short mode_flags; \
+ mode_flags = mode & MPOL_MODE_FLAGS; \
+ mode &= ~MPOL_MODE_FLAGS; \
+ if (mode >= MPOL_MAX) \
+ return -EINVAL; \
+ if ((mode_flags & MPOL_F_STATIC_NODES) && \
+ (mode_flags & MPOL_F_RELATIVE_NODES)) \
+ return -EINVAL; \
+ err = get_nodes(&nodes, nmask, maxnode); \
+ if (err) \
+ return err;
+
static long kernel_mbind(unsigned long start, unsigned long len,
unsigned long mode, const unsigned long __user *nmask,
unsigned long maxnode, unsigned int flags)
{
- nodemask_t nodes;
- int err;
- unsigned short mode_flags;
+ MPOL_PRE_PROCESS();
start = untagged_addr(start);
- mode_flags = mode & MPOL_MODE_FLAGS;
- mode &= ~MPOL_MODE_FLAGS;
- if (mode >= MPOL_MAX)
- return -EINVAL;
- if ((mode_flags & MPOL_F_STATIC_NODES) &&
- (mode_flags & MPOL_F_RELATIVE_NODES))
- return -EINVAL;
- err = get_nodes(&nodes, nmask, maxnode);
- if (err)
- return err;
return do_mbind(start, len, mode, mode_flags, &nodes, flags);
}
@@ -1493,20 +1497,8 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
unsigned long maxnode)
{
- int err;
- nodemask_t nodes;
- unsigned short flags;
-
- flags = mode & MPOL_MODE_FLAGS;
- mode &= ~MPOL_MODE_FLAGS;
- if ((unsigned int)mode >= MPOL_MAX)
- return -EINVAL;
- if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
- return -EINVAL;
- err = get_nodes(&nodes, nmask, maxnode);
- if (err)
- return err;
- return do_set_mempolicy(mode, flags, &nodes);
+ MPOL_PRE_PROCESS();
+ return do_set_mempolicy(mode, mode_flags, &nodes);
}
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
--
2.7.4
mempolicy_nodemask_intersects() is used in the oom case to check
whether a task may have memory allocated on some memory nodes.
The nodemask of MPOL_INTERLEAVE is not a hard requirement but just
a hint for choosing a node to allocate memory from, and the task
may have memory allocated on nodes outside this nodemask. So skip
the check for it.
Suggested-by: Michal Hocko <[email protected]>
Signed-off-by: Feng Tang <[email protected]>
---
mm/mempolicy.c | 24 ++++--------------------
1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d79fa29..1964cca 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2098,7 +2098,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
*
* If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
* policy. Otherwise, check for intersection between mask and the policy
- * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
+ * nodemask for 'bind' policy. For 'interleave', 'preferred' or 'local'
* policy, always return true since it may allocate elsewhere on fallback.
*
* Takes task_lock(tsk) to prevent freeing of its mempolicy.
@@ -2111,29 +2111,13 @@ bool mempolicy_nodemask_intersects(struct task_struct *tsk,
if (!mask)
return ret;
+
task_lock(tsk);
mempolicy = tsk->mempolicy;
- if (!mempolicy)
- goto out;
-
- switch (mempolicy->mode) {
- case MPOL_PREFERRED:
- /*
- * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
- * allocate from, they may fallback to other nodes when oom.
- * Thus, it's possible for tsk to have allocated memory from
- * nodes in mask.
- */
- break;
- case MPOL_BIND:
- case MPOL_INTERLEAVE:
+ if (mempolicy && mempolicy->mode == MPOL_BIND)
ret = nodes_intersects(mempolicy->v.nodes, *mask);
- break;
- default:
- BUG();
- }
-out:
task_unlock(tsk);
+
return ret;
}
--
2.7.4
Now the only remaining case where a real 'local' policy is faked by
a 'prefer' policy plus the MPOL_F_LOCAL bit is:
A valid 'prefer' policy with a valid 'preferred' node is rebound
to a nodemask which doesn't contain the 'preferred' node; it will
then handle allocation as a 'local' policy.
Add a new 'MPOL_F_LOCAL_TEMP' bit for this case, and kill the
MPOL_F_LOCAL bit, which simplifies the code considerably.
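As an illustration, the rebind round-trip this new bit covers looks
like the following (a sketch with hypothetical node numbers, not
actual code):

	/*
	 * set_mempolicy(MPOL_PREFERRED | MPOL_F_STATIC_NODES, {3})
	 *	-> mode == MPOL_PREFERRED, v.preferred_node == 3
	 *
	 * cpuset rebind to mems_allowed == {0,1}, node 3 disallowed
	 *	-> mode == MPOL_LOCAL, flags |= MPOL_F_LOCAL_TEMP
	 *
	 * cpuset rebind to mems_allowed == {2,3}, node 3 allowed again
	 *	-> mode == MPOL_PREFERRED, v.preferred_node == 3,
	 *	   flags &= ~MPOL_F_LOCAL_TEMP
	 */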
Reviewed-by: Andi Kleen <[email protected]>
Signed-off-by: Feng Tang <[email protected]>
---
include/uapi/linux/mempolicy.h | 1 +
mm/mempolicy.c | 77 +++++++++++++++++++++++-------------------
2 files changed, 43 insertions(+), 35 deletions(-)
diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
index 4832fd0..942844a 100644
--- a/include/uapi/linux/mempolicy.h
+++ b/include/uapi/linux/mempolicy.h
@@ -63,6 +63,7 @@ enum {
#define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
#define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
+#define MPOL_F_LOCAL_TEMP (1 << 5) /* a policy temporarily changed from 'prefer' to 'local' */
/*
* These bit locations are exposed in the vm.zone_reclaim_mode sysctl
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 833ed2d..53a480f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -332,6 +332,22 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
pol->v.nodes = tmp;
}
+static void mpol_rebind_local(struct mempolicy *pol,
+ const nodemask_t *nodes)
+{
+ if (unlikely(pol->flags & MPOL_F_STATIC_NODES)) {
+ int node = first_node(pol->w.user_nodemask);
+
+ BUG_ON(!(pol->flags & MPOL_F_LOCAL_TEMP));
+
+ if (node_isset(node, *nodes)) {
+ pol->v.preferred_node = node;
+ pol->mode = MPOL_PREFERRED;
+ pol->flags &= ~MPOL_F_LOCAL_TEMP;
+ }
+ }
+}
+
static void mpol_rebind_preferred(struct mempolicy *pol,
const nodemask_t *nodes)
{
@@ -342,13 +358,19 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
if (node_isset(node, *nodes)) {
pol->v.preferred_node = node;
- pol->flags &= ~MPOL_F_LOCAL;
- } else
- pol->flags |= MPOL_F_LOCAL;
+ } else {
+ /*
+ * If there is no valid node, change the mode to
+ * MPOL_LOCAL, which will be restored back when
+ * next rebind() see a valid node.
+ */
+ pol->mode = MPOL_LOCAL;
+ pol->flags |= MPOL_F_LOCAL_TEMP;
+ }
} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
pol->v.preferred_node = first_node(tmp);
- } else if (!(pol->flags & MPOL_F_LOCAL)) {
+ } else {
pol->v.preferred_node = node_remap(pol->v.preferred_node,
pol->w.cpuset_mems_allowed,
*nodes);
@@ -367,7 +389,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
{
if (!pol)
return;
- if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
+ if (!mpol_store_user_nodemask(pol) &&
nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
return;
@@ -419,7 +441,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
.rebind = mpol_rebind_nodemask,
},
[MPOL_LOCAL] = {
- .rebind = mpol_rebind_default,
+ .rebind = mpol_rebind_local,
},
};
@@ -913,10 +935,12 @@ static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
case MPOL_INTERLEAVE:
*nodes = p->v.nodes;
break;
+ case MPOL_LOCAL:
+ /* return empty node mask for local allocation */
+ break;
+
case MPOL_PREFERRED:
- if (!(p->flags & MPOL_F_LOCAL))
- node_set(p->v.preferred_node, *nodes);
- /* else return empty node mask for local allocation */
+ node_set(p->v.preferred_node, *nodes);
break;
default:
BUG();
@@ -1880,9 +1904,9 @@ nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
/* Return the node id preferred by the given mempolicy, or the given id */
static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
{
- if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
+ if (policy->mode == MPOL_PREFERRED) {
nd = policy->v.preferred_node;
- else {
+ } else {
/*
* __GFP_THISNODE shouldn't even be used with the bind policy
* because we might easily break the expectation to stay on the
@@ -1919,14 +1943,11 @@ unsigned int mempolicy_slab_node(void)
return node;
policy = current->mempolicy;
- if (!policy || policy->flags & MPOL_F_LOCAL)
+ if (!policy)
return node;
switch (policy->mode) {
case MPOL_PREFERRED:
- /*
- * handled MPOL_F_LOCAL above
- */
return policy->v.preferred_node;
case MPOL_INTERLEAVE:
@@ -2060,16 +2081,13 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
mempolicy = current->mempolicy;
switch (mempolicy->mode) {
case MPOL_PREFERRED:
- if (mempolicy->flags & MPOL_F_LOCAL)
- nid = numa_node_id();
- else
- nid = mempolicy->v.preferred_node;
+ nid = mempolicy->v.preferred_node;
init_nodemask_of_node(mask, nid);
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
- *mask = mempolicy->v.nodes;
+ *mask = mempolicy->v.nodes;
break;
case MPOL_LOCAL:
@@ -2181,7 +2199,7 @@ struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
* If the policy is interleave, or does not allow the current
* node in its nodemask, we allocate the standard way.
*/
- if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
+ if (pol->mode == MPOL_PREFERRED)
hpage_node = pol->v.preferred_node;
nmask = policy_nodemask(gfp, pol);
@@ -2317,9 +2335,6 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
case MPOL_INTERLEAVE:
return !!nodes_equal(a->v.nodes, b->v.nodes);
case MPOL_PREFERRED:
- /* a's ->flags is the same as b's */
- if (a->flags & MPOL_F_LOCAL)
- return true;
return a->v.preferred_node == b->v.preferred_node;
case MPOL_LOCAL:
return true;
@@ -2460,10 +2475,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
break;
case MPOL_PREFERRED:
- if (pol->flags & MPOL_F_LOCAL)
- polnid = numa_node_id();
- else
- polnid = pol->v.preferred_node;
+ polnid = pol->v.preferred_node;
break;
case MPOL_LOCAL:
@@ -2834,9 +2846,6 @@ void numa_default_policy(void)
* Parse and format mempolicy from/to strings
*/
-/*
- * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
- */
static const char * const policy_modes[] =
{
[MPOL_DEFAULT] = "default",
@@ -3003,12 +3012,10 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
switch (mode) {
case MPOL_DEFAULT:
+ case MPOL_LOCAL:
break;
case MPOL_PREFERRED:
- if (flags & MPOL_F_LOCAL)
- mode = MPOL_LOCAL;
- else
- node_set(pol->v.preferred_node, nodes);
+ node_set(pol->v.preferred_node, nodes);
break;
case MPOL_BIND:
case MPOL_INTERLEAVE:
--
2.7.4
On Thu, 20 May 2021, Feng Tang wrote:
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index d79fa29..1964cca 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2098,7 +2098,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
> *
> * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
> * policy. Otherwise, check for intersection between mask and the policy
> - * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
> + * nodemask for 'bind' policy. For 'interleave', 'preferred' or 'local'
> * policy, always return true since it may allocate elsewhere on fallback.
> *
> * Takes task_lock(tsk) to prevent freeing of its mempolicy.
> @@ -2111,29 +2111,13 @@ bool mempolicy_nodemask_intersects(struct task_struct *tsk,
>
> if (!mask)
> return ret;
> +
> task_lock(tsk);
> mempolicy = tsk->mempolicy;
> - if (!mempolicy)
> - goto out;
> -
> - switch (mempolicy->mode) {
> - case MPOL_PREFERRED:
> - /*
> - * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
> - * allocate from, they may fallback to other nodes when oom.
> - * Thus, it's possible for tsk to have allocated memory from
> - * nodes in mask.
> - */
> - break;
> - case MPOL_BIND:
> - case MPOL_INTERLEAVE:
> + if (mempolicy && mempolicy->mode == MPOL_BIND)
> ret = nodes_intersects(mempolicy->v.nodes, *mask);
If MPOL_INTERLEAVE is deemed only a suggestion, the same could be
considered true of MPOL_BIND intersection as well, no?
> - break;
> - default:
> - BUG();
> - }
> -out:
> task_unlock(tsk);
> +
> return ret;
> }
>
> --
> 2.7.4
>
>
On Thu, 20 May 2021, Feng Tang wrote:
> Currently kernel_mbind() and kernel_set_mempolicy() do almost
> the same parameter sanity checking and preprocessing.
>
> Add a macro to unify the code, reduce the redundancy, and make
> it easier to change the preprocessing code in the future.
>
> Signed-off-by: Feng Tang <[email protected]>
> ---
> mm/mempolicy.c | 46 +++++++++++++++++++---------------------------
> 1 file changed, 19 insertions(+), 27 deletions(-)
>
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index 1964cca..0f5bf60 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -1460,25 +1460,29 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
> return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
> }
>
> +#define MPOL_PRE_PROCESS() \
This should be extracted to a helper function returning bool, not a macro.
> + \
> + nodemask_t nodes; \
> + int err; \
> + unsigned short mode_flags; \
> + mode_flags = mode & MPOL_MODE_FLAGS; \
> + mode &= ~MPOL_MODE_FLAGS; \
> + if (mode >= MPOL_MAX) \
> + return -EINVAL; \
> + if ((mode_flags & MPOL_F_STATIC_NODES) && \
> + (mode_flags & MPOL_F_RELATIVE_NODES)) \
> + return -EINVAL; \
> + err = get_nodes(&nodes, nmask, maxnode); \
> + if (err) \
> + return err;
> +
> static long kernel_mbind(unsigned long start, unsigned long len,
> unsigned long mode, const unsigned long __user *nmask,
> unsigned long maxnode, unsigned int flags)
> {
> - nodemask_t nodes;
> - int err;
> - unsigned short mode_flags;
> + MPOL_PRE_PROCESS();
>
> start = untagged_addr(start);
> - mode_flags = mode & MPOL_MODE_FLAGS;
> - mode &= ~MPOL_MODE_FLAGS;
> - if (mode >= MPOL_MAX)
> - return -EINVAL;
> - if ((mode_flags & MPOL_F_STATIC_NODES) &&
> - (mode_flags & MPOL_F_RELATIVE_NODES))
> - return -EINVAL;
> - err = get_nodes(&nodes, nmask, maxnode);
> - if (err)
> - return err;
> return do_mbind(start, len, mode, mode_flags, &nodes, flags);
> }
>
> @@ -1493,20 +1497,8 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
> static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
> unsigned long maxnode)
> {
> - int err;
> - nodemask_t nodes;
> - unsigned short flags;
> -
> - flags = mode & MPOL_MODE_FLAGS;
> - mode &= ~MPOL_MODE_FLAGS;
> - if ((unsigned int)mode >= MPOL_MAX)
> - return -EINVAL;
> - if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
> - return -EINVAL;
> - err = get_nodes(&nodes, nmask, maxnode);
> - if (err)
> - return err;
> - return do_set_mempolicy(mode, flags, &nodes);
> + MPOL_PRE_PROCESS();
> + return do_set_mempolicy(mode, mode_flags, &nodes);
> }
>
> SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
> --
> 2.7.4
>
>
Hi David,
Thanks for the review!
On Sun, May 23, 2021 at 10:15:00PM -0700, David Rientjes wrote:
> On Thu, 20 May 2021, Feng Tang wrote:
>
> > diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> > index d79fa29..1964cca 100644
> > --- a/mm/mempolicy.c
> > +++ b/mm/mempolicy.c
> > @@ -2098,7 +2098,7 @@ bool init_nodemask_of_mempolicy(nodemask_t *mask)
> > *
> > * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
> > * policy. Otherwise, check for intersection between mask and the policy
> > - * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
> > + * nodemask for 'bind' policy. For 'interleave', 'preferred' or 'local'
> > * policy, always return true since it may allocate elsewhere on fallback.
> > *
> > * Takes task_lock(tsk) to prevent freeing of its mempolicy.
> > @@ -2111,29 +2111,13 @@ bool mempolicy_nodemask_intersects(struct task_struct *tsk,
> >
> > if (!mask)
> > return ret;
> > +
> > task_lock(tsk);
> > mempolicy = tsk->mempolicy;
> > - if (!mempolicy)
> > - goto out;
> > -
> > - switch (mempolicy->mode) {
> > - case MPOL_PREFERRED:
> > - /*
> > - * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
> > - * allocate from, they may fallback to other nodes when oom.
> > - * Thus, it's possible for tsk to have allocated memory from
> > - * nodes in mask.
> > - */
> > - break;
> > - case MPOL_BIND:
> > - case MPOL_INTERLEAVE:
> > + if (mempolicy && mempolicy->mode == MPOL_BIND)
> > ret = nodes_intersects(mempolicy->v.nodes, *mask);
>
> If MPOL_INTERLEAVE is deemed only a suggestion, the same could be
> considered true of MPOL_BIND intersection as well, no?
IIUC, 'bind' and 'interleave' are different regarding memory allocation. In
alloc_pages_vma(), we have:
nmask = policy_nodemask(gfp, pol);
preferred_nid = policy_node(gfp, pol, node);
page = __alloc_pages(gfp, order, preferred_nid, nmask);
mpol_cond_put(pol);
and in policy_nodemask(), only the 'bind' policy may return its desired
nodemask, while all others return NULL, including 'interleave'. This NULL
is what lets the 'interleave' policy get memory from nodes outside its
nodemask. So when allocating memory, 'interleave' can get memory from all
nodes. I did some experiments which also confirm this.
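For reference, the relevant logic in policy_nodemask() is roughly this
(a slightly simplified sketch of the current code):

	/*
	 * Only MPOL_BIND hands the page allocator a restrictive
	 * nodemask; every other policy, including 'interleave',
	 * returns NULL, so the allocator is free to fall back to
	 * any node.
	 */
	nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
	{
		if (unlikely(policy->mode == MPOL_BIND) &&
		    apply_policy_zone(policy, gfp_zone(gfp)) &&
		    cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
			return &policy->v.nodes;

		return NULL;
	}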
Thanks,
Feng
>
> > - break;
> > - default:
> > - BUG();
> > - }
> > -out:
> > task_unlock(tsk);
> > +
> > return ret;
> > }
> >
> > --
> > 2.7.4
> >
> >
On Sun, May 23, 2021 at 10:16:11PM -0700, David Rientjes wrote:
> On Thu, 20 May 2021, Feng Tang wrote:
>
> > Currently kernel_mbind() and kernel_set_mempolicy() do almost
> > the same parameter sanity checking and preprocessing.
> >
> > Add a macro to unify the code, reduce the redundancy, and make
> > it easier to change the preprocessing code in the future.
> >
> > Signed-off-by: Feng Tang <[email protected]>
> > ---
> > mm/mempolicy.c | 46 +++++++++++++++++++---------------------------
> > 1 file changed, 19 insertions(+), 27 deletions(-)
> >
> > diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> > index 1964cca..0f5bf60 100644
> > --- a/mm/mempolicy.c
> > +++ b/mm/mempolicy.c
> > @@ -1460,25 +1460,29 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
> > return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
> > }
> >
> > +#define MPOL_PRE_PROCESS() \
>
> This should be extracted to a helper function returning bool, not a macro.
Yes, initially I did try it with an inline function, but as a function
it has quite a few input and output parameters, which made the code
still big and ugly.
But if the community thinks it's the right direction to go, I can change it.
Thanks,
Feng
> > + \
> > + nodemask_t nodes; \
> > + int err; \
> > + unsigned short mode_flags; \
> > + mode_flags = mode & MPOL_MODE_FLAGS; \
> > + mode &= ~MPOL_MODE_FLAGS; \
> > + if (mode >= MPOL_MAX) \
> > + return -EINVAL; \
> > + if ((mode_flags & MPOL_F_STATIC_NODES) && \
> > + (mode_flags & MPOL_F_RELATIVE_NODES)) \
> > + return -EINVAL; \
> > + err = get_nodes(&nodes, nmask, maxnode); \
> > + if (err) \
> > + return err;
> > +
> > static long kernel_mbind(unsigned long start, unsigned long len,
> > unsigned long mode, const unsigned long __user *nmask,
> > unsigned long maxnode, unsigned int flags)
> > {
> > - nodemask_t nodes;
> > - int err;
> > - unsigned short mode_flags;
> > + MPOL_PRE_PROCESS();
> >
> > start = untagged_addr(start);
> > - mode_flags = mode & MPOL_MODE_FLAGS;
> > - mode &= ~MPOL_MODE_FLAGS;
> > - if (mode >= MPOL_MAX)
> > - return -EINVAL;
> > - if ((mode_flags & MPOL_F_STATIC_NODES) &&
> > - (mode_flags & MPOL_F_RELATIVE_NODES))
> > - return -EINVAL;
> > - err = get_nodes(&nodes, nmask, maxnode);
> > - if (err)
> > - return err;
> > return do_mbind(start, len, mode, mode_flags, &nodes, flags);
> > }
> >
> > @@ -1493,20 +1497,8 @@ SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
> > static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
> > unsigned long maxnode)
> > {
> > - int err;
> > - nodemask_t nodes;
> > - unsigned short flags;
> > -
> > - flags = mode & MPOL_MODE_FLAGS;
> > - mode &= ~MPOL_MODE_FLAGS;
> > - if ((unsigned int)mode >= MPOL_MAX)
> > - return -EINVAL;
> > - if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
> > - return -EINVAL;
> > - err = get_nodes(&nodes, nmask, maxnode);
> > - if (err)
> > - return err;
> > - return do_set_mempolicy(mode, flags, &nodes);
> > + MPOL_PRE_PROCESS();
> > + return do_set_mempolicy(mode, mode_flags, &nodes);
> > }
> >
> > SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
> > --
> > 2.7.4
> >
> >
On Mon, May 24, 2021 at 01:59:39PM +0800, Tang, Feng wrote:
> On Sun, May 23, 2021 at 10:16:11PM -0700, David Rientjes wrote:
> > On Thu, 20 May 2021, Feng Tang wrote:
> >
> > > Currently kernel_mbind() and kernel_set_mempolicy() do almost
> > > the same parameter sanity checking and preprocessing.
> > >
> > > Add a macro to unify the code, reduce the redundancy, and make
> > > it easier to change the preprocessing code in the future.
> > >
> > > Signed-off-by: Feng Tang <[email protected]>
> > > ---
> > > mm/mempolicy.c | 46 +++++++++++++++++++---------------------------
> > > 1 file changed, 19 insertions(+), 27 deletions(-)
> > >
> > > diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> > > index 1964cca..0f5bf60 100644
> > > --- a/mm/mempolicy.c
> > > +++ b/mm/mempolicy.c
> > > @@ -1460,25 +1460,29 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
> > > return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
> > > }
> > >
> > > +#define MPOL_PRE_PROCESS() \
> >
> > This should be extracted to a helper function returning bool, not a macro.
>
> Yes, initially I did try it with an inline function, but as a function
> it has quite a few input and output parameters, which made the code
> still big and ugly.
>
> But if the community thinks it's the right direction to go, I can change it.
Following is a patch to unify the preprocessing by using a helper
function. Please review, thanks!
- Feng
---
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d79fa299b70c..8e4f47f925b6 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1460,6 +1460,20 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
+static inline int mpol_pre_process(int *mode, const unsigned long __user *nmask, unsigned long maxnode, nodemask_t *nodes, unsigned short *flags)
+{
+ int ret;
+
+ *flags = *mode & MPOL_MODE_FLAGS;
+ *mode &= ~MPOL_MODE_FLAGS;
+ if ((unsigned int)(*mode) >= MPOL_MAX)
+ return -EINVAL;
+ if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
+ return -EINVAL;
+ ret = get_nodes(nodes, nmask, maxnode);
+ return ret;
+}
+
static long kernel_mbind(unsigned long start, unsigned long len,
unsigned long mode, const unsigned long __user *nmask,
unsigned long maxnode, unsigned int flags)
@@ -1467,19 +1481,14 @@ static long kernel_mbind(unsigned long start, unsigned long len,
nodemask_t nodes;
int err;
unsigned short mode_flags;
+ int lmode = mode;
- start = untagged_addr(start);
- mode_flags = mode & MPOL_MODE_FLAGS;
- mode &= ~MPOL_MODE_FLAGS;
- if (mode >= MPOL_MAX)
- return -EINVAL;
- if ((mode_flags & MPOL_F_STATIC_NODES) &&
- (mode_flags & MPOL_F_RELATIVE_NODES))
- return -EINVAL;
- err = get_nodes(&nodes, nmask, maxnode);
+ err = mpol_pre_process(&lmode, nmask, maxnode, &nodes, &mode_flags);
if (err)
return err;
- return do_mbind(start, len, mode, mode_flags, &nodes, flags);
+
+ start = untagged_addr(start);
+ return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
}
SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
@@ -1495,18 +1504,14 @@ static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
{
int err;
nodemask_t nodes;
- unsigned short flags;
+ unsigned short mode_flags;
+ int lmode = mode;
- flags = mode & MPOL_MODE_FLAGS;
- mode &= ~MPOL_MODE_FLAGS;
- if ((unsigned int)mode >= MPOL_MAX)
- return -EINVAL;
- if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
- return -EINVAL;
- err = get_nodes(&nodes, nmask, maxnode);
+ err = mpol_pre_process(&lmode, nmask, maxnode, &nodes, &mode_flags);
if (err)
return err;
- return do_set_mempolicy(mode, flags, &nodes);
+
+ return do_set_mempolicy(lmode, mode_flags, &nodes);
}
SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,