On Thu, Sep 24, 2020 at 04:25:08PM +0800, Huang Ying wrote:
> Rename the MPOL_F_MORON flag to MPOL_F_MOPRON ("Migrate On Protnone
> Reference On Node") so that the identifier follows the code of conduct
> better while keeping its meaning as an acronym.
>
> Signed-off-by: "Huang, Ying" <[email protected]>
> Suggested-by: "Matthew Wilcox (Oracle)" <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Mel Gorman <[email protected]>
> Cc: Rik van Riel <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Dave Hansen <[email protected]>
> Cc: Andi Kleen <[email protected]>
> Cc: Michal Hocko <[email protected]>
> Cc: David Rientjes <[email protected]>
> ---
> include/uapi/linux/mempolicy.h | 2 +-
> kernel/sched/debug.c | 2 +-
> mm/mempolicy.c | 6 +++---
> 3 files changed, 5 insertions(+), 5 deletions(-)
>
> diff --git a/include/uapi/linux/mempolicy.h b/include/uapi/linux/mempolicy.h
> index 3354774af61e..3c3666d017e6 100644
> --- a/include/uapi/linux/mempolicy.h
> +++ b/include/uapi/linux/mempolicy.h
> @@ -60,7 +60,7 @@ enum {
> #define MPOL_F_SHARED (1 << 0) /* identify shared policies */
> #define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
> #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
> -#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */
> +#define MPOL_F_MOPRON (1 << 4) /* Migrate On Protnone Reference On Node */
>
>
> #endif /* _UAPI_LINUX_MEMPOLICY_H */
> diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
> index 36c54265bb2b..26495a344d8d 100644
> --- a/kernel/sched/debug.c
> +++ b/kernel/sched/debug.c
> @@ -844,7 +844,7 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
>
> task_lock(p);
> pol = p->mempolicy;
> - if (pol && !(pol->flags & MPOL_F_MORON))
> + if (pol && !(pol->flags & MPOL_F_MOPRON))
> pol = NULL;
> mpol_get(pol);
> task_unlock(p);
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index eddbe4e56c73..62cd159aa46d 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
> @@ -2515,7 +2515,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
> }
>
> /* Migrate the page towards the node whose CPU is referencing it */
> - if (pol->flags & MPOL_F_MORON) {
> + if (pol->flags & MPOL_F_MOPRON) {
> polnid = thisnid;
>
> if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
> @@ -2806,7 +2806,7 @@ void __init numa_policy_init(void)
> preferred_node_policy[nid] = (struct mempolicy) {
> .refcnt = ATOMIC_INIT(1),
> .mode = MPOL_PREFERRED,
> - .flags = MPOL_F_MOF | MPOL_F_MORON,
> + .flags = MPOL_F_MOF | MPOL_F_MOPRON,
> .v = { .preferred_node = nid, },
> };
> }
> @@ -3014,7 +3014,7 @@ void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
> unsigned short mode = MPOL_DEFAULT;
> unsigned short flags = 0;
>
> - if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
> + if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MOPRON)) {
> mode = pol->mode;
> flags = pol->flags;
> }
> --
> 2.28.0
>
>
Acked-by: Rafael Aquini <[email protected]>