2002-09-13 03:30:01

by Dave Hansen

Subject: [PATCH] per-zone kswapd process

# This is a BitKeeper generated patch for the following project:
# Project Name: Linux kernel tree
# This patch format is intended for GNU patch command version 2.5 or higher.
# This patch includes the following deltas:
# ChangeSet 1.625 -> 1.628
# include/linux/mmzone.h 1.19 -> 1.20
# include/linux/swap.h 1.57 -> 1.58
# mm/page_alloc.c 1.98 -> 1.101
# mm/vmscan.c 1.102 -> 1.105
#
# The following is the BitKeeper ChangeSet Log
# --------------------------------------------
# 02/09/12 haveblue@elm3b96.(none) 1.626
# add per-zone kswapd
# --------------------------------------------
# 02/09/12 haveblue@elm3b96.(none) 1.627
# fix some wli-indicated formatting bits
# --------------------------------------------
# 02/09/12 haveblue@elm3b96.(none) 1.628
# move waitqueue init to a more appropriate place
# --------------------------------------------
#
diff -Nru a/include/linux/mmzone.h b/include/linux/mmzone.h
--- a/include/linux/mmzone.h Thu Sep 12 20:24:39 2002
+++ b/include/linux/mmzone.h Thu Sep 12 20:24:39 2002
@@ -108,6 +108,8 @@
unsigned long wait_table_size;
unsigned long wait_table_bits;

+ wait_queue_head_t kswapd_wait;
+
/*
* Discontig memory support fields.
*/
diff -Nru a/include/linux/swap.h b/include/linux/swap.h
--- a/include/linux/swap.h Thu Sep 12 20:24:39 2002
+++ b/include/linux/swap.h Thu Sep 12 20:24:39 2002
@@ -162,7 +162,6 @@
extern void swap_setup(void);

/* linux/mm/vmscan.c */
-extern wait_queue_head_t kswapd_wait;
extern int try_to_free_pages(struct zone *, unsigned int, unsigned int);

/* linux/mm/page_io.c */
diff -Nru a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c Thu Sep 12 20:24:39 2002
+++ b/mm/page_alloc.c Thu Sep 12 20:24:39 2002
@@ -345,8 +345,15 @@
classzone->need_balance = 1;
mb();
/* we're somewhat low on memory, failed to find what we needed */
- if (waitqueue_active(&kswapd_wait))
- wake_up_interruptible(&kswapd_wait);
+ for (i = 0; zones[i] != NULL; i++) {
+ struct zone *z = zones[i];
+
+ /* We don't want to go swapping on zones that aren't actually
+ * low. This accounts for "incremental min" from last loop */
+ if (z->free_pages <= z->pages_low &&
+ waitqueue_active(&z->kswapd_wait))
+ wake_up_interruptible(&z->kswapd_wait);
+ }

/* Go through the zonelist again, taking __GFP_HIGH into account */
min = 1UL << order;
@@ -874,6 +881,8 @@
for(i = 0; i < zone->wait_table_size; ++i)
init_waitqueue_head(zone->wait_table + i);

+ init_waitqueue_head(&zone->kswapd_wait);
+
pgdat->nr_zones = j+1;

mask = (realsize / zone_balance_ratio[j]);
diff -Nru a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c Thu Sep 12 20:24:39 2002
+++ b/mm/vmscan.c Thu Sep 12 20:24:39 2002
@@ -713,8 +713,6 @@
return 0;
}

-DECLARE_WAIT_QUEUE_HEAD(kswapd_wait);
-
static int check_classzone_need_balance(struct zone *classzone)
{
struct zone *first_classzone;
@@ -728,71 +726,33 @@
return 1;
}

-static int kswapd_balance_pgdat(pg_data_t * pgdat)
+static int kswapd_balance_zone(struct zone *zone)
{
- int need_more_balance = 0, i;
- struct zone *zone;
-
- for (i = pgdat->nr_zones-1; i >= 0; i--) {
- zone = pgdat->node_zones + i;
+ int need_more_balance = 0;
+
+ do {
cond_resched();
if (!zone->need_balance)
- continue;
+ break;
if (!try_to_free_pages(zone, GFP_KSWAPD, 0)) {
zone->need_balance = 0;
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
- continue;
+ break;
}
if (check_classzone_need_balance(zone))
need_more_balance = 1;
else
zone->need_balance = 0;
- }
-
- return need_more_balance;
-}
-
-static void kswapd_balance(void)
-{
- int need_more_balance;
- pg_data_t * pgdat;
-
- do {
- need_more_balance = 0;
- pgdat = pgdat_list;
- do
- need_more_balance |= kswapd_balance_pgdat(pgdat);
- while ((pgdat = pgdat->pgdat_next));
} while (need_more_balance);
-}

-static int kswapd_can_sleep_pgdat(pg_data_t * pgdat)
-{
- struct zone *zone;
- int i;
-
- for (i = pgdat->nr_zones-1; i >= 0; i--) {
- zone = pgdat->node_zones + i;
- if (!zone->need_balance)
- continue;
- return 0;
- }
-
- return 1;
+ return 0;
}

-static int kswapd_can_sleep(void)
+static int kswapd_can_sleep_zone(struct zone *zone)
{
- pg_data_t * pgdat;
-
- pgdat = pgdat_list;
- do {
- if (kswapd_can_sleep_pgdat(pgdat))
- continue;
- return 0;
- } while ((pgdat = pgdat->pgdat_next));
-
+ if (zone->need_balance)
+ return 0;
return 1;
}

@@ -809,13 +769,18 @@
* If there are applications that are active memory-allocators
* (most normal use), this basically shouldn't matter.
*/
-int kswapd(void *unused)
+int kswapd_zone(void *p)
{
+ struct zone *zone = (struct zone *)p;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
+
+ printk( "kswapd%d starting for %s\n",
+ zone - zone->zone_pgdat->node_zones,
+ zone->name);

daemonize();
- strcpy(tsk->comm, "kswapd");
+ sprintf(tsk->comm, "kswapd%d", zone - zone->zone_pgdat->node_zones);
sigfillset(&tsk->blocked);

/*
@@ -839,30 +804,37 @@
if (current->flags & PF_FREEZE)
refrigerator(PF_IOTHREAD);
__set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&kswapd_wait, &wait);
+ add_wait_queue(&zone->kswapd_wait, &wait);

mb();
- if (kswapd_can_sleep())
+ if (kswapd_can_sleep_zone(zone))
schedule();

__set_current_state(TASK_RUNNING);
- remove_wait_queue(&kswapd_wait, &wait);
+ remove_wait_queue(&zone->kswapd_wait, &wait);

/*
* If we actually get into a low-memory situation,
* the processes needing more memory will wake us
* up on a more timely basis.
*/
- kswapd_balance();
+ kswapd_balance_zone(zone);
blk_run_queues();
}
}

static int __init kswapd_init(void)
{
+ struct zone* zone;
+
printk("Starting kswapd\n");
swap_setup();
- kernel_thread(kswapd, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+ for_each_zone(zone)
+ if (zone->size)
+ kernel_thread(kswapd_zone,
+ zone,
+ CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
+
return 0;
}


Attachments:
per-zone-kswapd-2.5.34-mm2-3.patch (5.95 kB)

2002-09-13 03:46:15

by Andrew Morton

Subject: Re: [PATCH] per-zone kswapd process

Dave Hansen wrote:
>
> This patch implements a kswapd process for each memory zone.

I still don't see why it's per zone and not per node. It seems strange
that a wee little laptop would be running two kswapds?

kswapd can get a ton of work done in the development VM and one per
node would, I expect, suffice?

Also, I'm wondering why the individual kernel threads don't have
their affinity masks set to make them run on the CPUs to which the
zone (or zones) are local?

Isn't it the case that with this code you could end up with a kswapd
on node 0 crunching on node 1's pages while a kswapd on node 1 crunches
on node 0's pages?

If I'm not totally out to lunch on this, I'd have thought that a
better approach would be

int sys_kswapd(int nid)
{
        return kernel_thread(kswapd, ...);
}

Userspace could then set up the CPU affinity based on some topology
or config information and would then parent a kswapd instance. That
kswapd instance would then be bound to the CPUs which were on the
node identified by `nid'.

Or something like that?
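
(Purely for illustration, the userspace side of that might look roughly
like the sketch below. __NR_kswapd and cpus_for_node() are hypothetical
-- the syscall does not exist and the helper would be built from whatever
topology/config information userspace has -- and the binding uses the raw
sched_setaffinity syscall of that era, pid/len/mask:)

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <sys/types.h>

    /* Hypothetical: built from /etc/numa-layout.conf or similar. */
    extern unsigned long cpus_for_node(int nid);

    /* Launch a kswapd for node `nid' and pin it to that node's CPUs.
     * Returns 0 on success, -1 on failure. */
    static int start_kswapd_for_node(int nid)
    {
        pid_t pid = syscall(__NR_kswapd, nid);   /* hypothetical syscall */
        unsigned long mask;

        if (pid < 0)
            return -1;
        mask = cpus_for_node(nid);
        return syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask);
    }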

2002-09-13 04:59:45

by William Lee Irwin III

Subject: Re: [PATCH] per-zone kswapd process

On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
> I still don't see why it's per zone and not per node. It seems strange
> that a wee little laptop would be running two kswapds?
> kswapd can get a ton of work done in the development VM and one per
> node would, I expect, suffice?

Machines without observable NUMA effects can benefit from it if it's
per-zone. It also follows that if there's more than one task doing this,
page replacement is less likely to block entirely. Last, but not least,
when I devised it, "per-zone" was the theme.


On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
> Also, I'm wondering why the individual kernel threads don't have
> their affinity masks set to make them run on the CPUs to which the
> zone (or zones) are local?
> Isn't it the case that with this code you could end up with a kswapd
> on node 0 crunching on node 1's pages while a kswapd on node 1 crunches
> on node 0's pages?

Without some architecture-neutral method of topology detection, there's
no way to do this. A follow-up when it's there should fix it.


On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
> If I'm not totally out to lunch on this, I'd have thought that a
> better approach would be
> int sys_kswapd(int nid)
> {
> return kernel_thread(kswapd, ...);
> }
> Userspace could then set up the CPU affinity based on some topology
> or config information and would then parent a kswapd instance. That
> kswapd instance would then be bound to the CPUs which were on the
> node identified by `nid'.
> Or something like that?

I'm very very scared of handing things like that to userspace, largely
because I don't trust userspace at all.

At this point, we need to enumerate nodes and provide a cpu to node
correspondence to userspace, and the kernel can obey, aside from the
question of "What do we do if we need to scan a node without a kswapd
started yet?". I think mbligh recently got the long-needed arch code in
for cpu to node... But I'm just not able to make the leap of faith that
memory detection is something that can ever comfortably be given to
userspace.


Cheers,
Bill

2002-09-13 05:08:01

by Martin J. Bligh

Subject: Re: [PATCH] per-zone kswapd process

>> Also, I'm wondering why the individual kernel threads don't have
>> their affinity masks set to make them run on the CPUs to which the
>> zone (or zones) are local?
>> Isn't it the case that with this code you could end up with a kswapd
>> on node 0 crunching on node 1's pages while a kswapd on node 1 crunches
>> on node 0's pages?
>
> Without some architecture-neutral method of topology detection, there's
> no way to do this. A follow-up when it's there should fix it.

Every discontigmem arch should implement cpu_to_node, with a generic
fallback mechanism that returns 0 or something. Not that we do right
now, but that's easy to fix. There should also be a node_to_cpus call
that returns a bitmask of which cpus are in that node.
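
(A generic fallback for the non-NUMA case could be as dumb as the sketch
below -- these exact names and macros are assumptions here, not code that
is in the tree yet:)

    /* Hypothetical defaults for architectures with no topology support:
     * everything lives on node 0 and every online CPU is "local" to it. */
    #ifndef cpu_to_node
    #define cpu_to_node(cpu)        (0)
    #endif
    #ifndef node_to_cpus
    #define node_to_cpus(node)      (cpu_online_map)
    #endif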

Matt ... want to sneak in the first bit of the topology patch, or
whatever lump this fell under? Seems like an appropriate juncture.
We have the code already somewhere, just need to fish it out.

>> If I'm not totally out to lunch on this, I'd have thought that a
>> better approach would be
>> int sys_kswapd(int nid)
>> {
>> return kernel_thread(kswapd, ...);
>> }
>> Userspace could then set up the CPU affinity based on some topology
>> or config information and would then parent a kswapd instance. That
>> kswapd instance would then be bound to the CPUs which were on the
>> node identified by `nid'.
>> Or something like that?
>
> I'm very very scared of handing things like that to userspace, largely
> because I don't trust userspace at all.
>
> At this point, we need to enumerate nodes and provide a cpu to node
> correspondence to userspace, and the kernel can obey, aside from the
> question of "What do we do if we need to scan a node without a kswapd
> started yet?". I think mbligh recently got the long-needed arch code in
> for cpu to node... But I'm just not able to make the leap of faith that
> memory detection is something that can ever comfortably be given to
> userspace.

I don't think the userspace stuff is necessary - we can do this all
in the kernel dead easily I think. Just need a couple of definitions,
which are trivially small functions.

M.

2002-09-13 05:26:35

by Andrew Morton

Subject: Re: [PATCH] per-zone kswapd process

William Lee Irwin III wrote:
>
> On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
> > I still don't see why it's per zone and not per node. It seems strange
> > that a wee little laptop would be running two kswapds?
> > kswapd can get a ton of work done in the development VM and one per
> > node would, I expect, suffice?
>
> Machines without observable NUMA effects can benefit from it if it's
> per-zone. It also follows that if there's more than one task doing this,
> page replacement is less likely to block entirely. Last, but not least,
> when I devised it, "per-zone" was the theme.

Maybe, marginally. You could pass a gfp mask to sys_kswapd to select
the zones if that's really a benefit. But if this _is_ a benefit then
it's a VM bug.

Because if a single kswapd cannot service three zones then it cannot
service one zone. (Maybe. We need to do per-zone throttling soon to
fix your OOM problems properly, but then, that shouldn't throttle
kswapd).

> On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
> > Also, I'm wondering why the individual kernel threads don't have
> > their affinity masks set to make them run on the CPUs to which the
> > zone (or zones) are local?
> > Isn't it the case that with this code you could end up with a kswapd
> > on node 0 crunching on node 1's pages while a kswapd on node 1 crunches
> > on node 0's pages?
>
> Without some architecture-neutral method of topology detection, there's
> no way to do this. A follow-up when it's there should fix it.

Sorry, I don't buy that.

a) It does not need to be architecture neutral.

b) You surely need a way of communicating the discovered topology
to userspace anyway.

c) $EDITOR /etc/numa-layout.conf

d) $EDITOR /etc/kswapd.conf

> On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
> > If I'm not totally out to lunch on this, I'd have thought that a
> > better approach would be
> > int sys_kswapd(int nid)
> > {
> > return kernel_thread(kswapd, ...);
> > }
> > Userspace could then set up the CPU affinity based on some topology
> > or config information and would then parent a kswapd instance. That
> > kswapd instance would then be bound to the CPUs which were on the
> > node identified by `nid'.
> > Or something like that?
>
> I'm very very scared of handing things like that to userspace, largely
> because I don't trust userspace at all.

Me either. I've seen workloads in which userspace consumes
over 50% of the CPU resources. It should be banned!

> At this point, we need to enumerate nodes and provide a cpu to node
> correspondence to userspace, and the kernel can obey, aside from the
> question of "What do we do if we need to scan a node without a kswapd
> started yet?".

kswapd is completely optional. Put a `do_exit(0)' into the current
one and watch. You'll get crappy dbench numbers, but it stays up.

> I think mbligh recently got the long-needed arch code in
> for cpu to node... But I'm just not able to make the leap of faith that
> memory detection is something that can ever comfortably be given to
> userspace.

A simple syscall which allows you to launch a kswapd instance against
a group of zones on any group of CPUs provides complete generality
and flexibility to userspace. And it is architecture neutral.

If it really is incredibly hard to divine the topology from userspace
then you need to fix that up. Provide the topology to userspace.
Which has the added benefit of providing, umm, the topology to userspace ;)

2002-09-13 05:36:18

by Martin J. Bligh

Subject: Re: [PATCH] per-zone kswapd process

> Sorry, I don't buy that.
>
> a) It does not need to be architecture neutral.
>
> b) You surely need a way of communicating the discovered topology
> to userspace anyway.
>
> c) $EDITOR /etc/numa-layout.conf
>
> d) $EDITOR /etc/kswapd.conf

I guess you could do that, but it seems overly complicated to me.

>> I think mbligh recently got the long-needed arch code in
>> for cpu to node... But I'm just not able to make the leap of faith that
>> memory detection is something that can ever comfortably be given to
>> userspace.
>
> A simple syscall which allows you to launch a kswapd instance against
> a group of zones on any group of CPUs provides complete generality
> and flexibility to userspace. And it is architecture neutral.
>
> If it really is incredibly hard to divine the topology from userspace
> then you need to fix that up. Provide the topology to userspace.
> Which has the added benefit of providing, umm, the topology to userspace ;)

Can we make a simple default of 1 per node, which is what 99%
of people want, and then make it more complicated later if people
complain? It's really pretty easy:

for (node = 0; node < numnodes; ++node) {
        kswapd = kick_off_kswapd_for_node(node);
        kswapd->cpus_allowed = node_to_cpus(node);
}

Or whatever the current cpus_allowed method is. All we seem to need
is node_to_cpus ... I can give that to you tomorrow with no problem,
it's trivial.
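
(Fleshed out only slightly -- still just a sketch; kick_off_kswapd_for_node()
and node_to_cpus() are assumed helpers here, not existing code:)

    /* One kswapd per node, bound to that node's CPUs. */
    static int __init kswapd_init(void)
    {
        int node;

        swap_setup();
        for (node = 0; node < numnodes; node++) {
            struct task_struct *kswapd;

            kswapd = kick_off_kswapd_for_node(node);  /* spawns the thread */
            if (kswapd)
                kswapd->cpus_allowed = node_to_cpus(node);
        }
        return 0;
    }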

M.

2002-09-13 05:44:20

by Andrew Morton

Subject: Re: [PATCH] per-zone kswapd process

"Martin J. Bligh" wrote:
>
> ..
> Can we make a simple default of 1 per node, which is what 99%
> of people want, and then make it more complicated later if people
> complain? It's really pretty easy:
>
> for (node = 0; node < numnodes; ++node) {
>         kswapd = kick_off_kswapd_for_node(node);
>         kswapd->cpus_allowed = node_to_cpus(node);
> }

Seems sane.

> Or whatever the current cpus_allowed method is. All we seem to need
> is node_to_cpus ... I can give that to you tomorrow with no problem,
> it's trivial.

Tomorrow sounds too early - it'd be nice to get some before-n-after
performance testing to go along with that patch ;)

2002-09-13 13:00:38

by Alan

Subject: Re: [PATCH] per-zone kswapd process

On Fri, 2002-09-13 at 05:59, William Lee Irwin III wrote:
> Machines without observable NUMA effects can benefit from it if it's
> per-zone. It also follows that if there's more than one task doing this,
> page replacement is less likely to block entirely. Last, but not least,
> when I devised it, "per-zone" was the theme.

It will also increase the amount of disk head thrashing, surely?

2002-09-13 21:31:51

by William Lee Irwin III

Subject: Re: [PATCH] per-zone kswapd process

On Fri, 2002-09-13 at 05:59, William Lee Irwin III wrote:
>> Machines without observable NUMA effects can benefit from it if it's
>> per-zone. It also follows that if there's more than one task doing this,
>> page replacement is less likely to block entirely. Last, but not least,
>> when I devised it, "per-zone" was the theme.

On Fri, Sep 13, 2002 at 02:05:52PM +0100, Alan Cox wrote:
> It will also increase the amount of disk head thrashing, surely?

I doubt it. Writeout isn't really supposed to happen there in 2.4
either, except under duress. OTOH I've not been doing much with this
directly since rmap10c.


Cheers,
Bill

2002-09-16 05:40:09

by Daniel Phillips

Subject: Re: [PATCH] per-zone kswapd process

On Friday 13 September 2002 06:59, William Lee Irwin III wrote:
> On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
> > I still don't see why it's per zone and not per node. It seems strange
> > that a wee little laptop would be running two kswapds?
> > kswapd can get a ton of work done in the development VM and one per
> > node would, I expect, suffice?
>
> Machines without observable NUMA effects can benefit from it if it's
> per-zone.

How?

--
Daniel

2002-09-16 07:46:01

by William Lee Irwin III

Subject: Re: [PATCH] per-zone kswapd process

On Thu, Sep 12, 2002 at 09:06:20PM -0700, Andrew Morton wrote:
>>> I still don't see why it's per zone and not per node. It seems strange
>>> that a wee little laptop would be running two kswapds?
>>> kswapd can get a ton of work done in the development VM and one per
>>> node would, I expect, suffice?

On Friday 13 September 2002 06:59, William Lee Irwin III wrote:
>> Machines without observable NUMA effects can benefit from it if it's
>> per-zone.

On Mon, Sep 16, 2002 at 07:44:30AM +0200, Daniel Phillips wrote:
> How?

The notion was that some level of parallelism would be bestowed on the
single-node case by using separate worker threads on a per-zone basis,
since such machines have only one node to spawn worker threads for anyway.

This notion apparently got shot down somewhere, and I don't care to rise
to its defense. I've lost enough debates this release to know better than
to try.


Bill

2002-09-16 15:08:22

by Rik van Riel

Subject: Re: [PATCH] per-zone kswapd process

On Mon, 16 Sep 2002, William Lee Irwin III wrote:

> This notion apparently got shot down somewhere, and I don't care to rise
> to its defense. I've lost enough debates this release to know better
> than to try.

Don't worry about this, there are bigger fish around, lower
hanging sea fruit, so to say. ;)

Rik
--
Bravely reimplemented by the knights who say "NIH".

http://www.surriel.com/ http://distro.conectiva.com/

Spamtraps of the month: [email protected] [email protected]

2002-09-18 16:07:20

by Hubertus Franke

Subject: [PATCH] recognize MAP_LOCKED in mmap() call


Andrew, at the current time mmap() ignores a MAP_LOCKED flag passed to it.
The only way we can get VM_LOCKED associated with the newly created VMA
is to have previously called mlockall() on the process, which sets
mm->def_flags |= VM_LOCKED, or to subsequently call mlock() on the
newly created VMA.

The attached patch checks for MAP_LOCKED being passed and if so checks
the capabilities of the process. Limit checks were already in place.
--
-- Hubertus Franke ([email protected])

--------------------------------< PATCH >------------------------------
--- linux-2.5.35/mm/mmap.c Wed Sep 18 11:12:13 2002
+++ linux-2.5.35-fix/mm/mmap.c Wed Sep 18 11:44:32 2002
@@ -461,6 +461,11 @@
*/
vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

+ if (flags & MAP_LOCKED) {
+ if (!capable(CAP_IPC_LOCK))
+ return -EPERM;
+ vm_flags |= VM_LOCKED;
+ }
/* mlock MCL_FUTURE? */
if (vm_flags & VM_LOCKED) {
unsigned long locked = mm->locked_vm << PAGE_SHIFT;




Attachments:
patch.2.5.35.mmap_locked (452.00 B)
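
(For reference, a minimal userspace sketch of what the flag buys you with
the patch applied -- the caller needs CAP_IPC_LOCK, and the size here is
arbitrary:)

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4 * 4096;
        /* With the patch, MAP_LOCKED behaves like mmap() followed by
         * mlock(): the VMA is created VM_LOCKED, so it is not paged out. */
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap(MAP_LOCKED)");  /* e.g. EPERM without CAP_IPC_LOCK */
            return 1;
        }
        munmap(p, len);
        return 0;
    }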

2002-09-18 16:24:47

by Andrew Morton

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call

Hubertus Franke wrote:
>
> Andrew, at the current time mmap() ignores a MAP_LOCKED flag passed to it.
> The only way we can get VM_LOCKED associated with the newly created VMA
> is to have previously called mlockall() on the process, which sets
> mm->def_flags |= VM_LOCKED, or to subsequently call mlock() on the
> newly created VMA.
>
> The attached patch checks for MAP_LOCKED being passed and if so checks
> the capabilities of the process. Limit checks were already in place.

Looks sane, thanks.

It appears that MAP_LOCKED is a Linux-special, so presumably it
_used_ to work. I wonder when it broke?

Your patch applies to 2.4 as well; it would be useful to give that
a sanity test and send a copy to Marcelo.

(SuS really only anticipates that mmap needs to look at prior mlocks
in force against the address range. It also says

Process memory locking does apply to shared memory regions,

and we don't do that either. I think we should; can't see why SuS
requires this.)

2002-09-18 19:14:58

by Mark_H_Johnson

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call


Andrew Morton wrote:
>(SuS really only anticipates that mmap needs to look at prior mlocks
>in force against the address range. It also says
>
> Process memory locking does apply to shared memory regions,
>
>and we don't do that either. I think we should; can't see why SuS
>requires this.)

Let me make sure I read what you said correctly. Does this mean that Linux
2.4 (or 2.5) kernels do not lock shared memory regions if a process uses
mlockall?

If not, that is *really bad* for our real time applications. We don't want
to take a page fault while running some 80hz task, just because some
non-real time application tried to use what little physical memory we allow
for the kernel and all other applications.

I asked a related question about a week ago on linux-mm and didn't get a
response. Basically, I was concerned that top did not show RSS == Size when
mlockall(MCL_CURRENT|MCL_FUTURE) was called. Could this explain the
difference or is there something else that I'm missing here?
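
(One way to see this from inside the application rather than via top --
a small sketch that locks everything and then dumps the Vm* lines from
/proc/self/status, where VmRSS vs. VmSize is exactly the comparison in
question:)

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        char line[256];
        FILE *f;

        if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
            perror("mlockall");
            return 1;
        }
        /* If everything was really faulted in and locked, VmRSS should be
         * close to VmSize, and VmLck should cover the locked mappings. */
        f = fopen("/proc/self/status", "r");
        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            if (strncmp(line, "Vm", 2) == 0)
                fputs(line, stdout);
        fclose(f);
        return 0;
    }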

Thanks.
--Mark H Johnson
<mailto:[email protected]>



2002-09-18 19:35:11

by Rik van Riel

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call

On Wed, 18 Sep 2002 [email protected] wrote:
> Andrew Morton wrote:
> >(SuS really only anticipates that mmap needs to look at prior mlocks
> >in force against the address range. It also says
> >
> > Process memory locking does apply to shared memory regions,
> >
> >and we don't do that either. I think we should; can't see why SuS
> >requires this.)
>
> Let me make sure I read what you said correctly. Does this mean that
> Linux 2.4 (or 2.5) kernels do not lock shared memory regions if a
> process uses mlockall?

But it does. Linux won't evict memory that's MLOCKed...

cheers,

Rik
--
Spamtrap of the month: [email protected]

http://www.surriel.com/ http://distro.conectiva.com/

2002-09-18 19:49:11

by Andrew Morton

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call

[email protected] wrote:
>
> Andrew Morton wrote:
> >(SuS really only anticipates that mmap needs to look at prior mlocks
> >in force against the address range. It also says
> >
> > Process memory locking does apply to shared memory regions,
> >
> >and we don't do that either. I think we should; can't see why SuS
> >requires this.)
>
> Let me make sure I read what you said correctly. Does this mean that Linux
> 2.4 (or 2.5) kernels do not lock shared memory regions if a process uses
> mlockall?

Linux does lock these regions. SuS seems to imply that we shouldn't.
But we should.

> If not, that is *really bad* for our real time applications. We don't want
> to take a page fault while running some 80hz task, just because some
> non-real time application tried to use what little physical memory we allow
> for the kernel and all other applications.
>
> I asked a related question about a week ago on linux-mm and didn't get a
> response. Basically, I was concerned that top did not show RSS == Size when
> mlockall(MCL_CURRENT|MCL_FUTURE) was called. Could this explain the
> difference or is there something else that I'm missing here?
>

That mlockall should have faulted everything in. It could be an
accounting bug, or it could be a real bug. That's not an aspect which
gets tested a lot. I'll take a look.

2002-09-25 15:41:07

by Hubertus Franke

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call

On Wednesday 18 September 2002 03:54 pm, Andrew Morton wrote:
> [email protected] wrote:
> > Andrew Morton wrote:
> > >(SuS really only anticipates that mmap needs to look at prior mlocks
> > >in force against the address range. It also says
> > >
> > > Process memory locking does apply to shared memory regions,
> > >
> > >and we don't do that either. I think we should; can't see why SuS
> > >requires this.)
> >
> > Let me make sure I read what you said correctly. Does this mean that
> > Linux 2.4 (or 2.5) kernels do not lock shared memory regions if a process
> > uses mlockall?
>
> Linux does lock these regions. SuS seems to imply that we shouldn't.
> But we should.
>
> > If not, that is *really bad* for our real time applications. We don't
> > want to take a page fault while running some 80hz task, just because some
> > non-real time application tried to use what little physical memory we
> > allow for the kernel and all other applications.
> >
> > I asked a related question about a week ago on linux-mm and didn't get a
> > response. Basically, I was concerned that top did not show RSS == Size
> > when mlockall(MCL_CURRENT|MCL_FUTURE) was called. Could this explain the
> > difference or is there something else that I'm missing here?
>
> That mlockall should have faulted everything in. It could be an
> accounting bug, or it could be a bug. That's not an aspect which
> gets tested a lot. I'll take a look.


This is what the manpage says...

mlockall disables paging for all pages mapped into the
address space of the calling process. This includes the
pages of the code, data and stack segment, as well as
shared libraries, user space kernel data, shared memory
and memory mapped files. All mapped pages are guaranteed
to be resident in RAM when the mlockall system call
returns successfully and they are guaranteed to stay in
RAM until the pages are unlocked again by munlock or
munlockall or until the process terminates or starts
another program with exec. Child processes do not inherit
page locks across a fork.

Do you read that all pages must be faulted in a priori?
Or is it sufficient to make sure none of the currently mapped
pages are swapped out and future swapout is prohibited?

This still allows for page faults on pages that have not been
mapped in the specified range or process. If required the
app could touch these and they wouldn't be swapped later.
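
(Concretely, the sort of touch loop an application could use, assuming
the region has already been mmap()ed -- one write per page is enough to
fault the page in:)

    #include <stddef.h>
    #include <unistd.h>

    /* Fault in every page of [buf, buf + len) by touching one byte per
     * page; writing also forces any copy-on-write pages to be broken. */
    static void prefault(volatile char *buf, size_t len)
    {
        size_t page = getpagesize();
        size_t off;

        for (off = 0; off < len; off += page)
            buf[off] = buf[off];
    }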


--
-- Hubertus Franke ([email protected])

2002-09-25 15:34:32

by Hubertus Franke

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call

On Wednesday 18 September 2002 03:18 pm, [email protected] wrote:
> Andrew Morton wrote:
> >(SuS really only anticipates that mmap needs to look at prior mlocks
> >in force against the address range. It also says
> >
> > Process memory locking does apply to shared memory regions,
> >
> >and we don't do that either. I think we should; can't see why SuS
> >requires this.)
>
> Let me make sure I read what you said correctly. Does this mean that Linux
> 2.4 (or 2.5) kernels do not lock shared memory regions if a process uses
> mlockall?
>
> If not, that is *really bad* for our real time applications. We don't want
> to take a page fault while running some 80hz task, just because some
> non-real time application tried to use what little physical memory we allow
> for the kernel and all other applications.
>
> I asked a related question about a week ago on linux-mm and didn't get a
> response. Basically, I was concerned that top did not show RSS == Size when
> mlockall(MCL_CURRENT|MCL_FUTURE) was called. Could this explain the
> difference or is there something else that I'm missing here?
>
> Thanks.
> --Mark H Johnson
> <mailto:[email protected]>


Sorry for the lengthy delay.
mlock() and mlockall() do the right thing.
However, mmap(MAP_LOCKED) should behave like an mmap + mlock operation
according to the manpages. This was not implemented: the transformation
from the mmap flags to vm_flags never checked for MAP_LOCKED, only for
mm->def_flags, which only covers a previous mlockall() call.

Hope this clarifies it.
--
-- Hubertus Franke ([email protected])

2002-09-25 16:30:33

by Andrew Morton

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call

Hubertus Franke wrote:
>
> ...
> This is what the manpage says...
>
> mlockall disables paging for all pages mapped into the
> address space of the calling process. This includes the
> pages of the code, data and stack segment, as well as
> shared libraries, user space kernel data, shared memory
> and memory mapped files. All mapped pages are guaranteed
> to be resident in RAM when the mlockall system call
> returns successfully and they are guaranteed to stay in
> RAM until the pages are unlocked again by munlock or
> munlockall or until the process terminates or starts
> another program with exec. Child processes do not inherit
> page locks across a fork.
>
> Do you read that all pages must be faulted in a priori?

For MCL_FUTURE.

> Or is it sufficient to make sure none of the currently mapped
> pages are swapped out and future swapout is prohibited?

I'd say that we should try to make all the pages present. But
if it's a problem for (say) a hugepage implementation then it's
unlikely that the world would end if these things were still
demand paged in.

2002-09-25 17:05:25

by Mark_H_Johnson

Subject: Re: [PATCH] recognize MAP_LOCKED in mmap() call


>This is what the manpage says...
>
> mlockall disables paging for all pages mapped into the
> address space of the calling process. This includes the
> pages of the code, data and stack segment, as well as
> shared libraries, user space kernel data, shared memory
> and memory mapped files. All mapped pages are guaranteed
> to be resident in RAM when the mlockall system call
> returns successfully and they are guaranteed to stay in
> RAM until the pages are unlocked again by munlock or
> munlockall or until the process terminates or starts
> another program with exec. Child processes do not inherit
> page locks across a fork.
>
>Do you read that all pages must be faulted in a priori?
>Or is it sufficient to make sure none of the currently mapped
>pages are swapped out and future swapout is prohibited?
>
The key phrase is that "...all mapped pages are guaranteed to be resident
in RAM when the mlockall system call returns successfully..." (third
sentence.) In that way I would expect the segments containing the code,
heap, and current stack allocations to be resident. I do not expect
the full stack allocation (e.g., 2M for each thread if that is the
stack size) to be mapped (nor resident) unless I take special action
to grow the stack that large.

We happen to have special code to grow each stack and allocate heap
variables to account for what we expect to use prior to mlockall.
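
(Roughly the shape that special code takes -- a hedged sketch only, with
made-up reserve sizes; the idea is just to grow and touch everything
before the mlockall() so the later real-time path never faults:)

    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>

    #define STACK_RESERVE  (512 * 1024)       /* made-up figures */
    #define HEAP_RESERVE   (8 * 1024 * 1024)

    static void *heap_pad;  /* kept allocated so the pages stay mapped */

    /* Grow and touch the stack and heap out to their expected peak, then
     * lock everything that is now mapped (and anything mapped later). */
    static void reserve_memory(void)
    {
        char stack_pad[STACK_RESERVE];

        memset(stack_pad, 0, sizeof(stack_pad));   /* fault in stack pages */
        heap_pad = malloc(HEAP_RESERVE);
        if (heap_pad)
            memset(heap_pad, 0, HEAP_RESERVE);     /* fault in heap pages  */
        mlockall(MCL_CURRENT | MCL_FUTURE);
    }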

That does raise a question though - are there other segments (e.g.,
debug information) that may be in the total size calculations that
are mapped only when some special action is taken (e.g., I run the
debugger)? That would explain the difference - the measures I reported
on were with executables built with debug symbols.

That might also explain a possible problem we have had when trying
to debug such an application after an hour of run time or so. If
running gdb triggers a growth in locked memory (and we don't have
enough) - we would likely get an error condition that isn't normally
expected by gdb.

>This still allows for page faults on pages that have not been
>mapped in the specified range or process. If required the
>app could touch these and they wouldn't be swapped later.
>

I don't think touching the pages is enough - they have to be allocated
and the maps generated (e.g., calls to mmap, malloc). That is a possibly
expensive operation when real time is active and something we try to
avoid whenever possible.

--
--Mark H Johnson
<mailto:[email protected]>