2022-12-02 17:24:57

by Brian Foster

Subject: [PATCH v3 1/5] pid: replace pidmap_lock with xarray lock

As a first step toward converting the struct pid tracking code from
the idr to the xarray, replace the custom pidmap_lock spinlock with
the internal lock associated with the underlying xarray. This is
effectively equivalent to using idr_lock() and friends, but since
the goal is to disentangle from the idr, move directly to the
underlying xarray API.
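
Concretely, the locking change looks like this (an illustrative
sketch; the idr_lock() equivalence follows from the helper macros in
include/linux/idr.h, which expand to the same xa_lock_*() calls):

	unsigned long flags;

	/* Before: one global lock, separate from the data structure. */
	spin_lock_irqsave(&pidmap_lock, flags);
	idr_remove(&ns->idr, nr);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	/*
	 * After: the spinlock embedded in the idr's backing xarray,
	 * one per pid namespace.  idr_lock_irqsave(&ns->idr, flags)
	 * would expand to exactly this, but the xarray form is used
	 * directly since the idr is going away.
	 */
	xa_lock_irqsave(&ns->idr.idr_rt, flags);
	idr_remove(&ns->idr, nr);
	xa_unlock_irqrestore(&ns->idr.idr_rt, flags);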

Signed-off-by: Matthew Wilcox <[email protected]>
Signed-off-by: Brian Foster <[email protected]>
---
kernel/pid.c | 79 ++++++++++++++++++++++++++--------------------------
1 file changed, 40 insertions(+), 39 deletions(-)

diff --git a/kernel/pid.c b/kernel/pid.c
index 3fbc5e46b721..3622f8b13143 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -86,22 +86,6 @@ struct pid_namespace init_pid_ns = {
};
EXPORT_SYMBOL_GPL(init_pid_ns);

-/*
- * Note: disable interrupts while the pidmap_lock is held as an
- * interrupt might come in and do read_lock(&tasklist_lock).
- *
- * If we don't disable interrupts there is a nasty deadlock between
- * detach_pid()->free_pid() and another cpu that does
- * spin_lock(&pidmap_lock) followed by an interrupt routine that does
- * read_lock(&tasklist_lock);
- *
- * After we clean up the tasklist_lock and know there are no
- * irq handlers that take it we can leave the interrupts enabled.
- * For now it is easier to be safe than to prove it can't happen.
- */
-
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
-
void put_pid(struct pid *pid)
{
struct pid_namespace *ns;
@@ -129,10 +113,11 @@ void free_pid(struct pid *pid)
int i;
unsigned long flags;

- spin_lock_irqsave(&pidmap_lock, flags);
for (i = 0; i <= pid->level; i++) {
struct upid *upid = pid->numbers + i;
struct pid_namespace *ns = upid->ns;
+
+ xa_lock_irqsave(&ns->idr.idr_rt, flags);
switch (--ns->pid_allocated) {
case 2:
case 1:
@@ -150,8 +135,8 @@ void free_pid(struct pid *pid)
}

idr_remove(&ns->idr, upid->nr);
+ xa_unlock_irqrestore(&ns->idr.idr_rt, flags);
}
- spin_unlock_irqrestore(&pidmap_lock, flags);

call_rcu(&pid->rcu, delayed_put_pid);
}
@@ -206,7 +191,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
}

idr_preload(GFP_KERNEL);
- spin_lock_irq(&pidmap_lock);
+ xa_lock_irq(&tmp->idr.idr_rt);

if (tid) {
nr = idr_alloc(&tmp->idr, NULL, tid,
@@ -233,7 +218,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
pid_max, GFP_ATOMIC);
}
- spin_unlock_irq(&pidmap_lock);
+ xa_unlock_irq(&tmp->idr.idr_rt);
idr_preload_end();

if (nr < 0) {
@@ -266,34 +251,38 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
INIT_HLIST_HEAD(&pid->inodes);

upid = pid->numbers + ns->level;
- spin_lock_irq(&pidmap_lock);
- if (!(ns->pid_allocated & PIDNS_ADDING))
- goto out_unlock;
for ( ; upid >= pid->numbers; --upid) {
+ tmp = upid->ns;
+
+ xa_lock_irq(&tmp->idr.idr_rt);
+ if (tmp == ns && !(tmp->pid_allocated & PIDNS_ADDING)) {
+ xa_unlock_irq(&tmp->idr.idr_rt);
+ put_pid_ns(ns);
+ goto out_free;
+ }
+
/* Make the PID visible to find_pid_ns. */
- idr_replace(&upid->ns->idr, pid, upid->nr);
- upid->ns->pid_allocated++;
+ idr_replace(&tmp->idr, pid, upid->nr);
+ tmp->pid_allocated++;
+ xa_unlock_irq(&tmp->idr.idr_rt);
}
- spin_unlock_irq(&pidmap_lock);

return pid;

-out_unlock:
- spin_unlock_irq(&pidmap_lock);
- put_pid_ns(ns);
-
out_free:
- spin_lock_irq(&pidmap_lock);
while (++i <= ns->level) {
upid = pid->numbers + i;
- idr_remove(&upid->ns->idr, upid->nr);
- }
+ tmp = upid->ns;

- /* On failure to allocate the first pid, reset the state */
- if (ns->pid_allocated == PIDNS_ADDING)
- idr_set_cursor(&ns->idr, 0);
+ xa_lock_irq(&tmp->idr.idr_rt);

- spin_unlock_irq(&pidmap_lock);
+ /* On failure to allocate the first pid, reset the state */
+ if (tmp == ns && tmp->pid_allocated == PIDNS_ADDING)
+ idr_set_cursor(&ns->idr, 0);
+
+ idr_remove(&tmp->idr, upid->nr);
+ xa_unlock_irq(&tmp->idr.idr_rt);
+ }

kmem_cache_free(ns->pid_cachep, pid);
return ERR_PTR(retval);
@@ -301,9 +290,9 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,

void disable_pid_allocation(struct pid_namespace *ns)
{
- spin_lock_irq(&pidmap_lock);
+ xa_lock_irq(&ns->idr.idr_rt);
ns->pid_allocated &= ~PIDNS_ADDING;
- spin_unlock_irq(&pidmap_lock);
+ xa_unlock_irq(&ns->idr.idr_rt);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
@@ -647,6 +636,18 @@ SYSCALL_DEFINE2(pidfd_open, pid_t, pid, unsigned int, flags)
return fd;
}

+/*
+ * Note: disable interrupts while the xarray lock is held as an interrupt might
+ * come in and do read_lock(&tasklist_lock).
+ *
+ * If we don't disable interrupts there is a nasty deadlock between
+ * detach_pid()->free_pid() and another cpu that does xa_lock() followed by an
+ * interrupt routine that does read_lock(&tasklist_lock);
+ *
+ * After we clean up the tasklist_lock and know there are no irq handlers that
+ * take it we can leave the interrupts enabled. For now it is easier to be safe
+ * than to prove it can't happen.
+ */
void __init pid_idr_init(void)
{
/* Verify no one has done anything silly: */
--
2.37.3
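
For reference, the interleaving the moved comment guards against,
sketched from its text (an assumed reconstruction; detach_pid() runs
with tasklist_lock write-held):

	CPU 0					CPU 1
	write_lock_irq(&tasklist_lock)		xa_lock(&ns->idr.idr_rt)
	  detach_pid()				<interrupt, irqs enabled>
	    free_pid()				  read_lock(&tasklist_lock)
	      xa_lock(&ns->idr.idr_rt)		    spins: write-held by CPU 0
	        spins: held by CPU 1

	Each CPU waits on the lock the other holds, so neither makes
	progress.  Taking the xarray lock with the xa_lock_irq*()
	variants keeps interrupts off while it is held, so the handler
	on CPU 1 cannot run until the lock is dropped, breaking the
	cycle.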


2022-12-12 01:52:26

by Ian Kent

Subject: Re: [PATCH v3 1/5] pid: replace pidmap_lock with xarray lock

On 3/12/22 01:16, Brian Foster wrote:
> As a first step toward converting the struct pid tracking code from
> the idr to the xarray, replace the custom pidmap_lock spinlock with
> the internal lock associated with the underlying xarray. This is
> effectively equivalent to using idr_lock() and friends, but since
> the goal is to disentangle from the idr, move directly to the
> underlying xarray API.
>
> Signed-off-by: Matthew Wilcox <[email protected]>
> Signed-off-by: Brian Foster <[email protected]>

The xarray switch looks simpler than I expected, looks good.

Reviewed-by: Ian Kent <[email protected]>
