2020-02-13 15:47:19

by Greg Kroah-Hartman

Subject: [PATCH 4.14 057/173] padata: Remove broken queue flushing

From: Herbert Xu <[email protected]>

[ Upstream commit 07928d9bfc81640bab36f5190e8725894d93b659 ]

The function padata_flush_queues is fundamentally broken because
it cannot force padata users to complete the requests that are
underway. IOW, padata has to passively wait for the completion
of any outstanding work.

As it stands, flushing is used in two places. Its use in padata_stop
is simply unnecessary because nothing depends on the queues being
flushed afterwards.

The other use, in padata_replace, is more substantial as we depend
on it to free the old pd structure. This patch instead uses
pd->refcnt to dynamically free the pd structure once all requests
are complete.
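
As an illustration of the scheme, here is a minimal userspace sketch of
the refcount lifecycle this patch adopts, using C11 atomics in place of
the kernel's atomic_t; the structure and function names below are
simplified stand-ins, not the actual padata API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct parallel_data; queues, cpumasks etc. elided. */
struct pd {
	atomic_int refcnt;	/* starts at 1: the instance's own reference */
};

static struct pd *pd_alloc(void)
{
	struct pd *pd = malloc(sizeof(*pd));

	atomic_init(&pd->refcnt, 1);	/* like atomic_set(&pd->refcnt, 1) */
	return pd;
}

static void pd_free(struct pd *pd)
{
	printf("freeing pd %p\n", (void *)pd);
	free(pd);
}

/* Submission side: each queued request holds a reference. */
static void pd_get_requests(struct pd *pd, int nr)
{
	atomic_fetch_add(&pd->refcnt, nr);
}

/* Serial worker: after completing cnt requests, drop that many refs. */
static void serial_complete(struct pd *pd, int cnt)
{
	/* mirrors atomic_sub_and_test(cnt, &pd->refcnt) */
	if (atomic_fetch_sub(&pd->refcnt, cnt) == cnt)
		pd_free(pd);
}

/* Replace path: drop the initial reference when the pd is retired. */
static void pd_retire(struct pd *pd)
{
	/* mirrors atomic_dec_and_test(&pd->refcnt) */
	if (atomic_fetch_sub(&pd->refcnt, 1) == 1)
		pd_free(pd);
}

int main(void)
{
	struct pd *pd = pd_alloc();

	pd_get_requests(pd, 2);	/* two requests still in flight */
	pd_retire(pd);		/* pd replaced; outstanding work keeps it alive */
	serial_complete(pd, 2);	/* last completion frees it */
	return 0;
}

Whichever of the replace path or the final serial completion runs last
ends up freeing the structure, so no explicit flush is needed.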

Fixes: 2b73b07ab8a4 ("padata: Flush the padata queues actively")
Cc: <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Reviewed-by: Daniel Jordan <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
---
kernel/padata.c | 46 ++++++++++++----------------------------------
1 file changed, 12 insertions(+), 34 deletions(-)

diff --git a/kernel/padata.c b/kernel/padata.c
index 87540ce72aea6..ef4ba3d664dab 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -34,6 +34,8 @@

#define MAX_OBJ_NUM 1000

+static void padata_free_pd(struct parallel_data *pd);
+
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
int cpu, target_cpu;
@@ -292,6 +294,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
struct padata_serial_queue *squeue;
struct parallel_data *pd;
LIST_HEAD(local_list);
+ int cnt;

local_bh_disable();
squeue = container_of(serial_work, struct padata_serial_queue, work);
@@ -301,6 +304,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_replace_init(&squeue->serial.list, &local_list);
spin_unlock(&squeue->serial.lock);

+ cnt = 0;
+
while (!list_empty(&local_list)) {
struct padata_priv *padata;

@@ -310,9 +315,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_del_init(&padata->list);

padata->serial(padata);
- atomic_dec(&pd->refcnt);
+ cnt++;
}
local_bh_enable();
+
+ if (atomic_sub_and_test(cnt, &pd->refcnt))
+ padata_free_pd(pd);
}

/**
@@ -435,8 +443,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
- atomic_set(&pd->refcnt, 0);
- pd->pinst = pinst;
+ atomic_set(&pd->refcnt, 1);
spin_lock_init(&pd->lock);

return pd;
@@ -460,31 +467,6 @@ static void padata_free_pd(struct parallel_data *pd)
kfree(pd);
}

-/* Flush all objects out of the padata queues. */
-static void padata_flush_queues(struct parallel_data *pd)
-{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct padata_serial_queue *squeue;
-
- for_each_cpu(cpu, pd->cpumask.pcpu) {
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
- flush_work(&pqueue->work);
- }
-
- del_timer_sync(&pd->timer);
-
- if (atomic_read(&pd->reorder_objects))
- padata_reorder(pd);
-
- for_each_cpu(cpu, pd->cpumask.cbcpu) {
- squeue = per_cpu_ptr(pd->squeue, cpu);
- flush_work(&squeue->work);
- }
-
- BUG_ON(atomic_read(&pd->refcnt) != 0);
-}
-
static void __padata_start(struct padata_instance *pinst)
{
pinst->flags |= PADATA_INIT;
@@ -498,10 +480,6 @@ static void __padata_stop(struct padata_instance *pinst)
pinst->flags &= ~PADATA_INIT;

synchronize_rcu();
-
- get_online_cpus();
- padata_flush_queues(pinst->pd);
- put_online_cpus();
}

/* Replace the internal control structure with a new one. */
@@ -522,8 +500,8 @@ static void padata_replace(struct padata_instance *pinst,
if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
notification_mask |= PADATA_CPU_SERIAL;

- padata_flush_queues(pd_old);
- padata_free_pd(pd_old);
+ if (atomic_dec_and_test(&pd_old->refcnt))
+ padata_free_pd(pd_old);

if (notification_mask)
blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
--
2.20.1

2020-02-14 19:47:51

by Daniel Jordan

Subject: [PATCH v2 4.14] padata: Remove broken queue flushing

From: Herbert Xu <[email protected]>

[ Upstream commit 07928d9bfc81640bab36f5190e8725894d93b659 ]

The function padata_flush_queues is fundamentally broken because
it cannot force padata users to complete the requests that are
underway. IOW, padata has to passively wait for the completion
of any outstanding work.

As it stands, flushing is used in two places. Its use in padata_stop
is simply unnecessary because nothing depends on the queues being
flushed afterwards.

The other use, in padata_replace, is more substantial as we depend
on it to free the old pd structure. This patch instead uses
pd->refcnt to dynamically free the pd structure once all requests
are complete.

Fixes: 2b73b07ab8a4 ("padata: Flush the padata queues actively")
Cc: <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
Reviewed-by: Daniel Jordan <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
[dj: leave "pd->pinst = pinst" assignment in padata_alloc_pd()]
Signed-off-by: Daniel Jordan <[email protected]>
---
kernel/padata.c | 45 ++++++++++++---------------------------------
1 file changed, 12 insertions(+), 33 deletions(-)

diff --git a/kernel/padata.c b/kernel/padata.c
index 87540ce72aea..528a251217df 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -34,6 +34,8 @@

#define MAX_OBJ_NUM 1000

+static void padata_free_pd(struct parallel_data *pd);
+
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
int cpu, target_cpu;
@@ -292,6 +294,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
struct padata_serial_queue *squeue;
struct parallel_data *pd;
LIST_HEAD(local_list);
+ int cnt;

local_bh_disable();
squeue = container_of(serial_work, struct padata_serial_queue, work);
@@ -301,6 +304,8 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_replace_init(&squeue->serial.list, &local_list);
spin_unlock(&squeue->serial.lock);

+ cnt = 0;
+
while (!list_empty(&local_list)) {
struct padata_priv *padata;

@@ -310,9 +315,12 @@ static void padata_serial_worker(struct work_struct *serial_work)
list_del_init(&padata->list);

padata->serial(padata);
- atomic_dec(&pd->refcnt);
+ cnt++;
}
local_bh_enable();
+
+ if (atomic_sub_and_test(cnt, &pd->refcnt))
+ padata_free_pd(pd);
}

/**
@@ -435,7 +443,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
- atomic_set(&pd->refcnt, 0);
+ atomic_set(&pd->refcnt, 1);
pd->pinst = pinst;
spin_lock_init(&pd->lock);

@@ -460,31 +468,6 @@ static void padata_free_pd(struct parallel_data *pd)
kfree(pd);
}

-/* Flush all objects out of the padata queues. */
-static void padata_flush_queues(struct parallel_data *pd)
-{
- int cpu;
- struct padata_parallel_queue *pqueue;
- struct padata_serial_queue *squeue;
-
- for_each_cpu(cpu, pd->cpumask.pcpu) {
- pqueue = per_cpu_ptr(pd->pqueue, cpu);
- flush_work(&pqueue->work);
- }
-
- del_timer_sync(&pd->timer);
-
- if (atomic_read(&pd->reorder_objects))
- padata_reorder(pd);
-
- for_each_cpu(cpu, pd->cpumask.cbcpu) {
- squeue = per_cpu_ptr(pd->squeue, cpu);
- flush_work(&squeue->work);
- }
-
- BUG_ON(atomic_read(&pd->refcnt) != 0);
-}
-
static void __padata_start(struct padata_instance *pinst)
{
pinst->flags |= PADATA_INIT;
@@ -498,10 +481,6 @@ static void __padata_stop(struct padata_instance *pinst)
pinst->flags &= ~PADATA_INIT;

synchronize_rcu();
-
- get_online_cpus();
- padata_flush_queues(pinst->pd);
- put_online_cpus();
}

/* Replace the internal control structure with a new one. */
@@ -522,8 +501,8 @@ static void padata_replace(struct padata_instance *pinst,
if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
notification_mask |= PADATA_CPU_SERIAL;

- padata_flush_queues(pd_old);
- padata_free_pd(pd_old);
+ if (atomic_dec_and_test(&pd_old->refcnt))
+ padata_free_pd(pd_old);

if (notification_mask)
blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
--
2.25.0

2020-02-18 04:50:28

by Greg Kroah-Hartman

Subject: Re: [PATCH v2 4.14] padata: Remove broken queue flushing

On Fri, Feb 14, 2020 at 02:46:51PM -0500, Daniel Jordan wrote:
> From: Herbert Xu <[email protected]>
>
> [ Upstream commit 07928d9bfc81640bab36f5190e8725894d93b659 ]
>
> The function padata_flush_queues is fundamentally broken because
> it cannot force padata users to complete the requests that are
> underway. IOW, padata has to passively wait for the completion
> of any outstanding work.
>
> As it stands, flushing is used in two places. Its use in padata_stop
> is simply unnecessary because nothing depends on the queues being
> flushed afterwards.
>
> The other use, in padata_replace, is more substantial as we depend
> on it to free the old pd structure. This patch instead uses
> pd->refcnt to dynamically free the pd structure once all requests
> are complete.
>
> Fixes: 2b73b07ab8a4 ("padata: Flush the padata queues actively")
> Cc: <[email protected]>
> Signed-off-by: Herbert Xu <[email protected]>
> Reviewed-by: Daniel Jordan <[email protected]>
> Signed-off-by: Herbert Xu <[email protected]>
> [dj: leave "pd->pinst = pinst" assignment in padata_alloc_pd()]
> Signed-off-by: Daniel Jordan <[email protected]>

Thanks, all 3 backports now queued up.

greg k-h