From: Trond Myklebust <[email protected]>
Ensure that a task that has been woken up immediately calls
rpc_exit_task(), and that the value of tk_rpc_status, once set, cannot
be clobbered by some other function.

Signed-off-by: Trond Myklebust <[email protected]>
---
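A minimal user-space sketch of the "first writer wins" semantics that
the new rpc_task_set_rpc_status() helper provides; C11 atomics stand in
for the kernel's cmpxchg(), and the fake_task type and helper name are
invented purely for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
        atomic_int tk_rpc_status;       /* 0 == no error recorded yet */
};

/* Mirrors rpc_task_set_rpc_status(): only the first error sticks. */
static bool fake_task_set_rpc_status(struct fake_task *task, int rpc_status)
{
        int expected = 0;

        return atomic_compare_exchange_strong(&task->tk_rpc_status,
                                              &expected, rpc_status);
}

int main(void)
{
        struct fake_task task = { .tk_rpc_status = 0 };

        /* The first caller wins and records -ERESTARTSYS (-512)... */
        printf("signal recorded: %d\n",
               fake_task_set_rpc_status(&task, -512));
        /* ...and a later caller cannot clobber the recorded status. */
        printf("cancel recorded: %d\n",
               fake_task_set_rpc_status(&task, -5));
        printf("final status: %d\n", atomic_load(&task.tk_rpc_status));
        return 0;
}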
include/linux/sunrpc/sched.h | 1 +
net/sunrpc/clnt.c | 6 ++----
net/sunrpc/sched.c | 40 ++++++++++++++++++++++--------------
net/sunrpc/xprtsock.c | 3 +--
4 files changed, 29 insertions(+), 21 deletions(-)
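The other half of the change closes a lost-wakeup window: a task that
is about to sleep re-checks tk_rpc_status under the queue lock, while
a signaller records the status before looking at the wait queue, so one
side is guaranteed to see the other. A rough pthread sketch of that
ordering follows; it simplifies the real code (which orders the check
with cmpxchg(), set_bit() and a memory barrier rather than a single
lock), and all names in it are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
static int rpc_status;          /* set once by the signaller */
static int queued;              /* "task is on the wait queue" */

static void *signaller(void *arg)
{
        pthread_mutex_lock(&queue_lock);
        rpc_status = -512;      /* -ERESTARTSYS */
        if (queued)             /* wake the task if it already slept */
                pthread_cond_signal(&queue_cond);
        pthread_mutex_unlock(&queue_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, signaller, NULL);
        pthread_mutex_lock(&queue_lock);
        queued = 1;
        /* Re-check under the lock: never sleep on a cancelled task. */
        while (rpc_status == 0)
                pthread_cond_wait(&queue_cond, &queue_lock);
        queued = 0;
        pthread_mutex_unlock(&queue_lock);
        pthread_join(t, NULL);
        printf("task exits with status %d\n", rpc_status);
        return 0;
}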
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index acc62647317c..647247040ef9 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -209,6 +209,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
void rpc_signal_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c284efa3d1ef..77a44118c0da 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1642,7 +1642,7 @@ static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
trace_rpc_call_rpcerror(task, tk_status, rpc_status);
- task->tk_rpc_status = rpc_status;
+ rpc_task_set_rpc_status(task, rpc_status);
rpc_exit(task, tk_status);
}
@@ -2435,10 +2435,8 @@ rpc_check_timeout(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
- if (RPC_SIGNALLED(task)) {
- rpc_call_rpcerror(task, -ERESTARTSYS);
+ if (RPC_SIGNALLED(task))
return;
- }
if (xprt_adjust_timeout(task->tk_rqstp) == 0)
return;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 25b9221950ff..f388bfaf6ff0 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -65,6 +65,13 @@ gfp_t rpc_task_gfp_mask(void)
}
EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
+{
+ if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
+ return true;
+ return false;
+}
+
unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
@@ -855,12 +862,14 @@ void rpc_signal_task(struct rpc_task *task)
if (!RPC_IS_ACTIVATED(task))
return;
+ if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
+ return;
trace_rpc_task_signalled(task, task->tk_action);
set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
smp_mb__after_atomic();
queue = READ_ONCE(task->tk_waitqueue);
if (queue)
- rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
+ rpc_wake_up_queued_task(queue, task);
}
void rpc_exit(struct rpc_task *task, int status)
@@ -907,10 +916,16 @@ static void __rpc_execute(struct rpc_task *task)
* Perform the next FSM step or a pending callback.
*
* tk_action may be NULL if the task has been killed.
- * In particular, note that rpc_killall_tasks may
- * do this at any time, so beware when dereferencing.
*/
do_action = task->tk_action;
+ /* Tasks with an RPC error status should exit */
+ if (do_action != rpc_exit_task &&
+ (status = READ_ONCE(task->tk_rpc_status)) != 0) {
+ task->tk_status = status;
+ if (do_action != NULL)
+ do_action = rpc_exit_task;
+ }
+ /* Callbacks override all actions */
if (task->tk_callback) {
do_action = task->tk_callback;
task->tk_callback = NULL;
@@ -932,14 +947,6 @@ static void __rpc_execute(struct rpc_task *task)
continue;
}
- /*
- * Signalled tasks should exit rather than sleep.
- */
- if (RPC_SIGNALLED(task)) {
- task->tk_rpc_status = -ERESTARTSYS;
- rpc_exit(task, -ERESTARTSYS);
- }
-
/*
* The queue->lock protects against races with
* rpc_make_runnable().
@@ -955,6 +962,12 @@ static void __rpc_execute(struct rpc_task *task)
spin_unlock(&queue->lock);
continue;
}
+ /* Wake up any task that has an exit status */
+ if (READ_ONCE(task->tk_rpc_status) != 0) {
+ rpc_wake_up_task_queue_locked(queue, task);
+ spin_unlock(&queue->lock);
+ continue;
+ }
rpc_clear_running(task);
spin_unlock(&queue->lock);
if (task_is_async)
@@ -972,10 +985,7 @@ static void __rpc_execute(struct rpc_task *task)
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
- trace_rpc_task_signalled(task, task->tk_action);
- set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
- task->tk_rpc_status = -ERESTARTSYS;
- rpc_exit(task, -ERESTARTSYS);
+ rpc_signal_task(task);
}
trace_rpc_task_sync_wake(task, task->tk_action);
}
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e976007f4fd0..d4750caa773a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1978,8 +1978,7 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
* we'll need to figure out how to pass a namespace to
* connect.
*/
- task->tk_rpc_status = -ENOTCONN;
- rpc_exit(task, -ENOTCONN);
+ rpc_task_set_rpc_status(task, -ENOTCONN);
goto out_wake;
}
ret = xs_local_setup_socket(transport);
--
2.37.3
From: Trond Myklebust <[email protected]>
Add the helper rpc_cancel_tasks(), which uses a caller-supplied
selection function to pick out the set of in-flight RPC calls that are
to be cancelled. It is mainly intended for pNFS drivers that are
subject to a layout recall, and that therefore may want to cancel all
pending I/O using that layout so the I/O can be redriven once the
layout recall has been satisfied.

Signed-off-by: Trond Myklebust <[email protected]>
---
include/linux/sunrpc/sched.h | 5 +++++
net/sunrpc/clnt.c | 37 ++++++++++++++++++++++++++++++++++++
net/sunrpc/sched.c | 11 +++++++++++
3 files changed, 53 insertions(+)
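A hypothetical caller, to illustrate the intended shape of the API; the
matcher nfs_layout_matches_task() and the use of tk_calldata to
identify I/O belonging to a recalled layout are made up for this
sketch, and a real pNFS driver would apply its own selection criteria:

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Select the in-flight tasks whose calldata points at our layout. */
static bool nfs_layout_matches_task(const struct rpc_task *task,
                                    const void *data)
{
        return task->tk_calldata == data;
}

/* On a layout recall, cancel every matching RPC call with -EAGAIN so
 * that the I/O can be redriven once the recall has been satisfied.
 */
static void nfs_layout_cancel_io(struct rpc_clnt *clnt, void *layout)
{
        unsigned long cancelled;

        cancelled = rpc_cancel_tasks(clnt, -EAGAIN,
                                     nfs_layout_matches_task, layout);
        pr_debug("cancelled %lu layout RPC calls\n", cancelled);
}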
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 647247040ef9..cdcf0fe56a6f 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -210,11 +210,16 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status);
+void rpc_task_try_cancel(struct rpc_task *task, int error);
void rpc_signal_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
void rpc_killall_tasks(struct rpc_clnt *);
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+ bool (*fnmatch)(const struct rpc_task *,
+ const void *),
+ const void *data);
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 77a44118c0da..3bee30cf8a7d 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -873,6 +873,43 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
+/**
+ * rpc_cancel_tasks - try to cancel a set of RPC tasks
+ * @clnt: Pointer to RPC client
+ * @error: RPC task error value to set
+ * @fnmatch: Pointer to selector function
+ * @data: User data
+ *
+ * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
+ * The argument @error must be a negative error value.
+ */
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+ bool (*fnmatch)(const struct rpc_task *,
+ const void *),
+ const void *data)
+{
+ struct rpc_task *task;
+ unsigned long count = 0;
+
+ if (list_empty(&clnt->cl_tasks))
+ return 0;
+ /*
+ * Spin lock all_tasks to prevent changes...
+ */
+ spin_lock(&clnt->cl_lock);
+ list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
+ if (!RPC_IS_ACTIVATED(task))
+ continue;
+ if (!fnmatch(task, data))
+ continue;
+ rpc_task_try_cancel(task, error);
+ count++;
+ }
+ spin_unlock(&clnt->cl_lock);
+ return count;
+}
+EXPORT_SYMBOL_GPL(rpc_cancel_tasks);
+
/*
* Properly shut down an RPC client, terminating all outstanding
* requests.
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f388bfaf6ff0..de912e02371b 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -872,6 +872,17 @@ void rpc_signal_task(struct rpc_task *task)
rpc_wake_up_queued_task(queue, task);
}
+void rpc_task_try_cancel(struct rpc_task *task, int error)
+{
+ struct rpc_wait_queue *queue;
+
+ if (!rpc_task_set_rpc_status(task, error))
+ return;
+ queue = READ_ONCE(task->tk_waitqueue);
+ if (queue)
+ rpc_wake_up_queued_task(queue, task);
+}
+
void rpc_exit(struct rpc_task *task, int status)
{
task->tk_status = status;
--
2.37.3