This set contains the first 5 patches from my previous attempt (with an updated changelog).
The RPC patches will follow in a separate patchset, as we need to further evaluate the proposed
timeout changes.
Alexandros Batsakis (5):
nfs: kill renewd before clearing client minor version
nfs: prevent backlogging of renewd requests
nfs41: renewd sequence operations should take/put client reference
nfs4: renewd renew operations should take/put a client reference
nfs4: adjust rpc timeout for nfs_client->rpcclient based on the lease_time
fs/nfs/client.c | 45 ++++++++++++++++++++++-----------------------
fs/nfs/nfs4proc.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++--
fs/nfs/nfs4renewd.c | 24 +++++++-----------------
3 files changed, 77 insertions(+), 42 deletions(-)
From: Alexandros Batsakis <[email protected]>
renewd should be killed synchronously before nfs4_clear_client_minor_version()
destroys the session, so that no renewd request can still be running against a
session that is being torn down.
Signed-off-by: Alexandros Batsakis <[email protected]>
---
fs/nfs/client.c | 45 ++++++++++++++++++++++-----------------------
1 files changed, 22 insertions(+), 23 deletions(-)
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index d0b060a..f712e52 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -164,17 +164,20 @@ error_0:
return ERR_PTR(err);
}
-static void nfs4_shutdown_client(struct nfs_client *clp)
+/*
+ * Clears/puts all minor version specific parts from an nfs_client struct
+ * reverting it to minorversion 0.
+ */
+static void nfs4_clear_client_minor_version(struct nfs_client *clp)
{
-#ifdef CONFIG_NFS_V4
- if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
- nfs4_kill_renewd(clp);
- BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
- if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
- nfs_idmap_delete(clp);
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp)) {
+ nfs4_destroy_session(clp->cl_session);
+ clp->cl_session = NULL;
+ }
- rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
-#endif
+ clp->cl_call_sync = _nfs4_call_sync;
+#endif /* CONFIG_NFS_V4_1 */
}
/*
@@ -188,22 +191,18 @@ static void nfs4_destroy_callback(struct nfs_client *clp)
#endif /* CONFIG_NFS_V4 */
}
-/*
- * Clears/puts all minor version specific parts from an nfs_client struct
- * reverting it to minorversion 0.
- */
-static void nfs4_clear_client_minor_version(struct nfs_client *clp)
+static void nfs4_shutdown_client(struct nfs_client *clp)
{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp)) {
- nfs4_destroy_session(clp->cl_session);
- clp->cl_session = NULL;
- }
-
- clp->cl_call_sync = _nfs4_call_sync;
-#endif /* CONFIG_NFS_V4_1 */
-
+#ifdef CONFIG_NFS_V4
+ if (__test_and_clear_bit(NFS_CS_RENEWD, &clp->cl_res_state))
+ nfs4_kill_renewd(clp);
+ nfs4_clear_client_minor_version(clp);
nfs4_destroy_callback(clp);
+ if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
+ nfs_idmap_delete(clp);
+
+ rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
+#endif
}
/*
--
1.6.2.5
From: Alexandros Batsakis <[email protected]>
If the renewd send queue gets backlogged (e.g., if the server goes down),
we will keep filling the queue with periodic RENEW/SEQUENCE requests.
This patch schedules a new renewd request only after the previous one has
completed, whether it succeeded or failed.
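
For clarity, a condensed sketch of the resulting flow (illustrative only; the
real call sites are in the diff below): nfs4_renew_state() now fires at most
one asynchronous RENEW/SEQUENCE and returns, and the next run is armed from
the completion callback.

static void nfs4_renew_done(struct rpc_task *task, void *data)
{
	struct nfs_client *clp = (struct nfs_client *)task->tk_msg.rpc_argp;

	/* ... update cl_last_renewal under cl_lock ... */

	/* Reschedule renewd only once the previous request has completed,
	 * so requests cannot pile up while the server is unreachable. */
	nfs4_schedule_state_renewal(clp);
}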
Signed-off-by: Alexandros Batsakis <[email protected]>
---
fs/nfs/nfs4proc.c | 3 +++
fs/nfs/nfs4renewd.c | 24 +++++++-----------------
2 files changed, 10 insertions(+), 17 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 84b53d3..ce44b5a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3162,6 +3162,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
+ nfs4_schedule_state_renewal(clp);
}
static const struct rpc_call_ops nfs4_renew_ops = {
@@ -5040,6 +5041,8 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
}
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
+ nfs4_schedule_state_renewal(clp);
+
kfree(task->tk_msg.rpc_argp);
kfree(task->tk_msg.rpc_resp);
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 0156c01..d87f103 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -36,11 +36,6 @@
* as an rpc_task, not a real kernel thread, so it always runs in rpciod's
* context. There is one renewd per nfs_server.
*
- * TODO: If the send queue gets backlogged (e.g., if the server goes down),
- * we will keep filling the queue with periodic RENEW requests. We need a
- * mechanism for ensuring that if renewd successfully sends off a request,
- * then it only wakes up when the request is finished. Maybe use the
- * child task framework of the RPC layer?
*/
#include <linux/mm.h>
@@ -63,7 +58,7 @@ nfs4_renew_state(struct work_struct *work)
struct nfs_client *clp =
container_of(work, struct nfs_client, cl_renewd.work);
struct rpc_cred *cred;
- long lease, timeout;
+ long lease;
unsigned long last, now;
ops = nfs4_state_renewal_ops[clp->cl_minorversion];
@@ -75,7 +70,6 @@ nfs4_renew_state(struct work_struct *work)
lease = clp->cl_lease_time;
last = clp->cl_last_renewal;
now = jiffies;
- timeout = (2 * lease) / 3 + (long)last - (long)now;
/* Are we close to a lease timeout? */
if (time_after(now, last + lease/3)) {
cred = ops->get_state_renewal_cred_locked(clp);
@@ -90,19 +84,15 @@ nfs4_renew_state(struct work_struct *work)
/* Queue an asynchronous RENEW. */
ops->sched_state_renewal(clp, cred);
put_rpccred(cred);
+ goto out_exp;
}
- timeout = (2 * lease) / 3;
- spin_lock(&clp->cl_lock);
- } else
+ } else {
dprintk("%s: failed to call renewd. Reason: lease not expired \n",
__func__);
- if (timeout < 5 * HZ) /* safeguard */
- timeout = 5 * HZ;
- dprintk("%s: requeueing work. Lease period = %ld\n",
- __func__, (timeout + HZ - 1) / HZ);
- cancel_delayed_work(&clp->cl_renewd);
- schedule_delayed_work(&clp->cl_renewd, timeout);
- spin_unlock(&clp->cl_lock);
+ spin_unlock(&clp->cl_lock);
+ }
+ nfs4_schedule_state_renewal(clp);
+out_exp:
nfs_expire_unreferenced_delegations(clp);
out:
dprintk("%s: done\n", __func__);
--
1.6.2.5
From: Alexandros Batsakis <[email protected]>
renewd sends SEQUENCE requests to the NFS server in order to renew state.
As the request is asynchronous, renewd should take a reference to the
nfs_client to prevent a concurrent umount from freeing the session and the
client while the request is outstanding.
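
The reference is taken before the asynchronous call is issued and dropped from
the rpc_release callback once the task and its callbacks are done with the
client, roughly as sketched below (illustrative; the exact changes are in the
diff):

static int nfs41_proc_async_sequence(struct nfs_client *clp,
				     struct rpc_cred *cred)
{
	/* Bail out if the client is already being torn down. */
	if (!atomic_inc_not_zero(&clp->cl_count))
		return -EIO;
	/* ... allocate args/res and fire the asynchronous SEQUENCE ... */
}

static void nfs41_sequence_release(void *calldata)
{
	/* Runs after rpc_call_done; drops the reference taken above. */
	nfs_put_client((struct nfs_client *)calldata);
}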
Signed-off-by: Alexandros Batsakis <[email protected]>
---
fs/nfs/nfs4proc.c | 23 +++++++++++++++++++----
1 files changed, 19 insertions(+), 4 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ce44b5a..87fd43a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -419,7 +419,8 @@ static void nfs41_sequence_done(struct nfs_client *clp,
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
/* Check sequence flags */
- nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
+ if (atomic_read(&clp->cl_count) > 1)
+ nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
}
out:
/* The session may be reset by one of the error handlers. */
@@ -5032,6 +5033,8 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
if (task->tk_status < 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
+ if (atomic_read(&clp->cl_count) == 1)
+ goto out;
if (_nfs4_async_handle_error(task, NULL, clp, NULL)
== -EAGAIN) {
@@ -5041,8 +5044,9 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
}
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
- nfs4_schedule_state_renewal(clp);
-
+ if (atomic_read(&clp->cl_count) > 1)
+ nfs4_schedule_state_renewal(clp);
+out:
kfree(task->tk_msg.rpc_argp);
kfree(task->tk_msg.rpc_resp);
@@ -5064,9 +5068,15 @@ static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
rpc_call_start(task);
}
+static void nfs41_sequence_release(void *calldata)
+{
+ nfs_put_client((struct nfs_client *) calldata);
+}
+
static const struct rpc_call_ops nfs41_sequence_ops = {
.rpc_call_done = nfs41_sequence_call_done,
.rpc_call_prepare = nfs41_sequence_prepare,
+ .rpc_release = nfs41_sequence_release,
};
static int nfs41_proc_async_sequence(struct nfs_client *clp,
@@ -5079,11 +5089,16 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp,
.rpc_cred = cred,
};
+ if (!atomic_inc_not_zero(&clp->cl_count))
+ return -EIO;
args = kzalloc(sizeof(*args), GFP_KERNEL);
- if (!args)
+ if (!args) {
+ nfs_put_client(clp);
return -ENOMEM;
+ }
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res) {
+ nfs_put_client(clp);
kfree(args);
return -ENOMEM;
}
--
1.6.2.5
From: Alexandros Batsakis <[email protected]>
renewd sends RENEW requests to the NFS server in order to renew state.
As the request is asynchronous, renewd should take a reference to the
nfs_client to prevent a concurrent umount from freeing the client while the
request is outstanding.
Signed-off-by: Alexandros Batsakis <[email protected]>
---
fs/nfs/nfs4proc.c | 11 ++++++++++-
1 files changed, 10 insertions(+), 1 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 87fd43a..ffc84d4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3163,11 +3163,18 @@ static void nfs4_renew_done(struct rpc_task *task, void *data)
if (time_before(clp->cl_last_renewal,timestamp))
clp->cl_last_renewal = timestamp;
spin_unlock(&clp->cl_lock);
- nfs4_schedule_state_renewal(clp);
+ if (atomic_read(&clp->cl_count) > 1)
+ nfs4_schedule_state_renewal(clp);
+}
+
+static void nfs4_renew_release(void *calldata)
+{
+ nfs_put_client((struct nfs_client *) calldata);
}
static const struct rpc_call_ops nfs4_renew_ops = {
.rpc_call_done = nfs4_renew_done,
+ .rpc_release = nfs4_renew_release,
};
int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
@@ -3178,6 +3185,8 @@ int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred)
.rpc_cred = cred,
};
+ if (!atomic_inc_not_zero(&clp->cl_count))
+ return -EIO;
return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
&nfs4_renew_ops, (void *)jiffies);
}
--
1.6.2.5
From: Alexandros Batsakis <[email protected]>
The timeo mount parameters do not affect the nfs_client's rpc_client, as they
are mount-point specific. Since the nfs_client mainly deals with state
management, the duration of its operations should be capped by the server's
lease time.

This patch sets the timeout values for nfs_client->cl_rpcclient so that the
maximum RPC timeout equals the server's lease time. The new values override
the xprt->timeout set in rpc_new_client().
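
As a worked example (assuming NFS_DEF_TCP_RETRANS is 2; the exact constants
are illustrative), a server lease of 90 seconds yields:

	to_retries   = 2
	to_initval   = 90 * HZ / (2 + 1)     = 30 seconds
	to_increment = to_initval            = 30 seconds
	to_maxval    = 30s + 2 * 30s         = 90 seconds (capped at NFS_MAX_TCP_TIMEOUT)

With to_exponential set to 0 the per-retry timeout grows linearly (30s, 60s,
90s), so no single retransmission of a state-management RPC waits longer than
the lease period.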
Signed-off-by: Alexandros Batsakis <[email protected]>
---
fs/nfs/nfs4proc.c | 19 +++++++++++++++++++
1 files changed, 19 insertions(+), 0 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ffc84d4..efa3feb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3490,6 +3490,23 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
return _nfs4_async_handle_error(task, server, server->nfs_client, state);
}
+static void nfs4_set_rpc_timeout(struct nfs_client *clp, __u32 lease_time)
+{
+ struct rpc_timeout to;
+
+ to.to_retries = NFS_DEF_TCP_RETRANS;
+ to.to_initval = lease_time * HZ / (to.to_retries + 1);
+ to.to_increment = to.to_initval;
+ to.to_maxval = to.to_initval + (to.to_increment * to.to_retries);
+ if (to.to_maxval > NFS_MAX_TCP_TIMEOUT)
+ to.to_maxval = NFS_MAX_TCP_TIMEOUT;
+ to.to_exponential = 0;
+
+ memcpy(&clp->cl_rpcclient->cl_timeout_default, &to,
+ sizeof(clp->cl_rpcclient->cl_timeout_default));
+ clp->cl_rpcclient->cl_timeout = &clp->cl_rpcclient->cl_timeout_default;
+}
+
int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short port, struct rpc_cred *cred)
{
nfs4_verifier sc_verifier;
@@ -3560,6 +3577,7 @@ static int _nfs4_proc_setclientid_confirm(struct nfs_client *clp, struct rpc_cre
if (status == 0) {
spin_lock(&clp->cl_lock);
clp->cl_lease_time = fsinfo.lease_time * HZ;
+ nfs4_set_rpc_timeout(clp, fsinfo.lease_time);
clp->cl_last_renewal = now;
spin_unlock(&clp->cl_lock);
}
@@ -4578,6 +4596,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
nfs_restart_rpc(task, data->clp);
return;
}
+ nfs4_set_rpc_timeout(data->clp, data->res->lr_fsinfo->lease_time);
dprintk("<-- %s\n", __func__);
}
--
1.6.2.5