Fix session reset deadlocks Version 4.
These patches apply to 2.6.32
Fix races and bugs as well as implement a new session draining scheme
designed by Trond.
0001-nfs41-add-create-session-into-establish_clid.patch
0002-nfs41-rename-cl_state-session-SETUP-bit-to-RESET.patch
0003-nfs41-nfs4_get_lease_time-will-never-session-reset.patch
0004-nfs41-call-free-slot-from-nfs4_restart_rpc.patch
0005-nfs41-free-the-slot-on-unhandled-read-errors.patch
0006-nfs41-fix-switch-in-nfs4_handle_exception.patch
0007-nfs41-fix-switch-in-nfs4_recovery_handle_error.patch
0008-nfs41-don-t-clear-tk_action-on-success.patch
0009-nfs41-remove-nfs4_recover_session.patch
0010-nfs41-nfs41-fix-state-manager-deadlock-in-session.patch
0011-nfs41-drain-session-cleanup.patch
0012-nfs41-only-state-manager-sets-NFS4CLNT_SESSION_SETU.patch
Testing:
CONFIG_NFS_V4_1
v41 mount: Connectathon tests passed. PyNFS testclient.py SESSIONRESET tests
The INJECT_ERROR testclient.py test where NFS4ERR_BADSESSION was returned
every 50th SEQUENCE operation and the session destroyed
during a Connectathon basic test run. This passed all but the bigfile test
where the check_lease op->renew_lease nfs4_proc_sequence state manager
session reset call could not get a slot due to the async error handler
restart read/write RPCs getting slots prior to any rpc tasks waiting on
queues. This will be fixed in a subsequent patch set.
v4 mount: Connectathon tests passed.
no CONFIG_NFS_V4_1
v4 mount: Connectathon tests passed.
-->Andy
~
From: Andy Adamson <[email protected]>
Do not fall through and set NFS4CLNT_SESSION_RESET bit on NFS4ERR_EXPIRED
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4state.c | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 4168fa6..91726bc 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1078,6 +1078,7 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_EXPIRED:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
nfs4_state_start_reclaim_nograce(clp);
+ break;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
--
1.6.0.6
From: Andy Adamson <[email protected]>
The bit is no longer used for session setup, only for session reset.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/internal.h | 2 +-
fs/nfs/nfs4_fs.h | 2 +-
fs/nfs/nfs4proc.c | 8 ++++----
fs/nfs/nfs4state.c | 8 ++++----
4 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index e21b1bb..ebcd379 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -183,7 +183,7 @@ static inline void nfs4_restart_rpc(struct rpc_task *task,
{
#ifdef CONFIG_NFS_V4_1
if (nfs4_has_session(clp) &&
- test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)) {
+ test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
rpc_restart_call_prepare(task);
return;
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 99d507d..e9ecd6b 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -44,7 +44,7 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_REBOOT,
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
- NFS4CLNT_SESSION_SETUP,
+ NFS4CLNT_SESSION_RESET,
};
/*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 5c24b4b..1e13d53 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -270,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
exception->retry = 1;
/* FALLTHROUGH */
#endif /* !defined(CONFIG_NFS_V4_1) */
@@ -439,7 +439,7 @@ static int nfs4_recover_session(struct nfs4_session *session)
ret = nfs4_wait_clnt_recover(clp);
if (ret != 0)
break;
- if (!test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ if (!test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
break;
nfs4_schedule_state_manager(clp);
ret = -EIO;
@@ -468,7 +468,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
- if (test_bit(NFS4CLNT_SESSION_SETUP, &session->clp->cl_state)) {
+ if (test_bit(NFS4CLNT_SESSION_RESET, &session->clp->cl_state)) {
if (tbl->highest_used_slotid != -1) {
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
spin_unlock(&tbl->slot_tbl_lock);
@@ -3350,7 +3350,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 8f1eb06..4168fa6 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1085,7 +1085,7 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
}
}
@@ -1246,7 +1246,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
continue;
}
/* Initialize or reset the session */
- if (test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)
+ if (test_and_clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)
&& nfs4_has_session(clp)) {
status = nfs4_reset_session(clp);
if (status) {
@@ -1261,7 +1261,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
nfs4_reboot_recovery_ops[clp->cl_minorversion]);
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
- if (test_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state))
+ if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
continue;
nfs4_state_end_reclaim_reboot(clp);
continue;
@@ -1277,7 +1277,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
continue;
if (status == -NFS4ERR_EXPIRED)
continue;
- if (test_bit(NFS4CLNT_SESSION_SETUP,
+ if (test_bit(NFS4CLNT_SESSION_RESET,
&clp->cl_state))
continue;
goto out_error;
--
1.6.0.6
From: Andy Adamson <[email protected]>
Reported-by: Trond Myklebust <[email protected]>
Resetting the clientid from the state manager could result in not confirming
the clientid due to create session not being called.
Move the create session call from the NFS4CLNT_SESSION_SETUP state manager
initialize session case into the NFS4CLNT_LEASE_EXPIRED case establish_clid
call.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4_fs.h | 2 ++
fs/nfs/nfs4proc.c | 7 +++----
fs/nfs/nfs4state.c | 35 ++++++++++++++---------------------
3 files changed, 19 insertions(+), 25 deletions(-)
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 6ea07a3..99d507d 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -200,9 +200,11 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *);
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct rpc_cred *);
+extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
extern int nfs4_proc_async_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs_client *, struct rpc_cred *);
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
+extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
extern int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 741a562..5c24b4b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4296,7 +4296,7 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
* NFS4ERR_BADSESSION in the sequence operation, and will therefore
* be in some phase of session reset.
*/
-static int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
+int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
{
nfs4_verifier verifier;
struct nfs41_exchange_id_args args = {
@@ -4582,7 +4582,6 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
if (!session)
return NULL;
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
/*
* The create session reply races with the server back
* channel probe. Mark the client NFS_CS_SESSION_INITING
@@ -4948,7 +4947,7 @@ struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
.state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
.recover_open = nfs4_open_reclaim,
.recover_lock = nfs4_lock_reclaim,
- .establish_clid = nfs4_proc_exchange_id,
+ .establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
@@ -4968,7 +4967,7 @@ struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
.state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
.recover_open = nfs4_open_expired,
.recover_lock = nfs4_lock_expired,
- .establish_clid = nfs4_proc_exchange_id,
+ .establish_clid = nfs41_init_clientid,
.get_clid_cred = nfs4_get_exchange_id_cred,
};
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 2ef4fec..8f1eb06 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -116,6 +116,19 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
#if defined(CONFIG_NFS_V4_1)
+int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
+{
+ int status;
+
+ status = nfs4_proc_exchange_id(clp, cred);
+ if (status == 0)
+ /* create session schedules state renewal upon success */
+ status = nfs4_proc_create_session(clp, 0);
+ if (status == 0)
+ nfs_mark_client_ready(clp, NFS_CS_READY);
+ return status;
+}
+
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
@@ -1156,7 +1169,6 @@ static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
switch (err) {
case -NFS4ERR_STALE_CLIENTID:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
}
}
@@ -1182,24 +1194,8 @@ out:
return status;
}
-static int nfs4_initialize_session(struct nfs_client *clp)
-{
- int status;
-
- status = nfs4_proc_create_session(clp, 0);
- if (!status) {
- nfs_mark_client_ready(clp, NFS_CS_READY);
- } else if (status == -NFS4ERR_STALE_CLIENTID) {
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- set_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state);
- } else {
- nfs_mark_client_ready(clp, status);
- }
- return status;
-}
#else /* CONFIG_NFS_V4_1 */
static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
-static int nfs4_initialize_session(struct nfs_client *clp) { return 0; }
#endif /* CONFIG_NFS_V4_1 */
/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
@@ -1252,10 +1248,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
/* Initialize or reset the session */
if (test_and_clear_bit(NFS4CLNT_SESSION_SETUP, &clp->cl_state)
&& nfs4_has_session(clp)) {
- if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
- status = nfs4_initialize_session(clp);
- else
- status = nfs4_reset_session(clp);
+ status = nfs4_reset_session(clp);
if (status) {
if (status == -NFS4ERR_STALE_CLIENTID)
continue;
--
1.6.0.6
From: Andy Adamson <[email protected]>
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4proc.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8b8bd31..b1d8dcd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -527,7 +527,7 @@ int nfs4_setup_sequence(struct nfs_client *clp,
goto out;
ret = nfs41_setup_sequence(clp->cl_session, args, res, cache_reply,
task);
- if (ret != -EAGAIN) {
+ if (ret && ret != -EAGAIN) {
/* terminate rpc task */
task->tk_status = ret;
task->tk_action = NULL;
--
1.6.0.6
From: Andy Adamson <[email protected]>
nfs4_read_done returns zero on unhandled errors. nfs_readpage_result will
return on a negative tk_status without freeing the slot.
Call nfs4_sequence_free_slot on unhandled errors in nfs4_read_done.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4proc.c | 3 +++
1 files changed, 3 insertions(+), 0 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d847e98..ee4a987 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2996,6 +2996,9 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
nfs_invalidate_atime(data->inode);
if (task->tk_status > 0)
renew_lease(server, data->timestamp);
+ else if (task->tk_status < 0)
+ nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
+
return 0;
}
--
1.6.0.6
From: Andy Adamson <[email protected]>
nfs4_recover_session can put rpciod to sleep. Just use nfs4_schedule_recovery.
Reported-by: Trond Myklebust <[email protected]>
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4proc.c | 26 +++-----------------------
1 files changed, 3 insertions(+), 23 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b1d8dcd..637cd3f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -429,24 +429,6 @@ out:
return ret_id;
}
-static int nfs4_recover_session(struct nfs4_session *session)
-{
- struct nfs_client *clp = session->clp;
- unsigned int loop;
- int ret;
-
- for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
- ret = nfs4_wait_clnt_recover(clp);
- if (ret != 0)
- break;
- if (!test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
- break;
- nfs4_schedule_state_manager(clp);
- ret = -EIO;
- }
- return ret;
-}
-
static int nfs41_setup_sequence(struct nfs4_session *session,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
@@ -455,7 +437,6 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
{
struct nfs4_slot *slot;
struct nfs4_slot_table *tbl;
- int status = 0;
u8 slotid;
dprintk("--> %s\n", __func__);
@@ -478,11 +459,10 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
/* The slot table is empty; start the reset thread */
dprintk("%s Session Reset\n", __func__);
+ rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
+ nfs4_schedule_state_manager(session->clp);
spin_unlock(&tbl->slot_tbl_lock);
- status = nfs4_recover_session(session);
- if (status)
- return status;
- spin_lock(&tbl->slot_tbl_lock);
+ return -EAGAIN;
}
slotid = nfs4_find_slot(tbl, task);
--
1.6.0.6
From: Andy Adamson <[email protected]>
Do not fall through and call nfs4_delay on session error handling.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4proc.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ee4a987..8b8bd31 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -272,7 +272,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
errorcode);
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
exception->retry = 1;
- /* FALLTHROUGH */
+ break;
#endif /* !defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
case -NFS4ERR_GRACE:
--
1.6.0.6
From: Andy Adamson <[email protected]>
Do not wake up the next slot_tbl_waitq task in nfs4_free_slot because we
may be draining the slot. Either signal the state manager that the session
is drained (the state manager wakes up tasks) OR wake up the next task.
In nfs41_sequence_done, the slot dereference is only needed in the sequence
operation success case.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4proc.c | 18 +++++++++---------
1 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8b5b5f0..263ac19 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -329,7 +329,6 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
else
tbl->highest_used_slotid = -1;
}
- rpc_wake_up_next(&tbl->slot_tbl_waitq);
spin_unlock(&tbl->slot_tbl_lock);
dprintk("%s: free_slotid %u highest_used_slotid %d\n", __func__,
free_slotid, tbl->highest_used_slotid);
@@ -346,14 +345,13 @@ void nfs41_sequence_free_slot(const struct nfs_client *clp,
}
tbl = &clp->cl_session->fc_slot_table;
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
- dprintk("%s: No slot\n", __func__);
/* just wake up the next guy waiting since
* we may have not consumed a slot after all */
- rpc_wake_up_next(&tbl->slot_tbl_waitq);
- return;
+ dprintk("%s: No slot\n", __func__);
+ } else {
+ nfs4_free_slot(tbl, res->sr_slotid);
+ res->sr_slotid = NFS4_MAX_SLOT_TABLE;
}
- nfs4_free_slot(tbl, res->sr_slotid);
- res->sr_slotid = NFS4_MAX_SLOT_TABLE;
/* Signal state manager thread if session is drained */
if (test_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
@@ -363,6 +361,8 @@ void nfs41_sequence_free_slot(const struct nfs_client *clp,
complete(&clp->cl_session->complete);
}
spin_unlock(&tbl->slot_tbl_lock);
+ } else {
+ rpc_wake_up_next(&tbl->slot_tbl_waitq);
}
}
@@ -387,10 +387,10 @@ static void nfs41_sequence_done(struct nfs_client *clp,
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE)
goto out;
- tbl = &clp->cl_session->fc_slot_table;
- slot = tbl->slots + res->sr_slotid;
-
+ /* Check the SEQUENCE operation status */
if (res->sr_status == 0) {
+ tbl = &clp->cl_session->fc_slot_table;
+ slot = tbl->slots + res->sr_slotid;
/* Update the slot's sequence and clientid lease timer */
++slot->seq_nr;
timestamp = res->sr_renewal_time;
--
1.6.0.6
From: Andy Adamson <[email protected]>
If the session is reset during state recovery, the state manager thread can
sleep on the slot_tbl_waitq causing a deadlock.
Add a completion framework to the session. Have the state manager thread set
a new session state (NFS4CLNT_SESSION_DRAINING) and wait for the session slot
table to drain.
Signal the state manager thread in nfs41_sequence_free_slot when the
NFS4CLNT_SESSION_DRAINING bit is set and the session is drained.
Reported-by: Trond Myklebust <[email protected]>
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4_fs.h | 1 +
fs/nfs/nfs4proc.c | 26 +++++++++++++++++---------
fs/nfs/nfs4state.c | 15 +++++++++++++++
include/linux/nfs_fs_sb.h | 1 +
4 files changed, 34 insertions(+), 9 deletions(-)
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index e9ecd6b..5c77401 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -45,6 +45,7 @@ enum nfs4_client_state {
NFS4CLNT_RECLAIM_NOGRACE,
NFS4CLNT_DELEGRETURN,
NFS4CLNT_SESSION_RESET,
+ NFS4CLNT_SESSION_DRAINING,
};
/*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 637cd3f..8b5b5f0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -354,6 +354,16 @@ void nfs41_sequence_free_slot(const struct nfs_client *clp,
}
nfs4_free_slot(tbl, res->sr_slotid);
res->sr_slotid = NFS4_MAX_SLOT_TABLE;
+
+ /* Signal state manager thread if session is drained */
+ if (test_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state)) {
+ spin_lock(&tbl->slot_tbl_lock);
+ if (tbl->highest_used_slotid == -1) {
+ dprintk("%s COMPLETE: Session Drained\n", __func__);
+ complete(&clp->cl_session->complete);
+ }
+ spin_unlock(&tbl->slot_tbl_lock);
+ }
}
static void nfs41_sequence_done(struct nfs_client *clp,
@@ -450,15 +460,11 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
spin_lock(&tbl->slot_tbl_lock);
if (test_bit(NFS4CLNT_SESSION_RESET, &session->clp->cl_state)) {
- if (tbl->highest_used_slotid != -1) {
- rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
- spin_unlock(&tbl->slot_tbl_lock);
- dprintk("<-- %s: Session reset: draining\n", __func__);
- return -EAGAIN;
- }
-
- /* The slot table is empty; start the reset thread */
- dprintk("%s Session Reset\n", __func__);
+ /*
+ * The state manager will wait until the slot table is empty.
+ * Schedule the reset thread
+ */
+ dprintk("%s Schedule Session Reset\n", __func__);
rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
nfs4_schedule_state_manager(session->clp);
spin_unlock(&tbl->slot_tbl_lock);
@@ -4487,6 +4493,7 @@ static int nfs4_reset_slot_tables(struct nfs4_session *session)
1);
if (status)
return status;
+ init_completion(&session->complete);
status = nfs4_reset_slot_table(&session->bc_slot_table,
session->bc_attrs.max_reqs,
@@ -4589,6 +4596,7 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
* nfs_client struct
*/
clp->cl_cons_state = NFS_CS_SESSION_INITING;
+ init_completion(&session->complete);
tbl = &session->fc_slot_table;
spin_lock_init(&tbl->slot_tbl_lock);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 91726bc..2a05d62 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1175,8 +1175,23 @@ static void nfs4_session_recovery_handle_error(struct nfs_client *clp, int err)
static int nfs4_reset_session(struct nfs_client *clp)
{
+ struct nfs4_session *ses = clp->cl_session;
+ struct nfs4_slot_table *tbl = &ses->fc_slot_table;
int status;
+ INIT_COMPLETION(ses->complete);
+ spin_lock(&tbl->slot_tbl_lock);
+ if (tbl->highest_used_slotid != -1) {
+ set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
+ spin_unlock(&tbl->slot_tbl_lock);
+ status = wait_for_completion_interruptible(&ses->complete);
+ clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
+ if (status) /* -ERESTARTSYS */
+ goto out;
+ } else {
+ spin_unlock(&tbl->slot_tbl_lock);
+ }
+
status = nfs4_proc_destroy_session(clp->cl_session);
if (status && status != -NFS4ERR_BADSESSION &&
status != -NFS4ERR_DEADSESSION) {
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569e..34fc6be 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
unsigned long session_state;
u32 hash_alg;
u32 ssv_len;
+ struct completion complete;
/* The fore and back channel */
struct nfs4_channel_attrs fc_attrs;
--
1.6.0.6
From: Andy Adamson <[email protected]>
Replace sync and async handlers setting of the NFS4CLNT_SESSION_SETUP bit with
setting NFS4CLNT_CHECK_LEASE, and let the state manager decide to reset the session.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4proc.c | 6 +++---
fs/nfs/nfs4state.c | 10 +++++++---
2 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 57316ca..3936da3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -270,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR: %d Reset session\n", __func__,
errorcode);
- set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ nfs4_schedule_state_recovery(clp);
exception->retry = 1;
break;
#endif /* !defined(CONFIG_NFS_V4_1) */
@@ -459,7 +459,7 @@ static int nfs41_setup_sequence(struct nfs4_session *session,
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
- if (test_bit(NFS4CLNT_SESSION_RESET, &session->clp->cl_state)) {
+ if (test_bit(NFS4CLNT_SESSION_DRAINING, &session->clp->cl_state)) {
/*
* The state manager will wait until the slot table is empty.
* Schedule the reset thread
@@ -3355,7 +3355,7 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
case -NFS4ERR_SEQ_MISORDERED:
dprintk("%s ERROR %d, Reset session\n", __func__,
task->tk_status);
- set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ nfs4_schedule_state_recovery(clp);
task->tk_status = 0;
return -EAGAIN;
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 31b4f74..9b26231 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1064,7 +1064,7 @@ static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp)
clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
}
-static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
+static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
case -NFS4ERR_CB_PATH_DOWN:
@@ -1087,7 +1087,10 @@ static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
case -NFS4ERR_SEQ_FALSE_RETRY:
case -NFS4ERR_SEQ_MISORDERED:
set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ /* Zero session reset errors */
+ return 0;
}
+ return error;
}
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
@@ -1107,6 +1110,7 @@ restart:
if (status < 0) {
set_bit(ops->owner_flag_bit, &sp->so_flags);
nfs4_put_state_owner(sp);
+ /* Do not zero session reset errors */
nfs4_recovery_handle_error(clp, status);
return status;
}
@@ -1138,7 +1142,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
status = ops->renew_lease(clp, cred);
put_rpccred(cred);
out:
- nfs4_recovery_handle_error(clp, status);
+ status = nfs4_recovery_handle_error(clp, status);
return status;
}
@@ -1185,7 +1189,6 @@ static int nfs4_reset_session(struct nfs_client *clp)
set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
spin_unlock(&tbl->slot_tbl_lock);
status = wait_for_completion_interruptible(&ses->complete);
- clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
if (status) /* -ERESTARTSYS */
goto out;
} else {
@@ -1206,6 +1209,7 @@ static int nfs4_reset_session(struct nfs_client *clp)
/* fall through*/
out:
/* Wake up the next rpc task even on error */
+ clear_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
rpc_wake_up_next(&clp->cl_session->fc_slot_table.slot_tbl_waitq);
return status;
}
--
1.6.0.6
On Fri, 2009-12-04 at 15:25 -0500, [email protected] wrote:
> Fix session reset deadlocks Version 4.
>
> These patches apply to 2.6.32
>
> Fix races and bugs as well as implement a new session draining scheme
> designed by Trond.
>
> 0001-nfs41-add-create-session-into-establish_clid.patch
> 0002-nfs41-rename-cl_state-session-SETUP-bit-to-RESET.patch
> 0003-nfs41-nfs4_get_lease_time-will-never-session-reset.patch
> 0004-nfs41-call-free-slot-from-nfs4_restart_rpc.patch
> 0005-nfs41-free-the-slot-on-unhandled-read-errors.patch
> 0006-nfs41-fix-switch-in-nfs4_handle_exception.patch
> 0007-nfs41-fix-switch-in-nfs4_recovery_handle_error.patch
> 0008-nfs41-don-t-clear-tk_action-on-success.patch
> 0009-nfs41-remove-nfs4_recover_session.patch
> 0010-nfs41-nfs41-fix-state-manager-deadlock-in-session.patch
> 0011-nfs41-drain-session-cleanup.patch
> 0012-nfs41-only-state-manager-sets-NFS4CLNT_SESSION_SETU.patch
>
> Testing:
>
> CONFIG_NFS_V4_1
> v41 mount: Connectathon tests passed. PyNFS testclient.py SESSIONRESET tests
>
> The INJECT_ERROR testclient.py test where NFS4ERR_BADSESSION was returned
> every 50th SEQUENCE operation and the session destroyed
> during a Connectathon basic test run. This passed all but the bigfile test
> where the check_lease op->renew_lease nfs4_proc_sequence state manager
> session reset call could not get a slot due to the async error handler
> restart read/write RPCs getting slots prior to any rpc tasks waiting on
> queues. This will be fixed in a subsequent patch set.
>
> v4 mount: Connectathon tests passed.
>
> no CONFIG_NFS_V4_1
> v4 mount: Connectathon tests passed.
Thanks Andy!
Those look good to me. I had to fix them up a bit in order to have them
apply on top of the nfs-for-next branch, but nothing major.
I'll push them out to the git repository on linux-nfs.org some time
during the weekend. Hopefully Ricardo and Alexandros will have sent me
their patches too by then...
Cheers
Trond
On Fri, 2009-12-04 at 17:01 -0500, Trond Myklebust wrote:
> Those look good to me. I had to fix them up a bit in order to have them
> apply on top of the nfs-for-next branch, but nothing major.
I figured I might as well push them out immediately. They compile, and
they don't seem to have any adverse effects on the NFSv4 stuff.
You can find them as the nfs-for-next branch on
git://git.linux-nfs.org/projects/trondmy/nfs-2.6.git
Cheers
Trond
I'm seeing a problem with the bits in the nfs-for-next branch when I run
v4.1. If a bad session is received after a server reboot, the client
issues a flood of sequence requests. The sequence RPCs contain the last
sequenceID from the previous session with the new sessionID. This is
repeated over and over until you ctrl-C the triggering process. The
server replies with sequence misordered, but the client issues the exact
same RPC over and over.
Also to note is that right after the session is destroyed, the client
will issue sequence requests with sessionID of 0 right before the create
session succeeds.
- ricardo
> -----Original Message-----
> From: Myklebust, Trond
> Sent: Friday, December 04, 2009 2:25 PM
> To: Adamson, Andy
> Cc: [email protected]
> Subject: Re: [PATCH 0/12] Fix session reset deadlocks Version 4
>
> On Fri, 2009-12-04 at 17:01 -0500, Trond Myklebust wrote:
> > Those look good to me. I had to fix them up a bit in order to have
them
> > apply on top of the nfs-for-next branch, but nothing major.
>
> I figured I might as well push them out immediately. They compile, and
> they don't seem to have any adverse effects on the NFSv4 stuff.
>
> You can find them as the nfs-for-next branch on
> git://git.linux-nfs.org/projects/trondmy/nfs-2.6.git
>
> Cheers
> Trond
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs"
in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
OK - I'll take a look...
-->Andy
On Sat, Dec 5, 2009 at 2:05 AM, Labiaga, Ricardo
<[email protected]> wrote:
> I'm seeing a problem with the bits in the nfs-for-next branch when I run
> v4.1. If a bad session is received after a server reboot, the client
> issues a flood of sequence requests. The sequence RPCs contain the last
> sequenceID from the previous session with the new sessionID. This is
> repeated over and over until you ctrl-C the triggering process. The
> server replies with sequence misordered, but the client issues the exact
> same RPC over and over.
>
> Also to note is that right after the session is destroyed, the client
> will issue sequence requests with sessionID of 0 right before the create
> session succeeds.
>
> - ricardo
>
>
>> -----Original Message-----
>> From: Myklebust, Trond
>> Sent: Friday, December 04, 2009 2:25 PM
>> To: Adamson, Andy
>> Cc: [email protected]
>> Subject: Re: [PATCH 0/12] Fix session reset deadlocks Version 4
>>
>> On Fri, 2009-12-04 at 17:01 -0500, Trond Myklebust wrote:
>> > Those look good to me. I had to fix them up a bit in order to have
> them
>> > apply on top of the nfs-for-next branch, but nothing major.
>>
>> I figured I might as well push them out immediately. They compile, and
>> they don't seem to have any adverse effects on the NFSv4 stuff.
>>
>> You can find them as the nfs-for-next branch on
>> git://git.linux-nfs.org/projects/trondmy/nfs-2.6.git
>>
>> Cheers
>> Trond
>> --
>> To unsubscribe from this list: send the line "unsubscribe linux-nfs"
> in
>> the body of a message to [email protected]
>> More majordomo info at http://vger.kernel.org/majordomo-info.html
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
>
On Fri, 2009-12-04 at 23:05 -0800, Labiaga, Ricardo wrote:
> I'm seeing a problem with the bits in the nfs-for-next branch when I run
> v4.1. If a bad session is received after a server reboot, the client
> issues a flood of sequence requests. The sequence RPCs contain the last
> sequenceID from the previous session with the new sessionID. This is
> repeated over and over until you ctrl-C the triggering process. The
> server replies with sequence misordered, but the client issues the exact
> same RPC over and over.
>
> Also to note is that right after the session is destroyed, the client
> will issue sequence requests with sessionID of 0 right before the create
> session succeeds.
That would indicate a bug in the NFS4CLNT_SESSION_DRAINING code.
OK. I think I see what it is...
Could you try this?
Cheers
Trond
----------------------------------------------------------------------------
NFSv41: nfs4_reset_session must always set NFS4CLNT_SESSION_DRAINING
From: Trond Myklebust <[email protected]>
Otherwise we have no guarantees that other processes won't start another
RPC call while we're resetting the session.
Signed-off-by: Trond Myklebust <[email protected]>
---
fs/nfs/nfs4state.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index a86f3ac..bc4ca6f 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1211,8 +1211,8 @@ static int nfs4_reset_session(struct nfs_client *clp)
INIT_COMPLETION(ses->complete);
spin_lock(&tbl->slot_tbl_lock);
+ set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
if (tbl->highest_used_slotid != -1) {
- set_bit(NFS4CLNT_SESSION_DRAINING, &clp->cl_state);
spin_unlock(&tbl->slot_tbl_lock);
status = wait_for_completion_interruptible(&ses->complete);
if (status) /* -ERESTARTSYS */
Here is another cleanup/bugfix...
----------------------------------------------------------------------------------------------------
NFSv41: Clean up slot table management
From: Trond Myklebust <[email protected]>
We no longer need to maintain a distinction between nfs41_sequence_done and
nfs41_sequence_free_slot.
This fixes a number of slot table leakages in the NFSv4.1 code.
Signed-off-by: Trond Myklebust <[email protected]>
---
fs/nfs/internal.h | 17 +-------------
fs/nfs/nfs4proc.c | 64 ++++++++++++-----------------------------------------
fs/nfs/read.c | 13 ++---------
fs/nfs/unlink.c | 3 +-
fs/nfs/write.c | 4 +--
5 files changed, 21 insertions(+), 80 deletions(-)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 83a9284..b1a020c 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -197,8 +197,7 @@ extern const u32 nfs41_maxwrite_overhead;
#endif
/* nfs4proc.c */
-extern void nfs4_restart_rpc(struct rpc_task *, const struct nfs_client *,
- struct nfs4_sequence_res *);
+extern void nfs4_restart_rpc(struct rpc_task *, const struct nfs_client *);
#ifdef CONFIG_NFS_V4
extern struct rpc_procinfo nfs4_procedures[];
#endif
@@ -275,20 +274,6 @@ extern int _nfs4_call_sync_session(struct nfs_server *server,
struct nfs4_sequence_res *res,
int cache_reply);
-#ifdef CONFIG_NFS_V4_1
-extern void nfs41_sequence_free_slot(const struct nfs_client *,
- struct nfs4_sequence_res *res);
-#endif /* CONFIG_NFS_V4_1 */
-
-static inline void nfs4_sequence_free_slot(const struct nfs_client *clp,
- struct nfs4_sequence_res *res)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp))
- nfs41_sequence_free_slot(clp, res);
-#endif /* CONFIG_NFS_V4_1 */
-}
-
/*
* Determine the device name as a string
*/
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 71993f1..906585b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -341,15 +341,11 @@ nfs4_free_slot(struct nfs4_slot_table *tbl, u8 free_slotid)
free_slotid, tbl->highest_used_slotid);
}
-void nfs41_sequence_free_slot(const struct nfs_client *clp,
+static void nfs41_sequence_free_slot(const struct nfs_client *clp,
struct nfs4_sequence_res *res)
{
struct nfs4_slot_table *tbl;
- if (!nfs4_has_session(clp)) {
- dprintk("%s: No session\n", __func__);
- return;
- }
tbl = &clp->cl_session->fc_slot_table;
if (res->sr_slotid == NFS4_MAX_SLOT_TABLE) {
/* just wake up the next guy waiting since
@@ -407,7 +403,6 @@ static void nfs41_sequence_done(struct nfs_client *clp,
spin_unlock(&clp->cl_lock);
/* Check sequence flags */
nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
- return;
}
out:
/* The session may be reset by one of the error handlers. */
@@ -556,7 +551,6 @@ static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
struct nfs41_call_sync_data *data = calldata;
nfs41_sequence_done(data->clp, data->seq_res, task->tk_status);
- nfs41_sequence_free_slot(data->clp, data->seq_res);
}
struct rpc_call_ops nfs41_call_sync_ops = {
@@ -632,12 +626,10 @@ static void nfs4_sequence_done(const struct nfs_server *server,
#endif /* CONFIG_NFS_V4_1 */
}
-void nfs4_restart_rpc(struct rpc_task *task, const struct nfs_client *clp,
- struct nfs4_sequence_res *res)
+void nfs4_restart_rpc(struct rpc_task *task, const struct nfs_client *clp)
{
#ifdef CONFIG_NFS_V4_1
if (nfs4_has_session(clp)) {
- nfs41_sequence_free_slot(clp, res);
rpc_restart_call_prepare(task);
return;
}
@@ -645,15 +637,6 @@ void nfs4_restart_rpc(struct rpc_task *task, const struct nfs_client *clp,
rpc_restart_call(task);
}
-/* no restart, therefore free slot here */
-static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
- struct nfs4_sequence_res *res,
- int rpc_status)
-{
- nfs4_sequence_done(server, res, rpc_status);
- nfs4_sequence_free_slot(server->nfs_client, res);
-}
-
static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -1350,8 +1333,8 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
data->rpc_status = task->tk_status;
- nfs4_sequence_done_free_slot(data->o_arg.server, &data->o_res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->o_arg.server, &data->o_res.seq_res,
+ task->tk_status);
if (RPC_ASSASSINATED(task))
return;
@@ -1757,12 +1740,10 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client,
- &calldata->res.seq_res);
+ nfs4_restart_rpc(task, server->nfs_client);
return;
}
}
- nfs4_sequence_free_slot(server->nfs_client, &calldata->res.seq_res);
nfs_refresh_inode(calldata->inode, calldata->res.fattr);
}
@@ -2553,7 +2534,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
nfs4_sequence_done(res->server, &res->seq_res, task->tk_status);
if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
return 0;
- nfs4_sequence_free_slot(res->server->nfs_client, &res->seq_res);
update_changeattr(dir, &res->cinfo);
nfs_post_op_update_inode(dir, &res->dir_attr);
return 1;
@@ -2992,20 +2972,16 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
dprintk("--> %s\n", __func__);
- /* nfs4_sequence_free_slot called in the read rpc_call_done */
nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client, &data->res.seq_res);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
nfs_invalidate_atime(data->inode);
if (task->tk_status > 0)
renew_lease(server, data->timestamp);
- else if (task->tk_status < 0)
- nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
-
return 0;
}
@@ -3019,13 +2995,11 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
{
struct inode *inode = data->inode;
- /* slot is freed in nfs_writeback_done */
nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client,
- &data->res.seq_res);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -3053,12 +3027,9 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client,
- &data->res.seq_res);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
return -EAGAIN;
}
- nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
- &data->res.seq_res);
nfs_refresh_inode(inode, data->res.fattr);
return 0;
}
@@ -3509,8 +3480,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
{
struct nfs4_delegreturndata *data = calldata;
- nfs4_sequence_done_free_slot(data->res.server, &data->res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->res.server, &data->res.seq_res,
+ task->tk_status);
data->rpc_status = task->tk_status;
if (data->rpc_status == 0)
@@ -3768,11 +3739,8 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
nfs4_restart_rpc(task,
- calldata->server->nfs_client,
- &calldata->res.seq_res);
+ calldata->server->nfs_client);
}
- nfs4_sequence_free_slot(calldata->server->nfs_client,
- &calldata->res.seq_res);
}
static void nfs4_locku_prepare(struct rpc_task *task, void *data)
@@ -3954,8 +3922,8 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
dprintk("%s: begin!\n", __func__);
- nfs4_sequence_done_free_slot(data->server, &data->res.seq_res,
- task->tk_status);
+ nfs4_sequence_done(data->server, &data->res.seq_res,
+ task->tk_status);
data->rpc_status = task->tk_status;
if (RPC_ASSASSINATED(task))
@@ -4425,10 +4393,9 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
- rpc_restart_call(task);
+ nfs4_restart_rpc(task, data->clp);
return;
}
- nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
dprintk("<-- %s\n", __func__);
}
@@ -4912,11 +4879,10 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
if (_nfs4_async_handle_error(task, NULL, clp, NULL)
== -EAGAIN) {
- nfs4_restart_rpc(task, clp, task->tk_msg.rpc_resp);
+ nfs4_restart_rpc(task, clp);
return;
}
}
- nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
kfree(task->tk_msg.rpc_argp);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 3e04fb9..d319bfb 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -356,26 +356,19 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
- goto out;
+ return;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
- goto out;
+ return;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client,
- &data->res.seq_res);
- return;
-out:
- nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
- &data->res.seq_res);
- return;
-
+ nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
}
/*
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 52f7bdb..012a14c 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -84,8 +84,7 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
struct nfs_removeres *res = task->tk_msg.rpc_resp;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client,
- &res->seq_res);
+ nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
}
/**
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 556668f..d546c60 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1216,8 +1216,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
- nfs4_restart_rpc(task, server->nfs_client,
- &data->res.seq_res);
+ nfs4_restart_rpc(task, server->nfs_client);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
@@ -1229,7 +1228,6 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
/* Can't do anything about it except throw an error. */
task->tk_status = -EIO;
}
- nfs4_sequence_free_slot(server->nfs_client, &data->res.seq_res);
return 0;
}
On Sat, 2009-12-05 at 12:55 -0800, Labiaga, Ricardo wrote:
> Tried with this patch but it didn't make a difference.
You are still seeing RPC calls with 0 session ids?
> I haven't tried applying the second cleanup patch yet since it
> didn't apply cleanly on top of nfs-for-next. Is this the branch you
> used?
I've pushed out all patches (including the cleanup patch) onto
nfs-for-next now...
Trond
On 12/5/09 1:12 PM, "Trond Myklebust" <[email protected]> wrote:
> On Sat, 2009-12-05 at 12:55 -0800, Labiaga, Ricardo wrote:
>> Tried with this patch but it didn't make a difference.
>
> You are still seeing RPC calls with 0 session ids?
>
Yes, right after the session is destroyed, and before it's recreated. The
original RPC that got the BAD_SESSION error keeps on trying.
After the session is recreated, the same RPC is issued (with the same
sequenceID) but with the new sessionID. This time it fails with
SEQ_MISORDERED. This repeats indefinitely until the process is manually
interrupted.
>> I haven't tried applying the second cleanup patch yet since it
>> didn't apply cleanly on top of nfs-for-next. Is this the branch you
>> used?
>
> I've pushed out all patches (including the cleanup patch) onto
> nfs-for-next now...
>
Got it, I was able to apply both patches. The results above are with both
patches.
- ricardo
On 12/5/09 1:39 PM, "Ricardo Labiaga" <[email protected]> wrote:
> On 12/5/09 1:12 PM, "Trond Myklebust" <[email protected]> wrote:
>
>> On Sat, 2009-12-05 at 12:55 -0800, Labiaga, Ricardo wrote:
>>> Tried with this patch but it didn't make a difference.
>>
>> You are still seeing RPC calls with 0 session ids?
>>
>
> Yes, right after the session is destroyed, and before it's recreated. The
> original RPC that got the BAD_SESSION error keeps on trying.
>
I should clarify. It's not a retransmission, the client issues the same
compound with a new XID.
- ricardo
> After the session is recreated, the same RPC is issued (with the same
> sequenceID) but with the new sessionID. This time it fails with
> SEQ_MISORDERED. This repeats indefinitely until the process is manually
> interrupted.
>
>>> I haven't tried applying the second cleanup patch yet since it
>>> didn't apply cleanly on top of nfs-for-next. Is this the branch you
>>> used?
>>
>> I've pushed out all patches (including the cleanup patch) onto
>> nfs-for-next now...
>>
>
> Got it, I was able to apply both patches. The results above are with both
> patches.
>
> - ricardo
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
On Sat, 2009-12-05 at 13:42 -0800, Labiaga, Ricardo wrote:
>
>
> On 12/5/09 1:39 PM, "Ricardo Labiaga" <[email protected]> wrote:
>
> > On 12/5/09 1:12 PM, "Trond Myklebust" <[email protected]> wrote:
> >
> >> On Sat, 2009-12-05 at 12:55 -0800, Labiaga, Ricardo wrote:
> >>> Tried with this patch but it didn't make a difference.
> >>
> >> You are still seeing RPC calls with 0 session ids?
> >>
> >
> > Yes, right after the session is destroyed, and before it's recreated. The
> > original RPC that got the BAD_SESSION error keeps on trying.
> >
>
> I should clarify. It's not a retransmission, the client issues the same
> compound with a new XID.
>
> - ricardo
>
> > After the session is recreated, the same RPC is issued (with the same
> > sequenceID) but with the new sessionID. This time it fails with
> > SEQ_MISORDERED. This repeats indefinitely until the process is manually
> > interrupted.
> >
> >>> I haven't tried applying the second cleanup patch yet since it
> >>> didn't apply cleanly on top of nfs-for-next. Is this the branch you
> >>> used?
> >>
> >> I've pushed out all patches (including the cleanup patch) onto
> >> nfs-for-next now...
> >>
> >
> > Got it, I was able to apply both patches. The results above are with both
> > patches.
I've found some other interesting session reset cases. I've coded up
some fixes, and pushed them to the nfs-for-next tree.
In particular, please see
http://git.linux-nfs.org/?p=trondmy/nfs-2.6.git&a=commitdiff&h=f26468fb9384e73fb357d2e84d3e9c88c7d1129d
which should ensure that we always reinitialise the slot sequence number
after a server reboot.
Could you please see if that in any way changes the above behaviour?
Cheers
Trond
These patches due improve the situation. I still see a number of sequence
calls with sessionID=0 and the same sequenceID that triggered the initial
BADSESSION. It does recover after the session is fully established though.
The sequenceID's with sessionID=0 are generated because nfs4_reset_session()
clears the DRAINING flag and wakes the pending RPCs even on error. This is
broken, since we don't have a valid sessionID. Since we're already in the
state manager, why not just let the state manager retry if the error is
recoverable (such as STALE_CLIENTID)?
I'll give that a try after dinner :-)
- ricardo
On 12/5/09 4:34 PM, "Trond Myklebust" <[email protected]> wrote:
> On Sat, 2009-12-05 at 13:42 -0800, Labiaga, Ricardo wrote:
>>
>>
>> On 12/5/09 1:39 PM, "Ricardo Labiaga" <[email protected]> wrote:
>>
>>> On 12/5/09 1:12 PM, "Trond Myklebust" <[email protected]> wrote:
>>>
>>>> On Sat, 2009-12-05 at 12:55 -0800, Labiaga, Ricardo wrote:
>>>>> Tried with this patch but it didn't make a difference.
>>>>
>>>> You are still seeing RPC calls with 0 session ids?
>>>>
>>>
>>> Yes, right after the session is destroyed, and before it's recreated. The
>>> original RPC that got the BAD_SESSION error keeps on trying.
>>>
>>
>> I should clarify. It's not a retransmission, the client issues the same
>> compound with a new XID.
>>
>> - ricardo
>>
>>> After the session is recreated, the same RPC is issued (with the same
>>> sequenceID) but with the new sessionID. This time it fails with
>>> SEQ_MISORDERED. This repeats indefinitely until the process is manually
>>> interrupted.
>>>
>>>>> I haven't tried applying the second cleanup patch yet since it
>>>>> didn't apply cleanly on top of nfs-for-next. Is this the branch you
>>>>> used?
>>>>
>>>> I've pushed out all patches (including the cleanup patch) onto
>>>> nfs-for-next now...
>>>>
>>>
>>> Got it, I was able to apply both patches. The results above are with both
>>> patches.
>
> I've found some other interesting session reset cases. I've coded up
> some fixes, and pushed them to the nfs-for-next tree.
>
> In particular, please see
>
> http://git.linux-nfs.org/?p=trondmy/nfs-2.6.git&a=commitdiff&h=f26468fb9384e73fb357d2e84d3e9c88c7d1129d
> which should ensure that we always reinitialise the slot sequence number
> after a server reboot.
>
> Could you please see if that in any way changes the above behaviour?
>
> Cheers
> Trond
On 12/5/09 7:25 PM, "Ricardo Labiaga" <[email protected]> wrote:
> These patches due improve the situation. I still see a number of sequence
^^^
do
(apologies for the spam, but better be clear...)
- ricardo
> calls with sessionID=0 and the same sequenceID that triggered the initial
> BADSESSION. It does recover after the session is fully established though.
>
> The sequenceID's with sessionID=0 are generated because nfs4_reset_session()
> clears the DRAINING flag and wakes the pending RPCs even on error. This is
> broken, since we don't have a valid sessionID. Since we're already in the
> state manager, why not just let the state manager retry if the error is
> recoverable (such as STALE_CLIENTID)?
>
> I'll give that a try after dinner :-)
>
> - ricardo
>
>
>
> On 12/5/09 4:34 PM, "Trond Myklebust" <[email protected]> wrote:
>
>> On Sat, 2009-12-05 at 13:42 -0800, Labiaga, Ricardo wrote:
>>>
>>>
>>> On 12/5/09 1:39 PM, "Ricardo Labiaga" <[email protected]> wrote:
>>>
>>>> On 12/5/09 1:12 PM, "Trond Myklebust" <[email protected]> wrote:
>>>>
>>>>> On Sat, 2009-12-05 at 12:55 -0800, Labiaga, Ricardo wrote:
>>>>>> Tried with this patch but it didn't make a difference.
>>>>>
>>>>> You are still seeing RPC calls with 0 session ids?
>>>>>
>>>>
>>>> Yes, right after the session is destroyed, and before it's recreated. The
>>>> original RPC that got the BAD_SESSION error keeps on trying.
>>>>
>>>
>>> I should clarify. It's not a retransmission, the client issues the same
>>> compound with a new XID.
>>>
>>> - ricardo
>>>
>>>> After the session is recreated, the same RPC is issued (with the same
>>>> sequenceID) but with the new sessionID. This time it fails with
>>>> SEQ_MISORDERED. This repeats indefinitely until the process is manually
>>>> interrupted.
>>>>
>>>>>> I haven't tried applying the second cleanup patch yet since it
>>>>>> didn't apply cleanly on top of nfs-for-next. Is this the branch you
>>>>>> used?
>>>>>
>>>>> I've pushed out all patches (including the cleanup patch) onto
>>>>> nfs-for-next now...
>>>>>
>>>>
>>>> Got it, I was able to apply both patches. The results above are with both
>>>> patches.
>>
>> I've found some other interesting session reset cases. I've coded up
>> some fixes, and pushed them to the nfs-for-next tree.
>>
>> In particular, please see
>>
>>
>> http://git.linux-nfs.org/?p=trondmy/nfs-2.6.git&a=commitdiff&h=f26468fb9384e73fb357d2e84d3e9c88c7d1129d
>> which should ensure that we always reinitialise the slot sequence number
>> after a server reboot.
>>
>> Could you please see if that in any way changes the above behaviour?
>>
>> Cheers
>> Trond
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-nfs" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
From: Andy Adamson <[email protected]>
Make this clear by calling rpc_restart_call().
Prepare for nfs4_restart_rpc() to free slots.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/nfs4proc.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1e13d53..979d2e3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -4389,7 +4389,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
rpc_delay(task, NFS4_POLL_RETRY_MIN);
task->tk_status = 0;
- nfs4_restart_rpc(task, data->clp);
+ rpc_restart_call(task);
return;
}
nfs41_sequence_free_slot(data->clp, &data->res->lr_seq_res);
--
1.6.0.6
From: Andy Adamson <[email protected]>
nfs41_sequence_free_slot can be called multiple times on SEQUENCE operation
errors.
No reason to inline nfs4_restart_rpc
Reported-by: Trond Myklebust <[email protected]>
nfs_writeback_done and nfs_readpage_retry call nfs4_restart_rpc outside the
error handler, and the slot is not freed prior to restarting in the rpc_prepare
state during session reset.
Fix this by moving the call to nfs41_sequence_free_slot from the error
path of nfs41_sequence_done into nfs4_restart_rpc, and by removing the test
for NFS4CLNT_SESSION_SETUP.
Always free slot and goto the rpc prepare state on async errors.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/internal.h | 16 ++--------------
fs/nfs/nfs4proc.c | 29 +++++++++++++++++++++++------
fs/nfs/read.c | 3 ++-
fs/nfs/unlink.c | 4 +++-
fs/nfs/write.c | 3 ++-
5 files changed, 32 insertions(+), 23 deletions(-)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index ebcd379..a6f7b6c 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -177,26 +177,14 @@ extern __be32 * nfs_decode_dirent(__be32 *, struct nfs_entry *, int);
extern struct rpc_procinfo nfs3_procedures[];
extern __be32 *nfs3_decode_dirent(__be32 *, struct nfs_entry *, int);
-/* nfs4proc.c */
-static inline void nfs4_restart_rpc(struct rpc_task *task,
- const struct nfs_client *clp)
-{
-#ifdef CONFIG_NFS_V4_1
- if (nfs4_has_session(clp) &&
- test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) {
- rpc_restart_call_prepare(task);
- return;
- }
-#endif /* CONFIG_NFS_V4_1 */
- rpc_restart_call(task);
-}
-
/* nfs4xdr.c */
#ifdef CONFIG_NFS_V4
extern __be32 *nfs4_decode_dirent(__be32 *p, struct nfs_entry *entry, int plus);
#endif
/* nfs4proc.c */
+extern void nfs4_restart_rpc(struct rpc_task *, const struct nfs_client *,
+ struct nfs4_sequence_res *);
#ifdef CONFIG_NFS_V4
extern struct rpc_procinfo nfs4_procedures[];
#endif
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 979d2e3..d847e98 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -637,6 +637,19 @@ static void nfs4_sequence_done(const struct nfs_server *server,
#endif /* CONFIG_NFS_V4_1 */
}
+void nfs4_restart_rpc(struct rpc_task *task, const struct nfs_client *clp,
+ struct nfs4_sequence_res *res)
+{
+#ifdef CONFIG_NFS_V4_1
+ if (nfs4_has_session(clp)) {
+ nfs41_sequence_free_slot(clp, res);
+ rpc_restart_call_prepare(task);
+ return;
+ }
+#endif /* CONFIG_NFS_V4_1 */
+ rpc_restart_call(task);
+}
+
/* no restart, therefore free slot here */
static void nfs4_sequence_done_free_slot(const struct nfs_server *server,
struct nfs4_sequence_res *res,
@@ -1737,7 +1750,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
break;
default:
if (nfs4_async_handle_error(task, server, state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs4_restart_rpc(task, server->nfs_client,
+ &calldata->res.seq_res);
return;
}
}
@@ -2975,7 +2989,7 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
nfs4_sequence_done(server, &data->res.seq_res, task->tk_status);
if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs4_restart_rpc(task, server->nfs_client, &data->res.seq_res);
return -EAGAIN;
}
@@ -3000,7 +3014,8 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client,
+ &data->res.seq_res);
return -EAGAIN;
}
if (task->tk_status >= 0) {
@@ -3028,7 +3043,8 @@ static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data)
nfs4_sequence_done(NFS_SERVER(inode), &data->res.seq_res,
task->tk_status);
if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
- nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client);
+ nfs4_restart_rpc(task, NFS_SERVER(inode)->nfs_client,
+ &data->res.seq_res);
return -EAGAIN;
}
nfs4_sequence_free_slot(NFS_SERVER(inode)->nfs_client,
@@ -3742,7 +3758,8 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
default:
if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
nfs4_restart_rpc(task,
- calldata->server->nfs_client);
+ calldata->server->nfs_client,
+ &calldata->res.seq_res);
}
nfs4_sequence_free_slot(calldata->server->nfs_client,
&calldata->res.seq_res);
@@ -4871,7 +4888,7 @@ void nfs41_sequence_call_done(struct rpc_task *task, void *data)
if (_nfs4_async_handle_error(task, NULL, clp, NULL)
== -EAGAIN) {
- nfs4_restart_rpc(task, clp);
+ nfs4_restart_rpc(task, clp, task->tk_msg.rpc_resp);
return;
}
}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 12c9e66..3e04fb9 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -368,7 +368,8 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
- nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client);
+ nfs4_restart_rpc(task, NFS_SERVER(data->inode)->nfs_client,
+ &data->res.seq_res);
return;
out:
nfs4_sequence_free_slot(NFS_SERVER(data->inode)->nfs_client,
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 1064c91..52f7bdb 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -81,9 +81,11 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
{
struct nfs_unlinkdata *data = calldata;
struct inode *dir = data->dir;
+ struct nfs_removeres *res = task->tk_msg.rpc_resp;
if (!NFS_PROTO(dir)->unlink_done(task, dir))
- nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client);
+ nfs4_restart_rpc(task, NFS_SERVER(dir)->nfs_client,
+ &res->seq_res);
}
/**
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 53eb26c..556668f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1216,7 +1216,8 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
*/
argp->stable = NFS_FILE_SYNC;
}
- nfs4_restart_rpc(task, server->nfs_client);
+ nfs4_restart_rpc(task, server->nfs_client,
+ &data->res.seq_res);
return -EAGAIN;
}
if (time_before(complain, jiffies)) {
--
1.6.0.6