From: Andy Adamson <[email protected]>
Data server sessions as well as MDS and of course normal NFSv4.1 mount session
stats are recorded.
This is an RFC - I've used these stats to verify that DS session slot table
waitqs have been drained when migrating from a dead DS to the MDS. The stats
are also useful in seeing how many session slots are actually used for various
work loads.
Comments please.
Andy Adamson (1):
NFSv4.1 add a session statistics file to /proc/fs/nfsfs
fs/nfs/client.c | 93 +++++++++++++++++++++++++++++++++++++++++++++
fs/nfs/nfs4proc.c | 5 ++
include/linux/nfs_fs_sb.h | 3 +
3 files changed, 101 insertions(+), 0 deletions(-)
--
1.7.6.4
From: Andy Adamson <[email protected]>
Session statistics are needed for performance characterisation and debugging
recovery and migration.
Only gather forechannel statistics as the backchannel has one session slot.
Signed-off-by: Andy Adamson <[email protected]>
---
fs/nfs/client.c | 93 +++++++++++++++++++++++++++++++++++++++++++++
fs/nfs/nfs4proc.c | 5 ++
include/linux/nfs_fs_sb.h | 3 +
3 files changed, 101 insertions(+), 0 deletions(-)
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 9e9cb50..db3f950 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1842,6 +1842,91 @@ static const struct file_operations nfs_volume_list_fops = {
.owner = THIS_MODULE,
};
+#ifdef CONFIG_NFS_V4_1
+/* Forward declarations: definitions follow the ops tables below */
+static int nfs_session_list_open(struct inode *inode, struct file *file);
+static int nfs_session_list_show(struct seq_file *m, void *v);
+
+/*
+ * Iterate the per-net nfs_client_list; reuse the server-list
+ * start/next/stop iterators and supply only a session-specific show().
+ */
+static const struct seq_operations nfs_session_list_ops = {
+ .start = nfs_server_list_start,
+ .next = nfs_server_list_next,
+ .stop = nfs_server_list_stop,
+ .show = nfs_session_list_show,
+};
+
+/* file_operations for /proc/fs/nfsfs/sessions (read-only seq_file) */
+static const struct file_operations nfs_session_list_fops = {
+ .open = nfs_session_list_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * open "/proc/fs/nfsfs/sessions" which provides a summary of sessions with
+ * which we're dealing
+ */
+static int nfs_session_list_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *m;
+ int ret;
+ /* procfs s_fs_info holds the pid namespace of the mount */
+ struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info;
+ /* NOTE(review): derives the net ns from the ns's child_reaper,
+  * mirroring nfs_server_list_open - confirm against that helper */
+ struct net *net = pid_ns->child_reaper->nsproxy->net_ns;
+
+ ret = seq_open(file, &nfs_session_list_ops);
+ if (ret < 0)
+ return ret;
+
+ /* Stash the net namespace for nfs_session_list_show() */
+ m = file->private_data;
+ m->private = net;
+
+ return 0;
+}
+
+/*
+ * display a header line followed by a load of call lines
+ */
+static int nfs_session_list_show(struct seq_file *m, void *v)
+{
+ struct nfs_client *clp;
+ struct nfs_net *nn = net_generic(m->private, nfs_net_id);
+ struct nfs4_slot_table *tbl;
+ /* "%x:%x:%x:%x" of four u32s needs up to 4*8 + 3 + 1 = 36 bytes;
+  * the old 16-byte buffer truncated the session id */
+ char sessionid[36];
+
+ /* display header on line 1 */
+ if (v == &nn->nfs_client_list) {
+ seq_puts(m, "HOSTNAME SESSIONID WAITQ:MAX,CUR"
+ " SLOTID:MAX,TARGET,HIGHEST \n");
+ return 0;
+ }
+
+ /* display one session per line on subsequent lines */
+ clp = list_entry(v, struct nfs_client, cl_share_link);
+
+ /* Check if the nfs_client has a session and is initialized */
+ if (!clp->cl_session || clp->cl_cons_state != NFS_CS_READY)
+ return 0;
+
+ /* NOTE(review): assumes sess_id.data is 16 bytes and u32-aligned;
+  * consider memcpy into a u32[4] to avoid the aliasing cast */
+ snprintf(sessionid, sizeof(sessionid), "%x:%x:%x:%x",
+ ((u32 *)&clp->cl_session->sess_id.data)[0],
+ ((u32 *)&clp->cl_session->sess_id.data)[1],
+ ((u32 *)&clp->cl_session->sess_id.data)[2],
+ ((u32 *)&clp->cl_session->sess_id.data)[3]);
+
+ tbl = &clp->cl_session->fc_slot_table;
+ spin_lock(&tbl->slot_tbl_lock);
+ seq_printf(m, "%s %-16s %u, %u %d, %d, %d\n",
+ clp->cl_hostname,
+ sessionid,
+ tbl->max_qlen,
+ tbl->qlen,
+ tbl->max_slots, /* was "max_slots = 1": reading the file clobbered the slot table */
+ tbl->target_max_slots,
+ tbl->highest_used_slotid);
+ spin_unlock(&tbl->slot_tbl_lock);
+ return 0;
+}
+#endif /* CONFIG_NFS_V4_1 */
+
/*
* open "/proc/fs/nfsfs/servers" which provides a summary of servers with which
* we're dealing
@@ -2037,6 +2122,14 @@ int __init nfs_fs_proc_init(void)
proc_fs_nfs, &nfs_volume_list_fops);
if (!p)
goto error_2;
+
+#ifdef CONFIG_NFS_V4_1
+ /* a file of sessions that we are using */
+ p = proc_create("sessions", S_IFREG|S_IRUGO,
+ proc_fs_nfs, &nfs_session_list_fops);
+ if (!p)
+ goto error_2;
+#endif /* CONFIG_NFS_V4_1 */
return 0;
error_2:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e68a1f8..6d3773c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -565,6 +565,11 @@ int nfs41_setup_sequence(struct nfs4_session *session,
tbl = &session->fc_slot_table;
spin_lock(&tbl->slot_tbl_lock);
+
+ /* Gather stats */
+ tbl->max_qlen = max(tbl->max_qlen, tbl->slot_tbl_waitq.qlen);
+ tbl->qlen = tbl->slot_tbl_waitq.qlen;
+
if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
!rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
/* The state manager will wait until the slot table is empty */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 3bf4766..1ec17d9 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,9 @@ struct nfs4_slot_table {
u32 target_max_slots; /* Set by CB_RECALL_SLOT as
* the new max_slots */
struct completion complete;
+ /* For session statistics */
+ unsigned short max_qlen; /* max waitq qlen */
+ unsigned short qlen; /* current waitq qlen */
};
static inline int slot_idx(struct nfs4_slot_table *tbl, struct nfs4_slot *sp)
--
1.7.6.4
T24gVGh1LCAyMDEyLTAyLTE2IGF0IDE2OjE1IC0wNTAwLCBBbmR5IEFkYW1zb24gd3JvdGU6DQo+
IE9uIFRodSwgRmViIDE2LCAyMDEyIGF0IDM6NTAgUE0sIE15a2xlYnVzdCwgVHJvbmQNCj4gPFRy
b25kLk15a2xlYnVzdEBuZXRhcHAuY29tPiB3cm90ZToNCj4gPiBPbiBXZWQsIDIwMTItMDItMTUg
YXQgMTc6NTAgLTA1MDAsIGFuZHJvc0BuZXRhcHAuY29tIHdyb3RlOg0KPiA+PiBGcm9tOiBBbmR5
IEFkYW1zb24gPGFuZHJvc0BuZXRhcHAuY29tPg0KPiA+Pg0KPiA+PiBTZXNzaW9uIHN0YXRpc3Rp
Y3MgYXJlIG5lZWRlZCBmb3IgcGVyZm9ybWFuY2UgY2hhcmFjdGVyaXNhdGlvbiBhbmQgZGVidWdn
aW5nDQo+ID4+IHJlY292ZXJ5IGFuZCBtaWdyYXRpb24uDQo+ID4+DQo+ID4+IE9ubHkgZ2F0aGVy
IGZvcmVjaGFubmVsIHN0YXRpc3RpY3MgYXMgdGhlIGJhY2tjaGFubmVsIGhhcyBvbmUgc2Vzc2lv
biBzbG90Lg0KPiA+Pg0KPiA+DQo+ID4gQ291bGQgdGhpcyBiZSBkb25lIGJldHRlciB1c2luZyB0
cmFjZXBvaW50cz8gSSdtIG5vdCBzdXJlIHRoYXQgSSBzZWUgaG93DQo+ID4gYSB0eXBpY2FsIHN5
c3RlbSBhZG1pbiB3b3VsZCB1c2UgdGhpcyBraW5kIG9mIGZpbGUsIHNvIGl0IGxvb2tzIG1vcmUN
Cj4gPiBsaWtlIGEgZGVidWdnaW5nIGFpZC4NCj4gDQo+IEl0IGlzIHNpbWlsYXIgdG8gdGhlIG1v
dW50c3RhdHMgeHBydCBzdGFuemEgd2l0aCB3YWl0cSBsZW5ndGhzLCBidXQgSQ0KPiBzZWUgeW91
ciBwb2ludC4gIEknbGwgbG9vayBpbnRvIGEgdHJhY2Vwb2ludCB2ZXJzaW9uLg0KPiANCj4gQW55
IHN1Z2dlc3Rpb25zIG9mIG90aGVyIHNlc3Npb24gaW5mbyB0byBnYXRoZXI/DQoNCiAgICAgICog
SG93IG1hbnkgdGltZXMgaGF2ZSB3ZSByZXNldCB0aGUgc2Vzc2lvbj8NCiAgICAgICogSG93IG1h
bnkgc2xvdCByZWNhbGxzIGhhdmUgd2Ugc2Vlbj8NCiAgICAgICogSG93IG1hbnkgTkZTNEVSUl9D
T05OX05PVF9CT1VORF9UT19TRVNTSU9OPw0KICAgICAgKiBIb3cgbWFueSBORlM0RVJSX0JBRFNF
U1NJT04vREVBRFNFU1NJT04/DQogICAgICAqIEhvdyBtYW55IE5GUzRFUlJfQkFEU0xPVC9CQURf
SElHSF9TTE9UPw0KDQotLSANClRyb25kIE15a2xlYnVzdA0KTGludXggTkZTIGNsaWVudCBtYWlu
dGFpbmVyDQoNCk5ldEFwcA0KVHJvbmQuTXlrbGVidXN0QG5ldGFwcC5jb20NCnd3dy5uZXRhcHAu
Y29tDQoNCg==
On Thu, Feb 16, 2012 at 3:50 PM, Myklebust, Trond
<[email protected]> wrote:
> On Wed, 2012-02-15 at 17:50 -0500, [email protected] wrote:
>> From: Andy Adamson <[email protected]>
>>
>> Session statistics are needed for performance characterisation and debugging
>> recovery and migration.
>>
>> Only gather forechannel statistics as the backchannel has one session slot.
>>
>
> Could this be done better using tracepoints? I'm not sure that I see how
> a typical system admin would use this kind of file, so it looks more
> like a debugging aid.
It is similar to the mountstats xprt stanza with waitq lengths, but I
see your point. I'll look into a tracepoint version.
Any suggestions of other session info to gather?
-->Andy
>
> --
> Trond Myklebust
> Linux NFS client maintainer
>
> NetApp
> [email protected]
> http://www.netapp.com
>
T24gV2VkLCAyMDEyLTAyLTE1IGF0IDE3OjUwIC0wNTAwLCBhbmRyb3NAbmV0YXBwLmNvbSB3cm90
ZToNCj4gRnJvbTogQW5keSBBZGFtc29uIDxhbmRyb3NAbmV0YXBwLmNvbT4NCj4gDQo+IFNlc3Np
b24gc3RhdGlzdGljcyBhcmUgbmVlZGVkIGZvciBwZXJmb3JtYW5jZSBjaGFyYWN0ZXJpc2F0aW9u
IGFuZCBkZWJ1Z2dpbmcNCj4gcmVjb3ZlcnkgYW5kIG1pZ3JhdGlvbi4NCj4gDQo+IE9ubHkgZ2F0
aGVyIGZvcmVjaGFubmVsIHN0YXRpc3RpY3MgYXMgdGhlIGJhY2tjaGFubmVsIGhhcyBvbmUgc2Vz
c2lvbiBzbG90Lg0KPiANCg0KQ291bGQgdGhpcyBiZSBkb25lIGJldHRlciB1c2luZyB0cmFjZXBv
aW50cz8gSSdtIG5vdCBzdXJlIHRoYXQgSSBzZWUgaG93DQphIHR5cGljYWwgc3lzdGVtIGFkbWlu
IHdvdWxkIHVzZSB0aGlzIGtpbmQgb2YgZmlsZSwgc28gaXQgbG9va3MgbW9yZQ0KbGlrZSBhIGRl
YnVnZ2luZyBhaWQuDQoNCi0tIA0KVHJvbmQgTXlrbGVidXN0DQpMaW51eCBORlMgY2xpZW50IG1h
aW50YWluZXINCg0KTmV0QXBwDQpUcm9uZC5NeWtsZWJ1c3RAbmV0YXBwLmNvbQ0Kd3d3Lm5ldGFw
cC5jb20NCg0K