A future patch will remove refcounting on svc_serv as it is of little
use.
It is currently used to keep the svc around while the pool_stats file is
open.
Change this to get the pointer, protected by the mutex, only in
seq_start, and then release the mutex in seq_stop.
This means that if the nfsd server is stopped and restarted while the
pool_stats file is open, then some pool stats info could be from the
first instance and some from the second. This might appear odd, but is
unlikely to be a problem in practice.
Signed-off-by: NeilBrown <[email protected]>
---
fs/nfsd/nfsctl.c | 2 +-
fs/nfsd/nfssvc.c | 30 ++++++++---------------
include/linux/sunrpc/svc.h | 5 +++-
net/sunrpc/svc_xprt.c | 49 ++++++++++++++++++++++++++++++++------
4 files changed, 57 insertions(+), 29 deletions(-)
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 79efb1075f38..d78ae4452946 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -179,7 +179,7 @@ static const struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = nfsd_pool_stats_release,
+ .release = svc_pool_stats_release,
};
DEFINE_SHOW_ATTRIBUTE(nfsd_reply_cache_stats);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 6c968c02cc29..203e1cfc1cad 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -1072,30 +1072,20 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
return true;
}
-int nfsd_pool_stats_open(struct inode *inode, struct file *file)
+static struct svc_serv *nfsd_get_serv(struct seq_file *s, bool start)
{
- int ret;
- struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
-
- mutex_lock(&nfsd_mutex);
- if (nn->nfsd_serv == NULL) {
+ struct nfsd_net *nn = net_generic(file_inode(s->file)->i_sb->s_fs_info,
+ nfsd_net_id);
+ if (start) {
+ mutex_lock(&nfsd_mutex);
+ return nn->nfsd_serv;
+ } else {
mutex_unlock(&nfsd_mutex);
- return -ENODEV;
+ return NULL;
}
- svc_get(nn->nfsd_serv);
- ret = svc_pool_stats_open(nn->nfsd_serv, file);
- mutex_unlock(&nfsd_mutex);
- return ret;
}
-int nfsd_pool_stats_release(struct inode *inode, struct file *file)
+int nfsd_pool_stats_open(struct inode *inode, struct file *file)
{
- struct seq_file *seq = file->private_data;
- struct svc_serv *serv = seq->private;
- int ret = seq_release(inode, file);
-
- mutex_lock(&nfsd_mutex);
- svc_put(serv);
- mutex_unlock(&nfsd_mutex);
- return ret;
+ return svc_pool_stats_open(nfsd_get_serv, file);
}
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index b10f987509cc..11acad6988a2 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -433,7 +433,10 @@ void svc_exit_thread(struct svc_rqst *);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int (*threadfn)(void *data));
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
+ struct file *file);
+int svc_pool_stats_release(struct inode *inode,
+ struct file *file);
void svc_process(struct svc_rqst *rqstp);
void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index fee83d1024bc..2f99f7475b7b 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1366,26 +1366,38 @@ EXPORT_SYMBOL_GPL(svc_xprt_names);
/*----------------------------------------------------------------------------*/
+struct pool_private {
+ struct svc_serv *(*get_serv)(struct seq_file *, bool);
+ struct svc_serv *serv;
+};
+
static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
{
unsigned int pidx = (unsigned int)*pos;
- struct svc_serv *serv = m->private;
+ struct pool_private *pp = m->private;
dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
+ pp->serv = pp->get_serv(m, true);
+
if (!pidx)
return SEQ_START_TOKEN;
- return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
+ if (!pp->serv)
+ return NULL;
+ return (pidx > pp->serv->sv_nrpools ? NULL : &pp->serv->sv_pools[pidx-1]);
}
static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
{
struct svc_pool *pool = p;
- struct svc_serv *serv = m->private;
+ struct pool_private *pp = m->private;
+ struct svc_serv *serv = pp->serv;
dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
- if (p == SEQ_START_TOKEN) {
+ if (!serv) {
+ pool = NULL;
+ } else if (p == SEQ_START_TOKEN) {
pool = &serv->sv_pools[0];
} else {
unsigned int pidx = (pool - &serv->sv_pools[0]);
@@ -1400,6 +1412,9 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
static void svc_pool_stats_stop(struct seq_file *m, void *p)
{
+ struct pool_private *pp = m->private;
+
+ pp->get_serv(m, false);
}
static int svc_pool_stats_show(struct seq_file *m, void *p)
@@ -1427,15 +1442,35 @@ static const struct seq_operations svc_pool_stats_seq_ops = {
.show = svc_pool_stats_show,
};
-int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
+int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
+ struct file *file)
{
+ struct pool_private *pp;
int err;
+ pp = kmalloc(sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return -ENOMEM;
+
err = seq_open(file, &svc_pool_stats_seq_ops);
- if (!err)
- ((struct seq_file *) file->private_data)->private = serv;
+ if (!err) {
+ pp->get_serv = get_serv;
+ ((struct seq_file *) file->private_data)->private = pp;
+ } else
+ kfree(pp);
+
return err;
}
EXPORT_SYMBOL(svc_pool_stats_open);
+int svc_pool_stats_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ kfree(seq->private);
+ seq->private = NULL;
+ return seq_release(inode, file);
+}
+EXPORT_SYMBOL(svc_pool_stats_release);
+
/*----------------------------------------------------------------------------*/
--
2.42.0
On Mon, 2023-10-30 at 12:08 +1100, NeilBrown wrote:
> A future patch will remove refcounting on svc_serv as it is of little
> use.
> It is currently used to keep the svc around while the pool_stats file is
> open.
> Change this to get the pointer, protected by the mutex, only in
> seq_start, and the release the mutex in seq_stop.
> This means that if the nfsd server is stopped and restarted while the
> pool_stats file it open, then some pool stats info could be from the
> first instance and some from the second. This might appear odd, but is
> unlikely to be a problem in practice.
>
> Signed-off-by: NeilBrown <[email protected]>
> ---
> fs/nfsd/nfsctl.c | 2 +-
> fs/nfsd/nfssvc.c | 30 ++++++++---------------
> include/linux/sunrpc/svc.h | 5 +++-
> net/sunrpc/svc_xprt.c | 49 ++++++++++++++++++++++++++++++++------
> 4 files changed, 57 insertions(+), 29 deletions(-)
>
> diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
> index 79efb1075f38..d78ae4452946 100644
> --- a/fs/nfsd/nfsctl.c
> +++ b/fs/nfsd/nfsctl.c
> @@ -179,7 +179,7 @@ static const struct file_operations pool_stats_operations = {
> .open = nfsd_pool_stats_open,
> .read = seq_read,
> .llseek = seq_lseek,
> - .release = nfsd_pool_stats_release,
> + .release = svc_pool_stats_release,
> };
>
> DEFINE_SHOW_ATTRIBUTE(nfsd_reply_cache_stats);
> diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> index 6c968c02cc29..203e1cfc1cad 100644
> --- a/fs/nfsd/nfssvc.c
> +++ b/fs/nfsd/nfssvc.c
> @@ -1072,30 +1072,20 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
> return true;
> }
>
> -int nfsd_pool_stats_open(struct inode *inode, struct file *file)
> +static struct svc_serv *nfsd_get_serv(struct seq_file *s, bool start)
> {
> - int ret;
> - struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> -
> - mutex_lock(&nfsd_mutex);
> - if (nn->nfsd_serv == NULL) {
> + struct nfsd_net *nn = net_generic(file_inode(s->file)->i_sb->s_fs_info,
> + nfsd_net_id);
> + if (start) {
> + mutex_lock(&nfsd_mutex);
> + return nn->nfsd_serv;
> + } else {
> mutex_unlock(&nfsd_mutex);
> - return -ENODEV;
> + return NULL;
> }
> - svc_get(nn->nfsd_serv);
> - ret = svc_pool_stats_open(nn->nfsd_serv, file);
> - mutex_unlock(&nfsd_mutex);
> - return ret;
> }
>
> -int nfsd_pool_stats_release(struct inode *inode, struct file *file)
> +int nfsd_pool_stats_open(struct inode *inode, struct file *file)
> {
> - struct seq_file *seq = file->private_data;
> - struct svc_serv *serv = seq->private;
> - int ret = seq_release(inode, file);
> -
> - mutex_lock(&nfsd_mutex);
> - svc_put(serv);
> - mutex_unlock(&nfsd_mutex);
> - return ret;
> + return svc_pool_stats_open(nfsd_get_serv, file);
> }
> diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> index b10f987509cc..11acad6988a2 100644
> --- a/include/linux/sunrpc/svc.h
> +++ b/include/linux/sunrpc/svc.h
> @@ -433,7 +433,10 @@ void svc_exit_thread(struct svc_rqst *);
> struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
> int (*threadfn)(void *data));
> int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
> -int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
> +int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
> + struct file *file);
> +int svc_pool_stats_release(struct inode *inode,
> + struct file *file);
> void svc_process(struct svc_rqst *rqstp);
> void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
> int svc_register(const struct svc_serv *, struct net *, const int,
> diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> index fee83d1024bc..2f99f7475b7b 100644
> --- a/net/sunrpc/svc_xprt.c
> +++ b/net/sunrpc/svc_xprt.c
> @@ -1366,26 +1366,38 @@ EXPORT_SYMBOL_GPL(svc_xprt_names);
>
> /*----------------------------------------------------------------------------*/
>
> +struct pool_private {
> + struct svc_serv *(*get_serv)(struct seq_file *, bool);
This bool is pretty ugly. I think I'd rather see two operations here
(get_serv/put_serv). Also, this could use a kerneldoc comment.
> + struct svc_serv *serv;
> +};
> +
> static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
> {
> unsigned int pidx = (unsigned int)*pos;
> - struct svc_serv *serv = m->private;
> + struct pool_private *pp = m->private;
>
> dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
>
> + pp->serv = pp->get_serv(m, true);
> +
> if (!pidx)
> return SEQ_START_TOKEN;
> - return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
> + if (!pp->serv)
> + return NULL;
> + return (pidx > pp->serv->sv_nrpools ? NULL : &pp->serv->sv_pools[pidx-1]);
> }
>
> static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
> {
> struct svc_pool *pool = p;
> - struct svc_serv *serv = m->private;
> + struct pool_private *pp = m->private;
> + struct svc_serv *serv = pp->serv;
>
> dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
>
> - if (p == SEQ_START_TOKEN) {
> + if (!serv) {
> + pool = NULL;
> + } else if (p == SEQ_START_TOKEN) {
> pool = &serv->sv_pools[0];
> } else {
> unsigned int pidx = (pool - &serv->sv_pools[0]);
> @@ -1400,6 +1412,9 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
>
> static void svc_pool_stats_stop(struct seq_file *m, void *p)
> {
> + struct pool_private *pp = m->private;
> +
> + pp->get_serv(m, false);
> }
>
> static int svc_pool_stats_show(struct seq_file *m, void *p)
> @@ -1427,15 +1442,35 @@ static const struct seq_operations svc_pool_stats_seq_ops = {
> .show = svc_pool_stats_show,
> };
>
> -int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
> +int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
> + struct file *file)
> {
> + struct pool_private *pp;
> int err;
>
> + pp = kmalloc(sizeof(*pp), GFP_KERNEL);
> + if (!pp)
> + return -ENOMEM;
> +
> err = seq_open(file, &svc_pool_stats_seq_ops);
> - if (!err)
> - ((struct seq_file *) file->private_data)->private = serv;
> + if (!err) {
> + pp->get_serv = get_serv;
> + ((struct seq_file *) file->private_data)->private = pp;
> + } else
> + kfree(pp);
> +
> return err;
> }
> EXPORT_SYMBOL(svc_pool_stats_open);
>
> +int svc_pool_stats_release(struct inode *inode, struct file *file)
> +{
> + struct seq_file *seq = file->private_data;
> +
> + kfree(seq->private);
> + seq->private = NULL;
> + return seq_release(inode, file);
> +}
> +EXPORT_SYMBOL(svc_pool_stats_release);
> +
> /*----------------------------------------------------------------------------*/
--
Jeff Layton <[email protected]>
On Tue, 31 Oct 2023, Jeff Layton wrote:
> On Mon, 2023-10-30 at 12:08 +1100, NeilBrown wrote:
> > A future patch will remove refcounting on svc_serv as it is of little
> > use.
> > It is currently used to keep the svc around while the pool_stats file is
> > open.
> > Change this to get the pointer, protected by the mutex, only in
> > seq_start, and the release the mutex in seq_stop.
> > This means that if the nfsd server is stopped and restarted while the
> > pool_stats file it open, then some pool stats info could be from the
> > first instance and some from the second. This might appear odd, but is
> > unlikely to be a problem in practice.
> >
> > Signed-off-by: NeilBrown <[email protected]>
> > ---
> > fs/nfsd/nfsctl.c | 2 +-
> > fs/nfsd/nfssvc.c | 30 ++++++++---------------
> > include/linux/sunrpc/svc.h | 5 +++-
> > net/sunrpc/svc_xprt.c | 49 ++++++++++++++++++++++++++++++++------
> > 4 files changed, 57 insertions(+), 29 deletions(-)
> >
> > diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
> > index 79efb1075f38..d78ae4452946 100644
> > --- a/fs/nfsd/nfsctl.c
> > +++ b/fs/nfsd/nfsctl.c
> > @@ -179,7 +179,7 @@ static const struct file_operations pool_stats_operations = {
> > .open = nfsd_pool_stats_open,
> > .read = seq_read,
> > .llseek = seq_lseek,
> > - .release = nfsd_pool_stats_release,
> > + .release = svc_pool_stats_release,
> > };
> >
> > DEFINE_SHOW_ATTRIBUTE(nfsd_reply_cache_stats);
> > diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> > index 6c968c02cc29..203e1cfc1cad 100644
> > --- a/fs/nfsd/nfssvc.c
> > +++ b/fs/nfsd/nfssvc.c
> > @@ -1072,30 +1072,20 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
> > return true;
> > }
> >
> > -int nfsd_pool_stats_open(struct inode *inode, struct file *file)
> > +static struct svc_serv *nfsd_get_serv(struct seq_file *s, bool start)
> > {
> > - int ret;
> > - struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> > -
> > - mutex_lock(&nfsd_mutex);
> > - if (nn->nfsd_serv == NULL) {
> > + struct nfsd_net *nn = net_generic(file_inode(s->file)->i_sb->s_fs_info,
> > + nfsd_net_id);
> > + if (start) {
> > + mutex_lock(&nfsd_mutex);
> > + return nn->nfsd_serv;
> > + } else {
> > mutex_unlock(&nfsd_mutex);
> > - return -ENODEV;
> > + return NULL;
> > }
> > - svc_get(nn->nfsd_serv);
> > - ret = svc_pool_stats_open(nn->nfsd_serv, file);
> > - mutex_unlock(&nfsd_mutex);
> > - return ret;
> > }
> >
> > -int nfsd_pool_stats_release(struct inode *inode, struct file *file)
> > +int nfsd_pool_stats_open(struct inode *inode, struct file *file)
> > {
> > - struct seq_file *seq = file->private_data;
> > - struct svc_serv *serv = seq->private;
> > - int ret = seq_release(inode, file);
> > -
> > - mutex_lock(&nfsd_mutex);
> > - svc_put(serv);
> > - mutex_unlock(&nfsd_mutex);
> > - return ret;
> > + return svc_pool_stats_open(nfsd_get_serv, file);
> > }
> > diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> > index b10f987509cc..11acad6988a2 100644
> > --- a/include/linux/sunrpc/svc.h
> > +++ b/include/linux/sunrpc/svc.h
> > @@ -433,7 +433,10 @@ void svc_exit_thread(struct svc_rqst *);
> > struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
> > int (*threadfn)(void *data));
> > int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
> > -int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
> > +int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
> > + struct file *file);
> > +int svc_pool_stats_release(struct inode *inode,
> > + struct file *file);
> > void svc_process(struct svc_rqst *rqstp);
> > void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
> > int svc_register(const struct svc_serv *, struct net *, const int,
> > diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> > index fee83d1024bc..2f99f7475b7b 100644
> > --- a/net/sunrpc/svc_xprt.c
> > +++ b/net/sunrpc/svc_xprt.c
> > @@ -1366,26 +1366,38 @@ EXPORT_SYMBOL_GPL(svc_xprt_names);
> >
> > /*----------------------------------------------------------------------------*/
> >
> > +struct pool_private {
> > + struct svc_serv *(*get_serv)(struct seq_file *, bool);
>
> This bool is pretty ugly. I think I'd rather see two operations here
> (get_serv/put_serv). Also, this could use a kerneldoc comment.
I agree that bool is ugly, but two function pointers as function args
seemed ugly, and stashing them in 'struct svc_serv' seemed ugly.
So I picked one. I'd be keen to find an approach that didn't require a
function pointer.
Maybe sunrpc could declare
struct svc_ref {
	struct mutex mutex;
	struct svc_serv *serv;
};
and nfsd could use one of those instead of nfsd_mutex and nfsd_serv, and
pass a pointer to it to the open function.
But then the mutex would have to be in the per-net structure. And maybe
that isn't a bad idea, but it is a change...
I guess I could pass pointers to nfsd_mutex and nn->nfsd_serv to the
open function....
Any other ideas?
Thanks,
NeilBrown
>
> > + struct svc_serv *serv;
> > +};
> > +
> > static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
> > {
> > unsigned int pidx = (unsigned int)*pos;
> > - struct svc_serv *serv = m->private;
> > + struct pool_private *pp = m->private;
> >
> > dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
> >
> > + pp->serv = pp->get_serv(m, true);
> > +
> > if (!pidx)
> > return SEQ_START_TOKEN;
> > - return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
> > + if (!pp->serv)
> > + return NULL;
> > + return (pidx > pp->serv->sv_nrpools ? NULL : &pp->serv->sv_pools[pidx-1]);
> > }
> >
> > static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
> > {
> > struct svc_pool *pool = p;
> > - struct svc_serv *serv = m->private;
> > + struct pool_private *pp = m->private;
> > + struct svc_serv *serv = pp->serv;
> >
> > dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
> >
> > - if (p == SEQ_START_TOKEN) {
> > + if (!serv) {
> > + pool = NULL;
> > + } else if (p == SEQ_START_TOKEN) {
> > pool = &serv->sv_pools[0];
> > } else {
> > unsigned int pidx = (pool - &serv->sv_pools[0]);
> > @@ -1400,6 +1412,9 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
> >
> > static void svc_pool_stats_stop(struct seq_file *m, void *p)
> > {
> > + struct pool_private *pp = m->private;
> > +
> > + pp->get_serv(m, false);
> > }
> >
> > static int svc_pool_stats_show(struct seq_file *m, void *p)
> > @@ -1427,15 +1442,35 @@ static const struct seq_operations svc_pool_stats_seq_ops = {
> > .show = svc_pool_stats_show,
> > };
> >
> > -int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
> > +int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
> > + struct file *file)
> > {
> > + struct pool_private *pp;
> > int err;
> >
> > + pp = kmalloc(sizeof(*pp), GFP_KERNEL);
> > + if (!pp)
> > + return -ENOMEM;
> > +
> > err = seq_open(file, &svc_pool_stats_seq_ops);
> > - if (!err)
> > - ((struct seq_file *) file->private_data)->private = serv;
> > + if (!err) {
> > + pp->get_serv = get_serv;
> > + ((struct seq_file *) file->private_data)->private = pp;
> > + } else
> > + kfree(pp);
> > +
> > return err;
> > }
> > EXPORT_SYMBOL(svc_pool_stats_open);
> >
> > +int svc_pool_stats_release(struct inode *inode, struct file *file)
> > +{
> > + struct seq_file *seq = file->private_data;
> > +
> > + kfree(seq->private);
> > + seq->private = NULL;
> > + return seq_release(inode, file);
> > +}
> > +EXPORT_SYMBOL(svc_pool_stats_release);
> > +
> > /*----------------------------------------------------------------------------*/
>
> --
> Jeff Layton <[email protected]>
>
On Tue, 2023-10-31 at 08:48 +1100, NeilBrown wrote:
> On Tue, 31 Oct 2023, Jeff Layton wrote:
> > On Mon, 2023-10-30 at 12:08 +1100, NeilBrown wrote:
> > > A future patch will remove refcounting on svc_serv as it is of little
> > > use.
> > > It is currently used to keep the svc around while the pool_stats file is
> > > open.
> > > Change this to get the pointer, protected by the mutex, only in
> > > seq_start, and the release the mutex in seq_stop.
> > > This means that if the nfsd server is stopped and restarted while the
> > > pool_stats file it open, then some pool stats info could be from the
> > > first instance and some from the second. This might appear odd, but is
> > > unlikely to be a problem in practice.
> > >
> > > Signed-off-by: NeilBrown <[email protected]>
> > > ---
> > > fs/nfsd/nfsctl.c | 2 +-
> > > fs/nfsd/nfssvc.c | 30 ++++++++---------------
> > > include/linux/sunrpc/svc.h | 5 +++-
> > > net/sunrpc/svc_xprt.c | 49 ++++++++++++++++++++++++++++++++------
> > > 4 files changed, 57 insertions(+), 29 deletions(-)
> > >
> > > diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
> > > index 79efb1075f38..d78ae4452946 100644
> > > --- a/fs/nfsd/nfsctl.c
> > > +++ b/fs/nfsd/nfsctl.c
> > > @@ -179,7 +179,7 @@ static const struct file_operations pool_stats_operations = {
> > > .open = nfsd_pool_stats_open,
> > > .read = seq_read,
> > > .llseek = seq_lseek,
> > > - .release = nfsd_pool_stats_release,
> > > + .release = svc_pool_stats_release,
> > > };
> > >
> > > DEFINE_SHOW_ATTRIBUTE(nfsd_reply_cache_stats);
> > > diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
> > > index 6c968c02cc29..203e1cfc1cad 100644
> > > --- a/fs/nfsd/nfssvc.c
> > > +++ b/fs/nfsd/nfssvc.c
> > > @@ -1072,30 +1072,20 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp, struct xdr_stream *xdr)
> > > return true;
> > > }
> > >
> > > -int nfsd_pool_stats_open(struct inode *inode, struct file *file)
> > > +static struct svc_serv *nfsd_get_serv(struct seq_file *s, bool start)
> > > {
> > > - int ret;
> > > - struct nfsd_net *nn = net_generic(inode->i_sb->s_fs_info, nfsd_net_id);
> > > -
> > > - mutex_lock(&nfsd_mutex);
> > > - if (nn->nfsd_serv == NULL) {
> > > + struct nfsd_net *nn = net_generic(file_inode(s->file)->i_sb->s_fs_info,
> > > + nfsd_net_id);
> > > + if (start) {
> > > + mutex_lock(&nfsd_mutex);
> > > + return nn->nfsd_serv;
> > > + } else {
> > > mutex_unlock(&nfsd_mutex);
> > > - return -ENODEV;
> > > + return NULL;
> > > }
> > > - svc_get(nn->nfsd_serv);
> > > - ret = svc_pool_stats_open(nn->nfsd_serv, file);
> > > - mutex_unlock(&nfsd_mutex);
> > > - return ret;
> > > }
> > >
> > > -int nfsd_pool_stats_release(struct inode *inode, struct file *file)
> > > +int nfsd_pool_stats_open(struct inode *inode, struct file *file)
> > > {
> > > - struct seq_file *seq = file->private_data;
> > > - struct svc_serv *serv = seq->private;
> > > - int ret = seq_release(inode, file);
> > > -
> > > - mutex_lock(&nfsd_mutex);
> > > - svc_put(serv);
> > > - mutex_unlock(&nfsd_mutex);
> > > - return ret;
> > > + return svc_pool_stats_open(nfsd_get_serv, file);
> > > }
> > > diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> > > index b10f987509cc..11acad6988a2 100644
> > > --- a/include/linux/sunrpc/svc.h
> > > +++ b/include/linux/sunrpc/svc.h
> > > @@ -433,7 +433,10 @@ void svc_exit_thread(struct svc_rqst *);
> > > struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
> > > int (*threadfn)(void *data));
> > > int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
> > > -int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
> > > +int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
> > > + struct file *file);
> > > +int svc_pool_stats_release(struct inode *inode,
> > > + struct file *file);
> > > void svc_process(struct svc_rqst *rqstp);
> > > void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
> > > int svc_register(const struct svc_serv *, struct net *, const int,
> > > diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
> > > index fee83d1024bc..2f99f7475b7b 100644
> > > --- a/net/sunrpc/svc_xprt.c
> > > +++ b/net/sunrpc/svc_xprt.c
> > > @@ -1366,26 +1366,38 @@ EXPORT_SYMBOL_GPL(svc_xprt_names);
> > >
> > > /*----------------------------------------------------------------------------*/
> > >
> > > +struct pool_private {
> > > + struct svc_serv *(*get_serv)(struct seq_file *, bool);
> >
> > This bool is pretty ugly. I think I'd rather see two operations here
> > (get_serv/put_serv). Also, this could use a kerneldoc comment.
>
> I agree that bool is ugly, but two function pointers as function args
> seemed ugly, and stashing them in 'struct svc_serv' seemed ugly.
> So I picked one. I'd be keen to find an approach that didn't require a
> function pointer.
>
> Maybe sunrpc could declare
>
> struct svc_ref {
> struct mutex mutex;
> struct svc_serv *serv;
> }
>
> and nfsd could use one of those instead of nfsd_mutex and nfsd_serv, and
> pass a pointer to it to the open function.
>
> But then the mutex would have to be in the per-net structure. And maybe
> that isn't a bad idea, but it is a change...
>
> I guess I could pass pointers to nfsd_mutex and nn->nfsd_serv to the
> open function....
>
> Any other ideas?
>
>
I think just passing two function pointers to svc_pool_stats_open, and
storing them both in the serv is the best solution (for now). Like you
said, there are no clean options here. That function only has one caller
though, so at least the nastiness will be confined to that.
Moving the mutex to be per-net does make a lot of sense, but I think
that's a separate project. If you decide to do that and it allows you to
make a simpler interface for handling the get/put_serv pointers, then
the interface can be reworked at that point.
> >
> > > + struct svc_serv *serv;
> > > +};
> > > +
> > > static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos)
> > > {
> > > unsigned int pidx = (unsigned int)*pos;
> > > - struct svc_serv *serv = m->private;
> > > + struct pool_private *pp = m->private;
> > >
> > > dprintk("svc_pool_stats_start, *pidx=%u\n", pidx);
> > >
> > > + pp->serv = pp->get_serv(m, true);
> > > +
> > > if (!pidx)
> > > return SEQ_START_TOKEN;
> > > - return (pidx > serv->sv_nrpools ? NULL : &serv->sv_pools[pidx-1]);
> > > + if (!pp->serv)
> > > + return NULL;
> > > + return (pidx > pp->serv->sv_nrpools ? NULL : &pp->serv->sv_pools[pidx-1]);
> > > }
> > >
> > > static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
> > > {
> > > struct svc_pool *pool = p;
> > > - struct svc_serv *serv = m->private;
> > > + struct pool_private *pp = m->private;
> > > + struct svc_serv *serv = pp->serv;
> > >
> > > dprintk("svc_pool_stats_next, *pos=%llu\n", *pos);
> > >
> > > - if (p == SEQ_START_TOKEN) {
> > > + if (!serv) {
> > > + pool = NULL;
> > > + } else if (p == SEQ_START_TOKEN) {
> > > pool = &serv->sv_pools[0];
> > > } else {
> > > unsigned int pidx = (pool - &serv->sv_pools[0]);
> > > @@ -1400,6 +1412,9 @@ static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos)
> > >
> > > static void svc_pool_stats_stop(struct seq_file *m, void *p)
> > > {
> > > + struct pool_private *pp = m->private;
> > > +
> > > + pp->get_serv(m, false);
> > > }
> > >
> > > static int svc_pool_stats_show(struct seq_file *m, void *p)
> > > @@ -1427,15 +1442,35 @@ static const struct seq_operations svc_pool_stats_seq_ops = {
> > > .show = svc_pool_stats_show,
> > > };
> > >
> > > -int svc_pool_stats_open(struct svc_serv *serv, struct file *file)
> > > +int svc_pool_stats_open(struct svc_serv *(*get_serv)(struct seq_file *, bool),
> > > + struct file *file)
> > > {
> > > + struct pool_private *pp;
> > > int err;
> > >
> > > + pp = kmalloc(sizeof(*pp), GFP_KERNEL);
> > > + if (!pp)
> > > + return -ENOMEM;
> > > +
> > > err = seq_open(file, &svc_pool_stats_seq_ops);
> > > - if (!err)
> > > - ((struct seq_file *) file->private_data)->private = serv;
> > > + if (!err) {
> > > + pp->get_serv = get_serv;
> > > + ((struct seq_file *) file->private_data)->private = pp;
> > > + } else
> > > + kfree(pp);
> > > +
> > > return err;
> > > }
> > > EXPORT_SYMBOL(svc_pool_stats_open);
> > >
> > > +int svc_pool_stats_release(struct inode *inode, struct file *file)
> > > +{
> > > + struct seq_file *seq = file->private_data;
> > > +
> > > + kfree(seq->private);
> > > + seq->private = NULL;
> > > + return seq_release(inode, file);
> > > +}
> > > +EXPORT_SYMBOL(svc_pool_stats_release);
> > > +
> > > /*----------------------------------------------------------------------------*/
> >
> > --
> > Jeff Layton <[email protected]>
> >
>
--
Jeff Layton <[email protected]>