2014-10-17 10:21:23

by Jeff Layton

Subject: [PATCH] nfsd: convert nfs4_file searches to use RCU

The global state_lock protects the file_hashtbl, and that has the
potential to be a scalability bottleneck.

Address this by making the file_hashtbl use RCU. Add a rcu_head to the
nfs4_file and use that when freeing ones that have been hashed.

Convert find_file to use a lockless lookup. Convert find_or_add_file to
attempt a lockless lookup first, and then fall back to doing the
"normal" locked search and insert if that fails to find anything.

Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfsd/nfs4state.c | 36 +++++++++++++++++++++++++++---------
fs/nfsd/state.h | 1 +
2 files changed, 28 insertions(+), 9 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index e9c3afe4b5d3..9bd3bcfee3c2 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -280,15 +280,22 @@ static void nfsd4_free_file(struct nfs4_file *f)
kmem_cache_free(file_slab, f);
}

+static void nfsd4_free_file_rcu(struct rcu_head *rcu)
+{
+ struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
+
+ nfsd4_free_file(fp);
+}
+
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
might_lock(&state_lock);

if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
- hlist_del(&fi->fi_hash);
+ hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
- nfsd4_free_file(fi);
+ call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
}

@@ -3073,7 +3080,7 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
- hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
+ hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
@@ -3313,12 +3320,19 @@ find_file_locked(struct knfsd_fh *fh)
static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
- struct nfs4_file *fp;
+ struct nfs4_file *fp, *ret = NULL;
+ unsigned int hashval = file_hashval(fh);

- spin_lock(&state_lock);
- fp = find_file_locked(fh);
- spin_unlock(&state_lock);
- return fp;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
+ if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
+ if (atomic_inc_not_zero(&fp->fi_ref))
+ ret = fp;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
}

static struct nfs4_file *
@@ -3326,9 +3340,13 @@ find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
struct nfs4_file *fp;

+ fp = find_file(fh);
+ if (fp)
+ return fp;
+
spin_lock(&state_lock);
fp = find_file_locked(fh);
- if (fp == NULL) {
+ if (likely(fp == NULL)) {
nfsd4_init_file(new, fh);
fp = new;
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 8e85e07efce6..530470a35ecd 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -490,6 +490,7 @@ struct nfs4_file {
atomic_t fi_access[2];
u32 fi_share_deny;
struct file *fi_deleg_file;
+ struct rcu_head fi_rcu;
atomic_t fi_delegees;
struct knfsd_fh fi_fhandle;
bool fi_had_conflict;
--
1.9.3



2014-10-28 08:35:24

by Christoph Hellwig

Subject: Re: [PATCH v2] nfsd: convert nfs4_file searches to use RCU

Looks good,

Reviewed-by: Christoph Hellwig <[email protected]>

2014-10-21 10:40:16

by Christoph Hellwig

Subject: Re: [PATCH] nfsd: convert nfs4_file searches to use RCU

On Fri, Oct 17, 2014 at 06:21:15AM -0400, Jeff Layton wrote:
> The global state_lock protects the file_hashtbl, and that has the
> potential to be a scalability bottleneck.
>
> Address this by making the file_hashtbl use RCU. Add a rcu_head to the
> nfs4_file and use that when freeing ones that have been hashed.
>
> Convert find_file to use a lockless lookup. Convert find_or_add_file to
> attempt a lockless lookup first, and then fall back to doing the
> "normal" locked search and insert if that fails to find anything.
>
> Signed-off-by: Jeff Layton <[email protected]>
> ---
> fs/nfsd/nfs4state.c | 36 +++++++++++++++++++++++++++---------
> fs/nfsd/state.h | 1 +
> 2 files changed, 28 insertions(+), 9 deletions(-)
>
> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> index e9c3afe4b5d3..9bd3bcfee3c2 100644
> --- a/fs/nfsd/nfs4state.c
> +++ b/fs/nfsd/nfs4state.c
> @@ -280,15 +280,22 @@ static void nfsd4_free_file(struct nfs4_file *f)
> kmem_cache_free(file_slab, f);
> }
>
> +static void nfsd4_free_file_rcu(struct rcu_head *rcu)
> +{
> + struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
> +
> + nfsd4_free_file(fp);

You might as well kill the pointless nfsd4_free_file wrapper while
you're at it.

> @@ -3313,12 +3320,19 @@ find_file_locked(struct knfsd_fh *fh)
> static struct nfs4_file *
> find_file(struct knfsd_fh *fh)
> {
> - struct nfs4_file *fp;
> + struct nfs4_file *fp, *ret = NULL;
> + unsigned int hashval = file_hashval(fh);
>
> - spin_lock(&state_lock);
> - fp = find_file_locked(fh);
> - spin_unlock(&state_lock);
> - return fp;
> + rcu_read_lock();
> + hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
> + if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
> + if (atomic_inc_not_zero(&fp->fi_ref))
> + ret = fp;
> + break;
> + }
> + }
> + rcu_read_unlock();
> + return ret;

I think it would be better to just switch find_file_locked to use
hlist_for_each_entry_rcu instead of duplicating it.

> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
> index 8e85e07efce6..530470a35ecd 100644
> --- a/fs/nfsd/state.h
> +++ b/fs/nfsd/state.h
> @@ -490,6 +490,7 @@ struct nfs4_file {
> atomic_t fi_access[2];
> u32 fi_share_deny;
> struct file *fi_deleg_file;
> + struct rcu_head fi_rcu;

Can we union this over a field that's guaranteed to be unused on
a file that has been unhashed?


Also a slightly related question: Is the small fixed-size hash table
still fine for the workloads where the RCU access matters? It seems
like we should aim for a more scalable data structure to look up the
files. It also irks me a bit how this duplicates the inode cache,
which for some filesystems (e.g. XFS) already is very scalable.


2014-10-23 12:01:10

by Jeff Layton

Subject: [PATCH v2] nfsd: convert nfs4_file searches to use RCU

The global state_lock protects the file_hashtbl, and that has the
potential to be a scalability bottleneck.

Address this by making the file_hashtbl use RCU. Add a rcu_head to the
nfs4_file and use that when freeing ones that have been hashed. In order
to conserve space, we union the fi_rcu field with the fi_delegations
list_head which must be clear by the time the last reference to the file
is dropped.

Convert find_file_locked to use RCU lookup primitives and not to require
that the state_lock be held, and convert find_file to do a lockless
lookup. Convert find_or_add_file to attempt a lockless lookup first, and
then fall back to doing a locked search and insert if that fails to find
anything.

Also, minimize the number of times we need to calculate the hash value
by passing it in as an argument to the search and insert functions, and
optimize the order of arguments in nfsd4_init_file.

Cc: Christoph Hellwig <[email protected]>
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfsd/nfs4state.c | 51 +++++++++++++++++++++++++++++----------------------
fs/nfsd/state.h | 5 ++++-
2 files changed, 33 insertions(+), 23 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1afd7d4420bd..1379d86f7b4f 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -275,9 +275,11 @@ opaque_hashval(const void *ptr, int nbytes)
return x;
}

-static void nfsd4_free_file(struct nfs4_file *f)
+static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
- kmem_cache_free(file_slab, f);
+ struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
+
+ kmem_cache_free(file_slab, fp);
}

static inline void
@@ -286,9 +288,10 @@ put_nfs4_file(struct nfs4_file *fi)
might_lock(&state_lock);

if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
- hlist_del(&fi->fi_hash);
+ hlist_del_rcu(&fi->fi_hash);
spin_unlock(&state_lock);
- nfsd4_free_file(fi);
+ WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
+ call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
}
}

@@ -3057,10 +3060,9 @@ static struct nfs4_file *nfsd4_alloc_file(void)
}

/* OPEN Share state helper functions */
-static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
+static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
+ struct nfs4_file *fp)
{
- unsigned int hashval = file_hashval(fh);
-
lockdep_assert_held(&state_lock);

atomic_set(&fp->fi_ref, 1);
@@ -3073,7 +3075,7 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
fp->fi_share_deny = 0;
memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
memset(fp->fi_access, 0, sizeof(fp->fi_access));
- hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
+ hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
@@ -3294,17 +3296,14 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)

/* search file_hashtbl[] for file */
static struct nfs4_file *
-find_file_locked(struct knfsd_fh *fh)
+find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
- unsigned int hashval = file_hashval(fh);
struct nfs4_file *fp;

- lockdep_assert_held(&state_lock);
-
- hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
+ hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
- get_nfs4_file(fp);
- return fp;
+ if (atomic_inc_not_zero(&fp->fi_ref))
+ return fp;
}
}
return NULL;
@@ -3314,10 +3313,11 @@ static struct nfs4_file *
find_file(struct knfsd_fh *fh)
{
struct nfs4_file *fp;
+ unsigned int hashval = file_hashval(fh);

- spin_lock(&state_lock);
- fp = find_file_locked(fh);
- spin_unlock(&state_lock);
+ rcu_read_lock();
+ fp = find_file_locked(fh, hashval);
+ rcu_read_unlock();
return fp;
}

@@ -3325,11 +3325,18 @@ static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
{
struct nfs4_file *fp;
+ unsigned int hashval = file_hashval(fh);
+
+ rcu_read_lock();
+ fp = find_file_locked(fh, hashval);
+ rcu_read_unlock();
+ if (fp)
+ return fp;

spin_lock(&state_lock);
- fp = find_file_locked(fh);
- if (fp == NULL) {
- nfsd4_init_file(new, fh);
+ fp = find_file_locked(fh, hashval);
+ if (likely(fp == NULL)) {
+ nfsd4_init_file(fh, hashval, new);
fp = new;
}
spin_unlock(&state_lock);
@@ -4127,7 +4134,7 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
nfs4_put_stateowner(so);
}
if (open->op_file)
- nfsd4_free_file(open->op_file);
+ kmem_cache_free(file_slab, open->op_file);
if (open->op_stp)
nfs4_put_stid(&open->op_stp->st_stid);
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 8e85e07efce6..9d3be371240a 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -477,7 +477,10 @@ struct nfs4_file {
spinlock_t fi_lock;
struct hlist_node fi_hash; /* hash on fi_fhandle */
struct list_head fi_stateids;
- struct list_head fi_delegations;
+ union {
+ struct list_head fi_delegations;
+ struct rcu_head fi_rcu;
+ };
/* One each for O_RDONLY, O_WRONLY, O_RDWR: */
struct file * fi_fds[3];
/*
--
1.9.3


2014-10-21 11:52:59

by Christoph Hellwig

Subject: Re: [PATCH] nfsd: convert nfs4_file searches to use RCU

On Tue, Oct 21, 2014 at 07:16:06AM -0400, Jeff Layton wrote:
> Yeah, that's probably fine. Suggestions on what to union it with?
>
> struct callback_head is two pointers, so maybe we can use one of the
> list_heads (fi_delegations maybe?).

Sounds reasonable to me.

> If we do want to change to a different type of structure I'd be fine
> with that, but would prefer that it be RCU-friendly. What sort of
> structure did you have in mind?

For the XFS inode lookup we use multiple radix trees, which work
very well given how inode numbers work in XFS, with a clear allocation
group component and the usual clustering of inode numbers. A simple
radix tree might work fine for NFSd, or maybe the new resizable hash
tables from the networking folks?
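
Purely as a hypothetical sketch (I'm assuming the resizable hash table
API as it is settling out -- rhashtable_lookup_fast and friends -- plus
an invented fi_rhash member, table and params, and I'm glossing over
the fact that filehandles are variable-length), a conversion could look
something like:

#include <linux/rhashtable.h>

/* hypothetical replacement for file_hashtbl; fi_rhash would replace
 * the fi_hash hlist_node in struct nfs4_file */
static struct rhashtable nfs4_file_rhashtbl;

static const struct rhashtable_params nfs4_file_rht_params = {
	/* simplification: a real conversion would want .hashfn and
	 * .obj_cmpfn to hash/compare only fh_size bytes of the handle */
	.key_len	= sizeof(struct knfsd_fh),
	.key_offset	= offsetof(struct nfs4_file, fi_fhandle),
	.head_offset	= offsetof(struct nfs4_file, fi_rhash),
};

static struct nfs4_file *find_file_rht(struct knfsd_fh *fh)
{
	struct nfs4_file *fp;

	rcu_read_lock();
	fp = rhashtable_lookup_fast(&nfs4_file_rhashtbl, fh,
				    nfs4_file_rht_params);
	/* same refcounting dance as the RCU hlist version */
	if (fp && !atomic_inc_not_zero(&fp->fi_ref))
		fp = NULL;
	rcu_read_unlock();
	return fp;
}

The table then resizes itself as the number of nfs4_files grows, which
would sidestep the fixed-size-bucket question entirely.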

2014-10-21 11:16:14

by Jeff Layton

Subject: Re: [PATCH] nfsd: convert nfs4_file searches to use RCU

On Tue, 21 Oct 2014 03:40:13 -0700
Christoph Hellwig <[email protected]> wrote:

> On Fri, Oct 17, 2014 at 06:21:15AM -0400, Jeff Layton wrote:
> > The global state_lock protects the file_hashtbl, and that has the
> > potential to be a scalability bottleneck.
> >
> > Address this by making the file_hashtbl use RCU. Add a rcu_head to the
> > nfs4_file and use that when freeing ones that have been hashed.
> >
> > Convert find_file to use a lockless lookup. Convert find_or_add_file to
> > attempt a lockless lookup first, and then fall back to doing the
> > "normal" locked search and insert if that fails to find anything.
> >
> > Signed-off-by: Jeff Layton <[email protected]>
> > ---
> > fs/nfsd/nfs4state.c | 36 +++++++++++++++++++++++++++---------
> > fs/nfsd/state.h | 1 +
> > 2 files changed, 28 insertions(+), 9 deletions(-)
> >
> > diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> > index e9c3afe4b5d3..9bd3bcfee3c2 100644
> > --- a/fs/nfsd/nfs4state.c
> > +++ b/fs/nfsd/nfs4state.c
> > @@ -280,15 +280,22 @@ static void nfsd4_free_file(struct nfs4_file *f)
> > kmem_cache_free(file_slab, f);
> > }
> >
> > +static void nfsd4_free_file_rcu(struct rcu_head *rcu)
> > +{
> > + struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
> > +
> > + nfsd4_free_file(fp);
>
> You might as well kill the pointless nfsd4_free_file wrapper while
> you're at it.
>

Hmm, ok. There is one place that still calls it, but we can just make
that use kmem_cache_free.

> > @@ -3313,12 +3320,19 @@ find_file_locked(struct knfsd_fh *fh)
> > static struct nfs4_file *
> > find_file(struct knfsd_fh *fh)
> > {
> > - struct nfs4_file *fp;
> > + struct nfs4_file *fp, *ret = NULL;
> > + unsigned int hashval = file_hashval(fh);
> >
> > - spin_lock(&state_lock);
> > - fp = find_file_locked(fh);
> > - spin_unlock(&state_lock);
> > - return fp;
> > + rcu_read_lock();
> > + hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
> > + if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
> > + if (atomic_inc_not_zero(&fp->fi_ref))
> > + ret = fp;
> > + break;
> > + }
> > + }
> > + rcu_read_unlock();
> > + return ret;
>
> I think it would be better to just switch find_file_locked to use
> hlist_for_each_entry_rcu instead of duplicating it.
>

I'll have to think about that. We do have to do an atomic_inc_not_zero
if we're doing an unlocked search, but that's not really necessary if
the spinlock is held. I guess it won't hurt in that case, so we should
be able to merge the two functions. I'll respin and do that...
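
Roughly what I have in mind -- an untested sketch of the merged helper,
with comments on why the atomic_inc_not_zero is needed:

static struct nfs4_file *
find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	/* legal under either rcu_read_lock() or the state_lock, since
	 * all modifications of the chain serialize on state_lock */
	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
		if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
			/* A lockless searcher can race with the final
			 * put_nfs4_file(): the entry may still be
			 * visible on the chain while fi_ref has already
			 * dropped to zero and the free has been
			 * deferred to call_rcu(). atomic_inc_not_zero()
			 * refuses to resurrect such a file. With
			 * state_lock held the count can't reach zero
			 * (atomic_dec_and_lock takes the lock before
			 * the final decrement), so there it just acts
			 * as a normal get. */
			if (atomic_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}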

> > diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
> > index 8e85e07efce6..530470a35ecd 100644
> > --- a/fs/nfsd/state.h
> > +++ b/fs/nfsd/state.h
> > @@ -490,6 +490,7 @@ struct nfs4_file {
> > atomic_t fi_access[2];
> > u32 fi_share_deny;
> > struct file *fi_deleg_file;
> > + struct rcu_head fi_rcu;
>
> Can we union this over a field that's guaranteed to be unused on
> a file that has been unhashed?
>

Yeah, that's probably fine. Suggestions on what to union it with?

struct callback_head is two pointers, so maybe we can use one of the
list_heads (fi_delegations maybe?).
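
i.e. something like (sketch only):

	union {
		/* Both members are two pointers, so the union doesn't
		 * grow the struct: struct list_head is { *next, *prev }
		 * and struct rcu_head (a callback_head) is
		 * { *next, *func }. fi_delegations must already be
		 * empty by the time the last reference is dropped, so
		 * letting call_rcu() scribble over it is harmless. */
		struct list_head fi_delegations;
		struct rcu_head fi_rcu;
	};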

>
> Also a slightly related question: Is the small fixed-size hash table
> still fine for the workloads where the RCU access matters? It seems
> like we should aim for a more scalable data structure to look up the
> files. It also irks me a bit how this duplicates the inode cache,
> which for some filesystems (e.g. XFS) already is very scalable.
>

TBH, I haven't done any real performance measurements on this
hashtable. The main impetus for this patch was to clear the way for
some changes that I'm doing for some pnfsd-related work (I need to be
able to walk a list of nfs4_files w/o holding a spinlock).

I think it makes sense to allow searching for nfs4_files w/o holding
a lock. It's unlikely to hurt performance, and may help it.

If we do want to change to a different type of structure I'd be fine
with that, but would prefer that it be RCU-friendly. What sort of
structure did you have in mind?

Thanks for the review so far!
--
Jeff Layton <[email protected]>

2014-11-07 21:54:47

by J. Bruce Fields

Subject: Re: [PATCH v2] nfsd: convert nfs4_file searches to use RCU

On Thu, Oct 23, 2014 at 08:01:02AM -0400, Jeff Layton wrote:
> The global state_lock protects the file_hashtbl, and that has the
> potential to be a scalability bottleneck.
>
> Address this by making the file_hashtbl use RCU. Add a rcu_head to the
> nfs4_file and use that when freeing ones that have been hashed. In order
> to conserve space, we union the fi_rcu field with the fi_delegations
> list_head which must be clear by the time the last reference to the file
> is dropped.
>
> Convert find_file_locked to use RCU lookup primitives and not to require
> that the state_lock be held, and convert find_file to do a lockless
> lookup. Convert find_or_add_file to attempt a lockless lookup first, and
> then fall back to doing a locked search and insert if that fails to find
> anything.
>
> Also, minimize the number of times we need to calculate the hash value
> by passing it in as an argument to the search and insert functions, and
> optimize the order of arguments in nfsd4_init_file.

OK, looks fine, thanks. Applying for 3.19.

In general though I'd like to see more evidence when we're making
changes for performance reasons.

--b.

>
> Cc: Christoph Hellwig <[email protected]>
> Signed-off-by: Jeff Layton <[email protected]>
> ---
> fs/nfsd/nfs4state.c | 51 +++++++++++++++++++++++++++++----------------------
> fs/nfsd/state.h | 5 ++++-
> 2 files changed, 33 insertions(+), 23 deletions(-)
>
> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> index 1afd7d4420bd..1379d86f7b4f 100644
> --- a/fs/nfsd/nfs4state.c
> +++ b/fs/nfsd/nfs4state.c
> @@ -275,9 +275,11 @@ opaque_hashval(const void *ptr, int nbytes)
> return x;
> }
>
> -static void nfsd4_free_file(struct nfs4_file *f)
> +static void nfsd4_free_file_rcu(struct rcu_head *rcu)
> {
> - kmem_cache_free(file_slab, f);
> + struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
> +
> + kmem_cache_free(file_slab, fp);
> }
>
> static inline void
> @@ -286,9 +288,10 @@ put_nfs4_file(struct nfs4_file *fi)
> might_lock(&state_lock);
>
> if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
> - hlist_del(&fi->fi_hash);
> + hlist_del_rcu(&fi->fi_hash);
> spin_unlock(&state_lock);
> - nfsd4_free_file(fi);
> + WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
> + call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
> }
> }
>
> @@ -3057,10 +3060,9 @@ static struct nfs4_file *nfsd4_alloc_file(void)
> }
>
> /* OPEN Share state helper functions */
> -static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
> +static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
> + struct nfs4_file *fp)
> {
> - unsigned int hashval = file_hashval(fh);
> -
> lockdep_assert_held(&state_lock);
>
> atomic_set(&fp->fi_ref, 1);
> @@ -3073,7 +3075,7 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
> fp->fi_share_deny = 0;
> memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
> memset(fp->fi_access, 0, sizeof(fp->fi_access));
> - hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
> + hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
> }
>
> void
> @@ -3294,17 +3296,14 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
>
> /* search file_hashtbl[] for file */
> static struct nfs4_file *
> -find_file_locked(struct knfsd_fh *fh)
> +find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
> {
> - unsigned int hashval = file_hashval(fh);
> struct nfs4_file *fp;
>
> - lockdep_assert_held(&state_lock);
> -
> - hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
> + hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
> if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
> - get_nfs4_file(fp);
> - return fp;
> + if (atomic_inc_not_zero(&fp->fi_ref))
> + return fp;
> }
> }
> return NULL;
> @@ -3314,10 +3313,11 @@ static struct nfs4_file *
> find_file(struct knfsd_fh *fh)
> {
> struct nfs4_file *fp;
> + unsigned int hashval = file_hashval(fh);
>
> - spin_lock(&state_lock);
> - fp = find_file_locked(fh);
> - spin_unlock(&state_lock);
> + rcu_read_lock();
> + fp = find_file_locked(fh, hashval);
> + rcu_read_unlock();
> return fp;
> }
>
> @@ -3325,11 +3325,18 @@ static struct nfs4_file *
> find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
> {
> struct nfs4_file *fp;
> + unsigned int hashval = file_hashval(fh);
> +
> + rcu_read_lock();
> + fp = find_file_locked(fh, hashval);
> + rcu_read_unlock();
> + if (fp)
> + return fp;
>
> spin_lock(&state_lock);
> - fp = find_file_locked(fh);
> - if (fp == NULL) {
> - nfsd4_init_file(new, fh);
> + fp = find_file_locked(fh, hashval);
> + if (likely(fp == NULL)) {
> + nfsd4_init_file(fh, hashval, new);
> fp = new;
> }
> spin_unlock(&state_lock);
> @@ -4127,7 +4134,7 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
> nfs4_put_stateowner(so);
> }
> if (open->op_file)
> - nfsd4_free_file(open->op_file);
> + kmem_cache_free(file_slab, open->op_file);
> if (open->op_stp)
> nfs4_put_stid(&open->op_stp->st_stid);
> }
> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
> index 8e85e07efce6..9d3be371240a 100644
> --- a/fs/nfsd/state.h
> +++ b/fs/nfsd/state.h
> @@ -477,7 +477,10 @@ struct nfs4_file {
> spinlock_t fi_lock;
> struct hlist_node fi_hash; /* hash on fi_fhandle */
> struct list_head fi_stateids;
> - struct list_head fi_delegations;
> + union {
> + struct list_head fi_delegations;
> + struct rcu_head fi_rcu;
> + };
> /* One each for O_RDONLY, O_WRONLY, O_RDWR: */
> struct file * fi_fds[3];
> /*
> --
> 1.9.3
>