2022-09-26 17:37:16

by Jeff Layton

[permalink] [raw]
Subject: [PATCH 0/4] nfsd: minor cleanup and sanity check patches for nfsd

Chuck,

While hunting for some bugs, I found some warts that the first two
patches clean up. I also added some sanity checks when freeing
delegations or queueing up a CB_RECALL. Your call on whether you want
to accept those. These should apply cleanly to your for-next branch,
but the last patch may need to be adjusted when Dai fixes up his patch.

Cheers,

Jeff Layton (4):
nfsd: only fill out return pointer on success in nfsd4_lookup_stateid
nfsd: fix comments about spinlock handling with delegations
nfsd: make nfsd4_run_cb a bool return function
nfsd: extra checks when freeing delegation stateids

fs/nfsd/nfs4callback.c | 14 ++++++++++++--
fs/nfsd/nfs4state.c | 26 +++++++++++++++-----------
fs/nfsd/state.h | 2 +-
3 files changed, 28 insertions(+), 14 deletions(-)

--
2.37.3


2022-09-26 17:37:16

by Jeff Layton

[permalink] [raw]
Subject: [PATCH 4/4] nfsd: extra checks when freeing delegation stateids

We've had some reports of problems in the refcounting for delegation
stateids that we've yet to track down. Add some extra checks to ensure
that we've removed the object from various lists before freeing it.

Link: https://bugzilla.redhat.com/show_bug.cgi?id=2127067
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfsd/nfs4state.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)

This one may need adjustment after Dai fixes the use of openlockstateid
here.

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 90533f43fea9..490092a10285 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1060,9 +1060,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
- struct nfs4_ol_stateid *stp = openlockstateid(stid);
+ struct nfs4_delegation *dp = delegstateid(stid);

- WARN_ON(!list_empty(&stp->st_stid.sc_cp_list));
+ WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
+ WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
+ WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
+ WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
kmem_cache_free(deleg_slab, stid);
atomic_long_dec(&num_delegations);
}
--
2.37.3

2022-09-26 17:37:16

by Jeff Layton

[permalink] [raw]
Subject: [PATCH 2/4] nfsd: fix comments about spinlock handling with delegations

Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfsd/nfs4state.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 273ed890562c..211f1af1cfb3 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4857,14 +4857,14 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
* We're assuming the state code never drops its reference
* without first removing the lease. Since we're in this lease
* callback (and since the lease code is serialized by the
- * i_lock) we know the server hasn't removed the lease yet, and
+ * flc_lock) we know the server hasn't removed the lease yet, and
* we know it's safe to take a reference.
*/
refcount_inc(&dp->dl_stid.sc_count);
nfsd4_run_cb(&dp->dl_recall);
}

-/* Called from break_lease() with i_lock held. */
+/* Called from break_lease() with flc_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
--
2.37.3

2022-09-26 17:37:21

by Jeff Layton

[permalink] [raw]
Subject: [PATCH 3/4] nfsd: make nfsd4_run_cb a bool return function

...and mirror the semantics of queue_work. Also, queueing a delegation
recall should always succeed, so WARN if one ever doesn't.

Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfsd/nfs4callback.c | 14 ++++++++++++--
fs/nfsd/nfs4state.c | 5 ++---
fs/nfsd/state.h | 2 +-
3 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 4ce328209f61..ba904614ebf5 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -1371,11 +1371,21 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_holds_slot = false;
}

-void nfsd4_run_cb(struct nfsd4_callback *cb)
+/**
+ * nfsd4_run_cb - queue up a callback job to run
+ * @cb: callback to queue
+ *
+ * Kick off a callback to do its thing. Returns false if it was already
+ * queued or running, true otherwise.
+ */
+bool nfsd4_run_cb(struct nfsd4_callback *cb)
{
+ bool queued;
struct nfs4_client *clp = cb->cb_clp;

nfsd41_cb_inflight_begin(clp);
- if (!nfsd4_queue_cb(cb))
+ queued = nfsd4_queue_cb(cb);
+ if (!queued)
nfsd41_cb_inflight_end(clp);
+ return queued;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 211f1af1cfb3..90533f43fea9 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4861,14 +4861,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
* we know it's safe to take a reference.
*/
refcount_inc(&dp->dl_stid.sc_count);
- nfsd4_run_cb(&dp->dl_recall);
+ WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
}

/* Called from break_lease() with flc_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
- bool ret = false;
struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
struct nfs4_file *fp = dp->dl_stid.sc_file;
struct nfs4_client *clp = dp->dl_stid.sc_client;
@@ -4894,7 +4893,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
fp->fi_had_conflict = true;
nfsd_break_one_deleg(dp);
spin_unlock(&fp->fi_lock);
- return ret;
+ return false;
}

/**
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index b3477087a9fc..e2daef3cc003 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -692,7 +692,7 @@ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
-extern void nfsd4_run_cb(struct nfsd4_callback *cb);
+extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
extern int nfsd4_create_callback_queue(void);
extern void nfsd4_destroy_callback_queue(void);
extern void nfsd4_shutdown_callback(struct nfs4_client *);
--
2.37.3

2022-09-26 17:38:30

by Jeff Layton

[permalink] [raw]
Subject: [PATCH 1/4] nfsd: only fill out return pointer on success in nfsd4_lookup_stateid

In the case of a revoked delegation, we still fill out the pointer even
when returning an error, which is bad form. Only overwrite the pointer
on success.

Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfsd/nfs4state.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d5a8f3d7cbe8..273ed890562c 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -6268,6 +6268,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
struct nfs4_stid **s, struct nfsd_net *nn)
{
__be32 status;
+ struct nfs4_stid *stid;
bool return_revoked = false;

/*
@@ -6290,15 +6291,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
}
if (status)
return status;
- *s = find_stateid_by_type(cstate->clp, stateid, typemask);
- if (!*s)
+ stid = find_stateid_by_type(cstate->clp, stateid, typemask);
+ if (!stid)
return nfserr_bad_stateid;
- if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
- nfs4_put_stid(*s);
+ if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
+ nfs4_put_stid(stid);
if (cstate->minorversion)
return nfserr_deleg_revoked;
return nfserr_bad_stateid;
}
+ *s = stid;
return nfs_ok;
}

--
2.37.3

2022-09-26 18:11:00

by Chuck Lever

[permalink] [raw]
Subject: Re: [PATCH 3/4] nfsd: make nfsd4_run_cb a bool return function



> On Sep 26, 2022, at 12:38 PM, Jeff Layton <[email protected]> wrote:
>
> ...and mirror the semantics of queue_work. Also, queueing a delegation
> recall should always succeed when queueing, so WARN if one ever doesn't.

The description is not especially clear. It seems like the point
of this patch is really the part after the "Also, ..." Otherwise,
I'm not getting why this change is really needed?

Maybe a tracepoint would be less alarming to users/administrators
than would a WARN with a full stack trace? The kdoc comment you
added (thank you!) suggests there are times when it is OK for the
nfsd4_queue_cb() to fail.


> Signed-off-by: Jeff Layton <[email protected]>
> ---
> fs/nfsd/nfs4callback.c | 14 ++++++++++++--
> fs/nfsd/nfs4state.c | 5 ++---
> fs/nfsd/state.h | 2 +-
> 3 files changed, 15 insertions(+), 6 deletions(-)
>
> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
> index 4ce328209f61..ba904614ebf5 100644
> --- a/fs/nfsd/nfs4callback.c
> +++ b/fs/nfsd/nfs4callback.c
> @@ -1371,11 +1371,21 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> cb->cb_holds_slot = false;
> }
>
> -void nfsd4_run_cb(struct nfsd4_callback *cb)
> +/**
> + * nfsd4_run_cb - queue up a callback job to run
> + * @cb: callback to queue
> + *
> + * Kick off a callback to do its thing. Returns false if it was already
> + * queued or running, true otherwise.
> + */
> +bool nfsd4_run_cb(struct nfsd4_callback *cb)
> {
> + bool queued;
> struct nfs4_client *clp = cb->cb_clp;

Reverse christmas tree, please.


>
> nfsd41_cb_inflight_begin(clp);
> - if (!nfsd4_queue_cb(cb))
> + queued = nfsd4_queue_cb(cb);
> + if (!queued)
> nfsd41_cb_inflight_end(clp);
> + return queued;
> }
> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> index 211f1af1cfb3..90533f43fea9 100644
> --- a/fs/nfsd/nfs4state.c
> +++ b/fs/nfsd/nfs4state.c
> @@ -4861,14 +4861,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
> * we know it's safe to take a reference.
> */
> refcount_inc(&dp->dl_stid.sc_count);
> - nfsd4_run_cb(&dp->dl_recall);
> + WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
> }
>
> /* Called from break_lease() with flc_lock held. */
> static bool
> nfsd_break_deleg_cb(struct file_lock *fl)
> {
> - bool ret = false;
> struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
> struct nfs4_file *fp = dp->dl_stid.sc_file;
> struct nfs4_client *clp = dp->dl_stid.sc_client;
> @@ -4894,7 +4893,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
> fp->fi_had_conflict = true;
> nfsd_break_one_deleg(dp);
> spin_unlock(&fp->fi_lock);
> - return ret;
> + return false;
> }
>
> /**
> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
> index b3477087a9fc..e2daef3cc003 100644
> --- a/fs/nfsd/state.h
> +++ b/fs/nfsd/state.h
> @@ -692,7 +692,7 @@ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
> extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
> extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
> -extern void nfsd4_run_cb(struct nfsd4_callback *cb);
> +extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
> extern int nfsd4_create_callback_queue(void);
> extern void nfsd4_destroy_callback_queue(void);
> extern void nfsd4_shutdown_callback(struct nfs4_client *);
> --
> 2.37.3
>

--
Chuck Lever



2022-09-26 18:38:46

by Jeff Layton

[permalink] [raw]
Subject: Re: [PATCH 3/4] nfsd: make nfsd4_run_cb a bool return function

On Mon, 2022-09-26 at 17:51 +0000, Chuck Lever III wrote:
>
> > On Sep 26, 2022, at 12:38 PM, Jeff Layton <[email protected]> wrote:
> >
> > ...and mirror the semantics of queue_work. Also, queueing a delegation
> > recall should always succeed when queueing, so WARN if one ever doesn't.
>
> The description is not especially clear. It seems like the point
> of this patch is really the part after the "Also, ..." Otherwise,
> I'm not getting why this change is really needed?
>
> Maybe a tracepoint would be less alarming to users/administrators
> than would a WARN with a full stack trace? The kdoc comment you
> added (thank you!) suggests there are times when it is OK for the
> nfsd4_queue_cb() to fail.
>
>

Yeah, that's pretty much the case with the "also". There may be some
other cases where we ought to catch this sort of thing too. We'd likely
never see a tracepoint for this. We'd just notice that there was a
refcount leak.

queue_work can return false without queueing anything if the work is
already queued. In this case, we will have taken an extra reference to
the stid that will never get put. I think this should never happen
because of the flc_lock, but it'd be good to catch it if it does.

That said, I don't feel that strongly about the patch, so if you think
it's not worthwhile, I'm fine with holding off on it.

> > Signed-off-by: Jeff Layton <[email protected]>
> > ---
> > fs/nfsd/nfs4callback.c | 14 ++++++++++++--
> > fs/nfsd/nfs4state.c | 5 ++---
> > fs/nfsd/state.h | 2 +-
> > 3 files changed, 15 insertions(+), 6 deletions(-)
> >
> > diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
> > index 4ce328209f61..ba904614ebf5 100644
> > --- a/fs/nfsd/nfs4callback.c
> > +++ b/fs/nfsd/nfs4callback.c
> > @@ -1371,11 +1371,21 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> > cb->cb_holds_slot = false;
> > }
> >
> > -void nfsd4_run_cb(struct nfsd4_callback *cb)
> > +/**
> > + * nfsd4_run_cb - queue up a callback job to run
> > + * @cb: callback to queue
> > + *
> > + * Kick off a callback to do its thing. Returns false if it was already
> > + * queued or running, true otherwise.
> > + */
> > +bool nfsd4_run_cb(struct nfsd4_callback *cb)
> > {
> > + bool queued;
> > struct nfs4_client *clp = cb->cb_clp;
>
> Reverse christmas tree, please.
>
>
> >
> > nfsd41_cb_inflight_begin(clp);
> > - if (!nfsd4_queue_cb(cb))
> > + queued = nfsd4_queue_cb(cb);
> > + if (!queued)
> > nfsd41_cb_inflight_end(clp);
> > + return queued;
> > }
> > diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
> > index 211f1af1cfb3..90533f43fea9 100644
> > --- a/fs/nfsd/nfs4state.c
> > +++ b/fs/nfsd/nfs4state.c
> > @@ -4861,14 +4861,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
> > * we know it's safe to take a reference.
> > */
> > refcount_inc(&dp->dl_stid.sc_count);
> > - nfsd4_run_cb(&dp->dl_recall);
> > + WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
> > }
> >
> > /* Called from break_lease() with flc_lock held. */
> > static bool
> > nfsd_break_deleg_cb(struct file_lock *fl)
> > {
> > - bool ret = false;
> > struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
> > struct nfs4_file *fp = dp->dl_stid.sc_file;
> > struct nfs4_client *clp = dp->dl_stid.sc_client;
> > @@ -4894,7 +4893,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
> > fp->fi_had_conflict = true;
> > nfsd_break_one_deleg(dp);
> > spin_unlock(&fp->fi_lock);
> > - return ret;
> > + return false;
> > }
> >
> > /**
> > diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
> > index b3477087a9fc..e2daef3cc003 100644
> > --- a/fs/nfsd/state.h
> > +++ b/fs/nfsd/state.h
> > @@ -692,7 +692,7 @@ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
> > extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
> > extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
> > const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
> > -extern void nfsd4_run_cb(struct nfsd4_callback *cb);
> > +extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
> > extern int nfsd4_create_callback_queue(void);
> > extern void nfsd4_destroy_callback_queue(void);
> > extern void nfsd4_shutdown_callback(struct nfs4_client *);
> > --
> > 2.37.3
> >
>
> --
> Chuck Lever
>
>
>

--
Jeff Layton <[email protected]>

2022-09-26 18:38:52

by Chuck Lever

[permalink] [raw]
Subject: Re: [PATCH 3/4] nfsd: make nfsd4_run_cb a bool return function



> On Sep 26, 2022, at 2:24 PM, Jeff Layton <[email protected]> wrote:
>
> On Mon, 2022-09-26 at 17:51 +0000, Chuck Lever III wrote:
>>
>>> On Sep 26, 2022, at 12:38 PM, Jeff Layton <[email protected]> wrote:
>>>
>>> ...and mirror the semantics of queue_work. Also, queueing a delegation
>>> recall should always succeed when queueing, so WARN if one ever doesn't.
>>
>> The description is not especially clear. It seems like the point
>> of this patch is really the part after the "Also, ..." Otherwise,
>> I'm not getting why this change is really needed?
>>
>> Maybe a tracepoint would be less alarming to users/administrators
>> than would a WARN with a full stack trace? The kdoc comment you
>> added (thank you!) suggests there are times when it is OK for the
>> nfsd4_queue_cb() to fail.
>>
>>
>
> Yeah, that's pretty much the case with the "also". There may be some
> other cases where we ought to catch this sort of thing too. We'd likely
> never see a tracepoint for this. We'd just notice that there was a
> refcount leak.
>
> queue_work can return false without queueing anything if the work is
> already queued. In this case, we will have taken an extra reference to
> the stid that will never get put. I think this should never happen
> because of the flc_lock, but it'd be good to catch it if it does.

I've pushed Dai's v3 SSC fix and the first two patches in this series
to the nfsd for-next tree. Can you send me 3/4 and 4/4 again but

- Include the above text in the description of 3/4, and

- rebase 4/4 on top of the latest nfsd for-next

Thanks!


> That said, I don't feel that strongly about the patch, so if you think
> it's not worthwhile, I'm fine with holding off on it.
>
>>> Signed-off-by: Jeff Layton <[email protected]>
>>> ---
>>> fs/nfsd/nfs4callback.c | 14 ++++++++++++--
>>> fs/nfsd/nfs4state.c | 5 ++---
>>> fs/nfsd/state.h | 2 +-
>>> 3 files changed, 15 insertions(+), 6 deletions(-)
>>>
>>> diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
>>> index 4ce328209f61..ba904614ebf5 100644
>>> --- a/fs/nfsd/nfs4callback.c
>>> +++ b/fs/nfsd/nfs4callback.c
>>> @@ -1371,11 +1371,21 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
>>> cb->cb_holds_slot = false;
>>> }
>>>
>>> -void nfsd4_run_cb(struct nfsd4_callback *cb)
>>> +/**
>>> + * nfsd4_run_cb - queue up a callback job to run
>>> + * @cb: callback to queue
>>> + *
>>> + * Kick off a callback to do its thing. Returns false if it was already
>>> + * queued or running, true otherwise.
>>> + */
>>> +bool nfsd4_run_cb(struct nfsd4_callback *cb)
>>> {
>>> + bool queued;
>>> struct nfs4_client *clp = cb->cb_clp;
>>
>> Reverse christmas tree, please.
>>
>>
>>>
>>> nfsd41_cb_inflight_begin(clp);
>>> - if (!nfsd4_queue_cb(cb))
>>> + queued = nfsd4_queue_cb(cb);
>>> + if (!queued)
>>> nfsd41_cb_inflight_end(clp);
>>> + return queued;
>>> }
>>> diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
>>> index 211f1af1cfb3..90533f43fea9 100644
>>> --- a/fs/nfsd/nfs4state.c
>>> +++ b/fs/nfsd/nfs4state.c
>>> @@ -4861,14 +4861,13 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
>>> * we know it's safe to take a reference.
>>> */
>>> refcount_inc(&dp->dl_stid.sc_count);
>>> - nfsd4_run_cb(&dp->dl_recall);
>>> + WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
>>> }
>>>
>>> /* Called from break_lease() with flc_lock held. */
>>> static bool
>>> nfsd_break_deleg_cb(struct file_lock *fl)
>>> {
>>> - bool ret = false;
>>> struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
>>> struct nfs4_file *fp = dp->dl_stid.sc_file;
>>> struct nfs4_client *clp = dp->dl_stid.sc_client;
>>> @@ -4894,7 +4893,7 @@ nfsd_break_deleg_cb(struct file_lock *fl)
>>> fp->fi_had_conflict = true;
>>> nfsd_break_one_deleg(dp);
>>> spin_unlock(&fp->fi_lock);
>>> - return ret;
>>> + return false;
>>> }
>>>
>>> /**
>>> diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
>>> index b3477087a9fc..e2daef3cc003 100644
>>> --- a/fs/nfsd/state.h
>>> +++ b/fs/nfsd/state.h
>>> @@ -692,7 +692,7 @@ extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
>>> extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
>>> extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
>>> const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
>>> -extern void nfsd4_run_cb(struct nfsd4_callback *cb);
>>> +extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
>>> extern int nfsd4_create_callback_queue(void);
>>> extern void nfsd4_destroy_callback_queue(void);
>>> extern void nfsd4_shutdown_callback(struct nfs4_client *);
>>> --
>>> 2.37.3
>>>
>>
>> --
>> Chuck Lever
>>
>>
>>
>
> --
> Jeff Layton <[email protected]>

--
Chuck Lever