In nlm_lookup_host() there's an optimization that looks for matching nsm
handles at the same time as looking for hosts, so that if a host
isn't found but a matching nsm handle is, we save a second lookup for
the nsm handle.
The optimization (unlike nsm_find()) ignores nsm_use_hostnames, so it may
give incorrect results when that parameter is set. We could probably
find some way to fix this and keep the optimization, but it seems
unlikely to be very significant, so just skip it entirely.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 23 +++++++----------------
1 files changed, 7 insertions(+), 16 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 9fd8889..e9069c2 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -138,7 +138,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
struct hlist_head *chain;
struct hlist_node *pos;
struct nlm_host *host;
- struct nsm_handle *nsm = NULL;
+ struct nsm_handle *nsm;
mutex_lock(&nlm_host_mutex);
@@ -157,10 +157,6 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
if (!nlm_cmp_addr(nlm_addr(host), ni->sap))
continue;
- /* See if we have an NSM handle for this client */
- if (!nsm)
- nsm = host->h_nsmhandle;
-
if (host->h_proto != ni->protocol)
continue;
if (host->h_version != ni->version)
@@ -184,17 +180,12 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
* The host wasn't in our hash table. If we don't
* have an NSM handle for it yet, create one.
*/
- if (nsm)
- atomic_inc(&nsm->sm_count);
- else {
- host = NULL;
- nsm = nsm_find(ni->sap, ni->salen,
- ni->hostname, ni->hostname_len, 1);
- if (!nsm) {
- dprintk("lockd: nlm_lookup_host failed; "
- "no nsm handle\n");
- goto out;
- }
+ host = NULL;
+ nsm = nsm_find(ni->sap, ni->salen, ni->hostname, ni->hostname_len, 1);
+ if (!nsm) {
+ dprintk("lockd: nlm_lookup_host failed; "
+ "no nsm handle\n");
+ goto out;
}
host = kzalloc(sizeof(*host), GFP_KERNEL);
--
1.5.5.rc1
On Nov 5, 2008, at 3:06 PM, J. Bruce Fields wrote:
> In nlm_lookup_host() there's an optimization that looks for matching nsm
> handles at the same time as looking for hosts, so that if a host
> isn't found but a matching nsm handle is, we save a second lookup for
> the nsm handle.
>
> The optimization (unlike nsm_find()) ignores nsm_use_hostnames, so it may
> give incorrect results when that parameter is set.
I don't think that's a problem here. There's only one nsm_handle per
peer address. "nsm_use_hostnames" determines how to communicate with
the local statd, so it shouldn't make any difference how we pick an
nsm_handle that matches this nlm_host.
> We could probably
> find some way to fix this and keep the optimization, but it seems
> unlikely to be very significant, so just skip it entirely.
>
> Signed-off-by: J. Bruce Fields <[email protected]>
> ---
> fs/lockd/host.c | 23 +++++++----------------
> 1 files changed, 7 insertions(+), 16 deletions(-)
>
> diff --git a/fs/lockd/host.c b/fs/lockd/host.c
> index 9fd8889..e9069c2 100644
> --- a/fs/lockd/host.c
> +++ b/fs/lockd/host.c
> @@ -138,7 +138,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
> struct hlist_head *chain;
> struct hlist_node *pos;
> struct nlm_host *host;
> - struct nsm_handle *nsm = NULL;
> + struct nsm_handle *nsm;
>
> mutex_lock(&nlm_host_mutex);
>
> @@ -157,10 +157,6 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
> if (!nlm_cmp_addr(nlm_addr(host), ni->sap))
> continue;
>
> - /* See if we have an NSM handle for this client */
> - if (!nsm)
> - nsm = host->h_nsmhandle;
> -
> if (host->h_proto != ni->protocol)
> continue;
> if (host->h_version != ni->version)
> @@ -184,17 +180,12 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
> * The host wasn't in our hash table. If we don't
> * have an NSM handle for it yet, create one.
> */
> - if (nsm)
> - atomic_inc(&nsm->sm_count);
I've always found this slightly confusing. If we grab the nsm handle
above in the loop, shouldn't we bump the refcount there?
Anyway, I like the idea of having only one way to acquire the
nsm_handle for this host, exactly because it keeps our refcount
management more straightforward.
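If we did keep the optimization, the reference would presumably have to
be taken right where the pointer is grabbed, something like this
(untested sketch, not a proposal):

	/* See if we have an NSM handle for this client */
	if (!nsm && host->h_nsmhandle) {
		nsm = host->h_nsmhandle;
		atomic_inc(&nsm->sm_count);	/* take our reference here */
	}

and then dropped again with nsm_release() if a matching host does turn
up later in the loop. Dropping the optimization sidesteps all of that.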
>
> - else {
> - host = NULL;
> - nsm = nsm_find(ni->sap, ni->salen,
> - ni->hostname, ni->hostname_len, 1);
> - if (!nsm) {
> - dprintk("lockd: nlm_lookup_host failed; "
> - "no nsm handle\n");
> - goto out;
> - }
> + host = NULL;
> + nsm = nsm_find(ni->sap, ni->salen, ni->hostname, ni->hostname_len, 1);
> + if (!nsm) {
I've tried to stick with (nsm == NULL) instead, since nsm isn't a
boolean, it's a pointer.
>
> + dprintk("lockd: nlm_lookup_host failed; "
> + "no nsm handle\n");
> + goto out;
> }
>
> host = kzalloc(sizeof(*host), GFP_KERNEL);
>
> --
> 1.5.5.rc1
>
--
Chuck Lever
chuck[dot]lever[at]oracle[dot]com
On Nov 5, 2008, at 3:06 PM, J. Bruce Fields wrote:
> Note that the only caller of nsm_unmonitor has no use for its return
> value.
I noticed this too.
It might be cleaner if we passed the nsm_handle by itself as the
argument to nsm_{un}monitor. h_name actually points to the name in
the nsm_handle.
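I.e. something like this (untested sketch of the signatures only; the
callers would pass host->h_nsmhandle and keep the NULL check
themselves):

	int nsm_monitor(struct nsm_handle *nsm);
	void nsm_unmonitor(struct nsm_handle *nsm);

The dprintk/printk messages could then report nsm->sm_name rather than
host->h_name.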
>
>
> Signed-off-by: J. Bruce Fields <[email protected]>
> ---
> fs/lockd/mon.c | 8 +++-----
> include/linux/lockd/sm_inter.h | 2 +-
> 2 files changed, 4 insertions(+), 6 deletions(-)
>
> diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
> index 4e7e958..480f197 100644
> --- a/fs/lockd/mon.c
> +++ b/fs/lockd/mon.c
> @@ -97,15 +97,14 @@ nsm_monitor(struct nlm_host *host)
> /*
> * Cease to monitor remote host
> */
> -int
> -nsm_unmonitor(struct nlm_host *host)
> +void nsm_unmonitor(struct nlm_host *host)
> {
> struct nsm_handle *nsm = host->h_nsmhandle;
> struct nsm_res res;
> - int status = 0;
> + int status;
>
> if (nsm == NULL)
> - return 0;
> + return;
> host->h_nsmhandle = NULL;
>
> if (atomic_read(&nsm->sm_count) == 1
> @@ -120,7 +119,6 @@ nsm_unmonitor(struct nlm_host *host)
> nsm->sm_monitored = 0;
> }
> nsm_release(nsm);
> - return status;
> }
>
> /*
> diff --git a/include/linux/lockd/sm_inter.h b/include/linux/lockd/sm_inter.h
> index 5a5448b..b069598 100644
> --- a/include/linux/lockd/sm_inter.h
> +++ b/include/linux/lockd/sm_inter.h
> @@ -42,7 +42,7 @@ struct nsm_res {
> };
>
> int nsm_monitor(struct nlm_host *);
> -int nsm_unmonitor(struct nlm_host *);
> +void nsm_unmonitor(struct nlm_host *);
> extern int nsm_local_state;
>
> #endif /* LINUX_LOCKD_SM_INTER_H */
> --
> 1.5.5.rc1
>
--
Chuck Lever
chuck[dot]lever[at]oracle[dot]com
On Nov 5, 2008, at 3:06 PM, J. Bruce Fields wrote:
> The sm_sticky logic seems racy;
Can you describe what races you suspect?
> instead just unmonitor before destruction of handle.
>
> XXX: how to test?
Would this allow the nsm_handle cache to grow large on long-running
hosts?
Instead of sm_sticky, maybe we should put a "don't unmonitor" flag in
the nlm_host structure instead.
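Something like this (sketch; the field name is just a strawman):

	struct nlm_host {
		/* ... existing fields ... */
		unsigned int	h_noumonitor : 1;	/* don't unmonitor on release */
	};

nlmsvc_is_client() would set the flag on the host itself and
nsm_unmonitor() would test it, so the hint couldn't leak to other
nlm_hosts that happen to share the same nsm_handle.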
> Signed-off-by: J. Bruce Fields <[email protected]>
> ---
> fs/lockd/host.c | 24 ++++++++++++++++++++----
> fs/lockd/mon.c | 15 +--------------
> fs/lockd/svcsubs.c | 6 ------
> include/linux/lockd/lockd.h | 3 +--
> 4 files changed, 22 insertions(+), 26 deletions(-)
>
> diff --git a/fs/lockd/host.c b/fs/lockd/host.c
> index e9069c2..1a1adb9 100644
> --- a/fs/lockd/host.c
> +++ b/fs/lockd/host.c
> @@ -683,6 +683,10 @@ found:
> return nsm;
> }
>
> +
> +/* XXX: */
> +int nsm_mon_unmon(struct nsm_handle *, u32, struct nsm_res *);
> +
> /*
> * Release an NSM handle
> */
> @@ -691,9 +695,21 @@ nsm_release(struct nsm_handle *nsm)
> {
> if (!nsm)
> return;
> - if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
> - list_del(&nsm->sm_link);
> - spin_unlock(&nsm_lock);
> - kfree(nsm);
> + if (!atomic_dec_and_lock(&nsm->sm_count, &nsm_lock))
> + return;
> + list_del(&nsm->sm_link);
> + spin_unlock(&nsm_lock);
> + if (nsm->sm_monitored) {
> + struct nsm_res res;
> + int status;
> +
> + dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
> + status = nsm_mon_unmon(nsm, SM_UNMON, &res);
> + if (status < 0)
> + printk(KERN_NOTICE "lockd: cannot unmonitor %s\n",
> + nsm->sm_name);
> + else
> + nsm->sm_monitored = 0;
> }
> + kfree(nsm);
> }
> diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
> index 480f197..8cc0e97 100644
> --- a/fs/lockd/mon.c
> +++ b/fs/lockd/mon.c
> @@ -32,7 +32,7 @@ int nsm_local_state;
> /*
> * Common procedure for SM_MON/SM_UNMON calls
> */
> -static int
> +int
> nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
> {
> struct rpc_clnt *clnt;
>
> @@ -100,24 +100,11 @@ nsm_monitor(struct nlm_host *host)
> void nsm_unmonitor(struct nlm_host *host)
> {
> struct nsm_handle *nsm = host->h_nsmhandle;
> - struct nsm_res res;
> - int status;
>
> if (nsm == NULL)
> return;
> host->h_nsmhandle = NULL;
>
> - if (atomic_read(&nsm->sm_count) == 1
> - && nsm->sm_monitored && !nsm->sm_sticky) {
> - dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);
> -
> - status = nsm_mon_unmon(nsm, SM_UNMON, &res);
> - if (status < 0)
> - printk(KERN_NOTICE "lockd: cannot unmonitor %s\n",
> - host->h_name);
> - else
> - nsm->sm_monitored = 0;
> - }
> nsm_release(nsm);
> }
>
> diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
> index 9c186f0..0296904 100644
> --- a/fs/lockd/svcsubs.c
> +++ b/fs/lockd/svcsubs.c
> @@ -336,12 +336,6 @@ nlmsvc_is_client(void *data, struct nlm_host *dummy)
> struct nlm_host *host = data;
>
> BUG_ON(!host->h_server);
> - /* we are destroying locks even though the client
> - * hasn't asked us too, so don't unmonitor the
> - * client
> - */
> - if (host->h_nsmhandle)
> - host->h_nsmhandle->sm_sticky = 1;
> return 1;
>
> }
> diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
> index b56d5aa..e1c2690 100644
> --- a/include/linux/lockd/lockd.h
> +++ b/include/linux/lockd/lockd.h
> @@ -75,8 +75,7 @@ struct nsm_handle {
> char * sm_name;
> struct sockaddr_storage sm_addr;
> size_t sm_addrlen;
> - unsigned int sm_monitored : 1,
> - sm_sticky : 1; /* don't unmonitor */
> + unsigned int sm_monitored : 1;
> char sm_addrbuf[48]; /* address eyecatcher */
> };
>
> --
> 1.5.5.rc1
>
--
Chuck Lever
chuck[dot]lever[at]oracle[dot]com
On Nov 5, 2008, at 3:06 PM, J. Bruce Fields wrote:
> This will simplify splitting these variables into client and server
> sides.
>
> Signed-off-by: J. Bruce Fields <[email protected]>
> ---
> fs/lockd/host.c | 46 ++++++++++++++++++++++++++--------------------
> 1 files changed, 26 insertions(+), 20 deletions(-)
>
> diff --git a/fs/lockd/host.c b/fs/lockd/host.c
> index 1a1adb9..199ca8c 100644
> --- a/fs/lockd/host.c
> +++ b/fs/lockd/host.c
> @@ -26,10 +26,16 @@
> #define NLM_HOST_EXPIRE (300 * HZ)
> #define NLM_HOST_COLLECT (120 * HZ)
>
> -static struct hlist_head nlm_hosts[NLM_HOST_NRHASH];
> +struct host_table {
> + struct hlist_head ht_chains[NLM_HOST_NRHASH];
> + int ht_num;
> + struct mutex ht_mutex;
> +};
> +
> +static struct host_table nlm_hosts = {
> + .ht_mutex = __MUTEX_INITIALIZER(nlm_hosts.ht_mutex)
> +};
> static unsigned long next_gc;
I also moved next_gc into the host_table struct in my version of these
patches. Do we want to garbage collect the client and server host
caches separately? I'm not sure.
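In my version that looks roughly like this (sketch; ht_next_gc is just
my name for it):

	struct host_table {
		struct hlist_head	ht_chains[NLM_HOST_NRHASH];
		int			ht_num;
		struct mutex		ht_mutex;
		unsigned long		ht_next_gc;	/* per-table gc deadline */
	};

so each table would age out its hosts on its own schedule.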
>
> -static int nrhosts;
> -static DEFINE_MUTEX(nlm_host_mutex);
>
> static void nlm_gc_hosts(void);
> static struct nsm_handle *nsm_find(const struct sockaddr *sap,
> @@ -140,7 +146,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
> struct nlm_host *host;
> struct nsm_handle *nsm;
>
> - mutex_lock(&nlm_host_mutex);
> + mutex_lock(&nlm_hosts.ht_mutex);
>
> if (time_after_eq(jiffies, next_gc))
> nlm_gc_hosts();
> @@ -152,7 +158,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
> * different NLM rpc_clients into one single nlm_host object.
> * This would allow us to have one nlm_host per address.
> */
> - chain = &nlm_hosts[nlm_hash_address(ni->sap)];
> + chain = &nlm_hosts.ht_chains[nlm_hash_address(ni->sap)];
> hlist_for_each_entry(host, pos, chain, h_hash) {
> if (!nlm_cmp_addr(nlm_addr(host), ni->sap))
> continue;
> @@ -218,7 +224,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
> INIT_LIST_HEAD(&host->h_granted);
> INIT_LIST_HEAD(&host->h_reclaim);
>
> - nrhosts++;
> + nlm_hosts.ht_num++;
>
> nlm_display_address((struct sockaddr *)&host->h_addr,
> host->h_addrbuf, sizeof(host->h_addrbuf));
> @@ -229,7 +235,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
> host->h_name);
>
> out:
> - mutex_unlock(&nlm_host_mutex);
> + mutex_unlock(&nlm_hosts.ht_mutex);
> return host;
> }
>
> @@ -498,8 +504,8 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
> * lock for this.
> * To avoid processing a host several times, we match the nsmstate.
> */
> -again: mutex_lock(&nlm_host_mutex);
> - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
> +again: mutex_lock(&nlm_hosts.ht_mutex);
> + for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
> hlist_for_each_entry(host, pos, chain, h_hash) {
> if (host->h_nsmhandle == nsm
> && host->h_nsmstate != new_state) {
> @@ -507,7 +513,7 @@ again: mutex_lock(&nlm_host_mutex);
> host->h_state++;
>
> nlm_get_host(host);
> - mutex_unlock(&nlm_host_mutex);
> + mutex_unlock(&nlm_hosts.ht_mutex);
>
> if (host->h_server) {
> /* We're server for this guy, just ditch
> @@ -524,7 +530,7 @@ again: mutex_lock(&nlm_host_mutex);
> }
> }
>
> - mutex_unlock(&nlm_host_mutex);
> + mutex_unlock(&nlm_hosts.ht_mutex);
> }
>
> /*
> @@ -539,11 +545,11 @@ nlm_shutdown_hosts(void)
> struct nlm_host *host;
>
> dprintk("lockd: shutting down host module\n");
> - mutex_lock(&nlm_host_mutex);
> + mutex_lock(&nlm_hosts.ht_mutex);
>
> /* First, make all hosts eligible for gc */
> dprintk("lockd: nuking all hosts...\n");
> - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
> + for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
> hlist_for_each_entry(host, pos, chain, h_hash) {
> host->h_expires = jiffies - 1;
> if (host->h_rpcclnt) {
> @@ -555,13 +561,13 @@ nlm_shutdown_hosts(void)
>
> /* Then, perform a garbage collection pass */
> nlm_gc_hosts();
> - mutex_unlock(&nlm_host_mutex);
> + mutex_unlock(&nlm_hosts.ht_mutex);
>
> /* complain if any hosts are left */
> - if (nrhosts) {
> + if (nlm_hosts.ht_num) {
> printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
> - dprintk("lockd: %d hosts left:\n", nrhosts);
> - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
> + dprintk("lockd: %d hosts left:\n", nlm_hosts.ht_num);
> + for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
> hlist_for_each_entry(host, pos, chain, h_hash) {
> dprintk(" %s (cnt %d use %d exp %ld)\n",
> host->h_name, atomic_read(&host->h_count),
> @@ -584,7 +590,7 @@ nlm_gc_hosts(void)
> struct nlm_host *host;
>
> dprintk("lockd: host garbage collection\n");
> - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
> + for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
> hlist_for_each_entry(host, pos, chain, h_hash)
> host->h_inuse = 0;
> }
> @@ -592,7 +598,7 @@ nlm_gc_hosts(void)
> /* Mark all hosts that hold locks, blocks or shares */
> nlmsvc_mark_resources();
>
> - for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
> + for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
> hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
> if (atomic_read(&host->h_count) || host->h_inuse
> || time_before(jiffies, host->h_expires)) {
> @@ -605,7 +611,7 @@ nlm_gc_hosts(void)
> hlist_del_init(&host->h_hash);
>
> nlm_destroy_host(host);
> - nrhosts--;
> + nlm_hosts.ht_num--;
> }
> }
>
> --
> 1.5.5.rc1
>
--
Chuck Lever
chuck[dot]lever[at]oracle[dot]com
On Nov 5, 2008, at 3:06 PM, J. Bruce Fields wrote:
> Minor reorganization; no change in behavior. This will save some
> duplicated code after we split apart the client and server host lists.
>
> Signed-off-by: J. Bruce Fields <[email protected]>
> ---
> fs/lockd/host.c | 59 +++++++++++++++++++++++++++++++-----------------------
> 1 files changed, 34 insertions(+), 25 deletions(-)
>
> diff --git a/fs/lockd/host.c b/fs/lockd/host.c
> index 1b90b49..bb3bf36 100644
> --- a/fs/lockd/host.c
> +++ b/fs/lockd/host.c
> @@ -482,6 +482,31 @@ void nlm_release_host(struct nlm_host *host)
> }
> }
>
> +static struct nlm_host *next_host_state(struct host_table *table,
> + struct nsm_handle *nsm,
> + u32 new_state)
> +{
> + struct hlist_head *chain;
> + struct hlist_node *pos;
> + struct nlm_host *host = NULL;
> +
> + mutex_lock(&table->ht_mutex);
> + for_each_host(host, pos, chain, table) {
> + if (host->h_nsmhandle == nsm
> + && host->h_nsmstate != new_state) {
> + host->h_nsmstate = new_state;
> + host->h_state++;
> +
> + nlm_get_host(host);
> + mutex_unlock(&table->ht_mutex);
> + goto out;
If you exit here, you end up calling mutex_unlock() twice. Instead of
"goto out;", you could just "return host;"
>
> + }
> + }
> +out:
> + mutex_unlock(&table->ht_mutex);
> + return host;
> +}
>
> +
> /*
> * We were notified that the host indicated by address &sin
> * has rebooted.
> @@ -492,8 +517,6 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
> unsigned int hostname_len,
> u32 new_state)
> {
> - struct hlist_head *chain;
> - struct hlist_node *pos;
> struct nsm_handle *nsm;
> struct nlm_host *host;
>
> @@ -517,31 +540,17 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
> * lock for this.
> * To avoid processing a host several times, we match the nsmstate.
> */
> -again: mutex_lock(&nlm_hosts.ht_mutex);
> - for_each_host(host, pos, chain, &nlm_hosts) {
> - if (host->h_nsmhandle == nsm
> - && host->h_nsmstate != new_state) {
> - host->h_nsmstate = new_state;
> - host->h_state++;
> -
> - nlm_get_host(host);
> - mutex_unlock(&nlm_hosts.ht_mutex);
> -
> - if (host->h_server) {
> - /* We're server for this guy, just ditch
> - * all the locks he held. */
> - nlmsvc_free_host_resources(host);
> - } else {
> - /* He's the server, initiate lock recovery. */
> - nlmclnt_recovery(host);
> - }
> -
> - nlm_release_host(host);
> - goto again;
> + while ((host = next_host_state(&nlm_hosts, nsm, new_state)) != NULL) {
> + if (host->h_server) {
> + /* We're server for this guy, just ditch
> + * all the locks he held. */
> + nlmsvc_free_host_resources(host);
> + } else {
> + /* He's the server, initiate lock recovery. */
> + nlmclnt_recovery(host);
> }
> + nlm_release_host(host);
> }
> -
> - mutex_unlock(&nlm_hosts.ht_mutex);
> }
>
> /*
> --
> 1.5.5.rc1
>
--
Chuck Lever
chuck[dot]lever[at]oracle[dot]com
Not that this is a performance-critical path...
But it might be cleaner to create a helper that garbage collects a
single host, and then call that routine from a loop in both
nlm_shutdown_hosts() and nlm_gc_hosts(), instead of using a two pass
shutdown.
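Something like this (sketch; the helper name is a strawman), reusing
the tests the gc loop already does, and called from a
for_each_host_safe() walk:

	/* Reap one host if nothing pins it; returns true if reaped. */
	static int nlm_gc_host(struct host_table *table, struct nlm_host *host)
	{
		if (atomic_read(&host->h_count) || host->h_inuse
		    || time_before(jiffies, host->h_expires))
			return 0;
		hlist_del_init(&host->h_hash);
		nlm_destroy_host(host);
		table->ht_num--;
		return 1;
	}

nlm_shutdown_hosts() could then expire everything and walk the table
once with this, instead of going through nlm_gc_hosts().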
On Nov 5, 2008, at 3:06 PM, J. Bruce Fields wrote:
> This code will later be shared.
>
> Signed-off-by: J. Bruce Fields <[email protected]>
> ---
> fs/lockd/host.c | 27 ++++++++++++++++-----------
> 1 files changed, 16 insertions(+), 11 deletions(-)
>
> diff --git a/fs/lockd/host.c b/fs/lockd/host.c
> index fa006af..588017f 100644
> --- a/fs/lockd/host.c
> +++ b/fs/lockd/host.c
> @@ -593,29 +593,34 @@ static void warn_host_leak(struct host_table *table)
> }
> }
>
> -/*
> - * Shut down the hosts module.
> - * Note that this routine is called only at server shutdown time.
> - */
> -void
> -nlm_shutdown_hosts(void)
> +static void expire_hosts(struct host_table *table)
> {
> struct hlist_head *chain;
> struct hlist_node *pos;
> struct nlm_host *host;
>
> - dprintk("lockd: shutting down host module\n");
> - mutex_lock(&nlm_hosts.ht_mutex);
> -
> - /* First, make all hosts eligible for gc */
> dprintk("lockd: nuking all hosts...\n");
> - for_each_host(host, pos, chain, &nlm_hosts) {
> + for_each_host(host, pos, chain, table) {
> host->h_expires = jiffies - 1;
> if (host->h_rpcclnt) {
> rpc_shutdown_client(host->h_rpcclnt);
> host->h_rpcclnt = NULL;
> }
> }
> +}
> +
> +/*
> + * Shut down the hosts module.
> + * Note that this routine is called only at server shutdown time.
> + */
> +void
> +nlm_shutdown_hosts(void)
> +{
> + dprintk("lockd: shutting down host module\n");
> + mutex_lock(&nlm_hosts.ht_mutex);
> +
> + /* First, make all hosts eligible for gc */
> + expire_hosts(&nlm_hosts);
>
> /* Then, perform a garbage collection pass */
> nlm_gc_hosts();
> --
> 1.5.5.rc1
>
--
Chuck Lever
chuck[dot]lever[at]oracle[dot]com
On Wed, Nov 05, 2008 at 05:25:40PM -0500, Chuck Lever wrote:
> On Nov 5, 2008, at 3:06 PM, J. Bruce Fields wrote:
>> This will simplify splitting these variables into client and server
>> sides.
>>
>> Signed-off-by: J. Bruce Fields <[email protected]>
>> ---
>> fs/lockd/host.c | 46 ++++++++++++++++++++++++++--------------------
>> 1 files changed, 26 insertions(+), 20 deletions(-)
>>
>> diff --git a/fs/lockd/host.c b/fs/lockd/host.c
>> index 1a1adb9..199ca8c 100644
>> --- a/fs/lockd/host.c
>> +++ b/fs/lockd/host.c
>> @@ -26,10 +26,16 @@
>> #define NLM_HOST_EXPIRE (300 * HZ)
>> #define NLM_HOST_COLLECT (120 * HZ)
>>
>> -static struct hlist_head nlm_hosts[NLM_HOST_NRHASH];
>> +struct host_table {
>> + struct hlist_head ht_chains[NLM_HOST_NRHASH];
>> + int ht_num;
>> + struct mutex ht_mutex;
>> +};
>> +
>> +static struct host_table nlm_hosts = {
>> + .ht_mutex = __MUTEX_INITIALIZER(nlm_hosts.ht_mutex)
>> +};
>> static unsigned long next_gc;
>
> I also moved next_gc into the host_table struct in my version of these
> patches. Do we want to garbage collect the client and server host
> caches separately? I'm not sure.
I always figured we should get rid of the garbage collection entirely.
--b.
On Wed, 2008-11-05 at 17:41 -0500, J. Bruce Fields wrote:
> I always figured we should get rid of the garbage collection entirely.
That would lead to NSM_MON/UNMON storms on an insane scale if you have
an application like a mail server that is constantly locking and
unlocking files.
Trond
On Wed, 5 Nov 2008 15:06:51 -0500
"J. Bruce Fields" <[email protected]> wrote:
> These clients will all be shut down by nlm_destroy_host() when we do
> garbage collection a little later, so this is redundant.
>
> XXX: Ask Jeff Layton why he added this again?
>
> Signed-off-by: J. Bruce Fields <[email protected]>
> ---
> fs/lockd/host.c | 8 +-------
> 1 files changed, 1 insertions(+), 7 deletions(-)
>
> diff --git a/fs/lockd/host.c b/fs/lockd/host.c
> index 73c2be2..0387c6b 100644
> --- a/fs/lockd/host.c
> +++ b/fs/lockd/host.c
> @@ -602,14 +602,8 @@ static void expire_hosts(struct host_table *table)
> struct hlist_node *pos;
> struct nlm_host *host;
>
> - dprintk("lockd: nuking all hosts...\n");
> - for_each_host(host, pos, chain, table) {
> + for_each_host(host, pos, chain, table)
> host->h_expires = jiffies - 1;
> - if (host->h_rpcclnt) {
> - rpc_shutdown_client(host->h_rpcclnt);
> - host->h_rpcclnt = NULL;
> - }
> - }
> }
>
> /*
Thank goodness for my OC commenting in the BZ I was using to track this!
https://bugzilla.redhat.com/show_bug.cgi?id=254195#c4
IIRC, there is a chicken and egg problem with refcounting -- at
least in the code at the time that I did this. If there is still
an active grant callback in queue at the time that nlm_shutdown_hosts
is called, the h_count will stay high and nlm_destroy_host won't be
called. This can happen if we try to bring down lockd while trying to
do a grant callback to an unresponsive client.
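For reference, it's the skip test in the gc loop that keeps such a
host pinned:

	if (atomic_read(&host->h_count) || host->h_inuse
	    || time_before(jiffies, host->h_expires))
		continue;	/* h_count held by the queued grant callback */

so without the rpc_shutdown_client() call to kill the outstanding RPC,
the reference never drops and nlm_destroy_host() never runs.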
--
Jeff Layton <[email protected]>
Note that the first argument to nlmsvc_is_client always has h_server
set, since it is the host associated with a lock, block, or share held
by the server.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/svcsubs.c | 19 +++++++++----------
1 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 34c2766..9c186f0 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -335,16 +335,15 @@ nlmsvc_is_client(void *data, struct nlm_host *dummy)
{
struct nlm_host *host = data;
- if (host->h_server) {
- /* we are destroying locks even though the client
- * hasn't asked us too, so don't unmonitor the
- * client
- */
- if (host->h_nsmhandle)
- host->h_nsmhandle->sm_sticky = 1;
- return 1;
- } else
- return 0;
+ BUG_ON(!host->h_server);
+ /* we are destroying locks even though the client
+ * hasn't asked us too, so don't unmonitor the
+ * client
+ */
+ if (host->h_nsmhandle)
+ host->h_nsmhandle->sm_sticky = 1;
+ return 1;
+
}
/*
--
1.5.5.rc1
Note that the only caller of nsm_unmonitor has no use for its return
value.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/mon.c | 8 +++-----
include/linux/lockd/sm_inter.h | 2 +-
2 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 4e7e958..480f197 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -97,15 +97,14 @@ nsm_monitor(struct nlm_host *host)
/*
* Cease to monitor remote host
*/
-int
-nsm_unmonitor(struct nlm_host *host)
+void nsm_unmonitor(struct nlm_host *host)
{
struct nsm_handle *nsm = host->h_nsmhandle;
struct nsm_res res;
- int status = 0;
+ int status;
if (nsm == NULL)
- return 0;
+ return;
host->h_nsmhandle = NULL;
if (atomic_read(&nsm->sm_count) == 1
@@ -120,7 +119,6 @@ nsm_unmonitor(struct nlm_host *host)
nsm->sm_monitored = 0;
}
nsm_release(nsm);
- return status;
}
/*
diff --git a/include/linux/lockd/sm_inter.h b/include/linux/lockd/sm_inter.h
index 5a5448b..b069598 100644
--- a/include/linux/lockd/sm_inter.h
+++ b/include/linux/lockd/sm_inter.h
@@ -42,7 +42,7 @@ struct nsm_res {
};
int nsm_monitor(struct nlm_host *);
-int nsm_unmonitor(struct nlm_host *);
+void nsm_unmonitor(struct nlm_host *);
extern int nsm_local_state;
#endif /* LINUX_LOCKD_SM_INTER_H */
--
1.5.5.rc1
This will simplify splitting these variables into client and server
sides.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 46 ++++++++++++++++++++++++++--------------------
1 files changed, 26 insertions(+), 20 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 1a1adb9..199ca8c 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -26,10 +26,16 @@
#define NLM_HOST_EXPIRE (300 * HZ)
#define NLM_HOST_COLLECT (120 * HZ)
-static struct hlist_head nlm_hosts[NLM_HOST_NRHASH];
+struct host_table {
+ struct hlist_head ht_chains[NLM_HOST_NRHASH];
+ int ht_num;
+ struct mutex ht_mutex;
+};
+
+static struct host_table nlm_hosts = {
+ .ht_mutex = __MUTEX_INITIALIZER(nlm_hosts.ht_mutex)
+};
static unsigned long next_gc;
-static int nrhosts;
-static DEFINE_MUTEX(nlm_host_mutex);
static void nlm_gc_hosts(void);
static struct nsm_handle *nsm_find(const struct sockaddr *sap,
@@ -140,7 +146,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
struct nlm_host *host;
struct nsm_handle *nsm;
- mutex_lock(&nlm_host_mutex);
+ mutex_lock(&nlm_hosts.ht_mutex);
if (time_after_eq(jiffies, next_gc))
nlm_gc_hosts();
@@ -152,7 +158,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
* different NLM rpc_clients into one single nlm_host object.
* This would allow us to have one nlm_host per address.
*/
- chain = &nlm_hosts[nlm_hash_address(ni->sap)];
+ chain = &nlm_hosts.ht_chains[nlm_hash_address(ni->sap)];
hlist_for_each_entry(host, pos, chain, h_hash) {
if (!nlm_cmp_addr(nlm_addr(host), ni->sap))
continue;
@@ -218,7 +224,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
INIT_LIST_HEAD(&host->h_granted);
INIT_LIST_HEAD(&host->h_reclaim);
- nrhosts++;
+ nlm_hosts.ht_num++;
nlm_display_address((struct sockaddr *)&host->h_addr,
host->h_addrbuf, sizeof(host->h_addrbuf));
@@ -229,7 +235,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
host->h_name);
out:
- mutex_unlock(&nlm_host_mutex);
+ mutex_unlock(&nlm_hosts.ht_mutex);
return host;
}
@@ -498,8 +504,8 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
* lock for this.
* To avoid processing a host several times, we match the nsmstate.
*/
-again: mutex_lock(&nlm_host_mutex);
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+again: mutex_lock(&nlm_hosts.ht_mutex);
+ for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
hlist_for_each_entry(host, pos, chain, h_hash) {
if (host->h_nsmhandle == nsm
&& host->h_nsmstate != new_state) {
@@ -507,7 +513,7 @@ again: mutex_lock(&nlm_host_mutex);
host->h_state++;
nlm_get_host(host);
- mutex_unlock(&nlm_host_mutex);
+ mutex_unlock(&nlm_hosts.ht_mutex);
if (host->h_server) {
/* We're server for this guy, just ditch
@@ -524,7 +530,7 @@ again: mutex_lock(&nlm_host_mutex);
}
}
- mutex_unlock(&nlm_host_mutex);
+ mutex_unlock(&nlm_hosts.ht_mutex);
}
/*
@@ -539,11 +545,11 @@ nlm_shutdown_hosts(void)
struct nlm_host *host;
dprintk("lockd: shutting down host module\n");
- mutex_lock(&nlm_host_mutex);
+ mutex_lock(&nlm_hosts.ht_mutex);
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts...\n");
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+ for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
hlist_for_each_entry(host, pos, chain, h_hash) {
host->h_expires = jiffies - 1;
if (host->h_rpcclnt) {
@@ -555,13 +561,13 @@ nlm_shutdown_hosts(void)
/* Then, perform a garbage collection pass */
nlm_gc_hosts();
- mutex_unlock(&nlm_host_mutex);
+ mutex_unlock(&nlm_hosts.ht_mutex);
/* complain if any hosts are left */
- if (nrhosts) {
+ if (nlm_hosts.ht_num) {
printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
- dprintk("lockd: %d hosts left:\n", nrhosts);
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+ dprintk("lockd: %d hosts left:\n", nlm_hosts.ht_num);
+ for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
hlist_for_each_entry(host, pos, chain, h_hash) {
dprintk(" %s (cnt %d use %d exp %ld)\n",
host->h_name, atomic_read(&host->h_count),
@@ -584,7 +590,7 @@ nlm_gc_hosts(void)
struct nlm_host *host;
dprintk("lockd: host garbage collection\n");
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+ for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
hlist_for_each_entry(host, pos, chain, h_hash)
host->h_inuse = 0;
}
@@ -592,7 +598,7 @@ nlm_gc_hosts(void)
/* Mark all hosts that hold locks, blocks or shares */
nlmsvc_mark_resources();
- for (chain = nlm_hosts; chain < nlm_hosts + NLM_HOST_NRHASH; ++chain) {
+ for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
if (atomic_read(&host->h_count) || host->h_inuse
|| time_before(jiffies, host->h_expires)) {
@@ -605,7 +611,7 @@ nlm_gc_hosts(void)
hlist_del_init(&host->h_hash);
nlm_destroy_host(host);
- nrhosts--;
+ nlm_hosts.ht_num--;
}
}
--
1.5.5.rc1
This code will later be shared.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 27 ++++++++++++++++-----------
1 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index fa006af..588017f 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -593,29 +593,34 @@ static void warn_host_leak(struct host_table *table)
}
}
-/*
- * Shut down the hosts module.
- * Note that this routine is called only at server shutdown time.
- */
-void
-nlm_shutdown_hosts(void)
+static void expire_hosts(struct host_table *table)
{
struct hlist_head *chain;
struct hlist_node *pos;
struct nlm_host *host;
- dprintk("lockd: shutting down host module\n");
- mutex_lock(&nlm_hosts.ht_mutex);
-
- /* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts...\n");
- for_each_host(host, pos, chain, &nlm_hosts) {
+ for_each_host(host, pos, chain, table) {
host->h_expires = jiffies - 1;
if (host->h_rpcclnt) {
rpc_shutdown_client(host->h_rpcclnt);
host->h_rpcclnt = NULL;
}
}
+}
+
+/*
+ * Shut down the hosts module.
+ * Note that this routine is called only at server shutdown time.
+ */
+void
+nlm_shutdown_hosts(void)
+{
+ dprintk("lockd: shutting down host module\n");
+ mutex_lock(&nlm_hosts.ht_mutex);
+
+ /* First, make all hosts eligible for gc */
+ expire_hosts(&nlm_hosts);
/* Then, perform a garbage collection pass */
nlm_gc_hosts();
--
1.5.5.rc1
The sm_sticky logic seems racy; instead just unmonitor before
destruction of handle.
XXX: how to test?
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 24 ++++++++++++++++++++----
fs/lockd/mon.c | 15 +--------------
fs/lockd/svcsubs.c | 6 ------
include/linux/lockd/lockd.h | 3 +--
4 files changed, 22 insertions(+), 26 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index e9069c2..1a1adb9 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -683,6 +683,10 @@ found:
return nsm;
}
+
+/* XXX: */
+int nsm_mon_unmon(struct nsm_handle *, u32, struct nsm_res *);
+
/*
* Release an NSM handle
*/
@@ -691,9 +695,21 @@ nsm_release(struct nsm_handle *nsm)
{
if (!nsm)
return;
- if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) {
- list_del(&nsm->sm_link);
- spin_unlock(&nsm_lock);
- kfree(nsm);
+ if (!atomic_dec_and_lock(&nsm->sm_count, &nsm_lock))
+ return;
+ list_del(&nsm->sm_link);
+ spin_unlock(&nsm_lock);
+ if (nsm->sm_monitored) {
+ struct nsm_res res;
+ int status;
+
+ dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name);
+ status = nsm_mon_unmon(nsm, SM_UNMON, &res);
+ if (status < 0)
+ printk(KERN_NOTICE "lockd: cannot unmonitor %s\n",
+ nsm->sm_name);
+ else
+ nsm->sm_monitored = 0;
}
+ kfree(nsm);
}
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index 480f197..8cc0e97 100644
--- a/fs/lockd/mon.c
+++ b/fs/lockd/mon.c
@@ -32,7 +32,7 @@ int nsm_local_state;
/*
* Common procedure for SM_MON/SM_UNMON calls
*/
-static int
+int
nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res)
{
struct rpc_clnt *clnt;
@@ -100,24 +100,11 @@ nsm_monitor(struct nlm_host *host)
void nsm_unmonitor(struct nlm_host *host)
{
struct nsm_handle *nsm = host->h_nsmhandle;
- struct nsm_res res;
- int status;
if (nsm == NULL)
return;
host->h_nsmhandle = NULL;
- if (atomic_read(&nsm->sm_count) == 1
- && nsm->sm_monitored && !nsm->sm_sticky) {
- dprintk("lockd: nsm_unmonitor(%s)\n", host->h_name);
-
- status = nsm_mon_unmon(nsm, SM_UNMON, &res);
- if (status < 0)
- printk(KERN_NOTICE "lockd: cannot unmonitor %s\n",
- host->h_name);
- else
- nsm->sm_monitored = 0;
- }
nsm_release(nsm);
}
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 9c186f0..0296904 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -336,12 +336,6 @@ nlmsvc_is_client(void *data, struct nlm_host *dummy)
struct nlm_host *host = data;
BUG_ON(!host->h_server);
- /* we are destroying locks even though the client
- * hasn't asked us too, so don't unmonitor the
- * client
- */
- if (host->h_nsmhandle)
- host->h_nsmhandle->sm_sticky = 1;
return 1;
}
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index b56d5aa..e1c2690 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -75,8 +75,7 @@ struct nsm_handle {
char * sm_name;
struct sockaddr_storage sm_addr;
size_t sm_addrlen;
- unsigned int sm_monitored : 1,
- sm_sticky : 1; /* don't unmonitor */
+ unsigned int sm_monitored : 1;
char sm_addrbuf[48]; /* address eyecatcher */
};
--
1.5.5.rc1
We've got a lot of loops like this, and I find them a little easier to
read with the macros.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 108 ++++++++++++++++++++++++++++--------------------------
1 files changed, 56 insertions(+), 52 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 199ca8c..1b90b49 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -32,6 +32,19 @@ struct host_table {
struct mutex ht_mutex;
};
+#define for_each_host(host, pos, chain, table) \
+ for ((chain) = (table)->ht_chains; \
+ (chain) < (table)->ht_chains + NLM_HOST_NRHASH; \
+ ++(chain)) \
+ hlist_for_each_entry((host), (pos), (chain), h_hash)
+
+#define for_each_host_safe(host, pos, next, chain, table) \
+ for ((chain) = (table)->ht_chains; \
+ (chain) < (table)->ht_chains + NLM_HOST_NRHASH; \
+ ++(chain)) \
+ hlist_for_each_entry_safe((host), (pos), (next), (chain), \
+ h_hash)
+
static struct host_table nlm_hosts = {
.ht_mutex = __MUTEX_INITIALIZER(nlm_hosts.ht_mutex)
};
@@ -505,28 +518,26 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
* To avoid processing a host several times, we match the nsmstate.
*/
again: mutex_lock(&nlm_hosts.ht_mutex);
- for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash) {
- if (host->h_nsmhandle == nsm
- && host->h_nsmstate != new_state) {
- host->h_nsmstate = new_state;
- host->h_state++;
-
- nlm_get_host(host);
- mutex_unlock(&nlm_hosts.ht_mutex);
-
- if (host->h_server) {
- /* We're server for this guy, just ditch
- * all the locks he held. */
- nlmsvc_free_host_resources(host);
- } else {
- /* He's the server, initiate lock recovery. */
- nlmclnt_recovery(host);
- }
-
- nlm_release_host(host);
- goto again;
+ for_each_host(host, pos, chain, &nlm_hosts) {
+ if (host->h_nsmhandle == nsm
+ && host->h_nsmstate != new_state) {
+ host->h_nsmstate = new_state;
+ host->h_state++;
+
+ nlm_get_host(host);
+ mutex_unlock(&nlm_hosts.ht_mutex);
+
+ if (host->h_server) {
+ /* We're server for this guy, just ditch
+ * all the locks he held. */
+ nlmsvc_free_host_resources(host);
+ } else {
+ /* He's the server, initiate lock recovery. */
+ nlmclnt_recovery(host);
}
+
+ nlm_release_host(host);
+ goto again;
}
}
@@ -549,13 +560,11 @@ nlm_shutdown_hosts(void)
/* First, make all hosts eligible for gc */
dprintk("lockd: nuking all hosts...\n");
- for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash) {
- host->h_expires = jiffies - 1;
- if (host->h_rpcclnt) {
- rpc_shutdown_client(host->h_rpcclnt);
- host->h_rpcclnt = NULL;
- }
+ for_each_host(host, pos, chain, &nlm_hosts) {
+ host->h_expires = jiffies - 1;
+ if (host->h_rpcclnt) {
+ rpc_shutdown_client(host->h_rpcclnt);
+ host->h_rpcclnt = NULL;
}
}
@@ -567,12 +576,10 @@ nlm_shutdown_hosts(void)
if (nlm_hosts.ht_num) {
printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
dprintk("lockd: %d hosts left:\n", nlm_hosts.ht_num);
- for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash) {
- dprintk(" %s (cnt %d use %d exp %ld)\n",
- host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires);
- }
+ for_each_host(host, pos, chain, &nlm_hosts) {
+ dprintk(" %s (cnt %d use %d exp %ld)\n",
+ host->h_name, atomic_read(&host->h_count),
+ host->h_inuse, host->h_expires);
}
}
}
@@ -590,29 +597,26 @@ nlm_gc_hosts(void)
struct nlm_host *host;
dprintk("lockd: host garbage collection\n");
- for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry(host, pos, chain, h_hash)
- host->h_inuse = 0;
- }
+ for_each_host(host, pos, chain, &nlm_hosts)
+ host->h_inuse = 0;
/* Mark all hosts that hold locks, blocks or shares */
nlmsvc_mark_resources();
- for (chain = nlm_hosts.ht_chains; chain < nlm_hosts.ht_chains + NLM_HOST_NRHASH; ++chain) {
- hlist_for_each_entry_safe(host, pos, next, chain, h_hash) {
- if (atomic_read(&host->h_count) || host->h_inuse
- || time_before(jiffies, host->h_expires)) {
- dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
- host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires);
- continue;
- }
- dprintk("lockd: delete host %s\n", host->h_name);
- hlist_del_init(&host->h_hash);
-
- nlm_destroy_host(host);
- nlm_hosts.ht_num--;
+ for_each_host_safe(host, pos, next, chain, &nlm_hosts) {
+ if (atomic_read(&host->h_count) || host->h_inuse
+ || time_before(jiffies, host->h_expires)) {
+ dprintk("nlm_gc_hosts skipping %s"
+ " (cnt %d use %d exp %ld)\n",
+ host->h_name, atomic_read(&host->h_count),
+ host->h_inuse, host->h_expires);
+ continue;
}
+ dprintk("lockd: delete host %s\n", host->h_name);
+ hlist_del_init(&host->h_hash);
+
+ nlm_destroy_host(host);
+ nlm_hosts.ht_num--;
}
next_gc = jiffies + NLM_HOST_COLLECT;
--
1.5.5.rc1
Separate out client and server host lists. Most logic is just
duplicated, but note that the garbage-collection logic is slightly
different for the two, since the client doesn't need to do the mark and
sweep of all its hosts.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 59 ++++++++++++++++++++++++++++++++----------------------
1 files changed, 35 insertions(+), 24 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 588017f..73c2be2 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -45,8 +45,11 @@ struct host_table {
hlist_for_each_entry_safe((host), (pos), (next), (chain), \
h_hash)
-static struct host_table nlm_hosts = {
- .ht_mutex = __MUTEX_INITIALIZER(nlm_hosts.ht_mutex)
+static struct host_table nlm_clients = {
+ .ht_mutex = __MUTEX_INITIALIZER(nlm_clients.ht_mutex)
+};
+static struct host_table nlm_servers = {
+ .ht_mutex = __MUTEX_INITIALIZER(nlm_servers.ht_mutex)
};
static unsigned long next_gc;
@@ -158,8 +161,9 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
struct hlist_node *pos;
struct nlm_host *host;
struct nsm_handle *nsm;
+ struct host_table *table = ni->server ? &nlm_servers : &nlm_clients;
- mutex_lock(&nlm_hosts.ht_mutex);
+ mutex_lock(&table->ht_mutex);
if (time_after_eq(jiffies, next_gc))
nlm_gc_hosts();
@@ -171,17 +175,16 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
* different NLM rpc_clients into one single nlm_host object.
* This would allow us to have one nlm_host per address.
*/
- chain = &nlm_hosts.ht_chains[nlm_hash_address(ni->sap)];
+ chain = &table->ht_chains[nlm_hash_address(ni->sap)];
hlist_for_each_entry(host, pos, chain, h_hash) {
if (!nlm_cmp_addr(nlm_addr(host), ni->sap))
continue;
+ BUG_ON(host->h_server != ni->server); /* XXX */
if (host->h_proto != ni->protocol)
continue;
if (host->h_version != ni->version)
continue;
- if (host->h_server != ni->server)
- continue;
if (!nlm_cmp_addr(nlm_srcaddr(host), ni->src_sap))
continue;
@@ -237,7 +240,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
INIT_LIST_HEAD(&host->h_granted);
INIT_LIST_HEAD(&host->h_reclaim);
- nlm_hosts.ht_num++;
+ table->ht_num++;
nlm_display_address((struct sockaddr *)&host->h_addr,
host->h_addrbuf, sizeof(host->h_addrbuf));
@@ -248,7 +251,7 @@ static struct nlm_host *nlm_lookup_host(struct nlm_lookup_host_info *ni)
host->h_name);
out:
- mutex_unlock(&nlm_hosts.ht_mutex);
+ mutex_unlock(&table->ht_mutex);
return host;
}
@@ -540,15 +543,15 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
* lock for this.
* To avoid processing a host several times, we match the nsmstate.
*/
- while ((host = next_host_state(&nlm_hosts, nsm, new_state)) != NULL) {
- if (host->h_server) {
- /* We're server for this guy, just ditch
- * all the locks he held. */
- nlmsvc_free_host_resources(host);
- } else {
- /* He's the server, initiate lock recovery. */
- nlmclnt_recovery(host);
- }
+ while ((host = next_host_state(&nlm_servers, nsm, new_state)) != NULL) {
+ /* We're server for this guy, just ditch
+ * all the locks he held. */
+ nlmsvc_free_host_resources(host);
+ nlm_release_host(host);
+ }
+ while ((host = next_host_state(&nlm_clients, nsm, new_state)) != NULL) {
+ /* He's the server, initiate lock recovery. */
+ nlmclnt_recovery(host);
nlm_release_host(host);
}
}
@@ -617,18 +620,23 @@ void
nlm_shutdown_hosts(void)
{
dprintk("lockd: shutting down host module\n");
- mutex_lock(&nlm_hosts.ht_mutex);
+ mutex_lock(&nlm_servers.ht_mutex);
+ mutex_lock(&nlm_clients.ht_mutex);
/* First, make all hosts eligible for gc */
- expire_hosts(&nlm_hosts);
+ expire_hosts(&nlm_clients);
+ expire_hosts(&nlm_servers);
/* Then, perform a garbage collection pass */
nlm_gc_hosts();
- mutex_unlock(&nlm_hosts.ht_mutex);
+ mutex_unlock(&nlm_clients.ht_mutex);
+ mutex_unlock(&nlm_servers.ht_mutex);
/* complain if any hosts are left */
- if (nlm_hosts.ht_num)
- warn_host_leak(&nlm_hosts);
+ if (nlm_clients.ht_num)
+ warn_host_leak(&nlm_clients);
+ if (nlm_servers.ht_num)
+ warn_host_leak(&nlm_servers);
}
/*
@@ -644,13 +652,16 @@ nlm_gc_hosts(void)
struct nlm_host *host;
dprintk("lockd: host garbage collection\n");
- for_each_host(host, pos, chain, &nlm_hosts)
+ for_each_host(host, pos, chain, &nlm_servers)
host->h_inuse = 0;
/* Mark all hosts that hold locks, blocks or shares */
nlmsvc_mark_resources();
- nlm_gc_table(&nlm_hosts);
+ nlm_gc_table(&nlm_servers);
+
+ /* Note client doesn't need mark/sweep logic: */
+ nlm_gc_table(&nlm_clients);
}
/*
--
1.5.5.rc1
Minor reorganization; no change in behavior. This will save some
duplicated code after we split apart the client and server host lists.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 59 +++++++++++++++++++++++++++++++-----------------------
1 files changed, 34 insertions(+), 25 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 1b90b49..bb3bf36 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -482,6 +482,31 @@ void nlm_release_host(struct nlm_host *host)
}
}
+static struct nlm_host *next_host_state(struct host_table *table,
+ struct nsm_handle *nsm,
+ u32 new_state)
+{
+ struct hlist_head *chain;
+ struct hlist_node *pos;
+ struct nlm_host *host = NULL;
+
+ mutex_lock(&table->ht_mutex);
+ for_each_host(host, pos, chain, table) {
+ if (host->h_nsmhandle == nsm
+ && host->h_nsmstate != new_state) {
+ host->h_nsmstate = new_state;
+ host->h_state++;
+
+ nlm_get_host(host);
+ mutex_unlock(&table->ht_mutex);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&table->ht_mutex);
+ return host;
+}
+
/*
* We were notified that the host indicated by address &sin
* has rebooted.
@@ -492,8 +517,6 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
unsigned int hostname_len,
u32 new_state)
{
- struct hlist_head *chain;
- struct hlist_node *pos;
struct nsm_handle *nsm;
struct nlm_host *host;
@@ -517,31 +540,17 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
* lock for this.
* To avoid processing a host several times, we match the nsmstate.
*/
-again: mutex_lock(&nlm_hosts.ht_mutex);
- for_each_host(host, pos, chain, &nlm_hosts) {
- if (host->h_nsmhandle == nsm
- && host->h_nsmstate != new_state) {
- host->h_nsmstate = new_state;
- host->h_state++;
-
- nlm_get_host(host);
- mutex_unlock(&nlm_hosts.ht_mutex);
-
- if (host->h_server) {
- /* We're server for this guy, just ditch
- * all the locks he held. */
- nlmsvc_free_host_resources(host);
- } else {
- /* He's the server, initiate lock recovery. */
- nlmclnt_recovery(host);
- }
-
- nlm_release_host(host);
- goto again;
+ while ((host = next_host_state(&nlm_hosts, nsm, new_state)) != NULL) {
+ if (host->h_server) {
+ /* We're server for this guy, just ditch
+ * all the locks he held. */
+ nlmsvc_free_host_resources(host);
+ } else {
+ /* He's the server, initiate lock recovery. */
+ nlmclnt_recovery(host);
}
+ nlm_release_host(host);
}
-
- mutex_unlock(&nlm_hosts.ht_mutex);
}
/*
--
1.5.5.rc1
This code will later be shared.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 46 +++++++++++++++++++++++++++-------------------
1 files changed, 27 insertions(+), 19 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 3fd7573..fa006af 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -553,6 +553,31 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
}
}
+static void nlm_gc_table(struct host_table *table)
+{
+ struct hlist_head *chain;
+ struct hlist_node *pos, *next;
+ struct nlm_host *host;
+
+ for_each_host_safe(host, pos, next, chain, table) {
+ if (atomic_read(&host->h_count) || host->h_inuse
+ || time_before(jiffies, host->h_expires)) {
+ dprintk("nlm_gc_hosts skipping %s"
+ " (cnt %d use %d exp %ld)\n",
+ host->h_name, atomic_read(&host->h_count),
+ host->h_inuse, host->h_expires);
+ continue;
+ }
+ dprintk("lockd: delete host %s\n", host->h_name);
+ hlist_del_init(&host->h_hash);
+
+ nlm_destroy_host(host);
+ table->ht_num--;
+ }
+
+ next_gc = jiffies + NLM_HOST_COLLECT;
+}
+
static void warn_host_leak(struct host_table *table)
{
struct hlist_head *chain;
@@ -610,7 +635,7 @@ static void
nlm_gc_hosts(void)
{
struct hlist_head *chain;
- struct hlist_node *pos, *next;
+ struct hlist_node *pos;
struct nlm_host *host;
dprintk("lockd: host garbage collection\n");
@@ -620,26 +645,9 @@ nlm_gc_hosts(void)
/* Mark all hosts that hold locks, blocks or shares */
nlmsvc_mark_resources();
- for_each_host_safe(host, pos, next, chain, &nlm_hosts) {
- if (atomic_read(&host->h_count) || host->h_inuse
- || time_before(jiffies, host->h_expires)) {
- dprintk("nlm_gc_hosts skipping %s"
- " (cnt %d use %d exp %ld)\n",
- host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires);
- continue;
- }
- dprintk("lockd: delete host %s\n", host->h_name);
- hlist_del_init(&host->h_hash);
-
- nlm_destroy_host(host);
- nlm_hosts.ht_num--;
- }
-
- next_gc = jiffies + NLM_HOST_COLLECT;
+ nlm_gc_table(&nlm_hosts);
}
-
/*
* Manage NSM handles
*/
--
1.5.5.rc1
This will also save some duplicated code after splitting apart client
and server.
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 26 +++++++++++++++++---------
1 files changed, 17 insertions(+), 9 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index bb3bf36..3fd7573 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -553,6 +553,21 @@ void nlm_host_rebooted(const struct sockaddr_in *sin,
}
}
+static void warn_host_leak(struct host_table *table)
+{
+ struct hlist_head *chain;
+ struct hlist_node *pos;
+ struct nlm_host *host;
+
+ printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
+ dprintk("lockd: %d hosts left:\n", table->ht_num);
+ for_each_host(host, pos, chain, table) {
+ dprintk(" %s (cnt %d use %d exp %ld)\n",
+ host->h_name, atomic_read(&host->h_count),
+ host->h_inuse, host->h_expires);
+ }
+}
+
/*
* Shut down the hosts module.
* Note that this routine is called only at server shutdown time.
@@ -582,15 +597,8 @@ nlm_shutdown_hosts(void)
mutex_unlock(&nlm_hosts.ht_mutex);
/* complain if any hosts are left */
- if (nlm_hosts.ht_num) {
- printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
- dprintk("lockd: %d hosts left:\n", nlm_hosts.ht_num);
- for_each_host(host, pos, chain, &nlm_hosts) {
- dprintk(" %s (cnt %d use %d exp %ld)\n",
- host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires);
- }
- }
+ if (nlm_hosts.ht_num)
+ warn_host_leak(&nlm_hosts);
}
/*
--
1.5.5.rc1
These clients will all be shut down by nlm_destroy_host() when we do
garbage collection a little later, so this is redundant.
XXX: Ask Jeff Layton why he added this again?
Signed-off-by: J. Bruce Fields <[email protected]>
---
fs/lockd/host.c | 8 +-------
1 files changed, 1 insertions(+), 7 deletions(-)
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index 73c2be2..0387c6b 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -602,14 +602,8 @@ static void expire_hosts(struct host_table *table)
struct hlist_node *pos;
struct nlm_host *host;
- dprintk("lockd: nuking all hosts...\n");
- for_each_host(host, pos, chain, table) {
+ for_each_host(host, pos, chain, table)
host->h_expires = jiffies - 1;
- if (host->h_rpcclnt) {
- rpc_shutdown_client(host->h_rpcclnt);
- host->h_rpcclnt = NULL;
- }
- }
}
/*
--
1.5.5.rc1