v3:
- add NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK in a separate patch
- a little cleanup and patch squashing
- changelog cleanups
v2:
- don't use *_unsafe sleep in setlk retry code
- better encapsulate retry logic into helper functions
- don't bother with waitqueue handling if we aren't expecting callback
- fix build when CONFIG_NFS_V4_1 is not set
- address several other style comments
This is the third posting of this patchset. The only real change is a
separate patch to add the NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK constant.
This patchset adds support for CB_NOTIFY_LOCK callbacks to the NFS
client. The basic idea is to add a waitqueue to the nfs_client and then
have blocking lock waiters wait on that queue for callbacks.
When a callback comes in, we use a keyed wakeup to wake any waiters. The
waitqueue handling is necessarily more "manual" than I would like, but I
don't see a real alternative there given that we need to insert the
waiters onto the waitqueue prior to sending the lock request, and
sending a lock request can involve blocking operations.
Tested in conjunction with the corresponding knfsd server-side patchset.
Jeff Layton (9):
nfs: eliminate pointless and confusing do_vfs_lock wrappers
nfs: check for POSIX lock capability on server even for flock locks
nfs: use safe, interruptible sleeps when waiting to retry LOCK
nfs: add a new NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK constant
nfs: track whether server sets MAY_NOTIFY_LOCK flag
nfs: add handling for CB_NOTIFY_LOCK in client
nfs: move nfs4_set_lock_state call into caller
nfs: move nfs4 lock retry attempt loop to a separate function
nfs: add code to allow client to wait on lock callbacks
fs/nfs/callback.h | 8 +++
fs/nfs/callback_proc.c | 18 +++++
fs/nfs/callback_xdr.c | 49 ++++++++++++-
fs/nfs/file.c | 9 +--
fs/nfs/nfs4_fs.h | 1 +
fs/nfs/nfs4client.c | 3 +
fs/nfs/nfs4proc.c | 179 ++++++++++++++++++++++++++++++++++------------
include/linux/nfs_fs_sb.h | 3 +
include/uapi/linux/nfs4.h | 5 +-
9 files changed, 220 insertions(+), 55 deletions(-)
--
2.7.4
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/file.c | 9 ++-------
fs/nfs/nfs4proc.c | 15 +++++----------
2 files changed, 7 insertions(+), 17 deletions(-)
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index ca699ddc11c1..2a747f6c252d 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -708,11 +708,6 @@ out_noconflict:
goto out;
}
-static int do_vfs_lock(struct file *file, struct file_lock *fl)
-{
- return locks_lock_file_wait(file, fl);
-}
-
static int
do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
@@ -745,7 +740,7 @@ do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
if (!is_local)
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
- status = do_vfs_lock(filp, fl);
+ status = locks_lock_file_wait(filp, fl);
return status;
}
@@ -770,7 +765,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
if (!is_local)
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
else
- status = do_vfs_lock(filp, fl);
+ status = locks_lock_file_wait(filp, fl);
if (status < 0)
goto out;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a9dec32ba9ba..9d38366666f4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5600,11 +5600,6 @@ static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *
return err;
}
-static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
-{
- return locks_lock_inode_wait(inode, fl);
-}
-
struct nfs4_unlockdata {
struct nfs_locku_args arg;
struct nfs_locku_res res;
@@ -5657,7 +5652,7 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
switch (task->tk_status) {
case 0:
renew_lease(calldata->server, calldata->timestamp);
- do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
+ locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
if (nfs4_update_lock_stateid(calldata->lsp,
&calldata->res.stateid))
break;
@@ -5765,7 +5760,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
mutex_lock(&sp->so_delegreturn_mutex);
/* Exclude nfs4_reclaim_open_stateid() - note nesting! */
down_read(&nfsi->rwsem);
- if (do_vfs_lock(inode, request) == -ENOENT) {
+ if (locks_lock_inode_wait(inode, request) == -ENOENT) {
up_read(&nfsi->rwsem);
mutex_unlock(&sp->so_delegreturn_mutex);
goto out;
@@ -5906,7 +5901,7 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
data->timestamp);
if (data->arg.new_lock) {
data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
- if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
+ if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) {
rpc_restart_call_prepare(task);
break;
}
@@ -6148,7 +6143,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
if (status != 0)
goto out;
request->fl_flags |= FL_ACCESS;
- status = do_vfs_lock(state->inode, request);
+ status = locks_lock_inode_wait(state->inode, request);
if (status < 0)
goto out;
mutex_lock(&sp->so_delegreturn_mutex);
@@ -6157,7 +6152,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
/* Yes: cache locks! */
/* ...but avoid races with delegation recall... */
request->fl_flags = fl_flags & ~FL_SLEEP;
- status = do_vfs_lock(state->inode, request);
+ status = locks_lock_inode_wait(state->inode, request);
up_read(&nfsi->rwsem);
mutex_unlock(&sp->so_delegreturn_mutex);
goto out;
--
2.7.4
We may end up in here with a FL_FLOCK lock request. We translate those
to whole-file NFSv4 locks and send them on to the server, so we need to
verify that the server supports them no matter what sort of lock request
this is.
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/nfs4proc.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9d38366666f4..a0f25185c78c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6135,8 +6135,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
unsigned char fl_flags = request->fl_flags;
int status = -ENOLCK;
- if ((fl_flags & FL_POSIX) &&
- !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
+ if (!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
--
2.7.4
We actually want to use TASK_INTERRUPTIBLE sleeps when we're in the
process of polling for a NFSv4 lock. If there is a signal pending when
the task wakes up, then we'll be returning an error anyway. So, we might
as well wake up immediately for non-fatal signals as well. That allows
us to return to userland more quickly in that case, but won't change the
error that userland sees.
Also, there is no need to use the *_unsafe sleep variants here, as no
vfs-layer locks should be held at this point.
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/nfs4proc.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a0f25185c78c..253b0c09fb02 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5537,7 +5537,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
static unsigned long
nfs4_set_lock_task_retry(unsigned long timeout)
{
- freezable_schedule_timeout_killable_unsafe(timeout);
+ freezable_schedule_timeout_interruptible(timeout);
timeout <<= 1;
if (timeout > NFS4_LOCK_MAXTIMEOUT)
return NFS4_LOCK_MAXTIMEOUT;
--
2.7.4
As defined in RFC 5661, section 18.16.
Signed-off-by: Jeff Layton <[email protected]>
---
include/uapi/linux/nfs4.h | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/include/uapi/linux/nfs4.h b/include/uapi/linux/nfs4.h
index 2b871e0858d9..4ae62796bfde 100644
--- a/include/uapi/linux/nfs4.h
+++ b/include/uapi/linux/nfs4.h
@@ -39,8 +39,9 @@
#define NFS4_FH_VOL_MIGRATION 0x0004
#define NFS4_FH_VOL_RENAME 0x0008
-#define NFS4_OPEN_RESULT_CONFIRM 0x0002
-#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_OPEN_RESULT_CONFIRM 0x0002
+#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
+#define NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK 0x0020
#define NFS4_SHARE_ACCESS_MASK 0x000F
#define NFS4_SHARE_ACCESS_READ 0x0001
--
2.7.4
We need to have this info set up before adding the waiter to the
waitqueue, so move this out of the _nfs4_proc_setlk and into the
caller. That's more efficient anyway since we don't need to do
this more than once if we end up waiting on the lock.
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/nfs4proc.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c8ccf13e3ffb..c807850ac476 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6135,14 +6135,8 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs4_state_owner *sp = state->owner;
unsigned char fl_flags = request->fl_flags;
- int status = -ENOLCK;
+ int status;
- if (!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
- goto out;
- /* Is this a delegated open? */
- status = nfs4_set_lock_state(state, request);
- if (status != 0)
- goto out;
request->fl_flags |= FL_ACCESS;
status = locks_lock_inode_wait(state->inode, request);
if (status < 0)
@@ -6216,6 +6210,10 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
if (state == NULL)
return -ENOLCK;
+
+ if (!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
+ return -ENOLCK;
+
/*
* Don't rely on the VFS having checked the file open mode,
* since it won't do this for flock() locks.
@@ -6230,6 +6228,10 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
return -EBADF;
}
+ status = nfs4_set_lock_state(state, request);
+ if (status != 0)
+ return status;
+
do {
status = nfs4_proc_setlk(state, cmd, request);
if ((status != -EAGAIN) || IS_SETLK(cmd))
--
2.7.4
This also consolidates the waiting logic into a single function,
instead of having it spread across two like it is now.
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/nfs4proc.c | 51 ++++++++++++++++++++++++---------------------------
1 file changed, 24 insertions(+), 27 deletions(-)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c807850ac476..a7517abaf3c7 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -5530,22 +5530,6 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
return err;
}
-#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
-#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
-
-/*
- * sleep, with exponential backoff, and retry the LOCK operation.
- */
-static unsigned long
-nfs4_set_lock_task_retry(unsigned long timeout)
-{
- freezable_schedule_timeout_interruptible(timeout);
- timeout <<= 1;
- if (timeout > NFS4_LOCK_MAXTIMEOUT)
- return NFS4_LOCK_MAXTIMEOUT;
- return timeout;
-}
-
static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct inode *inode = state->inode;
@@ -6178,12 +6162,34 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
return err;
}
+#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
+#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
+
+static int
+nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ int status;
+ unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
+
+ do {
+ status = nfs4_proc_setlk(state, cmd, request);
+ if ((status != -EAGAIN) || IS_SETLK(cmd))
+ break;
+ freezable_schedule_timeout_interruptible(timeout);
+ timeout *= 2;
+ timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
+ status = -ERESTARTSYS;
+ if (signalled())
+ break;
+ } while(status < 0);
+ return status;
+}
+
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
struct nfs_open_context *ctx;
struct nfs4_state *state;
- unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
int status;
/* verify open state */
@@ -6232,16 +6238,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
if (status != 0)
return status;
- do {
- status = nfs4_proc_setlk(state, cmd, request);
- if ((status != -EAGAIN) || IS_SETLK(cmd))
- break;
- timeout = nfs4_set_lock_task_retry(timeout);
- status = -ERESTARTSYS;
- if (signalled())
- break;
- } while(status < 0);
- return status;
+ return nfs4_retry_setlk(state, cmd, request);
}
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
--
2.7.4
Add a waitqueue head to the client structure. Have clients set a wait
on that queue prior to requesting a lock from the server. If the lock
is blocked, then we can use that to wait for wakeups.
Note that we do need to do this "manually" since we need to set the
wait on the waitqueue prior to requesting the lock, but requesting a
lock can involve activities that can block.
However, only do that for NFSv4.1 locks, either by compiling out
all of the waitqueue handling when CONFIG_NFS_V4_1 is disabled, or
skipping all of it at runtime if we're dealing with v4.0, or v4.1
servers that don't send lock callbacks.
Note too that even when we expect to get a lock callback, RFC5661
section 20.11.4 is pretty clear that we still need to poll for them,
so we do still sleep on a timeout. We do however always poll at the
longest interval in that case.
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/callback_proc.c | 6 +++
fs/nfs/nfs4client.c | 3 ++
fs/nfs/nfs4proc.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++-
include/linux/nfs_fs_sb.h | 3 ++
4 files changed, 107 insertions(+), 1 deletion(-)
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 974881824414..95ed790e5c3d 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -632,12 +632,18 @@ out:
__be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, void *dummy,
struct cb_process_state *cps)
{
+ struct nfs_lowner *lowner = &args->cbnl_owner;
+
if (!cps->clp) /* set in cb_sequence */
return htonl(NFS4ERR_OP_NOT_IN_SESSION);
dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+ /* Don't wake anybody if the string looked bogus */
+ if (lowner->id || lowner->s_dev)
+ __wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);
+
return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index cd3b7cfdde16..9f62df5feb7d 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -199,6 +199,9 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
clp->cl_minorversion = cl_init->minorversion;
clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
clp->cl_mig_gen = 1;
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ init_waitqueue_head(&clp->cl_lock_waitq);
+#endif
return clp;
error:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index a7517abaf3c7..30d3a3ca075c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6166,7 +6166,8 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
static int
-nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
+ struct file_lock *request)
{
int status;
unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
@@ -6185,6 +6186,99 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
return status;
}
+#ifdef CONFIG_NFS_V4_1
+struct nfs4_lock_waiter {
+ struct task_struct *task;
+ struct inode *inode;
+ struct nfs_lowner *owner;
+ bool notified;
+};
+
+static int
+nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key)
+{
+ int ret;
+ struct cb_notify_lock_args *cbnl = key;
+ struct nfs4_lock_waiter *waiter = wait->private;
+ struct nfs_lowner *lowner = &cbnl->cbnl_owner,
+ *wowner = waiter->owner;
+
+ /* Only wake if the callback was for the same owner */
+ if (lowner->clientid != wowner->clientid ||
+ lowner->id != wowner->id ||
+ lowner->s_dev != wowner->s_dev)
+ return 0;
+
+ /* Make sure it's for the right inode */
+ if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
+ return 0;
+
+ waiter->notified = true;
+
+ /* override "private" so we can use default_wake_function */
+ wait->private = waiter->task;
+ ret = autoremove_wake_function(wait, mode, flags, key);
+ wait->private = waiter;
+ return ret;
+}
+
+static int
+nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ int status;
+ unsigned long flags;
+ struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs_client *clp = server->nfs_client;
+ wait_queue_head_t *q = &clp->cl_lock_waitq;
+ struct nfs_lowner owner = { .clientid = clp->cl_clientid,
+ .id = lsp->ls_seqid.owner_id,
+ .s_dev = server->s_dev };
+ struct nfs4_lock_waiter waiter = { .task = current,
+ .inode = state->inode,
+ .owner = &owner,
+ .notified = false };
+ wait_queue_t wait;
+
+ /* Don't bother with waitqueue if we don't expect a callback */
+ if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
+ return nfs4_retry_setlk_simple(state, cmd, request);
+
+ init_wait(&wait);
+ wait.private = &waiter;
+ wait.func = nfs4_wake_lock_waiter;
+ add_wait_queue(q, &wait);
+
+ do {
+ status = nfs4_proc_setlk(state, cmd, request);
+ if ((status != -EAGAIN) || IS_SETLK(cmd))
+ break;
+
+ spin_lock_irqsave(&q->lock, flags);
+ if (waiter.notified) {
+ spin_unlock_irqrestore(&q->lock, flags);
+ continue;
+ }
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_unlock_irqrestore(&q->lock, flags);
+
+ freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT);
+ status = -ERESTARTSYS;
+ if (signalled())
+ break;
+ } while(status < 0);
+
+ finish_wait(q, &wait);
+ return status;
+}
+#else /* !CONFIG_NFS_V4_1 */
+static inline int
+nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
+{
+ return nfs4_retry_setlk_simple(state, cmd, request);
+}
+#endif
+
static int
nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
{
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 14a762d2734d..b34097c67848 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -103,6 +103,9 @@ struct nfs_client {
#define NFS_SP4_MACH_CRED_WRITE 5 /* WRITE */
#define NFS_SP4_MACH_CRED_COMMIT 6 /* COMMIT */
#define NFS_SP4_MACH_CRED_PNFS_CLEANUP 7 /* LAYOUTRETURN */
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+ wait_queue_head_t cl_lock_waitq;
+#endif /* CONFIG_NFS_V4_1 */
#endif /* CONFIG_NFS_V4 */
/* Our own IP address, as a null-terminated string.
--
2.7.4
We want to handle the two cases differently, such that we poll more
aggressively when we don't expect a callback.
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/nfs4_fs.h | 1 +
fs/nfs/nfs4proc.c | 2 ++
2 files changed, 3 insertions(+)
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 9bf64eacba5b..91e4f135a5f2 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -156,6 +156,7 @@ enum {
NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
NFS_STATE_RECOVERY_FAILED, /* OPEN stateid state recovery failed */
+ NFS_STATE_MAY_NOTIFY_LOCK, /* server may CB_NOTIFY_LOCK */
};
struct nfs4_state {
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 253b0c09fb02..c8ccf13e3ffb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2537,6 +2537,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
goto out;
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+ if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
+ set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
dentry = opendata->dentry;
if (d_really_is_negative(dentry)) {
--
2.7.4
For now, the callback doesn't do anything. Support for that will be
added in later patches.
Signed-off-by: Jeff Layton <[email protected]>
---
fs/nfs/callback.h | 8 ++++++++
fs/nfs/callback_proc.c | 12 ++++++++++++
fs/nfs/callback_xdr.c | 49 ++++++++++++++++++++++++++++++++++++++++++++++++-
3 files changed, 68 insertions(+), 1 deletion(-)
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 5fe1cecbf9f0..b486848306f0 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -179,6 +179,14 @@ extern __be32 nfs4_callback_devicenotify(
struct cb_devicenotifyargs *args,
void *dummy, struct cb_process_state *cps);
+struct cb_notify_lock_args {
+ struct nfs_fh cbnl_fh;
+ struct nfs_lowner cbnl_owner;
+};
+
+extern __be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args,
+ void *dummy,
+ struct cb_process_state *cps);
#endif /* CONFIG_NFS_V4_1 */
extern int check_gss_callback_principal(struct nfs_client *, struct svc_rqst *);
extern __be32 nfs4_callback_getattr(struct cb_getattrargs *args,
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index f953ef6b2f2e..974881824414 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -628,4 +628,16 @@ out:
dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
}
+
+__be32 nfs4_callback_notify_lock(struct cb_notify_lock_args *args, void *dummy,
+ struct cb_process_state *cps)
+{
+ if (!cps->clp) /* set in cb_sequence */
+ return htonl(NFS4ERR_OP_NOT_IN_SESSION);
+
+ dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
+ rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
+
+ return htonl(NFS4_OK);
+}
#endif /* CONFIG_NFS_V4_1 */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index a710825f3d61..2949cd9ad2ce 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -35,6 +35,7 @@
(1 + 3) * 4) // seqid, 3 slotids
#define CB_OP_RECALLANY_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#define CB_OP_RECALLSLOT_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
+#define CB_OP_NOTIFY_LOCK_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
#endif /* CONFIG_NFS_V4_1 */
#define NFSDBG_FACILITY NFSDBG_CALLBACK
@@ -534,6 +535,47 @@ static __be32 decode_recallslot_args(struct svc_rqst *rqstp,
return 0;
}
+static __be32 decode_lockowner(struct xdr_stream *xdr, struct cb_notify_lock_args *args)
+{
+ __be32 *p;
+ unsigned int len;
+
+ p = read_buf(xdr, 12);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+
+ p = xdr_decode_hyper(p, &args->cbnl_owner.clientid);
+ len = be32_to_cpu(*p);
+
+ p = read_buf(xdr, len);
+ if (unlikely(p == NULL))
+ return htonl(NFS4ERR_BADXDR);
+
+ /* Only try to decode if the length is right */
+ if (len == 20) {
+ p += 2; /* skip "lock id:" */
+ args->cbnl_owner.s_dev = be32_to_cpu(*p++);
+ xdr_decode_hyper(p, &args->cbnl_owner.id);
+ } else {
+ args->cbnl_owner.s_dev = 0;
+ args->cbnl_owner.id = 0;
+ }
+ return 0;
+}
+
+static __be32 decode_notify_lock_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_notify_lock_args *args)
+{
+ __be32 status;
+
+ status = decode_fh(xdr, &args->cbnl_fh);
+ if (unlikely(status != 0))
+ goto out;
+ status = decode_lockowner(xdr, args);
+out:
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
+ return status;
+}
+
#endif /* CONFIG_NFS_V4_1 */
static __be32 encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
@@ -746,6 +788,7 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_RECALL_SLOT:
case OP_CB_LAYOUTRECALL:
case OP_CB_NOTIFY_DEVICEID:
+ case OP_CB_NOTIFY_LOCK:
*op = &callback_ops[op_nr];
break;
@@ -753,7 +796,6 @@ preprocess_nfs41_op(int nop, unsigned int op_nr, struct callback_op **op)
case OP_CB_PUSH_DELEG:
case OP_CB_RECALLABLE_OBJ_AVAIL:
case OP_CB_WANTS_CANCELLED:
- case OP_CB_NOTIFY_LOCK:
return htonl(NFS4ERR_NOTSUPP);
default:
@@ -1006,6 +1048,11 @@ static struct callback_op callback_ops[] = {
.decode_args = (callback_decode_arg_t)decode_recallslot_args,
.res_maxsize = CB_OP_RECALLSLOT_RES_MAXSZ,
},
+ [OP_CB_NOTIFY_LOCK] = {
+ .process_op = (callback_process_op_t)nfs4_callback_notify_lock,
+ .decode_args = (callback_decode_arg_t)decode_notify_lock_args,
+ .res_maxsize = CB_OP_NOTIFY_LOCK_RES_MAXSZ,
+ },
#endif /* CONFIG_NFS_V4_1 */
};
--
2.7.4
> On Sep 16, 2016, at 16:27, Jeff Layton <[email protected]> wrote:
> 
> We may end up in here with a FL_FLOCK lock request. We translate those
> to whole-file NFSv4 locks and send them on to the server, so we need to
> verify that the server supports them no matter what sort of lock request
> this is.
> 
> Signed-off-by: Jeff Layton <[email protected]>
> ---
> fs/nfs/nfs4proc.c | 3 +--
> 1 file changed, 1 insertion(+), 2 deletions(-)
> 
> diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
> index 9d38366666f4..a0f25185c78c 100644
> --- a/fs/nfs/nfs4proc.c
> +++ b/fs/nfs/nfs4proc.c
> @@ -6135,8 +6135,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
> 	unsigned char fl_flags = request->fl_flags;
> 	int status = -ENOLCK;
> 
> -	if ((fl_flags & FL_POSIX) &&
> -			!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
> +	if (!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
> 		goto out;
> 	/* Is this a delegated open? */
> 	status = nfs4_set_lock_state(state, request);
> -- 
> 2.7.4
The ability to support FL_FLOCK locks does not depend on the server's support for POSIX locking semantics. FL_FLOCK can also use stacked lock semantics, precisely because they always cover the whole file.
> On Sep 16, 2016, at 16:27, Jeff Layton <[email protected]> wrote:
> 
> This also consolidates the waiting logic into a single function,
> instead of having it spread across two like it is now.
> 
> Signed-off-by: Jeff Layton <[email protected]>
> ---
> fs/nfs/nfs4proc.c | 51 ++++++++++++++++++++++++---------------------------
> 1 file changed, 24 insertions(+), 27 deletions(-)
> 
> diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
> index c807850ac476..a7517abaf3c7 100644
> --- a/fs/nfs/nfs4proc.c
> +++ b/fs/nfs/nfs4proc.c
> @@ -5530,22 +5530,6 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
> 	return err;
> }
> 
> -#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
> -#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
> -
> -/*
> - * sleep, with exponential backoff, and retry the LOCK operation.
> - */
> -static unsigned long
> -nfs4_set_lock_task_retry(unsigned long timeout)
> -{
> -	freezable_schedule_timeout_interruptible(timeout);
> -	timeout <<= 1;
> -	if (timeout > NFS4_LOCK_MAXTIMEOUT)
> -		return NFS4_LOCK_MAXTIMEOUT;
> -	return timeout;
> -}
> -
> static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
> {
> 	struct inode *inode = state->inode;
> @@ -6178,12 +6162,34 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *
> 	return err;
> }
> 
> +#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
> +#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
> +
> +static int
> +nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
> +{
> +	int status;
> +	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
> +
> +	do {
> +		status = nfs4_proc_setlk(state, cmd, request);
> +		if ((status != -EAGAIN) || IS_SETLK(cmd))
> +			break;
> +		freezable_schedule_timeout_interruptible(timeout);
> +		timeout *= 2;
> +		timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
> +		status = -ERESTARTSYS;
> +		if (signalled())
> +			break;
> +	} while(status < 0);

Can it ever be >= 0 here? Why not just use 'while (!signalled())'?

> +	return status;
> +}
> +
> static int
> nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
> {
> 	struct nfs_open_context *ctx;
> 	struct nfs4_state *state;
> -	unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
> 	int status;
> 
> 	/* verify open state */
> @@ -6232,16 +6238,7 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
> 	if (status != 0)
> 		return status;
> 
> -	do {
> -		status = nfs4_proc_setlk(state, cmd, request);
> -		if ((status != -EAGAIN) || IS_SETLK(cmd))
> -			break;
> -		timeout = nfs4_set_lock_task_retry(timeout);
> -		status = -ERESTARTSYS;
> -		if (signalled())
> -			break;
> -	} while(status < 0);
> -	return status;
> +	return nfs4_retry_setlk(state, cmd, request);
> }
> 
> int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
> -- 
> 2.7.4
> 
On Fri, 2016-09-16 at 21:14 +0000, Trond Myklebust wrote:
> >
> > > > On Sep 16, 2016, at 16:27, Jeff Layton <[email protected]> wrote:
> >
> > We may end up in here with a FL_FLOCK lock request. We translate those
> > to whole-file NFSv4 locks and send them on to the server, so we need to
> > verify that the server supports them no matter what sort of lock request
> > this is.
> >
> > > > Signed-off-by: Jeff Layton <[email protected]>
> > ---
> > fs/nfs/nfs4proc.c | 3 +--
> > 1 file changed, 1 insertion(+), 2 deletions(-)
> >
> > diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
> > index 9d38366666f4..a0f25185c78c 100644
> > --- a/fs/nfs/nfs4proc.c
> > +++ b/fs/nfs/nfs4proc.c
> > @@ -6135,8 +6135,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
> > unsigned char fl_flags = request->fl_flags;
> > int status = -ENOLCK;
> >
> > > > - if ((fl_flags & FL_POSIX) &&
> > > > - !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
> > > > + if (!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
> > > > goto out;
> > /* Is this a delegated open? */
> > status = nfs4_set_lock_state(state, request);
> > --
> > 2.7.4
>
> The ability to support FL_FLOCK locks does not depend on the server’s
> support for POSIX locking semantics. FL_FLOCK can also use stacked
> lock semantics, precisely because they always cover the whole file.
Oh! I had always thought this flag's absence basically meant "I don't
support file locking at all, so don't bother sending any LOCK
requests". Now that I look though, all RFC5661 says is:
o OPEN4_RESULT_LOCKTYPE_POSIX indicates that the server's byte-range
locking behavior supports the complete set of POSIX locking
techniques [24]. From this, the client can choose to manage byte-
range locking state in a way to handle a mismatch of byte-range
locking management.
If this flag isn't there, I guess we can't infer anything about how the
server's locks are implemented. That's just super.
So, ok. If you think this logic is more correct as-is, then I'm fine
with dropping this patch. This check gets moved in a later patch
though, so I'll need to fix that up as well.
--
Jeff Layton <[email protected]>
On Fri, 2016-09-16 at 21:20 +0000, Trond Myklebust wrote:
> >
> > On Sep 16, 2016, at 16:27, Jeff Layton <[email protected]> wrote:
> >
> > This also consolidates the waiting logic into a single function,
> > instead of having it spread across two like it is now.
> >
> > Signed-off-by: Jeff Layton <[email protected]>
> > ---
> > fs/nfs/nfs4proc.c | 51 ++++++++++++++++++++++++------------------
> > ---------
> > 1 file changed, 24 insertions(+), 27 deletions(-)
> >
> > diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
> > index c807850ac476..a7517abaf3c7 100644
> > --- a/fs/nfs/nfs4proc.c
> > +++ b/fs/nfs/nfs4proc.c
> > @@ -5530,22 +5530,6 @@ int nfs4_proc_delegreturn(struct inode
> > *inode, struct rpc_cred *cred, const nfs4
> > return err;
> > }
> >
> > -#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
> > -#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
> > -
> > -/*
> > - * sleep, with exponential backoff, and retry the LOCK operation.
> > - */
> > -static unsigned long
> > -nfs4_set_lock_task_retry(unsigned long timeout)
> > -{
> > - freezable_schedule_timeout_interruptible(timeout);
> > - timeout <<= 1;
> > - if (timeout > NFS4_LOCK_MAXTIMEOUT)
> > - return NFS4_LOCK_MAXTIMEOUT;
> > - return timeout;
> > -}
> > -
> > static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd,
> > struct file_lock *request)
> > {
> > struct inode *inode = state->inode;
> > @@ -6178,12 +6162,34 @@ static int nfs4_proc_setlk(struct
> > nfs4_state *state, int cmd, struct file_lock *
> > return err;
> > }
> >
> > +#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
> > +#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
> > +
> > +static int
> > +nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct
> > file_lock *request)
> > +{
> > + int status;
> > + unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
> > +
> > + do {
> > + status = nfs4_proc_setlk(state, cmd, request);
> > + if ((status != -EAGAIN) || IS_SETLK(cmd))
> > + break;
> > + freezable_schedule_timeout_interruptible(timeout);
> > + timeout *= 2;
> > + timeout = min_t(unsigned long,
> > NFS4_LOCK_MAXTIMEOUT, timeout);
> > + status = -ERESTARTSYS;
> > + if (signalled())
> > + break;
> > + } while(status < 0);
>
> Can it ever be >= 0 here? Why not just use 'while (!signalled())'?
>
Good point. I'll make that change.
Thanks,
> >
> > + return status;
> > +}
> > +
> > static int
> > nfs4_proc_lock(struct file *filp, int cmd, struct file_lock
> > *request)
> > {
> > struct nfs_open_context *ctx;
> > struct nfs4_state *state;
> > - unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
> > int status;
> >
> > /* verify open state */
> > @@ -6232,16 +6238,7 @@ nfs4_proc_lock(struct file *filp, int cmd,
> > struct file_lock *request)
> > if (status != 0)
> > return status;
> >
> > - do {
> > - status = nfs4_proc_setlk(state, cmd, request);
> > - if ((status != -EAGAIN) || IS_SETLK(cmd))
> > - break;
> > - timeout = nfs4_set_lock_task_retry(timeout);
> > - status = -ERESTARTSYS;
> > - if (signalled())
> > - break;
> > - } while(status < 0);
> > - return status;
> > + return nfs4_retry_setlk(state, cmd, request);
> > }
> >
> > int nfs4_lock_delegation_recall(struct file_lock *fl, struct
> > nfs4_state *state, const nfs4_stateid *stateid)
> >
--
Jeff Layton <[email protected]>
DQo+IE9uIFNlcCAxNiwgMjAxNiwgYXQgMTc6NDYsIEplZmYgTGF5dG9uIDxqbGF5dG9uQHJlZGhh
dC5jb20+IHdyb3RlOg0KPiANCj4gT24gRnJpLCAyMDE2LTA5LTE2IGF0IDIxOjE0ICswMDAwLCBU
cm9uZCBNeWtsZWJ1c3Qgd3JvdGU6DQo+Pj4gDQo+Pj4+PiBPbiBTZXAgMTYsIDIwMTYsIGF0IDE2
OjI3LCBKZWZmIExheXRvbiA8amxheXRvbkByZWRoYXQuY29tPiB3cm90ZToNCj4+PiANCj4+PiBX
ZSBtYXkgZW5kIHVwIGluIGhlcmUgd2l0aCBhIEZMX0ZMT0NLIGxvY2sgcmVxdWVzdC4gV2UgdHJh
bnNsYXRlIHRob3NlDQo+Pj4gdG8gd2hvbGUtZmlsZSBORlN2NCBsb2NrcyBhbmQgc2VuZCB0aGVt
IG9uIHRvIHRoZSBzZXJ2ZXIsIHNvIHdlIG5lZWQgdG8NCj4+PiB2ZXJpZnkgdGhhdCB0aGUgc2Vy
dmVyIHN1cHBvcnRzIHRoZW0gbm8gbWF0dGVyIHdoYXQgc29ydCBvZiBsb2NrIHJlcXVlc3QNCj4+
PiB0aGlzIGlzLg0KPj4+IA0KPj4+Pj4gU2lnbmVkLW9mZi1ieTogSmVmZiBMYXl0b24gPGpsYXl0
b25AcmVkaGF0LmNvbT4NCj4+PiAtLS0NCj4+PiBmcy9uZnMvbmZzNHByb2MuYyB8IDMgKy0tDQo+
Pj4gMSBmaWxlIGNoYW5nZWQsIDEgaW5zZXJ0aW9uKCspLCAyIGRlbGV0aW9ucygtKQ0KPj4+IA0K
Pj4+IGRpZmYgLS1naXQgYS9mcy9uZnMvbmZzNHByb2MuYyBiL2ZzL25mcy9uZnM0cHJvYy5jDQo+
Pj4gaW5kZXggOWQzODM2NjY2NmY0Li5hMGYyNTE4NWM3OGMgMTAwNjQ0DQo+Pj4gLS0tIGEvZnMv
bmZzL25mczRwcm9jLmMNCj4+PiArKysgYi9mcy9uZnMvbmZzNHByb2MuYw0KPj4+IEBAIC02MTM1
LDggKzYxMzUsNyBAQCBzdGF0aWMgaW50IF9uZnM0X3Byb2Nfc2V0bGsoc3RydWN0IG5mczRfc3Rh
dGUgKnN0YXRlLCBpbnQgY21kLCBzdHJ1Y3QgZmlsZV9sb2NrDQo+Pj4gCXVuc2lnbmVkIGNoYXIg
ZmxfZmxhZ3MgPSByZXF1ZXN0LT5mbF9mbGFnczsNCj4+PiAJaW50IHN0YXR1cyA9IC1FTk9MQ0s7
DQo+Pj4gDQo+Pj4+PiAtCWlmICgoZmxfZmxhZ3MgJiBGTF9QT1NJWCkgJiYNCj4+Pj4+IC0JCQkh
dGVzdF9iaXQoTkZTX1NUQVRFX1BPU0lYX0xPQ0tTLCAmc3RhdGUtPmZsYWdzKSkNCj4+Pj4+ICsJ
aWYgKCF0ZXN0X2JpdChORlNfU1RBVEVfUE9TSVhfTE9DS1MsICZzdGF0ZS0+ZmxhZ3MpKQ0KPj4+
Pj4gCQlnb3RvIG91dDsNCj4+PiAJLyogSXMgdGhpcyBhIGRlbGVnYXRlZCBvcGVuPyAqLw0KPj4+
IAlzdGF0dXMgPSBuZnM0X3NldF9sb2NrX3N0YXRlKHN0YXRlLCByZXF1ZXN0KTsNCj4+PiAtLSAN
Cj4+PiAyLjcuNA0KPj4gDQo+PiBUaGUgYWJpbGl0eSB0byBzdXBwb3J0IEZMX0ZMT0NLIGxvY2tz
IGRvZXMgbm90IGRlcGVuZCBvbiB0aGUgc2VydmVy4oCZcw0KPj4gc3VwcG9ydCBmb3IgUE9TSVgg
bG9ja2luZyBzZW1hbnRpY3MuIEZMX0ZMT0NLIGNhbiBhbHNvIHVzZSBzdGFja2VkDQo+PiBsb2Nr
IHNlbWFudGljcywgcHJlY2lzZWx5IGJlY2F1c2UgdGhleSBhbHdheXMgY292ZXIgdGhlIHdob2xl
IGZpbGUuDQo+IA0KPiBPaCEgSSBoYWQgYWx3YXlzIHRob3VnaHQgdGhpcyBmbGFncyBhYnNlbmNl
IGJhc2ljYWxseSBtZWFudCAiSSBkb24ndA0KPiBzdXBwb3J0IGZpbGUgbG9ja2luZyBhdCBhbGws
IHNvIGRvbid0IGJvdGhlciBzZW5kaW5nIGFueSBMT0NLDQo+IHJlcXVlc3RzIi4gTm93IHRoYXQg
SSBsb29rIHRob3VnaCwgYWxsIFJGQzU2NjEgc2F5cyBpczoNCj4gDQo+IA0KPiAgICBvICBPUEVO
NF9SRVNVTFRfTE9DS1RZUEVfUE9TSVggaW5kaWNhdGVzIHRoYXQgdGhlIHNlcnZlcidzIGJ5dGUt
cmFuZ2UNCj4gICAgICAgbG9ja2luZyBiZWhhdmlvciBzdXBwb3J0cyB0aGUgY29tcGxldGUgc2V0
IG9mIFBPU0lYIGxvY2tpbmcNCj4gICAgICAgdGVjaG5pcXVlcyBbMjRdLiAgRnJvbSB0aGlzLCB0
aGUgY2xpZW50IGNhbiBjaG9vc2UgdG8gbWFuYWdlIGJ5dGUtDQo+ICAgICAgIHJhbmdlIGxvY2tp
bmcgc3RhdGUgaW4gYSB3YXkgdG8gaGFuZGxlIGEgbWlzbWF0Y2ggb2YgYnl0ZS1yYW5nZQ0KPiAg
ICAgICBsb2NraW5nIG1hbmFnZW1lbnQuDQoNClJpZ2h0LiBZb3UgYWxzbyBoYXZlOg0KDQoxNS4x
LjguNy4gIE5GUzRFUlJfTE9DS19SQU5HRSAoRXJyb3IgQ29kZSAxMDAyOCkNCg0KICAgQSBMT0NL
IG9wZXJhdGlvbiBpcyBvcGVyYXRpbmcgb24gYSByYW5nZSB0aGF0IG92ZXJsYXBzIGluIHBhcnQg
YQ0KICAgY3VycmVudGx5IGhlbGQgYnl0ZS1yYW5nZSBsb2NrIGZvciB0aGUgY3VycmVudCBsb2Nr
LW93bmVyIGFuZCBkb2VzDQogICBub3QgcHJlY2lzZWx5IG1hdGNoIGEgc2luZ2xlIHN1Y2ggYnl0
ZS1yYW5nZSBsb2NrIHdoZXJlIHRoZSBzZXJ2ZXINCiAgIGRvZXMgbm90IHN1cHBvcnQgdGhpcyB0
eXBlIG9mIHJlcXVlc3QsIGFuZCB0aHVzIGRvZXMgbm90IGltcGxlbWVudA0KICAgUE9TSVggbG9j
a2luZyBzZW1hbnRpY3MgWzI0XS4gIFNlZSBTZWN0aW9ucyAxOC4xMC40LCAxOC4xMS40LCBhbmQN
CiAgIDE4LjEyLjQgZm9yIGEgZGlzY3Vzc2lvbiBvZiBob3cgdGhpcyBhcHBsaWVzIHRvIExPQ0ss
IExPQ0tULCBhbmQNCiAgIExPQ0tVIHJlc3BlY3RpdmVseS4NCg0KDQo+IA0KPiBJZiB0aGlzIGZs
YWcgaXNuJ3QgdGhlcmUsIEkgZ3Vlc3Mgd2UgY2FuJ3QgaW5mZXIgYW55dGhpbmcgYWJvdXQgaG93
IHRoZQ0KPiBzZXJ2ZXIncyBsb2NrcyBhcmUgaW1wbGVtZW50ZWQuIFRoYXQncyBqdXN0IHN1cGVy
Lg0KPiANCj4gU28sIG9rLiBJZiB5b3UgdGhpbmsgdGhpcyBsb2dpYyBpcyBtb3JlIGNvcnJlY3Qg
YXMtaXMsIHRoZW4gSSdtIGZpbmUNCj4gd2l0aCBkcm9wcGluZyB0aGlzIHBhdGNoLiBUaGlzIGNo
ZWNrIGdldHMgbW92ZWQgaW4gYSBsYXRlciBwYXRjaA0KPiB0aG91Z2gsIHNvIEknbGwgbmVlZCB0
byBmaXggdGhhdCB1cCBhcyB3ZWxsLg0KDQpUaGUgY3VycmVudCBjb2RlIHdhcyBkZXNpZ25lZCwg
YXMgSSBzYWlkLCB0byBhbGxvdyBmbG9jaygpIGxvY2tzIGF0IGxlYXN0IHRvIHdvcmsgd2l0aCBh
IHNlcnZlciB0aGF0IG9ubHkgc3VwcG9ydHMgV2luZG93cyBzdGFja2VkIGxvY2tzIGFuZC9vciBu
YXRpdmUgQlNEIGxvY2tzLg0KDQo=