2023-01-15 10:34:51

by Dawei Li

Subject: [PATCH 3/6] ksmbd: replace rwlock with rcu for concurrent access on conn list

Currently, concurrent access to the conn list is protected by an rwlock.
Replace the rwlock with RCU primitives: readers become lockless, while
writers are still serialized by a spinlock, which should improve
read-side performance.

Signed-off-by: Dawei Li <[email protected]>
---
 fs/ksmbd/connection.c | 30 +++++++++++++++---------------
 fs/ksmbd/connection.h |  1 -
 fs/ksmbd/smb2pdu.c    | 14 +++++++-------
 3 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index fd0a288af299..36d1da273edd 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -20,7 +20,7 @@ static DEFINE_MUTEX(init_lock);
 static struct ksmbd_conn_ops default_conn_ops;
 
 LIST_HEAD(conn_list);
-DEFINE_RWLOCK(conn_list_lock);
+DEFINE_SPINLOCK(conn_list_lock);
 
 /**
  * ksmbd_conn_free() - free resources of the connection instance
@@ -32,9 +32,9 @@ DEFINE_RWLOCK(conn_list_lock);
  */
 void ksmbd_conn_free(struct ksmbd_conn *conn)
 {
-	write_lock(&conn_list_lock);
-	list_del(&conn->conns_list);
-	write_unlock(&conn_list_lock);
+	spin_lock(&conn_list_lock);
+	list_del_rcu(&conn->conns_list);
+	spin_unlock(&conn_list_lock);
 
 	xa_destroy(&conn->sessions);
 	kvfree(conn->request_buf);
@@ -84,9 +84,9 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
 	spin_lock_init(&conn->llist_lock);
 	INIT_LIST_HEAD(&conn->lock_list);
 
-	write_lock(&conn_list_lock);
-	list_add(&conn->conns_list, &conn_list);
-	write_unlock(&conn_list_lock);
+	spin_lock(&conn_list_lock);
+	list_add_rcu(&conn->conns_list, &conn_list);
+	spin_unlock(&conn_list_lock);
 	return conn;
 }
 
@@ -95,15 +95,15 @@ bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
 	struct ksmbd_conn *t;
 	bool ret = false;
 
-	read_lock(&conn_list_lock);
-	list_for_each_entry(t, &conn_list, conns_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(t, &conn_list, conns_list) {
 		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
 			continue;
 
 		ret = true;
 		break;
 	}
-	read_unlock(&conn_list_lock);
+	rcu_read_unlock();
 	return ret;
 }
 
@@ -402,8 +402,8 @@ static void stop_sessions(void)
 	struct ksmbd_transport *t;
 
 again:
-	read_lock(&conn_list_lock);
-	list_for_each_entry(conn, &conn_list, conns_list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(conn, &conn_list, conns_list) {
 		struct task_struct *task;
 
 		t = conn->transport;
@@ -413,12 +413,12 @@ static void stop_sessions(void)
 				    task->comm, task_pid_nr(task));
 		conn->status = KSMBD_SESS_EXITING;
 		if (t->ops->shutdown) {
-			read_unlock(&conn_list_lock);
+			rcu_read_unlock();
 			t->ops->shutdown(t);
-			read_lock(&conn_list_lock);
+			rcu_read_lock();
 		}
 	}
-	read_unlock(&conn_list_lock);
+	rcu_read_unlock();
 
 	if (!list_empty(&conn_list)) {
 		schedule_timeout_interruptible(HZ / 10); /* 100ms */
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
index 3643354a3fa7..e41f33a2da7c 100644
--- a/fs/ksmbd/connection.h
+++ b/fs/ksmbd/connection.h
@@ -139,7 +139,6 @@ struct ksmbd_transport {
 #define KSMBD_TCP_PEER_SOCKADDR(c)	((struct sockaddr *)&((c)->peer_addr))
 
 extern struct list_head conn_list;
-extern rwlock_t conn_list_lock;
 
 bool ksmbd_conn_alive(struct ksmbd_conn *conn);
 void ksmbd_conn_wait_idle(struct ksmbd_conn *conn);
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 771e045c8d4a..c60cfb360f42 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -6915,8 +6915,8 @@ int smb2_lock(struct ksmbd_work *work)
 
 		nolock = 1;
 		/* check locks in connection list */
-		read_lock(&conn_list_lock);
-		list_for_each_entry(conn, &conn_list, conns_list) {
+		rcu_read_lock();
+		list_for_each_entry_rcu(conn, &conn_list, conns_list) {
 			spin_lock(&conn->llist_lock);
 			list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
 				if (file_inode(cmp_lock->fl->fl_file) !=
@@ -6932,7 +6932,7 @@ int smb2_lock(struct ksmbd_work *work)
 						list_del(&cmp_lock->flist);
 						list_del(&cmp_lock->clist);
 						spin_unlock(&conn->llist_lock);
-						read_unlock(&conn_list_lock);
+						rcu_read_unlock();
 
 						locks_free_lock(cmp_lock->fl);
 						kfree(cmp_lock);
@@ -6954,7 +6954,7 @@ int smb2_lock(struct ksmbd_work *work)
 				    cmp_lock->start > smb_lock->start &&
 				    cmp_lock->start < smb_lock->end) {
 					spin_unlock(&conn->llist_lock);
-					read_unlock(&conn_list_lock);
+					rcu_read_unlock();
 					pr_err("previous lock conflict with zero byte lock range\n");
 					goto out;
 				}
@@ -6963,7 +6963,7 @@ int smb2_lock(struct ksmbd_work *work)
 				    smb_lock->start > cmp_lock->start &&
 				    smb_lock->start < cmp_lock->end) {
 					spin_unlock(&conn->llist_lock);
-					read_unlock(&conn_list_lock);
+					rcu_read_unlock();
 					pr_err("current lock conflict with zero byte lock range\n");
 					goto out;
 				}
@@ -6974,14 +6974,14 @@ int smb2_lock(struct ksmbd_work *work)
 				      cmp_lock->end >= smb_lock->end)) &&
 				    !cmp_lock->zero_len && !smb_lock->zero_len) {
 					spin_unlock(&conn->llist_lock);
-					read_unlock(&conn_list_lock);
+					rcu_read_unlock();
 					pr_err("Not allow lock operation on exclusive lock range\n");
 					goto out;
 				}
 			}
 			spin_unlock(&conn->llist_lock);
 		}
-		read_unlock(&conn_list_lock);
+		rcu_read_unlock();
 out_check_cl:
 		if (smb_lock->fl->fl_type == F_UNLCK && nolock) {
 			pr_err("Try to unlock nolocked range\n");
--
2.25.1
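
For readers following along, the core of the conversion reduces to the
pattern below: a minimal standalone sketch (conn, conn_add() and
conn_lookup() are illustrative names, not ksmbd identifiers). Writers keep
mutual exclusion through a spinlock and use the _rcu list helpers; readers
traverse the list locklessly inside an RCU read-side critical section.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct conn {
	struct list_head conns_list;
	int id;
};

static LIST_HEAD(conn_list);
static DEFINE_SPINLOCK(conn_list_lock);	/* serializes writers only */

/* Writer: still takes the spinlock, but uses the _rcu list helper so
 * that concurrent lockless readers always see a consistent list. */
static void conn_add(struct conn *c)
{
	spin_lock(&conn_list_lock);
	list_add_rcu(&c->conns_list, &conn_list);
	spin_unlock(&conn_list_lock);
}

/* Reader: no lock taken; rcu_read_lock() only marks a read-side
 * critical section and never blocks or excludes writers. */
static bool conn_lookup(int id)
{
	struct conn *c;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &conn_list, conns_list) {
		if (c->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

The expected win is on the read side: rcu_read_lock() is a no-op or a
preempt-count increment depending on the kernel configuration, whereas
read_lock() is an atomic operation that bounces the lock's cache line
between CPUs.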


2023-01-30 04:12:54

by Sergey Senozhatsky

Subject: Re: [PATCH 3/6] ksmbd: replace rwlock with rcu for concurrent access on conn list

On (23/01/15 18:32), Dawei Li wrote:
> Currently, concurrent access to the conn list is protected by an rwlock.
> Replace the rwlock with RCU primitives: readers become lockless, while
> writers are still serialized by a spinlock, which should improve
> read-side performance.

Why is this a problem and what proves that this patch improves/solves
it? Numbers/benchmarks/analysis?

2023-01-30 04:15:46

by Sergey Senozhatsky

Subject: Re: [PATCH 3/6] ksmbd: replace rwlock with rcu for concurrent access on conn list

On (23/01/15 18:32), Dawei Li wrote:
>
> void ksmbd_conn_free(struct ksmbd_conn *conn)
> {
> -	write_lock(&conn_list_lock);
> -	list_del(&conn->conns_list);
> -	write_unlock(&conn_list_lock);
> +	spin_lock(&conn_list_lock);
> +	list_del_rcu(&conn->conns_list);
> +	spin_unlock(&conn_list_lock);
>
> 	xa_destroy(&conn->sessions);
> 	kvfree(conn->request_buf);

From a quick look this does not seem like a correct RCU usage. E.g.
where do you wait for grace periods and synchronize readers/writers?
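
The gap in question sits between list_del_rcu() and the frees that follow
it: a reader that entered its RCU read-side critical section before the
removal may still be dereferencing the connection. A correct teardown has
to let a grace period elapse first; roughly the following shape (a sketch
of the requirement, not the posted code):

void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	spin_lock(&conn_list_lock);
	list_del_rcu(&conn->conns_list);
	spin_unlock(&conn_list_lock);

	/* Wait for every pre-existing RCU reader of conn_list to exit
	 * its read-side critical section; only then is it safe to free
	 * memory those readers may still be dereferencing. */
	synchronize_rcu();

	/* Remaining teardown as in the original function. */
	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn);
}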

2023-01-30 14:17:09

by Dawei Li

Subject: Re: [PATCH 3/6] ksmbd: replace rwlock with rcu for concurrent access on conn list

Hi Sergey,

Thanks for reviewing,

On Mon, Jan 30, 2023 at 01:15:35PM +0900, Sergey Senozhatsky wrote:
> On (23/01/15 18:32), Dawei Li wrote:
> >
> > void ksmbd_conn_free(struct ksmbd_conn *conn)
> > {
> > -	write_lock(&conn_list_lock);
> > -	list_del(&conn->conns_list);
> > -	write_unlock(&conn_list_lock);
> > +	spin_lock(&conn_list_lock);
> > +	list_del_rcu(&conn->conns_list);
> > +	spin_unlock(&conn_list_lock);
synchronize_rcu();
> >
> > 	xa_destroy(&conn->sessions);
> > 	kvfree(conn->request_buf);
>
> From a quick look this does not seem like a correct RCU usage. E.g.
> where do you wait for grace periods and synchronize readers/writers?

Nice catch, I totally messed it up. Thanks!

At first glance, I assume synchronize_rcu() will do the job if sleeping
is OK?

Steve, Namjae,
Please drop this buggy patch from ksmbd-for-next.

Thanks,
Dawei

2023-01-30 15:44:02

by Steve French

Subject: Re: [PATCH 3/6] ksmbd: replace rwlock with rcu for concurrent access on conn list

On Mon, Jan 30, 2023 at 8:17 AM Dawei Li <[email protected]> wrote:
...
> Steve, Namjae,
> Please drop this buggy patch from ksmbd-for-next.
>
> Thanks,
> Dawei

Done


--
Thanks,

Steve

2023-01-31 02:39:43

by Sergey Senozhatsky

Subject: Re: [PATCH 3/6] ksmbd: replace rwlock with rcu for concurrent access on conn list

Hi,

On (23/01/30 22:16), Dawei Li wrote:
> Hi Sergey,
>
> Thanks for reviewing,
>
> On Mon, Jan 30, 2023 at 01:15:35PM +0900, Sergey Senozhatsky wrote:
> > On (23/01/15 18:32), Dawei Li wrote:
> > >
> > > void ksmbd_conn_free(struct ksmbd_conn *conn)
> > > {
> > > -	write_lock(&conn_list_lock);
> > > -	list_del(&conn->conns_list);
> > > -	write_unlock(&conn_list_lock);
> > > +	spin_lock(&conn_list_lock);
> > > +	list_del_rcu(&conn->conns_list);
> > > +	spin_unlock(&conn_list_lock);
> synchronize_rcu();
> > >
> > > 	xa_destroy(&conn->sessions);
> > > 	kvfree(conn->request_buf);
> >
> > From a quick look this does not seem like a correct RCU usage. E.g.
> > where do you wait for grace periods and synchronize readers/writers?
>
> Nice catch, I totally messed it up. Thanks!
>
> At first glance, I assume synchronize_rcu() will do the job if sleeping
> is OK?

Yes, synchronize_rcu() will sleep (schedule()) and wait for the grace
period to expire (and synchronize with all the RCU readers). RCU is
good for cases when writes are rare, which may not be the case with
ksmbd.
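
If sleeping at the free site is not acceptable, the usual alternative is
to defer the free past the grace period instead of waiting for it. A
minimal sketch, assuming a struct rcu_head field (called rcu below) is
added to ksmbd_conn for this purpose:

static void conn_free_rcu(struct rcu_head *head)
{
	struct ksmbd_conn *conn = container_of(head, struct ksmbd_conn, rcu);

	/* Runs after a grace period; kvfree() is documented as safe
	 * from this (non-NMI) context. */
	xa_destroy(&conn->sessions);
	kvfree(conn->request_buf);
	kfree(conn);
}

void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	spin_lock(&conn_list_lock);
	list_del_rcu(&conn->conns_list);
	spin_unlock(&conn_list_lock);

	/* No sleeping here: call_rcu() queues the callback and returns. */
	call_rcu(&conn->rcu, conn_free_rcu);
}

When the callback would do nothing but kfree() the object, kfree_rcu(conn,
rcu) expresses the same deferral without a helper function.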

I really want to see benchmarks; why do we want to remove the rwlock?