refcount_t is better for reference counters since its
implementation can prevent overflows.
So convert atomic_t ref counters to refcount_t.
Signed-off-by: Chuhong Yuan <[email protected]>
---
Changes in v2:
- Convert refcount from 0-base to 1-base.
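
For reference, a minimal sketch (editor's illustration, not part of the
diff below) of the refcount_t behaviour that the conversion and the
1-based counting rely on; smt_refcnt_sketch() is a hypothetical helper:

#include <linux/refcount.h>

/*
 * refcount_t saturates instead of wrapping, so an increment past the
 * maximum WARNs and pins the counter rather than overflowing back to a
 * small value -- that is the overflow protection mentioned above.
 *
 * refcount_t also treats 0 as "object gone": refcount_inc() on a zero
 * counter triggers a use-after-free warning instead of quietly reviving
 * the object.  A table slot that must stay reusable therefore idles at
 * 1 instead of 0, which is the 0-base to 1-base change in v2.
 */
static void smt_refcnt_sketch(refcount_t *refcnt)
{
	refcount_set(refcnt, 1);	/* slot free, but may be claimed again */
	refcount_set(refcnt, 2);	/* first user claims the slot */
	refcount_inc(refcnt);		/* a second user takes a reference */
	refcount_dec(refcnt);		/* one user drops it: back to 2, still in use */
}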
drivers/net/ethernet/chelsio/cxgb4/smt.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
index eaf1fb74689c..343887fa52aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
@@ -57,7 +57,7 @@ struct smt_data *t4_init_smt(void)
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock);
- atomic_set(&s->smtab[i].refcnt, 0);
+ refcount_set(&s->smtab[i].refcnt, 1);
}
return s;
}
@@ -68,7 +68,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
- if (atomic_read(&e->refcnt) == 0) {
+ if (refcount_read(&e->refcnt) == 1) {
if (!first_free)
first_free = e;
} else {
@@ -98,7 +98,7 @@ static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
static void t4_smte_free(struct smt_entry *e)
{
spin_lock_bh(&e->lock);
- if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ if (refcount_read(&e->refcnt) == 1) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED;
}
spin_unlock_bh(&e->lock);
@@ -111,7 +111,7 @@ static void t4_smte_free(struct smt_entry *e)
*/
void cxgb4_smt_release(struct smt_entry *e)
{
- if (atomic_dec_and_test(&e->refcnt))
+ if (refcount_dec_and_test(&e->refcnt))
t4_smte_free(e);
}
EXPORT_SYMBOL(cxgb4_smt_release);
@@ -215,14 +215,14 @@ static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
e = find_or_alloc_smte(s, smac);
if (e) {
spin_lock(&e->lock);
- if (!atomic_read(&e->refcnt)) {
- atomic_set(&e->refcnt, 1);
+ if (refcount_read(&e->refcnt) == 1) {
+ refcount_set(&e->refcnt, 2);
e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e);
} else {
- atomic_inc(&e->refcnt);
+ refcount_inc(&e->refcnt);
}
spin_unlock(&e->lock);
}
--
2.20.1
From: Chuhong Yuan <[email protected]>
Date: Fri, 2 Aug 2019 16:35:47 +0800
> refcount_t is better for reference counters since its
> implementation can prevent overflows.
> So convert atomic_t ref counters to refcount_t.
>
> Signed-off-by: Chuhong Yuan <[email protected]>
> ---
> Changes in v2:
> - Convert refcount from 0-base to 1-base.
The existing code is buggy and should be fixed before you start
converting it.
> @@ -111,7 +111,7 @@ static void t4_smte_free(struct smt_entry *e)
> */
> void cxgb4_smt_release(struct smt_entry *e)
> {
> - if (atomic_dec_and_test(&e->refcnt))
> + if (refcount_dec_and_test(&e->refcnt))
> t4_smte_free(e);
This runs without any locking and therefore:
> if (e) {
> spin_lock(&e->lock);
> - if (!atomic_read(&e->refcnt)) {
> - atomic_set(&e->refcnt, 1);
> + if (refcount_read(&e->refcnt) == 1) {
> + refcount_set(&e->refcnt, 2);
This test is not safe, since the reference count can asynchronously drop
to zero in the release path above, outside of any locks.
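
To illustrate, one possible interleaving (editor's sketch only; the
counter values assume the pre-patch 0-based counting quoted above):

/*
 * CPU0: t4_smt_alloc_switching()       CPU1: cxgb4_smt_release()
 * ------------------------------       -------------------------
 * spin_lock(&e->lock);
 * atomic_read(&e->refcnt) == 1
 *   -> entry looks busy, so plan to
 *      take another reference
 *                                      atomic_dec_and_test()
 *                                        -> count hits 0 with no lock
 *                                           held; the entry is now
 *                                           logically free
 * atomic_inc(&e->refcnt);
 *   -> increments a counter that already reached zero; with refcount_t
 *      this is exactly the inc-from-zero case that WARNs as a
 *      use-after-free
 * spin_unlock(&e->lock);
 */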
Then you'll need to add locking, and as a result the need for an atomic
counter goes away and just a normal int can be used.
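
A minimal sketch of that direction, reusing the driver's existing names
(editor's illustration only, not a tested replacement):

void cxgb4_smt_release(struct smt_entry *e)
{
	spin_lock_bh(&e->lock);
	if (--e->refcnt == 0)	/* refcnt becomes a plain int guarded by e->lock */
		e->state = SMT_STATE_UNUSED;
	spin_unlock_bh(&e->lock);
}

The read-then-set in t4_smt_alloc_switching() would then become an
ordinary increment of the plain counter under the same lock.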