One of the main uses of spin_is_locked() is to assert, for debugging purposes, that
a lock is held when a function is called. lockdep_assert_held() is better suited to
this, because spin_is_locked() only reports that *somebody* holds the lock, while
lockdep_assert_held() checks that the current context holds it. This patch series
replaces such debugging uses of spin_is_locked() with calls to lockdep_assert_held().
An ulterior motive is to reduce the number of uses of spin_is_locked() in the kernel,
as a step towards possibly eliminating it.
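A minimal before/after sketch of the transformation, using a made-up foo structure
rather than code from any of the files below:

	struct foo {
		spinlock_t lock;
		int count;
	};

	/* Before: ad-hoc assertion built on spin_is_locked(). */
	static void foo_update_old(struct foo *f)
	{
		BUG_ON(!spin_is_locked(&f->lock));
		f->count++;
	}

	/* After: the common lockdep idiom, checked whenever CONFIG_LOCKDEP=y. */
	static void foo_update(struct foo *f)
	{
		lockdep_assert_held(&f->lock);
		f->count++;
	}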
Thanks,
Lance
arch/x86/pci/i386.c | 2 +-
drivers/hv/hv_balloon.c | 2 +-
drivers/misc/sgi-xp/xpc_channel.c | 6 +++---
drivers/misc/sgi-xp/xpc_sn2.c | 2 +-
drivers/misc/sgi-xp/xpc_uv.c | 2 +-
drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +--
drivers/net/ethernet/intel/igbvf/mbx.c | 4 ++--
drivers/net/ethernet/sfc/efx.c | 2 +-
drivers/net/ethernet/smsc/smsc911x.h | 2 +-
drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2 +-
drivers/scsi/snic/snic_scsi.c | 4 ++--
fs/userfaultfd.c | 2 +-
kernel/futex.c | 4 ++--
kernel/locking/mutex-debug.c | 4 ++--
mm/khugepaged.c | 4 ++--
mm/swap.c | 3 +--
net/netfilter/ipset/ip_set_hash_gen.h | 2 +-
security/apparmor/file.c | 2 +-
virt/kvm/arm/vgic/vgic.c | 12 ++++++------
19 files changed, 31 insertions(+), 33 deletions(-)
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Cliff Whickman <[email protected]>
Cc: Robin Holt <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
---
drivers/misc/sgi-xp/xpc_channel.c | 6 +++---
drivers/misc/sgi-xp/xpc_sn2.c | 2 +-
drivers/misc/sgi-xp/xpc_uv.c | 2 +-
3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 05a890ce2ab8..8e6607fc8a67 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -28,7 +28,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
enum xp_retval ret;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) {
@@ -82,7 +82,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_DISCONNECTING))
return;
@@ -755,7 +755,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return;
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 5a12d2a54049..0ae69b9390ce 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -1671,7 +1671,7 @@ xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
{
struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
ch_sn2->remote_msgqueue_pa = 0;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 340b44d9e8cf..0441abe87880 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -1183,7 +1183,7 @@ xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
kfree(ch_uv->cached_notify_gru_mq_desc);
ch_uv->cached_notify_gru_mq_desc = NULL;
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Solarflare linux maintainers <[email protected]>
Cc: Edward Cree <[email protected]>
Cc: Bert Kenward <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: <[email protected]>
---
drivers/net/ethernet/sfc/efx.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 330233286e78..f40c3d5ca413 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3193,7 +3193,7 @@ struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
{
u32 hash = efx_filter_spec_hash(spec);
- WARN_ON(!spin_is_locked(&efx->rps_hash_lock));
+ lockdep_assert_held(&efx->rps_hash_lock);
if (!efx->rps_hash_table)
return NULL;
return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Steve Glendinning <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: <[email protected]>
---
drivers/net/ethernet/smsc/smsc911x.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 8d75508acd2b..51b2fc1a395f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -67,7 +67,7 @@
#ifdef CONFIG_DEBUG_SPINLOCK
#define SMSC_ASSERT_MAC_LOCK(pdata) \
- WARN_ON_SMP(!spin_is_locked(&pdata->mac_lock))
+ lockdep_assert_held(&pdata->mac_lock)
#else
#define SMSC_ASSERT_MAC_LOCK(pdata) do {} while (0)
#endif /* CONFIG_DEBUG_SPINLOCK */
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Will Deacon <[email protected]>
---
kernel/locking/mutex-debug.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 9aa713629387..771d4ca96dda 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -36,7 +36,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
- SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ lockdep_assert_held(&lock->wait_lock);
DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
struct task_struct *task)
{
- SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+ lockdep_assert_held(&lock->wait_lock);
/* Mark the current thread as blocked on the lock: */
task->blocked_on = waiter;
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: John Johansen <[email protected]>
Cc: James Morris <[email protected]>
Cc: "Serge E. Hallyn" <[email protected]>
Cc: <[email protected]>
---
security/apparmor/file.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 4285943f7260..d0afed9ebd0e 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -496,7 +496,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
/* update caching of label on file_ctx */
spin_lock(&fctx->lock);
old = rcu_dereference_protected(fctx->label,
- spin_is_locked(&fctx->lock));
+ lockdep_is_held(&fctx->lock));
l = aa_label_merge(old, label, GFP_ATOMIC);
if (l) {
if (l != old) {
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Christoffer Dall <[email protected]>
Cc: Marc Zyngier <[email protected]>
Cc: Eric Auger <[email protected]>
Cc: [email protected]
Cc: <[email protected]>
---
virt/kvm/arm/vgic/vgic.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7cfdfbc910e0..50e25438fb3c 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
*/
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+ lockdep_assert_held(&irq->irq_lock);
/* If the interrupt is active, it must stay on the current vcpu */
if (irq->active)
@@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+ lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}
@@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
{
struct kvm_vcpu *vcpu;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+ lockdep_assert_held(&irq->irq_lock);
retry:
vcpu = vgic_target_oracle(irq);
@@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
struct vgic_irq *irq, int lr)
{
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+ lockdep_assert_held(&irq->irq_lock);
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_populate_lr(vcpu, irq, lr);
@@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
*multi_sgi = false;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+ lockdep_assert_held(&vgic_cpu->ap_list_lock);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
int w;
@@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
bool multi_sgi;
u8 prio = 0xff;
- DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+ lockdep_assert_held(&vgic_cpu->ap_list_lock);
count = compute_ap_list_depth(vcpu, &multi_sgi);
if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Pablo Neira Ayuso <[email protected]>
Cc: Jozsef Kadlecsik <[email protected]>
Cc: Florian Westphal <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
---
net/netfilter/ipset/ip_set_hash_gen.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 8a33dac4e805..e287da68d5fa 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -15,7 +15,7 @@
#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
#define ipset_dereference_protected(p, set) \
- __ipset_dereference_protected(p, spin_is_locked(&(set)->lock))
+ __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock))
#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Alexander Viro <[email protected]>
Cc: <[email protected]>
---
fs/userfaultfd.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index bfa0ec69f924..a20244ff0027 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -926,7 +926,7 @@ static inline struct userfaultfd_wait_queue *find_userfault_in(
wait_queue_entry_t *wq;
struct userfaultfd_wait_queue *uwq;
- VM_BUG_ON(!spin_is_locked(&wqh->lock));
+ lockdep_assert_held(&wqh->lock);
uwq = NULL;
if (!waitqueue_active(wqh))
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Karan Tilak Kumar <[email protected]>
Cc: Sesidhar Baddela <[email protected]>
Cc: "James E.J. Bottomley" <[email protected]>
Cc: "Martin K. Petersen" <[email protected]>
Cc: <[email protected]>
---
drivers/scsi/snic/snic_scsi.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d9b2e46424aa..42e485139fc9 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -2001,7 +2001,7 @@ snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
}
dr_failed:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
if (rqi)
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -2604,7 +2604,7 @@ snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
ret = SUCCESS;
skip_internal_abts:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
spin_unlock_irqrestore(io_lock, flags);
return ret;
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Cc: Yang Shi <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Shakeel Butt <[email protected]>
Cc: <[email protected]>
---
mm/khugepaged.c | 4 ++--
mm/swap.c | 3 +--
2 files changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index a31d740e6cd1..80f12467ccb3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
{
struct mm_struct *mm = mm_slot->mm;
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+ lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_test_exit(mm)) {
/* free mm_slot */
@@ -1665,7 +1665,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
int progress = 0;
VM_BUG_ON(!pages);
- VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
+ lockdep_assert_held(&khugepaged_mm_lock);
if (khugepaged_scan.mm_slot)
mm_slot = khugepaged_scan.mm_slot;
diff --git a/mm/swap.c b/mm/swap.c
index 26fc9b5f1b6c..c89eb442c0bf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -824,8 +824,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
VM_BUG_ON_PAGE(!PageHead(page), page);
VM_BUG_ON_PAGE(PageCompound(page_tail), page);
VM_BUG_ON_PAGE(PageLRU(page_tail), page);
- VM_BUG_ON(NR_CPUS != 1 &&
- !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
+ lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
if (!list)
SetPageLRU(page_tail);
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Daniel Drake <[email protected]>
Cc: Ulrich Kunitz <[email protected]>
Cc: Kalle Valo <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
---
drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 1f6d9f357e57..9ccd780695f0 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -235,7 +235,7 @@ void zd_mac_clear(struct zd_mac *mac)
{
flush_workqueue(zd_workqueue);
zd_chip_clear(&mac->chip);
- ZD_ASSERT(!spin_is_locked(&mac->lock));
+ lockdep_assert_held(&mac->lock);
ZD_MEMCLEAR(mac, sizeof(struct zd_mac));
}
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Jeff Kirsher <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: [email protected]
Cc: <[email protected]>
---
drivers/net/ethernet/intel/igbvf/mbx.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 163e5838f7c2..a3cd7ac48d4b 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
@@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: "K. Y. Srinivasan" <[email protected]>
Cc: Haiyang Zhang <[email protected]>
Cc: Stephen Hemminger <[email protected]>
Cc: <[email protected]>
---
drivers/hv/hv_balloon.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b1b788082793..41631512ae97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -689,7 +689,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__online_page_increment_counters(pg);
__online_page_free(pg);
- WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+ lockdep_assert_held(&dm_device.ha_lock);
dm_device.num_pages_onlined++;
}
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Darren Hart <[email protected]>
---
kernel/futex.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 11fc3bb456d6..3e2de8fc1891 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1365,9 +1365,9 @@ static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
- if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
- || WARN_ON(plist_node_empty(&q->list)))
+ if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
return;
+ lockdep_assert_held(q->lock_ptr);
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Jeff Kirsher <[email protected]>
Cc: "David S. Miller" <[email protected]>
Cc: [email protected]
Cc: <[email protected]>
---
drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ac685ad4d877..8ce3471723d3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1489,8 +1489,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
bool found = false;
int bkt;
- WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
- "Missing mac_filter_hash_lock\n");
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (ether_addr_equal(macaddr, f->macaddr)) {
__i40e_del_filter(vsi, f);
--
2.19.0
lockdep_assert_held() is better suited to checking locking requirements,
since it won't get confused when someone else holds the lock. This is
also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Cc: Bjorn Helgaas <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: <[email protected]>
Cc: <[email protected]>
---
arch/x86/pci/i386.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index ed4ac215305d..24bb58a007de 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -59,7 +59,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
{
struct pcibios_fwaddrmap *map;
- WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
+ lockdep_assert_held(&pcibios_fwaddrmap_lock);
list_for_each_entry(map, &pcibios_fwaddrmappings, list)
if (map->dev == dev)
--
2.19.0
On 10/02/2018 10:39 PM, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: John Johansen <[email protected]>
> Cc: James Morris <[email protected]>
> Cc: "Serge E. Hallyn" <[email protected]>
> Cc: <[email protected]>
Acked-by: John Johansen <[email protected]>
> ---
> security/apparmor/file.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/security/apparmor/file.c b/security/apparmor/file.c
> index 4285943f7260..d0afed9ebd0e 100644
> --- a/security/apparmor/file.c
> +++ b/security/apparmor/file.c
> @@ -496,7 +496,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
> /* update caching of label on file_ctx */
> spin_lock(&fctx->lock);
> old = rcu_dereference_protected(fctx->label,
> - spin_is_locked(&fctx->lock));
> + lockdep_is_held(&fctx->lock));
> l = aa_label_merge(old, label, GFP_ATOMIC);
> if (l) {
> if (l != old) {
>
On 10/3/18 7:38 AM, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
Agreed
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: "Kirill A. Shutemov" <[email protected]>
> Cc: Yang Shi <[email protected]>
> Cc: Matthew Wilcox <[email protected]>
> Cc: Mel Gorman <[email protected]>
> Cc: Vlastimil Babka <[email protected]>
> Cc: Jan Kara <[email protected]>
> Cc: Shakeel Butt <[email protected]>
> Cc: <[email protected]>
Acked-by: Vlastimil Babka <[email protected]>
> ---
> mm/khugepaged.c | 4 ++--
> mm/swap.c | 3 +--
> 2 files changed, 3 insertions(+), 4 deletions(-)
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index a31d740e6cd1..80f12467ccb3 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -1225,7 +1225,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
> {
> struct mm_struct *mm = mm_slot->mm;
>
> - VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
> + lockdep_assert_held(&khugepaged_mm_lock);
>
> if (khugepaged_test_exit(mm)) {
> /* free mm_slot */
> @@ -1665,7 +1665,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
> int progress = 0;
>
> VM_BUG_ON(!pages);
> - VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
> + lockdep_assert_held(&khugepaged_mm_lock);
>
> if (khugepaged_scan.mm_slot)
> mm_slot = khugepaged_scan.mm_slot;
> diff --git a/mm/swap.c b/mm/swap.c
> index 26fc9b5f1b6c..c89eb442c0bf 100644
> --- a/mm/swap.c
> +++ b/mm/swap.c
> @@ -824,8 +824,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
> VM_BUG_ON_PAGE(!PageHead(page), page);
> VM_BUG_ON_PAGE(PageCompound(page_tail), page);
> VM_BUG_ON_PAGE(PageLRU(page_tail), page);
> - VM_BUG_ON(NR_CPUS != 1 &&
> - !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));
> + lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);
>
> if (!list)
> SetPageLRU(page_tail);
>
On Tue, 2 Oct 2018, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Pablo Neira Ayuso <[email protected]>
> Cc: Jozsef Kadlecsik <[email protected]>
> Cc: Florian Westphal <[email protected]>
> Cc: "David S. Miller" <[email protected]>
> Cc: <[email protected]>
> Cc: <[email protected]>
> Cc: <[email protected]>
> ---
> net/netfilter/ipset/ip_set_hash_gen.h | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
Acked-by: Jozsef Kadlecsik <[email protected]>
Best regards,
Jozsef
> diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
> index 8a33dac4e805..e287da68d5fa 100644
> --- a/net/netfilter/ipset/ip_set_hash_gen.h
> +++ b/net/netfilter/ipset/ip_set_hash_gen.h
> @@ -15,7 +15,7 @@
>
> #define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
> #define ipset_dereference_protected(p, set) \
> - __ipset_dereference_protected(p, spin_is_locked(&(set)->lock))
> + __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock))
>
> #define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
>
> --
> 2.19.0
>
>
-
E-mail : [email protected], [email protected]
PGP key : http://www.kfki.hu/~kadlec/pgp_public_key.txt
Address : Wigner Research Centre for Physics, Hungarian Academy of Sciences
H-1525 Budapest 114, POB. 49, Hungary
Lance Roy <[email protected]> writes:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Daniel Drake <[email protected]>
> Cc: Ulrich Kunitz <[email protected]>
> Cc: Kalle Valo <[email protected]>
> Cc: "David S. Miller" <[email protected]>
> Cc: <[email protected]>
> Cc: <[email protected]>
> ---
> drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
Should I take this or is it going through some other tree?
If it goes via some other tree:
Acked-by: Kalle Valo <[email protected]>
--
Kalle Valo
Lance Roy <[email protected]> writes:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: "K. Y. Srinivasan" <[email protected]>
> Cc: Haiyang Zhang <[email protected]>
> Cc: Stephen Hemminger <[email protected]>
> Cc: <[email protected]>
> ---
> drivers/hv/hv_balloon.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
> index b1b788082793..41631512ae97 100644
> --- a/drivers/hv/hv_balloon.c
> +++ b/drivers/hv/hv_balloon.c
> @@ -689,7 +689,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
> __online_page_increment_counters(pg);
> __online_page_free(pg);
>
> - WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
> + lockdep_assert_held(&dm_device.ha_lock);
> dm_device.num_pages_onlined++;
> }
Reviewed-by: Vitaly Kuznetsov <[email protected]>
However,
lockdep_assert_held() is a no-op when !CONFIG_LOCKDEP but this doesn't
really matter: hv_page_online_one() is static and it has only two call
sites, both taking the dm_device.ha_lock lock - so the warning may just
go away.
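(For reference, the definitions look roughly like this; paraphrased from
include/linux/lockdep.h, so treat it as a sketch rather than an exact quote:)

	#ifdef CONFIG_LOCKDEP
	# define lockdep_assert_held(l)	do {				\
			WARN_ON(debug_locks && !lockdep_is_held(l));	\
		} while (0)
	#else
	/* Without lockdep the assertion compiles away entirely. */
	# define lockdep_assert_held(l)	do { (void)(l); } while (0)
	#endif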
--
Vitaly
On 10/2/18 10:38 PM, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
Reviewed-by: Bart Van Assche <[email protected]>
On Tue, Oct 02, 2018 at 10:38:47PM -0700, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Bjorn Helgaas <[email protected]>
> Cc: Thomas Gleixner <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Borislav Petkov <[email protected]>
> Cc: "H. Peter Anvin" <[email protected]>
> Cc: <[email protected]>
> Cc: <[email protected]>
I assume you plan to merge the whole series together. I don't object
to that, but I don't know enough to be able to formally ack this.
It would be useful to include a tiny bit more detail in the changelog.
The spin_is_locked() documentation doesn't mention anything about
differences with respect to the lock being held by self vs by someone
else, so I can't tell where the confusion arises.
Bjorn
> ---
> arch/x86/pci/i386.c | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
> index ed4ac215305d..24bb58a007de 100644
> --- a/arch/x86/pci/i386.c
> +++ b/arch/x86/pci/i386.c
> @@ -59,7 +59,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
> {
> struct pcibios_fwaddrmap *map;
>
> - WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
> + lockdep_assert_held(&pcibios_fwaddrmap_lock);
>
> list_for_each_entry(map, &pcibios_fwaddrmappings, list)
> if (map->dev == dev)
> --
> 2.19.0
>
On Wed, Oct 03, 2018 at 11:00:51AM -0500, Bjorn Helgaas wrote:
> On Tue, Oct 02, 2018 at 10:38:47PM -0700, Lance Roy wrote:
> > lockdep_assert_held() is better suited to checking locking requirements,
> > since it won't get confused when someone else holds the lock. This is
> > also a step towards possibly removing spin_is_locked().
> >
> > Signed-off-by: Lance Roy <[email protected]>
> > Cc: Bjorn Helgaas <[email protected]>
> > Cc: Thomas Gleixner <[email protected]>
> > Cc: Ingo Molnar <[email protected]>
> > Cc: Borislav Petkov <[email protected]>
> > Cc: "H. Peter Anvin" <[email protected]>
> > Cc: <[email protected]>
> > Cc: <[email protected]>
>
> I assume you plan to merge the whole series together. I don't object
> to that, but I don't know enough to be able to formally ack this.
>
> It would be useful to include a tiny bit more detail in the changelog.
> The spin_is_locked() documentation doesn't mention anything about
> differences with respect to the lock being held by self vs by someone
> else, so I can't tell where the confusion arises.
The difference is that spin_is_locked() will return true when someone else holds
the lock, while lockdep_assert_held() asserts that the current thread holds the
lock. How about the following for a new changelog?
lockdep_assert_held() is better suited to checking locking requirements,
since it only checks if the current thread holds the lock regardless of
whether someone else does. This is also a step towards possibly removing
spin_is_locked().
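(If it helps, here is a contrived sketch of the difference, with made-up names;
assume CONFIG_LOCKDEP=y:)

	static DEFINE_SPINLOCK(demo_lock);

	/* Supposed to be called with demo_lock held by the caller. */
	static void demo_update(void)
	{
		/*
		 * Old check (on SMP): stays silent as long as *anyone* holds
		 * demo_lock, so a caller that forgot to lock is not caught
		 * while some other CPU happens to be in its own critical
		 * section.
		 */
		WARN_ON(!spin_is_locked(&demo_lock));

		/*
		 * New check: lockdep warns unless the *current* context holds
		 * demo_lock, regardless of what other CPUs are doing.
		 */
		lockdep_assert_held(&demo_lock);
	}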
Thanks,
Lance
> Bjorn
>
> > ---
> > arch/x86/pci/i386.c | 2 +-
> > 1 file changed, 1 insertion(+), 1 deletion(-)
> >
> > diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
> > index ed4ac215305d..24bb58a007de 100644
> > --- a/arch/x86/pci/i386.c
> > +++ b/arch/x86/pci/i386.c
> > @@ -59,7 +59,7 @@ static struct pcibios_fwaddrmap *pcibios_fwaddrmap_lookup(struct pci_dev *dev)
> > {
> > struct pcibios_fwaddrmap *map;
> >
> > - WARN_ON_SMP(!spin_is_locked(&pcibios_fwaddrmap_lock));
> > + lockdep_assert_held(&pcibios_fwaddrmap_lock);
> >
> > list_for_each_entry(map, &pcibios_fwaddrmappings, list)
> > if (map->dev == dev)
> > --
> > 2.19.0
> >
On Wed, Oct 03, 2018 at 12:06:48PM +0300, Kalle Valo wrote:
> Lance Roy <[email protected]> writes:
>
> > lockdep_assert_held() is better suited to checking locking requirements,
> > since it won't get confused when someone else holds the lock. This is
> > also a step towards possibly removing spin_is_locked().
> >
> > Signed-off-by: Lance Roy <[email protected]>
> > Cc: Daniel Drake <[email protected]>
> > Cc: Ulrich Kunitz <[email protected]>
> > Cc: Kalle Valo <[email protected]>
> > Cc: "David S. Miller" <[email protected]>
> > Cc: <[email protected]>
> > Cc: <[email protected]>
> > ---
> > drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2 +-
> > 1 file changed, 1 insertion(+), 1 deletion(-)
>
> Should I take this or is it going through some other tree?
Sure you can take this if you like. If not, Paul McKenney will take it in his
tree.
Thanks,
Lance
> If it goes via some other tree:
>
> Acked-by: Kalle Valo <[email protected]>
>
> --
> Kalle Valo
Lance Roy <[email protected]> writes:
> On Wed, Oct 03, 2018 at 12:06:48PM +0300, Kalle Valo wrote:
>> Lance Roy <[email protected]> writes:
>>
>> > lockdep_assert_held() is better suited to checking locking requirements,
>> > since it won't get confused when someone else holds the lock. This is
>> > also a step towards possibly removing spin_is_locked().
>> >
>> > Signed-off-by: Lance Roy <[email protected]>
>> > Cc: Daniel Drake <[email protected]>
>> > Cc: Ulrich Kunitz <[email protected]>
>> > Cc: Kalle Valo <[email protected]>
>> > Cc: "David S. Miller" <[email protected]>
>> > Cc: <[email protected]>
>> > Cc: <[email protected]>
>> > ---
>> > drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 2 +-
>> > 1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> Should I take this or is it going through some other tree?
>
> Sure you can take this if you like. If not, Paul McKenney will take it in his
> tree.
Ok, then I'll take this to wireless-drivers-next. And I'll change the
title prefix to "zd1211rw: ".
--
Kalle Valo
> -----Original Message-----
> From: Intel-wired-lan [mailto:[email protected]] On
> Behalf Of Lance Roy
> Sent: Tuesday, October 2, 2018 10:39 PM
> To: [email protected]
> Cc: [email protected]; Lance Roy <[email protected]>; intel-wired-
> [email protected]; Paul E. McKenney <[email protected]>; David S.
> Miller <[email protected]>
> Subject: [Intel-wired-lan] [PATCH 04/16] i40e: Replace spin_is_locked() with
> lockdep
>
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is also a
> step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Jeff Kirsher <[email protected]>
> Cc: "David S. Miller" <[email protected]>
> Cc: [email protected]
> Cc: <[email protected]>
> ---
> drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +--
> 1 file changed, 1 insertion(+), 2 deletions(-)
Tested-by: Andrew Bowers <[email protected]>
Lance Roy <[email protected]> wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Daniel Drake <[email protected]>
> Cc: Ulrich Kunitz <[email protected]>
> Cc: Kalle Valo <[email protected]>
> Cc: "David S. Miller" <[email protected]>
> Cc: <[email protected]>
> Cc: <[email protected]>
Patch applied to wireless-drivers-next.git, thanks.
209e957b467b zd1211rw: Replace spin_is_locked() with lockdep
--
https://patchwork.kernel.org/patch/10624325/
https://wireless.wiki.kernel.org/en/developers/documentation/submittingpatches
Commit-ID: 4de1a293a08bdf8ec1530e02163930ac86f80ea2
Gitweb: https://git.kernel.org/tip/4de1a293a08bdf8ec1530e02163930ac86f80ea2
Author: Lance Roy <[email protected]>
AuthorDate: Tue, 2 Oct 2018 22:38:57 -0700
Committer: Thomas Gleixner <[email protected]>
CommitDate: Tue, 9 Oct 2018 13:19:28 +0200
futex: Replace spin_is_locked() with lockdep
lockdep_assert_held() is better suited for checking locking requirements,
since it won't get confused when the lock is held by some other task. This
is also a step towards possibly removing spin_is_locked().
Signed-off-by: Lance Roy <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: "Paul E. McKenney" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Darren Hart <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
---
kernel/futex.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/futex.c b/kernel/futex.c
index 11fc3bb456d6..3e2de8fc1891 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1365,9 +1365,9 @@ static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
- if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
- || WARN_ON(plist_node_empty(&q->list)))
+ if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list)))
return;
+ lockdep_assert_held(q->lock_ptr);
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
On Tue, Oct 02, 2018 at 10:38:58PM -0700, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Peter Zijlstra <[email protected]>
> Cc: Ingo Molnar <[email protected]>
> Cc: Will Deacon <[email protected]>
> ---
> kernel/locking/mutex-debug.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
> index 9aa713629387..771d4ca96dda 100644
> --- a/kernel/locking/mutex-debug.c
> +++ b/kernel/locking/mutex-debug.c
> @@ -36,7 +36,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
>
> void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
> {
> - SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
> + lockdep_assert_held(&lock->wait_lock);
> DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
> DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
> DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
> @@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
> void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
> struct task_struct *task)
> {
> - SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
> + lockdep_assert_held(&lock->wait_lock);
I think it's a good idea to replace debug usage of spin_is_locked() with
calls to lockdep, but I wonder whether that means that DEBUG_MUTEXES should
select LOCKDEP so that we don't lose coverage?
What do you think?
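(For what it's worth, SMP_DEBUG_LOCKS_WARN_ON() does not depend on lockdep;
paraphrasing include/linux/debug_locks.h from memory, so this is a sketch rather
than an exact quote:)

	/*
	 * DEBUG_LOCKS_WARN_ON() only needs lib/debug_locks.c, which is always
	 * built, so the old check fired on any SMP kernel with DEBUG_MUTEXES.
	 * The lockdep_assert_held() replacement compiles away without
	 * CONFIG_LOCKDEP, which is the coverage question above.
	 */
	#ifdef CONFIG_SMP
	# define SMP_DEBUG_LOCKS_WARN_ON(c)	DEBUG_LOCKS_WARN_ON(c)
	#else
	# define SMP_DEBUG_LOCKS_WARN_ON(c)	do { } while (0)
	#endif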
Will
On Tue, Oct 09, 2018 at 01:18:10PM +0100, Will Deacon wrote:
> On Tue, Oct 02, 2018 at 10:38:58PM -0700, Lance Roy wrote:
> > lockdep_assert_held() is better suited to checking locking requirements,
> > since it won't get confused when someone else holds the lock. This is
> > also a step towards possibly removing spin_is_locked().
> >
> > Signed-off-by: Lance Roy <[email protected]>
> > Cc: Peter Zijlstra <[email protected]>
> > Cc: Ingo Molnar <[email protected]>
> > Cc: Will Deacon <[email protected]>
> > ---
> > kernel/locking/mutex-debug.c | 4 ++--
> > 1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
> > index 9aa713629387..771d4ca96dda 100644
> > --- a/kernel/locking/mutex-debug.c
> > +++ b/kernel/locking/mutex-debug.c
> > @@ -36,7 +36,7 @@ void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
> >
> > void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
> > {
> > - SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
> > + lockdep_assert_held(&lock->wait_lock);
> > DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
> > DEBUG_LOCKS_WARN_ON(waiter->magic != waiter);
> > DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
> > @@ -51,7 +51,7 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
> > void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
> > struct task_struct *task)
> > {
> > - SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
> > + lockdep_assert_held(&lock->wait_lock);
>
> I think it's a good idea to replace debug usage of spin_is_locked() with
> calls to lockdep, but I wonder whether that means that DEBUG_MUTEXES should
> select LOCKDEP so that we don't lose coverage?
>
> What do you think?
Makes sense to me!
But given that this was accepted into -tip, I have dropped it from my tree.
Thanx, Paul
Lance,
> lockdep_assert_held() is better suited to checking locking
> requirements, since it won't get confused when someone else holds the
> lock. This is also a step towards possibly removing spin_is_locked().
Applied to 4.20/scsi-queue, thank you!
--
Martin K. Petersen Oracle Linux Engineering
> From: [email protected] [mailto:netdev-
> [email protected]] On Behalf Of Lance Roy
> Sent: Tuesday, October 2, 2018 10:39 PM
> To: [email protected]
> Cc: Paul E. McKenney <[email protected]>; Lance Roy
> <[email protected]>; Kirsher, Jeffrey T <[email protected]>; David
> S. Miller <[email protected]>; [email protected];
> [email protected]
> Subject: [PATCH 05/16] igbvf: Replace spin_is_locked() with lockdep
>
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Jeff Kirsher <[email protected]>
> Cc: "David S. Miller" <[email protected]>
> Cc: [email protected]
> Cc: <[email protected]>
> ---
> drivers/net/ethernet/intel/igbvf/mbx.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
Tested-by: Aaron Brown <[email protected]>
On Tue, Oct 02, 2018 at 10:39:02PM -0700, Lance Roy wrote:
> lockdep_assert_held() is better suited to checking locking requirements,
> since it won't get confused when someone else holds the lock. This is
> also a step towards possibly removing spin_is_locked().
>
> Signed-off-by: Lance Roy <[email protected]>
> Cc: Christoffer Dall <[email protected]>
> Cc: Marc Zyngier <[email protected]>
> Cc: Eric Auger <[email protected]>
> Cc: [email protected]
> Cc: <[email protected]>
Acked-by: Christoffer Dall <[email protected]>
> ---
> virt/kvm/arm/vgic/vgic.c | 12 ++++++------
> 1 file changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
> index 7cfdfbc910e0..50e25438fb3c 100644
> --- a/virt/kvm/arm/vgic/vgic.c
> +++ b/virt/kvm/arm/vgic/vgic.c
> @@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
> */
> static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
> {
> - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
> + lockdep_assert_held(&irq->irq_lock);
>
> /* If the interrupt is active, it must stay on the current vcpu */
> if (irq->active)
> @@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
> {
> struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
>
> - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
> + lockdep_assert_held(&vgic_cpu->ap_list_lock);
>
> list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
> }
> @@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
> {
> struct kvm_vcpu *vcpu;
>
> - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
> + lockdep_assert_held(&irq->irq_lock);
>
> retry:
> vcpu = vgic_target_oracle(irq);
> @@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
> static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
> struct vgic_irq *irq, int lr)
> {
> - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
> + lockdep_assert_held(&irq->irq_lock);
>
> if (kvm_vgic_global_state.type == VGIC_V2)
> vgic_v2_populate_lr(vcpu, irq, lr);
> @@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
>
> *multi_sgi = false;
>
> - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
> + lockdep_assert_held(&vgic_cpu->ap_list_lock);
>
> list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
> int w;
> @@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
> bool multi_sgi;
> u8 prio = 0xff;
>
> - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
> + lockdep_assert_held(&vgic_cpu->ap_list_lock);
>
> count = compute_ap_list_depth(vcpu, &multi_sgi);
> if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
> --
> 2.19.0
>
>
> _______________________________________________
> linux-arm-kernel mailing list
> [email protected]
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
On Fri, Nov 02, 2018 at 07:45:33PM +0100, Christoffer Dall wrote:
> On Tue, Oct 02, 2018 at 10:39:02PM -0700, Lance Roy wrote:
> > lockdep_assert_held() is better suited to checking locking requirements,
> > since it won't get confused when someone else holds the lock. This is
> > also a step towards possibly removing spin_is_locked().
> >
> > Signed-off-by: Lance Roy <[email protected]>
> > Cc: Christoffer Dall <[email protected]>
> > Cc: Marc Zyngier <[email protected]>
> > Cc: Eric Auger <[email protected]>
> > Cc: [email protected]
> > Cc: <[email protected]>
>
> Acked-by: Christoffer Dall <[email protected]>
Applied, thank you!
Thanx, Paul
> > ---
> > virt/kvm/arm/vgic/vgic.c | 12 ++++++------
> > 1 file changed, 6 insertions(+), 6 deletions(-)
> >
> > diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
> > index 7cfdfbc910e0..50e25438fb3c 100644
> > --- a/virt/kvm/arm/vgic/vgic.c
> > +++ b/virt/kvm/arm/vgic/vgic.c
> > @@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
> > */
> > static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
> > {
> > - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
> > + lockdep_assert_held(&irq->irq_lock);
> >
> > /* If the interrupt is active, it must stay on the current vcpu */
> > if (irq->active)
> > @@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
> > {
> > struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
> >
> > - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
> > + lockdep_assert_held(&vgic_cpu->ap_list_lock);
> >
> > list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
> > }
> > @@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
> > {
> > struct kvm_vcpu *vcpu;
> >
> > - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
> > + lockdep_assert_held(&irq->irq_lock);
> >
> > retry:
> > vcpu = vgic_target_oracle(irq);
> > @@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
> > static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
> > struct vgic_irq *irq, int lr)
> > {
> > - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
> > + lockdep_assert_held(&irq->irq_lock);
> >
> > if (kvm_vgic_global_state.type == VGIC_V2)
> > vgic_v2_populate_lr(vcpu, irq, lr);
> > @@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
> >
> > *multi_sgi = false;
> >
> > - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
> > + lockdep_assert_held(&vgic_cpu->ap_list_lock);
> >
> > list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
> > int w;
> > @@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
> > bool multi_sgi;
> > u8 prio = 0xff;
> >
> > - DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
> > + lockdep_assert_held(&vgic_cpu->ap_list_lock);
> >
> > count = compute_ap_list_depth(vcpu, &multi_sgi);
> > if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
> > --
> > 2.19.0
> >
> >
> > _______________________________________________
> > linux-arm-kernel mailing list
> > [email protected]
> > http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
>