Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1754891AbbBTOkD (ORCPT ); Fri, 20 Feb 2015 09:40:03 -0500 Received: from mail.bmw-carit.de ([62.245.222.98]:53443 "EHLO linuxmail.bmw-carit.de" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1754826AbbBTOj7 (ORCPT ); Fri, 20 Feb 2015 09:39:59 -0500 From: Daniel Wagner To: Jeff Layton Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org, John Kacur , Daniel Wagner , "J. Bruce Fields" , Alexander Viro Subject: [RFC v1 4/5] locks: Use percpu spinlocks to protect file_lock_list Date: Fri, 20 Feb 2015 15:39:54 +0100 Message-Id: <1424443195-18676-5-git-send-email-daniel.wagner@bmw-carit.de> X-Mailer: git-send-email 2.1.0 In-Reply-To: <1424443195-18676-1-git-send-email-daniel.wagner@bmw-carit.de> References: <1424443195-18676-1-git-send-email-daniel.wagner@bmw-carit.de> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 4076 Lines: 116 Replace the lglock with percpu spinlocks. That allows us to iterate in the seqfile ops without taking all underlying spinlocks with the lg_global_lock(). Signed-off-by: Daniel Wagner Cc: Jeff Layton Cc: "J. Bruce Fields" Cc: Alexander Viro --- fs/locks.c | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/fs/locks.c b/fs/locks.c index 142e4fd..20ed00a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -128,7 +128,6 @@ #include #include #include -#include #define CREATE_TRACE_POINTS #include @@ -160,10 +159,10 @@ int lease_break_time = 45; /* * The global file_lock_list is only used for displaying /proc/locks, so we * keep a list on each CPU, with each list protected by its own spinlock via - * the file_lock_lglock. Note that alterations to the list also require that + * the file_lock_lock. Note that alterations to the list also require that * the relevant flc_lock is held. 
*/ -DEFINE_STATIC_LGLOCK(file_lock_lglock); +static DEFINE_PER_CPU(spinlock_t, file_lock_lock); static DEFINE_PER_CPU(struct hlist_head, file_lock_list); /* @@ -561,10 +560,10 @@ static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) /* Must be called with the flc_lock held! */ static void locks_insert_global_locks(struct file_lock *fl) { - lg_local_lock(&file_lock_lglock); + spin_lock(this_cpu_ptr(&file_lock_lock)); fl->fl_link_cpu = smp_processor_id(); hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list)); - lg_local_unlock(&file_lock_lglock); + spin_unlock(this_cpu_ptr(&file_lock_lock)); } /* Must be called with the flc_lock held! */ @@ -577,9 +576,9 @@ static void locks_delete_global_locks(struct file_lock *fl) */ if (hlist_unhashed(&fl->fl_link)) return; - lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu); + spin_lock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu)); hlist_del_init(&fl->fl_link); - lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu); + spin_unlock(per_cpu_ptr(&file_lock_lock, fl->fl_link_cpu)); } static unsigned long @@ -2628,9 +2627,9 @@ static void *locks_start(struct seq_file *f, loff_t *pos) struct locks_iterator *iter = f->private; iter->li_pos = *pos + 1; - lg_global_lock(&file_lock_lglock); spin_lock(&blocked_lock_lock); - return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos); + return seq_hlist_start_percpu_locked(&file_lock_list, &file_lock_lock, + &iter->li_cpu, *pos); } static void *locks_next(struct seq_file *f, void *v, loff_t *pos) @@ -2638,14 +2637,17 @@ static void *locks_next(struct seq_file *f, void *v, loff_t *pos) struct locks_iterator *iter = f->private; ++iter->li_pos; - return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos); + return seq_hlist_next_percpu_locked(v, &file_lock_list, &file_lock_lock, + &iter->li_cpu, pos); } static void locks_stop(struct seq_file *f, void *v) __releases(&blocked_lock_lock) { + struct locks_iterator *iter = f->private; + + 
seq_hlist_stop_percpu_locked(v, &file_lock_lock, &iter->li_cpu); spin_unlock(&blocked_lock_lock); - lg_global_unlock(&file_lock_lglock); } static const struct seq_operations locks_seq_operations = { @@ -2686,10 +2688,10 @@ static int __init filelock_init(void) filelock_cache = kmem_cache_create("file_lock_cache", sizeof(struct file_lock), 0, SLAB_PANIC, NULL); - lg_lock_init(&file_lock_lglock, "file_lock_lglock"); - - for_each_possible_cpu(i) + for_each_possible_cpu(i) { INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i)); + spin_lock_init(per_cpu_ptr(&file_lock_lock, i)); + } return 0; } -- 2.1.0 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/