Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1754861AbbBTOkC (ORCPT ); Fri, 20 Feb 2015 09:40:02 -0500 Received: from mail.bmw-carit.de ([62.245.222.98]:53437 "EHLO linuxmail.bmw-carit.de" rhost-flags-OK-OK-OK-FAIL) by vger.kernel.org with ESMTP id S1754824AbbBTOj7 (ORCPT ); Fri, 20 Feb 2015 09:39:59 -0500 From: Daniel Wagner To: Jeff Layton Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org, John Kacur , Daniel Wagner , "J. Bruce Fields" , Alexander Viro Subject: [RFC v1 3/5] seq_file: Add percpu seq_hlist helpers with locking iterators Date: Fri, 20 Feb 2015 15:39:53 +0100 Message-Id: <1424443195-18676-4-git-send-email-daniel.wagner@bmw-carit.de> X-Mailer: git-send-email 2.1.0 In-Reply-To: <1424443195-18676-1-git-send-email-daniel.wagner@bmw-carit.de> References: <1424443195-18676-1-git-send-email-daniel.wagner@bmw-carit.de> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 4036 Lines: 133 Introduce a variant of the seq_hlist helpers for iterating seq_hlists which are protected by percpu spinlocks. Signed-off-by: Daniel Wagner Cc: Jeff Layton Cc: "J. Bruce Fields" Cc: Alexander Viro --- fs/seq_file.c | 83 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/seq_file.h | 13 ++++++++ 2 files changed, 96 insertions(+) diff --git a/fs/seq_file.c b/fs/seq_file.c index 555f821..56adfdb 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -966,3 +966,86 @@ seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, return NULL; } EXPORT_SYMBOL(seq_hlist_next_percpu); + +/** + * seq_hlist_start_percpu_locked - start an iteration of a percpu hlist array + * @head: pointer to percpu array of struct hlist_heads + * @lock: pointer to percpu spinlock which protects @head + * @cpu: pointer to cpu "cursor" + * @pos: start position of sequence + * + * Called at seq_file->op->start(). 
+ */ +struct hlist_node * +seq_hlist_start_percpu_locked(struct hlist_head __percpu *head, + spinlock_t __percpu *lock, int *cpu, loff_t pos) +{ + struct hlist_node *node; + + for_each_possible_cpu(*cpu) { + spin_lock(per_cpu_ptr(lock, *cpu)); + hlist_for_each(node, per_cpu_ptr(head, *cpu)) { + if (pos-- == 0) + return node; + } + spin_unlock(per_cpu_ptr(lock, *cpu)); + } + return NULL; +} +EXPORT_SYMBOL(seq_hlist_start_percpu_locked); + +/** + * seq_hlist_next_percpu_locked - move to the next position of the percpu hlist array + * @v: pointer to current hlist_node + * @head: pointer to percpu array of struct hlist_heads + * @lock: pointer to percpu spinlock which protects @head + * @cpu: pointer to cpu "cursor" + * @pos: start position of sequence + * + * Called at seq_file->op->next(). + */ +struct hlist_node * +seq_hlist_next_percpu_locked(void *v, struct hlist_head __percpu *head, + spinlock_t __percpu *lock, + int *cpu, loff_t *pos) +{ + struct hlist_node *node = v; + + ++*pos; + + if (node->next) + return node->next; + + spin_unlock(per_cpu_ptr(lock, *cpu)); + + for (*cpu = cpumask_next(*cpu, cpu_possible_mask); *cpu < nr_cpu_ids; + *cpu = cpumask_next(*cpu, cpu_possible_mask)) { + struct hlist_head *bucket; + + spin_lock(per_cpu_ptr(lock, *cpu)); + bucket = per_cpu_ptr(head, *cpu); + + if (!hlist_empty(bucket)) + return bucket->first; + + spin_unlock(per_cpu_ptr(lock, *cpu)); + } + return NULL; +} +EXPORT_SYMBOL(seq_hlist_next_percpu_locked); + +/** + * seq_hlist_stop_percpu_locked - stop iterating over the percpu hlist array + * @v: pointer to current hlist_node + * @lock: pointer to percpu spinlock which protects @head + * @cpu: pointer to cpu "cursor" + * + * Called at seq_file->op->stop(). 
+ */ +void +seq_hlist_stop_percpu_locked(void *v, spinlock_t __percpu *lock, int *cpu) +{ + if (v) + spin_unlock(per_cpu_ptr(lock, *cpu)); +} +EXPORT_SYMBOL(seq_hlist_stop_percpu_locked); diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index afbb1fd..6419ac4 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -184,4 +184,17 @@ extern struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *hea extern struct hlist_node *seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head, int *cpu, loff_t *pos); +extern struct hlist_node *seq_hlist_start_percpu_locked( + struct hlist_head __percpu *head, + spinlock_t __percpu *lock, + int *cpu, loff_t pos); + +extern struct hlist_node *seq_hlist_next_percpu_locked( + void *v, struct hlist_head __percpu *head, + spinlock_t __percpu *lock, + int *cpu, loff_t *pos); + +extern void seq_hlist_stop_percpu_locked( + void *v, spinlock_t __percpu *lock, int *cpu); + #endif -- 2.1.0 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/