Stop using spinlocks in the read path. Use an RCU-protected list to handle the readers.
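
The list is still only modified under ptracer_relations_lock, so writers
stay serialized; only the read side goes lockless. As a minimal sketch of
the pattern (names here are illustrative, not the actual Yama code):
readers walk the list under rcu_read_lock() without taking the spinlock,
while writers keep the spinlock and defer frees past the grace period
with kfree_rcu().

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct item {
	int key;
	struct list_head node;
	struct rcu_head rcu;	/* needed by kfree_rcu() */
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

/* read path: no spinlock, just an RCU read-side critical section */
static bool item_exists(int key)
{
	struct item *i;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(i, &items, node)
		if (i->key == key) {
			found = true;
			break;
		}
	rcu_read_unlock();
	return found;
}

/* write path: still serialized by the spinlock; defer the free */
static void item_del(int key)
{
	struct item *i;

	spin_lock_bh(&items_lock);
	list_for_each_entry_rcu(i, &items, node)
		if (i->key == key) {
			list_del_rcu(&i->node);
			kfree_rcu(i, rcu);
			break;
		}
	spin_unlock_bh(&items_lock);
}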
Signed-off-by: Kees Cook <[email protected]>
---
security/yama/yama_lsm.c | 43 ++++++++++++++++++++-----------------------
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index b4c2984..17da6ca 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -30,6 +30,7 @@ struct ptrace_relation {
 	struct task_struct *tracer;
 	struct task_struct *tracee;
 	struct list_head node;
+	struct rcu_head rcu;
 };
 
 static LIST_HEAD(ptracer_relations);
@@ -48,32 +49,29 @@ static DEFINE_SPINLOCK(ptracer_relations_lock);
 static int yama_ptracer_add(struct task_struct *tracer,
 			    struct task_struct *tracee)
 {
-	int rc = 0;
-	struct ptrace_relation *added;
-	struct ptrace_relation *entry, *relation = NULL;
+	struct ptrace_relation *relation, *added;
 
 	added = kmalloc(sizeof(*added), GFP_KERNEL);
 	if (!added)
 		return -ENOMEM;
 
+	added->tracee = tracee;
+	added->tracer = tracer;
+
 	spin_lock_bh(&ptracer_relations_lock);
-	list_for_each_entry(entry, &ptracer_relations, node)
-		if (entry->tracee == tracee) {
-			relation = entry;
-			break;
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->tracee == tracee) {
+			list_replace_rcu(&relation->node, &added->node);
+			kfree_rcu(relation, rcu);
+			goto out;
 		}
-	if (!relation) {
-		relation = added;
-		relation->tracee = tracee;
-		list_add(&relation->node, &ptracer_relations);
 	}
-	relation->tracer = tracer;
 
-	spin_unlock_bh(&ptracer_relations_lock);
-	if (added != relation)
-		kfree(added);
+	list_add_rcu(&added->node, &ptracer_relations);
 
-	return rc;
+out:
+	spin_unlock_bh(&ptracer_relations_lock);
+	return 0;
 }
 
 /**
@@ -84,15 +82,16 @@ static int yama_ptracer_add(struct task_struct *tracer,
 static void yama_ptracer_del(struct task_struct *tracer,
 			     struct task_struct *tracee)
 {
-	struct ptrace_relation *relation, *safe;
+	struct ptrace_relation *relation;
 
 	spin_lock_bh(&ptracer_relations_lock);
-	list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
 		if (relation->tracee == tracee ||
 		    (tracer && relation->tracer == tracer)) {
-			list_del(&relation->node);
-			kfree(relation);
+			list_del_rcu(&relation->node);
+			kfree_rcu(relation, rcu);
 		}
+	}
 	spin_unlock_bh(&ptracer_relations_lock);
 }
 
@@ -217,11 +216,10 @@ static int ptracer_exception_found(struct task_struct *tracer,
 	struct task_struct *parent = NULL;
 	bool found = false;
 
-	spin_lock_bh(&ptracer_relations_lock);
 	rcu_read_lock();
 	if (!thread_group_leader(tracee))
 		tracee = rcu_dereference(tracee->group_leader);
-	list_for_each_entry(relation, &ptracer_relations, node)
+	list_for_each_entry_rcu(relation, &ptracer_relations, node)
 		if (relation->tracee == tracee) {
 			parent = relation->tracer;
 			found = true;
@@ -231,7 +229,6 @@ static int ptracer_exception_found(struct task_struct *tracer,
 	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
 		rc = 1;
 	rcu_read_unlock();
-	spin_unlock_bh(&ptracer_relations_lock);
 
 	return rc;
 }
--
1.7.9.5
--
Kees Cook
Chrome OS Security
Quoting Kees Cook ([email protected]):
> Stop using spinlocks in the read path. Use an RCU-protected list to handle the readers.
Looks good to me. BTW, kfree_rcu is neat :)
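
(For anyone who hasn't used it: the one-liner stands in for the usual
call_rcu() boilerplate. Roughly, for this patch's struct — callback name
made up here:

	/* what kfree_rcu(relation, rcu) replaces, more or less */
	static void relation_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct ptrace_relation, rcu));
	}

	/* ...and at each free site: */
	call_rcu(&relation->rcu, relation_free_rcu);

so there's no callback to write and no container_of() to get wrong.)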
Reviewed-by: Serge E. Hallyn <[email protected]>
On 11/13/2012 07:58 PM, Kees Cook wrote:
> Stop using spinlocks in the read path. Use an RCU-protected list to handle the readers.
>
> Signed-off-by: Kees Cook <[email protected]>
Looks good to me.
Acked-by: John Johansen <[email protected]>