On Wed, Jun 09, 2021 at 10:31:29AM +0900, William Breathitt Gray wrote:
> +static int counter_set_event_node(struct counter_device *const counter,
> +				   struct counter_watch *const watch,
> +				   const struct counter_comp_node *const cfg)
> +{
> +	struct counter_event_node *event_node;
> +	struct counter_comp_node *comp_node;
> +
The caller should be holding the counter->events_list_lock lock but it's
not.
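If the lock is meant to be held here, a lockdep annotation at the top of
the function would document that and catch the missing lock at runtime.
A minimal sketch, assuming events_list_lock remains a spinlock:

	lockdep_assert_held(&counter->events_list_lock);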
> +	/* Search for event in the list */
> +	list_for_each_entry(event_node, &counter->next_events_list, l)
> +		if (event_node->event == watch->event &&
> +		    event_node->channel == watch->channel)
> +			break;
> +
> +	/* If event is not already in the list */
> +	if (&event_node->l == &counter->next_events_list) {
> +		/* Allocate new event node */
> +		event_node = kmalloc(sizeof(*event_node), GFP_ATOMIC);
> +		if (!event_node)
> +			return -ENOMEM;
> +
> +		/* Configure event node and add to the list */
> +		event_node->event = watch->event;
> +		event_node->channel = watch->channel;
> +		INIT_LIST_HEAD(&event_node->comp_list);
> +		list_add(&event_node->l, &counter->next_events_list);
> +	}
> +
> +	/* Check if component watch has already been set before */
> +	list_for_each_entry(comp_node, &event_node->comp_list, l)
> +		if (comp_node->parent == cfg->parent &&
> +		    comp_node->comp.count_u8_read == cfg->comp.count_u8_read)
> +			return -EINVAL;
> +
> +	/* Allocate component node */
> +	comp_node = kmalloc(sizeof(*comp_node), GFP_ATOMIC);
> +	if (!comp_node) {
> +		/* Free event node if no one else is watching */
> +		if (list_empty(&event_node->comp_list)) {
> +			list_del(&event_node->l);
> +			kfree(event_node);
> +		}
> +		return -ENOMEM;
> +	}
> +	*comp_node = *cfg;
> +
> +	/* Add component node to event node */
> +	list_add_tail(&comp_node->l, &event_node->comp_list);
> +
> +	return 0;
> +}
> +
> +static int counter_disable_events(struct counter_device *const counter)
> +{
> +	unsigned long flags;
> +	int err = 0;
> +
> +	spin_lock_irqsave(&counter->events_list_lock, flags);
> +
> +	counter_events_list_free(&counter->events_list);
> +
> +	if (counter->ops->events_configure)
> +		err = counter->ops->events_configure(counter);
> +
> +	spin_unlock_irqrestore(&counter->events_list_lock, flags);
> +
> +	counter_events_list_free(&counter->next_events_list);
> +
> +	return err;
> +}
> +
> +static int counter_add_watch(struct counter_device *const counter,
> +			     const unsigned long arg)
> +{
> +	void __user *const uwatch = (void __user *)arg;
> +	struct counter_watch watch;
> +	struct counter_comp_node comp_node = {0};
Always use = {};. It's the new hotness, and it avoids a Sparse warning
for using 0 instead of NULL. #IDidNotTest #CouldBeWrongAboutSparse
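In other words, just (untested):

-	struct counter_comp_node comp_node = {0};
+	struct counter_comp_node comp_node = {};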
> +	size_t parent, id;
> +	struct counter_comp *ext;
> +	size_t num_ext;
> +	int err;
> +
regards,
dan carpenter
On Wed, Jun 09, 2021 at 11:07:08AM +0300, Dan Carpenter wrote:
> On Wed, Jun 09, 2021 at 10:31:29AM +0900, William Breathitt Gray wrote:
> > +static int counter_set_event_node(struct counter_device *const counter,
> > +				   struct counter_watch *const watch,
> > +				   const struct counter_comp_node *const cfg)
> > +{
> > +	struct counter_event_node *event_node;
> > +	struct counter_comp_node *comp_node;
> > +
>
> The caller should be holding the counter->events_list_lock lock but it's
> not.
Hi Dan,
The counter_set_event_node() function doesn't access or modify
counter->events_list (it works on counter->next_events_list) so holding
the counter->events_list_lock here isn't necessary.
> > +	/* Search for event in the list */
> > +	list_for_each_entry(event_node, &counter->next_events_list, l)
> > +		if (event_node->event == watch->event &&
> > +		    event_node->channel == watch->channel)
> > +			break;
> > +
> > +	/* If event is not already in the list */
> > +	if (&event_node->l == &counter->next_events_list) {
> > +		/* Allocate new event node */
> > +		event_node = kmalloc(sizeof(*event_node), GFP_ATOMIC);
> > +		if (!event_node)
> > +			return -ENOMEM;
> > +
> > +		/* Configure event node and add to the list */
> > +		event_node->event = watch->event;
> > +		event_node->channel = watch->channel;
> > +		INIT_LIST_HEAD(&event_node->comp_list);
> > +		list_add(&event_node->l, &counter->next_events_list);
> > +	}
> > +
> > +	/* Check if component watch has already been set before */
> > +	list_for_each_entry(comp_node, &event_node->comp_list, l)
> > +		if (comp_node->parent == cfg->parent &&
> > +		    comp_node->comp.count_u8_read == cfg->comp.count_u8_read)
> > +			return -EINVAL;
> > +
> > +	/* Allocate component node */
> > +	comp_node = kmalloc(sizeof(*comp_node), GFP_ATOMIC);
> > +	if (!comp_node) {
> > +		/* Free event node if no one else is watching */
> > +		if (list_empty(&event_node->comp_list)) {
> > +			list_del(&event_node->l);
> > +			kfree(event_node);
> > +		}
> > +		return -ENOMEM;
> > +	}
> > +	*comp_node = *cfg;
> > +
> > +	/* Add component node to event node */
> > +	list_add_tail(&comp_node->l, &event_node->comp_list);
> > +
> > +	return 0;
> > +}
> > +
> > +static int counter_disable_events(struct counter_device *const counter)
> > +{
> > +	unsigned long flags;
> > +	int err = 0;
> > +
> > +	spin_lock_irqsave(&counter->events_list_lock, flags);
> > +
> > +	counter_events_list_free(&counter->events_list);
> > +
> > +	if (counter->ops->events_configure)
> > +		err = counter->ops->events_configure(counter);
> > +
> > +	spin_unlock_irqrestore(&counter->events_list_lock, flags);
> > +
> > +	counter_events_list_free(&counter->next_events_list);
> > +
> > +	return err;
> > +}
> > +
> > +static int counter_add_watch(struct counter_device *const counter,
> > +			     const unsigned long arg)
> > +{
> > +	void __user *const uwatch = (void __user *)arg;
> > +	struct counter_watch watch;
> > +	struct counter_comp_node comp_node = {0};
>
> Always use = {};. It's the new hotness, and it avoids a Sparse warning
> for using 0 instead of NULL. #IDidNotTest #CouldBeWrongAboutSparse
Thanks for the heads-up! I think this is the only patch where I have
this, so I'll hold off submitting a v12 for just this change unless
something else comes up with this patchset (I can fix this Sparse warning
in a subsequent patch).
William Breathitt Gray
On Wed, Jun 09, 2021 at 05:28:10PM +0900, William Breathitt Gray wrote:
> On Wed, Jun 09, 2021 at 11:07:08AM +0300, Dan Carpenter wrote:
> > On Wed, Jun 09, 2021 at 10:31:29AM +0900, William Breathitt Gray wrote:
> > > +static int counter_set_event_node(struct counter_device *const counter,
> > > +				   struct counter_watch *const watch,
> > > +				   const struct counter_comp_node *const cfg)
> > > +{
> > > +	struct counter_event_node *event_node;
> > > +	struct counter_comp_node *comp_node;
> > > +
> >
> > The caller should be holding the counter->events_list_lock lock but it's
> > not.
>
> Hi Dan,
>
> The counter_set_event_node() function doesn't access or modify
> counter->events_list (it works on counter->next_events_list) so holding
> the counter->events_list_lock here isn't necessary.
>
There needs to be some sort of locking here or this function can race
with itself (two threads adding the same event at exactly the same
time). It looks like it can also race with counter_disable_events(),
leading to a use-after-free.
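One way to close both races would be a dedicated lock serializing every
next_events_list user. A sketch, assuming a new mutex (call it
n_events_list_lock) is added to struct counter_device:

	/* in counter_add_watch(), around the next_events_list update */
	mutex_lock(&counter->n_events_list_lock);
	err = counter_set_event_node(counter, &watch, &comp_node);
	mutex_unlock(&counter->n_events_list_lock);

counter_disable_events() would then take the same mutex around its
counter_events_list_free(&counter->next_events_list) call.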
> > > +	/* Search for event in the list */
> > > +	list_for_each_entry(event_node, &counter->next_events_list, l)
> > > +		if (event_node->event == watch->event &&
> > > +		    event_node->channel == watch->channel)
> > > +			break;
> > > +
> > > +	/* If event is not already in the list */
> > > +	if (&event_node->l == &counter->next_events_list) {
> > > +		/* Allocate new event node */
> > > +		event_node = kmalloc(sizeof(*event_node), GFP_ATOMIC);
Btw, say we decided that we can add/remove events locklessly, then these
GFP_ATOMICs can be changed to GFP_KERNEL.
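I.e. (assuming the allocations no longer happen in atomic context,
e.g. they run under a mutex instead of a spinlock):

	event_node = kmalloc(sizeof(*event_node), GFP_KERNEL);

GFP_KERNEL can sleep and retry, so it is much less likely to fail than
GFP_ATOMIC.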
> > > +		if (!event_node)
> > > +			return -ENOMEM;
> > > +
> > > +		/* Configure event node and add to the list */
> > > +		event_node->event = watch->event;
> > > +		event_node->channel = watch->channel;
> > > +		INIT_LIST_HEAD(&event_node->comp_list);
> > > +		list_add(&event_node->l, &counter->next_events_list);
> > > +	}
> > > +
> > > +	/* Check if component watch has already been set before */
> > > +	list_for_each_entry(comp_node, &event_node->comp_list, l)
> > > +		if (comp_node->parent == cfg->parent &&
> > > +		    comp_node->comp.count_u8_read == cfg->comp.count_u8_read)
> > > +			return -EINVAL;
> > > +
> > > +	/* Allocate component node */
> > > +	comp_node = kmalloc(sizeof(*comp_node), GFP_ATOMIC);
                                                ^^^^^^^^^^
> > > +	if (!comp_node) {
> > > +		/* Free event node if no one else is watching */
> > > +		if (list_empty(&event_node->comp_list)) {
> > > +			list_del(&event_node->l);
> > > +			kfree(event_node);
> > > +		}
> > > +		return -ENOMEM;
> > > +	}
> > > +	*comp_node = *cfg;
> > > +
> > > +	/* Add component node to event node */
> > > +	list_add_tail(&comp_node->l, &event_node->comp_list);
> > > +
> > > +	return 0;
> > > +}
regards,
dan carpenter