2023-07-24 05:48:10

by Zheng Yejian

Subject: [PATCH] ring-buffer: Fix wrong stat of cpu_buffer->read

When pages are removed in rb_remove_pages(), 'cpu_buffer->read' is set
to 0 in order to make sure any read iterators reset themselves. However,
this corrupts the 'entries' statistic. The following steps reproduce the
problem (a sketch of the underlying arithmetic follows the steps):

# cd /sys/kernel/tracing/
# 1. Enlarge the ring buffer to prepare for reducing it later:
# echo 20 > per_cpu/cpu0/buffer_size_kb
# 2. Write a log into the ring buffer of cpu0:
# taskset -c 0 echo "hello1" > trace_marker
# 3. Read the log:
# cat per_cpu/cpu0/trace_pipe
<...>-332 [000] ..... 62.406844: tracing_mark_write: hello1
# 4. Stop reading and check the stats: now 0 entries, and 1 event read:
# cat per_cpu/cpu0/stats
entries: 0
[...]
read events: 1
# 5. Reduce the ring buffer:
# echo 7 > per_cpu/cpu0/buffer_size_kb
# 6. Now 'entries' is unexpectedly 1 even though the buffer is actually empty!
# cat per_cpu/cpu0/stats
entries: 1
[...]
read events: 0
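
For reference, the 'entries' value in the stats file is derived from
three counters. A sketch, paraphrased from rb_num_of_entries() in
kernel/trace/ring_buffer.c (simplified, not the verbatim source):

	static unsigned long
	rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
	{
		return local_read(&cpu_buffer->entries) -
			(local_read(&cpu_buffer->overrun) + cpu_buffer->read);
	}

In the steps above, one event was written and one was consumed, so
entries == 1, overrun == 0 and read == 1, giving 1 - (0 + 1) = 0.
Once rb_remove_pages() zeroes 'read' in step 5, the same formula
yields 1 - (0 + 0) = 1 although the buffer is empty.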

To fix it, introduce a 'pages_removed' field that counts the total
number of pages removed since the last reset, and let read iterators
use it to detect that they need to reset themselves, instead of
changing the 'read' pointer.
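
With this change the reproduction sequence above keeps the stat
consistent while iterators are still invalidated. A sketch of the
expected state after step 5 (reasoning from the diff below, not
verified output; N is the number of pages the resize removed):

	/* State after step 5 with this patch applied (N > 0):              */
	/*   cpu_buffer->read          == 1  (no longer zeroed)             */
	/*   cpu_buffer->pages_removed == N  (bumped in rb_remove_pages())  */
	/* entries stat: 1 - (0 + 1) = 0, as expected                       */
	/* rb_iter_peek(): iter->cache_pages_removed (0) != N -> rb_iter_reset() */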

Fixes: 83f40318dab0 ("ring-buffer: Make removal of ring buffer pages atomic")
Signed-off-by: Zheng Yejian <[email protected]>
---
kernel/trace/ring_buffer.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index de061dd47313..46b4a3c7c3bf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -523,6 +523,8 @@ struct ring_buffer_per_cpu {
rb_time_t before_stamp;
u64 event_stamp[MAX_NEST];
u64 read_stamp;
+ /* pages removed since last reset */
+ unsigned long pages_removed;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
@@ -559,6 +561,7 @@ struct ring_buffer_iter {
struct buffer_page *head_page;
struct buffer_page *cache_reader_page;
unsigned long cache_read;
+ unsigned long cache_pages_removed;
u64 read_stamp;
u64 page_stamp;
struct ring_buffer_event *event;
@@ -1957,6 +1960,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
to_remove = rb_list_head(to_remove)->next;
head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
}
+ /* Read iterators need to reset themselves when some pages removed */
+ cpu_buffer->pages_removed += nr_removed;

next_page = rb_list_head(to_remove)->next;

@@ -1978,12 +1983,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
cpu_buffer->head_page = list_entry(next_page,
struct buffer_page, list);

- /*
- * change read pointer to make sure any read iterators reset
- * themselves
- */
- cpu_buffer->read = 0;
-
/* pages are removed, resume tracing and then free the pages */
atomic_dec(&cpu_buffer->record_disabled);
raw_spin_unlock_irq(&cpu_buffer->reader_lock);
@@ -4395,6 +4394,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)

iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;
+ iter->cache_pages_removed = cpu_buffer->pages_removed;

if (iter->head) {
iter->read_stamp = cpu_buffer->read_stamp;
@@ -4849,12 +4849,13 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
buffer = cpu_buffer->buffer;

/*
- * Check if someone performed a consuming read to
- * the buffer. A consuming read invalidates the iterator
- * and we need to reset the iterator in this case.
+ * Check if someone performed a consuming read to the buffer
+ * or removed some pages from the buffer. In these cases,
+ * iterator was invalidated and we need to reset it.
*/
if (unlikely(iter->cache_read != cpu_buffer->read ||
- iter->cache_reader_page != cpu_buffer->reader_page))
+ iter->cache_reader_page != cpu_buffer->reader_page ||
+ iter->cache_pages_removed != cpu_buffer->pages_removed))
rb_iter_reset(iter);

again:
@@ -5298,6 +5299,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
cpu_buffer->last_overrun = 0;

rb_head_page_activate(cpu_buffer);
+ cpu_buffer->pages_removed = 0;
}

/* Must have disabled the cpu buffer then done a synchronize_rcu */
--
2.25.1



2023-07-24 09:47:23

by Steven Rostedt

Subject: Re: [PATCH] ring-buffer: Fix wrong stat of cpu_buffer->read

Do you folks find it fun to post a fix right after I send Linus a pull request? ;-)

Thanks, I'll queue it up (but I will wait a bit before posting in case
more fixes trickle in)

-- Steve


On Mon, 24 Jul 2023 13:40:40 +0800
Zheng Yejian <[email protected]> wrote:

> When pages are removed in rb_remove_pages(), 'cpu_buffer->read' is set
> to 0 in order to make sure any read iterators reset themselves. However,
> this corrupts the 'entries' statistic. The following steps reproduce the
> problem (a sketch of the underlying arithmetic follows the steps):
>
> [...]


2023-07-24 12:47:35

by Zheng Yejian

Subject: Re: [PATCH] ring-buffer: Fix wrong stat of cpu_buffer->read

On 2023/7/24 16:43, Steven Rostedt wrote:
> Do you folks find it fun to post a fix right after I send Linus a pull request? ;-)

It's always so coincidental :-)

>
> Thanks, I'll queue it up (but I will wait a bit before posting in case
> more fixes trickle in)

Thanks!

-- Zheng Yejian
