From: "Steven Rostedt (Google)" <[email protected]>

In preparation for having the ring buffer mapped to a dedicated location,
which will have the same restrictions as user space memory mapped buffers,
allow it to use the "mapped" field of the ring_buffer_per_cpu structure
without having the user space meta page mapping.

When this starts using the mapped field, it will need to handle adding a
user space mapping (and removing it) from a ring buffer that is using a
dedicated memory range.

Signed-off-by: Steven Rostedt (Google) <[email protected]>
---
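
Note for reviewers: the intended use of "mapped" without a meta page looks
roughly like the sketch below. This is illustration only, not part of the
patch, and the helper name is hypothetical:

	/* Hypothetical: mark a buffer on a dedicated (boot-mapped) range
	 * as mapped, so it gets the same restrictions as a user space
	 * mapping, without allocating the user space meta page. */
	static void rb_range_buffer_mark_mapped(struct ring_buffer_per_cpu *cpu_buffer)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
		cpu_buffer->mapped++;	/* meta_page stays NULL */
		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	}
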
 kernel/trace/ring_buffer.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 793ecc454039..44b1d5f1a99a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -5223,6 +5223,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct trace_buffer_meta *meta = cpu_buffer->meta_page;
 
+	if (!meta)
+		return;
+
 	meta->reader.read = cpu_buffer->reader_page->read;
 	meta->reader.id = cpu_buffer->reader_page->id;
 	meta->reader.lost_events = cpu_buffer->lost_events;
@@ -6167,7 +6170,7 @@ rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
 
 	mutex_lock(&cpu_buffer->mapping_lock);
 
-	if (!cpu_buffer->mapped) {
+	if (!cpu_buffer->mapped || !cpu_buffer->meta_page) {
 		mutex_unlock(&cpu_buffer->mapping_lock);
 		return ERR_PTR(-ENODEV);
 	}
@@ -6345,12 +6348,13 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
 	 */
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 	rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
+
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	err = __rb_map_vma(cpu_buffer, vma);
 	if (!err) {
 		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-		cpu_buffer->mapped = 1;
+		cpu_buffer->mapped++;
 		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 	} else {
 		kfree(cpu_buffer->subbuf_ids);
@@ -6388,7 +6392,7 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
 	mutex_lock(&buffer->mutex);
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
-	cpu_buffer->mapped = 0;
+	cpu_buffer->mapped--;
 
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
--
2.43.0
On Wed, Apr 10, 2024 at 09:25:42PM -0400, Steven Rostedt wrote:

[...]

> @@ -6345,12 +6348,13 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,

IIUC, we still allow this buffer to be mapped from user space, so we can
now have mapped && !meta_page.

Then the "if (cpu_buffer->mapped) {" that skips the meta_page creation in
ring_buffer_map() should be replaced by "if (cpu_buffer->meta_page) {".
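
That is, something along these lines, keying off the meta page rather than
the mapped count (untested sketch; the surrounding lines are paraphrased
from my reading of ring_buffer_map()):

	mutex_lock(&cpu_buffer->mapping_lock);

	/* Only skip meta-page creation when a user mapping exists. */
	if (cpu_buffer->meta_page) {	/* was: cpu_buffer->mapped */
		err = __rb_inc_dec_mapped(cpu_buffer, true);
		mutex_unlock(&cpu_buffer->mapping_lock);
		return err;
	}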

[...]

> @@ -6388,7 +6392,7 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
>  	mutex_lock(&buffer->mutex);
>  	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

In this function, there's also a check for cpu_buffer->mapped > 1. This
avoids tearing down the meta-page while someone is still using it.

It seems like a dedicated meta_page counter will be necessary. Otherwise,
in the event of a ring buffer mapped at boot, we would set up the
meta-page on the first mmap() and never tear it down.
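
To make the intended counting concrete, here's a small stand-alone model
of that idea (the user_mapped field and the names below are invented for
illustration; they are not the existing ring-buffer API):

	#include <assert.h>
	#include <stdbool.h>

	/* Model: "mapped" counts every reason the buffer must stay in
	 * mapped mode (boot range, user mmap()s); "user_mapped" counts
	 * only user mmap()s and owns the meta page's lifetime. */
	struct cpu_buf_model {
		int mapped;
		int user_mapped;
		bool has_meta_page;
	};

	static void model_mmap(struct cpu_buf_model *b)
	{
		if (!b->user_mapped)
			b->has_meta_page = true;	/* first user mapping */
		b->user_mapped++;
		b->mapped++;
	}

	static void model_munmap(struct cpu_buf_model *b)
	{
		if (!--b->user_mapped)
			b->has_meta_page = false;	/* last user mapping */
		b->mapped--;
	}

	int main(void)
	{
		/* Boot-mapped buffer: mapped is set, but no meta page. */
		struct cpu_buf_model b = { .mapped = 1 };

		model_mmap(&b);		/* first mmap() creates the meta page */
		model_mmap(&b);		/* second mapping just counts */
		model_munmap(&b);
		model_munmap(&b);	/* tears the meta page down */
		assert(b.mapped == 1 && !b.has_meta_page);
		return 0;
	}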

> 
> -	cpu_buffer->mapped = 0;
> +	cpu_buffer->mapped--;
> 
>  	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);