From: "Steven Rostedt (Google)" <[email protected]>
In preparation for having the ring buffer mapped to a dedicated location,
which will have the same restrictions as user space memory mapped buffers,
allow it to use the "mapped" field of the ring_buffer_per_cpu structure
without having the user space meta page mapping.
When this starts using the mapped field, it will need to handle adding a
user space mapping (and removing it) from a ring buffer that is using a
dedicated memory range.
Signed-off-by: Steven Rostedt (Google) <[email protected]>
---
kernel/trace/ring_buffer.c | 24 +++++++++++++++++-------
1 file changed, 17 insertions(+), 7 deletions(-)
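
(Not part of the patch: a minimal sketch of the accounting this change
sets up, using the field names from the diff below; the sequences are
illustrative, not the actual call paths.)

/*
 * "mapped" counts every reason the per-CPU buffer must stay pinned;
 * "user_mapped" remembers the value "mapped" had when user space
 * first mmap()ed the buffer:
 *
 *   first mmap():  mapped++; user_mapped = mapped;
 *   nested mmap(): mapped++;              (now mapped > user_mapped)
 *   munmap():      if (mapped > user_mapped) mapped--;
 *                  else tear down the meta page, user_mapped = 0;
 *
 * A dedicated-range mapping can then pin the buffer by bumping
 * "mapped" on its own, without ever creating the user space meta page.
 */
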
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 28853966aa9a..aa8eb878e0d4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -491,6 +491,7 @@ struct ring_buffer_per_cpu {
	unsigned long pages_removed;

	unsigned int mapped;
+	unsigned int user_mapped; /* first user space mapping */
	struct mutex mapping_lock;
	unsigned long *subbuf_ids; /* ID to subbuf VA */
	struct trace_buffer_meta *meta_page;
@@ -5224,6 +5225,9 @@ static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct trace_buffer_meta *meta = cpu_buffer->meta_page;

+	if (!meta)
+		return;
+
	meta->reader.read = cpu_buffer->reader_page->read;
	meta->reader.id = cpu_buffer->reader_page->id;
	meta->reader.lost_events = cpu_buffer->lost_events;
@@ -6167,7 +6171,7 @@ rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)

	mutex_lock(&cpu_buffer->mapping_lock);

-	if (!cpu_buffer->mapped) {
+	if (!cpu_buffer->mapped || !cpu_buffer->meta_page) {
		mutex_unlock(&cpu_buffer->mapping_lock);
		return ERR_PTR(-ENODEV);
	}
@@ -6194,7 +6198,7 @@ static int __rb_inc_dec_mapped(struct ring_buffer_per_cpu *cpu_buffer,
	if (inc && cpu_buffer->mapped == UINT_MAX)
		return -EBUSY;

-	if (WARN_ON(!inc && cpu_buffer->mapped == 0))
+	if (WARN_ON(!inc && cpu_buffer->mapped < cpu_buffer->user_mapped))
		return -EINVAL;

	mutex_lock(&cpu_buffer->buffer->mutex);
@@ -6328,7 +6332,7 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,

	mutex_lock(&cpu_buffer->mapping_lock);

-	if (cpu_buffer->mapped) {
+	if (cpu_buffer->user_mapped) {
		err = __rb_map_vma(cpu_buffer, vma);
		if (!err)
			err = __rb_inc_dec_mapped(cpu_buffer, true);
@@ -6359,12 +6363,15 @@ int ring_buffer_map(struct trace_buffer *buffer, int cpu,
	 */
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_setup_ids_meta_page(cpu_buffer, subbuf_ids);
+
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	err = __rb_map_vma(cpu_buffer, vma);
	if (!err) {
		raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-		cpu_buffer->mapped = 1;
+		/* This is the first time it is mapped externally */
+		cpu_buffer->mapped++;
+		cpu_buffer->user_mapped = cpu_buffer->mapped;
		raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
	} else {
		kfree(cpu_buffer->subbuf_ids);
@@ -6392,10 +6399,10 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)

	mutex_lock(&cpu_buffer->mapping_lock);

-	if (!cpu_buffer->mapped) {
+	if (!cpu_buffer->user_mapped) {
		err = -ENODEV;
		goto out;
-	} else if (cpu_buffer->mapped > 1) {
+	} else if (cpu_buffer->mapped > cpu_buffer->user_mapped) {
		__rb_inc_dec_mapped(cpu_buffer, false);
		goto out;
	}
@@ -6403,7 +6410,10 @@ int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
	mutex_lock(&buffer->mutex);
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);

-	cpu_buffer->mapped = 0;
+	/* This is the last user space mapping */
+	if (!WARN_ON_ONCE(cpu_buffer->mapped != cpu_buffer->user_mapped))
+		cpu_buffer->mapped--;
+	cpu_buffer->user_mapped = 0;

	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
--
2.43.0

On Tue, 11 Jun 2024 22:16:43 -0400
Steven Rostedt <[email protected]> wrote:
> From: "Steven Rostedt (Google)" <[email protected]>
>
> In preparation for having the ring buffer mapped to a dedicated location,
> which will have the same restrictions as user space memory mapped buffers,
> allow it to use the "mapped" field of the ring_buffer_per_cpu structure
> without having the user space meta page mapping.
>
> When this starts using the mapped field, it will need to handle adding a
> user space mapping (and removing it) from a ring buffer that is using a
> dedicated memory range.
>
> Signed-off-by: Steven Rostedt (Google) <[email protected]>
> ---
> kernel/trace/ring_buffer.c | 24 +++++++++++++++++-------
> 1 file changed, 17 insertions(+), 7 deletions(-)
>
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 28853966aa9a..aa8eb878e0d4 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -491,6 +491,7 @@ struct ring_buffer_per_cpu {
> 	unsigned long pages_removed;
>
> 	unsigned int mapped;
> +	unsigned int user_mapped; /* first user space mapping */

This actually needs to be a counter, not just a snapshot of the mapped
value taken when it first gets set :-p

The mappings could technically be incremented and decremented in a
different order, which would leave the saved value stale.
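
For illustration only, a minimal sketch of the counter approach
(simplified, no locking; only the field names are borrowed from the
patch, this is not the follow-up patch itself):

struct rb_map_count {
	unsigned int mapped;		/* all pins: user mmap()s + dedicated range */
	unsigned int user_mapped;	/* user space mmap()s only */
};

static void user_map(struct rb_map_count *c)
{
	c->mapped++;
	c->user_mapped++;	/* a counter, not a saved snapshot */
}

static void user_unmap(struct rb_map_count *c)
{
	c->user_mapped--;
	c->mapped--;
	if (!c->user_mapped) {
		/* last user space mapping: free the meta page here */
	}
}

That way a dedicated-range pin can come and go between mmap() and
munmap() without ever making the two counts disagree.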
I'll send a new patch. But since my long test keeps failing for subtle
things, I'm not going to post another series until the full test passes.

-- Steve