Hi all,
We (Android folks) have recently been working on bringing tracing to the
pKVM hypervisor (more about pKVM in [1] [2]), reusing as much as possible
of the tracefs support already available in the host. More specifically,
the ring_buffer_per_cpu is shared between the kernel and the hypervisor,
the latter being the writer while the former only reads. After this
endeavour was presented at the Tracing Summit at the end of last year [3],
Steven observed that this is a similar problem to another idea he had a
while ago: mapping the tracing ring buffers directly into userspace.
The tracing ring-buffer can be stored or sent to the network without any
copy via splice. However, splice does not allow real-time processing of
the traces by userspace without a copy, which can only be achieved by
letting userspace map the ring-buffer directly.
And indeed, both ideas have the same shape: a ring-buffer, one entity
being the writer, the other being a reader, and both sharing the
ring-buffer pages while having different VA spaces. So here's an RFC
bringing userspace mapping of a ring-buffer. It doesn't cover the pKVM
hypervisor case itself, but it nonetheless brings building blocks that
will be reused later.
Any feedback is very much appreciated.
Vincent
[1] https://lwn.net/Articles/836693/
[2] https://www.youtube.com/watch?v=9npebeVFbFw
[3] https://tracingsummit.org/ts/2022/hypervisortracing/
--
As an example, Steve wrote this quick demo that only needs libtracefs:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <tracefs.h>
#include <kbuffer.h>
#include <event-parse.h>
#include <asm/types.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#define TRACE_MMAP_IOCTL_GET_READER_PAGE _IO('T', 0x1)
#define TRACE_MMAP_IOCTL_UPDATE_META_PAGE _IO('T', 0x2)
struct ring_buffer_meta_page {
__u64 entries;
__u64 overrun;
__u32 pages_touched;
__u32 reader_page;
__u32 nr_data_pages;
__u32 data_page_head;
__u32 data_pages[];
};
static char *argv0;
static int page_size;
static char *get_this_name(void)
{
static char *this_name;
char *arg;
char *p;
if (this_name)
return this_name;
arg = argv0;
p = arg+strlen(arg);
while (p >= arg && *p != '/')
p--;
p++;
this_name = p;
return p;
}
static void usage(void)
{
char *p = get_this_name();
printf("usage: %s exec\n"
"\n",p);
exit(-1);
}
static void __vdie(const char *fmt, va_list ap, int err)
{
int ret = errno;
char *p = get_this_name();
if (err && errno)
perror(p);
else
ret = -1;
fprintf(stderr, " ");
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
exit(ret);
}
void die(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vdie(fmt, ap, 0);
va_end(ap);
}
void pdie(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
__vdie(fmt, ap, 1);
va_end(ap);
}
static void read_page(struct tep_handle *tep, struct kbuffer *kbuf,
void *data, int page)
{
static struct trace_seq seq;
struct tep_record record;
if (seq.buffer)
trace_seq_reset(&seq);
else
trace_seq_init(&seq);
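/* Point kbuf at this sub-buffer, then walk and print each event in it */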
kbuffer_load_subbuffer(kbuf, data + page_size * page);
while ((record.data = kbuffer_read_event(kbuf, &record.ts))) {
kbuffer_next_event(kbuf, NULL);
tep_print_event(tep, &seq, &record,
"%s-%d %9d\t%s: %s\n",
TEP_PRINT_COMM,
TEP_PRINT_PID,
TEP_PRINT_TIME,
TEP_PRINT_NAME,
TEP_PRINT_INFO);
trace_seq_do_printf(&seq);
trace_seq_reset(&seq);
}
}
static int get_reader_page(int fd, struct ring_buffer_meta_page *meta)
{
return meta->reader_page;
}
static int next_reader_page(int fd, struct ring_buffer_meta_page *meta)
{
if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER_PAGE) < 0)
pdie("ioctl");
return meta->reader_page;
}
int main (int argc, char **argv)
{
struct ring_buffer_meta_page *map;
struct tep_handle *tep;
struct kbuffer *kbuf;
unsigned long *p;
void *meta;
void *data;
char *buf;
int data_len;
int start;
int page;
int fd;
argv0 = argv[0];
tep = tracefs_local_events(NULL);
kbuf = tep_kbuffer(tep);
page_size = getpagesize();
fd = tracefs_instance_file_open(NULL, "per_cpu/cpu0/trace_pipe_raw",
O_RDONLY);
if (fd < 0)
pdie("raw");
meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
if (meta == MAP_FAILED)
pdie("mmap");
if (ioctl(fd, TRACE_MMAP_IOCTL_UPDATE_META_PAGE) < 0)
pdie("ioctl");
map = meta;
printf("entries: %llu\n", map->entries);
printf("overrun: %llu\n", map->overrun);
printf("pages_touched: %u\n", map->pages_touched);
printf("reader_page: %u\n", map->reader_page);
printf("nr_data_pages: %u\n\n", map->nr_data_pages);
data_len = page_size * map->nr_data_pages;
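/* The data pages follow the meta-page, hence the page_size offset */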
data = mmap(NULL, data_len, PROT_READ, MAP_SHARED, fd, page_size);
if (data == MAP_FAILED)
pdie("mmap data");
page = get_reader_page(fd, meta);
start = page;
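/*
 * Drain the buffer: print the current reader page, then ask the kernel
 * to swap the next page in, until we are back where we started.
 */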
do {
read_page(tep, kbuf, data, page);
printf("reader_page: %u\n", map->reader_page);
printf("PAGE: %d\n", page);
} while ((page = next_reader_page(fd, meta)) != start);
p = data;
printf("%lx\n%lx\n%lx\n\n", p[0], p[1], p[2]);
munmap(data, data_len);
munmap(meta, page_size);
close(fd);
buf = tracefs_instance_file_read(NULL, "per_cpu/cpu0/stats", NULL);
if (!buf)
pdie("stats");
printf("%s\n", buf);
free(buf);
return 0;
}
Vincent Donnefort (2):
ring-buffer: Introducing ring-buffer mapping functions
tracing: Allow user-space mapping of the ring-buffer
include/linux/ring_buffer.h | 8 +
include/uapi/linux/trace_mmap.h | 17 ++
kernel/trace/ring_buffer.c | 355 +++++++++++++++++++++++++++++++-
kernel/trace/trace.c | 74 ++++++-
4 files changed, 441 insertions(+), 13 deletions(-)
create mode 100644 include/uapi/linux/trace_mmap.h
--
2.39.1.581.gbfd45094c4-goog
In preparation for allowing user-space to map a ring-buffer, add a set
of mapping functions:
ring_buffer_{map,unmap}()
ring_buffer_map_fault()
And controls on the ring-buffer:
ring_buffer_get_reader_page() /* swap reader and head */
ring_buffer_update_meta_page()
Mapping the ring-buffer also involves:
A unique ID for each page of the ring-buffer, as the pages are currently
only identified through their in-kernel VA.
A meta-page, which stores statistics about the ring-buffer and the
ordered list of page IDs. One field gives which page is the reader page
and another gives where the ring-buffer starts in the list of data
pages.
The linear mapping exposes the meta-page, followed by each page of the
ring-buffer, ordered by their unique ID, which is assigned during the
first mapping.
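For instance, a reader that has mmapped the buffer needs nothing but the
meta-page to locate any page, including the current head. A minimal
sketch (not part of the patch; it assumes the ring_buffer_meta_page
struct from the new UAPI header, with base and page_size being the start
of the mapping and the system page size):

static void *page_id_to_addr(void *base, size_t page_size, __u32 id)
{
	/* pgoff 0 is the meta-page, data pages start at pgoff 1 */
	return (char *)base + (size_t)(id + 1) * page_size;
}

static void *head_page_addr(void *base, size_t page_size,
			    struct ring_buffer_meta_page *meta)
{
	/* ID of the page holding the oldest data (the "head") */
	__u32 id = meta->data_pages[meta->data_page_head];

	return page_id_to_addr(base, page_size, id);
}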
Once mapped, no page can get in or out of the ring-buffer: the buffer
size will remain unmodified and the splice-enabling functions will in
reality simply memcpy the data instead of swapping pages.
Also, because the meta-page is a single page, it currently limits the
number of ring-buffer pages that can be mapped.
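As a rough illustration of that limit (not spelled out in the patch):
with 4KiB pages, the meta-page header fields occupy a few tens of bytes
and each page ID is a __u32, so META_PAGE_MAX_PAGES works out to roughly
(4096 - 28) / 4, i.e. a bit more than 1000 page IDs, or around 4MiB of
mappable buffer per CPU.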
Signed-off-by: Vincent Donnefort <[email protected]>
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 782e14f62201..4897e17ebdde 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -6,6 +6,8 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
+#include <uapi/linux/trace_mmap.h>
+
struct trace_buffer;
struct ring_buffer_iter;
@@ -211,4 +213,10 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#define trace_rb_cpu_prepare NULL
#endif
+int ring_buffer_map(struct trace_buffer *buffer, int cpu);
+int ring_buffer_unmap(struct trace_buffer *buffer, int cpu);
+struct page *ring_buffer_map_fault(struct trace_buffer *buffer, int cpu,
+ unsigned long pgoff);
+int ring_buffer_get_reader_page(struct trace_buffer *buffer, int cpu);
+int ring_buffer_update_meta_page(struct trace_buffer *buffer, int cpu);
#endif /* _LINUX_RING_BUFFER_H */
diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h
new file mode 100644
index 000000000000..0f3282fa1a94
--- /dev/null
+++ b/include/uapi/linux/trace_mmap.h
@@ -0,0 +1,14 @@
+#ifndef _UAPI_TRACE_MMAP_H_
+#define _UAPI_TRACE_MMAP_H_
+
+struct ring_buffer_meta_page {
+ __u64 entries;
+ __u64 overrun;
+ __u32 pages_touched;
+ __u32 reader_page;
+ __u32 nr_data_pages;
+ __u32 data_page_head;
+ __u32 data_pages[];
+};
+
+#endif /* _UAPI_TRACE_MMAP_H_ */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c366a0a9ddba..ffbb216a18a9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
+#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
@@ -332,6 +333,7 @@ struct buffer_page {
local_t entries; /* entries on this page */
unsigned long real_end; /* real end of data */
struct buffer_data_page *page; /* Actual data page */
+ u32 id; /* ID for external mapping */
};
/*
@@ -529,6 +531,12 @@ struct ring_buffer_per_cpu {
rb_time_t before_stamp;
u64 event_stamp[MAX_NEST];
u64 read_stamp;
+
+ atomic_t mapped;
+ struct rw_semaphore mapping_lock;
+ unsigned long *page_ids; /* ID to addr */
+ void *meta_page;
+
/* ring buffer pages to update, > 0 to add, < 0 to remove */
long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
@@ -1452,12 +1460,41 @@ static inline void rb_inc_page(struct buffer_page **bpage)
*bpage = list_entry(p, struct buffer_page, list);
}
+static inline void
+rb_meta_page_head_move(struct ring_buffer_per_cpu *cpu_buffer, unsigned long num)
+{
+ struct ring_buffer_meta_page *meta = cpu_buffer->meta_page;
+ unsigned long head_id = meta->data_page_head;
+
+ /* No bookkeeping necessary */
+ if (!atomic_read(&cpu_buffer->mapped))
+ return;
+
+ meta->data_page_head = (head_id + num) % cpu_buffer->nr_pages;
+}
+
+static inline void
+rb_meta_page_head_swap(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct ring_buffer_meta_page *meta = cpu_buffer->meta_page;
+
+ /* No bookkeeping necessary */
+ if (!atomic_read(&cpu_buffer->mapped))
+ return;
+
+ meta->reader_page = cpu_buffer->reader_page->id;
+ meta->data_pages[meta->data_page_head] = cpu_buffer->head_page->id;
+
+ rb_meta_page_head_move(cpu_buffer, 1);
+}
+
static struct buffer_page *
rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
{
struct buffer_page *head;
struct buffer_page *page;
struct list_head *list;
+ unsigned long cnt = 0;
int i;
if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
@@ -1479,9 +1516,11 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
do {
if (rb_is_head_page(page, page->list.prev)) {
cpu_buffer->head_page = page;
+ rb_meta_page_head_move(cpu_buffer, cnt);
return page;
}
rb_inc_page(&page);
+ cnt++;
} while (page != head);
}
@@ -1757,6 +1796,7 @@ rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
init_waitqueue_head(&cpu_buffer->irq_work.waiters);
init_waitqueue_head(&cpu_buffer->irq_work.full_waiters);
+ init_rwsem(&cpu_buffer->mapping_lock);
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
GFP_KERNEL, cpu_to_node(cpu));
@@ -2195,7 +2235,6 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
/* prevent another thread from changing buffer sizes */
mutex_lock(&buffer->mutex);
-
if (cpu_id == RING_BUFFER_ALL_CPUS) {
/*
* Don't succeed if resizing is disabled, as a reader might be
@@ -3504,7 +3543,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
return;
/*
- * If this interrupted another event,
+ * If this interrupted another event,
*/
if (atomic_inc_return(this_cpu_ptr(&checking)) != 1)
goto out;
@@ -4665,6 +4704,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
* Now make the new head point back to the reader page.
*/
rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
+ rb_meta_page_head_swap(cpu_buffer);
rb_inc_page(&cpu_buffer->head_page);
local_inc(&cpu_buffer->pages_read);
@@ -5511,6 +5551,12 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
cpu_buffer_a = buffer_a->buffers[cpu];
cpu_buffer_b = buffer_b->buffers[cpu];
+ if (atomic_read(&cpu_buffer_a->mapped) ||
+ atomic_read(&cpu_buffer_b->mapped)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/* At least make sure the two buffers are somewhat the same */
if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
goto out;
@@ -5804,14 +5850,19 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
cpu_buffer->read += rb_page_entries(reader);
cpu_buffer->read_bytes += BUF_PAGE_SIZE;
- /* swap the pages */
- rb_init_page(bpage);
- bpage = reader->page;
- reader->page = *data_page;
- local_set(&reader->write, 0);
- local_set(&reader->entries, 0);
- reader->read = 0;
- *data_page = bpage;
+ if (likely(!atomic_read(&cpu_buffer->mapped))) {
+ /* swap the pages */
+ rb_init_page(bpage);
+ bpage = reader->page;
+ reader->page = *data_page;
+ local_set(&reader->write, 0);
+ local_set(&reader->entries, 0);
+ reader->read = 0;
+ *data_page = bpage;
+ } else {
+ memcpy(bpage->data, cpu_buffer->reader_page->page->data,
+ PAGE_SIZE);
+ }
/*
* Use the real_end for the data size,
@@ -5856,6 +5907,290 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
}
EXPORT_SYMBOL_GPL(ring_buffer_read_page);
+#define META_PAGE_MAX_PAGES \
+ ((PAGE_SIZE - (offsetof(struct ring_buffer_meta_page, data_page_head))) >> 2)
+
+static void rb_free_page_ids(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ kfree(cpu_buffer->page_ids);
+ cpu_buffer->page_ids = NULL;
+}
+
+static int rb_alloc_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ if (cpu_buffer->meta_page)
+ return 0;
+
+ if ((cpu_buffer->nr_pages + 1) > META_PAGE_MAX_PAGES)
+ return -E2BIG;
+
+ cpu_buffer->meta_page = page_to_virt(alloc_page(GFP_USER));
+ if (!cpu_buffer->meta_page)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void rb_free_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ free_page((unsigned long)cpu_buffer->meta_page);
+ cpu_buffer->meta_page = NULL;
+}
+
+static void rb_update_meta_page(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ struct ring_buffer_meta_page *meta;
+ unsigned long flags;
+
+ /* Update the head page if the writer moved it */
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ rb_set_head_page(cpu_buffer);
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ /*
+ * Instead of letting the writer carry the meta page burden,
+ * give the responsibility to the reader.
+ */
+ meta = (struct ring_buffer_meta_page *)cpu_buffer->meta_page;
+ meta->entries = local_read(&cpu_buffer->entries);
+ meta->pages_touched = local_read(&cpu_buffer->pages_touched);
+ meta->overrun = local_read(&cpu_buffer->overrun);
+}
+
+static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
+ unsigned long *page_ids)
+{
+ struct buffer_page *first_page, *bpage;
+ struct ring_buffer_meta_page *meta;
+ int id = 0, i = 0;
+
+ meta = (struct ring_buffer_meta_page *)cpu_buffer->meta_page;
+
+ meta->reader_page = cpu_buffer->reader_page->id;
+ meta->nr_data_pages = cpu_buffer->nr_pages;
+ meta->data_page_head = 0;
+
+ page_ids[id] = (unsigned long)cpu_buffer->reader_page->page;
+ cpu_buffer->reader_page->id = id;
+
+ id++;
+
+ first_page = bpage = rb_set_head_page(cpu_buffer);
+ do {
+ if (i >= META_PAGE_MAX_PAGES) {
+ WARN_ON(1);
+ break;
+ }
+
+ page_ids[id] = (unsigned long)bpage->page;
+ bpage->id = id;
+ meta->data_pages[i] = id;
+
+ rb_inc_page(&bpage);
+ i++; id++;
+ } while (bpage != first_page);
+
+ /* install page ID to kern VA translation */
+ cpu_buffer->page_ids = page_ids;
+}
+
+static struct ring_buffer_per_cpu *
+rb_get_mapped_buffer(struct trace_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return ERR_PTR(-EINVAL);
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ down_read(&cpu_buffer->mapping_lock);
+
+ if (!atomic_read(&cpu_buffer->mapped)) {
+ up_read(&cpu_buffer->mapping_lock);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return cpu_buffer;
+}
+
+void rb_put_mapped_buffer(struct ring_buffer_per_cpu *cpu_buffer)
+{
+ up_read(&cpu_buffer->mapping_lock);
+}
+
+int ring_buffer_map(struct trace_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ unsigned long flags, *page_ids;
+ int err = 0;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -EINVAL;
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ down_write(&cpu_buffer->mapping_lock);
+
+ if (atomic_read(&cpu_buffer->mapped)) {
+ atomic_inc(&cpu_buffer->mapped);
+ goto unlock;
+ }
+
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+ atomic_inc(&cpu_buffer->resize_disabled);
+ mutex_unlock(&buffer->mutex);
+
+ err = rb_alloc_meta_page(cpu_buffer);
+ if (err) {
+ atomic_dec(&cpu_buffer->resize_disabled);
+ goto unlock;
+ }
+
+ /* page_ids include the reader page while nr_pages does not */
+ page_ids = kzalloc(sizeof(*page_ids) * (cpu_buffer->nr_pages + 1),
+ GFP_KERNEL);
+ if (!page_ids) {
+ rb_free_meta_page(cpu_buffer);
+ atomic_dec(&cpu_buffer->resize_disabled);
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ /*
+ * Lock all other readers as we'll disable the splice and enable the
+ * meta-page data_head_page book keeping.
+ */
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ rb_setup_ids_meta_page(cpu_buffer, page_ids);
+ atomic_inc(&cpu_buffer->mapped);
+
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+unlock:
+ up_write(&cpu_buffer->mapping_lock);
+
+ return err;
+}
+
+int ring_buffer_unmap(struct trace_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ int err = 0;
+
+ if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ return -EINVAL;
+
+ cpu_buffer = buffer->buffers[cpu];
+
+ down_write(&cpu_buffer->mapping_lock);
+
+ if (!atomic_read(&cpu_buffer->mapped)) {
+ err = -ENODEV;
+ goto unlock;
+ }
+
+ if (atomic_dec_and_test(&cpu_buffer->mapped)) {
+ unsigned long flags;
+
+ /*
+ * Lock all readers to make sure none will attempt meta-page
+ * book keeping while we free the resources
+ */
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ rb_free_page_ids(cpu_buffer);
+ rb_free_meta_page(cpu_buffer);
+ atomic_dec(&cpu_buffer->resize_disabled);
+
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+ }
+
+unlock:
+ up_write(&cpu_buffer->mapping_lock);
+
+ return err;
+}
+
+/*
+ * +--------------+
+ * | meta page | pgoff=0
+ * +--------------+
+ * | data page1 | pgoff=1 page_ids=0
+ * +--------------+
+ * | data page2 | pgoff=2 page_ids=1
+ * ...
+ */
+struct page *ring_buffer_map_fault(struct trace_buffer *buffer, int cpu,
+ unsigned long pgoff)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct page *page = NULL;
+
+ cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
+ if (IS_ERR(cpu_buffer))
+ return NULL;
+
+ if (!cpu_buffer->page_ids) {
+ WARN_ON(1);
+ goto put;
+ }
+
+ if (pgoff == 0) {
+ page = virt_to_page(cpu_buffer->meta_page);
+ } else {
+ pgoff--;
+
+ if (pgoff > (cpu_buffer->nr_pages))
+ goto put;
+
+ page = virt_to_page(cpu_buffer->page_ids[pgoff]);
+ }
+put:
+ rb_put_mapped_buffer(cpu_buffer);
+
+ return page;
+}
+
+int ring_buffer_get_reader_page(struct trace_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *bpage, *reader;
+ unsigned long flags;
+ int err = 0;
+
+ cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
+ if (IS_ERR(cpu_buffer))
+ return (int)PTR_ERR(cpu_buffer);
+
+ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ reader = cpu_buffer->reader_page;
+ reader->read = rb_page_size(reader);
+ bpage = rb_get_reader_page(cpu_buffer);
+ if (!bpage)
+ err = -ENODEV;
+ raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ rb_put_mapped_buffer(cpu_buffer);
+
+ return err;
+}
+
+int ring_buffer_update_meta_page(struct trace_buffer *buffer, int cpu)
+{
+ struct ring_buffer_per_cpu *cpu_buffer;
+
+ cpu_buffer = rb_get_mapped_buffer(buffer, cpu);
+ if (IS_ERR(cpu_buffer))
+ return PTR_ERR(cpu_buffer);
+
+ rb_update_meta_page(cpu_buffer);
+ rb_put_mapped_buffer(cpu_buffer);
+
+ return 0;
+}
+
/*
* We only allocate new buffers, never free them if the CPU goes down.
* If we were to free the buffer, then the user would lose any trace that was in
--
2.39.1.581.gbfd45094c4-goog
Currently, user-space extracts data from the ring-buffer via splice,
which is handy for storage or network sharing. However, due to splice
limitations, it is not possible to do real-time analysis without a copy.
A solution to that problem is to let user-space map the ring-buffer
directly.
The mapping is exposed via the per-CPU file trace_pipe_raw. The first
page is the meta-page, followed by each page of the ring-buffer, ordered
by their unique page ID. It is therefore easy to translate a page ID
into an offset in the mapping:
* Meta-page -- include/uapi/linux/trace_mmap.h for a description
* Page ID 0
* Page ID 1
...
The mapper must then do what used to be the kernel's job: swap the
reader page with the head. This is done with a newly introduced ioctl:
TRACE_MMAP_IOCTL_GET_READER_PAGE.
To avoid putting too much work on the writer, the meta-page is not
automatically updated. User-space must request an update before reading
with another ioctl: TRACE_MMAP_IOCTL_UPDATE_META_PAGE.
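For illustration, the read loop from the demo in the cover letter then
boils down to something like this (a sketch only, error handling
omitted; process() is a placeholder for whatever sub-buffer parsing is
done, e.g. with kbuffer):

struct ring_buffer_meta_page *meta;
char *data;

meta = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
ioctl(fd, TRACE_MMAP_IOCTL_UPDATE_META_PAGE);
data = mmap(NULL, meta->nr_data_pages * page_size, PROT_READ,
	    MAP_SHARED, fd, page_size);

for (;;) {
	/* Parse the sub-buffer currently exposed as the reader page */
	process(data + meta->reader_page * page_size);

	/* Swap the reader page with the head (used to be the kernel's job) */
	if (ioctl(fd, TRACE_MMAP_IOCTL_GET_READER_PAGE) < 0)
		break;
}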
Signed-off-by: Vincent Donnefort <[email protected]>
diff --git a/include/uapi/linux/trace_mmap.h b/include/uapi/linux/trace_mmap.h
index 0f3282fa1a94..fe3bbe02e571 100644
--- a/include/uapi/linux/trace_mmap.h
+++ b/include/uapi/linux/trace_mmap.h
@@ -11,4 +11,7 @@ struct ring_buffer_meta_page {
__u32 data_pages[];
};
+#define TRACE_MMAP_IOCTL_GET_READER_PAGE _IO('T', 0x1)
+#define TRACE_MMAP_IOCTL_UPDATE_META_PAGE _IO('T', 0x2)
+
#endif /* _UAPI_TRACE_MMAP_H_ */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 78ed5f1baa8c..e7d999499dde 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6351,7 +6351,7 @@ static void tracing_set_nop(struct trace_array *tr)
{
if (tr->current_trace == &nop_trace)
return;
-
+
tr->current_trace->enabled--;
if (tr->current_trace->reset)
@@ -8384,15 +8384,27 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
return ret;
}
-/* An ioctl call with cmd 0 to the ring buffer file will wake up all waiters */
static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ftrace_buffer_info *info = file->private_data;
struct trace_iterator *iter = &info->iter;
+ switch (cmd) {
+ case TRACE_MMAP_IOCTL_GET_READER_PAGE:
+ return ring_buffer_get_reader_page(iter->array_buffer->buffer,
+ iter->cpu_file);
+ case TRACE_MMAP_IOCTL_UPDATE_META_PAGE:
+ return ring_buffer_update_meta_page(iter->array_buffer->buffer,
+ iter->cpu_file);
+ }
+
if (cmd)
- return -ENOIOCTLCMD;
+ return -ENOTTY;
+ /*
+ * An ioctl call with cmd 0 to the ring buffer file will wake up all
+ * waiters
+ */
mutex_lock(&trace_types_lock);
iter->wait_index++;
@@ -8405,6 +8417,61 @@ static long tracing_buffers_ioctl(struct file *file, unsigned int cmd, unsigned
return 0;
}
+static vm_fault_t tracing_buffers_mmap_fault(struct vm_fault *vmf)
+{
+ struct ftrace_buffer_info *info = vmf->vma->vm_file->private_data;
+ struct trace_iterator *iter = &info->iter;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
+ struct page *page;
+
+ page = ring_buffer_map_fault(iter->array_buffer->buffer, iter->cpu_file,
+ vmf->pgoff);
+ if (!page)
+ return ret;
+
+ get_page(page);
+ vmf->page = page;
+
+ return 0;
+}
+
+static void tracing_buffers_mmap_close(struct vm_area_struct *vma)
+{
+ struct ftrace_buffer_info *info = vma->vm_file->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ ring_buffer_unmap(iter->array_buffer->buffer, iter->cpu_file);
+}
+
+static void tracing_buffers_mmap_open(struct vm_area_struct *vma)
+{
+ struct ftrace_buffer_info *info = vma->vm_file->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ WARN_ON(ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file));
+}
+
+static const struct vm_operations_struct tracing_buffers_vmops = {
+ .open = tracing_buffers_mmap_open,
+ .close = tracing_buffers_mmap_close,
+ .fault = tracing_buffers_mmap_fault,
+};
+
+static int tracing_buffers_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct ftrace_buffer_info *info = filp->private_data;
+ struct trace_iterator *iter = &info->iter;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+
+ vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+ vma->vm_flags &= ~VM_MAYWRITE;
+ vma->vm_ops = &tracing_buffers_vmops;
+
+ return ring_buffer_map(iter->array_buffer->buffer, iter->cpu_file);
+}
+
static const struct file_operations tracing_buffers_fops = {
.open = tracing_buffers_open,
.read = tracing_buffers_read,
@@ -8413,6 +8480,7 @@ static const struct file_operations tracing_buffers_fops = {
.splice_read = tracing_buffers_splice_read,
.unlocked_ioctl = tracing_buffers_ioctl,
.llseek = no_llseek,
+ .mmap = tracing_buffers_mmap,
};
static ssize_t
--
2.39.1.581.gbfd45094c4-goog