From: "Steven Rostedt (Red Hat)" <[email protected]>
Currently seq_buf is full when all but one byte of the buffer is
filled. Change it so that the seq_buf is full when all of the
buffer is filled.
Some of the functions would fill the buffer completely and still
report that everything was fine. This was inconsistent with the
maximum of size - 1. Changing the maximum to the full size makes all
functions consistent.
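To make the semantics concrete, here is a small stand-alone user-space
sketch of the old and new accounting (the struct and helpers below are
illustrative models only, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

struct sb_model {
	unsigned int len;
	unsigned int size;
};

/* Old scheme: the last byte is reserved, len == size marks overflow. */
static bool old_overflowed(const struct sb_model *s)
{
	return s->len == s->size;
}

/* New scheme: all size bytes are usable, len == size + 1 marks overflow. */
static bool new_overflowed(const struct sb_model *s)
{
	return s->len > s->size;
}

int main(void)
{
	/* A buffer filled to the brim: an overflow before, valid now. */
	struct sb_model full = { .len = 8, .size = 8 };

	printf("old: overflowed=%d\n", old_overflowed(&full));	/* 1 */
	printf("new: overflowed=%d\n", new_overflowed(&full));	/* 0 */
	return 0;
}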
Link: http://lkml.kernel.org/r/[email protected]
Tested-by: Jiri Kosina <[email protected]>
Acked-by: Jiri Kosina <[email protected]>
Reviewed-by: Petr Mladek <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
---
include/linux/seq_buf.h | 6 +++---
kernel/trace/seq_buf.c | 19 +++++++++++--------
2 files changed, 14 insertions(+), 11 deletions(-)
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 5d91262433e2..581c1fc733c3 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -43,13 +43,13 @@ seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
static inline bool
seq_buf_has_overflowed(struct seq_buf *s)
{
- return s->len == s->size;
+ return s->len > s->size;
}
static inline void
seq_buf_set_overflow(struct seq_buf *s)
{
- s->len = s->size;
+ s->len = s->size + 1;
}
/*
@@ -61,7 +61,7 @@ seq_buf_buffer_left(struct seq_buf *s)
if (seq_buf_has_overflowed(s))
return 0;
- return (s->size - 1) - s->len;
+ return s->size - s->len;
}
extern __printf(2, 3)
diff --git a/kernel/trace/seq_buf.c b/kernel/trace/seq_buf.c
index 7dac34d1235b..9d3bb64dca31 100644
--- a/kernel/trace/seq_buf.c
+++ b/kernel/trace/seq_buf.c
@@ -17,7 +17,7 @@
#include <linux/seq_buf.h>
/* How much buffer is written? */
-#define SEQ_BUF_USED(s) min((s)->len, (s)->size - 1)
+#define SEQ_BUF_USED(s) min((s)->len, (s)->size)
/**
* seq_buf_print_seq - move the contents of seq_buf into a seq_file
@@ -51,7 +51,7 @@ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args)
if (s->len < s->size) {
len = vsnprintf(s->buffer + s->len, s->size - s->len, fmt, args);
- if (s->len + len < s->size) {
+ if (s->len + len <= s->size) {
s->len += len;
return 0;
}
@@ -100,8 +100,11 @@ int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
WARN_ON(s->size == 0);
/*
- * The last byte of the buffer is used to determine if we
- * overflowed or not.
+ * Note, because bitmap_scnprintf() only returns the number of bytes
+ * written and not the number that would be written, we use the last
+ * byte of the buffer to let us know if we overflowed. There's a small
+ * chance that the bitmap could have fit exactly inside the buffer, but
+ * it's not that critical if that does happen.
*/
if (len > 1) {
ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
@@ -140,7 +143,7 @@ int seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary)
if (s->len < s->size) {
ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
- if (s->len + ret < s->size) {
+ if (s->len + ret <= s->size) {
s->len += ret;
return 0;
}
@@ -164,7 +167,7 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
WARN_ON(s->size == 0);
- if (s->len + len < s->size) {
+ if (s->len + len <= s->size) {
memcpy(s->buffer + s->len, str, len);
s->len += len;
return 0;
@@ -186,7 +189,7 @@ int seq_buf_putc(struct seq_buf *s, unsigned char c)
{
WARN_ON(s->size == 0);
- if (s->len + 1 < s->size) {
+ if (s->len + 1 <= s->size) {
s->buffer[s->len++] = c;
return 0;
}
@@ -210,7 +213,7 @@ int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len)
{
WARN_ON(s->size == 0);
- if (s->len + len < s->size) {
+ if (s->len + len <= s->size) {
memcpy(s->buffer + s->len, mem, len);
s->len += len;
return 0;
--
2.1.1
On Thu 2014-11-13 20:13:01, Steven Rostedt wrote:
> From: "Steven Rostedt (Red Hat)" <[email protected]>
>
> Currently seq_buf is full when all but one byte of the buffer is
> filled. Change it so that the seq_buf is full when all of the
> buffer is filled.
>
> Some of the functions would fill the buffer completely and still
> report that everything was fine. This was inconsistent with the
> maximum of size - 1. Changing the maximum to the full size makes all
> functions consistent.
>
> Link: http://lkml.kernel.org/r/[email protected]
>
> Tested-by: Jiri Kosina <[email protected]>
> Acked-by: Jiri Kosina <[email protected]>
> Reviewed-by: Petr Mladek <[email protected]>
> Signed-off-by: Steven Rostedt <[email protected]>
Hmm, we should not apply this patch before we fix all other locations
accessing seq.len. We need to make sure that they do not access
outside of the buffer when seq.len = seq.size + 1.
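To illustrate (a user-space model, not any specific call site), a
caller that still trusts seq.len as a byte count would read past the
buffer once the overflow marker is size + 1:

#include <stdio.h>
#include <string.h>

struct sb_model {
	char *buffer;
	unsigned int size;
	unsigned int len;
};

int main(void)
{
	char storage[4] = { 'a', 'b', 'c', 'd' };
	char out[8];
	struct sb_model s = { .buffer = storage, .size = 4 };

	s.len = s.size + 1;	/* the new overflow marker */

	/*
	 * An unfixed caller would do this and read storage[4], one
	 * byte past the end of the buffer:
	 *
	 *	memcpy(out, s.buffer, s.len);
	 *
	 * The safe pattern clamps the length first:
	 */
	unsigned int used = s.len < s.size ? s.len : s.size;
	memcpy(out, s.buffer, used);

	printf("copied %u of %u claimed bytes\n", used, s.len);
	return 0;
}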
See my comments for "[RFC][PATCH 13/23 v4] tracing: Create seq_buf
layer in trace_seq"
BTW: Are these patches applied in some public branch, please? The
patch series is getting long. I would like to see it applied but I do
not want to copy all the patches and apply them manually :-)
Best Regards,
Petr
PS: I will need to go in a while. I am not sure that I will be able to
review the whole patchset before the weekend. I do the review in the
order of the patches and sent a reply only when I had something to add.
The other patches that I did not comment on (< 17) look fine to me.
On Fri, 14 Nov 2014 18:07:16 +0100
Petr Mladek <[email protected]> wrote:
> Hmm, we should not apply this patch before we fix all other locations
> accessing seq.len. We need to make sure that they do not access
> outside of the buffer when seq.len = seq.size + 1.
>
> See my comments for "[RFC][PATCH 13/23 v4] tracing: Create seq_buf
> layer in trace_seq"
I agree. As I replied there, I'll add a patch before this gets applied
(right after seq_buf_left() is introduced) that will fix those issues.
>
> BTW: Are these patches applied in some public branch, please? The
> patch series is getting long. I would like to see it applied but I do
> not want to copy all the patches and apply them manually :-)
I'll start pushing them up to my repo under rfc/seq-buf
git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace.git
That branch will always be rebasing. I have it applied on top of my
ftrace/core branch that will become my next for-next soon (after it
passes all my tests).
>
> Best Regards,
> Petr
>
> PS: I will need to go in a while. I am not sure that I will be able to
> review the whole patchset before the weekend. I do the review in the
> order of the patches and sent a reply only when I had something to add.
> The other patches that I did not comment on (< 17) look fine to me.
Thanks, I'll add your Reviewed-by tags on them.
To ease the pain of review, I'll reply to your email comments with the
patches as I fix them up (as I've already done). I'll keep the commit
ids as well so that you can verify them. I'll try to remember to
constantly update my rfc/seq-buf branch.
Thanks a lot for your reviews. I really do appreciate it.
-- Steve
On Fri, 14 Nov 2014 12:30:01 -0500
Steven Rostedt <[email protected]> wrote:
> > Hmm, we should not apply this patch before we fix all other locations
> > accessing seq.len. We need to make sure that they do not access
> > outside of the buffer when seq.len = seq.size + 1.
> >
> > See my comments for "[RFC][PATCH 13/23 v4] tracing: Create seq_buf
> > layer in trace_seq"
>
> I agree. As I replied there, I'll add a patch before this gets applied
> (right after seq_buf_left() is introduced) that will fix those issues.
I moved seq_buf_used() to before the full buffer usage patch, and
here's a patch I added.
I'll be posting a v5 with all these changes too, but this may make it
easier to review:
-- Steve
From a43da42e939ba41a32c7ea83793cef9c2360ec1a Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (Red Hat)" <[email protected]>
Date: Fri, 14 Nov 2014 15:49:41 -0500
Subject: [PATCH] tracing: Use trace_seq_used() and seq_buf_used() instead of
len
As seq_buf->len will soon be size + 1 when there's an overflow, we
must use the trace_seq_used() or seq_buf_used() methods to get the real
length. This will prevent buffer overflow issues when the len of the
seq_buf descriptor is used directly to copy memory.
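The shape of the conversion, sketched as a user-space model (used_model()
below stands in for the seq_buf_used()/trace_seq_used() accessors this
patch introduces):

#include <stdio.h>
#include <string.h>

struct sb_model {
	char *buffer;
	unsigned int size;
	unsigned int len;	/* may be size + 1 on overflow */
};

/* Model of seq_buf_used(): clamp len to the buffer size. */
static unsigned int used_model(const struct sb_model *s)
{
	return s->len < s->size ? s->len : s->size;
}

int main(void)
{
	char storage[4] = { 'a', 'b', 'c', 'd' };
	char out[4];

	/* An overflowed buffer: len is one past the size. */
	struct sb_model s = { .buffer = storage, .size = 4, .len = 5 };

	/* Before: memcpy(out, s.buffer, s.len) would copy 5 bytes. */
	/* After: go through the clamped accessor instead. */
	memcpy(out, s.buffer, used_model(&s));

	printf("used=%u\n", used_model(&s));
	return 0;
}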
Link: http://lkml.kernel.org/r/[email protected]
Reported-by: Petr Mladek <[email protected]>
Signed-off-by: Steven Rostedt <[email protected]>
---
include/linux/trace_seq.h | 20 +++++++++++++++-
kernel/trace/trace.c | 44 ++++++++++++++++++++++++------------
kernel/trace/trace_events.c | 9 +++++---
kernel/trace/trace_functions_graph.c | 5 +++-
kernel/trace/trace_seq.c | 2 +-
5 files changed, 59 insertions(+), 21 deletions(-)
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 85d37106be3d..cfaf5a1d4bad 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -24,6 +24,24 @@ trace_seq_init(struct trace_seq *s)
}
/**
+ * trace_seq_used - amount of actual data written to buffer
+ * @s: trace sequence descriptor
+ *
+ * Returns the amount of data written to the buffer.
+ *
+ * IMPORTANT!
+ *
+ * Use this instead of @s->seq.len if you need to pass the amount
+ * of data from the buffer to another buffer (userspace, or what not).
+ * The @s->seq.len on overflow is bigger than the buffer size and
+ * using it can cause access to undefined memory.
+ */
+static inline int trace_seq_used(struct trace_seq *s)
+{
+ return seq_buf_used(&s->seq);
+}
+
+/**
* trace_seq_buffer_ptr - return pointer to next location in buffer
* @s: trace sequence descriptor
*
@@ -35,7 +53,7 @@ trace_seq_init(struct trace_seq *s)
static inline unsigned char *
trace_seq_buffer_ptr(struct trace_seq *s)
{
- return s->buffer + s->seq.len;
+ return s->buffer + seq_buf_used(&s->seq);
}
/**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7d7a07e9b9e9..9f1ffc707f3b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -944,10 +944,10 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
int len;
- if (s->seq.len <= s->seq.readpos)
+ if (trace_seq_used(s) <= s->seq.readpos)
return -EBUSY;
- len = s->seq.len - s->seq.readpos;
+ len = trace_seq_used(s) - s->seq.readpos;
if (cnt > len)
cnt = len;
memcpy(buf, s->buffer + s->seq.readpos, cnt);
@@ -4514,18 +4514,18 @@ waitagain:
trace_access_lock(iter->cpu_file);
while (trace_find_next_entry_inc(iter) != NULL) {
enum print_line_t ret;
- int len = iter->seq.seq.len;
+ int save_len = iter->seq.seq.len;
ret = print_trace_line(iter);
if (ret == TRACE_TYPE_PARTIAL_LINE) {
/* don't print partial lines */
- iter->seq.seq.len = len;
+ iter->seq.seq.len = save_len;
break;
}
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(iter);
- if (iter->seq.seq.len >= cnt)
+ if (trace_seq_used(&iter->seq) >= cnt)
break;
/*
@@ -4541,7 +4541,7 @@ waitagain:
/* Now copy what we have to the user */
sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
- if (iter->seq.seq.readpos >= iter->seq.seq.len)
+ if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
trace_seq_init(&iter->seq);
/*
@@ -4575,20 +4575,33 @@ static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
size_t count;
+ int save_len;
int ret;
/* Seq buffer is page-sized, exactly what we need. */
for (;;) {
- count = iter->seq.seq.len;
+ save_len = iter->seq.seq.len;
ret = print_trace_line(iter);
- count = iter->seq.seq.len - count;
- if (rem < count) {
- rem = 0;
- iter->seq.seq.len -= count;
+
+ if (trace_seq_has_overflowed(&iter->seq)) {
+ iter->seq.seq.len = save_len;
break;
}
+
+ /*
+ * This should not be hit, because it should only
+ * be set if the iter->seq overflowed. But check it
+ * anyway to be safe.
+ */
if (ret == TRACE_TYPE_PARTIAL_LINE) {
- iter->seq.seq.len -= count;
+ iter->seq.seq.len = save_len;
+ break;
+ }
+
+ count = trace_seq_used(&iter->seq) - save_len;
+ if (rem < count) {
+ rem = 0;
+ iter->seq.seq.len = save_len;
break;
}
@@ -4669,13 +4682,13 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
/* Copy the data into the page, so we can start over. */
ret = trace_seq_to_buffer(&iter->seq,
page_address(spd.pages[i]),
- iter->seq.seq.len);
+ trace_seq_used(&iter->seq));
if (ret < 0) {
__free_page(spd.pages[i]);
break;
}
spd.partial[i].offset = 0;
- spd.partial[i].len = iter->seq.seq.len;
+ spd.partial[i].len = trace_seq_used(&iter->seq);
trace_seq_init(&iter->seq);
}
@@ -5676,7 +5689,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
trace_seq_printf(s, "read events: %ld\n", cnt);
- count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->seq.len);
+ count = simple_read_from_buffer(ubuf, count, ppos,
+ s->buffer, trace_seq_used(s));
kfree(s);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 4d0067dd7f88..935cbea78532 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1044,7 +1044,8 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_unlock(&event_mutex);
if (file)
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->seq.len);
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, trace_seq_used(s));
kfree(s);
@@ -1210,7 +1211,8 @@ subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
trace_seq_init(s);
print_subsystem_event_filter(system, s);
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->seq.len);
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, trace_seq_used(s));
kfree(s);
@@ -1265,7 +1267,8 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
trace_seq_init(s);
func(s);
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->seq.len);
+ r = simple_read_from_buffer(ubuf, cnt, ppos,
+ s->buffer, trace_seq_used(s));
kfree(s);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6d1342ae7a44..ec35468349a7 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1153,6 +1153,9 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
return ret;
}
+ if (trace_seq_has_overflowed(s))
+ goto out;
+
/* Strip ending newline */
if (s->buffer[s->seq.len - 1] == '\n') {
s->buffer[s->seq.len - 1] = '\0';
@@ -1160,7 +1163,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
}
trace_seq_puts(s, " */\n");
-
+ out:
return trace_handle_return(s);
}
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index 087fa514069d..0c7aab4dd94f 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -30,7 +30,7 @@
#define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq)
/* How much buffer is written? */
-#define TRACE_SEQ_BUF_USED(s) min((s)->seq.len, (unsigned int)(PAGE_SIZE - 1))
+#define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq)
/*
* trace_seq should work with being initialized with 0s.
--
1.8.1.4