2023-03-03 15:17:38

by Uros Bizjak

[permalink] [raw]
Subject: [PATCH v3 0/3] Improve trace/ring_buffer.c

This series improves ring_buffer.c by changing the type of some
static functions to void or bool and uses try_cmpxchg instead of
cmpxchg (*ptr, old, new) == old where appropriate.

v2: Convert only loops with cmpxchg.
v3: Rearrange variable declarations.

Cc: Steven Rostedt <[email protected]>
Cc: Masami Hiramatsu <[email protected]>

Uros Bizjak (3):
ring_buffer: Change some static functions to void
ring_buffer: Change some static functions to bool
ring_buffer: Use try_cmpxchg instead of cmpxchg

kernel/trace/ring_buffer.c | 77 ++++++++++++++++----------------------
1 file changed, 33 insertions(+), 44 deletions(-)

--
2.39.2



2023-03-03 15:17:42

by Uros Bizjak

[permalink] [raw]
Subject: [PATCH v3 3/3] ring_buffer: Use try_cmpxchg instead of cmpxchg

Use try_cmpxchg instead of cmpxchg (*ptr, old, new) == old.
x86 CMPXCHG instruction returns success in ZF flag, so this change
saves a compare after cmpxchg (and related move instruction in
front of cmpxchg).

Also, try_cmpxchg implicitly assigns old *ptr value to "old" when cmpxchg
fails. There is no need to re-read the value in the loop.

No functional change intended.

Cc: Steven Rostedt <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Signed-off-by: Uros Bizjak <[email protected]>
---
v2: Convert only loops with cmpxchg.
---
kernel/trace/ring_buffer.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 71df857242b4..3bfc2e8a3da4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -4061,10 +4061,10 @@ void ring_buffer_record_off(struct trace_buffer *buffer)
unsigned int rd;
unsigned int new_rd;

+ rd = atomic_read(&buffer->record_disabled);
do {
- rd = atomic_read(&buffer->record_disabled);
new_rd = rd | RB_BUFFER_OFF;
- } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+ } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_off);

@@ -4084,10 +4084,10 @@ void ring_buffer_record_on(struct trace_buffer *buffer)
unsigned int rd;
unsigned int new_rd;

+ rd = atomic_read(&buffer->record_disabled);
do {
- rd = atomic_read(&buffer->record_disabled);
new_rd = rd & ~RB_BUFFER_OFF;
- } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+ } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
}
EXPORT_SYMBOL_GPL(ring_buffer_record_on);

--
2.39.2


2023-03-03 15:17:44

by Uros Bizjak

[permalink] [raw]
Subject: [PATCH v3 2/3] ring_buffer: Change some static functions to bool

The return values of some functions are of boolean type. Change the
type of these functions to bool and adjust their return values. Also
change the type of some internal variables to bool.

No functional change intended.

Cc: Steven Rostedt <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Signed-off-by: Uros Bizjak <[email protected]>
---
v3: Rearrange variable declarations.
---
kernel/trace/ring_buffer.c | 47 ++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 25 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05fdc92554df..71df857242b4 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -163,7 +163,7 @@ enum {
#define extended_time(event) \
(event->type_len >= RINGBUF_TYPE_TIME_EXTEND)

-static inline int rb_null_event(struct ring_buffer_event *event)
+static inline bool rb_null_event(struct ring_buffer_event *event)
{
return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}
@@ -367,11 +367,9 @@ static void free_buffer_page(struct buffer_page *bpage)
/*
* We need to fit the time_stamp delta into 27 bits.
*/
-static inline int test_time_stamp(u64 delta)
+static inline bool test_time_stamp(u64 delta)
{
- if (delta & TS_DELTA_TEST)
- return 1;
- return 0;
+ return !!(delta & TS_DELTA_TEST);
}

#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
@@ -700,7 +698,7 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
return ret == expect;
}

-static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
{
unsigned long cnt, top, bottom, msb;
unsigned long cnt2, top2, bottom2, msb2;
@@ -1490,7 +1488,7 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
return NULL;
}

-static int rb_head_page_replace(struct buffer_page *old,
+static bool rb_head_page_replace(struct buffer_page *old,
struct buffer_page *new)
{
unsigned long *ptr = (unsigned long *)&old->list.prev->next;
@@ -1917,7 +1915,7 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
return local_read(&bpage->write) & RB_WRITE_MASK;
}

-static int
+static bool
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
struct list_head *tail_page, *to_remove, *next_page;
@@ -2030,12 +2028,13 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
return nr_removed == 0;
}

-static int
+static bool
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *pages = &cpu_buffer->new_pages;
- int retries, success;
unsigned long flags;
+ bool success;
+ int retries;

/* Can be called at early boot up, where interrupts must not been enabled */
raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2054,7 +2053,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
* spinning.
*/
retries = 10;
- success = 0;
+ success = false;
while (retries--) {
struct list_head *head_page, *prev_page, *r;
struct list_head *last_page, *first_page;
@@ -2083,7 +2082,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
* pointer to point to end of list
*/
head_page->prev = last_page;
- success = 1;
+ success = true;
break;
}
}
@@ -2111,7 +2110,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)

static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
- int success;
+ bool success;

if (cpu_buffer->nr_pages_to_update > 0)
success = rb_insert_pages(cpu_buffer);
@@ -2994,7 +2993,7 @@ static u64 rb_time_delta(struct ring_buffer_event *event)
}
}

-static inline int
+static inline bool
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
{
@@ -3015,7 +3014,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
delta = rb_time_delta(event);

if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
- return 0;
+ return false;

/* Make sure the write stamp is read before testing the location */
barrier();
@@ -3028,7 +3027,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
/* Something came in, can't discard */
if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
write_stamp, write_stamp - delta))
- return 0;
+ return false;

/*
* It's possible that the event time delta is zero
@@ -3061,12 +3060,12 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
if (index == old_index) {
/* update counters */
local_sub(event_length, &cpu_buffer->entries_bytes);
- return 1;
+ return true;
}
}

/* could not discard */
- return 0;
+ return false;
}

static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
@@ -3281,7 +3280,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
* Note: The TRANSITION bit only handles a single transition between context.
*/

-static __always_inline int
+static __always_inline bool
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
unsigned int val = cpu_buffer->current_context;
@@ -3298,14 +3297,14 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
bit = RB_CTX_TRANSITION;
if (val & (1 << (bit + cpu_buffer->nest))) {
do_ring_buffer_record_recursion();
- return 1;
+ return true;
}
}

val |= (1 << (bit + cpu_buffer->nest));
cpu_buffer->current_context = val;

- return 0;
+ return false;
}

static __always_inline void
@@ -5408,9 +5407,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- bool dolock;
+ bool dolock, ret;
int cpu;
- int ret;

/* yes this is racy, but if you don't like the race, lock the buffer */
for_each_buffer_cpu(buffer, cpu) {
@@ -5438,8 +5436,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
unsigned long flags;
- bool dolock;
- int ret;
+ bool dolock, ret;

if (!cpumask_test_cpu(cpu, buffer->cpumask))
return true;
--
2.39.2


2023-03-03 15:17:47

by Uros Bizjak

[permalink] [raw]
Subject: [PATCH v3 1/3] ring_buffer: Change some static functions to void

The results of some static functions are not used. Change the
type of these functions to void and remove unnecessary returns.

No functional change intended.

Cc: Steven Rostedt <[email protected]>
Signed-off-by: Uros Bizjak <[email protected]>
Reviewed-by: Masami Hiramatsu <[email protected]>
---
kernel/trace/ring_buffer.c | 22 +++++++---------------
1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index af50d931b020..05fdc92554df 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1569,15 +1569,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
}
}

-static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
struct buffer_page *bpage)
{
unsigned long val = (unsigned long)bpage;

- if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
- return 1;
-
- return 0;
+ RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
}

/**
@@ -1587,30 +1584,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
* As a safety measure we check to make sure the data pages have not
* been corrupted.
*/
-static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
struct list_head *head = rb_list_head(cpu_buffer->pages);
struct list_head *tmp;

if (RB_WARN_ON(cpu_buffer,
rb_list_head(rb_list_head(head->next)->prev) != head))
- return -1;
+ return;

if (RB_WARN_ON(cpu_buffer,
rb_list_head(rb_list_head(head->prev)->next) != head))
- return -1;
+ return;

for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
if (RB_WARN_ON(cpu_buffer,
rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
- return -1;
+ return;

if (RB_WARN_ON(cpu_buffer,
rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
- return -1;
+ return;
}
-
- return 0;
}

static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
@@ -4500,7 +4495,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
default:
RB_WARN_ON(cpu_buffer, 1);
}
- return;
}

static void
@@ -4531,7 +4525,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
default:
RB_WARN_ON(iter->cpu_buffer, 1);
}
- return;
}

static struct buffer_page *
@@ -4946,7 +4939,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
{
if (likely(locked))
raw_spin_unlock(&cpu_buffer->reader_lock);
- return;
}

/**
--
2.39.2


2023-03-03 16:16:44

by Mukesh Ojha

[permalink] [raw]
Subject: Re: [PATCH v3 1/3] ring_buffer: Change some static functions to void



On 3/3/2023 8:47 PM, Uros Bizjak wrote:
> The results of some static functions are not used. Change the
> type of these function to void and remove unnecessary returns.
>
> No functional change intended.
>
> Cc: Steven Rostedt <[email protected]>
> Signed-off-by: Uros Bizjak <[email protected]>
> Reviewed-by: Masami Hiramatsu <[email protected]>
> ---
> kernel/trace/ring_buffer.c | 22 +++++++---------------
> 1 file changed, 7 insertions(+), 15 deletions(-)
>
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index af50d931b020..05fdc92554df 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -1569,15 +1569,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
> }
> }
>
> -static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
> +static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
> struct buffer_page *bpage)
> {
> unsigned long val = (unsigned long)bpage;
>
> - if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
> - return 1;
> -
> - return 0;
> + RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
> }
>
> /**
> @@ -1587,30 +1584,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
> * As a safety measure we check to make sure the data pages have not
> * been corrupted.
> */
> -static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
> +static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
> {
> struct list_head *head = rb_list_head(cpu_buffer->pages);
> struct list_head *tmp;
>
> if (RB_WARN_ON(cpu_buffer,
> rb_list_head(rb_list_head(head->next)->prev) != head))
> - return -1;
> + return;
>
> if (RB_WARN_ON(cpu_buffer,
> rb_list_head(rb_list_head(head->prev)->next) != head))
> - return -1;
> + return;
>
> for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
> if (RB_WARN_ON(cpu_buffer,
> rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
> - return -1;
> + return;
>
> if (RB_WARN_ON(cpu_buffer,
> rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
> - return -1;
> + return;
> }
> -
> - return 0;
> }
>
> static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
> @@ -4500,7 +4495,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
> default:
> RB_WARN_ON(cpu_buffer, 1);
> }
> - return;
> }
>
> static void
> @@ -4531,7 +4525,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
> default:
> RB_WARN_ON(iter->cpu_buffer, 1);
> }
> - return;
> }
>
> static struct buffer_page *
> @@ -4946,7 +4939,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
> {
> if (likely(locked))
> raw_spin_unlock(&cpu_buffer->reader_lock);
> - return;
> }
>

Nice clean up, thanks.

Reviewed-by: Mukesh Ojha <[email protected]>

-Mukesh
> /**

2023-03-03 16:34:22

by Mukesh Ojha

[permalink] [raw]
Subject: Re: [PATCH v3 2/3] ring_buffer: Change some static functions to bool



On 3/3/2023 8:47 PM, Uros Bizjak wrote:
> The return values of some functions are of boolean type. Change the
> type of these function to bool and adjust their return values. Also
> change type of some internal varibles to bool.
>
> No functional change intended.
>
> Cc: Steven Rostedt <[email protected]>
> Cc: Masami Hiramatsu <[email protected]>
> Signed-off-by: Uros Bizjak <[email protected]>
> ---
> v3: Rearrange variable declarations.
> ---
> kernel/trace/ring_buffer.c | 47 ++++++++++++++++++--------------------
> 1 file changed, 22 insertions(+), 25 deletions(-)
>
> diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
> index 05fdc92554df..71df857242b4 100644
> --- a/kernel/trace/ring_buffer.c
> +++ b/kernel/trace/ring_buffer.c
> @@ -163,7 +163,7 @@ enum {
> #define extended_time(event) \
> (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
>
> -static inline int rb_null_event(struct ring_buffer_event *event)
> +static inline bool rb_null_event(struct ring_buffer_event *event)
> {
> return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
> }
> @@ -367,11 +367,9 @@ static void free_buffer_page(struct buffer_page *bpage)
> /*
> * We need to fit the time_stamp delta into 27 bits.
> */
> -static inline int test_time_stamp(u64 delta)
> +static inline bool test_time_stamp(u64 delta)
> {
> - if (delta & TS_DELTA_TEST)
> - return 1;
> - return 0;
> + return !!(delta & TS_DELTA_TEST);
> }
>
> #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
> @@ -700,7 +698,7 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
> return ret == expect;
> }
>
> -static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
> +static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
> {
> unsigned long cnt, top, bottom, msb;
> unsigned long cnt2, top2, bottom2, msb2;
> @@ -1490,7 +1488,7 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
> return NULL;
> }
>
> -static int rb_head_page_replace(struct buffer_page *old,
> +static bool rb_head_page_replace(struct buffer_page *old,
> struct buffer_page *new)

nit: did you miss ret in rb_get_reader_page()?

> {
> unsigned long *ptr = (unsigned long *)&old->list.prev->next;
> @@ -1917,7 +1915,7 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
> return local_read(&bpage->write) & RB_WRITE_MASK;
> }
>
> -static int
> +static bool
> rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
> {
> struct list_head *tail_page, *to_remove, *next_page;
> @@ -2030,12 +2028,13 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
> return nr_removed == 0;
> }
>
> -static int
> +static bool
> rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
> {
> struct list_head *pages = &cpu_buffer->new_pages;
> - int retries, success;
> unsigned long flags;
> + bool success;
> + int retries;
>
> /* Can be called at early boot up, where interrupts must not been enabled */
> raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
> @@ -2054,7 +2053,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
> * spinning.
> */
> retries = 10;
> - success = 0;
> + success = false;
> while (retries--) {
> struct list_head *head_page, *prev_page, *r;
> struct list_head *last_page, *first_page;
> @@ -2083,7 +2082,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
> * pointer to point to end of list
> */
> head_page->prev = last_page;
> - success = 1;
> + success = true;
> break;
> }
> }
> @@ -2111,7 +2110,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
>
> static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
> {
> - int success;
> + bool success;
>
> if (cpu_buffer->nr_pages_to_update > 0)
> success = rb_insert_pages(cpu_buffer);
> @@ -2994,7 +2993,7 @@ static u64 rb_time_delta(struct ring_buffer_event *event)
> }
> }
>
> -static inline int
> +static inline bool
> rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
> struct ring_buffer_event *event)
> {
> @@ -3015,7 +3014,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
> delta = rb_time_delta(event);
>
> if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
> - return 0;
> + return false;
>
> /* Make sure the write stamp is read before testing the location */
> barrier();
> @@ -3028,7 +3027,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
> /* Something came in, can't discard */
> if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
> write_stamp, write_stamp - delta))
> - return 0;
> + return false;
>
> /*
> * It's possible that the event time delta is zero
> @@ -3061,12 +3060,12 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
> if (index == old_index) {
> /* update counters */
> local_sub(event_length, &cpu_buffer->entries_bytes);
> - return 1;
> + return true;
> }
> }
>
> /* could not discard */
> - return 0;
> + return false;
> }
>
> static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
> @@ -3281,7 +3280,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
> * Note: The TRANSITION bit only handles a single transition between context.
> */
>
> -static __always_inline int
> +static __always_inline bool
> trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
> {
> unsigned int val = cpu_buffer->current_context;
> @@ -3298,14 +3297,14 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
> bit = RB_CTX_TRANSITION;
> if (val & (1 << (bit + cpu_buffer->nest))) {
> do_ring_buffer_record_recursion();
> - return 1;
> + return true;
> }
> }
>
> val |= (1 << (bit + cpu_buffer->nest));
> cpu_buffer->current_context = val;
>
> - return 0;
> + return false;
> }
>
> static __always_inline void
> @@ -5408,9 +5407,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
> {
> struct ring_buffer_per_cpu *cpu_buffer;
> unsigned long flags;
> - bool dolock;
> + bool dolock, ret;
> int cpu;
> - int ret;

nit: would have been nice if you would not have changed existing stuff
i.e each variable on a separate line.

>
> /* yes this is racy, but if you don't like the race, lock the buffer */
> for_each_buffer_cpu(buffer, cpu) {
> @@ -5438,8 +5436,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
> {
> struct ring_buffer_per_cpu *cpu_buffer;
> unsigned long flags;
> - bool dolock;
> - int ret;
> + bool dolock, ret;

same here

>
> if (!cpumask_test_cpu(cpu, buffer->cpumask))
> return true;


Feel free to add

Reviewed-by: Mukesh Ojha <[email protected]>

-Mukesh

2023-03-03 17:08:06

by Steven Rostedt

[permalink] [raw]
Subject: Re: [PATCH v3 2/3] ring_buffer: Change some static functions to bool

On Fri, 3 Mar 2023 22:04:01 +0530
Mukesh Ojha <[email protected]> wrote:

> > static __always_inline void
> > @@ -5408,9 +5407,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
> > {
> > struct ring_buffer_per_cpu *cpu_buffer;
> > unsigned long flags;
> > - bool dolock;
> > + bool dolock, ret;
> > int cpu;
> > - int ret;
>
> nit: would have been nice if you would not have changed existing stuff
> i.e each variable on a separate line.

I agree. I prefer each variable on a separate line, as it makes fixing
"unused variable" warnings easier.

-- Steve


>
> >
> > /* yes this is racy, but if you don't like the race, lock the buffer */
> > for_each_buffer_cpu(buffer, cpu) {
> > @@ -5438,8 +5436,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
> > {
> > struct ring_buffer_per_cpu *cpu_buffer;
> > unsigned long flags;
> > - bool dolock;
> > - int ret;
> > + bool dolock, ret;
>
> same here


2023-03-04 08:56:50

by Uros Bizjak

[permalink] [raw]
Subject: Re: [PATCH v3 2/3] ring_buffer: Change some static functions to bool

On Fri, Mar 3, 2023 at 5:34 PM Mukesh Ojha <[email protected]> wrote:

> > -static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
> > +static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
> > {
> > unsigned long cnt, top, bottom, msb;
> > unsigned long cnt2, top2, bottom2, msb2;
> > @@ -1490,7 +1488,7 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
> > return NULL;
> > }
> >
> > -static int rb_head_page_replace(struct buffer_page *old,
> > +static bool rb_head_page_replace(struct buffer_page *old,
> > struct buffer_page *new)
>
> nit: did you miss ret in rb_get_reader_page()?

Indeed, thanks for noticing!

> > static __always_inline void
> > @@ -5408,9 +5407,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
> > {
> > struct ring_buffer_per_cpu *cpu_buffer;
> > unsigned long flags;
> > - bool dolock;
> > + bool dolock, ret;
> > int cpu;
> > - int ret;
>
> nit: would have been nice if you would not have changed existing stuff
> i.e each variable on a separate line.
> >
> > /* yes this is racy, but if you don't like the race, lock the buffer */
> > for_each_buffer_cpu(buffer, cpu) {
> > @@ -5438,8 +5436,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
> > {
> > struct ring_buffer_per_cpu *cpu_buffer;
> > unsigned long flags;
> > - bool dolock;
> > - int ret;
> > + bool dolock, ret;
>
> same here

I'll do it in the way you propose in the follow-up revision.

> >
> > if (!cpumask_test_cpu(cpu, buffer->cpumask))
> > return true;
>
>
> Feel free to add
>
> Reviewed-by: Mukesh Ojha <[email protected]>

Thanks.

Uros.