2015-02-05 01:14:11

by Stephen Boyd

Subject: Re: [PATCH v3 2/4] sched_clock: Optimize cache line usage

On 01/30, Daniel Thompson wrote:
> diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
> index 3d21a8719444..cb69a47dfee4 100644
> --- a/kernel/time/sched_clock.c
> +++ b/kernel/time/sched_clock.c
> @@ -18,28 +18,44 @@
> #include <linux/seqlock.h>
> #include <linux/bitops.h>
>
> -struct clock_data {
> - ktime_t wrap_kt;
> +/**
> + * struct clock_read_data - data required to read from sched_clock
> + *

Nitpick: Won't kernel-doc complain that members aren't
documented?

> + * Care must be taken when updating this structure; it is read by
> + * some very hot code paths. It occupies <=48 bytes and, when combined
> + * with the seqcount used to synchronize access, comfortably fits into
> + * a 64 byte cache line.
> + */
> +struct clock_read_data {
> u64 epoch_ns;
> u64 epoch_cyc;
> - seqcount_t seq;
> - unsigned long rate;
> + u64 sched_clock_mask;
> + u64 (*read_sched_clock)(void);
> u32 mult;
> u32 shift;
> bool suspended;
> };
>
> +/**
> + * struct clock_data - all data needed for sched_clock (including
> + * registration of a new clock source)
> + *

Same comment.

> + * The ordering of this structure has been chosen to optimize cache
> + * performance. In particular seq and read_data (combined) should fit
> + * into a single 64 byte cache line.
> + */
> +struct clock_data {
> + seqcount_t seq;
> + struct clock_read_data read_data;
> + ktime_t wrap_kt;
> + unsigned long rate;
> +};
> @@ -60,15 +79,16 @@ unsigned long long notrace sched_clock(void)
> {
> u64 cyc, res;
> unsigned long seq;
> + struct clock_read_data *rd = &cd.read_data;
>
> do {
> seq = raw_read_seqcount_begin(&cd.seq);
>
> - res = cd.epoch_ns;
> - if (!cd.suspended) {
> - cyc = read_sched_clock();
> - cyc = (cyc - cd.epoch_cyc) & sched_clock_mask;
> - res += cyc_to_ns(cyc, cd.mult, cd.shift);
> + res = rd->epoch_ns;
> + if (!rd->suspended) {

Should this have likely() treatment? It would be really nice if
we could use static branches here to avoid any branch penalty at
all. I guess that would need some sort of special cased
stop_machine() though. Or I wonder if we could replace
rd->read_sched_clock() with a dumb function that returns
cd.epoch_cyc so that the math turns out to be 0?
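
Something like this, perhaps (completely untested, and the name is
purely illustrative):

static u64 notrace suspended_sched_clock_read(void)
{
	/*
	 * The cycle count matches the epoch exactly, so the delta
	 * computed in sched_clock() is always zero and the result
	 * degenerates to rd->epoch_ns, no conditional required.
	 */
	return cd.read_data.epoch_cyc;
}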

> + cyc = rd->read_sched_clock();
> + cyc = (cyc - rd->epoch_cyc) & rd->sched_clock_mask;
> + res += cyc_to_ns(cyc, rd->mult, rd->shift);
> }
> } while (read_seqcount_retry(&cd.seq, seq));
>

--
Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
a Linux Foundation Collaborative Project


2015-02-05 10:21:36

by Daniel Thompson

Subject: Re: [PATCH v3 2/4] sched_clock: Optimize cache line usage

On 05/02/15 01:14, Stephen Boyd wrote:
> On 01/30, Daniel Thompson wrote:
>> diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
>> index 3d21a8719444..cb69a47dfee4 100644
>> --- a/kernel/time/sched_clock.c
>> +++ b/kernel/time/sched_clock.c
>> @@ -18,28 +18,44 @@
>> #include <linux/seqlock.h>
>> #include <linux/bitops.h>
>>
>> -struct clock_data {
>> - ktime_t wrap_kt;
>> +/**
>> + * struct clock_read_data - data required to read from sched_clock
>> + *
>
> Nitpick: Won't kernel-doc complain that members aren't
> documented?

It does indeed. I'll add descriptions here...
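
Probably something along these lines (exact wording still to be
polished):

/**
 * struct clock_read_data - data required to read from sched_clock()
 *
 * @epoch_ns:		sched_clock() value at last update
 * @epoch_cyc:		Clock cycle value at last update
 * @sched_clock_mask:	Bitmask for two's complement subtraction of
 *			non-64-bit clocks
 * @read_sched_clock:	Current clock source
 * @mult:		Multiplier for scaled math conversion
 * @shift:		Shift value for scaled math conversion
 * @suspended:		Flag to indicate if the clock is suspended
 *
 * ...
 */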


>> + * Care must be taken when updating this structure; it is read by
>> + * some very hot code paths. It occupies <=48 bytes and, when combined
>> + * with the seqcount used to synchronize access, comfortably fits into
>> + * a 64 byte cache line.
>> + */
>> +struct clock_read_data {
>> u64 epoch_ns;
>> u64 epoch_cyc;
>> - seqcount_t seq;
>> - unsigned long rate;
>> + u64 sched_clock_mask;
>> + u64 (*read_sched_clock)(void);
>> u32 mult;
>> u32 shift;
>> bool suspended;
>> };
>>
>> +/**
>> + * struct clock_data - all data needed for sched_clock (including
>> + * registration of a new clock source)
>> + *
>
> Same comment.

... and here.
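
Perhaps:

/**
 * struct clock_data - all data needed for sched_clock() (including
 *                     registration of a new clock source)
 *
 * @seq:		Sequence counter for protecting updates
 * @read_data:		Data required to read from sched_clock
 * @wrap_kt:		Duration for which clock can run before wrapping
 * @rate:		Tick rate of the registered clock
 *
 * ...
 */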


>> + * The ordering of this structure has been chosen to optimize cache
>> + * performance. In particular seq and read_data (combined) should fit
>> + * into a single 64 byte cache line.
>> + */
>> +struct clock_data {
>> + seqcount_t seq;
>> + struct clock_read_data read_data;
>> + ktime_t wrap_kt;
>> + unsigned long rate;
>> +};
>> @@ -60,15 +79,16 @@ unsigned long long notrace sched_clock(void)
>> {
>> u64 cyc, res;
>> unsigned long seq;
>> + struct clock_read_data *rd = &cd.read_data;
>>
>> do {
>> seq = raw_read_seqcount_begin(&cd.seq);
>>
>> - res = cd.epoch_ns;
>> - if (!cd.suspended) {
>> - cyc = read_sched_clock();
>> - cyc = (cyc - cd.epoch_cyc) & sched_clock_mask;
>> - res += cyc_to_ns(cyc, cd.mult, cd.shift);
>> + res = rd->epoch_ns;
>> + if (!rd->suspended) {
>
> Should this have likely() treatment? It would be really nice if
> we could use static branches here to avoid any branch penalty at
> all. I guess that would need some sort of special cased
> stop_machine() though. Or I wonder if we could replace
> rd->read_sched_clock() with a dumb function that returns
> cd.epoch_cyc so that the math turns out to be 0?

Great idea.

Making this code branchless with a special function sounds much
better than using likely().
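
Perhaps something like this in the suspend/resume hooks (untested
sketch; it assumes we stash the "real" read function in a new
clock_data member, actual_read_sched_clock, at registration time so
that resume can restore it):

static int sched_clock_suspend(void)
{
	struct clock_read_data *rd = &cd.read_data;

	/*
	 * Freeze the epoch, then redirect reads to a dumb function
	 * like the one you describe. syscore suspend runs on one CPU
	 * with irqs off, so no seqcount write section is needed to
	 * flip the pointer.
	 */
	update_sched_clock();
	hrtimer_cancel(&sched_clock_timer);
	rd->read_sched_clock = suspended_sched_clock_read;

	return 0;
}

static void sched_clock_resume(void)
{
	struct clock_read_data *rd = &cd.read_data;

	/* Re-sync the epoch and switch back to the real clock source. */
	rd->epoch_cyc = cd.actual_read_sched_clock();
	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
	rd->read_sched_clock = cd.actual_read_sched_clock;
}

That way sched_clock() needs no conditional at all; the suspended
case simply degenerates to returning rd->epoch_ns.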