2024-02-14 12:15:11

by Adrián Larumbe

Subject: [PATCH 0/1] Always record job cycle and timestamp information

A driver user expressed interest in being able to access engine usage stats
through fdinfo when debugfs is not built into their kernel. In the current
implementation this wasn't possible, because it was assumed that enabling
the cycle counter and timestamp registers, even for inflight jobs, would
incur additional power consumption, so both were kept disabled until
toggled through debugfs.

A second read of the TRM made me think otherwise, but this is something
that would be best clarified by someone from ARM's side.

Adrián Larumbe (1):
drm/panfrost: Always record job cycle and timestamp information

drivers/gpu/drm/panfrost/Makefile | 2 --
drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
7 files changed, 9 insertions(+), 59 deletions(-)
delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h


base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918
--
2.43.0



2024-02-14 12:30:41

by Adrián Larumbe

Subject: [PATCH 1/1] drm/panfrost: Always record job cycle and timestamp information

Some users of Panfrost expressed interest in being able to gather fdinfo
stats for running jobs, on production builds with no built-in debugfs
support. Sysfs was first considered, but eventually it was realised that
timestamp and cycle counting don't incur additional power consumption
when the GPU is running and there are inflight jobs, so there's no reason
to let user space toggle profiling.

Remove the profiling debugfs knob altogether so that cycle and timestamp
counting is always enabled for inflight jobs.

Signed-off-by: Adrián Larumbe <[email protected]>
---
drivers/gpu/drm/panfrost/Makefile | 2 --
drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
7 files changed, 9 insertions(+), 59 deletions(-)
delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h

diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index 2c01c1e7523e..7da2b3f02ed9 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -12,6 +12,4 @@ panfrost-y := \
 	panfrost_perfcnt.o \
 	panfrost_dump.o
 
-panfrost-$(CONFIG_DEBUG_FS) += panfrost_debugfs.o
-
 obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.c b/drivers/gpu/drm/panfrost/panfrost_debugfs.c
deleted file mode 100644
index 72d4286a6bf7..000000000000
--- a/drivers/gpu/drm/panfrost/panfrost_debugfs.c
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2023 Collabora ltd. */
-/* Copyright 2023 Amazon.com, Inc. or its affiliates. */
-
-#include <linux/debugfs.h>
-#include <linux/platform_device.h>
-#include <drm/drm_debugfs.h>
-#include <drm/drm_file.h>
-#include <drm/panfrost_drm.h>
-
-#include "panfrost_device.h"
-#include "panfrost_gpu.h"
-#include "panfrost_debugfs.h"
-
-void panfrost_debugfs_init(struct drm_minor *minor)
-{
-	struct drm_device *dev = minor->dev;
-	struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev->dev));
-
-	debugfs_create_atomic_t("profile", 0600, minor->debugfs_root, &pfdev->profile_mode);
-}
diff --git a/drivers/gpu/drm/panfrost/panfrost_debugfs.h b/drivers/gpu/drm/panfrost/panfrost_debugfs.h
deleted file mode 100644
index c5af5f35877f..000000000000
--- a/drivers/gpu/drm/panfrost/panfrost_debugfs.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright 2023 Collabora ltd.
- * Copyright 2023 Amazon.com, Inc. or its affiliates.
- */
-
-#ifndef PANFROST_DEBUGFS_H
-#define PANFROST_DEBUGFS_H
-
-#ifdef CONFIG_DEBUG_FS
-void panfrost_debugfs_init(struct drm_minor *minor);
-#endif
-
-#endif /* PANFROST_DEBUGFS_H */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 62f7e3527385..cd6bbcb2bea4 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -130,7 +130,6 @@ struct panfrost_device {
 	struct list_head scheduled_jobs;
 
 	struct panfrost_perfcnt *perfcnt;
-	atomic_t profile_mode;
 
 	struct mutex sched_lock;

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index a926d71e8131..e31fd4d62bbe 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -20,7 +20,6 @@
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"
-#include "panfrost_debugfs.h"

static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);
@@ -600,10 +599,6 @@ static const struct drm_driver panfrost_drm_driver = {

 	.gem_create_object = panfrost_gem_create_object,
 	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
-
-#ifdef CONFIG_DEBUG_FS
-	.debugfs_init = panfrost_debugfs_init,
-#endif
 };
 
 static int panfrost_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 0c2dbf6ef2a5..745b16a77edd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -159,13 +159,11 @@ panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
 	struct panfrost_job *job = pfdev->jobs[slot][0];
 
 	WARN_ON(!job);
-	if (job->is_profiled) {
-		if (job->engine_usage) {
-			job->engine_usage->elapsed_ns[slot] +=
-				ktime_to_ns(ktime_sub(ktime_get(), job->start_time));
-			job->engine_usage->cycles[slot] +=
-				panfrost_cycle_counter_read(pfdev) - job->start_cycles;
-		}
+	if (job->engine_usage) {
+		job->engine_usage->elapsed_ns[slot] +=
+			ktime_to_ns(ktime_sub(ktime_get(), job->start_time));
+		job->engine_usage->cycles[slot] +=
+			panfrost_cycle_counter_read(pfdev) - job->start_cycles;
 		panfrost_cycle_counter_put(job->pfdev);
 	}

@@ -243,12 +241,9 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 	subslot = panfrost_enqueue_job(pfdev, js, job);
 	/* Don't queue the job if a reset is in progress */
 	if (!atomic_read(&pfdev->reset.pending)) {
-		if (atomic_read(&pfdev->profile_mode)) {
-			panfrost_cycle_counter_get(pfdev);
-			job->is_profiled = true;
-			job->start_time = ktime_get();
-			job->start_cycles = panfrost_cycle_counter_read(pfdev);
-		}
+		panfrost_cycle_counter_get(pfdev);
+		job->start_time = ktime_get();
+		job->start_cycles = panfrost_cycle_counter_read(pfdev);
 
 		job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
 		dev_dbg(pfdev->dev,
@@ -693,8 +688,7 @@ panfrost_reset(struct panfrost_device *pfdev,
 	spin_lock(&pfdev->js->job_lock);
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
 		for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
-			if (pfdev->jobs[i][j]->is_profiled)
-				panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev);
+			panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev);
 			pm_runtime_put_noidle(pfdev->dev);
 			panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
 		}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index ec581b97852b..022c83ede368 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -34,7 +34,6 @@ struct panfrost_job {
 	struct dma_fence *render_done_fence;
 
 	struct panfrost_engine_usage *engine_usage;
-	bool is_profiled;
 	ktime_t start_time;
 	u64 start_cycles;
 };
--
2.43.0


2024-02-14 13:52:14

by Steven Price

Subject: Re: [PATCH 0/1] Always record job cycle and timestamp information

Hi Adrián,

On 14/02/2024 12:14, Adrián Larumbe wrote:
> A driver user expressed interest in being able to access engine usage stats
> through fdinfo when debugfs is not built into their kernel. In the current
> implementation this wasn't possible, because it was assumed that enabling
> the cycle counter and timestamp registers, even for inflight jobs, would
> incur additional power consumption, so both were kept disabled until
> toggled through debugfs.
>
> A second read of the TRM made me think otherwise, but this is something
> that would be best clarified by someone from ARM's side.

I'm afraid I can't give a definitive answer. This will probably vary
depending on implementation. The command register enables/disables
"propagation" of the cycle/timestamp values. This propagation will cost
some power (gates are getting toggled) but whether that power is
completely in the noise of the GPU as a whole I can't say.

The out-of-tree kbase driver only enables the counters for jobs
explicitly marked (BASE_JD_REQ_PERMON) or due to an explicit connection
from a profiler.

I'd be happier moving the debugfs file to sysfs rather than assuming
that the power consumption is small enough for all platforms.
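
To make the sysfs option concrete, a knob along these lines would keep the
existing atomic and just move where it is exposed. This is only a sketch for
discussion, not actual driver code; it assumes pfdev->profile_mode survives
and that dev_get_drvdata() on the platform device returns the
panfrost_device, matching how the debugfs code above fetches it via
platform_get_drvdata():

static ssize_t profiling_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", atomic_read(&pfdev->profile_mode));
}

static ssize_t profiling_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct panfrost_device *pfdev = dev_get_drvdata(dev);
	bool enable;
	int err;

	err = kstrtobool(buf, &enable);
	if (err)
		return err;

	atomic_set(&pfdev->profile_mode, enable);

	return len;
}

static DEVICE_ATTR_RW(profiling);

Userspace would then toggle it with a plain write to the attribute file.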

Ideally we'd have some sort of kernel interface for a profiler to inform
the kernel what it is interested in, but I can't immediately see how to
make that useful across different drivers. kbase's profiling support is
great with our profiling tools, but there's a very strong connection
between the two.

Steve

> Adrián Larumbe (1):
> drm/panfrost: Always record job cycle and timestamp information
>
> drivers/gpu/drm/panfrost/Makefile | 2 --
> drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
> drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
> drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
> drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
> drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
> drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
> 7 files changed, 9 insertions(+), 59 deletions(-)
> delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
> delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h
>
>
> base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918


2024-02-16 16:57:33

by Daniel Vetter

Subject: Re: [PATCH 0/1] Always record job cycle and timestamp information

On Wed, Feb 14, 2024 at 01:52:05PM +0000, Steven Price wrote:
> Hi Adrián,
>
> On 14/02/2024 12:14, Adrián Larumbe wrote:
> > A driver user expressed interest in being able to access engine usage stats
> > through fdinfo when debugfs is not built into their kernel. In the current
> > implementation this wasn't possible, because it was assumed that enabling
> > the cycle counter and timestamp registers, even for inflight jobs, would
> > incur additional power consumption, so both were kept disabled until
> > toggled through debugfs.
> >
> > A second read of the TRM made me think otherwise, but this is something
> > that would be best clarified by someone from ARM's side.
>
> I'm afraid I can't give a definitive answer. This will probably vary
> depending on implementation. The command register enables/disables
> "propagation" of the cycle/timestamp values. This propagation will cost
> some power (gates are getting toggled) but whether that power is
> completely in the noise of the GPU as a whole I can't say.
>
> The out-of-tree kbase driver only enables the counters for jobs
> explicitly marked (BASE_JD_REQ_PERMON) or due to an explicit connection
> from a profiler.
>
> I'd be happier moving the debugfs file to sysfs rather than assuming
> that the power consumption is small enough for all platforms.
>
> Ideally we'd have some sort of kernel interface for a profiler to inform
> the kernel what it is interested in, but I can't immediately see how to
> make that useful across different drivers. kbase's profiling support is
> great with our profiling tools, but there's a very strong connection
> between the two.

Yeah I'm not sure whether a magic (worse probably per-driver massively
different) file in sysfs is needed to enable gpu perf monitoring stats in
fdinfo.

I get that we do have a bit of a gap because the linux perf pmu stuff is
global, and you want per-process, and there's kinda no per-process support
for perf stats for devices. But that's probably the direction we want to
go, not so much fdinfo. At least for hardware performance counters and
things like that.

Iirc the i915 pmu support had some integration for per-process support,
you might want to chat with Tvrtko for kernel side and Lionel for more
userspace side. At least if I'm not making a complete mess and my memory
is vaguely related to reality. Adding them both.

Cheers, Sima


>
> Steve
>
> > Adrián Larumbe (1):
> > drm/panfrost: Always record job cycle and timestamp information
> >
> > drivers/gpu/drm/panfrost/Makefile | 2 --
> > drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
> > drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
> > drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
> > drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
> > drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
> > drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
> > 7 files changed, 9 insertions(+), 59 deletions(-)
> > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
> > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h
> >
> >
> > base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918
>

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

2024-02-16 21:31:35

by Tvrtko Ursulin

Subject: Re: [PATCH 0/1] Always record job cycle and timestamp information


On 16/02/2024 16:57, Daniel Vetter wrote:
> On Wed, Feb 14, 2024 at 01:52:05PM +0000, Steven Price wrote:
>> Hi Adrián,
>>
>> On 14/02/2024 12:14, Adrián Larumbe wrote:
>>> A driver user expressed interest in being able to access engine usage stats
>>> through fdinfo when debugfs is not built into their kernel. In the current
>>> implementation this wasn't possible, because it was assumed that enabling
>>> the cycle counter and timestamp registers, even for inflight jobs, would
>>> incur additional power consumption, so both were kept disabled until
>>> toggled through debugfs.
>>>
>>> A second read of the TRM made me think otherwise, but this is something
>>> that would be best clarified by someone from ARM's side.
>>
>> I'm afraid I can't give a definitive answer. This will probably vary
>> depending on implementation. The command register enables/disables
>> "propagation" of the cycle/timestamp values. This propagation will cost
>> some power (gates are getting toggled) but whether that power is
>> completely in the noise of the GPU as a whole I can't say.
>>
>> The out-of-tree kbase driver only enables the counters for jobs
>> explicitly marked (BASE_JD_REQ_PERMON) or due to an explicit connection
>> from a profiler.
>>
>> I'd be happier moving the debugfs file to sysfs rather than assuming
>> that the power consumption is small enough for all platforms.
>>
>> Ideally we'd have some sort of kernel interface for a profiler to inform
>> the kernel what it is interested in, but I can't immediately see how to
>> make that useful across different drivers. kbase's profiling support is
>> great with our profiling tools, but there's a very strong connection
>> between the two.
>
> Yeah I'm not sure whether a magic (worse probably per-driver massively
> different) file in sysfs is needed to enable gpu perf monitoring stats in
> fdinfo.
>
> I get that we do have a bit of a gap because the linux perf pmu stuff is
> global, and you want per-process, and there's kinda no per-process support
> for perf stats for devices. But that's probably the direction we want to
> go, not so much fdinfo. At least for hardware performance counters and
> things like that.
>
> Iirc the i915 pmu support had some integration for per-process support,
> you might want to chat with Tvrtko for kernel side and Lionel for more
> userspace side. At least if I'm not making a complete mess and my memory
> is vaguely related to reality. Adding them both.

Yeah there are two separate things, i915 PMU and i915 Perf/OA.

If my memory serves me right I indeed did have per-process support for
i915 PMU implemented as an RFC (or at least a branch somewhere) some
years back. IIRC it only exposed the per-engine GPU utilisation and I did
not find it very useful versus the complexity. (I think it at least
required maintaining a map of drm clients per task.)

Our more useful profiling is using a custom Perf/OA interface
(Observation Architecture) which is possibly similar to kbase mentioned
above. Why it is a custom interface is explained in a large comment on
top of i915_perf.c. Not sure if all of them still hold but on the
overall perf does not sound like the right fit for detailed GPU profiling.

Also PMU drivers are very challenging to get the implementation right,
since locking model and atomicity requirements are quite demanding.

From my point of view, at least it is my initial thinking, if custom
per driver solutions are strongly not desired, it could be interesting
to look into whether there is enough commonality, in at least concepts,
to see if a new DRM level common but extensible API would be doable.
Even then it may be tricky to "extract" enough common code to justify it.
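
Purely as a strawman for that last thought (nothing like this exists in DRM
today, and the names are made up), the common-but-extensible part could start
as small as a pair of hooks that the fdinfo helpers and tools could rely on:

/* Hypothetical sketch only, not an existing interface */
struct drm_stats_profiling_ops {
	/* Toggle collection of the data behind the drm-engine-* fdinfo keys */
	int (*set_stats_profiling)(struct drm_device *dev, bool enable);
	bool (*get_stats_profiling)(struct drm_device *dev);
};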

Regards,

Tvrtko

>
> Cheers, Sima
>
>
>>
>> Steve
>>
>>> Adrián Larumbe (1):
>>> drm/panfrost: Always record job cycle and timestamp information
>>>
>>> drivers/gpu/drm/panfrost/Makefile | 2 --
>>> drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
>>> drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
>>> drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
>>> drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
>>> drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
>>> drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
>>> 7 files changed, 9 insertions(+), 59 deletions(-)
>>> delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
>>> delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h
>>>
>>>
>>> base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918
>>
>
> --
> Daniel Vetter
> Software Engineer, Intel Corporation
> http://blog.ffwll.ch

2024-02-21 09:40:44

by Adrián Larumbe

Subject: Re: [PATCH 0/1] Always record job cycle and timestamp information

Hi,

I just wanted to make sure we're on the same page on this matter. So in
Panfrost, and I guess in almost every other driver out there, HW perf
counters and their uapi interface are orthogonal to fdinfo's reporting on drm
engine utilisation.
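
For reference, the fdinfo side in question is the drm-usage-stats.rst
key-value output, which for Panfrost looks roughly like this (values made
up for illustration):

drm-driver:	panfrost
drm-client-id:	3
drm-engine-fragment:	1846584880 ns
drm-cycles-fragment:	1424359409
drm-maxfreq-fragment:	799999987 Hz
drm-engine-vertex-tiler:	71932239 ns
drm-cycles-vertex-tiler:	52617357
drm-maxfreq-vertex-tiler:	799999987 Hz

The knob under discussion gates whether the drm-cycles-* and drm-engine-*
numbers get accumulated for inflight jobs.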

At the moment it seems like HW perfcounters and the way they're exposed to UM
are very idiosyncratic and any attempt to unify their interface into a common
set of ioctls sounds like a gargantuan task I wouldn't like to be faced with.

As for fdinfo, I guess there's more room for coming up with common helpers that
could handle the toggling of HW support for drm engine calculations, but I'd at
least have to see how things are being done in let's say, Freedreno or Intel.

Right now there's a pressing need to get rid of the debugfs knob for fdinfo's
drm engine profiling sources in Panfrost, after which I could perhaps draw up an
RFC for how to generalise this onto other drivers.

Adrian

On 16.02.2024 17:43, Tvrtko Ursulin wrote:
>
> On 16/02/2024 16:57, Daniel Vetter wrote:
> > On Wed, Feb 14, 2024 at 01:52:05PM +0000, Steven Price wrote:
> > > Hi Adrián,
> > >
> > > On 14/02/2024 12:14, Adrián Larumbe wrote:
> > > > A driver user expressed interest in being able to access engine usage stats
> > > > through fdinfo when debugfs is not built into their kernel. In the current
> > > > implementation this wasn't possible, because it was assumed that enabling
> > > > the cycle counter and timestamp registers, even for inflight jobs, would
> > > > incur additional power consumption, so both were kept disabled until
> > > > toggled through debugfs.
> > > >
> > > > A second read of the TRM made me think otherwise, but this is something
> > > > that would be best clarified by someone from ARM's side.
> > >
> > > I'm afraid I can't give a definitive answer. This will probably vary
> > > depending on implementation. The command register enables/disables
> > > "propagation" of the cycle/timestamp values. This propagation will cost
> > > some power (gates are getting toggled) but whether that power is
> > > completely in the noise of the GPU as a whole I can't say.
> > >
> > > The out-of-tree kbase driver only enables the counters for jobs
> > > explicitly marked (BASE_JD_REQ_PERMON) or due to an explicit connection
> > > from a profiler.
> > >
> > > I'd be happier moving the debugfs file to sysfs rather than assuming
> > > that the power consumption is small enough for all platforms.
> > >
> > > Ideally we'd have some sort of kernel interface for a profiler to inform
> > > the kernel what it is interested in, but I can't immediately see how to
> > > make that useful across different drivers. kbase's profiling support is
> > > great with our profiling tools, but there's a very strong connection
> > > between the two.
> >
> > Yeah I'm not sure whether a magic (worse probably per-driver massively
> > different) file in sysfs is needed to enable gpu perf monitoring stats in
> > fdinfo.
> >
> > I get that we do have a bit of a gap because the linux perf pmu stuff is
> > global, and you want per-process, and there's kinda no per-process support
> > for perf stats for devices. But that's probably the direction we want to
> > go, not so much fdinfo. At least for hardware performance counters and
> > things like that.
> >
> > Iirc the i915 pmu support had some integration for per-process support,
> > you might want to chat with Tvrtko for kernel side and Lionel for more
> > userspace side. At least if I'm not making a complete mess and my memory
> > is vaguely related to reality. Adding them both.
>
> Yeah there are two separate things, i915 PMU and i915 Perf/OA.
>
> If my memory serves me right I indeed did have per-process support for i915
> PMU implemented as an RFC (or at least a branch somewhere) some years back.
> IIRC it only exposed the per-engine GPU utilisation and I did not find it very
> useful versus the complexity. (I think it at least required maintaining a map
> of drm clients per task.)
>
> Our more useful profiling is using a custom Perf/OA interface (Observation
> Architecture) which is possibly similar to kbase mentioned above. Why it is a
> custom interface is explained in a large comment on top of i915_perf.c. Not
> sure if all of them still hold but on the overall perf does not sound like the
> right fit for detailed GPU profiling.
>
> Also PMU drivers are very challenging to get the implementation right, since
> locking model and atomicity requirements are quite demanding.
>
> From my point of view, at least it is my initial thinking, if custom per
> driver solutions are strongly not desired, it could be interesting to look
> into whether there is enough commonality, in at least concepts, to see if a
> new DRM level common but extensible API would be doable. Even then it may be
> tricky to "extract" enough common code to justify it.
>
> Regards,
>
> Tvrtko
>
> >
> > Cheers, Sima
> >
> >
> > >
> > > Steve
> > >
> > > > Adrián Larumbe (1):
> > > > drm/panfrost: Always record job cycle and timestamp information
> > > >
> > > > drivers/gpu/drm/panfrost/Makefile | 2 --
> > > > drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
> > > > drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
> > > > drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
> > > > drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
> > > > drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
> > > > drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
> > > > 7 files changed, 9 insertions(+), 59 deletions(-)
> > > > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
> > > > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h
> > > >
> > > >
> > > > base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918
> > >
> >
> > --
> > Daniel Vetter
> > Software Engineer, Intel Corporation
> > http://blog.ffwll.ch


2024-02-21 14:34:35

by Tvrtko Ursulin

Subject: Re: [PATCH 0/1] Always record job cycle and timestamp information


On 21/02/2024 09:40, Adrián Larumbe wrote:
> Hi,
>
> I just wanted to make sure we're on the same page on this matter. So in
> Panfrost, and I guess in almost every other driver out there, HW perf
> counters and their uapi interface are orthogonal to fdinfo's reporting on drm
> engine utilisation.
>
> At the moment it seems like HW perfcounters and the way they're exposed to UM
> are very idiosyncratic and any attempt to unify their interface into a common
> set of ioctls sounds like a gargantuan task I wouldn't like to be faced with.

I share the same feeling on this sub-topic.

> As for fdinfo, I guess there's more room for coming up with common helpers that
> could handle the toggling of HW support for drm engine calculations, but I'd at
> least have to see how things are being done in let's say, Freedreno or Intel.

For Intel we don't need this ability, well at least for pre-GuC
platforms. Stat collection is super cheap and permanently enabled there.

But let me copy Umesh because something at the back of my mind is
telling me that perhaps there was something expensive about collecting
these stats with the GuC backend? If so maybe a toggle would be
beneficial there.

> Right now there's a pressing need to get rid of the debugfs knob for fdinfo's
> drm engine profiling sources in Panfrost, after which I could perhaps draw up an
> RFC for how to generalise this onto other drivers.

There is a knob currently meaning fdinfo does not work by default? If
that is so, I would have at least expected someone had submitted a patch
for gputop to handle this toggle. It being kind of a common reference
implementation I don't think it is great if it does not work out of the box.

The toggle as an idea sounds a bit annoying, but if there is no other
realistic way maybe it is not too bad. As long as it is documented in
the drm-usage-stats.rst, doesn't live in debugfs, and has some common
plumbing implemented both on the kernel side and for the aforementioned
gputop / igt_drm_fdinfo / igt_drm_clients. Where and how exactly TBD.
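
To sketch the userspace half of that plumbing (hypothetical helper, with the
knob path deliberately a parameter since the "where" is TBD):

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

/* Flip a driver's profiling knob, remembering the old value so a tool
 * like gputop can restore it on exit. Purely illustrative. */
static int toggle_profiling(const char *knob, bool enable, bool *old)
{
	char val;
	int fd = open(knob, O_RDWR);

	if (fd < 0)
		return -errno;

	if (old && pread(fd, &val, 1, 0) == 1)
		*old = (val == '1');

	val = enable ? '1' : '0';
	if (pwrite(fd, &val, 1, 0) != 1) {
		int err = -errno;

		close(fd);
		return err;
	}

	close(fd);
	return 0;
}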

Regards,

Tvrtko

>
> On 16.02.2024 17:43, Tvrtko Ursulin wrote:
>>
>> On 16/02/2024 16:57, Daniel Vetter wrote:
>>> On Wed, Feb 14, 2024 at 01:52:05PM +0000, Steven Price wrote:
>>>> Hi Adrián,
>>>>
>>>> On 14/02/2024 12:14, Adrián Larumbe wrote:
>>>>> A driver user expressed interest in being able to access engine usage stats
>>>>> through fdinfo when debugfs is not built into their kernel. In the current
>>>>> implementation this wasn't possible, because it was assumed that enabling
>>>>> the cycle counter and timestamp registers, even for inflight jobs, would
>>>>> incur additional power consumption, so both were kept disabled until
>>>>> toggled through debugfs.
>>>>>
>>>>> A second read of the TRM made me think otherwise, but this is something
>>>>> that would be best clarified by someone from ARM's side.
>>>>
>>>> I'm afraid I can't give a definitive answer. This will probably vary
>>>> depending on implementation. The command register enables/disables
>>>> "propagation" of the cycle/timestamp values. This propagation will cost
>>>> some power (gates are getting toggled) but whether that power is
>>>> completely in the noise of the GPU as a whole I can't say.
>>>>
>>>> The out-of-tree kbase driver only enables the counters for jobs
>>>> explicitly marked (BASE_JD_REQ_PERMON) or due to an explicit connection
>>>> from a profiler.
>>>>
>>>> I'd be happier moving the debugfs file to sysfs rather than assuming
>>>> that the power consumption is small enough for all platforms.
>>>>
>>>> Ideally we'd have some sort of kernel interface for a profiler to inform
>>>> the kernel what it is interested in, but I can't immediately see how to
>>>> make that useful across different drivers. kbase's profiling support is
>>>> great with our profiling tools, but there's a very strong connection
>>>> between the two.
>>>
>>> Yeah I'm not sure whether a magic (worse probably per-driver massively
>>> different) file in sysfs is needed to enable gpu perf monitoring stats in
>>> fdinfo.
>>>
>>> I get that we do have a bit of a gap because the linux perf pmu stuff is
>>> global, and you want per-process, and there's kinda no per-process support
>>> for perf stats for devices. But that's probably the direction we want to
>>> go, not so much fdinfo. At least for hardware performance counters and
>>> things like that.
>>>
>>> Iirc the i915 pmu support had some integration for per-process support,
>>> you might want to chat with Tvrtko for kernel side and Lionel for more
>>> userspace side. At least if I'm not making a complete mess and my memory
>>> is vaguely related to reality. Adding them both.
>>
>> Yeah there are two separate things, i915 PMU and i915 Perf/OA.
>>
>> If my memory serves me right I indeed did have per-process support for i915
>> PMU implemented as an RFC (or at least a branch somewhere) some years back.
>> IIRC it only exposed the per-engine GPU utilisation and I did not find it very
>> useful versus the complexity. (I think it at least required maintaining a map
>> of drm clients per task.)
>>
>> Our more useful profiling is using a custom Perf/OA interface (Observation
>> Architecture) which is possibly similar to kbase mentioned above. Why it is a
>> custom interface is explained in a large comment on top of i915_perf.c. Not
>> sure if all of them still hold but on the overall perf does not sound like the
>> right fit for detailed GPU profiling.
>>
>> Also PMU drivers are very challenging to get the implementation right, since
>> locking model and atomicity requirements are quite demanding.
>>
>> From my point of view, at least it is my initial thinking, if custom per
>> driver solutions are strongly not desired, it could be interesting to look
>> into whether there is enough commonality, in at least concepts, to see if a
>> new DRM level common but extensible API would be doable. Even then it may be
>> tricky to "extract" enough common code to justify it.
>>
>> Regards,
>>
>> Tvrtko
>>
>>>
>>> Cheers, Sima
>>>
>>>
>>>>
>>>> Steve
>>>>
>>>>> Adrián Larumbe (1):
>>>>> drm/panfrost: Always record job cycle and timestamp information
>>>>>
>>>>> drivers/gpu/drm/panfrost/Makefile | 2 --
>>>>> drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
>>>>> drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
>>>>> drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
>>>>> drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
>>>>> drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
>>>>> drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
>>>>> 7 files changed, 9 insertions(+), 59 deletions(-)
>>>>> delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
>>>>> delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h
>>>>>
>>>>>
>>>>> base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918
>>>>
>>>
>>> --
>>> Daniel Vetter
>>> Software Engineer, Intel Corporation
>>> http://blog.ffwll.ch
>

2024-02-21 15:14:05

by Adrián Larumbe

Subject: Re: [PATCH 0/1] Always record job cycle and timestamp information

> On 21.02.2024 14:34, Tvrtko Ursulin wrote:
>
> On 21/02/2024 09:40, Adrián Larumbe wrote:
> > Hi,
> >
> > I just wanted to make sure we're on the same page on this matter. So in
> > Panfrost, and I guess in almost every other driver out there, HW perf
> > counters and their uapi interface are orthogonal to fdinfo's reporting on drm
> > engine utilisation.
> >
> > At the moment it seems like HW perfcounters and the way they're exposed to UM
> > are very idiosyncratic and any attempt to unify their interface into a common
> > set of ioctls sounds like a gargantuan task I wouldn't like to be faced with.
>
> I share the same feeling on this sub-topic.
>
> > As for fdinfo, I guess there's more room for coming up with common helpers that
> > could handle the toggling of HW support for drm engine calculations, but I'd at
> > least have to see how things are being done in let's say, Freedreno or Intel.
>
> For Intel we don't need this ability, well at least for pre-GuC platforms.
> Stat collection is super cheap and permanently enabled there.
>
> But let me copy Umesh because something at the back of my mind is telling me
> that perhaps there was something expensive about collecting these stats with
> the GuC backend? If so maybe a toggle would be beneficial there.
>
> > Right now there's a pressing need to get rid of the debugfs knob for fdinfo's
> > drm engine profiling sources in Panfrost, after which I could perhaps draw up an
> > RFC for how to generalise this onto other drivers.
>
> There is a knob currently meaning fdinfo does not work by default? If that is
> so, I would have at least expected someone had submitted a patch for gputop to
> handle this toggle. It being kind of a common reference implementation I don't
> think it is great if it does not work out of the box.

It does sound like I forgot to document this knob at the time I submitted fdinfo
support for Panfrost. I'll make a point of mentioning it in a new patch where I
drop debugfs support and enable toggling from sysfs instead.

> The toggle as an idea sounds a bit annoying, but if there is no other
> realistic way maybe it is not too bad. As long as it is documented in the
> drm-usage-stats.rst, doesn't live in debugfs, and has some common plumbing
> implemented both on the kernel side and for the aforementioned gputop /
> igt_drm_fdinfo / igt_drm_clients. Where and how exactly TBD.

As soon as the new patch is merged, I'll go and reflect the driver uAPI changes
in all three of these.

> Regards,
>
> Tvrtko
>

Cheers,
Adrian

> > On 16.02.2024 17:43, Tvrtko Ursulin wrote:
> > >
> > > On 16/02/2024 16:57, Daniel Vetter wrote:
> > > > On Wed, Feb 14, 2024 at 01:52:05PM +0000, Steven Price wrote:
> > > > > Hi Adrián,
> > > > >
> > > > > On 14/02/2024 12:14, Adrián Larumbe wrote:
> > > > > > A driver user expressed interest in being able to access engine usage stats
> > > > > > through fdinfo when debugfs is not built into their kernel. In the current
> > > > > > implementation this wasn't possible, because it was assumed that enabling
> > > > > > the cycle counter and timestamp registers, even for inflight jobs, would
> > > > > > incur additional power consumption, so both were kept disabled until
> > > > > > toggled through debugfs.
> > > > > >
> > > > > > A second read of the TRM made me think otherwise, but this is something
> > > > > > that would be best clarified by someone from ARM's side.
> > > > >
> > > > > I'm afraid I can't give a definitive answer. This will probably vary
> > > > > depending on implementation. The command register enables/disables
> > > > > "propagation" of the cycle/timestamp values. This propagation will cost
> > > > > some power (gates are getting toggled) but whether that power is
> > > > > completely in the noise of the GPU as a whole I can't say.
> > > > >
> > > > > The out-of-tree kbase driver only enables the counters for jobs
> > > > > explicitly marked (BASE_JD_REQ_PERMON) or due to an explicit connection
> > > > > from a profiler.
> > > > >
> > > > > I'd be happier moving the debugfs file to sysfs rather than assuming
> > > > > that the power consumption is small enough for all platforms.
> > > > >
> > > > > Ideally we'd have some sort of kernel interface for a profiler to inform
> > > > > the kernel what it is interested in, but I can't immediately see how to
> > > > > make that useful across different drivers. kbase's profiling support is
> > > > > great with our profiling tools, but there's a very strong connection
> > > > > between the two.
> > > >
> > > > Yeah I'm not sure whether a magic (worse probably per-driver massively
> > > > different) file in sysfs is needed to enable gpu perf monitoring stats in
> > > > fdinfo.
> > > >
> > > > I get that we do have a bit of a gap because the linux perf pmu stuff is
> > > > global, and you want per-process, and there's kinda no per-process support
> > > > for perf stats for devices. But that's probably the direction we want to
> > > > go, not so much fdinfo. At least for hardware performance counters and
> > > > things like that.
> > > >
> > > > Iirc the i915 pmu support had some integration for per-process support,
> > > > you might want to chat with Tvrtko for kernel side and Lionel for more
> > > > userspace side. At least if I'm not making a complete mess and my memory
> > > > is vaguely related to reality. Adding them both.
> > >
> > > Yeah there are two separate things, i915 PMU and i915 Perf/OA.
> > >
> > > If my memory serves me right I indeed did have per-process support for i915
> > > PMU implemented as an RFC (or at least a branch somewhere) some years back.
> > > IIRC it only exposed the per-engine GPU utilisation and I did not find it very
> > > useful versus the complexity. (I think it at least required maintaining a map
> > > of drm clients per task.)
> > >
> > > Our more useful profiling is using a custom Perf/OA interface (Observation
> > > Architecture) which is possibly similar to kbase mentioned above. Why it is a
> > > custom interface is explained in a large comment on top of i915_perf.c. Not
> > > sure if all of them still hold but on the overall perf does not sound like the
> > > right fit for detailed GPU profiling.
> > >
> > > Also PMU drivers are very challenging to get the implementation right, since
> > > locking model and atomicity requirements are quite demanding.
> > >
> > > From my point of view, at least it is my initial thinking, if custom per
> > > driver solutions are strongly not desired, it could be interesting to look
> > > into whether there is enough commonality, in at least concepts, to see if a
> > > new DRM level common but extensible API would be doable. Even then it may be
> > > tricky to "extract" enough common code to justify it.
> > >
> > > Regards,
> > >
> > > Tvrtko
> > >
> > > >
> > > > Cheers, Sima
> > > >
> > > >
> > > > >
> > > > > Steve
> > > > >
> > > > > > Adrián Larumbe (1):
> > > > > > drm/panfrost: Always record job cycle and timestamp information
> > > > > >
> > > > > > drivers/gpu/drm/panfrost/Makefile | 2 --
> > > > > > drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
> > > > > > drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
> > > > > > drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
> > > > > > drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
> > > > > > drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
> > > > > > drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
> > > > > > 7 files changed, 9 insertions(+), 59 deletions(-)
> > > > > > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
> > > > > > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h
> > > > > >
> > > > > >
> > > > > > base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918
> > > > >
> > > >
> > > > --
> > > > Daniel Vetter
> > > > Software Engineer, Intel Corporation
> > > > http://blog.ffwll.ch


2024-02-28 14:14:11

by Daniel Vetter

Subject: Re: [PATCH 0/1] Always record job cycle and timestamp information

On Wed, Feb 21, 2024 at 03:13:41PM +0000, Adrián Larumbe wrote:
> > On 21.02.2024 14:34, Tvrtko Ursulin wrote:
> >
> > On 21/02/2024 09:40, Adrián Larumbe wrote:
> > > Hi,
> > >
> > > I just wanted to make sure we're on the same page on this matter. So in
> > > Panfrost, and I guess in almost every other driver out there, HW perf
> > > counters and their uapi interface are orthogonal to fdinfo's reporting on drm
> > > engine utilisation.
> > >
> > > At the moment it seems like HW perfcounters and the way they're exposed to UM
> > > are very idiosyncratic and any attempt to unify their interface into a common
> > > set of ioctls sounds like a gargantuan task I wouldn't like to be faced with.
> >
> > I share the same feeling on this sub-topic.
> >
> > > As for fdinfo, I guess there's more room for coming up with common helpers that
> > > could handle the toggling of HW support for drm engine calculations, but I'd at
> > > least have to see how things are being done in let's say, Freedreno or Intel.
> >
> > For Intel we don't need this ability, well at least for pre-GuC platforms.
> > Stat collection is super cheap and permanently enabled there.
> >
> > But let me copy Umesh because something at the back of my mind is telling me
> > that perhaps there was something expensive about collecting these stats with
> > the GuC backend? If so maybe a toggle would be beneficial there.
> >
> > > Right now there's a pressing need to get rid of the debugfs knob for fdinfo's
> > > drm engine profiling sources in Panfrost, after which I could perhaps draw up an
> > > RFC for how to generalise this onto other drivers.
> >
> > There is a knob currently meaning fdinfo does not work by default? If that is
> > so, I would have at least expected someone had submitted a patch for gputop to
> > handle this toggle. It being kind of a common reference implementation I don't
> > think it is great if it does not work out of the box.
>
> It does sound like I forgot to document this knob at the time I submitted fdinfo
> support for Panfrost. I'll make a point of mentioning it in a new patch where I
> drop debugfs support and enable toggling from sysfs instead.
>
> > The toggle as an idea sounds a bit annoying, but if there is no other
> > realistic way maybe it is not too bad. As long as it is documented in the
> > drm-usage-stats.rst, doesn't live in debugfs, and has some common plumbing
> > implemented both on the kernel side and for the aforementioned gputop /
> > igt_drm_fdinfo / igt_drm_clients. Where and how exactly TBD.
>
> As soon as the new patch is merged, I'll go and reflect the driver uAPI changes
> in all three of these.

Would be good (and kinda proper per process rules) to implement the code
in at least e.g. gputop for this. To make sure it actually works for that
use-case, and there's not an oversight that breaks it all.
-Sima

>
> > Regards,
> >
> > Tvrtko
> >
>
> Cheers,
> Adrian
>
> > > On 16.02.2024 17:43, Tvrtko Ursulin wrote:
> > > >
> > > > On 16/02/2024 16:57, Daniel Vetter wrote:
> > > > > On Wed, Feb 14, 2024 at 01:52:05PM +0000, Steven Price wrote:
> > > > > > Hi Adrián,
> > > > > >
> > > > > > On 14/02/2024 12:14, Adrián Larumbe wrote:
> > > > > > > A driver user expressed interest in being able to access engine usage stats
> > > > > > > through fdinfo when debugfs is not built into their kernel. In the current
> > > > > > > implementation this wasn't possible, because it was assumed that enabling
> > > > > > > the cycle counter and timestamp registers, even for inflight jobs, would
> > > > > > > incur additional power consumption, so both were kept disabled until
> > > > > > > toggled through debugfs.
> > > > > > >
> > > > > > > A second read of the TRM made me think otherwise, but this is something
> > > > > > > that would be best clarified by someone from ARM's side.
> > > > > >
> > > > > > I'm afraid I can't give a definitive answer. This will probably vary
> > > > > > depending on implementation. The command register enables/disables
> > > > > > "propagation" of the cycle/timestamp values. This propagation will cost
> > > > > > some power (gates are getting toggled) but whether that power is
> > > > > > completely in the noise of the GPU as a whole I can't say.
> > > > > >
> > > > > > The out-of-tree kbase driver only enables the counters for jobs
> > > > > > explicitly marked (BASE_JD_REQ_PERMON) or due to an explicit connection
> > > > > > from a profiler.
> > > > > >
> > > > > > I'd be happier moving the debugfs file to sysfs rather than assuming
> > > > > > that the power consumption is small enough for all platforms.
> > > > > >
> > > > > > Ideally we'd have some sort of kernel interface for a profiler to inform
> > > > > > the kernel what it is interested in, but I can't immediately see how to
> > > > > > make that useful across different drivers. kbase's profiling support is
> > > > > > great with our profiling tools, but there's a very strong connection
> > > > > > between the two.
> > > > >
> > > > > Yeah I'm not sure whether a magic (worse probably per-driver massively
> > > > > different) file in sysfs is needed to enable gpu perf monitoring stats in
> > > > > fdinfo.
> > > > >
> > > > > I get that we do have a bit of a gap because the linux perf pmu stuff is
> > > > > global, and you want per-process, and there's kinda no per-process support
> > > > > for perf stats for devices. But that's probably the direction we want to
> > > > > go, not so much fdinfo. At least for hardware performance counters and
> > > > > things like that.
> > > > >
> > > > > Iirc the i915 pmu support had some integration for per-process support,
> > > > > you might want to chat with Tvrtko for kernel side and Lionel for more
> > > > > userspace side. At least if I'm not making a complete mess and my memory
> > > > > is vaguely related to reality. Adding them both.
> > > >
> > > > Yeah there are two separate things, i915 PMU and i915 Perf/OA.
> > > >
> > > > If my memory serves me right I indeed did have per-process support for i915
> > > > PMU implemented as an RFC (or at least a branch somewhere) some years back.
> > > > IIRC it only exposed the per-engine GPU utilisation and I did not find it very
> > > > useful versus the complexity. (I think it at least required maintaining a map
> > > > of drm clients per task.)
> > > >
> > > > Our more useful profiling is using a custom Perf/OA interface (Observation
> > > > Architecture) which is possibly similar to kbase mentioned above. Why it is a
> > > > custom interface is explained in a large comment on top of i915_perf.c. Not
> > > > sure if all of them still hold but on the overall perf does not sound like the
> > > > right fit for detailed GPU profiling.
> > > >
> > > > Also PMU drivers are very challenging to get the implementation right, since
> > > > locking model and atomicity requirements are quite demanding.
> > > >
> > > > From my point of view, at least it is my initial thinking, if custom per
> > > > driver solutions are strongly not desired, it could be interesting to look
> > > > into whether there is enough commonality, in at least concepts, to see if a
> > > > new DRM level common but extensible API would be doable. Even then it may be
> > > > tricky to "extract" enough common code to justify it.
> > > >
> > > > Regards,
> > > >
> > > > Tvrtko
> > > >
> > > > >
> > > > > Cheers, Sima
> > > > >
> > > > >
> > > > > >
> > > > > > Steve
> > > > > >
> > > > > > > Adrián Larumbe (1):
> > > > > > > drm/panfrost: Always record job cycle and timestamp information
> > > > > > >
> > > > > > > drivers/gpu/drm/panfrost/Makefile | 2 --
> > > > > > > drivers/gpu/drm/panfrost/panfrost_debugfs.c | 21 ------------------
> > > > > > > drivers/gpu/drm/panfrost/panfrost_debugfs.h | 14 ------------
> > > > > > > drivers/gpu/drm/panfrost/panfrost_device.h | 1 -
> > > > > > > drivers/gpu/drm/panfrost/panfrost_drv.c | 5 -----
> > > > > > > drivers/gpu/drm/panfrost/panfrost_job.c | 24 ++++++++-------------
> > > > > > > drivers/gpu/drm/panfrost/panfrost_job.h | 1 -
> > > > > > > 7 files changed, 9 insertions(+), 59 deletions(-)
> > > > > > > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.c
> > > > > > > delete mode 100644 drivers/gpu/drm/panfrost/panfrost_debugfs.h
> > > > > > >
> > > > > > >
> > > > > > > base-commit: 6b1f93ea345947c94bf3a7a6e668a2acfd310918
> > > > > >
> > > > >
> > > > > --
> > > > > Daniel Vetter
> > > > > Software Engineer, Intel Corporation
> > > > > http://blog.ffwll.ch
>

--
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch