From: Praveen Chidambaram <[email protected]>
In some systems, the system bus speed can be varied, usually
based on the current CPU frequency. However, various device
drivers and/or applications may need a faster system bus for I/O
even though the CPU itself may be idle.
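As an illustration, a driver that wants the bus kept fast across an I/O
burst could vote through the existing requirement API with the new
class (a minimal sketch only; the requirement name and the frequency
value are made up, and the units are whatever the platform's notifier
expects):

#include <linux/pm_qos_params.h>

static void example_start_io(void)
{
	/* Vote for a minimum system bus frequency while I/O is active. */
	pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_FREQ, "example_io", 200000);
}

static void example_stop_io(void)
{
	/* Drop the vote; the bus is free to scale back down with the CPU. */
	pm_qos_remove_requirement(PM_QOS_SYSTEM_BUS_FREQ, "example_io");
}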
Signed-off-by: Praveen Chidambaram <[email protected]>
Signed-off-by: David Brown <[email protected]>
Signed-off-by: Daniel Walker <[email protected]>
---
include/linux/pm_qos_params.h | 3 ++-
kernel/pm_qos_params.c | 32 +++++++++++++++++++++++++-------
2 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index d74f75e..091c13c 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -10,8 +10,9 @@
#define PM_QOS_CPU_DMA_LATENCY 1
#define PM_QOS_NETWORK_LATENCY 2
#define PM_QOS_NETWORK_THROUGHPUT 3
+#define PM_QOS_SYSTEM_BUS_FREQ 4
-#define PM_QOS_NUM_CLASSES 4
+#define PM_QOS_NUM_CLASSES 5
#define PM_QOS_DEFAULT_VALUE -1
int pm_qos_add_requirement(int qos, char *name, s32 value);
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 3db49b9..8576f40 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -102,12 +102,24 @@ static struct pm_qos_object network_throughput_pm_qos = {
.comparitor = max_compare
};
+static BLOCKING_NOTIFIER_HEAD(system_bus_freq_notifier);
+static struct pm_qos_object system_bus_freq_pm_qos = {
+ .requirements =
+ {LIST_HEAD_INIT(system_bus_freq_pm_qos.requirements.list)},
+ .notifiers = &system_bus_freq_notifier,
+ .name = "system_bus_freq",
+ .default_value = 0,
+ .target_value = ATOMIC_INIT(0),
+ .comparitor = max_compare
+};
+
-static struct pm_qos_object *pm_qos_array[] = {
- &null_pm_qos,
- &cpu_dma_pm_qos,
- &network_lat_pm_qos,
- &network_throughput_pm_qos
+static struct pm_qos_object *pm_qos_array[PM_QOS_NUM_CLASSES] = {
+ [PM_QOS_RESERVED] = &null_pm_qos,
+ [PM_QOS_CPU_DMA_LATENCY] = &cpu_dma_pm_qos,
+ [PM_QOS_NETWORK_LATENCY] = &network_lat_pm_qos,
+ [PM_QOS_NETWORK_THROUGHPUT] = &network_throughput_pm_qos,
+ [PM_QOS_SYSTEM_BUS_FREQ] = &system_bus_freq_pm_qos,
};
static DEFINE_SPINLOCK(pm_qos_lock);
@@ -313,7 +325,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
* will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
- int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
@@ -409,9 +421,15 @@ static int __init pm_qos_power_init(void)
return ret;
}
ret = register_pm_qos_misc(&network_throughput_pm_qos);
- if (ret < 0)
+ if (ret < 0) {
printk(KERN_ERR
"pm_qos_param: network_throughput setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&system_bus_freq_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: system_bus_freq setup failed\n");
return ret;
}
--
1.6.3.3
Just adding the Signed-off-bys to the CC line.
On Thu, 2009-12-31 at 17:20 -0800, Daniel Walker wrote:
> From: Praveen Chidambaram <[email protected]>
>
> In some systems, the system bus speed can be varied, usually
> based on the current CPU frequency. However, various device
> drivers and/or applications may need a faster system bus for I/O
> even though the CPU itself may be idle.
>
> Signed-off-by: Praveen Chidambaram <[email protected]>
> Signed-off-by: David Brown <[email protected]>
> Signed-off-by: Daniel Walker <[email protected]>
On Thu, Dec 31, 2009 at 05:20:27PM -0800, Daniel Walker wrote:
> From: Praveen Chidambaram <[email protected]>
>
> In some systems, the system bus speed can be varied, usually
> based on the current CPU frequency. However, various device
> drivers and/or applications may need a faster system bus for I/O
> even though the CPU itself may be idle
What happened to the discussion around multiple platforms needing
multiple bus pm_qos_requirements?
Is system bus freq too generic? (I'm worried about the name space)
Is this ok? (I'm asking linux-pm for input here.)
On X86 would this be analogous to FSB, Memory, or PCI bus frequencies?
What will happen when there are two buses each wanting a PM_QOS
parameter? Is that a likely scenario?
Also, on your platform you have a throttling driver controlling the
frequency of some bus, and it will use this value as a constraint on
how far it will throttle, no? I would be interested in seeing this
driver sometime. (I just want to make sure no one bastardizes pm_qos
into an operating point thing. I'm not sure I can justify why, but I
want to avoid that.)
Let's have an on-list discussion around the above. Other than these
points I don't see a problem with your patch. I would like to know that
other Si vendors have a need for this beyond just yours, and that the
abstraction/naming works for them as well.
--mgross
>
> Signed-off-by: Praveen Chidambaram <[email protected]>
> Signed-off-by: David Brown <[email protected]>
> Signed-off-by: Daniel Walker <[email protected]>
> ---
> include/linux/pm_qos_params.h | 3 ++-
> kernel/pm_qos_params.c | 32 +++++++++++++++++++++++++-------
> 2 files changed, 27 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
> index d74f75e..091c13c 100644
> --- a/include/linux/pm_qos_params.h
> +++ b/include/linux/pm_qos_params.h
> @@ -10,8 +10,9 @@
> #define PM_QOS_CPU_DMA_LATENCY 1
> #define PM_QOS_NETWORK_LATENCY 2
> #define PM_QOS_NETWORK_THROUGHPUT 3
> +#define PM_QOS_SYSTEM_BUS_FREQ 4
>
> -#define PM_QOS_NUM_CLASSES 4
> +#define PM_QOS_NUM_CLASSES 5
> #define PM_QOS_DEFAULT_VALUE -1
>
> int pm_qos_add_requirement(int qos, char *name, s32 value);
> diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
> index 3db49b9..8576f40 100644
> --- a/kernel/pm_qos_params.c
> +++ b/kernel/pm_qos_params.c
> @@ -102,12 +102,24 @@ static struct pm_qos_object network_throughput_pm_qos = {
> .comparitor = max_compare
> };
>
> +static BLOCKING_NOTIFIER_HEAD(system_bus_freq_notifier);
> +static struct pm_qos_object system_bus_freq_pm_qos = {
> + .requirements =
> + {LIST_HEAD_INIT(system_bus_freq_pm_qos.requirements.list)},
> + .notifiers = &system_bus_freq_notifier,
> + .name = "system_bus_freq",
> + .default_value = 0,
> + .target_value = ATOMIC_INIT(0),
> + .comparitor = max_compare
> +};
> +
>
> -static struct pm_qos_object *pm_qos_array[] = {
> - &null_pm_qos,
> - &cpu_dma_pm_qos,
> - &network_lat_pm_qos,
> - &network_throughput_pm_qos
> +static struct pm_qos_object *pm_qos_array[PM_QOS_NUM_CLASSES] = {
> + [PM_QOS_RESERVED] = &null_pm_qos,
> + [PM_QOS_CPU_DMA_LATENCY] = &cpu_dma_pm_qos,
> + [PM_QOS_NETWORK_LATENCY] = &network_lat_pm_qos,
> + [PM_QOS_NETWORK_THROUGHPUT] = &network_throughput_pm_qos,
> + [PM_QOS_SYSTEM_BUS_FREQ] = &system_bus_freq_pm_qos,
I've never used or seen this syntax before. Is it C99? FWIW I like it.
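For the record, this is C99 -- designated initializers, which gcc also
accepted as an extension before C99. A tiny standalone example, nothing
to do with the patch: entries can be set by index, in any order, and
anything not mentioned is zero-initialized:

	static const char *names[4] = {
		[3] = "throughput",
		[1] = "latency",
	};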
> };
>
> static DEFINE_SPINLOCK(pm_qos_lock);
> @@ -313,7 +325,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
> * will register the notifier into a notification chain that gets called
> * upon changes to the pm_qos_class target value.
> */
> - int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
> +int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
> {
> int retval;
>
> @@ -409,9 +421,15 @@ static int __init pm_qos_power_init(void)
> return ret;
> }
> ret = register_pm_qos_misc(&network_throughput_pm_qos);
> - if (ret < 0)
> + if (ret < 0) {
> printk(KERN_ERR
> "pm_qos_param: network_throughput setup failed\n");
> + return ret;
> + }
> + ret = register_pm_qos_misc(&system_bus_freq_pm_qos);
> + if (ret < 0)
> + printk(KERN_ERR
> + "pm_qos_param: system_bus_freq setup failed\n");
>
> return ret;
> }
> --
> 1.6.3.3
On Mon, 2010-01-04 at 13:38 -0800, mark gross wrote:
> On Thu, Dec 31, 2009 at 05:20:27PM -0800, Daniel Walker wrote:
> > From: Praveen Chidambaram <[email protected]>
> >
> > In some systems, the system bus speed can be varied, usually
> > based on the current CPU frequency. However, various device
> > drivers and/or applications may need a faster system bus for I/O
> > even though the CPU itself may be idle
>
> What happened to the discussion around multiple platforms needing
> multiple bus pm_qos_requirements?
>
> Is system bus freq too generic? (I'm worried about the name space)
> Is this ok? (I'm asking linux-pm for input here.)
> On X86 would this be analogous to FSB, Memory, or PCI bus frequencies?
> What will happen when there are two buses each wanting a PM_QOS
> parameter? Is that a likely scenario?
We can always change the naming in the future, but someone could add a
PM_QOS_MEMORY_BUS_FREQ or PM_QOS_PERIPHERAL_BUS_FREQ in addition to
this one. Since it's all generic code, generic naming seems fine.
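As a sketch, a follow-on class (the name here is hypothetical) would
just mirror what this patch does:

/* include/linux/pm_qos_params.h */
#define PM_QOS_MEMORY_BUS_FREQ	5
#define PM_QOS_NUM_CLASSES	6

/* kernel/pm_qos_params.c */
static BLOCKING_NOTIFIER_HEAD(memory_bus_freq_notifier);
static struct pm_qos_object memory_bus_freq_pm_qos = {
	.requirements =
		{LIST_HEAD_INIT(memory_bus_freq_pm_qos.requirements.list)},
	.notifiers = &memory_bus_freq_notifier,
	.name = "memory_bus_freq",
	.default_value = 0,
	.target_value = ATOMIC_INIT(0),
	.comparitor = max_compare
};
/* ... plus a [PM_QOS_MEMORY_BUS_FREQ] entry in pm_qos_array and a
 * register_pm_qos_misc() call in pm_qos_power_init(). */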
> Also, on your platform you have a throttling driver controlling the
> frequency of some bus, that will use this value as a constraint on how
> far it will throttle. no? I would be interested in seeing this driver
> sometime. (I just want to make sure no one bastardizes pm_qos into an
> operating point thing. I'm not sure I can justify why but I want to
> avoid that.)
When you say "operating point thing", do you mean you don't want the
frequency adjusted at runtime? You want it set once and then move on?
Daniel
On Mon, Jan 04, 2010 at 01:38:52PM -0800, mark gross wrote:
> Also, on your platform you have a throttling driver controlling the
> frequency of some bus, that will use this value as a constraint on how
> far it will throttle. no? I would be interested in seeing this driver
> sometime. (I just want to make sure no one bastardizes pm_qos into an
> operating point thing. I'm not sure I can justify why but I want to
> avoid that.)
We're part of a system with several other devices. Our driver
just passes the frequency on as our voted minimum.
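Roughly, the notifier has this shape (a paraphrase, not the actual msm
clock code -- EXAMPLE_BUS_MAX_KHZ and example_bus_set_min_rate() are
made-up names; see the link below for the real thing):

#define EXAMPLE_BUS_MAX_KHZ	400000UL

static int bus_freq_qos_notify(struct notifier_block *nb,
			       unsigned long new_freq_khz, void *data)
{
	/* The pm_qos target is our voted minimum, clamped to what the
	 * hardware can actually do. */
	example_bus_set_min_rate(min(new_freq_khz, EXAMPLE_BUS_MAX_KHZ));
	return NOTIFY_OK;
}

static struct notifier_block bus_freq_qos_nb = {
	.notifier_call = bus_freq_qos_notify,
};

/* at init: pm_qos_add_notifier(PM_QOS_SYSTEM_BUS_FREQ, &bus_freq_qos_nb); */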
Look for PM_QOS_SYSTEM_BUS_FREQ in the file:
https://www.codeaurora.org/gitweb/quic/la/?p=kernel/msm.git;a=blob;f=arch/arm/mach-msm/clock.c;h=3a22b5abb8e8acf876367dc6c013fe507af9c0ac;hb=287d333b03df8faafd3a480abc8e121110870839
http://tinyurl.com/ye7eg33
David
> What happened to the discussion around multiple platforms needing
> multiple bus pm_qos_requirements?
>
> Is system bus freq too generic? (I'm worried about the name space)
Something generic that each architecture can interpret for something
specific would be nice.
> Is this ok? (I'm asking linux-pm for input here.)
> On X86 would this be analogous to FSB, Memory, or PCI bus frequencies?
I am hoping this would be equivalent to the FSB.
> What will happen when there are two buses each wanting a PM_QOS
> parameter? Is that a likely scenario?
Possibly. I am not sure how we would solve multiple instances with PM
QoS.
>
> Also, on your platform you have a throttling driver controlling the
> frequency of some bus, that will use this value as a constraint on how
> far it will throttle. no? I would be interested in seeing this driver
> sometime. (I just want to make sure no one bastardizes pm_qos into an
> operating point thing. I'm not sure I can justify why but I want to
> avoid that.)
The notifier handler and the clock driver limit the bus frequency to
the max supported by the hardware.
What is an operating point thing?
>
> Lets have a on list discussion around the above. Other than these I
> don't see a problem with your patch. I would like to know that other Si
> vendors have a need for it other than just yours and the
> abstraction/naming is compatible to them as well.
On the ARM architecture, I hope this parameter would be used for
AXI/AHB scaling, and it could be used by other Si vendors as well.
Thanks,
Praveen
On Mon, Jan 04, 2010 at 02:00:45PM -0800, Daniel Walker wrote:
> On Mon, 2010-01-04 at 13:38 -0800, mark gross wrote:
> > On Thu, Dec 31, 2009 at 05:20:27PM -0800, Daniel Walker wrote:
> > > From: Praveen Chidambaram <[email protected]>
> > >
> > > In some systems, the system bus speed can be varied, usually
> > > based on the current CPU frequency. However, various device
> > > drivers and/or applications may need a faster system bus for I/O
> > > even though the CPU itself may be idle
> >
> > What happened to the discussion around multiple platforms needing
> > multiple bus pm_qos_requirements?
> >
> > Is system bus freq too generic? (I'm worried about the name space)
> > Is this ok? (I'm asking linux-pm for input here.)
> > On X86 would this be analogous to FSB, Memory, or PCI bus frequencies?
> > What will happen when there are two buses each wanting a PM_QOS
> > parameter? Is that a likely scenario?
>
> We can always change the naming in the future, but someone could add a
> PM_QOS_MEMORY_BUS_FREQ or PM_QOS_PERIPHERAL_BUS_FREQ in addition to this
> one.. Since it's all generic code having generic naming seems fine..
>
> > Also, on your platform you have a throttling driver controlling the
> > frequency of some bus, that will use this value as a constraint on how
> > far it will throttle. no? I would be interested in seeing this driver
> > sometime. (I just want to make sure no one bastardizes pm_qos into an
> > operating point thing. I'm not sure I can justify why but I want to
> > avoid that.)
>
> When you say "operating point thing" do you mean you don't want the
> frequency adjusted at runtime? You want it set once then move on?
>
No, I mean that I want the parameter to be used to constrain throttling
done by drivers, not to define the settings used by the drivers.
I worry that the parameter could be used as "the setting" for the
driver, thus pulling the PM throttling logic out of the driver and
putting it in user mode as a back-door operating point interface.
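In code terms, the difference is roughly this (hypothetical driver
logic, using the existing pm_qos_requirement() getter):

static s32 pick_bus_rate(s32 driver_target)
{
	s32 qos_floor = pm_qos_requirement(PM_QOS_SYSTEM_BUS_FREQ);

	/* Constraint: the driver keeps its own throttling policy; the
	 * qos value is only a lower bound on how far it throttles. */
	return max(driver_target, qos_floor);

	/* The operating-point version I want to avoid would instead be
	 *	return qos_floor;
	 * i.e. the value *is* the setting and the policy has moved out
	 * of the driver. */
}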
It's a subtle distinction, but it's one of the things I worry about.
Also, I'm not sure I can fight off good arguments for me to lighten up
on this point, but I'll try if it comes to that. ;)
--mgross
On Mon, Jan 04, 2010 at 03:18:52PM -0800, David Brown wrote:
> On Mon, Jan 04, 2010 at 01:38:52PM -0800, mark gross wrote:
>
> > Also, on your platform you have a throttling driver controlling the
> > frequency of some bus, that will use this value as a constraint on how
> > far it will throttle. no? I would be interested in seeing this driver
> > sometime. (I just want to make sure no one bastardizes pm_qos into an
> > operating point thing. I'm not sure I can justify why but I want to
> > avoid that.)
>
> We're part of a system with several other devices. Our driver
> just passes the frequency on as our voted minimum.
>
> Look for PM_QOS_SYSTEM_BUS_FREQ in the file:
>
> https://www.codeaurora.org/gitweb/quic/la/?p=kernel/msm.git;a=blob;f=arch/arm/mach-msm/clock.c;h=3a22b5abb8e8acf876367dc6c013fe507af9c0ac;hb=287d333b03df8faafd3a480abc8e121110870839
>
> http://tinyurl.com/ye7eg33
Hurray for tinyurls!
It looks pretty OK to me on my initial inspection.
--mgross
> David
On Mon, Jan 04, 2010 at 04:22:58PM -0700, Chidambaram, Praveen wrote:
>
>
> > What happened to the discussion around multiple platforms needing
> > multiple bus pm_qos_requirements?
>
> >
> > Is system bus freq too generic? (I'm worried about the name space)
> Something generic would be nice that each architecture can interpet it
> for something specific.
>
> > Is this ok? (I'm asking linux-pm for input here.)
> > On X86 would this be analogous to FSB, Memory, or PCI bus frequencies?
> I am hoping this would be eqivalent to FSB.
>
> > What will happen when there are two buses each wanting a PM_QOS
> > parameter? Is that a likely scenario?
> Possibly. I am not sure how do we solve multiple instances with PM
> QoS.
Me neither; we've been waiting for the problem to become big enough
that we need to fix it. FWIW, the issue exists for the network
parameters when you have multiple NICs with different capabilities,
i.e. 100Mb, 1000Mb, 10000Mb, and 12Mb; all of them sharing the same
constraint value sort of falls over.
This is a problem that needs to be pushed by the device folks a little
before we can come up with a sensible implementation.
> >
> > Also, on your platform you have a throttling driver controlling the
> > frequency of some bus, that will use this value as a constraint on how
> > far it will throttle. no? I would be interested in seeing this driver
> > sometime. (I just want to make sure no one bastardizes pm_qos into an
> > operating point thing. I'm not sure I can justify why but I want to
> > avoid that.)
> The notifier handler and the clock driver limits the bus frequency to
> the max supported by the hardware.
>
> What is an operating point thing?
Nope. That's a constraint-based PM thing, very compatible with the
pm_qos design goals.
>
> >
> > Lets have a on list discussion around the above. Other than these I
> > don't see a problem with your patch. I would like to know that other Si
> > vendors have a need for it other than just yours and the
> > abstraction/naming is compatible to them as well.
> In ARM architecture, I hope this parameter would be used for AXI/AHB
> scaling and could be used by other Si vendors as well.
>
If other folks have no issue with this, I think I should provide my
sign-off and see where it goes.
--mgross
Daniel Walker <[email protected]> writes:
> From: Praveen Chidambaram <[email protected]>
>
> In some systems, the system bus speed can be varied, usually
> based on the current CPU frequency. However, various device
> drivers and/or applications may need a faster system bus for I/O
> even though the CPU itself may be idle.
>
> Signed-off-by: Praveen Chidambaram <[email protected]>
> Signed-off-by: David Brown <[email protected]>
> Signed-off-by: Daniel Walker <[email protected]>
I think some type of bus parameter is a good idea and something we
would use on TI OMAP as well. However, I also have two concerns with
this approach.
1) The constraint should be in throughput, not in frequency
2) It doesn't handle multiple busses (as Mark Gross pointed out)
For (1), I don't like the idea of forcing drivers to know about the
underlying bus frequency. The same driver could be in use across a
family of SoCs (or even different SoCs), each having different bus
frequencies. For this driver to be portable, the driver should
express its constraints in terms of throughput, not underlying bus
frequency.
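For example (the class and helper here are purely hypothetical, just to
illustrate the split), the driver would ask for bandwidth and only the
platform code would know the bus width:

/* Driver side: portable, knows nothing about bus clocks. */
static void my_driver_start_dma(void)
{
	pm_qos_add_requirement(PM_QOS_SYSTEM_BUS_THROUGHPUT, "my_dma",
			       40000);	/* KB/s */
}

/* Platform side, e.g. in its qos notifier: only here does bandwidth
 * become a clock rate. */
static unsigned long kbps_to_bus_khz(unsigned long kbps)
{
	return kbps / 4;	/* 4 bytes per cycle on a 32-bit bus */
}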
For (2), I'm not sure what the best way to handle this in PM QoS is.
Lately, I've been thinking that PM QoS is not the right place for
this. My idea (currently only in my head) is that buses in the
LDM (platform_bus, etc.) should have constraints associated with
them. That way, constraints can be set using a 'struct device' and
the bus they are attached to will inherit the constraints directly.
This automatically solves the problem of multiple buses and allows
the possibility for different bus types to handle the constraints
differently.
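Purely as a strawman (none of these functions exist), the driver-side
usage might look like:

static int my_driver_probe(struct platform_device *pdev)
{
	/* The constraint hangs off the struct device; the bus the
	 * device sits on inherits/aggregates it, so multiple buses
	 * fall out naturally. */
	dev_pm_constraint_set(&pdev->dev, DEV_PM_BUS_THROUGHPUT,
			      40000);	/* KB/s */
	return 0;
}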
Kevin
On Thu, Jan 07, 2010 at 08:34:28AM -0800, Kevin Hilman wrote:
> Daniel Walker <[email protected]> writes:
>
> > From: Praveen Chidambaram <[email protected]>
> >
> > In some systems, the system bus speed can be varied, usually
> > based on the current CPU frequency. However, various device
> > drivers and/or applications may need a faster system bus for I/O
> > even though the CPU itself may be idle.
> >
> > Signed-off-by: Praveen Chidambaram <[email protected]>
> > Signed-off-by: David Brown <[email protected]>
> > Signed-off-by: Daniel Walker <[email protected]>
>
> I think some type of bus parameter is a good idea and something we
> would use on TI OMAP as well. However, I also have two concerns with
> this approach.
>
> 1) The constraint should be in throughput, not in frequency
> 2) It doesn't handle multiple busses (as Mark Gross pointed out)
>
> For (1), I don't like the idea of forcing drivers to know about the
> underlying bus frequency. The same driver could be in use across a
> family of SoCs (or even different SoCs), each having different bus
> frequencies. For this driver to be portable, the driver should
> express its constraints in terms of throughput, not underlying bus
> frequency.
This makes sense, as throttling constraints should be based on things
that are invariant to bus width.
>
> For (2), I'm not sure what the best way to handle this in PM QoS is.
> Lately, I've been thinking that PM QoS is not the right place for
> this. My idea (currenly only in my head) is the that busses in the
> LDM (platform_bus, etc.) should have constraints associated with
> them. That way, constraints can be set using a 'struct device' and
> the bus they are attatched to will inherit the constraints directly.
> This automatically solves the problem of multiple busses and allows
> the possibility for different bus types to handle the constraints
> differently.
Sounds a bit like the range timers implementation. One question: what
would a throttling-constraint change API look like if not pm_qos?
I think adding a bandwidth throttling constraint to struct device may
be a good thing, but I'm not sure there isn't still a place for the
PM_QOS interface, i.e. perhaps if this happens we should look at
evolving the pm_qos API to handle multiple constraints per class and
multiple buses, multiple NICs, etc.
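Strawman only (no such API exists), but something shaped like
per-instance classes rather than one global target per class:

static void example(void)
{
	/* Each bus/NIC gets its own instance of a class. */
	struct pm_qos_instance *eth0 =
		pm_qos_class_instance(PM_QOS_NETWORK_THROUGHPUT, "eth0");

	pm_qos_add_requirement_on(eth0, "my_app", 100000);	/* kbps */
}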
--mgross
mark gross <[email protected]> writes:
> On Thu, Jan 07, 2010 at 08:34:28AM -0800, Kevin Hilman wrote:
>> Daniel Walker <[email protected]> writes:
>>
>> > From: Praveen Chidambaram <[email protected]>
>> >
>> > In some systems, the system bus speed can be varied, usually
>> > based on the current CPU frequency. However, various device
>> > drivers and/or applications may need a faster system bus for I/O
>> > even though the CPU itself may be idle.
>> >
>> > Signed-off-by: Praveen Chidambaram <[email protected]>
>> > Signed-off-by: David Brown <[email protected]>
>> > Signed-off-by: Daniel Walker <[email protected]>
>>
>> I think some type of bus parameter is a good idea and something we
>> would use on TI OMAP as well. However, I also have two concerns with
>> this approach.
>>
>> 1) The constraint should be in throughput, not in frequency
>> 2) It doesn't handle multiple busses (as Mark Gross pointed out)
>>
>> For (1), I don't like the idea of forcing drivers to know about the
>> underlying bus frequency. The same driver could be in use across a
>> family of SoCs (or even different SoCs), each having different bus
>> frequencies. For this driver to be portable, the driver should
>> express its constraints in terms of throughput, not underlying bus
>> frequency.
>
> This makes sense, as throttling constraints should be based on things
> that are invariant to bus width.
>
>>
>> For (2), I'm not sure what the best way to handle this in PM QoS is.
>> Lately, I've been thinking that PM QoS is not the right place for
>> this. My idea (currenly only in my head) is the that busses in the
>> LDM (platform_bus, etc.) should have constraints associated with
>> them. That way, constraints can be set using a 'struct device' and
>> the bus they are attatched to will inherit the constraints directly.
>> This automatically solves the problem of multiple busses and allows
>> the possibility for different bus types to handle the constraints
>> differently.
>
> Sounds a bit like the range timers implementation. One question, what
> would a throttling constraint changes API look like if not pm_qos?
Hmm, good question. I haven't gotten far enough to think of an API. :(
I've been thinking of (and using) PM QoS as an API for system-wide
constraints, and it is fine for this.
However, when it comes to device-specific constraints, I don't think
it fits. IMHO, constraints should be something that becomes part of
the driver model rather than a separate API, most likely by evolving
the current runtime PM code.
Basically, if a device has a constraint, it should register that
constraint with the LDM (API TBD), and it should be associated with the
device and its bus, etc. That way, the PM core code (including the
runtime PM layer) can examine device- and bus-specific constraints and
make localized decisions rather than querying some central repository.
This approach also allows the platform-specific runtime PM
implementations to handle constraints in platform-specific ways, while
the constraints stay tightly coupled to devices and/or buses.
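Again as a strawman (dev_pm_constraint_aggregate() is made up), the
core/bus side could then make its decision locally:

static bool example_bus_may_scale_down(struct device *dev)
{
	/* Look only at the constraints aggregated on this device's
	 * bus, not at a global repository. */
	return dev_pm_constraint_aggregate(dev->bus,
					   DEV_PM_BUS_THROUGHPUT) == 0;
}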
> I think adding a bandwidth throttling constraint to struct device may be
> a good thing, but I'm not sure if there isn't a place for the PM_QOS
> interface yet. i.e. perhaps if this happens, then we should look at
> evolving the pm_qos api to handle multiple constraint's per class and
> multiple buses, multiple nic's etc...
That's one option. But my preferred option would be to see the LDM grow
some notion of per-device and/or per-bus constraints.
We're currently experimenting with this on ARM/OMAP, where we have a
'struct omap_device', which is a 'struct device' plus some
arch-specific extensions including constraints. I am working on runtime
PM for OMAP currently, and when something is working there, we may have
some proposals for generalizing this in an LDM-generic way.
Kevin
Just adding some CCs.
On Thu, 2010-01-07 at 08:34 -0800, Kevin Hilman wrote:
> Daniel Walker <[email protected]> writes:
>
> > From: Praveen Chidambaram <[email protected]>
> >
> > In some systems, the system bus speed can be varied, usually
> > based on the current CPU frequency. However, various device
> > drivers and/or applications may need a faster system bus for I/O
> > even though the CPU itself may be idle.
> >
> > Signed-off-by: Praveen Chidambaram <[email protected]>
> > Signed-off-by: David Brown <[email protected]>
> > Signed-off-by: Daniel Walker <[email protected]>
>
> I think some type of bus parameter is a good idea and something we
> would use on TI OMAP as well. However, I also have two concerns with
> this approach.
>
> 1) The constraint should be in throughput, not in frequency
> 2) It doesn't handle multiple busses (as Mark Gross pointed out)
>
> For (1), I don't like the idea of forcing drivers to know about the
> underlying bus frequency. The same driver could be in use across a
> family of SoCs (or even different SoCs), each having different bus
> frequencies. For this driver to be portable, the driver should
> express its constraints in terms of throughput, not underlying bus
> frequency.
>
> For (2), I'm not sure what the best way to handle this in PM QoS is.
> Lately, I've been thinking that PM QoS is not the right place for
> this. My idea (currenly only in my head) is the that busses in the
> LDM (platform_bus, etc.) should have constraints associated with
> them. That way, constraints can be set using a 'struct device' and
> the bus they are attatched to will inherit the constraints directly.
> This automatically solves the problem of multiple busses and allows
> the possibility for different bus types to handle the constraints
> differently.
>
> Kevin