In order to have the pm_qos framework be callable from interrupt
context, all listeners have to also be callable in that context.
This patch schedules the update of the latency value via
ieee80211_max_network_latency() for execution on
the global workqueue (using schedule_work()).
As there was no synchronization/locking between the listener and the
caller of pm_qos_update_request before, there should be no new races
uncovered by this. Although the timing probably changes.
Signed-off-by: Florian Mickler <[email protected]>
---
This needs some networking expertise to check the reasoning above and
judge the implementation.
net/mac80211/ieee80211_i.h | 4 +++-
net/mac80211/main.c | 5 +++--
net/mac80211/mlme.c | 20 +++++++++++++++-----
3 files changed, 21 insertions(+), 8 deletions(-)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 1a9e2da..3d2155a 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -851,6 +851,7 @@ struct ieee80211_local {
struct work_struct dynamic_ps_disable_work;
struct timer_list dynamic_ps_timer;
struct notifier_block network_latency_notifier;
+ struct work_struct network_latency_notifier_work;
int user_power_level; /* in dBm */
int power_constr_level; /* in dBm */
@@ -994,8 +995,9 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
-int ieee80211_max_network_latency(struct notifier_block *nb,
+int ieee80211_max_network_latency_notification(struct notifier_block *nb,
unsigned long data, void *dummy);
+void ieee80211_max_network_latency(struct work_struct *w);
void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_sw_ie *sw_elem,
struct ieee80211_bss *bss,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 22a384d..5cded3a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -607,9 +607,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
rtnl_unlock();
ieee80211_led_init(local);
-
+ INIT_WORK(&local->network_latency_notifier_work,
+ ieee80211_max_network_latency);
local->network_latency_notifier.notifier_call =
- ieee80211_max_network_latency;
+ ieee80211_max_network_latency_notification;
result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
&local->network_latency_notifier);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 0839c4e..feb6134 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1953,17 +1953,27 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
rcu_read_unlock();
}
-int ieee80211_max_network_latency(struct notifier_block *nb,
- unsigned long data, void *dummy)
+void ieee80211_max_network_latency(struct work_struct *w)
{
- s32 latency_usec = (s32) data;
+ s32 latency_usec = (s32) atomic_long_read(&w->data);
struct ieee80211_local *local =
- container_of(nb, struct ieee80211_local,
- network_latency_notifier);
+ container_of(w, struct ieee80211_local,
+ network_latency_notifier_work);
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local, latency_usec);
mutex_unlock(&local->iflist_mtx);
+}
+
+/* the notifier might be called from interrupt context */
+int ieee80211_max_network_latency_notification(struct notifier_block *nb,
+ unsigned long data, void *dummy)
+{
+ struct ieee80211_local *local =
+ container_of(nb, struct ieee80211_local,
+ network_latency_notifier);
+ atomic_long_set(&local->network_latency_notifier_work.data, data);
+ schedule_work(&local->network_latency_notifier_work);
return 0;
}
--
1.7.1
The pm_qos framework has to guarantee atomic notifications so that
drivers can request and release constraints at all times while no races
occur.
In order to avoid implementing a secondary notification chain in which
listeners might sleep, we demand that every listener implements its
notification so that it can run in interrupt context. The listener is in
a better position to know if races are harmful or not.
Signed-off-by: Florian Mickler <[email protected]>
---
kernel/pm_qos_params.c | 50 ++++++++++++++++++++++++------------------------
1 files changed, 25 insertions(+), 25 deletions(-)
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index f42d3f7..d918605 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -63,7 +63,7 @@ static s32 min_compare(s32 v1, s32 v2);
struct pm_qos_object {
struct pm_qos_request_list requests;
- struct blocking_notifier_head *notifiers;
+ struct atomic_notifier_head *notifiers;
struct miscdevice pm_qos_power_miscdev;
char *name;
s32 default_value;
@@ -72,7 +72,7 @@ struct pm_qos_object {
};
static struct pm_qos_object null_pm_qos;
-static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+static ATOMIC_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
.requests = {LIST_HEAD_INIT(cpu_dma_pm_qos.requests.list)},
.notifiers = &cpu_dma_lat_notifier,
@@ -82,7 +82,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
.comparitor = min_compare
};
-static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+static ATOMIC_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
.requests = {LIST_HEAD_INIT(network_lat_pm_qos.requests.list)},
.notifiers = &network_lat_notifier,
@@ -93,7 +93,7 @@ static struct pm_qos_object network_lat_pm_qos = {
};
-static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+static ATOMIC_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
.requests = {LIST_HEAD_INIT(network_throughput_pm_qos.requests.list)},
.notifiers = &network_throughput_notifier,
@@ -103,7 +103,6 @@ static struct pm_qos_object network_throughput_pm_qos = {
.comparitor = max_compare
};
-
static struct pm_qos_object *pm_qos_array[] = {
&null_pm_qos,
&cpu_dma_pm_qos,
@@ -135,35 +134,30 @@ static s32 min_compare(s32 v1, s32 v2)
return min(v1, v2);
}
-
static void update_target(int pm_qos_class)
{
s32 extreme_value;
+ struct pm_qos_object *obj = pm_qos_array[pm_qos_class];
struct pm_qos_request_list *node;
unsigned long flags;
int call_notifier = 0;
spin_lock_irqsave(&pm_qos_lock, flags);
- extreme_value = pm_qos_array[pm_qos_class]->default_value;
- list_for_each_entry(node,
- &pm_qos_array[pm_qos_class]->requests.list, list) {
- extreme_value = pm_qos_array[pm_qos_class]->comparitor(
- extreme_value, node->value);
+ extreme_value = obj->default_value;
+ list_for_each_entry(node, &obj->requests.list, list) {
+ extreme_value = obj->comparitor(extreme_value, node->value);
}
- if (atomic_read(&pm_qos_array[pm_qos_class]->target_value) !=
- extreme_value) {
+ if (atomic_read(&obj->target_value) != extreme_value) {
call_notifier = 1;
- atomic_set(&pm_qos_array[pm_qos_class]->target_value,
- extreme_value);
+ atomic_set(&obj->target_value, extreme_value);
pr_debug(KERN_ERR "new target for qos %d is %d\n", pm_qos_class,
- atomic_read(&pm_qos_array[pm_qos_class]->target_value));
+ atomic_read(&obj->target_value));
}
spin_unlock_irqrestore(&pm_qos_lock, flags);
if (call_notifier)
- blocking_notifier_call_chain(
- pm_qos_array[pm_qos_class]->notifiers,
- (unsigned long) extreme_value, NULL);
+ atomic_notifier_call_chain(obj->notifiers,
+ (unsigned long) extreme_value, NULL);
}
static int register_pm_qos_misc(struct pm_qos_object *qos)
@@ -272,6 +266,7 @@ EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
* pm_qos_remove_request - modifies an existing qos request
+ *
* @pm_qos_req: handle to request list element
*
* Will remove pm qos request from the list of requests and
@@ -298,18 +293,21 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_request);
/**
* pm_qos_add_notifier - sets notification entry for changes to target value
+ *
+ * Note: The notifier has to be callable from interrupt context.
+ *
* @pm_qos_class: identifies which qos target changes should be notified.
* @notifier: notifier block managed by caller.
*
- * will register the notifier into a notification chain that gets called
+ * Will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
- retval = blocking_notifier_chain_register(
- pm_qos_array[pm_qos_class]->notifiers, notifier);
+ retval = atomic_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
return retval;
}
@@ -317,18 +315,19 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
/**
* pm_qos_remove_notifier - deletes notification entry from chain.
+ *
* @pm_qos_class: identifies which qos target changes are notified.
* @notifier: notifier block to be removed.
*
- * will remove the notifier from the notification chain that gets called
+ * Will remove the notifier from the notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
- retval = blocking_notifier_chain_unregister(
- pm_qos_array[pm_qos_class]->notifiers, notifier);
+ retval = atomic_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
return retval;
}
@@ -381,6 +380,7 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
} else
return -EINVAL;
+
pm_qos_req = (struct pm_qos_request_list *)filp->private_data;
pm_qos_update_request(pm_qos_req, value);
--
1.7.1
On Wed, 2010-06-09 at 11:15 +0200, [email protected] wrote:
> In order to have the pm_qos framework be callable from interrupt
> context, all listeners have to also be callable in that context.
That makes no sense at all. Why add work structs _everywhere_ in the
callees and make the API harder to use and easy to get wrong completely,
instead of just adding a single work struct that will be queued from the
caller and dealing with the locking complexity etc. just once.
johannes
On Wed, 9 Jun 2010, [email protected] wrote:
> The pm_qos framework has to guarantee atomic notifications so that
> drivers can request and release constraints at all times while no races
> occur.
>
> In order to avoid implementing a secondary notification chain in which
> listeners might sleep, we demand that every listener implements it's
> notification so that it can run in interrupt context. The listener is in
> a better position to know if races are harmful or not.
That breaks existing notifiers.
Thanks,
tglx
On Wed, 09 Jun 2010 11:38:07 +0200
Johannes Berg <[email protected]> wrote:
> On Wed, 2010-06-09 at 11:15 +0200, [email protected] wrote:
> > In order to have the pm_qos framework be callable from interrupt
> > context, all listeners have to also be callable in that context.
>
> That makes no sense at all. Why add work structs _everywhere_ in the
> callees and make the API harder to use and easy to get wrong completely,
> instead of just adding a single work struct that will be queued from the
> caller and dealing with the locking complexity etc. just once.
>
> johannes
Just to defend this approach, but I'm certainly not married to it
(hence RFC):
There are only two listeners at the moment. I suspect that most future
uses of the framework need to be atomic, as the driver that
requests a specific quality of service probably doesn't want to get into
races with the provider of that service(listener). So i suspected the
network listener to be the special case.
The race between service-provider and qos-requester for non-atomic
contexts is already there, isn't it? So, locking complexity shouldn't
be worse than before.
But my first approach to this is seen here:
https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/026902.html
A third possibility would be to make it dependent on the
type of the constraint, if blocking notifiers are allowed or not.
But that would sacrifice API consistency (update_request for one
constraint is allowed to be called in interrupt context and
update_request for another would be not).
Cheers,
Flo
On Wed, 2010-06-09 at 12:20 +0200, Florian Mickler wrote:
> On Wed, 09 Jun 2010 11:38:07 +0200
> Johannes Berg <[email protected]> wrote:
>
> > On Wed, 2010-06-09 at 11:15 +0200, [email protected] wrote:
> > > In order to have the pm_qos framework be callable from interrupt
> > > context, all listeners have to also be callable in that context.
> >
> > That makes no sense at all. Why add work structs _everywhere_ in the
> > callees and make the API harder to use and easy to get wrong completely,
> > instead of just adding a single work struct that will be queued from the
> > caller and dealing with the locking complexity etc. just once.
> There are only two listeners at the moment. I suspect that most future
> uses of the framework need to be atomic, as the driver that
> requests a specific quality of service probably doesn't want to get into
> races with the provider of that service(listener). So i suspected the
> network listener to be the special case.
Well even if it doesn't _want_ to race with it, a lot of drivers like
USB drivers etc. can't really do anything without deferring to a
workqueue.
And what's the race anyway? You get one update, defer the work, and if
another update happens inbetween you just read the new value when the
work finally runs -- and you end up doing it only once instead of twice.
That doesn't seem like a problem.
> The race between service-provider and qos-requester for non-atomic
> contextes is already there, isn't it? so, locking complexity shouldn't
> be worse than before.
I have no idea how it works now? I thought you can't request an update
from an atomic context.
However, if you request a QoS value, it is fundamentally that -- a
request. There's no guarantee as to when or how it will be honoured.
> But my first approach to this is seen here:
> https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/026902.html
Icky too.
> A third possibility would be to make it dependent on the
> type of the constraint, if blocking notifiers are allowed or not.
> But that would sacrifice API consistency (update_request for one
> constraint is allowed to be called in interrupt context and
> update_request for another would be not).
I don't see what's wrong with the fourth possibility: Allow calling
pm_qos_update_request() from atomic context, but change _it_ to schedule
off a work that calls the blocking notifier chain. That avoids the
complexity in notify-API users since they have process context, and also
in request-API users since they can call it from any context.
johannes
On Wed, 09 Jun 2010 12:42:08 +0200
Johannes Berg <[email protected]> wrote:
> On Wed, 2010-06-09 at 12:20 +0200, Florian Mickler wrote:
>
> > A third possibility would be to make it dependent on the
> > type of the constraint, if blocking notifiers are allowed or not.
> > But that would sacrifice API consistency (update_request for one
> > constraint is allowed to be called in interrupt context and
> > update_request for another would be not).
>
> I don't see what's wrong with the fourth possibility: Allow calling
> pm_qos_update_request() from atomic context, but change _it_ to schedule
> off a work that calls the blocking notifier chain. That avoids the
> complexity in notify-API users since they have process context, and also
> in request-API users since they can call it from any context.
>
> johannes
That was also my first idea, but then I thought about qos and thought
atomic notification are necessary.
Do you see any value in having atomic
notification?
I have the following situation before my eyes:
Driver A gets an interrupt and needs (to service that
interrupt) the cpu to guarantee a latency of X because the
device is a bit icky.
Now, in that situation, if we don't immediately (without scheduling in
between) notify the system to be in that latency-mode the driver won't
function properly. Is this a realistic scenario?
At the moment we only have process context notification and only 2
listeners.
I think providing for atomic as well as "relaxed" notification could be
useful.
If atomic notification is deemed unnecessary, I have no
problems to just use schedule_work() in update request.
Anyway, it is probably best to split this. I.e. first make
update_request callable from atomic contexts with doing the
schedule_work in update_request and then
as an add on provide for constraints_objects with atomic notifications.
Flo
On Wed, 2010-06-09 at 14:16 +0200, Florian Mickler wrote:
> That was also my first idea, but then I thought about qos and thought
> atomic notification are necessary.
> Do you see any value in having atomic notification?
>
> I have the following situation before my eyes:
>
> Driver A gets an interrupt and needs (to service that
> interrupt) the cpu to guarantee a latency of X because the
> device is a bit icky.
>
> Now, in that situation, if we don't immediately (without scheduling in
> between) notify the system to be in that latency-mode the driver won't
> function properly. Is this a realistic scene?
>
> At the moment we only have process context notification and only 2
> listeners.
>
> I think providing for atomic as well as "relaxed" notification could be
> useful.
>
> If atomic notification is deemed unnecessary, I have no
> problems to just use schedule_work() in update request.
> Anyway, it is probably best to split this. I.e. first make
> update_request callable from atomic contexts with doing the
> schedule_work in update_request and then
> as an add on provide for constraints_objects with atomic notifications.
Well I remember http://thread.gmane.org/gmane.linux.kernel/979935 where
Mark renamed things to "request" which seems to imply to me more of a
"please do this" than "I NEED IT NOW!!!!!".
johannes
On Wed, 2010-06-09 at 11:46 +0200, Thomas Gleixner wrote:
> On Wed, 9 Jun 2010, [email protected] wrote:
>
> > The pm_qos framework has to guarantee atomic notifications so that
> > drivers can request and release constraints at all times while no races
> > occur.
> >
> > In order to avoid implementing a secondary notification chain in which
> > listeners might sleep, we demand that every listener implements it's
> > notification so that it can run in interrupt context. The listener is in
> > a better position to know if races are harmful or not.
>
> That breaks existing notifiers.
Right ... and we don't want to do that. Which is why I think we just
use blocking notifiers as they are but allow for creating atomic ones
which may use atomic update sites.
This is the solution I have in my tree ... it preserves existing
semantics because all the update and add sites are in user context, but
it allows for notifiers with purely atomic semantics and will do a
runtime warn if anyone tries to use them in a blocking fashion (or if
anyone adds an atomic update to an existing blocking notifier).
James
---
>From 7377f3e38b1f64f4149be7e7975f63e745540661 Mon Sep 17 00:00:00 2001
From: James Bottomley <[email protected]>
Date: Tue, 8 Jun 2010 14:55:35 -0400
Subject: [PATCH] pm_qos: add atomic notifier
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index d823cc0..dd1b5eb 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -28,6 +28,7 @@ void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);
int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
+int pm_qos_add_atomic_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
#endif
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index c9997f8..5b3ba68 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -55,7 +55,8 @@ enum pm_qos_type {
struct pm_qos_object {
struct plist_head requests;
- struct blocking_notifier_head *notifiers;
+ struct blocking_notifier_head *blocking_notifiers;
+ struct atomic_notifier_head *atomic_notifiers;
struct miscdevice pm_qos_power_miscdev;
char *name;
s32 default_value;
@@ -66,7 +67,7 @@ static struct pm_qos_object null_pm_qos;
static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_object cpu_dma_pm_qos = {
.requests = {_PLIST_HEAD_INIT(cpu_dma_pm_qos.requests)},
- .notifiers = &cpu_dma_lat_notifier,
+ .blocking_notifiers = &cpu_dma_lat_notifier,
.name = "cpu_dma_latency",
.default_value = 2000 * USEC_PER_SEC,
.type = PM_QOS_MIN,
@@ -75,7 +76,7 @@ static struct pm_qos_object cpu_dma_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_object network_lat_pm_qos = {
.requests = {_PLIST_HEAD_INIT(network_lat_pm_qos.requests)},
- .notifiers = &network_lat_notifier,
+ .blocking_notifiers = &network_lat_notifier,
.name = "network_latency",
.default_value = 2000 * USEC_PER_SEC,
.type = PM_QOS_MIN
@@ -85,7 +86,7 @@ static struct pm_qos_object network_lat_pm_qos = {
static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_object network_throughput_pm_qos = {
.requests = {_PLIST_HEAD_INIT(network_throughput_pm_qos.requests)},
- .notifiers = &network_throughput_notifier,
+ .blocking_notifiers = &network_throughput_notifier,
.name = "network_throughput",
.default_value = 0,
.type = PM_QOS_MAX,
@@ -131,6 +132,17 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
}
}
+static void pm_qos_call_notifiers(struct pm_qos_object *o,
+ unsigned long curr_value)
+{
+ if (o->atomic_notifiers)
+ atomic_notifier_call_chain(o->atomic_notifiers,
+ curr_value, NULL);
+ else
+ blocking_notifier_call_chain(o->blocking_notifiers,
+ curr_value, NULL);
+}
+
static void update_target(struct pm_qos_object *o, struct plist_node *node,
int del)
{
@@ -147,13 +159,29 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
spin_unlock_irqrestore(&pm_qos_lock, flags);
if (prev_value != curr_value)
- blocking_notifier_call_chain(o->notifiers,
- (unsigned long)curr_value,
- NULL);
+ pm_qos_call_notifiers(o, curr_value);
+}
+
+static int pm_qos_might_sleep(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ might_sleep();
+ return NOTIFY_DONE;
}
-static int register_pm_qos_misc(struct pm_qos_object *qos)
+static struct notifier_block pm_qos_might_sleep_notifier = {
+ .notifier_call = pm_qos_might_sleep,
+};
+
+static int register_pm_qos(struct pm_qos_object *qos)
{
+ int retval = 0;
+
+ if (qos->blocking_notifiers)
+ retval = blocking_notifier_chain_register(
+ qos->blocking_notifiers, &pm_qos_might_sleep_notifier);
+ if (retval)
+ return retval;
qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
qos->pm_qos_power_miscdev.name = qos->name;
qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
@@ -302,8 +330,12 @@ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
+ /* someone tried to register a blocking notifier to a
+ * qos object that only supports atomic ones */
+ BUG_ON(!pm_qos_array[pm_qos_class]->blocking_notifiers);
+
retval = blocking_notifier_chain_register(
- pm_qos_array[pm_qos_class]->notifiers, notifier);
+ pm_qos_array[pm_qos_class]->blocking_notifiers, notifier);
return retval;
}
@@ -319,15 +351,41 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
*/
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
- int retval;
- retval = blocking_notifier_chain_unregister(
- pm_qos_array[pm_qos_class]->notifiers, notifier);
-
- return retval;
+ if (pm_qos_array[pm_qos_class]->atomic_notifiers)
+ return atomic_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->atomic_notifiers,
+ notifier);
+ else
+ return blocking_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->blocking_notifiers,
+ notifier);
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
+/**
+ * pm_qos_add_atomic_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets
+ * called upon changes to the pm_qos_class target value. The notifier
+ * may be called from atomic context. use @pm_qos_remove_notifier to
+ * unregister.
+ */
+int pm_qos_add_atomic_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ if (pm_qos_array[pm_qos_class]->atomic_notifiers)
+ return atomic_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->atomic_notifiers,
+ notifier);
+ else
+ return blocking_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->blocking_notifiers,
+ notifier);
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_atomic_notifier);
+
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
long pm_qos_class;
@@ -391,17 +449,17 @@ static int __init pm_qos_power_init(void)
{
int ret = 0;
- ret = register_pm_qos_misc(&cpu_dma_pm_qos);
+ ret = register_pm_qos(&cpu_dma_pm_qos);
if (ret < 0) {
printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
return ret;
}
- ret = register_pm_qos_misc(&network_lat_pm_qos);
+ ret = register_pm_qos(&network_lat_pm_qos);
if (ret < 0) {
printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
return ret;
}
- ret = register_pm_qos_misc(&network_throughput_pm_qos);
+ ret = register_pm_qos(&network_throughput_pm_qos);
if (ret < 0)
printk(KERN_ERR
"pm_qos_param: network_throughput setup failed\n");
On Wed, 09 Jun 2010 08:38:39 -0400
James Bottomley <[email protected]> wrote:
> On Wed, 2010-06-09 at 11:46 +0200, Thomas Gleixner wrote:
> > On Wed, 9 Jun 2010, [email protected] wrote:
> >
> > > The pm_qos framework has to guarantee atomic notifications so that
> > > drivers can request and release constraints at all times while no races
> > > occur.
> > >
> > > In order to avoid implementing a secondary notification chain in which
> > > listeners might sleep, we demand that every listener implements it's
> > > notification so that it can run in interrupt context. The listener is in
> > > a better position to know if races are harmful or not.
> >
> > That breaks existing notifiers.
>
> Right ... and we don't want to do that. Which is why I think we just
> use blocking notifiers as they are but allow for creating atomic ones
> which may use atomic update sites.
>
> This is the solution I have in my tree ... it preserves existing
> semantics because all the update and add sites are in user context, but
> it allows for notifiers with purely atomic semantics and will do a
> runtime warn if anyone tries to use them in a blocking fashion (or if
> anyone adds an atomic update to an existing blocking notifier).
>
> James
>
> @@ -302,8 +330,12 @@ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
> {
> int retval;
>
> + /* someone tried to register a blocking notifier to a
> + * qos object that only supports atomic ones */
> + BUG_ON(!pm_qos_array[pm_qos_class]->blocking_notifiers);
> +
> retval = blocking_notifier_chain_register(
> - pm_qos_array[pm_qos_class]->notifiers, notifier);
> + pm_qos_array[pm_qos_class]->blocking_notifiers, notifier);
>
> return retval;
> }
Why not:
retval = 1;
if(pm_qos_array[pm_qos_class]->blocking_notifiers)
retval = blocking_notifier_chain_register(..
else
WARN();
return retval;
That way, the offending programmer could eventually fix it, without
having to reboot?
> @@ -319,15 +351,41 @@ EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
The rest looks good to me. I posted another variant using
schedule_work().
As currently no atomic notifications are needed and critical operations
probably have to check pm_qos_get_request manually anyway to be sure
this would be an alternative. Whatever.
Cheers,
Flo
On Wed, 2010-06-09 at 17:27 +0200, Florian Mickler wrote:
> On Wed, 09 Jun 2010 08:38:39 -0400
> James Bottomley <[email protected]> wrote:
>
> > On Wed, 2010-06-09 at 11:46 +0200, Thomas Gleixner wrote:
> > > On Wed, 9 Jun 2010, [email protected] wrote:
> > >
> > > > The pm_qos framework has to guarantee atomic notifications so that
> > > > drivers can request and release constraints at all times while no races
> > > > occur.
> > > >
> > > > In order to avoid implementing a secondary notification chain in which
> > > > listeners might sleep, we demand that every listener implements it's
> > > > notification so that it can run in interrupt context. The listener is in
> > > > a better position to know if races are harmful or not.
> > >
> > > That breaks existing notifiers.
> >
> > Right ... and we don't want to do that. Which is why I think we just
> > use blocking notifiers as they are but allow for creating atomic ones
> > which may use atomic update sites.
> >
> > This is the solution I have in my tree ... it preserves existing
> > semantics because all the update and add sites are in user context, but
> > it allows for notifiers with purely atomic semantics and will do a
> > runtime warn if anyone tries to use them in a blocking fashion (or if
> > anyone adds an atomic update to an existing blocking notifier).
> >
> > James
>
>
> >
> > @@ -302,8 +330,12 @@ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
> > {
> > int retval;
> >
> > + /* someone tried to register a blocking notifier to a
> > + * qos object that only supports atomic ones */
> > + BUG_ON(!pm_qos_array[pm_qos_class]->blocking_notifiers);
> > +
> > retval = blocking_notifier_chain_register(
> > - pm_qos_array[pm_qos_class]->notifiers, notifier);
> > + pm_qos_array[pm_qos_class]->blocking_notifiers, notifier);
> >
> > return retval;
> > }
>
> Why not:
>
> retval = 1;
> if(pm_qos_array[pm_qos_class]->blocking_notifiers)
> retval = blocking_notifier_chain_register(..
> else
> WARN();
> return retval;
>
> That way, the offending programmer could eventually fix it, without
> having to reboot?
Because there are no current users that will trip the BUG_ON ... and we
want to keep it that way. Code doesn't go into the kernel if it BUGs on
boot.
The point about failing hard for an abuse of a kernel API isn't to trap
current abusers because you fix those before you add it. It's to
prevent future abuse. If your kernel BUGs under test you tend to fix
the code, so it becomes impossible for anyone to add any users which
abuse the API in this fashion.
James
On Wed, 09 Jun 2010 14:27:05 +0200
Johannes Berg <[email protected]> wrote:
> On Wed, 2010-06-09 at 14:16 +0200, Florian Mickler wrote:
>
> > That was also my first idea, but then I thought about qos and thought
> > atomic notification are necessary.
> > Do you see any value in having atomic notification?
> >
> > I have the following situation before my eyes:
> >
> > Driver A gets an interrupt and needs (to service that
> > interrupt) the cpu to guarantee a latency of X because the
> > device is a bit icky.
> >
> > Now, in that situation, if we don't immediately (without scheduling in
> > between) notify the system to be in that latency-mode the driver won't
> > function properly. Is this a realistic scene?
> >
> > At the moment we only have process context notification and only 2
> > listeners.
> >
> > I think providing for atomic as well as "relaxed" notification could be
> > useful.
> >
> > If atomic notification is deemed unnecessary, I have no
> > problems to just use schedule_work() in update request.
> > Anyway, it is probably best to split this. I.e. first make
> > update_request callable from atomic contexts with doing the
> > schedule_work in update_request and then
> > as an add on provide for constraints_objects with atomic notifications.
>
> Well I remember http://thread.gmane.org/gmane.linux.kernel/979935 where
> Mark renamed things to "request" which seems to imply to me more of a
> "please do this" than "I NEED IT NOW!!!!!".
>
> johannes
Yes. I just posted a version which uses schedule_work().
Just FYI, James has also posted his version which uses either a blocking
or an atomic notifier chain.
http://article.gmane.org/gmane.linux.kernel/996813
Cheers,
Flo
On Wed, 09 Jun 2010 11:32:26 -0400
James Bottomley <[email protected]> wrote:
> > > @@ -302,8 +330,12 @@ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
> > > {
> > > int retval;
> > >
> > > + /* someone tried to register a blocking notifier to a
> > > + * qos object that only supports atomic ones */
> > > + BUG_ON(!pm_qos_array[pm_qos_class]->blocking_notifiers);
> > > +
> > > retval = blocking_notifier_chain_register(
> > > - pm_qos_array[pm_qos_class]->notifiers, notifier);
> > > + pm_qos_array[pm_qos_class]->blocking_notifiers, notifier);
> > >
> > > return retval;
> > > }
> >
> > Why not:
> >
> > retval = 1;
> > if(pm_qos_array[pm_qos_class]->blocking_notifiers)
> > retval = blocking_notifier_chain_register(..
> > else
> > WARN();
> > return retval;
> >
> > That way, the offending programmer could eventually fix it, without
> > having to reboot?
>
> Because there are no current users that will trip the BUG_ON ... and we
> want to keep it that way. Code doesn't go into the kernel if it BUGs on
> boot.
>
> The point about failing hard for an abuse of a kernel API isn't to trap
> current abusers because you fix those before you add it. It's to
> prevent future abuse. If your kernel BUGs under test you tend to fix
> the code, so it becomes impossible for anyone to add any users which
> abuse the API in this fashion.
>
> James
>
There are actually people who ignore WARN()ings when submitting code??
....thinking about it... Yes, that may be possible.
Cheers,
Flo
--
Only two things are infinite, the universe and human stupidity, and I'm
not sure about the former. Albert Einstein