Reentrancy into the clock framework from the clk.h api is necessary for
clocks that are prepared and unprepared via i2c_transfer (which includes
many PMICs and discrete audio chips) as well as for several other use
cases.
This patch implements reentrancy by adding two global atomic_t's which
track the context of the current caller. Context in this case is the
return value from get_current(). One context variable is for slow
operations protected by the prepare_lock mutex and the other is for fast
operations protected by the enable_lock spinlock.
The clk.h api implementations are modified to first see if the relevant
global lock is already held and if so compare the global context (set by
whoever is holding the lock) against their own context (via a call to
get_current()). If the two match then this function is a nested call
from the one already holding the lock and we proceed. If the context
does not match then proceed to call mutex_lock and busy-wait for the
existing task to complete.
This patch does not increase concurrency for unrelated calls into the
clock framework. Instead it simply allows reentrancy by the single task
which is currently holding the global clock framework lock.
Signed-off-by: Mike Turquette <[email protected]>
Cc: Rajagopal Venkat <[email protected]>
Cc: David Brown <[email protected]>
Cc: Ulf Hansson <[email protected]>
Cc: Laurent Pinchart <[email protected]>
---
drivers/clk/clk.c | 255 ++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 186 insertions(+), 69 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff..17432a5 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,9 +19,12 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
+static atomic_t prepare_context;
+static atomic_t enable_context;
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
@@ -456,27 +459,6 @@ unsigned int __clk_get_prepare_count(struct clk *clk)
return !clk ? 0 : clk->prepare_count;
}
-unsigned long __clk_get_rate(struct clk *clk)
-{
- unsigned long ret;
-
- if (!clk) {
- ret = 0;
- goto out;
- }
-
- ret = clk->rate;
-
- if (clk->flags & CLK_IS_ROOT)
- goto out;
-
- if (!clk->parent)
- ret = 0;
-
-out:
- return ret;
-}
-
unsigned long __clk_get_flags(struct clk *clk)
{
return !clk ? 0 : clk->flags;
@@ -566,6 +548,35 @@ struct clk *__clk_lookup(const char *name)
return NULL;
}
+/*** locking & reentrancy ***/
+
+static void clk_fwk_lock(void)
+{
+ /* hold the framework-wide lock, context == NULL */
+ mutex_lock(&prepare_lock);
+
+ /* set context for any reentrant calls */
+ atomic_set(&prepare_context, (int) get_current());
+}
+
+static void clk_fwk_unlock(void)
+{
+ /* clear the context */
+ atomic_set(&prepare_context, 0);
+
+ /* release the framework-wide lock, context == NULL */
+ mutex_unlock(&prepare_lock);
+}
+
+static bool clk_is_reentrant(void)
+{
+ if (mutex_is_locked(&prepare_lock))
+ if ((void *) atomic_read(&prepare_context) == get_current())
+ return true;
+
+ return false;
+}
+
/*** clk api ***/
void __clk_unprepare(struct clk *clk)
@@ -600,9 +611,15 @@ void __clk_unprepare(struct clk *clk)
*/
void clk_unprepare(struct clk *clk)
{
- mutex_lock(&prepare_lock);
+ /* re-enter if call is from the same context */
+ if (clk_is_reentrant()) {
+ __clk_unprepare(clk);
+ return;
+ }
+
+ clk_fwk_lock();
__clk_unprepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_fwk_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
@@ -648,10 +665,16 @@ int clk_prepare(struct clk *clk)
{
int ret;
- mutex_lock(&prepare_lock);
- ret = __clk_prepare(clk);
- mutex_unlock(&prepare_lock);
+ /* re-enter if call is from the same context */
+ if (clk_is_reentrant()) {
+ ret = __clk_prepare(clk);
+ goto out;
+ }
+ clk_fwk_lock();
+ ret = __clk_prepare(clk);
+ clk_fwk_unlock();
+out:
return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);
@@ -692,8 +715,27 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
+ /* this call re-enters if it is from the same context */
+ if (spin_is_locked(&enable_lock)) {
+ if ((void *) atomic_read(&enable_context) == get_current()) {
+ __clk_disable(clk);
+ return;
+ }
+ }
+
+ /* hold the framework-wide lock, context == NULL */
spin_lock_irqsave(&enable_lock, flags);
+
+ /* set context for any reentrant calls */
+ atomic_set(&enable_context, (int) get_current());
+
+ /* disable the clock(s) */
__clk_disable(clk);
+
+ /* clear the context */
+ atomic_set(&enable_context, 0);
+
+ /* release the framework-wide lock, context == NULL */
spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
@@ -745,10 +787,29 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret;
+ /* this call re-enters if it is from the same context */
+ if (spin_is_locked(&enable_lock)) {
+ if ((void *) atomic_read(&enable_context) == get_current()) {
+ ret = __clk_enable(clk);
+ goto out;
+ }
+ }
+
+ /* hold the framework-wide lock, context == NULL */
spin_lock_irqsave(&enable_lock, flags);
+
+ /* set context for any reentrant calls */
+ atomic_set(&enable_context, (int) get_current());
+
+ /* enable the clock(s) */
ret = __clk_enable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ /* clear the context */
+ atomic_set(&enable_context, 0);
+
+ /* release the framework-wide lock, context == NULL */
+ spin_unlock_irqrestore(&enable_lock, flags);
+out:
return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
@@ -792,10 +853,17 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long ret;
- mutex_lock(&prepare_lock);
+ /* this call re-enters if it is from the same context */
+ if (clk_is_reentrant()) {
+ ret = __clk_round_rate(clk, rate);
+ goto out;
+ }
+
+ clk_fwk_lock();
ret = __clk_round_rate(clk, rate);
- mutex_unlock(&prepare_lock);
+ clk_fwk_unlock();
+out:
return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
@@ -877,6 +945,30 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
__clk_recalc_rates(child, msg);
}
+unsigned long __clk_get_rate(struct clk *clk)
+{
+ unsigned long ret;
+
+ if (!clk) {
+ ret = 0;
+ goto out;
+ }
+
+ if (clk->flags & CLK_GET_RATE_NOCACHE)
+ __clk_recalc_rates(clk, 0);
+
+ ret = clk->rate;
+
+ if (clk->flags & CLK_IS_ROOT)
+ goto out;
+
+ if (!clk->parent)
+ ret = 0;
+
+out:
+ return ret;
+}
+
/**
* clk_get_rate - return the rate of clk
* @clk: the clk whose rate is being returned
@@ -889,14 +981,22 @@ unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
- mutex_lock(&prepare_lock);
+ /*
+ * FIXME - any locking here seems heavy weight
+ * can clk->rate be replaced with an atomic_t?
+ * same logic can likely be applied to prepare_count & enable_count
+ */
- if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
- __clk_recalc_rates(clk, 0);
+ if (clk_is_reentrant()) {
+ rate = __clk_get_rate(clk);
+ goto out;
+ }
+ clk_fwk_lock();
rate = __clk_get_rate(clk);
- mutex_unlock(&prepare_lock);
+ clk_fwk_unlock();
+out:
return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
@@ -1073,6 +1173,39 @@ static void clk_change_rate(struct clk *clk)
clk_change_rate(child);
}
+int __clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ int ret = 0;
+ struct clk *top, *fail_clk;
+
+ /* bail early if nothing to do */
+ if (rate == clk->rate)
+ return 0;
+
+ if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
+ return -EBUSY;
+ }
+
+ /* calculate new rates and get the topmost changed clock */
+ top = clk_calc_new_rates(clk, rate);
+ if (!top)
+ return -EINVAL;
+
+ /* notify that we are about to change rates */
+ fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+ if (fail_clk) {
+ pr_warn("%s: failed to set %s rate\n", __func__,
+ fail_clk->name);
+ clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+ return -EBUSY;
+ }
+
+ /* change the rates */
+ clk_change_rate(top);
+
+ return ret;
+}
+
/**
* clk_set_rate - specify a new rate for clk
* @clk: the clk whose rate is being changed
@@ -1096,44 +1229,18 @@ static void clk_change_rate(struct clk *clk)
*/
int clk_set_rate(struct clk *clk, unsigned long rate)
{
- struct clk *top, *fail_clk;
int ret = 0;
- /* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
-
- /* bail early if nothing to do */
- if (rate == clk->rate)
- goto out;
-
- if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
- ret = -EBUSY;
- goto out;
- }
-
- /* calculate new rates and get the topmost changed clock */
- top = clk_calc_new_rates(clk, rate);
- if (!top) {
- ret = -EINVAL;
- goto out;
- }
-
- /* notify that we are about to change rates */
- fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
- if (fail_clk) {
- pr_warn("%s: failed to set %s rate\n", __func__,
- fail_clk->name);
- clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
- ret = -EBUSY;
+ if (clk_is_reentrant()) {
+ ret = __clk_set_rate(clk, rate);
goto out;
}
- /* change the rates */
- clk_change_rate(top);
+ clk_fwk_lock();
+ ret = __clk_set_rate(clk, rate);
+ clk_fwk_unlock();
out:
- mutex_unlock(&prepare_lock);
-
return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
@@ -1148,10 +1255,16 @@ struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
- mutex_lock(&prepare_lock);
+ if (clk_is_reentrant()) {
+ parent = __clk_get_parent(clk);
+ goto out;
+ }
+
+ clk_fwk_lock();
parent = __clk_get_parent(clk);
- mutex_unlock(&prepare_lock);
+ clk_fwk_unlock();
+out:
return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);
@@ -1330,6 +1443,7 @@ out:
int clk_set_parent(struct clk *clk, struct clk *parent)
{
int ret = 0;
+ bool reenter;
if (!clk || !clk->ops)
return -EINVAL;
@@ -1337,8 +1451,10 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
if (!clk->ops->set_parent)
return -ENOSYS;
- /* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ reenter = clk_is_reentrant();
+
+ if (!reenter)
+ clk_fwk_lock();
if (clk->parent == parent)
goto out;
@@ -1367,7 +1483,8 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
__clk_reparent(clk, parent);
out:
- mutex_unlock(&prepare_lock);
+ if (!reenter)
+ clk_fwk_unlock();
return ret;
}
--
1.7.10.4
Hi Mike,
Thanks for the patch.
Please see below for a couple of comments.
On Wednesday 27 March 2013 00:09:43 Mike Turquette wrote:
> Reentrancy into the clock framework from the clk.h api is necessary for
> clocks that are prepared and unprepared via i2c_transfer (which includes
> many PMICs and discrete audio chips) as well as for several other use
> cases.
>
> This patch implements reentrancy by adding two global atomic_t's which
> track the context of the current caller. Context in this case is the
> return value from get_current(). One context variable is for slow
> operations protected by the prepare_mutex and the other is for fast
> operations protected by the enable_lock spinlock.
>
> The clk.h api implementations are modified to first see if the relevant
> global lock is already held and if so compare the global context (set by
> whoever is holding the lock) against their own context (via a call to
> get_current()). If the two match then this function is a nested call
> from the one already holding the lock and we procede. If the context
> does not match then procede to call mutex_lock and busy-wait for the
> existing task to complete.
>
> This patch does not increase concurrency for unrelated calls into the
> clock framework. Instead it simply allows reentrancy by the single task
> which is currently holding the global clock framework lock.
>
> Signed-off-by: Mike Turquette <[email protected]>
> Cc: Rajagopal Venkat <[email protected]>
> Cc: David Brown <[email protected]>
> Cc: Ulf Hansson <[email protected]>
> Cc: Laurent Pinchart <[email protected]>
> ---
> drivers/clk/clk.c | 255 +++++++++++++++++++++++++++++++++++---------------
> 1 file changed, 186 insertions(+), 69 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 5e8ffff..17432a5 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -19,9 +19,12 @@
> #include <linux/of.h>
> #include <linux/device.h>
> #include <linux/init.h>
> +#include <linux/sched.h>
>
> static DEFINE_SPINLOCK(enable_lock);
> static DEFINE_MUTEX(prepare_lock);
> +static atomic_t prepare_context;
> +static atomic_t enable_context;
>
> static HLIST_HEAD(clk_root_list);
> static HLIST_HEAD(clk_orphan_list);
[snip]
> @@ -566,6 +548,35 @@ struct clk *__clk_lookup(const char *name)
> return NULL;
> }
>
> +/*** locking & reentrancy ***/
> +
> +static void clk_fwk_lock(void)
> +{
> + /* hold the framework-wide lock, context == NULL */
> + mutex_lock(&prepare_lock);
> +
> + /* set context for any reentrant calls */
> + atomic_set(&prepare_context, (int) get_current());
Won't that break on 64-bit architectures with sizeof(void *) != sizeof(int) ?
I wonder if it would make sense to abstract these operations in a generic
recursive mutex. Given that it would delay this patch past v3.10 I won't push
for that.
> +}
> +
> +static void clk_fwk_unlock(void)
> +{
> + /* clear the context */
> + atomic_set(&prepare_context, 0);
> +
> + /* release the framework-wide lock, context == NULL */
> + mutex_unlock(&prepare_lock);
> +}
> +
> +static bool clk_is_reentrant(void)
> +{
> + if (mutex_is_locked(&prepare_lock))
> + if ((void *) atomic_read(&prepare_context) == get_current())
> + return true;
> +
> + return false;
> +}
> +
> /*** clk api ***/
>
> void __clk_unprepare(struct clk *clk)
[snip]
> @@ -877,6 +945,30 @@ static void __clk_recalc_rates(struct clk *clk,
> unsigned long msg) __clk_recalc_rates(child, msg);
> }
>
> +unsigned long __clk_get_rate(struct clk *clk)
> +{
> + unsigned long ret;
> +
> + if (!clk) {
> + ret = 0;
> + goto out;
You could return 0 directly here.
> + }
> +
> + if (clk->flags & CLK_GET_RATE_NOCACHE)
> + __clk_recalc_rates(clk, 0);
> +
> + ret = clk->rate;
> +
> + if (clk->flags & CLK_IS_ROOT)
> + goto out;
And return ret here.
> +
> + if (!clk->parent)
> + ret = 0;
> +
> +out:
> + return ret;
> +}
> +
> /**
> * clk_get_rate - return the rate of clk
> * @clk: the clk whose rate is being returned
> @@ -889,14 +981,22 @@ unsigned long clk_get_rate(struct clk *clk)
> {
> unsigned long rate;
>
> - mutex_lock(&prepare_lock);
> + /*
> + * FIXME - any locking here seems heavy weight
> + * can clk->rate be replaced with an atomic_t?
> + * same logic can likely be applied to prepare_count & enable_count
> + */
>
> - if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
> - __clk_recalc_rates(clk, 0);
> + if (clk_is_reentrant()) {
> + rate = __clk_get_rate(clk);
> + goto out;
You can return directly here as well.
> + }
>
> + clk_fwk_lock();
> rate = __clk_get_rate(clk);
> - mutex_unlock(&prepare_lock);
> + clk_fwk_unlock();
>
> +out:
> return rate;
> }
> EXPORT_SYMBOL_GPL(clk_get_rate);
> @@ -1073,6 +1173,39 @@ static void clk_change_rate(struct clk *clk)
> clk_change_rate(child);
> }
>
> +int __clk_set_rate(struct clk *clk, unsigned long rate)
> +{
> + int ret = 0;
> + struct clk *top, *fail_clk;
> +
> + /* bail early if nothing to do */
> + if (rate == clk->rate)
> + return 0;
> +
> + if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
> + return -EBUSY;
> + }
Braces are not necessary.
> + /* calculate new rates and get the topmost changed clock */
> + top = clk_calc_new_rates(clk, rate);
> + if (!top)
> + return -EINVAL;
> +
> + /* notify that we are about to change rates */
> + fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
> + if (fail_clk) {
> + pr_warn("%s: failed to set %s rate\n", __func__,
> + fail_clk->name);
> + clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
> + return -EBUSY;
> + }
> +
> + /* change the rates */
> + clk_change_rate(top);
> +
> + return ret;
> +}
> +
> /**
> * clk_set_rate - specify a new rate for clk
> * @clk: the clk whose rate is being changed
--
Regards,
Laurent Pinchart
On Wed, 27 Mar 2013, Mike Turquette wrote:
> Reentrancy into the clock framework from the clk.h api is necessary
> for clocks that are prepared and unprepared via i2c_transfer (which
> includes many PMICs and discrete audio chips) as well as for several
> other use cases.
That explanation sucks.
Why does an i2c clock need reentrancy? Just because it's i2c or what?
What exactly are the "several other use cases"?
Why do you need the spinlock side reentrant? If a clock is handled by
i2c it hardly can hold the spinlock over a i2c transfer.
A few pointers to code which needs this would be nice as well.
I'm refraining from reviewing the horrible implementation until I get
proper answers to the above questions.
Thanks,
tglx
On 27 March 2013 15:10, Thomas Gleixner <[email protected]> wrote:
> On Wed, 27 Mar 2013, Mike Turquette wrote:
>
>> Reentrancy into the clock framework from the clk.h api is necessary
>> for clocks that are prepared and unprepared via i2c_transfer (which
>> includes many PMICs and discrete audio chips) as well as for several
>> other use cases.
>
> That explanation sucks.
>
> Why does an i2c clock need reentrancy? Just because it's i2c or what?
I am in no way connected to this development but was just going through
your mail and i think i might know the answer why is this required.
Consider an example where an external chip has clock controller and has
bits which can be programmed to enable/disable clock. And this chip is
connected via spi/i2c to SoC.
clk_prepare(peripheral on external chip)
-> i2c_xfer(to write to external chips register)
-> clk_enable(i2c controller)
->controller-xfer-routine.. and finally we enable clk here...
Sorry if i am on the wrong side :)
--
viresh
Hi Thomas,
On Wednesday 27 March 2013 10:40:28 Thomas Gleixner wrote:
> On Wed, 27 Mar 2013, Mike Turquette wrote:
> > Reentrancy into the clock framework from the clk.h api is necessary
> > for clocks that are prepared and unprepared via i2c_transfer (which
> > includes many PMICs and discrete audio chips) as well as for several
> > other use cases.
>
> That explanation sucks.
>
> Why does an i2c clock need reentrancy? Just because it's i2c or what?
That's just an example, other clocks need reentrancy as well.
> What exactly are the "several other use cases"?
Please see below.
> Why do you need the spinlock side reentrant? If a clock is handled by
> i2c it hardly can hold the spinlock over a i2c transfer.
>
> A few pointers to code which needs this would be nice as well.
See for instance
http://git.linuxtv.org/pinchartl/media.git/commitdiff/c4abb868a4cb6ad33fde76a6383543f3bd6699b0
That commit exposes the XCLKA and XCLKB clock outputs of the OMAP3 ISP (camera
interface) as clock framework objects. The issue here is that the ISP has
several inputs clocks. One of them is a parent of XCLK[AB], and another one
controls access to the ISP registers (L4 bus clock). The ISP driver thus needs
to enable/disable and prepare/unprepare the L4 clock in the XCLK[AB]
enable/disable and prepare/unprepare operation handlers.
> I'm refraining from reviewing the horrible implementation until I get
> proper answers to the above questions.
--
Regards,
Laurent Pinchart
On 27 March 2013 10:55, Viresh Kumar <[email protected]> wrote:
> On 27 March 2013 15:10, Thomas Gleixner <[email protected]> wrote:
>> On Wed, 27 Mar 2013, Mike Turquette wrote:
>>
>>> Reentrancy into the clock framework from the clk.h api is necessary
>>> for clocks that are prepared and unprepared via i2c_transfer (which
>>> includes many PMICs and discrete audio chips) as well as for several
>>> other use cases.
>>
>> That explanation sucks.
>>
>> Why does an i2c clock need reentrancy? Just because it's i2c or what?
>
> I am noway connected to this development but was just going through
> your mail and i think i might know the answer why is this required.
>
> Consider an example where an external chip has clock controller and has
> bits which can be programmed to enable/disable clock. And this chip is
> connected via spi/i2c to SoC.
>
> clk_prepare(peripheral on external chip)
> -> i2c_xfer(to write to external chips register)
> -> clk_enable(i2c controller)
> ->controller-xfer-routine.. and finally we enable clk here...
>
>
> Sorry if i am on the wrong side :)
I agree with you Viresh. I guess Mike should update the commit message.
I would also like to add another reason why this is needed. For some
clks you would like to do pinctrl operations from a clk hw. But since
a pinctrl driver likely requires a clk to be prepared|enabled, we run
into a clk reentrant issue.
Kind regards
Ulf Hansson
>
> --
> viresh
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
On Wed, 27 Mar 2013, Ulf Hansson wrote:
> On 27 March 2013 10:55, Viresh Kumar <[email protected]> wrote:
> > On 27 March 2013 15:10, Thomas Gleixner <[email protected]> wrote:
> >> On Wed, 27 Mar 2013, Mike Turquette wrote:
> >>
> >>> Reentrancy into the clock framework from the clk.h api is necessary
> >>> for clocks that are prepared and unprepared via i2c_transfer (which
> >>> includes many PMICs and discrete audio chips) as well as for several
> >>> other use cases.
> >>
> >> That explanation sucks.
> >>
> >> Why does an i2c clock need reentrancy? Just because it's i2c or what?
> >
> > I am noway connected to this development but was just going through
> > your mail and i think i might know the answer why is this required.
> >
> > Consider an example where an external chip has clock controller and has
> > bits which can be programmed to enable/disable clock. And this chip is
> > connected via spi/i2c to SoC.
> >
> > clk_prepare(peripheral on external chip)
> > -> i2c_xfer(to write to external chips register)
> > -> clk_enable(i2c controller)
> > ->controller-xfer-routine.. and finally we enable clk here...
Which does not explain the whole issue:
clk_prepare() takes the mutex
clk_enable() takes the spinlock
That works today.
The issue arises, if you need to call clk_prepare(i2c) in the xfer
routine.
> >
> > Sorry if i am on the wrong side :)
Only slightly :)
> I agree with you Viresh. I guess Mike should update the commit message.
>
> I would also like add another reason to why this is needed. For some
> clks you would like to do pinctrl operations from a clk hw. But since
> a pinctrl driver likely requires a clk to be prepared|enabled, we run
> into a clk reentrant issue.
Fair enough. This all wants to go into the changelog, so we can
understand why we have this business.
Thanks,
tglx
On Wed, 27 Mar 2013, Mike Turquette wrote:
> +/*** locking & reentrancy ***/
> +
> +static void clk_fwk_lock(void)
This function name sucks as much as the whole implementation does.
> +{
> + /* hold the framework-wide lock, context == NULL */
> + mutex_lock(&prepare_lock);
> +
> + /* set context for any reentrant calls */
> + atomic_set(&prepare_context, (int) get_current());
And what's the point of the atomic here? There is no need for an
atomic if you hold the lock. Neither here nor on the reader side.
Aside of that, the cast to (int) and the one below to (void *) are
blatantly wrong on 64 bit.
> +}
> +
> +static void clk_fwk_unlock(void)
> +{
> + /* clear the context */
> + atomic_set(&prepare_context, 0);
> +
> + /* release the framework-wide lock, context == NULL */
> + mutex_unlock(&prepare_lock);
> +}
> +
> +static bool clk_is_reentrant(void)
> +{
> + if (mutex_is_locked(&prepare_lock))
> + if ((void *) atomic_read(&prepare_context) == get_current())
Mooo.
> + return true;
> +
> + return false;
> +}
Why the heck do you need this function?
Just to sprinkle all these ugly constructs into the code:
> - mutex_lock(&prepare_lock);
> + /* re-enter if call is from the same context */
> + if (clk_is_reentrant()) {
> + __clk_unprepare(clk);
> + return;
> + }
Sigh. Why not doing the obvious?
Step 1/2: Wrap locking in helper functions
+static void clk_prepare_lock(void)
+{
+ mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+ mutex_unlock(&prepare_lock);
+}
That way the whole change in the existing code boils down to:
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
...
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
Ditto for the spinlock.
And there is no pointless reshuffling of functions required.
Step 2/2: Implement reentrancy
+static struct task_struct *prepare_owner;
+static int prepare_refcnt;
static void clk_prepare_lock()
{
- mutex_lock(&prepare_lock);
+ if (!mutex_trylock(&prepare_lock)) {
+ if (prepare_owner == current) {
+ prepare_refcnt++;
+ return;
+ }
+ mutex_lock(&prepare_lock);
+ }
+ WARN_ON_ONCE(prepare_owner != NULL);
+ WARN_ON_ONCE(prepare_refcnt != 0);
+ prepare_owner = current;
+ prepare_refcnt = 1;
}
static void clk_prepare_unlock(void)
{
- mutex_unlock(&prepare_lock);
+ WARN_ON_ONCE(prepare_owner != current);
+ WARN_ON_ONCE(prepare_refcnt == 0);
+
+ if (--prepare_refcnt)
+ return;
+ prepare_owner = NULL;
+ mutex_unlock(&prepare_lock);
}
Ditto for the spinlock.
That step requires ZERO change to the functions. They simply work and
you don't need all this ugly reentrancy hackery.
Thanks,
tglx
Quoting Thomas Gleixner (2013-03-27 04:09:17)
> On Wed, 27 Mar 2013, Ulf Hansson wrote:
> > On 27 March 2013 10:55, Viresh Kumar <[email protected]> wrote:
> > > On 27 March 2013 15:10, Thomas Gleixner <[email protected]> wrote:
> > >> On Wed, 27 Mar 2013, Mike Turquette wrote:
> > >>
> > >>> Reentrancy into the clock framework from the clk.h api is necessary
> > >>> for clocks that are prepared and unprepared via i2c_transfer (which
> > >>> includes many PMICs and discrete audio chips) as well as for several
> > >>> other use cases.
> > >>
> > >> That explanation sucks.
> > >>
> > >> Why does an i2c clock need reentrancy? Just because it's i2c or what?
> > >
> > > I am noway connected to this development but was just going through
> > > your mail and i think i might know the answer why is this required.
> > >
> > > Consider an example where an external chip has clock controller and has
> > > bits which can be programmed to enable/disable clock. And this chip is
> > > connected via spi/i2c to SoC.
> > >
> > > clk_prepare(peripheral on external chip)
> > > -> i2c_xfer(to write to external chips register)
> > > -> clk_enable(i2c controller)
> > > ->controller-xfer-routine.. and finally we enable clk here...
>
> Which does not explain the whole issue:
>
> clk_prepare() takes the mutex
> clk_enable() takes the spinlock
>
> That works today.
>
> The issue arises, if you need to call clk_prepare(i2c) in the xfer
> routine.
>
The issue arises any time a clk_ops callback calls a function that
unwittingly re-enters the clock framework. I think the easiest example
to understand and perhaps the most common in practice is a clock which
is controlled via an i2c transfer.
Viresh's example makes the mistake of calling
clk_enable(i2c_controller), but it must also call
clk_prepare(i2c_controller) which is missing in the call graph above.
That nested call to clk_prepare is where the reentrancy comes from.
This has nothing to do with the prepare/enable locking split and leaves
that relationship intact.
> > >
> > > Sorry if i am on the wrong side :)
>
> Only slightly :)
>
> > I agree with you Viresh. I guess Mike should update the commit message.
> >
> > I would also like add another reason to why this is needed. For some
> > clks you would like to do pinctrl operations from a clk hw. But since
> > a pinctrl driver likely requires a clk to be prepared|enabled, we run
> > into a clk reentrant issue.
>
> Fair enough. This all wants to go into the changelog, so we can
> understand why we have this business.
>
I'll submit a v5 which I hope will end the pain and suffering this patch
has caused you.
Regards,
Mike
> Thanks,
>
> tglx
On Wed, 27 Mar 2013, Mike Turquette wrote:
> Thanks for the review Thomas. I will steal your code and call it my own
> in the next version.
Sure.
> In particular getting rid of the atomics makes things much nicer.
I'd say using the helper functions and not having all these
conditionals makes it really nice :)
Thanks,
tglx
On Wed, 27 Mar 2013, Mike Turquette wrote:
> Quoting Laurent Pinchart (2013-03-27 02:08:15)
> > I wonder if it would make sense to abstract these operations in a generic
> > recursive mutex. Given that it would delay this patch past v3.10 I won't push
> > for that.
> >
>
> Having a nice implementation of recursive mutexes would have saved me
> some time.
If you encapsulate stuff nicely like I suggested, then switching to a
generic version later on is a nobrainer.
Thanks,
tglx
On Wed, Mar 27, 2013 at 12:09:43AM -0700, Mike Turquette wrote:
> + /* set context for any reentrant calls */
> + atomic_set(&prepare_context, (int) get_current());
...
> + if (mutex_is_locked(&prepare_lock))
> + if ((void *) atomic_read(&prepare_context) == get_current())
> + return true;
If you really want to do it like this, then the _correct_ way to do it
is:
if (atomic_read(&prepare_context) == (int)get_current())
So that any effects from the cast are the same in both parts. Otherwise,
you will be running into the possibility that a cast could do something
like truncate the returned value, resulting in the test condition using
pointers always being false.
It's not that much of a problem on ARM, but it's a matter of good
programming discipline that such issues are avoided.
Even better would be to cast it to unsigned long - this is the value
which the atomic types use, and that is less likely to be truncated.
It also helps to avoid the possibility of compilers complaining about
a narrowing cast too - especially as the entire Linux kernel assumes
that pointers can be cast to unsigned long and back again with no loss.
This fifth attempt at allowing calls to the clk api to reenter splits
the last patch into two parts. The first patch abstracts out the
locking details into some helper functions and converts all of the
direct calls to the mutex and spinlock api to use these helpers.
The second patch introduces the reentrancy logic into these helper
functions. Fundamentally the reentrancy logic hasn't changed since v4,
but fixing casting bugs, removing unnecessary barriers and better design
& beautification separate this approach from the last one.
Changes tested on top of the latest clk-next branch with an OMAP4430
Panda board.
Mike Turquette (2):
clk: abstract locking out into helper functions
clk: allow reentrant calls into the clk framework
drivers/clk/clk.c | 136 ++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 98 insertions(+), 38 deletions(-)
--
1.7.10.4
Create locking helpers for the global mutex and global spinlock. The
definitions of these helpers will be expanded upon in the next patch
which introduces reentrancy into the locking scheme.
Signed-off-by: Mike Turquette <[email protected]>
Cc: Rajagopal Venkat <[email protected]>
Cc: David Brown <[email protected]>
Cc: Ulf Hansson <[email protected]>
Cc: Laurent Pinchart <[email protected]>
Cc: Thomas Gleixner <[email protected]>
---
drivers/clk/clk.c | 97 ++++++++++++++++++++++++++++++++---------------------
1 file changed, 59 insertions(+), 38 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff..bea47d5 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,27 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/*** locking ***/
+static void clk_prepare_lock(void)
+{
+ mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+ mutex_unlock(&prepare_lock);
+}
+
+static void clk_enable_lock(unsigned long *flags)
+{
+ spin_lock_irqsave(&enable_lock, *flags);
+}
+
+static void clk_enable_unlock(unsigned long *flags)
+{
+ spin_unlock_irqrestore(&enable_lock, *flags);
+}
+
/*** debugfs support ***/
#ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +90,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
seq_printf(s, "---------------------------------------------------------------------\n");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
@@ -77,7 +98,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
@@ -130,7 +151,7 @@ static int clk_dump(struct seq_file *s, void *data)
seq_printf(s, "{");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
@@ -144,7 +165,7 @@ static int clk_dump(struct seq_file *s, void *data)
clk_dump_subtree(s, c, 0);
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
seq_printf(s, "}");
return 0;
@@ -316,7 +337,7 @@ static int __init clk_debug_init(void)
if (!orphandir)
return -ENOMEM;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +347,7 @@ static int __init clk_debug_init(void)
inited = 1;
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
@@ -372,7 +393,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
- spin_lock_irqsave(&enable_lock, flags);
+ clk_enable_lock(&flags);
if (clk->enable_count)
goto unlock_out;
@@ -393,7 +414,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
}
unlock_out:
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(&flags);
out:
return;
@@ -403,7 +424,7 @@ static int clk_disable_unused(void)
{
struct clk *clk;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
@@ -417,7 +438,7 @@ static int clk_disable_unused(void)
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_unprepare_unused_subtree(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
@@ -600,9 +621,9 @@ void __clk_unprepare(struct clk *clk)
*/
void clk_unprepare(struct clk *clk)
{
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
__clk_unprepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
@@ -648,9 +669,9 @@ int clk_prepare(struct clk *clk)
{
int ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_prepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -692,9 +713,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
- spin_lock_irqsave(&enable_lock, flags);
+ clk_enable_lock(&flags);
__clk_disable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(&flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
@@ -745,9 +766,9 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret;
- spin_lock_irqsave(&enable_lock, flags);
+ clk_enable_lock(&flags);
ret = __clk_enable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(&flags);
return ret;
}
@@ -792,9 +813,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_round_rate(clk, rate);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -889,13 +910,13 @@ unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(clk, 0);
rate = __clk_get_rate(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return rate;
}
@@ -1100,7 +1121,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
int ret = 0;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* bail early if nothing to do */
if (rate == clk->rate)
@@ -1132,7 +1153,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
clk_change_rate(top);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1148,9 +1169,9 @@ struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
parent = __clk_get_parent(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return parent;
}
@@ -1294,19 +1315,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
__clk_prepare(parent);
/* FIXME replace with clk_is_enabled(clk) someday */
- spin_lock_irqsave(&enable_lock, flags);
+ clk_enable_lock(&flags);
if (clk->enable_count)
__clk_enable(parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(&flags);
/* change clock input source */
ret = clk->ops->set_parent(clk->hw, i);
/* clean up old prepare and enable */
- spin_lock_irqsave(&enable_lock, flags);
+ clk_enable_lock(&flags);
if (clk->enable_count)
__clk_disable(old_parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(&flags);
if (clk->prepare_count)
__clk_unprepare(old_parent);
@@ -1338,7 +1359,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
return -ENOSYS;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk->parent == parent)
goto out;
@@ -1367,7 +1388,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
__clk_reparent(clk, parent);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1390,7 +1411,7 @@ int __clk_init(struct device *dev, struct clk *clk)
if (!clk)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* check to see if a clock with this name is already registered */
if (__clk_lookup(clk->name)) {
@@ -1514,7 +1535,7 @@ int __clk_init(struct device *dev, struct clk *clk)
clk_debug_register(clk);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1748,7 +1769,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1772,7 +1793,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
clk->notifier_count++;
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1797,7 +1818,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
@@ -1818,7 +1839,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
ret = -ENOENT;
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
--
1.7.10.4
Reentrancy into the clock framework is necessary for clock operations
that result in nested calls to the clk api. A common example is a clock
that is prepared via an i2c transaction, such as a clock inside of a
discrete audio chip or a power management IC. The i2c subsystem itself
will use the clk api resulting in a deadlock:
clk_prepare(audio_clk)
i2c_transfer(..)
clk_prepare(i2c_controller_clk)
The ability to reenter the clock framework prevents this deadlock.
Other use cases exist such as allowing .set_rate callbacks to call
clk_set_parent to achieve the best rate, or to save power in certain
configurations. Yet another example is performing pinctrl operations
from a clk_ops callback. Calls into the pinctrl subsystem may call
clk_{un}prepare on an unrelated clock. Allowing for nested calls to
reenter the clock framework enables both of these use cases.
Reentrancy is implemented by two global pointers that track the owner
currently holding a global lock. One pointer tracks the owner during
sleepable, mutex-protected operations and the other one tracks the owner
during non-interruptible, spinlock-protected operations.
When the clk framework is entered we try to hold the global lock. If it
is held we compare the current task id against the current owner; a
match implies a nested call and we reenter. If the values do not match
then we block on the lock until it is released.
Signed-off-by: Mike Turquette <[email protected]>
Cc: Rajagopal Venkat <[email protected]>
Cc: David Brown <[email protected]>
Cc: Ulf Hansson <[email protected]>
Cc: Laurent Pinchart <[email protected]>
Cc: Thomas Gleixner <[email protected]>
---
Changes since v4:
* remove unnecessary atomic operations
* remove casting bugs
* place reentrancy logic into locking helper functions
* improve debugging with reference counting and WARNs
drivers/clk/clk.c | 43 +++++++++++++++++++++++++++++++++++++++++--
1 file changed, 41 insertions(+), 2 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index bea47d5..fe7c054 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,10 +19,17 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
@@ -30,21 +37,53 @@ static LIST_HEAD(clk_notifier_list);
/*** locking ***/
static void clk_prepare_lock(void)
{
- mutex_lock(&prepare_lock);
+ if (!mutex_trylock(&prepare_lock)) {
+ if (prepare_owner == current) {
+ prepare_refcnt++;
+ return;
+ }
+ mutex_lock(&prepare_lock);
+ }
+ WARN_ON_ONCE(prepare_owner != NULL);
+ WARN_ON_ONCE(prepare_refcnt != 0);
+ prepare_owner = current;
+ prepare_refcnt = 1;
}
static void clk_prepare_unlock(void)
{
+ WARN_ON_ONCE(prepare_owner != current);
+ WARN_ON_ONCE(prepare_refcnt == 0);
+
+ if (--prepare_refcnt)
+ return;
+ prepare_owner = NULL;
mutex_unlock(&prepare_lock);
}
static void clk_enable_lock(unsigned long *flags)
{
- spin_lock_irqsave(&enable_lock, *flags);
+ if (!spin_trylock_irqsave(&enable_lock, *flags)) {
+ if (enable_owner == current) {
+ enable_refcnt++;
+ return;
+ }
+ spin_lock_irqsave(&enable_lock, *flags);
+ }
+ WARN_ON_ONCE(enable_owner != NULL);
+ WARN_ON_ONCE(enable_refcnt != 0);
+ enable_owner = current;
+ enable_refcnt = 1;
}
static void clk_enable_unlock(unsigned long *flags)
{
+ WARN_ON_ONCE(enable_owner != current);
+ WARN_ON_ONCE(enable_refcnt == 0);
+
+ if (--enable_refcnt)
+ return;
+ enable_owner = NULL;
spin_unlock_irqrestore(&enable_lock, *flags);
}
--
1.7.10.4
On Wed, 27 Mar 2013, Mike Turquette wrote:
> Create locking helpers for the global mutex and global spinlock. The
> definitions of these helpers will be expanded upon in the next patch
> which introduces reentrancy into the locking scheme.
This looks way better. Nitpick below.
> +static void clk_enable_lock(unsigned long *flags)
> +{
> + spin_lock_irqsave(&enable_lock, *flags);
> +}
> +static void clk_enable_unlock(unsigned long *flags)
Please just hand in the flags, no need for indirection.
> +{
> + spin_unlock_irqrestore(&enable_lock, *flags);
> +}
> +
Reviewed-by: Thomas Gleixner <[email protected]>
On Wed, 27 Mar 2013, Mike Turquette wrote:
> Reentrancy into the clock framework is necessary for clock operations
> that result in nested calls to the clk api. A common example is a clock
> that is prepared via an i2c transaction, such as a clock inside of a
> discrete audio chip or a power management IC. The i2c subsystem itself
> will use the clk api resulting in a deadlock:
>
> clk_prepare(audio_clk)
> i2c_transfer(..)
> clk_prepare(i2c_controller_clk)
>
> The ability to reenter the clock framework prevents this deadlock.
>
> Other use cases exist such as allowing .set_rate callbacks to call
> clk_set_parent to achieve the best rate, or to save power in certain
> configurations. Yet another example is performing pinctrl operations
> from a clk_ops callback. Calls into the pinctrl subsystem may call
> clk_{un}prepare on an unrelated clock. Allowing for nested calls to
> reenter the clock framework enables both of these use cases.
>
> Reentrancy is implemented by two global pointers that track the owner
> currently holding a global lock. One pointer tracks the owner during
> sleepable, mutex-protected operations and the other one tracks the owner
> during non-interruptible, spinlock-protected operations.
>
> When the clk framework is entered we try to hold the global lock. If it
> is held we compare the current task id against the current owner; a
s/task id/task/ We store the task pointer in the owner variable.
Reviewed-by: Thomas Gleixner <[email protected]>
Hi Mike,
On Wednesday 27 March 2013 21:45:56 Mike Turquette wrote:
> This fifth attempt at allowing calls to the clk api to reenter splits
> the last patch into two parts. The first patch abstracts out the
> locking details into some helper functions and converts all of the
> direct calls to the mutex and spinlock api to use these helpers.
>
> The second patch introduces the reentrancy logic into these helper
> functions. Fundamentally the reentrancy logic hasn't changed since v4,
> but fixing casting bugs, removing unnecessary barriers and better design
> & beautification separate this approach from the last one.
>
> Changes tested on top of the latest clk-next branch with an OMAP4430
> Panda board.
>
> Mike Turquette (2):
> clk: abstract locking out into helper functions
> clk: allow reentrant calls into the clk framework
>
> drivers/clk/clk.c | 136 +++++++++++++++++++++++++++++++++++---------------
> 1 file changed, 98 insertions(+), 38 deletions(-)
For the whole patch set,
Tested-by: Laurent Pinchart <[email protected]>
on a DM3730.
--
Regards,
Laurent Pinchart
On Thu, Mar 28, 2013 at 2:33 AM, Thomas Gleixner <[email protected]> wrote:
> On Wed, 27 Mar 2013, Mike Turquette wrote:
>
>> Reentrancy into the clock framework is necessary for clock operations
>> that result in nested calls to the clk api. A common example is a clock
>> that is prepared via an i2c transaction, such as a clock inside of a
>> discrete audio chip or a power management IC. The i2c subsystem itself
>> will use the clk api resulting in a deadlock:
>>
>> clk_prepare(audio_clk)
>> i2c_transfer(..)
>> clk_prepare(i2c_controller_clk)
>>
>> The ability to reenter the clock framework prevents this deadlock.
>>
>> Other use cases exist such as allowing .set_rate callbacks to call
>> clk_set_parent to achieve the best rate, or to save power in certain
>> configurations. Yet another example is performing pinctrl operations
>> from a clk_ops callback. Calls into the pinctrl subsystem may call
>> clk_{un}prepare on an unrelated clock. Allowing for nested calls to
>> reenter the clock framework enables both of these use cases.
>>
>> Reentrancy is implemented by two global pointers that track the owner
>> currently holding a global lock. One pointer tracks the owner during
>> sleepable, mutex-protected operations and the other one tracks the owner
>> during non-interruptible, spinlock-protected operations.
>>
>> When the clk framework is entered we try to hold the global lock. If it
>> is held we compare the current task id against the current owner; a
>
> s/task id/task/ We store the task pointer in the owner variable.
>
> Reviewed-by: Thomas Gleixner <[email protected]>
Will fix the typo and add your reviewed-by.
Thanks for the review,
Mike
This sixth version of the series fixes up two minor cosmetic issues and
adds reviewed-by's and tested-by's. If there are no more comments to
this version then I'll merge these patches into clk-next so that it can
get some cycles in linux-next.
Changes tested on top of the latest clk-next branch with an OMAP4430
Panda board.
Mike Turquette (2):
clk: abstract locking out into helper functions
clk: allow reentrant calls into the clk framework
drivers/clk/clk.c | 139 ++++++++++++++++++++++++++++++++++++++---------------
1 file changed, 101 insertions(+), 38 deletions(-)
--
1.7.10.4
Create locking helpers for the global mutex and global spinlock. The
definitions of these helpers will be expanded upon in the next patch
which introduces reentrancy into the locking scheme.
Signed-off-by: Mike Turquette <[email protected]>
Cc: Rajagopal Venkat <[email protected]>
Cc: David Brown <[email protected]>
Cc: Ulf Hansson <[email protected]>
Tested-by: Laurent Pinchart <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
---
Changes since v5:
* pass flags by value instead of by reference in clk_enable_{un}lock
drivers/clk/clk.c | 99 +++++++++++++++++++++++++++++++++--------------------
1 file changed, 61 insertions(+), 38 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5e8ffff..0b5d612 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
+/*** locking ***/
+static void clk_prepare_lock(void)
+{
+ mutex_lock(&prepare_lock);
+}
+
+static void clk_prepare_unlock(void)
+{
+ mutex_unlock(&prepare_lock);
+}
+
+static unsigned long clk_enable_lock(void)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&enable_lock, flags);
+ return flags;
+}
+
+static void clk_enable_unlock(unsigned long flags)
+{
+ spin_unlock_irqrestore(&enable_lock, flags);
+}
+
/*** debugfs support ***/
#ifdef CONFIG_COMMON_CLK_DEBUG
@@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
seq_printf(s, "---------------------------------------------------------------------\n");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node)
clk_summary_show_subtree(s, c, 0);
@@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
hlist_for_each_entry(c, &clk_orphan_list, child_node)
clk_summary_show_subtree(s, c, 0);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
@@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
seq_printf(s, "{");
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(c, &clk_root_list, child_node) {
if (!first_node)
@@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
clk_dump_subtree(s, c, 0);
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
seq_printf(s, "}");
return 0;
@@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
if (!orphandir)
return -ENOMEM;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_debug_create_subtree(clk, rootdir);
@@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
inited = 1;
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
@@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
hlist_for_each_entry(child, &clk->children, child_node)
clk_disable_unused_subtree(child);
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
if (clk->enable_count)
goto unlock_out;
@@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
}
unlock_out:
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
out:
return;
@@ -403,7 +426,7 @@ static int clk_disable_unused(void)
{
struct clk *clk;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
hlist_for_each_entry(clk, &clk_root_list, child_node)
clk_disable_unused_subtree(clk);
@@ -417,7 +440,7 @@ static int clk_disable_unused(void)
hlist_for_each_entry(clk, &clk_orphan_list, child_node)
clk_unprepare_unused_subtree(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return 0;
}
@@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
*/
void clk_unprepare(struct clk *clk)
{
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
__clk_unprepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_unprepare);
@@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
{
int ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_prepare(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
{
unsigned long flags;
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
__clk_disable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
}
EXPORT_SYMBOL_GPL(clk_disable);
@@ -745,9 +768,9 @@ int clk_enable(struct clk *clk)
unsigned long flags;
int ret;
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
ret = __clk_enable(clk);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
return ret;
}
@@ -792,9 +815,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
{
unsigned long ret;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
ret = __clk_round_rate(clk, rate);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -889,13 +912,13 @@ unsigned long clk_get_rate(struct clk *clk)
{
unsigned long rate;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
__clk_recalc_rates(clk, 0);
rate = __clk_get_rate(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return rate;
}
@@ -1100,7 +1123,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
int ret = 0;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* bail early if nothing to do */
if (rate == clk->rate)
@@ -1132,7 +1155,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
clk_change_rate(top);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1148,9 +1171,9 @@ struct clk *clk_get_parent(struct clk *clk)
{
struct clk *parent;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
parent = __clk_get_parent(clk);
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return parent;
}
@@ -1294,19 +1317,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
__clk_prepare(parent);
/* FIXME replace with clk_is_enabled(clk) someday */
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
if (clk->enable_count)
__clk_enable(parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
/* change clock input source */
ret = clk->ops->set_parent(clk->hw, i);
/* clean up old prepare and enable */
- spin_lock_irqsave(&enable_lock, flags);
+ flags = clk_enable_lock();
if (clk->enable_count)
__clk_disable(old_parent);
- spin_unlock_irqrestore(&enable_lock, flags);
+ clk_enable_unlock(flags);
if (clk->prepare_count)
__clk_unprepare(old_parent);
@@ -1338,7 +1361,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
return -ENOSYS;
/* prevent racing with updates to the clock topology */
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
if (clk->parent == parent)
goto out;
@@ -1367,7 +1390,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
__clk_reparent(clk, parent);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1390,7 +1413,7 @@ int __clk_init(struct device *dev, struct clk *clk)
if (!clk)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* check to see if a clock with this name is already registered */
if (__clk_lookup(clk->name)) {
@@ -1514,7 +1537,7 @@ int __clk_init(struct device *dev, struct clk *clk)
clk_debug_register(clk);
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1748,7 +1771,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
/* search the list of notifiers for this clk */
list_for_each_entry(cn, &clk_notifier_list, node)
@@ -1772,7 +1795,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
clk->notifier_count++;
out:
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
@@ -1797,7 +1820,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
if (!clk || !nb)
return -EINVAL;
- mutex_lock(&prepare_lock);
+ clk_prepare_lock();
list_for_each_entry(cn, &clk_notifier_list, node)
if (cn->clk == clk)
@@ -1818,7 +1841,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
ret = -ENOENT;
}
- mutex_unlock(&prepare_lock);
+ clk_prepare_unlock();
return ret;
}
--
1.7.10.4
Reentrancy into the clock framework is necessary for clock operations
that result in nested calls to the clk api. A common example is a clock
that is prepared via an i2c transaction, such as a clock inside of a
discrete audio chip or a power management IC. The i2c subsystem itself
will use the clk api resulting in a deadlock:
clk_prepare(audio_clk)
i2c_transfer(..)
clk_prepare(i2c_controller_clk)
The ability to reenter the clock framework prevents this deadlock.
Other use cases exist such as allowing .set_rate callbacks to call
clk_set_parent to achieve the best rate, or to save power in certain
configurations. Yet another example is performing pinctrl operations
from a clk_ops callback. Calls into the pinctrl subsystem may call
clk_{un}prepare on an unrelated clock. Allowing for nested calls to
reenter the clock framework enables both of these use cases.
Reentrancy is implemented by two global pointers that track the owner
currently holding a global lock. One pointer tracks the owner during
sleepable, mutex-protected operations and the other one tracks the owner
during non-interruptible, spinlock-protected operations.
When the clk framework is entered we try to hold the global lock. If it
is held we compare the current task against the current owner; a match
implies a nested call and we reenter. If the values do not match then
we block on the lock until it is released.
Signed-off-by: Mike Turquette <[email protected]>
Cc: Rajagopal Venkat <[email protected]>
Cc: David Brown <[email protected]>
Cc: Ulf Hansson <[email protected]>
Tested-by: Laurent Pinchart <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
---
Changes since v5:
* fixed up typo in changelog
Changes since v4:
* remove unnecessary atomic operations
* remove casting bugs
* place reentrancy logic into locking helper functions
* improve debugging with reference counting and WARNs
drivers/clk/clk.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0b5d612..0230c9d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -19,10 +19,17 @@
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/sched.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
+static struct task_struct *prepare_owner;
+static struct task_struct *enable_owner;
+
+static int prepare_refcnt;
+static int enable_refcnt;
+
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
@@ -30,23 +37,56 @@ static LIST_HEAD(clk_notifier_list);
/*** locking ***/
static void clk_prepare_lock(void)
{
- mutex_lock(&prepare_lock);
+ if (!mutex_trylock(&prepare_lock)) {
+ if (prepare_owner == current) {
+ prepare_refcnt++;
+ return;
+ }
+ mutex_lock(&prepare_lock);
+ }
+ WARN_ON_ONCE(prepare_owner != NULL);
+ WARN_ON_ONCE(prepare_refcnt != 0);
+ prepare_owner = current;
+ prepare_refcnt = 1;
}
static void clk_prepare_unlock(void)
{
+ WARN_ON_ONCE(prepare_owner != current);
+ WARN_ON_ONCE(prepare_refcnt == 0);
+
+ if (--prepare_refcnt)
+ return;
+ prepare_owner = NULL;
mutex_unlock(&prepare_lock);
}
static unsigned long clk_enable_lock(void)
{
unsigned long flags;
- spin_lock_irqsave(&enable_lock, flags);
+
+ if (!spin_trylock_irqsave(&enable_lock, flags)) {
+ if (enable_owner == current) {
+ enable_refcnt++;
+ return flags;
+ }
+ spin_lock_irqsave(&enable_lock, flags);
+ }
+ WARN_ON_ONCE(enable_owner != NULL);
+ WARN_ON_ONCE(enable_refcnt != 0);
+ enable_owner = current;
+ enable_refcnt = 1;
return flags;
}
static void clk_enable_unlock(unsigned long flags)
{
+ WARN_ON_ONCE(enable_owner != current);
+ WARN_ON_ONCE(enable_refcnt == 0);
+
+ if (--enable_refcnt)
+ return;
+ enable_owner = NULL;
spin_unlock_irqrestore(&enable_lock, flags);
}
--
1.7.10.4
On 28 March 2013 21:59, Mike Turquette <[email protected]> wrote:
> Create locking helpers for the global mutex and global spinlock. The
> definitions of these helpers will be expanded upon in the next patch
> which introduces reentrancy into the locking scheme.
>
> Signed-off-by: Mike Turquette <[email protected]>
> Cc: Rajagopal Venkat <[email protected]>
> Cc: David Brown <[email protected]>
> Cc: Ulf Hansson <[email protected]>
> Tested-by: Laurent Pinchart <[email protected]>
> Reviewed-by: Thomas Gleixner <[email protected]>
> ---
> Changes since v5:
> * pass flags by value instead of by reference in clk_enable_{un}lock
>
> drivers/clk/clk.c | 99 +++++++++++++++++++++++++++++++++--------------------
> 1 file changed, 61 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 5e8ffff..0b5d612 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -27,6 +27,29 @@ static HLIST_HEAD(clk_root_list);
> static HLIST_HEAD(clk_orphan_list);
> static LIST_HEAD(clk_notifier_list);
>
> +/*** locking ***/
> +static void clk_prepare_lock(void)
> +{
> + mutex_lock(&prepare_lock);
> +}
> +
> +static void clk_prepare_unlock(void)
> +{
> + mutex_unlock(&prepare_lock);
> +}
> +
> +static unsigned long clk_enable_lock(void)
> +{
> + unsigned long flags;
> + spin_lock_irqsave(&enable_lock, flags);
> + return flags;
> +}
> +
> +static void clk_enable_unlock(unsigned long flags)
> +{
> + spin_unlock_irqrestore(&enable_lock, flags);
> +}
> +
> /*** debugfs support ***/
>
> #ifdef CONFIG_COMMON_CLK_DEBUG
> @@ -69,7 +92,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
> seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
> seq_printf(s, "---------------------------------------------------------------------\n");
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> hlist_for_each_entry(c, &clk_root_list, child_node)
> clk_summary_show_subtree(s, c, 0);
> @@ -77,7 +100,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
> hlist_for_each_entry(c, &clk_orphan_list, child_node)
> clk_summary_show_subtree(s, c, 0);
>
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return 0;
> }
> @@ -130,7 +153,7 @@ static int clk_dump(struct seq_file *s, void *data)
>
> seq_printf(s, "{");
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> hlist_for_each_entry(c, &clk_root_list, child_node) {
> if (!first_node)
> @@ -144,7 +167,7 @@ static int clk_dump(struct seq_file *s, void *data)
> clk_dump_subtree(s, c, 0);
> }
>
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> seq_printf(s, "}");
> return 0;
> @@ -316,7 +339,7 @@ static int __init clk_debug_init(void)
> if (!orphandir)
> return -ENOMEM;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> hlist_for_each_entry(clk, &clk_root_list, child_node)
> clk_debug_create_subtree(clk, rootdir);
> @@ -326,7 +349,7 @@ static int __init clk_debug_init(void)
>
> inited = 1;
>
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return 0;
> }
> @@ -372,7 +395,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
> hlist_for_each_entry(child, &clk->children, child_node)
> clk_disable_unused_subtree(child);
>
> - spin_lock_irqsave(&enable_lock, flags);
> + flags = clk_enable_lock();
>
> if (clk->enable_count)
> goto unlock_out;
> @@ -393,7 +416,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
> }
>
> unlock_out:
> - spin_unlock_irqrestore(&enable_lock, flags);
> + clk_enable_unlock(flags);
>
> out:
> return;
> @@ -403,7 +426,7 @@ static int clk_disable_unused(void)
> {
> struct clk *clk;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> hlist_for_each_entry(clk, &clk_root_list, child_node)
> clk_disable_unused_subtree(clk);
> @@ -417,7 +440,7 @@ static int clk_disable_unused(void)
> hlist_for_each_entry(clk, &clk_orphan_list, child_node)
> clk_unprepare_unused_subtree(clk);
>
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return 0;
> }
> @@ -600,9 +623,9 @@ void __clk_unprepare(struct clk *clk)
> */
> void clk_unprepare(struct clk *clk)
> {
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
> __clk_unprepare(clk);
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
> }
> EXPORT_SYMBOL_GPL(clk_unprepare);
>
> @@ -648,9 +671,9 @@ int clk_prepare(struct clk *clk)
> {
> int ret;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
> ret = __clk_prepare(clk);
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return ret;
> }
> @@ -692,9 +715,9 @@ void clk_disable(struct clk *clk)
> {
> unsigned long flags;
>
> - spin_lock_irqsave(&enable_lock, flags);
> + flags = clk_enable_lock();
> __clk_disable(clk);
> - spin_unlock_irqrestore(&enable_lock, flags);
> + clk_enable_unlock(flags);
> }
> EXPORT_SYMBOL_GPL(clk_disable);
>
> @@ -745,9 +768,9 @@ int clk_enable(struct clk *clk)
> unsigned long flags;
> int ret;
>
> - spin_lock_irqsave(&enable_lock, flags);
> + flags = clk_enable_lock();
> ret = __clk_enable(clk);
> - spin_unlock_irqrestore(&enable_lock, flags);
> + clk_enable_unlock(flags);
>
> return ret;
> }
> @@ -792,9 +815,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
> {
> unsigned long ret;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
> ret = __clk_round_rate(clk, rate);
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return ret;
> }
> @@ -889,13 +912,13 @@ unsigned long clk_get_rate(struct clk *clk)
> {
> unsigned long rate;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
> __clk_recalc_rates(clk, 0);
>
> rate = __clk_get_rate(clk);
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return rate;
> }
> @@ -1100,7 +1123,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
> int ret = 0;
>
> /* prevent racing with updates to the clock topology */
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> /* bail early if nothing to do */
> if (rate == clk->rate)
> @@ -1132,7 +1155,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
> clk_change_rate(top);
>
> out:
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return ret;
> }
> @@ -1148,9 +1171,9 @@ struct clk *clk_get_parent(struct clk *clk)
> {
> struct clk *parent;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
> parent = __clk_get_parent(clk);
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return parent;
> }
> @@ -1294,19 +1317,19 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
> __clk_prepare(parent);
>
> /* FIXME replace with clk_is_enabled(clk) someday */
> - spin_lock_irqsave(&enable_lock, flags);
> + flags = clk_enable_lock();
> if (clk->enable_count)
> __clk_enable(parent);
> - spin_unlock_irqrestore(&enable_lock, flags);
> + clk_enable_unlock(flags);
>
> /* change clock input source */
> ret = clk->ops->set_parent(clk->hw, i);
>
> /* clean up old prepare and enable */
> - spin_lock_irqsave(&enable_lock, flags);
> + flags = clk_enable_lock();
> if (clk->enable_count)
> __clk_disable(old_parent);
> - spin_unlock_irqrestore(&enable_lock, flags);
> + clk_enable_unlock(flags);
>
> if (clk->prepare_count)
> __clk_unprepare(old_parent);
> @@ -1338,7 +1361,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
> return -ENOSYS;
>
> /* prevent racing with updates to the clock topology */
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> if (clk->parent == parent)
> goto out;
> @@ -1367,7 +1390,7 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
> __clk_reparent(clk, parent);
>
> out:
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return ret;
> }
> @@ -1390,7 +1413,7 @@ int __clk_init(struct device *dev, struct clk *clk)
> if (!clk)
> return -EINVAL;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> /* check to see if a clock with this name is already registered */
> if (__clk_lookup(clk->name)) {
> @@ -1514,7 +1537,7 @@ int __clk_init(struct device *dev, struct clk *clk)
> clk_debug_register(clk);
>
> out:
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return ret;
> }
> @@ -1748,7 +1771,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
> if (!clk || !nb)
> return -EINVAL;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> /* search the list of notifiers for this clk */
> list_for_each_entry(cn, &clk_notifier_list, node)
> @@ -1772,7 +1795,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
> clk->notifier_count++;
>
> out:
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return ret;
> }
> @@ -1797,7 +1820,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
> if (!clk || !nb)
> return -EINVAL;
>
> - mutex_lock(&prepare_lock);
> + clk_prepare_lock();
>
> list_for_each_entry(cn, &clk_notifier_list, node)
> if (cn->clk == clk)
> @@ -1818,7 +1841,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
> ret = -ENOENT;
> }
>
> - mutex_unlock(&prepare_lock);
> + clk_prepare_unlock();
>
> return ret;
> }
> --
> 1.7.10.4
>
Reviewed-by: Ulf Hansson <[email protected]>
On 28 March 2013 21:59, Mike Turquette <[email protected]> wrote:
> Reentrancy into the clock framework is necessary for clock operations
> that result in nested calls to the clk api. A common example is a clock
> that is prepared via an i2c transaction, such as a clock inside of a
> discrete audio chip or a power management IC. The i2c subsystem itself
> will use the clk api resulting in a deadlock:
>
> clk_prepare(audio_clk)
> i2c_transfer(..)
> clk_prepare(i2c_controller_clk)
>
> The ability to reenter the clock framework prevents this deadlock.
>
> Other use cases exist such as allowing .set_rate callbacks to call
> clk_set_parent to achieve the best rate, or to save power in certain
> configurations. Yet another example is performing pinctrl operations
> from a clk_ops callback. Calls into the pinctrl subsystem may call
> clk_{un}prepare on an unrelated clock. Allowing for nested calls to
> reenter the clock framework enables both of these use cases.
>
> Reentrancy is implemented by two global pointers that track the owner
> currently holding a global lock. One pointer tracks the owner during
> sleepable, mutex-protected operations and the other one tracks the owner
> during non-interruptible, spinlock-protected operations.
>
> When the clk framework is entered we try to hold the global lock. If it
> is held we compare the current task against the current owner; a match
> implies a nested call and we reenter. If the values do not match then
> we block on the lock until it is released.
>
> Signed-off-by: Mike Turquette <[email protected]>
> Cc: Rajagopal Venkat <[email protected]>
> Cc: David Brown <[email protected]>
> Cc: Ulf Hansson <[email protected]>
> Tested-by: Laurent Pinchart <[email protected]>
> Reviewed-by: Thomas Gleixner <[email protected]>
> ---
> Changes since v5:
> * fixed up typo in changelog
>
> Changes since v4:
> * remove unnecessary atomic operations
> * remove casting bugs
> * place reentrancy logic into locking helper functions
> * improve debugging with reference counting and WARNs
>
> drivers/clk/clk.c | 44 ++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 42 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
> index 0b5d612..0230c9d 100644
> --- a/drivers/clk/clk.c
> +++ b/drivers/clk/clk.c
> @@ -19,10 +19,17 @@
> #include <linux/of.h>
> #include <linux/device.h>
> #include <linux/init.h>
> +#include <linux/sched.h>
>
> static DEFINE_SPINLOCK(enable_lock);
> static DEFINE_MUTEX(prepare_lock);
>
> +static struct task_struct *prepare_owner;
> +static struct task_struct *enable_owner;
> +
> +static int prepare_refcnt;
> +static int enable_refcnt;
> +
> static HLIST_HEAD(clk_root_list);
> static HLIST_HEAD(clk_orphan_list);
> static LIST_HEAD(clk_notifier_list);
> @@ -30,23 +37,56 @@ static LIST_HEAD(clk_notifier_list);
> /*** locking ***/
> static void clk_prepare_lock(void)
> {
> - mutex_lock(&prepare_lock);
> + if (!mutex_trylock(&prepare_lock)) {
> + if (prepare_owner == current) {
> + prepare_refcnt++;
> + return;
> + }
> + mutex_lock(&prepare_lock);
> + }
> + WARN_ON_ONCE(prepare_owner != NULL);
> + WARN_ON_ONCE(prepare_refcnt != 0);
> + prepare_owner = current;
> + prepare_refcnt = 1;
> }
>
> static void clk_prepare_unlock(void)
> {
> + WARN_ON_ONCE(prepare_owner != current);
> + WARN_ON_ONCE(prepare_refcnt == 0);
> +
> + if (--prepare_refcnt)
> + return;
> + prepare_owner = NULL;
> mutex_unlock(&prepare_lock);
> }
>
> static unsigned long clk_enable_lock(void)
> {
> unsigned long flags;
> - spin_lock_irqsave(&enable_lock, flags);
> +
> + if (!spin_trylock_irqsave(&enable_lock, flags)) {
> + if (enable_owner == current) {
> + enable_refcnt++;
> + return flags;
> + }
> + spin_lock_irqsave(&enable_lock, flags);
> + }
> + WARN_ON_ONCE(enable_owner != NULL);
> + WARN_ON_ONCE(enable_refcnt != 0);
> + enable_owner = current;
> + enable_refcnt = 1;
> return flags;
> }
>
> static void clk_enable_unlock(unsigned long flags)
> {
> + WARN_ON_ONCE(enable_owner != current);
> + WARN_ON_ONCE(enable_refcnt == 0);
> +
> + if (--enable_refcnt)
> + return;
> + enable_owner = NULL;
> spin_unlock_irqrestore(&enable_lock, flags);
> }
>
> --
> 1.7.10.4
>
Great piece of work!
Reviewed-by: Ulf Hansson <[email protected]>