From: Jie Luo <[email protected]>
qca8xxx is a four-port 2.5GBaseT chip: working in switch mode it is
named qca8386, and working in PHY mode it is named qca8084. Its clock
hardware registers are accessed over the MDIO bus.

This patch series adds the clock controller of qca8386/qca8084, and
adds the clock flag CLK_ENABLE_MUTEX_LOCK to avoid taking a spinlock
during the clock operations of the qca8k clock controller, since
accessing the clock control registers over the MDIO bus may sleep.
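
For illustration only (the identifiers below are hypothetical, not
taken from this series), a provider would opt in by setting the new
flag in its clk_init_data, e.g.:

#include <linux/clk-provider.h>

/* Hypothetical ops whose .enable/.disable access registers over the
 * MDIO bus and may therefore sleep.
 */
static int qca8k_gate_enable(struct clk_hw *hw);
static void qca8k_gate_disable(struct clk_hw *hw);

static const struct clk_ops qca8k_gate_ops = {
        .enable         = qca8k_gate_enable,
        .disable        = qca8k_gate_disable,
};

static const struct clk_init_data qca8k_gate_init = {
        .name   = "nss_port1_rx_clk",
        .ops    = &qca8k_gate_ops,
        /* ask the framework to use the mutex instead of enable_lock */
        .flags  = CLK_ENABLE_MUTEX_LOCK,
};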
Luo Jie (3):
clk: Add the flag CLK_ENABLE_MUTEX_LOCK for enabling clocks
dt-bindings: clock: add qca8386/qca8084 clock and reset definitions
clk: qcom: add clock controller driver for qca8386/qca8084
.../bindings/clock/qcom,nsscc-qca8k.yaml | 59 +
drivers/clk/clk.c | 78 +-
drivers/clk/qcom/Kconfig | 8 +
drivers/clk/qcom/Makefile | 1 +
drivers/clk/qcom/nsscc-qca8k.c | 2205 +++++++++++++++++
include/dt-bindings/clock/qcom,nsscc-qca8k.h | 102 +
include/dt-bindings/reset/qcom,nsscc-qca8k.h | 76 +
include/linux/clk-provider.h | 4 +
8 files changed, 2519 insertions(+), 14 deletions(-)
create mode 100644 Documentation/devicetree/bindings/clock/qcom,nsscc-qca8k.yaml
create mode 100644 drivers/clk/qcom/nsscc-qca8k.c
create mode 100644 include/dt-bindings/clock/qcom,nsscc-qca8k.h
create mode 100644 include/dt-bindings/reset/qcom,nsscc-qca8k.h
base-commit: ec89391563792edd11d138a853901bce76d11f44
--
2.34.1
Support clock controllers whose HW registers are accessed
over the MDIO bus. A spinlock can't be used there because
the MDIO operation may sleep.

Add the flag CLK_ENABLE_MUTEX_LOCK to hint the clock
framework to use a mutex instead of the spinlock.
Signed-off-by: Luo Jie <[email protected]>
---
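Note (sketch only, not part of the diff): with CLK_ENABLE_MUTEX_LOCK
set, clk_enable()/clk_disable() may sleep, so such clocks must only be
enabled and disabled from process context, never under a spinlock or
from IRQ context. For example, with a hypothetical consumer handle
"mdio_clk":

#include <linux/clk.h>

static int qca8k_port_clk_on(struct clk *mdio_clk)
{
        int ret;

        ret = clk_prepare(mdio_clk);    /* may sleep (prepare_lock) */
        if (ret)
                return ret;

        /* may also sleep now, since it takes the enable mutex */
        ret = clk_enable(mdio_clk);
        if (ret)
                clk_unprepare(mdio_clk);

        return ret;
}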
drivers/clk/clk.c | 78 +++++++++++++++++++++++++++++-------
include/linux/clk-provider.h | 4 ++
2 files changed, 68 insertions(+), 14 deletions(-)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index c249f9791ae8..9f1be8e3f32b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -26,12 +26,15 @@
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
+static DEFINE_MUTEX(enable_mutex_lock);
static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;
+static struct task_struct *enable_mutex_owner;
static int prepare_refcnt;
static int enable_refcnt;
+static int enable_mutex_refcnt;
static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
@@ -149,7 +152,7 @@ static void clk_prepare_unlock(void)
mutex_unlock(&prepare_lock);
}
-static unsigned long clk_enable_lock(void)
+static unsigned long clk_enable_spin_lock(void)
__acquires(enable_lock)
{
unsigned long flags;
@@ -177,7 +180,7 @@ static unsigned long clk_enable_lock(void)
return flags;
}
-static void clk_enable_unlock(unsigned long flags)
+static void clk_enable_spin_unlock(unsigned long flags)
__releases(enable_lock)
{
WARN_ON_ONCE(enable_owner != current);
@@ -191,6 +194,52 @@ static void clk_enable_unlock(unsigned long flags)
spin_unlock_irqrestore(&enable_lock, flags);
}
+static void clk_enable_mutex_lock(void)
+{
+ if (!mutex_trylock(&enable_mutex_lock)) {
+ if (enable_mutex_owner == current) {
+ enable_mutex_refcnt++;
+ return;
+ }
+ mutex_lock(&enable_mutex_lock);
+ }
+ WARN_ON_ONCE(enable_mutex_owner != NULL);
+ WARN_ON_ONCE(enable_mutex_refcnt != 0);
+ enable_mutex_owner = current;
+ enable_mutex_refcnt = 1;
+}
+
+static void clk_enable_mutex_unlock(void)
+{
+ WARN_ON_ONCE(enable_mutex_owner != current);
+ WARN_ON_ONCE(enable_mutex_refcnt == 0);
+
+ if (--enable_mutex_refcnt)
+ return;
+ enable_mutex_owner = NULL;
+ mutex_unlock(&enable_mutex_lock);
+}
+
+static unsigned long clk_enable_lock(struct clk_core *core)
+{
+ unsigned long flags = 0;
+
+ if (core && (core->flags & CLK_ENABLE_MUTEX_LOCK))
+ clk_enable_mutex_lock();
+ else
+ flags = clk_enable_spin_lock();
+
+ return flags;
+}
+
+static void clk_enable_unlock(struct clk_core *core, unsigned long flags)
+{
+ if (core && (core->flags & CLK_ENABLE_MUTEX_LOCK))
+ clk_enable_mutex_unlock();
+ else
+ clk_enable_spin_unlock(flags);
+}
+
static bool clk_core_rate_is_protected(struct clk_core *core)
{
return core->protect_count;
@@ -1111,9 +1160,9 @@ static void clk_core_disable_lock(struct clk_core *core)
{
unsigned long flags;
- flags = clk_enable_lock();
+ flags = clk_enable_lock(core);
clk_core_disable(core);
- clk_enable_unlock(flags);
+ clk_enable_unlock(core, flags);
}
/**
@@ -1178,9 +1227,9 @@ static int clk_core_enable_lock(struct clk_core *core)
unsigned long flags;
int ret;
- flags = clk_enable_lock();
+ flags = clk_enable_lock(core);
ret = clk_core_enable(core);
- clk_enable_unlock(flags);
+ clk_enable_unlock(core, flags);
return ret;
}
@@ -1390,7 +1439,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
if (clk_pm_runtime_get(core))
goto unprepare_out;
- flags = clk_enable_lock();
+ flags = clk_enable_lock(core);
if (core->enable_count)
goto unlock_out;
@@ -1413,7 +1462,7 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
}
unlock_out:
- clk_enable_unlock(flags);
+ clk_enable_unlock(core, flags);
clk_pm_runtime_put(core);
unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
@@ -2042,9 +2091,9 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *core,
}
/* update the clk tree topology */
- flags = clk_enable_lock();
+ flags = clk_enable_lock(core);
clk_reparent(core, parent);
- clk_enable_unlock(flags);
+ clk_enable_unlock(core, flags);
return old_parent;
}
@@ -2087,9 +2136,9 @@ static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
trace_clk_set_parent_complete(core, parent);
if (ret) {
- flags = clk_enable_lock();
+ flags = clk_enable_lock(core);
clk_reparent(core, old_parent);
- clk_enable_unlock(flags);
+ clk_enable_unlock(core, flags);
__clk_set_parent_after(core, old_parent, parent);
@@ -3388,6 +3437,7 @@ static const struct {
ENTRY(CLK_IS_CRITICAL),
ENTRY(CLK_OPS_PARENT_ENABLE),
ENTRY(CLK_DUTY_CYCLE_PARENT),
+ ENTRY(CLK_ENABLE_MUTEX_LOCK),
#undef ENTRY
};
@@ -4410,9 +4460,9 @@ void clk_unregister(struct clk *clk)
* Assign empty clock ops for consumers that might still hold
* a reference to this clock.
*/
- flags = clk_enable_lock();
+ flags = clk_enable_lock(clk->core);
clk->core->ops = &clk_nodrv_ops;
- clk_enable_unlock(flags);
+ clk_enable_unlock(clk->core, flags);
if (ops->terminate)
ops->terminate(clk->core->hw);
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0f0cd01906b4..084b1f6fe321 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -32,6 +32,10 @@
#define CLK_OPS_PARENT_ENABLE BIT(12)
/* duty cycle call may be forwarded to the parent clock */
#define CLK_DUTY_CYCLE_PARENT BIT(13)
+/* The clock operation accesses registers over the MDIO bus, which may
+ * sleep, so the enable lock must be a mutex instead of a spinlock.
+ */
+#define CLK_ENABLE_MUTEX_LOCK BIT(14)
struct clk;
struct clk_hw;
--
2.34.1
Quoting Luo Jie (2023-08-01 01:53:50)
> Support clock controllers whose HW registers are accessed
> over the MDIO bus. A spinlock can't be used there because
> the MDIO operation may sleep.
>
> Add the flag CLK_ENABLE_MUTEX_LOCK to hint the clock
> framework to use a mutex instead of the spinlock.
Why can't you enable the MDIO bus clk in .prepare()?
On 8/2/2023 3:18 AM, Stephen Boyd wrote:
> Quoting Luo Jie (2023-08-01 01:53:50)
>> Support clock controllers whose HW registers are accessed
>> over the MDIO bus. A spinlock can't be used there because
>> the MDIO operation may sleep.
>>
>> Add the flag CLK_ENABLE_MUTEX_LOCK to hint the clock
>> framework to use a mutex instead of the spinlock.
>
> Why can't you enable the MDIO bus clk in .prepare()?
Thanks Stephen for the suggestion, let me check this and update the
patchset.
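
If the MDIO register access can be moved out of the enable path, the
new flag becomes unnecessary, since .prepare/.unprepare are already
allowed to sleep under prepare_lock. An untested sketch, where
qca8k_mii_update() is a hypothetical helper that gates the clock via
an MDIO write:

#include <linux/clk-provider.h>

static int qca8k_mii_update(struct clk_hw *hw, bool enable);

static int qca8k_clk_prepare(struct clk_hw *hw)
{
        /* MDIO read/modify/write of the gate register; may sleep */
        return qca8k_mii_update(hw, true);
}

static void qca8k_clk_unprepare(struct clk_hw *hw)
{
        qca8k_mii_update(hw, false);
}

static const struct clk_ops qca8k_clk_ops = {
        .prepare        = qca8k_clk_prepare,
        .unprepare      = qca8k_clk_unprepare,
};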