This series implements busfreq, a framework used in NXP's
tree to scale the interconnect and dram frequencies.
In the vendor tree, device drivers request a performance
level, which is used to scale the frequencies.
This series implements it using the interconnect framework.
Device drivers request bandwidth, which is used by busfreq
to determine a performance level and then scale the frequencies.
Busfreq is quite generic. It could be used for any i.MX SoC.
A busfreq platform driver just has to define a list of
interconnect nodes and some OPPs.
This series is sent as RFC mostly because the current i.MX SoC
support won't benefit from the busfreq framework yet: the clock
drivers don't support interconnect / dram frequency scaling.
As an example, this series implements busfreq for i.MX8MM, whose
upstreaming is in progress. Because it relies on ATF to do the
frequency scaling, it won't be hard to make it work.
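For reference, a consumer driver would express its needs roughly as in the
sketch below; the "dram" path name and the bandwidth values are only
illustrative and not part of this series.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interconnect.h>

/* Illustrative only: get the consumer's "dram" interconnect path and
 * request 200000 kbps average / 400000 kbps peak bandwidth on it.
 */
static int example_request_bw(struct device *dev)
{
        struct icc_path *path;
        int ret;

        path = of_icc_get(dev, "dram");
        if (IS_ERR(path))
                return PTR_ERR(path);

        ret = icc_set_bw(path, 200000, 400000);
        if (ret)
                icc_put(path);

        return ret;
}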
Alexandre Bailon (3):
drivers: interconnect: Add a driver for i.MX SoC
drivers: interconnect: imx: Add support of i.MX8MM
dt-bindings: interconnect: Document fsl,busfreq-imx8mm bindings
.../bindings/interconnect/imx8mm.txt | 24 +
drivers/interconnect/Kconfig | 1 +
drivers/interconnect/Makefile | 1 +
drivers/interconnect/imx/Kconfig | 17 +
drivers/interconnect/imx/Makefile | 2 +
drivers/interconnect/imx/busfreq-imx8mm.c | 132 ++++
drivers/interconnect/imx/busfreq.c | 570 ++++++++++++++++++
drivers/interconnect/imx/busfreq.h | 123 ++++
include/dt-bindings/interconnect/imx8mm.h | 37 ++
9 files changed, 907 insertions(+)
create mode 100644 Documentation/devicetree/bindings/interconnect/imx8mm.txt
create mode 100644 drivers/interconnect/imx/Kconfig
create mode 100644 drivers/interconnect/imx/Makefile
create mode 100644 drivers/interconnect/imx/busfreq-imx8mm.c
create mode 100644 drivers/interconnect/imx/busfreq.c
create mode 100644 drivers/interconnect/imx/busfreq.h
create mode 100644 include/dt-bindings/interconnect/imx8mm.h
--
2.19.2
This adds a platform driver for the i.MX8MM SoC.
Signed-off-by: Alexandre Bailon <[email protected]>
---
drivers/interconnect/imx/Kconfig | 4 +
drivers/interconnect/imx/Makefile | 1 +
drivers/interconnect/imx/busfreq-imx8mm.c | 132 ++++++++++++++++++++++
include/dt-bindings/interconnect/imx8mm.h | 37 ++++++
4 files changed, 174 insertions(+)
create mode 100644 drivers/interconnect/imx/busfreq-imx8mm.c
create mode 100644 include/dt-bindings/interconnect/imx8mm.h
diff --git a/drivers/interconnect/imx/Kconfig b/drivers/interconnect/imx/Kconfig
index afd7b71bbd82..b569d40e5ca0 100644
--- a/drivers/interconnect/imx/Kconfig
+++ b/drivers/interconnect/imx/Kconfig
@@ -11,3 +11,7 @@ config BUSFREQ
A generic interconnect driver that can be used for any i.MX SoC.
This provides a way to register masters and slaves, and a set of OPPs
to use when one or more masters are in use.
+
+config BUSFREQ_IMX8MM
+ bool "i.MX8MM busfreq driver"
+ depends on BUSFREQ
diff --git a/drivers/interconnect/imx/Makefile b/drivers/interconnect/imx/Makefile
index fea647183815..a92fea6e042d 100644
--- a/drivers/interconnect/imx/Makefile
+++ b/drivers/interconnect/imx/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_BUSFREQ) += busfreq.o
+obj-$(CONFIG_BUSFREQ_IMX8MM) += busfreq-imx8mm.o
diff --git a/drivers/interconnect/imx/busfreq-imx8mm.c b/drivers/interconnect/imx/busfreq-imx8mm.c
new file mode 100644
index 000000000000..c3b10a49dc29
--- /dev/null
+++ b/drivers/interconnect/imx/busfreq-imx8mm.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Author: Alexandre Bailon <[email protected]>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/interconnect/imx8mm.h>
+
+#include "busfreq.h"
+
+static struct busfreq_icc_node imx8mm_icc_nodes[] = {
+ /* NOC */
+ DEFINE_BUS_MASTER("A53-0", IMX8MM_CPU_0, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("A53-1", IMX8MM_CPU_1, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("A53-2", IMX8MM_CPU_2, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("A53-3", IMX8MM_CPU_3, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("VPU H1", IMX8MM_VPU_H1, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("VPU G1", IMX8MM_VPU_G1, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("VPU G2", IMX8MM_VPU_G2, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("MIPI", IMX8MM_MIPI, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("USB-1", IMX8MM_USB_1, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("USB-2", IMX8MM_USB_2, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("GPU", IMX8MM_GPU, IMX8MM_NOC),
+ DEFINE_BUS_MASTER("PCIE", IMX8MM_PCIE, IMX8MM_NOC),
+ DEFINE_BUS_SLAVE("DRAM", IMX8MM_DRAM),
+ DEFINE_BUS_INTERCONNECT("NOC", IMX8MM_NOC, 1, IMX8MM_DRAM),
+
+ /* PL301 */
+ DEFINE_BUS_MASTER("SAI-1", IMX8MM_SAI1, IMX8MM_PL301),
+ DEFINE_BUS_MASTER("SAI-2", IMX8MM_SAI2, IMX8MM_PL301),
+ DEFINE_BUS_MASTER("SAI-3", IMX8MM_SAI3, IMX8MM_PL301),
+ DEFINE_BUS_MASTER("SAI-4", IMX8MM_SAI4, IMX8MM_PL301),
+ DEFINE_BUS_MASTER("SAI-5", IMX8MM_SAI5, IMX8MM_PL301),
+ DEFINE_BUS_MASTER("SAI-6", IMX8MM_SAI6, IMX8MM_PL301),
+ DEFINE_BUS_MASTER("SPDIF", IMX8MM_SPDIF, IMX8MM_PL301),
+ DEFINE_BUS_MASTER("FEC", IMX8MM_FEC, IMX8MM_PL301),
+ DEFINE_BUS_INTERCONNECT("PL301", IMX8MM_PL301, 1, IMX8MM_NOC),
+};
+
+static struct busfreq_opp_clk imx8mm_low_freq_clks[] = {
+ DEFINE_OPP_CLOCK("dram-alt", 100000000),
+ DEFINE_OPP_CLOCK("dram-apb", 40000000),
+ DEFINE_OPP_CLOCK("dram-core", 25000000),
+ DEFINE_OPP_CLOCK("noc", 150000000),
+ DEFINE_OPP_CLOCK("ahb", 22222222),
+ DEFINE_OPP_CLOCK("axi", 24000000),
+};
+
+static struct busfreq_opp_clk imx8mm_audio_freq_clks[] = {
+ DEFINE_OPP_CLOCK("dram-alt", 400000000),
+ DEFINE_OPP_CLOCK("dram-apb", 40000000),
+ DEFINE_OPP_CLOCK("dram-core", 100000000),
+ DEFINE_OPP_CLOCK("noc", 150000000),
+ DEFINE_OPP_CLOCK("ahb", 22222222),
+ DEFINE_OPP_CLOCK("axi", 24000000),
+};
+
+static struct busfreq_opp_bw imx8mm_audio_freq_nodes[] = {
+ DEFINE_OPP_NODE(IMX8MM_SAI1),
+ DEFINE_OPP_NODE(IMX8MM_SAI2),
+ DEFINE_OPP_NODE(IMX8MM_SAI3),
+ DEFINE_OPP_NODE(IMX8MM_SAI4),
+ DEFINE_OPP_NODE(IMX8MM_SAI5),
+ DEFINE_OPP_NODE(IMX8MM_SAI6),
+ DEFINE_OPP_NODE(IMX8MM_SPDIF),
+};
+
+static struct busfreq_opp_clk imx8mm_high_freq_clks[] = {
+ DEFINE_OPP_CLOCK("dram-apb", 800000000),
+ DEFINE_OPP_CLOCK("dram-core", 750000000),
+ DEFINE_OPP_CLOCK("noc", 750000000),
+ DEFINE_OPP_CLOCK("ahb", 133333333),
+ DEFINE_OPP_CLOCK("axi", 333000000),
+};
+
+static struct busfreq_opp_bw imx8mm_high_freq_nodes[] = {
+ DEFINE_OPP_NODE(IMX8MM_SAI1),
+ DEFINE_OPP_NODE(IMX8MM_SAI2),
+ DEFINE_OPP_NODE(IMX8MM_SAI3),
+ DEFINE_OPP_NODE(IMX8MM_SAI4),
+ DEFINE_OPP_NODE(IMX8MM_SAI5),
+ DEFINE_OPP_NODE(IMX8MM_SAI6),
+ DEFINE_OPP_NODE(IMX8MM_SPDIF),
+ DEFINE_OPP_NODE(IMX8MM_MIPI),
+};
+
+static struct busfreq_plat_opp imx8mm_opps[] = {
+ DEFINE_OPP_NO_NODES(imx8mm_low_freq_clks, false),
+ DEFINE_OPP(imx8mm_audio_freq_clks, imx8mm_audio_freq_nodes, false),
+ DEFINE_OPP(imx8mm_high_freq_clks, imx8mm_high_freq_nodes, true),
+};
+
+static int imx8mm_busfreq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = busfreq_register(pdev, imx8mm_icc_nodes,
+ ARRAY_SIZE(imx8mm_icc_nodes),
+ imx8mm_opps, ARRAY_SIZE(imx8mm_opps));
+ return ret;
+}
+
+static int imx8mm_busfreq_remove(struct platform_device *pdev)
+{
+ return busfreq_unregister(pdev);
+}
+
+static const struct of_device_id busfreq_of_match[] = {
+ { .compatible = "fsl,busfreq-imx8mm" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, busfreq_of_match);
+
+static struct platform_driver imx8mm_busfreq_driver = {
+ .probe = imx8mm_busfreq_probe,
+ .remove = imx8mm_busfreq_remove,
+ .driver = {
+ .name = "busfreq-imx8mm",
+ .of_match_table = busfreq_of_match,
+ },
+};
+
+builtin_platform_driver(imx8mm_busfreq_driver);
+MODULE_AUTHOR("Alexandre Bailon <[email protected]>");
+MODULE_LICENSE("GPL v2");
diff --git a/include/dt-bindings/interconnect/imx8mm.h b/include/dt-bindings/interconnect/imx8mm.h
new file mode 100644
index 000000000000..4318ed319edc
--- /dev/null
+++ b/include/dt-bindings/interconnect/imx8mm.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Author: Alexandre Bailon <[email protected]>
+ */
+
+#ifndef __IMX8MM_INTERCONNECT_IDS_H
+#define __IMX8MM_INTERCONNECT_IDS_H
+
+#define IMX8MM_NOC 0
+#define IMX8MM_CPU_0 1
+#define IMX8MM_CPU_1 2
+#define IMX8MM_CPU_2 3
+#define IMX8MM_CPU_3 4
+#define IMX8MM_VPU_H1 5
+#define IMX8MM_VPU_G1 6
+#define IMX8MM_VPU_G2 7
+#define IMX8MM_MIPI 8
+#define IMX8MM_USB_1 9
+#define IMX8MM_USB_2 10
+#define IMX8MM_PCIE 11
+#define IMX8MM_GPU 12
+#define IMX8MM_DRAM 13
+
+#define IMX8MM_PL301 100
+#define IMX8MM_SAI1 101
+#define IMX8MM_SAI2 102
+#define IMX8MM_SAI3 103
+#define IMX8MM_SAI4 104
+#define IMX8MM_SAI5 105
+#define IMX8MM_SAI6 106
+#define IMX8MM_SPDIF 107
+#define IMX8MM_FEC 108
+
+#endif /* __IMX8MM_INTERCONNECT_IDS_H */
--
2.19.2
Document the device tree bindings for the i.MX8MM SoC interconnect (busfreq) driver.
Signed-off-by: Alexandre Bailon <[email protected]>
---
.../bindings/interconnect/imx8mm.txt | 24 +++++++++++++++++++
1 file changed, 24 insertions(+)
create mode 100644 Documentation/devicetree/bindings/interconnect/imx8mm.txt
diff --git a/Documentation/devicetree/bindings/interconnect/imx8mm.txt b/Documentation/devicetree/bindings/interconnect/imx8mm.txt
new file mode 100644
index 000000000000..81f0cf134de2
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/imx8mm.txt
@@ -0,0 +1,24 @@
+i.MX busfreq driver binding
+----------------------------------------------------
+
+Required properties :
+- compatible : must be "fsl,busfreq-imx8mm"
+- #interconnect-cells : should contain 1
+- clocks : list of phandles and specifiers to all interconnect bus clocks
+- clock-names : clock names must include "dram-alt", "dram-apb", "dram-core",
+ "noc", "ahb" and "axi"
+
+Examples:
+
+ icc0: icc {
+ compatible = "fsl,busfreq-imx8mm";
+ #interconnect-cells = <1>;
+ clocks = <&clk IMX8MM_CLK_DRAM_ALT>,
+ <&clk IMX8MM_CLK_DRAM_APB>,
+ <&clk IMX8MM_CLK_DRAM_CORE>,
+ <&clk IMX8MM_CLK_NOC_DIV>,
+ <&clk IMX8MM_CLK_AHB_DIV>,
+ <&clk IMX8MM_CLK_MAIN_AXI>;
+ clock-names = "dram-alt", "dram-apb", "dram-core", "noc",
+ "ahb", "axi";
+ };
--
2.19.2
This adds support for i.MX SoCs to the interconnect framework.
This is based on busfreq, from NXP's tree.
It is generic enough to add interconnect framework support to any
i.MX SoC, and could even be used for some other SoCs.
Thanks to the interconnect framework, device drivers request
bandwidth, which is used by busfreq to determine a performance level
and then scale the frequencies.
Busfreq platform drivers just have to register interconnect nodes
and OPPs.
Signed-off-by: Alexandre Bailon <[email protected]>
---
drivers/interconnect/Kconfig | 1 +
drivers/interconnect/Makefile | 1 +
drivers/interconnect/imx/Kconfig | 13 +
drivers/interconnect/imx/Makefile | 1 +
drivers/interconnect/imx/busfreq.c | 570 +++++++++++++++++++++++++++++
drivers/interconnect/imx/busfreq.h | 123 +++++++
6 files changed, 709 insertions(+)
create mode 100644 drivers/interconnect/imx/Kconfig
create mode 100644 drivers/interconnect/imx/Makefile
create mode 100644 drivers/interconnect/imx/busfreq.c
create mode 100644 drivers/interconnect/imx/busfreq.h
diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig
index 07a8276fa35a..99955906bea8 100644
--- a/drivers/interconnect/Kconfig
+++ b/drivers/interconnect/Kconfig
@@ -11,5 +11,6 @@ menuconfig INTERCONNECT
if INTERCONNECT
source "drivers/interconnect/qcom/Kconfig"
+source "drivers/interconnect/imx/Kconfig"
endif
diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile
index 28f2ab0824d5..20a13b7eb37f 100644
--- a/drivers/interconnect/Makefile
+++ b/drivers/interconnect/Makefile
@@ -4,3 +4,4 @@ icc-core-objs := core.o
obj-$(CONFIG_INTERCONNECT) += icc-core.o
obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/
+obj-$(CONFIG_INTERCONNECT_IMX) += imx/
diff --git a/drivers/interconnect/imx/Kconfig b/drivers/interconnect/imx/Kconfig
new file mode 100644
index 000000000000..afd7b71bbd82
--- /dev/null
+++ b/drivers/interconnect/imx/Kconfig
@@ -0,0 +1,13 @@
+config INTERCONNECT_IMX
+ bool "i.MX interconnect drivers"
+ depends on ARCH_MXC || ARCH_MXC_ARM64 || COMPILE_TEST
+ help
+ Support for i.MX interconnect hardware.
+
+config BUSFREQ
+ bool "busfreq interconnect driver"
+ depends on INTERCONNECT_IMX
+ help
+ A generic interconnect driver that can be used for any i.MX SoC.
+ This provides a way to register masters and slaves, and a set of OPPs
+ to use when one or more masters are in use.
diff --git a/drivers/interconnect/imx/Makefile b/drivers/interconnect/imx/Makefile
new file mode 100644
index 000000000000..fea647183815
--- /dev/null
+++ b/drivers/interconnect/imx/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_BUSFREQ) += busfreq.o
diff --git a/drivers/interconnect/imx/busfreq.c b/drivers/interconnect/imx/busfreq.c
new file mode 100644
index 000000000000..af461f788468
--- /dev/null
+++ b/drivers/interconnect/imx/busfreq.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Author: Alexandre Bailon <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+
+#include "busfreq.h"
+
+/*
+ * struct busfreq_opp_node - Describe the minimum bandwidth required by a node
+ * to enable the opp
+ * @icc_node: icc_node to test
+ * @min_avg_bw: minimum average bandwidth in kbps required to enable opp
+ * @min_peak_bw: minimum peak bandwidth in kbps required to enable opp
+ */
+struct busfreq_opp_node {
+ struct icc_node *icc_node;
+ u32 min_avg_bw;
+ u32 min_peak_bw;
+};
+
+/*
+ * struct busfreq_opp - Describe an opp
+ * This is used to configure multiple clocks at once, respecting the
+ * hardware and performance requirements.
+ * @clks_count: number of clocks to configure
+ * @clks: array of clocks
+ * @rates: array of rates, one to apply to each clock when the opp is enabled
+ * @perf_level: used to select the opp that allows the lowest power
+ * consumption when two or more opps satisfy the performance
+ * requirements
+ * @node: entry in the list of opps
+ * @nodes_count: number of opp nodes
+ * @nodes: array of opp nodes, used to check node by node whether the opp
+ * satisfies the required performance
+ */
+struct busfreq_opp {
+ int clks_count;
+ struct clk **clks;
+ u64 *rates;
+ u32 perf_level;
+
+ struct list_head node;
+
+ int nodes_count;
+ struct busfreq_opp_node *nodes;
+};
+
+/*
+ * struct busfreq_icc_desc - Hold data required to control the interconnects
+ * @dev: device pointer for the overall interconnect
+ * @opps: list of opps
+ * @default_opp: the opp to use when the system is in special states
+ * (boot, suspend, resume, shutdown)
+ * @current_opp: the opp currently in use
+ * @opp_locked: prevents the opp from changing while this is set
+ * @pm_notifier: used to set the default opp before suspend and to
+ * select the best one after resume
+ * @reboot_notifier: used to set the default opp before reboot
+ * @mutex: protects opp changes
+ */
+struct busfreq_icc_desc {
+ struct device *dev;
+
+ struct list_head opps;
+ struct busfreq_opp *default_opp;
+ struct busfreq_opp *current_opp;
+ bool opp_locked;
+
+ struct notifier_block pm_notifier;
+ struct notifier_block reboot_notifier;
+
+ struct mutex mutex;
+};
+
+static int busfreq_icc_aggregate(struct icc_node *node, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
+static int busfreq_set_opp_no_lock(struct busfreq_icc_desc *desc,
+ struct busfreq_opp *requested_opp)
+{
+ int ret;
+ int i;
+
+ if (!requested_opp && !desc->default_opp)
+ return -EINVAL;
+
+ if (!requested_opp)
+ requested_opp = desc->default_opp;
+
+ if (desc->current_opp == requested_opp)
+ return 0;
+
+ if (desc->opp_locked)
+ return -EBUSY;
+
+ for (i = 0; i < requested_opp->clks_count; i++) {
+ ret = clk_set_rate(requested_opp->clks[i],
+ requested_opp->rates[i]);
+ if (ret)
+ goto err;
+ }
+ desc->current_opp = requested_opp;
+
+ return 0;
+
+err:
+ dev_err(desc->dev, "Failed to set opp\n");
+ for (; i >= 0; i--)
+ clk_set_rate(desc->current_opp->clks[i],
+ desc->current_opp->rates[i]);
+ return ret;
+}
+
+static int busfreq_set_opp(struct busfreq_icc_desc *desc,
+ struct busfreq_opp *requested_opp)
+{
+ int ret;
+
+ mutex_lock(&desc->mutex);
+ ret = busfreq_set_opp_no_lock(desc, requested_opp);
+ mutex_unlock(&desc->mutex);
+
+ return ret;
+}
+
+static int busfreq_opp_bw_gt(struct busfreq_opp_node *opp_node,
+ u32 avg_bw, u32 peak_bw)
+{
+ if (!opp_node)
+ return 0;
+ if (opp_node->min_avg_bw == BUSFREQ_UNDEFINED_BW) {
+ if (avg_bw)
+ return 2;
+ else
+ return 1;
+ }
+ if (opp_node->min_peak_bw == BUSFREQ_UNDEFINED_BW) {
+ if (peak_bw)
+ return 2;
+ else
+ return 1;
+ }
+ if (avg_bw >= opp_node->min_avg_bw)
+ return 1;
+ if (peak_bw >= opp_node->min_peak_bw)
+ return 1;
+ return 0;
+}
+
+static struct busfreq_opp *busfreq_cmp_bw_opp(struct busfreq_icc_desc *desc,
+ struct busfreq_opp *opp1,
+ struct busfreq_opp *opp2)
+{
+ int i;
+ int opp1_valid;
+ int opp2_valid;
+ int opp1_count = 0;
+ int opp2_count = 0;
+
+ if (!opp1 && !opp2)
+ return desc->current_opp;
+
+ if (!opp1)
+ return opp2;
+
+ if (!opp2)
+ return opp1;
+
+ if (opp1 == opp2)
+ return opp1;
+
+ for (i = 0; i < opp1->nodes_count; i++) {
+ struct busfreq_opp_node *opp_node1, *opp_node2;
+ struct icc_node *icc_node;
+ u32 avg_bw;
+ u32 peak_bw;
+
+ opp_node1 = &opp1->nodes[i];
+ opp_node2 = &opp2->nodes[i];
+ icc_node = opp_node1->icc_node;
+ avg_bw = icc_node->avg_bw;
+ peak_bw = icc_node->peak_bw;
+
+ opp1_valid = busfreq_opp_bw_gt(opp_node1, avg_bw, peak_bw);
+ opp2_valid = busfreq_opp_bw_gt(opp_node2, avg_bw, peak_bw);
+
+ if (opp1_valid == opp2_valid && opp1_valid == 1) {
+ if (opp_node1->min_avg_bw > opp_node2->min_avg_bw &&
+ opp_node1->min_avg_bw != BUSFREQ_UNDEFINED_BW)
+ opp1_valid++;
+ if (opp_node1->min_avg_bw < opp_node2->min_avg_bw &&
+ opp_node2->min_avg_bw != BUSFREQ_UNDEFINED_BW)
+ opp2_valid++;
+ }
+
+ opp1_count += opp1_valid;
+ opp2_count += opp2_valid;
+ }
+
+ if (opp1_count > opp2_count)
+ return opp1;
+ if (opp1_count < opp2_count)
+ return opp2;
+ return opp1->perf_level >= opp2->perf_level ? opp2 : opp1;
+}
+
+static int busfreq_set_best_opp(struct busfreq_icc_desc *desc)
+{
+ struct busfreq_opp *opp, *best_opp = desc->current_opp;
+
+ list_for_each_entry(opp, &desc->opps, node)
+ best_opp = busfreq_cmp_bw_opp(desc, opp, best_opp);
+ return busfreq_set_opp(desc, best_opp);
+}
+
+static int busfreq_set_locked_opp(struct busfreq_icc_desc *desc,
+ struct busfreq_opp *requested_opp)
+{
+ int ret;
+
+ mutex_lock(&desc->mutex);
+ ret = busfreq_set_opp_no_lock(desc, requested_opp);
+ if (ret)
+ goto err;
+ desc->opp_locked = true;
+err:
+ mutex_unlock(&desc->mutex);
+
+ return ret;
+}
+
+static int busfreq_unlock_opp(struct busfreq_icc_desc *desc)
+{
+ mutex_lock(&desc->mutex);
+ desc->opp_locked = false;
+ mutex_unlock(&desc->mutex);
+
+ return busfreq_set_best_opp(desc);
+}
+
+static int busfreq_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct busfreq_icc_desc *desc = src->provider->data;
+
+ if (!dst->num_links)
+ return busfreq_set_best_opp(desc);
+
+ return 0;
+}
+
+static int busfreq_pm_notify(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct busfreq_icc_desc *desc;
+
+ desc = container_of(nb, struct busfreq_icc_desc, pm_notifier);
+ if (event == PM_SUSPEND_PREPARE)
+ busfreq_set_locked_opp(desc, desc->default_opp);
+ else if (event == PM_POST_SUSPEND)
+ busfreq_unlock_opp(desc);
+
+ return NOTIFY_OK;
+}
+
+static int busfreq_reboot_notify(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct busfreq_icc_desc *desc;
+
+ desc = container_of(nb, struct busfreq_icc_desc, reboot_notifier);
+ busfreq_set_locked_opp(desc, desc->default_opp);
+
+ return NOTIFY_OK;
+}
+
+static struct icc_node *busfreq_icc_node_add(struct icc_provider *provider,
+ int id, const char *name)
+{
+ struct busfreq_icc_desc *desc = provider->data;
+ struct device *dev = desc->dev;
+ struct icc_node *icc_node;
+
+ icc_node = icc_node_create(id);
+ if (IS_ERR(icc_node)) {
+ dev_err(dev, "Failed to create node %d\n", id);
+ return icc_node;
+ }
+
+ if (icc_node->data)
+ return icc_node;
+
+ icc_node->name = name;
+ icc_node->data = icc_node;
+ icc_node_add(icc_node, provider);
+
+ return icc_node;
+}
+
+static struct icc_node *busfreq_icc_node_get(struct icc_provider *provider,
+ int id)
+{
+ return busfreq_icc_node_add(provider, id, NULL);
+}
+
+static void busfreq_unregister_nodes(struct icc_provider *provider)
+{
+ struct icc_node *icc_node, *tmp;
+
+ list_for_each_entry_safe(icc_node, tmp, &provider->nodes, node_list)
+ icc_node_destroy(icc_node->id);
+}
+
+static int busfreq_register_nodes(struct icc_provider *provider,
+ struct busfreq_icc_node *busfreq_nodes,
+ int count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ struct icc_node *icc_node;
+ size_t j;
+
+ icc_node = busfreq_icc_node_add(provider,
+ busfreq_nodes[i].id,
+ busfreq_nodes[i].name);
+ if (IS_ERR(icc_node)) {
+ ret = PTR_ERR(icc_node);
+ goto err;
+ }
+
+ for (j = 0; j < busfreq_nodes[i].num_links; j++)
+ icc_link_create(icc_node, busfreq_nodes[i].links[j]);
+ }
+
+ return 0;
+
+err:
+ busfreq_unregister_nodes(provider);
+
+ return ret;
+}
+
+static struct busfreq_opp *busfreq_opp_alloc(struct icc_provider *provider,
+ int count)
+{
+ struct busfreq_icc_desc *desc = provider->data;
+ struct device *dev = desc->dev;
+ struct busfreq_opp *opp;
+ struct icc_node *icc_node;
+
+ opp = devm_kzalloc(dev, sizeof(*opp), GFP_KERNEL);
+ if (!opp)
+ return ERR_PTR(-ENOMEM);
+
+ opp->clks_count = count;
+ opp->clks = devm_kzalloc(dev, sizeof(struct clk *) * count, GFP_KERNEL);
+ if (!opp->clks)
+ return ERR_PTR(-ENOMEM);
+
+ opp->rates = devm_kzalloc(dev, sizeof(u64) * count, GFP_KERNEL);
+ if (!opp->rates)
+ return ERR_PTR(-ENOMEM);
+
+ count = 0;
+ list_for_each_entry(icc_node, &provider->nodes, node_list)
+ count++;
+
+ opp->nodes = devm_kzalloc(dev, sizeof(*opp->nodes) * count, GFP_KERNEL);
+ if (!opp->nodes)
+ return ERR_PTR(-ENOMEM);
+ opp->nodes_count = count;
+
+ return opp;
+}
+
+static int busfreq_init_opp(struct icc_provider *provider,
+ struct busfreq_opp *opp,
+ struct busfreq_plat_opp *plat_opp)
+{
+ struct busfreq_icc_desc *desc = provider->data;
+ struct device *dev = desc->dev;
+ struct busfreq_opp_node *node;
+ struct icc_node *icc_node;
+ int i, j;
+
+ opp->perf_level = 0;
+ for (i = 0; i < opp->clks_count; i++) {
+ opp->clks[i] = devm_clk_get(dev, plat_opp->clks[i].name);
+ if (IS_ERR(opp->clks[i])) {
+ dev_err(dev, "Failed to get clock %s\n",
+ plat_opp->clks[i].name);
+ return PTR_ERR(opp->clks[i]);
+ }
+ opp->rates[i] = plat_opp->clks[i].rate;
+ opp->perf_level += opp->rates[i] >> 10;
+ }
+
+ i = 0;
+ list_for_each_entry(icc_node, &provider->nodes, node_list) {
+ node = &opp->nodes[i++];
+ node->icc_node = icc_node;
+ }
+
+ for (i = 0; i < plat_opp->nodes_count; i++) {
+ icc_node = busfreq_icc_node_get(provider,
+ plat_opp->nodes[i].id);
+ if (IS_ERR_OR_NULL(icc_node))
+ return -EINVAL;
+
+ for (j = 0, node = &opp->nodes[j]; j < opp->nodes_count;
+ j++, node = &opp->nodes[j]) {
+ if (node->icc_node == icc_node) {
+ node->min_avg_bw = BUSFREQ_UNDEFINED_BW;
+ node->min_peak_bw = BUSFREQ_UNDEFINED_BW;
+ }
+ }
+ }
+
+ INIT_LIST_HEAD(&opp->node);
+
+ return 0;
+}
+
+static int busfreq_register_opps(struct device *dev,
+ struct icc_provider *provider,
+ struct busfreq_plat_opp *busfreq_opps,
+ int count)
+{
+ struct busfreq_icc_desc *desc = provider->data;
+ struct busfreq_opp *opp;
+ int ret;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ opp = busfreq_opp_alloc(provider, busfreq_opps[i].clks_count);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ ret = busfreq_init_opp(provider, opp, &busfreq_opps[i]);
+ if (ret)
+ return ret;
+
+ if (busfreq_opps[i].default_opp)
+ desc->default_opp = opp;
+
+ list_add(&opp->node, &desc->opps);
+ }
+
+ return 0;
+}
+
+static void busfreq_unregister_opps(struct icc_provider *provider)
+{
+ struct busfreq_icc_desc *desc = provider->data;
+ struct device *dev = desc->dev;
+ struct busfreq_opp *opp, *tmp_opp;
+
+ list_for_each_entry_safe(opp, tmp_opp, &desc->opps, node) {
+ list_del(&opp->node);
+ devm_kfree(dev, opp->nodes);
+ devm_kfree(dev, opp->clks);
+ devm_kfree(dev, opp->rates);
+ devm_kfree(dev, opp);
+ }
+}
+
+int busfreq_register(struct platform_device *pdev,
+ struct busfreq_icc_node *busfreq_nodes, int nodes_count,
+ struct busfreq_plat_opp *busfreq_opps, int opps_count)
+{
+ struct device *dev = &pdev->dev;
+ struct busfreq_icc_desc *desc;
+ struct icc_provider *provider;
+ int ret;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ desc->dev = dev;
+ mutex_init(&desc->mutex);
+ INIT_LIST_HEAD(&desc->opps);
+
+ provider = devm_kzalloc(dev, sizeof(*provider), GFP_KERNEL);
+ if (!provider)
+ return -ENOMEM;
+ provider->set = busfreq_icc_set;
+ provider->aggregate = busfreq_icc_aggregate;
+ provider->data = desc;
+ platform_set_drvdata(pdev, provider);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ ret = busfreq_register_nodes(provider, busfreq_nodes, nodes_count);
+ if (ret) {
+ dev_err(dev, "error adding interconnect nodes\n");
+ goto provider_del;
+ }
+
+ ret = busfreq_register_opps(dev, provider, busfreq_opps, opps_count);
+ if (ret) {
+ dev_err(dev, "error adding busfreq opp\n");
+ goto unregister_nodes;
+ }
+
+ ret = busfreq_set_opp(desc, desc->default_opp);
+ if (ret)
+ goto unregister_opps;
+
+ desc->pm_notifier.notifier_call = busfreq_pm_notify;
+ register_pm_notifier(&desc->pm_notifier);
+
+ desc->reboot_notifier.notifier_call = busfreq_reboot_notify;
+ register_reboot_notifier(&desc->reboot_notifier);
+
+ return 0;
+
+unregister_opps:
+ busfreq_unregister_opps(provider);
+unregister_nodes:
+ busfreq_unregister_nodes(provider);
+provider_del:
+ icc_provider_del(provider);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(busfreq_register);
+
+int busfreq_unregister(struct platform_device *pdev)
+{
+ struct icc_provider *provider = platform_get_drvdata(pdev);
+ struct busfreq_icc_desc *desc = provider->data;
+ int ret;
+
+ unregister_reboot_notifier(&desc->reboot_notifier);
+ unregister_pm_notifier(&desc->pm_notifier);
+
+ ret = busfreq_set_opp(desc, desc->default_opp);
+ if (ret)
+ return ret;
+
+ icc_provider_del(provider);
+
+ busfreq_unregister_opps(provider);
+ busfreq_unregister_nodes(provider);
+ devm_kfree(&pdev->dev, desc);
+ devm_kfree(&pdev->dev, provider);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(busfreq_unregister);
diff --git a/drivers/interconnect/imx/busfreq.h b/drivers/interconnect/imx/busfreq.h
new file mode 100644
index 000000000000..a60481f10500
--- /dev/null
+++ b/drivers/interconnect/imx/busfreq.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interconnect framework driver for i.MX SoC
+ *
+ * Copyright (c) 2019, BayLibre
+ * Author: Alexandre Bailon <[email protected]>
+ */
+#ifndef __BUSFREQ_H
+#define __BUSFREQ_H
+
+#include <linux/kernel.h>
+
+#define BUSFREQ_MAX_LINKS 32
+#define BUSFREQ_UNDEFINED_BW 0xffffffff
+
+/*
+ * struct busfreq_icc_node - Describe an interconnect node
+ * @name: name of the node
+ * @id: a unique id to identify the node
+ * @links: an array of slave node ids
+ * @num_links: number of ids defined in links
+ */
+struct busfreq_icc_node {
+ char *name;
+ u16 id;
+ u16 links[BUSFREQ_MAX_LINKS];
+ u16 num_links;
+};
+
+/*
+ * struct busfreq_opp_clk - Clock name and rate to set for an opp
+ * @name: name of clock
+ * @rate: the rate to set when the opp is enabled
+ */
+struct busfreq_opp_clk {
+ char *name;
+ unsigned long rate;
+};
+
+/*
+ * struct busfreq_opp_bw - Describe a condition to meet to enable an opp
+ * @id: id of the node to test
+ * @avg_bw: minimum average bandwidth required to enable the opp, or
+ * ignored if set to BUSFREQ_UNDEFINED_BW
+ * @peak_bw: minimum peak bandwidth required to enable the opp, or
+ * ignored if set to BUSFREQ_UNDEFINED_BW
+ */
+struct busfreq_opp_bw {
+ u16 id;
+ u32 avg_bw;
+ u32 peak_bw;
+};
+
+/*
+ * struct busfreq_plat_opp - Describe an opp to register in busfreq
+ * @clks: array of clocks to configure when the opp is enabled
+ * @clks_count: number of clocks
+ * @nodes: array of opp nodes (conditions to meet for each node to enable the opp)
+ * @nodes_count: number of nodes
+ * @default_opp: use this opp as default opp if true
+ */
+struct busfreq_plat_opp {
+ struct busfreq_opp_clk *clks;
+ int clks_count;
+ struct busfreq_opp_bw *nodes;
+ int nodes_count;
+ bool default_opp;
+};
+
+#define DEFINE_BUS_INTERCONNECT(_name, _id, _numlinks, ...) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .num_links = _numlinks, \
+ .links = { __VA_ARGS__ }, \
+ }
+
+#define DEFINE_BUS_MASTER(_name, _id, _dest_id) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, 1, _dest_id)
+
+#define DEFINE_BUS_SLAVE(_name, _id) \
+ DEFINE_BUS_INTERCONNECT(_name, _id, 0)
+
+#define DEFINE_OPP_CLOCK(_name, _rate) \
+ { \
+ .name = _name, \
+ .rate = _rate, \
+ }
+
+#define DEFINE_OPP_BW(_id, _avg, _peak) \
+ { \
+ .id = _id, \
+ .avg_bw = _avg, \
+ .peak_bw = _peak, \
+ }
+
+#define DEFINE_OPP_NODE(_id) \
+ DEFINE_OPP_BW(_id, BUSFREQ_UNDEFINED_BW, BUSFREQ_UNDEFINED_BW)
+
+#define DEFINE_OPP(_clks, _nodes, _default) \
+ { \
+ .clks = _clks, \
+ .clks_count = ARRAY_SIZE(_clks), \
+ .nodes = _nodes, \
+ .nodes_count = ARRAY_SIZE(_nodes), \
+ .default_opp = _default, \
+ }
+
+#define DEFINE_OPP_NO_NODES(_clks, _default) \
+ { \
+ .clks = _clks, \
+ .clks_count = ARRAY_SIZE(_clks), \
+ .nodes = NULL, \
+ .nodes_count = 0, \
+ .default_opp = _default, \
+ }
+
+int busfreq_register(struct platform_device *pdev,
+ struct busfreq_icc_node *busfreq_nodes, int nodes_count,
+ struct busfreq_plat_opp *busfreq_opps, int opps_count);
+int busfreq_unregister(struct platform_device *pdev);
+
+#endif /* __BUSFREQ_H */
--
2.19.2
+Jacky and Leonard, Ranjani
Hi Alexandre,
> From: Alexandre Bailon [mailto:[email protected]]
>
> This series implements busfreq, a framework used in MXP's tree to scale the
> interconnect and dram frequencies.
> In the vendor tree, device's driver request for a performance level, which is
> used to scale the frequencies.
> This series implements it using the interconnect framework.
> Devices' driver request for bandwidth which is use by busfreq to determine a
> performance level, and then scale the frequency.
>
> Busfreq is quite generic. It could be used for any i.MX SoC.
> A busfreq platform driver just have to define a list of interconnect nodes, and
> some OPPs.
>
It's really great to see this patch series.
And it should be the right direction to head in for upstreaming busfreq support.
> This series is sent as RFC mostly because the current support of i.MX SoC won't
> benefit of busfreq framework, because the clocks' driver don't support
> interconnect / dram frequency scaling.
> As exemple, this series implements busfreq for i.MX8MM whose upstreaming
> is in progress. Because this relies on ATF to do the frequency scaling, it won't
> be hard make it work.
>
How can I test this patch series?
Any additional patches you can share with us?
Or what else we need to do to test it? We can help with it.
Regards
Dong Aisheng
> As exemple, this series implements busfreq for Alexandre Bailon (3):
> drivers: interconnect: Add a driver for i.MX SoC
> drivers: interconnect: imx: Add support of i.MX8MM
> dt-bindings: interconnect: Document fsl,busfreq-imx8mm bindings
>
> .../bindings/interconnect/imx8mm.txt | 24 +
> drivers/interconnect/Kconfig | 1 +
> drivers/interconnect/Makefile | 1 +
> drivers/interconnect/imx/Kconfig | 17 +
> drivers/interconnect/imx/Makefile | 2 +
> drivers/interconnect/imx/busfreq-imx8mm.c | 132 ++++
> drivers/interconnect/imx/busfreq.c | 570
> ++++++++++++++++++
> drivers/interconnect/imx/busfreq.h | 123 ++++
> include/dt-bindings/interconnect/imx8mm.h | 37 ++
> 9 files changed, 907 insertions(+)
> create mode 100644
> Documentation/devicetree/bindings/interconnect/imx8mm.txt
> create mode 100644 drivers/interconnect/imx/Kconfig create mode
> 100644 drivers/interconnect/imx/Makefile create mode 100644
> drivers/interconnect/imx/busfreq-imx8mm.c
> create mode 100644 drivers/interconnect/imx/busfreq.c
> create mode 100644 drivers/interconnect/imx/busfreq.h
> create mode 100644 include/dt-bindings/interconnect/imx8mm.h
>
> --
> 2.19.2
Hi Aisheng
On 3/15/19 3:39 AM, Aisheng Dong wrote:
> +Jacky and Leonard, Ranjani
>
> Hi Alexandre,
>
>> From: Alexandre Bailon [mailto:[email protected]]
>>
>> This series implements busfreq, a framework used in MXP's tree to scale the
>> interconnect and dram frequencies.
>> In the vendor tree, device's driver request for a performance level, which is
>> used to scale the frequencies.
>> This series implements it using the interconnect framework.
>> Devices' driver request for bandwidth which is use by busfreq to determine a
>> performance level, and then scale the frequency.
>>
>> Busfreq is quite generic. It could be used for any i.MX SoC.
>> A busfreq platform driver just have to define a list of interconnect nodes, and
>> some OPPs.
>>
>
> It's really great to see this patch series.
> And it should be the correct direction we're heading to upstream busfreq support.
>
>> This series is sent as RFC mostly because the current support of i.MX SoC won't
>> benefit of busfreq framework, because the clocks' driver don't support
>> interconnect / dram frequency scaling.
>> As exemple, this series implements busfreq for i.MX8MM whose upstreaming
>> is in progress. Because this relies on ATF to do the frequency scaling, it won't
>> be hard make it work.
>>
>
> How can I test this patch series?
> Any additional patches you can share with us?
> Or what else we need to do to test it? We can help with it.
Many other patches will be required to test the series.
There are a couple of patches that update i.MX device drivers to
request bandwidth (doing something similar to bus_freq_request and
bus_freq_release).
In addition, a patch that allows scaling the DRAM frequency using the
CCF is required. I'm still working on this patch. It works correctly
on 4.14, but since I rebased it on linux-next/master it has stopped
working. I'm trying to fix it now.
I will share all these patches ASAP.
Best Regards,
Alexandre
>
> Regards
> Dong Aisheng
>
>> As exemple, this series implements busfreq for Alexandre Bailon (3):
>> drivers: interconnect: Add a driver for i.MX SoC
>> drivers: interconnect: imx: Add support of i.MX8MM
>> dt-bindings: interconnect: Document fsl,busfreq-imx8mm bindings
>>
>> .../bindings/interconnect/imx8mm.txt | 24 +
>> drivers/interconnect/Kconfig | 1 +
>> drivers/interconnect/Makefile | 1 +
>> drivers/interconnect/imx/Kconfig | 17 +
>> drivers/interconnect/imx/Makefile | 2 +
>> drivers/interconnect/imx/busfreq-imx8mm.c | 132 ++++
>> drivers/interconnect/imx/busfreq.c | 570
>> ++++++++++++++++++
>> drivers/interconnect/imx/busfreq.h | 123 ++++
>> include/dt-bindings/interconnect/imx8mm.h | 37 ++
>> 9 files changed, 907 insertions(+)
>> create mode 100644
>> Documentation/devicetree/bindings/interconnect/imx8mm.txt
>> create mode 100644 drivers/interconnect/imx/Kconfig create mode
>> 100644 drivers/interconnect/imx/Makefile create mode 100644
>> drivers/interconnect/imx/busfreq-imx8mm.c
>> create mode 100644 drivers/interconnect/imx/busfreq.c
>> create mode 100644 drivers/interconnect/imx/busfreq.h
>> create mode 100644 include/dt-bindings/interconnect/imx8mm.h
>>
>> --
>> 2.19.2
Hi Alex,
Some nitpick review comments below.
On Wed, Mar 13, 2019 at 12:33 PM Alexandre Bailon <[email protected]> wrote:
>
> This series implements busfreq, a framework used in MXP's
s/MXP/NXP/
> tree to scale the interconnect and dram frequencies.
> In the vendor tree, device's driver request for a
> performance level, which is used to scale the frequencies.
> This series implements it using the interconnect framework.
> Devices' driver request for bandwidth which is use by busfreq
> to determine a performance level, and then scale the frequency.
>
> Busfreq is quite generic. It could be used for any i.MX SoC.
> A busfreq platform driver just have to define a list of
> interconnect nodes, and some OPPs.
>
> This series is sent as RFC mostly because the current support
> of i.MX SoC won't benefit of busfreq framework, because the
> clocks' driver don't support interconnect / dram frequency
> scaling.
In your v2 cover letter could you post a link to a git branch that has
everything integrated that is needed to test the series? I guess this
is similar to what Aisheng asked for already.
> As exemple, this series implements busfreq for i.MX8MM whose
> upstreaming is in progress. Because this relies on ATF to
> do the frequency scaling, it won't be hard make it work.
It's not clear to me whether this series actually scales the dram
frequency based on what you said above. Is it just theoretical or do
you have it working with a pile of out-of-tree patches? Would be good
to include that pile of patches in your integration branch that I
suggested above.
Thanks,
Mike
>
> As exemple, this series implements busfreq for
> Alexandre Bailon (3):
> drivers: interconnect: Add a driver for i.MX SoC
> drivers: interconnect: imx: Add support of i.MX8MM
> dt-bindings: interconnect: Document fsl,busfreq-imx8mm bindings
>
> .../bindings/interconnect/imx8mm.txt | 24 +
> drivers/interconnect/Kconfig | 1 +
> drivers/interconnect/Makefile | 1 +
> drivers/interconnect/imx/Kconfig | 17 +
> drivers/interconnect/imx/Makefile | 2 +
> drivers/interconnect/imx/busfreq-imx8mm.c | 132 ++++
> drivers/interconnect/imx/busfreq.c | 570 ++++++++++++++++++
> drivers/interconnect/imx/busfreq.h | 123 ++++
> include/dt-bindings/interconnect/imx8mm.h | 37 ++
> 9 files changed, 907 insertions(+)
> create mode 100644 Documentation/devicetree/bindings/interconnect/imx8mm.txt
> create mode 100644 drivers/interconnect/imx/Kconfig
> create mode 100644 drivers/interconnect/imx/Makefile
> create mode 100644 drivers/interconnect/imx/busfreq-imx8mm.c
> create mode 100644 drivers/interconnect/imx/busfreq.c
> create mode 100644 drivers/interconnect/imx/busfreq.h
> create mode 100644 include/dt-bindings/interconnect/imx8mm.h
>
> --
> 2.19.2
--
Michael Turquette
CEO
BayLibre - At the Heart of Embedded Linux
http://baylibre.com/
Schedule a meeting: https://calendly.com/mturquette
Hi Mike,
On 3/15/19 5:17 PM, Michael Turquette wrote:
> Hi Alex,
>
> Some nitpick review comments below.
>
> On Wed, Mar 13, 2019 at 12:33 PM Alexandre Bailon <[email protected]> wrote:
>>
>> This series implements busfreq, a framework used in MXP's
>
> s/MXP/NXP/
>
>> tree to scale the interconnect and dram frequencies.
>> In the vendor tree, device's driver request for a
>> performance level, which is used to scale the frequencies.
>> This series implements it using the interconnect framework.
>> Devices' driver request for bandwidth which is use by busfreq
>> to determine a performance level, and then scale the frequency.
>>
>> Busfreq is quite generic. It could be used for any i.MX SoC.
>> A busfreq platform driver just have to define a list of
>> interconnect nodes, and some OPPs.
>>
>> This series is sent as RFC mostly because the current support
>> of i.MX SoC won't benefit of busfreq framework, because the
>> clocks' driver don't support interconnect / dram frequency
>> scaling.
>
> In your v2 cover letter could you post a link to a git branch that has
> everything integrated that is needed to test the series? I guess this
> is similar to what Aisheng asked for already.
I will do it.
>
>> As exemple, this series implements busfreq for i.MX8MM whose
>> upstreaming is in progress. Because this relies on ATF to
>> do the frequency scaling, it won't be hard make it work.
>
> It's not clear to me whether this series actual scales the dram
> frequency based on what you said above. Is it just theoretical or do
> you have it working with a pile of out-of-tree patches? Would be good
> to include that pile of patches in your integration branch that I
> suggested above.
The current series only introduces the generic busfreq driver and the
busfreq driver for the i.MX8MM.
As is, the i.MX8MM driver will just be loaded but do nothing, because
none of the device drivers have been updated to request bandwidth
using the interconnect framework.
In addition, the current i.MX8MM clock driver doesn't allow dram
frequency scaling, so if the busfreq driver tries, it will fail (which
should be harmless because any other clocks should be restored to
their previous rate).
My intent was to send a first draft of busfreq to get some feedback,
before sending a more complete and fully functional series.
Thanks,
Alexandre
>
> Thanks,
> Mike
>
>>
>> As exemple, this series implements busfreq for
>> Alexandre Bailon (3):
>> drivers: interconnect: Add a driver for i.MX SoC
>> drivers: interconnect: imx: Add support of i.MX8MM
>> dt-bindings: interconnect: Document fsl,busfreq-imx8mm bindings
>>
>> .../bindings/interconnect/imx8mm.txt | 24 +
>> drivers/interconnect/Kconfig | 1 +
>> drivers/interconnect/Makefile | 1 +
>> drivers/interconnect/imx/Kconfig | 17 +
>> drivers/interconnect/imx/Makefile | 2 +
>> drivers/interconnect/imx/busfreq-imx8mm.c | 132 ++++
>> drivers/interconnect/imx/busfreq.c | 570 ++++++++++++++++++
>> drivers/interconnect/imx/busfreq.h | 123 ++++
>> include/dt-bindings/interconnect/imx8mm.h | 37 ++
>> 9 files changed, 907 insertions(+)
>> create mode 100644 Documentation/devicetree/bindings/interconnect/imx8mm.txt
>> create mode 100644 drivers/interconnect/imx/Kconfig
>> create mode 100644 drivers/interconnect/imx/Makefile
>> create mode 100644 drivers/interconnect/imx/busfreq-imx8mm.c
>> create mode 100644 drivers/interconnect/imx/busfreq.c
>> create mode 100644 drivers/interconnect/imx/busfreq.h
>> create mode 100644 include/dt-bindings/interconnect/imx8mm.h
>>
>> --
>> 2.19.2
>
>
>
On 3/15/19 11:31 AM, Alexandre Bailon wrote:
>>> This series is sent as RFC mostly because the current support of i.MX SoC won't
>>> benefit of busfreq framework, because the clocks' driver don't support
>>> interconnect / dram frequency scaling.
>>> As exemple, this series implements busfreq for i.MX8MM whose upstreaming
>>> is in progress. Because this relies on ATF to do the frequency scaling, it won't
>>> be hard make it work.
>> How can I test this patch series?
>> Any additional patches you can share with us?
>> Or what else we need to do to test it? We can help with it.
> Many other patches will be required to test the series.
> There are a couple of patches that updates i.MX device drivers to
> request for bandwidth (does similar thing as bus_freq_request and
> bus_freq_release).
The interconnect framework asks for bandwidth in bytes/second but in NXP
tree most requests are of the form "request_bus_freq(BUS_FREQ_HIGH);".
In many cases (ethernet) it doesn't seem you can calculate a specific
bandwidth usefully.
Instead of asking for "infinite bandwidth zero latency" to force
everything to "high" it would be nicer to "request an opp".
Power-domain bindings mentioned that consumer-devices can specify a
"required-opps" property but I've found zero users in tree. Maybe some
helpers could be written to parse that property and automatically
request ICC opp on device suspend/resume via device-links?
I know that stuff was written for genpd but it looks like a very good
fit to me.
> In addition, a patch to that allow to scale the DRAM
> frequency using CCF is required. I'm still working on this patch.
I'm not sure what you mean here; do you want a clk set_rate to change
the DRAM freq? It makes sense for the DDRC to be a node in the interconnect
framework and to switch OPPs inside the icc implementation via an ATF call.
--
Regards,
Leonard
On 15-03-19, 17:17, Leonard Crestez wrote:
> On 3/15/19 11:31 AM, Alexandre Bailon wrote:
> >>> This series is sent as RFC mostly because the current support of i.MX SoC won't
> >>> benefit of busfreq framework, because the clocks' driver don't support
> >>> interconnect / dram frequency scaling.
> >>> As exemple, this series implements busfreq for i.MX8MM whose upstreaming
> >>> is in progress. Because this relies on ATF to do the frequency scaling, it won't
> >>> be hard make it work.
>
> >> How can I test this patch series?
> >> Any additional patches you can share with us?
> >> Or what else we need to do to test it? We can help with it.
>
> > Many other patches will be required to test the series.
> > There are a couple of patches that updates i.MX device drivers to
> > request for bandwidth (does similar thing as bus_freq_request and
> > bus_freq_release).
>
> The interconnect framework asks for bandwidth in bytes/second but in NXP
> tree most requests are of the form "request_bus_freq(BUS_FREQ_HIGH);".
> In many cases (ethernet) it doesn't seem you can calculate a specific
> bandwidth usefully.
>
> Instead of asking for "infinite bandwidth zero latency" to force
> everything to "high" it would be nicer to "request an opp".
>
> Power-domain bindings mentioned that consumer-devices can specify a
> "required-opps" property but I've found zero users in tree. Maybe some
> helpers could be written to parse that property and automatically
> request ICC opp on device suspend/resume via device-links?
Documentation/devicetree/bindings/power/qcom,rpmpd.txt is using it currently in
mainline.
> I know that stuff was written for genpd but it looks like a very good
> fit to me.
Yes, the very first user is genpd but we have designed it with an open mind and
so it shouldn't be difficult to make use of it at other places.
There is some WIP which you can look at:
- Introduce OPP bandwidth bindings
lore.kernel.org/lkml/[email protected]
- DVFS in the OPP core
https://lore.kernel.org/lkml/[email protected]
--
viresh
On Wed, 13 Mar 2019 at 20:35, Alexandre Bailon <[email protected]> wrote:
>
> This series implements busfreq, a framework used in MXP's
> tree to scale the interconnect and dram frequencies.
> In the vendor tree, device's driver request for a
> performance level, which is used to scale the frequencies.
> This series implements it using the interconnect framework.
> Devices' driver request for bandwidth which is use by busfreq
> to determine a performance level, and then scale the frequency.
>
> Busfreq is quite generic. It could be used for any i.MX SoC.
> A busfreq platform driver just have to define a list of
> interconnect nodes, and some OPPs.
>
> This series is sent as RFC mostly because the current support
> of i.MX SoC won't benefit of busfreq framework, because the
> clocks' driver don't support interconnect / dram frequency
> scaling.
> As exemple, this series implements busfreq for i.MX8MM whose
> upstreaming is in progress. Because this relies on ATF to
> do the frequency scaling, it won't be hard make it work.
>
> As exemple, this series implements busfreq for
> Alexandre Bailon (3):
> drivers: interconnect: Add a driver for i.MX SoC
> drivers: interconnect: imx: Add support of i.MX8MM
> dt-bindings: interconnect: Document fsl,busfreq-imx8mm bindings
Hi Alexandre,
I am quite late but I just found your email.
This looks very similar to existing framework - devfreq, which purpose
is to scale the system busses based on performance counters/events. It
would be nice if we could avoid duplication of existing subsystems.
Best regards,
Krzysztof
On 5/3/19 14:19, Krzysztof Kozlowski wrote:
> On Wed, 13 Mar 2019 at 20:35, Alexandre Bailon <[email protected]> wrote:
>>
>> This series implements busfreq, a framework used in MXP's
>> tree to scale the interconnect and dram frequencies.
>> In the vendor tree, device's driver request for a
>> performance level, which is used to scale the frequencies.
>> This series implements it using the interconnect framework.
>> Devices' driver request for bandwidth which is use by busfreq
>> to determine a performance level, and then scale the frequency.
>>
>> Busfreq is quite generic. It could be used for any i.MX SoC.
>> A busfreq platform driver just have to define a list of
>> interconnect nodes, and some OPPs.
>>
>> This series is sent as RFC mostly because the current support
>> of i.MX SoC won't benefit of busfreq framework, because the
>> clocks' driver don't support interconnect / dram frequency
>> scaling.
>> As exemple, this series implements busfreq for i.MX8MM whose
>> upstreaming is in progress. Because this relies on ATF to
>> do the frequency scaling, it won't be hard make it work.
>>
>> As exemple, this series implements busfreq for
>> Alexandre Bailon (3):
>> drivers: interconnect: Add a driver for i.MX SoC
>> drivers: interconnect: imx: Add support of i.MX8MM
>> dt-bindings: interconnect: Document fsl,busfreq-imx8mm bindings
>
> Hi Alexandre,
>
> I am quite late but I just found your email.
>
> This looks very similar to existing framework - devfreq, which purpose
> is to scale the system busses based on performance counters/events. It
> would be nice if we could avoid duplication of existing subsystems.
Hi Krzysztof,
Scaling buses based on performance counters is suboptimal and sometimes might
not work well. In contrast with devfreq, the interconnect API allows
drivers to express their needs in advance and be proactive. It is also designed
to deal with multi-tiered bus topologies and to aggregate the requests from the
different consumer drivers.
Thanks,
Georgi
>
> Best regards,
> Krzysztof
>
On 15.03.2019 18:55, Alexandre Bailon wrote:
>> On Wed, Mar 13, 2019 at 12:33 PM Alexandre Bailon <[email protected]> wrote:
>>> As exemple, this series implements busfreq for i.MX8MM whose
>>> upstreaming is in progress. Because this relies on ATF to
>>> do the frequency scaling, it won't be hard make it work.
>>
>> It's not clear to me whether this series actual scales the dram
>> frequency based on what you said above. Is it just theoretical or do
>> you have it working with a pile of out-of-tree patches? Would be good
>> to include that pile of patches in your integration branch that I
>> suggested above.
> The current series only introduce busfreq generic driver, and the
> busfreq driver for the imx8mm.
> As is, the imx8mm driver will just be loaded, but do nothing because
> none of the drivers have been updated to request bandwidth using the
> interconnect framework.
>
> My intent was to sent a first draft o busfreq, to get some feedback,
> before to send a more complete, and fully functional series.
It's been a while since this was first posted and imx8mm now boots fine
in linux-next. Is there a more up-to-date WIP branch somewhere?
Otherwise I can try to hack this series into a bootable form.
> In addition, the current clock driver of imx8mm doesn't allow dram
> frequency scaling, so if busfreq driver tries, it will fail (should be
> harmless because any other clocks should restored to their previous
> rate).
I'm confused about this. In the NXP tree the actual DRAM switch is done
inside ATF via SIP calls and involves corralling all CPUs. Do you want
a "dram" clk which wraps the SIP calls required to change the dram
frequency, do the root switching, etc.?
I've been looking at the busfreq implementation in the NXP tree and
refactoring just the "dram freq switch" behind a clk might work nicely.
This would be similar to the imx_cpu clk used for cpufreq-dt and it
might even be possible to upstream this separately from the rest of
busfreq logic dealing with device requests.
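A minimal sketch of what such a clk could look like is below, purely
for illustration: the SIP function ID and argument layout are
placeholders rather than the real i.MX8MM ABI, and clk registration is
omitted.

#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#define IMX_SIP_DDR_DVFS_EXAMPLE        0xc2000004 /* placeholder function ID */

struct imx_dram_clk {
        struct clk_hw hw;
        unsigned long rate;
};

#define to_imx_dram_clk(_hw) container_of(_hw, struct imx_dram_clk, hw)

static unsigned long imx_dram_clk_recalc_rate(struct clk_hw *hw,
                                              unsigned long parent_rate)
{
        return to_imx_dram_clk(hw)->rate;
}

static long imx_dram_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                                    unsigned long *parent_rate)
{
        return rate;
}

static int imx_dram_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long parent_rate)
{
        struct imx_dram_clk *dram = to_imx_dram_clk(hw);
        struct arm_smccc_res res;

        /* Ask ATF to switch the DRAM to the requested setpoint. */
        arm_smccc_smc(IMX_SIP_DDR_DVFS_EXAMPLE, rate, 0, 0, 0, 0, 0, 0, &res);
        if (res.a0)
                return -EIO;

        dram->rate = rate;
        return 0;
}

static const struct clk_ops imx_dram_clk_ops = {
        .recalc_rate = imx_dram_clk_recalc_rate,
        .round_rate = imx_dram_clk_round_rate,
        .set_rate = imx_dram_clk_set_rate,
};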
I haven't done a very careful review but I noticed you're not using the
OPP framework and instead redefined everything? It's not clear why.
--
Regards,
Leonard
Hi, Alexandre
> -----Original Message-----
> From: Leonard Crestez
> Sent: Wednesday, May 15, 2019 3:34 AM
> To: Alexandre Bailon <[email protected]>; Jacky Bai <[email protected]>
> Subject: Re: [RFC PATCH 0/3] Add support of busfreq
>
> On 15.03.2019 18:55, Alexandre Bailon wrote:
> >> On Wed, Mar 13, 2019 at 12:33 PM Alexandre Bailon
> <[email protected]> wrote:
>
> >>> As exemple, this series implements busfreq for i.MX8MM whose
> >>> upstreaming is in progress. Because this relies on ATF to do the
> >>> frequency scaling, it won't be hard make it work.
I have a similar question to the previous reviewer: is there any branch on which
we can test this series?
Also, from the patch, it has a multi-level description of the fabric architecture, while we ONLY
intend to scale the "bus" frequency per devices' requests, where "bus" includes DRAM, NOC,
AHB and AXI. Should we make it flatter, such as a single virtual fabric as the provider, with
all other devices as nodes under it?
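Such a flattened topology could be expressed with the macros already
defined in busfreq.h, for example as below, where IMX8MM_BUS is a
hypothetical node id that is not part of the posted bindings.

/* Hypothetical flat variant: every master links directly to a single
 * virtual bus node instead of the NOC/PL301 hierarchy.
 */
static struct busfreq_icc_node imx8mm_flat_icc_nodes[] = {
        DEFINE_BUS_MASTER("FEC", IMX8MM_FEC, IMX8MM_BUS),
        DEFINE_BUS_MASTER("USB-1", IMX8MM_USB_1, IMX8MM_BUS),
        DEFINE_BUS_MASTER("GPU", IMX8MM_GPU, IMX8MM_BUS),
        DEFINE_BUS_MASTER("SAI-1", IMX8MM_SAI1, IMX8MM_BUS),
        /* the remaining masters would follow the same pattern */
        DEFINE_BUS_SLAVE("BUS", IMX8MM_BUS),
};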
Anson
> >>
> >> It's not clear to me whether this series actual scales the dram
> >> frequency based on what you said above. Is it just theoretical or do
> >> you have it working with a pile of out-of-tree patches? Would be good
> >> to include that pile of patches in your integration branch that I
> >> suggested above.
>
> > The current series only introduce busfreq generic driver, and the
> > busfreq driver for the imx8mm.
> > As is, the imx8mm driver will just be loaded, but do nothing because
> > none of the drivers have been updated to request bandwidth using the
> > interconnect framework.
> >
> > My intent was to sent a first draft o busfreq, to get some feedback,
> > before to send a more complete, and fully functional series.
>
> It's been a while since this was first posted and imx8mm now boots fine in
> linux-next. Is there a more up-to-date WIP branch somewhere?
> Otherwise I can try to hack this series into a bootable form.
>
> > In addition, the current clock driver of imx8mm doesn't allow dram
> > frequency scaling, so if busfreq driver tries, it will fail (should be
> > harmless because any other clocks should restored to their previous rate).
>
> I'm confused about this. In NXP tree the actual DRAM switch is done inside
> ATF via SIP calls and involves corralling all CPUs. Do you want an "dram" clk
> which wraps the SIP calls required to changing dram frequency and root
> switching etc?
>
> I've been looking at the busfreq implementation in the NXP tree and
> refactoring just the "dram freq switch" behind a clk might work nicely.
>
> This would be similar to the imx_cpu clk used for cpufreq-dt and it might
> even be possible to upstream this separately from the rest of busfreq logic
> dealing with device requests.
>
>
> I haven't done a very careful review but I noticed you're not using the OPP
> framework and instead redefined everything? It's not clear why.
>
> --
> Regards,
> Leonard
On 6/4/2019 11:44 AM, Anson Huang wrote:
>>>>> As exemple, this series implements busfreq for i.MX8MM whose
>>>>> upstreaming is in progress. Because this relies on ATF to do the
>>>>> frequency scaling, it won't be hard make it work.
>
> I have similar question as previous reviewer, is there any branch that we can test
> this series?
I've been looking at this and pushed a fixed-up functional variant to my
personal github:
https://github.com/cdleonard/linux/commits/next_imx8mm_busfreq
It builds and probes and switches DRAM freq between low and high based
on whether ethernet is down or up (for testing purposes). The pile of
out-of-tree patches required to get this working is quite small.
The DRAM freq switch is done via a clk wrapper previously sent as RFC:
https://patchwork.kernel.org/patch/10968303/
That part needs more work but it could serve as a neat encapsulation
similar to imx_cpu clk used for cpufreq-dt.
> And, from the patch, it has multiple levels description of fabric arch, while we ONLY
> intend to scale "bus" frequency per devices' request, here "bus" includes DRAM, NOC and
> AHB, AXI, should we make it more flatter, such as just a virtual fabric as a single provider, and then
> all other devices as nodes under this provider?
The imx8mm interconnect bindings describe many bus endpoints but all
requests are aggregated to a single platform-level OPP which is
equivalent to "low/audio/high mode" from NXP tree.
It might be better to associate clks to several ICC nodes and this way
scale NOC and DRAM separately? As far as I understand an interconnect
provider is free to decide on granularity.
As a wilder idea, it might even be possible to use a standard
"devfreq-with-perfmon" for the DDRC and have the interconnect request a
min freq from it instead of doing clk_set_rate on the dram directly.
That could bring features from both worlds, scaling both proactively
and reactively.
--
Regards,
Leonard
On 3/13/2019 9:36 PM, Alexandre Bailon wrote:
>
> This adds support of i.MX SoC to interconnect framework.
> This is based on busfreq, from NXP's tree.
> This is is generic enough to be used to add support of interconnect
> framework to any i.MX SoC, and, even, this could used for some other
> SoC.
> Thanks to interconnect framework, devices' driver request for
> bandwidth which is use by busfreq to determine a performance level,
> and then scale the frequency.
This part is difficult for me to understand:
> +static int busfreq_opp_bw_gt(struct busfreq_opp_node *opp_node,
> + u32 avg_bw, u32 peak_bw)
> +{
> + if (!opp_node)
> + return 0;
> + if (opp_node->min_avg_bw == BUSFREQ_UNDEFINED_BW) {
> + if (avg_bw)
> + return 2;
> + else
> + return 1;
> + }
> + if (opp_node->min_peak_bw == BUSFREQ_UNDEFINED_BW) {
> + if (peak_bw)
> + return 2;
> + else
> + return 1;
> + }
> + if (avg_bw >= opp_node->min_avg_bw)
> + return 1;
> + if (peak_bw >= opp_node->min_peak_bw)
> + return 1;
> + return 0;
> +}
> +
> +static struct busfreq_opp *busfreq_cmp_bw_opp(struct busfreq_icc_desc *desc,
> + struct busfreq_opp *opp1,
> + struct busfreq_opp *opp2)
> +{
> + int i;
> + int opp1_valid;
> + int opp2_valid;
> + int opp1_count = 0;
> + int opp2_count = 0;
> +
> + if (!opp1 && !opp2)
> + return desc->current_opp;
> +
> + if (!opp1)
> + return opp2;
> +
> + if (!opp2)
> + return opp1;
> +
> + if (opp1 == opp2)
> + return opp1;
> +
> + for (i = 0; i < opp1->nodes_count; i++) {
> + struct busfreq_opp_node *opp_node1, *opp_node2;
> + struct icc_node *icc_node;
> + u32 avg_bw;
> + u32 peak_bw;
> +
> + opp_node1 = &opp1->nodes[i];
> + opp_node2 = &opp2->nodes[i];
> + icc_node = opp_node1->icc_node;
> + avg_bw = icc_node->avg_bw;
> + peak_bw = icc_node->peak_bw;
> +
> + opp1_valid = busfreq_opp_bw_gt(opp_node1, avg_bw, peak_bw);
> + opp2_valid = busfreq_opp_bw_gt(opp_node2, avg_bw, peak_bw);
> +
> + if (opp1_valid == opp2_valid && opp1_valid == 1) {
> + if (opp_node1->min_avg_bw > opp_node2->min_avg_bw &&
> + opp_node1->min_avg_bw != BUSFREQ_UNDEFINED_BW)
> + opp1_valid++;
> + if (opp_node1->min_avg_bw < opp_node2->min_avg_bw &&
> + opp_node2->min_avg_bw != BUSFREQ_UNDEFINED_BW)
> + opp2_valid++;
> + }
> +
> + opp1_count += opp1_valid;
> + opp2_count += opp2_valid;
> + }
> +
> + if (opp1_count > opp2_count)
> + return opp1;
> + if (opp1_count < opp2_count)
> + return opp2;
> + return opp1->perf_level >= opp2->perf_level ? opp2 : opp1;
> +}
> +
> +static int busfreq_set_best_opp(struct busfreq_icc_desc *desc)
> +{
> + struct busfreq_opp *opp, *best_opp = desc->current_opp;
> +
> + list_for_each_entry(opp, &desc->opps, node)
> + best_opp = busfreq_cmp_bw_opp(desc, opp, best_opp);
> + return busfreq_set_opp(desc, best_opp);
> +}
The goal seems to be to find the smallest platform-level "busfreq_opp"
which can meet all aggregated requests.
But why implement this by comparing opps against each other? It would be
easier to just iterate OPPs from low to high and stop on the first one
which meets all requirements.
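A rough sketch of that approach, for illustration only: it assumes a
hypothetical per-node capacity (max_avg_bw / max_peak_bw) in struct
busfreq_opp_node, which the posted structures do not have, and that
desc->opps is kept sorted by ascending perf_level.

/* Hypothetical: return the lowest OPP whose per-node capacities cover
 * the aggregated requests, or the highest OPP if none of them do.
 */
static struct busfreq_opp *busfreq_find_lowest_opp(struct busfreq_icc_desc *desc)
{
        struct busfreq_opp *opp;
        int i;

        list_for_each_entry(opp, &desc->opps, node) {
                bool fits = true;

                for (i = 0; i < opp->nodes_count; i++) {
                        struct busfreq_opp_node *node = &opp->nodes[i];

                        if (node->icc_node->avg_bw > node->max_avg_bw ||
                            node->icc_node->peak_bw > node->max_peak_bw) {
                                fits = false;
                                break;
                        }
                }
                if (fits)
                        return opp;
        }

        /* Nothing fits: fall back to the highest OPP (last in the list). */
        return list_last_entry(&desc->opps, struct busfreq_opp, node);
}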
The sdm845 provider seems to take a different approach: there are BCMs
("Bus Clock Managers") which are attached to some of the icc nodes and
those are individually scaled in order to meet BW requirements. On imx8m
we can scale buses via clk so we could just attach clks to some of the
nodes (NOC, DRAM, few PL301).
--
Regards,
Leonard