These refinements include using the standard mailbox callback interface,
detecting CMDQ execution timeout, and keeping a single, fixed cmdq_handle
in mtk_crtc.
Chun-Kuang Hu (4):
drm/mediatek: Use mailbox rx_callback instead of cmdq_task_cb
drm/mediatek: Remove struct cmdq_client
drm/mediatek: Detect CMDQ execution timeout
drm/mediatek: Add cmdq_handle in mtk_crtc
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 110 ++++++++++++++++++++----
1 file changed, 91 insertions(+), 19 deletions(-)
--
2.25.1
rx_callback is the standard mailbox callback mechanism and can cover the
function of the proprietary cmdq_task_cb, so use the standard one instead
of the proprietary one.
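As a minimal sketch, the resulting flush path looks like this (condensed
from the diff below; the flush_cmdq_pkt() helper is only for illustration,
the driver open-codes these calls in mtk_drm_crtc_update_config()):

#include <linux/dma-mapping.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

static void flush_cmdq_pkt(struct cmdq_client *client, struct cmdq_pkt *pkt)
{
        /* make the finalized command buffer visible to the GCE hardware */
        dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
                                   pkt->cmd_buf_size, DMA_TO_DEVICE);
        /* queue the packet on the mailbox channel... */
        mbox_send_message(client->chan, pkt);
        /*
         * ...and report the transmission as done so the framework can accept
         * the next message; execution completion of the packet itself is
         * reported through rx_callback.
         */
        mbox_client_txdone(client->chan, 0);
}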
Signed-off-by: Chun-Kuang Hu <[email protected]>
---
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 16 +++++++++++++---
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 474efb844249..cac8fe219c95 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -4,6 +4,8 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
@@ -222,9 +224,11 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static void ddp_cmdq_cb(struct cmdq_cb_data data)
+static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
- cmdq_pkt_destroy(data.data);
+ struct cmdq_cb_data *data = mssg;
+
+ cmdq_pkt_destroy(data->pkt);
}
#endif
@@ -475,7 +479,12 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
- cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+ dma_sync_single_for_device(mtk_crtc->cmdq_client->chan->mbox->dev,
+ cmdq_handle->pa_base,
+ cmdq_handle->cmd_buf_size,
+ DMA_TO_DEVICE);
+ mbox_send_message(mtk_crtc->cmdq_client->chan, cmdq_handle);
+ mbox_client_txdone(mtk_crtc->cmdq_client->chan, 0);
}
#endif
mtk_crtc->config_updating = false;
@@ -842,6 +851,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
if (mtk_crtc->cmdq_client) {
+ mtk_crtc->cmdq_client->client.rx_callback = ddp_cmdq_cb;
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
--
2.25.1
The mailbox rx_callback passes a struct mbox_client pointer to the callback
function, but that pointer cannot be mapped back to the mtk_drm_crtc
instance, because mtk_drm_crtc only holds a pointer to struct cmdq_client,
which embeds the struct mbox_client:
struct cmdq_client {
        struct mbox_client client;
        struct mbox_chan *chan;
};

struct mtk_drm_crtc {
        /* client instance data */
        struct cmdq_client *cmdq_client;
};
So remove struct cmdq_client and embed a struct mbox_client directly in the
mtk_drm_crtc instance:
struct mtk_drm_crtc {
        /* client instance data */
        struct mbox_client cl;
};
In the rx_callback function, the struct mbox_client pointer can then be
mapped back to the struct mtk_drm_crtc with container_of().
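A minimal sketch of the callback side (mirroring the diff below, where the
embedded member is named cmdq_cl and mtk_drm_cmdq_pkt_destroy() is the
driver-local unmap-and-free helper added there); container_of() walks from
the embedded mbox_client back to the crtc that contains it:

static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
        /* cl is the cmdq_cl member embedded in mtk_drm_crtc */
        struct mtk_drm_crtc *mtk_crtc =
                container_of(cl, struct mtk_drm_crtc, cmdq_cl);
        struct cmdq_cb_data *data = mssg;

        mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
}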
Signed-off-by: Chun-Kuang Hu <[email protected]>
---
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 80 +++++++++++++++++++------
1 file changed, 62 insertions(+), 18 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index cac8fe219c95..db8621df7d85 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -52,7 +52,8 @@ struct mtk_drm_crtc {
bool pending_async_planes;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_client *cmdq_client;
+ struct mbox_client cmdq_cl;
+ struct mbox_chan *cmdq_chan;
u32 cmdq_event;
#endif
@@ -223,12 +224,52 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
return NULL;
}
+static struct cmdq_pkt *mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, size_t size)
+{
+ struct cmdq_pkt *pkt;
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return ERR_PTR(-ENOMEM);
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pkt->buf_size = size;
+
+ dev = chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ kfree(pkt->va_base);
+ kfree(pkt);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return pkt;
+}
+
+static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
+{
+ dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ kfree(pkt);
+}
+
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
+ struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
struct cmdq_cb_data *data = mssg;
- cmdq_pkt_destroy(data->pkt);
+ mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
}
#endif
@@ -472,19 +513,19 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
mtk_mutex_release(mtk_crtc->mutex);
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (mtk_crtc->cmdq_client) {
- mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
- cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+ if (mtk_crtc->cmdq_chan) {
+ mbox_flush(mtk_crtc->cmdq_chan, 2000);
+ cmdq_handle = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
- dma_sync_single_for_device(mtk_crtc->cmdq_client->chan->mbox->dev,
+ dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
cmdq_handle->pa_base,
cmdq_handle->cmd_buf_size,
DMA_TO_DEVICE);
- mbox_send_message(mtk_crtc->cmdq_client->chan, cmdq_handle);
- mbox_client_txdone(mtk_crtc->cmdq_client->chan, 0);
+ mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
+ mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
}
#endif
mtk_crtc->config_updating = false;
@@ -498,7 +539,7 @@ static void mtk_crtc_ddp_irq(void *data)
struct mtk_drm_private *priv = crtc->dev->dev_private;
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+ if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
#else
if (!priv->data->shadow_register)
#endif
@@ -841,17 +882,20 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mutex_init(&mtk_crtc->hw_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_crtc->cmdq_client =
- cmdq_mbox_create(mtk_crtc->mmsys_dev,
- drm_crtc_index(&mtk_crtc->base));
- if (IS_ERR(mtk_crtc->cmdq_client)) {
+ mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
+ mtk_crtc->cmdq_cl.tx_block = false;
+ mtk_crtc->cmdq_cl.knows_txdone = true;
+ mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
+ mtk_crtc->cmdq_chan =
+ mbox_request_channel(&mtk_crtc->cmdq_cl,
+ drm_crtc_index(&mtk_crtc->base));
+ if (IS_ERR(mtk_crtc->cmdq_chan)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
- mtk_crtc->cmdq_client = NULL;
+ mtk_crtc->cmdq_chan = NULL;
}
- if (mtk_crtc->cmdq_client) {
- mtk_crtc->cmdq_client->client.rx_callback = ddp_cmdq_cb;
+ if (mtk_crtc->cmdq_chan) {
ret = of_property_read_u32_index(priv->mutex_node,
"mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
@@ -859,8 +903,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
drm_crtc_index(&mtk_crtc->base));
- cmdq_mbox_destroy(mtk_crtc->cmdq_client);
- mtk_crtc->cmdq_client = NULL;
+ mbox_free_channel(mtk_crtc->cmdq_chan);
+ mtk_crtc->cmdq_chan = NULL;
}
}
#endif
--
2.25.1
CMDQ is used to update display registers in the vblank period, so the
command should execute in the next vblank. If it fails to execute within
the next two vblanks, a timeout has happened.
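A minimal sketch of the check (the driver open-codes it in
mtk_crtc_ddp_irq(); the helper name here is only for illustration):

static void mtk_crtc_check_cmdq_timeout(struct mtk_drm_crtc *mtk_crtc)
{
        /*
         * cmdq_vblank_cnt is armed to 2 just before the packet is sent and
         * reset to 0 in ddp_cmdq_cb() once the packet has executed.  If two
         * vblank interrupts pass without the callback firing, the command
         * missed its execution window.
         */
        if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
                DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
                          drm_crtc_index(&mtk_crtc->base));
}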
Signed-off-by: Chun-Kuang Hu <[email protected]>
---
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 14 ++++++++++++--
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index db8621df7d85..5dd61eacce6c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -55,6 +55,7 @@ struct mtk_drm_crtc {
struct mbox_client cmdq_cl;
struct mbox_chan *cmdq_chan;
u32 cmdq_event;
+ u32 cmdq_vblank_cnt;
#endif
struct device *mmsys_dev;
@@ -269,6 +270,7 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
struct cmdq_cb_data *data = mssg;
+ mtk_crtc->cmdq_vblank_cnt = 0;
mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
}
#endif
@@ -524,6 +526,11 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
cmdq_handle->pa_base,
cmdq_handle->cmd_buf_size,
DMA_TO_DEVICE);
+ /*
+ * A CMDQ command should execute in the next vblank.
+ * If it fails to execute in the next 2 vblanks, a timeout has happened.
+ */
+ mtk_crtc->cmdq_vblank_cnt = 2;
mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
}
@@ -540,11 +547,14 @@ static void mtk_crtc_ddp_irq(void *data)
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
+ mtk_crtc_ddp_config(crtc, NULL);
+ else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
+ DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
+ drm_crtc_index(&mtk_crtc->base));
#else
if (!priv->data->shadow_register)
-#endif
mtk_crtc_ddp_config(crtc, NULL);
-
+#endif
mtk_drm_finish_page_flip(mtk_crtc);
}
--
2.25.1
One mtk_crtc needs just one cmdq_handle, so embed a cmdq_handle in
mtk_crtc to avoid frequent allocation and freeing of cmdq_handle.
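A minimal sketch of the reuse pattern (condensed from the diff below; the
helper name is only for illustration, the driver does this inline in
mtk_drm_crtc_update_config() after mapping the packet once at crtc
creation):

static void mtk_crtc_start_cmdq_pkt(struct mtk_drm_crtc *mtk_crtc)
{
        struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;

        /*
         * The packet buffer was allocated and DMA-mapped once at crtc
         * creation; rewinding cmd_buf_size reuses it for the next frame
         * instead of creating a fresh packet on every atomic commit.
         */
        cmdq_handle->cmd_buf_size = 0;
        cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
        cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
}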
Signed-off-by: Chun-Kuang Hu <[email protected]>
---
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 28 ++++++++++++++++---------
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 5dd61eacce6c..4c25e33638a7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -54,6 +54,7 @@ struct mtk_drm_crtc {
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
struct mbox_client cmdq_cl;
struct mbox_chan *cmdq_chan;
+ struct cmdq_pkt cmdq_handle;
u32 cmdq_event;
u32 cmdq_vblank_cnt;
#endif
@@ -225,19 +226,16 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
return NULL;
}
-static struct cmdq_pkt *mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, size_t size)
+static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
+ size_t size)
{
- struct cmdq_pkt *pkt;
struct device *dev;
dma_addr_t dma_addr;
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return ERR_PTR(-ENOMEM);
pkt->va_base = kzalloc(size, GFP_KERNEL);
if (!pkt->va_base) {
kfree(pkt);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
pkt->buf_size = size;
@@ -248,12 +246,12 @@ static struct cmdq_pkt *mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, size_t s
dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
kfree(pkt->va_base);
kfree(pkt);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
pkt->pa_base = dma_addr;
- return pkt;
+ return 0;
}
static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
@@ -477,7 +475,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- struct cmdq_pkt *cmdq_handle;
+ struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
struct drm_crtc *crtc = &mtk_crtc->base;
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -517,7 +515,7 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_chan) {
mbox_flush(mtk_crtc->cmdq_chan, 2000);
- cmdq_handle = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan, PAGE_SIZE);
+ cmdq_handle->cmd_buf_size = 0;
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
@@ -915,6 +913,16 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_crtc_index(&mtk_crtc->base));
mbox_free_channel(mtk_crtc->cmdq_chan);
mtk_crtc->cmdq_chan = NULL;
+ } else {
+ ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
+ &mtk_crtc->cmdq_handle,
+ PAGE_SIZE);
+ if (ret) {
+ dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
+ drm_crtc_index(&mtk_crtc->base));
+ mbox_free_channel(mtk_crtc->cmdq_chan);
+ mtk_crtc->cmdq_chan = NULL;
+ }
}
}
#endif
--
2.25.1