Changes since v1:
- drop the useless patch
- rebase
https://patchwork.kernel.org/project/linux-mediatek/cover/[email protected]/
https://patchwork.kernel.org/project/linux-mediatek/patch/YNHg5NuJILrrBIZ/@mwanda/
Yongqiang Niu (1):
drm/mediatek: clear pending flag when cmdq packet is done.
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 96 +++++++++++++++++++++++++++++----
1 file changed, 89 insertions(+), 7 deletions(-)
--
1.8.1.1.dirty
In cmdq mode, a packet may be flushed before it is executed, so the
pending flags should be cleared only after the cmdq packet is done.
Signed-off-by: CK Hu <[email protected]>
Signed-off-by: Yongqiang Niu <[email protected]>
---
drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 96 +++++++++++++++++++++++++++++----
1 file changed, 89 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 40df2c8..8cd107b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -73,6 +73,13 @@ struct mtk_crtc_state {
unsigned int pending_vrefresh;
};
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+struct mtk_cmdq_cb_data {
+ struct cmdq_pkt *cmdq_handle;
+ struct mtk_drm_crtc *mtk_crtc;
+};
+#endif
+
static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
return container_of(c, struct mtk_drm_crtc, base);
@@ -224,7 +231,64 @@ struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
static void ddp_cmdq_cb(struct cmdq_cb_data data)
{
- cmdq_pkt_destroy(data.data);
+ struct mtk_cmdq_cb_data *cb_data = data.data;
+ struct mtk_drm_crtc *mtk_crtc;
+ struct mtk_crtc_state *state;
+ unsigned int i;
+
+ if (!cb_data) {
+ DRM_ERROR("cmdq callback data is null pointer!\n");
+ return;
+ }
+
+ if (data.sta != 0) {
+ DRM_WARN("cmdq callback error %d!\n", data.sta);
+ goto destroy_pkt;
+ }
+
+ mtk_crtc = cb_data->mtk_crtc;
+ if (!mtk_crtc) {
+ DRM_ERROR("cmdq callback mtk_crtc is null pointer!\n");
+ goto destroy_pkt;
+ }
+
+ state = to_mtk_crtc_state(mtk_crtc->base.state);
+
+ if (state->pending_config) {
+ state->pending_config = false;
+ }
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (plane_state->pending.config)
+ plane_state->pending.config = false;
+ }
+ mtk_crtc->pending_planes = false;
+ }
+
+ if (mtk_crtc->pending_async_planes) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (plane_state->pending.async_config)
+ plane_state->pending.async_config = false;
+ }
+ mtk_crtc->pending_async_planes = false;
+ }
+
+destroy_pkt:
+ if (cb_data->cmdq_handle)
+ cmdq_pkt_destroy(cb_data->cmdq_handle);
+
+ kfree(cb_data);
}
#endif
@@ -378,7 +442,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
state->pending_vrefresh, 0,
cmdq_handle);
- state->pending_config = false;
+ if (!cmdq_handle)
+ state->pending_config = false;
}
if (mtk_crtc->pending_planes) {
@@ -398,9 +463,12 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.config = false;
+ if (!cmdq_handle)
+ plane_state->pending.config = false;
}
- mtk_crtc->pending_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_planes = false;
}
if (mtk_crtc->pending_async_planes) {
@@ -420,9 +488,13 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
mtk_ddp_comp_layer_config(comp, local_layer,
plane_state,
cmdq_handle);
- plane_state->pending.async_config = false;
+
+ if (!cmdq_handle)
+ plane_state->pending.async_config = false;
}
- mtk_crtc->pending_async_planes = false;
+
+ if (!cmdq_handle)
+ mtk_crtc->pending_async_planes = false;
}
}
@@ -469,13 +541,23 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client) {
+ struct mtk_cmdq_cb_data *cb_data;
+
mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
cmdq_pkt_finalize(cmdq_handle);
- cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+
+ cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
+ if (cb_data) {
+ cb_data->cmdq_handle = cmdq_handle;
+ cb_data->mtk_crtc = mtk_crtc;
+ cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cb_data);
+ } else {
+ cmdq_pkt_destroy(cmdq_handle);
+ }
}
#endif
mtk_crtc->config_updating = false;
--
1.8.1.1.dirty
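A note on the pattern above: the fix hinges on a small context object that is
allocated before the asynchronous flush and freed only in the completion
callback, because that callback is the first point at which the GCE hardware
is known to be done with the packet. Below is a minimal sketch of that
allocate/flush/free-in-callback lifecycle, assuming the pre-conversion cmdq
API where cmdq_pkt_flush_async() takes a callback plus an opaque pointer and
cmdq_pkt_destroy() takes only the packet; example_cb_data, example_cb() and
example_flush() are illustrative names, not code from the patch:

    #include <linux/slab.h>
    #include <linux/soc/mediatek/mtk-cmdq.h>

    struct mtk_drm_crtc;	/* private to mtk_drm_crtc.c */

    /* Illustrative context, mirroring mtk_cmdq_cb_data in the patch. */
    struct example_cb_data {
    	struct cmdq_pkt *cmdq_handle;	/* packet to destroy on completion */
    	struct mtk_drm_crtc *mtk_crtc;	/* crtc whose pending flags to clear */
    };

    static void example_cb(struct cmdq_cb_data data)
    {
    	struct example_cb_data *cb_data = data.data;

    	/* The GCE is done with the packet only now, so this is the
    	 * safe point to clear the pending flags and release resources. */
    	cmdq_pkt_destroy(cb_data->cmdq_handle);
    	kfree(cb_data);
    }

    static void example_flush(struct mtk_drm_crtc *mtk_crtc,
    			  struct cmdq_pkt *cmdq_handle)
    {
    	struct example_cb_data *cb_data;

    	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
    	if (!cb_data) {
    		cmdq_pkt_destroy(cmdq_handle);
    		return;
    	}
    	cb_data->cmdq_handle = cmdq_handle;
    	cb_data->mtk_crtc = mtk_crtc;

    	/* Ownership of cb_data passes to example_cb() here. */
    	cmdq_pkt_flush_async(cmdq_handle, example_cb, cb_data);
    }

The ownership rule is one-way: once cmdq_pkt_flush_async() returns, the
context belongs to the callback and must not be touched by the submitter.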
Hi, Yongqiang:
Yongqiang Niu <[email protected]> 於 2021年7月12日 週一 下午2:42寫道:
>
> In cmdq mode, a packet may be flushed before it is executed, so the
> pending flags should be cleared only after the cmdq packet is done.
I would like this patch to be based on the series [1], because that
series uses the standard mailbox callback interface.
[1] https://patchwork.kernel.org/project/linux-mediatek/list/?series=514369
Regards,
Chun-Kuang.
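For readers unfamiliar with that series: the standard mailbox interface
delivers completion through the rx_callback member of struct mbox_client
rather than through a cmdq-specific flush callback. A rough sketch of what
registering such a callback looks like, assuming a struct cmdq_client that
embeds a struct mbox_client as declared in linux/soc/mediatek/mtk-cmdq.h;
the function names and the knows_txdone setting are illustrative assumptions
about how series [1] wires this up, not something shown in this thread:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/mailbox_client.h>
    #include <linux/soc/mediatek/mtk-cmdq.h>

    /* rx_callback uses the generic mailbox signature. */
    static void example_rx_callback(struct mbox_client *cl, void *mssg)
    {
    	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client,
    						   client);

    	/* mssg describes the completed packet; this is where the
    	 * pending flags would be cleared, as ddp_cmdq_cb() does above. */
    	dev_dbg(cmdq_cl->client.dev, "cmdq packet done\n");
    }

    static int example_setup(struct device *dev, struct cmdq_client *cmdq_cl)
    {
    	cmdq_cl->client.dev = dev;
    	cmdq_cl->client.tx_block = false;
    	cmdq_cl->client.knows_txdone = true;	/* assumption, see above */
    	cmdq_cl->client.rx_callback = example_rx_callback;

    	cmdq_cl->chan = mbox_request_channel(&cmdq_cl->client, 0);
    	if (IS_ERR(cmdq_cl->chan))
    		return PTR_ERR(cmdq_cl->chan);

    	return 0;
    }

With this shape, the context that this patch carries in mtk_cmdq_cb_data can
instead be recovered with container_of() from the mbox_client, which removes
the per-flush allocation entirely.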