Next version of https://patchwork.freedesktop.org/series/41576/
All changes in this patch series are just to make checkpatch a little
happier, no functional changes.
Lyude Paul (10):
drm/atomic: Print debug message on atomic check failure
drm/i915: Move DP modeset retry work into intel_dp
drm/dp_mst: Fix naming on drm_atomic_get_mst_topology_state()
drm/dp_mst: Remove all evil duplicate state pointers
drm/dp_mst: Make drm_dp_mst_topology_state subclassable
drm/dp_mst: Add reset_state callback to topology mgr
drm/i915: Only use one link bw config for MST topologies
drm/i915: Make intel_dp_mst_atomic_check() idempotent
drm/dp_mst: Add MST fallback retraining helpers
drm/i915: Implement proper fallback training for MST
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 14 +-
.../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 46 +-
.../amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 4 +-
drivers/gpu/drm/drm_atomic.c | 5 +-
drivers/gpu/drm/drm_dp_mst_topology.c | 550 ++++++++++++++++++++-
drivers/gpu/drm/i915/intel_ddi.c | 10 +-
drivers/gpu/drm/i915/intel_display.c | 14 +-
drivers/gpu/drm/i915/intel_dp.c | 376 ++++++++++----
drivers/gpu/drm/i915/intel_dp_link_training.c | 6 +-
drivers/gpu/drm/i915/intel_dp_mst.c | 186 +++++--
drivers/gpu/drm/i915/intel_drv.h | 20 +-
drivers/gpu/drm/nouveau/nv50_display.c | 17 +-
drivers/gpu/drm/radeon/radeon_dp_mst.c | 13 +-
include/drm/drm_dp_mst_helper.h | 43 +-
14 files changed, 1120 insertions(+), 184 deletions(-)
--
2.14.3
There's no reason to track the atomic state three times. Unfortunately,
this is currently what we're doing, and even worse is that there is only
one actually correct state pointer: the one in mst_state->base.state.
mgr->state never seems to be used, along with the one in
mst_state->state.
This confused me for over 4 hours until I realized there was no magic
behind these pointers. So, let's save everyone else from the trouble.
Signed-off-by: Lyude Paul <[email protected]>
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
Signed-off-by: Lyude Paul <[email protected]>
---
include/drm/drm_dp_mst_helper.h | 6 ------
1 file changed, 6 deletions(-)
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 2f6203407fd2..5ca77dcf4f90 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -409,7 +409,6 @@ struct drm_dp_payload {
struct drm_dp_mst_topology_state {
struct drm_private_state base;
int avail_slots;
- struct drm_atomic_state *state;
struct drm_dp_mst_topology_mgr *mgr;
};
@@ -497,11 +496,6 @@ struct drm_dp_mst_topology_mgr {
*/
int pbn_div;
- /**
- * @state: State information for topology manager
- */
- struct drm_dp_mst_topology_state *state;
-
/**
* @funcs: Atomic helper callbacks
*/
--
2.14.3
This is useful for drivers (which will probably be all of them soon)
which need to track state that is exclusive to the topology, and not a
specific connector on said topology. This includes things such as the
link rate and lane count that are shared by all of the connectors on the
topology.
Signed-off-by: Lyude Paul <[email protected]>
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
---
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 14 +++-
.../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 46 ++++++++---
.../amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 4 +-
drivers/gpu/drm/drm_dp_mst_topology.c | 95 +++++++++++++++++-----
drivers/gpu/drm/i915/intel_dp_mst.c | 13 ++-
drivers/gpu/drm/nouveau/nv50_display.c | 17 +++-
drivers/gpu/drm/radeon/radeon_dp_mst.c | 13 ++-
include/drm/drm_dp_mst_helper.h | 8 ++
8 files changed, 173 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index e42a28e3adc5..2c3660c36732 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3626,9 +3626,17 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_register(&aconnector->base);
- if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
- || connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_dm_initialize_dp_connector(dm, aconnector);
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ res = amdgpu_dm_initialize_dp_connector(dm, aconnector);
+ if (res) {
+ drm_connector_unregister(&aconnector->base);
+ drm_connector_cleanup(&aconnector->base);
+ aconnector->connector_id = -1;
+
+ goto out_free;
+ }
+ }
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 8291d74f26bc..dcaa92d12cbc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -475,22 +475,48 @@ static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.register_connector = dm_dp_mst_register_connector
};
-void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector)
+static const struct drm_private_state_funcs dm_mst_state_funcs = {
+ .atomic_duplicate_state = drm_atomic_dp_mst_duplicate_topology_state,
+ .atomic_destroy_state = drm_atomic_dp_mst_destroy_topology_state,
+};
+
+int amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
{
+ struct drm_dp_mst_topology_state *state =
+ kzalloc(sizeof(*state), GFP_KERNEL);
+ int ret = 0;
+
+ if (!state)
+ return -ENOMEM;
+
aconnector->dm_dp_aux.aux.name = "dmdc";
aconnector->dm_dp_aux.aux.dev = dm->adev->dev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
- drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ ret = drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ if (ret)
+ goto err_aux;
+
aconnector->mst_mgr.cbs = &dm_mst_cbs;
- drm_dp_mst_topology_mgr_init(
- &aconnector->mst_mgr,
- dm->adev->ddev,
- &aconnector->dm_dp_aux.aux,
- 16,
- 4,
- aconnector->connector_id);
+ aconnector->mst_mgr.funcs = &dm_mst_state_funcs;
+ ret = drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr,
+ state,
+ dm->adev->ddev,
+ &aconnector->dm_dp_aux.aux,
+ 16,
+ 4,
+ aconnector->connector_id);
+ if (ret)
+ goto err_mst;
+
+ return 0;
+
+err_mst:
+ drm_dp_aux_unregister(&aconnector->dm_dp_aux.aux);
+err_aux:
+ kfree(state);
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index 8cf51da26657..d28fb456d2d5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -29,8 +29,8 @@
struct amdgpu_display_manager;
struct amdgpu_dm_connector;
-void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector);
+int amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector);
void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index ba67f1782a04..fbd7888ebca8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3100,33 +3100,90 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
(*mgr->cbs->hotplug)(mgr);
}
-static struct drm_private_state *
-drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
+/**
+ * drm_atomic_dp_mst_duplicate_topology_state - default
+ * drm_dp_mst_topology_state duplicate handler
+ *
+ * For drivers which don't yet subclass drm_dp_mst_topology_state
+ *
+ * RETURNS: the duplicated state on success, or NULL if allocating or
+ * duplicating the state fails.
+ */
+struct drm_private_state *
+drm_atomic_dp_mst_duplicate_topology_state(struct drm_private_obj *obj)
{
+ struct drm_dp_mst_topology_mgr *mgr = to_dp_mst_topology_mgr(obj);
struct drm_dp_mst_topology_state *state;
+ int ret;
state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
- __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+ ret = __drm_atomic_dp_mst_duplicate_topology_state(mgr, state);
+ if (ret) {
+ kfree(state);
+ return NULL;
+ }
return &state->base;
}
+EXPORT_SYMBOL(drm_atomic_dp_mst_duplicate_topology_state);
+
+/**
+ * __drm_atomic_dp_mst_duplicate_topology_state - default
+ * drm_dp_mst_topology_state duplicate hook
+ *
+ * Copies atomic state from an MST topology's current state. This is useful
+ * for drivers that subclass the MST topology state.
+ *
+ * RETURNS: 0 on success, negative error code on failure.
+ */
+int
+__drm_atomic_dp_mst_duplicate_topology_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *state)
+{
+ struct drm_private_obj *obj = &mgr->base;
+
+ memcpy(state, obj->state, sizeof(*state));
+
+ __drm_atomic_helper_private_obj_duplicate_state(&mgr->base,
+ &state->base);
+ return 0;
+}
+EXPORT_SYMBOL(__drm_atomic_dp_mst_duplicate_topology_state);
-static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
- struct drm_private_state *state)
+/**
+ * drm_atomic_dp_mst_destroy_topology_state - default
+ * drm_dp_mst_topology_state destroy handler
+ *
+ * For drivers which don't yet subclass drm_dp_mst_topology_state.
+ */
+void
+drm_atomic_dp_mst_destroy_topology_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
{
struct drm_dp_mst_topology_state *mst_state =
to_dp_mst_topology_state(state);
+ __drm_atomic_dp_mst_destroy_topology_state(mst_state);
+
kfree(mst_state);
}
+EXPORT_SYMBOL(drm_atomic_dp_mst_destroy_topology_state);
-static const struct drm_private_state_funcs mst_state_funcs = {
- .atomic_duplicate_state = drm_dp_mst_duplicate_state,
- .atomic_destroy_state = drm_dp_mst_destroy_state,
-};
+/**
+ * __drm_atomic_dp_mst_destroy_topology_state - default
+ * drm_dp_mst_topology_state destroy hook
+ *
+ * Frees the resources associated with the given drm_dp_mst_topology_state.
+ * This is useful for drivers that subclass the MST topology state.
+ */
+void
+__drm_atomic_dp_mst_destroy_topology_state(struct drm_dp_mst_topology_state *state)
+{
+}
+EXPORT_SYMBOL(__drm_atomic_dp_mst_destroy_topology_state);
/**
* drm_atomic_dp_mst_get_topology_state: get MST topology state
@@ -3157,21 +3214,25 @@ EXPORT_SYMBOL(drm_atomic_dp_mst_get_topology_state);
/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
+ * @state: atomic topology state to init, allocated by the driver
* @dev: device providing this structure - for i2c addition.
* @aux: DP helper aux channel to talk to this device
* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
* @max_payloads: maximum number of payloads this GPU can source
* @conn_base_id: the connector object ID the MST device is connected to.
*
+ * Note that this function doesn't take care of allocating the atomic MST
+ * state, this must be handled by the caller before calling
+ * drm_dp_mst_topology_mgr_init().
+ *
* Return 0 for success, or negative error code on failure
*/
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *state,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
int max_payloads, int conn_base_id)
{
- struct drm_dp_mst_topology_state *mst_state;
-
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
@@ -3200,18 +3261,14 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (test_calc_pbn_mode() < 0)
DRM_ERROR("MST PBN self-test failed\n");
- mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
- if (mst_state == NULL)
- return -ENOMEM;
-
- mst_state->mgr = mgr;
+ state->mgr = mgr;
/* max. time slots - one slot for MTP header */
- mst_state->avail_slots = 63;
+ state->avail_slots = 63;
drm_atomic_private_obj_init(&mgr->base,
- &mst_state->base,
- &mst_state_funcs);
+ &state->base,
+ mgr->funcs);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9e6956c08688..cf844cfd2bb0 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -587,19 +587,30 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
return true;
}
+static const struct drm_private_state_funcs mst_state_funcs = {
+ .atomic_destroy_state = drm_atomic_dp_mst_destroy_topology_state,
+ .atomic_duplicate_state = drm_atomic_dp_mst_duplicate_topology_state,
+};
+
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_dp_mst_topology_state *mst_state;
struct drm_device *dev = intel_dig_port->base.base.dev;
int ret;
+ mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
+ if (!mst_state)
+ return -ENOMEM;
+
intel_dp->can_mst = true;
intel_dp->mst_mgr.cbs = &mst_cbs;
+ intel_dp->mst_mgr.funcs = &mst_state_funcs;
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, mst_state, dev,
&intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->can_mst = false;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8bd739cfd00d..200db30a9c43 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3310,6 +3310,12 @@ nv50_mstm = {
.hotplug = nv50_mstm_hotplug,
};
+static const struct drm_private_state_funcs
+nv50_mst_state_funcs = {
+ .atomic_duplicate_state = drm_atomic_dp_mst_duplicate_topology_state,
+ .atomic_destroy_state = drm_atomic_dp_mst_destroy_topology_state,
+};
+
void
nv50_mstm_service(struct nv50_mstm *mstm)
{
@@ -3438,6 +3444,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
{
const int max_payloads = hweight8(outp->dcb->heads);
struct drm_device *dev = outp->base.base.dev;
+ struct drm_dp_mst_topology_state *state;
struct nv50_mstm *mstm;
int ret, i;
u8 dpcd;
@@ -3454,10 +3461,18 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
return -ENOMEM;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state) {
+ kfree(mstm);
+ return -ENOMEM;
+ }
mstm->outp = outp;
mstm->mgr.cbs = &nv50_mstm;
+ mstm->mgr.funcs = &nv50_mst_state_funcs;
- ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
+ ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, state, dev,
+ aux, aux_max,
max_payloads, conn_base_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index cd8a3ee16649..6edf52404256 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -335,6 +335,11 @@ static const struct drm_dp_mst_topology_cbs mst_cbs = {
.hotplug = radeon_dp_mst_hotplug,
};
+static const struct drm_private_state_funcs mst_state_funcs = {
+ .atomic_duplicate_state = drm_atomic_dp_mst_duplicate_topology_state,
+ .atomic_destroy_state = drm_atomic_dp_mst_destroy_topology_state,
+};
+
static struct
radeon_connector *radeon_mst_find_connector(struct drm_encoder *encoder)
{
@@ -657,12 +662,18 @@ int
radeon_dp_mst_init(struct radeon_connector *radeon_connector)
{
struct drm_device *dev = radeon_connector->base.dev;
+ struct drm_dp_mst_topology_state *state =
+ kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
if (!radeon_connector->ddc_bus->has_aux)
return 0;
radeon_connector->mst_mgr.cbs = &mst_cbs;
- return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev,
+ radeon_connector->mst_mgr.funcs = &mst_state_funcs;
+ return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr,
+ state, dev,
&radeon_connector->ddc_bus->aux, 16, 6,
radeon_connector->base.base.id);
}
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 5ca77dcf4f90..ad1aaec8d514 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -565,6 +565,7 @@ struct drm_dp_mst_topology_mgr {
};
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *state,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
int max_payloads, int conn_base_id);
@@ -621,6 +622,13 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
struct drm_dp_mst_topology_state *
drm_atomic_dp_mst_get_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr);
+struct drm_private_state *drm_atomic_dp_mst_duplicate_topology_state(struct drm_private_obj *obj);
+int __drm_atomic_dp_mst_duplicate_topology_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *state);
+void drm_atomic_dp_mst_destroy_topology_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
+void __drm_atomic_dp_mst_destroy_topology_state(struct drm_dp_mst_topology_state *state);
+
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn);
--
2.14.3
This gives drivers subclassing drm_dp_mst_topology_state the ability to
prepare their topology states for a new hub to be connected whenever
it's detected that the currently connected topology has been
disconnected from the system. We'll need this in order to correctly
track RX capabilities in i915 from the topology's atomic state.
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
Signed-off-by: Lyude Paul <[email protected]>
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
---
drivers/gpu/drm/drm_dp_mst_topology.c | 10 ++++++++++
include/drm/drm_dp_mst_helper.h | 3 ++-
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index fbd7888ebca8..981bd0f7d3ab 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2096,6 +2096,15 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
return true;
}
+static void drm_dp_mst_topology_reset_state(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_dp_mst_topology_state *state =
+ to_dp_mst_topology_state(mgr->base.state);
+
+ if (mgr->cbs->reset_state)
+ mgr->cbs->reset_state(state);
+}
+
/**
* drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
* @mgr: manager to set state for
@@ -2171,6 +2180,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
mgr->vcpi_mask = 0;
+ drm_dp_mst_topology_reset_state(mgr);
}
out_unlock:
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index ad1aaec8d514..3a7378cd5bd1 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -381,6 +381,7 @@ struct drm_dp_sideband_msg_tx {
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
+struct drm_dp_mst_topology_state;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
@@ -388,7 +389,7 @@ struct drm_dp_mst_topology_cbs {
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
-
+ void (*reset_state)(struct drm_dp_mst_topology_state *state);
};
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)
--
2.14.3
For a while we actually haven't had any way of retraining MST links with
fallback link parameters like we do with SST. While uncommon, certain
setups such as my Caldigit TS3 + EVGA MST hub require this since
otherwise, they end up getting stuck in an infinite MST retraining loop.
MST retraining is somewhat different then SST retraining. While it's
possible during the normal link retraining sequence for a hub to indicate
bad link status, it's also possible for a hub to only indicate this
status through ESI messages and it's possible for this to happen after
the initial link training succeeds. This can lead to a pattern that
looks like this:
- Train MST link
- Training completes successfully
- MST hub sets Channel EQ failed bit in ESI
- Retraining starts
- Retraining completes successfully
- MST hub sets Channel EQ failed bit in ESI again
- Rinse and repeat
In these situations, we need to be able to actually trigger fallback
link training from the ESI handler as well, along with using the ESI
handler during retraining to figure out whether or not our retraining
actually succeeded.
This gets a bit more complicated since we have to ensure that we don't
block the ESI handler at all while doing retraining. If we do, due to
DisplayPort's general issues with being sensitive to IRQ latency, most
MST hubs will just stop responding to us if their interrupts aren't
handled in a timely manner.
So: move retraining into its own separate handler. Running in a
separate handler allows us to avoid stalling the ESI during link
retraining, and we can have the ESI signal that the channel EQ bit was
cleared through a simple completion struct. Additionally, we take care
to stick as much of this into the SST retraining path as possible since
sharing is caring.
V4:
- Move all MST retraining work into hotplug_work
- Grab the correct power wells when retraining.
- Loop through MST encoders in intel_dp_get_crtc_mask(), quicker/easier
than connectors
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
---
drivers/gpu/drm/i915/intel_ddi.c | 10 +-
drivers/gpu/drm/i915/intel_dp.c | 370 ++++++++++++++++++++------
drivers/gpu/drm/i915/intel_dp_link_training.c | 6 +-
drivers/gpu/drm/i915/intel_dp_mst.c | 28 +-
drivers/gpu/drm/i915/intel_drv.h | 7 +
5 files changed, 329 insertions(+), 92 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 92cb26b18a9b..7474300cad01 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3042,6 +3042,7 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
{
struct drm_modeset_acquire_ctx ctx;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bool changed;
int ret;
@@ -3063,9 +3064,16 @@ static bool intel_ddi_hotplug(struct intel_encoder *encoder,
break;
}
+ if (ret == -EINVAL)
+ ret = intel_dp_handle_train_failure(intel_dp);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+
+ if (ret == -EIO)
+ changed = true;
+ else
+ WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
return changed;
}
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fbb467bc227d..c62d03c69ec3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -45,6 +45,8 @@
#define DP_DPRX_ESI_LEN 14
+#define DP_MST_RETRAIN_TIMEOUT (msecs_to_jiffies(100))
+
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
@@ -2760,6 +2762,7 @@ static void intel_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_dp->link_trained = false;
+ intel_dp->link_is_bad = false;
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder,
@@ -4205,8 +4208,134 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Could not write test response to sink\n");
}
+/* Get a mask of the CRTCs that are running on the given intel_dp struct. For
+ * MST, this returns a crtc mask containing all of the CRTCs driving
+ * downstream sinks, for SST it just returns a mask of the attached
+ * connector's CRTC.
+ */
+int
+intel_dp_get_crtc_mask(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = dp_to_dig_port(intel_dp)->base.base.dev;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc;
+ int crtc_mask = 0;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+
+ if (intel_dp->is_mst) {
+ struct intel_dp_mst_encoder *mst_enc;
+ struct intel_connector *intel_connector;
+ struct drm_connector_state *conn_state;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_dp->mst_encoders); i++) {
+ mst_enc = intel_dp->mst_encoders[i];
+
+ intel_connector = mst_enc->connector;
+ if (!intel_connector)
+ continue;
+
+ conn_state = intel_connector->base.state;
+ if (conn_state->crtc)
+ crtc_mask |= drm_crtc_mask(conn_state->crtc);
+ }
+ } else if (intel_dp->attached_connector) {
+ connector = &intel_dp->attached_connector->base;
+ crtc = connector->state->crtc;
+
+ if (crtc)
+ crtc_mask |= drm_crtc_mask(crtc);
+ }
+
+ return crtc_mask;
+}
+
+static bool
+intel_dp_needs_link_retrain(struct intel_dp *intel_dp,
+ const u8 esi[DP_DPRX_ESI_LEN])
+{
+ u8 buf[DP_LINK_STATUS_SIZE];
+ const u8 *link_status = NULL;
+
+ if (intel_dp->link_is_bad)
+ return false;
+
+ if (intel_dp->is_mst) {
+ if (!intel_dp->active_mst_links)
+ return false;
+
+ if (esi) {
+ link_status = &esi[10];
+ } else {
+ struct completion *retrain_completion =
+ &intel_dp->mst_retrain_completion;
+ /* We're not running from the ESI handler, so wait a
+ * little bit to see if the ESI handler lets us know
+ * that the link status is OK
+ */
+ if (wait_for_completion_timeout(retrain_completion,
+ DP_MST_RETRAIN_TIMEOUT))
+ return false;
+ }
+ } else {
+ if (intel_dp->link_trained)
+ return false;
+ if (!intel_dp_get_link_status(intel_dp, buf))
+ return false;
+
+ link_status = buf;
+ }
+
+ /*
+ * Validate the cached values of intel_dp->link_rate and
+ * intel_dp->lane_count before attempting to retrain.
+ */
+ if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
+ intel_dp->lane_count))
+ return false;
+
+ if (link_status) {
+ return !drm_dp_channel_eq_ok(link_status,
+ intel_dp->lane_count);
+ } else {
+ return true;
+ }
+}
+
+static inline bool
+intel_dp_mst_needs_retrain(struct intel_dp *intel_dp,
+ const u8 esi[DP_DPRX_ESI_LEN])
+{
+ struct drm_device *dev = dp_to_dig_port(intel_dp)->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (intel_dp_needs_link_retrain(intel_dp, esi)) {
+ DRM_DEBUG_KMS("Channel EQ failing\n");
+
+ if (completion_done(&intel_dp->mst_retrain_completion)) {
+ reinit_completion(&intel_dp->mst_retrain_completion);
+ DRM_DEBUG_KMS("Starting retrain\n");
+
+ return true;
+ } else if (!work_busy(&dev_priv->hotplug.hotplug_work)) {
+ /* Retraining was probably interrupted (usually from
+ * one of the CRTCs being in a modeset during a
+ * previous retrain attempt)
+ */
+ return true;
+ }
+
+ } else if (!completion_done(&intel_dp->mst_retrain_completion)) {
+ DRM_DEBUG_KMS("Channel EQ stable\n");
+ complete_all(&intel_dp->mst_retrain_completion);
+ }
+
+ return false;
+}
+
static int
-intel_dp_check_mst_status(struct intel_dp *intel_dp)
+intel_dp_check_mst_status(struct intel_dp *intel_dp, bool *need_retrain)
{
bool bret;
@@ -4215,16 +4344,14 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
int ret = 0;
int retry;
bool handled;
+ *need_retrain = false;
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
if (bret == true) {
-
- /* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links &&
- !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
- DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
+ if (!*need_retrain) {
+ *need_retrain =
+ intel_dp_mst_needs_retrain(intel_dp,
+ esi);
}
DRM_DEBUG_KMS("got esi %3ph\n", esi);
@@ -4262,29 +4389,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
return -EINVAL;
}
-static bool
-intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
-
- if (!intel_dp->link_trained)
- return false;
-
- if (!intel_dp_get_link_status(intel_dp, link_status))
- return false;
-
- /*
- * Validate the cached values of intel_dp->link_rate and
- * intel_dp->lane_count before attempting to retrain.
- */
- if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
- intel_dp->lane_count))
- return false;
-
- /* Retrain if Channel EQ or CR not ok */
- return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
-}
-
/*
* If display is now connected check links status,
* there has been known issues of link loss triggering
@@ -4300,68 +4404,131 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_connector_state *conn_state;
- struct intel_crtc_state *crtc_state;
- struct intel_crtc *crtc;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+ enum pipe pch_transcoder;
+ int crtc_mask, retry_count = 0;
int ret;
- /* FIXME handle the MST connectors as well */
-
- if (!connector || connector->base.status != connector_status_connected)
- return 0;
-
ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
ctx);
if (ret)
return ret;
- conn_state = connector->base.state;
+ crtc_mask = intel_dp_get_crtc_mask(intel_dp);
+ for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) {
+ struct drm_crtc_state *crtc_state;
+ struct intel_crtc_state *intel_crtc_state;
- crtc = to_intel_crtc(conn_state->crtc);
- if (!crtc)
- return 0;
-
- ret = drm_modeset_lock(&crtc->base.mutex, ctx);
- if (ret)
- return ret;
+ crtc = &intel_crtc->base;
+ ret = drm_modeset_lock(&crtc->mutex, ctx);
+ if (ret)
+ return ret;
- crtc_state = to_intel_crtc_state(crtc->base.state);
+ crtc_state = crtc->state;
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
+ WARN_ON(!intel_crtc_has_dp_encoder(intel_crtc_state));
- WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+ if (crtc_state->commit &&
+ !try_wait_for_completion(&crtc_state->commit->hw_done)) {
+ DRM_DEBUG_KMS("Not all CRTCs ready yet\n");
+ return 0;
+ }
- if (!crtc_state->base.active)
+ if (!crtc_state->active)
+ crtc_mask &= ~drm_crtc_mask(crtc);
+ }
+ if (!crtc_mask)
return 0;
- if (conn_state->commit &&
- !try_wait_for_completion(&conn_state->commit->hw_done))
+ if (!intel_dp_needs_link_retrain(intel_dp, NULL))
return 0;
- if (!intel_dp_needs_link_retrain(intel_dp))
- return 0;
+ for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) {
+ pch_transcoder = intel_crtc_pch_transcoder(intel_crtc);
- /* Suppress underruns caused by re-training */
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- if (crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), false);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv,
+ intel_crtc->pipe,
+ false);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
+ if (intel_crtc->config->has_pch_encoder) {
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ pch_transcoder,
+ false);
+ }
+ }
+
+ do {
+ if (++retry_count > 5) {
+ DRM_DEBUG_KMS("Too many retries, can't retrain\n");
+ return -EINVAL;
+ }
+
+ intel_dp_start_link_train(intel_dp);
+ intel_dp_stop_link_train(intel_dp);
+ } while (intel_dp_needs_link_retrain(intel_dp, NULL));
+
+ /* Wait for things to become stable */
+ for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask)
+ intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+
+ /* Now that we know everything is OK, finally re-enable underrun
+ * reporting
+ */
+ for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) {
+ pch_transcoder = intel_crtc_pch_transcoder(intel_crtc);
- /* Keep underrun reporting disabled until things are stable */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_set_cpu_fifo_underrun_reporting(dev_priv,
+ intel_crtc->pipe,
+ true);
- intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (crtc->config->has_pch_encoder)
- intel_set_pch_fifo_underrun_reporting(dev_priv,
- intel_crtc_pch_transcoder(crtc), true);
+ if (intel_crtc->config->has_pch_encoder) {
+ intel_set_pch_fifo_underrun_reporting(dev_priv,
+ pch_transcoder,
+ true);
+ }
+ }
return 0;
}
+int intel_dp_handle_train_failure(struct intel_dp *intel_dp)
+{
+ int ret;
+ int max_link_rate, max_lane_count;
+
+ if (intel_dp->link_is_bad)
+ return 0;
+ intel_dp->link_is_bad = true;
+
+ /* Sometimes sinks will change their RX caps in response to a link
+ * retraining failure, so only fall back the link rate if we need to
+ */
+ ret = intel_dp_get_dpcd(intel_dp);
+ if (!ret) {
+ DRM_ERROR("IO error while reading dpcd from sink\n");
+ return -EIO;
+ }
+
+ max_link_rate = intel_dp_max_link_rate(intel_dp);
+ max_lane_count = intel_dp_max_lane_count(intel_dp);
+ if (intel_dp->link_rate == max_link_rate &&
+ intel_dp->lane_count == max_lane_count) {
+ if (intel_dp_get_link_train_fallback_values(intel_dp,
+ max_link_rate,
+ max_lane_count)) {
+ DRM_ERROR("No possible fallback values for link retraining\n");
+ return -EINVAL;
+ }
+ }
+
+ schedule_work(&intel_dp->modeset_retry_work);
+ return 0;
+}
+
/*
* If display is now connected check links status,
* there has been known issues of link loss triggering
@@ -4378,6 +4545,7 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector)
{
struct drm_modeset_acquire_ctx ctx;
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bool changed;
int ret;
@@ -4396,9 +4564,16 @@ static bool intel_dp_hotplug(struct intel_encoder *encoder,
break;
}
+ if (ret == -EINVAL)
+ ret = intel_dp_handle_train_failure(intel_dp);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+
+ if (ret == -EIO)
+ changed = true;
+ else
+ WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
return changed;
}
@@ -4459,7 +4634,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
}
/* defer to the hotplug work for link retraining if needed */
- if (intel_dp_needs_link_retrain(intel_dp))
+ if (intel_dp_needs_link_retrain(intel_dp, NULL))
return false;
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
@@ -5379,6 +5554,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
enum irqreturn ret = IRQ_NONE;
+ bool need_retrain = false;
if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
/*
@@ -5405,7 +5581,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
if (intel_dp->is_mst) {
- if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
+ if (intel_dp_check_mst_status(intel_dp,
+ &need_retrain) == -EINVAL) {
/*
* If we were in MST mode, and device is not
* there, get out of MST mode
@@ -5417,6 +5594,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_dp->is_mst);
intel_dp->detect_done = false;
goto put_power;
+ } else if (need_retrain) {
+ goto put_power;
}
}
@@ -6251,21 +6430,35 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
struct intel_dp *intel_dp = container_of(work, typeof(*intel_dp),
modeset_retry_work);
- struct drm_connector *connector = &intel_dp->attached_connector->base;
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_connector *connector;
+ int max_link_rate, max_lane_count;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
- connector->name);
+ mutex_lock(&dev->mode_config.mutex);
- /* Grab the locks before changing connector property*/
- mutex_lock(&connector->dev->mode_config.mutex);
- /* Set connector link status to BAD and send a Uevent to notify
- * userspace to do a modeset.
+ /* Set the connector link status of all (possibly downstream) ports to
+ * BAD and send a Uevent to notify userspace to do a modeset.
*/
- drm_mode_connector_set_link_status_property(connector,
- DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
+ if (intel_dp->is_mst) {
+ max_link_rate = intel_dp_max_link_rate(intel_dp);
+ max_lane_count = intel_dp_max_lane_count(intel_dp);
+ drm_dp_mst_topology_mgr_lower_link_rate(&intel_dp->mst_mgr,
+ max_link_rate,
+ max_lane_count);
+ } else {
+ connector = &intel_dp->attached_connector->base;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ drm_mode_connector_set_link_status_property(connector,
+ DRM_MODE_LINK_STATUS_BAD);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
/* Send Hotplug uevent so userspace can reprobe */
- drm_kms_helper_hotplug_event(connector->dev);
+ drm_kms_helper_hotplug_event(dev);
}
bool
@@ -6283,6 +6476,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* Initialize the work for modeset in case of link train failure */
INIT_WORK(&intel_dp->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
+ init_completion(&intel_dp->mst_retrain_completion);
+ complete_all(&intel_dp->mst_retrain_completion);
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
@@ -6499,6 +6694,7 @@ void intel_dp_mst_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int i;
+ bool need_retrain = false;
for (i = 0; i < I915_MAX_PORTS; i++) {
struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
@@ -6508,7 +6704,11 @@ void intel_dp_mst_resume(struct drm_device *dev)
continue;
ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
- if (ret)
- intel_dp_check_mst_status(&intel_dig_port->dp);
+ if (ret) {
+ intel_dp_check_mst_status(&intel_dig_port->dp,
+ &need_retrain);
+ if (need_retrain)
+ drm_kms_helper_hotplug_event(dev);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 2cfa58ce1f95..aa6cfccdab0f 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -336,11 +336,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_connector->base.base.id,
intel_connector->base.name,
intel_dp->link_rate, intel_dp->lane_count);
- if (!intel_dp_get_link_train_fallback_values(intel_dp,
- intel_dp->link_rate,
- intel_dp->lane_count))
- /* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_dp->modeset_retry_work);
+ intel_dp_handle_train_failure(intel_dp);
} else {
DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
intel_connector->base.base.id,
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 31b37722cd89..f61950073f05 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -156,13 +156,37 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
}
static int intel_dp_mst_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *new_conn_state)
+ struct drm_connector_state *new_conn_state)
{
struct drm_atomic_state *state = new_conn_state->state;
struct drm_connector_state *old_conn_state;
struct drm_crtc_state *new_crtc_state;
+ struct intel_dp *intel_dp = to_intel_connector(connector)->mst_port;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct intel_dp_mst_topology_state *intel_mst_state;
int ret = 0;
+ /* We can't retrain anything if the hub isn't there anymore */
+ if (intel_dp) {
+ ret = drm_dp_atomic_mst_check_retrain(new_conn_state,
+ &intel_dp->mst_mgr);
+
+ /* This commit invokes a retrain, reset the link rate */
+ if (ret == 1) {
+ mst_state =
+ drm_atomic_dp_mst_get_topology_state(state,
+ &intel_dp->mst_mgr);
+ intel_mst_state =
+ to_intel_dp_mst_topology_state(mst_state);
+
+ intel_mst_state->link_rate = 0;
+ intel_mst_state->lane_count = 0;
+ ret = 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ }
+
old_conn_state = drm_atomic_get_old_connector_state(state, connector);
/* Only free VCPI here if we're not going to be detaching the
@@ -230,7 +254,9 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
+ intel_dp->link_is_bad = false;
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+
intel_dig_port->base.post_disable(&intel_dig_port->base,
old_crtc_state, NULL);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index eccb4bd042c3..9567a0e71424 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1128,6 +1128,11 @@ struct intel_dp {
/* mst connector list */
struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
struct drm_dp_mst_topology_mgr mst_mgr;
+ struct completion mst_retrain_completion;
+ /* Set when retraining the link at the current parameters is
+ * impossible
+ */
+ bool link_is_bad;
uint32_t (*get_aux_clock_divider)(struct intel_dp *dp, int index);
/*
@@ -1649,6 +1654,7 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
+int intel_dp_handle_train_failure(struct intel_dp *intel_dp);
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
void intel_dp_encoder_reset(struct drm_encoder *encoder);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
@@ -1701,6 +1707,7 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
+int intel_dp_get_crtc_mask(struct intel_dp *intel_dp);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
--
2.14.3
Unlike SST, MST can have far more than a single display hooked up on a
single port. What this also means however, is that if the DisplayPort
link to the top-level MST branch device becomes unstable then every
single branch device also has an unstable link. Additionally, MST has a
few more steps that must be taken in order to properly retrain the
entire topology under a lower link rate. Since the VCPI allocations for
each mstb are determined based on the link rate for the top of the
topology, we also have to recalculate all of the VCPI allocations for
the downstream ports as well to ensure that we still have enough link
bandwidth to drive each mstb.
Additionally, since we have multiple downstream connectors per port,
setting the link status of the parent mstb's port to bad isn't enough:
all of the downstream mstb ports have to have their link status set to
bad.
This basically follows the same paradigm that our DP link status logic
in DRM does, in that we simply tell userspace that all of the mstb ports
need retraining and additionally applying the new lower bandwidth
constraints to all of the atomic commits on the topology that follow.
Additionally; we add helpers that handle automatically checking whether
or not a new atomic commit would perform the modesets required to
retrain a link and if so, additionally handles updating the link status
of each connector on the MST topology.
V4:
- clarify slightly confusing message in
drm_dp_mst_topology_mgr_lower_link_rate()
- Fix argument naming
- Squash this with the other retrain helper, because now they're
intertwined with one another
- Track which connectors with CRTCs need modesets in order to retrain a
topology in the topology's atomic state. This lets us greatly simplify
the helpers, along with alleviating drivers of the responsibility of
figuring out when to call the retrain helpers during atomic checks. It
also ensures that we can keep zombie connectors that a retrain is
dependent on alive until the topology either disappears, or they are
disabled. We needed to do most of this anyway, since our original
helpers didn't take into account that we need to invoke retraining
when the link status prop changes, regardless of whether or not a
modeset has been initiated yet.
- Handle situation we completely forgot about: adding new connectors to
an MST topology that needs fallback retraining (solution: new
connectors on a topology inherit the link status of the rest of the
topology)
- Also make sure to handle connectors that are orphaned due to their
MST topology disappearing. Solution: remove from topology state,
reset link status to good
- Write more docs on the retraining procedure.
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
---
drivers/gpu/drm/drm_dp_mst_topology.c | 440 +++++++++++++++++++++++++++++++++-
include/drm/drm_dp_mst_helper.h | 20 ++
2 files changed, 455 insertions(+), 5 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 981bd0f7d3ab..cc4b737a47b0 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1199,6 +1199,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (created && !port->input) {
char proppath[255];
+ struct drm_dp_mst_topology_state *state;
build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
@@ -1217,6 +1218,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
drm_mode_connector_set_tile_property(port->connector);
}
+ state = to_dp_mst_topology_state(port->mgr->base.state);
+ if (!list_empty(&state->bad_ports)) {
+ port->connector->state->link_status =
+ DRM_LINK_STATUS_BAD;
+ }
(*mstb->mgr->cbs->register_connector)(port->connector);
}
@@ -2076,7 +2082,7 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
{
switch (dp_link_bw) {
default:
- DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
+ DRM_DEBUG_KMS("invalid link bandwidth: %x (link count: %d)\n",
dp_link_bw, dp_link_count);
return false;
@@ -2096,10 +2102,409 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
return true;
}
+static int drm_dp_set_mstb_link_status(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ enum drm_link_status status)
+{
+ struct drm_dp_mst_topology_state *state =
+ to_dp_mst_topology_state(mgr->base.state);
+ struct drm_dp_mst_branch *rmstb;
+ struct drm_dp_mst_port *port;
+ struct drm_connector *connector;
+ struct drm_dp_mst_retrain_dep *retrain_dep;
+ int ret;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ rmstb = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+ if (rmstb) {
+ ret = drm_dp_set_mstb_link_status(mgr, rmstb, status);
+ drm_dp_put_mst_branch_device(rmstb);
+
+ if (ret)
+ return ret;
+ }
+
+ connector = port->connector;
+ if (!connector)
+ continue;
+
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] link status good -> bad\n",
+ connector->base.id, connector->name);
+ connector->state->link_status = status;
+
+ if (!connector->state->crtc)
+ continue;
+
+ retrain_dep = kmalloc(sizeof(*retrain_dep), GFP_KERNEL);
+ if (!retrain_dep) {
+ DRM_ERROR("Not enough memory to update atomic state\n");
+ return -ENOMEM;
+ }
+
+ retrain_dep->connector = connector;
+ drm_connector_get(connector);
+ list_add(&retrain_dep->entry, &state->bad_ports);
+ }
+
+ return 0;
+}
+
+static void drm_dp_mst_destroy_retrain_dep(struct drm_dp_mst_retrain_dep *dep)
+{
+ drm_connector_put(dep->connector);
+ list_del(&dep->entry);
+ kfree(dep);
+}
+
+/**
+ * DOC: handling link retraining on MST topologies
+ *
+ * DisplayPort MST hubs work by allowing multiple connections on a single
+ * physical DisplayPort link. Because of this, any changes in the link status
+ * affect all connectors on the shared link. This has quite a number of
+ * important implications.
+ *
+ * When an MST topology requires that it be retrained at a lower link rate,
+ * the new link rate must be applied to all active connectors at the same
+ * time, since they share the same link. This means that if a connector has
+ * its link status set to &DRM_MODE_LINK_STATUS_BAD and it's on an MST
+ * topology, changing the property to &DRM_MODE_LINK_STATUS_GOOD will result
+ * in adding every other connector that resides on the same topology into the
+ * new atomic state. Additionally, any MST connectors that were added to the
+ * state with CRTCs attached to them will automatically have a modeset forced
+ * on them in the atomic state, and have their link status updated to
+ * &DRM_MODE_LINK_STATUS_GOOD. If userspace has not set a new mode on these
+ * connectors, the kernel will attempt to reuse the same modes that are
+ * currently active on said CRTCs. All modes set on said connectors within
+ * this state are then checked against the new lowered link configuration to
+ * ensure that there is still enough bandwidth to support them. If there is
+ * not enough bandwidth, the atomic check will fail.
+ *
+ * Detaching CRTCs from MST connectors that have their link status set to
+ * &DRM_MODE_LINK_STATUS_BAD has a different effect however. Because disabling
+ * a CRTC on an MST connector only requires that the driver free the bandwidth
+ * allocated to a connector and does not require the driver to allocate more
+ * bandwidth, states which disable CRTCs on an MST topology but do not enable
+ * new CRTCs or apply new modes to CRTCs on the topology will not implicitly
+ * pull in the state of other CRTCs attached to connectors on the topology.
+ * This allows userspace to disable CRTCs on a topology that requires
+ * retraining in any order it chooses, so long as it doesn't try to apply new
+ * modes to the topology.
+ *
+ * If an atomic state would put an MST topology into a state where it has no
+ * connectors present that are attached to CRTCs, the kernel will
+ * automatically add each connector on the topology to the state and update
+ * their respective link statuses to &DRM_MODE_LINK_STATUS_GOOD. This is
+ * because when there are no active VC channels allocated on a hub, there is
+ * nothing which prevents the driver from immediately changing the maximum
+ * link rate/lane count, as the updated link configuration can simply be
+ * applied the next time a CRTC is attached to a connector on the topology.
+ *
+ * When an MST topology requires link retraining at a lower link rate, any new
+ * connectors that appear on the topology will automatically inherit the link
+ * status value of other connectors on the topology. This is to ensure that
+ * the shared link status remains consistent across the topology, and to
+ * prevent new modesets from occurring on the topology without first requiring
+ * a full retrain.
+ */
+
+/**
+ * drm_dp_mst_topology_mgr_lower_link_rate() - Override the DP link rate/count
+ * for all connectors in a given MST topology
+ * @mgr: manager to set state for
+ * @link_rate: The new DP link bandwidth
+ * @lane_count: The new DP lane count
+ *
+ * This is called by the driver when it detects that the current DP link for
+ * the given topology manager is unstable, and needs to be retrained at a
+ * lower link rate/lane count.
+ *
+ * This takes care of updating the link status on all downstream connectors,
+ * the mst topology's atomic state, and VC payloads for each port.
+ * The driver should send a hotplug event after calling this function to
+ * notify userspace of the link status change.
+ *
+ * RETURNS:
+ *
+ * 0 on success, or negative error code on failure.
+ */
+int drm_dp_mst_topology_mgr_lower_link_rate(struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int lane_count)
+{
+ struct drm_device *dev = mgr->dev;
+ struct drm_dp_mst_topology_state *state;
+ struct drm_dp_mst_branch *mst_primary;
+ struct drm_dp_mst_retrain_dep *retrain_dep, *retrain_tmp;
+ struct drm_connector *connector;
+ int new_pbn_div;
+ int ret = 0;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ state = to_dp_mst_topology_state(mgr->base.state);
+
+ if (!drm_dp_get_vc_payload_bw(drm_dp_link_rate_to_bw_code(link_rate),
+ lane_count, &new_pbn_div)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ mst_primary = drm_dp_get_validated_mstb_ref(mgr, mgr->mst_primary);
+ if (!mst_primary)
+ goto out;
+
+ DRM_DEBUG_KMS("MST link impossible to retrain at current params, lowering pbn_div to %d\n",
+ new_pbn_div);
+ mgr->pbn_div = new_pbn_div;
+
+ ret = drm_dp_set_mstb_link_status(mgr, mst_primary,
+ DRM_LINK_STATUS_BAD);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to update link status, rolling back\n");
+
+ list_for_each_entry_safe(retrain_dep, retrain_tmp,
+ &state->bad_ports, entry) {
+ connector = retrain_dep->connector;
+ connector->state->link_status = DRM_LINK_STATUS_GOOD;
+
+ drm_dp_mst_destroy_retrain_dep(retrain_dep);
+ }
+ }
+
+ drm_dp_put_mst_branch_device(mst_primary);
+out:
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_topology_mgr_lower_link_rate);
+
+static void
+drm_atomic_dp_mst_satisfy_retrain_dep(struct drm_dp_mst_topology_state *mst_state,
+ struct drm_connector *connector)
+{
+ struct drm_dp_mst_retrain_dep *dep;
+
+ list_for_each_entry(dep, &mst_state->bad_ports, entry) {
+ if (dep->connector != connector)
+ continue;
+
+ drm_dp_mst_destroy_retrain_dep(dep);
+ break;
+ }
+}
+
+static int
+drm_atomic_dp_mst_retrain_connector(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_connector *connector)
+{
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_connector_state(state, connector);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (IS_ERR(conn_state))
+ return PTR_ERR(conn_state);
+
+ if (conn_state->link_status == DRM_LINK_STATUS_BAD) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] link status bad -> good\n",
+ connector->base.id, connector->name);
+ conn_state->link_status = DRM_LINK_STATUS_GOOD;
+ }
+
+ drm_atomic_dp_mst_satisfy_retrain_dep(mst_state, connector);
+
+ crtc = conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] needs full modeset\n",
+ crtc->base.id, crtc->name);
+
+ crtc_state->mode_changed = true;
+ return 0;
+}
+
+static int
+drm_atomic_dp_mst_retrain_mstb(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct drm_dp_mst_branch *mstb)
+{
+ struct drm_dp_mst_branch *rmstb;
+ struct drm_dp_mst_port *port;
+ struct drm_connector *connector;
+ int ret;
+
+ list_for_each_entry(port, &mstb->ports, next) {
+ rmstb = drm_dp_get_validated_mstb_ref(mstb->mgr, port->mstb);
+ if (rmstb) {
+ ret = drm_atomic_dp_mst_retrain_mstb(state, mst_state,
+ rmstb);
+ drm_dp_put_mst_branch_device(rmstb);
+ if (ret)
+ return ret;
+ }
+
+ connector = port->connector;
+ if (!connector)
+ continue;
+
+ ret = drm_atomic_dp_mst_retrain_connector(state, mst_state,
+ connector);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_dp_atomic_mst_check_retrain() - Prepare the topology's state to be
+ * retrained during this atomic commit, if required
+ * @new_conn_state: the new connector state possibly triggering a retrain
+ * @mgr: The DP MST topology to retrain
+ *
+ * When the link status of an MST topology goes from
+ * &DRM_MODE_LINK_STATUS_GOOD to &DRM_MODE_LINK_STATUS_BAD, the entire
+ * topology is considered to be in a state where a retrain at a lower link
+ * rate is required. Because each connector on an MST topology shares the same
+ * DP link, each connector must have its link rate lowered and be retrained
+ * at the same time. Additionally, drivers must take care to update the link
+ * status of a connector on an MST topology themselves if userspace attempts
+ * to set a new mode to the connector regardless of whether or not it properly
+ * updates the connector's link status property from
+ * &DRM_MODE_LINK_STATUS_BAD to &DRM_MODE_LINK_STATUS_GOOD. In this case, the
+ * driver must take care of updating this link status property on its own.
+ *
+ * This function takes care of handling all of the above requirements, and
+ * should be called during the start of the atomic check phase for an MST
+ * connector if the driver properly implements MST fallback retraining.
+ *
+ * Additionally; drivers are recommended to check the return value of this
+ * function in order to determine whether or not a full retrain of the MST
+ * topology would be performed by the new atomic state. This gives the
+ * driver a chance to update any private portions of the topology state that
+ * must be reset before a retrain, such as the link rate and lane count being
+ * shared by the topology.
+ *
+ * RETURNS: 1 if the given atomic state retrains the MST topology, 0 if the
+ * atomic state doesn't retrain the topology. On error, a negative error code
+ * is returned.
+ */
+int drm_dp_atomic_mst_check_retrain(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_branch *mstb;
+ struct drm_dp_mst_retrain_dep *retrain_dep, *retrain_tmp;
+ struct drm_connector *connector = new_conn_state->connector;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+ int ret;
+
+ if (old_conn_state->link_status != DRM_LINK_STATUS_BAD)
+ return 0;
+
+ mst_state = drm_atomic_dp_mst_get_topology_state(state, mgr);
+ if (list_empty(&mst_state->bad_ports))
+ return 0;
+
+ /* Check if the new state requires us to update the link status
+ * prop
+ */
+ if (new_conn_state->link_status == DRM_LINK_STATUS_BAD) {
+ /* Any modesets that leave a CRTC enabled must update link
+ * status
+ */
+ if (new_conn_state->crtc) {
+ crtc = new_conn_state->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ crtc);
+ if (crtc_state &&
+ drm_atomic_crtc_needs_modeset(crtc_state)) {
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] modeset requires link retrain\n",
+ crtc->base.id, crtc->name);
+ new_conn_state->link_status =
+ DRM_LINK_STATUS_GOOD;
+ }
+ } else if (old_conn_state->crtc) {
+ drm_atomic_dp_mst_satisfy_retrain_dep(mst_state,
+ connector);
+ /* If all CRTCs on this hub would be disabled by this
+ * state, link status can be updated to GOOD
+ */
+ if (list_empty(&mst_state->bad_ports)) {
+ DRM_DEBUG_ATOMIC("state %p disables all CRTCs on %p, link would be retrained\n",
+ state, mgr);
+ new_conn_state->link_status =
+ DRM_LINK_STATUS_GOOD;
+ }
+ }
+ if (new_conn_state->link_status == DRM_LINK_STATUS_BAD)
+ return 0;
+ }
+
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] link status bad -> good\n",
+ connector->base.id, connector->name);
+ ret = drm_atomic_dp_mst_retrain_connector(state, mst_state,
+ connector);
+ if (ret)
+ return ret;
+
+ /* Retrain whole topology */
+ mstb = drm_dp_get_validated_mstb_ref(mgr, mgr->mst_primary);
+ if (!mstb)
+ return -EIO;
+ ret = drm_atomic_dp_mst_retrain_mstb(state, mst_state, mstb);
+ drm_dp_put_mst_branch_device(mstb);
+ if (ret)
+ return ret;
+
+ /* Additionally, it's possible that the topology connector state may
+ * have changed between the time the topology's link status went to
+ * BAD. As a result, it's possible that there's still leftover retrain
+ * dependencies that refer to connectors that no longer exist on the
+ * hub. Ensure these are added to the state as well
+ */
+ list_for_each_entry_safe(retrain_dep, retrain_tmp,
+ &mst_state->bad_ports, entry) {
+ ret = drm_atomic_dp_mst_retrain_connector(state, mst_state,
+ connector);
+ if (ret)
+ return ret;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(drm_dp_atomic_mst_check_retrain);
+
static void drm_dp_mst_topology_reset_state(struct drm_dp_mst_topology_mgr *mgr)
{
+ struct drm_dp_mst_retrain_dep *retrain_dep, *retrain_tmp;
struct drm_dp_mst_topology_state *state =
to_dp_mst_topology_state(mgr->base.state);
+ struct drm_connector *connector;
+
+ list_for_each_entry_safe(retrain_dep, retrain_tmp, &state->bad_ports,
+ entry) {
+ /* Reset the connector link state, since there's no way to
+ * retrain something that no longer exists
+ */
+ connector = retrain_dep->connector;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] loses parent MST topology mgr %p, link status bad -> good\n",
+ connector->base.id, connector->name, mgr);
+ connector->state->link_status = DRM_LINK_STATUS_GOOD;
+ drm_dp_mst_destroy_retrain_dep(retrain_dep);
+ }
if (mgr->cbs->reset_state)
mgr->cbs->reset_state(state);
@@ -3110,7 +3515,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
(*mgr->cbs->hotplug)(mgr);
}
-/**
+/*
* drm_atomic_dp_mst_duplicate_topology_state - default
* drm_dp_mst_topology_state duplicate handler
*
@@ -3153,9 +3558,26 @@ int
__drm_atomic_dp_mst_duplicate_topology_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *state)
{
+ struct drm_dp_mst_retrain_dep *retrain_dep, *new_retrain_dep;
struct drm_private_obj *obj = &mgr->base;
+ struct drm_dp_mst_topology_state *src_state =
+ to_dp_mst_topology_state(obj->state);
memcpy(state, obj->state, sizeof(*state));
+ INIT_LIST_HEAD(&state->bad_ports);
+
+ /* Copy the bad ports list to the new state */
+ list_for_each_entry(retrain_dep, &src_state->bad_ports, entry) {
+ new_retrain_dep = kmalloc(sizeof(*retrain_dep), GFP_KERNEL);
+ if (!new_retrain_dep) {
+ __drm_atomic_dp_mst_destroy_topology_state(state);
+ return -ENOMEM;
+ }
+
+ new_retrain_dep->connector = retrain_dep->connector;
+ drm_connector_get(retrain_dep->connector);
+ list_add(&new_retrain_dep->entry, &state->bad_ports);
+ }
__drm_atomic_helper_private_obj_duplicate_state(&mgr->base,
&state->base);
@@ -3169,9 +3591,8 @@ EXPORT_SYMBOL(__drm_atomic_dp_mst_duplicate_topology_state);
*
* For drivers which don't yet subclass drm_dp_mst_topology_state.
*/
-void
-drm_atomic_dp_mst_destroy_topology_state(struct drm_private_obj *obj,
- struct drm_private_state *state)
+void drm_atomic_dp_mst_destroy_topology_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
{
struct drm_dp_mst_topology_state *mst_state =
to_dp_mst_topology_state(state);
@@ -3192,6 +3613,13 @@ EXPORT_SYMBOL(drm_atomic_dp_mst_destroy_topology_state);
void
__drm_atomic_dp_mst_destroy_topology_state(struct drm_dp_mst_topology_state *state)
{
+ struct drm_dp_mst_retrain_dep *retrain_dep, *retrain_tmp;
+
+ list_for_each_entry_safe(retrain_dep, retrain_tmp, &state->bad_ports,
+ entry) {
+ drm_connector_put(retrain_dep->connector);
+ kfree(retrain_dep);
+ }
}
EXPORT_SYMBOL(__drm_atomic_dp_mst_destroy_topology_state);
@@ -3276,6 +3704,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
/* max. time slots - one slot for MTP header */
state->avail_slots = 63;
+ INIT_LIST_HEAD(&state->bad_ports);
+
drm_atomic_private_obj_init(&mgr->base,
&state->base,
mgr->funcs);
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 3a7378cd5bd1..32f6944de560 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -407,9 +407,24 @@ struct drm_dp_payload {
#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
+/* Represents a connector with an MST port whose mstb had a mode programmed
+ * when the link rate of the topology was lowered. As long as a topology state
+ * has these, no mstbs may be activated
+ */
+struct drm_dp_mst_retrain_dep {
+ struct drm_connector *connector;
+ struct list_head entry;
+};
+
struct drm_dp_mst_topology_state {
struct drm_private_state base;
int avail_slots;
+ /**
+ * @bad_ports: DRM connectors that must have their mode changed before
+ * the link status of each connector on the MST hub can be updated
+ * from BAD to GOOD
+ */
+ struct list_head bad_ports;
struct drm_dp_mst_topology_mgr *mgr;
};
@@ -639,4 +654,9 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up);
+int drm_dp_mst_topology_mgr_lower_link_rate(struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int lane_count);
+int drm_dp_atomic_mst_check_retrain(struct drm_connector_state *new_conn_state,
+ struct drm_dp_mst_topology_mgr *mgr);
+
#endif
--
2.14.3
When a DP MST link needs retraining, sometimes the hub will detect that
the current link bw config is impossible and will update its RX caps in
the DPCD to reflect the new maximum link rate. Currently, we make the
assumption that the RX caps in the dpcd will never change like this.
This means if the sink changes its RX caps after we've already set up
an MST link and we attempt to add or remove another sink from the
topology, we could put ourselves into an invalid state where we've tried
to configure different sinks on the same MST topology with different
link rates. We could also run into this situation if a sink reports a
higher link rate after suspend, usually from us having trained it with a
fallback bw configuration before suspending.
So: keep the link rate consistent by subclassing
drm_dp_mst_topology_state, and tracking it there. For the time being, we
only allow the link rate to change when the entire topology has been
disconnected.
V4:
- Track link rate/lane count in the atomic topology state instead of in
intel_dp.
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
---
drivers/gpu/drm/i915/intel_dp_mst.c | 79 +++++++++++++++++++++++++++++--------
drivers/gpu/drm/i915/intel_drv.h | 7 ++++
2 files changed, 70 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index cf844cfd2bb0..19de0b5a7a40 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -41,8 +41,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_dp_mst_topology_state *mst_state =
+ drm_atomic_dp_mst_get_topology_state(state, &intel_dp->mst_mgr);
+ struct intel_dp_mst_topology_state *intel_mst_state;
int bpp;
- int lane_count, slots;
+ int slots;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
@@ -55,18 +58,22 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
bpp);
}
+
+ intel_mst_state =
+ to_intel_dp_mst_topology_state(mst_state);
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- lane_count = intel_dp_max_lane_count(intel_dp);
-
- pipe_config->lane_count = lane_count;
+ if (!intel_mst_state->link_rate || !intel_mst_state->lane_count) {
+ intel_mst_state->link_rate = intel_dp_max_link_rate(intel_dp);
+ intel_mst_state->lane_count = intel_dp_max_lane_count(intel_dp);
+ }
+ pipe_config->lane_count = intel_mst_state->lane_count;
+ pipe_config->port_clock = intel_mst_state->link_rate;
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
-
if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
pipe_config->has_audio = true;
@@ -80,7 +87,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
return false;
}
- intel_link_compute_m_n(bpp, lane_count,
+ intel_link_compute_m_n(bpp, intel_mst_state->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
&pipe_config->dp_m_n,
@@ -530,11 +537,55 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
+static void intel_mst_reset_state(struct drm_dp_mst_topology_state *state)
+{
+ struct intel_dp_mst_topology_state *intel_mst_state =
+ to_intel_dp_mst_topology_state(state);
+
+ intel_mst_state->link_rate = 0;
+ intel_mst_state->lane_count = 0;
+}
+
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
.register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
+ .reset_state = intel_mst_reset_state,
+};
+
+static struct drm_private_state *
+intel_dp_mst_duplicate_state(struct drm_private_obj *obj)
+{
+ struct intel_dp_mst_topology_state *state;
+ struct drm_dp_mst_topology_mgr *mgr = to_dp_mst_topology_mgr(obj);
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_dp_mst_duplicate_topology_state(mgr, &state->base);
+
+ return &state->base.base;
+}
+
+static void
+intel_dp_mst_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct drm_dp_mst_topology_state *mst_state =
+ to_dp_mst_topology_state(state);
+ struct intel_dp_mst_topology_state *intel_mst_state =
+ to_intel_dp_mst_topology_state(mst_state);
+
+ __drm_atomic_dp_mst_destroy_topology_state(mst_state);
+
+ kfree(intel_mst_state);
+}
+
+static const struct drm_private_state_funcs mst_state_funcs = {
+ .atomic_duplicate_state = intel_dp_mst_duplicate_state,
+ .atomic_destroy_state = intel_dp_mst_destroy_state,
};
static struct intel_dp_mst_encoder *
@@ -587,21 +638,16 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
return true;
}
-static const struct drm_private_state_funcs mst_state_funcs = {
- .atomic_destroy_state = drm_atomic_dp_mst_destroy_topology_state,
- .atomic_duplicate_state = drm_atomic_dp_mst_duplicate_topology_state,
-};
-
int
intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_dp_mst_topology_state *mst_state;
+ struct intel_dp_mst_topology_state *intel_mst_state;
struct drm_device *dev = intel_dig_port->base.base.dev;
int ret;
- mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
- if (!mst_state)
+ intel_mst_state = kzalloc(sizeof(*intel_mst_state), GFP_KERNEL);
+ if (!intel_mst_state)
return -ENOMEM;
intel_dp->can_mst = true;
@@ -610,7 +656,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_ba
/* create encoders */
intel_dp_create_fake_mst_encoders(intel_dig_port);
- ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, mst_state, dev,
+ ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr,
+ &intel_mst_state->base, dev,
&intel_dp->aux, 16, 3, conn_base_id);
if (ret) {
intel_dp->can_mst = false;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 742d53495974..eccb4bd042c3 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -985,6 +985,12 @@ struct cxsr_latency {
u16 cursor_hpll_disable;
};
+struct intel_dp_mst_topology_state {
+ struct drm_dp_mst_topology_state base;
+ int link_rate;
+ int lane_count;
+};
+
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base)
@@ -993,6 +999,7 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define to_intel_plane_state(x) container_of(x, struct intel_plane_state, base)
+#define to_intel_dp_mst_topology_state(x) container_of(x, struct intel_dp_mst_topology_state, base)
#define intel_fb_obj(x) (x ? to_intel_framebuffer(x)->obj : NULL)
struct intel_hdmi {
--
2.14.3
The current way of handling changing VCPI allocations doesn't make a
whole ton of sense. Since drm_atomic_helper_check_modeset() can be
called multiple times which means that intel_dp_mst_atomic_check() can
also be called multiple times. However, since we make the silly mistake
of trying to free VCPI slots based off the /new/ atomic state instead of
the old atomic state, we'll end up potentially double freeing the vcpi
slots for the ports.
This also has another unintended consequence that came back up while
implementing MST fallback retraining: if a modeset is forced on a
connector that's already part of the state, but its atomic_check() has
already been run once and doesn't get run again, we'll end up not
freeing the VCPI allocations on the connector at all.
The easiest way to solve this is to be clever and, with the exception of
connectors that are being disabled and thus will never have
compute_config() ran on them, move vcpi freeing out of the atomic check
and into compute_config().
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
Signed-off-by: Lyude Paul <[email protected]>
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
---
drivers/gpu/drm/i915/intel_dp_mst.c | 84 +++++++++++++++++++++++++++----------
1 file changed, 63 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 19de0b5a7a40..31b37722cd89 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -30,6 +30,38 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+static int
+intel_dp_mst_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_crtc_state *new_crtc_state;
+ struct intel_crtc_state *intel_crtc_state =
+ to_intel_crtc_state(crtc_state);
+ struct drm_encoder *encoder;
+ struct drm_dp_mst_topology_mgr *mgr;
+ int slots, ret;
+
+ slots = intel_crtc_state->dp_m_n.tu;
+ if (slots <= 0)
+ return 0;
+
+ encoder = conn_state->best_encoder;
+ mgr = &enc_to_mst(encoder)->primary->dp.mst_mgr;
+
+ ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
+ if (ret) {
+ DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n",
+ slots, ret);
+ } else {
+ new_crtc_state = drm_atomic_get_crtc_state(state,
+ crtc_state->crtc);
+ to_intel_crtc_state(new_crtc_state)->dp_m_n.tu = 0;
+ }
+
+ return ret;
+}
+
static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -44,10 +76,14 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_dp_mst_get_topology_state(state, &intel_dp->mst_mgr);
struct intel_dp_mst_topology_state *intel_mst_state;
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, &connector->base);
+ struct drm_crtc *old_crtc;
int bpp;
int slots;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
+ int ret;
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
@@ -80,6 +116,21 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
pipe_config->pbn = mst_pbn;
+ /* Free any VCPI allocations on this connector from the previous
+ * state
+ */
+ old_crtc = old_conn_state->crtc;
+ if (old_crtc) {
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, old_crtc);
+
+ ret = intel_dp_mst_atomic_release_vcpi_slots(state,
+ old_crtc_state,
+ old_conn_state);
+ if (ret)
+ return ret;
+ }
+
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port, mst_pbn);
if (slots < 0) {
@@ -109,31 +160,22 @@ static int intel_dp_mst_atomic_check(struct drm_connector *connector,
{
struct drm_atomic_state *state = new_conn_state->state;
struct drm_connector_state *old_conn_state;
- struct drm_crtc *old_crtc;
- struct drm_crtc_state *crtc_state;
- int slots, ret = 0;
+ struct drm_crtc_state *new_crtc_state;
+ int ret = 0;
old_conn_state = drm_atomic_get_old_connector_state(state, connector);
- old_crtc = old_conn_state->crtc;
- if (!old_crtc)
- return ret;
- crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
- slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu;
- if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
- struct drm_dp_mst_topology_mgr *mgr;
- struct drm_encoder *old_encoder;
-
- old_encoder = old_conn_state->best_encoder;
- mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr;
+ /* Only free VCPI here if we're not going to be detaching the
+ * connector's current CRTC, since no config will be computed
+ */
+ if (new_conn_state->crtc || !old_conn_state->crtc)
+ return ret;
- ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
- if (ret)
- DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret);
- else
- to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0;
- }
- return ret;
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ old_conn_state->crtc);
+ return intel_dp_mst_atomic_release_vcpi_slots(state,
+ new_crtc_state,
+ old_conn_state);
}
static void intel_mst_disable_dp(struct intel_encoder *encoder,
--
2.14.3
Since these functions are dealing with an atomic state that needs to be
modified during atomic check and commit, change the naming on this
function to drm_atomic_dp_mst_get_topology_state() since we're the only
one using the function, and it's more consistent with the naming
scheme used in drm_atomic.c/drm_atomic_helper.c.
Signed-off-by: Lyude Paul <[email protected]>
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
---
drivers/gpu/drm/drm_dp_mst_topology.c | 13 +++++++------
include/drm/drm_dp_mst_helper.h | 6 ++++--
2 files changed, 11 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6fac4129e6a2..ba67f1782a04 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -2622,7 +2622,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_state *topology_state;
int req_slots;
- topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+ topology_state = drm_atomic_dp_mst_get_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
@@ -2662,7 +2662,7 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
{
struct drm_dp_mst_topology_state *topology_state;
- topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+ topology_state = drm_atomic_dp_mst_get_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
@@ -3129,7 +3129,7 @@ static const struct drm_private_state_funcs mst_state_funcs = {
};
/**
- * drm_atomic_get_mst_topology_state: get MST topology state
+ * drm_atomic_dp_mst_get_topology_state: get MST topology state
*
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
@@ -3143,15 +3143,16 @@ static const struct drm_private_state_funcs mst_state_funcs = {
*
* The MST topology state or error pointer.
*/
-struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr)
+struct drm_dp_mst_topology_state *
+drm_atomic_dp_mst_get_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_device *dev = mgr->dev;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
}
-EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
+EXPORT_SYMBOL(drm_atomic_dp_mst_get_topology_state);
/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26a0766..2f6203407fd2 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -623,8 +623,10 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
-struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
- struct drm_dp_mst_topology_mgr *mgr);
+
+struct drm_dp_mst_topology_state *
+drm_atomic_dp_mst_get_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn);
--
2.14.3
While having the modeset_retry_work in intel_connector makes sense with
SST, this paradigm doesn't make a whole ton of sense when it comes to
MST since we have to deal with multiple connectors. In most cases, it's
more useful to just use the intel_dp struct since it indicates whether
or not we're dealing with an MST device, along with being able to easily
trace the intel_dp struct back to its respective connector (if there is
any). So, move the modeset_retry_work function out of the
intel_connector struct and into intel_dp.
Signed-off-by: Lyude Paul <[email protected]>
Reviewed-by: Manasi Navare <[email protected]>
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
V2:
- Remove accidental duplicate modeset_retry_work in intel_connector
V3:
- Also check against eDP in intel_hpd_poll_fini() - mdnavare
V4:
- Don't bother looping over connectors for canceling modeset retry work,
just encoders.
V7:
- Fix CHECKPATCH errors
Signed-off-by: Lyude Paul <[email protected]>
---
drivers/gpu/drm/i915/intel_display.c | 14 +++++++++++---
drivers/gpu/drm/i915/intel_dp.c | 10 ++++------
drivers/gpu/drm/i915/intel_dp_link_training.c | 2 +-
drivers/gpu/drm/i915/intel_drv.h | 6 +++---
4 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e04050ea3e28..18edb9628a54 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15471,20 +15471,28 @@ void intel_connector_unregister(struct drm_connector *connector)
static void intel_hpd_poll_fini(struct drm_device *dev)
{
- struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
/* Kill all the work that may have been queued by hpd. */
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- if (connector->modeset_retry_work.func)
- cancel_work_sync(&connector->modeset_retry_work);
if (connector->hdcp_shim) {
cancel_delayed_work_sync(&connector->hdcp_check_work);
cancel_work_sync(&connector->hdcp_prop_work);
}
}
drm_connector_list_iter_end(&conn_iter);
+
+ for_each_intel_encoder(dev, encoder) {
+ if (encoder->type == INTEL_OUTPUT_DP ||
+ encoder->type == INTEL_OUTPUT_EDP) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
+ cancel_work_sync(&intel_dp->modeset_retry_work);
+ }
+ }
}
void intel_modeset_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 62f82c4298ac..fbb467bc227d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -6249,12 +6249,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
- struct intel_connector *intel_connector;
- struct drm_connector *connector;
+ struct intel_dp *intel_dp = container_of(work, typeof(*intel_dp),
+ modeset_retry_work);
+ struct drm_connector *connector = &intel_dp->attached_connector->base;
- intel_connector = container_of(work, typeof(*intel_connector),
- modeset_retry_work);
- connector = &intel_connector->base;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
connector->name);
@@ -6283,7 +6281,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
int type;
/* Initialize the work for modeset in case of link train failure */
- INIT_WORK(&intel_connector->modeset_retry_work,
+ INIT_WORK(&intel_dp->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
if (WARN(intel_dig_port->max_lanes < 1,
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index f59b59bb0a21..2cfa58ce1f95 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -340,7 +340,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
intel_dp->link_rate,
intel_dp->lane_count))
/* Schedule a Hotplug Uevent to userspace to start modeset */
- schedule_work(&intel_connector->modeset_retry_work);
+ schedule_work(&intel_dp->modeset_retry_work);
} else {
DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
intel_connector->base.base.id,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5bd2263407b2..742d53495974 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -406,9 +406,6 @@ struct intel_connector {
struct intel_dp *mst_port;
- /* Work struct to schedule a uevent on link train failure */
- struct work_struct modeset_retry_work;
-
const struct intel_hdcp_shim *hdcp_shim;
struct mutex hdcp_mutex;
uint64_t hdcp_value; /* protected by hdcp_mutex */
@@ -1143,6 +1140,9 @@ struct intel_dp {
/* Displayport compliance testing */
struct intel_dp_compliance compliance;
+
+ /* Work struct to schedule a uevent on link train failure */
+ struct work_struct modeset_retry_work;
};
struct intel_lspcon {
--
2.14.3
Does what it says on the label, it's a little confusing debugging atomic
check failures otherwise.
Cc: Manasi Navare <[email protected]>
Cc: Ville Syrjälä <[email protected]>
Signed-off-by: Lyude Paul <[email protected]>
---
drivers/gpu/drm/drm_atomic.c | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7d25c42f22db..972a7e9634ab 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1705,8 +1705,11 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);
- if (ret)
+ if (ret) {
+ DRM_DEBUG_ATOMIC("atomic driver check for %p failed: %d\n",
+ state, ret);
return ret;
+ }
if (!state->allow_modeset) {
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
--
2.14.3
On Wed, 2018-04-11 at 18:54 -0400, Lyude Paul wrote:
> While having the modeset_retry_work in intel_connector makes sense with
> SST, this paradigm doesn't make a whole ton of sense when it comes to
> MST since we have to deal with multiple connectors. In most cases, it's
> more useful to just use the intel_dp struct since it indicates whether
> or not we're dealing with an MST device, along with being able to easily
> trace the intel_dp struct back to it's respective connector (if there is
> any). So, move the modeset_retry_work function out of the
> intel_connector struct and into intel_dp.
>
> Signed-off-by: Lyude Paul <[email protected]>
> Reviewed-by: Manasi Navare <[email protected]>
> Cc: Manasi Navare <[email protected]>
> Cc: Ville Syrjälä <[email protected]>
>
> V2:
> - Remove accidental duplicate modeset_retry_work in intel_connector
> V3:
> - Also check against eDP in intel_hpd_poll_fini() - mdnavare
contradicts with
commit c0cfb10d9e1de490e36d3b9d4228c0ea0ca30677
Author: Manasi Navare <[email protected]>
Date: Thu Oct 12 12:13:38 2017 -0700
drm/i915/edp: Do not do link training fallback or prune modes on EDP
In case of eDP because the panel has a fixed mode, the link rate
and lane count at which it is trained corresponds to the link BW
required to support the native resolution of the panel. In case of
panels with lower resolutions where fewer lanes are hooked up
internally,
that number is reflected in the MAX_LANE_COUNT DPCD register of the
panel.
So it is pointless to fallback to lower link rate/lane count in case
of link training failure on eDP connector since the lower link BW
will not support the native resolution of the panel and we cannot
prune the preferred mode on the eDP connector.
> V4:
> - Don't bother looping over connectors for canceling modeset rety work,
> just encoders.
> V7:
> - Fix CHECKPATCH errors
> Signed-off-by: Lyude Paul <[email protected]>
> ---
> drivers/gpu/drm/i915/intel_display.c | 14 +++++++++++---
> drivers/gpu/drm/i915/intel_dp.c | 10 ++++------
> drivers/gpu/drm/i915/intel_dp_link_training.c | 2 +-
> drivers/gpu/drm/i915/intel_drv.h | 6 +++---
> 4 files changed, 19 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index e04050ea3e28..18edb9628a54 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -15471,20 +15471,28 @@ void intel_connector_unregister(struct drm_connector *connector)
>
> static void intel_hpd_poll_fini(struct drm_device *dev)
> {
> - struct intel_connector *connector;
> struct drm_connector_list_iter conn_iter;
> + struct intel_connector *connector;
> + struct intel_encoder *encoder;
> + struct intel_dp *intel_dp;
>
> /* Kill all the work that may have been queued by hpd. */
> drm_connector_list_iter_begin(dev, &conn_iter);
> for_each_intel_connector_iter(connector, &conn_iter) {
> - if (connector->modeset_retry_work.func)
> - cancel_work_sync(&connector->modeset_retry_work);
> if (connector->hdcp_shim) {
> cancel_delayed_work_sync(&connector->hdcp_check_work);
> cancel_work_sync(&connector->hdcp_prop_work);
> }
> }
> drm_connector_list_iter_end(&conn_iter);
> +
> + for_each_intel_encoder(dev, encoder) {
> + if (encoder->type == INTEL_OUTPUT_DP ||
commit 7e732cacb1ae27b2eb6902cabd93e9da086c54f0
Author: Ville Syrjälä <[email protected]>
Date: Fri Oct 27 22:31:24 2017 +0300
drm/i915: Stop frobbing with DDI encoder->type
Currently the DDI encoder->type will change at runtime depending on
what kind of hotplugs we've processed. That's quite bad since we
can't
really trust that that current value of encoder->type actually
matches
the type of signal we're trying to drive through it.
Let's eliminate that problem by declaring that non-eDP DDI port will
always have the encoder type as INTEL_OUTPUT_DDI. This means the
code
can no longer try to distinguish DP vs. HDMI based on encoder->type.
We'll leave eDP as INTEL_OUTPUT_EDP, since it'll never change and
there's a bunch of code that relies on that value to identify eDP
encoders.
> + encoder->type == INTEL_OUTPUT_EDP) {
> + intel_dp = enc_to_intel_dp(&encoder->base);
> + cancel_work_sync(&intel_dp->modeset_retry_work);
> + }
> + }
> }
>
> void intel_modeset_cleanup(struct drm_device *dev)
> diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
> index 62f82c4298ac..fbb467bc227d 100644
> --- a/drivers/gpu/drm/i915/intel_dp.c
> +++ b/drivers/gpu/drm/i915/intel_dp.c
> @@ -6249,12 +6249,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
>
> static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
> {
> - struct intel_connector *intel_connector;
> - struct drm_connector *connector;
> + struct intel_dp *intel_dp = container_of(work, typeof(*intel_dp),
> + modeset_retry_work);
> + struct drm_connector *connector = &intel_dp->attached_connector->base;
>
> - intel_connector = container_of(work, typeof(*intel_connector),
> - modeset_retry_work);
> - connector = &intel_connector->base;
> DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
> connector->name);
>
> @@ -6283,7 +6281,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
> int type;
>
> /* Initialize the work for modeset in case of link train failure */
> - INIT_WORK(&intel_connector->modeset_retry_work,
> + INIT_WORK(&intel_dp->modeset_retry_work,
> intel_dp_modeset_retry_work_fn);
>
> if (WARN(intel_dig_port->max_lanes < 1,
> diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
> index f59b59bb0a21..2cfa58ce1f95 100644
> --- a/drivers/gpu/drm/i915/intel_dp_link_training.c
> +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
> @@ -340,7 +340,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
> intel_dp->link_rate,
> intel_dp->lane_count))
> /* Schedule a Hotplug Uevent to userspace to start modeset */
> - schedule_work(&intel_connector->modeset_retry_work);
> + schedule_work(&intel_dp->modeset_retry_work);
> } else {
> DRM_ERROR("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
> intel_connector->base.base.id,
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index 5bd2263407b2..742d53495974 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -406,9 +406,6 @@ struct intel_connector {
>
> struct intel_dp *mst_port;
>
> - /* Work struct to schedule a uevent on link train failure */
> - struct work_struct modeset_retry_work;
> -
> const struct intel_hdcp_shim *hdcp_shim;
> struct mutex hdcp_mutex;
> uint64_t hdcp_value; /* protected by hdcp_mutex */
> @@ -1143,6 +1140,9 @@ struct intel_dp {
>
> /* Displayport compliance testing */
> struct intel_dp_compliance compliance;
> +
> + /* Work struct to schedule a uevent on link train failure */
> + struct work_struct modeset_retry_work;
> };
>
> struct intel_lspcon {