Add dl_task()/dl_prio() checks symmetric to the existing RT checks.
Signed-off-by: Kirill Tkhai <[email protected]>
CC: Juri Lelli <[email protected]>
CC: Peter Zijlstra <[email protected]>
CC: Ingo Molnar <[email protected]>
---
kernel/locking/mutex.c | 8 +++++---
kernel/sched/core.c | 4 ++--
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33..cb9f381 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -21,6 +21,7 @@
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
+#include <linux/sched/deadline.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -459,10 +460,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
/*
* When there's no owner, we might have preempted between the
* owner acquiring the lock and setting the owner field. If
- * we're an RT task that will live-lock because we won't let
- * the owner complete.
+ * we're an RT or DL task that will live-lock because we won't
+ * let the owner complete.
*/
- if (!owner && (need_resched() || rt_task(task)))
+ if (!owner &&
+ (need_resched() || rt_task(task) || dl_task(task)))
break;
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240aa83..ba7617a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -962,11 +962,11 @@ static int effective_prio(struct task_struct *p)
{
p->normal_prio = normal_prio(p);
/*
- * If we are RT tasks or we were boosted to RT priority,
+ * If we are RT or DL tasks or we were boosted to RT or DL priority,
* keep the priority unchanged. Otherwise, update priority
* to the normal priority:
*/
- if (!rt_prio(p->prio))
+ if (!rt_prio(p->prio) && !dl_prio(p->prio))
return p->normal_prio;
return p->prio;
}