2022-05-06 04:07:55

by Eric W. Biederman

Subject: [PATCH v4 10/12] ptrace: Don't change __state

Stop playing with tsk->__state to remove TASK_WAKEKILL while a ptrace
command is executing.

Instead remove TASK_WAKEKILL from the definition of TASK_TRACED, and
implement a new jobctl flag JOBCTL_PTRACE_FROZEN. This new flag is set
in ptrace_freeze_traced and cleared when ptrace_stop is awoken or in
ptrace_unfreeze_traced (when ptrace_stop remains asleep).

In signal_wake_up add __TASK_TRACED to the wake-up state along with
TASK_WAKEKILL when the wake up is for a fatal signal, and pass neither
bit when JOBCTL_PTRACE_FROZEN is set so that a frozen tracee sleeping
in __TASK_TRACED is not woken. This has the same effect as changing
TASK_TRACED to __TASK_TRACED, as all of the wake ups that use
TASK_KILLABLE go through signal_wake_up.

Handle ptrace_stop being called with a pending fatal signal.
Previously it would have been handled by schedule simply failing to
sleep. As TASK_WAKEKILL is no longer part of TASK_TRACED, schedule
would now sleep despite a pending fatal signal, so check for a pending
fatal signal before sleeping. The code in signal_wake_up guarantees
that the task will be awakened by any fatal signal that comes after
TASK_TRACED is set.

Previously the __state value of __TASK_TRACED was changed to
TASK_RUNNING when the task was woken up, or back to TASK_TRACED when
it was left sleeping in ptrace_stop. Now ptrace_stop clears
JOBCTL_PTRACE_FROZEN when the task is woken up, and
ptrace_unfreeze_traced clears it when the task is left sleeping.
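
For reference, the new freeze/wake/unfreeze interplay, condensed and
simplified from the diff below (not the literal code; locking and the
surrounding checks are elided):

        /* tracer, ptrace_freeze_traced(): mark the tracee frozen
         * instead of rewriting task->__state */
        task->jobctl |= JOBCTL_PTRACE_FROZEN;

        /* signal_wake_up(): a fatal signal kicks a __TASK_TRACED
         * tracee out of ptrace_stop unless it is currently frozen */
        fatal = fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN);
        signal_wake_up_state(t, fatal ? TASK_WAKEKILL | __TASK_TRACED : 0);

        /* tracer, ptrace_unfreeze_traced(): drop the flag and wake a
         * tracee that received a fatal signal while it was frozen */
        task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
        if (__fatal_signal_pending(task))
                wake_up_state(task, __TASK_TRACED);

With this, the scheduler never sees ->__state rewritten underneath it
(see the comment update in kernel/sched/core.c below).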

Signed-off-by: "Eric W. Biederman" <[email protected]>
---
 include/linux/sched.h        |  2 +-
 include/linux/sched/jobctl.h |  2 ++
 include/linux/sched/signal.h |  5 +++--
 kernel/ptrace.c              | 21 ++++++++-------------
 kernel/sched/core.c          |  5 +----
 kernel/signal.c              | 14 ++++++--------
 6 files changed, 21 insertions(+), 28 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d5e3c00b74e1..610f2fdb1e2c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -103,7 +103,7 @@ struct task_group;
/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
-#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+#define TASK_TRACED __TASK_TRACED

#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index fa067de9f1a9..d556c3425963 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -19,6 +19,7 @@ struct task_struct;
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */
+#define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */

#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -28,6 +29,7 @@ struct task_struct;
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT)
+#define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT)

#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3c8b34876744..e66948abbee4 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -435,9 +435,10 @@ extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

-static inline void signal_wake_up(struct task_struct *t, bool resume)
+static inline void signal_wake_up(struct task_struct *t, bool fatal)
{
- signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+ fatal = fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN);
+ signal_wake_up_state(t, fatal ? TASK_WAKEKILL | __TASK_TRACED : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 05953ac9f7bd..83ed28262708 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -197,7 +197,7 @@ static bool ptrace_freeze_traced(struct task_struct *task)
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !looks_like_a_spurious_pid(task) &&
!__fatal_signal_pending(task)) {
- WRITE_ONCE(task->__state, __TASK_TRACED);
+ task->jobctl |= JOBCTL_PTRACE_FROZEN;
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
@@ -207,23 +207,19 @@ static bool ptrace_freeze_traced(struct task_struct *task)

static void ptrace_unfreeze_traced(struct task_struct *task)
{
- if (READ_ONCE(task->__state) != __TASK_TRACED)
- return;
-
- WARN_ON(!task->ptrace || task->parent != current);
+ unsigned long flags;

/*
- * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
- * Recheck state under the lock to close this race.
+ * The child may be awake and may have cleared
+ * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will
+ * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
*/
- spin_lock_irq(&task->sighand->siglock);
- if (READ_ONCE(task->__state) == __TASK_TRACED) {
+ if (lock_task_sighand(task, &flags)) {
+ task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
if (__fatal_signal_pending(task))
wake_up_state(task, __TASK_TRACED);
- else
- WRITE_ONCE(task->__state, TASK_TRACED);
+ unlock_task_sighand(task, &flags);
}
- spin_unlock_irq(&task->sighand->siglock);
}

/**
@@ -256,7 +252,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
*/
read_lock(&tasklist_lock);
if (child->ptrace && child->parent == current) {
- WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED);
/*
* child->sighand can't be NULL, release_task()
* does ptrace_unlink() before __exit_signal().
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d575b4914925..3c351707e830 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6304,10 +6304,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)

/*
* We must load prev->state once (task_struct::state is volatile), such
- * that:
- *
- * - we form a control dependency vs deactivate_task() below.
- * - ptrace_{,un}freeze_traced() can change ->state underneath us.
+ * that we form a control dependency vs deactivate_task() below.
*/
prev_state = READ_ONCE(prev->__state);
if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
diff --git a/kernel/signal.c b/kernel/signal.c
index d2d0c753156c..a58b68a2d3c6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2209,14 +2209,12 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
}

/*
- * schedule() will not sleep if there is a pending signal that
- * can awaken the task.
- *
- * After this point ptrace_signal_wake_up will clear TASK_TRACED
- * if ptrace_unlink happens. Handle previous ptrace_unlinks
- * here to prevent ptrace_stop sleeping in schedule.
+ * After this point ptrace_signal_wake_up or signal_wake_up
+ * will clear TASK_TRACED if ptrace_unlink happens or a fatal
+ * signal comes in. Handle previous ptrace_unlinks and fatal
+ * signals here to prevent ptrace_stop sleeping in schedule.
*/
- if (!current->ptrace)
+ if (!current->ptrace || __fatal_signal_pending(current))
return exit_code;

set_special_state(TASK_TRACED);
@@ -2305,7 +2303,7 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
current->exit_code = 0;

/* LISTENING can be set only during STOP traps, clear it */
- current->jobctl &= ~JOBCTL_LISTENING;
+ current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);

/*
* Queued signals ignored us while we were stopped for tracing.
--
2.35.3



2022-05-09 01:52:03

by Oleg Nesterov

Subject: Re: [PATCH v4 10/12] ptrace: Don't change __state

On 05/05, Eric W. Biederman wrote:
>
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -103,7 +103,7 @@ struct task_group;
> /* Convenience macros for the sake of set_current_state: */
> #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
> #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
> -#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
> +#define TASK_TRACED __TASK_TRACED

however I personally still dislike this change. But let me read the
code with this series applied, perhaps I will change my mind. If not,
I will argue ;)

Oleg.


2022-05-09 05:03:05

by Eric W. Biederman

Subject: Re: [PATCH v4 10/12] ptrace: Don't change __state

Oleg Nesterov <[email protected]> writes:

> On 05/05, Eric W. Biederman wrote:
>>
>> --- a/include/linux/sched.h
>> +++ b/include/linux/sched.h
>> @@ -103,7 +103,7 @@ struct task_group;
>> /* Convenience macros for the sake of set_current_state: */
>> #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
>> #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
>> -#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
>> +#define TASK_TRACED __TASK_TRACED
>
> however I personally still dislike this change. But let me read the
> code with this series applied, perhaps I will change my mind. If not,
> I will argue ;)

That is fair. It kind of grew on me after I implemented it and wrapped
my head around what was going on, as it is simple and there are no
implicit cases.

Eric

2022-05-10 15:45:34

by Oleg Nesterov

Subject: Re: [PATCH v4 10/12] ptrace: Don't change __state

On 05/10, Eric W. Biederman wrote:
>
> > But I still think that a lockless
> >
> > if (!(task->jobctl & JOBCTL_PTRACE_FROZEN))
> > return;
> >
> > check at the start of ptrace_unfreeze_traced() makes sense to avoid
> > lock_task_sighand() if possible.
> >
> > And ptrace_resume() can probably clear JOBCTL_PTRACE_FROZEN along with
> > JOBCTL_TRACED to make this optimization work better. The same for
> > ptrace_signal_wake_up().
>
> What do you have that suggests that taking siglock there is a problem?

Not necessarily a problem, but this optimization is free. If the tracee
was resumed, it can compete for siglock with the debugger.

> What you propose will definitely work as an incremental change, and
> in an incremental change we can explain why doing the stupid simple
> thing is not good enough.

OK.

Oleg.


2022-05-10 18:37:29

by Eric W. Biederman

Subject: Re: [PATCH v4 10/12] ptrace: Don't change __state

Oleg Nesterov <[email protected]> writes:

> On 05/05, Eric W. Biederman wrote:
>>
>> static void ptrace_unfreeze_traced(struct task_struct *task)
>> {
>> - if (READ_ONCE(task->__state) != __TASK_TRACED)
>> - return;
>> -
>> - WARN_ON(!task->ptrace || task->parent != current);
>> + unsigned long flags;
>>
>> /*
>> - * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
>> - * Recheck state under the lock to close this race.
>> + * The child may be awake and may have cleared
>> + * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will
>> + * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
>> */
>> - spin_lock_irq(&task->sighand->siglock);
>> - if (READ_ONCE(task->__state) == __TASK_TRACED) {
>> + if (lock_task_sighand(task, &flags)) {
>
> But I still think that a lockless
>
> if (!(task->jobctl & JOBCTL_PTRACE_FROZEN))
> return;
>
> check at the start of ptrace_unfreeze_traced() makes sense to avoid
> lock_task_sighand() if possible.
>
> And ptrace_resume() can probably clear JOBCTL_PTRACE_FROZEN along with
> JOBCTL_TRACED to make this optimization work better. The same for
> ptrace_signal_wake_up().

What do you have that suggests that taking siglock there is a problem?

What you propose will definitely work as an incremental change, and
in an incremental change we can explain why doing the stupid simple
thing is not good enough.

I am not really opposed on any grounds except that simplicity is good,
and hard to get wrong.

Eric


2022-05-10 19:48:58

by Oleg Nesterov

Subject: Re: [PATCH v4 10/12] ptrace: Don't change __state

On 05/05, Eric W. Biederman wrote:
>
> static void ptrace_unfreeze_traced(struct task_struct *task)
> {
> - if (READ_ONCE(task->__state) != __TASK_TRACED)
> - return;
> -
> - WARN_ON(!task->ptrace || task->parent != current);
> + unsigned long flags;
>
> /*
> - * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
> - * Recheck state under the lock to close this race.
> + * The child may be awake and may have cleared
> + * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will
> + * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
> */
> - spin_lock_irq(&task->sighand->siglock);
> - if (READ_ONCE(task->__state) == __TASK_TRACED) {
> + if (lock_task_sighand(task, &flags)) {

But I still think that a lockless

if (!(task->jobctl & JOBCTL_PTRACE_FROZEN))
return;

check at the start of ptrace_unfreeze_traced() makes sense to avoid
lock_task_sighand() if possible.

And ptrace_resume() can probably clear JOBCTL_PTRACE_FROZEN along with
JOBCTL_TRACED to make this optimization work better. The same for
ptrace_signal_wake_up().
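
IOW, something like this on top of this patch; just a sketch to show
the idea, not a tested change:

        static void ptrace_unfreeze_traced(struct task_struct *task)
        {
                unsigned long flags;

                /* Lockless fast path: the tracee already woke up and
                 * cleared JOBCTL_PTRACE_FROZEN itself, so there is
                 * nothing to undo and siglock can be avoided. */
                if (!(task->jobctl & JOBCTL_PTRACE_FROZEN))
                        return;

                if (lock_task_sighand(task, &flags)) {
                        task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
                        if (__fatal_signal_pending(task))
                                wake_up_state(task, __TASK_TRACED);
                        unlock_task_sighand(task, &flags);
                }
        }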

Oleg.