2023-03-30 08:18:17

by Domenico Cerasuolo

Subject: [PATCH v5 0/4] sched/psi: Allow unprivileged PSI polling

PSI offers two mechanisms to get information about pressure on a specific
resource. One is reading from /proc/pressure/<resource>, which gives
average pressures aggregated every 2s. The other is creating a pollable
fd for a specific resource and cgroup.

Trigger creation requires CAP_SYS_RESOURCE and allows picking a specific
time window and threshold, spawning an RT thread to aggregate the data.
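
For reference, a minimal userspace sketch of the existing privileged
trigger interface (file path and trigger format as described in
Documentation/accounting/psi.rst; error handling mostly omitted):

	/* Privileged trigger: 150ms of "some" memory stall within a 1s window */
	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char trig[] = "some 150000 1000000"; /* threshold_us window_us */
		struct pollfd fds;
		int fd;

		fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
		if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0)
			return 1;

		fds.fd = fd;
		fds.events = POLLPRI;	/* trigger events are delivered as POLLPRI */
		while (poll(&fds, 1, -1) >= 0) {
			if (fds.revents & POLLERR)
				break;
			if (fds.revents & POLLPRI)
				printf("memory pressure event\n");
		}
		close(fd);
		return 0;
	}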

Systemd would like to provide containers the option to monitor pressure
on their own cgroup and sub-cgroups. For example, if systemd launches a
container that itself then launches services, the container should have
the ability to poll() for pressure in individual services. But neither
the container nor the services are privileged.

The series is implemented in 4 steps in order to reduce the noise of
the change.

V5:
- a few suggested code style changes in psi.c and psi_types.h
- fixed trigger destruction logic, now using proper locks

V4:
- fixed psi_open leftover usage in IRQ accounting (patch 4/4)

V3:
- restored renaming patch (#2 of 4) as suggested in review
- rebased #3 and #4 on the renaming commit

V2:
- removed renaming patch (previous 2/4) and applied suggested solution
- changed update_triggers side effect removal as suggested in review
- rebased core patch on other V2 changes


Domenico Cerasuolo (4):
sched/psi: rearrange polling code in preparation
sched/psi: rename existing poll members in preparation
sched/psi: extract update_triggers side effect
sched/psi: allow unprivileged polling of N*2s period

Documentation/accounting/psi.rst | 4 +
include/linux/psi.h | 2 +-
include/linux/psi_types.h | 43 +--
kernel/cgroup/cgroup.c | 2 +-
kernel/sched/psi.c | 458 +++++++++++++++++--------------
5 files changed, 280 insertions(+), 229 deletions(-)

--
2.34.1


2023-03-30 08:18:25

by Domenico Cerasuolo

Subject: [PATCH v5 2/4] sched/psi: rename existing poll members in preparation

Rename the poll-related members of the PSI implementation to make a clear
distinction between the existing privileged (RT) polling code and the
unprivileged triggers code to be implemented in the next patch.

Suggested-by: Johannes Weiner <[email protected]>
Signed-off-by: Domenico Cerasuolo <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
---
include/linux/psi_types.h | 36 ++++-----
kernel/sched/psi.c | 163 +++++++++++++++++++-------------------
2 files changed, 100 insertions(+), 99 deletions(-)

diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 1e0a0d7ace3a..1819afa8b198 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -175,26 +175,26 @@ struct psi_group {
u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
unsigned long avg[NR_PSI_STATES - 1][3];

- /* Monitor work control */
- struct task_struct __rcu *poll_task;
- struct timer_list poll_timer;
- wait_queue_head_t poll_wait;
- atomic_t poll_wakeup;
- atomic_t poll_scheduled;
+ /* Monitor RT polling work control */
+ struct task_struct __rcu *rtpoll_task;
+ struct timer_list rtpoll_timer;
+ wait_queue_head_t rtpoll_wait;
+ atomic_t rtpoll_wakeup;
+ atomic_t rtpoll_scheduled;

/* Protects data used by the monitor */
- struct mutex trigger_lock;
-
- /* Configured polling triggers */
- struct list_head triggers;
- u32 nr_triggers[NR_PSI_STATES - 1];
- u32 poll_states;
- u64 poll_min_period;
-
- /* Total stall times at the start of monitor activation */
- u64 polling_total[NR_PSI_STATES - 1];
- u64 polling_next_update;
- u64 polling_until;
+ struct mutex rtpoll_trigger_lock;
+
+ /* Configured RT polling triggers */
+ struct list_head rtpoll_triggers;
+ u32 rtpoll_nr_triggers[NR_PSI_STATES - 1];
+ u32 rtpoll_states;
+ u64 rtpoll_min_period;
+
+ /* Total stall times at the start of RT polling monitor activation */
+ u64 rtpoll_total[NR_PSI_STATES - 1];
+ u64 rtpoll_next_update;
+ u64 rtpoll_until;
};

#else /* CONFIG_PSI */
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index fe9269f1d2a4..a3d0b5cf797a 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -189,14 +189,14 @@ static void group_init(struct psi_group *group)
INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
mutex_init(&group->avgs_lock);
/* Init trigger-related members */
- atomic_set(&group->poll_scheduled, 0);
- mutex_init(&group->trigger_lock);
- INIT_LIST_HEAD(&group->triggers);
- group->poll_min_period = U32_MAX;
- group->polling_next_update = ULLONG_MAX;
- init_waitqueue_head(&group->poll_wait);
- timer_setup(&group->poll_timer, poll_timer_fn, 0);
- rcu_assign_pointer(group->poll_task, NULL);
+ atomic_set(&group->rtpoll_scheduled, 0);
+ mutex_init(&group->rtpoll_trigger_lock);
+ INIT_LIST_HEAD(&group->rtpoll_triggers);
+ group->rtpoll_min_period = U32_MAX;
+ group->rtpoll_next_update = ULLONG_MAX;
+ init_waitqueue_head(&group->rtpoll_wait);
+ timer_setup(&group->rtpoll_timer, poll_timer_fn, 0);
+ rcu_assign_pointer(group->rtpoll_task, NULL);
}

void __init psi_init(void)
@@ -440,11 +440,11 @@ static u64 update_triggers(struct psi_group *group, u64 now)
* On subsequent updates, calculate growth deltas and let
* watchers know when their specified thresholds are exceeded.
*/
- list_for_each_entry(t, &group->triggers, node) {
+ list_for_each_entry(t, &group->rtpoll_triggers, node) {
u64 growth;
bool new_stall;

- new_stall = group->polling_total[t->state] != total[t->state];
+ new_stall = group->rtpoll_total[t->state] != total[t->state];

/* Check for stall activity or a previous threshold breach */
if (!new_stall && !t->pending_event)
@@ -486,10 +486,10 @@ static u64 update_triggers(struct psi_group *group, u64 now)
}

if (update_total)
- memcpy(group->polling_total, total,
- sizeof(group->polling_total));
+ memcpy(group->rtpoll_total, total,
+ sizeof(group->rtpoll_total));

- return now + group->poll_min_period;
+ return now + group->rtpoll_min_period;
}

static u64 update_averages(struct psi_group *group, u64 now)
@@ -582,53 +582,53 @@ static void init_triggers(struct psi_group *group, u64 now)
{
struct psi_trigger *t;

- list_for_each_entry(t, &group->triggers, node)
+ list_for_each_entry(t, &group->rtpoll_triggers, node)
window_reset(&t->win, now,
group->total[PSI_POLL][t->state], 0);
- memcpy(group->polling_total, group->total[PSI_POLL],
- sizeof(group->polling_total));
- group->polling_next_update = now + group->poll_min_period;
+ memcpy(group->rtpoll_total, group->total[PSI_POLL],
+ sizeof(group->rtpoll_total));
+ group->rtpoll_next_update = now + group->rtpoll_min_period;
}

/* Schedule polling if it's not already scheduled or forced. */
-static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay,
+static void psi_schedule_rtpoll_work(struct psi_group *group, unsigned long delay,
bool force)
{
struct task_struct *task;

/*
* atomic_xchg should be called even when !force to provide a
- * full memory barrier (see the comment inside psi_poll_work).
+ * full memory barrier (see the comment inside psi_rtpoll_work).
*/
- if (atomic_xchg(&group->poll_scheduled, 1) && !force)
+ if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force)
return;

rcu_read_lock();

- task = rcu_dereference(group->poll_task);
+ task = rcu_dereference(group->rtpoll_task);
/*
* kworker might be NULL in case psi_trigger_destroy races with
* psi_task_change (hotpath) which can't use locks
*/
if (likely(task))
- mod_timer(&group->poll_timer, jiffies + delay);
+ mod_timer(&group->rtpoll_timer, jiffies + delay);
else
- atomic_set(&group->poll_scheduled, 0);
+ atomic_set(&group->rtpoll_scheduled, 0);

rcu_read_unlock();
}

-static void psi_poll_work(struct psi_group *group)
+static void psi_rtpoll_work(struct psi_group *group)
{
bool force_reschedule = false;
u32 changed_states;
u64 now;

- mutex_lock(&group->trigger_lock);
+ mutex_lock(&group->rtpoll_trigger_lock);

now = sched_clock();

- if (now > group->polling_until) {
+ if (now > group->rtpoll_until) {
/*
* We are either about to start or might stop polling if no
* state change was recorded. Resetting poll_scheduled leaves
@@ -638,7 +638,7 @@ static void psi_poll_work(struct psi_group *group)
* should be negligible and polling_next_update still keeps
* updates correctly on schedule.
*/
- atomic_set(&group->poll_scheduled, 0);
+ atomic_set(&group->rtpoll_scheduled, 0);
/*
* A task change can race with the poll worker that is supposed to
* report on it. To avoid missing events, ensure ordering between
@@ -667,9 +667,9 @@ static void psi_poll_work(struct psi_group *group)

collect_percpu_times(group, PSI_POLL, &changed_states);

- if (changed_states & group->poll_states) {
+ if (changed_states & group->rtpoll_states) {
/* Initialize trigger windows when entering polling mode */
- if (now > group->polling_until)
+ if (now > group->rtpoll_until)
init_triggers(group, now);

/*
@@ -677,50 +677,50 @@ static void psi_poll_work(struct psi_group *group)
* minimum tracking window as long as monitor states are
* changing.
*/
- group->polling_until = now +
- group->poll_min_period * UPDATES_PER_WINDOW;
+ group->rtpoll_until = now +
+ group->rtpoll_min_period * UPDATES_PER_WINDOW;
}

- if (now > group->polling_until) {
- group->polling_next_update = ULLONG_MAX;
+ if (now > group->rtpoll_until) {
+ group->rtpoll_next_update = ULLONG_MAX;
goto out;
}

- if (now >= group->polling_next_update)
- group->polling_next_update = update_triggers(group, now);
+ if (now >= group->rtpoll_next_update)
+ group->rtpoll_next_update = update_triggers(group, now);

- psi_schedule_poll_work(group,
- nsecs_to_jiffies(group->polling_next_update - now) + 1,
+ psi_schedule_rtpoll_work(group,
+ nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
force_reschedule);

out:
- mutex_unlock(&group->trigger_lock);
+ mutex_unlock(&group->rtpoll_trigger_lock);
}

-static int psi_poll_worker(void *data)
+static int psi_rtpoll_worker(void *data)
{
struct psi_group *group = (struct psi_group *)data;

sched_set_fifo_low(current);

while (true) {
- wait_event_interruptible(group->poll_wait,
- atomic_cmpxchg(&group->poll_wakeup, 1, 0) ||
+ wait_event_interruptible(group->rtpoll_wait,
+ atomic_cmpxchg(&group->rtpoll_wakeup, 1, 0) ||
kthread_should_stop());
if (kthread_should_stop())
break;

- psi_poll_work(group);
+ psi_rtpoll_work(group);
}
return 0;
}

static void poll_timer_fn(struct timer_list *t)
{
- struct psi_group *group = from_timer(group, t, poll_timer);
+ struct psi_group *group = from_timer(group, t, rtpoll_timer);

- atomic_set(&group->poll_wakeup, 1);
- wake_up_interruptible(&group->poll_wait);
+ atomic_set(&group->rtpoll_wakeup, 1);
+ wake_up_interruptible(&group->rtpoll_wait);
}

static void record_times(struct psi_group_cpu *groupc, u64 now)
@@ -851,8 +851,8 @@ static void psi_group_change(struct psi_group *group, int cpu,

write_seqcount_end(&groupc->seq);

- if (state_mask & group->poll_states)
- psi_schedule_poll_work(group, 1, false);
+ if (state_mask & group->rtpoll_states)
+ psi_schedule_rtpoll_work(group, 1, false);

if (wake_clock && !delayed_work_pending(&group->avgs_work))
schedule_delayed_work(&group->avgs_work, PSI_FREQ);
@@ -1005,8 +1005,8 @@ void psi_account_irqtime(struct task_struct *task, u32 delta)

write_seqcount_end(&groupc->seq);

- if (group->poll_states & (1 << PSI_IRQ_FULL))
- psi_schedule_poll_work(group, 1, false);
+ if (group->rtpoll_states & (1 << PSI_IRQ_FULL))
+ psi_schedule_rtpoll_work(group, 1, false);
} while ((group = group->parent));
}
#endif
@@ -1101,7 +1101,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
cancel_delayed_work_sync(&cgroup->psi->avgs_work);
free_percpu(cgroup->psi->pcpu);
/* All triggers must be removed by now */
- WARN_ONCE(cgroup->psi->poll_states, "psi: trigger leak\n");
+ WARN_ONCE(cgroup->psi->rtpoll_states, "psi: trigger leak\n");
kfree(cgroup->psi);
}

@@ -1302,29 +1302,29 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
init_waitqueue_head(&t->event_wait);
t->pending_event = false;

- mutex_lock(&group->trigger_lock);
+ mutex_lock(&group->rtpoll_trigger_lock);

- if (!rcu_access_pointer(group->poll_task)) {
+ if (!rcu_access_pointer(group->rtpoll_task)) {
struct task_struct *task;

- task = kthread_create(psi_poll_worker, group, "psimon");
+ task = kthread_create(psi_rtpoll_worker, group, "psimon");
if (IS_ERR(task)) {
kfree(t);
- mutex_unlock(&group->trigger_lock);
+ mutex_unlock(&group->rtpoll_trigger_lock);
return ERR_CAST(task);
}
- atomic_set(&group->poll_wakeup, 0);
+ atomic_set(&group->rtpoll_wakeup, 0);
wake_up_process(task);
- rcu_assign_pointer(group->poll_task, task);
+ rcu_assign_pointer(group->rtpoll_task, task);
}

- list_add(&t->node, &group->triggers);
- group->poll_min_period = min(group->poll_min_period,
+ list_add(&t->node, &group->rtpoll_triggers);
+ group->rtpoll_min_period = min(group->rtpoll_min_period,
div_u64(t->win.size, UPDATES_PER_WINDOW));
- group->nr_triggers[t->state]++;
- group->poll_states |= (1 << t->state);
+ group->rtpoll_nr_triggers[t->state]++;
+ group->rtpoll_states |= (1 << t->state);

- mutex_unlock(&group->trigger_lock);
+ mutex_unlock(&group->rtpoll_trigger_lock);

return t;
}
@@ -1349,51 +1349,52 @@ void psi_trigger_destroy(struct psi_trigger *t)
*/
wake_up_pollfree(&t->event_wait);

- mutex_lock(&group->trigger_lock);
+ mutex_lock(&group->rtpoll_trigger_lock);

if (!list_empty(&t->node)) {
struct psi_trigger *tmp;
u64 period = ULLONG_MAX;

list_del(&t->node);
- group->nr_triggers[t->state]--;
- if (!group->nr_triggers[t->state])
- group->poll_states &= ~(1 << t->state);
+ group->rtpoll_nr_triggers[t->state]--;
+ if (!group->rtpoll_nr_triggers[t->state])
+ group->rtpoll_states &= ~(1 << t->state);
/* reset min update period for the remaining triggers */
- list_for_each_entry(tmp, &group->triggers, node)
+ list_for_each_entry(tmp, &group->rtpoll_triggers, node)
period = min(period, div_u64(tmp->win.size,
UPDATES_PER_WINDOW));
- group->poll_min_period = period;
- /* Destroy poll_task when the last trigger is destroyed */
- if (group->poll_states == 0) {
- group->polling_until = 0;
+ group->rtpoll_min_period = period;
+ /* Destroy rtpoll_task when the last trigger is destroyed */
+ if (group->rtpoll_states == 0) {
+ group->rtpoll_until = 0;
task_to_destroy = rcu_dereference_protected(
- group->poll_task,
- lockdep_is_held(&group->trigger_lock));
- rcu_assign_pointer(group->poll_task, NULL);
- del_timer(&group->poll_timer);
+ group->rtpoll_task,
+ lockdep_is_held(&group->rtpoll_trigger_lock));
+ rcu_assign_pointer(group->rtpoll_task, NULL);
+ del_timer(&group->rtpoll_timer);
}
}

- mutex_unlock(&group->trigger_lock);
+ mutex_unlock(&group->rtpoll_trigger_lock);

/*
- * Wait for psi_schedule_poll_work RCU to complete its read-side
+ * Wait for psi_schedule_rtpoll_work RCU to complete its read-side
* critical section before destroying the trigger and optionally the
- * poll_task.
+ * rtpoll_task.
*/
synchronize_rcu();
/*
- * Stop kthread 'psimon' after releasing trigger_lock to prevent a
- * deadlock while waiting for psi_poll_work to acquire trigger_lock
+ * Stop kthread 'psimon' after releasing rtpoll_trigger_lock to prevent
+ * a deadlock while waiting for psi_rtpoll_work to acquire
+ * rtpoll_trigger_lock
*/
if (task_to_destroy) {
/*
* After the RCU grace period has expired, the worker
- * can no longer be found through group->poll_task.
+ * can no longer be found through group->rtpoll_task.
*/
kthread_stop(task_to_destroy);
- atomic_set(&group->poll_scheduled, 0);
+ atomic_set(&group->rtpoll_scheduled, 0);
}
kfree(t);
}
--
2.34.1

2023-03-30 08:18:38

by Domenico Cerasuolo

Subject: [PATCH v5 3/4] sched/psi: extract update_triggers side effect

This change moves the update_total flag out of the update_triggers
function, which is currently called only from psi_poll_work.
In the next patch, update_triggers will also be called from psi_avgs_work,
but the total update information is specific to psi_poll_work.
Returning the update_total value to the caller lets us avoid differentiating
the implementation of update_triggers for the different aggregators.

Suggested-by: Johannes Weiner <[email protected]>
Signed-off-by: Domenico Cerasuolo <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
---
kernel/sched/psi.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index a3d0b5cf797a..f3df6a8ff493 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -430,11 +430,11 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value)
return growth;
}

-static u64 update_triggers(struct psi_group *group, u64 now)
+static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total)
{
struct psi_trigger *t;
- bool update_total = false;
u64 *total = group->total[PSI_POLL];
+ *update_total = false;

/*
* On subsequent updates, calculate growth deltas and let
@@ -462,7 +462,7 @@ static u64 update_triggers(struct psi_group *group, u64 now)
* been through all of them. Also remember to extend the
* polling time if we see new stall activity.
*/
- update_total = true;
+ *update_total = true;

/* Calculate growth since last update */
growth = window_update(&t->win, now, total[t->state]);
@@ -485,10 +485,6 @@ static u64 update_triggers(struct psi_group *group, u64 now)
t->pending_event = false;
}

- if (update_total)
- memcpy(group->rtpoll_total, total,
- sizeof(group->rtpoll_total));
-
return now + group->rtpoll_min_period;
}

@@ -622,6 +618,7 @@ static void psi_rtpoll_work(struct psi_group *group)
{
bool force_reschedule = false;
u32 changed_states;
+ bool update_total;
u64 now;

mutex_lock(&group->rtpoll_trigger_lock);
@@ -686,8 +683,12 @@ static void psi_rtpoll_work(struct psi_group *group)
goto out;
}

- if (now >= group->rtpoll_next_update)
- group->rtpoll_next_update = update_triggers(group, now);
+ if (now >= group->rtpoll_next_update) {
+ group->rtpoll_next_update = update_triggers(group, now, &update_total);
+ if (update_total)
+ memcpy(group->rtpoll_total, group->total[PSI_POLL],
+ sizeof(group->rtpoll_total));
+ }

psi_schedule_rtpoll_work(group,
nsecs_to_jiffies(group->rtpoll_next_update - now) + 1,
--
2.34.1

2023-03-30 08:18:52

by Domenico Cerasuolo

Subject: [PATCH v5 4/4] sched/psi: allow unprivileged polling of N*2s period

PSI offers two mechanisms to get information about pressure on a specific
resource. One is reading from /proc/pressure/<resource>, which gives
average pressures aggregated every 2s. The other is creating a pollable
fd for a specific resource and cgroup.

Trigger creation requires CAP_SYS_RESOURCE and allows picking a specific
time window and threshold, spawning an RT thread to aggregate the data.

Systemd would like to provide containers the option to monitor pressure
on their own cgroup and sub-cgroups. For example, if systemd launches a
container that itself then launches services, the container should have
the ability to poll() for pressure in individual services. But neither
the container nor the services are privileged.

This patch implements a mechanism that allows unprivileged users to create
pressure triggers. The difference from privileged trigger creation is that
unprivileged triggers must have a time window that's a multiple of 2s.
This avoids unrestricted spawning of RT threads and instead reuses the
aggregation mechanism already used for the averages, which runs
independently of any triggers.
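
As an illustration (a sketch, not part of this patch), an unprivileged
process uses the same write/poll flow as privileged triggers, with the
window constrained to N*2s:

	/* No CAP_SYS_RESOURCE: 500ms of "full" memory stall within a 4s window */
	const char trig[] = "full 500000 4000000";
	int fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);

	/* Rejected with EINVAL if window_us is not a multiple of 2000000 */
	if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0)
		return -1;
	/* then poll() for POLLPRI, as with privileged triggers */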

Suggested-by: Johannes Weiner <[email protected]>
Signed-off-by: Domenico Cerasuolo <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
---
Documentation/accounting/psi.rst | 4 +
include/linux/psi.h | 2 +-
include/linux/psi_types.h | 7 ++
kernel/cgroup/cgroup.c | 2 +-
kernel/sched/psi.c | 148 +++++++++++++++++++------------
5 files changed, 106 insertions(+), 57 deletions(-)

diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst
index 5e40b3f437f9..df6062eb3abb 100644
--- a/Documentation/accounting/psi.rst
+++ b/Documentation/accounting/psi.rst
@@ -105,6 +105,10 @@ prevent overly frequent polling. Max limit is chosen as a high enough number
after which monitors are most likely not needed and psi averages can be used
instead.

+Unprivileged users can also create monitors, with the only limitation that the
+window size must be a multiple of 2s, in order to prevent excessive resource
+usage.
+
When activated, psi monitor stays active for at least the duration of one
tracking window to avoid repeated activations/deactivations when system is
bouncing in and out of the stall state.
diff --git a/include/linux/psi.h b/include/linux/psi.h
index b029a847def1..ab26200c2803 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -24,7 +24,7 @@ void psi_memstall_leave(unsigned long *flags);

int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, enum psi_res res);
+ char *buf, enum psi_res res, struct file *file);
void psi_trigger_destroy(struct psi_trigger *t);

__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 1819afa8b198..040c089581c6 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -151,6 +151,9 @@ struct psi_trigger {

/* Deferred event(s) from previous ratelimit window */
bool pending_event;
+
+ /* Trigger type - PSI_AVGS for unprivileged, PSI_POLL for RT */
+ enum psi_aggregators aggregator;
};

struct psi_group {
@@ -171,6 +174,10 @@ struct psi_group {
/* Aggregator work control */
struct delayed_work avgs_work;

+ /* Unprivileged triggers against N*PSI_FREQ windows */
+ struct list_head avg_triggers;
+ u32 avg_nr_triggers[NR_PSI_STATES - 1];
+
/* Total stall times and sampled pressure averages */
u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
unsigned long avg[NR_PSI_STATES - 1][3];
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 935e8121b21e..dead36969bba 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3761,7 +3761,7 @@ static ssize_t pressure_write(struct kernfs_open_file *of, char *buf,
}

psi = cgroup_psi(cgrp);
- new = psi_trigger_create(psi, buf, res);
+ new = psi_trigger_create(psi, buf, res, of->file);
if (IS_ERR(new)) {
cgroup_put(cgrp);
return PTR_ERR(new);
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index f3df6a8ff493..bd4cc941239e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -186,9 +186,14 @@ static void group_init(struct psi_group *group)
seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
group->avg_last_update = sched_clock();
group->avg_next_update = group->avg_last_update + psi_period;
- INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
mutex_init(&group->avgs_lock);
- /* Init trigger-related members */
+
+ /* Init avg trigger-related members */
+ INIT_LIST_HEAD(&group->avg_triggers);
+ memset(group->avg_nr_triggers, 0, sizeof(group->avg_nr_triggers));
+ INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+
+ /* Init rtpoll trigger-related members */
atomic_set(&group->rtpoll_scheduled, 0);
mutex_init(&group->rtpoll_trigger_lock);
INIT_LIST_HEAD(&group->rtpoll_triggers);
@@ -430,21 +435,32 @@ static u64 window_update(struct psi_window *win, u64 now, u64 value)
return growth;
}

-static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total)
+static u64 update_triggers(struct psi_group *group, u64 now, bool *update_total,
+ enum psi_aggregators aggregator)
{
struct psi_trigger *t;
- u64 *total = group->total[PSI_POLL];
+ u64 *total = group->total[aggregator];
+ struct list_head *triggers;
+ u64 *aggregator_total;
*update_total = false;

+ if (aggregator == PSI_AVGS) {
+ triggers = &group->avg_triggers;
+ aggregator_total = group->avg_total;
+ } else {
+ triggers = &group->rtpoll_triggers;
+ aggregator_total = group->rtpoll_total;
+ }
+
/*
* On subsequent updates, calculate growth deltas and let
* watchers know when their specified thresholds are exceeded.
*/
- list_for_each_entry(t, &group->rtpoll_triggers, node) {
+ list_for_each_entry(t, triggers, node) {
u64 growth;
bool new_stall;

- new_stall = group->rtpoll_total[t->state] != total[t->state];
+ new_stall = aggregator_total[t->state] != total[t->state];

/* Check for stall activity or a previous threshold breach */
if (!new_stall && !t->pending_event)
@@ -546,6 +562,7 @@ static void psi_avgs_work(struct work_struct *work)
struct delayed_work *dwork;
struct psi_group *group;
u32 changed_states;
+ bool update_total;
u64 now;

dwork = to_delayed_work(work);
@@ -563,8 +580,10 @@ static void psi_avgs_work(struct work_struct *work)
* Once restarted, we'll catch up the running averages in one
* go - see calc_avgs() and missed_periods.
*/
- if (now >= group->avg_next_update)
+ if (now >= group->avg_next_update) {
+ update_triggers(group, now, &update_total, PSI_AVGS);
group->avg_next_update = update_averages(group, now);
+ }

if (changed_states & PSI_STATE_RESCHEDULE) {
schedule_delayed_work(dwork, nsecs_to_jiffies(
@@ -574,7 +593,7 @@ static void psi_avgs_work(struct work_struct *work)
mutex_unlock(&group->avgs_lock);
}

-static void init_triggers(struct psi_group *group, u64 now)
+static void init_rtpoll_triggers(struct psi_group *group, u64 now)
{
struct psi_trigger *t;

@@ -667,7 +686,7 @@ static void psi_rtpoll_work(struct psi_group *group)
if (changed_states & group->rtpoll_states) {
/* Initialize trigger windows when entering polling mode */
if (now > group->rtpoll_until)
- init_triggers(group, now);
+ init_rtpoll_triggers(group, now);

/*
* Keep the monitor active for at least the duration of the
@@ -684,7 +703,7 @@ static void psi_rtpoll_work(struct psi_group *group)
}

if (now >= group->rtpoll_next_update) {
- group->rtpoll_next_update = update_triggers(group, now, &update_total);
+ group->rtpoll_next_update = update_triggers(group, now, &update_total, PSI_POLL);
if (update_total)
memcpy(group->rtpoll_total, group->total[PSI_POLL],
sizeof(group->rtpoll_total));
@@ -1254,16 +1273,23 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
}

struct psi_trigger *psi_trigger_create(struct psi_group *group,
- char *buf, enum psi_res res)
+ char *buf, enum psi_res res, struct file *file)
{
struct psi_trigger *t;
enum psi_states state;
u32 threshold_us;
+ bool privileged;
u32 window_us;

if (static_branch_likely(&psi_disabled))
return ERR_PTR(-EOPNOTSUPP);

+ /*
+ * Checking the privilege here on file->f_cred implies that a privileged user
+ * could open the file and delegate the write to an unprivileged one.
+ */
+ privileged = cap_raised(file->f_cred->cap_effective, CAP_SYS_RESOURCE);
+
if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
state = PSI_IO_SOME + res * 2;
else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
@@ -1283,6 +1309,13 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
window_us > WINDOW_MAX_US)
return ERR_PTR(-EINVAL);

+ /*
+ * Unprivileged users can only use 2s windows so that averages aggregation
+ * work is used, and no RT threads need to be spawned.
+ */
+ if (!privileged && window_us % 2000000)
+ return ERR_PTR(-EINVAL);
+
/* Check threshold */
if (threshold_us == 0 || threshold_us > window_us)
return ERR_PTR(-EINVAL);
@@ -1302,10 +1335,11 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
t->last_event_time = 0;
init_waitqueue_head(&t->event_wait);
t->pending_event = false;
+ t->aggregator = privileged ? PSI_POLL : PSI_AVGS;

mutex_lock(&group->rtpoll_trigger_lock);

- if (!rcu_access_pointer(group->rtpoll_task)) {
+ if (privileged && !rcu_access_pointer(group->rtpoll_task)) {
struct task_struct *task;

task = kthread_create(psi_rtpoll_worker, group, "psimon");
@@ -1319,11 +1353,16 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
rcu_assign_pointer(group->rtpoll_task, task);
}

- list_add(&t->node, &group->rtpoll_triggers);
- group->rtpoll_min_period = min(group->rtpoll_min_period,
- div_u64(t->win.size, UPDATES_PER_WINDOW));
- group->rtpoll_nr_triggers[t->state]++;
- group->rtpoll_states |= (1 << t->state);
+ if (privileged) {
+ list_add(&t->node, &group->rtpoll_triggers);
+ group->rtpoll_min_period = min(group->rtpoll_min_period,
+ div_u64(t->win.size, UPDATES_PER_WINDOW));
+ group->rtpoll_nr_triggers[t->state]++;
+ group->rtpoll_states |= (1 << t->state);
+ } else {
+ list_add(&t->node, &group->avg_triggers);
+ group->avg_nr_triggers[t->state]++;
+ }

mutex_unlock(&group->rtpoll_trigger_lock);

@@ -1350,34 +1389,41 @@ void psi_trigger_destroy(struct psi_trigger *t)
*/
wake_up_pollfree(&t->event_wait);

- mutex_lock(&group->rtpoll_trigger_lock);
-
- if (!list_empty(&t->node)) {
- struct psi_trigger *tmp;
- u64 period = ULLONG_MAX;
-
- list_del(&t->node);
- group->rtpoll_nr_triggers[t->state]--;
- if (!group->rtpoll_nr_triggers[t->state])
- group->rtpoll_states &= ~(1 << t->state);
- /* reset min update period for the remaining triggers */
- list_for_each_entry(tmp, &group->rtpoll_triggers, node)
- period = min(period, div_u64(tmp->win.size,
- UPDATES_PER_WINDOW));
- group->rtpoll_min_period = period;
- /* Destroy rtpoll_task when the last trigger is destroyed */
- if (group->rtpoll_states == 0) {
- group->rtpoll_until = 0;
- task_to_destroy = rcu_dereference_protected(
- group->rtpoll_task,
- lockdep_is_held(&group->rtpoll_trigger_lock));
- rcu_assign_pointer(group->rtpoll_task, NULL);
- del_timer(&group->rtpoll_timer);
+ if (t->aggregator == PSI_AVGS) {
+ mutex_lock(&group->avgs_lock);
+ if (!list_empty(&t->node)) {
+ list_del(&t->node);
+ group->avg_nr_triggers[t->state]--;
}
+ mutex_unlock(&group->avgs_lock);
+ } else {
+ mutex_lock(&group->rtpoll_trigger_lock);
+ if (!list_empty(&t->node)) {
+ struct psi_trigger *tmp;
+ u64 period = ULLONG_MAX;
+
+ list_del(&t->node);
+ group->rtpoll_nr_triggers[t->state]--;
+ if (!group->rtpoll_nr_triggers[t->state])
+ group->rtpoll_states &= ~(1 << t->state);
+ /* reset min update period for the remaining triggers */
+ list_for_each_entry(tmp, &group->rtpoll_triggers, node)
+ period = min(period, div_u64(tmp->win.size,
+ UPDATES_PER_WINDOW));
+ group->rtpoll_min_period = period;
+ /* Destroy rtpoll_task when the last trigger is destroyed */
+ if (group->rtpoll_states == 0) {
+ group->rtpoll_until = 0;
+ task_to_destroy = rcu_dereference_protected(
+ group->rtpoll_task,
+ lockdep_is_held(&group->rtpoll_trigger_lock));
+ rcu_assign_pointer(group->rtpoll_task, NULL);
+ del_timer(&group->rtpoll_timer);
+ }
+ }
+ mutex_unlock(&group->rtpoll_trigger_lock);
}

- mutex_unlock(&group->rtpoll_trigger_lock);
-
/*
* Wait for psi_schedule_rtpoll_work RCU to complete its read-side
* critical section before destroying the trigger and optionally the
@@ -1437,27 +1483,19 @@ static int psi_cpu_show(struct seq_file *m, void *v)
return psi_show(m, &psi_system, PSI_CPU);
}

-static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
-{
- if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
- return -EPERM;
-
- return single_open(file, psi_show, NULL);
-}
-
static int psi_io_open(struct inode *inode, struct file *file)
{
- return psi_open(file, psi_io_show);
+ return single_open(file, psi_io_show, NULL);
}

static int psi_memory_open(struct inode *inode, struct file *file)
{
- return psi_open(file, psi_memory_show);
+ return single_open(file, psi_memory_show, NULL);
}

static int psi_cpu_open(struct inode *inode, struct file *file)
{
- return psi_open(file, psi_cpu_show);
+ return single_open(file, psi_cpu_show, NULL);
}

static ssize_t psi_write(struct file *file, const char __user *user_buf,
@@ -1491,7 +1529,7 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
return -EBUSY;
}

- new = psi_trigger_create(&psi_system, buf, res);
+ new = psi_trigger_create(&psi_system, buf, res, file);
if (IS_ERR(new)) {
mutex_unlock(&seq->lock);
return PTR_ERR(new);
@@ -1571,7 +1609,7 @@ static int psi_irq_show(struct seq_file *m, void *v)

static int psi_irq_open(struct inode *inode, struct file *file)
{
- return psi_open(file, psi_irq_show);
+ return single_open(file, psi_irq_show, NULL);
}

static ssize_t psi_irq_write(struct file *file, const char __user *user_buf,
--
2.34.1

2023-03-30 09:48:50

by Johannes Weiner

Subject: Re: [PATCH v5 4/4] sched/psi: allow unprivileged polling of N*2s period

Hi Domenico,

thanks, the destruction path looks right to me now.

On Thu, Mar 30, 2023 at 10:11:00AM +0200, Domenico Cerasuolo wrote:
> @@ -1319,11 +1353,16 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
> rcu_assign_pointer(group->rtpoll_task, task);
> }
>
> - list_add(&t->node, &group->rtpoll_triggers);
> - group->rtpoll_min_period = min(group->rtpoll_min_period,
> - div_u64(t->win.size, UPDATES_PER_WINDOW));
> - group->rtpoll_nr_triggers[t->state]++;
> - group->rtpoll_states |= (1 << t->state);
> + if (privileged) {
> + list_add(&t->node, &group->rtpoll_triggers);
> + group->rtpoll_min_period = min(group->rtpoll_min_period,
> + div_u64(t->win.size, UPDATES_PER_WINDOW));
> + group->rtpoll_nr_triggers[t->state]++;
> + group->rtpoll_states |= (1 << t->state);
> + } else {
> + list_add(&t->node, &group->avg_triggers);
> + group->avg_nr_triggers[t->state]++;
> + }
>
> mutex_unlock(&group->rtpoll_trigger_lock);

But creation needs the same treatment. group->avg_triggers and
avg_nr_triggers are protected by the avgs_lock, not the
rtpoll_trigger_lock.

I suppose the lock taking and the group->rtpoll_task creation
(anything with rtpoll in the name, really) should be within a big

if (privileged) {
rtpoll_trigger_lock()
kthread_create()
init-and-link()
rtpoll_trigger_unlock()
} else {
avgs_lock()
...
}