Implement the mount querying syscalls agreed on at LSF/MM 2023. This is an
RFC with just x86_64 syscalls.
Except for notifications, this should allow a full replacement for
parsing /proc/self/mountinfo.
It is not a replacement for /proc/$OTHER_PID/mountinfo, since the mount
namespace and root are taken from the current task. I guess the namespace
and root could be switched before invoking these syscalls, but that sounds
a bit complicated. Not sure whether this is a problem in practice.
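For reference, a rough sketch of what that switching could look like from a
privileged, single-threaded helper (nsenter-style; needs CAP_SYS_ADMIN,
error handling kept minimal):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <err.h>

int main(int argc, char *argv[])
{
	char path[64];
	int nsfd, rootfd;

	if (argc != 2)
		errx(1, "usage: %s <pid>", argv[0]);

	/* grab the target's mount namespace and root before switching */
	snprintf(path, sizeof(path), "/proc/%s/ns/mnt", argv[1]);
	nsfd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "/proc/%s/root", argv[1]);
	rootfd = open(path, O_RDONLY | O_DIRECTORY);
	if (nsfd == -1 || rootfd == -1)
		err(1, "open");

	if (setns(nsfd, CLONE_NEWNS) == -1)	/* needs CAP_SYS_ADMIN */
		err(1, "setns");
	if (fchdir(rootfd) == -1 || chroot(".") == -1)
		err(1, "chroot");

	/* statmnt()/listmnt() issued from here on see the target's mounts */
	return 0;
}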
Test utility attached at the end.
---
Miklos Szeredi (3):
add unique mount ID
add statmnt(2) syscall
add listmnt(2) syscall
arch/x86/entry/syscalls/syscall_64.tbl | 2 +
fs/internal.h | 5 +
fs/mount.h | 3 +-
fs/namespace.c | 365 +++++++++++++++++++++++++
fs/proc_namespace.c | 19 +-
fs/stat.c | 9 +-
fs/statfs.c | 1 +
include/linux/syscalls.h | 5 +
include/uapi/asm-generic/unistd.h | 8 +-
include/uapi/linux/mount.h | 36 +++
include/uapi/linux/stat.h | 1 +
11 files changed, 443 insertions(+), 11 deletions(-)
--
2.41.0
=== statmnt.c ===
#define _GNU_SOURCE
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <linux/types.h>	/* __u32/__u64 used by the copied uapi structs */
#include <err.h>
struct stmt_str {
__u32 off;
__u32 len;
};
struct statmnt {
__u64 mask; /* What results were written [uncond] */
__u32 sb_dev_major; /* Device ID */
__u32 sb_dev_minor;
__u64 sb_magic; /* ..._SUPER_MAGIC */
__u32 sb_flags; /* MS_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
__u32 __spare1;
__u64 mnt_id; /* Unique ID of mount */
__u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
__u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */
__u32 mnt_parent_id_old;
__u64 mnt_attr; /* MOUNT_ATTR_... */
__u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
__u64 mnt_peer_group; /* ID of shared peer group */
__u64 mnt_master; /* Mount receives propagation from this ID */
__u64 propagate_from; /* Propagation from in current namespace */
__u64 __spare[20];
struct stmt_str mnt_root; /* Root of mount relative to root of fs */
struct stmt_str mountpoint; /* Mountpoint relative to root of process */
struct stmt_str fs_type; /* Filesystem type[.subtype] */
struct stmt_str sb_opts; /* Super block string options (nul delimited) */
};
#define STMT_SB_BASIC 0x00000001U /* Want/got sb_... */
#define STMT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
#define STMT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
#define STMT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
#define STMT_MOUNTPOINT 0x00000010U /* Want/got mountpoint */
#define STMT_FS_TYPE 0x00000020U /* Want/got fs_type */
#define STMT_SB_OPTS 0x00000040U /* Want/got sb_opts */
#define __NR_statmnt 454
#define __NR_listmnt 455
#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
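/*
 * Usage:
 *   statmnt <mnt_id|path>       show attributes of a single mount
 *   statmnt -l <mnt_id|path>    list the IDs of that mount's children
 *
 * A path argument is converted to a mount ID via statx(STATX_MNT_ID_UNIQUE).
 */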
int main(int argc, char *argv[])
{
char buf[65536];
struct statmnt *st = (void *) buf;
char *end;
const char *arg = argv[1];
long res;
int list = 0;
unsigned long mnt_id;
unsigned int mask = STMT_SB_BASIC | STMT_MNT_BASIC | STMT_PROPAGATE_FROM | STMT_MNT_ROOT | STMT_MOUNTPOINT | STMT_FS_TYPE | STMT_SB_OPTS;
if (arg && strcmp(arg, "-l") == 0) {
list = 1;
arg = argv[2];
}
if (argc != list + 2)
errx(1, "usage: %s [-l] (mnt_id|path)", argv[0]);
mnt_id = strtol(arg, &end, 0);
if (!mnt_id || *end != '\0') {
struct statx sx;
res = statx(AT_FDCWD, arg, 0, STATX_MNT_ID_UNIQUE, &sx);
if (res == -1)
err(1, "%s", arg);
if (!(sx.stx_mask & (STATX_MNT_ID | STATX_MNT_ID_UNIQUE)))
errx(1, "Sorry, no mount ID");
mnt_id = sx.stx_mnt_id;
}
if (list) {
size_t size = 8192;
uint64_t list[size];
long i, num;
res = syscall(__NR_listmnt, mnt_id, list, size, 0);
if (res == -1)
err(1, "listmnt(%lu)", mnt_id);
num = res;
for (i = 0; i < num; i++) {
printf("0x%lx / ", list[i]);
res = syscall(__NR_statmnt, list[i], STMT_MNT_BASIC | STMT_MOUNTPOINT, &buf, sizeof(buf), 0);
if (res == -1) {
printf("???\t[%s]\n", strerror(errno));
} else {
printf("%u\t%s\n", st->mnt_id_old,
(st->mask & STMT_MOUNTPOINT) ? buf + st->mountpoint.off : "???");
}
}
return 0;
}
res = syscall(__NR_statmnt, mnt_id, mask, &buf, sizeof(buf), 0);
if (res == -1)
err(1, "statmnt(%lu)", mnt_id);
printf("mask: 0x%llx\n", st->mask);
if (st->mask & STMT_SB_BASIC) {
printf("sb_dev_major: %u\n", st->sb_dev_major);
printf("sb_dev_minor: %u\n", st->sb_dev_minor);
printf("sb_magic: 0x%llx\n", st->sb_magic);
printf("sb_flags: 0x%08x\n", st->sb_flags);
}
if (st->mask & STMT_MNT_BASIC) {
printf("mnt_id: 0x%llx\n", st->mnt_id);
printf("mnt_parent_id: 0x%llx\n", st->mnt_parent_id);
printf("mnt_id_old: %u\n", st->mnt_id_old);
printf("mnt_parent_id_old: %u\n", st->mnt_parent_id_old);
printf("mnt_attr: 0x%08llx\n", st->mnt_attr);
printf("mnt_propagation: %s%s%s%s\n",
st->mnt_propagation & MS_SHARED ? "shared," : "",
st->mnt_propagation & MS_SLAVE ? "slave," : "",
st->mnt_propagation & MS_UNBINDABLE ? "unbindable," : "",
st->mnt_propagation & MS_PRIVATE ? "private" : "");
printf("mnt_peer_group: %llu\n", st->mnt_peer_group);
printf("mnt_master: %llu\n", st->mnt_master);
}
if (st->mask & STMT_PROPAGATE_FROM) {
printf("propagate_from: %llu\n", st->propagate_from);
}
if (st->mask & STMT_MNT_ROOT) {
printf("mnt_root: %i/%u <%s>\n", st->mnt_root.off,
st->mnt_root.len, buf + st->mnt_root.off);
}
if (st->mask & STMT_MOUNTPOINT) {
printf("mountpoint: %i/%u <%s>\n", st->mountpoint.off,
st->mountpoint.len, buf + st->mountpoint.off);
}
if (st->mask & STMT_FS_TYPE) {
printf("fs_type: %i/%u <%s>\n", st->fs_type.off,
st->fs_type.len, buf + st->fs_type.off);
}
if (st->mask & STMT_SB_OPTS) {
char *p = buf + st->sb_opts.off;
char *end = p + st->sb_opts.len;
printf("sb_opts: %i/%u ", st->sb_opts.off, st->sb_opts.len);
for (; p < end; p += strlen(p) + 1)
printf("<%s>, ", p);
printf("\n");
}
return 0;
}
If a mount is released, then its mnt_id can immediately be reused. This is
bad news for user interfaces that want to uniquely identify a mount.
Implementing a unique mount ID is trivial (use a 64bit counter).
Unfortunately existing userspace assumes a 32bit ID and would overflow once
the counter passes 2^32.
Introduce a new 64bit ID alongside the old one. Allow new interfaces to
work on both the old and new IDs by starting the counter from 2^32.
Signed-off-by: Miklos Szeredi <[email protected]>
---
fs/mount.h | 3 ++-
fs/namespace.c | 4 ++++
fs/stat.c | 9 +++++++--
include/uapi/linux/stat.h | 1 +
4 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/fs/mount.h b/fs/mount.h
index 130c07c2f8d2..a14f762b3f29 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -72,7 +72,8 @@ struct mount {
struct fsnotify_mark_connector __rcu *mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
#endif
- int mnt_id; /* mount identifier */
+ int mnt_id; /* mount identifier, reused */
+ u64 mnt_id_unique; /* mount ID unique until reboot */
int mnt_group_id; /* peer group identifier */
int mnt_expiry_mark; /* true if marked for expiry */
struct hlist_head mnt_pins;
diff --git a/fs/namespace.c b/fs/namespace.c
index e157efc54023..de47c5f66e17 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -68,6 +68,9 @@ static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
+/* Don't allow confusion with mount ID allocated with IDA */
+static atomic64_t mnt_id_ctr = ATOMIC64_INIT(1ULL << 32);
+
static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
@@ -131,6 +134,7 @@ static int mnt_alloc_id(struct mount *mnt)
if (res < 0)
return res;
mnt->mnt_id = res;
+ mnt->mnt_id_unique = atomic64_inc_return(&mnt_id_ctr);
return 0;
}
diff --git a/fs/stat.c b/fs/stat.c
index 6822ac77aec2..46d901b6b2de 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -280,8 +280,13 @@ static int vfs_statx(int dfd, struct filename *filename, int flags,
error = vfs_getattr(&path, stat, request_mask, flags);
- stat->mnt_id = real_mount(path.mnt)->mnt_id;
- stat->result_mask |= STATX_MNT_ID;
+ if (request_mask & STATX_MNT_ID_UNIQUE) {
+ stat->mnt_id = real_mount(path.mnt)->mnt_id_unique;
+ stat->result_mask |= STATX_MNT_ID_UNIQUE;
+ } else {
+ stat->mnt_id = real_mount(path.mnt)->mnt_id;
+ stat->result_mask |= STATX_MNT_ID;
+ }
if (path.mnt->mnt_root == path.dentry)
stat->attributes |= STATX_ATTR_MOUNT_ROOT;
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7cab2c65d3d7..2f2ee82d5517 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -154,6 +154,7 @@ struct statx {
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+#define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
--
2.41.0
On Fri, Sep 15, 2023 at 4:20 AM Ian Kent <[email protected]> wrote:
>
> On 14/9/23 14:47, Amir Goldstein wrote:
> > On Wed, Sep 13, 2023 at 6:22 PM Miklos Szeredi <[email protected]> wrote:
> >> Implement the mount querying syscalls agreed on at LSF/MM 2023. This is an
> >> RFC with just x86_64 syscalls.
> >>
> >> Excepting notification this should allow full replacement for
> >> parsing /proc/self/mountinfo.
> > Since you mentioned notifications, I will add that the plan discussed
> > in LFSMM was, once we have an API to query mount stats and children,
> > implement fanotify events for:
> > mount [mntuid] was un/mounted at [parent mntuid],[dirfid+name]
> >
> > As with other fanotify events, the self mntuid and dirfid+name
> > information can be omitted and without it, multiple un/mount events
> > from the same parent mntuid will be merged, allowing userspace
> > to listmnt() periodically only mntuid whose child mounts have changed,
> > with little risk of event queue overflow.
> >
> > The possible monitoring scopes would be the entire mount namespace
> > of the monitoring program or watching a single mount for change in
> > its children mounts. The latter is similar to inotify directory children watch,
> > where the watches needs to be set recursively, with all the weight on
> > userspace to avoid races.
>
> It's been my belief that the existing notification mechanisms don't
> quite fully satisfy the needs of users of these calls (aka. the need
> I found when implementing David's original calls into systemd).
>
> Specifically the ability to process a batch of notifications at once.
>
> Admittedly the notifications mechanism that David originally implemented
> didn't fully implement what I found I needed but it did provide for a
> settable queue length and getting a batch of notifications at a time.
>
> Am I mistaken in my belief?
>
I am not sure I understand the question.
fanotify has an event queue (16K events by default), but it can
also use unlimited size.
With a limited size queue, event queue overflow generates an
overflow event.
event listeners can read a batch of events, depending on
the size of the buffer that they provide.
when multiple events with same information are queued,
for example "something was un/mounted over parent mntuid 100"
fanotify will merged those all those events in the queue and the
event listeners will get only one such event in the batch.
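To illustrate with today's API: a minimal sketch that drains a batch of
plain FAN_MODIFY events from a mount mark (needs CAP_SYS_ADMIN); the
proposed mount events would presumably be consumed the same way:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>
#include <err.h>

int main(void)
{
	/* one read() returns a whole batch of queued events */
	struct fanotify_event_metadata buf[256], *ev;
	ssize_t len;
	int fd;

	fd = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);
	if (fd == -1)
		err(1, "fanotify_init");

	/* watch all modifications on the mount containing /tmp */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT, FAN_MODIFY,
			  AT_FDCWD, "/tmp") == -1)
		err(1, "fanotify_mark");

	for (;;) {
		len = read(fd, buf, sizeof(buf));
		if (len <= 0)
			err(1, "read");
		for (ev = buf; FAN_EVENT_OK(ev, len);
		     ev = FAN_EVENT_NEXT(ev, len)) {
			if (ev->mask & FAN_Q_OVERFLOW)
				warnx("event queue overflowed");
			printf("event mask 0x%llx\n",
			       (unsigned long long)ev->mask);
			if (ev->fd >= 0)
				close(ev->fd);
		}
	}
}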
> Don't misunderstand me, it would be great for the existing notification
> mechanisms to support these system calls, I just have a specific use case
> in mind that I think is important, at least to me.
>
Please explain the use case and your belief about existing fanotify
limitations. I did not understand it.
Thanks,
Amir.
On 15/9/23 11:06, Amir Goldstein wrote:
> On Fri, Sep 15, 2023 at 4:20 AM Ian Kent <[email protected]> wrote:
>> On 14/9/23 14:47, Amir Goldstein wrote:
>>> On Wed, Sep 13, 2023 at 6:22 PM Miklos Szeredi <[email protected]> wrote:
>>>> Implement the mount querying syscalls agreed on at LSF/MM 2023. This is an
>>>> RFC with just x86_64 syscalls.
>>>>
>>>> Excepting notification this should allow full replacement for
>>>> parsing /proc/self/mountinfo.
>>> Since you mentioned notifications, I will add that the plan discussed
>>> in LFSMM was, once we have an API to query mount stats and children,
>>> implement fanotify events for:
>>> mount [mntuid] was un/mounted at [parent mntuid],[dirfid+name]
>>>
>>> As with other fanotify events, the self mntuid and dirfid+name
>>> information can be omitted and without it, multiple un/mount events
>>> from the same parent mntuid will be merged, allowing userspace
>>> to listmnt() periodically only mntuid whose child mounts have changed,
>>> with little risk of event queue overflow.
>>>
>>> The possible monitoring scopes would be the entire mount namespace
>>> of the monitoring program or watching a single mount for change in
>>> its children mounts. The latter is similar to inotify directory children watch,
>>> where the watches needs to be set recursively, with all the weight on
>>> userspace to avoid races.
>> It's been my belief that the existing notification mechanisms don't
>> quite fully satisfy the needs of users of these calls (aka. the need
>> I found when implementing David's original calls into systemd).
>>
>> Specifically the ability to process a batch of notifications at once.
>>
>> Admittedly the notifications mechanism that David originally implemented
>> didn't fully implement what I found I needed but it did provide for a
>> settable queue length and getting a batch of notifications at a time.
>>
>> Am I mistaken in my belief?
>>
> I am not sure I understand the question.
>
> fanotify has an event queue (16K events by default), but it can
> also use unlimited size.
> With a limited size queue, event queue overflow generates an
> overflow event.
>
> event listeners can read a batch of events, depending on
> the size of the buffer that they provide.
>
> when multiple events with same information are queued,
> for example "something was un/mounted over parent mntuid 100"
> fanotify will merge all those events in the queue and the
> event listeners will get only one such event in the batch.
>
>> Don't misunderstand me, it would be great for the existing notification
>> mechanisms to support these system calls, I just have a specific use case
>> in mind that I think is important, at least to me.
>>
> Please explain the use case and your belief about existing fanotify
> limitations. I did not understand it.
Yes, it's not obvious, I'll try and explain it more clearly.
I did some work to enable systemd to use the original fsinfo() call
and the notifications system David had written.
My use case was perhaps unrealistic, but I have seen real-world reports
with similar symptoms, and autofs usage can behave like this at times as
well, so it's not entirely manufactured. The use case is basically a large
number of mounts occurring over a sustained period of time.
Anyway, systemd processes get notified when there is mount activity and
then read the mount table to update their state. I observed that there are
usually 3 separate systemd processes monitoring mount table changes and,
under the above load, they each use around 80-85% of a CPU.
The thing is, systemd is actually pretty good at processing notifications,
so when there is sustained mount activity and the fsinfo() call is used,
the load shifts from processing the table to processing notifications. The
load goes down to a bit over 40% for each process.
But if you can batch those notifications, say by introducing a high water
mark (yes, I know this is not at all simple and I'm by no means suggesting
this is all that needs to be done), so that a bunch of them are fetched at
once, the throughput increases quite a bit. In my initial testing, adding
a delay of 10 or 20 milliseconds before fetching and processing the queue
of notifications reduced CPU usage to around 8% per process.
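Roughly the shape of that experiment, as a sketch (notify_fd and the
process() callback are stand-ins for whatever notification mechanism and
consumer are in use; the fd is assumed to be non-blocking):

#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <err.h>

/* Wait for the first notification, nap briefly so more can queue up
 * (and merge), then drain everything available in large reads. */
static void drain_events(int notify_fd, void (*process)(char *, ssize_t))
{
	char buf[65536];
	struct pollfd pfd = { .fd = notify_fd, .events = POLLIN };
	ssize_t len;

	for (;;) {
		if (poll(&pfd, 1, -1) == -1)
			err(1, "poll");
		usleep(20 * 1000);	/* crude stand-in for a high water mark */
		while ((len = read(notify_fd, buf, sizeof(buf))) > 0)
			process(buf, len);
		if (len == -1 && errno != EAGAIN)
			err(1, "read");
	}
}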
What I'm saying is that I've found system calls to get the information
directly aren't all that's needed to improve scalability.
Ian
Add a way to query attributes of a single mount instead of having to parse
the complete /proc/$PID/mountinfo, which might be huge.
Lookup the mount by the old (32bit) or new (64bit) mount ID. If a mount
needs to be queried based on path, then statx(2) can be used to first query
the mount ID belonging to the path.
Design is based on a suggestion by Linus:
"So I'd suggest something that is very much like "statfsat()", which gets
a buffer and a length, and returns an extended "struct statfs" *AND*
just a string description at the end."
The interface closely mimics that of statx.
Handle ASCII attributes by appending after the end of the structure (as per
above suggestion). Allow querying multiple string attributes with
individual offset/length for each. Strings are nul terminated (the
terminator isn't counted in the length).
Mount options are also delimited with nul characters. Unlike proc, special
characters are not quoted.
Link: https://lore.kernel.org/all/CAHk-=wh5YifP7hzKSbwJj94+DZ2czjrZsczy6GBimiogZws=rg@mail.gmail.com/
Signed-off-by: Miklos Szeredi <[email protected]>
---
arch/x86/entry/syscalls/syscall_64.tbl | 1 +
fs/internal.h | 5 +
fs/namespace.c | 312 ++++++++++++++++++++++++-
fs/proc_namespace.c | 19 +-
fs/statfs.c | 1 +
include/linux/syscalls.h | 3 +
include/uapi/asm-generic/unistd.h | 5 +-
include/uapi/linux/mount.h | 36 +++
8 files changed, 373 insertions(+), 9 deletions(-)
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 1d6eee30eceb..6d807c30cd16 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -375,6 +375,7 @@
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
453 64 map_shadow_stack sys_map_shadow_stack
+454 common statmnt sys_statmnt
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/fs/internal.h b/fs/internal.h
index d64ae03998cc..8f75271428aa 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -83,6 +83,11 @@ int path_mount(const char *dev_name, struct path *path,
const char *type_page, unsigned long flags, void *data_page);
int path_umount(struct path *path, int flags);
+/*
+ * proc_namespace.c
+ */
+int show_path(struct seq_file *m, struct dentry *root);
+
/*
* fs_struct.c
*/
diff --git a/fs/namespace.c b/fs/namespace.c
index de47c5f66e17..088a52043bba 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -69,7 +69,8 @@ static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
/* Don't allow confusion with mount ID allocated with IDA */
-static atomic64_t mnt_id_ctr = ATOMIC64_INIT(1ULL << 32);
+#define OLD_MNT_ID_MAX UINT_MAX
+static atomic64_t mnt_id_ctr = ATOMIC64_INIT(OLD_MNT_ID_MAX);
static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
@@ -4678,6 +4679,315 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
}
+static bool mnt_id_match(struct mount *mnt, u64 id)
+{
+ if (id <= OLD_MNT_ID_MAX)
+ return id == mnt->mnt_id;
+ else
+ return id == mnt->mnt_id_unique;
+}
+
+struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
+{
+ struct mount *mnt;
+ struct vfsmount *res = NULL;
+
+ lock_ns_list(ns);
+ list_for_each_entry(mnt, &ns->list, mnt_list) {
+ if (!mnt_is_cursor(mnt) && mnt_id_match(mnt, id)) {
+ res = &mnt->mnt;
+ break;
+ }
+ }
+ unlock_ns_list(ns);
+ return res;
+}
+
+struct stmt_state {
+ void __user *const buf;
+ size_t const bufsize;
+ struct vfsmount *const mnt;
+ u64 const mask;
+ struct seq_file seq;
+ struct path root;
+ struct statmnt sm;
+ size_t pos;
+ int err;
+};
+
+typedef int (*stmt_func_t)(struct stmt_state *);
+
+static int stmt_string_seq(struct stmt_state *s, stmt_func_t func)
+{
+ struct seq_file *seq = &s->seq;
+ int ret;
+
+ seq->count = 0;
+ seq->size = min_t(size_t, seq->size, s->bufsize - s->pos);
+ seq->buf = kvmalloc(seq->size, GFP_KERNEL_ACCOUNT);
+ if (!seq->buf)
+ return -ENOMEM;
+
+ ret = func(s);
+ if (ret)
+ return ret;
+
+ if (seq_has_overflowed(seq)) {
+ if (seq->size == s->bufsize - s->pos)
+ return -EOVERFLOW;
+ seq->size *= 2;
+ if (seq->size > MAX_RW_COUNT)
+ return -ENOMEM;
+ kvfree(seq->buf);
+ return 0;
+ }
+
+ /* Done */
+ return 1;
+}
+
+static void stmt_string(struct stmt_state *s, u64 mask, stmt_func_t func,
+ stmt_str_t *str)
+{
+ int ret = s->pos >= s->bufsize ? -EOVERFLOW : 0;
+ struct statmnt *sm = &s->sm;
+ struct seq_file *seq = &s->seq;
+
+ if (s->err || !(s->mask & mask))
+ return;
+
+ seq->size = PAGE_SIZE;
+ while (!ret)
+ ret = stmt_string_seq(s, func);
+
+ if (ret < 0) {
+ s->err = ret;
+ } else {
+ seq->buf[seq->count++] = '\0';
+ if (copy_to_user(s->buf + s->pos, seq->buf, seq->count)) {
+ s->err = -EFAULT;
+ } else {
+ str->off = s->pos;
+ str->len = seq->count - 1;
+ s->pos += seq->count;
+ }
+ }
+ kvfree(seq->buf);
+ sm->mask |= mask;
+}
+
+static void stmt_numeric(struct stmt_state *s, u64 mask, stmt_func_t func)
+{
+ if (s->err || !(s->mask & mask))
+ return;
+
+ s->err = func(s);
+ s->sm.mask |= mask;
+}
+
+static u64 mnt_to_attr_flags(struct vfsmount *mnt)
+{
+ unsigned int mnt_flags = READ_ONCE(mnt->mnt_flags);
+ u64 attr_flags = 0;
+
+ if (mnt_flags & MNT_READONLY)
+ attr_flags |= MOUNT_ATTR_RDONLY;
+ if (mnt_flags & MNT_NOSUID)
+ attr_flags |= MOUNT_ATTR_NOSUID;
+ if (mnt_flags & MNT_NODEV)
+ attr_flags |= MOUNT_ATTR_NODEV;
+ if (mnt_flags & MNT_NOEXEC)
+ attr_flags |= MOUNT_ATTR_NOEXEC;
+ if (mnt_flags & MNT_NODIRATIME)
+ attr_flags |= MOUNT_ATTR_NODIRATIME;
+ if (mnt_flags & MNT_NOSYMFOLLOW)
+ attr_flags |= MOUNT_ATTR_NOSYMFOLLOW;
+
+ if (mnt_flags & MNT_NOATIME)
+ attr_flags |= MOUNT_ATTR_NOATIME;
+ else if (mnt_flags & MNT_RELATIME)
+ attr_flags |= MOUNT_ATTR_RELATIME;
+ else
+ attr_flags |= MOUNT_ATTR_STRICTATIME;
+
+ if (is_idmapped_mnt(mnt))
+ attr_flags |= MOUNT_ATTR_IDMAP;
+
+ return attr_flags;
+}
+
+static u64 mnt_to_propagation_flags(struct mount *m)
+{
+ u64 propagation = 0;
+
+ if (IS_MNT_SHARED(m))
+ propagation |= MS_SHARED;
+ if (IS_MNT_SLAVE(m))
+ propagation |= MS_SLAVE;
+ if (IS_MNT_UNBINDABLE(m))
+ propagation |= MS_UNBINDABLE;
+ if (!propagation)
+ propagation |= MS_PRIVATE;
+
+ return propagation;
+}
+
+static int stmt_sb_basic(struct stmt_state *s)
+{
+ struct super_block *sb = s->mnt->mnt_sb;
+
+ s->sm.sb_dev_major = MAJOR(sb->s_dev);
+ s->sm.sb_dev_minor = MINOR(sb->s_dev);
+ s->sm.sb_magic = sb->s_magic;
+ s->sm.sb_flags = sb->s_flags & (SB_RDONLY|SB_SYNCHRONOUS|SB_DIRSYNC|SB_LAZYTIME);
+
+ return 0;
+}
+
+static int stmt_mnt_basic(struct stmt_state *s)
+{
+ struct mount *m = real_mount(s->mnt);
+
+ s->sm.mnt_id = m->mnt_id_unique;
+ s->sm.mnt_parent_id = m->mnt_parent->mnt_id_unique;
+ s->sm.mnt_id_old = m->mnt_id;
+ s->sm.mnt_parent_id_old = m->mnt_parent->mnt_id;
+ s->sm.mnt_attr = mnt_to_attr_flags(&m->mnt);
+ s->sm.mnt_propagation = mnt_to_propagation_flags(m);
+ s->sm.mnt_peer_group = IS_MNT_SHARED(m) ? m->mnt_group_id : 0;
+ s->sm.mnt_master = IS_MNT_SLAVE(m) ? m->mnt_master->mnt_group_id : 0;
+
+ return 0;
+}
+
+static int stmt_propagate_from(struct stmt_state *s)
+{
+ struct mount *m = real_mount(s->mnt);
+
+ if (!IS_MNT_SLAVE(m))
+ return 0;
+
+ s->sm.propagate_from = get_dominating_id(m, &current->fs->root);
+
+ return 0;
+}
+
+static int stmt_mnt_root(struct stmt_state *s)
+{
+ struct seq_file *seq = &s->seq;
+ int err = show_path(seq, s->mnt->mnt_root);
+
+ if (!err && !seq_has_overflowed(seq)) {
+ seq->buf[seq->count] = '\0';
+ seq->count = string_unescape_inplace(seq->buf, UNESCAPE_OCTAL);
+ }
+ return err;
+}
+
+static int stmt_mountpoint(struct stmt_state *s)
+{
+ struct vfsmount *mnt = s->mnt;
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ int err = seq_path_root(&s->seq, &mnt_path, &s->root, "");
+
+ return err == SEQ_SKIP ? 0 : err;
+}
+
+static int stmt_fs_type(struct stmt_state *s)
+{
+ struct seq_file *seq = &s->seq;
+ struct super_block *sb = s->mnt->mnt_sb;
+
+ seq_puts(seq, sb->s_type->name);
+ if (sb->s_subtype) {
+ seq_putc(seq, '.');
+ seq_puts(seq, sb->s_subtype);
+ }
+ return 0;
+}
+
+static int stmt_sb_opts(struct stmt_state *s)
+{
+ struct seq_file *seq = &s->seq;
+ struct super_block *sb = s->mnt->mnt_sb;
+ char *p, *end, *next, *u = seq->buf;
+ int err;
+
+ if (!sb->s_op->show_options)
+ return 0;
+
+ err = sb->s_op->show_options(seq, s->mnt->mnt_root);
+ if (err || seq_has_overflowed(seq) || !seq->count)
+ return err;
+
+ end = seq->buf + seq->count;
+ *end = '\0';
+ for (p = seq->buf + 1; p < end; p = next + 1) {
+ next = strchrnul(p, ',');
+ *next = '\0';
+ u += string_unescape(p, u, 0, UNESCAPE_OCTAL) + 1;
+ }
+ seq->count = u - 1 - seq->buf;
+ return 0;
+}
+
+static int do_statmnt(struct stmt_state *s)
+{
+ struct statmnt *sm = &s->sm;
+ struct mount *m = real_mount(s->mnt);
+
+ if (!capable(CAP_SYS_ADMIN) &&
+ !is_path_reachable(m, m->mnt.mnt_root, &s->root))
+ return -EPERM;
+
+ stmt_numeric(s, STMT_SB_BASIC, stmt_sb_basic);
+ stmt_numeric(s, STMT_MNT_BASIC, stmt_mnt_basic);
+ stmt_numeric(s, STMT_PROPAGATE_FROM, stmt_propagate_from);
+ stmt_string(s, STMT_MNT_ROOT, stmt_mnt_root, &sm->mnt_root);
+ stmt_string(s, STMT_MOUNTPOINT, stmt_mountpoint, &sm->mountpoint);
+ stmt_string(s, STMT_FS_TYPE, stmt_fs_type, &sm->fs_type);
+ stmt_string(s, STMT_SB_OPTS, stmt_sb_opts, &sm->sb_opts);
+
+ if (s->err)
+ return s->err;
+
+ if (copy_to_user(s->buf, sm, min_t(size_t, s->bufsize, sizeof(*sm))))
+ return -EFAULT;
+
+ return 0;
+}
+
+SYSCALL_DEFINE5(statmnt, u64, mnt_id,
+ u64, mask, struct statmnt __user *, buf,
+ size_t, bufsize, unsigned int, flags)
+{
+ struct vfsmount *mnt;
+ int err;
+
+ if (flags)
+ return -EINVAL;
+
+ down_read(&namespace_sem);
+ mnt = lookup_mnt_in_ns(mnt_id, current->nsproxy->mnt_ns);
+ err = -ENOENT;
+ if (mnt) {
+ struct stmt_state s = {
+ .mask = mask,
+ .buf = buf,
+ .bufsize = bufsize,
+ .mnt = mnt,
+ .pos = sizeof(*buf),
+ };
+
+ get_fs_root(current->fs, &s.root);
+ err = do_statmnt(&s);
+ path_put(&s.root);
+ }
+ up_read(&namespace_sem);
+
+ return err;
+}
+
static void __init init_mount_tree(void)
{
struct vfsmount *mnt;
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 250eb5bf7b52..20681d1f6798 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -132,6 +132,15 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
return err;
}
+int show_path(struct seq_file *m, struct dentry *root)
+{
+ if (root->d_sb->s_op->show_path)
+ return root->d_sb->s_op->show_path(m, root);
+
+ seq_dentry(m, root, " \t\n\\");
+ return 0;
+}
+
static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
{
struct proc_mounts *p = m->private;
@@ -142,13 +151,9 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
MAJOR(sb->s_dev), MINOR(sb->s_dev));
- if (sb->s_op->show_path) {
- err = sb->s_op->show_path(m, mnt->mnt_root);
- if (err)
- goto out;
- } else {
- seq_dentry(m, mnt->mnt_root, " \t\n\\");
- }
+ err = show_path(m, mnt->mnt_root);
+ if (err)
+ goto out;
seq_putc(m, ' ');
/* mountpoints outside of chroot jail will give SEQ_SKIP on this */
diff --git a/fs/statfs.c b/fs/statfs.c
index 96d1c3edf289..cc774c2e2c9a 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -9,6 +9,7 @@
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
+#include <uapi/linux/mount.h>
#include "internal.h"
static int flags_by_mnt(int mnt_flags)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 22bc6bc147f8..1099bd307fa7 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -408,6 +408,9 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz,
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
struct statfs64 __user *buf);
+asmlinkage long sys_statmnt(u64 mnt_id, u64 mask,
+ struct statmnt __user *buf, size_t bufsize,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
#if BITS_PER_LONG == 32
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index abe087c53b4b..640997231ff6 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -823,8 +823,11 @@ __SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
+#define __NR_statmnt 454
+__SYSCALL(__NR_statmnt, sys_statmnt)
+
#undef __NR_syscalls
-#define __NR_syscalls 453
+#define __NR_syscalls 455
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index bb242fdcfe6b..4ec7308a9259 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -138,4 +138,40 @@ struct mount_attr {
/* List of all mount_attr versions. */
#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
+struct stmt_str {
+ __u32 off;
+ __u32 len;
+};
+
+struct statmnt {
+ __u64 mask; /* What results were written [uncond] */
+ __u32 sb_dev_major; /* Device ID */
+ __u32 sb_dev_minor;
+ __u64 sb_magic; /* ..._SUPER_MAGIC */
+ __u32 sb_flags; /* MS_{RDONLY,SYNCHRONOUS,DIRSYNC,LAZYTIME} */
+ __u32 __spare1;
+ __u64 mnt_id; /* Unique ID of mount */
+ __u64 mnt_parent_id; /* Unique ID of parent (for root == mnt_id) */
+ __u32 mnt_id_old; /* Reused IDs used in proc/.../mountinfo */
+ __u32 mnt_parent_id_old;
+ __u64 mnt_attr; /* MOUNT_ATTR_... */
+ __u64 mnt_propagation; /* MS_{SHARED,SLAVE,PRIVATE,UNBINDABLE} */
+ __u64 mnt_peer_group; /* ID of shared peer group */
+ __u64 mnt_master; /* Mount receives propagation from this ID */
+ __u64 propagate_from; /* Propagation from in current namespace */
+ __u64 __spare[20];
+ struct stmt_str mnt_root; /* Root of mount relative to root of fs */
+ struct stmt_str mountpoint; /* Mountpoint relative to root of process */
+ struct stmt_str fs_type; /* Filesystem type[.subtype] */
+ struct stmt_str sb_opts; /* Super block string options (nul delimited) */
+};
+
+#define STMT_SB_BASIC 0x00000001U /* Want/got sb_... */
+#define STMT_MNT_BASIC 0x00000002U /* Want/got mnt_... */
+#define STMT_PROPAGATE_FROM 0x00000004U /* Want/got propagate_from */
+#define STMT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
+#define STMT_MOUNTPOINT 0x00000010U /* Want/got mountpoint */
+#define STMT_FS_TYPE 0x00000020U /* Want/got fs_type */
+#define STMT_SB_OPTS 0x00000040U /* Want/got sb_opts */
+
#endif /* _UAPI_LINUX_MOUNT_H */
--
2.41.0
On 15/9/23 11:06, Amir Goldstein wrote:
> On Fri, Sep 15, 2023 at 4:20 AM Ian Kent <[email protected]> wrote:
>> On 14/9/23 14:47, Amir Goldstein wrote:
>>> On Wed, Sep 13, 2023 at 6:22 PM Miklos Szeredi <[email protected]> wrote:
>>>> Implement the mount querying syscalls agreed on at LSF/MM 2023. This is an
>>>> RFC with just x86_64 syscalls.
>>>>
>>>> Excepting notification this should allow full replacement for
>>>> parsing /proc/self/mountinfo.
>>> Since you mentioned notifications, I will add that the plan discussed
>>> in LFSMM was, once we have an API to query mount stats and children,
>>> implement fanotify events for:
>>> mount [mntuid] was un/mounted at [parent mntuid],[dirfid+name]
>>>
>>> As with other fanotify events, the self mntuid and dirfid+name
>>> information can be omitted and without it, multiple un/mount events
>>> from the same parent mntuid will be merged, allowing userspace
>>> to listmnt() periodically only mntuid whose child mounts have changed,
>>> with little risk of event queue overflow.
>>>
>>> The possible monitoring scopes would be the entire mount namespace
>>> of the monitoring program or watching a single mount for change in
>>> its children mounts. The latter is similar to inotify directory children watch,
>>> where the watches needs to be set recursively, with all the weight on
>>> userspace to avoid races.
>> It's been my belief that the existing notification mechanisms don't
>> quite fully satisfy the needs of users of these calls (aka. the need
>> I found when implementing David's original calls into systemd).
>>
>> Specifically the ability to process a batch of notifications at once.
>>
>> Admittedly the notifications mechanism that David originally implemented
>> didn't fully implement what I found I needed but it did provide for a
>> settable queue length and getting a batch of notifications at a time.
>>
>> Am I mistaken in my belief?
>>
> I am not sure I understand the question.
>
> fanotify has an event queue (16K events by default), but it can
> also use unlimited size.
> With a limited size queue, event queue overflow generates an
> overflow event.
>
> event listeners can read a batch of events, depending on
> the size of the buffer that they provide.
So it sounds like I can get a bunch of events at once with fanotify.
I'll have to look at the code again ...
Ian
>
> when multiple events with same information are queued,
> for example "something was un/mounted over parent mntuid 100"
> fanotify will merge all those events in the queue and the
> event listeners will get only one such event in the batch.
>
>> Don't misunderstand me, it would be great for the existing notification
>> mechanisms to support these system calls, I just have a specific use case
>> in mind that I think is important, at least to me.
>>
> Please explain the use case and your belief about existing fanotify
> limitations. I did not understand it.
>
> Thanks,
> Amir.
On Wed, Sep 13, 2023 at 9:25 AM Miklos Szeredi <[email protected]> wrote:
>
> Add a way to query attributes of a single mount instead of having to parse
> the complete /proc/$PID/mountinfo, which might be huge.
>
> Lookup the mount by the old (32bit) or new (64bit) mount ID. If a mount
> needs to be queried based on path, then statx(2) can be used to first query
> the mount ID belonging to the path.
>
> Design is based on a suggestion by Linus:
>
> "So I'd suggest something that is very much like "statfsat()", which gets
> a buffer and a length, and returns an extended "struct statfs" *AND*
> just a string description at the end."
>
> The interface closely mimics that of statx.
>
> Handle ASCII attributes by appending after the end of the structure (as per
> above suggestion). Allow querying multiple string attributes with
> individual offset/length for each. Strings are nul terminated (the
> terminator isn't counted in the length).
>
> Mount options are also delimited with nul characters. Unlike proc, special
> characters are not quoted.
>
Thank you for writing this patch. I wish that this had existed the many times
I've written parsers for mounts files in my life.
What do you think about exposing the locked flags, a la what happens
on propagation of mount across user namespaces?
On 18/9/23 02:18, Sargun Dhillon wrote:
> On Wed, Sep 13, 2023 at 9:25 AM Miklos Szeredi <[email protected]> wrote:
>> Add a way to query attributes of a single mount instead of having to parse
>> the complete /proc/$PID/mountinfo, which might be huge.
>>
>> Lookup the mount by the old (32bit) or new (64bit) mount ID. If a mount
>> needs to be queried based on path, then statx(2) can be used to first query
>> the mount ID belonging to the path.
>>
>> Design is based on a suggestion by Linus:
>>
>> "So I'd suggest something that is very much like "statfsat()", which gets
>> a buffer and a length, and returns an extended "struct statfs" *AND*
>> just a string description at the end."
>>
>> The interface closely mimics that of statx.
>>
>> Handle ASCII attributes by appending after the end of the structure (as per
>> above suggestion). Allow querying multiple string attributes with
>> individual offset/length for each. Strings are nul terminated (the
>> terminator isn't counted in the length).
>>
>> Mount options are also delimited with nul characters. Unlike proc, special
>> characters are not quoted.
>>
> Thank you for writing this patch. I wish that this had existed the many times
> I've written parsers for mounts files in my life.
>
> What do you think about exposing the locked flags, a la what happens
> on propagation of mount across user namespaces?
Which flags do you mean?
If you mean shared, slave (and I think there's a group id as well, etc.),
then yes: they were available in the original fsinfo() implementation, as
they were requested.
So, yes, it would be good to include those too.
Ian
> So to be clear about your proposal: .mnt_root and .mountpoint are
> initialized by the caller to buffers that the kernel can copy paths
> into?
Yeah, u64 pointer to a buffer and a size (see e.g., @set_tid and
@set_tid_size for struct clone_args, @log_buf and @log_size and other
args in there).
>
> If there's an overflow (one of the buffers was too small) the syscall
> returns -EOVERFLOW?
Yeah, I mean we have to make some things their problem.
To me that is an acceptable compromise.
On Mon, Sep 18, 2023 at 04:32:30PM +0200, Miklos Szeredi wrote:
> On Mon, 18 Sept 2023 at 16:25, Christian Brauner <[email protected]> wrote:
>
> > The system call should please have a proper struct like you had in your
> > first proposal. This is what I'm concerned about:
> >
> > int statmount(u64 mnt_id,
> > struct statmnt __user *st,
> > size_t size,
> > unsigned int flags)
> >
> > instead of taking an void pointer.
>
> So you are not concerned about having ascii strings returned by the
> syscall? I thought that was the main complaint.
I'm not following. The original proposals were only returning strings
even for basic binary data such as mount flags, propagation options, and
so on, and were using the xattr interface for any type of information.
What we're talking about here is a nicely typed struct which returns two
paths @mnt_root and @mnt_point which can both be represented as u64
pointers with length parameters like we do in other binary structs such
as bpf and clone3 and a few others. That is a compromise I can live
with. I'm really trying to find as much common ground here as we can.
On Mon, 18 Sept 2023 at 17:22, Christian Brauner <[email protected]> wrote:
>
> > So to be clear about your proposal: .mnt_root and .mountpoint are
> > initialized by the caller to buffers that the kernel can copy paths
> > into?
>
> Yeah, u64 pointer to a buffer and a size (see e.g., @set_tid and
> @set_tid_size for struct clone_args, @log_buf and @log_size and other
> args in there).
>
> >
> > If there's an overflow (one of the buffers was too small) the syscall
> > returns -EOVERFLOW?
>
> Yeah, I mean we have to make some things their problem.
>
> To me that is an acceptable compromise.
Okay, so there are now (at least) two buffers, and on overflow the
caller cannot know which one overflowed. It can resize both, but
that doesn't make the caller any simpler to implement.
Also the interface is kind of weird in that some struct members are
out, some are in (the pointers and the lengths).
I'd prefer the single buffer interface, which has none of the above issues.
Thanks,
Miklos
On Mon, Sep 18, 2023 at 07:36:39AM +0800, Ian Kent wrote:
>
> On 18/9/23 02:18, Sargun Dhillon wrote:
> > On Wed, Sep 13, 2023 at 9:25 AM Miklos Szeredi <[email protected]> wrote:
> > > Add a way to query attributes of a single mount instead of having to parse
> > > the complete /proc/$PID/mountinfo, which might be huge.
> > >
> > > Lookup the mount by the old (32bit) or new (64bit) mount ID. If a mount
> > > needs to be queried based on path, then statx(2) can be used to first query
> > > the mount ID belonging to the path.
> > >
> > > Design is based on a suggestion by Linus:
> > >
> > > "So I'd suggest something that is very much like "statfsat()", which gets
> > > a buffer and a length, and returns an extended "struct statfs" *AND*
> > > just a string description at the end."
> > >
> > > The interface closely mimics that of statx.
> > >
> > > Handle ASCII attributes by appending after the end of the structure (as per
> > > above suggestion). Allow querying multiple string attributes with
> > > individual offset/length for each. Strings are nul terminated (the
> > > terminator isn't counted in the length).
> > >
> > > Mount options are also delimited with nul characters. Unlike proc, special
> > > characters are not quoted.
> > >
> > Thank you for writing this patch. I wish that this had existed the many times
> > I've written parsers for mounts files in my life.
> >
> > What do you think about exposing the locked flags, a la what happens
> > on propagation of mount across user namespaces?
>
> Which flags do you mean?
When you propagate mounts across mount+user namespaces a subset of
(security sensitive) mount attributes become locked. This information is
currently only available via internal flags but not in any way
explicitly exposed to userspace.
There's a proposal to extend mount_setattr(2) to explicitly allow
locking flags but that would mean a new set of mount attr flags.
So until the format of that is determined and settled this should be
kept out of statmount().
On Mon, Sep 18, 2023 at 04:14:02PM +0200, Miklos Szeredi wrote:
> On Mon, Sep 18, 2023 at 3:51 PM Christian Brauner <[email protected]> wrote:
>
> > I really would prefer a properly typed struct and that's what everyone
> > was happy with in the session as well. So I would not like to change the
> > main parameters.
>
> I completely agree. Just would like to understand this point:
>
> struct statmnt *statmnt(u64 mntid, u64 mask, unsigned int flags);
>
> What's not properly typed about this interface?
>
> I guess the answer is that it's not a syscall interface, which will
> have an added [void *buf, size_t bufsize], while the buffer sizing is
> done by a simple libc wrapper.
>
> Do you think that's a problem? If so, why?
Sorry, I think we just talked passed each other.
I didn't realize you were talking about a glibc wrapper.
I'm not so much concerned with that; they can expose this in whatever
way they like. But we will have a lot of low-level userspace that will
use statmount() directly or won't even have glibc, like Go and other
languages.
The system call should please have a proper struct like you had in your
first proposal. This is what I'm concerned about:
int statmount(u64 mnt_id,
struct statmnt __user *st,
size_t size,
unsigned int flags)
instead of taking an void pointer.
> Fixed size structs are much nicer to deal with, and most of the fields
> we're talking about don't change often enough to make trying to strive
> for perfect atomicity worthwhile.
I think we can live with mnt_root and mnt_mountpoint in struct statmnt
if we add a length field for both them and make them __u64 pointers.
That's what we did in clone3() for the pid array and bpf is doing that
as well for log buffers and pathnames.
So if Miklos is fine with that then I'm happy to compromise. And I think
that's all the variable length data we want in struct statmount anyway.
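Purely as an illustration (the pointer/size field names here are made up,
not a final layout), that variant could look something like:

struct statmnt {
	__u64 mask;		/* what was filled in */
	/* ... fixed-size numeric fields as in the RFC ... */
	__u64 mnt_root_ptr;	/* in: user buffer the kernel writes into */
	__u32 mnt_root_size;	/* in: size of that buffer */
	__u32 __spare2;
	__u64 mountpoint_ptr;	/* in: user buffer the kernel writes into */
	__u32 mountpoint_size;	/* in: size of that buffer */
	__u32 __spare3;
};

with -EOVERFLOW returned if either buffer turns out to be too small, and
the whole struct kept 64 bit aligned.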
> ...and then if the mnt_change_cookie hasn't changed, you know that the
> string option was stable during that window.
Meh, I would really like to sidestep this and keep it as simple as we
can. I like the proposal overall; I just don't want it to get diluted too
much by exploding into another overly broad solution.
On Mon, 18 Sept 2023 at 16:40, Christian Brauner <[email protected]> wrote:
> What we're talking about here is a nicely typed struct which returns two
> paths @mnt_root and @mnt_point which can both be represented as u64
> pointers with length parameters like we do in other binary structs such
> as bpf and clone3 and a few others. That is a compromise I can live
> with. I'm really trying to find as much common ground here as we can.
So to be clear about your proposal: .mnt_root and .mountpoint are
initialized by the caller to buffers that the kernel can copy paths
into?
If there's an overflow (one of the buffers was too small) the syscall
returns -EOVERFLOW?
Thanks,
Miklos
On Mon, 18 Sept 2023 at 16:25, Christian Brauner <[email protected]> wrote:
> The system call should please have a proper struct like you had in your
> first proposal. This is what I'm concerned about:
>
> int statmount(u64 mnt_id,
> struct statmnt __user *st,
> size_t size,
> unsigned int flags)
>
> instead of taking an void pointer.
So you are not concerned about having ascii strings returned by the
syscall? I thought that was the main complaint.
Thanks,
Miklos
On Tue, Sep 19, 2023 at 10:02:17AM +0200, Miklos Szeredi wrote:
> On Tue, 19 Sept 2023 at 02:38, Matthew House <[email protected]> wrote:
>
> > One natural solution is to set either of the two lengths to the expected
> > size if the provided buffers are too small. That way, the caller learns both
> > which of the buffers is too small, and how large they need to be. Replacing
> > a provided size with an expected size in this way already has precedent in
> > existing syscalls:
>
> This is where the thread started. Knowing the size of the buffer is
> no good, since the needed buffer could change between calls.
The same problem would exist for the single buffer. Realistically, users
will most often simply use a fixed size PATH_MAX buffer that will cover
most cases and fall back to allocating a larger buffer in case things go
awry.
I don't think we need to make this atomic either. Providing a hint for
the required buffer size in case this fails is good enough and should be
a rather rare occurrence and is exactly how other variable-sized buffers
are handled.
> Also having the helper allocate buffers inside the struct could easily
> result in leaks since it's not obvious what the caller needs to free,
I don't think we need to be overly concerned with how userspace
implements the wrapper here. Leaks can occur in both scenarios and
low-level userspace can use automatic cleanup macros (we even support it
in the kernel since v6.5) to harden against this.
Really, the main things I care about are 64 bit alignment of the whole
struct, typed __u64 pointers with __u32 size for mnt_root and mnt_point
and that we please spell out "mount" and not use "mnt": so statmount
because the new mount api uses "mount" (move_mount(), mount_setattr(),
fsmount(), MOUNT_ATTR_*) almost everywhere.
On Tue, 19 Sept 2023 at 02:38, Matthew House <[email protected]> wrote:
> One natural solution is to set either of the two lengths to the expected
> size if the provided buffers are too small. That way, the caller learns both
> which of the buffers is too small, and how large they need to be. Replacing
> a provided size with an expected size in this way already has precedent in
> existing syscalls:
This is where the thread started. Knowing the size of the buffer is
no good, since the needed buffer could change between calls.
We are trying to create a simple interface, no? My proposal would
need a helper like this:
struct statmnt *statmount(uint64_t mnt_id, uint64_t mask, unsigned int flags)
{
size_t bufsize = 1 << 15;
void *buf;
int ret;
for (;;) {
buf = malloc(bufsize <<= 1);
if (!buf)
return NULL;
ret = syscall(__NR_statmnt, mnt_id, mask, buf, bufsize, flags);
if (!ret)
return buf;
free(buf);
if (errno != EOVERFLOW)
return NULL;
}
}
Christian's would be (ignoring .fs_type for now):
int statmount(uint64_t mnt_id, uint64_t mask, struct statmnt *st,
unsigned int flags)
{
int ret;
st->mnt_root_size = 1 << 15;
st->mountpoint_size = 1 << 15;
for (;;) {
st->mnt_root = malloc(st->mnt_root_size <<= 1);
st->mountpoint = malloc(st->mountpoint_size <<= 1);
if (!st->mnt_root || !st->mountpoint) {
free(st->mnt_root);
free(st->mountpoint);
return -1;
}
ret = syscall(__NR_statmnt, mnt_id, mask, st,
sizeof(*st), flags);
if (!ret || errno != EOVERFLOW)
return ret;
free(st->mnt_root);
free(st->mountpoint);
}
}
It's not hugely more complex, but more complex nonetheless.
Also having the helper allocate buffers inside the struct could easily
result in leaks since it's not obvious what the caller needs to free,
while in the first example it is.
Note that I'm not against having the prototype on the kernel interface
take a typed pointer. If strings are not needed, both interfaces
would work in exactly the same way.
Thanks,
Miklos
> > with __u32 size for mnt_root and mnt_point
>
> Unnecessary if the strings are nul terminated.
All ok by me so far but how does the kernel know the size of the buffer
to copy into? Wouldn't it be better to allow userspace to specify that?
I'm probably just missing something but I'd better ask.
On Tue, Sep 19, 2023 at 02:59:53PM +0200, Miklos Szeredi wrote:
> On Tue, 19 Sept 2023 at 14:41, Christian Brauner <[email protected]> wrote:
> >
> > > > with __u32 size for mnt_root and mnt_point
> > >
> > > Unnecessary if the strings are nul terminated.
> >
> > All ok by me so far but how does the kernel know the size of the buffer
> > to copy into? Wouldn't it be better to allow userspace to specify that?
> > I'm probably just missing something but I better ask.
>
> Because size of the buffer is given as the syscall argument.
>
> long statmount(u64 mnt_id, u64 mask, struct statmnt __user *buf,
> size_t bufsize, unsigned int flags);
>
> If you are still hung up about this not being properly typed, how about this:
I really just wasn't clear how exactly you envisioned this. Your
proposal as is sounds good to me! I'm on board. I prefer the two offsets
as that lets us avoid searching for null bytes. So please leave it as is!
Thanks!
On Mon, Sep 18, 2023 at 11:39 AM Miklos Szeredi <[email protected]> wrote:
> Okay, so there are now (at least) two buffers, and on overflow the
> caller cannot know which one got overflown. It can resize both, but
> that doesn't make the caller any simpler to implement.
>
> Also the interface is kind of weird in that some struct members are
> out, some are in (the pointers and the lengths).
>
> I'd prefer the single buffer interface, which has none of the above issues.
>
> Thanks,
> Miklos
One natural solution is to set either of the two lengths to the expected
size if the provided buffers are too small. That way, the caller learns both
which of the buffers is too small, and how large they need to be. Replacing
a provided size with an expected size in this way already has precedent in
existing syscalls:
recvmsg(2):
The msg argument points to an in/out struct msghdr, and msg->msg_name
points to an optional buffer which receives the source address. If
msg->msg_namelen is less than the actual size of the source address,
the function truncates the address to that length before storing it in
msg->msg_name; otherwise, it stores the full address. In either case,
it sets msg->msg_namelen to the full size of the source address before
returning.
(An address buffer size is similarly provided directly as an in/out pointer
in accept(2), accept4(2), getpeername(2), getsockname(2), and recvfrom(2).)
name_to_handle_at(2):
The handle argument points to an in/out struct file_handle, followed by
a variable-length char array. If handle->handle_bytes is too small to
store the opaque handle, the function returns -EOVERFLOW; otherwise,
it succeeds. In either case, it sets handle->handle_bytes to the size
of the opaque handle before returning.
perf_event_open(2):
The attr argument points to an in/out struct perf_event_attr. If
attr->size is not a valid size for the struct, the function sets it to
the latest size and returns -E2BIG.
sched_setattr(2):
The attr argument points to an in/out struct sched_attr. If attr->size
is not a valid size for the struct, the function sets it to the latest
size and returns -E2BIG.
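As a concrete example, the name_to_handle_at(2) retry loop referenced above
might look like the sketch below (the kernel updates handle_bytes to the
required size when it fails with EOVERFLOW):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>

int main(int argc, char *argv[])
{
	struct file_handle *fh;
	int mount_id;

	if (argc != 2)
		errx(1, "usage: %s <path>", argv[0]);

	fh = malloc(sizeof(*fh));
	if (!fh)
		err(1, "malloc");
	fh->handle_bytes = 0;			/* deliberately too small */

	if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) != -1 ||
	    errno != EOVERFLOW)
		errx(1, "expected EOVERFLOW");

	/* the kernel filled in the required handle_bytes; retry */
	fh = realloc(fh, sizeof(*fh) + fh->handle_bytes);
	if (!fh)
		err(1, "realloc");
	if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0) == -1)
		err(1, "name_to_handle_at");

	printf("handle: %u bytes, type %d, mount_id %d\n",
	       fh->handle_bytes, fh->handle_type, mount_id);
	free(fh);
	return 0;
}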
The specific pattern of returning the actual size of the strings both on
success and on failure, as with recvmsg(2) and name_to_handle_at(2), is
beneficial for callers that want to copy the strings elsewhere without
having to scan for the null byte. (Also, it would work well if we ever
wanted to return variable-size binary data, such as arrays of structs.)
Indeed, if we returned the actual size of the string, we could even take a
more radical approach of never setting a null byte after the data, leaving
the caller to append its own null byte if it really wants one. But perhaps
that would be taking it a bit too far; I just don't want this API to end up
in an awful situation like strncpy(3) or struct sockaddr_un, where the
buffer is always null-terminated except in one particular edge case. Also,
if we include a null byte in the returned size, it could invite off-by-one
errors in callers that just expect it to be the length of the string.
Meanwhile, if this solution of in/out size fields were adopted, then
there'd still be the question of what to do when a provided size is too
small: should the returned string be truncated (indicating the issue only
by the returned size being greater than the provided size), or should the
entire call fail with an -EOVERFLOW? IMO, the former is strictly more
flexible, since the caller can set a limit on how big a buffer it's willing
to dedicate to any particular string, and it can still retrieve the
remaining data if that buffer isn't quite big enough. But the latter might
be considered a bit more foolproof against callers who don't properly test
for truncation.
Thank you,
Matthew House
On Tue, 19 Sept 2023 at 11:07, Christian Brauner <[email protected]> wrote:
>
> On Tue, Sep 19, 2023 at 10:02:17AM +0200, Miklos Szeredi wrote:
> > This is where the thread started. Knowing the size of the buffer is
> > no good, since the needed buffer could change between calls.
>
> The same problem would exist for the single buffer. Realistically, users
> will most often simply use a fixed size PATH_MAX buffer that will cover
> most cases and fall back to allocating a larger buffer in case things go
> awry.
Exactly. A large buffer will work in 99.99% of the cases. Good
quality implementations will deal with the 0.01% as well, but
optimizing that case is nonsense.
> Really, the main things I care about are 64 bit alignment of the whole
> struct, typed __u64 pointers
Okay.
> with __u32 size for mnt_root and mnt_point
Unnecessary if the strings are nul terminated.
> and that we please spell out "mount" and not use "mnt": so statmount
> because the new mount api uses "mount" (move_mount(), mount_setattr(),
> fsmount(), MOUNT_ATTR_*) almost everywhere.
Okay.
Incremental below.
Also pushed to:
git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git#statmount
Thanks,
Miklos
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index 0d9a47b0ce9b..a1b3ce7d22cc 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -375,8 +375,8 @@
451 common cachestat sys_cachestat
452 common fchmodat2 sys_fchmodat2
453 64 map_shadow_stack sys_map_shadow_stack
-454 common statmnt sys_statmnt
-455 common listmnt sys_listmnt
+454 common statmount sys_statmount
+455 common listmount sys_listmount
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/fs/namespace.c b/fs/namespace.c
index 5362b1ffb26f..803003052bfb 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -68,9 +68,8 @@ static u64 event;
static DEFINE_IDA(mnt_id_ida);
static DEFINE_IDA(mnt_group_ida);
-/* Don't allow confusion with mount ID allocated with IDA */
-#define OLD_MNT_ID_MAX UINT_MAX
-static atomic64_t mnt_id_ctr = ATOMIC64_INIT(OLD_MNT_ID_MAX);
+/* Don't allow confusion with old 32bit mount ID */
+static atomic64_t mnt_id_ctr = ATOMIC64_INIT(1ULL << 32);
static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
@@ -4679,14 +4678,6 @@ SYSCALL_DEFINE5(mount_setattr, int, dfd, const char __user *, path,
return err;
}
-static bool mnt_id_match(struct mount *mnt, u64 id)
-{
- if (id <= OLD_MNT_ID_MAX)
- return id == mnt->mnt_id;
- else
- return id == mnt->mnt_id_unique;
-}
-
struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
{
struct mount *mnt;
@@ -4694,7 +4685,7 @@ struct vfsmount *lookup_mnt_in_ns(u64 id, struct mnt_namespace *ns)
lock_ns_list(ns);
list_for_each_entry(mnt, &ns->list, mnt_list) {
- if (!mnt_is_cursor(mnt) && mnt_id_match(mnt, id)) {
+ if (!mnt_is_cursor(mnt) && id == mnt->mnt_id_unique) {
res = &mnt->mnt;
break;
}
@@ -4747,7 +4738,7 @@ static int stmt_string_seq(struct stmt_state *s, stmt_func_t func)
}
static void stmt_string(struct stmt_state *s, u64 mask, stmt_func_t func,
- stmt_str_t *str)
+ u64 *str)
{
int ret = s->pos >= s->bufsize ? -EOVERFLOW : 0;
struct statmnt *sm = &s->sm;
@@ -4767,8 +4758,7 @@ static void stmt_string(struct stmt_state *s, u64 mask, stmt_func_t func,
if (copy_to_user(s->buf + s->pos, seq->buf, seq->count)) {
s->err = -EFAULT;
} else {
- str->off = s->pos;
- str->len = seq->count - 1;
+ *str = (unsigned long) (s->buf + s->pos);
s->pos += seq->count;
}
}
@@ -4899,39 +4889,10 @@ static int stmt_fs_type(struct stmt_state *s)
struct super_block *sb = s->mnt->mnt_sb;
seq_puts(seq, sb->s_type->name);
- if (sb->s_subtype) {
- seq_putc(seq, '.');
- seq_puts(seq, sb->s_subtype);
- }
- return 0;
-}
-
-static int stmt_sb_opts(struct stmt_state *s)
-{
- struct seq_file *seq = &s->seq;
- struct super_block *sb = s->mnt->mnt_sb;
- char *p, *end, *next, *u = seq->buf;
- int err;
-
- if (!sb->s_op->show_options)
- return 0;
-
- err = sb->s_op->show_options(seq, s->mnt->mnt_root);
- if (err || seq_has_overflowed(seq) || !seq->count)
- return err;
-
- end = seq->buf + seq->count;
- *end = '\0';
- for (p = seq->buf + 1; p < end; p = next + 1) {
- next = strchrnul(p, ',');
- *next = '\0';
- u += string_unescape(p, u, 0, UNESCAPE_OCTAL) + 1;
- }
- seq->count = u - 1 - seq->buf;
return 0;
}
-static int do_statmnt(struct stmt_state *s)
+static int do_statmount(struct stmt_state *s)
{
struct statmnt *sm = &s->sm;
struct mount *m = real_mount(s->mnt);
@@ -4946,7 +4907,6 @@ static int do_statmnt(struct stmt_state *s)
stmt_string(s, STMT_MNT_ROOT, stmt_mnt_root, &sm->mnt_root);
stmt_string(s, STMT_MOUNTPOINT, stmt_mountpoint, &sm->mountpoint);
stmt_string(s, STMT_FS_TYPE, stmt_fs_type, &sm->fs_type);
- stmt_string(s, STMT_SB_OPTS, stmt_sb_opts, &sm->sb_opts);
if (s->err)
return s->err;
@@ -4957,7 +4917,7 @@ static int do_statmnt(struct stmt_state *s)
return 0;
}
-SYSCALL_DEFINE5(statmnt, u64, mnt_id,
+SYSCALL_DEFINE5(statmount, u64, mnt_id,
u64, mask, struct statmnt __user *, buf,
size_t, bufsize, unsigned int, flags)
{
@@ -4980,7 +4940,7 @@ SYSCALL_DEFINE5(statmnt, u64, mnt_id,
};
get_fs_root(current->fs, &s.root);
- err = do_statmnt(&s);
+ err = do_statmount(&s);
path_put(&s.root);
}
up_read(&namespace_sem);
@@ -4988,19 +4948,25 @@ SYSCALL_DEFINE5(statmnt, u64, mnt_id,
return err;
}
-static long do_listmnt(struct vfsmount *mnt, u64 __user *buf, size_t bufsize,
- const struct path *root)
+static long do_listmount(struct vfsmount *mnt, u64 __user *buf, size_t bufsize,
+ const struct path *root, unsigned int flags)
{
struct mount *r, *m = real_mount(mnt);
struct path rootmnt = { .mnt = root->mnt, .dentry = root->mnt->mnt_root };
long ctr = 0;
+ bool reachable_only = true;
- if (!capable(CAP_SYS_ADMIN) &&
- !is_path_reachable(m, mnt->mnt_root, &rootmnt))
- return -EPERM;
+ if (flags & LISTMOUNT_UNREACHABLE) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ reachable_only = false;
+ }
+
+ if (reachable_only && !is_path_reachable(m, mnt->mnt_root, &rootmnt))
+ return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
list_for_each_entry(r, &m->mnt_mounts, mnt_child) {
- if (!capable(CAP_SYS_ADMIN) &&
+ if (reachable_only &&
!is_path_reachable(r, r->mnt.mnt_root, root))
continue;
@@ -5015,14 +4981,14 @@ static long do_listmnt(struct vfsmount *mnt, u64 __user *buf, size_t bufsize,
return ctr;
}
-SYSCALL_DEFINE4(listmnt, u64, mnt_id, u64 __user *, buf, size_t, bufsize,
+SYSCALL_DEFINE4(listmount, u64, mnt_id, u64 __user *, buf, size_t, bufsize,
unsigned int, flags)
{
struct vfsmount *mnt;
struct path root;
long err;
- if (flags)
+ if (flags & ~LISTMOUNT_UNREACHABLE)
return -EINVAL;
down_read(&namespace_sem);
@@ -5030,7 +4996,7 @@ SYSCALL_DEFINE4(listmnt, u64, mnt_id, u64 __user *, buf, size_t, bufsize,
err = -ENOENT;
if (mnt) {
get_fs_root(current->fs, &root);
- err = do_listmnt(mnt, buf, bufsize, &root);
+ err = do_listmount(mnt, buf, bufsize, &root, flags);
path_put(&root);
}
up_read(&namespace_sem);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 5d776cdb6f18..a35fb7b2c842 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -74,6 +74,7 @@ struct landlock_ruleset_attr;
enum landlock_rule_type;
struct cachestat_range;
struct cachestat;
+struct statmnt;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -408,11 +409,11 @@ asmlinkage long sys_statfs64(const char __user *path, size_t sz,
asmlinkage long sys_fstatfs(unsigned int fd, struct statfs __user *buf);
asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
struct statfs64 __user *buf);
-asmlinkage long sys_statmnt(u64 mnt_id, u64 mask,
- struct statmnt __user *buf, size_t bufsize,
- unsigned int flags);
-asmlinkage long sys_listmnt(u64 mnt_id, u64 __user *buf, size_t bufsize,
- unsigned int flags);
+asmlinkage long sys_statmount(u64 mnt_id, u64 mask,
+ struct statmnt __user *buf, size_t bufsize,
+ unsigned int flags);
+asmlinkage long sys_listmount(u64 mnt_id, u64 __user *buf, size_t bufsize,
+ unsigned int flags);
asmlinkage long sys_truncate(const char __user *path, long length);
asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
#if BITS_PER_LONG == 32
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index a2b41370f603..8df6a747e21a 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -823,11 +823,11 @@ __SYSCALL(__NR_cachestat, sys_cachestat)
#define __NR_fchmodat2 452
__SYSCALL(__NR_fchmodat2, sys_fchmodat2)
-#define __NR_statmnt 454
-__SYSCALL(__NR_statmnt, sys_statmnt)
+#define __NR_statmount 454
+__SYSCALL(__NR_statmount, sys_statmount)
-#define __NR_listmnt 455
-__SYSCALL(__NR_listmnt, sys_listmnt)
+#define __NR_listmount 455
+__SYSCALL(__NR_listmount, sys_listmount)
#undef __NR_syscalls
#define __NR_syscalls 456
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 4ec7308a9259..d98b41024507 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -138,11 +138,6 @@ struct mount_attr {
/* List of all mount_attr versions. */
#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
-struct stmt_str {
- __u32 off;
- __u32 len;
-};
-
struct statmnt {
__u64 mask; /* What results were written [uncond] */
__u32 sb_dev_major; /* Device ID */
@@ -159,11 +154,10 @@ struct statmnt {
__u64 mnt_peer_group; /* ID of shared peer group */
__u64 mnt_master; /* Mount receives propagation from this ID */
__u64 propagate_from; /* Propagation from in current namespace */
- __u64 __spare[20];
- struct stmt_str mnt_root; /* Root of mount relative to root of fs */
- struct stmt_str mountpoint; /* Mountpoint relative to root of process */
- struct stmt_str fs_type; /* Filesystem type[.subtype] */
- struct stmt_str sb_opts; /* Super block string options (nul delimted) */
+ __u64 mnt_root; /* [str] Root of mount relative to root of fs */
+ __u64 mountpoint; /* [str] Mountpoint relative to root of process */
+ __u64 fs_type; /* [str] Filesystem type */
+ __u64 __spare[49];
};
#define STMT_SB_BASIC 0x00000001U /* Want/got sb_... */
@@ -172,6 +166,8 @@ struct statmnt {
#define STMT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
#define STMT_MOUNTPOINT 0x00000010U /* Want/got mountpoint */
#define STMT_FS_TYPE 0x00000020U /* Want/got fs_type */
-#define STMT_SB_OPTS 0x00000040U /* Want/got sb_opts */
+
+/* listmount(2) flags */
+#define LISTMOUNT_UNREACHABLE 0x01 /* List unreachable mounts too */
#endif /* _UAPI_LINUX_MOUNT_H */
On Tue, Sep 19, 2023 at 4:02 AM Miklos Szeredi <[email protected]> wrote:
> On Tue, 19 Sept 2023 at 02:38, Matthew House <[email protected]> wrote:
>
> > One natural solution is to set either of the two lengths to the expected
> > size if the provided buffer are too small. That way, the caller learns both
> > which of the buffers is too small, and how large they need to be. Replacing
> > a provided size with an expected size in this way already has precedent in
> > existing syscalls:
>
> This is where the thread started. Knowing the size of the buffer is
> no good, since the needed buffer could change between calls.
As Brauner mentioned, this does not change with the single-buffer
interface. And since changes are not likely to occur extremely frequently,
I feel like it would be better for the caller to only need one retry in the
common case rather than N retries for however many doublings it takes for
the buffer to fit the whole output.
> We are trying to create a simple interface, no? My proposal would
> need a helper like this:
>
> struct statmnt *statmount(uint64_t mnt_id, uint64_t mask, unsigned int flags)
> {
> size_t bufsize = 1 << 15;
> void *buf;
> int ret;
>
> for (;;) {
> buf = malloc(bufsize <<= 1);
> if (!buf)
> return NULL;
> ret = syscall(__NR_statmnt, mnt_id, mask, buf, bufsize, flags);
> if (!ret)
> return buf;
> free(buf);
> if (errno != EOVERFLOW)
> return NULL;
> }
> }
>
> Christian's would be (ignoring .fs_type for now):
>
> int statmount(uint64_t mnt_id, uint64_t mask, struct statmnt *st,
> unsigned int flags)
> {
> int ret;
>
> st->mnt_root_size = 1 << 15;
> st->mountpoint_size = 1 << 15;
> for (;;) {
> st->mnt_root = malloc(st->mnt_root_size <<= 1);
> st->mountpoint = malloc(st->mountpoint_size <<= 1);
> if (!st->mnt_root || !st->mountpoint) {
> free(st->mnt_root);
> free(st->mountpoint);
> return -1;
> }
> ret = syscall(__NR_statmnt, mnt_id, mask, st,
> sizeof(*st), flags);
> if (!ret || errno != EOVERFLOW)
> return ret;
> free(st->mnt_root);
> free(st->mountpoint);
> }
> }
>
> It's not hugely more complex, but more complex nonetheless.
>
> Also having the helper allocate buffers inside the struct could easily
> result in leaks since it's not obvious what the caller needs to free,
> while in the first example it is.
There's nothing stopping the userspace helper from exposing a contiguous
buffer that can be easily freed, even if the kernel API uses a separate-
buffer interface internally. It just takes a bit of addition in the helper
to calculate the correct pointers. To wit:
struct statmnt *statmount(uint64_t mnt_id, uint64_t mask, unsigned int flags)
{
uint32_t mnt_root_size = PATH_MAX;
uint32_t mountpoint_size = PATH_MAX;
struct statmnt *st;
int ret;
for (;;) {
st = malloc(sizeof(*st) + mnt_root_size + mountpoint_size);
if (!st)
return NULL;
st->mnt_root = (char *)st + sizeof(*st);
st->mountpoint = (char *)st + sizeof(*st) + mnt_root_size;
st->mnt_root_size = mnt_root_size;
st->mountpoint_size = mountpoint_size;
ret = syscall(__NR_statmnt, mnt_id, mask, st, sizeof(*st),
flags);
if (ret) {
free(st);
return NULL;
}
if (st->mnt_root_size <= mnt_root_size &&
st->mountpoint_size <= mountpoint_size)
return st;
mnt_root_size = st->mnt_root_size;
mountpoint_size = st->mountpoint_size;
free(st);
}
}
(This is also far more helpful for users of the returned struct statmnt *,
since they can just dereference the two pointers instead of having to
decode the offsets by hand.)
More generally speaking, the biggest reason I dislike the current single-
buffer interface is that the output is "all or nothing": either the caller
has enough space in the buffer to store every single string, or it's unable
to get any fields at all, just an -EOVERFLOW. There's no room for the
caller to say that it just wants the integer fields and doesn't care about
the strings. Thus, to reliably call statmnt() on an arbitrary mount, the
ability to dynamically allocate memory is effectively mandatory. The only
real solution to this would be additional statx-like flags to select the
returned strings.
Meanwhile, with a separate-buffer interface, where the caller provides a
pointer and capacity for each string, granular output would be trivial: the
caller could just specify NULL/0 for any string it doesn't want, and still
successfully retrieve all the integer fields. This would also work well if
the caller, e.g., wants to set a hard cap of PATH_MAX bytes for each string
(since it's using static buffers), but nonetheless wants to retrieve the
integer fields if a string is too long.
Besides that, if the caller is written in standard C but doesn't want to
use malloc(3) to allocate the buffer, then its helper function must be
written very carefully (with a wrapper struct around the header and data)
to satisfy the aliasing rules, which forbid programs from using a struct
statmnt * pointer to read from a declared char[N] array. In practice,
callers tend to very rarely exercise this proper care with existing single-
buffer interfaces, such as recvmsg(2)'s msg_control buffer, and I would not
be very happy if statmnt() further contributed to this widespread issue.
Thank you,
Matthew House
On Tue, 19 Sept 2023 at 23:28, Matthew House <[email protected]> wrote:
> More generally speaking, the biggest reason I dislike the current single-
> buffer interface is that the output is "all or nothing": either the caller
> has enough space in the buffer to store every single string, or it's unable
> to get any fields at all, just an -EOVERFLOW. There's no room for the
> caller to say that it just wants the integer fields and doesn't care about
> the strings. Thus, to reliably call statmnt() on an arbitrary mount, the
> ability to dynamically allocate memory is effectively mandatory. The only
> real solution to this would be additional statx-like flags to select the
> returned strings.
It's already there:
#define STMT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
#define STMT_MNT_POINT 0x00000010U /* Want/got mnt_point */
#define STMT_FS_TYPE 0x00000020U /* Want/got fs_type */
For example, it's perfectly fine to do the following, and it's
guaranteed not to return EOVERFLOW:
struct statmnt st;
unsigned int mask = STMT_SB_BASIC | STMT_MNT_BASIC;
ret = statmount(mnt_id, mask, &st, sizeof(st), flags);
> Besides that, if the caller is written in standard C but doesn't want to
> use malloc(3) to allocate the buffer, then its helper function must be
> written very carefully (with a wrapper struct around the header and data)
> to satisfy the aliasing rules, which forbid programs from using a struct
> statmnt * pointer to read from a declared char[N] array.
I think you interpret aliasing rules incorrectly. The issue with
aliasing is if you access the same piece of memory through different
types. Which is not the case here. In fact with the latest
incarnation of the interface[1] there's no need to access the
underlying buffer at all:
printf("mnt_root: <%s>\n", st->str + st->mnt_root);
So the following is perfectly safe to do (as long as you don't care
about buffer overflow):
char buf[10000];
struct statmnt *st = (void *) buf;
ret = statmount(mnt_id, mask, st, sizeof(buf), flags);
If you do care about handling buffer overflows, then dynamic
allocation is the only sane way.
And before you dive into how this is going to be horrible because the
buffer size needs to be doubled an unknown number of times, think a
bit: have you *ever* seen a line in /proc/self/mountinfo longer than
say 1000 characters? So if the buffer starts out at 64k, how often
will this doubling happen? Right: practically never. Adding
complexity to handle this case is nonsense, as I've said many times.
And there is definitely nonzero complexity involved (just see the
special casing in getxattr and listxattr implementations all over the
place).
Thanks,
Miklos
[1] git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git#statmount-v2
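For context, the getxattr/listxattr special casing referred to above is the
familiar size-probe pattern; a minimal userspace sketch of it (illustrative
only, not taken from any patch in this thread) looks like this:

#include <stdlib.h>
#include <sys/xattr.h>

/* Illustrative sketch of the size-probe dance: ask for the size with a
 * zero-length buffer, allocate, then fetch.  The size can still change
 * between the two calls, which is the race that the kernel-side special
 * casing has to cope with. */
static char *xattr_value(const char *path, const char *name, ssize_t *lenp)
{
	ssize_t len = getxattr(path, name, NULL, 0);	/* size query */
	char *buf;

	if (len < 0)
		return NULL;
	buf = malloc(len + 1);
	if (!buf)
		return NULL;
	len = getxattr(path, name, buf, len + 1);
	if (len < 0) {
		free(buf);
		return NULL;
	}
	*lenp = len;
	return buf;
}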
On Tue, 19 Sept 2023 at 14:41, Christian Brauner <[email protected]> wrote:
>
> > > with __u32 size for mnt_root and mnt_point
> >
> > Unnecessary if the strings are nul terminated.
>
> All ok by me so far but how does the kernel know the size of the buffer
> to copy into? Wouldn't it be better to allow userspace to specify that?
> I'm probably just missing something but I better ask.
Because size of the buffer is given as the syscall argument.
long statmount(u64 mnt_id, u64 mask, struct statmnt __user *buf,
size_t bufsize, unsigned int flags);
If you are still hung up about this not being properly typed, how about this:
struct statmnt {
__u64 mask; /* What results were written [uncond] */
__u32 sb_dev_major; /* Device ID */
[...]
__u64 fs_type; /* [str] Filesystem type */
__u64 __spare[49];
char __string_buf[];
};
Such variable length structures are used all over the place, this
isn't some big invention. The only new thing is that we set pointers
to within the tail part of the buffer, to make the interface work for
the multiple strings case.
Thanks,
Miklos
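To make the intended usage concrete, a caller would then look roughly like
the sketch below. It assumes the struct layout and STMT_* bits shown in this
thread, and that the __u64 string fields carry user-space addresses pointing
into the __string_buf tail; none of this is final.

/* Sketch, assuming the definitions above are available; mnt_id is a
 * previously obtained unique mount ID. */
char buf[1 << 16] __attribute__((aligned(8)));
struct statmnt *st = (struct statmnt *)buf;
long ret;

ret = syscall(__NR_statmount, mnt_id, STMT_MNT_BASIC | STMT_FS_TYPE,
	      st, sizeof(buf), 0);
if (ret == 0 && (st->mask & STMT_FS_TYPE))
	printf("fs_type: %s\n", (const char *)(uintptr_t)st->fs_type);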
On Wed, Sep 20, 2023 at 5:42 AM Miklos Szeredi <[email protected]> wrote:
> On Tue, 19 Sept 2023 at 23:28, Matthew House <[email protected]> wrote:
>
> > More generally speaking, the biggest reason I dislike the current single-
> > buffer interface is that the output is "all or nothing": either the caller
> > has enough space in the buffer to store every single string, or it's unable
> > to get any fields at all, just an -EOVERFLOW. There's no room for the
> > caller to say that it just wants the integer fields and doesn't care about
> > the strings. Thus, to reliably call statmnt() on an arbitrary mount, the
> > ability to dynamically allocate memory is effectively mandatory. The only
> > real solution to this would be additional statx-like flags to select the
> > returned strings.
>
> It's already there:
>
> #define STMT_MNT_ROOT 0x00000008U /* Want/got mnt_root */
> #define STMT_MNT_POINT 0x00000010U /* Want/got mnt_point */
> #define STMT_FS_TYPE 0x00000020U /* Want/got fs_type */
>
> For example, it's perfectly fine to do the following, and it's
> guaranteed not to return EOVERFLOW:
>
> struct statmnt st;
> unsigned int mask = STMT_SB_BASIC | STMT_MNT_BASIC;
>
> ret = statmount(mnt_id, mask, &st, sizeof(st), flags);
Whoops, my apologies; perhaps I should try to learn to read for once. (I
just saw the undecorated sequence of stmt_numeric() and stmt_string() calls
and didn't notice the early exits within the functions.) I withdraw that
particular objection.
> > Besides that, if the caller is written in standard C but doesn't want to
> > use malloc(3) to allocate the buffer, then its helper function must be
> > written very carefully (with a wrapper struct around the header and data)
> > to satisfy the aliasing rules, which forbid programs from using a struct
> > statmnt * pointer to read from a declared char[N] array.
>
> I think you interpret aliasing rules incorrectly. The issue with
> aliasing is if you access the same piece of memory though different
> types. Which is not the case here. In fact with the latest
> incarnation of the interface[1] there's no need to access the
> underlying buffer at all:
>
> printf("mnt_root: <%s>\n", st->str + st->mnt_root);
>
> So the following is perfectly safe to do (as long as you don't care
> about buffer overflow):
>
> char buf[10000];
> struct statmnt *st = (void *) buf;
>
> ret = statmount(mnt_id, mask, st, sizeof(buf), flags);
The declared type of a variable *is* one of the different types, as far as
the aliasing rules are concerned. In C17, section 6.5 ("Expressions"):
> The *effective type* of an object for an access to its stored value is
> the declared type of the object, if any. [More rules about objects with
> no declared type, i.e., those created with malloc(3) or realloc(3)...]
>
> An object shall have its stored value accessed only by an lvalue
> expression that has one of the following types:
>
> -- a type compatible with the effective type of the object,
>
> -- a qualified version of a type compatible with the effective type of
> the object,
>
> -- a type that is the signed or unsigned type corresponding to the
> effective type of the object,
>
> -- a type that is the signed or unsigned type corresponding to a
> qualified version of the effective type of the object,
>
> -- an aggregate or union type that includes one of the aforementioned
> types among its members (including, recursively, a member of a
> subaggregate or contained union), or
>
> -- a character type.
In this case, buf is declared in the program as a char[10000] array, so the
declared type of each element is char, and the effective type of each
element is also char. If we want to access, say, st->mnt_id, the lvalue
expression has type __u64, and it tries to access 8 of the char objects.
However, the integer type that __u64 expands to doesn't meet any of those
criteria, so the aliasing rules are violated and the behavior is undefined.
(The statmount() helper could in theory avoid UB by saying the struct
statmnt object is stored in the buffer as if by memcpy(3), but it would
still be UB for the caller to access the fields of that pointer directly
instead of memcpy'ing them back out of the buffer. And practically no one
does that in the real world.)
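For what it's worth, the memcpy-style access mentioned in the parenthesis
above would look roughly like this (a sketch; it needs <string.h>,
<stddef.h> and the struct statmnt definition from the proposed UAPI header):

/* Strictly conforming access: copy the field's bytes out of the char[]
 * buffer instead of dereferencing a struct statmnt * that aliases it. */
static __u64 read_mnt_id(const char *buf)
{
	__u64 id;

	memcpy(&id, buf + offsetof(struct statmnt, mnt_id), sizeof(id));
	return id;
}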
It's a common misconception that the aliasing rules as written are about
accessing the same object through two different pointer types. That
corollary is indeed what compilers mainly care about, but the C/C++
standards further say that objects in memory "remember" the types they were
created with, and they demand that programs respect objects' original types
when trying to access them (except when accessing their raw representations
via a pointer of character type).
> If you do care about handling buffer overflows, then dynamic
> allocation is the only sane way.
>
> And before you dive into how this is going to be horrible because the
> buffer size needs to be doubled an unknown number of times, think a
> bit: have you *ever* seen a line in /proc/self/mountinfo longer than
> say 1000 characters? So if the buffer starts out at 64k, how often
> will this doubling happen? Right: practically never. Adding
> complexity to handle this case is nonsense, as I've said many times.
> And there is definitely nonzero complexity involved (just see the
> special casing in getxattr and listxattr implementations all over the
> place).
>
> Thanks,
> Miklos
I've always felt that capacity doubling is a bit wasteful, but it's
definitely something I can live with, especially if providing size feedback
is as complex as you suggest. Still, I'm not a big fan of single-buffer
interfaces in general, with how poorly they tend to interact with C's
aliasing rules. (Also, those kinds of interfaces also invite alignment
errors: for instance, your snippet above is missing the necessary union to
prevent the buffer from being misaligned, which would cause UB when you
cast it to a struct statmnt *.)
Thank you,
Matthew House
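For reference, the union Matthew has in mind would turn the earlier snippet
into something like this (sketch only):

/* The struct member guarantees suitable alignment for struct statmnt,
 * and accesses go through an object that actually has that type. */
union {
	struct statmnt st;
	char raw[1 << 16];
} u;
long ret;

ret = syscall(__NR_statmount, mnt_id, mask, &u.st, sizeof(u), flags);
if (ret == 0)
	printf("mnt_id: %llu\n", (unsigned long long)u.st.mnt_id);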
On Wed, 20 Sept 2023 at 15:26, Matthew House <[email protected]> wrote:
> The declared type of a variable *is* one of the different types, as far as
> the aliasing rules are concerned. In C17, section 6.5 ("Expressions"):
>
> > The *effective type* of an object for an access to its stored value is
> > the declared type of the object, if any. [More rules about objects with
> > no declared type, i.e., those created with malloc(3) or realloc(3)...]
> >
> > An object shall have its stored value accessed only by an lvalue
> > expression that has one of the following types:
> >
> > -- a type compatible with the effective type of the object,
> >
> > -- a qualified version of a type compatible with the effective type of
> > the object,
> >
> > -- a type that is the signed or unsigned type corresponding to the
> > effective type of the object,
> >
> > -- a type that is the signed or unsigned type corresponding to a
> > qualified version of the effective type of the object,
> >
> > -- an aggregate or union type that includes one of the aforementioned
> > types among its members (including, recursively, a member of a
> > subaggregate or contained union), or
> >
> > -- a character type.
>
> In this case, buf is declared in the program as a char[10000] array, so the
> declared type of each element is char, and the effective type of each
> element is also char. If we want to access, say, st->mnt_id, the lvalue
> expression has type __u64, and it tries to access 8 of the char objects.
> However, the integer type that __u64 expands to doesn't meet any of those
> criteria, so the aliasing rules are violated and the behavior is undefined.
Some of the above is new information for me.
However for all practical purposes the code doesn't violate aliasing
rules. Even the most aggressive "-Wstrict-aliasing=1" doesn't trigger
a warning. I guess this is because gcc takes the definition to be
symmetric, i.e. anything may safely be aliased to a char pointer and a
char pointer may safely be aliased to anything. I'm not saying that
that is what the language definition says, just that gcc interprets
the language definition that way. Also plain "-Wstrict-aliasing"
doesn't trigger even if the type of the array is not char, because gcc
tries hard not to warn about cases where there's no dereference of the
aliased pointer. This is consistent with what I said and what the gcc
manpage says: only accesses count, declarations don't.
>
> I've always felt that capacity doubling is a bit wasteful, but it's
> definitely something I can live with, especially if providing size feedback
> is as complex as you suggest. Still, I'm not a big fan of single-buffer
> interfaces in general, with how poorly they tend to interact with C's
> aliasing rules. (Also, those kinds of interfaces also invite alignment
> errors: for instance, your snippet above is missing the necessary union to
> prevent the buffer from being misaligned, which would cause UB when you
> cast it to a struct statmnt *.)
Okay, alignment is a different story. I'll note this in the man page.
Thanks,
Miklos
On Mon, Sep 25, 2023 at 02:57:31PM +0200, Arnd Bergmann wrote:
> On Wed, Sep 13, 2023, at 17:22, Miklos Szeredi wrote:
>
> > asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
> > struct statfs64 __user *buf);
> > +asmlinkage long sys_statmnt(u64 mnt_id, u64 mask,
> > + struct statmnt __user *buf, size_t bufsize,
> > + unsigned int flags);
>
> This definition is problematic on 32-bit architectures for two
> reasons:
>
> - 64-bit register arguments are passed in pairs of registers
> on two architectures, so anything passing those needs to
> have a separate entry point for compat syscalls on 64-bit
> architectures. I would suggest also using the same one on
> 32-bit ones, so you don't rely on the compiler splitting
> up the long arguments into pairs.
>
> - There is a limit of six argument registers for system call
> entry points, but with two pairs and three single registers
> you end up with seven of them.
>
> The listmnt syscall in patch 3 also has the first problem,
> but not the second.
Both fields could also just be moved into the struct itself just like we
did for clone3() and others.
> How about passing u64 *?
struct statmnt_req {
__u64 mnt_id;
__u64 mask;
};
?
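With the two 64-bit values packed into that struct, the entry point drops to
four arguments with no 64-bit register pairs, so one possible shape (a
sketch, nothing final) would be:

/* Sketch of a possible prototype using the request struct above; a
 * single entry point could then serve native and compat callers. */
asmlinkage long sys_statmount(const struct statmnt_req __user *req,
			      struct statmnt __user *buf, size_t bufsize,
			      unsigned int flags);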
On Wed, Sep 13, 2023, at 17:22, Miklos Szeredi wrote:
> asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
> struct statfs64 __user *buf);
> +asmlinkage long sys_statmnt(u64 mnt_id, u64 mask,
> + struct statmnt __user *buf, size_t bufsize,
> + unsigned int flags);
This definition is problematic on 32-bit architectures for two
reasons:
- 64-bit register arguments are passed in pairs of registers
on two architectures, so anything passing those needs to
have a separate entry point for compat syscalls on 64-bit
architectures. I would suggest also using the same one on
32-bit ones, so you don't rely on the compiler splitting
up the long arguments into pairs.
- There is a limit of six argument registers for system call
entry points, but with two pairs and three single registers
you end up with seven of them.
The listmnt syscall in patch 3 also has the first problem,
but not the second.
Arnd
On Mon, 25 Sept 2023 at 15:19, Christian Brauner <[email protected]> wrote:
>
> > How about passing u64 *?
>
> struct statmnt_req {
> __u64 mnt_id;
> __u64 mask;
> };
>
> ?
I'm fine with that as well.
Thanks,
Miklos
On Mon, 25 Sept 2023 at 15:04, Christian Brauner <[email protected]> wrote:
>
> On Mon, Sep 25, 2023 at 02:57:31PM +0200, Arnd Bergmann wrote:
> > On Wed, Sep 13, 2023, at 17:22, Miklos Szeredi wrote:
> >
> > > asmlinkage long sys_fstatfs64(unsigned int fd, size_t sz,
> > > struct statfs64 __user *buf);
> > > +asmlinkage long sys_statmnt(u64 mnt_id, u64 mask,
> > > + struct statmnt __user *buf, size_t bufsize,
> > > + unsigned int flags);
> >
> > This definition is problematic on 32-bit architectures for two
> > reasons:
> >
> > - 64-bit register arguments are passed in pairs of registers
> > on two architectures, so anything passing those needs to
> > have a separate entry point for compat syscalls on 64-bit
> > architectures. I would suggest also using the same one on
> > 32-bit ones, so you don't rely on the compiler splitting
> > up the long arguments into pairs.
> >
> > - There is a limit of six argument registers for system call
> > entry points, but with two pairs and three single registers
> > you end up with seven of them.
> >
> > The listmnt syscall in patch 3 also has the first problem,
> > but not the second.
>
> Both fields could also just be moved into the struct itself just like we
> did for clone3() and others.
Let's not mix in and out args, please.
How about passing u64 *?
Thanks,
Miklos
On Mon, Sep 25, 2023 at 05:46:59PM +0200, Arnd Bergmann wrote:
> On Mon, Sep 25, 2023, at 15:20, Miklos Szeredi wrote:
> > On Mon, 25 Sept 2023 at 15:19, Christian Brauner <[email protected]> wrote:
> >>
> >> > How about passing u64 *?
> >>
> >> struct statmnt_req {
> >> __u64 mnt_id;
> >> __u64 mask;
> >> };
> >>
> >> ?
> >
> > I'm fine with that as well.
>
> Yes, this looks fine for the compat syscall purpose.
>
> Not sure if losing visibility of the mnt_id and mask in ptrace
> or seccomp/bpf is a problem though.
It's an information retrieval syscall so there shouldn't be any need to
block it and I think that this ship has sailed in general. Container
workloads should migrate from seccomp to landlock if they need to filter
system calls like this.
On Mon, Sep 25, 2023, at 15:20, Miklos Szeredi wrote:
> On Mon, 25 Sept 2023 at 15:19, Christian Brauner <[email protected]> wrote:
>>
>> > How about passing u64 *?
>>
>> struct statmnt_req {
>> __u64 mnt_id;
>> __u64 mask;
>> };
>>
>> ?
>
> I'm fine with that as well.
Yes, this looks fine for the compat syscall purpose.
Not sure if losing visibility of the mnt_id and mask in ptrace
or seccomp/bpf is a problem though.
Arnd
> I also don't quite understand the dislike of variable-sized records.
> Don't getdents, inotify, Netlink all use them? And I think at least for
> Netlink, more stuff is added all the time?
Netlink is absolutely atrocious to work with because everything is
variable sized and figuring out the correct allocation size is a
complete nightmare even with the "helpful" macros that are provided.
The bigger problem however is the complete untypedness even of the most
basic things. For example, retrieving the mtu of a network interface
through netlink is a complete nightmare. getdents, inotify, fanotify,
open_by_handle_at()'s struct file_handle are all fine. But let's
absolutely not take netlink as a model for anything related to mounts.
And no one is against variable sized records per se. I think we're
coming to a good compromise here.
* Miklos Szeredi:
> On Tue, 26 Sept 2023 at 16:19, Florian Weimer <[email protected]> wrote:
>
>> getdents gets away with this buffer size because applications can copy
>> out all the data from struct dirent if they need long-term storage.
>> They have to do that because the usual readdir interface overwrites the
>> buffer, potentially at the next readdir call. This means the buffer
>> size does not introduce an amount of memory fragmentation that is
>> dependent on the directory size.
>>
>> With an opaque, pointer-carrying struct, copying out the data is not
>> possible in a generic fashion. Only the parts that the application
>> knows about can be copied out. So I think it's desirable to have a
>> fairly exact allocation.
>
> Okay, so let's add a 'size' field to the struct, which is set to the
> size used (as opposed to the size of the buffer). That should solve
> copying without wasting a single byte of memory.
That would be helpful.
> Otherwise the format is fully copyable, since the strings are denoted
> with an offset, which doesn't change after the buffer is copied.
I missed the development in that direction. Yes, offsets would work
nicely in this context. They help with compat syscalls, too.
If the buffer is relocatable like that, we can even try first with a
reasonably sized on-stack buffer and create an exactly-sized heap
allocation from that.
Thanks,
Florian
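Spelled out, that pattern could look like the sketch below; it assumes the
'size' field proposed above (number of bytes actually used) and
offset-encoded strings, so the copied struct stays self-contained.

/* Sketch: fill a reasonably sized on-stack buffer, then make an exactly
 * sized heap copy.  The 'size' member is the hypothetical field under
 * discussion, holding the number of bytes actually used. */
struct statmnt *statmount_copy(uint64_t mnt_id, uint64_t mask)
{
	char stackbuf[1 << 15] __attribute__((aligned(8)));
	struct statmnt *tmp = (struct statmnt *)stackbuf, *res;

	if (syscall(__NR_statmount, mnt_id, mask, tmp, sizeof(stackbuf), 0) < 0)
		return NULL;	/* e.g. EOVERFLOW: fall back to a heap buffer */

	res = malloc(tmp->size);
	if (!res)
		return NULL;
	memcpy(res, tmp, tmp->size);
	return res;
}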
> With an opaque, pointer-carrying struct, copying out the data is not
> possible in a generic fashion. Only the parts that the application
> knows about can be copied out. So I think it's desirable to have a
> fairly exact allocation.
This could easily be added if we added size parameters like I originally
suggested for the variable sized mnt_root and mnt_point records into
struct statmount.
If the user specifies in @mask that they want to retrieve mnt_root and
mnt_mountpoint and the size for the relevant field is zero, then we fill
in the required size for that field. If the sizes aren't zero, we just
try to copy the data into the buffer given in the relevant pointer field.
I prefer this interface as it allows for both strategies:
* users that don't care about exact allocation size can just pass a
guesstimated buffer, usually PATH_MAX/2 or so
* users that care about exact allocation size can query the kernel
On Tue, 26 Sept 2023 at 16:19, Florian Weimer <[email protected]> wrote:
> getdents gets away with this buffer size because applications can copy
> out all the data from struct dirent if they need long-term storage.
> They have to do that because the usual readdir interface overwrites the
> buffer, potentially at the next readdir call. This means the buffer
> size does not introduce an amount of memory fragmentation that is
> dependent on the directory size.
>
> With an opaque, pointer-carrying struct, copying out the data is not
> possible in a generic fashion. Only the parts that the application
> knows about can be copied out. So I think it's desirable to have a
> fairly exact allocation.
Okay, so let's add a 'size' field to the struct, which is set to the
size used (as opposed to the size of the buffer). That should solve
copying without wasting a single byte of memory.
Otherwise the format is fully copyable, since the strings are denoted
with an offset, which doesn't change after the buffer is copied.
Thanks,
Miklos
On Tue, 26 Sept 2023 at 15:49, Florian Weimer <[email protected]> wrote:
>
> * Miklos Szeredi:
>
> > On Mon, Sep 18, 2023 at 3:51 PM Christian Brauner <[email protected]> wrote:
> >
> >> I really would prefer a properly typed struct and that's what everyone
> >> was happy with in the session as well. So I would not like to change the
> >> main parameters.
> >
> > I completely agree. Just would like to understand this point:
> >
> > struct statmnt *statmnt(u64 mntid, u64 mask, unsigned int flags);
> >
> > What's not properly typed about this interface?
> >
> > I guess the answer is that it's not a syscall interface, which will
> > have an added [void *buf, size_t bufsize], while the buffer sizing is
> > done by a simple libc wrapper.
> >
> > Do you think that's a problem? If so, why?
>
> Try-and-resize interfaces can be quite bad for data obtained from the
> network.
In this particular case it's all local information.
> If the first call provides the minimum buffer size (like
> getgroups, but unlike readlink or the glibc *_r interfaces for NSS),
> this could at least allow us to avoid allocating too much. In
> userspace, we cannot reduce the size of the heap allocation without
> knowing where the pointers are and what they mean.
Does it matter if the heap allocation is, say, 32k instead of 589 bytes?
The returned strings are not limited in size, but are quite unlikely
to be over PATH_MAX.
E.g. getdents apparently uses 32k buffers, which is really a tiny
amount of heap these days, but more than enough for the purpose. Not
sure if this is hard coded into libc or if it's the result of some
heuristic based on available memory, but I don't see why similar
treatment couldn't be applied to the statmount(2) syscall.
> I also don't quite understand the dislike of variable-sized records.
> Don't getdents, inotify, Netlink all use them? And I think at least for
> Netlink, more stuff is added all the time?
What do you mean by variable sized records?
Thanks,
Miklos
* Miklos Szeredi:
>> Try-and-resize interfaces can be quite bad for data obtained from the
>> network.
>
> In this particular case it's all local information.
That's good.
>> If the first call provides the minimum buffer size (like
>> getgroups, but unlike readlink or the glibc *_r interfaces for NSS),
>> this could at least allow us to avoid allocating too much. In
>> userspace, we cannot reduce the size of the heap allocation without
>> knowing where the pointers are and what they mean.
>
> Does it matter if the heap allocation is, say, 32k instead of 589 bytes?
> The returned strings are not limited in size, but are quite unlikely
> to be over PATH_MAX.
It matters if the application needs to keep a copy.
> E.g. getdents apparently uses 32k buffers, which is really a tiny
> amount of heap these days, but more than enough for the purpose. Not
> sure if this is hard coded into libc or if it's the result of some
> heuristic based on available memory, but I don't see why similar
> treatment couldn't be applied to the statmount(2) syscall.
getdents gets away with this buffer size because applications can copy
out all the data from struct dirent if they need long-term storage.
They have to do that because the usual readdir interface overwrites the
buffer, potentially at the next readdir call. This means the buffer
size does not introduce an amount of memory fragmentation that is
dependent on the directory size.
With an opaque, pointer-carrying struct, copying out the data is not
possible in a generic fashion. Only the parts that the application
knows about can be copied out. So I think it's desirable to have a
fairly exact allocation.
>> I also don't quite understand the dislike of variable-sized records.
>> Don't getdents, inotify, Netlink all use them? And I think at least for
>> Netlink, more stuff is added all the time?
>
> What do you mean by variable sized records?
Iterating through d_reclen-sized subobjects (for getdents).
Thanks,
Florian
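For readers who haven't used it, that iteration looks roughly like the
sketch below (given an open directory fd; the record type is declared
locally, as the getdents64(2) man page does, matching the kernel's
struct linux_dirent64):

struct linux_dirent64 {
	uint64_t       d_ino;
	int64_t        d_off;
	unsigned short d_reclen;
	unsigned char  d_type;
	char           d_name[];
};

char buf[1 << 15];
long nread = syscall(SYS_getdents64, fd, buf, sizeof(buf));

/* Walk the variable-sized records: each entry advertises its own
 * length in d_reclen. */
for (long pos = 0; pos < nread; ) {
	struct linux_dirent64 *d = (struct linux_dirent64 *)(buf + pos);

	printf("%s\n", d->d_name);
	pos += d->d_reclen;
}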
On Mon, 25 Sept 2023 at 15:20, Miklos Szeredi <[email protected]> wrote:
>
> On Mon, 25 Sept 2023 at 15:19, Christian Brauner <[email protected]> wrote:
> >
> > > How about passing u64 *?
> >
> > struct statmnt_req {
> > __u64 mnt_id;
> > __u64 mask;
> > };
> >
> > ?
>
> I'm fine with that as well.
So after a bit more thinking: this is okay to make life easier for
32-bit archs, but only at the kernel ABI level.
On the library API the args should *not* be multiplexed, as it's just
a pointless complication. This is just an internal implementation
detail for the sake of legacy architectures, instead of being good API
design.
And because it's an internal thingy, my feeling is that this struct
could be reused for passing mnt_id to listmount(2) as well, despite
the fact that the mask would be unused. But I'm ready to be
convinced otherwise...
Thanks,
Miklos
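Concretely, the reuse being suggested would look something like this
(a sketch, nothing settled):

/* Sketch only: both calls take the same request struct; listmount
 * simply ignores the mask field. */
asmlinkage long sys_listmount(const struct statmnt_req __user *req,
			      u64 __user *buf, size_t bufsize,
			      unsigned int flags);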