LTP NFS tests (which use netns) fail on tmpfs since d4066486:
mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
Fixes: d4066486 ("mount.nfs: improve version negotiation when vers=4 is specified.")
Signed-off-by: Petr Vorel <[email protected]>
---
Hi,
Not sure if this is the correct fix, thus RFC (I'm not from @umn.edu :)).
I suppose tmpfs is still meant to be supported, but maybe I'm wrong.
I did testing with LTP [1]:
$ for i in 3 4 4.1 4.2; do echo "* version: $i"; PATH="/opt/ltp/testcases/bin:$PATH" nfs01 -v $i -t tcp; done
The core of the tests is nfs_lib.sh [2], which sets up a network namespace
(with the help of tst_net.sh [3]), sets up NFS with exportfs (using fsid= so
that it works properly on tmpfs), and runs various tests with these NFS
versions: 3, 4, 4.1, 4.2.
Kind regards,
Petr
[1] https://github.com/linux-test-project/ltp
[2] https://github.com/linux-test-project/ltp/blob/master/testcases/network/nfs/nfs_stress/nfs_lib.sh
[3] https://github.com/linux-test-project/ltp/blob/master/testcases/lib/tst_net.sh
utils/mount/Makefile.am | 3 ++-
utils/mount/stropts.c | 29 ++++++++++++++++++++++++++---
2 files changed, 28 insertions(+), 4 deletions(-)
diff --git a/utils/mount/Makefile.am b/utils/mount/Makefile.am
index ad0be93b..d3905bec 100644
--- a/utils/mount/Makefile.am
+++ b/utils/mount/Makefile.am
@@ -28,7 +28,8 @@ endif
mount_nfs_LDADD = ../../support/nfs/libnfs.la \
../../support/export/libexport.a \
../../support/misc/libmisc.a \
- $(LIBTIRPC)
+ $(LIBTIRPC) \
+ $(LIBPTHREAD)
mount_nfs_SOURCES = $(mount_common)
diff --git a/utils/mount/stropts.c b/utils/mount/stropts.c
index 174a05f6..3961b8ce 100644
--- a/utils/mount/stropts.c
+++ b/utils/mount/stropts.c
@@ -31,6 +31,7 @@
#include <time.h>
#include <sys/socket.h>
+#include <sys/statfs.h>
#include <sys/mount.h>
#include <netinet/in.h>
#include <arpa/inet.h>
@@ -50,6 +51,7 @@
#include "parse_dev.h"
#include "conffile.h"
#include "misc.h"
+#include "nfsd_path.h"
#ifndef NFS_PROGRAM
#define NFS_PROGRAM (100003)
@@ -104,6 +106,21 @@ struct nfsmount_info {
child; /* forked bg child? */
};
+/*
+ * Returns TRUE if the local mount point is on tmpfs, otherwise FALSE.
+ */
+static int is_tmpfs(struct nfsmount_info *mi)
+{
+ struct statfs64 st;
+
+ if (nfsd_path_statfs64(mi->node, &st)) {
+ nfs_error(_("%s: Failed to statfs64 on path %s: %s"),
+ progname, mi->node, strerror(errno));
+ return 0;
+ }
+
+ return st.f_type == 0x01021994; /* TMPFS_MAGIC */
+}
static void nfs_default_version(struct nfsmount_info *mi)
{
@@ -873,6 +890,9 @@ static int nfs_try_mount_v4(struct nfsmount_info *mi)
case EACCES:
continue;
default:
+ if (is_tmpfs(mi))
+ return 1;
+
goto out;
}
}
@@ -951,9 +971,12 @@ check_result:
}
fall_back:
- if (mi->version.v_mode == V_GENERAL)
- /* v2,3 fallback not allowed */
- return result;
+ if (mi->version.v_mode == V_GENERAL) {
+
+ /* v2,3 fallback not allowed unless tmpfs */
+ if (!is_tmpfs(mi))
+ return result;
+ }
/*
* Save the original errno in case the v3
--
2.31.1
On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> LTP NFS tests (which use netns) fails on tmpfs since d4066486:
>
> mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
We should figure out the reason for the failure. A network trace might
help.
--b.
Hi,
> On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
> > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
> We should figure out the reason for the failure. A network trace might
> help.
Anything specific you're looking for?
Doing full debugging:
rpcdebug -m nfs -s all
rpcdebug -m nfsd -s all
rpcdebug -m rpc -s all
I see:
[13890.993127] nfsd_inet6addr_event: removed fd00:0001:0001:0001:0000:0000:0000:0002
[13890.995428] nfsd_inet6addr_event: removed fe80:0000:0000:0000:1463:9fff:fea6:01b1
[13891.002920] nfsd_inetaddr_event: removed 10.0.0.2
[13891.007501] IPv6: ADDRCONF(NETDEV_CHANGE): ltp_ns_veth2: link becomes ready
[13891.223432] NFS: parsing nfs mount option 'source'
[13891.225347] NFS: parsing nfs mount option 'proto'
[13891.227216] NFS: parsing nfs mount option 'vers'
[13891.228684] NFS: parsing nfs mount option 'addr'
[13891.229994] NFS: parsing nfs mount option 'clientaddr'
[13891.231326] NFS: MNTPATH: '/tmp/LTP_nfs01.lQghifD6NF/4.1/tcp'
[13891.232923] --> nfs4_try_get_tree()
[13891.235025] NFS: get client cookie (0x0000000013cf211e/0x0000000014b6df5b)
[13891.237466] RPC: set up xprt to 10.0.0.2 (port 2049) via tcp
[13891.239618] RPC: Couldn't create auth handle (flavor 390004)
[13891.241556] nfs_create_rpc_client: cannot create RPC client. Error = -22
[13891.243306] RPC: destroy backchannel transport
[13891.244017] RPC: set up xprt to 10.0.0.2 (port 2049) via tcp
[13891.244610] RPC: backchannel list empty= true
[13891.246232] RPC: xs_connect scheduled xprt 000000005ddf4c3d
[13891.247547] RPC: xs_destroy xprt 00000000aeaed403
[13891.250574] RPC: xs_bind 0.0.0.0:873: ok (0)
[13891.252225] RPC: worker connecting xprt 000000005ddf4c3d via tcp to 10.0.0.2 (port 2049)
[13891.254253] RPC: xs_tcp_state_change client 000000005ddf4c3d...
[13891.255693] RPC: state 1 conn 0 dead 0 zapped 1 sk_shutdown 0
[13891.257195] svc: server 000000007f0b7417, pool 0, transport 00000000f97f22bc, inuse=2
[13891.258946] RPC: 000000005ddf4c3d connect status 115 connected 1 sock state 1
[13891.260685] RPC: xs_close xprt 00000000aeaed403
[13891.260794] RPC: xs_tcp_send_request(40) = 0
[13891.263161] svc: server 000000007f0b7417, pool 0, transport 00000000f97f22bc, inuse=2
[13891.264876] svc: svc_authenticate (0)
[13891.264981] svc: server 00000000b4f0c1f0, pool 0, transport 00000000f97f22bc, inuse=3
[13891.267954] svc: calling dispatcher
[13891.270971] RPC: xs_data_ready...
[13891.273229] RPC: setup backchannel transport
[13891.274481] RPC: adding req= 000000002ea0e13e
[13891.275823] RPC: setup backchannel transport done
[13891.278253] svc: initialising pool 0 for NFSv4 callback
[13891.279811] nfs_callback_create_svc: service created
[13891.281138] NFS: create per-net callback data; net=f0000304
FYI, the tests are run in network namespaces, so there should be no real
network issue. But I'll retest on a real network. I admit that netns + tmpfs
might be an unrealistic scenario in practice.
Kind regards,
Petr
> --b.
Hi all,
> On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
> > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
> We should figure out the reason for the failure. A network trace might
> help.
> --b.
I'm sorry, the previous log I sent was from some different debugging.
Sending the actual log for version 4 (duplicate events removed).
Logs for 4.1 and 4.2 are slightly different, but "cannot create RPC client" is
the same; I can also send mount.nfs strace output if useful.
Some more info (tested on both 2f669b6f, the current master, and d4066486):
version 3 is not affected; affected versions are 4, 4.1 and 4.2.
rpcdebug -m nfs -s all; rpcdebug -m nfsd -s all; rpcdebug -m rpc -s all
[14800.072718] nfsd_inet6addr_event: removed fd00:0001:0001:0001:0000:0000:0000:0002
[14800.074310] nfsd_inet6addr_event: removed fe80:0000:0000:0000:1463:9fff:fea6:01b1
[14800.079600] nfsd_inetaddr_event: removed 10.0.0.2
[14800.083410] IPv6: ADDRCONF(NETDEV_CHANGE): ltp_ns_veth2: link becomes ready
[14800.280525] NFS: parsing nfs mount option 'source'
[14800.282709] NFS: parsing nfs mount option 'proto'
[14800.284300] NFS: parsing nfs mount option 'vers'
[14800.286041] NFS: parsing nfs mount option 'addr'
[14800.287636] NFS: parsing nfs mount option 'clientaddr'
[14800.289344] NFS: MNTPATH: '/tmp/ltp.nfs01.nfs-4/LTP_nfs01.ny7RbNMo0D/4/tcp'
[14800.290754] --> nfs4_try_get_tree()
[14800.292518] NFS: get client cookie (0x00000000375cff7e/0x00000000c0f026bb)
[14800.293862] RPC: set up xprt to 10.0.0.2 (port 2049) via tcp
[14800.295100] RPC: Couldn't create auth handle (flavor 390004)
[14800.296295] nfs_create_rpc_client: cannot create RPC client. Error = -22
[14800.298345] RPC: set up xprt to 10.0.0.2 (port 2049) via tcp
[14800.298632] RPC: destroy backchannel transport
[14800.299710] RPC: xs_connect scheduled xprt 000000004980c4c0
[14800.300781] RPC: backchannel list empty= true
[14800.300782] RPC: xs_destroy xprt 000000001716165f
[14800.301466] RPC: xs_close xprt 000000001716165f
[14800.302416] RPC: xs_bind 0.0.0.0:921: ok (0)
[14800.306603] RPC: worker connecting xprt 000000004980c4c0 via tcp to 10.0.0.2 (port 2049)
[14800.308268] RPC: xs_tcp_state_change client 000000004980c4c0...
[14800.309531] RPC: state 1 conn 0 dead 0 zapped 1 sk_shutdown 0
[14800.310827] RPC: 000000004980c4c0 connect status 115 connected 1 sock state 1
[14800.310871] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.313880] RPC: xs_tcp_send_request(40) = 0
[14800.314812] svc: svc_authenticate (0)
[14800.316079] svc: calling dispatcher
[14800.316669] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.317884] RPC: xs_data_ready...
[14800.319547] --> nfs4_realloc_slot_table: max_reqs=1024, tbl->max_slots 0
[14800.322475] nfs4_realloc_slot_table: tbl=000000002fac0907 slots=000000005bb343d4 max_slots=1024
[14800.324088] <-- nfs4_realloc_slot_table: return 0
[14800.325937] svc: initialising pool 0 for NFSv4 callback
[14800.327108] nfs_callback_create_svc: service created
[14800.328222] NFS: create per-net callback data; net=f0000304
[14800.330210] NFS: Callback listener port = 35999 (af 2, net f0000304)
[14800.332216] NFS: Callback listener port = 33759 (af 10, net f0000304)
[14800.334658] nfs_callback_up: service started
[14800.335704] svc: svc_destroy(NFSv4 callback, 2)
[14800.336789] NFS: nfs4_discover_server_trunking: testing '10.0.0.2'
[14800.338131] NFS call setclientid auth=UNIX, 'Linux NFSv4.0 opensuse-20201019/10.0.0.2'
[14800.339738] RPC: xs_tcp_send_request(184) = 0
[14800.339776] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.342797] svc: svc_authenticate (1)
[14800.343828] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.345197] RPC: Want update, refage=120, age=0
[14800.347277] RPC: Want update, refage=120, age=0
[14800.348484] svc: calling dispatcher
[14800.350321] RPC: xs_data_ready...
[14800.352245] NFS reply setclientid: 0
[14800.353334] NFS call setclientid_confirm auth=UNIX, (client ID 168c82609a2db6cf)
[14800.354955] RPC: xs_tcp_send_request(112) = 0
[14800.354990] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.358191] svc: svc_authenticate (1)
[14800.358553] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.360759] svc: calling dispatcher
[14800.362584] NFSD: move_to_confirm nfs4_client 00000000d2d16ac0
[14800.364089] RPC: set up xprt to 10.0.0.1 (port 35999) via tcp
[14800.365557] RPC: xs_connect scheduled xprt 000000001716165f
[14800.365863] RPC: xs_data_ready...
[14800.366918] RPC: xs_bind 10.0.0.2:1017: ok (0)
[14800.368053] NFS reply setclientid_confirm: 0
[14800.369181] RPC: worker connecting xprt 000000001716165f via tcp to 10.0.0.1 (port 35999)
[14800.370252] nfs4_schedule_state_renewal: requeueing work. Lease period = 5
[14800.372248] RPC: xs_tcp_state_change client 000000001716165f...
[14800.375198] RPC: state 1 conn 0 dead 0 zapped 1 sk_shutdown 0
[14800.376575] RPC: 000000001716165f connect status 115 connected 1 sock state 1
[14800.377017] svc: server 0000000037f454bf, pool 0, transport 0000000036c0bdf0, inuse=2
[14800.378141] RPC: xs_tcp_send_request(80) = 0
[14800.379650] svc: svc_authenticate (1)
[14800.382291] RPC: xs_data_ready...
[14800.383450] NFS: nfs4_discover_server_trunking: status = 0
[14800.384866] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.386485] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.388769] svc: server 0000000037f454bf, pool 0, transport 0000000036c0bdf0, inuse=2
[14800.390337] RPC: xs_tcp_send_request(120) = 0
[14800.391734] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.394532] svc: svc_authenticate (1)
[14800.395538] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.398189] svc: calling dispatcher
[14800.399205] RPC: Want update, refage=120, age=0
[14800.401214] nfsd: fh_compose(exp 00:2b/256 1/snapshot, ino=256)
[14800.402634] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.404315] RPC: xs_data_ready...
[14800.406649] decode_attr_type: type=040000
[14800.407898] decode_attr_change: change attribute=6954294660659412992
[14800.409259] decode_attr_size: file size=154
[14800.410365] decode_attr_fsid: fsid=(0x0/0x0)
[14800.411402] decode_attr_fileid: fileid=256
[14800.412435] decode_attr_fs_locations: fs_locations done, error = 0
[14800.413820] decode_attr_mode: file mode=0755
[14800.414840] decode_attr_nlink: nlink=1
[14800.415783] decode_attr_owner: uid=0
[14800.416707] decode_attr_group: gid=0
[14800.417733] decode_attr_rdev: rdev=(0x0:0x0)
[14800.418845] decode_attr_space_used: space used=0
[14800.419881] decode_attr_time_access: atime=1619115491
[14800.421083] decode_attr_time_metadata: ctime=1618569204
[14800.422241] decode_attr_time_modify: mtime=1618569204
[14800.423320] decode_attr_mounted_on_fileid: fileid=1
[14800.424513] decode_getfattr_attrs: xdr returned 0
[14800.425614] decode_getfattr_generic: xdr returned 0
[14800.426669] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.426713] NFS call setclientid auth=UNIX, 'Linux NFSv4.0 opensuse-20201019/10.0.0.2'
[14800.429835] RPC: xs_tcp_send_request(184) = 0
[14800.431412] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.434120] svc: svc_authenticate (1)
[14800.434166] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.436380] svc: calling dispatcher
[14800.438361] RPC: xs_data_ready...
[14800.439350] NFS reply setclientid: 0
[14800.440253] NFS call setclientid_confirm auth=UNIX, (client ID 168c82609a2db6cf)
[14800.442050] RPC: xs_tcp_send_request(112) = 0
[14800.442051] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.445339] svc: svc_authenticate (1)
[14800.445795] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.446427] svc: calling dispatcher
[14800.449387] RPC: set up xprt to 10.0.0.1 (port 35999) via tcp
[14800.451055] RPC: xs_connect scheduled xprt 00000000b9cef903
[14800.452346] RPC: xs_bind 10.0.0.2:949: ok (0)
[14800.453103] RPC: destroy backchannel transport
[14800.453725] RPC: worker connecting xprt 00000000b9cef903 via tcp to 10.0.0.1 (port 35999)
[14800.454807] RPC: backchannel list empty= true
[14800.456406] RPC: xs_tcp_state_change client 00000000b9cef903...
[14800.457444] RPC: xs_destroy xprt 000000001716165f
[14800.458832] RPC: state 1 conn 0 dead 0 zapped 1 sk_shutdown 0
[14800.461346] RPC: 00000000b9cef903 connect status 115 connected 1 sock state 1
[14800.461982] RPC: xs_data_ready...
[14800.463235] RPC: xs_tcp_send_request(80) = 0
[14800.464163] RPC: xs_close xprt 000000001716165f
[14800.465413] svc: server 0000000037f454bf, pool 0, transport 00000000587e5d0e, inuse=2
[14800.467446] RPC: xs_tcp_state_change client 000000001716165f...
[14800.468005] svc: svc_authenticate (1)
[14800.469354] RPC: state 4 conn 1 dead 0 zapped 1 sk_shutdown 3
[14800.470337] NFS reply setclientid_confirm: 0
[14800.473062] --> nfs4_get_lease_time_prepare
[14800.474272] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.475926] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.477197] <-- nfs4_get_lease_time_prepare
[14800.479389] RPC: xs_data_ready...
[14800.481004] svc: server 0000000037f454bf, pool 0, transport 00000000587e5d0e, inuse=2
[14800.483759] RPC: xs_tcp_send_request(108) = 0
[14800.485086] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.486634] svc: svc_authenticate (1)
[14800.486678] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.487662] svc: calling dispatcher
[14800.490136] nfsd: fh_compose(exp 00:2b/256 1/snapshot, ino=256)
[14800.491396] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.494042] RPC: xs_data_ready...
[14800.495690] decode_attr_lease_time: lease time=90
[14800.496924] decode_attr_maxfilesize: maxfilesize=0
[14800.498144] decode_attr_maxread: maxread=1024
[14800.499160] decode_attr_maxwrite: maxwrite=1024
[14800.500188] decode_attr_time_delta: time_delta=0 0
[14800.501373] decode_attr_pnfstype: bitmap is 0
[14800.502884] decode_attr_layout_blksize: bitmap is 0
[14800.503986] decode_attr_clone_blksize: bitmap is 0
[14800.505105] decode_attr_xattrsupport: XATTR support=false
[14800.506377] decode_fsinfo: xdr returned 0!
[14800.507376] --> nfs4_get_lease_time_done
[14800.508392] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.509692] <-- nfs4_get_lease_time_done
[14800.510785] nfs4_schedule_state_renewal: requeueing work. Lease period = 60
[14800.512069] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.513788] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.515322] --> nfs4_alloc_slot used_slots=0001 highest_used=0 max_slots=1024
[14800.515417] RPC: xs_tcp_send_request(124) = 0
[14800.516681] <-- nfs4_alloc_slot used_slots=0003 highest_used=1 slotid=1
[14800.516682] nfs4_free_slot: slotid 1 highest_used_slotid 0
[14800.520157] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.522542] svc: svc_authenticate (1)
[14800.522552] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.525723] svc: calling dispatcher
[14800.526792] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.528612] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.531547] RPC: xs_data_ready...
[14800.532940] decode_attr_supported: bitmask=fdffbfff:00f9be3e:00000000
[14800.534531] decode_attr_fh_expire_type: expire type=0x0
[14800.535725] decode_attr_link_support: link support=true
[14800.536903] decode_attr_symlink_support: symlink support=true
[14800.538190] decode_attr_aclsupport: ACLs supported=3
[14800.539279] decode_attr_exclcreat_supported: bitmask=00000000:00000000:00000000
[14800.540771] decode_server_caps: xdr returned 0!
[14800.541862] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.544291] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.545870] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.547897] RPC: xs_tcp_send_request(128) = 0
[14800.547942] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.550686] svc: svc_authenticate (1)
[14800.550696] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.553660] svc: calling dispatcher
[14800.554652] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.556145] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.557709] RPC: xs_data_ready...
[14800.558760] decode_attr_lease_time: lease time=90
[14800.559953] decode_attr_maxfilesize: maxfilesize=9223372036854775807
[14800.561356] decode_attr_maxread: maxread=262144
[14800.562624] decode_attr_maxwrite: maxwrite=262144
[14800.563691] decode_attr_time_delta: time_delta=0 4000000
[14800.564842] decode_attr_pnfstype: bitmap is 0
[14800.565949] decode_attr_layout_blksize: bitmap is 0
[14800.566991] decode_attr_clone_blksize: bitmap is 0
[14800.568051] decode_attr_xattrsupport: XATTR support=false
[14800.569189] decode_fsinfo: xdr returned 0!
[14800.570291] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.571563] Server FSID: 0:0
[14800.572362] Pseudo-fs root FH at 0000000027e10228 is 8 bytes, crc: 0x62d40c52:
[14800.574375] 01000100 00000000
[14800.575881] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.577423] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.579460] RPC: xs_tcp_send_request(124) = 0
[14800.579468] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.582290] svc: svc_authenticate (1)
[14800.582299] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.584810] svc: calling dispatcher
[14800.586149] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.587905] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.590556] RPC: xs_data_ready...
[14800.591722] decode_attr_supported: bitmask=fdffbfff:00f9be3e:00000000
[14800.593175] decode_attr_fh_expire_type: expire type=0x0
[14800.594431] decode_attr_link_support: link support=true
[14800.595599] decode_attr_symlink_support: symlink support=true
[14800.596807] decode_attr_aclsupport: ACLs supported=3
[14800.598052] decode_attr_exclcreat_supported: bitmask=00000000:00000000:00000000
[14800.599408] decode_server_caps: xdr returned 0!
[14800.600460] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.602427] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.604231] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.606368] RPC: xs_tcp_send_request(128) = 0
[14800.606400] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.609425] svc: svc_authenticate (1)
[14800.609830] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.611857] svc: calling dispatcher
[14800.613382] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.615566] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.617774] RPC: xs_data_ready...
[14800.618878] decode_attr_lease_time: lease time=90
[14800.620114] decode_attr_maxfilesize: maxfilesize=9223372036854775807
[14800.621481] decode_attr_maxread: maxread=262144
[14800.622606] decode_attr_maxwrite: maxwrite=262144
[14800.623718] decode_attr_time_delta: time_delta=0 4000000
[14800.624904] decode_attr_pnfstype: bitmap is 0
[14800.626040] decode_attr_layout_blksize: bitmap is 0
[14800.627086] decode_attr_clone_blksize: bitmap is 0
[14800.628147] decode_attr_xattrsupport: XATTR support=false
[14800.629278] decode_fsinfo: xdr returned 0!
[14800.630307] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.631562] set_pnfs_layoutdriver: Using NFSv4 I/O
[14800.633911] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.635477] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.637411] RPC: xs_tcp_send_request(124) = 0
[14800.637457] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.640086] svc: svc_authenticate (1)
[14800.641307] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.642092] svc: calling dispatcher
[14800.643897] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.646029] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.647631] RPC: xs_data_ready...
[14800.649827] decode_attr_maxlink: maxlink=255
[14800.650923] decode_attr_maxname: maxname=255
[14800.652117] decode_pathconf: xdr returned 0!
[14800.653154] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.654451] NFS: parsing nfs mount option 'source'
[14800.655504] NFS: MNTPATH: '/'
[14800.658220] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.659739] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.661818] RPC: xs_tcp_send_request(124) = 0
[14800.661826] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.665600] svc: svc_authenticate (1)
[14800.665618] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.667892] svc: calling dispatcher
[14800.669153] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.672199] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.674996] RPC: xs_data_ready...
[14800.676133] decode_attr_supported: bitmask=fdffbfff:00f9be3e:00000000
[14800.677669] decode_attr_fh_expire_type: expire type=0x0
[14800.678859] decode_attr_link_support: link support=true
[14800.679990] decode_attr_symlink_support: symlink support=true
[14800.681204] decode_attr_aclsupport: ACLs supported=3
[14800.682355] decode_attr_exclcreat_supported: bitmask=00000000:00000000:00000000
[14800.683691] decode_server_caps: xdr returned 0!
[14800.684718] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.686581] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.688102] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.689884] RPC: xs_tcp_send_request(128) = 0
[14800.690292] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.693183] svc: svc_authenticate (1)
[14800.693223] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.695871] svc: calling dispatcher
[14800.697064] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.700006] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.702920] RPC: xs_data_ready...
[14800.704078] decode_attr_type: type=040000
[14800.705241] decode_attr_change: change attribute=6954294660659412992
[14800.706688] decode_attr_size: file size=154
[14800.707766] decode_attr_fsid: fsid=(0x0/0x0)
[14800.708819] decode_attr_fileid: fileid=256
[14800.709846] decode_attr_fs_locations: fs_locations done, error = 0
[14800.711117] decode_attr_mode: file mode=0755
[14800.712321] decode_attr_nlink: nlink=1
[14800.713304] decode_attr_owner: uid=0
[14800.714253] decode_attr_group: gid=0
[14800.715131] decode_attr_rdev: rdev=(0x0:0x0)
[14800.716079] decode_attr_space_used: space used=0
[14800.717089] decode_attr_time_access: atime=1619115491
[14800.718218] decode_attr_time_metadata: ctime=1618569204
[14800.719287] decode_attr_time_modify: mtime=1618569204
[14800.720315] decode_attr_mounted_on_fileid: fileid=1
[14800.721505] decode_getfattr_attrs: xdr returned 0
[14800.722631] decode_getfattr_generic: xdr returned 0
[14800.724122] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.726027] NFS: nfs_fhget(0:177/256 fh_crc=0x62d40c52 ct=1)
[14800.727814] NFS: permission(0:177/256), mask=0x81, res=-10
[14800.729995] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.731405] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.733497] RPC: xs_tcp_send_request(136) = 0
[14800.733500] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.736136] svc: svc_authenticate (1)
[14800.736180] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.739234] svc: calling dispatcher
[14800.740138] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.742609] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.744470] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.746571] RPC: xs_data_ready...
[14800.747584] decode_attr_type: type=00
[14800.748576] decode_attr_change: change attribute=6954294660659412992
[14800.749881] decode_attr_size: file size=154
[14800.750876] decode_attr_fsid: fsid=(0x0/0x0)
[14800.751873] decode_attr_fileid: fileid=0
[14800.753003] decode_attr_fs_locations: fs_locations done, error = 0
[14800.754249] decode_attr_mode: file mode=00
[14800.755185] decode_attr_nlink: nlink=1
[14800.756064] decode_attr_rdev: rdev=(0x0:0x0)
[14800.756982] decode_attr_space_used: space used=0
[14800.757981] decode_attr_time_access: atime=0
[14800.758876] decode_attr_time_metadata: ctime=1618569204
[14800.759867] decode_attr_time_modify: mtime=1618569204
[14800.760841] decode_attr_mounted_on_fileid: fileid=0
[14800.762046] decode_getfattr_attrs: xdr returned 0
[14800.763191] decode_getfattr_generic: xdr returned 0
[14800.764117] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.765290] NFS: nfs_update_inode(0:177/256 fh_crc=0x62d40c52 ct=2 info=0x26040)
[14800.766696] NFS: permission(0:177/256), mask=0x1, res=0
[14800.767916] NFS: lookup(/tmp)
[14800.769333] NFS call lookup /tmp
[14800.770415] --> nfs4_alloc_slot used_slots=0000 highest_used=4294967295 max_slots=1024
[14800.772221] <-- nfs4_alloc_slot used_slots=0001 highest_used=0 slotid=0
[14800.774803] RPC: xs_tcp_send_request(144) = 0
[14800.774846] svc: server 00000000b086fa8a, pool 0, transport 00000000b7a9b30d, inuse=2
[14800.777429] svc: svc_authenticate (1)
[14800.778439] svc: server 0000000002aec424, pool 0, transport 00000000b7a9b30d, inuse=3
[14800.780803] svc: calling dispatcher
[14800.781771] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.784033] nfsd: fh_verify(8: 00010001 00000000 00000000 00000000 00000000 00000000)
[14800.786220] nfsd: nfsd_lookup(fh 8: 00010001 00000000 00000000 00000000 00000000 00000000, tmp)
[14800.787811] RPC: Want update, refage=120, age=0
[14800.789583] exp_export: export of non-dev fs without fsid
[14800.791151] RPC: xs_data_ready...
[14800.792218] nfs4_free_slot: slotid 0 highest_used_slotid 4294967295
[14800.793965] NFS reply lookup: -2
[14800.796355] NFS: dentry_delete(/tmp, 80c)
[14800.798250] NFS4: Couldn't follow remote path
[14800.799302] <-- nfs4_try_get_tree() = -2 [error]
[14800.801239] NFS: clear cookie (0x00000000fdcf48e1/0x0000000000000000)
[14800.804445] NFS: releasing superblock cookie (0x00000000f07d5f34/0x0000000000000000)
[14800.820712] NFS: destroy per-net callback data; net=f0000304
[14800.822199] RPC: xs_tcp_state_change client 00000000b9cef903...
[14800.823702] RPC: state 8 conn 1 dead 0 zapped 1 sk_shutdown 1
[14800.825162] RPC: xs_data_ready...
[14800.826358] RPC: xs_tcp_state_change client 00000000b9cef903...
[14800.827889] RPC: state 9 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.828772] svc: svc_destroy(NFSv4 callback, 2)
[14800.829362] RPC: xs_tcp_state_change client 00000000b9cef903...
[14800.832015] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.833436] RPC: xs_tcp_state_change client 00000000b9cef903...
[14800.835001] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.836386] RPC: xs_tcp_state_change client 00000000b9cef903...
[14800.836434] svc: svc_destroy(NFSv4 callback, 1)
[14800.837901] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.840374] nfs_callback_down: service destroyed
[14800.842234] NFS: releasing client cookie (0x00000000375cff7e/0x00000000c0f026bb)
[14800.844396] RPC: destroy backchannel transport
[14800.845641] RPC: backchannel list empty= true
[14800.846832] RPC: xs_destroy xprt 000000004980c4c0
[14800.848049] RPC: xs_close xprt 000000004980c4c0
[14800.849247] RPC: xs_tcp_state_change client 000000004980c4c0...
[14800.850593] RPC: state 4 conn 1 dead 0 zapped 1 sk_shutdown 3
[14800.858235] svc: server 00000000b086fa8a, pool 0, transport 00000000f25ede2b, inuse=2
[14800.859794] svc: svc_authenticate (0)
[14800.859806] svc: server 0000000002aec424, pool 0, transport 00000000f25ede2b, inuse=3
[14800.860842] svc: calling dispatcher
[14800.865076] NFS: parsing nfs mount option 'source'
[14800.866299] NFS: parsing nfs mount option 'addr'
[14800.867434] NFS: parsing nfs mount option 'vers'
[14800.869168] NFS: parsing nfs mount option 'proto'
[14800.870519] NFS: parsing nfs mount option 'mountvers'
[14800.872284] NFS: parsing nfs mount option 'mountproto'
[14800.874141] NFS: parsing nfs mount option 'mountport'
[14800.875415] NFS: MNTPATH: '/tmp/ltp.nfs01.nfs-4/LTP_nfs01.ny7RbNMo0D/4/tcp'
[14800.877006] NFS: sending MNT request for 10.0.0.2:/tmp/ltp.nfs01.nfs-4/LTP_nfs01.ny7RbNMo0D/4/tcp
[14800.879436] RPC: set up xprt to 10.0.0.2 (port 20048) via tcp
[14800.880931] RPC: xs_connect scheduled xprt 00000000a9c1d171
[14800.882347] RPC: xs_bind 0.0.0.0:817: ok (0)
[14800.883515] RPC: worker connecting xprt 00000000a9c1d171 via tcp to 10.0.0.2 (port 20048)
[14800.885281] RPC: xs_tcp_state_change client 00000000a9c1d171...
[14800.886700] RPC: state 1 conn 0 dead 0 zapped 1 sk_shutdown 0
[14800.888081] RPC: 00000000a9c1d171 connect status 115 connected 1 sock state 1
[14800.890080] RPC: xs_tcp_send_request(40) = 0
[14800.890230] RPC: xs_data_ready...
[14800.892396] RPC: xs_tcp_send_request(136) = 0
[14800.892819] nfsd: exp_rootfh(/tmp/ltp.nfs01.nfs-4/LTP_nfs01.ny7RbNMo0D/4/tcp [000000006b821e1e] *:tmpfs/7310)
[14800.895506] nfsd: fh_compose(exp 00:32/7310 4/tcp, ino=7310)
[14800.896885] RPC: xs_data_ready...
[14800.898014] NFS: received 1 auth flavors
[14800.899151] NFS: auth flavor[0]: 1
[14800.900123] NFS: MNT request succeeded
[14800.901099] NFS: attempting to use auth flavor 1
[14800.902226] RPC: destroy backchannel transport
[14800.903431] RPC: backchannel list empty= true
[14800.904839] RPC: xs_destroy xprt 00000000a9c1d171
[14800.905284] NFS: get client cookie (0x000000006b121986/0x00000000ae4f1e65)
[14800.906087] RPC: xs_close xprt 00000000a9c1d171
[14800.907505] RPC: set up xprt to 10.0.0.2 (autobind) via tcp
[14800.910209] RPC: xs_tcp_state_change client 00000000a9c1d171...
[14800.910343] RPC: set up xprt to 10.0.0.2 (port 111) via tcp
[14800.911509] RPC: state 4 conn 1 dead 0 zapped 1 sk_shutdown 3
[14800.914246] RPC: xs_tcp_state_change client 00000000a9c1d171...
[14800.915516] RPC: state 5 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.916838] RPC: xs_tcp_state_change client 00000000a9c1d171...
[14800.917207] RPC: xs_connect scheduled xprt 00000000f9895d79
[14800.918155] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.919363] RPC: worker connecting xprt 00000000f9895d79 via tcp to 10.0.0.2 (port 111)
[14800.920665] RPC: xs_tcp_state_change client 00000000a9c1d171...
[14800.920667] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.920668] RPC: xs_data_ready...
[14800.926561] RPC: xs_tcp_state_change client 00000000f9895d79...
[14800.927941] RPC: state 1 conn 0 dead 0 zapped 1 sk_shutdown 0
[14800.929215] RPC: 00000000f9895d79 connect status 115 connected 1 sock state 1
[14800.930761] RPC: xs_tcp_send_request(96) = 0
[14800.930852] RPC: xs_data_ready...
[14800.933674] RPC: setting port for xprt 0000000011a852a8 to 2049
[14800.935625] RPC: destroy backchannel transport
[14800.936806] RPC: backchannel list empty= true
[14800.938032] RPC: xs_destroy xprt 00000000f9895d79
[14800.939134] RPC: xs_close xprt 00000000f9895d79
[14800.940237] RPC: xs_tcp_state_change client 00000000f9895d79...
[14800.940325] RPC: xs_connect scheduled xprt 0000000011a852a8
[14800.941582] RPC: state 4 conn 1 dead 0 zapped 1 sk_shutdown 3
[14800.941587] RPC: xs_tcp_state_change client 00000000f9895d79...
[14800.944605] RPC: xs_bind 0.0.0.0:752: ok (0)
[14800.945746] RPC: state 5 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.946941] RPC: worker connecting xprt 0000000011a852a8 via tcp to 10.0.0.2 (port 2049)
[14800.948135] RPC: xs_tcp_state_change client 00000000f9895d79...
[14800.949759] RPC: xs_tcp_state_change client 0000000011a852a8...
[14800.951304] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.952740] RPC: state 1 conn 0 dead 0 zapped 1 sk_shutdown 0
[14800.953992] RPC: xs_tcp_state_change client 00000000f9895d79...
[14800.955310] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14800.956450] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.958113] RPC: 0000000011a852a8 connect status 115 connected 1 sock state 1
[14800.959338] RPC: xs_data_ready...
[14800.960810] RPC: xs_tcp_send_request(40) = 0
[14800.964247] svc: svc_authenticate (0)
[14800.964289] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14800.965237] svc: calling dispatcher
[14800.967725] RPC: xs_data_ready...
[14800.968695] RPC: worker connecting xprt 000000004980c4c0 via AF_LOCAL to /var/run/rpcbind.sock
[14800.969287] RPC: xs_tcp_state_change client 00000000f9895d79...
[14800.970437] RPC: xprt 000000004980c4c0 connected to /var/run/rpcbind.sock
[14800.971698] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14800.973318] RPC: set up xprt to /var/run/rpcbind.sock via AF_LOCAL
[14800.976080] RPC: xs_local_send_request(40) = 0
[14800.976272] RPC: xs_data_ready...
[14800.978959] RPC: xs_local_send_request(40) = 0
[14800.979002] RPC: xs_data_ready...
[14800.981921] RPC: xs_local_send_request(64) = 0
[14800.982580] RPC: xs_data_ready...
[14800.984412] RPC: xs_local_send_request(64) = 0
[14800.984462] RPC: xs_data_ready...
[14800.986646] RPC: xs_local_send_request(64) = 0
[14800.987141] RPC: xs_data_ready...
[14800.989470] RPC: xs_local_send_request(84) = 0
[14800.989511] RPC: xs_data_ready...
[14800.992059] RPC: xs_local_send_request(84) = 0
[14800.992116] RPC: xs_data_ready...
[14800.994784] RPC: xs_local_send_request(84) = 0
[14800.995175] RPC: xs_data_ready...
[14800.997352] svc: server 000000004d3956fa, pool 0, transport 0000000052605e92, inuse=2
[14800.999424] RPC: xs_local_send_request(84) = 0
[14800.999485] RPC: xs_data_ready...
[14801.002638] RPC: xs_local_send_request(84) = 0
[14801.002703] RPC: xs_data_ready...
[14801.005407] RPC: xs_local_send_request(84) = 0
[14801.005449] RPC: xs_data_ready...
[14801.007816] RPC: xs_local_send_request(80) = 0
[14801.007867] RPC: xs_data_ready...
[14801.010230] RPC: xs_local_send_request(80) = 0
[14801.010831] RPC: xs_data_ready...
[14801.013165] RPC: xs_local_send_request(80) = 0
[14801.013237] RPC: xs_data_ready...
[14801.015507] svc: server 000000004d3956fa, pool 0, transport 0000000026ef78d5, inuse=2
[14801.017588] RPC: xs_local_send_request(80) = 0
[14801.017751] RPC: xs_data_ready...
[14801.019793] RPC: xs_local_send_request(80) = 0
[14801.019874] RPC: xs_data_ready...
[14801.022126] RPC: xs_local_send_request(80) = 0
[14801.022701] RPC: xs_data_ready...
[14801.024846] svc: svc_destroy(lockd, 2)
[14801.025871] RPC: set up xprt to 10.0.0.2 (autobind) via tcp
[14801.027229] NFS call fsinfo
[14801.028101] RPC: xs_tcp_send_request(96) = 0
[14801.028146] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14801.030695] svc: svc_authenticate (1)
[14801.030702] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14801.031618] svc: calling dispatcher
[14801.034034] nfsd: FSINFO(3) 8: 00010001 00007805 00000000 00000000 00000000 00000000
[14801.035615] nfsd: fh_verify(8: 00010001 00007805 00000000 00000000 00000000 00000000)
[14801.038093] RPC: Want update, refage=120, age=0
[14801.039871] found domain *
[14801.040689] found fsidtype 1
[14801.041488] found fsid length 4
[14801.042603] Path seems to be </tmp/ltp.nfs01.nfs-4/LTP_nfs01.ny7RbNMo0D/4/tcp>
[14801.043967] Found the path /tmp/ltp.nfs01.nfs-4/LTP_nfs01.ny7RbNMo0D/4/tcp
[14801.046055] RPC: xs_data_ready...
[14801.047745] NFS reply fsinfo: 0
[14801.048737] NFS call pathconf
[14801.050909] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14801.052513] svc: svc_authenticate (1)
[14801.052604] RPC: xs_tcp_send_request(96) = 0
[14801.053463] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14801.057360] svc: calling dispatcher
[14801.058309] nfsd: PATHCONF(3) 8: 00010001 00007805 00000000 00000000 00000000 00000000
[14801.059862] nfsd: fh_verify(8: 00010001 00007805 00000000 00000000 00000000 00000000)
[14801.062640] RPC: xs_data_ready...
[14801.063900] NFS reply pathconf: 0
[14801.064912] NFS call getattr
[14801.066134] RPC: xs_tcp_send_request(96) = 0
[14801.066171] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14801.069348] svc: svc_authenticate (1)
[14801.070417] svc: calling dispatcher
[14801.070416] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14801.071403] nfsd: GETATTR(3) 8: 00010001 00007805 00000000 00000000 00000000 00000000
[14801.074803] nfsd: fh_verify(8: 00010001 00007805 00000000 00000000 00000000 00000000)
[14801.076993] RPC: xs_data_ready...
[14801.078157] NFS reply getattr: 0
[14801.079134] Server FSID: 7805:0
[14801.080173] RPC: xs_tcp_send_request(40) = 0
[14801.082031] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14801.083697] svc: svc_authenticate (0)
[14801.083741] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14801.084705] svc: calling dispatcher
[14801.084734] RPC: xs_data_ready...
[14801.088899] do_proc_get_root: call fsinfo
[14801.090168] RPC: xs_tcp_send_request(96) = 0
[14801.090179] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14801.092963] svc: svc_authenticate (1)
[14801.093567] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14801.093995] svc: calling dispatcher
[14801.096599] nfsd: FSINFO(3) 8: 00010001 00007805 00000000 00000000 00000000 00000000
[14801.098276] nfsd: fh_verify(8: 00010001 00007805 00000000 00000000 00000000 00000000)
[14801.100449] RPC: xs_data_ready...
[14801.102712] do_proc_get_root: reply fsinfo: 0
[14801.103899] RPC: xs_tcp_send_request(96) = 0
[14801.103909] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14801.106714] svc: svc_authenticate (1)
[14801.106763] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14801.108849] svc: calling dispatcher
[14801.110560] nfsd: GETATTR(3) 8: 00010001 00007805 00000000 00000000 00000000 00000000
[14801.112146] nfsd: fh_verify(8: 00010001 00007805 00000000 00000000 00000000 00000000)
[14801.114923] RPC: xs_data_ready...
[14801.116087] do_proc_get_root: reply getattr: 0
[14801.118023] NFS: nfs_fhget(0:177/7310 fh_crc=0x0fe8c588 ct=1)
[14801.126002] NFS: nfs_weak_revalidate: inode 7310 is valid
[14801.127314] NFS: nfs_weak_revalidate: inode 7310 is valid
[14801.128589] NFS call access
[14801.129585] RPC: xs_tcp_send_request(100) = 0
[14801.130764] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14801.132332] svc: svc_authenticate (1)
[14801.132377] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14801.133407] svc: calling dispatcher
[14801.135973] nfsd: ACCESS(3) 8: 00010001 00007805 00000000 00000000 00000000 00000000 0x1f
[14801.137542] nfsd: fh_verify(8: 00010001 00007805 00000000 00000000 00000000 00000000)
[14801.139900] RPC: xs_data_ready...
...
[14915.174477] NFS: nfs_update_inode(0:177/7310 fh_crc=0x0fe8c588 ct=2 info=0x27e7f)
[14915.176367] NFS: (0:177/7310) revalidation complete
[14915.177702] NFS: nfs_weak_revalidate: inode 7310 is valid
[14915.179100] NFS: permission(0:177/7310), mask=0x24, res=0
[14915.180614] NFS: open dir(/)
[14915.183496] NFS: nfs_weak_revalidate: inode 7310 is valid
[14915.185979] NFS: readdir(/) starting at cookie 0
[14915.189249] NFS: nfs_do_filldir() filling ended @ cookie 127
[14915.191854] NFS: nfs_do_filldir() filling ended @ cookie 254
[14915.193412] NFS: nfs_do_filldir() filling ended @ cookie 381
[14915.194895] NFS: nfs_do_filldir() filling ended @ cookie 508
[14915.197265] NFS: nfs_do_filldir() filling ended @ cookie 635
[14915.198721] NFS: nfs_do_filldir() filling ended @ cookie 762
[14915.201117] NFS: nfs_do_filldir() filling ended @ cookie 889
[14915.203767] NFS: nfs_do_filldir() filling ended @ cookie 1002
[14915.205363] NFS: readdir(/) returns 0
[14915.206898] NFS: readdir(/) starting at cookie 1002
[14915.208769] NFS: readdir(/) returns 0
[14915.215462] NFS: nfs_weak_revalidate: inode 7310 is valid
[14915.217845] NFS: nfs_weak_revalidate: inode 7310 is valid
[14915.220055] NFS call fsstat
[14915.221310] svc: server 00000000b086fa8a, pool 0, transport 00000000ea621c47, inuse=2
[14915.223020] svc: svc_authenticate (1)
[14915.225255] svc: server 0000000002aec424, pool 0, transport 00000000ea621c47, inuse=3
[14915.226911] RPC: xs_tcp_send_request(96) = 0
[14915.227238] RPC: Want update, refage=120, age=115
[14915.230027] svc: calling dispatcher
[14915.231301] nfsd: FSSTAT(3) 8: 00010001 00007805 00000000 00000000 00000000 00000000
[14915.233507] nfsd: fh_verify(8: 00010001 00007805 00000000 00000000 00000000 00000000)
[14915.236729] RPC: xs_data_ready...
[14915.239221] NFS reply fsstat: 0
[14915.246263] NFS: nfs_weak_revalidate: inode 7310 is valid
[14915.255495] NFS: clear cookie (0x00000000d9d434fd/0x0000000000000000)
[14915.258423] NFS: clear cookie (0x0000000061e9c1e7/0x0000000000000000)
[14917.184910] NFS: clear cookie (0x000000000dbe2efa/0x0000000000000000)
...
[14917.186215] NFS: clear cookie (0x00000000253d681d/0x0000000000000000)
[14917.187631] NFS: clear cookie (0x0000000030cc97f8/0x0000000000000000)
[14917.190376] NFS: releasing superblock cookie (0x00000000e2c01c59/0x0000000000000000)
[14917.207498] RPC: destroy backchannel transport
[14917.209349] RPC: backchannel list empty= true
[14917.211033] RPC: xs_destroy xprt 0000000047412e6a
[14917.212928] RPC: xs_close xprt 0000000047412e6a
[14917.219492] RPC: xs_local_send_request(64) = 0
[14917.220887] RPC: xs_data_ready...
[14917.222317] RPC: xs_local_send_request(64) = 0
[14917.225222] RPC: xs_data_ready...
[14917.226733] RPC: xs_local_send_request(64) = 0
[14917.227883] RPC: xs_data_ready...
[14917.228929] RPC: xs_destroy xprt 000000004980c4c0
[14917.230225] RPC: xs_close xprt 000000004980c4c0
[14917.231730] NFS: releasing client cookie (0x000000006b121986/0x00000000ae4f1e65)
[14917.235457] RPC: destroy backchannel transport
[14917.236711] RPC: backchannel list empty= true
[14917.237952] RPC: xs_destroy xprt 0000000011a852a8
[14917.239223] RPC: xs_close xprt 0000000011a852a8
[14917.240676] RPC: xs_tcp_state_change client 0000000011a852a8...
[14917.242100] RPC: state 4 conn 1 dead 0 zapped 1 sk_shutdown 3
[14917.243546] RPC: xs_tcp_state_change client 0000000011a852a8...
[14917.245200] RPC: state 5 conn 0 dead 0 zapped 1 sk_shutdown 3
[14917.246635] RPC: xs_tcp_state_change client 0000000011a852a8...
[14917.248102] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14917.249849] RPC: xs_tcp_state_change client 0000000011a852a8...
[14917.251480] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
[14917.253001] RPC: xs_data_ready...
[14917.254049] RPC: xs_tcp_state_change client 0000000011a852a8...
[14917.255689] RPC: state 7 conn 0 dead 0 zapped 1 sk_shutdown 3
Kind regards,
Petr
On Fri, Apr 23, 2021 at 04:17:52AM +0200, Petr Vorel wrote:
> Hi,
>
> > On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
>
> > > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
>
> > We should figure out the reason for the failure. A network trace might
> > help.
>
> Anything specific you're looking for?
Actually I was thinking of capturing the network traffic, something
like:
tcpdump -s0 -wtmp.pcap -i<interface>
then try the mount, then kill tcpdump and look at tmp.pcap.
First, though, what's the output of "exportfs -v" on the server?
Note you need an "fsid=" option on tmpfs exports.
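For example, something like this (path and fsid value are just placeholders):

exportfs -o rw,sync,no_subtree_check,fsid=1234 "*:/path/on/tmpfs"

or the equivalent fsid= option in the /etc/exports entry for that directory.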
--b.
Hi Bruce,
> On Fri, Apr 23, 2021 at 04:17:52AM +0200, Petr Vorel wrote:
> > Hi,
> > > On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > > > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
> > > > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > > > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
> > > We should figure out the reason for the failure. A network trace might
> > > help.
> > Anything specific you're looking for?
> Actually I was thinking of capturing the network traffic, something
> like:
> tcpdump -s0 -wtmp.pcap -i<interface>
> then try the mount, then kill tcpdump and look at tmp.pcap.
I don't see anything suspicious, can you please have a look?
https://gitlab.com/pevik/tmp/-/raw/master/nfs.v3.pcap
https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.pcap
https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.1.pcap
https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.2.pcap
tcpdump -s0 -iany host 10.0.0.1 and 10.0.0.2
> First, though, what's the output of "exportfs -v" on the server?
* v3 (the only one working)
mount -t nfs -o proto=tcp,vers=3 10.0.0.2:/tmp/LTP_nfs01.S5dolLhIlD/3/tcp /tmp/LTP_nfs01.S5dolLhIlD/3/0
exportfs -v
/tmp/LTP_nfs01.S5dolLhIlD/3/tcp
<world>(sync,wdelay,hide,no_subtree_check,fsid=6436,sec=sys,rw,secure,no_root_squash,no_all_squash)
* v4
mount -t nfs -o proto=tcp,vers=4 10.0.0.2:/tmp/LTP_nfs01.5yzuZYRSRl/4/tcp /tmp/LTP_nfs01.5yzuZYRSRl/4/0
mount.nfs: mounting 10.0.0.2:/tmp/LTP_nfs01.5yzuZYRSRl/4/tcp failed, reason given by server: No such file or directory
exportfs -v
/tmp/LTP_nfs01.5yzuZYRSRl/4/tcp
<world>(sync,wdelay,hide,no_subtree_check,fsid=6695,sec=sys,rw,secure,no_root_squash,no_all_squash)
* v4.1
mount -t nfs -o proto=tcp,vers=4.1 10.0.0.2:/tmp/LTP_nfs01.xSWfcygtYM/4.1/tcp /tmp/LTP_nfs01.xSWfcygtYM/4.1/0
mount.nfs: mounting 10.0.0.2:/tmp/LTP_nfs01.xSWfcygtYM/4.1/tcp failed, reason given by server: No such file or directory
exportfs -v
/tmp/LTP_nfs01.xSWfcygtYM/4.1/tcp
<world>(sync,wdelay,hide,no_subtree_check,fsid=6965,sec=sys,rw,secure,no_root_squash,no_all_squash)
* v4.2
mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/LTP_nfs01.xkKpqpRikV/4.2/tcp /tmp/LTP_nfs01.xkKpqpRikV/4.2/0
mount.nfs: mounting 10.0.0.2:/tmp/LTP_nfs01.xkKpqpRikV/4.2/tcp failed, reason given by server: No such file or directory
exportfs -v
/tmp/LTP_nfs01.xkKpqpRikV/4.2/tcp
<world>(sync,wdelay,hide,no_subtree_check,fsid=7239,sec=sys,rw,secure,no_root_squash,no_all_squash)
> Note you need an "fsid=" option on tmpfs exports.
Yes, the test uses a PID-based fsid: fsid=$$
> --b.
Thanks a lot for your time!
Kind regards,
Petr
On Fri, Apr 23, 2021 at 07:04:41PM +0200, Petr Vorel wrote:
> Hi Bruce,
>
> > On Fri, Apr 23, 2021 at 04:17:52AM +0200, Petr Vorel wrote:
> > > Hi,
>
> > > > On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > > > > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
>
> > > > > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > > > > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
>
> > > > We should figure out the reason for the failure. A network trace might
> > > > help.
>
> > > Anything specific you're looking for?
>
> > Actually I was thinking of capturing the network traffic, something
> > like:
> > tcpdump -s0 -wtmp.pcap -i<interface>
>
> > then try the mount, then kill tcpdump and look at tmp.pcap.
>
> I don't see anything suspicious, can you please have a look?
> https://gitlab.com/pevik/tmp/-/raw/master/nfs.v3.pcap
> https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.pcap
> https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.1.pcap
> https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.2.pcap
It might be the "hide" option, that's odd:
> exportfs -v
> /tmp/LTP_nfs01.xkKpqpRikV/4.2/tcp
> <world>(sync,wdelay,hide,no_subtree_check,fsid=7239,sec=sys,rw,secure,no_root_squash,no_all_squash)
--b.
> On Fri, Apr 23, 2021 at 07:04:41PM +0200, Petr Vorel wrote:
> > Hi Bruce,
> > > On Fri, Apr 23, 2021 at 04:17:52AM +0200, Petr Vorel wrote:
> > > > Hi,
> > > > > On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > > > > > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
> > > > > > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > > > > > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
> > > > > We should figure out the reason for the failure. A network trace might
> > > > > help.
> > > > Anything specific you're looking for?
> > > Actually I was thinking of capturing the network traffic, something
> > > like:
> > > tcpdump -s0 -wtmp.pcap -i<interface>
> > > then try the mount, then kill tcpdump and look at tmp.pcap.
> > I don't see anything suspicious, can you please have a look?
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v3.pcap
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.pcap
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.1.pcap
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.2.pcap
> It might be the "hide" option, that's odd:
> > exportfs -v
> > /tmp/LTP_nfs01.xkKpqpRikV/4.2/tcp
> > <world>(sync,wdelay,hide,no_subtree_check,fsid=7239,sec=sys,rw,secure,no_root_squash,no_all_squash)
NOTE: "hide" is also present in the v3 export, which works.
I can even provide you with the setup. I thought it was related to the mount.nfs
change (I tested it on openSUSE and Debian, with various kernel versions), but the
real cause might be in the kernel or elsewhere.
Kind regards,
Petr
> --b.
On Sat, 24 Apr 2021, J . Bruce Fields wrote:
> On Fri, Apr 23, 2021 at 07:04:41PM +0200, Petr Vorel wrote:
> > Hi Bruce,
> >
> > > On Fri, Apr 23, 2021 at 04:17:52AM +0200, Petr Vorel wrote:
> > > > Hi,
> >
> > > > > On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > > > > > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
> >
> > > > > > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > > > > > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
> >
> > > > > We should figure out the reason for the failure. A network trace might
> > > > > help.
> >
> > > > Anything specific you're looking for?
> >
> > > Actually I was thinking of capturing the network traffic, something
> > > like:
> > > tcpdump -s0 -wtmp.pcap -i<interface>
> >
> > > then try the mount, then kill tcpdump and look at tmp.pcap.
> >
> > I don't see anything suspicious, can you please have a look?
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v3.pcap
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.pcap
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.1.pcap
> > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.2.pcap
>
> It might be the "hide" option, that's odd:
Nup. I think "hide" is ignored for NFSv4 anyway.
The problem is that a subdirectory of a tmpfs filesystem is being exported.
That requires (for NFSv4) the top of the tmpfs filesystem to be
exported with NFSEXP_V4ROOT so that an NFSv4 client can navigate down to
it.
But when mountd creates that V4ROOT export, it doesn't provide the fsid.
So the kernel rejects the export request.
We need to fix mountd to set the fsid on all exports within a filesystem
for which it was specified, particularly the NFSEXP_V4ROOT ancestors.
I might see how easy that is later.
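To make that concrete (paths taken from the exportfs -v output above; the
annotations, and the assumption that the tmpfs is mounted at /tmp, are only
illustrative):

/                                    pseudo-root, keeps the FSID flag (fsid 0)
/tmp                                 V4ROOT ancestor created by mountd, no fsid/uuid -> rejected on tmpfs
/tmp/LTP_nfs01.xkKpqpRikV, .../4.2   further V4ROOT ancestors, same problem
/tmp/LTP_nfs01.xkKpqpRikV/4.2/tcp    the real export, fsid=7239 from the export options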
NeilBrown
> On Sat, 24 Apr 2021, J . Bruce Fields wrote:
> > On Fri, Apr 23, 2021 at 07:04:41PM +0200, Petr Vorel wrote:
> > > Hi Bruce,
> > > > On Fri, Apr 23, 2021 at 04:17:52AM +0200, Petr Vorel wrote:
> > > > > Hi,
> > > > > > On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > > > > > > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
> > > > > > > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > > > > > > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
> > > > > > We should figure out the reason for the failure. A network trace might
> > > > > > help.
> > > > > Anything specific you're looking for?
> > > > Actually I was thinking of capturing the network traffic, something
> > > > like:
> > > > tcpdump -s0 -wtmp.pcap -i<interface>
> > > > then try the mount, then kill tcpdump and look at tmp.pcap.
> > > I don't see anything suspicious, can you please have a look?
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v3.pcap
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.pcap
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.1.pcap
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.2.pcap
> > It might be the "hide" option, that's odd:
> Nup. I think "hide" is ignored for NFSv4 anyway.
> Problem is that a subdirectory of a tmpfs filesystem is being exported.
> That requires (for NFSv4), the top of the tmpfs filesystem to be
> exported with NFSEXP_V4ROOT so that an NFSv4 client can navigate down to
> it.
> But when mountd creates that V4ROOT export, it doesn't provide the fsid.
> So the kernel rejects the export request.
> We need to fix mountd to set the fsid on all exports within a filesystem
> for which it was specified, particularly the NFSEXP_V4ROOT ancestors.
> I might see if I how easy that is later.
Hi Neil,
Thanks a lot for the analysis. Right, mount.nfs is not to blame, but the server
part (nfs-kernel-server) is.
Kind regards,
Petr
> NeilBrown
On Mon, May 03, 2021 at 12:21:27PM +1000, NeilBrown wrote:
> On Sat, 24 Apr 2021, J . Bruce Fields wrote:
> > On Fri, Apr 23, 2021 at 07:04:41PM +0200, Petr Vorel wrote:
> > > Hi Bruce,
> > >
> > > > On Fri, Apr 23, 2021 at 04:17:52AM +0200, Petr Vorel wrote:
> > > > > Hi,
> > >
> > > > > > On Thu, Apr 22, 2021 at 09:18:03PM +0200, Petr Vorel wrote:
> > > > > > > LTP NFS tests (which use netns) fails on tmpfs since d4066486:
> > >
> > > > > > > mount -t nfs -o proto=tcp,vers=4.2 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp /tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/0
> > > > > > > mount.nfs: mounting 10.0.0.2:/tmp/ltp.nfs01.nfs-4.2/LTP_nfs01.UF6gRZCy3O/4.2/tcp failed, reason given by server: No such file or directory
> > >
> > > > > > We should figure out the reason for the failure. A network trace might
> > > > > > help.
> > >
> > > > > Anything specific you're looking for?
> > >
> > > > Actually I was thinking of capturing the network traffic, something
> > > > like:
> > > > tcpdump -s0 -wtmp.pcap -i<interface>
> > >
> > > > then try the mount, then kill tcpdump and look at tmp.pcap.
> > >
> > > I don't see anything suspicious, can you please have a look?
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v3.pcap
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.pcap
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.1.pcap
> > > https://gitlab.com/pevik/tmp/-/raw/master/nfs.v4.2.pcap
> >
> > It might be the "hide" option, that's odd:
>
> Nup. I think "hide" is ignored for NFSv4 anyway.
Yes, that's just the default, sorry for the distraction....
> Problem is that a subdirectory of a tmpfs filesystem is being exported.
Yuck.
> That requires (for NFSv4), the top of the tmpfs filesystem to be
> exported with NFSEXP_V4ROOT so that an NFSv4 client can navigate down to
> it.
> But when mountd creates that V4ROOT export, it doesn't provide the fsid.
> So the kernel rejects the export request.
>
> We need to fix mountd to set the fsid on all exports within a filesystem
> for which it was specified, particularly the NFSEXP_V4ROOT ancestors.
Got it, that makes sense.
--b.
[[This is a proposed fix. It seems to work. I'd like
some review comments before it is committed.
Petr: it would be great if you could test it to confirm
it actually works in your case.
]]
Some filesystems cannot be exported without an fsid or uuid.
tmpfs is the main example.
When mountd creates nfsv4 pseudo-root exports for the path leading down
to an export point it exports each directory without any fsid or uuid.
If one of these directories is on tmp, that will fail.
The net result is that exporting a subdirectory of a tmpfs filesystem
will not work over NFSv4 as the parents within the filesystem cannot be
exported. It will either fail, or fall-back to NFSv3 (depending on the
version of the mount.nfs program).
To fix this we need to provide an fsid or uuid for these pseudo-root
exports. This patch does that by creating a UUID with the first 4 bytes
0xFFFFFFFF and the remaining 12 bytes formed from the path name, xoring
bytes together if the path is longer than 12 characters.
Hopefully no filesystem uses a UUID like this....
The patch borrows some code from exportfs. Maybe that code should be
moved to a library..
Signed-off-by: NeilBrown <[email protected]>
---
support/export/v4root.c | 57 +++++++++++++++++++++++++++++++++++++++++
1 file changed, 57 insertions(+)
diff --git a/support/export/v4root.c b/support/export/v4root.c
index 3654bd7c10c0..fd36eb704441 100644
--- a/support/export/v4root.c
+++ b/support/export/v4root.c
@@ -11,6 +11,7 @@
#include <config.h>
#endif
+#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/queue.h>
@@ -21,6 +22,7 @@
#include <unistd.h>
#include <errno.h>
+#include "nfsd_path.h"
#include "xlog.h"
#include "exportfs.h"
#include "nfslib.h"
@@ -73,6 +75,38 @@ set_pseudofs_security(struct exportent *pseudo)
}
}
+static ssize_t exportfs_write(int fd, const char *buf, size_t len)
+{
+ return nfsd_path_write(fd, buf, len);
+}
+
+static int test_export(struct exportent *eep, int with_fsid)
+{
+ char *path = eep->e_path;
+ int flags = eep->e_flags | (with_fsid ? NFSEXP_FSID : 0);
+ /* beside max path, buf size should take protocol str into account */
+ char buf[NFS_MAXPATHLEN+1+64] = { 0 };
+ char *bp = buf;
+ int len = sizeof(buf);
+ int fd, n;
+
+ n = snprintf(buf, len, "-test-client- ");
+ bp += n;
+ len -= n;
+ qword_add(&bp, &len, path);
+ if (len < 1)
+ return 0;
+ snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
+ fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
+ if (fd < 0)
+ return 0;
+ n = exportfs_write(fd, buf, strlen(buf));
+ close(fd);
+ if (n < 0)
+ return 0;
+ return 1;
+}
+
/*
* Create a pseudo export
*/
@@ -82,6 +116,7 @@ v4root_create(char *path, nfs_export *export)
nfs_export *exp;
struct exportent eep;
struct exportent *curexp = &export->m_export;
+ char uuid[33];
dupexportent(&eep, &pseudo_root.m_export);
eep.e_ttl = default_ttl;
@@ -89,6 +124,28 @@ v4root_create(char *path, nfs_export *export)
strncpy(eep.e_path, path, sizeof(eep.e_path)-1);
if (strcmp(path, "/") != 0)
eep.e_flags &= ~NFSEXP_FSID;
+ if (strcmp(path, "/") != 0 &&
+ !test_export(&eep, 0)) {
+ /* Need a uuid - base it on path */
+ char buf[12], *pp = path;
+ unsigned int i = 0;
+
+ memset(buf, 0, sizeof(buf));
+ while (*pp) {
+ buf[i] ^= *pp++;
+ i += 1;
+ if (i >= sizeof(buf))
+ i = 0;
+ }
+ memset(uuid, 'F', 32);
+ uuid[32] = '\0';
+ pp = uuid + 32 - sizeof(buf) * 2;
+ for (i = 0; i < sizeof(buf); i++) {
+ snprintf(pp, 3, "%02X", buf[i]);
+ pp += 2;
+ }
+ eep.e_uuid = uuid;
+ }
set_pseudofs_security(&eep);
exp = export_create(&eep, 0);
if (exp == NULL)
--
2.31.1
Hi Neil,
> [[This is a proposed fix. It seems to work. I'd like
> some review comments before it is committed.
> Petr: it would be great if you could test it to confirm
> it actually works in your case.
> ]]
Thanks for the quick fix. It runs nicely on newer kernels (5.11.12-1-default on
openSUSE and 5.10.0-6-amd64 on Debian). But it somehow fails on older ones
(SLES 5.3.18-54-default, heavily patched, and 4.9.0-11-amd64).
I have a problem on Debian with 4.9.0-11-amd64: it fails on both tmpfs and ext4,
while the others work fine (I tested tmpfs, btrfs and ext4). But maybe I did
something wrong during testing. I did:
cp ./utils/mountd/mountd /usr/sbin/rpc.mountd
systemctl restart nfs-mountd.service
The failure occurs regardless of whether I use the new mount.nfs (master) or the
original from Debian (1.3.3).
strace looks nearly the same on tmpfs and ext4:
socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 5
fcntl(5, F_GETFL) = 0x2 (flags O_RDWR)
fcntl(5, F_SETFL, O_RDWR|O_NONBLOCK) = 0
connect(5, {sa_family=AF_INET, sin_port=htons(111), sin_addr=inet_addr("10.0.0.2")}, 16) = -1 EINPROGRESS (Operation now in progress)
select(6, NULL, [5], NULL, {tv_sec=10, tv_usec=0}) = 1 (out [5], left {tv_sec=9, tv_usec=999997})
getsockopt(5, SOL_SOCKET, SO_ERROR, [0], [4]) = 0
fcntl(5, F_SETFL, O_RDWR) = 0
rt_sigprocmask(SIG_SETMASK, ~[RTMIN RT_1], [], 8) = 0
getpeername(5, {sa_family=AF_INET, sin_port=htons(111), sin_addr=inet_addr("10.0.0.2")}, [128->16]) = 0
getsockname(5, {sa_family=AF_INET, sin_port=htons(54140), sin_addr=inet_addr("10.0.0.1")}, [128->16]) = 0
getsockopt(5, SOL_SOCKET, SO_TYPE, [1], [4]) = 0
rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
getpid() = 920
rt_sigprocmask(SIG_SETMASK, ~[RTMIN RT_1], [], 8) = 0
rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
rt_sigprocmask(SIG_SETMASK, ~[RTMIN RT_1], [], 8) = 0
rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
rt_sigprocmask(SIG_SETMASK, ~[RTMIN RT_1], [], 8) = 0
rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
rt_sigprocmask(SIG_SETMASK, ~[RTMIN RT_1], [], 8) = 0
write(5, "\200\0\0008bZ\360\303\0\0\0\0\0\0\0\2\0\1\206\240\0\0\0\2\0\0\0\3\0\0\0\0"..., 60) = 60
poll([{fd=5, events=POLLIN}], 1, 9999) = 1 ([{fd=5, revents=POLLIN}])
read(5, "\200\0\0\34bZ\360\303\0\0\0\1\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\10\1", 65536) = 32
rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
rt_sigprocmask(SIG_SETMASK, ~[RTMIN RT_1], [], 8) = 0
close(5) = 0
rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 5
fcntl(5, F_GETFL) = 0x2 (flags O_RDWR)
fcntl(5, F_SETFL, O_RDWR|O_NONBLOCK) = 0
Kind regards,
Petr
> On May 6, 2021, at 9:48 PM, NeilBrown <[email protected]> wrote:
>
>
> [[This is a proposed fix. It seems to work. I'd like
> some review comments before it is committed.
> Petr: it would be great if you could test it to confirm
> it actually works in your case.
> ]]
>
> Some filesystems cannot be exported without an fsid or uuid.
> tmpfs is the main example.
>
> When mountd creates nfsv4 pseudo-root exports for the path leading down
> to an export point it exports each directory without any fsid or uuid.
> If one of these directories is on tmp, that will fail.
>
> The net result is that exporting a subdirectory of a tmpfs filesystem
> will not work over NFSv4 as the parents within the filesystem cannot be
> exported. It will either fail, or fall-back to NFSv3 (depending on the
> version of the mount.nfs program).
>
> To fix this we need to provide an fsid or uuid for these pseudo-root
> exports. This patch does that by creating a UUID with the first 4 bytes
> 0xFFFFFFFF and the remaining 12 bytes form from the path name, xoring
> bytes together if the path is longer than 12 characters.
> Hopefully no filesystem uses a UUID like this....
That's not really a UUID, as per RFC 4122. I'm guessing it's possible
for a collision to occur pretty quickly, for instance. It would be nicer
if a conformant UUID could be used here.
Is there a problem with specifying the export's fsid in /etc/exports?
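For reference, libuuid can already derive a stable, name-based UUID from a path;
a minimal sketch, assuming a libuuid recent enough to provide uuid_generate_sha1()
and UUID_STR_LEN, linked with -luuid (the seed and path below are placeholders):

#include <stdio.h>
#include <string.h>
#include <uuid/uuid.h>

int main(void)
{
        /* hypothetical pseudo-root path */
        const char *path = "/tmp/LTP_nfs01.xkKpqpRikV/4.2";
        /* placeholder namespace seed; a real one would be generated once, randomly */
        const char seed_s[] = "00000000-0000-0000-0000-000000000000";
        uuid_t seed, uuid;
        char uuid_s[UUID_STR_LEN];

        uuid_parse(seed_s, seed);
        /* RFC 4122 name-based (SHA-1) UUID: the same seed and path
         * always yield the same UUID */
        uuid_generate_sha1(uuid, seed, path, strlen(path));
        uuid_unparse_upper(uuid, uuid_s);
        printf("%s\n", uuid_s);
        return 0;
}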
> The patch borrows some code from exportfs. Maybe that code should be
> move to a library..
>
> Signed-off-by: NeilBrown <[email protected]>
> ---
> support/export/v4root.c | 57 +++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 57 insertions(+)
>
> diff --git a/support/export/v4root.c b/support/export/v4root.c
> index 3654bd7c10c0..fd36eb704441 100644
> --- a/support/export/v4root.c
> +++ b/support/export/v4root.c
> @@ -11,6 +11,7 @@
> #include <config.h>
> #endif
>
> +#include <fcntl.h>
> #include <sys/types.h>
> #include <sys/stat.h>
> #include <sys/queue.h>
> @@ -21,6 +22,7 @@
> #include <unistd.h>
> #include <errno.h>
>
> +#include "nfsd_path.h"
> #include "xlog.h"
> #include "exportfs.h"
> #include "nfslib.h"
> @@ -73,6 +75,38 @@ set_pseudofs_security(struct exportent *pseudo)
> }
> }
>
> +static ssize_t exportfs_write(int fd, const char *buf, size_t len)
> +{
> + return nfsd_path_write(fd, buf, len);
> +}
> +
> +static int test_export(struct exportent *eep, int with_fsid)
> +{
> + char *path = eep->e_path;
> + int flags = eep->e_flags | (with_fsid ? NFSEXP_FSID : 0);
> + /* beside max path, buf size should take protocol str into account */
> + char buf[NFS_MAXPATHLEN+1+64] = { 0 };
> + char *bp = buf;
> + int len = sizeof(buf);
> + int fd, n;
> +
> + n = snprintf(buf, len, "-test-client- ");
> + bp += n;
> + len -= n;
> + qword_add(&bp, &len, path);
> + if (len < 1)
> + return 0;
> + snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
> + fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
> + if (fd < 0)
> + return 0;
> + n = exportfs_write(fd, buf, strlen(buf));
> + close(fd);
> + if (n < 0)
> + return 0;
> + return 1;
> +}
> +
> /*
> * Create a pseudo export
> */
> @@ -82,6 +116,7 @@ v4root_create(char *path, nfs_export *export)
> nfs_export *exp;
> struct exportent eep;
> struct exportent *curexp = &export->m_export;
> + char uuid[33];
>
> dupexportent(&eep, &pseudo_root.m_export);
> eep.e_ttl = default_ttl;
> @@ -89,6 +124,28 @@ v4root_create(char *path, nfs_export *export)
> strncpy(eep.e_path, path, sizeof(eep.e_path)-1);
> if (strcmp(path, "/") != 0)
> eep.e_flags &= ~NFSEXP_FSID;
> + if (strcmp(path, "/") != 0 &&
> + !test_export(&eep, 0)) {
> + /* Need a uuid - base it on path */
> + char buf[12], *pp = path;
> + unsigned int i = 0;
> +
> + memset(buf, 0, sizeof(buf));
> + while (*pp) {
> + buf[i] ^= *pp++;
> + i += 1;
> + if (i >= sizeof(buf))
> + i = 0;
> + }
> + memset(uuid, 'F', 32);
> + uuid[32] = '\0';
> + pp = uuid + 32 - sizeof(buf) * 2;
> + for (i = 0; i < sizeof(buf); i++) {
> + snprintf(pp, 3, "%02X", buf[i]);
> + pp += 2;
> + }
> + eep.e_uuid = uuid;
> + }
> set_pseudofs_security(&eep);
> exp = export_create(&eep, 0);
> if (exp == NULL)
> --
> 2.31.1
>
--
Chuck Lever
On Fri, 07 May 2021, Chuck Lever III wrote:
>
> That's not really a UUID, as per RFC 4122. I'm guessing it's possible
> for a collision to occur pretty quickly, for instance. It would be nicer
> if a conformant UUID could be used here.
That sounds like a sensible approach. I'll go and read RFC 4122 and see
what I can learn.
>
> Is there a problem with specifying the export's fsid in /etc/exports?
Each ancestor directory of any export point needs to be exported with
the v4root flag, and those that are on tmpfs each need a unique uuid or
fsid.
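Spelled out, that would mean hand-maintaining something like the following in
/etc/exports (hypothetical paths and fsid numbers, leaving aside how the v4root
flag itself would be expressed):

/tmp/exports/data   *(rw,no_subtree_check,fsid=101)
/tmp/exports        *(ro,no_subtree_check,fsid=102)
/tmp                *(ro,no_subtree_check,fsid=103)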
Requiring that to be specified in /etc/exports is an extra burden to
impose on the admin.
So yes, I think it is a problem to require that specification.
(I'm not even sure if NFSEXP_V4ROOT exports can be specified in
/etc/exports, but that would be easy to fix, of course.)
Thanks,
NeilBrown
>
>
> > The patch borrows some code from exportfs. Maybe that code should be
> > move to a library..
> >
> > Signed-off-by: NeilBrown <[email protected]>
> > ---
> > support/export/v4root.c | 57 +++++++++++++++++++++++++++++++++++++++++
> > 1 file changed, 57 insertions(+)
> >
> > diff --git a/support/export/v4root.c b/support/export/v4root.c
> > index 3654bd7c10c0..fd36eb704441 100644
> > --- a/support/export/v4root.c
> > +++ b/support/export/v4root.c
> > @@ -11,6 +11,7 @@
> > #include <config.h>
> > #endif
> >
> > +#include <fcntl.h>
> > #include <sys/types.h>
> > #include <sys/stat.h>
> > #include <sys/queue.h>
> > @@ -21,6 +22,7 @@
> > #include <unistd.h>
> > #include <errno.h>
> >
> > +#include "nfsd_path.h"
> > #include "xlog.h"
> > #include "exportfs.h"
> > #include "nfslib.h"
> > @@ -73,6 +75,38 @@ set_pseudofs_security(struct exportent *pseudo)
> > }
> > }
> >
> > +static ssize_t exportfs_write(int fd, const char *buf, size_t len)
> > +{
> > + return nfsd_path_write(fd, buf, len);
> > +}
> > +
> > +static int test_export(struct exportent *eep, int with_fsid)
> > +{
> > + char *path = eep->e_path;
> > + int flags = eep->e_flags | (with_fsid ? NFSEXP_FSID : 0);
> > + /* beside max path, buf size should take protocol str into account */
> > + char buf[NFS_MAXPATHLEN+1+64] = { 0 };
> > + char *bp = buf;
> > + int len = sizeof(buf);
> > + int fd, n;
> > +
> > + n = snprintf(buf, len, "-test-client- ");
> > + bp += n;
> > + len -= n;
> > + qword_add(&bp, &len, path);
> > + if (len < 1)
> > + return 0;
> > + snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
> > + fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
> > + if (fd < 0)
> > + return 0;
> > + n = exportfs_write(fd, buf, strlen(buf));
> > + close(fd);
> > + if (n < 0)
> > + return 0;
> > + return 1;
> > +}
> > +
> > /*
> > * Create a pseudo export
> > */
> > @@ -82,6 +116,7 @@ v4root_create(char *path, nfs_export *export)
> > nfs_export *exp;
> > struct exportent eep;
> > struct exportent *curexp = &export->m_export;
> > + char uuid[33];
> >
> > dupexportent(&eep, &pseudo_root.m_export);
> > eep.e_ttl = default_ttl;
> > @@ -89,6 +124,28 @@ v4root_create(char *path, nfs_export *export)
> > strncpy(eep.e_path, path, sizeof(eep.e_path)-1);
> > if (strcmp(path, "/") != 0)
> > eep.e_flags &= ~NFSEXP_FSID;
> > + if (strcmp(path, "/") != 0 &&
> > + !test_export(&eep, 0)) {
> > + /* Need a uuid - base it on path */
> > + char buf[12], *pp = path;
> > + unsigned int i = 0;
> > +
> > + memset(buf, 0, sizeof(buf));
> > + while (*pp) {
> > + buf[i] ^= *pp++;
> > + i += 1;
> > + if (i >= sizeof(buf))
> > + i = 0;
> > + }
> > + memset(uuid, 'F', 32);
> > + uuid[32] = '\0';
> > + pp = uuid + 32 - sizeof(buf) * 2;
> > + for (i = 0; i < sizeof(buf); i++) {
> > + snprintf(pp, 3, "%02X", buf[i]);
> > + pp += 2;
> > + }
> > + eep.e_uuid = uuid;
> > + }
> > set_pseudofs_security(&eep);
> > exp = export_create(&eep, 0);
> > if (exp == NULL)
> > --
> > 2.31.1
> >
>
> --
> Chuck Lever
>
>
>
>
On Fri, 07 May 2021, Petr Vorel wrote:
> Hi Neil,
>
> > [[This is a proposed fix. It seems to work. I'd like
> > some review comments before it is committed.
> > Petr: it would be great if you could test it to confirm
> > it actually works in your case.
> > ]]
> Thanks for a quick fix. It runs nicely in newer kernels (5.11.12-1-default
> openSUSE and 5.10.0-6-amd64 Debian). But it somehow fails on older ones
> (SLES 5.3.18-54-default heavily patched and 4.9.0-11-amd64).
>
> I have some problem on Debian with 4.9.0-11-amd64 fails on both tmpfs and ext4,
> others work fine (testing tmpfs, btrfs and ext4). But maybe I did something
> wrong during testing. I did:
> cp ./utils/mountd/mountd /usr/sbin/rpc.mountd
> systemctl restart nfs-mountd.service
That is the correct procedure. It should work...
>
> Failure is regardless I use new mount.nfs (master) or the original from
> Debian (1.3.3).
What error message do you get on failure? It might help to add "-v" to
the mount command to see more messages.
>
> strace looks nearly the same on tmpfs and ext4:
This shows mount.nfs connecting to rpcbind, sending a request, getting a
reply, and maybe looping around and trying again?
There doesn't seem to be anything kernel-related that would affect
anything there, so I cannot think why an older kernel would make a
difference. Or an older rpcbind...
Maybe I'll experiment on a SLE12 kernel.
NeilBrown
Hi Neil,
> On Fri, 07 May 2021, Petr Vorel wrote:
> > Hi Neil,
> > > [[This is a proposed fix. It seems to work. I'd like
> > > some review comments before it is committed.
> > > Petr: it would be great if you could test it to confirm
> > > it actually works in your case.
> > > ]]
> > Thanks for a quick fix. It runs nicely in newer kernels (5.11.12-1-default
> > openSUSE and 5.10.0-6-amd64 Debian). But it somehow fails on older ones
> > (SLES 5.3.18-54-default heavily patched and 4.9.0-11-amd64).
> > I have some problem on Debian with 4.9.0-11-amd64 fails on both tmpfs and ext4,
> > others work fine (testing tmpfs, btrfs and ext4). But maybe I did something
> > wrong during testing. I did:
> > cp ./utils/mountd/mountd /usr/sbin/rpc.mountd
> > systemctl restart nfs-mountd.service
> That is the correct procedure. It should work...
Thanks for a confirmation.
> > Failure is regardless I use new mount.nfs (master) or the original from
> > Debian (1.3.3).
> What error message do you get on failure? It might help to add "-v" to
> the mount command to see more messages.
+1. I'll add it to the tests (+ printing mount.nfs version).
Here is debug info (when using nfs-utils 2.5.3 - master, with your patch):
* NFSv3 on ext4 (OK)
nfs01 1 TINFO: setup NFSv3, socket type tcp
nfs01 1 TINFO: Mounting NFS: mount -v -t nfs -o proto=tcp,vers=3 10.0.0.2:/var/tmp/LTP_nfs01.14F4lC51P6/3/tcp /var/tmp/LTP_nfs01.14F4lC51P6/3/0
mount.nfs: trying 10.0.0.2 prog 100003 vers 3 prot TCP port 2049
mount.nfs: trying 10.0.0.2 prog 100005 vers 3 prot TCP port 58997
mount.nfs: timeout set for Thu May 13 08:22:46 2021
mount.nfs: trying text-based options 'proto=tcp,vers=3,addr=10.0.0.2'
mount.nfs: prog 100003, trying vers=3, prot=6
mount.nfs: prog 100005, trying vers=3, prot=6
nfs01 1 TINFO: starting 'nfs01_open_files 10'
* NFSv3 on tmpfs (OK)
nfs01 1 TINFO: setup NFSv3, socket type tcp
nfs01 1 TINFO: Mounting NFS: mount -v -t nfs -o proto=tcp,vers=3 10.0.0.2:/tmp/LTP_nfs01.oJEz2xd9ZI/3/tcp /tmp/LTP_nfs01.oJEz2xd9ZI/3/0
mount.nfs: trying 10.0.0.2 prog 100003 vers 3 prot TCP port 2049
mount.nfs: trying 10.0.0.2 prog 100005 vers 3 prot TCP port 58997
mount.nfs: timeout set for Thu May 13 08:23:01 2021
mount.nfs: trying text-based options 'proto=tcp,vers=3,addr=10.0.0.2'
mount.nfs: prog 100003, trying vers=3, prot=6
mount.nfs: prog 100005, trying vers=3, prot=6
nfs01 1 TINFO: starting 'nfs01_open_files 10'
* NFSv4.1 on tmpfs (FAIL)
nfs01 1 TINFO: setup NFSv4.1, socket type tcp
nfs01 1 TINFO: Mounting NFS: mount -v -t nfs -o proto=tcp,vers=4.1 10.0.0.2:/tmp/LTP_nfs01.QnnFGEV4rs/4.1/tcp /tmp/LTP_nfs01.QnnFGEV4rs/4.1/0
mount.nfs: mount(2): No such file or directory
mount.nfs: mounting 10.0.0.2:/tmp/LTP_nfs01.QnnFGEV4rs/4.1/tcp failed, reason given by server: No such file or directory
mount.nfs: timeout set for Thu May 13 08:23:02 2021
mount.nfs: trying text-based options 'proto=tcp,vers=4.1,addr=10.0.0.2,clientaddr=10.0.0.1'
nfs01 1 TBROK: mount command failed
nfs01 1 TINFO: Cleaning up testcase
> > strace looks nearly the same on tmpfs and ext4:
> This shows mount.nfs connecting to rpcbind, sending a request, getting a
> reply, and maybe looping around and trying again?
> There doesn't seem to be anything kernel related that would affect
> anything there so I cannot think why on older kernel would make a
> difference. Or an older rpcbind...
Well, it uses 1.2.5-0.3+deb10u1, i.e. nearly the latest version (Steve released
1.2.6 3 days ago). And it's the same version as in openSUSE Tumbleweed where it
works well.
> Maybe I'll experiment on a SLE12 kernel.
Thanks!
> NeilBrown
Kind regards,
Petr
Hi Neil, all,
[Cc also Debian kernel folks as we're trying to fix issues which could hit them.
See https://lore.kernel.org/linux-nfs/YILQip3nAxhpXP9+@pevik/T/#t ]
> Hi Neil,
> > On Fri, 07 May 2021, Petr Vorel wrote:
> > > Hi Neil,
> > > > [[This is a proposed fix. It seems to work. I'd like
> > > > some review comments before it is committed.
> > > > Petr: it would be great if you could test it to confirm
> > > > it actually works in your case.
> > > > ]]
> > > Thanks for a quick fix. It runs nicely in newer kernels (5.11.12-1-default
> > > openSUSE and 5.10.0-6-amd64 Debian). But it somehow fails on older ones
> > > (SLES 5.3.18-54-default heavily patched and 4.9.0-11-amd64).
> > > I have some problem on Debian with 4.9.0-11-amd64 fails on both tmpfs and ext4,
> > > others work fine (testing tmpfs, btrfs and ext4). But maybe I did something
> > > wrong during testing. I did:
> > > cp ./utils/mountd/mountd /usr/sbin/rpc.mountd
> > > systemctl restart nfs-mountd.service
> > That is the correct procedure. It should work...
> Thanks for a confirmation.
> > > Failure is regardless I use new mount.nfs (master) or the original from
> > > Debian (1.3.3).
> > What error message do you get on failure? It might help to add "-v" to
> > the mount command to see more messages.
> +1. I'll add it to the tests (+ printing mount.nfs version).
> Here is debug info (when using nfs-utils 2.5.3 - master, with your patch):
> * NFSv3 on ext4 (OK)
> nfs01 1 TINFO: setup NFSv3, socket type tcp
> nfs01 1 TINFO: Mounting NFS: mount -v -t nfs -o proto=tcp,vers=3 10.0.0.2:/var/tmp/LTP_nfs01.14F4lC51P6/3/tcp /var/tmp/LTP_nfs01.14F4lC51P6/3/0
> mount.nfs: trying 10.0.0.2 prog 100003 vers 3 prot TCP port 2049
> mount.nfs: trying 10.0.0.2 prog 100005 vers 3 prot TCP port 58997
> mount.nfs: timeout set for Thu May 13 08:22:46 2021
> mount.nfs: trying text-based options 'proto=tcp,vers=3,addr=10.0.0.2'
> mount.nfs: prog 100003, trying vers=3, prot=6
> mount.nfs: prog 100005, trying vers=3, prot=6
> nfs01 1 TINFO: starting 'nfs01_open_files 10'
I'm sorry, this one is from running the original rpc.nfsd.
When running your patched one, it fails (as I previously reported).
> * NFSv3 on tmpfs (OK)
> nfs01 1 TINFO: setup NFSv3, socket type tcp
> nfs01 1 TINFO: Mounting NFS: mount -v -t nfs -o proto=tcp,vers=3 10.0.0.2:/tmp/LTP_nfs01.oJEz2xd9ZI/3/tcp /tmp/LTP_nfs01.oJEz2xd9ZI/3/0
> mount.nfs: trying 10.0.0.2 prog 100003 vers 3 prot TCP port 2049
> mount.nfs: trying 10.0.0.2 prog 100005 vers 3 prot TCP port 58997
> mount.nfs: timeout set for Thu May 13 08:23:01 2021
> mount.nfs: trying text-based options 'proto=tcp,vers=3,addr=10.0.0.2'
> mount.nfs: prog 100003, trying vers=3, prot=6
> mount.nfs: prog 100005, trying vers=3, prot=6
> nfs01 1 TINFO: starting 'nfs01_open_files 10'
The same here.
> * NFSv4.1 on tmpfs (FAIL)
> nfs01 1 TINFO: setup NFSv4.1, socket type tcp
> nfs01 1 TINFO: Mounting NFS: mount -v -t nfs -o proto=tcp,vers=4.1 10.0.0.2:/tmp/LTP_nfs01.QnnFGEV4rs/4.1/tcp /tmp/LTP_nfs01.QnnFGEV4rs/4.1/0
> mount.nfs: mount(2): No such file or directory
> mount.nfs: mounting 10.0.0.2:/tmp/LTP_nfs01.QnnFGEV4rs/4.1/tcp failed, reason given by server: No such file or directory
> mount.nfs: timeout set for Thu May 13 08:23:02 2021
> mount.nfs: trying text-based options 'proto=tcp,vers=4.1,addr=10.0.0.2,clientaddr=10.0.0.1'
> nfs01 1 TBROK: mount command failed
> nfs01 1 TINFO: Cleaning up testcase
The failure really has something to do with rpcbind ("mount.nfs: portmap query
failed:"):
rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
write(2, "mount.nfs: trying 10.0.0.2 prog "..., 66) = 66
socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 5
fcntl(5, F_GETFL) = 0x2 (flags O_RDWR)
fcntl(5, F_SETFL, O_RDWR|O_NONBLOCK) = 0
connect(5, {sa_family=AF_INET, sin_port=htons(37873), sin_addr=inet_addr("10.0.0.2")}, 16) = -1 EINPROGRESS (Operation now in progress)
select(6, NULL, [5], NULL, {tv_sec=10, tv_usec=0}) = 1 (out [5], left {tv_sec=9, tv_usec=999998})
getsockopt(5, SOL_SOCKET, SO_ERROR, [111], [4]) = 0
fcntl(5, F_SETFL, O_RDWR) = 0
close(5) = 0
write(2, "mount.nfs: portmap query failed:"..., 79) = 79
Kind regards,
Petr
> > > strace looks nearly the same on tmpfs and ext4:
> > This shows mount.nfs connecting to rpcbind, sending a request, getting a
> > reply, and maybe looping around and trying again?
> > There doesn't seem to be anything kernel related that would affect
> > anything there so I cannot think why on older kernel would make a
> > difference. Or an older rpcbind...
> Well, it uses 1.2.5-0.3+deb10u1, i.e. nearly the latest version (Steve released
> 1.2.6 3 days ago). And it's the same version as in openSUSE Tumbleweed where it
> works well.
> > Maybe I'll experiment on a SLE12 kernel.
> Thanks!
> > NeilBrown
> Kind regards,
> Petr
On Fri, 14 May 2021, Petr Vorel wrote:
>
> The failure has really something to do with rpcbind ("mount.nfs: portmap query
> failed:"):
> rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
> write(2, "mount.nfs: trying 10.0.0.2 prog "..., 66) = 66
> socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 5
> fcntl(5, F_GETFL) = 0x2 (flags O_RDWR)
> fcntl(5, F_SETFL, O_RDWR|O_NONBLOCK) = 0
> connect(5, {sa_family=AF_INET, sin_port=htons(37873), sin_addr=inet_addr("10.0.0.2")}, 16) = -1 EINPROGRESS (Operation now in progress)
> select(6, NULL, [5], NULL, {tv_sec=10, tv_usec=0}) = 1 (out [5], left {tv_sec=9, tv_usec=999998})
> getsockopt(5, SOL_SOCKET, SO_ERROR, [111], [4]) = 0
> fcntl(5, F_SETFL, O_RDWR) = 0
> close(5) = 0
> write(2, "mount.nfs: portmap query failed:"..., 79) = 79
The "111" from getsockopt...SO_ERROR is ECONNREFUSED. That suggests
that rpcbind wasn't even running.
This is different to the first strace you reported, where mount.nfs
successfully connected to rpcbind, sent a request and got a response,
and then failed the mount. That would happen if, for example, rpc.mountd
wasn't running.
So I think these failures are caused by some problem with restarting the
services and aren't actually testing the code at all.
Could you try again and make sure rpcbind and rpc.mountd are running on
the server before attempting the mount?
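For example, on the server (unit names as used elsewhere in this thread; adjust to
the distribution):

systemctl status rpcbind.service nfs-mountd.service nfs-server.service

and from the client:

rpcinfo -p 10.0.0.2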
Thanks,
NeilBrown
Some filesystems cannot be exported without an fsid or uuid.
tmpfs is the main example.
When mountd (or exportd) creates nfsv4 pseudo-root exports for the path
leading down to an export point it exports each directory without any
fsid or uuid. If one of these directories is on tmp, that will fail.
The net result is that exporting a subdirectory of a tmpfs filesystem
will not work over NFSv4 as the parents within the filesystem cannot be
exported. It will either fail, or fall-back to NFSv3 (depending on the
version of the mount.nfs program).
To fix this we need to provide an fsid or uuid for these pseudo-root
exports. This patch does that by creating an RFC-4122 V5 compatible
UUID based on an arbitrary seed and the path to the export.
To check if an export needs a uuid, test_export() is moved from exportfs
to libexport.a, modified slightly and renamed to export_test().
Signed-off-by: NeilBrown <[email protected]>
---
This version contains Chuck's suggestion for improving the uuid, and
general clean-up.
support/export/cache.c | 3 ++-
support/export/export.c | 29 +++++++++++++++++++++++++++++
support/export/v4root.c | 23 ++++++++++++++++++++++-
support/include/exportfs.h | 1 +
utils/exportd/Makefile.am | 2 +-
utils/exportfs/exportfs.c | 38 +++-----------------------------------
utils/mountd/Makefile.am | 2 +-
7 files changed, 59 insertions(+), 39 deletions(-)
diff --git a/support/export/cache.c b/support/export/cache.c
index 3e4f53c0a32e..a5823e92e9f2 100644
--- a/support/export/cache.c
+++ b/support/export/cache.c
@@ -981,7 +981,8 @@ static int dump_to_cache(int f, char *buf, int blen, char *domain,
write_secinfo(&bp, &blen, exp, flag_mask);
if (exp->e_uuid == NULL || different_fs) {
char u[16];
- if (uuid_by_path(path, 0, 16, u)) {
+ if ((exp->e_flags & flag_mask & NFSEXP_FSID) == 0 &&
+ uuid_by_path(path, 0, 16, u)) {
qword_add(&bp, &blen, "uuid");
qword_addhex(&bp, &blen, u, 16);
}
diff --git a/support/export/export.c b/support/export/export.c
index c753f68e4d63..03390dfc1de8 100644
--- a/support/export/export.c
+++ b/support/export/export.c
@@ -10,9 +10,11 @@
#include <config.h>
#endif
+#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/param.h>
+#include <fcntl.h>
#include <netinet/in.h>
#include <limits.h>
#include <stdlib.h>
@@ -420,3 +422,30 @@ export_hash(char *str)
return num % HASH_TABLE_SIZE;
}
+
+int export_test(struct exportent *eep, int with_fsid)
+{
+ char *path = eep->e_path;
+ int flags = eep->e_flags | (with_fsid ? NFSEXP_FSID : 0);
+ /* beside max path, buf size should take protocol str into account */
+ char buf[NFS_MAXPATHLEN+1+64] = { 0 };
+ char *bp = buf;
+ int len = sizeof(buf);
+ int fd, n;
+
+ n = snprintf(buf, len, "-test-client- ");
+ bp += n;
+ len -= n;
+ qword_add(&bp, &len, path);
+ if (len < 1)
+ return 0;
+ snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
+ fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
+ if (fd < 0)
+ return 0;
+ n = nfsd_path_write(fd, buf, strlen(buf));
+ close(fd);
+ if (n < 0)
+ return 0;
+ return 1;
+}
diff --git a/support/export/v4root.c b/support/export/v4root.c
index 3654bd7c10c0..c12a7d8562b2 100644
--- a/support/export/v4root.c
+++ b/support/export/v4root.c
@@ -20,6 +20,7 @@
#include <unistd.h>
#include <errno.h>
+#include <uuid/uuid.h>
#include "xlog.h"
#include "exportfs.h"
@@ -89,11 +90,31 @@ v4root_create(char *path, nfs_export *export)
strncpy(eep.e_path, path, sizeof(eep.e_path)-1);
if (strcmp(path, "/") != 0)
eep.e_flags &= ~NFSEXP_FSID;
+
+ if (strcmp(path, "/") != 0 &&
+ !export_test(&eep, 0)) {
+ /* Need a uuid - base it on path using a fixed seed that
+ * was generated randomly.
+ */
+ const char seed_s[] = "39c6b5c1-3f24-4f4e-977c-7fe6546b8a25";
+ uuid_t seed, uuid;
+ char uuid_s[UUID_STR_LEN];
+ unsigned int i, j;
+
+ uuid_parse(seed_s, seed);
+ uuid_generate_sha1(uuid, seed, path, strlen(path));
+ uuid_unparse_upper(uuid, uuid_s);
+ /* strip hyphens */
+ for (i = j = 0; uuid_s[i]; i++)
+ if (uuid_s[i] != '-')
+ uuid_s[j++] = uuid_s[i];
+ eep.e_uuid = uuid_s;
+ }
set_pseudofs_security(&eep);
exp = export_create(&eep, 0);
if (exp == NULL)
return NULL;
- xlog(D_CALL, "v4root_create: path '%s' flags 0x%x",
+ xlog(D_CALL, "v4root_create: path '%s' flags 0x%x",
exp->m_export.e_path, exp->m_export.e_flags);
return &exp->m_export;
}
diff --git a/support/include/exportfs.h b/support/include/exportfs.h
index 81d137210862..7c1b74537186 100644
--- a/support/include/exportfs.h
+++ b/support/include/exportfs.h
@@ -173,5 +173,6 @@ struct export_features {
struct export_features *get_export_features(void);
void fix_pseudoflavor_flags(struct exportent *ep);
char *exportent_realpath(struct exportent *eep);
+int export_test(struct exportent *eep, int with_fsid);
#endif /* EXPORTFS_H */
diff --git a/utils/exportd/Makefile.am b/utils/exportd/Makefile.am
index eb521f15032d..c95bdee76d3f 100644
--- a/utils/exportd/Makefile.am
+++ b/utils/exportd/Makefile.am
@@ -16,7 +16,7 @@ exportd_SOURCES = exportd.c
exportd_LDADD = ../../support/export/libexport.a \
../../support/nfs/libnfs.la \
../../support/misc/libmisc.a \
- $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD)
+ $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD) -luuid
exportd_CPPFLAGS = $(AM_CPPFLAGS) $(CPPFLAGS) \
-I$(top_srcdir)/support/export
diff --git a/utils/exportfs/exportfs.c b/utils/exportfs/exportfs.c
index 25d757d8b4b4..bc76aaaf8714 100644
--- a/utils/exportfs/exportfs.c
+++ b/utils/exportfs/exportfs.c
@@ -54,11 +54,6 @@ static int _lockfd = -1;
struct state_paths etab;
-static ssize_t exportfs_write(int fd, const char *buf, size_t len)
-{
- return nfsd_path_write(fd, buf, len);
-}
-
/*
* If we aren't careful, changes made by exportfs can be lost
* when multiple exports process run at once:
@@ -510,33 +505,6 @@ static int can_test(void)
return 1;
}
-static int test_export(nfs_export *exp, int with_fsid)
-{
- char *path = exp->m_export.e_path;
- int flags = exp->m_export.e_flags | (with_fsid ? NFSEXP_FSID : 0);
- /* beside max path, buf size should take protocol str into account */
- char buf[NFS_MAXPATHLEN+1+64] = { 0 };
- char *bp = buf;
- int len = sizeof(buf);
- int fd, n;
-
- n = snprintf(buf, len, "-test-client- ");
- bp += n;
- len -= n;
- qword_add(&bp, &len, path);
- if (len < 1)
- return 0;
- snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
- fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
- if (fd < 0)
- return 0;
- n = exportfs_write(fd, buf, strlen(buf));
- close(fd);
- if (n < 0)
- return 0;
- return 1;
-}
-
static void
validate_export(nfs_export *exp)
{
@@ -568,12 +536,12 @@ validate_export(nfs_export *exp)
if ((exp->m_export.e_flags & NFSEXP_FSID) || exp->m_export.e_uuid ||
fs_has_fsid) {
- if ( !test_export(exp, 1)) {
+ if ( !export_test(&exp->m_export, 1)) {
xlog(L_ERROR, "%s does not support NFS export", path);
return;
}
- } else if ( !test_export(exp, 0)) {
- if (test_export(exp, 1))
+ } else if ( !export_test(&exp->m_export, 0)) {
+ if (export_test(&exp->m_export, 1))
xlog(L_ERROR, "%s requires fsid= for NFS export", path);
else
xlog(L_ERROR, "%s does not support NFS export", path);
diff --git a/utils/mountd/Makefile.am b/utils/mountd/Makefile.am
index 859f28ecd6f3..13b25c90f06e 100644
--- a/utils/mountd/Makefile.am
+++ b/utils/mountd/Makefile.am
@@ -18,7 +18,7 @@ mountd_LDADD = ../../support/export/libexport.a \
../../support/nfs/libnfs.la \
../../support/misc/libmisc.a \
$(OPTLIBS) \
- $(LIBBSD) $(LIBWRAP) $(LIBNSL) $(LIBBLKID) $(LIBTIRPC) \
+ $(LIBBSD) $(LIBWRAP) $(LIBNSL) $(LIBBLKID) -luuid $(LIBTIRPC) \
$(LIBPTHREAD)
mountd_CPPFLAGS = $(AM_CPPFLAGS) $(CPPFLAGS) \
-I$(top_builddir)/support/include \
--
2.31.1
Hi Neil,
> On Fri, 14 May 2021, Petr Vorel wrote:
> > The failure has really something to do with rpcbind ("mount.nfs: portmap query
> > failed:"):
> > rt_sigprocmask(SIG_SETMASK, [], NULL, 8) = 0
> > write(2, "mount.nfs: trying 10.0.0.2 prog "..., 66) = 66
> > socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) = 5
> > fcntl(5, F_GETFL) = 0x2 (flags O_RDWR)
> > fcntl(5, F_SETFL, O_RDWR|O_NONBLOCK) = 0
> > connect(5, {sa_family=AF_INET, sin_port=htons(37873), sin_addr=inet_addr("10.0.0.2")}, 16) = -1 EINPROGRESS (Operation now in progress)
> > select(6, NULL, [5], NULL, {tv_sec=10, tv_usec=0}) = 1 (out [5], left {tv_sec=9, tv_usec=999998})
> > getsockopt(5, SOL_SOCKET, SO_ERROR, [111], [4]) = 0
> > fcntl(5, F_SETFL, O_RDWR) = 0
> > close(5) = 0
> > write(2, "mount.nfs: portmap query failed:"..., 79) = 79
> The "111" from getsockopt...SO_ERROR is ECONNREFUSED. That suggests
> that rpcbind wasn't even running.
> This is different to the first strace you reported where mount.nfs
> successfully connected to rpcbind, sent and request and got a response,
> and then fail the mount. That would happen if, for example, rpc.mountd
> wasn't running.
> So I think these failures are caused by some problem with restarting the
> services and aren't actually testing the code at all.
> Could you try again and make sure rpcbind and rpc.mountd are running on
> the server before attempting the mount?
I'm sorry, you're right: rpc.mountd was indeed not running, and the earlier failure
was probably caused by rpcbind not running. I'm not sure exactly what went wrong;
I'll test your v2, probably only on openSUSE.
BTW, apart from checking whether rpcbind is running, I'd check only rpc.nfsd.
Or should anything else be tested? IMHO rpc.mountd, rpc.idmapd and rpc.statd
are nfs-server.service dependencies (the service which also starts rpc.nfsd).
Kind regards,
Petr
> Thanks,
> NeilBrown
Hi Neil,
> Some filesystems cannot be exported without an fsid or uuid.
> tmpfs is the main example.
> When mountd (or exportd) creates nfsv4 pseudo-root exports for the path
> leading down to an export point it exports each directory without any
> fsid or uuid. If one of these directories is on tmp, that will fail.
> The net result is that exporting a subdirectory of a tmpfs filesystem
> will not work over NFSv4 as the parents within the filesystem cannot be
> exported. It will either fail, or fall-back to NFSv3 (depending on the
> version of the mount.nfs program).
> To fix this we need to provide an fsid or uuid for these pseudo-root
> exports. This patch does that by creating an RFC-4122 V5 compatible
> UUID based on an arbitrary seed and the path to the export.
> To check if an export needs a uuid, text_export() is moved from exportfs
> to libexport.a, modified slightly and renamed to export_test().
> Signed-off-by: NeilBrown <[email protected]>
Tested-by: Petr Vorel <[email protected]>
tmpfs and btrfs via LTP test nfs01 on openSUSE.
Kind regards,
Petr
Hi Neil,
> Some filesystems cannot be exported without an fsid or uuid.
> tmpfs is the main example.
> When mountd (or exportd) creates nfsv4 pseudo-root exports for the path
> leading down to an export point it exports each directory without any
> fsid or uuid. If one of these directories is on tmp, that will fail.
^ nit: you probably
mean tmpfs
> The net result is that exporting a subdirectory of a tmpfs filesystem
> will not work over NFSv4 as the parents within the filesystem cannot be
> exported. It will either fail, or fall-back to NFSv3 (depending on the
> version of the mount.nfs program).
> To fix this we need to provide an fsid or uuid for these pseudo-root
> exports. This patch does that by creating an RFC-4122 V5 compatible
> UUID based on an arbitrary seed and the path to the export.
> To check if an export needs a uuid, text_export() is moved from exportfs
> to libexport.a, modified slightly and renamed to export_test().
> Signed-off-by: NeilBrown <[email protected]>
Reported-by: Petr Vorel <[email protected]>
Reviewed-by: Petr Vorel <[email protected]>
Tested-by: Petr Vorel <[email protected]>
LGTM, thanks for fixing it.
...
> --- a/support/include/exportfs.h
> +++ b/support/include/exportfs.h
> @@ -173,5 +173,6 @@ struct export_features {
> struct export_features *get_export_features(void);
> void fix_pseudoflavor_flags(struct exportent *ep);
> char *exportent_realpath(struct exportent *eep);
> +int export_test(struct exportent *eep, int with_fsid);
> #endif /* EXPORTFS_H */
> diff --git a/utils/exportd/Makefile.am b/utils/exportd/Makefile.am
> index eb521f15032d..c95bdee76d3f 100644
> --- a/utils/exportd/Makefile.am
> +++ b/utils/exportd/Makefile.am
> @@ -16,7 +16,7 @@ exportd_SOURCES = exportd.c
> exportd_LDADD = ../../support/export/libexport.a \
> ../../support/nfs/libnfs.la \
> ../../support/misc/libmisc.a \
> - $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD)
> + $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD) -luuid
I wonder if configure.ac should have a libuuid check as it's now mandatory, e.g.:
AC_CHECK_LIB([uuid], [uuid_parse], [LIBUUID="-luuid"], [AC_MSG_ERROR([libuuid needed])])
Kind regards,
Petr
Hey!
On 5/17/21 12:45 AM, NeilBrown wrote:
>
> Some filesystems cannot be exported without an fsid or uuid.
> tmpfs is the main example.
>
> When mountd (or exportd) creates nfsv4 pseudo-root exports for the path
> leading down to an export point it exports each directory without any
> fsid or uuid. If one of these directories is on tmp, that will fail.
>
> The net result is that exporting a subdirectory of a tmpfs filesystem
> will not work over NFSv4 as the parents within the filesystem cannot be
> exported. It will either fail, or fall-back to NFSv3 (depending on the
> version of the mount.nfs program).
>
> To fix this we need to provide an fsid or uuid for these pseudo-root
> exports. This patch does that by creating an RFC-4122 V5 compatible
> UUID based on an arbitrary seed and the path to the export.
>
> To check if an export needs a uuid, text_export() is moved from exportfs
> to libexport.a, modified slightly and renamed to export_test().
Well.... it appears you guys did not compile with the --with-systemd
config flag... Because if you did, you would have seen this compile error
in the systemd code:
/usr/bin/ld: ../support/nfs/.libs/libnfs.a(cacheio.o): in function `stat':
/usr/include/sys/stat.h:455: undefined reference to `etab'
collect2: error: ld returned 1 exit status
make[1]: *** [Makefile:560: nfs-server-generator] Error 1
make[1]: Leaving directory '/home/src/up/nfs-utils/systemd'
make: *** [Makefile:479: all-recursive] Error 1
It turns out that moving export_test() into libexport.a
requires any binary linking with libexport.a to have a
global definition of struct state_paths etab;
The reason is that export_test() calls qword_add(). Now, qword_add()
does not use etab, but the file qword_add() lives in is
cacheio.c, which does have an extern struct state_paths etab,
which is why libnfs.a(cacheio.o) is mentioned in
the error. At least that is what I *think* is going on...
The extern came from commit a15bd94.
Now the workaround is to simply define a
struct state_paths etab; in nfs-server-generator.c,
which will not be used, at least by the systemd code.
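i.e. roughly this in systemd/nfs-server-generator.c (a sketch of the workaround
described above, not a tested patch):

/* Never used by the generator itself; only here to satisfy the etab
 * reference pulled in via libexport.a / libnfs.a(cacheio.o). */
struct state_paths etab;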
Now, is that something we want to continue doing... making any
binary that links with libexport.a define a global etab?
It seems a little messy, but the interface is not documented,
and the alternative, moving a bunch of code around, seems a lot
messier than simply adding one definition.
Other than not compiling... things look good! ;-)
Thoughts?
steved.
>
> Signed-off-by: NeilBrown <[email protected]>
> ---
>
> This version contains Chuck's suggestion for improving the uuid, and
> general clean-up.
>
> support/export/cache.c | 3 ++-
> support/export/export.c | 29 +++++++++++++++++++++++++++++
> support/export/v4root.c | 23 ++++++++++++++++++++++-
> support/include/exportfs.h | 1 +
> utils/exportd/Makefile.am | 2 +-
> utils/exportfs/exportfs.c | 38 +++-----------------------------------
> utils/mountd/Makefile.am | 2 +-
> 7 files changed, 59 insertions(+), 39 deletions(-)
>
> diff --git a/support/export/cache.c b/support/export/cache.c
> index 3e4f53c0a32e..a5823e92e9f2 100644
> --- a/support/export/cache.c
> +++ b/support/export/cache.c
> @@ -981,7 +981,8 @@ static int dump_to_cache(int f, char *buf, int blen, char *domain,
> write_secinfo(&bp, &blen, exp, flag_mask);
> if (exp->e_uuid == NULL || different_fs) {
> char u[16];
> - if (uuid_by_path(path, 0, 16, u)) {
> + if ((exp->e_flags & flag_mask & NFSEXP_FSID) == 0 &&
> + uuid_by_path(path, 0, 16, u)) {
> qword_add(&bp, &blen, "uuid");
> qword_addhex(&bp, &blen, u, 16);
> }
> diff --git a/support/export/export.c b/support/export/export.c
> index c753f68e4d63..03390dfc1de8 100644
> --- a/support/export/export.c
> +++ b/support/export/export.c
> @@ -10,9 +10,11 @@
> #include <config.h>
> #endif
>
> +#include <unistd.h>
> #include <string.h>
> #include <sys/types.h>
> #include <sys/param.h>
> +#include <fcntl.h>
> #include <netinet/in.h>
> #include <limits.h>
> #include <stdlib.h>
> @@ -420,3 +422,30 @@ export_hash(char *str)
>
> return num % HASH_TABLE_SIZE;
> }
> +
> +int export_test(struct exportent *eep, int with_fsid)
> +{
> + char *path = eep->e_path;
> + int flags = eep->e_flags | (with_fsid ? NFSEXP_FSID : 0);
> + /* beside max path, buf size should take protocol str into account */
> + char buf[NFS_MAXPATHLEN+1+64] = { 0 };
> + char *bp = buf;
> + int len = sizeof(buf);
> + int fd, n;
> +
> + n = snprintf(buf, len, "-test-client- ");
> + bp += n;
> + len -= n;
> + qword_add(&bp, &len, path);
> + if (len < 1)
> + return 0;
> + snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
> + fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
> + if (fd < 0)
> + return 0;
> + n = nfsd_path_write(fd, buf, strlen(buf));
> + close(fd);
> + if (n < 0)
> + return 0;
> + return 1;
> +}
> diff --git a/support/export/v4root.c b/support/export/v4root.c
> index 3654bd7c10c0..c12a7d8562b2 100644
> --- a/support/export/v4root.c
> +++ b/support/export/v4root.c
> @@ -20,6 +20,7 @@
>
> #include <unistd.h>
> #include <errno.h>
> +#include <uuid/uuid.h>
>
> #include "xlog.h"
> #include "exportfs.h"
> @@ -89,11 +90,31 @@ v4root_create(char *path, nfs_export *export)
> strncpy(eep.e_path, path, sizeof(eep.e_path)-1);
> if (strcmp(path, "/") != 0)
> eep.e_flags &= ~NFSEXP_FSID;
> +
> + if (strcmp(path, "/") != 0 &&
> + !export_test(&eep, 0)) {
> + /* Need a uuid - base it on path using a fixed seed that
> + * was generated randomly.
> + */
> + const char seed_s[] = "39c6b5c1-3f24-4f4e-977c-7fe6546b8a25";
> + uuid_t seed, uuid;
> + char uuid_s[UUID_STR_LEN];
> + unsigned int i, j;
> +
> + uuid_parse(seed_s, seed);
> + uuid_generate_sha1(uuid, seed, path, strlen(path));
> + uuid_unparse_upper(uuid, uuid_s);
> + /* strip hyhens */
> + for (i = j = 0; uuid_s[i]; i++)
> + if (uuid_s[i] != '-')
> + uuid_s[j++] = uuid_s[i];
> + eep.e_uuid = uuid_s;
> + }
> set_pseudofs_security(&eep);
> exp = export_create(&eep, 0);
> if (exp == NULL)
> return NULL;
> - xlog(D_CALL, "v4root_create: path '%s' flags 0x%x",
> + xlog(D_CALL, "v4root_create: path '%s' flags 0x%x",
> exp->m_export.e_path, exp->m_export.e_flags);
> return &exp->m_export;
> }
> diff --git a/support/include/exportfs.h b/support/include/exportfs.h
> index 81d137210862..7c1b74537186 100644
> --- a/support/include/exportfs.h
> +++ b/support/include/exportfs.h
> @@ -173,5 +173,6 @@ struct export_features {
> struct export_features *get_export_features(void);
> void fix_pseudoflavor_flags(struct exportent *ep);
> char *exportent_realpath(struct exportent *eep);
> +int export_test(struct exportent *eep, int with_fsid);
>
> #endif /* EXPORTFS_H */
> diff --git a/utils/exportd/Makefile.am b/utils/exportd/Makefile.am
> index eb521f15032d..c95bdee76d3f 100644
> --- a/utils/exportd/Makefile.am
> +++ b/utils/exportd/Makefile.am
> @@ -16,7 +16,7 @@ exportd_SOURCES = exportd.c
> exportd_LDADD = ../../support/export/libexport.a \
> ../../support/nfs/libnfs.la \
> ../../support/misc/libmisc.a \
> - $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD)
> + $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD) -luuid
>
> exportd_CPPFLAGS = $(AM_CPPFLAGS) $(CPPFLAGS) \
> -I$(top_srcdir)/support/export
> diff --git a/utils/exportfs/exportfs.c b/utils/exportfs/exportfs.c
> index 25d757d8b4b4..bc76aaaf8714 100644
> --- a/utils/exportfs/exportfs.c
> +++ b/utils/exportfs/exportfs.c
> @@ -54,11 +54,6 @@ static int _lockfd = -1;
>
> struct state_paths etab;
>
> -static ssize_t exportfs_write(int fd, const char *buf, size_t len)
> -{
> - return nfsd_path_write(fd, buf, len);
> -}
> -
> /*
> * If we aren't careful, changes made by exportfs can be lost
> * when multiple exports process run at once:
> @@ -510,33 +505,6 @@ static int can_test(void)
> return 1;
> }
>
> -static int test_export(nfs_export *exp, int with_fsid)
> -{
> - char *path = exp->m_export.e_path;
> - int flags = exp->m_export.e_flags | (with_fsid ? NFSEXP_FSID : 0);
> - /* beside max path, buf size should take protocol str into account */
> - char buf[NFS_MAXPATHLEN+1+64] = { 0 };
> - char *bp = buf;
> - int len = sizeof(buf);
> - int fd, n;
> -
> - n = snprintf(buf, len, "-test-client- ");
> - bp += n;
> - len -= n;
> - qword_add(&bp, &len, path);
> - if (len < 1)
> - return 0;
> - snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
> - fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
> - if (fd < 0)
> - return 0;
> - n = exportfs_write(fd, buf, strlen(buf));
> - close(fd);
> - if (n < 0)
> - return 0;
> - return 1;
> -}
> -
> static void
> validate_export(nfs_export *exp)
> {
> @@ -568,12 +536,12 @@ validate_export(nfs_export *exp)
>
> if ((exp->m_export.e_flags & NFSEXP_FSID) || exp->m_export.e_uuid ||
> fs_has_fsid) {
> - if ( !test_export(exp, 1)) {
> + if ( !export_test(&exp->m_export, 1)) {
> xlog(L_ERROR, "%s does not support NFS export", path);
> return;
> }
> - } else if ( !test_export(exp, 0)) {
> - if (test_export(exp, 1))
> + } else if ( !export_test(&exp->m_export, 0)) {
> + if (export_test(&exp->m_export, 1))
> xlog(L_ERROR, "%s requires fsid= for NFS export", path);
> else
> xlog(L_ERROR, "%s does not support NFS export", path);
> diff --git a/utils/mountd/Makefile.am b/utils/mountd/Makefile.am
> index 859f28ecd6f3..13b25c90f06e 100644
> --- a/utils/mountd/Makefile.am
> +++ b/utils/mountd/Makefile.am
> @@ -18,7 +18,7 @@ mountd_LDADD = ../../support/export/libexport.a \
> ../../support/nfs/libnfs.la \
> ../../support/misc/libmisc.a \
> $(OPTLIBS) \
> - $(LIBBSD) $(LIBWRAP) $(LIBNSL) $(LIBBLKID) $(LIBTIRPC) \
> + $(LIBBSD) $(LIBWRAP) $(LIBNSL) $(LIBBLKID) -luuid $(LIBTIRPC) \
> $(LIBPTHREAD)
> mountd_CPPFLAGS = $(AM_CPPFLAGS) $(CPPFLAGS) \
> -I$(top_builddir)/support/include \
>
Hi Steve, all,
> Hey!
> On 5/17/21 12:45 AM, NeilBrown wrote:
> > Some filesystems cannot be exported without an fsid or uuid.
> > tmpfs is the main example.
> > When mountd (or exportd) creates nfsv4 pseudo-root exports for the path
> > leading down to an export point it exports each directory without any
> > fsid or uuid. If one of these directories is on tmpfs, that will fail.
> > The net result is that exporting a subdirectory of a tmpfs filesystem
> > will not work over NFSv4 as the parents within the filesystem cannot be
> > exported. It will either fail, or fall back to NFSv3 (depending on the
> > version of the mount.nfs program).
> > To fix this we need to provide an fsid or uuid for these pseudo-root
> > exports. This patch does that by creating an RFC-4122 V5 compatible
> > UUID based on an arbitrary seed and the path to the export.
> > To check if an export needs a uuid, test_export() is moved from exportfs
> > to libexport.a, modified slightly and renamed to export_test().
> Well.... it appears you guys did not compile with the --with-systemd
> config flag... Because if you did you would have seen this compile error
> in the systemd code:
You're right, I didn't :(.
> /usr/bin/ld: ../support/nfs/.libs/libnfs.a(cacheio.o): in function `stat':
> /usr/include/sys/stat.h:455: undefined reference to `etab'
> collect2: error: ld returned 1 exit status
> make[1]: *** [Makefile:560: nfs-server-generator] Error 1
> make[1]: Leaving directory '/home/src/up/nfs-utils/systemd'
> make: *** [Makefile:479: all-recursive] Error 1
> It turns out that moving export_test() into libexport.a
> is causing any binary linking with libexport.a to need a
> global definition of struct state_paths etab;
> The reason is export_test() calls qword_add(). Now qword_add()
> does not use an etab, but the file qword_add() lives in is
> cacheio.c, which does have an extern struct state_paths etab,
> which is the reason libnfs.a(cacheio.o) is mentioned in
> the error. At least that is what I *think* is going on...
> The extern came from commit a15bd94.
> Now the workaround is to simply define a
> struct state_paths etab; in nfs-server-generator.c,
> which will not be used, at least by the systemd code.
> Now is that something we want to continue doing... making any
> binaries linking with libexport.a define a global etab?
> It seems a little messy, but the interface is not documented
> and the alternative, moving a bunch of code around, seems a lot
> messier than simply adding one definition.
+1
Kind regards,
Petr
> Other than not compiling... things look good! ;-)
> Thoughts?
> steved.
...
On Fri, 21 May 2021, Steve Dickson wrote:
> Well.... it appears you guys did not compile with the --with-systemd
> config flag... Because if you did you would have seen this compile error
> in the systemd code:
>
> /usr/bin/ld: ../support/nfs/.libs/libnfs.a(cacheio.o): in function `stat':
> /usr/include/sys/stat.h:455: undefined reference to `etab'
> collect2: error: ld returned 1 exit status
> make[1]: *** [Makefile:560: nfs-server-generator] Error 1
> make[1]: Leaving directory '/home/src/up/nfs-utils/systemd'
> make: *** [Makefile:479: all-recursive] Error 1
>
> It turns out that moving export_test() into libexport.a
> is causing any binary linking with libexport.a to need a
> global definition of struct state_paths etab;
>
> The reason is export_test() calls qword_add(). Now qword_add()
> does not use an etab, but the file qword_add() lives in is
> cacheio.c, which does have an extern struct state_paths etab,
> which is the reason libnfs.a(cacheio.o) is mentioned in
> the error. At least that is what I *think* is going on...
> The extern came from commit a15bd94.
Thanks for finding and analysing that! I'd rather fix this "properly" so
we don't add more pointless declarations of "etab". We already have one in
svcgssd.c.
I have two patches which make this problem go away. I'll post them as a
followup.
Thanks,
NeilBrown
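For readers less familiar with static-archive linking, the failure Steve
describes can be reproduced with a toy example; all names below are made
up, only the linking behaviour mirrors cacheio.o and etab:

  /* libdemo.c -- compiled into a static archive, libdemo.a */
  extern int etab_like;            /* stands in for 'extern struct state_paths etab' */

  void flush_like(void)            /* like cache_flush(): really uses the global */
  {
          etab_like++;
  }

  void qword_add_like(char *buf)   /* like qword_add(): never touches the global */
  {
          buf[0] = 'x';
  }

  /* generator.c -- only ever calls qword_add_like() */
  void qword_add_like(char *buf);

  int main(void)
  {
          char buf[2] = "";

          qword_add_like(buf);
          return 0;
  }

Linking generator.c against libdemo.a pulls in the whole libdemo.o object
to satisfy qword_add_like(), and that object still carries the unresolved
reference from flush_like(), so the link fails with an undefined reference
to etab_like even though the program never uses it -- exactly the
situation nfs-server-generator ran into.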
Since v4.17 the timestamp written to 'flush' is ignored,
so there isn't much point choosing too precisely.
For kernels since v4.3-rc3-13-g778620364ef5 it is safe
to write 1 second beyond the current time.
For earlier kernels, nothing is really safe (even the current
behaviour), but writing one second beyond the current time isn't too bad
in the unlikely case that people use a new nfs-utils on a 5-year-old
kernel.
This removes the dependency of libnfs.a on 'etab' being declared,
so svcgssd no longer needs to declare it.
Signed-off-by: NeilBrown <[email protected]>
---
support/export/auth.c | 2 +-
support/include/nfslib.h | 2 +-
support/nfs/cacheio.c | 17 ++++++++---------
utils/exportfs/exportfs.c | 4 ++--
utils/gssd/svcgssd.c | 1 -
5 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/support/export/auth.c b/support/export/auth.c
index cea376300d01..17bdfc83748e 100644
--- a/support/export/auth.c
+++ b/support/export/auth.c
@@ -80,7 +80,7 @@ check_useipaddr(void)
use_ipaddr = 0;
if (use_ipaddr != old_use_ipaddr)
- cache_flush(1);
+ cache_flush();
}
unsigned int
diff --git a/support/include/nfslib.h b/support/include/nfslib.h
index 84d8270b330f..58eeb3382fcc 100644
--- a/support/include/nfslib.h
+++ b/support/include/nfslib.h
@@ -132,7 +132,7 @@ int wildmat(char *text, char *pattern);
int qword_get(char **bpp, char *dest, int bufsize);
int qword_get_int(char **bpp, int *anint);
-void cache_flush(int force);
+void cache_flush(void);
void qword_add(char **bpp, int *lp, char *str);
void qword_addhex(char **bpp, int *lp, char *buf, int blen);
void qword_addint(char **bpp, int *lp, int n);
diff --git a/support/nfs/cacheio.c b/support/nfs/cacheio.c
index 70ead94d64f0..73f4be4af9f9 100644
--- a/support/nfs/cacheio.c
+++ b/support/nfs/cacheio.c
@@ -32,8 +32,6 @@
#include <time.h>
#include <errno.h>
-extern struct state_paths etab;
-
void qword_add(char **bpp, int *lp, char *str)
{
char *bp = *bpp;
@@ -213,7 +211,7 @@ int qword_get_uint(char **bpp, unsigned int *anint)
*/
void
-cache_flush(int force)
+cache_flush(void)
{
struct stat stb;
int c;
@@ -234,12 +232,13 @@ cache_flush(int force)
NULL
};
now = time(0);
- if (force ||
- stat(etab.statefn, &stb) != 0 ||
- stb.st_mtime > now)
- stb.st_mtime = time(0);
-
- sprintf(stime, "%" PRId64 "\n", (int64_t)stb.st_mtime);
+
+ /* Since v4.16-rc2-3-g3b68e6ee3cbd the timestamp written is ignored.
+ * It is safest always to flush caches if there is any doubt.
+ * For earlier kernels, writing the next second from now is
+ * the best we can do.
+ */
+ sprintf(stime, "%" PRId64 "\n", (int64_t)now+1);
for (c=0; cachelist[c]; c++) {
int fd;
sprintf(path, "/proc/net/rpc/%s/flush", cachelist[c]);
diff --git a/utils/exportfs/exportfs.c b/utils/exportfs/exportfs.c
index bc76aaaf8714..d586296796a9 100644
--- a/utils/exportfs/exportfs.c
+++ b/utils/exportfs/exportfs.c
@@ -188,7 +188,7 @@ main(int argc, char **argv)
if (optind == argc && ! f_all) {
if (force_flush) {
- cache_flush(1);
+ cache_flush();
free_state_path_names(&etab);
return 0;
} else {
@@ -235,7 +235,7 @@ main(int argc, char **argv)
unexportfs(argv[i], f_verbose);
}
xtab_export_write();
- cache_flush(force_flush);
+ cache_flush();
free_state_path_names(&etab);
export_freeall();
diff --git a/utils/gssd/svcgssd.c b/utils/gssd/svcgssd.c
index 3ab2100b66bb..881207b3e8a2 100644
--- a/utils/gssd/svcgssd.c
+++ b/utils/gssd/svcgssd.c
@@ -67,7 +67,6 @@
#include "misc.h"
#include "svcgssd_krb5.h"
-struct state_paths etab; /* from cacheio.c */
static bool signal_received = false;
static struct event_base *evbase = NULL;
static int nullrpc_fd = -1;
--
2.31.1
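To see the effect of the cache_flush() change in isolation, here is a
minimal standalone sketch of the write it now performs, using the
auth.unix.ip cache purely as an example (error handling trimmed; this is
not the nfs-utils code itself):

  #include <stdio.h>
  #include <time.h>
  #include <inttypes.h>

  int main(void)
  {
          /* Kernels since v4.17 ignore the written value; older kernels
           * treat it as an expiry time, so "now + 1" covers everything
           * cached so far. */
          FILE *f = fopen("/proc/net/rpc/auth.unix.ip/flush", "w");

          if (!f)
                  return 1;
          fprintf(f, "%" PRId64 "\n", (int64_t)time(NULL) + 1);
          fclose(f);
          return 0;
  }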
There are two global "struct state_paths" structures: etab and rmtab.
They are currently needed by some library code so any program which is
linked with that library code needs to declare the structures even if it
doesn't use the functionality. This is clumsy and error-prone.
Instead: have the library define the structure and put the declaration
in a header file. Now programs only need to know about these structures
if they use the functionality.
'rmtab' is now defined in libnfs.a (rmtab.c). 'etab' is defined in
libexport.a (xtab.c).
Signed-off-by: NeilBrown <[email protected]>
---
support/export/auth.c | 2 --
support/export/xtab.c | 2 +-
support/include/exportfs.h | 1 +
support/include/nfslib.h | 1 +
support/nfs/rmtab.c | 2 +-
utils/exportd/exportd.c | 2 --
utils/exportfs/exportfs.c | 2 --
utils/mountd/mountd.c | 3 ---
utils/mountd/rmtab.c | 2 --
9 files changed, 4 insertions(+), 13 deletions(-)
diff --git a/support/export/auth.c b/support/export/auth.c
index 17bdfc83748e..03ce4b8a0e1e 100644
--- a/support/export/auth.c
+++ b/support/export/auth.c
@@ -41,8 +41,6 @@ static nfs_client my_client;
extern int use_ipaddr;
-extern struct state_paths etab;
-
/*
void
auth_init(void)
diff --git a/support/export/xtab.c b/support/export/xtab.c
index 00b25eaac07d..c888a80aa741 100644
--- a/support/export/xtab.c
+++ b/support/export/xtab.c
@@ -27,7 +27,7 @@
#include "misc.h"
static char state_base_dirname[PATH_MAX] = NFS_STATEDIR;
-extern struct state_paths etab;
+struct state_paths etab;
int v4root_needed;
static void cond_rename(char *newfile, char *oldfile);
diff --git a/support/include/exportfs.h b/support/include/exportfs.h
index 7c1b74537186..9edf0d04732f 100644
--- a/support/include/exportfs.h
+++ b/support/include/exportfs.h
@@ -145,6 +145,7 @@ nfs_export * export_create(struct exportent *, int canonical);
void exportent_release(struct exportent *);
void export_freeall(void);
+extern struct state_paths etab;
int xtab_export_read(void);
int xtab_export_write(void);
diff --git a/support/include/nfslib.h b/support/include/nfslib.h
index 58eeb3382fcc..6faba71bf0cd 100644
--- a/support/include/nfslib.h
+++ b/support/include/nfslib.h
@@ -106,6 +106,7 @@ void dupexportent(struct exportent *dst,
struct exportent *src);
int updateexportent(struct exportent *eep, char *options);
+extern struct state_paths rmtab;
int setrmtabent(char *type);
struct rmtabent * getrmtabent(int log, long *pos);
void putrmtabent(struct rmtabent *xep, long *pos);
diff --git a/support/nfs/rmtab.c b/support/nfs/rmtab.c
index 9f03167ddbe1..154b26fa3402 100644
--- a/support/nfs/rmtab.c
+++ b/support/nfs/rmtab.c
@@ -33,7 +33,7 @@
static FILE *rmfp = NULL;
-extern struct state_paths rmtab;
+struct state_paths rmtab;
int
setrmtabent(char *type)
diff --git a/utils/exportd/exportd.c b/utils/exportd/exportd.c
index f36f51d215b5..2dd12cb6015b 100644
--- a/utils/exportd/exportd.c
+++ b/utils/exportd/exportd.c
@@ -25,8 +25,6 @@
extern void my_svc_run(void);
-struct state_paths etab;
-
/* Number of mountd threads to start. Default is 1 and
* that's probably enough unless you need hundreds of
* clients to be able to mount at once. */
diff --git a/utils/exportfs/exportfs.c b/utils/exportfs/exportfs.c
index d586296796a9..6ba615d1443d 100644
--- a/utils/exportfs/exportfs.c
+++ b/utils/exportfs/exportfs.c
@@ -52,8 +52,6 @@ static void release_lockfile(void);
static const char *lockfile = EXP_LOCKFILE;
static int _lockfd = -1;
-struct state_paths etab;
-
/*
* If we aren't careful, changes made by exportfs can be lost
* when multiple exports process run at once:
diff --git a/utils/mountd/mountd.c b/utils/mountd/mountd.c
index 39e85fd53a87..bcf749fabbb3 100644
--- a/utils/mountd/mountd.c
+++ b/utils/mountd/mountd.c
@@ -43,9 +43,6 @@ int reverse_resolve = 0;
int manage_gids;
int use_ipaddr = -1;
-struct state_paths etab;
-struct state_paths rmtab;
-
/* PRC: a high-availability callout program can be specified with -H
* When this is done, the program will receive callouts whenever clients
* send mount or unmount requests -- the callout is not needed for 2.6 kernel */
diff --git a/utils/mountd/rmtab.c b/utils/mountd/rmtab.c
index c8962439ddd2..2da97615ca0f 100644
--- a/utils/mountd/rmtab.c
+++ b/utils/mountd/rmtab.c
@@ -28,8 +28,6 @@
extern int reverse_resolve;
-extern struct state_paths rmtab;
-
/* If new path is a link do not destroy it but place the
* file where the link points.
*/
--
2.31.1
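The pattern being applied is the usual one for a global shared across a
library boundary: the header carries the extern declaration, the library
carries the single definition, and a program sees the global only if it
includes the header and actually uses it. A stripped-down illustration
(the struct is cut to one field here for brevity):

  /* paths.h -- shipped with the library */
  struct state_paths { char *statefn; };
  extern struct state_paths rmtab;        /* declaration only */

  /* rmtab.c -- compiled into the library */
  #include "paths.h"
  struct state_paths rmtab;               /* the one definition */

  /* prog.c -- a program that actually uses it */
  #include <stdio.h>
  #include "paths.h"

  int main(void)
  {
          rmtab.statefn = "/var/lib/nfs/rmtab";
          printf("%s\n", rmtab.statefn);
          return 0;
  }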
Hi Neil,
> Since v4.17 the timestamp written to 'flush' is ignored,
> so there isn't much point choosing too precisely.
> For kernels since v4.3-rc3-13-g778620364ef5 it is safe
> to write 1 second beyond the current time.
> For earlier kernels, nothing is really safe (even the current
> behaviour), but writing one second beyond the current time isn't too bad
> in the unlikely case that people use a new nfs-utils on a 5-year-old
> kernel.
> This removes the dependency of libnfs.a on 'etab' being declared,
> so svcgssd no longer needs to declare it.
Reviewed-by: Petr Vorel <[email protected]>
Kind regards,
Petr
Hi Neil,
> There are two global "struct state_paths" structures: etab and rmtab.
> They are currently needed by some library code so any program which is
> linked with that library code needs to declare the structures even if it
> doesn't use the functionality. This is clumsy and error-prone.
> Instead: have the library define the structure and put the declaration
> in a header file. Now programs only need to know about these structures
> if they use the functionality.
> 'rmtab' is now defined in libnfs.a (rmtab.c). 'etab' is defined in
> libexport.a (xtab.c).
Reviewed-by: Petr Vorel <[email protected]>
Nice cleanup!
Kind regards,
Petr
On 5/17/21 12:45 AM, NeilBrown wrote:
>
> Some filesystems cannot be exported without an fsid or uuid.
> tmpfs is the main example.
>
> When mountd (or exportd) creates nfsv4 pseudo-root exports for the path
> leading down to an export point it exports each directory without any
> fsid or uuid. If one of these directories is on tmpfs, that will fail.
>
> The net result is that exporting a subdirectory of a tmpfs filesystem
> will not work over NFSv4 as the parents within the filesystem cannot be
> exported. It will either fail, or fall-back to NFSv3 (depending on the
> version of the mount.nfs program).
>
> To fix this we need to provide an fsid or uuid for these pseudo-root
> exports. This patch does that by creating an RFC-4122 V5 compatible
> UUID based on an arbitrary seed and the path to the export.
>
> To check if an export needs a uuid, test_export() is moved from exportfs
> to libexport.a, modified slightly and renamed to export_test().
>
> Signed-off-by: NeilBrown <[email protected]>
Committed... (tag: nfs-utils-2-5-4-rc4)
Thanks!
steved.
> ---
>
> This version contains Chuck's suggestion for improving the uuid, and
> general clean-up.
>
> support/export/cache.c | 3 ++-
> support/export/export.c | 29 +++++++++++++++++++++++++++++
> support/export/v4root.c | 23 ++++++++++++++++++++++-
> support/include/exportfs.h | 1 +
> utils/exportd/Makefile.am | 2 +-
> utils/exportfs/exportfs.c | 38 +++-----------------------------------
> utils/mountd/Makefile.am | 2 +-
> 7 files changed, 59 insertions(+), 39 deletions(-)
>
> diff --git a/support/export/cache.c b/support/export/cache.c
> index 3e4f53c0a32e..a5823e92e9f2 100644
> --- a/support/export/cache.c
> +++ b/support/export/cache.c
> @@ -981,7 +981,8 @@ static int dump_to_cache(int f, char *buf, int blen, char *domain,
> write_secinfo(&bp, &blen, exp, flag_mask);
> if (exp->e_uuid == NULL || different_fs) {
> char u[16];
> - if (uuid_by_path(path, 0, 16, u)) {
> + if ((exp->e_flags & flag_mask & NFSEXP_FSID) == 0 &&
> + uuid_by_path(path, 0, 16, u)) {
> qword_add(&bp, &blen, "uuid");
> qword_addhex(&bp, &blen, u, 16);
> }
> diff --git a/support/export/export.c b/support/export/export.c
> index c753f68e4d63..03390dfc1de8 100644
> --- a/support/export/export.c
> +++ b/support/export/export.c
> @@ -10,9 +10,11 @@
> #include <config.h>
> #endif
>
> +#include <unistd.h>
> #include <string.h>
> #include <sys/types.h>
> #include <sys/param.h>
> +#include <fcntl.h>
> #include <netinet/in.h>
> #include <limits.h>
> #include <stdlib.h>
> @@ -420,3 +422,30 @@ export_hash(char *str)
>
> return num % HASH_TABLE_SIZE;
> }
> +
> +int export_test(struct exportent *eep, int with_fsid)
> +{
> + char *path = eep->e_path;
> + int flags = eep->e_flags | (with_fsid ? NFSEXP_FSID : 0);
> + /* beside max path, buf size should take protocol str into account */
> + char buf[NFS_MAXPATHLEN+1+64] = { 0 };
> + char *bp = buf;
> + int len = sizeof(buf);
> + int fd, n;
> +
> + n = snprintf(buf, len, "-test-client- ");
> + bp += n;
> + len -= n;
> + qword_add(&bp, &len, path);
> + if (len < 1)
> + return 0;
> + snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
> + fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
> + if (fd < 0)
> + return 0;
> + n = nfsd_path_write(fd, buf, strlen(buf));
> + close(fd);
> + if (n < 0)
> + return 0;
> + return 1;
> +}
> diff --git a/support/export/v4root.c b/support/export/v4root.c
> index 3654bd7c10c0..c12a7d8562b2 100644
> --- a/support/export/v4root.c
> +++ b/support/export/v4root.c
> @@ -20,6 +20,7 @@
>
> #include <unistd.h>
> #include <errno.h>
> +#include <uuid/uuid.h>
>
> #include "xlog.h"
> #include "exportfs.h"
> @@ -89,11 +90,31 @@ v4root_create(char *path, nfs_export *export)
> strncpy(eep.e_path, path, sizeof(eep.e_path)-1);
> if (strcmp(path, "/") != 0)
> eep.e_flags &= ~NFSEXP_FSID;
> +
> + if (strcmp(path, "/") != 0 &&
> + !export_test(&eep, 0)) {
> + /* Need a uuid - base it on path using a fixed seed that
> + * was generated randomly.
> + */
> + const char seed_s[] = "39c6b5c1-3f24-4f4e-977c-7fe6546b8a25";
> + uuid_t seed, uuid;
> + char uuid_s[UUID_STR_LEN];
> + unsigned int i, j;
> +
> + uuid_parse(seed_s, seed);
> + uuid_generate_sha1(uuid, seed, path, strlen(path));
> + uuid_unparse_upper(uuid, uuid_s);
> + /* strip hyhens */
> + for (i = j = 0; uuid_s[i]; i++)
> + if (uuid_s[i] != '-')
> + uuid_s[j++] = uuid_s[i];
> + eep.e_uuid = uuid_s;
> + }
> set_pseudofs_security(&eep);
> exp = export_create(&eep, 0);
> if (exp == NULL)
> return NULL;
> - xlog(D_CALL, "v4root_create: path '%s' flags 0x%x",
> + xlog(D_CALL, "v4root_create: path '%s' flags 0x%x",
> exp->m_export.e_path, exp->m_export.e_flags);
> return &exp->m_export;
> }
> diff --git a/support/include/exportfs.h b/support/include/exportfs.h
> index 81d137210862..7c1b74537186 100644
> --- a/support/include/exportfs.h
> +++ b/support/include/exportfs.h
> @@ -173,5 +173,6 @@ struct export_features {
> struct export_features *get_export_features(void);
> void fix_pseudoflavor_flags(struct exportent *ep);
> char *exportent_realpath(struct exportent *eep);
> +int export_test(struct exportent *eep, int with_fsid);
>
> #endif /* EXPORTFS_H */
> diff --git a/utils/exportd/Makefile.am b/utils/exportd/Makefile.am
> index eb521f15032d..c95bdee76d3f 100644
> --- a/utils/exportd/Makefile.am
> +++ b/utils/exportd/Makefile.am
> @@ -16,7 +16,7 @@ exportd_SOURCES = exportd.c
> exportd_LDADD = ../../support/export/libexport.a \
> ../../support/nfs/libnfs.la \
> ../../support/misc/libmisc.a \
> - $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD)
> + $(OPTLIBS) $(LIBBLKID) $(LIBPTHREAD) -luuid
>
> exportd_CPPFLAGS = $(AM_CPPFLAGS) $(CPPFLAGS) \
> -I$(top_srcdir)/support/export
> diff --git a/utils/exportfs/exportfs.c b/utils/exportfs/exportfs.c
> index 25d757d8b4b4..bc76aaaf8714 100644
> --- a/utils/exportfs/exportfs.c
> +++ b/utils/exportfs/exportfs.c
> @@ -54,11 +54,6 @@ static int _lockfd = -1;
>
> struct state_paths etab;
>
> -static ssize_t exportfs_write(int fd, const char *buf, size_t len)
> -{
> - return nfsd_path_write(fd, buf, len);
> -}
> -
> /*
> * If we aren't careful, changes made by exportfs can be lost
> * when multiple exports process run at once:
> @@ -510,33 +505,6 @@ static int can_test(void)
> return 1;
> }
>
> -static int test_export(nfs_export *exp, int with_fsid)
> -{
> - char *path = exp->m_export.e_path;
> - int flags = exp->m_export.e_flags | (with_fsid ? NFSEXP_FSID : 0);
> - /* beside max path, buf size should take protocol str into account */
> - char buf[NFS_MAXPATHLEN+1+64] = { 0 };
> - char *bp = buf;
> - int len = sizeof(buf);
> - int fd, n;
> -
> - n = snprintf(buf, len, "-test-client- ");
> - bp += n;
> - len -= n;
> - qword_add(&bp, &len, path);
> - if (len < 1)
> - return 0;
> - snprintf(bp, len, " 3 %d 65534 65534 0\n", flags);
> - fd = open("/proc/net/rpc/nfsd.export/channel", O_WRONLY);
> - if (fd < 0)
> - return 0;
> - n = exportfs_write(fd, buf, strlen(buf));
> - close(fd);
> - if (n < 0)
> - return 0;
> - return 1;
> -}
> -
> static void
> validate_export(nfs_export *exp)
> {
> @@ -568,12 +536,12 @@ validate_export(nfs_export *exp)
>
> if ((exp->m_export.e_flags & NFSEXP_FSID) || exp->m_export.e_uuid ||
> fs_has_fsid) {
> - if ( !test_export(exp, 1)) {
> + if ( !export_test(&exp->m_export, 1)) {
> xlog(L_ERROR, "%s does not support NFS export", path);
> return;
> }
> - } else if ( !test_export(exp, 0)) {
> - if (test_export(exp, 1))
> + } else if ( !export_test(&exp->m_export, 0)) {
> + if (export_test(&exp->m_export, 1))
> xlog(L_ERROR, "%s requires fsid= for NFS export", path);
> else
> xlog(L_ERROR, "%s does not support NFS export", path);
> diff --git a/utils/mountd/Makefile.am b/utils/mountd/Makefile.am
> index 859f28ecd6f3..13b25c90f06e 100644
> --- a/utils/mountd/Makefile.am
> +++ b/utils/mountd/Makefile.am
> @@ -18,7 +18,7 @@ mountd_LDADD = ../../support/export/libexport.a \
> ../../support/nfs/libnfs.la \
> ../../support/misc/libmisc.a \
> $(OPTLIBS) \
> - $(LIBBSD) $(LIBWRAP) $(LIBNSL) $(LIBBLKID) $(LIBTIRPC) \
> + $(LIBBSD) $(LIBWRAP) $(LIBNSL) $(LIBBLKID) -luuid $(LIBTIRPC) \
> $(LIBPTHREAD)
> mountd_CPPFLAGS = $(AM_CPPFLAGS) $(CPPFLAGS) \
> -I$(top_builddir)/support/include \
>
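The uuid handling added to v4root_create() above boils down to libuuid's
name-based (SHA-1, RFC 4122 version 5) generator: the same seed and the
same path always produce the same uuid, so a pseudo-root export gets the
same uuid each time it is created. A stripped-down sketch, using a
made-up seed and path purely for illustration (build with -luuid):

  #include <stdio.h>
  #include <string.h>
  #include <uuid/uuid.h>

  int main(void)
  {
          const char seed_s[] = "12345678-1234-5678-1234-567812345678";
          const char *path = "/export/pseudo/dir";
          uuid_t seed, uuid;
          char uuid_s[UUID_STR_LEN];
          unsigned int i, j;

          uuid_parse(seed_s, seed);
          uuid_generate_sha1(uuid, seed, path, strlen(path));
          uuid_unparse_upper(uuid, uuid_s);

          /* strip the hyphens, as v4root_create() does before storing
           * the result in e_uuid */
          for (i = j = 0; uuid_s[i]; i++)
                  if (uuid_s[i] != '-')
                          uuid_s[j++] = uuid_s[i];
          uuid_s[j] = '\0';

          printf("%s\n", uuid_s);
          return 0;
  }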
On 5/20/21 9:41 PM, NeilBrown wrote:
>
> There are two global "struct state_paths" structures: etab and rmtab.
> They are currently needed by some library code so any program which is
> linked with that library code needs to declare the structures even if it
> doesn't use the functionality. This is clumsy and error-prone.
>
> Instead: have the library define the structure and put the declaration
> in a header file. Now programs only need to know about these structures
> if they use the functionality.
>
> 'rmtab' is now defined in libnfs.a (rmtab.c). 'etab' is defined in
> libexport.a (xtab.c).
>
> Signed-off-by: NeilBrown <[email protected]>
Committed... (tag: nfs-utils-2-5-4-rc4)
Nice work!! Thanks again!!!
steved.
> ---
> support/export/auth.c | 2 --
> support/export/xtab.c | 2 +-
> support/include/exportfs.h | 1 +
> support/include/nfslib.h | 1 +
> support/nfs/rmtab.c | 2 +-
> utils/exportd/exportd.c | 2 --
> utils/exportfs/exportfs.c | 2 --
> utils/mountd/mountd.c | 3 ---
> utils/mountd/rmtab.c | 2 --
> 9 files changed, 4 insertions(+), 13 deletions(-)
>
> diff --git a/support/export/auth.c b/support/export/auth.c
> index 17bdfc83748e..03ce4b8a0e1e 100644
> --- a/support/export/auth.c
> +++ b/support/export/auth.c
> @@ -41,8 +41,6 @@ static nfs_client my_client;
>
> extern int use_ipaddr;
>
> -extern struct state_paths etab;
> -
> /*
> void
> auth_init(void)
> diff --git a/support/export/xtab.c b/support/export/xtab.c
> index 00b25eaac07d..c888a80aa741 100644
> --- a/support/export/xtab.c
> +++ b/support/export/xtab.c
> @@ -27,7 +27,7 @@
> #include "misc.h"
>
> static char state_base_dirname[PATH_MAX] = NFS_STATEDIR;
> -extern struct state_paths etab;
> +struct state_paths etab;
>
> int v4root_needed;
> static void cond_rename(char *newfile, char *oldfile);
> diff --git a/support/include/exportfs.h b/support/include/exportfs.h
> index 7c1b74537186..9edf0d04732f 100644
> --- a/support/include/exportfs.h
> +++ b/support/include/exportfs.h
> @@ -145,6 +145,7 @@ nfs_export * export_create(struct exportent *, int canonical);
> void exportent_release(struct exportent *);
> void export_freeall(void);
>
> +extern struct state_paths etab;
> int xtab_export_read(void);
> int xtab_export_write(void);
>
> diff --git a/support/include/nfslib.h b/support/include/nfslib.h
> index 58eeb3382fcc..6faba71bf0cd 100644
> --- a/support/include/nfslib.h
> +++ b/support/include/nfslib.h
> @@ -106,6 +106,7 @@ void dupexportent(struct exportent *dst,
> struct exportent *src);
> int updateexportent(struct exportent *eep, char *options);
>
> +extern struct state_paths rmtab;
> int setrmtabent(char *type);
> struct rmtabent * getrmtabent(int log, long *pos);
> void putrmtabent(struct rmtabent *xep, long *pos);
> diff --git a/support/nfs/rmtab.c b/support/nfs/rmtab.c
> index 9f03167ddbe1..154b26fa3402 100644
> --- a/support/nfs/rmtab.c
> +++ b/support/nfs/rmtab.c
> @@ -33,7 +33,7 @@
>
> static FILE *rmfp = NULL;
>
> -extern struct state_paths rmtab;
> +struct state_paths rmtab;
>
> int
> setrmtabent(char *type)
> diff --git a/utils/exportd/exportd.c b/utils/exportd/exportd.c
> index f36f51d215b5..2dd12cb6015b 100644
> --- a/utils/exportd/exportd.c
> +++ b/utils/exportd/exportd.c
> @@ -25,8 +25,6 @@
>
> extern void my_svc_run(void);
>
> -struct state_paths etab;
> -
> /* Number of mountd threads to start. Default is 1 and
> * that's probably enough unless you need hundreds of
> * clients to be able to mount at once. */
> diff --git a/utils/exportfs/exportfs.c b/utils/exportfs/exportfs.c
> index d586296796a9..6ba615d1443d 100644
> --- a/utils/exportfs/exportfs.c
> +++ b/utils/exportfs/exportfs.c
> @@ -52,8 +52,6 @@ static void release_lockfile(void);
> static const char *lockfile = EXP_LOCKFILE;
> static int _lockfd = -1;
>
> -struct state_paths etab;
> -
> /*
> * If we aren't careful, changes made by exportfs can be lost
> * when multiple exports process run at once:
> diff --git a/utils/mountd/mountd.c b/utils/mountd/mountd.c
> index 39e85fd53a87..bcf749fabbb3 100644
> --- a/utils/mountd/mountd.c
> +++ b/utils/mountd/mountd.c
> @@ -43,9 +43,6 @@ int reverse_resolve = 0;
> int manage_gids;
> int use_ipaddr = -1;
>
> -struct state_paths etab;
> -struct state_paths rmtab;
> -
> /* PRC: a high-availability callout program can be specified with -H
> * When this is done, the program will receive callouts whenever clients
> * send mount or unmount requests -- the callout is not needed for 2.6 kernel */
> diff --git a/utils/mountd/rmtab.c b/utils/mountd/rmtab.c
> index c8962439ddd2..2da97615ca0f 100644
> --- a/utils/mountd/rmtab.c
> +++ b/utils/mountd/rmtab.c
> @@ -28,8 +28,6 @@
>
> extern int reverse_resolve;
>
> -extern struct state_paths rmtab;
> -
> /* If new path is a link do not destroy it but place the
> * file where the link points.
> */
>
On 5/20/21 9:40 PM, NeilBrown wrote:
>
> Since v4.17 the timestamp written to 'flush' is ignored,
> so there isn't much point choosing too precisely.
>
> For kernels since v4.3-rc3-13-g778620364ef5 it is safe
> to write 1 second beyond the current time.
>
> For earlier kernels, nothing is really safe (even the current
> behaviour), but writing one second beyond the current time isn't too bad
> in the unlikely case that people use a new nfs-utils on a 5-year-old
> kernel.
>
> This removes the dependency of libnfs.a on 'etab' being declared,
> so svcgssd no longer needs to declare it.
>
> Signed-off-by: NeilBrown <[email protected]>
Committed... (tag: nfs-utils-2-5-4-rc4)
steved.
> ---
> support/export/auth.c | 2 +-
> support/include/nfslib.h | 2 +-
> support/nfs/cacheio.c | 17 ++++++++---------
> utils/exportfs/exportfs.c | 4 ++--
> utils/gssd/svcgssd.c | 1 -
> 5 files changed, 12 insertions(+), 14 deletions(-)
>
> diff --git a/support/export/auth.c b/support/export/auth.c
> index cea376300d01..17bdfc83748e 100644
> --- a/support/export/auth.c
> +++ b/support/export/auth.c
> @@ -80,7 +80,7 @@ check_useipaddr(void)
> use_ipaddr = 0;
>
> if (use_ipaddr != old_use_ipaddr)
> - cache_flush(1);
> + cache_flush();
> }
>
> unsigned int
> diff --git a/support/include/nfslib.h b/support/include/nfslib.h
> index 84d8270b330f..58eeb3382fcc 100644
> --- a/support/include/nfslib.h
> +++ b/support/include/nfslib.h
> @@ -132,7 +132,7 @@ int wildmat(char *text, char *pattern);
>
> int qword_get(char **bpp, char *dest, int bufsize);
> int qword_get_int(char **bpp, int *anint);
> -void cache_flush(int force);
> +void cache_flush(void);
> void qword_add(char **bpp, int *lp, char *str);
> void qword_addhex(char **bpp, int *lp, char *buf, int blen);
> void qword_addint(char **bpp, int *lp, int n);
> diff --git a/support/nfs/cacheio.c b/support/nfs/cacheio.c
> index 70ead94d64f0..73f4be4af9f9 100644
> --- a/support/nfs/cacheio.c
> +++ b/support/nfs/cacheio.c
> @@ -32,8 +32,6 @@
> #include <time.h>
> #include <errno.h>
>
> -extern struct state_paths etab;
> -
> void qword_add(char **bpp, int *lp, char *str)
> {
> char *bp = *bpp;
> @@ -213,7 +211,7 @@ int qword_get_uint(char **bpp, unsigned int *anint)
> */
>
> void
> -cache_flush(int force)
> +cache_flush(void)
> {
> struct stat stb;
> int c;
> @@ -234,12 +232,13 @@ cache_flush(int force)
> NULL
> };
> now = time(0);
> - if (force ||
> - stat(etab.statefn, &stb) != 0 ||
> - stb.st_mtime > now)
> - stb.st_mtime = time(0);
> -
> - sprintf(stime, "%" PRId64 "\n", (int64_t)stb.st_mtime);
> +
> + /* Since v4.16-rc2-3-g3b68e6ee3cbd the timestamp written is ignored.
> + * It is safest always to flush caches if there is any doubt.
> + * For earlier kernels, writing the next second from now is
> + * the best we can do.
> + */
> + sprintf(stime, "%" PRId64 "\n", (int64_t)now+1);
> for (c=0; cachelist[c]; c++) {
> int fd;
> sprintf(path, "/proc/net/rpc/%s/flush", cachelist[c]);
> diff --git a/utils/exportfs/exportfs.c b/utils/exportfs/exportfs.c
> index bc76aaaf8714..d586296796a9 100644
> --- a/utils/exportfs/exportfs.c
> +++ b/utils/exportfs/exportfs.c
> @@ -188,7 +188,7 @@ main(int argc, char **argv)
>
> if (optind == argc && ! f_all) {
> if (force_flush) {
> - cache_flush(1);
> + cache_flush();
> free_state_path_names(&etab);
> return 0;
> } else {
> @@ -235,7 +235,7 @@ main(int argc, char **argv)
> unexportfs(argv[i], f_verbose);
> }
> xtab_export_write();
> - cache_flush(force_flush);
> + cache_flush();
> free_state_path_names(&etab);
> export_freeall();
>
> diff --git a/utils/gssd/svcgssd.c b/utils/gssd/svcgssd.c
> index 3ab2100b66bb..881207b3e8a2 100644
> --- a/utils/gssd/svcgssd.c
> +++ b/utils/gssd/svcgssd.c
> @@ -67,7 +67,6 @@
> #include "misc.h"
> #include "svcgssd_krb5.h"
>
> -struct state_paths etab; /* from cacheio.c */
> static bool signal_received = false;
> static struct event_base *evbase = NULL;
> static int nullrpc_fd = -1;
>