In mlx5_ib_init(), we call memalloc_noio_{save,restore} in a parenthetic
fashion when enabled by the module parameter force_noio.
This is done in order to conditionally enable mlx5_ib to work aligned with
I/O devices. Any work queued later on work-queues created during
module initialization will inherit the PF_MEMALLOC_{NOIO,NOFS}
flag(s), due to commit ("workqueue: Inherit NOIO and NOFS alloc
flags").
We do this in order to enable ULPs using the RDMA stack and the
mlx5_ib driver to be used as a network block I/O device. This is to
support a filesystem on top of a raw block device which uses said
ULP(s) and the RDMA stack as the network transport layer.
Under intense memory pressure, we get memory reclaims. Assume the
filesystem reclaims memory, goes to the raw block device, which calls
into the ULP in question, which calls the RDMA stack. Now, if regular
GFP_KERNEL allocations in ULP or the RDMA stack require reclaims to be
fulfilled, we end up in a circular dependency.
We break this circular dependency by:
1. Force all allocations in the ULP and the relevant RDMA stack to use
GFP_NOIO, by means of a parenthetic use of
memalloc_noio_{save,restore} on all relevant entry points.
2. Make sure work-queues inherit current->flags
wrt. PF_MEMALLOC_{NOIO,NOFS}, such that work executed on the
work-queue inherits the same flag(s).
Signed-off-by: Håkon Bugge <[email protected]>
---
drivers/infiniband/hw/mlx5/main.c | 22 ++++++++++++++++++----
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c2b557e642906..a424d518538ed 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -56,6 +56,10 @@ MODULE_AUTHOR("Eli Cohen <[email protected]>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
MODULE_LICENSE("Dual BSD/GPL");
+static bool mlx5_ib_force_noio;
+module_param_named(force_noio, mlx5_ib_force_noio, bool, 0444);
+MODULE_PARM_DESC(force_noio, "Force the use of GFP_NOIO (Y/N)");
+
struct mlx5_ib_event_work {
struct work_struct work;
union {
@@ -4489,16 +4493,23 @@ static struct auxiliary_driver mlx5r_driver = {
static int __init mlx5_ib_init(void)
{
+ unsigned int noio_flags;
int ret;
+ if (mlx5_ib_force_noio)
+ noio_flags = memalloc_noio_save();
+
xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL);
- if (!xlt_emergency_page)
- return -ENOMEM;
+ if (!xlt_emergency_page) {
+ ret = -ENOMEM;
+ goto out;
+ }
mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0);
if (!mlx5_ib_event_wq) {
free_page((unsigned long)xlt_emergency_page);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
ret = mlx5_ib_qp_event_init();
@@ -4515,7 +4526,7 @@ static int __init mlx5_ib_init(void)
ret = auxiliary_driver_register(&mlx5r_driver);
if (ret)
goto drv_err;
- return 0;
+ goto out;
drv_err:
auxiliary_driver_unregister(&mlx5r_mp_driver);
@@ -4526,6 +4537,9 @@ static int __init mlx5_ib_init(void)
qp_event_err:
destroy_workqueue(mlx5_ib_event_wq);
free_page((unsigned long)xlt_emergency_page);
+out:
+ if (mlx5_ib_force_noio)
+ memalloc_noio_restore(noio_flags);
return ret;
}
--
2.45.0