The percpu_ref used to gate whether cxl_memdev_ioctl() is free to use
the driver context (@cxlm) to issue I/O is overkill. It is also
implemented incorrectly (no device reference is held while accessing
the percpu_ref), and the complexity of shutting down a percpu_ref
contributed to a bug in the error unwind in cxl_mem_add_memdev() (a
missing put_device(), to be fixed separately).
Use an rwsem to explicitly synchronize the usage of cxlmd->cxlm, and add
the missing reference counting for cxlmd in cxl_memdev_open() and
cxl_memdev_release_file().
Fixes: b39cb1052a5c ("cxl/mem: Register CXL memX devices")
Reported-by: Jason Gunthorpe <[email protected]>
Signed-off-by: Dan Williams <[email protected]>
---
drivers/cxl/mem.c | 97 ++++++++++++++++++++++++++++-------------------------
1 file changed, 51 insertions(+), 46 deletions(-)
diff --git a/drivers/cxl/mem.c b/drivers/cxl/mem.c
index 30bf4f0f3c17..2cf620d201a6 100644
--- a/drivers/cxl/mem.c
+++ b/drivers/cxl/mem.c
@@ -96,21 +96,18 @@ struct mbox_cmd {
* @dev: driver core device object
* @cdev: char dev core object for ioctl operations
* @cxlm: pointer to the parent device driver data
- * @ops_active: active user of @cxlm in ops handlers
- * @ops_dead: completion when all @cxlm ops users have exited
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
struct device dev;
struct cdev cdev;
struct cxl_mem *cxlm;
- struct percpu_ref ops_active;
- struct completion ops_dead;
int id;
};
static int cxl_mem_major;
static DEFINE_IDA(cxl_memdev_ida);
+static DECLARE_RWSEM(cxl_memdev_rwsem);
static struct dentry *cxl_debugfs;
static bool cxl_raw_allow_all;
@@ -776,26 +773,43 @@ static long __cxl_memdev_ioctl(struct cxl_memdev *cxlmd, unsigned int cmd,
static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct cxl_memdev *cxlmd;
- struct inode *inode;
- int rc = -ENOTTY;
+ struct cxl_memdev *cxlmd = file->private_data;
+ int rc = -ENXIO;
- inode = file_inode(file);
- cxlmd = container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+ down_read(&cxl_memdev_rwsem);
+ if (cxlmd->cxlm)
+ rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+ up_read(&cxl_memdev_rwsem);
- if (!percpu_ref_tryget_live(&cxlmd->ops_active))
- return -ENXIO;
+ return rc;
+}
- rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
+static int cxl_memdev_open(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
- percpu_ref_put(&cxlmd->ops_active);
+ get_device(&cxlmd->dev);
+ file->private_data = cxlmd;
- return rc;
+ return 0;
+}
+
+static int cxl_memdev_release_file(struct inode *inode, struct file *file)
+{
+ struct cxl_memdev *cxlmd =
+ container_of(inode->i_cdev, typeof(*cxlmd), cdev);
+
+ put_device(&cxlmd->dev);
+
+ return 0;
}
static const struct file_operations cxl_memdev_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = cxl_memdev_ioctl,
+ .open = cxl_memdev_open,
+ .release = cxl_memdev_release_file,
.compat_ioctl = compat_ptr_ioctl,
.llseek = noop_llseek,
};
@@ -1049,7 +1063,6 @@ static void cxl_memdev_release(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- percpu_ref_exit(&cxlmd->ops_active);
ida_free(&cxl_memdev_ida, cxlmd->id);
kfree(cxlmd);
}
@@ -1150,24 +1163,28 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};
-static void cxlmdev_unregister(void *_cxlmd)
+static void cxl_memdev_activate(struct cxl_memdev *cxlmd, struct cxl_mem *cxlm)
{
- struct cxl_memdev *cxlmd = _cxlmd;
- struct device *dev = &cxlmd->dev;
+ cxlmd->cxlm = cxlm;
+ down_write(&cxl_memdev_rwsem);
+ up_write(&cxl_memdev_rwsem);
+}
- percpu_ref_kill(&cxlmd->ops_active);
- cdev_device_del(&cxlmd->cdev, dev);
- wait_for_completion(&cxlmd->ops_dead);
+static void cxl_memdev_shutdown(struct cxl_memdev *cxlmd)
+{
+ down_write(&cxl_memdev_rwsem);
cxlmd->cxlm = NULL;
- put_device(dev);
+ up_write(&cxl_memdev_rwsem);
}
-static void cxlmdev_ops_active_release(struct percpu_ref *ref)
+static void cxl_memdev_unregister(void *_cxlmd)
{
- struct cxl_memdev *cxlmd =
- container_of(ref, typeof(*cxlmd), ops_active);
+ struct cxl_memdev *cxlmd = _cxlmd;
+ struct device *dev = &cxlmd->dev;
- complete(&cxlmd->ops_dead);
+ cdev_device_del(&cxlmd->cdev, dev);
+ cxl_memdev_shutdown(cxlmd);
+ put_device(dev);
}
static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
@@ -1181,17 +1198,6 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
cxlmd = kzalloc(sizeof(*cxlmd), GFP_KERNEL);
if (!cxlmd)
return -ENOMEM;
- init_completion(&cxlmd->ops_dead);
-
- /*
- * @cxlm is deallocated when the driver unbinds so operations
- * that are using it need to hold a live reference.
- */
- cxlmd->cxlm = cxlm;
- rc = percpu_ref_init(&cxlmd->ops_active, cxlmdev_ops_active_release, 0,
- GFP_KERNEL);
- if (rc)
- goto err_ref;
rc = ida_alloc_range(&cxl_memdev_ida, 0, CXL_MEM_MAX_DEVS, GFP_KERNEL);
if (rc < 0)
@@ -1209,23 +1215,22 @@ static int cxl_mem_add_memdev(struct cxl_mem *cxlm)
cdev = &cxlmd->cdev;
cdev_init(cdev, &cxl_memdev_fops);
+ cxl_memdev_activate(cxlmd, cxlm);
rc = cdev_device_add(cdev, dev);
if (rc)
goto err_add;
- return devm_add_action_or_reset(dev->parent, cxlmdev_unregister, cxlmd);
+ return devm_add_action_or_reset(&pdev->dev, cxl_memdev_unregister,
+ cxlmd);
err_add:
- ida_free(&cxl_memdev_ida, cxlmd->id);
-err_id:
/*
- * Theoretically userspace could have already entered the fops,
- * so flush ops_active.
+ * The cdev was briefly live, shutdown any ioctl operations that
+ * saw that state.
*/
- percpu_ref_kill(&cxlmd->ops_active);
- wait_for_completion(&cxlmd->ops_dead);
- percpu_ref_exit(&cxlmd->ops_active);
-err_ref:
+ cxl_memdev_shutdown(cxlmd);
+ ida_free(&cxl_memdev_ida, cxlmd->id);
+err_id:
kfree(cxlmd);
return rc;
On Tue, Mar 30, 2021 at 04:36:37PM -0700, Dan Williams wrote:
> -static void cxlmdev_unregister(void *_cxlmd)
> +static void cxl_memdev_activate(struct cxl_memdev *cxlmd, struct cxl_mem *cxlm)
> {
> - struct cxl_memdev *cxlmd = _cxlmd;
> - struct device *dev = &cxlmd->dev;
> + cxlmd->cxlm = cxlm;
> + down_write(&cxl_memdev_rwsem);
> + up_write(&cxl_memdev_rwsem);
> +}
No reason not to put the assignment inside the lock. Though using the
lock at all is overkill as the pointer hasn't left the local stack
frame at this point.
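i.e. if the lock stays at all, something like this (just a sketch):

static void cxl_memdev_activate(struct cxl_memdev *cxlmd, struct cxl_mem *cxlm)
{
	down_write(&cxl_memdev_rwsem);
	cxlmd->cxlm = cxlm;
	up_write(&cxl_memdev_rwsem);
}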
> err_add:
> - ida_free(&cxl_memdev_ida, cxlmd->id);
> -err_id:
> /*
> - * Theoretically userspace could have already entered the fops,
> - * so flush ops_active.
> + * The cdev was briefly live, shutdown any ioctl operations that
> + * saw that state.
> */
Wow it is really subtle that cdev_device_add has this tiny hole where
it can fail but have already allowed open() :<
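(Roughly, paraphrasing fs/char_dev.c, cdev_device_add() does:

	rc = cdev_add(cdev, dev->devt, 1);	/* open() can succeed from here */
	if (rc)
		return rc;

	rc = device_add(dev);			/* ...but this can still fail */
	if (rc)
		cdev_del(cdev);

	return rc;

...so there is a window where an open() can race a device_add()
failure.)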
Other than the lock it looks OK
Reviewed-by: Jason Gunthorpe <[email protected]>
Jason
On Wed, Mar 31, 2021 at 6:07 AM Jason Gunthorpe <[email protected]> wrote:
>
> On Tue, Mar 30, 2021 at 04:36:37PM -0700, Dan Williams wrote:
> > -static void cxlmdev_unregister(void *_cxlmd)
> > +static void cxl_memdev_activate(struct cxl_memdev *cxlmd, struct cxl_mem *cxlm)
> > {
> > - struct cxl_memdev *cxlmd = _cxlmd;
> > - struct device *dev = &cxlmd->dev;
> > + cxlmd->cxlm = cxlm;
> > + down_write(&cxl_memdev_rwsem);
> > + up_write(&cxl_memdev_rwsem);
> > +}
>
> No reason not to put the assignment inside the lock. Though using the
> lock at all is overkill as the pointer hasn't left the local stack
> frame at this point.
Right, I was considering just leaving it as a bare pointer assignment;
in fact that must be sufficient, as publishing the cdev needs to
depend on all cdev init having completed. So if this write somehow
leaks into cdev_device_add() there are much larger problems afoot.
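i.e. a sketch of what that simplification would look like:

static void cxl_memdev_activate(struct cxl_memdev *cxlmd, struct cxl_mem *cxlm)
{
	/* no lock needed: the cdev is not live yet, nothing can observe @cxlm */
	cxlmd->cxlm = cxlm;
}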
>
> > err_add:
> > - ida_free(&cxl_memdev_ida, cxlmd->id);
> > -err_id:
> > /*
> > - * Theoretically userspace could have already entered the fops,
> > - * so flush ops_active.
> > + * The cdev was briefly live, shutdown any ioctl operations that
> > + * saw that state.
> > */
>
> Wow it is really subtle that cdev_device_add has this tiny hole where
> it can fail but have already allowed open() :<
Yes, this was something I wanted to address in the cdev api proposal
integrating the debugfs fops proxy / reference counting approach. I
want a cdev api that does not allow open until after the associated
device has registered and fired the KOBJ_ADD event.
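Something along these lines (hypothetical names, no such API exists
today):

	/* hypothetical: register cdev + device, but keep open() gated off */
	rc = cdev_device_add_gated(&cxlmd->cdev, dev);
	if (rc)
		goto err_add;
	/* hypothetical: only after KOBJ_ADD may open() succeed */
	cdev_device_enable(&cxlmd->cdev);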
>
> Other than the lock it looks OK
>
> Reviewed-by: Jason Gunthorpe <[email protected]>
Thanks, Jason.