The binary content of nvmem devices is available to the user, so in the
simplest cases, finding the content of a cell is straightforward: it is
just a matter of looking at a known and fixed offset. However, nvmem
layouts have been recently introduced to cope with more advanced
situations, where the offset and size of the cells is not known in
advance or is dynamic. When using layouts, more advanced parsers are
used by the kernel in order to give direct access to the content of each
cell, regardless of its position/size in the underlying
device. Unfortunately, this information is not accessible to users,
unless they fully re-implement the parser logic in userland.
Let's expose the cells and their content through sysfs to avoid these
situations. Of course the relevant NVMEM sysfs Kconfig option must be
enabled for this support to be available.
Not all nvmem devices expose cells. Indeed, the .bin_attrs attribute
group member will be filled at runtime only when relevant and will
remain empty otherwise. In this case, as the cells attribute group will
be empty, it will not lead to any additional folder/file creation.
Exposed cells are read-only. There is, in practice, everything in the
core to support a write path, but as I don't see any need for that, I
prefer to keep the interface simple (and probably safer). The interface
is documented as being in the "testing" state which means we can later
add a write attribute if deemed relevant.
Signed-off-by: Miquel Raynal <[email protected]>
Tested-by: Rafał Miłecki <[email protected]>
Tested-by: Chen-Yu Tsai <[email protected]>
---
drivers/nvmem/core.c | 131 +++++++++++++++++++++++++++++++++++++-
drivers/nvmem/internals.h | 1 +
2 files changed, 131 insertions(+), 1 deletion(-)
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 6c6b0bac24f5..9ac74a6e38a0 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -299,6 +299,43 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
return nvmem_bin_attr_get_umode(nvmem);
}
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
+ const char *id, int index);
+
+static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t pos, size_t count)
+{
+ struct nvmem_cell_entry *entry;
+ struct nvmem_cell *cell = NULL;
+ size_t cell_sz, read_len;
+ void *content;
+
+ entry = attr->private;
+ cell = nvmem_create_cell(entry, entry->name, 0);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ if (!cell)
+ return -EINVAL;
+
+ content = nvmem_cell_read(cell, &cell_sz);
+ if (IS_ERR(content)) {
+ read_len = PTR_ERR(content);
+ goto destroy_cell;
+ }
+
+ read_len = min_t(unsigned int, cell_sz - pos, count);
+ memcpy(buf, content + pos, read_len);
+ kfree(content);
+
+destroy_cell:
+ kfree_const(cell->id);
+ kfree(cell);
+
+ return read_len;
+}
+
/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
@@ -320,11 +357,21 @@ static const struct attribute_group nvmem_bin_group = {
.is_bin_visible = nvmem_bin_attr_is_visible,
};
+/* Cell attributes will be dynamically allocated */
+static struct attribute_group nvmem_cells_group = {
+ .name = "cells",
+};
+
static const struct attribute_group *nvmem_dev_groups[] = {
&nvmem_bin_group,
NULL,
};
+static const struct attribute_group *nvmem_cells_groups[] = {
+ &nvmem_cells_group,
+ NULL,
+};
+
static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
.attr = {
.name = "eeprom",
@@ -380,6 +427,68 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}
+static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
+{
+ struct bin_attribute **cells_attrs, *attrs;
+ struct nvmem_cell_entry *entry;
+ unsigned int ncells = 0, i = 0;
+ int ret = 0;
+
+ mutex_lock(&nvmem_mutex);
+
+ if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
+ nvmem_cells_group.bin_attrs = NULL;
+ goto unlock_mutex;
+ }
+
+ /* Allocate an array of attributes with a sentinel */
+ ncells = list_count_nodes(&nvmem->cells);
+ cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
+ sizeof(struct bin_attribute *), GFP_KERNEL);
+ if (!cells_attrs) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
+ if (!attrs) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ /* Initialize each attribute to take the name and size of the cell */
+ list_for_each_entry(entry, &nvmem->cells, node) {
+ sysfs_bin_attr_init(&attrs[i]);
+ attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
+ "%s@%x", entry->name,
+ entry->offset);
+ attrs[i].attr.mode = 0444;
+ attrs[i].size = entry->bytes;
+ attrs[i].read = &nvmem_cell_attr_read;
+ attrs[i].private = entry;
+ if (!attrs[i].attr.name) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
+
+ cells_attrs[i] = &attrs[i];
+ i++;
+ }
+
+ nvmem_cells_group.bin_attrs = cells_attrs;
+
+ ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
+ if (ret)
+ goto unlock_mutex;
+
+ nvmem->sysfs_cells_populated = true;
+
+unlock_mutex:
+ mutex_unlock(&nvmem_mutex);
+
+ return ret;
+}
+
#else /* CONFIG_NVMEM_SYSFS */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
@@ -744,6 +853,7 @@ static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem)
int nvmem_layout_register(struct nvmem_layout *layout)
{
struct nvmem_device *nvmem = dev_get_platdata(layout->dev);
+ int ret;
if (!layout->add_cells)
return -EINVAL;
@@ -752,7 +862,19 @@ int nvmem_layout_register(struct nvmem_layout *layout)
nvmem->layout = layout;
/* Populate the cells */
- return nvmem->layout->add_cells(&nvmem->dev, nvmem, nvmem->layout);
+ ret = nvmem->layout->add_cells(&nvmem->dev, nvmem, nvmem->layout);
+ if (ret)
+ return ret;
+
+#ifdef CONFIG_NVMEM_SYSFS
+ ret = nvmem_populate_sysfs_cells(nvmem);
+ if (ret) {
+ nvmem_device_remove_all_cells(nvmem);
+ return ret;
+ }
+#endif
+
+ return 0;
}
EXPORT_SYMBOL_GPL(nvmem_layout_register);
@@ -910,11 +1032,18 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_destroy_layout;
+#ifdef CONFIG_NVMEM_SYSFS
+ rval = nvmem_populate_sysfs_cells(nvmem);
+ if (rval)
+ goto err_remove_dev;
+#endif
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
+err_remove_dev:
+ device_del(&nvmem->dev);
err_destroy_layout:
nvmem_destroy_layout(nvmem);
err_remove_cells:
diff --git a/drivers/nvmem/internals.h b/drivers/nvmem/internals.h
index 10a317d46fb6..bd24fc915298 100644
--- a/drivers/nvmem/internals.h
+++ b/drivers/nvmem/internals.h
@@ -31,6 +31,7 @@ struct nvmem_device {
struct device *layout_dev;
struct nvmem_layout *layout;
void *priv;
+ bool sysfs_cells_populated;
};
#if IS_ENABLED(CONFIG_OF)
--
2.34.1
On 05/10/2023 16:59, Miquel Raynal wrote:
> , int index);
> +
> +static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj,
> + struct bin_attribute *attr, char *buf,
> + loff_t pos, size_t count)
> +{
> + struct nvmem_cell_entry *entry;
> + struct nvmem_cell *cell = NULL;
> + size_t cell_sz, read_len;
> + void *content;
> +
> + entry = attr->private;
> + cell = nvmem_create_cell(entry, entry->name, 0);
> + if (IS_ERR(cell))
> + return PTR_ERR(cell);
> +
> + if (!cell)
> + return -EINVAL;
> +
> + content = nvmem_cell_read(cell, &cell_sz);
> + if (IS_ERR(content)) {
> + read_len = PTR_ERR(content);
> + goto destroy_cell;
> + }
> +
> + read_len = min_t(unsigned int, cell_sz - pos, count);
> + memcpy(buf, content + pos, read_len);
> + kfree(content);
> +
> +destroy_cell:
> + kfree_const(cell->id);
> + kfree(cell);
> +
> + return read_len;
> +}
> +
> /* default read/write permissions */
> static struct bin_attribute bin_attr_rw_nvmem = {
> .attr = {
> @@ -320,11 +357,21 @@ static const struct attribute_group nvmem_bin_group = {
> .is_bin_visible = nvmem_bin_attr_is_visible,
> };
>
> +/* Cell attributes will be dynamically allocated */
> +static struct attribute_group nvmem_cells_group = {
> + .name = "cells",
> +};
> +
> static const struct attribute_group *nvmem_dev_groups[] = {
> &nvmem_bin_group,
> NULL,
> };
>
> +static const struct attribute_group *nvmem_cells_groups[] = {
> + &nvmem_cells_group,
> + NULL,
> +};
> +
> static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
> .attr = {
> .name = "eeprom",
> @@ -380,6 +427,68 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
> device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
> }
>
> +static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem)
> +{
> + struct bin_attribute **cells_attrs, *attrs;
> + struct nvmem_cell_entry *entry;
> + unsigned int ncells = 0, i = 0;
> + int ret = 0;
> +
> + mutex_lock(&nvmem_mutex);
> +
> + if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
> + nvmem_cells_group.bin_attrs = NULL;
> + goto unlock_mutex;
> + }
> +
> + /* Allocate an array of attributes with a sentinel */
> + ncells = list_count_nodes(&nvmem->cells);
> + cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
> + sizeof(struct bin_attribute *), GFP_KERNEL);
> + if (!cells_attrs) {
> + ret = -ENOMEM;
> + goto unlock_mutex;
> + }
> +
> + attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
> + if (!attrs) {
> + ret = -ENOMEM;
> + goto unlock_mutex;
> + }
> +
> + /* Initialize each attribute to take the name and size of the cell */
> + list_for_each_entry(entry, &nvmem->cells, node) {
> + sysfs_bin_attr_init(&attrs[i]);
> + attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
> + "%s@%x", entry->name,
> + entry->offset);
> + attrs[i].attr.mode = 0444;
> + attrs[i].size = entry->bytes;
> + attrs[i].read = &nvmem_cell_attr_read;
> + attrs[i].private = entry;
> + if (!attrs[i].attr.name) {
> + ret = -ENOMEM;
> + goto unlock_mutex;
> + }
> +
> + cells_attrs[i] = &attrs[i];
> + i++;
> + }
> +
> + nvmem_cells_group.bin_attrs = cells_attrs;
> +
> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
> + if (ret)
> + goto unlock_mutex;
This is going to create groups after the nvmem device is added; isn't
this going to be a problem with user space notifications?
--srini
> +
> + nvmem->sysfs_cells_populated = true;
> +
> +unlock_mutex:
> + mutex_unlock(&nvmem_mutex);
> +
> + return ret;
Hi Srinivas,
> > +{
> > + struct bin_attribute **cells_attrs, *attrs;
> > + struct nvmem_cell_entry *entry;
> > + unsigned int ncells = 0, i = 0;
> > + int ret = 0;
> > +
> > + mutex_lock(&nvmem_mutex);
> > +
> > + if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) {
> > + nvmem_cells_group.bin_attrs = NULL;
> > + goto unlock_mutex;
> > + }
> > +
> > + /* Allocate an array of attributes with a sentinel */
> > + ncells = list_count_nodes(&nvmem->cells);
> > + cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1,
> > + sizeof(struct bin_attribute *), GFP_KERNEL);
> > + if (!cells_attrs) {
> > + ret = -ENOMEM;
> > + goto unlock_mutex;
> > + }
> > +
> > + attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL);
> > + if (!attrs) {
> > + ret = -ENOMEM;
> > + goto unlock_mutex;
> > + }
> > +
> > + /* Initialize each attribute to take the name and size of the cell */
> > + list_for_each_entry(entry, &nvmem->cells, node) {
> > + sysfs_bin_attr_init(&attrs[i]);
> > + attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL,
> > + "%s@%x", entry->name,
> > + entry->offset);
> > + attrs[i].attr.mode = 0444;
> > + attrs[i].size = entry->bytes;
> > + attrs[i].read = &nvmem_cell_attr_read;
> > + attrs[i].private = entry;
> > + if (!attrs[i].attr.name) {
> > + ret = -ENOMEM;
> > + goto unlock_mutex;
> > + }
> > +
> > + cells_attrs[i] = &attrs[i];
> > + i++;
> > + }
> > +
> > + nvmem_cells_group.bin_attrs = cells_attrs;
> > +
> > + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
> > + if (ret)
> > + goto unlock_mutex;
>
> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
Greg said it was not. I hope I understood correctly :-)
And anyway, cells have never been available to userspace, so there is
nothing userspace might expect yet?
Thanks,
Miquèl
On 11/10/2023 08:15, Miquel Raynal wrote:
>>> +
>>> + nvmem_cells_group.bin_attrs = cells_attrs;
>>> +
>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
>>> + if (ret)
>>> + goto unlock_mutex;
>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
> Greg said it was not. I hope I understood correctly ????
>
> And anyway, cells have never been available to userspace, so there is
> nothing userspace might expect yet?
I agree, but once we add sysfs uapi then this is going to change.
--srini
>
> Thanks,
> Miquèl
Hi Srinivas,
[email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
> On 11/10/2023 08:15, Miquel Raynal wrote:
> >>> +
> >>> + nvmem_cells_group.bin_attrs = cells_attrs;
> >>> +
> >>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
> >>> + if (ret)
> >>> + goto unlock_mutex;
> >> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
> > Greg said it was not. I hope I understood correctly ????
> >
> > And anyway, cells have never been available to userspace, so there is
> > nothing userspace might expect yet?
> I agree, but once we add sysfs uapi then this is going to change.
Can you elaborate? I'm not sure I follow you here. Is there still a
problem you fear or you think it's okay?
Thanks,
Miquèl
On 11/10/2023 09:33, Miquel Raynal wrote:
> Hi Srinivas,
>
> [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
>
>> On 11/10/2023 08:15, Miquel Raynal wrote:
>>>>> +
>>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
>>>>> +
>>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
>>>>> + if (ret)
>>>>> + goto unlock_mutex;
>>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
>>> Greg said it was not. I hope I understood correctly ????
>>>
>>> And anyway, cells have never been available to userspace, so there is
>>> nothing userspace might expect yet?
>> I agree, but once we add sysfs uapi then this is going to change.
>
> Can you elaborate? I'm not sure I follow you here. Is there still a
> problem you fear or you think it's okay?
>
Now that we add cells to sysfs, AFAIU, by the time userspace sees the
udev event from this device we might not have the cells populated.
--srini
> Thanks,
> Miquèl
Hi Srinivas,
[email protected] wrote on Wed, 11 Oct 2023 09:45:11 +0100:
> On 11/10/2023 09:33, Miquel Raynal wrote:
> > Hi Srinivas,
> >
> > [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
> >
> >> On 11/10/2023 08:15, Miquel Raynal wrote:
> >>>>> +
> >>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
> >>>>> +
> >>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
> >>>>> + if (ret)
> >>>>> + goto unlock_mutex;
> >>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
> >>> Greg said it was not. I hope I understood correctly ????
> >>>
> >>> And anyway, cells have never been available to userspace, so there is
> >>> nothing userspace might expect yet?
> >> I agree, but once we add sysfs uapi then this is going to change.
> >
> > Can you elaborate? I'm not sure I follow you here. Is there still a
> > problem you fear or you think it's okay?
> >
> Now that we add cells to sysfs.
> AFAIU, By the time the userspace sees the udev event from this device we might not have cells populated.
Yes, but why would this be a problem?
Thanks,
Miquèl
On 11/10/2023 09:58, Miquel Raynal wrote:
> Hi Srinivas,
>
> [email protected] wrote on Wed, 11 Oct 2023 09:45:11 +0100:
>
>> On 11/10/2023 09:33, Miquel Raynal wrote:
>>> Hi Srinivas,
>>>
>>> [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
>>>
>>>> On 11/10/2023 08:15, Miquel Raynal wrote:
>>>>>>> +
>>>>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
>>>>>>> +
>>>>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
>>>>>>> + if (ret)
>>>>>>> + goto unlock_mutex;
>>>>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
>>>>> Greg said it was not. I hope I understood correctly ????
>>>>>
>>>>> And anyway, cells have never been available to userspace, so there is
>>>>> nothing userspace might expect yet?
>>>> I agree, but once we add sysfs uapi then this is going to change.
>>>
>>> Can you elaborate? I'm not sure I follow you here. Is there still a
>>> problem you fear or you think it's okay?
>>>
>> Now that we add cells to sysfs.
>> AFAIU, By the time the userspace sees the udev event from this device we might not have cells populated.
>
> Yes, but why would this be a problem?
>
It will be a problem if userspace is using things like libudev to act
on these events. There seems to be some caching of attributes in udev
during the event; more info:
http://www.kroah.com/log/blog/2013/06/26/how-to-create-a-sysfs-file-correctly/
--srini
> Thanks,
> Miquèl
Hi Srinivas,
[email protected] wrote on Wed, 11 Oct 2023 10:26:43 +0100:
> On 11/10/2023 09:58, Miquel Raynal wrote:
> > Hi Srinivas,
> >
> > [email protected] wrote on Wed, 11 Oct 2023 09:45:11 +0100:
> >
> >> On 11/10/2023 09:33, Miquel Raynal wrote:
> >>> Hi Srinivas,
> >>>
> >>> [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
> >>> >>>> On 11/10/2023 08:15, Miquel Raynal wrote:
> >>>>>>> +
> >>>>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
> >>>>>>> +
> >>>>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
> >>>>>>> + if (ret)
> >>>>>>> + goto unlock_mutex;
> >>>>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
> >>>>> Greg said it was not. I hope I understood correctly ????
> >>>>>
> >>>>> And anyway, cells have never been available to userspace, so there is
> >>>>> nothing userspace might expect yet?
> >>>> I agree, but once we add sysfs uapi then this is going to change.
> >>>
> >>> Can you elaborate? I'm not sure I follow you here. Is there still a
> >>> problem you fear or you think it's okay?
> >>> >> Now that we add cells to sysfs.
> >> AFAIU, By the time the userspace sees the udev event from this device we might not have cells populated.
> >
> > Yes, but why would this be a problem?
> >
> It will be problem if the userspace is using things like libudev to act on these events. There seems to be some caching of attributes in udev during event more info http://www.kroah.com/log/blog/2013/06/26/how-to-create-a-sysfs-file-correctly/
I am already using these attributes, right? The problem here is that we
always attach cells sysfs attributes to the nvmem device, but in some
cases (when using layout devices/drivers) the probe of these devices
will happen after the main nvmem device has been announced to userspace
and thus these attributes might not be populated yet (but Greg said it
was "supported" and I assumed it was fine).
So what is your idea here to overcome this?
Thanks,
Miquèl
Hi Miquel,
On 11/10/2023 10:44, Miquel Raynal wrote:
> Hi Srinivas,
>
> [email protected] wrote on Wed, 11 Oct 2023 10:26:43 +0100:
>
>> On 11/10/2023 09:58, Miquel Raynal wrote:
>>> Hi Srinivas,
>>>
>>> [email protected] wrote on Wed, 11 Oct 2023 09:45:11 +0100:
>>>
>>>> On 11/10/2023 09:33, Miquel Raynal wrote:
>>>>> Hi Srinivas,
>>>>>
>>>>> [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
>>>>> >>>> On 11/10/2023 08:15, Miquel Raynal wrote:
>>>>>>>>> +
>>>>>>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
>>>>>>>>> +
>>>>>>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
>>>>>>>>> + if (ret)
>>>>>>>>> + goto unlock_mutex;
>>>>>>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
>>>>>>> Greg said it was not. I hope I understood correctly ????
>>>>>>>
>>>>>>> And anyway, cells have never been available to userspace, so there is
>>>>>>> nothing userspace might expect yet?
>>>>>> I agree, but once we add sysfs uapi then this is going to change.
>>>>>
>>>>> Can you elaborate? I'm not sure I follow you here. Is there still a
>>>>> problem you fear or you think it's okay?
>>>>> >> Now that we add cells to sysfs.
>>>> AFAIU, By the time the userspace sees the udev event from this device we might not have cells populated.
>>>
>>> Yes, but why would this be a problem?
>>>
>> It will be problem if the userspace is using things like libudev to act on these events. There seems to be some caching of attributes in udev during event more info http://www.kroah.com/log/blog/2013/06/26/how-to-create-a-sysfs-file-correctly/
>
> I am already using these attributes, right? The problem here is that we
> always attach cells sysfs attributes to the nvmem device, but in some
> cases (when using layout devices/drivers) the probe of these devices
> will happen after the main nvmem device has been announced to userspace
> and thus these attributes might not be populated yet (but Greg said it
> was "supported" and I assumed it was fine).
> > So what is your idea here to overcome this?
Ideally we should have all the cells definitions ready by the time nvmem
is registered.
--srini
>
> Thanks,
> Miquèl
Hi Srinivas,
[email protected] wrote on Wed, 11 Oct 2023 11:02:16 +0100:
> Hi Miquel,
>
> On 11/10/2023 10:44, Miquel Raynal wrote:
> > Hi Srinivas,
> >
> > [email protected] wrote on Wed, 11 Oct 2023 10:26:43 +0100:
> >
> >> On 11/10/2023 09:58, Miquel Raynal wrote:
> >>> Hi Srinivas,
> >>>
> >>> [email protected] wrote on Wed, 11 Oct 2023 09:45:11 +0100:
> >>> >>>> On 11/10/2023 09:33, Miquel Raynal wrote:
> >>>>> Hi Srinivas,
> >>>>>
> >>>>> [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
> >>>>> >>>> On 11/10/2023 08:15, Miquel Raynal wrote:
> >>>>>>>>> +
> >>>>>>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
> >>>>>>>>> +
> >>>>>>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
> >>>>>>>>> + if (ret)
> >>>>>>>>> + goto unlock_mutex;
> >>>>>>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
> >>>>>>> Greg said it was not. I hope I understood correctly ????
> >>>>>>>
> >>>>>>> And anyway, cells have never been available to userspace, so there is
> >>>>>>> nothing userspace might expect yet?
> >>>>>> I agree, but once we add sysfs uapi then this is going to change.
> >>>>>
> >>>>> Can you elaborate? I'm not sure I follow you here. Is there still a
> >>>>> problem you fear or you think it's okay?
> >>>>> >> Now that we add cells to sysfs.
> >>>> AFAIU, By the time the userspace sees the udev event from this device we might not have cells populated.
> >>>
> >>> Yes, but why would this be a problem?
> >>> >> It will be problem if the userspace is using things like libudev to act on these events. There seems to be some caching of attributes in udev during event more info http://www.kroah.com/log/blog/2013/06/26/how-to-create-a-sysfs-file-correctly/
> >
> > I am already using these attributes, right? The problem here is that we
> > always attach cells sysfs attributes to the nvmem device, but in some
> > cases (when using layout devices/drivers) the probe of these devices
> > will happen after the main nvmem device has been announced to userspace
> > and thus these attributes might not be populated yet (but Greg said it
> > was "supported" and I assumed it was fine).
> > > So what is your idea here to overcome this?
>
> Ideally we should have all the cells definitions ready by the time nvmem is registered.
I no longer think what you describe can happen because even though the
rootfs might be mounted, the daemons will only be 'started' once the
kernel is done starting and starts the init process, which means all
the devices have probed and all the cells have been registered as well.
Thanks,
Miquèl
On 11/10/2023 12:09, Miquel Raynal wrote:
> Hi Srinivas,
>
> [email protected] wrote on Wed, 11 Oct 2023 11:02:16 +0100:
>
>> Hi Miquel,
>>
>> On 11/10/2023 10:44, Miquel Raynal wrote:
>>> Hi Srinivas,
>>>
>>> [email protected] wrote on Wed, 11 Oct 2023 10:26:43 +0100:
>>>
>>>> On 11/10/2023 09:58, Miquel Raynal wrote:
>>>>> Hi Srinivas,
>>>>>
>>>>> [email protected] wrote on Wed, 11 Oct 2023 09:45:11 +0100:
>>>>> >>>> On 11/10/2023 09:33, Miquel Raynal wrote:
>>>>>>> Hi Srinivas,
>>>>>>>
>>>>>>> [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
>>>>>>> >>>> On 11/10/2023 08:15, Miquel Raynal wrote:
>>>>>>>>>>> +
>>>>>>>>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
>>>>>>>>>>> +
>>>>>>>>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
>>>>>>>>>>> + if (ret)
>>>>>>>>>>> + goto unlock_mutex;
>>>>>>>>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
>>>>>>>>> Greg said it was not. I hope I understood correctly ????
>>>>>>>>>
>>>>>>>>> And anyway, cells have never been available to userspace, so there is
>>>>>>>>> nothing userspace might expect yet?
>>>>>>>> I agree, but once we add sysfs uapi then this is going to change.
>>>>>>>
>>>>>>> Can you elaborate? I'm not sure I follow you here. Is there still a
>>>>>>> problem you fear or you think it's okay?
>>>>>>> >> Now that we add cells to sysfs.
>>>>>> AFAIU, By the time the userspace sees the udev event from this device we might not have cells populated.
>>>>>
>>>>> Yes, but why would this be a problem?
>>>>> >> It will be problem if the userspace is using things like libudev to act on these events. There seems to be some caching of attributes in udev during event more info http://www.kroah.com/log/blog/2013/06/26/how-to-create-a-sysfs-file-correctly/
>>>
>>> I am already using these attributes, right? The problem here is that we
>>> always attach cells sysfs attributes to the nvmem device, but in some
>>> cases (when using layout devices/drivers) the probe of these devices
>>> will happen after the main nvmem device has been announced to userspace
>>> and thus these attributes might not be populated yet (but Greg said it
>>> was "supported" and I assumed it was fine).
>>> > So what is your idea here to overcome this?
>>
>> Ideally we should have all the cells definitions ready by the time nvmem is registered.
>
> I no longer think what you describe can happen because even though the
> rootfs might be mounted, the daemons will only be 'started' once the
> kernel is done starting and starts the init process, which means all
> the devices have probed and all the cells have been registered as well.
I think you forgot about modules in the above flow.
--srini
>
> Thanks,
> Miquèl
Hi Srinivas,
[email protected] wrote on Wed, 11 Oct 2023 14:56:02 +0100:
> On 11/10/2023 12:09, Miquel Raynal wrote:
> > Hi Srinivas,
> >
> > [email protected] wrote on Wed, 11 Oct 2023 11:02:16 +0100:
> >
> >> Hi Miquel,
> >>
> >> On 11/10/2023 10:44, Miquel Raynal wrote:
> >>> Hi Srinivas,
> >>>
> >>> [email protected] wrote on Wed, 11 Oct 2023 10:26:43 +0100:
> >>> >>>> On 11/10/2023 09:58, Miquel Raynal wrote:
> >>>>> Hi Srinivas,
> >>>>>
> >>>>> [email protected] wrote on Wed, 11 Oct 2023 09:45:11 +0100:
> >>>>> >>>> On 11/10/2023 09:33, Miquel Raynal wrote:
> >>>>>>> Hi Srinivas,
> >>>>>>>
> >>>>>>> [email protected] wrote on Wed, 11 Oct 2023 09:27:20 +0100:
> >>>>>>> >>>> On 11/10/2023 08:15, Miquel Raynal wrote:
> >>>>>>>>>>> +
> >>>>>>>>>>> + nvmem_cells_group.bin_attrs = cells_attrs;
> >>>>>>>>>>> +
> >>>>>>>>>>> + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups);
> >>>>>>>>>>> + if (ret)
> >>>>>>>>>>> + goto unlock_mutex;
> >>>>>>>>>> This is going to create groups after the nvmem device is added, isn't this going to be problem with user space notifications?
> >>>>>>>>> Greg said it was not. I hope I understood correctly ????
> >>>>>>>>>
> >>>>>>>>> And anyway, cells have never been available to userspace, so there is
> >>>>>>>>> nothing userspace might expect yet?
> >>>>>>>> I agree, but once we add sysfs uapi then this is going to change.
> >>>>>>>
> >>>>>>> Can you elaborate? I'm not sure I follow you here. Is there still a
> >>>>>>> problem you fear or you think it's okay?
> >>>>>>> >> Now that we add cells to sysfs.
> >>>>>> AFAIU, By the time the userspace sees the udev event from this device we might not have cells populated.
> >>>>>
> >>>>> Yes, but why would this be a problem?
> >>>>> >> It will be problem if the userspace is using things like libudev to act on these events. There seems to be some caching of attributes in udev during event more info http://www.kroah.com/log/blog/2013/06/26/how-to-create-a-sysfs-file-correctly/
> >>>
> >>> I am already using these attributes, right? The problem here is that we
> >>> always attach cells sysfs attributes to the nvmem device, but in some
> >>> cases (when using layout devices/drivers) the probe of these devices
> >>> will happen after the main nvmem device has been announced to userspace
> >>> and thus these attributes might not be populated yet (but Greg said it
> >>> was "supported" and I assumed it was fine).
> >>> > So what is your idea here to overcome this?
> >>
> >> Ideally we should have all the cells definitions ready by the time nvmem is registered.
> >
> > I no longer think what you describe can happen because even though the
> > rootfs might be mounted, the daemons will only be 'started' once the
> > kernel is done starting and starts the init process, which means all
> > the devices have probed and all the cells have been registered as well.
> I think you forgot about modules in the above flow.
We request module insertion when the layout gets populated. By the time
userspace starts the kernel is done initializing, meaning that any
available nvmem layout module has already been loaded and the devices
probed -> the cells are there already.
Thanks,
Miquèl