When creating new binder references, the driver assigns a descriptor id
that is shared with userspace. Regrettably, the driver needs to keep the
descriptors small enough to accommodate userspace potentially using them
as Vector indexes. Currently, the driver performs a linear search on the
rb-tree of references to find the smallest available descriptor id. This
approach, however, scales poorly as the number of references grows.
This patch introduces the usage of bitmaps to boost the performance of
descriptor assignments. This optimization results in notable performance
gains, particularly in processes with a large number of references. The
following benchmark with 100,000 references showcases the difference in
latency between the dbitmap implementation and the legacy approach:
[ 587.145098] get_ref_desc_olocked: 15us (dbitmap on)
[ 602.788623] get_ref_desc_olocked: 47343us (dbitmap off)
Note the bitmap size is dynamically adjusted in line with the number of
references, ensuring efficient memory usage. In cases where growing the
bitmap is not possible, the driver falls back to the slow legacy method.
A previous attempt to solve this issue was proposed in [1]. However,
such method involved adding new ioctls which isn't great, plus older
userspace code would not have benefited from the optimizations either.
Link: https://lore.kernel.org/all/[email protected]/ [1]
Cc: Tim Murray <[email protected]>
Cc: Arve Hjønnevåg <[email protected]>
Cc: Alice Ryhl <[email protected]>
Cc: Martijn Coenen <[email protected]>
Cc: Todd Kjos <[email protected]>
Cc: John Stultz <[email protected]>
Cc: Steven Moreland <[email protected]>
Suggested-by: Nick Chen <[email protected]>
Signed-off-by: Carlos Llamas <[email protected]>
---
Notes:
v1: first solution using a new "flags" ioctl:
https://lore.kernel.org/all/[email protected]/
v2: switch to a different implementation using bitmaps.
drivers/android/binder.c | 113 +++++++++++++++++++++---
drivers/android/binder_internal.h | 5 +-
drivers/android/dbitmap.h | 139 ++++++++++++++++++++++++++++++
3 files changed, 243 insertions(+), 14 deletions(-)
create mode 100644 drivers/android/dbitmap.h
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index dd6923d37931..6a310f8373fc 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1045,6 +1045,67 @@ static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
return NULL;
}
+/* find first non-zero available descriptor the "slow way" */
+static u32 slow_desc_lookup_olocked(struct binder_proc *proc)
+{
+ struct binder_ref *ref;
+ struct rb_node *n;
+ u32 desc;
+
+ desc = 1;
+ for (n = rb_first(&proc->refs_by_desc); n; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->data.desc > desc)
+ break;
+ desc = ref->data.desc + 1;
+ }
+
+ return desc;
+}
+
+/*
+ * Find an available reference descriptor. Note that the proc->outer_lock
+ * might be released in the process. In such case, -EAGAIN is returned and
+ * the provided descriptor is invalid.
+ */
+static int get_ref_desc_olocked(struct binder_proc *proc,
+ struct binder_node *node,
+ u32 *desc)
+{
+ struct dbitmap *dmap = &proc->dmap;
+ unsigned long *new, bit;
+ unsigned int nbits;
+
+ /* 0 is reserved for the context manager */
+ if (node == proc->context->binder_context_mgr_node) {
+ *desc = 0;
+ return 0;
+ }
+
+ if (unlikely(!dbitmap_enabled(dmap))) {
+ *desc = slow_desc_lookup_olocked(proc);
+ return 0;
+ }
+
+ if (dbitmap_get_first_zero_bit(dmap, &bit) == 0) {
+ *desc = bit;
+ return 0;
+ }
+
+ /*
+ * The descriptors bitmap is full and needs to be expanded.
+ * The proc->outer_lock is briefly released to allocate the
+ * new bitmap safely.
+ */
+ nbits = dbitmap_expand_nbits(dmap);
+ binder_proc_unlock(proc);
+ new = bitmap_zalloc(nbits, GFP_KERNEL | __GFP_ZERO);
+ binder_proc_lock(proc);
+ dbitmap_expand(dmap, new, nbits);
+
+ return -EAGAIN;
+}
+
/**
* binder_get_ref_for_node_olocked() - get the ref associated with given node
* @proc: binder_proc that owns the ref
@@ -1068,12 +1129,14 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
struct binder_node *node,
struct binder_ref *new_ref)
{
- struct binder_context *context = proc->context;
- struct rb_node **p = &proc->refs_by_node.rb_node;
- struct rb_node *parent = NULL;
struct binder_ref *ref;
- struct rb_node *n;
+ struct rb_node *parent;
+ struct rb_node **p;
+ u32 desc;
+retry:
+ p = &proc->refs_by_node.rb_node;
+ parent = NULL;
while (*p) {
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_node);
@@ -1088,6 +1151,10 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
if (!new_ref)
return NULL;
+ /* might release the proc->outer_lock */
+ if (get_ref_desc_olocked(proc, node, &desc) == -EAGAIN)
+ goto retry;
+
binder_stats_created(BINDER_STAT_REF);
new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
@@ -1095,14 +1162,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
- for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
- ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->data.desc > new_ref->data.desc)
- break;
- new_ref->data.desc = ref->data.desc + 1;
- }
-
+ new_ref->data.desc = desc;
p = &proc->refs_by_desc.rb_node;
while (*p) {
parent = *p;
@@ -1131,6 +1191,7 @@ static struct binder_ref *binder_get_ref_for_node_olocked(
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ struct dbitmap *dmap = &ref->proc->dmap;
bool delete_node = false;
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -1138,6 +1199,8 @@ static void binder_cleanup_ref_olocked(struct binder_ref *ref)
ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
+ if (dbitmap_enabled(dmap))
+ dbitmap_clear_bit(dmap, ref->data.desc);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
@@ -1298,6 +1361,25 @@ static void binder_free_ref(struct binder_ref *ref)
kfree(ref);
}
+/* shrink descriptor bitmap if needed */
+static void try_shrink_dmap(struct binder_proc *proc)
+{
+ unsigned long *new;
+ int nbits;
+
+ binder_proc_lock(proc);
+ nbits = dbitmap_shrink_nbits(&proc->dmap);
+ binder_proc_unlock(proc);
+
+ if (!nbits)
+ return;
+
+ new = bitmap_zalloc(nbits, GFP_KERNEL | __GFP_ZERO);
+ binder_proc_lock(proc);
+ dbitmap_shrink(&proc->dmap, new, nbits);
+ binder_proc_unlock(proc);
+}
+
/**
* binder_update_ref_for_handle() - inc/dec the ref for given handle
* @proc: proc containing the ref
@@ -1334,8 +1416,10 @@ static int binder_update_ref_for_handle(struct binder_proc *proc,
*rdata = ref->data;
binder_proc_unlock(proc);
- if (delete_ref)
+ if (delete_ref) {
binder_free_ref(ref);
+ try_shrink_dmap(proc);
+ }
return ret;
err_no_ref:
@@ -4931,6 +5015,7 @@ static void binder_free_proc(struct binder_proc *proc)
put_task_struct(proc->tsk);
put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
+ dbitmap_free(&proc->dmap);
kfree(proc);
}
@@ -5634,6 +5719,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+
+ dbitmap_init(&proc->dmap);
spin_lock_init(&proc->inner_lock);
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 7270d4d22207..256bb75d9d8c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -14,6 +14,7 @@
#include <linux/uidgid.h>
#include <uapi/linux/android/binderfs.h>
#include "binder_alloc.h"
+#include "dbitmap.h"
struct binder_context {
struct binder_node *binder_context_mgr_node;
@@ -368,6 +369,8 @@ struct binder_ref {
* @freeze_wait: waitqueue of processes waiting for all outstanding
* transactions to be processed
* (protected by @inner_lock)
+ * @dmap bitmap to manage available reference descriptors
+ * (protected by @outer_lock)
* @todo: list of work for this process
* (protected by @inner_lock)
* @stats: per-process binder statistics
@@ -417,7 +420,7 @@ struct binder_proc {
bool sync_recv;
bool async_recv;
wait_queue_head_t freeze_wait;
-
+ struct dbitmap dmap;
struct list_head todo;
struct binder_stats stats;
struct list_head delivered_death;
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
new file mode 100644
index 000000000000..1c6a274e4c0a
--- /dev/null
+++ b/drivers/android/dbitmap.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LINUX_DBITMAP_H
+#define _LINUX_DBITMAP_H
+#include <linux/bitmap.h>
+
+#define NBITS_MIN BITS_PER_TYPE(unsigned long)
+
+struct dbitmap {
+ unsigned int nbits;
+ unsigned long *map;
+};
+
+static inline int dbitmap_enabled(struct dbitmap *dmap)
+{
+ return dmap->map != NULL;
+}
+
+static inline void dbitmap_free(struct dbitmap *dmap)
+{
+ dmap->nbits = 0;
+ kfree(dmap->map);
+ dmap->map = NULL;
+}
+
+static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
+{
+ unsigned int bit;
+
+ if (dmap->nbits <= NBITS_MIN)
+ return 0;
+
+ bit = find_last_bit(dmap->map, dmap->nbits);
+ if (unlikely(bit == dmap->nbits))
+ return NBITS_MIN;
+
+ if (unlikely(bit <= (dmap->nbits >> 2)))
+ return dmap->nbits >> 1;
+
+ return 0;
+}
+
+static inline void
+dbitmap_replace(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ bitmap_copy(new, dmap->map, min(dmap->nbits, nbits));
+ kfree(dmap->map);
+ dmap->map = new;
+ dmap->nbits = nbits;
+}
+
+static inline void
+dbitmap_shrink(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ if (unlikely(!new))
+ return;
+
+ /*
+ * Make sure we can still shrink to the requested nbits as
+ * this call might have raced with another shrink or more
+ * bits have been assigned. In such case, release the @new
+ * bitmap and move on.
+ */
+ if (unlikely(!dbitmap_enabled(dmap) ||
+ dbitmap_shrink_nbits(dmap) != nbits)) {
+ kfree(new);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+static inline unsigned int
+dbitmap_expand_nbits(struct dbitmap *dmap)
+{
+ return dmap->nbits << 1;
+}
+
+static inline void
+dbitmap_expand(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
+{
+ /*
+ * Determine if the expand is still valid as it might have
+ * raced with another expand or free. In such case, release
+ * the @new bitmap and move on.
+ */
+ if (unlikely(!dbitmap_enabled(dmap) || nbits <= dmap->nbits)) {
+ kfree(new);
+ return;
+ }
+
+ /*
+ * ENOMEM is checked here as we can now discard a potential
+ * race with another successful expand. In such case, disable
+ * the dbitmap and fallback to slow_desc_lookup_olocked().
+ */
+ if (unlikely(!new)) {
+ dbitmap_free(dmap);
+ return;
+ }
+
+ dbitmap_replace(dmap, new, nbits);
+}
+
+static inline int
+dbitmap_get_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
+{
+ unsigned long n;
+
+ n = find_first_zero_bit(dmap->map, dmap->nbits);
+ if (unlikely(n == dmap->nbits))
+ return -ENOSPC;
+
+ *bit = n;
+ set_bit(n, dmap->map);
+
+ return 0;
+}
+
+static inline void
+dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit)
+{
+ clear_bit(bit, dmap->map);
+}
+
+static inline int dbitmap_init(struct dbitmap *dmap)
+{
+ dmap->map = bitmap_zalloc(NBITS_MIN, GFP_KERNEL);
+ if (!dmap->map) {
+ dmap->nbits = 0;
+ return -ENOMEM;
+ }
+
+ dmap->nbits = NBITS_MIN;
+ /* 0 is reserved for the context manager */
+ set_bit(0, dmap->map);
+
+ return 0;
+}
+#endif
--
2.45.0.rc1.225.g2a3ae87e7f-goog
Le 14/05/2024 à 18:09, Carlos Llamas a écrit :
> When creating new binder references, the driver assigns a descriptor id
> that is shared with userspace. Regrettably, the driver needs to keep the
> descriptors small enough to accommodate userspace potentially using them
> as Vector indexes. Currently, the driver performs a linear search on the
> rb-tree of references to find the smallest available descriptor id. This
> approach, however, scales poorly as the number of references grows.
>
> This patch introduces the usage of bitmaps to boost the performance of
> descriptor assignments. This optimization results in notable performance
> gains, particularly in processes with a large number of references. The
> following benchmark with 100,000 references showcases the difference in
> latency between the dbitmap implementation and the legacy approach:
>
> [ 587.145098] get_ref_desc_olocked: 15us (dbitmap on)
> [ 602.788623] get_ref_desc_olocked: 47343us (dbitmap off)
>
> Note the bitmap size is dynamically adjusted in line with the number of
> references, ensuring efficient memory usage. In cases where growing the
> bitmap is not possible, the driver falls back to the slow legacy method.
>
> A previous attempt to solve this issue was proposed in [1]. However,
> such method involved adding new ioctls which isn't great, plus older
> userspace code would not have benefited from the optimizations either.
>
> Link: https://lore.kernel.org/all/[email protected]/ [1]
..
> +static int get_ref_desc_olocked(struct binder_proc *proc,
> + struct binder_node *node,
> + u32 *desc)
> +{
> + struct dbitmap *dmap = &proc->dmap;
> + unsigned long *new, bit;
> + unsigned int nbits;
> +
> + /* 0 is reserved for the context manager */
> + if (node == proc->context->binder_context_mgr_node) {
> + *desc = 0;
> + return 0;
> + }
> +
> + if (unlikely(!dbitmap_enabled(dmap))) {
> + *desc = slow_desc_lookup_olocked(proc);
> + return 0;
> + }
> +
> + if (dbitmap_get_first_zero_bit(dmap, &bit) == 0) {
> + *desc = bit;
> + return 0;
> + }
> +
> + /*
> + * The descriptors bitmap is full and needs to be expanded.
> + * The proc->outer_lock is briefly released to allocate the
> + * new bitmap safely.
> + */
> + nbits = dbitmap_expand_nbits(dmap);
> + binder_proc_unlock(proc);
> + new = bitmap_zalloc(nbits, GFP_KERNEL | __GFP_ZERO);
Hi,
Nitpick: No need to __GFP_ZERO when using zalloc().
CJ
> + binder_proc_lock(proc);
> + dbitmap_expand(dmap, new, nbits);
> +
> + return -EAGAIN;
> +}
..
> +#define NBITS_MIN BITS_PER_TYPE(unsigned long)
> +
> +struct dbitmap {
> + unsigned int nbits;
Should it be long (or unsigned long) to better match the bitmap API?
(not sure if it can overflow in this use case, but at least for consistency)
> + unsigned long *map;
> +};
..
> +static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
> +{
> + unsigned int bit;
> +
> + if (dmap->nbits <= NBITS_MIN)
> + return 0;
> +
> + bit = find_last_bit(dmap->map, dmap->nbits);
find_last_bit() returns an unsigned long.
(dbitmap_get_first_zero_bit() below uses unsigned long)
CJ
> + if (unlikely(bit == dmap->nbits))
> + return NBITS_MIN;
> +
> + if (unlikely(bit <= (dmap->nbits >> 2)))
> + return dmap->nbits >> 1;
> +
> + return 0;
> +}
..
> +static inline int
> +dbitmap_get_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
> +{
> + unsigned long n;
> +
> + n = find_first_zero_bit(dmap->map, dmap->nbits);
> + if (unlikely(n == dmap->nbits))
> + return -ENOSPC;
> +
> + *bit = n;
> + set_bit(n, dmap->map);
> +
> + return 0;
> +}
..
On Tue, May 14, 2024 at 10:35:56PM +0200, Christophe JAILLET wrote:
> Le 14/05/2024 à 18:09, Carlos Llamas a écrit :
> > When creating new binder references, the driver assigns a descriptor id
> > that is shared with userspace. Regrettably, the driver needs to keep the
> > descriptors small enough to accommodate userspace potentially using them
> > as Vector indexes. Currently, the driver performs a linear search on the
> > rb-tree of references to find the smallest available descriptor id. This
> > approach, however, scales poorly as the number of references grows.
> >
> > This patch introduces the usage of bitmaps to boost the performance of
> > descriptor assignments. This optimization results in notable performance
> > gains, particularly in processes with a large number of references. The
> > following benchmark with 100,000 references showcases the difference in
> > latency between the dbitmap implementation and the legacy approach:
> >
> > [ 587.145098] get_ref_desc_olocked: 15us (dbitmap on)
> > [ 602.788623] get_ref_desc_olocked: 47343us (dbitmap off)
> >
> > Note the bitmap size is dynamically adjusted in line with the number of
> > references, ensuring efficient memory usage. In cases where growing the
> > bitmap is not possible, the driver falls back to the slow legacy method.
> >
> > A previous attempt to solve this issue was proposed in [1]. However,
> > such method involved adding new ioctls which isn't great, plus older
> > userspace code would not have benefited from the optimizations either.
> >
> > Link: https://lore.kernel.org/all/[email protected]/ [1]
>
> ...
>
> > +static int get_ref_desc_olocked(struct binder_proc *proc,
> > + struct binder_node *node,
> > + u32 *desc)
> > +{
> > + struct dbitmap *dmap = &proc->dmap;
> > + unsigned long *new, bit;
> > + unsigned int nbits;
> > +
> > + /* 0 is reserved for the context manager */
> > + if (node == proc->context->binder_context_mgr_node) {
> > + *desc = 0;
> > + return 0;
> > + }
> > +
> > + if (unlikely(!dbitmap_enabled(dmap))) {
> > + *desc = slow_desc_lookup_olocked(proc);
> > + return 0;
> > + }
> > +
> > + if (dbitmap_get_first_zero_bit(dmap, &bit) == 0) {
> > + *desc = bit;
> > + return 0;
> > + }
> > +
> > + /*
> > + * The descriptors bitmap is full and needs to be expanded.
> > + * The proc->outer_lock is briefly released to allocate the
> > + * new bitmap safely.
> > + */
> > + nbits = dbitmap_expand_nbits(dmap);
> > + binder_proc_unlock(proc);
> > + new = bitmap_zalloc(nbits, GFP_KERNEL | __GFP_ZERO);
>
> Hi,
>
> Nitpick: No need to __GFP_ZERO when using zalloc().
Oops, you are right. I'll drop this for v2.
>
> CJ
>
> > + binder_proc_lock(proc);
> > + dbitmap_expand(dmap, new, nbits);
> > +
> > + return -EAGAIN;
> > +}
>
> ...
>
> > +#define NBITS_MIN BITS_PER_TYPE(unsigned long)
> > +
> > +struct dbitmap {
> > + unsigned int nbits;
>
> Should it be long (or unsigned long) to better match the bitmap API?
I picked 'unsigned int' precisely because that is what the bitmap API
uses for the nbits (mostly). Unfortunately, there seems to be some
inconsistencies across bitmap.h and the find.h APIs on the bit type.
Ultimately the decision was made on a type that would work with both.
For extra fun, checkout the types of set_bit() and clear_bit(). Those
are using 'long' for the n bit.
>
> (not sure if it can overflow in this use case, but at least for consistancy)
Bitmaps are allocated with "unsigned int" via bitmap_zalloc() so it
can't overflow.
>
> > + unsigned long *map;
> > +};
>
> ...
>
> > +static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
> > +{
> > + unsigned int bit;
> > +
> > + if (dmap->nbits <= NBITS_MIN)
> > + return 0;
> > +
> > + bit = find_last_bit(dmap->map, dmap->nbits);
>
> find_last_bit() returns an unsigned long.
>
> (bitmap_get_first_zero_bit() below uses unsigned long)
Yes. However, these are both restricted to the @size parameter.
>
> CJ
>
> > + if (unlikely(bit == dmap->nbits))
> > + return NBITS_MIN;
> > +
> > + if (unlikely(bit <= (dmap->nbits >> 2)))
> > + return dmap->nbits >> 1;
> > +
> > + return 0;
> > +}
>
> ...
>
> > +static inline int
> > +dbitmap_get_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
> > +{
> > + unsigned long n;
> > +
> > + n = find_first_zero_bit(dmap->map, dmap->nbits);
> > + if (unlikely(n == dmap->nbits))
> > + return -ENOSPC;
> > +
> > + *bit = n;
> > + set_bit(n, dmap->map);
> > +
> > + return 0;
> > +}
>
> ...
>
On Tue, May 14, 2024 at 6:09 PM Carlos Llamas <[email protected]> wrote:
> +static inline int
> +dbitmap_get_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
> +{
> + unsigned long n;
> +
> + n = find_first_zero_bit(dmap->map, dmap->nbits);
> + if (unlikely(n == dmap->nbits))
> + return -ENOSPC;
> +
> + *bit = n;
> + set_bit(n, dmap->map);
> +
> + return 0;
> +}
Could we rename this method to something that makes it more clear that
it's not just a getter, but that it actually also sets the bit?
Alice
On Wed, May 15, 2024 at 05:29:29PM +0200, Alice Ryhl wrote:
> On Tue, May 14, 2024 at 6:09 PM Carlos Llamas <[email protected]> wrote:
> > +static inline int
> > +dbitmap_get_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
> > +{
> > + unsigned long n;
> > +
> > + n = find_first_zero_bit(dmap->map, dmap->nbits);
> > + if (unlikely(n == dmap->nbits))
> > + return -ENOSPC;
> > +
> > + *bit = n;
> > + set_bit(n, dmap->map);
> > +
> > + return 0;
> > +}
>
> Could we rename this method to something that makes it more clear that
> it's not just a getter, but that it actually also sets the bit?
>
> Alice
Sure, what were you thinking? I had picked "get" and not just "find" to
indicate this behavior. However, I'll take any better ideas. The option
of dbitmap_find_and_set_first_zero_bit() seemed too long for me.
--
Carlos Llamas
On Wed, May 15, 2024 at 04:38:51PM +0000, Carlos Llamas wrote:
> On Wed, May 15, 2024 at 05:29:29PM +0200, Alice Ryhl wrote:
> > On Tue, May 14, 2024 at 6:09 PM Carlos Llamas <[email protected]> wrote:
> > > +static inline int
> > > +dbitmap_get_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
> > > +{
> > > + unsigned long n;
> > > +
> > > + n = find_first_zero_bit(dmap->map, dmap->nbits);
> > > + if (unlikely(n == dmap->nbits))
> > > + return -ENOSPC;
> > > +
> > > + *bit = n;
> > > + set_bit(n, dmap->map);
> > > +
> > > + return 0;
> > > +}
> >
> > Could we rename this method to something that makes it more clear that
> > it's not just a getter, but that it actually also sets the bit?
> >
> > Alice
>
> Sure, what were you thinking? I had picked "get" and not just "find" to
> indicate this behavior. However, I'll take any better ideas. The option
> of dbitmap_find_and_set_first_zero_bit() seemed too long for me.
>
> --
> Carlos Llamas
I like dbitmap_acquire_first_zero_bit(). Sounds better?
On Wed, May 15, 2024 at 6:43 PM Carlos Llamas <[email protected]> wrote:
>
> On Wed, May 15, 2024 at 04:38:51PM +0000, Carlos Llamas wrote:
> > On Wed, May 15, 2024 at 05:29:29PM +0200, Alice Ryhl wrote:
> > > On Tue, May 14, 2024 at 6:09 PM Carlos Llamas <[email protected]> wrote:
> > > > +static inline int
> > > > +dbitmap_get_first_zero_bit(struct dbitmap *dmap, unsigned long *bit)
> > > > +{
> > > > + unsigned long n;
> > > > +
> > > > + n = find_first_zero_bit(dmap->map, dmap->nbits);
> > > > + if (unlikely(n == dmap->nbits))
> > > > + return -ENOSPC;
> > > > +
> > > > + *bit = n;
> > > > + set_bit(n, dmap->map);
> > > > +
> > > > + return 0;
> > > > +}
> > >
> > > Could we rename this method to something that makes it more clear that
> > > it's not just a getter, but that it actually also sets the bit?
> > >
> > > Alice
> >
> > Sure, what were you thinking? I had picked "get" and not just "find" to
> > indicate this behavior. However, I'll take any better ideas. The option
> > of dbitmap_find_and_set_first_zero_bit() seemed too long for me.
> >
> > --
> > Carlos Llamas
>
> I like dbitmap_acquire_first_zero_bit(). Sounds better?
acquire sounds good!
Alice