Artem,
as requested I'm resending my fastmap work in smaller pieces.
This is pile 1 of 7.
Rebasing my patches to ubifs.git was a massive PITA because the
logging style changes touched a lot of code and almost every patch
failed to apply and needed inspection by hand.
The first patches are bug fixes, the latter introduce cleanups
and new features.
After all bugfixes are mainline I'll make sure that all needed
fixes go into -stable.
[PATCH 1/6] UBI: Fastmap: Care about the protection queue
[PATCH 2/6] UBI: Fastmap: Ensure that only one fastmap work is
[PATCH 3/6] UBI: Fastmap: Ensure that all fastmap work is done upon
[PATCH 4/6] UBI: Fastmap: Fix races in ubi_wl_get_peb()
[PATCH 5/6] UBI: Split __wl_get_peb()
[PATCH 6/6] UBI: Fastmap: Make ubi_refill_pools() fair
The whole series can be found at
git://git.kernel.org/pub/scm/linux/kernel/git/rw/misc.git fastmap_upgrade2
Thanks,
//richard
Currently ubi_refill_pools() first fills the first and then
the second one.
If only very few free PEBs are available the second pool can get
zero PEBs.
Change ubi_refill_pools() to distribute free PEBs fairly among
all pools.
Signed-off-by: Richard Weinberger <[email protected]>
---
drivers/mtd/ubi/wl.c | 77 +++++++++++++++++++++++++++-------------------------
1 file changed, 40 insertions(+), 37 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index f028b68..c2822f7 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -583,59 +583,62 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
/**
- * refill_wl_pool - refills all the fastmap pool used by the
- * WL sub-system.
+ * ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
*/
-static void refill_wl_pool(struct ubi_device *ubi)
+void ubi_refill_pools(struct ubi_device *ubi)
{
+ struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+ struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_wl_entry *e;
- struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
+ int enough;
+ spin_lock(&ubi->wl_lock);
+
+ return_unused_pool_pebs(ubi, wl_pool);
return_unused_pool_pebs(ubi, pool);
- for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
- if (!ubi->free.rb_node ||
- (ubi->free_count - ubi->beb_rsvd_pebs < 5))
- break;
+ wl_pool->size = 0;
+ pool->size = 0;
- e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
- self_check_in_wl_tree(ubi, e, &ubi->free);
- rb_erase(&e->u.rb, &ubi->free);
- ubi->free_count--;
+ for (;;) {
+ enough = 0;
+ if (pool->size < pool->max_size) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
- pool->pebs[pool->size] = e->pnum;
- }
- pool->used = 0;
-}
+ e = wl_get_wle(ubi);
+ if (!e)
+ break;
-/**
- * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
- * @ubi: UBI device description object
- */
-static void refill_wl_user_pool(struct ubi_device *ubi)
-{
- struct ubi_fm_pool *pool = &ubi->fm_pool;
+ pool->pebs[pool->size] = e->pnum;
+ pool->size++;
+ } else
+ enough++;
- return_unused_pool_pebs(ubi, pool);
+ if (wl_pool->size < wl_pool->max_size) {
+ if (!ubi->free.rb_node ||
+ (ubi->free_count - ubi->beb_rsvd_pebs < 5))
+ break;
- for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
- pool->pebs[pool->size] = __wl_get_peb(ubi);
- if (pool->pebs[pool->size] < 0)
+ e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+
+ wl_pool->pebs[wl_pool->size] = e->pnum;
+ wl_pool->size++;
+ } else
+ enough++;
+
+ if (enough == 2)
break;
}
+
+ wl_pool->used = 0;
pool->used = 0;
-}
-/**
- * ubi_refill_pools - refills all fastmap PEB pools.
- * @ubi: UBI device description object
- */
-void ubi_refill_pools(struct ubi_device *ubi)
-{
- spin_lock(&ubi->wl_lock);
- refill_wl_pool(ubi);
- refill_wl_user_pool(ubi);
spin_unlock(&ubi->wl_lock);
}
--
1.8.4.5
ubi_wl_get_peb() has two problems: it reads the pool
size and usage counters without any protection.
While reading one value would be perfectly fine, it reads multiple
values and compares them. This is racy and can lead to incorrect
pool handling.
Furthermore ubi_update_fastmap() is called without wl_lock held,
before incrementing the used counter it needs to be checked again.
It could happen that another thread consumed all PEBs from the
pool and the counter goes beyond ->size.
Signed-off-by: Richard Weinberger <[email protected]>
---
drivers/mtd/ubi/ubi.h | 3 ++-
drivers/mtd/ubi/wl.c | 34 +++++++++++++++++++++++-----------
2 files changed, 25 insertions(+), 12 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 04c4c05..d672412 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -439,7 +439,8 @@ struct ubi_debug_info {
* @pq_head: protection queue head
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
* @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
+ * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
+ * and @fm_wl_pool fields
* @move_mutex: serializes eraseblock moves
* @work_sem: used to wait for all the scheduled works to finish and prevent
* new works from being submitted
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index cb2e571..7730b97 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -629,24 +629,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
*/
int ubi_wl_get_peb(struct ubi_device *ubi)
{
- int ret;
+ int ret, retried = 0;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
- if (!pool->size || !wl_pool->size || pool->used == pool->size ||
- wl_pool->used == wl_pool->size)
+again:
+ spin_lock(&ubi->wl_lock);
+ /* We check here also for the WL pool because at this point we can
+ * refill the WL pool synchronous. */
+ if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
+ spin_unlock(&ubi->wl_lock);
ubi_update_fastmap(ubi);
-
- /* we got not a single free PEB */
- if (!pool->size)
- ret = -ENOSPC;
- else {
spin_lock(&ubi->wl_lock);
- ret = pool->pebs[pool->used++];
- prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ }
+
+ if (pool->used == pool->size) {
spin_unlock(&ubi->wl_lock);
+ if (retried) {
+ ubi_err(ubi, "Unable to get a free PEB from user WL pool");
+ ret = -ENOSPC;
+ goto out;
+ }
+ retried = 1;
+ goto again;
}
+ ubi_assert(pool->used < pool->size);
+ ret = pool->pebs[pool->used++];
+ prot_queue_add(ubi, ubi->lookuptbl[ret]);
+ spin_unlock(&ubi->wl_lock);
+out:
return ret;
}
@@ -659,7 +671,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
- if (pool->used == pool->size || !pool->size) {
+ if (pool->used == pool->size) {
/* We cannot update the fastmap here because this
* function is called in atomic context.
* Let's fail here and refill/update it as soon as possible. */
--
1.8.4.5
Make it two functions, wl_get_wle() and wl_get_peb().
wl_get_peb() works exactly like __wl_get_peb() but wl_get_wle()
does not call produce_free_peb().
While refilling the fastmap user pool we cannot release ubi->wl_lock
as produce_free_peb() does.
Hence the fastmap logic uses now wl_get_wle().
Signed-off-by: Richard Weinberger <[email protected]>
---
drivers/mtd/ubi/wl.c | 61 ++++++++++++++++++++++++++++++++--------------------
1 file changed, 38 insertions(+), 23 deletions(-)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7730b97..f028b68 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -499,13 +499,46 @@ out:
#endif
/**
- * __wl_get_peb - get a physical eraseblock.
+ * wl_get_wle - get a mean wl entry to be used by wl_get_peb() or
+ * refill_wl_user_pool().
+ * @ubi: UBI device description object
+ *
+ * This function returns a a wear leveling entry in case of success and
+ * NULL in case of failure.
+ */
+static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
+{
+ struct ubi_wl_entry *e;
+
+ e = find_mean_wl_entry(ubi, &ubi->free);
+ if (!e) {
+ ubi_err(ubi, "no free eraseblocks");
+ return NULL;
+ }
+
+ self_check_in_wl_tree(ubi, e, &ubi->free);
+
+ /*
+ * Move the physical eraseblock to the protection queue where it will
+ * be protected from being moved for some time.
+ */
+ rb_erase(&e->u.rb, &ubi->free);
+ ubi->free_count--;
+ dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+
+ return e;
+}
+
+/**
+ * wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure.
+ * It is the low level component of ubi_wl_get_peb() in the non-fastmap
+ * case.
*/
-static int __wl_get_peb(struct ubi_device *ubi)
+static int wl_get_peb(struct ubi_device *ubi)
{
int err;
struct ubi_wl_entry *e;
@@ -524,27 +557,9 @@ retry:
goto retry;
}
- e = find_mean_wl_entry(ubi, &ubi->free);
- if (!e) {
- ubi_err(ubi, "no free eraseblocks");
- return -ENOSPC;
- }
-
- self_check_in_wl_tree(ubi, e, &ubi->free);
-
- /*
- * Move the physical eraseblock to the protection queue where it will
- * be protected from being moved for some time.
- */
- rb_erase(&e->u.rb, &ubi->free);
- ubi->free_count--;
- dbg_wl("PEB %d EC %d", e->pnum, e->ec);
-#ifndef CONFIG_MTD_UBI_FASTMAP
- /* We have to enqueue e only if fastmap is disabled,
- * is fastmap enabled prot_queue_add() will be called by
- * ubi_wl_get_peb() after removing e from the pool. */
+ e = wl_get_wle(ubi);
prot_queue_add(ubi, e);
-#endif
+
return e->pnum;
}
@@ -704,7 +719,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
int peb, err;
spin_lock(&ubi->wl_lock);
- peb = __wl_get_peb(ubi);
+ peb = wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
if (peb < 0)
--
1.8.4.5
...otherwise the deferred work might run after data structures
got freed and corrupt memory.
Signed-off-by: Richard Weinberger <[email protected]>
---
drivers/mtd/ubi/wl.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 7f135df..cb2e571 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -2041,6 +2041,9 @@ static void protection_queue_destroy(struct ubi_device *ubi)
void ubi_wl_close(struct ubi_device *ubi)
{
dbg_wl("close the WL sub-system");
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ flush_work(&ubi->fm_work);
+#endif
shutdown_work(ubi);
protection_queue_destroy(ubi);
tree_destroy(&ubi->used);
--
1.8.4.5
If the WL pool runs out of PEBs we schedule a fastmap write
to refill it as soon as possible.
Ensure that only one at a time is scheduled, otherwise we might end up in
a fastmap write storm because writing the fastmap can schedule another
write if bitflips are detected.
Signed-off-by: Richard Weinberger <[email protected]>
---
drivers/mtd/ubi/ubi.h | 4 +++-
drivers/mtd/ubi/wl.c | 8 +++++++-
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index f80ffab..04c4c05 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -427,6 +427,7 @@ struct ubi_debug_info {
* @fm_size: fastmap size in bytes
* @fm_sem: allows ubi_update_fastmap() to block EBA table changes
* @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -438,7 +439,7 @@ struct ubi_debug_info {
* @pq_head: protection queue head
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
* @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- * @erroneous, and @erroneous_peb_count fields
+ * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
* @move_mutex: serializes eraseblock moves
* @work_sem: used to wait for all the scheduled works to finish and prevent
* new works from being submitted
@@ -533,6 +534,7 @@ struct ubi_device {
void *fm_buf;
size_t fm_size;
struct work_struct fm_work;
+ int fm_work_scheduled;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 834f6fe..7f135df 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -149,6 +149,9 @@ static void update_fastmap_work_fn(struct work_struct *wrk)
{
struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
ubi_update_fastmap(ubi);
+ spin_lock(&ubi->wl_lock);
+ ubi->fm_work_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
}
/**
@@ -660,7 +663,10 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
/* We cannot update the fastmap here because this
* function is called in atomic context.
* Let's fail here and refill/update it as soon as possible. */
- schedule_work(&ubi->fm_work);
+ if (!ubi->fm_work_scheduled) {
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
return NULL;
} else {
pnum = pool->pebs[pool->used++];
--
1.8.4.5
Fastmap can miss a PEB if it is in the protection queue
and not yet in the used tree.
Treat every protected PEB as used.
Signed-off-by: Richard Weinberger <[email protected]>
---
drivers/mtd/ubi/fastmap.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b56672b..db3defd 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1196,6 +1196,19 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
+
+ for (i = 0; i < UBI_PROT_QUEUE_LEN; i++) {
+ list_for_each_entry(wl_e, &ubi->pq[i], u.list) {
+ fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+ fec->pnum = cpu_to_be32(wl_e->pnum);
+ fec->ec = cpu_to_be32(wl_e->ec);
+
+ used_peb_count++;
+ fm_pos += sizeof(*fec);
+ ubi_assert(fm_pos <= ubi->fm_size);
+ }
+ }
fmh->used_peb_count = cpu_to_be32(used_peb_count);
for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
--
1.8.4.5
On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
> Artem,
>
> as requested I'm resending my fastmap work in smaller pieces.
> This is pile 1 of 7.
Thanks, but could be even smaller and more often, in theory :) You could
send 3 or something of these several weeks ago - would be even better -
I was much less busy at that point. And next week I am out of office and
out of network.
On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
> Fastmap can miss a PEB if it is in the protection queue
> and not jet in the used tree.
> Treat every protected PEB as used.
>
> Signed-off-by: Richard Weinberger <[email protected]>
Picked this one and pushed, thanks!
Am 27.11.2014 um 15:53 schrieb Artem Bityutskiy:
> On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
>> Artem,
>>
>> as requested I'm resending my fastmap work in smaller pieces.
>> This is pile 1 of 7.
>
> Thanks, but could be even smaller and more often, in theory :) You could
> send 3 or something of these several weeks ago - would be even better -
> I was much less busy at that point. And next week I am out of office and
> out of network.
Ah, ok. I misunderstood you then.
I thought you want at most one small series "in flight" of my Fastmap work.
Thanks,
//richard
On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
> If the WL pool runs out of PEBs we schedule a fastmap write
> to refill it as soon as possible.
> Ensure that only one at a time is scheduled otherwise we might end in
> a fastmap write storm because writing the fastmap can schedule another
> write if bitflips are detected.
Could you please provide some more details about the "write storm". Does
it happen when there are 2 fastmap works in the queue? Or if they run
simultaneously? Why does the storm happen and what kind of "writes" does it
consists of?
Thanks!
On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
> ...otherwise the deferred work might run after datastructures
> got freed and corrupt memory.
>
> Signed-off-by: Richard Weinberger <[email protected]>
> ---
> drivers/mtd/ubi/wl.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index 7f135df..cb2e571 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -2041,6 +2041,9 @@ static void protection_queue_destroy(struct ubi_device *ubi)
> void ubi_wl_close(struct ubi_device *ubi)
> {
> dbg_wl("close the WL sub-system");
> +#ifdef CONFIG_MTD_UBI_FASTMAP
> + flush_work(&ubi->fm_work);
> +#endif
If you are using the work infrastructure implemented in wl.c, then
fastmap work should be no different to any other work. And we do flush
all works in 'shutdown_work()'. The fastmap work should be flushed there
too.
I think we discussed this already - there should be one single queue of
works, managed by the same set of functions, all flushed in the same
place, one-by-one...
Obviously, there is some misunderstanding. This looks like lack of
separation and misuse of layering. I am missing explanations why I am
wrong...
Am 27.11.2014 um 16:38 schrieb Artem Bityutskiy:
> On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
>> ...otherwise the deferred work might run after datastructures
>> got freed and corrupt memory.
>>
>> Signed-off-by: Richard Weinberger <[email protected]>
>> ---
>> drivers/mtd/ubi/wl.c | 3 +++
>> 1 file changed, 3 insertions(+)
>>
>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>> index 7f135df..cb2e571 100644
>> --- a/drivers/mtd/ubi/wl.c
>> +++ b/drivers/mtd/ubi/wl.c
>> @@ -2041,6 +2041,9 @@ static void protection_queue_destroy(struct ubi_device *ubi)
>> void ubi_wl_close(struct ubi_device *ubi)
>> {
>> dbg_wl("close the WL sub-system");
>> +#ifdef CONFIG_MTD_UBI_FASTMAP
>> + flush_work(&ubi->fm_work);
>> +#endif
>
> If you are using the work infrastructure implemented in wl.c, then
> fastmap work should be no different to any other work. And we do flush
> all works in 'shutdown_work()'. The fastmap work should be flushed there
> too.
>
> I think we discussed this already - there should be one single queue of
> works, managed by the same set of functions, all flushed in the same
> place, one-by-one...
>
> Obviously, there is some misunderstanding. This looks like lack of
> separation and misuse of layering. I am missing explanations why I am
> wrong...
So you want me to use the UBI WL background thread for the scheduled fastmap work?
I didn't do it that way because you said more than once that fastmap is fastmap and
WL is WL. Therefore I've separated it.
Thanks,
//richard
Am 27.11.2014 um 16:27 schrieb Artem Bityutskiy:
> On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
>> If the WL pool runs out of PEBs we schedule a fastmap write
>> to refill it as soon as possible.
>> Ensure that only one at a time is scheduled otherwise we might end in
>> a fastmap write storm because writing the fastmap can schedule another
>> write if bitflips are detected.
>
> Could you please provide some more details about the "write storm". Does
> it happen when there are 2 fastmap works in the queue? Or if they run
> simultaneously? Why the storm happens and white kind of "writes" it
> consists of?
If UBI needs to write a new fastmap while wear leveling (by using get_peb_for_wl())
a fastmap work is scheduled.
We cannot write a fastmap in this context because we're in atomic context.
At this point one fastmap write is scheduled. If now get_peb_for_wl() is executed
a second time it will schedule another fastmap work because the pools are still not refilled.
This will go on until finally a fastmap gets written, either by the kworker or
ubi_wl_get_peb().
As now a lot of fastmap works are scheduled, they will all be issued in series.
Hence, a write storm. :)
Thanks,
//richard
On Thu, 2014-11-27 at 17:08 +0100, Richard Weinberger wrote:
> > Obviously, there is some misunderstanding. This looks like lack of
> > separation and misuse of layering. I am missing explanations why I am
> > wrong...
>
> So you want me to use the UBI WL background thread for the scheduled fastmap work?
No. It is more like either use it or do not use it.
> I didn't do it that way because you said more than once that fastmap is fastmap and
> WL is WL. Therefore I've separated it.
And "separated" meaning adding this code to wl.c?
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ flush_work(&ubi->fm_work);
+#endif
Could it be separated some more then?
Am 27.11.2014 um 17:29 schrieb Artem Bityutskiy:
> On Thu, 2014-11-27 at 17:08 +0100, Richard Weinberger wrote:
>>> Obviously, there is some misunderstanding. This looks like lack of
>>> separation and misuse of layering. I am missing explanations why I am
>>> wrong...
>>
>> So you want me to use the UBI WL background thread for the scheduled fastmap work?
>
> No. It is more like either use it or do not use it.
Sorry, I don't understand.
What do you want me to do?
>> I didn't do it that way because you said more than once that fastmap is fastmap and
>> WL is WL. Therefore I've separated it.
>
> And "separated" meaning adding this code to wl.c?
>
> +#ifdef CONFIG_MTD_UBI_FASTMAP
> + flush_work(&ubi->fm_work);
> +#endif
>
> Could it be separated some more then?
>
Of course, commit "UBI: Move fastmap specific functions out of wl.c" does.
But this commit is *bugfix* commit.
Thanks,
//richard
On Thu, 2014-11-27 at 17:13 +0100, Richard Weinberger wrote:
> Am 27.11.2014 um 16:27 schrieb Artem Bityutskiy:
> > On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
> >> If the WL pool runs out of PEBs we schedule a fastmap write
> >> to refill it as soon as possible.
> >> Ensure that only one at a time is scheduled otherwise we might end in
> >> a fastmap write storm because writing the fastmap can schedule another
> >> write if bitflips are detected.
> >
> > Could you please provide some more details about the "write storm". Does
> > it happen when there are 2 fastmap works in the queue? Or if they run
> > simultaneously? Why the storm happens and white kind of "writes" it
> > consists of?
>
> If UBI needs to write a new fastmap while wear leveling (by using get_peb_for_wl())
> a fastmap work is scheduled.
> We cannot write a fastmap in this context because we're in atomic context.
> At this point one fastmap write is scheduled. If now get_peb_for_wl() is executed
> a second time it will schedule another fastmap work because the pools are still not refilled.
Sounds like just you do not need any works and any queues at all. All
you need is a "please, fastmap me!" flag.
Then this flag should be checked every time we enter the background
thread or the fastmap code, and be acted upon.
So the background thread would first check this flag, and if it is set -
call the fastmap stuff. The go do the WL works.
Just off-the top of my head, take with grain of salt.
Am 27.11.2014 um 17:35 schrieb Artem Bityutskiy:
> On Thu, 2014-11-27 at 17:13 +0100, Richard Weinberger wrote:
>> Am 27.11.2014 um 16:27 schrieb Artem Bityutskiy:
>>> On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
>>>> If the WL pool runs out of PEBs we schedule a fastmap write
>>>> to refill it as soon as possible.
>>>> Ensure that only one at a time is scheduled otherwise we might end in
>>>> a fastmap write storm because writing the fastmap can schedule another
>>>> write if bitflips are detected.
>>>
>>> Could you please provide some more details about the "write storm". Does
>>> it happen when there are 2 fastmap works in the queue? Or if they run
>>> simultaneously? Why the storm happens and white kind of "writes" it
>>> consists of?
>>
>> If UBI needs to write a new fastmap while wear leveling (by using get_peb_for_wl())
>> a fastmap work is scheduled.
>> We cannot write a fastmap in this context because we're in atomic context.
>> At this point one fastmap write is scheduled. If now get_peb_for_wl() is executed
>> a second time it will schedule another fastmap work because the pools are still not refilled.
>
> Sounds like just you do not need any works and any queues at all. All
> you need is a "please, fastmap me!" flag.
>
> Then this flag should be checked every time we enter the background
> thread or the fastmap code, and be acted upon.
>
> So the background thread would first check this flag, and if it is set -
> call the fastmap stuff. The go do the WL works.
>
> Just off-the top of my head, take with grain of salt.
So you want me to redesign it?
IMHO this is just a matter of taste.
Face it, my brain does not work like yours. I design things differently.
Thanks,
//richard
On Thu, 2014-11-27 at 17:35 +0100, Richard Weinberger wrote:
> Am 27.11.2014 um 17:29 schrieb Artem Bityutskiy:
> > On Thu, 2014-11-27 at 17:08 +0100, Richard Weinberger wrote:
> >>> Obviously, there is some misunderstanding. This looks like lack of
> >>> separation and misuse of layering. I am missing explanations why I am
> >>> wrong...
> >>
> >> So you want me to use the UBI WL background thread for the scheduled fastmap work?
> >
> > No. It is more like either use it or do not use it.
>
> Sorry, I don't understand.
> What do you want to do to?
Just keep the code structured. I am just asking questions and trying
to analyze your patches. If at some point I would like you to do
something specific, I clearly state this. In this case I was complaining
about fastmap specifics in an unrelated file, so basically the wish is
to have it go away. How exactly - not specified, up to you :-) Or, this
means just telling me why it is this way, justify.
When I was working with this code, I did give people specific
suggestions, line-by-line. Now I am doing more of a sanity check,
looking after the bigger picture.
I understand that this is not a picture of an ideal maintainer, and I am
not anymore an ideal maintainer for this stuff (I think I used to,
though), simply because of lack of time. Doing the best effort job now.
> >> I didn't do it that way because you said more than once that fastmap is fastmap and
> >> WL is WL. Therefore I've separated it.
> >
> > And "separated" meaning adding this code to wl.c?
> >
> > +#ifdef CONFIG_MTD_UBI_FASTMAP
> > + flush_work(&ubi->fm_work);
> > +#endif
> >
> > Could it be separated some more then?
> >
>
> Of course, commit "UBI: Move fastmap specific functions out of wl.c" does.
I did not see it in this series. So you could tell this earlier, not
after 2 e-mail exchanges. Do not assume I remember the details of our
previous discussion. Assume I forgot everything :-)
> But this commit is *bugfix* commit.
I thought adding a close function to fastmap.c is a simple task.
On Thu, 2014-11-27 at 17:39 +0100, Richard Weinberger wrote:
> > So the background thread would first check this flag, and if it is set -
> > call the fastmap stuff. The go do the WL works.
> >
> > Just off-the top of my head, take with grain of salt.
>
> So you want me to redesign it?
> IMHO this is just a matter of taste.
>
> Face it, my brain does not work like yours. I design things differently.
OK.
Am 27.11.2014 um 17:47 schrieb Artem Bityutskiy:
> On Thu, 2014-11-27 at 17:35 +0100, Richard Weinberger wrote:
>> Am 27.11.2014 um 17:29 schrieb Artem Bityutskiy:
>>> On Thu, 2014-11-27 at 17:08 +0100, Richard Weinberger wrote:
>>>>> Obviously, there is some misunderstanding. This looks like lack of
>>>>> separation and misuse of layering. I am missing explanations why I am
>>>>> wrong...
>>>>
>>>> So you want me to use the UBI WL background thread for the scheduled fastmap work?
>>>
>>> No. It is more like either use it or do not use it.
>>
>> Sorry, I don't understand.
>> What do you want to do to?
>
> Just keep the code structured. I am just asking questions and trying to
> to analyze your patches. If at some point I would like you to do
> something specific, I clearly state this. In this case I was complaining
> about fastmap specifics in an unrelated file, so basically the wish is
> to have it go away. How exactly - not specified, up to you :-) Or, this
> means just telling me why it is this way, justify.
>
> When I was working with this code, I did give people specific
> suggestions, line-by-line. Now I am more doing more of a sanity check,
> looking after the bigger picture.
>
> I understand that this is not a picture of an ideal maintainer, and I am
> not anymore an ideal maintainer for this stuff (I think I used to,
> though), simply because of lack of time. Doing the best effort job now.
This is perfectly fine. I'm trying hard to keep your job as easy as possible.
>>>> I didn't do it that way because you said more than once that fastmap is fastmap and
>>>> WL is WL. Therefore I've separated it.
>>>
>>> And "separated" meaning adding this code to wl.c?
>>>
>>> +#ifdef CONFIG_MTD_UBI_FASTMAP
>>> + flush_work(&ubi->fm_work);
>>> +#endif
>>>
>>> Could it be separated some more then?
>>>
>>
>> Of course, commit "UBI: Move fastmap specific functions out of wl.c" does.
>
> I did not see it in this series. So you could tell this earlier, not
> after 2 e-mail exchanges. Do not assume I remember the details of our
> previous discussion. Assume I forgot everything :-)
You did not see it in that series because you asked me to split it.
The massive cleanup stuff comes after the fixes.
This is the branch where I keep the whole series.
https://git.kernel.org/cgit/linux/kernel/git/rw/misc.git/log/?h=fastmap_upgrade2
Right now you've seen 6 out of 40 patches.
Maybe I'll change a few commits before submitting them.
I have some new ideas for more cleanups. :)
>> But this commit is *bugfix* commit.
>
> I thought adding an close function to fastmap.c is a simple task.
As I said, later in the series I clean up a lot.
Thanks,
//richard
On 11/24/2014 3:20 PM, Richard Weinberger wrote:
> If the WL pool runs out of PEBs we schedule a fastmap write
> to refill it as soon as possible.
> Ensure that only one at a time is scheduled otherwise we might end in
> a fastmap write storm because writing the fastmap can schedule another
> write if bitflips are detected.
>
Reviewed-by: Tanya Brokhman <[email protected]>
> Signed-off-by: Richard Weinberger <[email protected]>
> ---
> drivers/mtd/ubi/ubi.h | 4 +++-
> drivers/mtd/ubi/wl.c | 8 +++++++-
> 2 files changed, 10 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
> index f80ffab..04c4c05 100644
> --- a/drivers/mtd/ubi/ubi.h
> +++ b/drivers/mtd/ubi/ubi.h
> @@ -427,6 +427,7 @@ struct ubi_debug_info {
> * @fm_size: fastmap size in bytes
> * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
> * @fm_work: fastmap work queue
> + * @fm_work_scheduled: non-zero if fastmap work was scheduled
> *
> * @used: RB-tree of used physical eraseblocks
> * @erroneous: RB-tree of erroneous used physical eraseblocks
> @@ -438,7 +439,7 @@ struct ubi_debug_info {
> * @pq_head: protection queue head
> * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
> * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
> - * @erroneous, and @erroneous_peb_count fields
> + * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
> * @move_mutex: serializes eraseblock moves
> * @work_sem: used to wait for all the scheduled works to finish and prevent
> * new works from being submitted
> @@ -533,6 +534,7 @@ struct ubi_device {
> void *fm_buf;
> size_t fm_size;
> struct work_struct fm_work;
> + int fm_work_scheduled;
>
> /* Wear-leveling sub-system's stuff */
> struct rb_root used;
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index 834f6fe..7f135df 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -149,6 +149,9 @@ static void update_fastmap_work_fn(struct work_struct *wrk)
> {
> struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
> ubi_update_fastmap(ubi);
> + spin_lock(&ubi->wl_lock);
> + ubi->fm_work_scheduled = 0;
> + spin_unlock(&ubi->wl_lock);
> }
>
> /**
> @@ -660,7 +663,10 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
> /* We cannot update the fastmap here because this
> * function is called in atomic context.
> * Let's fail here and refill/update it as soon as possible. */
> - schedule_work(&ubi->fm_work);
> + if (!ubi->fm_work_scheduled) {
> + ubi->fm_work_scheduled = 1;
> + schedule_work(&ubi->fm_work);
> + }
> return NULL;
> } else {
> pnum = pool->pebs[pool->used++];
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
On 11/27/2014 5:38 PM, Artem Bityutskiy wrote:
> On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
>> ...otherwise the deferred work might run after datastructures
>> got freed and corrupt memory.
>>
>> Signed-off-by: Richard Weinberger <[email protected]>
>> ---
>> drivers/mtd/ubi/wl.c | 3 +++
>> 1 file changed, 3 insertions(+)
>>
>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>> index 7f135df..cb2e571 100644
>> --- a/drivers/mtd/ubi/wl.c
>> +++ b/drivers/mtd/ubi/wl.c
>> @@ -2041,6 +2041,9 @@ static void protection_queue_destroy(struct ubi_device *ubi)
>> void ubi_wl_close(struct ubi_device *ubi)
>> {
>> dbg_wl("close the WL sub-system");
>> +#ifdef CONFIG_MTD_UBI_FASTMAP
>> + flush_work(&ubi->fm_work);
>> +#endif
>
> If you are using the work infrastructure implemented in wl.c, then
> fastmap work should be no different to any other work. And we do flush
> all works in 'shutdown_work()'. The fastmap work should be flushed there
> too.
I tend to agree with Artem. Could you please move it to shutdown_work()?
I'm asking because I do want to pick up all the bug fixes patches, but
not going to pick up the "cleaning up" patches at this point.
>
> I think we discussed this already - there should be one single queue of
> works, managed by the same set of functions, all flushed in the same
> place, one-by-one...
>
> Obviously, there is some misunderstanding. This looks like lack of
> separation and misuse of layering. I am missing explanations why I am
> wrong...
>
>
> ______________________________________________________
> Linux MTD discussion mailing list
> http://lists.infradead.org/mailman/listinfo/linux-mtd/
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
Forum, a Linux Foundation Collaborative Project
Am 04.12.2014 um 17:44 schrieb Tanya Brokhman:
> On 11/27/2014 5:38 PM, Artem Bityutskiy wrote:
>> On Mon, 2014-11-24 at 14:20 +0100, Richard Weinberger wrote:
>>> ...otherwise the deferred work might run after datastructures
>>> got freed and corrupt memory.
>>>
>>> Signed-off-by: Richard Weinberger <[email protected]>
>>> ---
>>> drivers/mtd/ubi/wl.c | 3 +++
>>> 1 file changed, 3 insertions(+)
>>>
>>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>>> index 7f135df..cb2e571 100644
>>> --- a/drivers/mtd/ubi/wl.c
>>> +++ b/drivers/mtd/ubi/wl.c
>>> @@ -2041,6 +2041,9 @@ static void protection_queue_destroy(struct ubi_device *ubi)
>>> void ubi_wl_close(struct ubi_device *ubi)
>>> {
>>> dbg_wl("close the WL sub-system");
>>> +#ifdef CONFIG_MTD_UBI_FASTMAP
>>> + flush_work(&ubi->fm_work);
>>> +#endif
>>
>> If you are using the work infrastructure implemented in wl.c, then
>> fastmap work should be no different to any other work. And we do flush
>> all works in 'shutdown_work()'. The fastmap work should be flushed there
>> too.
>
> I tend to agree with Artem. Could you please move it to shutdown_work()? I'm asking because I do want to pick up all the bug fixes patches, but not going to pick up the "cleaning
> up" patches at this point.
Okay. You have overruled me. :-)
Thanks,
//richard
Tanya,
Am 05.12.2014 um 14:09 schrieb Tanya Brokhman:
> On 11/24/2014 3:20 PM, Richard Weinberger wrote:
>> ubi_wl_get_peb() has two problems, it reads the pool
>> size and usage counters without any protection.
>> While reading one value would be perfectly fine it reads multiple
>> values and compares them. This is racy and can lead to incorrect
>> pool handling.
>> Furthermore ubi_update_fastmap() is called without wl_lock held,
>> before incrementing the used counter it needs to be checked again.
>
> I didn't see where you fixed the ubi_update_fastmap issue you just mentioned.
This is exactly what you're questioning below.
We have to recheck as the pool counter could have changed.
>> It could happen that another thread consumed all PEBs from the
>> pool and the counter goes beyond ->size.
>>
>> Signed-off-by: Richard Weinberger <[email protected]>
>> ---
>> drivers/mtd/ubi/ubi.h | 3 ++-
>> drivers/mtd/ubi/wl.c | 34 +++++++++++++++++++++++-----------
>> 2 files changed, 25 insertions(+), 12 deletions(-)
>>
>> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
>> index 04c4c05..d672412 100644
>> --- a/drivers/mtd/ubi/ubi.h
>> +++ b/drivers/mtd/ubi/ubi.h
>> @@ -439,7 +439,8 @@ struct ubi_debug_info {
>> * @pq_head: protection queue head
>> * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
>> * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
>> - * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
>> + * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
>> + * and @fm_wl_pool fields
>> * @move_mutex: serializes eraseblock moves
>> * @work_sem: used to wait for all the scheduled works to finish and prevent
>> * new works from being submitted
>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>> index cb2e571..7730b97 100644
>> --- a/drivers/mtd/ubi/wl.c
>> +++ b/drivers/mtd/ubi/wl.c
>> @@ -629,24 +629,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
>> */
>> int ubi_wl_get_peb(struct ubi_device *ubi)
>> {
>> - int ret;
>> + int ret, retried = 0;
>> struct ubi_fm_pool *pool = &ubi->fm_pool;
>> struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
>>
>> - if (!pool->size || !wl_pool->size || pool->used == pool->size ||
>> - wl_pool->used == wl_pool->size)
>> +again:
>> + spin_lock(&ubi->wl_lock);
>> + /* We check here also for the WL pool because at this point we can
>> + * refill the WL pool synchronous. */
>> + if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
>> + spin_unlock(&ubi->wl_lock);
>> ubi_update_fastmap(ubi);
>> -
>> - /* we got not a single free PEB */
>> - if (!pool->size)
>> - ret = -ENOSPC;
>> - else {
>> spin_lock(&ubi->wl_lock);
>> - ret = pool->pebs[pool->used++];
>> - prot_queue_add(ubi, ubi->lookuptbl[ret]);
>> + }
>> +
>> + if (pool->used == pool->size) {
>
> Im confused about this "if" condition. You just tested pool->used == pool->size in the previous "if". If in the previous if pool->used != pool->size and wl_pool->used !=
> wl_pool->size, you didn't enter, the lock is still held so pool->used != pool->size still. If in the previos "if" wl_pool->used == wl_pool->size was true nd tou released the lock,
> ubi_update_fastmap(ubi) was called, which refills the pools. So again, if pools were refilled pool->used would be 0 here and pool->size > 0.
>
> So in both cases I don't see how at this point pool->used == pool->size could ever be true?
If we enter the "if (pool->used == pool->size || wl_pool->used == wl_pool->size) {" branch we unlock wl_lock and call ubi_update_fastmap().
Another thread can enter ubi_wl_get_peb() and alter the pool counter. So we have to recheck the counter after taking wl_lock again.
>> spin_unlock(&ubi->wl_lock);
>> + if (retried) {
>> + ubi_err(ubi, "Unable to get a free PEB from user WL pool");
>> + ret = -ENOSPC;
>> + goto out;
>> + }
>> + retried = 1;
>
> Why did you decide to retry in this function? and why only 1 retry attempt? I'm not against it, trying to understand the logic.
Because failing immediately with -ENOSPC is not nice. Before we do that I'll give UBI a second chance to produce a free PEB.
Thanks,
//richard
On 11/24/2014 3:20 PM, Richard Weinberger wrote:
> ubi_wl_get_peb() has two problems, it reads the pool
> size and usage counters without any protection.
> While reading one value would be perfectly fine it reads multiple
> values and compares them. This is racy and can lead to incorrect
> pool handling.
> Furthermore ubi_update_fastmap() is called without wl_lock held,
> before incrementing the used counter it needs to be checked again.
I didn't see where you fixed the ubi_update_fastmap issue you just
mentioned.
> It could happen that another thread consumed all PEBs from the
> pool and the counter goes beyond ->size.
>
> Signed-off-by: Richard Weinberger <[email protected]>
> ---
> drivers/mtd/ubi/ubi.h | 3 ++-
> drivers/mtd/ubi/wl.c | 34 +++++++++++++++++++++++-----------
> 2 files changed, 25 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
> index 04c4c05..d672412 100644
> --- a/drivers/mtd/ubi/ubi.h
> +++ b/drivers/mtd/ubi/ubi.h
> @@ -439,7 +439,8 @@ struct ubi_debug_info {
> * @pq_head: protection queue head
> * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
> * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
> - * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
> + * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
> + * and @fm_wl_pool fields
> * @move_mutex: serializes eraseblock moves
> * @work_sem: used to wait for all the scheduled works to finish and prevent
> * new works from being submitted
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index cb2e571..7730b97 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -629,24 +629,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
> */
> int ubi_wl_get_peb(struct ubi_device *ubi)
> {
> - int ret;
> + int ret, retried = 0;
> struct ubi_fm_pool *pool = &ubi->fm_pool;
> struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
>
> - if (!pool->size || !wl_pool->size || pool->used == pool->size ||
> - wl_pool->used == wl_pool->size)
> +again:
> + spin_lock(&ubi->wl_lock);
> + /* We check here also for the WL pool because at this point we can
> + * refill the WL pool synchronous. */
> + if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
> + spin_unlock(&ubi->wl_lock);
> ubi_update_fastmap(ubi);
> -
> - /* we got not a single free PEB */
> - if (!pool->size)
> - ret = -ENOSPC;
> - else {
> spin_lock(&ubi->wl_lock);
> - ret = pool->pebs[pool->used++];
> - prot_queue_add(ubi, ubi->lookuptbl[ret]);
> + }
> +
> + if (pool->used == pool->size) {
I'm confused about this "if" condition. You just tested pool->used ==
pool->size in the previous "if". If in the previous if pool->used !=
pool->size and wl_pool->used != wl_pool->size, you didn't enter, the
lock is still held so pool->used != pool->size still. If in the previous
"if" wl_pool->used == wl_pool->size was true and you released the lock,
ubi_update_fastmap(ubi) was called, which refills the pools. So again,
if pools were refilled pool->used would be 0 here and pool->size > 0.
So in both cases I don't see how at this point pool->used == pool->size
could ever be true?
> spin_unlock(&ubi->wl_lock);
> + if (retried) {
> + ubi_err(ubi, "Unable to get a free PEB from user WL pool");
> + ret = -ENOSPC;
> + goto out;
> + }
> + retried = 1;
Why did you decide to retry in this function? and why only 1 retry
attempt? I'm not against it, trying to understand the logic.
> + goto again;
> }
>
> + ubi_assert(pool->used < pool->size);
> + ret = pool->pebs[pool->used++];
> + prot_queue_add(ubi, ubi->lookuptbl[ret]);
> + spin_unlock(&ubi->wl_lock);
> +out:
> return ret;
> }
>
> @@ -659,7 +671,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
> struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
> int pnum;
>
> - if (pool->used == pool->size || !pool->size) {
> + if (pool->used == pool->size) {
> /* We cannot update the fastmap here because this
> * function is called in atomic context.
> * Let's fail here and refill/update it as soon as possible. */
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
Forum, a Linux Foundation Collaborative Project
Hi Richard
On 12/5/2014 3:20 PM, Richard Weinberger wrote:
> Tanya,
>
> Am 05.12.2014 um 14:09 schrieb Tanya Brokhman:
>> On 11/24/2014 3:20 PM, Richard Weinberger wrote:
>>> ubi_wl_get_peb() has two problems, it reads the pool
>>> size and usage counters without any protection.
>>> While reading one value would be perfectly fine it reads multiple
>>> values and compares them. This is racy and can lead to incorrect
>>> pool handling.
>>> Furthermore ubi_update_fastmap() is called without wl_lock held,
>>> before incrementing the used counter it needs to be checked again.
>>
>> I didn't see where you fixed the ubi_update_fastmap issue you just mentioned.
>
> This is exactly what you're questioning below.
> We have to recheck as the pool counter could have changed.
>
Oh, I understood the commit msg a bit differently, but now I see that it
was my mistake. thanks!
>>> It could happen that another thread consumed all PEBs from the
>>> pool and the counter goes beyond ->size.
>>>
>>> Signed-off-by: Richard Weinberger <[email protected]>
>>> ---
>>> drivers/mtd/ubi/ubi.h | 3 ++-
>>> drivers/mtd/ubi/wl.c | 34 +++++++++++++++++++++++-----------
>>> 2 files changed, 25 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
>>> index 04c4c05..d672412 100644
>>> --- a/drivers/mtd/ubi/ubi.h
>>> +++ b/drivers/mtd/ubi/ubi.h
>>> @@ -439,7 +439,8 @@ struct ubi_debug_info {
>>> * @pq_head: protection queue head
>>> * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
>>> * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
>>> - * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
>>> + * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
>>> + * and @fm_wl_pool fields
>>> * @move_mutex: serializes eraseblock moves
>>> * @work_sem: used to wait for all the scheduled works to finish and prevent
>>> * new works from being submitted
>>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>>> index cb2e571..7730b97 100644
>>> --- a/drivers/mtd/ubi/wl.c
>>> +++ b/drivers/mtd/ubi/wl.c
>>> @@ -629,24 +629,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
>>> */
>>> int ubi_wl_get_peb(struct ubi_device *ubi)
>>> {
>>> - int ret;
>>> + int ret, retried = 0;
>>> struct ubi_fm_pool *pool = &ubi->fm_pool;
>>> struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
>>>
>>> - if (!pool->size || !wl_pool->size || pool->used == pool->size ||
>>> - wl_pool->used == wl_pool->size)
>>> +again:
>>> + spin_lock(&ubi->wl_lock);
>>> + /* We check here also for the WL pool because at this point we can
>>> + * refill the WL pool synchronous. */
>>> + if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
>>> + spin_unlock(&ubi->wl_lock);
>>> ubi_update_fastmap(ubi);
>>> -
>>> - /* we got not a single free PEB */
>>> - if (!pool->size)
>>> - ret = -ENOSPC;
>>> - else {
>>> spin_lock(&ubi->wl_lock);
>>> - ret = pool->pebs[pool->used++];
>>> - prot_queue_add(ubi, ubi->lookuptbl[ret]);
>>> + }
>>> +
>>> + if (pool->used == pool->size) {
>>
>> Im confused about this "if" condition. You just tested pool->used == pool->size in the previous "if". If in the previous if pool->used != pool->size and wl_pool->used !=
>> wl_pool->size, you didn't enter, the lock is still held so pool->used != pool->size still. If in the previos "if" wl_pool->used == wl_pool->size was true nd tou released the lock,
>> ubi_update_fastmap(ubi) was called, which refills the pools. So again, if pools were refilled pool->used would be 0 here and pool->size > 0.
>>
>> So in both cases I don't see how at this point pool->used == pool->size could ever be true?
>
> If we enter the "if (pool->used == pool->size || wl_pool->used == wl_pool->size) {" branch we unlock wl_lock and call ubi_update_fastmap().
> Another thread can enter ubi_wl_get_peb() and alter the pool counter. So we have to recheck the counter after taking wl_lock again.
hmmm... ok. Perhaps a comment could be added in the code to explain this
case in a few words?
>
>>> spin_unlock(&ubi->wl_lock);
>>> + if (retried) {
>>> + ubi_err(ubi, "Unable to get a free PEB from user WL pool");
>>> + ret = -ENOSPC;
>>> + goto out;
>>> + }
>>> + retried = 1;
>>
>> Why did you decide to retry in this function? and why only 1 retry attempt? I'm not against it, trying to understand the logic.
>
> Because failing immediately with -ENOSPC is not nice.
Why not? this is what was done before....
I think what really bothers me in this case is that you don't sleep;
you branch immediately to retry again, so the chances that there will be
a context switch and free PEBs appear aren't that high.
I'm used to functions using some sort of "retry" logic to sleep before
retrying. Of course sleeping isn't a good idea here. That's why the
"retry" bugs me a bit.
Before we do that I'll give UBI a second chance to produce a free PEB.
>
> Thanks,
> //richard
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
On 11/24/2014 3:20 PM, Richard Weinberger wrote:
> Make it two functions, wl_get_wle() and wl_get_peb().
> wl_get_peb() works exactly like __wl_get_peb() but wl_get_wle()
> does not call produce_free_peb().
> While refilling the fastmap user pool we cannot release ubi->wl_lock
> as produce_free_peb() does.
> Hence the fastmap logic uses now wl_get_wle().
hmmm... confused... I don't see fastmap code changed
>
> Signed-off-by: Richard Weinberger <[email protected]>
> ---
> drivers/mtd/ubi/wl.c | 61 ++++++++++++++++++++++++++++++++--------------------
> 1 file changed, 38 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index 7730b97..f028b68 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -499,13 +499,46 @@ out:
> #endif
>
> /**
> - * __wl_get_peb - get a physical eraseblock.
> + * wl_get_wle - get a mean wl entry to be used by wl_get_peb() or
> + * refill_wl_user_pool().
> + * @ubi: UBI device description object
> + *
> + * This function returns a a wear leveling entry in case of success and
If you upload a new version, you have a double "a" here: "returns a a
wear leveling"
> + * NULL in case of failure.
> + */
> +static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
> +{
> + struct ubi_wl_entry *e;
> +
> + e = find_mean_wl_entry(ubi, &ubi->free);
> + if (!e) {
> + ubi_err(ubi, "no free eraseblocks");
> + return NULL;
> + }
> +
> + self_check_in_wl_tree(ubi, e, &ubi->free);
> +
> + /*
> + * Move the physical eraseblock to the protection queue where it will
> + * be protected from being moved for some time.
> + */
I don't think this comment is valid anymore....
> + rb_erase(&e->u.rb, &ubi->free);
> + ubi->free_count--;
> + dbg_wl("PEB %d EC %d", e->pnum, e->ec);
> +
> + return e;
> +}
> +
> +/**
> + * wl_get_peb - get a physical eraseblock.
> * @ubi: UBI device description object
> *
> * This function returns a physical eraseblock in case of success and a
> * negative error code in case of failure.
> + * It is the low level component of ubi_wl_get_peb() in the non-fastmap
> + * case.
> */
> -static int __wl_get_peb(struct ubi_device *ubi)
> +static int wl_get_peb(struct ubi_device *ubi)
> {
> int err;
> struct ubi_wl_entry *e;
> @@ -524,27 +557,9 @@ retry:
> goto retry;
> }
>
> - e = find_mean_wl_entry(ubi, &ubi->free);
> - if (!e) {
> - ubi_err(ubi, "no free eraseblocks");
> - return -ENOSPC;
> - }
> -
> - self_check_in_wl_tree(ubi, e, &ubi->free);
> -
> - /*
> - * Move the physical eraseblock to the protection queue where it will
> - * be protected from being moved for some time.
> - */
> - rb_erase(&e->u.rb, &ubi->free);
> - ubi->free_count--;
> - dbg_wl("PEB %d EC %d", e->pnum, e->ec);
> -#ifndef CONFIG_MTD_UBI_FASTMAP
> - /* We have to enqueue e only if fastmap is disabled,
> - * is fastmap enabled prot_queue_add() will be called by
> - * ubi_wl_get_peb() after removing e from the pool. */
> + e = wl_get_wle(ubi);
> prot_queue_add(ubi, e);
> -#endif
> +
> return e->pnum;
> }
>
> @@ -704,7 +719,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
> int peb, err;
>
> spin_lock(&ubi->wl_lock);
> - peb = __wl_get_peb(ubi);
> + peb = wl_get_peb(ubi);
> spin_unlock(&ubi->wl_lock);
>
> if (peb < 0)
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
Hi Richard,
On 11/24/2014 3:20 PM, Richard Weinberger wrote:
> Currently ubi_refill_pools() first fills the first and then
> the second one.
> If only very few free PEBs are available the second pool can get
> zero PEBs.
> Change ubi_refill_pools() to distribute free PEBs fair between
> all pools.
>
> Signed-off-by: Richard Weinberger <[email protected]>
> ---
> drivers/mtd/ubi/wl.c | 77 +++++++++++++++++++++++++++-------------------------
> 1 file changed, 40 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index f028b68..c2822f7 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -583,59 +583,62 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
> }
>
> /**
> - * refill_wl_pool - refills all the fastmap pool used by the
> - * WL sub-system.
> + * ubi_refill_pools - refills all fastmap PEB pools.
> * @ubi: UBI device description object
> */
> -static void refill_wl_pool(struct ubi_device *ubi)
> +void ubi_refill_pools(struct ubi_device *ubi)
> {
> + struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
> + struct ubi_fm_pool *pool = &ubi->fm_pool;
> struct ubi_wl_entry *e;
> - struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
> + int enough;
>
> + spin_lock(&ubi->wl_lock);
> +
> + return_unused_pool_pebs(ubi, wl_pool);
> return_unused_pool_pebs(ubi, pool);
>
> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
> - if (!ubi->free.rb_node ||
> - (ubi->free_count - ubi->beb_rsvd_pebs < 5))
> - break;
> + wl_pool->size = 0;
> + pool->size = 0;
>
> - e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
> - self_check_in_wl_tree(ubi, e, &ubi->free);
> - rb_erase(&e->u.rb, &ubi->free);
> - ubi->free_count--;
> + for (;;) {
You loop for max(pool->max_size, wl_pool->max_size) iterations. IMO,
the code will be clearer if you use for(i=0; i<max(pool->max_size,
wl_pool->max_size); i++) instead of "int enough".
This is just a coding style preference of course. I personally don't like
for(;;) that much.... Just a suggestion. :)
> + enough = 0;
> + if (pool->size < pool->max_size) {
> + if (!ubi->free.rb_node ||
> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
> + break;
>
> - pool->pebs[pool->size] = e->pnum;
> - }
> - pool->used = 0;
> -}
> + e = wl_get_wle(ubi);
> + if (!e)
> + break;
>
> -/**
> - * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
> - * @ubi: UBI device description object
> - */
> -static void refill_wl_user_pool(struct ubi_device *ubi)
> -{
> - struct ubi_fm_pool *pool = &ubi->fm_pool;
> + pool->pebs[pool->size] = e->pnum;
> + pool->size++;
> + } else
> + enough++;
>
> - return_unused_pool_pebs(ubi, pool);
> + if (wl_pool->size < wl_pool->max_size) {
> + if (!ubi->free.rb_node ||
> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
> + break;
>
> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
> - pool->pebs[pool->size] = __wl_get_peb(ubi);
> - if (pool->pebs[pool->size] < 0)
> + e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
> + self_check_in_wl_tree(ubi, e, &ubi->free);
> + rb_erase(&e->u.rb, &ubi->free);
> + ubi->free_count--;
why don't you use wl_get_peb() here?
Other then that - I agree with the patch. So if you want to keep it as
is, I'll add Reviewed-by.
> +
> + wl_pool->pebs[wl_pool->size] = e->pnum;
> + wl_pool->size++;
> + } else
> + enough++;
> +
> + if (enough == 2)
> break;
> }
> +
> + wl_pool->used = 0;
> pool->used = 0;
> -}
>
> -/**
> - * ubi_refill_pools - refills all fastmap PEB pools.
> - * @ubi: UBI device description object
> - */
> -void ubi_refill_pools(struct ubi_device *ubi)
> -{
> - spin_lock(&ubi->wl_lock);
> - refill_wl_pool(ubi);
> - refill_wl_user_pool(ubi);
> spin_unlock(&ubi->wl_lock);
> }
>
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
Tanya,
Am 05.12.2014 um 18:55 schrieb Tanya Brokhman:
> Hi Richard,
>
> On 11/24/2014 3:20 PM, Richard Weinberger wrote:
>> Currently ubi_refill_pools() first fills the first and then
>> the second one.
>> If only very few free PEBs are available the second pool can get
>> zero PEBs.
>> Change ubi_refill_pools() to distribute free PEBs fair between
>> all pools.
>>
>> Signed-off-by: Richard Weinberger <[email protected]>
>> ---
>> drivers/mtd/ubi/wl.c | 77 +++++++++++++++++++++++++++-------------------------
>> 1 file changed, 40 insertions(+), 37 deletions(-)
>>
>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>> index f028b68..c2822f7 100644
>> --- a/drivers/mtd/ubi/wl.c
>> +++ b/drivers/mtd/ubi/wl.c
>> @@ -583,59 +583,62 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
>> }
>>
>> /**
>> - * refill_wl_pool - refills all the fastmap pool used by the
>> - * WL sub-system.
>> + * ubi_refill_pools - refills all fastmap PEB pools.
>> * @ubi: UBI device description object
>> */
>> -static void refill_wl_pool(struct ubi_device *ubi)
>> +void ubi_refill_pools(struct ubi_device *ubi)
>> {
>> + struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
>> + struct ubi_fm_pool *pool = &ubi->fm_pool;
>> struct ubi_wl_entry *e;
>> - struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
>> + int enough;
>>
>> + spin_lock(&ubi->wl_lock);
>> +
>> + return_unused_pool_pebs(ubi, wl_pool);
>> return_unused_pool_pebs(ubi, pool);
>>
>> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
>> - if (!ubi->free.rb_node ||
>> - (ubi->free_count - ubi->beb_rsvd_pebs < 5))
>> - break;
>> + wl_pool->size = 0;
>> + pool->size = 0;
>>
>> - e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
>> - self_check_in_wl_tree(ubi, e, &ubi->free);
>> - rb_erase(&e->u.rb, &ubi->free);
>> - ubi->free_count--;
>> + for (;;) {
>
> You loop for max(pool->max_size, wl_pool->max_size) itterations. IMO, the code will be more clear if you use for(i=0; i<max(pool->max_size, wl_pool->max_size); i++) instead of "int
> enough".
> This is just coding style preference of course. I personally don't like for(;;) that much.... Just a suggestion. :)
I agree that it's a matter of taste. :)
>> + enough = 0;
>> + if (pool->size < pool->max_size) {
>> + if (!ubi->free.rb_node ||
>> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
>> + break;
>>
>> - pool->pebs[pool->size] = e->pnum;
>> - }
>> - pool->used = 0;
>> -}
>> + e = wl_get_wle(ubi);
>> + if (!e)
>> + break;
>>
>> -/**
>> - * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
>> - * @ubi: UBI device description object
>> - */
>> -static void refill_wl_user_pool(struct ubi_device *ubi)
>> -{
>> - struct ubi_fm_pool *pool = &ubi->fm_pool;
>> + pool->pebs[pool->size] = e->pnum;
>> + pool->size++;
>> + } else
>> + enough++;
>>
>> - return_unused_pool_pebs(ubi, pool);
>> + if (wl_pool->size < wl_pool->max_size) {
>> + if (!ubi->free.rb_node ||
>> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
>> + break;
>>
>> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
>> - pool->pebs[pool->size] = __wl_get_peb(ubi);
>> - if (pool->pebs[pool->size] < 0)
>> + e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
>> + self_check_in_wl_tree(ubi, e, &ubi->free);
>> + rb_erase(&e->u.rb, &ubi->free);
>> + ubi->free_count--;
>
> why don't you use wl_get_peb() here?
Because wl_get_peb() is not equivalent to the above code.
We want a PEB to be used for wear-leveling, not for "end users" like UBIFS.
Thanks,
//richard
Tanya,
Am 05.12.2014 um 18:41 schrieb Tanya Brokhman:
> On 11/24/2014 3:20 PM, Richard Weinberger wrote:
>> Make it two functions, wl_get_wle() and wl_get_peb().
>> wl_get_peb() works exactly like __wl_get_peb() but wl_get_wle()
>> does not call produce_free_peb().
>> While refilling the fastmap user pool we cannot release ubi->wl_lock
>> as produce_free_peb() does.
>> Hence the fastmap logic uses now wl_get_wle().
>
> hmmm... confused... I don't see fastmap code changed
It will be used in ubi_refill_pools() later.
Patch 6/6 does this.
I'll fix the commit message.
>>
>> Signed-off-by: Richard Weinberger <[email protected]>
>> ---
>> drivers/mtd/ubi/wl.c | 61 ++++++++++++++++++++++++++++++++--------------------
>> 1 file changed, 38 insertions(+), 23 deletions(-)
>>
>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>> index 7730b97..f028b68 100644
>> --- a/drivers/mtd/ubi/wl.c
>> +++ b/drivers/mtd/ubi/wl.c
>> @@ -499,13 +499,46 @@ out:
>> #endif
>>
>> /**
>> - * __wl_get_peb - get a physical eraseblock.
>> + * wl_get_wle - get a mean wl entry to be used by wl_get_peb() or
>> + * refill_wl_user_pool().
>> + * @ubi: UBI device description object
>> + *
>> + * This function returns a a wear leveling entry in case of success and
>
> If you upload a new version, you have a double "a" here: "returns a a wear leveling"
Thx!
>> + * NULL in case of failure.
>> + */
>> +static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
>> +{
>> + struct ubi_wl_entry *e;
>> +
>> + e = find_mean_wl_entry(ubi, &ubi->free);
>> + if (!e) {
>> + ubi_err(ubi, "no free eraseblocks");
>> + return NULL;
>> + }
>> +
>> + self_check_in_wl_tree(ubi, e, &ubi->free);
>> +
>> + /*
>> + * Move the physical eraseblock to the protection queue where it will
>> + * be protected from being moved for some time.
>> + */
>
> I don't think this comment is valid anymore....
Correct, will update!
Thanks,
//richard
Tanya,
Am 05.12.2014 um 17:54 schrieb Tanya Brokhman:
> Hi Richard
>
> On 12/5/2014 3:20 PM, Richard Weinberger wrote:
>> Tanya,
>>
>> Am 05.12.2014 um 14:09 schrieb Tanya Brokhman:
>>> On 11/24/2014 3:20 PM, Richard Weinberger wrote:
>>>> ubi_wl_get_peb() has two problems, it reads the pool
>>>> size and usage counters without any protection.
>>>> While reading one value would be perfectly fine it reads multiple
>>>> values and compares them. This is racy and can lead to incorrect
>>>> pool handling.
>>>> Furthermore ubi_update_fastmap() is called without wl_lock held,
>>>> before incrementing the used counter it needs to be checked again.
>>>
>>> I didn't see where you fixed the ubi_update_fastmap issue you just mentioned.
>>
>> This is exactly what you're questioning below.
>> We have to recheck as the pool counter could have changed.
>>
>
> Oh, I understood the commit msg a bit differently, but now I see that it was my mistake. thanks!
>
>>>> It could happen that another thread consumed all PEBs from the
>>>> pool and the counter goes beyond ->size.
>>>>
>>>> Signed-off-by: Richard Weinberger <[email protected]>
>>>> ---
>>>> drivers/mtd/ubi/ubi.h | 3 ++-
>>>> drivers/mtd/ubi/wl.c | 34 +++++++++++++++++++++++-----------
>>>> 2 files changed, 25 insertions(+), 12 deletions(-)
>>>>
>>>> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
>>>> index 04c4c05..d672412 100644
>>>> --- a/drivers/mtd/ubi/ubi.h
>>>> +++ b/drivers/mtd/ubi/ubi.h
>>>> @@ -439,7 +439,8 @@ struct ubi_debug_info {
>>>> * @pq_head: protection queue head
>>>> * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
>>>> * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
>>>> - * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
>>>> + * @erroneous, @erroneous_peb_count, @fm_work_scheduled, @fm_pool,
>>>> + * and @fm_wl_pool fields
>>>> * @move_mutex: serializes eraseblock moves
>>>> * @work_sem: used to wait for all the scheduled works to finish and prevent
>>>> * new works from being submitted
>>>> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
>>>> index cb2e571..7730b97 100644
>>>> --- a/drivers/mtd/ubi/wl.c
>>>> +++ b/drivers/mtd/ubi/wl.c
>>>> @@ -629,24 +629,36 @@ void ubi_refill_pools(struct ubi_device *ubi)
>>>> */
>>>> int ubi_wl_get_peb(struct ubi_device *ubi)
>>>> {
>>>> - int ret;
>>>> + int ret, retried = 0;
>>>> struct ubi_fm_pool *pool = &ubi->fm_pool;
>>>> struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
>>>>
>>>> - if (!pool->size || !wl_pool->size || pool->used == pool->size ||
>>>> - wl_pool->used == wl_pool->size)
>>>> +again:
>>>> + spin_lock(&ubi->wl_lock);
>>>> + /* We check here also for the WL pool because at this point we can
>>>> + * refill the WL pool synchronous. */
>>>> + if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
>>>> + spin_unlock(&ubi->wl_lock);
>>>> ubi_update_fastmap(ubi);
>>>> -
>>>> - /* we got not a single free PEB */
>>>> - if (!pool->size)
>>>> - ret = -ENOSPC;
>>>> - else {
>>>> spin_lock(&ubi->wl_lock);
>>>> - ret = pool->pebs[pool->used++];
>>>> - prot_queue_add(ubi, ubi->lookuptbl[ret]);
>>>> + }
>>>> +
>>>> + if (pool->used == pool->size) {
>>>
>>> Im confused about this "if" condition. You just tested pool->used == pool->size in the previous "if". If in the previous if pool->used != pool->size and wl_pool->used !=
>>> wl_pool->size, you didn't enter, the lock is still held so pool->used != pool->size still. If in the previos "if" wl_pool->used == wl_pool->size was true nd tou released the lock,
>>> ubi_update_fastmap(ubi) was called, which refills the pools. So again, if pools were refilled pool->used would be 0 here and pool->size > 0.
>>>
>>> So in both cases I don't see how at this point pool->used == pool->size could ever be true?
>>
>> If we enter the "if (pool->used == pool->size || wl_pool->used == wl_pool->size) {" branch we unlock wl_lock and call ubi_update_fastmap().
>> Another thread can enter ubi_wl_get_peb() and alter the pool counter. So we have to recheck the counter after taking wl_lock again.
>
> hmmm... ok. Perhaps a comment could be added in the code to explain this case in a few words?
>
Makes sense, I'll add a comment.
>>>> spin_unlock(&ubi->wl_lock);
>>>> + if (retried) {
>>>> + ubi_err(ubi, "Unable to get a free PEB from user WL pool");
>>>> + ret = -ENOSPC;
>>>> + goto out;
>>>> + }
>>>> + retried = 1;
>>>
>>> Why did you decide to retry in this function? and why only 1 retry attempt? I'm not against it, trying to understand the logic.
>>
>> Because failing immediately with -ENOSPC is not nice.
>
> Why not? this is what was done before....
The behavior from before was not good.
If we return here a -ENOSPC it is not because we ran out of free PEBs, it is because the pool contains
no free PEBs and needs refilling.
As between refilling the pool and requesting a fresh PEB from it another thread could "steal" all PEBs
we retry.
> I think what I really bothers me in this case is that you don't sleep, you branch immediately to retry again, so the chances that there will be context switch and free pebs appear
> aren't that high.
> I'm used to functions using some sort of "retry" logic to sleep before retrying. Of course sleeping isn't a good idea here. That's why the "retry" bugs me a bit.
You mean a cond_resched()?
This retry-logic is a common pattern in UBI. For example see ubi_wl_put_peb().
Thanks,
//richard
On 12/5/2014 11:08 PM, Richard Weinberger wrote:
>
>>>>> spin_unlock(&ubi->wl_lock);
>>>>> + if (retried) {
>>>>> + ubi_err(ubi, "Unable to get a free PEB from user WL pool");
>>>>> + ret = -ENOSPC;
>>>>> + goto out;
>>>>> + }
>>>>> + retried = 1;
>>>>
>>>> Why did you decide to retry in this function? and why only 1 retry attempt? I'm not against it, trying to understand the logic.
>>>
>>> Because failing immediately with -ENOSPC is not nice.
>>
>> Why not? this is what was done before....
>
> The behavior from before was not good.
> If we return here a -ENOSPC it is not because we ran out of free PEBs, it is because the pool contains
> no free PEBs and needs refilling.
> As between refilling the pool and requesting a fresh PEB from it another thread could "steal" all PEBs
> we retry.
>
>> I think what I really bothers me in this case is that you don't sleep, you branch immediately to retry again, so the chances that there will be context switch and free pebs appear
>> aren't that high.
>> I'm used to functions using some sort of "retry" logic to sleep before retrying. Of course sleeping isn't a good idea here. That's why the "retry" bugs me a bit.
>
> You mean a cond_resched()?
> This retry-logic is common pattern in UBI. For exmaple see ubi_wl_put_peb().
you're right. didn't pay much attention to ubi_wl_put_peb() before.
don't like it there either :)
perhaps we can rethink this later for both cases.
>
> Thanks,
> //richard
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora
Forum, a Linux Foundation Collaborative Project
Hi Richard
On 12/5/2014 10:56 PM, Richard Weinberger wrote:
>>> -/**
>>> - * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
>>> - * @ubi: UBI device description object
>>> - */
>>> -static void refill_wl_user_pool(struct ubi_device *ubi)
>>> -{
>>> - struct ubi_fm_pool *pool = &ubi->fm_pool;
>>> + pool->pebs[pool->size] = e->pnum;
>>> + pool->size++;
>>> + } else
>>> + enough++;
>>>
>>> - return_unused_pool_pebs(ubi, pool);
>>> + if (wl_pool->size < wl_pool->max_size) {
>>> + if (!ubi->free.rb_node ||
>>> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
>>> + break;
>>>
>>> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
>>> - pool->pebs[pool->size] = __wl_get_peb(ubi);
>>> - if (pool->pebs[pool->size] < 0)
>>> + e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
>>> + self_check_in_wl_tree(ubi, e, &ubi->free);
>>> + rb_erase(&e->u.rb, &ubi->free);
>>> + ubi->free_count--;
>>
>> why don't you use wl_get_peb() here?
>
> Because wl_get_peb() is not equivalent to the above code.
> We want a PEB to be used for wear-leveling not for "end users" like UBIFS.
sorry, my mistake. I meant wl_get_wle() (the new function). the only
diff between wl_get_wle() and the above is that you use find_wl_entry()
and wl_get_wle() uses find_mean_wl_entry() and takes the anchor into
consideration. So I'm trying to understand why wl_get_wle() isn't good here?
>
> Thanks,
> //richard
>
Thanks,
Tanya Brokhman
--
Qualcomm Israel, on behalf of Qualcomm Innovation Center, Inc.
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project
Am 07.12.2014 um 08:36 schrieb Tanya Brokhman:
> On 12/5/2014 11:08 PM, Richard Weinberger wrote:
>
>>
>>>>>> spin_unlock(&ubi->wl_lock);
>>>>>> + if (retried) {
>>>>>> + ubi_err(ubi, "Unable to get a free PEB from user WL pool");
>>>>>> + ret = -ENOSPC;
>>>>>> + goto out;
>>>>>> + }
>>>>>> + retried = 1;
>>>>>
>>>>> Why did you decide to retry in this function? and why only 1 retry attempt? I'm not against it, trying to understand the logic.
>>>>
>>>> Because failing immediately with -ENOSPC is not nice.
>>>
>>> Why not? this is what was done before....
>>
>> The behavior from before was not good.
>> If we return here a -ENOSPC it is not because we ran out of free PEBs, it is because the pool contains
>> no free PEBs and needs refilling.
>> As between refilling the pool and requesting a fresh PEB from it another thread could "steal" all PEBs
>> we retry.
>>
>>> I think what I really bothers me in this case is that you don't sleep, you branch immediately to retry again, so the chances that there will be context switch and free pebs appear
>>> aren't that high.
>>> I'm used to functions using some sort of "retry" logic to sleep before retrying. Of course sleeping isn't a good idea here. That's why the "retry" bugs me a bit.
>>
>> You mean a cond_resched()?
>> This retry-logic is common pattern in UBI. For exmaple see ubi_wl_put_peb().
>
> you're right. didn't pay much attention to ubi_wl_put_peb() before. don't like it there either :)
> perhaps we can rethink this later for both cases.
If there is room for improvement I'm all open for an extra patch set all over UBI. :-)
Thanks,
//richard
Am 07.12.2014 um 08:55 schrieb Tanya Brokhman:
> Hi Richard
>
> On 12/5/2014 10:56 PM, Richard Weinberger wrote:
>>>> -/**
>>>> - * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
>>>> - * @ubi: UBI device description object
>>>> - */
>>>> -static void refill_wl_user_pool(struct ubi_device *ubi)
>>>> -{
>>>> - struct ubi_fm_pool *pool = &ubi->fm_pool;
>>>> + pool->pebs[pool->size] = e->pnum;
>>>> + pool->size++;
>>>> + } else
>>>> + enough++;
>>>>
>>>> - return_unused_pool_pebs(ubi, pool);
>>>> + if (wl_pool->size < wl_pool->max_size) {
>>>> + if (!ubi->free.rb_node ||
>>>> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
>>>> + break;
>>>>
>>>> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
>>>> - pool->pebs[pool->size] = __wl_get_peb(ubi);
>>>> - if (pool->pebs[pool->size] < 0)
>>>> + e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
>>>> + self_check_in_wl_tree(ubi, e, &ubi->free);
>>>> + rb_erase(&e->u.rb, &ubi->free);
>>>> + ubi->free_count--;
>>>
>>> why don't you use wl_get_peb() here?
>>
>> Because wl_get_peb() is not equivalent to the above code.
>> We want a PEB to be used for wear-leveling not for "end users" like UBIFS.
>
> sorry, my mistake. I meant wl_get_wle() (the new function). the only diff between wl_get_wle() and the above is that you use find_wl_entry() and wl_get_wle() uses
> find_mean_wl_entry() and takes the anchor into consideration. So I;m trying to understand why wl_get_wle() isn't good here?
wl_get_wle() uses find_mean_wl_entry() which returns a PEB to be used for "end users".
Please see the 3rd parameter to find_wl_entry().
For "end users" a medium worn out PEB is good enough.
Thanks,
//richard
On Mon, Nov 24, 2014 at 2:20 PM, Richard Weinberger <[email protected]> wrote:
> Artem,
>
> as requested I'm resending my fastmap work in smaller pieces.
> This is pile 1 of 7.
> Rebasing my patches to ubifs.git was a massive PITA because the
> logging style changes touched a lot of code and almost every patch
> failed to apply and needed inspection by hand.
> The first patches are bug fixes, the latter introduce cleanups
> and new features.
> After all bugfixes are mainline I'll make sure that all needed
> fixes go into -stable.
Artem,
how shall we proceed?
Currently around 45 of my patches are in flight.
Some got comments, some not. If it helps I can address these comments
immediately
and resend the individual patches. The vast majority of all comments
are trivial to fix (typos, style, etc..)
Initially I hoped that all patches will get comments such that I can
do a v3 which can be merged
in 3.19-rc1. :-\
In my local queue even more patches wait to be submitted.
--
Thanks,
//richard
Hi Richard,
On Mon, Nov 24, 2014 at 02:20:32PM +0100, Richard Weinberger wrote:
> If the WL pool runs out of PEBs we schedule a fastmap write
> to refill it as soon as possible.
> Ensure that only one at a time is scheduled otherwise we might end in
> a fastmap write storm because writing the fastmap can schedule another
> write if bitflips are detected.
Reviewed-by: Guido Martínez <[email protected]>
> Signed-off-by: Richard Weinberger <[email protected]>
> ---
> drivers/mtd/ubi/ubi.h | 4 +++-
> drivers/mtd/ubi/wl.c | 8 +++++++-
> 2 files changed, 10 insertions(+), 2 deletions(-)
>
> diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
> index f80ffab..04c4c05 100644
> --- a/drivers/mtd/ubi/ubi.h
> +++ b/drivers/mtd/ubi/ubi.h
> @@ -427,6 +427,7 @@ struct ubi_debug_info {
> * @fm_size: fastmap size in bytes
> * @fm_sem: allows ubi_update_fastmap() to block EBA table changes
> * @fm_work: fastmap work queue
> + * @fm_work_scheduled: non-zero if fastmap work was scheduled
> *
> * @used: RB-tree of used physical eraseblocks
> * @erroneous: RB-tree of erroneous used physical eraseblocks
> @@ -438,7 +439,7 @@ struct ubi_debug_info {
> * @pq_head: protection queue head
> * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
> * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
> - * @erroneous, and @erroneous_peb_count fields
> + * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
> * @move_mutex: serializes eraseblock moves
> * @work_sem: used to wait for all the scheduled works to finish and prevent
> * new works from being submitted
> @@ -533,6 +534,7 @@ struct ubi_device {
> void *fm_buf;
> size_t fm_size;
> struct work_struct fm_work;
> + int fm_work_scheduled;
>
> /* Wear-leveling sub-system's stuff */
> struct rb_root used;
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index 834f6fe..7f135df 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -149,6 +149,9 @@ static void update_fastmap_work_fn(struct work_struct *wrk)
> {
> struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
> ubi_update_fastmap(ubi);
> + spin_lock(&ubi->wl_lock);
> + ubi->fm_work_scheduled = 0;
> + spin_unlock(&ubi->wl_lock);
> }
>
> /**
> @@ -660,7 +663,10 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
> /* We cannot update the fastmap here because this
> * function is called in atomic context.
> * Let's fail here and refill/update it as soon as possible. */
> - schedule_work(&ubi->fm_work);
> + if (!ubi->fm_work_scheduled) {
> + ubi->fm_work_scheduled = 1;
> + schedule_work(&ubi->fm_work);
> + }
> return NULL;
> } else {
> pnum = pool->pebs[pool->used++];
> --
> 1.8.4.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
Guido Martínez, VanguardiaSur
http://www.vanguardiasur.com.ar
On Mon, Nov 24, 2014 at 02:20:33PM +0100, Richard Weinberger wrote:
> ...otherwise the deferred work might run after datastructures
> got freed and corrupt memory.
>
> Signed-off-by: Richard Weinberger <[email protected]>
After moving it to shutdown_work():
Reviewed-by: Guido Martínez <[email protected]>
> ---
> drivers/mtd/ubi/wl.c | 3 +++
> 1 file changed, 3 insertions(+)
>
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index 7f135df..cb2e571 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -2041,6 +2041,9 @@ static void protection_queue_destroy(struct ubi_device *ubi)
> void ubi_wl_close(struct ubi_device *ubi)
> {
> dbg_wl("close the WL sub-system");
> +#ifdef CONFIG_MTD_UBI_FASTMAP
> + flush_work(&ubi->fm_work);
> +#endif
> shutdown_work(ubi);
> protection_queue_destroy(ubi);
> tree_destroy(&ubi->used);
> --
> 1.8.4.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
Guido Martínez, VanguardiaSur
http://www.vanguardiasur.com.ar
Hi Richard,
On Mon, Nov 24, 2014 at 02:20:35PM +0100, Richard Weinberger wrote:
> Make it two functions, wl_get_wle() and wl_get_peb().
> wl_get_peb() works exactly like __wl_get_peb() but wl_get_wle()
> does not call produce_free_peb().
> While refilling the fastmap user pool we cannot release ubi->wl_lock
> as produce_free_peb() does.
> Hence the fastmap logic uses now wl_get_wle().
>
> Signed-off-by: Richard Weinberger <[email protected]>
> ---
> drivers/mtd/ubi/wl.c | 61 ++++++++++++++++++++++++++++++++--------------------
> 1 file changed, 38 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index 7730b97..f028b68 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -499,13 +499,46 @@ out:
> #endif
>
> /**
> - * __wl_get_peb - get a physical eraseblock.
> + * wl_get_wle - get a mean wl entry to be used by wl_get_peb() or
> + * refill_wl_user_pool().
> + * @ubi: UBI device description object
> + *
> + * This function returns a a wear leveling entry in case of success and
> + * NULL in case of failure.
> + */
> +static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
> +{
> + struct ubi_wl_entry *e;
> +
> + e = find_mean_wl_entry(ubi, &ubi->free);
> + if (!e) {
> + ubi_err(ubi, "no free eraseblocks");
> + return NULL;
> + }
> +
> + self_check_in_wl_tree(ubi, e, &ubi->free);
> +
> + /*
> + * Move the physical eraseblock to the protection queue where it will
> + * be protected from being moved for some time.
> + */
> + rb_erase(&e->u.rb, &ubi->free);
> + ubi->free_count--;
> + dbg_wl("PEB %d EC %d", e->pnum, e->ec);
> +
> + return e;
> +}
> +
> +/**
> + * wl_get_peb - get a physical eraseblock.
> * @ubi: UBI device description object
> *
> * This function returns a physical eraseblock in case of success and a
> * negative error code in case of failure.
> + * It is the low level component of ubi_wl_get_peb() in the non-fastmap
> + * case.
> */
> -static int __wl_get_peb(struct ubi_device *ubi)
> +static int wl_get_peb(struct ubi_device *ubi)
This bit breaks the build since (at this point) the usage of
__wl_get_peb on refill_wl_user_pool is not changed. There's also a
comment mentioning __wl_get_peb, which should be updated.
I can see both changes in the next patch, but it's way better if we
don't break the build :).
Besides from these two and the comments by Tanya, the patch looks good.
> {
> int err;
> struct ubi_wl_entry *e;
> @@ -524,27 +557,9 @@ retry:
> goto retry;
> }
>
> - e = find_mean_wl_entry(ubi, &ubi->free);
> - if (!e) {
> - ubi_err(ubi, "no free eraseblocks");
> - return -ENOSPC;
> - }
> -
> - self_check_in_wl_tree(ubi, e, &ubi->free);
> -
> - /*
> - * Move the physical eraseblock to the protection queue where it will
> - * be protected from being moved for some time.
> - */
> - rb_erase(&e->u.rb, &ubi->free);
> - ubi->free_count--;
> - dbg_wl("PEB %d EC %d", e->pnum, e->ec);
> -#ifndef CONFIG_MTD_UBI_FASTMAP
> - /* We have to enqueue e only if fastmap is disabled,
> - * is fastmap enabled prot_queue_add() will be called by
> - * ubi_wl_get_peb() after removing e from the pool. */
> + e = wl_get_wle(ubi);
> prot_queue_add(ubi, e);
> -#endif
> +
> return e->pnum;
> }
>
> @@ -704,7 +719,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
> int peb, err;
>
> spin_lock(&ubi->wl_lock);
> - peb = __wl_get_peb(ubi);
> + peb = wl_get_peb(ubi);
> spin_unlock(&ubi->wl_lock);
>
> if (peb < 0)
--
Guido Martínez, VanguardiaSur
http://www.vanguardiasur.com.ar
Hi Richard,
On Mon, Nov 24, 2014 at 02:20:36PM +0100, Richard Weinberger wrote:
> Currently ubi_refill_pools() first fills the first and then
> the second one.
> If only very few free PEBs are available the second pool can get
> zero PEBs.
> Change ubi_refill_pools() to distribute free PEBs fair between
> all pools.
>
> Signed-off-by: Richard Weinberger <[email protected]>
Reviewed-by: Guido Martínez <[email protected]>
> ---
> drivers/mtd/ubi/wl.c | 77 +++++++++++++++++++++++++++-------------------------
> 1 file changed, 40 insertions(+), 37 deletions(-)
>
> diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
> index f028b68..c2822f7 100644
> --- a/drivers/mtd/ubi/wl.c
> +++ b/drivers/mtd/ubi/wl.c
> @@ -583,59 +583,62 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
> }
>
> /**
> - * refill_wl_pool - refills all the fastmap pool used by the
> - * WL sub-system.
> + * ubi_refill_pools - refills all fastmap PEB pools.
> * @ubi: UBI device description object
> */
> -static void refill_wl_pool(struct ubi_device *ubi)
> +void ubi_refill_pools(struct ubi_device *ubi)
> {
> + struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
> + struct ubi_fm_pool *pool = &ubi->fm_pool;
> struct ubi_wl_entry *e;
> - struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
> + int enough;
>
> + spin_lock(&ubi->wl_lock);
> +
> + return_unused_pool_pebs(ubi, wl_pool);
> return_unused_pool_pebs(ubi, pool);
>
> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
> - if (!ubi->free.rb_node ||
> - (ubi->free_count - ubi->beb_rsvd_pebs < 5))
> - break;
> + wl_pool->size = 0;
> + pool->size = 0;
>
> - e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
> - self_check_in_wl_tree(ubi, e, &ubi->free);
> - rb_erase(&e->u.rb, &ubi->free);
> - ubi->free_count--;
> + for (;;) {
> + enough = 0;
> + if (pool->size < pool->max_size) {
> + if (!ubi->free.rb_node ||
> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
> + break;
>
> - pool->pebs[pool->size] = e->pnum;
> - }
> - pool->used = 0;
> -}
> + e = wl_get_wle(ubi);
> + if (!e)
> + break;
>
> -/**
> - * refill_wl_user_pool - refills all the fastmap pool used by ubi_wl_get_peb.
> - * @ubi: UBI device description object
> - */
> -static void refill_wl_user_pool(struct ubi_device *ubi)
> -{
> - struct ubi_fm_pool *pool = &ubi->fm_pool;
> + pool->pebs[pool->size] = e->pnum;
> + pool->size++;
> + } else
> + enough++;
>
> - return_unused_pool_pebs(ubi, pool);
> + if (wl_pool->size < wl_pool->max_size) {
> + if (!ubi->free.rb_node ||
> + (ubi->free_count - ubi->beb_rsvd_pebs < 5))
> + break;
>
> - for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
> - pool->pebs[pool->size] = __wl_get_peb(ubi);
> - if (pool->pebs[pool->size] < 0)
> + e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
> + self_check_in_wl_tree(ubi, e, &ubi->free);
> + rb_erase(&e->u.rb, &ubi->free);
> + ubi->free_count--;
> +
> + wl_pool->pebs[wl_pool->size] = e->pnum;
> + wl_pool->size++;
> + } else
> + enough++;
> +
> + if (enough == 2)
> break;
> }
> +
> + wl_pool->used = 0;
> pool->used = 0;
> -}
>
> -/**
> - * ubi_refill_pools - refills all fastmap PEB pools.
> - * @ubi: UBI device description object
> - */
> -void ubi_refill_pools(struct ubi_device *ubi)
> -{
> - spin_lock(&ubi->wl_lock);
> - refill_wl_pool(ubi);
> - refill_wl_user_pool(ubi);
> spin_unlock(&ubi->wl_lock);
> }
>
> --
> 1.8.4.5
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
> the body of a message to [email protected]
> More majordomo info at http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at http://www.tux.org/lkml/
--
Guido Martínez, VanguardiaSur
http://www.vanguardiasur.com.ar