We delete a block from the gc list before reclaiming it, so
put it back on the list if its reclaim fails; otherwise
this block will never be reclaimed and will not be
programmable again.
Signed-off-by: Wenwei Tao <[email protected]>
---
drivers/lightnvm/rrpc.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d40c140..67b14d4 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -379,16 +379,26 @@ static void rrpc_block_gc(struct work_struct *work)
 	struct rrpc *rrpc = gcb->rrpc;
 	struct rrpc_block *rblk = gcb->rblk;
 	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_lun *lun = rblk->parent->lun;
+	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
 
+	mempool_free(gcb, rrpc->gcb_pool);
 	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
 
 	if (rrpc_move_valid_pages(rrpc, rblk))
-		goto done;
+		goto put_back;
+
+	if (nvm_erase_blk(dev, rblk->parent))
+		goto put_back;
 
-	nvm_erase_blk(dev, rblk->parent);
 	rrpc_put_blk(rrpc, rblk);
-done:
-	mempool_free(gcb, rrpc->gcb_pool);
+
+	return;
+
+put_back:
+	spin_lock(&rlun->lock);
+	list_add_tail(&rblk->prio, &rlun->prio_list);
+	spin_unlock(&rlun->lock);
 }
 
 /* the block with highest number of invalid pages, will be in the beginning
--
1.8.3.1
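
For context, below is a minimal standalone sketch (not kernel code) of the reclaim flow this patch establishes: the caller has already taken the block off the gc list, and on any failure the block is re-queued under the lun lock so a later GC pass can try it again. All names here (fake_lun, fake_block, move_valid_pages, erase_blk) are hypothetical stand-ins for illustration, not rrpc symbols.

/* Standalone sketch of the "put it back on failure" pattern.
 * Hypothetical types and helpers; only the control flow mirrors the patch. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_block {
	struct fake_block *next;	/* single-linked stand-in for rblk->prio */
	int id;
};

struct fake_lun {
	pthread_mutex_t lock;		/* stands in for rlun->lock */
	struct fake_block *prio_list;	/* stands in for rlun->prio_list */
};

/* Placeholders for the two reclaim steps that may fail. */
static bool move_valid_pages(struct fake_block *blk) { (void)blk; return true; }
static bool erase_blk(struct fake_block *blk) { (void)blk; return true; }

/* The block was already removed from the gc list by the caller; if either
 * step fails it must be re-queued, otherwise it is lost to GC forever. */
static void block_gc(struct fake_lun *lun, struct fake_block *blk)
{
	if (!move_valid_pages(blk))
		goto put_back;

	if (!erase_blk(blk))
		goto put_back;

	printf("block %d reclaimed\n", blk->id);
	return;

put_back:
	pthread_mutex_lock(&lun->lock);
	blk->next = lun->prio_list;	/* re-queue for a later GC pass */
	lun->prio_list = blk;
	pthread_mutex_unlock(&lun->lock);
}

int main(void)
{
	struct fake_lun lun = { .lock = PTHREAD_MUTEX_INITIALIZER, .prio_list = NULL };
	struct fake_block blk = { .next = NULL, .id = 42 };

	block_gc(&lun, &blk);
	return 0;
}

In the real driver the failure path re-adds rblk->prio to rlun->prio_list with list_add_tail() under rlun->lock, as in the hunk above.
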
On 12/29/2015 09:27 AM, Wenwei Tao wrote:
> We delete a block from the gc list before reclaiming it, so
> put it back on the list if its reclaim fails; otherwise
> this block will never be reclaimed and will not be
> programmable again.
>
> Signed-off-by: Wenwei Tao <[email protected]>
> ---
> drivers/lightnvm/rrpc.c | 18 ++++++++++++++----
> 1 file changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index d40c140..67b14d4 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -379,16 +379,26 @@ static void rrpc_block_gc(struct work_struct *work)
> struct rrpc *rrpc = gcb->rrpc;
> struct rrpc_block *rblk = gcb->rblk;
> struct nvm_dev *dev = rrpc->dev;
> + struct nvm_lun *lun = rblk->parent->lun;
> + struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
>
> + mempool_free(gcb, rrpc->gcb_pool);
> pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
>
> if (rrpc_move_valid_pages(rrpc, rblk))
> - goto done;
> + goto put_back;
> +
> + if (nvm_erase_blk(dev, rblk->parent))
> + goto put_back;
>
> - nvm_erase_blk(dev, rblk->parent);
> rrpc_put_blk(rrpc, rblk);
> -done:
> - mempool_free(gcb, rrpc->gcb_pool);
> +
> + return;
> +
> +put_back:
> + spin_lock(&rlun->lock);
> + list_add_tail(&rblk->prio, &rlun->prio_list);
> + spin_unlock(&rlun->lock);
> }
>
> /* the block with highest number of invalid pages, will be in the beginning
>
Thanks Tao. I've applied it for 4.5.