Cleanups for Device-Mapper (kcopyd, dm-io, and dm-mirror) based on review
feedback. These patches are against 2.6.7-rc3-mm2.
--
Kevin Corry
[email protected]
http://evms.sourceforge.net/
Revision 1:
Create/destroy kcopyd on demand.
Revision 2:
Use structure assignments instead of memcpy calls.
Revision 3:
dm-io: Proper error handling when trying to read from multiple regions.
Revision 4:
dm-raid1.c: Make struct region::delayed_bios a bio_list instead of a bio*.
Revision 5:
dm-raid1.c: In rh_exit(), use list_for_each_entry_safe instead of list_for_each_safe.
Create/destroy kcopyd on demand.
This changes kcopyd to initialize its mempool and workqueue only when a
client specifically needs to use it.
Signed-off-by: Kevin Corry <[email protected]>
--- diff/drivers/md/dm.c 2004-06-14 11:55:59.695988952 +0000
+++ source/drivers/md/dm.c 2004-06-14 11:56:22.879464528 +0000
@@ -153,7 +153,6 @@
xx(dm_target)
xx(dm_linear)
xx(dm_stripe)
- xx(kcopyd)
xx(dm_interface)
#undef xx
};
--- diff/drivers/md/dm.h 2004-06-14 11:55:59.696988800 +0000
+++ source/drivers/md/dm.h 2004-06-14 11:56:22.880464376 +0000
@@ -178,9 +178,6 @@
int dm_stripe_init(void);
void dm_stripe_exit(void);
-int kcopyd_init(void);
-void kcopyd_exit(void);
-
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
#endif
--- diff/drivers/md/kcopyd.c 2004-06-14 11:55:59.697988648 +0000
+++ source/drivers/md/kcopyd.c 2004-06-14 11:56:30.599290936 +0000
@@ -220,7 +220,7 @@
static LIST_HEAD(_io_jobs);
static LIST_HEAD(_pages_jobs);
-static int __init jobs_init(void)
+static int jobs_init(void)
{
_job_cache = kmem_cache_create("kcopyd-jobs",
sizeof(struct kcopyd_job),
@@ -247,6 +247,8 @@
mempool_destroy(_job_pool);
kmem_cache_destroy(_job_cache);
+ _job_pool = NULL;
+ _job_cache = NULL;
}
/*
@@ -589,14 +591,67 @@
up(&_client_lock);
}
+static DECLARE_MUTEX(kcopyd_init_lock);
+static int kcopyd_clients = 0;
+
+static int kcopyd_init(void)
+{
+ int r;
+
+ down(&kcopyd_init_lock);
+
+ if (kcopyd_clients) {
+ /* Already initialized. */
+ kcopyd_clients++;
+ up(&kcopyd_init_lock);
+ return 0;
+ }
+
+ r = jobs_init();
+ if (r) {
+ up(&kcopyd_init_lock);
+ return r;
+ }
+
+ _kcopyd_wq = create_singlethread_workqueue("kcopyd");
+ if (!_kcopyd_wq) {
+ jobs_exit();
+ up(&kcopyd_init_lock);
+ return -ENOMEM;
+ }
+
+ kcopyd_clients++;
+ INIT_WORK(&_kcopyd_work, do_work, NULL);
+ up(&kcopyd_init_lock);
+ return 0;
+}
+
+static void kcopyd_exit(void)
+{
+ down(&kcopyd_init_lock);
+ kcopyd_clients--;
+ if (!kcopyd_clients) {
+ jobs_exit();
+ destroy_workqueue(_kcopyd_wq);
+ _kcopyd_wq = NULL;
+ }
+ up(&kcopyd_init_lock);
+}
+
int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
{
int r = 0;
struct kcopyd_client *kc;
+ r = kcopyd_init();
+ if (r)
+ return r;
+
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
- if (!kc)
+ if (!kc) {
+ kcopyd_exit();
return -ENOMEM;
+ }
kc->lock = SPIN_LOCK_UNLOCKED;
kc->pages = NULL;
@@ -604,6 +659,7 @@
r = client_alloc_pages(kc, nr_pages);
if (r) {
kfree(kc);
+ kcopyd_exit();
return r;
}
@@ -611,6 +667,7 @@
if (r) {
client_free_pages(kc);
kfree(kc);
+ kcopyd_exit();
return r;
}
@@ -619,6 +676,7 @@
dm_io_put(nr_pages);
client_free_pages(kc);
kfree(kc);
+ kcopyd_exit();
return r;
}
@@ -632,31 +690,7 @@
client_free_pages(kc);
client_del(kc);
kfree(kc);
-}
-
-
-int __init kcopyd_init(void)
-{
- int r;
-
- r = jobs_init();
- if (r)
- return r;
-
- _kcopyd_wq = create_singlethread_workqueue("kcopyd");
- if (!_kcopyd_wq) {
- jobs_exit();
- return -ENOMEM;
- }
-
- INIT_WORK(&_kcopyd_work, do_work, NULL);
- return 0;
-}
-
-void kcopyd_exit(void)
-{
- jobs_exit();
- destroy_workqueue(_kcopyd_wq);
+ kcopyd_exit();
}
EXPORT_SYMBOL(kcopyd_client_create);
--- diff/drivers/md/kcopyd.h 2004-06-14 11:55:59.697988648 +0000
+++ source/drivers/md/kcopyd.h 2004-06-14 11:56:22.882464072 +0000
@@ -13,9 +13,6 @@
#include "dm-io.h"
-int kcopyd_init(void);
-void kcopyd_exit(void);
-
/* FIXME: make this configurable */
#define KCOPYD_MAX_REGIONS 8
Use structure assignments instead of memcpy calls.
[Suggested by akpm during kcopyd review.]
Signed-off-by: Kevin Corry <[email protected]>
--- diff/drivers/md/dm-raid1.c 2004-06-14 09:23:21.259282960 +0000
+++ source/drivers/md/dm-raid1.c 2004-06-14 09:29:56.946129416 +0000
@@ -850,9 +850,9 @@
struct bio_list reads, writes;
spin_lock(&ms->lock);
- memcpy(&reads, &ms->reads, sizeof(reads));
+ reads = ms->reads;
+ writes = ms->writes;
bio_list_init(&ms->reads);
- memcpy(&writes, &ms->writes, sizeof(writes));
bio_list_init(&ms->writes);
spin_unlock(&ms->lock);
--- diff/drivers/md/dm-snap.c 2004-06-14 09:23:21.261282656 +0000
+++ source/drivers/md/dm-snap.c 2004-06-14 09:29:56.947129264 +0000
@@ -612,7 +612,7 @@
error_bios(bio_list_get(&pe->snapshot_bios));
goto out;
}
- memcpy(e, &pe->e, sizeof(*e));
+ *e = pe->e;
/*
* Add a proper exception, and remove the
--- diff/drivers/md/dm-table.c 2004-06-14 09:23:21.265282048 +0000
+++ source/drivers/md/dm-table.c 2004-06-14 09:29:56.948129112 +0000
@@ -400,7 +400,7 @@
struct dm_dev dd_copy;
dev_t dev = dd->bdev->bd_dev;
- memcpy(&dd_copy, dd, sizeof(dd_copy));
+ dd_copy = *dd;
dd->mode |= new_mode;
dd->bdev = NULL;
@@ -408,7 +408,7 @@
if (!r)
close_dev(&dd_copy);
else
- memcpy(dd, &dd_copy, sizeof(dd_copy));
+ *dd = dd_copy;
return r;
}
--- diff/drivers/md/dm.c 2004-06-14 09:29:23.019287080 +0000
+++ source/drivers/md/dm.c 2004-06-14 09:29:56.949128960 +0000
@@ -394,7 +394,7 @@
struct bio_vec *bv = bio->bi_io_vec + idx;
clone = bio_alloc(GFP_NOIO, 1);
- memcpy(clone->bi_io_vec, bv, sizeof(*bv));
+ *clone->bi_io_vec = *bv;
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
--- diff/drivers/md/kcopyd.c 2004-06-14 09:29:23.020286928 +0000
+++ source/drivers/md/kcopyd.c 2004-06-14 09:29:56.950128808 +0000
@@ -476,7 +476,7 @@
int i;
struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);
- memcpy(sub_job, job, sizeof(*job));
+ *sub_job = *job;
sub_job->source.sector += progress;
sub_job->source.count = count;
@@ -536,7 +536,7 @@
job->write_err = 0;
job->rw = READ;
- memcpy(&job->source, from, sizeof(*from));
+ job->source = *from;
job->num_dests = num_dests;
memcpy(&job->dests, dests, sizeof(*dests) * num_dests);
dm-io: Proper error handling when someone is trying to read from multiple
regions.
Signed-off-by: Kevin Corry <[email protected]>
--- diff/drivers/md/dm-io.c 2004-06-14 10:46:31.944583472 +0000
+++ source/drivers/md/dm-io.c 2004-06-14 10:47:10.568711712 +0000
@@ -537,7 +537,10 @@
{
struct io io;
- BUG_ON(num_regions > 1 && rw != WRITE);
+ if (num_regions > 1 && rw != WRITE) {
+ WARN_ON(1);
+ return -EIO;
+ }
io.error = 0;
atomic_set(&io.count, 1); /* see dispatch_io() */
@@ -565,8 +568,15 @@
static int async_io(unsigned int num_regions, struct io_region *where, int rw,
struct dpages *dp, io_notify_fn fn, void *context)
{
- struct io *io = mempool_alloc(_io_pool, GFP_NOIO);
+ struct io *io;
+
+ if (num_regions > 1 && rw != WRITE) {
+ WARN_ON(1);
+ fn(1, context);
+ return -EIO;
+ }
+ io = mempool_alloc(_io_pool, GFP_NOIO);
io->error = 0;
atomic_set(&io->count, 1); /* see dispatch_io() */
io->sleeper = NULL;
dm-raid1.c: In rh_exit(), use list_for_each_entry_safe instead of
list_for_each_safe.
Signed-off-by: Kevin Corry <[email protected]>
--- diff/drivers/md/dm-raid1.c 2004-06-14 09:30:12.916701520 +0000
+++ source/drivers/md/dm-raid1.c 2004-06-14 09:30:17.978931944 +0000
@@ -187,13 +187,11 @@
static void rh_exit(struct region_hash *rh)
{
unsigned int h;
- struct region *reg;
- struct list_head *tmp, *tmp2;
+ struct region *reg, *nreg;
BUG_ON(!list_empty(&rh->quiesced_regions));
for (h = 0; h < rh->nr_buckets; h++) {
- list_for_each_safe (tmp, tmp2, rh->buckets + h) {
- reg = list_entry(tmp, struct region, hash_list);
+ list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
BUG_ON(atomic_read(&reg->pending));
mempool_free(reg, rh->region_pool);
}
dm-raid1.c: Make struct region::delayed_bios a bio_list instead of a bio*.
This will ensure the queued bios are kept in the proper order.
Signed-off-by: Kevin Corry <[email protected]>
--- diff/drivers/md/dm-raid1.c 2004-06-14 09:29:56.946129416 +0000
+++ source/drivers/md/dm-raid1.c 2004-06-14 09:30:12.916701520 +0000
@@ -103,7 +103,7 @@
struct list_head list;
atomic_t pending;
- struct bio *delayed_bios;
+ struct bio_list delayed_bios;
};
/*
@@ -244,7 +244,7 @@
INIT_LIST_HEAD(&nreg->list);
atomic_set(&nreg->pending, 0);
- nreg->delayed_bios = NULL;
+ bio_list_init(&nreg->delayed_bios);
write_lock_irq(&rh->hash_lock);
reg = __rh_lookup(rh, region);
@@ -310,14 +310,12 @@
return state == RH_CLEAN || state == RH_DIRTY;
}
-static void dispatch_bios(struct mirror_set *ms, struct bio *bio)
+static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
- struct bio *nbio;
+ struct bio *bio;
- while (bio) {
- nbio = bio->bi_next;
+ while ((bio = bio_list_pop(bio_list))) {
queue_bio(ms, bio, WRITE);
- bio = nbio;
}
}
@@ -361,7 +359,7 @@
list_for_each_entry_safe (reg, next, &recovered, list) {
rh->log->type->clear_region(rh->log, reg->key);
rh->log->type->complete_resync_work(rh->log, reg->key, 1);
- dispatch_bios(rh->ms, reg->delayed_bios);
+ dispatch_bios(rh->ms, &reg->delayed_bios);
up(&rh->recovery_count);
mempool_free(reg, rh->region_pool);
}
@@ -516,8 +514,7 @@
read_lock(&rh->hash_lock);
reg = __rh_find(rh, bio_to_region(rh, bio));
- bio->bi_next = reg->delayed_bios;
- reg->delayed_bios = bio;
+ bio_list_add(&reg->delayed_bios, bio);
read_unlock(&rh->hash_lock);
}