Trivial: don't lock the queue spinlock when called from the request function.
Since the faulty function must use spinlock in another case, double-case it.
And since we will never use both functions together, let no object code be
shared between them.
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <[email protected]>
---
uml-linux-2.6.8.1-paolo/arch/um/drivers/ubd_kern.c | 36 ++++++++++++++++-----
1 files changed, 28 insertions(+), 8 deletions(-)
diff -puN arch/um/drivers/ubd_kern.c~uml-fix-ubd-deadlock arch/um/drivers/ubd_kern.c
--- uml-linux-2.6.8.1/arch/um/drivers/ubd_kern.c~uml-fix-ubd-deadlock 2004-09-08 19:04:27.662926344 +0200
+++ uml-linux-2.6.8.1-paolo/arch/um/drivers/ubd_kern.c 2004-09-08 19:05:36.700431048 +0200
@@ -54,6 +54,7 @@
#include "mem.h"
#include "mem_kern.h"
+/*This is the queue lock. FIXME: make it per-UBD device.*/
static spinlock_t ubd_io_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t ubd_lock = SPIN_LOCK_UNLOCKED;
@@ -396,14 +397,16 @@ int thread_fd = -1;
*/
int intr_count = 0;
-static void ubd_finish(struct request *req, int error)
+static inline void __ubd_finish(struct request *req, int error, int lock)
{
int nsect;
if(error){
- spin_lock(&ubd_io_lock);
+ if (lock)
+ spin_lock(&ubd_io_lock);
end_request(req, 0);
- spin_unlock(&ubd_io_lock);
+ if (lock)
+ spin_unlock(&ubd_io_lock);
return;
}
nsect = req->current_nr_sectors;
@@ -412,11 +415,28 @@ static void ubd_finish(struct request *r
req->errors = 0;
req->nr_sectors -= nsect;
req->current_nr_sectors = 0;
- spin_lock(&ubd_io_lock);
+ if (lock)
+ spin_lock(&ubd_io_lock);
end_request(req, 1);
- spin_unlock(&ubd_io_lock);
+ if (lock)
+ spin_unlock(&ubd_io_lock);
+}
+
+/* We will use only one of them, not both, i.e. ubd_finish with the io_thread
+ * and ubd_finish_nolock without the separate io thread, so it's better to waste
+ * some space to gain performance. */
+
+static void ubd_finish(struct request *req, int error)
+{
+ __ubd_finish(req, error, 1);
+}
+
+static void ubd_finish_nolock(struct request *req, int error)
+{
+ __ubd_finish(req, error, 0);
}
+/*Called with ubd_io_lock not held*/
static void ubd_handler(void)
{
struct io_thread_req req;
@@ -965,6 +985,7 @@ static int prepare_mmap_request(struct u
return(0);
}
+/*Called with ubd_io_lock held*/
static int prepare_request(struct request *req, struct io_thread_req *io_req)
{
struct gendisk *disk = req->rq_disk;
@@ -977,9 +998,7 @@ static int prepare_request(struct reques
if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
printk("Write attempted on readonly ubd device %s\n",
disk->disk_name);
- spin_lock(&ubd_io_lock);
end_request(req, 0);
- spin_unlock(&ubd_io_lock);
return(1);
}
@@ -1029,6 +1048,7 @@ static int prepare_request(struct reques
return(0);
}
+/*Called with ubd_io_lock held*/
static void do_ubd_request(request_queue_t *q)
{
struct io_thread_req io_req;
@@ -1040,7 +1060,7 @@ static void do_ubd_request(request_queue
err = prepare_request(req, &io_req);
if(!err){
do_io(&io_req);
- ubd_finish(req, io_req.error);
+ ubd_finish_nolock(req, io_req.error);
}
}
}
_
* [email protected] ([email protected]) wrote:
>
> Trivial: don't lock the queue spinlock when called from the request function.
> Since the faulty function must use spinlock in another case, double-case it.
> And since we will never use both functions together, let no object code be
> shared between them.
Why not add a helper which locks around the core function. Then either
call helper or core function directly depending on locking needs?
Smth. along the lines of below.
===== arch/um/drivers/ubd_kern.c 1.36 vs edited =====
--- 1.36/arch/um/drivers/ubd_kern.c 2004-08-24 02:08:18 -07:00
+++ edited/arch/um/drivers/ubd_kern.c 2004-09-08 11:06:54 -07:00
@@ -396,14 +396,20 @@
*/
int intr_count = 0;
-static void ubd_finish(struct request *req, int error)
+static inline void ubd_finish(struct request *req, int error)
+{
+ spin_lock(&ubd_io_lock);
+ __ubd_finish(req, error);
+ spin_unlock(&ubd_io_lock);
+}
+
+/* call ubd_finish if you need to serialize */
+static void __ubd_finish(struct request *req, int error)
{
int nsect;
if(error){
- spin_lock(&ubd_io_lock);
end_request(req, 0);
- spin_unlock(&ubd_io_lock);
return;
}
nsect = req->current_nr_sectors;
@@ -412,9 +418,7 @@
req->errors = 0;
req->nr_sectors -= nsect;
req->current_nr_sectors = 0;
- spin_lock(&ubd_io_lock);
end_request(req, 1);
- spin_unlock(&ubd_io_lock);
}
static void ubd_handler(void)
--
Linux Security Modules http://lsm.immunix.org http://lsm.bkbits.net
On Wed, Sep 08 2004, [email protected] wrote:
> diff -puN arch/um/drivers/ubd_kern.c~uml-fix-ubd-deadlock arch/um/drivers/ubd_kern.c
> --- uml-linux-2.6.8.1/arch/um/drivers/ubd_kern.c~uml-fix-ubd-deadlock 2004-09-08 19:04:27.662926344 +0200
> +++ uml-linux-2.6.8.1-paolo/arch/um/drivers/ubd_kern.c 2004-09-08 19:05:36.700431048 +0200
> @@ -54,6 +54,7 @@
> #include "mem.h"
> #include "mem_kern.h"
>
> +/*This is the queue lock. FIXME: make it per-UBD device.*/
> static spinlock_t ubd_io_lock = SPIN_LOCK_UNLOCKED;
> static spinlock_t ubd_lock = SPIN_LOCK_UNLOCKED;
probably not worth it to make it per-device. doing so should be a simple
search-replace job, though.
> @@ -396,14 +397,16 @@ int thread_fd = -1;
> */
> int intr_count = 0;
>
> -static void ubd_finish(struct request *req, int error)
> +static inline void __ubd_finish(struct request *req, int error, int lock)
> {
> int nsect;
>
> if(error){
> - spin_lock(&ubd_io_lock);
> + if (lock)
> + spin_lock(&ubd_io_lock);
> end_request(req, 0);
> - spin_unlock(&ubd_io_lock);
> + if (lock)
> + spin_unlock(&ubd_io_lock);
In general, doing it this way is frowned upon. Either split the
function, or just make the callers acquire the lock if they have to.
--
Jens Axboe
On Wednesday 08 September 2004 20:12, Chris Wright wrote:
> * [email protected] ([email protected]) wrote:
> > Trivial: don't lock the queue spinlock when called from the request
> > function. Since the faulty function must use spinlock in another case,
> > double-case it. And since we will never use both functions together, let
> > no object code be shared between them.
>
> Why not add a helper which locks around the core function. Then either
> call helper or core function directly depending on locking needs?
I'm happy with whatever is nicer.
--
Paolo Giarrusso, aka Blaisorblade
Linux registered user n. 292729
* BlaisorBlade ([email protected]) wrote:
> On Wednesday 08 September 2004 20:12, Chris Wright wrote:
> > * [email protected] ([email protected]) wrote:
> > > Trivial: don't lock the queue spinlock when called from the request
> > > function. Since the faulty function must use spinlock in another case,
> > > double-case it. And since we will never use both functions together, let
> > > no object code be shared between them.
> >
> > Why not add a helper which locks around the core function. Then either
> > call helper or core function directly depending on locking needs?
> I'm happy with whatever is nicer.
The way I outlined is nicer as it avoids all that conditional locking.
I can do a full patch if you like.
thanks,
-chris
--
Linux Security Modules http://lsm.immunix.org http://lsm.bkbits.net
On Thursday 09 September 2004 20:32, Chris Wright wrote:
> * BlaisorBlade ([email protected]) wrote:
> > On Wednesday 08 September 2004 20:12, Chris Wright wrote:
> > > * [email protected] ([email protected]) wrote:
> > > > Trivial: don't lock the queue spinlock when called from the request
> > > > function. Since the faulty function must use spinlock in another
> > > > case, double-case it. And since we will never use both functions
> > > > together, let no object code be shared between them.
> > >
> > > Why not add a helper which locks around the core function. Then either
> > > call helper or core function directly depending on locking needs?
> >
> > I'm happy with whatever is nicer.
>
> The way I outlined is nicer as it avoids all that conditional locking.
> I can do a full patch if you like.
Yes, thanks a lot for your help.
Bye
--
Paolo Giarrusso, aka Blaisorblade
Linux registered user n. 292729
* BlaisorBlade ([email protected]) wrote:
> Yes, thanks a lot for your help.
Rename ubd_finish() to __ubd_finish() and remove ubd_io_lock from it.
Add wrapper, ubd_finish(), which grabs the lock before calling __ubd_finish().
Update do_ubd_request to use the lock free __ubd_finish() to avoid
deadlock. Also, apparently prepare_request is called with ubd_io_lock
held, so remove locks there.
Signed-off-by: Chris Wright <[email protected]>
===== arch/um/drivers/ubd_kern.c 1.38 vs edited =====
--- 1.38/arch/um/drivers/ubd_kern.c 2004-09-07 23:33:13 -07:00
+++ edited/arch/um/drivers/ubd_kern.c 2004-09-09 12:18:01 -07:00
@@ -396,14 +396,20 @@
*/
int intr_count = 0;
-static void ubd_finish(struct request *req, int error)
+static inline void ubd_finish(struct request *req, int error)
+{
+ spin_lock(&ubd_io_lock);
+ __ubd_finish(req, error);
+ spin_unlock(&ubd_io_lock);
+}
+
+/* call ubd_finish if you need to serialize */
+static void __ubd_finish(struct request *req, int error)
{
int nsect;
if(error){
- spin_lock(&ubd_io_lock);
end_request(req, 0);
- spin_unlock(&ubd_io_lock);
return;
}
nsect = req->current_nr_sectors;
@@ -412,11 +418,10 @@
req->errors = 0;
req->nr_sectors -= nsect;
req->current_nr_sectors = 0;
- spin_lock(&ubd_io_lock);
end_request(req, 1);
- spin_unlock(&ubd_io_lock);
}
+/* Called without ubd_io_lock held */
static void ubd_handler(void)
{
struct io_thread_req req;
@@ -965,6 +970,7 @@
return(0);
}
+/* Called with ubd_io_lock held */
static int prepare_request(struct request *req, struct io_thread_req *io_req)
{
struct gendisk *disk = req->rq_disk;
@@ -977,9 +983,7 @@
if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
printk("Write attempted on readonly ubd device %s\n",
disk->disk_name);
- spin_lock(&ubd_io_lock);
end_request(req, 0);
- spin_unlock(&ubd_io_lock);
return(1);
}
@@ -1029,6 +1033,7 @@
return(0);
}
+/* Called with ubd_io_lock held */
static void do_ubd_request(request_queue_t *q)
{
struct io_thread_req io_req;
@@ -1040,7 +1045,7 @@
err = prepare_request(req, &io_req);
if(!err){
do_io(&io_req);
- ubd_finish(req, io_req.error);
+ __ubd_finish(req, io_req.error);
}
}
}
On Thursday 09 September 2004 21:29, Chris Wright wrote:
> * BlaisorBlade ([email protected]) wrote:
> > Yes, thanks a lot for your help.
>
> Rename ubd_finish() to __ubd_finish() and remove ubd_io_lock from it.
> Add wrapper, ubd_finish(), which grabs the lock before calling __ubd_finish().
> Update do_ubd_request to use the lock free __ubd_finish() to avoid
> deadlock. Also, apparently prepare_request is called with ubd_io_lock
> held, so remove locks there.
> Signed-off-by: Chris Wright <[email protected]>
Ok, this is good. And it's the only one which has been discussed upon, Andrew,
so you can merge the rest.
> ===== arch/um/drivers/ubd_kern.c 1.38 vs edited =====
> --- 1.38/arch/um/drivers/ubd_kern.c 2004-09-07 23:33:13 -07:00
> +++ edited/arch/um/drivers/ubd_kern.c 2004-09-09 12:18:01 -07:00
> @@ -396,14 +396,20 @@
> */
> int intr_count = 0;
>
> -static void ubd_finish(struct request *req, int error)
> +static inline void ubd_finish(struct request *req, int error)
> +{
> + spin_lock(&ubd_io_lock);
> + __ubd_finish(req, error);
> + spin_unlock(&ubd_io_lock);
> +}
> +
> +/* call ubd_finish if you need to serialize */
> +static void __ubd_finish(struct request *req, int error)
> {
> int nsect;
>
> if(error){
> - spin_lock(&ubd_io_lock);
> end_request(req, 0);
> - spin_unlock(&ubd_io_lock);
> return;
> }
> nsect = req->current_nr_sectors;
> @@ -412,11 +418,10 @@
> req->errors = 0;
> req->nr_sectors -= nsect;
> req->current_nr_sectors = 0;
> - spin_lock(&ubd_io_lock);
> end_request(req, 1);
> - spin_unlock(&ubd_io_lock);
> }
>
> +/* Called without ubd_io_lock held */
> static void ubd_handler(void)
> {
> struct io_thread_req req;
> @@ -965,6 +970,7 @@
> return(0);
> }
>
> +/* Called with ubd_io_lock held */
> static int prepare_request(struct request *req, struct io_thread_req
> *io_req) {
> struct gendisk *disk = req->rq_disk;
> @@ -977,9 +983,7 @@
> if((rq_data_dir(req) == WRITE) && !dev->openflags.w){
> printk("Write attempted on readonly ubd device %s\n",
> disk->disk_name);
> - spin_lock(&ubd_io_lock);
> end_request(req, 0);
> - spin_unlock(&ubd_io_lock);
> return(1);
> }
>
> @@ -1029,6 +1033,7 @@
> return(0);
> }
>
> +/* Called with ubd_io_lock held */
> static void do_ubd_request(request_queue_t *q)
> {
> struct io_thread_req io_req;
> @@ -1040,7 +1045,7 @@
> err = prepare_request(req, &io_req);
> if(!err){
> do_io(&io_req);
> - ubd_finish(req, io_req.error);
> + __ubd_finish(req, io_req.error);
> }
> }
> }
>
>
> -------------------------------------------------------
> This SF.Net email is sponsored by: YOU BE THE JUDGE. Be one of 170
> Project Admins to receive an Apple iPod Mini FREE for your judgement on
> who ports your project to Linux PPC the best. Sponsored by IBM.
> Deadline: Sept. 13. Go here: http://sf.net/ppc_contest.php
> _______________________________________________
> User-mode-linux-devel mailing list
> [email protected]
> https://lists.sourceforge.net/lists/listinfo/user-mode-linux-devel
--
Paolo Giarrusso, aka Blaisorblade
Linux registered user n. 292729