Please apply this patch to the megaraid 2.00.6 driver for kernels that don't
support a per-host lock. The driver can be found at:
ftp://ftp.lsil.com/pub/linux-megaraid/drivers/version-2.00.6/
Sreenivas Bagalkote
LSI Logic
--- megaraid_2006/megaraid2.c 2003-07-30 14:43:36.000000000 -0400
+++ megaraid_2006_wo_hostlock/megaraid2.c 2003-07-30 15:05:35.000000000 -0400
@@ -394,7 +394,6 @@
adapter->flag = flag;
spin_lock_init(&adapter->lock);
- host->lock = &adapter->lock;
host->cmd_per_lun = max_cmd_per_lun;
host->max_sectors = max_sectors_per_io;
@@ -1775,7 +1774,7 @@
unsigned long flags;
- spin_lock_irqsave(&adapter->lock, flags);
+ spin_lock_irqsave(&io_request_lock, flags);
megaraid_iombox_ack_sequence(adapter);
@@ -1784,7 +1783,7 @@
mega_runpendq(adapter);
}
- spin_unlock_irqrestore(&adapter->lock, flags);
+ spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
@@ -1857,7 +1856,7 @@
unsigned long flags;
- spin_lock_irqsave(&adapter->lock, flags);
+ spin_lock_irqsave(&io_request_lock, flags);
megaraid_memmbox_ack_sequence(adapter);
@@ -1866,7 +1865,7 @@
mega_runpendq(adapter);
}
- spin_unlock_irqrestore(&adapter->lock, flags);
+ spin_unlock_irqrestore(&io_request_lock, flags);
return;
}
@@ -2618,7 +2617,7 @@
adapter = (adapter_t *)scp->host->hostdata;
- ASSERT( spin_is_locked(&adapter->lock) );
+ ASSERT( spin_is_locked(&io_request_lock) );
printk("megaraid: aborting-%ld cmd=%x <c=%d t=%d l=%d>\n",
scp->serial_number, scp->cmnd[0], scp->channel, scp->target,
@@ -2715,7 +2714,7 @@
adapter = (adapter_t *)cmd->host->hostdata;
- ASSERT( spin_is_locked(&adapter->lock) );
+ ASSERT( spin_is_locked(&io_request_lock) );
printk("megaraid: reset-%ld cmd=%x <c=%d t=%d l=%d>\n",
cmd->serial_number, cmd->cmnd[0], cmd->channel, cmd->target,
@@ -2726,7 +2725,7 @@
mc.cmd = MEGA_CLUSTER_CMD;
mc.opcode = MEGA_RESET_RESERVATIONS;
- spin_unlock_irq(&adapter->lock);
+ spin_unlock_irq(&io_request_lock);
if( mega_internal_command(adapter, LOCK_INT, &mc, NULL) != 0 ) {
printk(KERN_WARNING
"megaraid: reservation reset failed.\n");
@@ -2734,7 +2733,7 @@
else {
printk(KERN_INFO "megaraid: reservation reset.\n");
}
- spin_lock_irq(&adapter->lock);
+ spin_lock_irq(&io_request_lock);
#endif
/*
@@ -4958,7 +4957,7 @@
scb_t *scb;
int rval;
- ASSERT( !spin_is_locked(&adapter->lock) );
+ ASSERT( !spin_is_locked(&io_request_lock) );
/*
* Stop sending commands to the controller, queue them internally.
@@ -4978,7 +4977,7 @@
rval = mega_do_del_logdrv(adapter, logdrv);
- spin_lock_irqsave(&adapter->lock, flags);
+ spin_lock_irqsave(&io_request_lock, flags);
/*
* If delete operation was successful, add 0x80 to the logical drive
@@ -4997,7 +4996,7 @@
mega_runpendq(adapter);
- spin_unlock_irqrestore(&adapter->lock, flags);
+ spin_unlock_irqrestore(&io_request_lock, flags);
return rval;
}
@@ -5547,11 +5546,11 @@
/*
* Get the lock only if the caller has not acquired it already
*/
- if( ls == LOCK_INT ) spin_lock_irqsave(&adapter->lock, flags);
+ if( ls == LOCK_INT ) spin_lock_irqsave(&io_request_lock, flags);
megaraid_queue(scmd, mega_internal_done);
- if( ls == LOCK_INT ) spin_unlock_irqrestore(&adapter->lock, flags);
+ if( ls == LOCK_INT ) spin_unlock_irqrestore(&io_request_lock, flags);
/*
* Wait till this command finishes. Do not use
On Wed, Jul 30 2003, Bagalkote, Sreenivas wrote:
> Please apply this patch to the megaraid 2.00.6 driver for kernels that
> don't support a per-host lock. The driver can be found at:
>
> ftp://ftp.lsil.com/pub/linux-megaraid/drivers/version-2.00.6/
It's easily possible to keep the impact of maintaining a driver across
such kernels a lot smaller: use the same lock in all of the spin_lock
calls, and just assign that lock to adapter->lock or io_request_lock
depending on the kernel.
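A minimal sketch of that pattern (the host_lock field name and the
2.5.0 cutoff below are illustrative, not taken from the actual driver):

	#include <linux/version.h>
	#include <linux/spinlock.h>

	/*
	 * adapter_t grows one field (name is illustrative):
	 *	spinlock_t *host_lock;	- the lock every call site takes
	 */

	/* at adapter init, pick the lock once: */
	#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)	/* illustrative cutoff */
		adapter->host_lock = &adapter->lock;	/* per-host lock exists */
		host->lock = &adapter->lock;
	#else
		adapter->host_lock = &io_request_lock;	/* global lock fallback */
	#endif

	/* every other call site is then identical on both kernel trees: */
	spin_lock_irqsave(adapter->host_lock, flags);
	megaraid_iombox_ack_sequence(adapter);
	spin_unlock_irqrestore(adapter->host_lock, flags);

The driver then carries a single version check at init time instead of
one per lock call.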
--
Jens Axboe
Well, that's definitely a good idea. Expect a new driver with this change.
BTW, is there a kernel version beyond which all kernels support the
per-host lock? And I mean a 2.4.x kernel :-)
Thanks
-Atul Mukker
On Thu, Jul 31, 2003 at 05:10:50PM -0400, Mukker, Atul wrote:
>
> Well, that's definitely a good idea. Expect a new driver with this change.
> BTW, is there a kernel version beyond which all kernels support the
> per-host lock? And I mean a 2.4.x kernel :-)
That's a pretty dangerous change to make to a stable kernel. Much better
to work on stabilising 2.6.
--
"It's not Hollywood. War is real, war is primarily not about defeat or
victory, it is about death. I've seen thousands and thousands of dead bodies.
Do you think I want to have an academic debate on this subject?" -- Robert Fisk
On Thu, Jul 31 2003, Mukker, Atul wrote:
>
> Well, that's definitely a good idea. Expect a new driver with this change.
> BTW, is there a kernel version beyond which all kernels support the
> per-host lock? And I mean a 2.4.x kernel :-)
Unfortunately no. However, it is trivial to just add a host->lock pointer
and make it point to io_request_lock. Ditto for q->queue_lock. That won't
change how the code operates at all. I will probably do that once 2.4.23
opens; it would make maintaining 2.6/2.4 drivers much easier (and ditto
for vendor kernels).
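A rough sketch of what those compatibility pointers could look like on
2.4 (assuming they simply aim at the existing global lock; the eventual
2.4.23 change may of course differ):

	extern spinlock_t io_request_lock;

	/* at host registration time, in the 2.4 core (sketch): */
	host->lock = &io_request_lock;		/* new Scsi_Host pointer */
	q->queue_lock = &io_request_lock;	/* ditto for the request queue */

	/*
	 * A driver written against the 2.6 interface then compiles and
	 * behaves identically on 2.4: taking host->lock simply takes the
	 * one global io_request_lock, exactly as before.
	 */
	spin_lock_irqsave(host->lock, flags);
	/* queuecommand / interrupt work goes here */
	spin_unlock_irqrestore(host->lock, flags);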
--
Jens Axboe
My apologies if this has already been discussed, but has anyone noticed
disk I/O speed differences under 2.6.0 as compared to 2.4.20? My system has
an ASUS A7V333 motherboard with 768MB RAM, a 1300MHz Duron, and three 40GB
Maxtor drives running Linux software RAID. See the screen capture below:
----------------------------- 2.4.20-19.9 -----------------------------------------------
[root@alpha root]# uname -a
Linux alpha 2.4.20-19.9 #1 Mon Jul 28 18:59:34 MDT 2003 i686 athlon i386
GNU/Linux
[root@alpha root]# hdparm -t /dev/md0
/dev/md0:
Timing buffered disk reads: 64 MB in 2.90 seconds = 22.07 MB/sec
[root@alpha root]# hdparm -t /dev/md1
/dev/md1:
Timing buffered disk reads: 64 MB in 1.66 seconds = 38.55 MB/sec
[root@alpha root]# hdparm -t /dev/md2
/dev/md2:
Timing buffered disk reads: 64 MB in 0.96 seconds = 66.67 MB/sec
[root@alpha root]# hdparm -t /dev/md3
/dev/md3:
Timing buffered disk reads: 64 MB in 1.74 seconds = 36.78 MB/sec
[root@alpha root]# hdparm -t /dev/md4
/dev/md4:
Timing buffered disk reads: 64 MB in 1.74 seconds = 36.78 MB/sec
[root@alpha root]# hdparm -t /dev/md5
/dev/md5:
Timing buffered disk reads: 64 MB in 1.68 seconds = 38.10 MB/sec
[root@alpha root]# hdparm -t /dev/md6
/dev/md6:
Timing buffered disk reads: 64 MB in 1.70 seconds = 37.65 MB/sec
----------------------------- 2.6.0-0.test2.1.28 -----------------------------------------------
[root@alpha root]# uname -a
Linux alpha 2.6.0-0.test2.1.28custom #1 Thu Jul 31 20:02:20 MDT 2003 i686
athlon i386 GNU/Linux
[root@alpha root]# hdparm -t /dev/md0
/dev/md0:
Timing buffered disk reads: 64 MB in 2.67 seconds = 24.00 MB/sec
[root@alpha root]# hdparm -t /dev/md1
/dev/md1:
Timing buffered disk reads: 64 MB in 1.92 seconds = 33.37 MB/sec
[root@alpha root]# hdparm -t /dev/md2
/dev/md2:
Timing buffered disk reads: 64 MB in 1.37 seconds = 46.79 MB/sec
[root@alpha root]# hdparm -t /dev/md3
/dev/md3:
Timing buffered disk reads: 64 MB in 2.23 seconds = 28.69 MB/sec
[root@alpha root]# hdparm -t /dev/md4
/dev/md4:
Timing buffered disk reads: 64 MB in 2.20 seconds = 29.07 MB/sec
[root@alpha root]# hdparm -t /dev/md5
/dev/md5:
Timing buffered disk reads: 64 MB in 2.03 seconds = 31.45 MB/sec
[root@alpha root]# hdparm -t /dev/md6
/dev/md6:
Timing buffered disk reads: 64 MB in 1.99 seconds = 32.23 MB/sec
----------------------------- Mounts -----------------------------------------------
[root@alpha root]# mount
/dev/md0 on / type ext3 (rw)
none on /proc type proc (rw)
usbdevfs on /proc/bus/usb type usbdevfs (rw)
/dev/md1 on /boot type ext3 (rw)
none on /dev/pts type devpts (rw,gid=5,mode=620)
/dev/md3 on /home type ext3 (rw)
none on /dev/shm type tmpfs (rw)
/dev/md6 on /tmp type ext3 (rw)
/dev/md4 on /usr type ext3 (rw)
/dev/md5 on /var type ext3 (rw)
----------------------------- MD Device dump -----------------------------------------------
[root@alpha root]# lsraid -R -p
# md device [dev 9, 2] /dev/md2 queried online
raiddev /dev/md2
raid-level 0
nr-raid-disks 2
nr-spare-disks 0
persistent-superblock 1
chunk-size 64
device /dev/hde1
raid-disk 0
device /dev/hdg2
raid-disk 1
# md device [dev 9, 1] /dev/md1 queried online
raiddev /dev/md1
raid-level 1
nr-raid-disks 2
nr-spare-disks 0
persistent-superblock 1
chunk-size 64
device /dev/hde2
raid-disk 0
device /dev/hdg1
raid-disk 1
# md device [dev 9, 4] /dev/md4 queried online
raiddev /dev/md4
raid-level 5
nr-raid-disks 3
nr-spare-disks 0
persistent-superblock 1
chunk-size 64
device /dev/hde3
raid-disk 0
device /dev/hdg3
raid-disk 1
device /dev/hdh3
raid-disk 2
# md device [dev 9, 3] /dev/md3 queried online
raiddev /dev/md3
raid-level 5
nr-raid-disks 3
nr-spare-disks 0
persistent-superblock 1
chunk-size 64
device /dev/hde5
raid-disk 0
device /dev/hdg5
raid-disk 1
device /dev/hdh5
raid-disk 2
# md device [dev 9, 5] /dev/md5 queried online
raiddev /dev/md5
raid-level 5
nr-raid-disks 3
nr-spare-disks 0
persistent-superblock 1
chunk-size 64
device /dev/hde6
raid-disk 0
device /dev/hdg6
raid-disk 1
device /dev/hdh6
raid-disk 2
# md device [dev 9, 0] /dev/md0 queried online
raiddev /dev/md0
raid-level 1
nr-raid-disks 2
nr-spare-disks 0
persistent-superblock 1
chunk-size 64
device /dev/hde7
raid-disk 0
device /dev/hdg7
raid-disk 1
# md device [dev 9, 6] /dev/md6 queried online
raiddev /dev/md6
raid-level 5
nr-raid-disks 3
nr-spare-disks 0
persistent-superblock 1
chunk-size 64
device /dev/hde8
raid-disk 0
device /dev/hdg8
raid-disk 1
device /dev/hdh8
raid-disk 2
[root@alpha root]#
-------------------------------- end-of-capture -----------------------------------------------
Cheers...Gord
"Gordon Larsen" <[email protected]> writes:
> My apologies if this has already been discussed, but has anyone noticed
> disk I/O speed differences under 2.6.0 as compared to 2.4.20? My system has
It has been discussed. The solution is "hdparm -a 512 /dev/...".
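For example (illustrative commands, not from the original capture; -a
reads or sets the per-device filesystem readahead, counted in 512-byte
sectors):

	hdparm -a /dev/md0	# show the current readahead
	hdparm -a 512 /dev/md0	# raise it to 512 sectors (256KB)
	hdparm -t /dev/md0	# repeat the timing test

hdparm -t performs a sequential buffered read, so it benefits directly
from a larger readahead window.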
--
Måns Rullgård
[email protected]
Thank you
...Gord