Hi,
Here is a set of fixes and cleanups for the new skd driver.
Best regards,
--
Bartlomiej Zolnierkiewicz
Samsung R&D Institute Poland
Samsung Electronics
Bartlomiej Zolnierkiewicz (14):
skd: fix unregister_blkdev() placement
skd: fix error paths in skd_init()
skd: fix error messages in skd_init()
skd: alloc flush slab only if some devices are present
skd: register block device only if some devices are present
skd: remove SCSI subsystem specific includes
skd: use <asm/unaligned.h>
skd: remove redundant skdev->pdev assignment from skd_pci_probe()
skd: remove SKD_OMIT_FROM_SRC_DIST ifdefs
skd: cleanup skd_do_inq_page_da()
skd: reorder construct/destruct code
skd: reorder skd_flush_cmd_[en,de]queue() code
skd: fix formatting in skd_s1120.h
skd: remove skd_bio code
drivers/block/skd_main.c | 1015 ++++++++++++---------------------------------
drivers/block/skd_s1120.h | 244 +++++------
2 files changed, 367 insertions(+), 892 deletions(-)
--
1.8.2.3
register_blkdev() is called before pci_register_driver() in skd_init()
so unregister_blkdev() should be called after pci_unregister_driver()
in skd_exit(). Fix it.
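To illustrate (a simplified sketch of the pairing, not the exact driver code):
setup and teardown should mirror each other, so the exit path undoes the
registrations in reverse order:

	/* skd_init(): registration order */
	rc = register_blkdev(0, DRV_NAME);
	if (rc < 0)
		return rc;
	skd_major = rc;
	return pci_register_driver(&skd_driver);

	/* skd_exit(): teardown in reverse order */
	pci_unregister_driver(&skd_driver);
	unregister_blkdev(skd_major, DRV_NAME);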
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 3110f68..a98d1bc 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5778,9 +5778,10 @@ static void __exit skd_exit(void)
{
pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
- unregister_blkdev(skd_major, DRV_NAME);
pci_unregister_driver(&skd_driver);
+ unregister_blkdev(skd_major, DRV_NAME);
+
kmem_cache_destroy(skd_flush_slab);
}
--
1.8.2.3
* change priority level from KERN_INFO to KERN_ERR
* add "skd: " prefix
* fix flush slab allocation failure message
* do minor CodingStyle fixes
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 28 ++++++++++++----------------
1 file changed, 12 insertions(+), 16 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a4eb480..04eb9bd 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5709,56 +5709,52 @@ static int __init skd_init(void)
case SKD_IRQ_MSIX:
break;
default:
- pr_info("skd_isr_type %d invalid, re-set to %d\n",
+ pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
skd_isr_type, SKD_IRQ_DEFAULT);
skd_isr_type = SKD_IRQ_DEFAULT;
}
skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
- sizeof(struct skd_flush_cmd),
- 0, 0, NULL);
-
+ sizeof(struct skd_flush_cmd),
+ 0, 0, NULL);
if (!skd_flush_slab) {
- pr_err("failed to allocated flush slab.\n");
+ pr_err(PFX "failed to allocate flush slab\n");
goto err_kmem_cache_create;
}
- if (skd_max_queue_depth < 1
- || skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
- pr_info(
- "skd_max_queue_depth %d invalid, re-set to %d\n",
+ if (skd_max_queue_depth < 1 ||
+ skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
+ pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
}
if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
- pr_info(
- "skd_max_req_per_msg %d invalid, re-set to %d\n",
+ pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
}
if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
- pr_info(
- "skd_sg_per_request %d invalid, re-set to %d\n",
+ pr_err(PFX "skd_sg_per_request %d invalid, re-set to %d\n",
skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
}
if (skd_dbg_level < 0 || skd_dbg_level > 2) {
- pr_info("skd_dbg_level %d invalid, re-set to %d\n",
+ pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
skd_dbg_level, 0);
skd_dbg_level = 0;
}
if (skd_isr_comp_limit < 0) {
- pr_info("skd_isr_comp_limit %d invalid, set to %d\n",
+ pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
skd_isr_comp_limit, 0);
skd_isr_comp_limit = 0;
}
if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
- pr_info("skd_max_pass_thru %d invalid, re-set to %d\n",
+ pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
}
--
1.8.2.3
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 20 ++++++++++++++++----
1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index a98d1bc..a4eb480 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5699,7 +5699,7 @@ static void skd_log_skreq(struct skd_device *skdev,
static int __init skd_init(void)
{
- int rc = 0;
+ int rc = -ENOMEM;
pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
@@ -5720,7 +5720,7 @@ static int __init skd_init(void)
if (!skd_flush_slab) {
pr_err("failed to allocated flush slab.\n");
- return -ENOMEM;
+ goto err_kmem_cache_create;
}
if (skd_max_queue_depth < 1
@@ -5766,12 +5766,24 @@ static int __init skd_init(void)
/* Obtain major device number. */
rc = register_blkdev(0, DRV_NAME);
if (rc < 0)
- return rc;
+ goto err_register_blkdev;
skd_major = rc;
- return pci_register_driver(&skd_driver);
+ rc = pci_register_driver(&skd_driver);
+ if (rc < 0)
+ goto err_pci_register_driver;
+
+ return rc;
+err_pci_register_driver:
+ unregister_blkdev(skd_major, DRV_NAME);
+
+err_register_blkdev:
+ kmem_cache_destroy(skd_flush_slab);
+
+err_kmem_cache_create:
+ return rc;
}
static void __exit skd_exit(void)
--
1.8.2.3
Allocate flush slab in skd_pci_probe() instead of in skd_init() so it
is allocated only if some devices are present (currently it is always
allocated when the driver is loaded).
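Roughly, the idea is the following (simplified sketch; the actual hunk below
also unwinds through the probe error path instead of returning directly):

	/* in skd_pci_probe(), before skd_construct() */
	if (!skd_flush_slab) {
		skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
						   sizeof(struct skd_flush_cmd),
						   0, 0, NULL);
		if (!skd_flush_slab)
			return -ENOMEM;
	}

so the slab is created on the first probe, shared by any later devices, and
skd_exit() destroys it only if it was ever created.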
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 26 ++++++++++++++------------
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 04eb9bd..e63a63c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5203,6 +5203,18 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ if (!skd_flush_slab) {
+ skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
+ sizeof(struct skd_flush_cmd),
+ 0, 0, NULL);
+ if (!skd_flush_slab) {
+ pr_err("(%s): failed to allocate flush slab\n",
+ pci_name(pdev));
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+ }
+
skdev = skd_construct(pdev);
if (skdev == NULL)
goto err_out_regions;
@@ -5714,14 +5726,6 @@ static int __init skd_init(void)
skd_isr_type = SKD_IRQ_DEFAULT;
}
- skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
- sizeof(struct skd_flush_cmd),
- 0, 0, NULL);
- if (!skd_flush_slab) {
- pr_err(PFX "failed to allocate flush slab\n");
- goto err_kmem_cache_create;
- }
-
if (skd_max_queue_depth < 1 ||
skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
@@ -5776,9 +5780,6 @@ err_pci_register_driver:
unregister_blkdev(skd_major, DRV_NAME);
err_register_blkdev:
- kmem_cache_destroy(skd_flush_slab);
-
-err_kmem_cache_create:
return rc;
}
@@ -5790,7 +5791,8 @@ static void __exit skd_exit(void)
unregister_blkdev(skd_major, DRV_NAME);
- kmem_cache_destroy(skd_flush_slab);
+ if (skd_flush_slab)
+ kmem_cache_destroy(skd_flush_slab);
}
static int
--
1.8.2.3
Use <asm/unaligned.h> instead of <asm-generic/unaligned.h>.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 930ac88..e003137 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -45,7 +45,7 @@
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
-#include <asm-generic/unaligned.h>
+#include <asm/unaligned.h>
#include "skd_s1120.h"
--
1.8.2.3
skdev->pdev and skdev->pdev->bus are never NULL in skd_do_inq_page_da()
(the device is always instantiated from a probed PCI device in
skd_pci_probe()), so simplify the code accordingly.
Also cache the skdev->pdev value in a local pdev variable while at it.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 39 +++++++++++++--------------------------
1 file changed, 13 insertions(+), 26 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9fe910d..2ff8e37 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -2896,6 +2896,7 @@ static void skd_do_inq_page_da(struct skd_device *skdev,
volatile struct fit_comp_error_info *skerr,
uint8_t *cdb, uint8_t *buf)
{
+ struct pci_dev *pdev = skdev->pdev;
unsigned max_bytes;
struct driver_inquiry_data inq;
u16 val;
@@ -2906,36 +2907,22 @@ static void skd_do_inq_page_da(struct skd_device *skdev,
inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
- if (skdev->pdev && skdev->pdev->bus) {
- skd_get_link_info(skdev->pdev,
- &inq.pcie_link_speed, &inq.pcie_link_lanes);
- inq.pcie_bus_number = cpu_to_be16(skdev->pdev->bus->number);
- inq.pcie_device_number = PCI_SLOT(skdev->pdev->devfn);
- inq.pcie_function_number = PCI_FUNC(skdev->pdev->devfn);
+ skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
+ inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
+ inq.pcie_device_number = PCI_SLOT(pdev->devfn);
+ inq.pcie_function_number = PCI_FUNC(pdev->devfn);
- pci_read_config_word(skdev->pdev, PCI_VENDOR_ID, &val);
- inq.pcie_vendor_id = cpu_to_be16(val);
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
+ inq.pcie_vendor_id = cpu_to_be16(val);
- pci_read_config_word(skdev->pdev, PCI_DEVICE_ID, &val);
- inq.pcie_device_id = cpu_to_be16(val);
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
+ inq.pcie_device_id = cpu_to_be16(val);
- pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_VENDOR_ID,
- &val);
- inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
+ inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
- pci_read_config_word(skdev->pdev, PCI_SUBSYSTEM_ID, &val);
- inq.pcie_subsystem_device_id = cpu_to_be16(val);
- } else {
- inq.pcie_bus_number = 0xFFFF;
- inq.pcie_device_number = 0xFF;
- inq.pcie_function_number = 0xFF;
- inq.pcie_link_speed = 0xFF;
- inq.pcie_link_lanes = 0xFF;
- inq.pcie_vendor_id = 0xFFFF;
- inq.pcie_device_id = 0xFFFF;
- inq.pcie_subsystem_vendor_id = 0xFFFF;
- inq.pcie_subsystem_device_id = 0xFFFF;
- }
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
+ inq.pcie_subsystem_device_id = cpu_to_be16(val);
/* Driver version, fixed lenth, padded with spaces on the right */
inq.driver_version_length = sizeof(inq.driver_version);
--
1.8.2.3
This is not a SCSI host driver, so remove the SCSI subsystem specific
includes.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 3 ---
1 file changed, 3 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 738f847..930ac88 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -42,9 +42,6 @@
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_cmnd.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
--
1.8.2.3
SKD_OMIT_FROM_SRC_DIST is never defined, so the
#ifndef SKD_OMIT_FROM_SRC_DIST / #endif guards have no effect. Remove them
and keep their contents.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_s1120.h | 14 --------------
1 file changed, 14 deletions(-)
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
index bf01941..426581e 100644
--- a/drivers/block/skd_s1120.h
+++ b/drivers/block/skd_s1120.h
@@ -21,11 +21,9 @@
#define FIT_QCMD_QID_MASK (0x3 << 1)
#define FIT_QCMD_QID0 (0x0 << 1)
#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
-#ifndef SKD_OMIT_FROM_SRC_DIST
#define FIT_QCMD_QID1 (0x1 << 1)
#define FIT_QCMD_QID2 (0x2 << 1)
#define FIT_QCMD_QID3 (0x3 << 1)
-#endif /* SKD_OMIT_FROM_SRC_DIST */
#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
@@ -39,13 +37,9 @@
* Control, 32-bit r/w
*/
#define FIT_CONTROL 0x500u
-#ifndef SKD_OMIT_FROM_SRC_DIST
#define FIT_CR_HARD_RESET (1u << 0u)
-#endif /* SKD_OMIT_FROM_SRC_DIST */
#define FIT_CR_SOFT_RESET (1u << 1u)
-#ifndef SKD_OMIT_FROM_SRC_DIST
#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
-#endif /* SKD_OMIT_FROM_SRC_DIST */
#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
/*
@@ -53,10 +47,8 @@
*/
#define FIT_STATUS 0x510u
#define FIT_SR_DRIVE_STATE_MASK 0x000000FFu
-#ifndef SKD_OMIT_FROM_SRC_DIST
#define FIT_SR_SIGNATURE (0xFF << 8)
#define FIT_SR_PIO_DMA (1 << 16)
-#endif /* SKD_OMIT_FROM_SRC_DIST */
#define FIT_SR_DRIVE_OFFLINE 0x00
#define FIT_SR_DRIVE_INIT 0x01
/* #define FIT_SR_DRIVE_READY 0x02 */
@@ -74,14 +66,12 @@
#define FIT_SR_DEVICE_MISSING 0xFF
#define FIT_SR__RESERVED 0xFFFFFF00u
-#ifndef SKD_OMIT_FROM_SRC_DIST
/*
* FIT_STATUS - Status register data definition
*/
#define FIT_SR_STATE_MASK (0xFF << 0)
#define FIT_SR_SIGNATURE (0xFF << 8)
#define FIT_SR_PIO_DMA (1 << 16)
-#endif /* SKD_OMIT_FROM_SRC_DIST */
/*
@@ -189,10 +179,8 @@
#define FIT_MFD_PM_SLEEP 0x17u
#define FIT_MFD_CMD_PROGRESS 0x18u
-#ifndef SKD_OMIT_FROM_SRC_DIST
#define FIT_MTD_DEBUG 0xFEu
#define FIT_MFD_DEBUG 0xFFu
-#endif /* SKD_OMIT_FROM_SRC_DIST */
#define FIT_MFD_MASK (0xFFu)
#define FIT_MFD_DATA_MASK (0xFFu)
@@ -248,7 +236,6 @@ struct fit_msg_hdr {
#define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF)
#define FIT_PROTOCOL_MAJOR_VER(mtd_val) ((mtd_val >> 20) & 0xF)
-#ifndef SKD_OMIT_FROM_SRC_DIST
/*
* Format of a completion entry. The completion queue is circular
* and must have at least as many entries as the maximum number
@@ -264,7 +251,6 @@ struct fit_msg_hdr {
* Command_context is opaque and taken verbatim from the SSDI command.
* All other fields are big endian.
*/
-#endif /* SKD_OMIT_FROM_SRC_DIST */
#define FIT_PROTOCOL_VERSION_0 0
/*
--
1.8.2.3
Reorder the placement of the skd_construct(), skd_cons_sg_list(),
skd_destruct() and skd_free_sg_list() functions, then remove the function
prototypes that are no longer needed.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 314 ++++++++++++++++++++++-------------------------
1 file changed, 144 insertions(+), 170 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 2ff8e37..c72b0e4 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -497,7 +497,6 @@ MODULE_PARM_DESC(skd_bio,
/* Major device number dynamically assigned. */
static u32 skd_major;
-static struct skd_device *skd_construct(struct pci_dev *pdev);
static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
@@ -4408,102 +4407,6 @@ static void skd_release_irq(struct skd_device *skdev)
*****************************************************************************
*/
-static int skd_cons_skcomp(struct skd_device *skdev);
-static int skd_cons_skmsg(struct skd_device *skdev);
-static int skd_cons_skreq(struct skd_device *skdev);
-static int skd_cons_skspcl(struct skd_device *skdev);
-static int skd_cons_sksb(struct skd_device *skdev);
-static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
- u32 n_sg,
- dma_addr_t *ret_dma_addr);
-static int skd_cons_disk(struct skd_device *skdev);
-
-#define SKD_N_DEV_TABLE 16u
-static u32 skd_next_devno;
-
-static struct skd_device *skd_construct(struct pci_dev *pdev)
-{
- struct skd_device *skdev;
- int blk_major = skd_major;
- int rc;
-
- skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
-
- if (!skdev) {
- pr_err(PFX "(%s): memory alloc failure\n",
- pci_name(pdev));
- return NULL;
- }
-
- skdev->state = SKD_DRVR_STATE_LOAD;
- skdev->pdev = pdev;
- skdev->devno = skd_next_devno++;
- skdev->major = blk_major;
- skdev->irq_type = skd_isr_type;
- sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
- skdev->dev_max_queue_depth = 0;
-
- skdev->num_req_context = skd_max_queue_depth;
- skdev->num_fitmsg_context = skd_max_queue_depth;
- skdev->n_special = skd_max_pass_thru;
- skdev->cur_max_queue_depth = 1;
- skdev->queue_low_water_mark = 1;
- skdev->proto_ver = 99;
- skdev->sgs_per_request = skd_sgs_per_request;
- skdev->dbg_level = skd_dbg_level;
-
- if (skd_bio)
- bio_list_init(&skdev->bio_queue);
-
-
- atomic_set(&skdev->device_count, 0);
-
- spin_lock_init(&skdev->lock);
-
- INIT_WORK(&skdev->completion_worker, skd_completion_worker);
- INIT_LIST_HEAD(&skdev->flush_list);
-
- VPRINTK(skdev, "skcomp\n");
- rc = skd_cons_skcomp(skdev);
- if (rc < 0)
- goto err_out;
-
- VPRINTK(skdev, "skmsg\n");
- rc = skd_cons_skmsg(skdev);
- if (rc < 0)
- goto err_out;
-
- VPRINTK(skdev, "skreq\n");
- rc = skd_cons_skreq(skdev);
- if (rc < 0)
- goto err_out;
-
- VPRINTK(skdev, "skspcl\n");
- rc = skd_cons_skspcl(skdev);
- if (rc < 0)
- goto err_out;
-
- VPRINTK(skdev, "sksb\n");
- rc = skd_cons_sksb(skdev);
- if (rc < 0)
- goto err_out;
-
- VPRINTK(skdev, "disk\n");
- rc = skd_cons_disk(skdev);
- if (rc < 0)
- goto err_out;
-
-
-
- DPRINTK(skdev, "VICTORY\n");
- return skdev;
-
-err_out:
- DPRINTK(skdev, "construct failed\n");
- skd_destruct(skdev);
- return NULL;
-}
-
static int skd_cons_skcomp(struct skd_device *skdev)
{
int rc = 0;
@@ -4590,6 +4493,35 @@ err_out:
return rc;
}
+static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
+ u32 n_sg,
+ dma_addr_t *ret_dma_addr)
+{
+ struct fit_sg_descriptor *sg_list;
+ u32 nbytes;
+
+ nbytes = sizeof(*sg_list) * n_sg;
+
+ sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
+
+ if (sg_list != NULL) {
+ uint64_t dma_address = *ret_dma_addr;
+ u32 i;
+
+ memset(sg_list, 0, nbytes);
+
+ for (i = 0; i < n_sg - 1; i++) {
+ uint64_t ndp_off;
+ ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
+
+ sg_list[i].next_desc_ptr = dma_address + ndp_off;
+ }
+ sg_list[i].next_desc_ptr = 0LL;
+ }
+
+ return sg_list;
+}
+
static int skd_cons_skreq(struct skd_device *skdev)
{
int rc = 0;
@@ -4760,35 +4692,6 @@ err_out:
return rc;
}
-static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
- u32 n_sg,
- dma_addr_t *ret_dma_addr)
-{
- struct fit_sg_descriptor *sg_list;
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
-
- sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
-
- if (sg_list != NULL) {
- uint64_t dma_address = *ret_dma_addr;
- u32 i;
-
- memset(sg_list, 0, nbytes);
-
- for (i = 0; i < n_sg - 1; i++) {
- uint64_t ndp_off;
- ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
-
- sg_list[i].next_desc_ptr = dma_address + ndp_off;
- }
- sg_list[i].next_desc_ptr = 0LL;
- }
-
- return sg_list;
-}
-
static int skd_cons_disk(struct skd_device *skdev)
{
int rc = 0;
@@ -4855,50 +4758,96 @@ err_out:
return rc;
}
-/*
- *****************************************************************************
- * DESTRUCT (FREE)
- *****************************************************************************
- */
-
-static void skd_free_skcomp(struct skd_device *skdev);
-static void skd_free_skmsg(struct skd_device *skdev);
-static void skd_free_skreq(struct skd_device *skdev);
-static void skd_free_skspcl(struct skd_device *skdev);
-static void skd_free_sksb(struct skd_device *skdev);
-static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- u32 n_sg, dma_addr_t dma_addr);
-static void skd_free_disk(struct skd_device *skdev);
+#define SKD_N_DEV_TABLE 16u
+static u32 skd_next_devno;
-static void skd_destruct(struct skd_device *skdev)
+static struct skd_device *skd_construct(struct pci_dev *pdev)
{
- if (skdev == NULL)
- return;
+ struct skd_device *skdev;
+ int blk_major = skd_major;
+ int rc;
+ skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
- VPRINTK(skdev, "disk\n");
- skd_free_disk(skdev);
+ if (!skdev) {
+ pr_err(PFX "(%s): memory alloc failure\n",
+ pci_name(pdev));
+ return NULL;
+ }
- VPRINTK(skdev, "sksb\n");
- skd_free_sksb(skdev);
+ skdev->state = SKD_DRVR_STATE_LOAD;
+ skdev->pdev = pdev;
+ skdev->devno = skd_next_devno++;
+ skdev->major = blk_major;
+ skdev->irq_type = skd_isr_type;
+ sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
+ skdev->dev_max_queue_depth = 0;
- VPRINTK(skdev, "skspcl\n");
- skd_free_skspcl(skdev);
+ skdev->num_req_context = skd_max_queue_depth;
+ skdev->num_fitmsg_context = skd_max_queue_depth;
+ skdev->n_special = skd_max_pass_thru;
+ skdev->cur_max_queue_depth = 1;
+ skdev->queue_low_water_mark = 1;
+ skdev->proto_ver = 99;
+ skdev->sgs_per_request = skd_sgs_per_request;
+ skdev->dbg_level = skd_dbg_level;
- VPRINTK(skdev, "skreq\n");
- skd_free_skreq(skdev);
+ if (skd_bio)
+ bio_list_init(&skdev->bio_queue);
- VPRINTK(skdev, "skmsg\n");
- skd_free_skmsg(skdev);
+
+ atomic_set(&skdev->device_count, 0);
+
+ spin_lock_init(&skdev->lock);
+
+ INIT_WORK(&skdev->completion_worker, skd_completion_worker);
+ INIT_LIST_HEAD(&skdev->flush_list);
VPRINTK(skdev, "skcomp\n");
- skd_free_skcomp(skdev);
+ rc = skd_cons_skcomp(skdev);
+ if (rc < 0)
+ goto err_out;
- VPRINTK(skdev, "skdev\n");
- kfree(skdev);
+ VPRINTK(skdev, "skmsg\n");
+ rc = skd_cons_skmsg(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ VPRINTK(skdev, "skreq\n");
+ rc = skd_cons_skreq(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ VPRINTK(skdev, "skspcl\n");
+ rc = skd_cons_skspcl(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ VPRINTK(skdev, "sksb\n");
+ rc = skd_cons_sksb(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ VPRINTK(skdev, "disk\n");
+ rc = skd_cons_disk(skdev);
+ if (rc < 0)
+ goto err_out;
+
+ DPRINTK(skdev, "VICTORY\n");
+ return skdev;
+
+err_out:
+ DPRINTK(skdev, "construct failed\n");
+ skd_destruct(skdev);
+ return NULL;
}
+/*
+ *****************************************************************************
+ * DESTRUCT (FREE)
+ *****************************************************************************
+ */
+
static void skd_free_skcomp(struct skd_device *skdev)
{
if (skdev->skcomp_table != NULL) {
@@ -4941,6 +4890,19 @@ static void skd_free_skmsg(struct skd_device *skdev)
skdev->skmsg_table = NULL;
}
+static void skd_free_sg_list(struct skd_device *skdev,
+ struct fit_sg_descriptor *sg_list,
+ u32 n_sg, dma_addr_t dma_addr)
+{
+ if (sg_list != NULL) {
+ u32 nbytes;
+
+ nbytes = sizeof(*sg_list) * n_sg;
+
+ pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
+ }
+}
+
static void skd_free_skreq(struct skd_device *skdev)
{
u32 i;
@@ -5037,19 +4999,6 @@ static void skd_free_sksb(struct skd_device *skdev)
skspcl->req.sksg_dma_address = 0;
}
-static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- u32 n_sg, dma_addr_t dma_addr)
-{
- if (sg_list != NULL) {
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
-
- pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
- }
-}
-
static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
@@ -5066,7 +5015,32 @@ static void skd_free_disk(struct skd_device *skdev)
skdev->disk = NULL;
}
+static void skd_destruct(struct skd_device *skdev)
+{
+ if (skdev == NULL)
+ return;
+
+ VPRINTK(skdev, "disk\n");
+ skd_free_disk(skdev);
+ VPRINTK(skdev, "sksb\n");
+ skd_free_sksb(skdev);
+
+ VPRINTK(skdev, "skspcl\n");
+ skd_free_skspcl(skdev);
+
+ VPRINTK(skdev, "skreq\n");
+ skd_free_skreq(skdev);
+
+ VPRINTK(skdev, "skmsg\n");
+ skd_free_skmsg(skdev);
+
+ VPRINTK(skdev, "skcomp\n");
+ skd_free_skcomp(skdev);
+
+ VPRINTK(skdev, "skdev\n");
+ kfree(skdev);
+}
/*
*****************************************************************************
--
1.8.2.3
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_s1120.h | 230 ++++++++++++++++++++++------------------------
1 file changed, 110 insertions(+), 120 deletions(-)
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
index 426581e..61c757f 100644
--- a/drivers/block/skd_s1120.h
+++ b/drivers/block/skd_s1120.h
@@ -17,30 +17,29 @@
/*
* Q-channel, 64-bit r/w
*/
-#define FIT_Q_COMMAND 0x400u
-#define FIT_QCMD_QID_MASK (0x3 << 1)
-#define FIT_QCMD_QID0 (0x0 << 1)
-#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
-#define FIT_QCMD_QID1 (0x1 << 1)
-#define FIT_QCMD_QID2 (0x2 << 1)
-#define FIT_QCMD_QID3 (0x3 << 1)
-#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
-#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
-#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
-#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
-#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
-#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
-#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull)
-
+#define FIT_Q_COMMAND 0x400u
+#define FIT_QCMD_QID_MASK (0x3 << 1)
+#define FIT_QCMD_QID0 (0x0 << 1)
+#define FIT_QCMD_QID_NORMAL FIT_QCMD_QID0
+#define FIT_QCMD_QID1 (0x1 << 1)
+#define FIT_QCMD_QID2 (0x2 << 1)
+#define FIT_QCMD_QID3 (0x3 << 1)
+#define FIT_QCMD_FLUSH_QUEUE (0ull) /* add QID */
+#define FIT_QCMD_MSGSIZE_MASK (0x3 << 4)
+#define FIT_QCMD_MSGSIZE_64 (0x0 << 4)
+#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
+#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
+#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
+#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull)
/*
* Control, 32-bit r/w
*/
-#define FIT_CONTROL 0x500u
-#define FIT_CR_HARD_RESET (1u << 0u)
-#define FIT_CR_SOFT_RESET (1u << 1u)
-#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
-#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
+#define FIT_CONTROL 0x500u
+#define FIT_CR_HARD_RESET (1u << 0u)
+#define FIT_CR_SOFT_RESET (1u << 1u)
+#define FIT_CR_DIS_TIMESTAMPS (1u << 6u)
+#define FIT_CR_ENABLE_INTERRUPTS (1u << 7u)
/*
* Status, 32-bit, r/o
@@ -63,88 +62,82 @@
#define FIT_SR_DRIVE_BUSY_ERASE 0x0B
#define FIT_SR_DRIVE_FW_BOOTING 0x0C
#define FIT_SR_DRIVE_NEED_FW_DOWNLOAD 0xFE
-#define FIT_SR_DEVICE_MISSING 0xFF
+#define FIT_SR_DEVICE_MISSING 0xFF
#define FIT_SR__RESERVED 0xFFFFFF00u
/*
* FIT_STATUS - Status register data definition
*/
-#define FIT_SR_STATE_MASK (0xFF << 0)
-#define FIT_SR_SIGNATURE (0xFF << 8)
-#define FIT_SR_PIO_DMA (1 << 16)
-
+#define FIT_SR_STATE_MASK (0xFF << 0)
+#define FIT_SR_SIGNATURE (0xFF << 8)
+#define FIT_SR_PIO_DMA (1 << 16)
/*
* Interrupt status, 32-bit r/w1c (w1c ==> write 1 to clear)
*/
-#define FIT_INT_STATUS_HOST 0x520u
-#define FIT_ISH_FW_STATE_CHANGE (1u << 0u)
-#define FIT_ISH_COMPLETION_POSTED (1u << 1u)
-#define FIT_ISH_MSG_FROM_DEV (1u << 2u)
-#define FIT_ISH_UNDEFINED_3 (1u << 3u)
-#define FIT_ISH_UNDEFINED_4 (1u << 4u)
-#define FIT_ISH_Q0_FULL (1u << 5u)
-#define FIT_ISH_Q1_FULL (1u << 6u)
-#define FIT_ISH_Q2_FULL (1u << 7u)
-#define FIT_ISH_Q3_FULL (1u << 8u)
-#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u)
-#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u)
-
-
-#define FIT_INT_DEF_MASK \
- (FIT_ISH_FW_STATE_CHANGE | \
- FIT_ISH_COMPLETION_POSTED | \
- FIT_ISH_MSG_FROM_DEV | \
- FIT_ISH_Q0_FULL | \
- FIT_ISH_Q1_FULL | \
- FIT_ISH_Q2_FULL | \
- FIT_ISH_Q3_FULL | \
- FIT_ISH_QCMD_FIFO_OVERRUN | \
- FIT_ISH_BAD_EXP_ROM_READ)
-
-#define FIT_INT_QUEUE_FULL \
- (FIT_ISH_Q0_FULL | \
- FIT_ISH_Q1_FULL | \
- FIT_ISH_Q2_FULL | \
- FIT_ISH_Q3_FULL)
-
-
-#define MSI_MSG_NWL_ERROR_0 0x00000000
-#define MSI_MSG_NWL_ERROR_1 0x00000001
-#define MSI_MSG_NWL_ERROR_2 0x00000002
-#define MSI_MSG_NWL_ERROR_3 0x00000003
-#define MSI_MSG_STATE_CHANGE 0x00000004
-#define MSI_MSG_COMPLETION_POSTED 0x00000005
-#define MSI_MSG_MSG_FROM_DEV 0x00000006
-#define MSI_MSG_RESERVED_0 0x00000007
-#define MSI_MSG_RESERVED_1 0x00000008
-#define MSI_MSG_QUEUE_0_FULL 0x00000009
-#define MSI_MSG_QUEUE_1_FULL 0x0000000A
-#define MSI_MSG_QUEUE_2_FULL 0x0000000B
-#define MSI_MSG_QUEUE_3_FULL 0x0000000C
-
-
-
-#define FIT_INT_RESERVED_MASK \
- (FIT_ISH_UNDEFINED_3 | \
- FIT_ISH_UNDEFINED_4)
+#define FIT_INT_STATUS_HOST 0x520u
+#define FIT_ISH_FW_STATE_CHANGE (1u << 0u)
+#define FIT_ISH_COMPLETION_POSTED (1u << 1u)
+#define FIT_ISH_MSG_FROM_DEV (1u << 2u)
+#define FIT_ISH_UNDEFINED_3 (1u << 3u)
+#define FIT_ISH_UNDEFINED_4 (1u << 4u)
+#define FIT_ISH_Q0_FULL (1u << 5u)
+#define FIT_ISH_Q1_FULL (1u << 6u)
+#define FIT_ISH_Q2_FULL (1u << 7u)
+#define FIT_ISH_Q3_FULL (1u << 8u)
+#define FIT_ISH_QCMD_FIFO_OVERRUN (1u << 9u)
+#define FIT_ISH_BAD_EXP_ROM_READ (1u << 10u)
+
+#define FIT_INT_DEF_MASK \
+ (FIT_ISH_FW_STATE_CHANGE | \
+ FIT_ISH_COMPLETION_POSTED | \
+ FIT_ISH_MSG_FROM_DEV | \
+ FIT_ISH_Q0_FULL | \
+ FIT_ISH_Q1_FULL | \
+ FIT_ISH_Q2_FULL | \
+ FIT_ISH_Q3_FULL | \
+ FIT_ISH_QCMD_FIFO_OVERRUN | \
+ FIT_ISH_BAD_EXP_ROM_READ)
+
+#define FIT_INT_QUEUE_FULL \
+ (FIT_ISH_Q0_FULL | \
+ FIT_ISH_Q1_FULL | \
+ FIT_ISH_Q2_FULL | \
+ FIT_ISH_Q3_FULL)
+
+#define MSI_MSG_NWL_ERROR_0 0x00000000
+#define MSI_MSG_NWL_ERROR_1 0x00000001
+#define MSI_MSG_NWL_ERROR_2 0x00000002
+#define MSI_MSG_NWL_ERROR_3 0x00000003
+#define MSI_MSG_STATE_CHANGE 0x00000004
+#define MSI_MSG_COMPLETION_POSTED 0x00000005
+#define MSI_MSG_MSG_FROM_DEV 0x00000006
+#define MSI_MSG_RESERVED_0 0x00000007
+#define MSI_MSG_RESERVED_1 0x00000008
+#define MSI_MSG_QUEUE_0_FULL 0x00000009
+#define MSI_MSG_QUEUE_1_FULL 0x0000000A
+#define MSI_MSG_QUEUE_2_FULL 0x0000000B
+#define MSI_MSG_QUEUE_3_FULL 0x0000000C
+
+#define FIT_INT_RESERVED_MASK \
+ (FIT_ISH_UNDEFINED_3 | \
+ FIT_ISH_UNDEFINED_4)
+
/*
* Interrupt mask, 32-bit r/w
* Bit definitions are the same as FIT_INT_STATUS_HOST
*/
-#define FIT_INT_MASK_HOST 0x528u
-
+#define FIT_INT_MASK_HOST 0x528u
/*
* Message to device, 32-bit r/w
*/
-#define FIT_MSG_TO_DEVICE 0x540u
+#define FIT_MSG_TO_DEVICE 0x540u
/*
* Message from device, 32-bit, r/o
*/
-#define FIT_MSG_FROM_DEVICE 0x548u
-
+#define FIT_MSG_FROM_DEVICE 0x548u
/*
* 32-bit messages to/from device, composition/extraction macros
@@ -153,52 +146,50 @@
((((TYPE) & 0xFFu) << 24u) | \
(((PARAM) & 0xFFu) << 16u) | \
(((DATA) & 0xFFFFu) << 0u))
-#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu)
-#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu)
-#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu)
-
+#define FIT_MXD_TYPE(MXD) (((MXD) >> 24u) & 0xFFu)
+#define FIT_MXD_PARAM(MXD) (((MXD) >> 16u) & 0xFFu)
+#define FIT_MXD_DATA(MXD) (((MXD) >> 0u) & 0xFFFFu)
/*
* Types of messages to/from device
*/
-#define FIT_MTD_FITFW_INIT 0x01u
-#define FIT_MTD_GET_CMDQ_DEPTH 0x02u
-#define FIT_MTD_SET_COMPQ_DEPTH 0x03u
-#define FIT_MTD_SET_COMPQ_ADDR 0x04u
-#define FIT_MTD_ARM_QUEUE 0x05u
-#define FIT_MTD_CMD_LOG_HOST_ID 0x07u
-#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u
-#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u
-#define FIT_MFD_SMART_EXCEEDED 0x10u
-#define FIT_MFD_POWER_DOWN 0x11u
-#define FIT_MFD_OFFLINE 0x12u
-#define FIT_MFD_ONLINE 0x13u
-#define FIT_MFD_FW_RESTARTING 0x14u
-#define FIT_MFD_PM_ACTIVE 0x15u
-#define FIT_MFD_PM_STANDBY 0x16u
-#define FIT_MFD_PM_SLEEP 0x17u
-#define FIT_MFD_CMD_PROGRESS 0x18u
-
-#define FIT_MTD_DEBUG 0xFEu
-#define FIT_MFD_DEBUG 0xFFu
+#define FIT_MTD_FITFW_INIT 0x01u
+#define FIT_MTD_GET_CMDQ_DEPTH 0x02u
+#define FIT_MTD_SET_COMPQ_DEPTH 0x03u
+#define FIT_MTD_SET_COMPQ_ADDR 0x04u
+#define FIT_MTD_ARM_QUEUE 0x05u
+#define FIT_MTD_CMD_LOG_HOST_ID 0x07u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_LO 0x08u
+#define FIT_MTD_CMD_LOG_TIME_STAMP_HI 0x09u
+#define FIT_MFD_SMART_EXCEEDED 0x10u
+#define FIT_MFD_POWER_DOWN 0x11u
+#define FIT_MFD_OFFLINE 0x12u
+#define FIT_MFD_ONLINE 0x13u
+#define FIT_MFD_FW_RESTARTING 0x14u
+#define FIT_MFD_PM_ACTIVE 0x15u
+#define FIT_MFD_PM_STANDBY 0x16u
+#define FIT_MFD_PM_SLEEP 0x17u
+#define FIT_MFD_CMD_PROGRESS 0x18u
+
+#define FIT_MTD_DEBUG 0xFEu
+#define FIT_MFD_DEBUG 0xFFu
#define FIT_MFD_MASK (0xFFu)
#define FIT_MFD_DATA_MASK (0xFFu)
#define FIT_MFD_MSG(x) (((x) >> 24) & FIT_MFD_MASK)
#define FIT_MFD_DATA(x) ((x) & FIT_MFD_MASK)
-
/*
* Extra arg to FIT_MSG_TO_DEVICE, 64-bit r/w
* Used to set completion queue address (FIT_MTD_SET_COMPQ_ADDR)
* (was Response buffer in docs)
*/
-#define FIT_MSG_TO_DEVICE_ARG 0x580u
+#define FIT_MSG_TO_DEVICE_ARG 0x580u
/*
* Hardware (ASIC) version, 32-bit r/o
*/
-#define FIT_HW_VERSION 0x588u
+#define FIT_HW_VERSION 0x588u
/*
* Scatter/gather list descriptor.
@@ -213,8 +204,8 @@ struct fit_sg_descriptor {
uint64_t next_desc_ptr;
};
-#define FIT_SGD_CONTROL_NOT_LAST 0x000u
-#define FIT_SGD_CONTROL_LAST 0x40Eu
+#define FIT_SGD_CONTROL_NOT_LAST 0x000u
+#define FIT_SGD_CONTROL_LAST 0x40Eu
/*
* Header at the beginning of a FIT message. The header
@@ -228,9 +219,9 @@ struct fit_msg_hdr {
uint8_t _reserved[62];
};
-#define FIT_PROTOCOL_ID_FIT 1
-#define FIT_PROTOCOL_ID_SSDI 2
-#define FIT_PROTOCOL_ID_SOFIT 3
+#define FIT_PROTOCOL_ID_FIT 1
+#define FIT_PROTOCOL_ID_SSDI 2
+#define FIT_PROTOCOL_ID_SOFIT 3
#define FIT_PROTOCOL_MINOR_VER(mtd_val) ((mtd_val >> 16) & 0xF)
@@ -251,7 +242,7 @@ struct fit_msg_hdr {
* Command_context is opaque and taken verbatim from the SSDI command.
* All other fields are big endian.
*/
-#define FIT_PROTOCOL_VERSION_0 0
+#define FIT_PROTOCOL_VERSION_0 0
/*
* Protocol major version 1 completion entry.
@@ -264,8 +255,8 @@ struct fit_completion_entry_v1 {
uint8_t status; /* SCSI status */
uint8_t cycle;
};
-#define FIT_PROTOCOL_VERSION_1 1
-#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1
+#define FIT_PROTOCOL_VERSION_1 1
+#define FIT_PROTOCOL_VERSION_CURRENT FIT_PROTOCOL_VERSION_1
struct fit_comp_error_info {
uint8_t type:7; /* 00: Bits0-6 indicates the type of sense data. */
@@ -293,10 +284,9 @@ struct fit_comp_error_info {
/* Task management constants */
-#define SOFT_TASK_SIMPLE 0x00
-#define SOFT_TASK_HEAD_OF_QUEUE 0x01
-#define SOFT_TASK_ORDERED 0x02
-
+#define SOFT_TASK_SIMPLE 0x00
+#define SOFT_TASK_HEAD_OF_QUEUE 0x01
+#define SOFT_TASK_ORDERED 0x02
/* Version zero has the last 32 bits reserved,
* Version one has the last 32 bits sg_list_len_bytes;
--
1.8.2.3
Reorder the placement of the skd_flush_cmd_[en,de]queue() functions,
then remove the function prototypes that are no longer needed.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 61 +++++++++++++++++++++---------------------------
1 file changed, 27 insertions(+), 34 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index c72b0e4..4d8e94c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -530,11 +530,6 @@ static void skd_log_skmsg(struct skd_device *skdev,
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event);
-/* FLUSH FUA flag handling. */
-static int skd_flush_cmd_enqueue(struct skd_device *, void *);
-static void *skd_flush_cmd_dequeue(struct skd_device *);
-
-
/*
*****************************************************************************
* READ/WRITE REQUESTS
@@ -699,6 +694,33 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
}
}
+static int skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
+{
+ struct skd_flush_cmd *item;
+
+ item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
+ if (!item) {
+ pr_err("skd_flush_cmd_enqueue: Failed to allocated item.\n");
+ return -ENOMEM;
+ }
+
+ item->cmd = cmd;
+ list_add_tail(&item->flist, &skdev->flush_list);
+ return 0;
+}
+
+static void *skd_flush_cmd_dequeue(struct skd_device *skdev)
+{
+ void *cmd;
+ struct skd_flush_cmd *item;
+
+ item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
+ list_del_init(&item->flist);
+ cmd = item->cmd;
+ kmem_cache_free(skd_flush_slab, item);
+ return cmd;
+}
+
static void skd_request_fn_not_online(struct request_queue *q);
static void skd_request_fn(struct request_queue *q)
@@ -5743,34 +5765,5 @@ static void __exit skd_exit(void)
kmem_cache_destroy(skd_flush_slab);
}
-static int
-skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
-{
- struct skd_flush_cmd *item;
-
- item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
- if (!item) {
- pr_err("skd_flush_cmd_enqueue: Failed to allocated item.\n");
- return -ENOMEM;
- }
-
- item->cmd = cmd;
- list_add_tail(&item->flist, &skdev->flush_list);
- return 0;
-}
-
-static void *
-skd_flush_cmd_dequeue(struct skd_device *skdev)
-{
- void *cmd;
- struct skd_flush_cmd *item;
-
- item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
- list_del_init(&item->flist);
- cmd = item->cmd;
- kmem_cache_free(skd_flush_slab, item);
- return cmd;
-}
-
module_init(skd_init);
module_exit(skd_exit);
--
1.8.2.3
The skd_bio feature adds the possibility of using an internal bio list to
process requests instead of the normal block layer queueing functionality.
Its potential advantages are unclear, and if there are any it is better to
identify and fix the block layer deficiencies instead. Moreover, it
introduces separate code paths through the whole driver which are difficult
to test properly and maintain in the long term. Since it is currently not
used unless explicitly enabled by a module parameter, just remove it.
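With skd_bio gone, only the standard request-queue path remains; in outline
(simplified, taken from the surviving code paths in the diff below):

	/* skd_cons_disk() */
	q = blk_init_queue(skd_request_fn, &skdev->lock);

	/* skd_request_fn() */
	req = blk_peek_request(q);
	...
	blk_start_request(req);

	/* on completion */
	__blk_end_request_all(skreq->req, error);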
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 605 ++++++-----------------------------------------
1 file changed, 76 insertions(+), 529 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 4d8e94c..6214d68 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -368,27 +368,7 @@ struct skd_device {
u32 timo_slot;
-
struct work_struct completion_worker;
-
- struct bio_list bio_queue;
- int queue_stopped;
-
- struct list_head flush_list;
-};
-
-#define SKD_FLUSH_JOB "skd-flush-jobs"
-struct kmem_cache *skd_flush_slab;
-
-/*
- * These commands hold "nonzero size FLUSH bios",
- * which are enqueud in skdev->flush_list during
- * completion of "zero size FLUSH commands".
- * It will be active in biomode.
- */
-struct skd_flush_cmd {
- void *cmd;
- struct list_head flist;
};
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
@@ -489,11 +469,6 @@ MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
-static int skd_bio;
-module_param(skd_bio, int, 0444);
-MODULE_PARM_DESC(skd_bio,
- "Register as a bio device instead of block (0, 1) default=0");
-
/* Major device number dynamically assigned. */
static u32 skd_major;
@@ -535,42 +510,8 @@ static void skd_log_skreq(struct skd_device *skdev,
* READ/WRITE REQUESTS
*****************************************************************************
*/
-static void skd_stop_queue(struct skd_device *skdev)
-{
- if (!skd_bio)
- blk_stop_queue(skdev->queue);
- else
- skdev->queue_stopped = 1;
-}
-static void skd_unstop_queue(struct skd_device *skdev)
-{
- if (!skd_bio)
- queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
- else
- skdev->queue_stopped = 0;
-}
-
-static void skd_start_queue(struct skd_device *skdev)
-{
- if (!skd_bio) {
- blk_start_queue(skdev->queue);
- } else {
- pr_err("(%s): Starting queue\n", skd_name(skdev));
- skdev->queue_stopped = 0;
- skd_request_fn(skdev->queue);
- }
-}
-
-static int skd_queue_stopped(struct skd_device *skdev)
-{
- if (!skd_bio)
- return blk_queue_stopped(skdev->queue);
- else
- return skdev->queue_stopped;
-}
-
-static void skd_fail_all_pending_blk(struct skd_device *skdev)
+static void skd_fail_all_pending(struct skd_device *skdev)
{
struct request_queue *q = skdev->queue;
struct request *req;
@@ -584,42 +525,6 @@ static void skd_fail_all_pending_blk(struct skd_device *skdev)
}
}
-static void skd_fail_all_pending_bio(struct skd_device *skdev)
-{
- struct bio *bio;
- int error = -EIO;
-
- for (;; ) {
- bio = bio_list_pop(&skdev->bio_queue);
-
- if (bio == NULL)
- break;
-
- bio_endio(bio, error);
- }
-}
-
-static void skd_fail_all_pending(struct skd_device *skdev)
-{
- if (!skd_bio)
- skd_fail_all_pending_blk(skdev);
- else
- skd_fail_all_pending_bio(skdev);
-}
-
-static void skd_make_request(struct request_queue *q, struct bio *bio)
-{
- struct skd_device *skdev = q->queuedata;
- unsigned long flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- bio_list_add(&skdev->bio_queue, bio);
- skd_request_fn(skdev->queue);
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-}
-
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
int data_dir, unsigned lba,
@@ -680,45 +585,9 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
put_unaligned_be64(lba, &buf[8]);
put_unaligned_be32(count, &buf[16]);
- if (!skd_bio) {
- req = skreq->req;
- blk_add_request_payload(req, page, len);
- req->buffer = buf;
- } else {
- skreq->bio->bi_io_vec->bv_page = page;
- skreq->bio->bi_io_vec->bv_offset = 0;
- skreq->bio->bi_io_vec->bv_len = len;
-
- skreq->bio->bi_vcnt = 1;
- skreq->bio->bi_phys_segments = 1;
- }
-}
-
-static int skd_flush_cmd_enqueue(struct skd_device *skdev, void *cmd)
-{
- struct skd_flush_cmd *item;
-
- item = kmem_cache_zalloc(skd_flush_slab, GFP_ATOMIC);
- if (!item) {
- pr_err("skd_flush_cmd_enqueue: Failed to allocated item.\n");
- return -ENOMEM;
- }
-
- item->cmd = cmd;
- list_add_tail(&item->flist, &skdev->flush_list);
- return 0;
-}
-
-static void *skd_flush_cmd_dequeue(struct skd_device *skdev)
-{
- void *cmd;
- struct skd_flush_cmd *item;
-
- item = list_entry(skdev->flush_list.next, struct skd_flush_cmd, flist);
- list_del_init(&item->flist);
- cmd = item->cmd;
- kmem_cache_free(skd_flush_slab, item);
- return cmd;
+ req = skreq->req;
+ blk_add_request_payload(req, page, len);
+ req->buffer = buf;
}
static void skd_request_fn_not_online(struct request_queue *q);
@@ -751,14 +620,14 @@ static void skd_request_fn(struct request_queue *q)
return;
}
- if (skd_queue_stopped(skdev)) {
+ if (blk_queue_stopped(skdev->queue)) {
if (skdev->skmsg_free_list == NULL ||
skdev->skreq_free_list == NULL ||
skdev->in_flight >= skdev->queue_low_water_mark)
/* There is still some kind of shortage */
return;
- skd_unstop_queue(skdev);
+ queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
}
/*
@@ -768,61 +637,29 @@ static void skd_request_fn(struct request_queue *q)
* - There are no more skd_request_context entries
* - There are no more FIT msg buffers
*/
- for (;; ) {
-
+ for (;;) {
flush = fua = 0;
- if (!skd_bio) {
- req = blk_peek_request(q);
-
- /* Are there any native requests to start? */
- if (req == NULL)
- break;
-
- lba = (u32)blk_rq_pos(req);
- count = blk_rq_sectors(req);
- data_dir = rq_data_dir(req);
- io_flags = req->cmd_flags;
-
- if (io_flags & REQ_FLUSH)
- flush++;
-
- if (io_flags & REQ_FUA)
- fua++;
-
- VPRINTK(skdev,
- "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- req, lba, lba, count, count, data_dir);
- } else {
- if (!list_empty(&skdev->flush_list)) {
- /* Process data part of FLUSH request. */
- bio = (struct bio *)skd_flush_cmd_dequeue(skdev);
- flush++;
- VPRINTK(skdev, "processing FLUSH request with data.\n");
- } else {
- /* peek at our bio queue */
- bio = bio_list_peek(&skdev->bio_queue);
- }
+ req = blk_peek_request(q);
- /* Are there any native requests to start? */
- if (bio == NULL)
- break;
+ /* Are there any native requests to start? */
+ if (req == NULL)
+ break;
- lba = (u32)bio->bi_sector;
- count = bio_sectors(bio);
- data_dir = bio_data_dir(bio);
- io_flags = bio->bi_rw;
+ lba = (u32)blk_rq_pos(req);
+ count = blk_rq_sectors(req);
+ data_dir = rq_data_dir(req);
+ io_flags = req->cmd_flags;
- VPRINTK(skdev,
- "new bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- bio, lba, lba, count, count, data_dir);
+ if (io_flags & REQ_FLUSH)
+ flush++;
- if (io_flags & REQ_FLUSH)
- flush++;
+ if (io_flags & REQ_FUA)
+ fua++;
- if (io_flags & REQ_FUA)
- fua++;
- }
+ VPRINTK(skdev,
+ "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
+ req, lba, lba, count, count, data_dir);
/* At this point we know there is a request
* (from our bio q or req q depending on the way
@@ -859,29 +696,15 @@ static void skd_request_fn(struct request_queue *q)
skreq->discard_page = 0;
/*
- * OK to now dequeue request from either bio or q.
+ * OK to now dequeue request from queue.
*
* At this point we are comitted to either start or reject
* the native request. Note that skd_request_context is
* available but is still at the head of the free list.
*/
- if (!skd_bio) {
- blk_start_request(req);
- skreq->req = req;
- skreq->fitmsg_id = 0;
- } else {
- if (unlikely(flush == SKD_FLUSH_DATA_SECOND)) {
- skreq->bio = bio;
- } else {
- skreq->bio = bio_list_pop(&skdev->bio_queue);
- SKD_ASSERT(skreq->bio == bio);
- skreq->start_time = jiffies;
- part_inc_in_flight(&skdev->disk->part0,
- bio_data_dir(bio));
- }
-
- skreq->fitmsg_id = 0;
- }
+ blk_start_request(req);
+ skreq->req = req;
+ skreq->fitmsg_id = 0;
/* Either a FIT msg is in progress or we have to start one. */
if (skmsg == NULL) {
@@ -955,8 +778,7 @@ static void skd_request_fn(struct request_queue *q)
if (fua)
scsi_req->cdb[1] |= SKD_FUA_NV;
- if ((!skd_bio && !req->bio) ||
- (skd_bio && flush == SKD_FLUSH_ZERO_SIZE_FIRST))
+ if (!req->bio)
goto skip_sg;
error = skd_preop_sg_list(skdev, skreq);
@@ -1040,13 +862,12 @@ skip_sg:
* If req is non-NULL it means there is something to do but
* we are out of a resource.
*/
- if (((!skd_bio) && req) ||
- ((skd_bio) && bio_list_peek(&skdev->bio_queue)))
- skd_stop_queue(skdev);
+ if (req)
+ blk_stop_queue(skdev->queue);
}
-static void skd_end_request_blk(struct skd_device *skdev,
- struct skd_request_context *skreq, int error)
+static void skd_end_request(struct skd_device *skdev,
+ struct skd_request_context *skreq, int error)
{
struct request *req = skreq->req;
unsigned int io_flags = req->cmd_flags;
@@ -1072,8 +893,8 @@ static void skd_end_request_blk(struct skd_device *skdev,
__blk_end_request_all(skreq->req, error);
}
-static int skd_preop_sg_list_blk(struct skd_device *skdev,
- struct skd_request_context *skreq)
+static int skd_preop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq)
{
struct request *req = skreq->req;
int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
@@ -1133,8 +954,8 @@ static int skd_preop_sg_list_blk(struct skd_device *skdev,
return 0;
}
-static void skd_postop_sg_list_blk(struct skd_device *skdev,
- struct skd_request_context *skreq)
+static void skd_postop_sg_list(struct skd_device *skdev,
+ struct skd_request_context *skreq)
{
int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
@@ -1149,182 +970,6 @@ static void skd_postop_sg_list_blk(struct skd_device *skdev,
pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}
-static void skd_end_request_bio(struct skd_device *skdev,
- struct skd_request_context *skreq, int error)
-{
- struct bio *bio = skreq->bio;
- int rw = bio_data_dir(bio);
- unsigned long io_flags = bio->bi_rw;
-
- if ((io_flags & REQ_DISCARD) &&
- (skreq->discard_page == 1)) {
- VPRINTK(skdev, "biomode: skd_end_request: freeing DISCARD page.\n");
- free_page((unsigned long)page_address(bio->bi_io_vec->bv_page));
- }
-
- if (unlikely(error)) {
- u32 lba = (u32)skreq->bio->bi_sector;
- u32 count = bio_sectors(skreq->bio);
- char *cmd = (rw == WRITE) ? "write" : "read";
- pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
- skd_name(skdev), cmd, lba, count, skreq->id);
- }
- {
- int cpu = part_stat_lock();
-
- if (likely(!error)) {
- part_stat_inc(cpu, &skdev->disk->part0, ios[rw]);
- part_stat_add(cpu, &skdev->disk->part0, sectors[rw],
- bio_sectors(bio));
- }
- part_stat_add(cpu, &skdev->disk->part0, ticks[rw],
- jiffies - skreq->start_time);
- part_dec_in_flight(&skdev->disk->part0, rw);
- part_stat_unlock();
- }
-
- VPRINTK(skdev, "id=0x%x error=%d\n", skreq->id, error);
-
- bio_endio(skreq->bio, error);
-}
-
-static int skd_preop_sg_list_bio(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- struct bio *bio = skreq->bio;
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
- int n_sg;
- int i;
- struct bio_vec *vec;
- struct fit_sg_descriptor *sgd;
- u64 dma_addr;
- u32 count;
- int errs = 0;
- unsigned int io_flags = 0;
- io_flags |= bio->bi_rw;
-
- skreq->sg_byte_count = 0;
- n_sg = skreq->n_sg = skreq->bio->bi_vcnt;
-
- if (n_sg <= 0)
- return -EINVAL;
-
- if (n_sg > skdev->sgs_per_request) {
- pr_err("(%s): sg overflow n=%d\n",
- skd_name(skdev), n_sg);
- skreq->n_sg = 0;
- return -EIO;
- }
-
- for (i = 0; i < skreq->n_sg; i++) {
- vec = bio_iovec_idx(bio, i);
- dma_addr = pci_map_page(skdev->pdev,
- vec->bv_page,
- vec->bv_offset, vec->bv_len, pci_dir);
- count = vec->bv_len;
-
- if (count == 0 || count > 64u * 1024u || (count & 3) != 0
- || (dma_addr & 3) != 0) {
- pr_err(
- "(%s): Bad sg ix=%d count=%d addr=0x%llx\n",
- skd_name(skdev), i, count, dma_addr);
- errs++;
- }
-
- sgd = &skreq->sksg_list[i];
-
- sgd->control = FIT_SGD_CONTROL_NOT_LAST;
- sgd->byte_count = vec->bv_len;
- skreq->sg_byte_count += vec->bv_len;
- sgd->host_side_addr = dma_addr;
- sgd->dev_side_addr = 0; /* not used */
- }
-
- skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
- skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
-
-
- if (!(io_flags & REQ_DISCARD)) {
- count = bio_sectors(bio) << 9u;
- if (count != skreq->sg_byte_count) {
- pr_err("(%s): mismatch count sg=%d req=%d\n",
- skd_name(skdev), skreq->sg_byte_count, count);
- errs++;
- }
- }
-
- if (unlikely(skdev->dbg_level > 1)) {
- VPRINTK(skdev, "skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
- for (i = 0; i < n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- VPRINTK(skdev, " sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- if (errs != 0) {
- skd_postop_sg_list(skdev, skreq);
- skreq->n_sg = 0;
- return -EIO;
- }
-
- return 0;
-}
-
-static int skd_preop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- if (!skd_bio)
- return skd_preop_sg_list_blk(skdev, skreq);
- else
- return skd_preop_sg_list_bio(skdev, skreq);
-}
-
-static void skd_postop_sg_list_bio(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
- int i;
- struct fit_sg_descriptor *sgd;
-
- /*
- * restore the next ptr for next IO request so we
- * don't have to set it every time.
- */
- skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
- skreq->sksg_dma_address +
- ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
-
- for (i = 0; i < skreq->n_sg; i++) {
- sgd = &skreq->sksg_list[i];
- pci_unmap_page(skdev->pdev, sgd->host_side_addr,
- sgd->byte_count, pci_dir);
- }
-}
-
-static void skd_postop_sg_list(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- if (!skd_bio)
- skd_postop_sg_list_blk(skdev, skreq);
- else
- skd_postop_sg_list_bio(skdev, skreq);
-}
-
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, int error)
-{
- if (likely(!skd_bio))
- skd_end_request_blk(skdev, skreq, error);
- else
- skd_end_request_bio(skdev, skreq, error);
-}
-
static void skd_request_fn_not_online(struct request_queue *q)
{
struct skd_device *skdev = q->queuedata;
@@ -1426,7 +1071,7 @@ static void skd_timer_tick(ulong arg)
skdev->timer_countdown = SKD_DRAINING_TIMO;
skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
skdev->timo_slot = timo_slot;
- skd_stop_queue(skdev);
+ blk_stop_queue(skdev->queue);
timer_func_out:
mod_timer(&skdev->timer, (jiffies + HZ));
@@ -1482,7 +1127,7 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/*start the queue so we can respond with error to requests */
/* wakeup anyone waiting for startup complete */
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -1506,7 +1151,7 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
if (skdev->timeout_slot[skdev->timo_slot] == 0) {
DPRINTK(skdev, "Slot drained, starting queue.\n");
skdev->state = SKD_DRVR_STATE_ONLINE;
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
return;
}
if (skdev->timer_countdown > 0) {
@@ -1556,7 +1201,7 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/*start the queue so we can respond with error to requests */
/* wakeup anyone waiting for startup complete */
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -2581,10 +2226,6 @@ static void skd_complete_other(struct skd_device *skdev,
volatile struct fit_completion_entry_v1 *skcomp,
volatile struct fit_comp_error_info *skerr);
-
-static void skd_requeue_request(struct skd_device *skdev,
- struct skd_request_context *skreq);
-
struct sns_info {
u8 type;
u8 stat;
@@ -2703,7 +2344,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
- skd_requeue_request(skdev, skreq);
+ blk_requeue_request(skdev->queue, skreq->req);
pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
@@ -2711,13 +2352,10 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
break;
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if (!skd_bio) {
- if ((unsigned long) ++skreq->req->special <
- SKD_MAX_RETRIES) {
- skd_log_skreq(skdev, skreq, "retry");
- skd_requeue_request(skdev, skreq);
- break;
- }
+ if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+ skd_log_skreq(skdev, skreq, "retry");
+ blk_requeue_request(skdev->queue, skreq->req);
+ break;
}
/* fall through to report error */
@@ -2728,19 +2366,6 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
}
}
-static void skd_requeue_request(struct skd_device *skdev,
- struct skd_request_context *skreq)
-{
- if (!skd_bio) {
- blk_requeue_request(skdev->queue, skreq->req);
- } else {
- bio_list_add_head(&skdev->bio_queue, skreq->bio);
- skreq->bio = NULL;
- }
-}
-
-
-
/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
struct skd_request_context *skreq)
@@ -2797,11 +2422,7 @@ static void skd_release_skreq(struct skd_device *skdev,
/*
* Reset backpointer
*/
- if (likely(!skd_bio))
- skreq->req = NULL;
- else
- skreq->bio = NULL;
-
+ skreq->req = NULL;
/*
* Reclaim the skd_request_context
@@ -3118,8 +2739,7 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
if (skreq->n_sg > 0)
skd_postop_sg_list(skdev, skreq);
- if (((!skd_bio) && !skreq->req) ||
- ((skd_bio) && !skreq->bio)) {
+ if (!skreq->req) {
DPRINTK(skdev, "NULL backptr skdreq %p, "
"req=0x%x req_id=0x%x\n",
skreq, skreq->id, req_id);
@@ -3128,30 +2748,10 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
* Capture the outcome and post it back to the
* native request.
*/
- if (likely(cmp_status == SAM_STAT_GOOD)) {
- if (unlikely(skreq->flush_cmd)) {
- if (skd_bio) {
- /* if empty size bio, we are all done */
- if (bio_sectors(skreq->bio) == 0) {
- skd_end_request(skdev, skreq, 0);
- } else {
- ret = skd_flush_cmd_enqueue(skdev, (void *)skreq->bio);
- if (ret != 0) {
- pr_err("Failed to enqueue flush bio with Data. Err=%d.\n", ret);
- skd_end_request(skdev, skreq, ret);
- } else {
- ((*enqueued)++);
- }
- }
- } else {
- skd_end_request(skdev, skreq, 0);
- }
- } else {
- skd_end_request(skdev, skreq, 0);
- }
- } else {
+ if (likely(cmp_status == SAM_STAT_GOOD))
+ skd_end_request(skdev, skreq, 0);
+ else
skd_resolve_req_exception(skdev, skreq);
- }
}
/*
@@ -3508,7 +3108,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
*/
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
skdev->timer_countdown = SKD_TIMER_SECONDS(3);
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
break;
case FIT_SR_DRIVE_BUSY_ERASE:
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
@@ -3542,7 +3142,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
case FIT_SR_DRIVE_FAULT:
skd_drive_fault(skdev);
skd_recover_requests(skdev, 0);
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
break;
/* PCIe bus returned all Fs? */
@@ -3551,7 +3151,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
skd_name(skdev), state, sense);
skd_drive_disappeared(skdev);
skd_recover_requests(skdev, 0);
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
break;
default:
/*
@@ -3576,29 +3176,21 @@ static void skd_recover_requests(struct skd_device *skdev, int requeue)
skd_log_skreq(skdev, skreq, "recover");
SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
- if (!skd_bio)
- SKD_ASSERT(skreq->req != NULL);
- else
- SKD_ASSERT(skreq->bio != NULL);
+ SKD_ASSERT(skreq->req != NULL);
/* Release DMA resources for the request. */
if (skreq->n_sg > 0)
skd_postop_sg_list(skdev, skreq);
- if (!skd_bio) {
- if (requeue &&
- (unsigned long) ++skreq->req->special <
- SKD_MAX_RETRIES)
- skd_requeue_request(skdev, skreq);
- else
- skd_end_request(skdev, skreq, -EIO);
- } else
+ if (requeue &&
+ (unsigned long) ++skreq->req->special <
+ SKD_MAX_RETRIES)
+ blk_requeue_request(skdev->queue,
+ skreq->req);
+ else
skd_end_request(skdev, skreq, -EIO);
- if (!skd_bio)
- skreq->req = NULL;
- else
- skreq->bio = NULL;
+ skreq->req = NULL;
skreq->state = SKD_REQ_STATE_IDLE;
skreq->id += SKD_ID_INCR;
@@ -3872,7 +3464,7 @@ static void skd_start_device(struct skd_device *skdev)
skd_drive_fault(skdev);
/*start the queue so we can respond with error to requests */
VPRINTK(skdev, "starting %s queue\n", skdev->name);
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -3884,7 +3476,7 @@ static void skd_start_device(struct skd_device *skdev)
/*start the queue so we can respond with error to requests */
VPRINTK(skdev, "starting %s queue to error-out reqs\n",
skdev->name);
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -4022,7 +3614,7 @@ static int skd_quiesce_dev(struct skd_device *skdev)
case SKD_DRVR_STATE_BUSY:
case SKD_DRVR_STATE_BUSY_IMMINENT:
VPRINTK(skdev, "stopping %s queue\n", skdev->name);
- skd_stop_queue(skdev);
+ blk_stop_queue(skdev->queue);
break;
case SKD_DRVR_STATE_ONLINE:
case SKD_DRVR_STATE_STOPPING:
@@ -4086,7 +3678,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
DPRINTK(skdev, "**** device ONLINE...starting block queue\n");
VPRINTK(skdev, "starting %s queue\n", skdev->name);
pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
- skd_start_queue(skdev);
+ blk_start_queue(skdev->queue);
skdev->gendisk_on = 1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -4735,13 +4327,7 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->fops = &skd_blockdev_ops;
disk->private_data = skdev;
- if (!skd_bio) {
- q = blk_init_queue(skd_request_fn, &skdev->lock);
- } else {
- q = blk_alloc_queue(GFP_KERNEL);
- q->queue_flags = QUEUE_FLAG_IO_STAT | QUEUE_FLAG_STACKABLE;
- }
-
+ q = blk_init_queue(skd_request_fn, &skdev->lock);
if (!q) {
rc = -ENOMEM;
goto err_out;
@@ -4751,11 +4337,6 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->queue = q;
q->queuedata = skdev;
- if (skd_bio) {
- q->queue_lock = &skdev->lock;
- blk_queue_make_request(q, skd_make_request);
- }
-
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
blk_queue_max_segments(q, skdev->sgs_per_request);
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
@@ -4773,7 +4354,7 @@ static int skd_cons_disk(struct skd_device *skdev)
spin_lock_irqsave(&skdev->lock, flags);
VPRINTK(skdev, "stopping %s queue\n", skdev->name);
- skd_stop_queue(skdev);
+ blk_stop_queue(skdev->queue);
spin_unlock_irqrestore(&skdev->lock, flags);
err_out:
@@ -4814,16 +4395,11 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
skdev->sgs_per_request = skd_sgs_per_request;
skdev->dbg_level = skd_dbg_level;
- if (skd_bio)
- bio_list_init(&skdev->bio_queue);
-
-
atomic_set(&skdev->device_count, 0);
spin_lock_init(&skdev->lock);
INIT_WORK(&skdev->completion_worker, skd_completion_worker);
- INIT_LIST_HEAD(&skdev->flush_list);
VPRINTK(skdev, "skcomp\n");
rc = skd_cons_skcomp(skdev);
@@ -5183,18 +4759,6 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- if (!skd_flush_slab) {
- skd_flush_slab = kmem_cache_create(SKD_FLUSH_JOB,
- sizeof(struct skd_flush_cmd),
- 0, 0, NULL);
- if (!skd_flush_slab) {
- pr_err("(%s): failed to allocate flush slab\n",
- pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_regions;
- }
- }
-
if (!skd_major) {
rc = register_blkdev(0, DRV_NAME);
if (rc < 0)
@@ -5664,31 +5228,17 @@ static void skd_log_skreq(struct skd_device *skdev,
DPRINTK(skdev, " timo=0x%x sg_dir=%d n_sg=%d\n",
skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
- if (!skd_bio) {
- if (skreq->req != NULL) {
- struct request *req = skreq->req;
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- DPRINTK(skdev,
- " req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- req, lba, lba, count, count,
- (int)rq_data_dir(req));
- } else
- DPRINTK(skdev, " req=NULL\n");
- } else {
- if (skreq->bio != NULL) {
- struct bio *bio = skreq->bio;
- u32 lba = (u32)bio->bi_sector;
- u32 count = bio_sectors(bio);
+ if (skreq->req != NULL) {
+ struct request *req = skreq->req;
+ u32 lba = (u32)blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
- DPRINTK(skdev,
- " bio=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- bio, lba, lba, count, count,
- (int)bio_data_dir(bio));
- } else
- DPRINTK(skdev, " req=NULL\n");
- }
+ DPRINTK(skdev,
+ " req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
+ req, lba, lba, count, count,
+ (int)rq_data_dir(req));
+ } else
+ DPRINTK(skdev, " req=NULL\n");
}
/*
@@ -5760,9 +5310,6 @@ static void __exit skd_exit(void)
if (skd_major)
unregister_blkdev(skd_major, DRV_NAME);
-
- if (skd_flush_slab)
- kmem_cache_destroy(skd_flush_slab);
}
module_init(skd_init);
--
1.8.2.3
skdev->pdev is set to pdev twice in skd_pci_probe(): the first time
through the skd_construct() call and the second time directly in
the function. Remove the second assignment as it is not needed.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index e003137..9fe910d 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5239,7 +5239,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, skdev);
- skdev->pdev = pdev;
+
skdev->disk->driverfs_dev = &pdev->dev;
for (i = 0; i < SKD_MAX_BARS; i++) {
--
1.8.2.3
Register block device in skd_pci_probe() instead of in skd_init() so it
is registered only if some devices are present (currently it is always
registered when the driver is loaded). Please note that this change
depends on the fact that register_blkdev(0, ...) never returns 0.
Cc: Akhil Bhansali <[email protected]>
Cc: Jeff Moyer <[email protected]>
Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
Signed-off-by: Kyungmin Park <[email protected]>
---
drivers/block/skd_main.c | 32 +++++++++++---------------------
1 file changed, 11 insertions(+), 21 deletions(-)
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index e63a63c..738f847 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5215,6 +5215,14 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ if (!skd_major) {
+ rc = register_blkdev(0, DRV_NAME);
+ if (rc < 0)
+ goto err_out_regions;
+ BUG_ON(!rc);
+ skd_major = rc;
+ }
+
skdev = skd_construct(pdev);
if (skdev == NULL)
goto err_out_regions;
@@ -5711,8 +5719,6 @@ static void skd_log_skreq(struct skd_device *skdev,
static int __init skd_init(void)
{
- int rc = -ENOMEM;
-
pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
switch (skd_isr_type) {
@@ -5763,24 +5769,7 @@ static int __init skd_init(void)
skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
}
- /* Obtain major device number. */
- rc = register_blkdev(0, DRV_NAME);
- if (rc < 0)
- goto err_register_blkdev;
-
- skd_major = rc;
-
- rc = pci_register_driver(&skd_driver);
- if (rc < 0)
- goto err_pci_register_driver;
-
- return rc;
-
-err_pci_register_driver:
- unregister_blkdev(skd_major, DRV_NAME);
-
-err_register_blkdev:
- return rc;
+ return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
@@ -5789,7 +5778,8 @@ static void __exit skd_exit(void)
pci_unregister_driver(&skd_driver);
- unregister_blkdev(skd_major, DRV_NAME);
+ if (skd_major)
+ unregister_blkdev(skd_major, DRV_NAME);
if (skd_flush_slab)
kmem_cache_destroy(skd_flush_slab);
--
1.8.2.3
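For reference, the lazy registration relied on above works because
register_blkdev(0, name) either allocates and returns a dynamic major
(always non-zero) or returns a negative errno. A minimal sketch of that
pattern, using purely illustrative demo_* names rather than the actual
skd code:

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/pci.h>

static int demo_major;	/* 0 until the first device is probed */

static int demo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	if (!demo_major) {
		/*
		 * Major 0 asks the block core for a free dynamic major;
		 * on success the return value is that (non-zero) major,
		 * on failure a negative errno.
		 */
		rc = register_blkdev(0, "demo");
		if (rc < 0)
			return rc;
		demo_major = rc;
	}

	/* ... per-device setup (gendisk allocation etc.) goes here ... */
	return 0;
}

static void __exit demo_exit(void)
{
	/* Unregister only if a probe actually allocated the major. */
	if (demo_major)
		unregister_blkdev(demo_major, "demo");
}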
On Mon, Sep 30, 2013 at 03:25:47PM +0200, Bartlomiej Zolnierkiewicz wrote:
> This is not a SCSI host driver so remove SCSI subsystem specific
> includes.
The sad thing is that it is a driver for a device speaking SCSI, but, for
some reason that was never discussed, it is written against the block layer.
Hi Ramprasad,
On Tuesday, October 01, 2013 06:46:00 PM Ramprasad C wrote:
> Hi Bartlomiej,
>
> Thanks for your contributions to the skd driver.
>
> The reason why the skd driver has two code paths is:
> 1. For some workloads, such as random r/w, high queue depths and direct
> i/o, the bio code path (skd_bio=1) performs better than the default code.
> In the bio code path the driver bypasses the Linux block elevator +
> scheduler layers and 'struct bio' comes directly to the driver through
> make_request_fn().
Have you tried using the "noop" I/O scheduler to alleviate the issue?
You need the kernel compiled with CONFIG_IOSCHED_NOOP=y (it is enabled by
default) and then run e.g. "echo noop > /sys/block/skd0/queue/scheduler" to
enable the "noop" I/O scheduler for the skd0 block device.
> 2. For some workloads, like buffered (application) i/o, the default code
> path performs much better. Hence, both code paths are kept, selectable via
> a module parameter. We would prefer to keep both code paths.
It would be very useful to have some numbers for skd_bio=1 and skd_bio=0
(with the "noop" I/O scheduler enabled) for the affected workloads, to see
what kind of improvement is offered by skd_bio=1.
If the block layer queuing code is a problem even with the "noop" I/O
scheduler, it would be much better to try to identify and fix (if possible)
the problematic parts of the generic block layer code. This would benefit
all SSD hardware used on Linux, not only the one supported by the skd driver.
Also, it is very inflexible to require a driver re-load just to optimize
it for some workload. I believe that a better solution (runtime selection
of the mode of operation at the block layer level) could be provided if
really necessary.
Best regards,
--
Bartlomiej Zolnierkiewicz
Samsung R&D Institute Poland
Samsung Electronics
> Regards,
> Ramprasad
>
>
>
> On Mon, Sep 30, 2013 at 6:55 PM, Bartlomiej Zolnierkiewicz <
> [email protected]> wrote:
>
> > skd_bio feature adds a possibility to use the internal bio list to process
> > requests instead of using the normal block layer queueing functionality.
> > Its potential advantages are unclear and if there are any it is better to
> > identify and fix the block layer code deficiencies instead. Moreover it
> > introduces separate code-paths through the whole driver which are difficult
> > to test properly and maintain in the long-term. Since it is currently not
> > used unless explicitly enabled by module parameter just remove it.
> >
> > Cc: Akhil Bhansali <[email protected]>
> > Cc: Jeff Moyer <[email protected]>
> > Signed-off-by: Bartlomiej Zolnierkiewicz <[email protected]>
> > Signed-off-by: Kyungmin Park <[email protected]>
> > ---
> > drivers/block/skd_main.c | 605
> > ++++++-----------------------------------------
> > 1 file changed, 76 insertions(+), 529 deletions(-)
> -----Original Message-----
> From: Bartlomiej Zolnierkiewicz [mailto:[email protected]]
> Sent: Tuesday, October 01, 2013 10:01 PM
> To: Ramprasad C
> Cc: [email protected]; Akhil Bhansali; [email protected];
> [email protected]; [email protected]; OS
> Engineering; Amit Phansalkar
> Subject: Re: [PATCH 14/14] skd: remove skd_bio code
Hi Bartlomiej,
There was a performance gap with noop too. However, for noop with nomerge=1 and rotation=0, the performance difference is marginal.
We feel the skd_bio=1 code path can be removed, keeping the single (default) code path in the driver.
Regards,
Ramprasad
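For reference, the two code paths argued over in this thread map onto two
different queue models in the block layer of that kernel generation. A
minimal sketch of both set-ups, assuming the pre-blk-mq API and using
illustrative demo_* names rather than the actual skd code:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/*
 * Default model: requests are queued, merged and sorted by the elevator /
 * I/O scheduler and delivered to the driver through its request_fn.
 */
static void demo_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		/* ... issue req to hardware; complete it later with
		 * __blk_end_request_all() ... */
	}
}

/*
 * skd_bio-style model: every struct bio is handed straight to the driver,
 * bypassing the elevator and scheduler entirely.
 */
static void demo_make_request(struct request_queue *q, struct bio *bio)
{
	/* ... map the bio and issue it; complete it with bio_endio() ... */
}

static struct request_queue *demo_setup_queue(bool use_bio_path)
{
	struct request_queue *q;

	if (!use_bio_path)
		return blk_init_queue(demo_request_fn, &demo_lock);

	q = blk_alloc_queue(GFP_KERNEL);
	if (q)
		blk_queue_make_request(q, demo_make_request);
	return q;
}

With a request_fn-based queue the I/O scheduler choice (noop, cfq, deadline)
applies; with a make_request_fn-based queue there is no scheduler involved at
all, which is the difference the skd_bio=1 comparison is probing.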