From: "J. German Rivera"
Subject: [PATCH v2 7/7] staging: fsl-mc: Use DPMCP IRQ and completion var to wait for MC
Date: Wed, 6 May 2015 16:28:28 -0500
Message-ID: <1430947708-10521-8-git-send-email-German.Rivera@freescale.com>
X-Mailer: git-send-email 2.3.3
In-Reply-To: <1430947708-10521-1-git-send-email-German.Rivera@freescale.com>
References: <1430947708-10521-1-git-send-email-German.Rivera@freescale.com>
X-Mailing-List: linux-kernel@vger.kernel.org

- Refactored the fsl_mc_io object to have a DPMCP object attached to it.

- Created a DPMCP object for the DPRC's built-in portal, so that waiting
  for the completion of MC commands sent on the DPRC's built-in portal
  can be done using a DPMCP interrupt and a Linux completion variable.
  In most cases, mc_send_command() will now wait on this completion
  variable instead of polling. The completion variable is signaled from
  the DPMCP IRQ handler.

Signed-off-by: J.
German Rivera Reviewed-by: Stuart Yoder --- Changes in v2: - Use msecs_to_jiffies() instead of HZ in timeout-related expression drivers/staging/fsl-mc/bus/dprc-driver.c | 180 ++++++++++-- drivers/staging/fsl-mc/bus/mc-allocator.c | 107 +++---- drivers/staging/fsl-mc/bus/mc-bus.c | 40 ++- drivers/staging/fsl-mc/bus/mc-sys.c | 420 +++++++++++++++++++++++++--- drivers/staging/fsl-mc/include/mc-private.h | 6 +- drivers/staging/fsl-mc/include/mc-sys.h | 37 ++- 6 files changed, 674 insertions(+), 116 deletions(-) diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c index 5e24f46..db1296a 100644 --- a/drivers/staging/fsl-mc/bus/dprc-driver.c +++ b/drivers/staging/fsl-mc/bus/dprc-driver.c @@ -15,6 +15,7 @@ #include #include #include "dprc-cmd.h" +#include "dpmcp.h" struct dprc_child_objs { int child_count; @@ -323,7 +324,6 @@ static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) int error; unsigned int irq_count; struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); - struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); dprc_init_all_resource_pools(mc_bus_dev); @@ -336,7 +336,7 @@ static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) if (error < 0) return error; - if (mc->gic_supported && !mc_bus->irq_resources) { + if (fsl_mc_interrupts_supported() && !mc_bus->irq_resources) { irq_count += FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS; error = fsl_mc_populate_irq_pool(mc_bus, irq_count); if (error < 0) @@ -373,7 +373,8 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) struct fsl_mc_io *mc_io = mc_dev->mc_io; int irq_index = 0; - dev_dbg(dev, "DPRC IRQ %d\n", irq_num); + dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", + irq_num, smp_processor_id()); if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC))) return IRQ_HANDLED; @@ -445,7 +446,8 @@ static int disable_dprc_irqs(struct fsl_mc_device *mc_dev) error = dprc_set_irq_enable(mc_io, mc_dev->mc_handle, i, 0); if (error < 0) { dev_err(&mc_dev->dev, - "dprc_set_irq_enable() failed: %d\n", error); + "Disabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", + i, error); return error; } @@ -456,7 +458,8 @@ static int disable_dprc_irqs(struct fsl_mc_device *mc_dev) error = dprc_set_irq_mask(mc_io, mc_dev->mc_handle, i, 0x0); if (error < 0) { dev_err(&mc_dev->dev, - "dprc_set_irq_mask() failed: %d\n", error); + "Disabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", + i, error); return error; } @@ -468,8 +471,9 @@ static int disable_dprc_irqs(struct fsl_mc_device *mc_dev) ~0x0U); if (error < 0) { dev_err(&mc_dev->dev, - "dprc_clear_irq_status() failed: %d\n", - error); + "Disabling DPRC IRQ %d failed: dprc_clear_irq_status() failed: %d\n", + i, error); + return error; } } @@ -566,7 +570,8 @@ static int enable_dprc_irqs(struct fsl_mc_device *mc_dev) ~0x0u); if (error < 0) { dev_err(&mc_dev->dev, - "dprc_set_irq_mask() failed: %d\n", error); + "Enabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", + i, error); return error; } @@ -579,7 +584,8 @@ static int enable_dprc_irqs(struct fsl_mc_device *mc_dev) i, 1); if (error < 0) { dev_err(&mc_dev->dev, - "dprc_set_irq_enable() failed: %d\n", error); + "Enabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", + i, error); return error; } @@ -618,6 +624,95 @@ error_free_irqs: return error; } +/* + * Creates a DPMCP for a DPRC's built-in MC portal + */ +static int dprc_create_dpmcp(struct fsl_mc_device *dprc_dev) +{ + int error; + struct dpmcp_cfg dpmcp_cfg; + uint16_t dpmcp_handle; + 
struct dprc_res_req res_req; + struct dpmcp_attr dpmcp_attr; + struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); + + dpmcp_cfg.portal_id = mc_bus->dprc_attr.portal_id; + error = dpmcp_create(dprc_dev->mc_io, &dpmcp_cfg, &dpmcp_handle); + if (error < 0) { + dev_err(&dprc_dev->dev, "dpmcp_create() failed: %d\n", + error); + return error; + } + + /* + * Set the state of the newly created DPMCP object to be "plugged": + */ + + error = dpmcp_get_attributes(dprc_dev->mc_io, dpmcp_handle, + &dpmcp_attr); + if (error < 0) { + dev_err(&dprc_dev->dev, "dpmcp_get_attributes() failed: %d\n", + error); + goto error_destroy_dpmcp; + } + + if (WARN_ON(dpmcp_attr.id != mc_bus->dprc_attr.portal_id)) { + error = -EINVAL; + goto error_destroy_dpmcp; + } + + strcpy(res_req.type, "dpmcp"); + res_req.num = 1; + res_req.options = + (DPRC_RES_REQ_OPT_EXPLICIT | DPRC_RES_REQ_OPT_PLUGGED); + res_req.id_base_align = dpmcp_attr.id; + + error = dprc_assign(dprc_dev->mc_io, + dprc_dev->mc_handle, + dprc_dev->obj_desc.id, + &res_req); + + if (error < 0) { + dev_err(&dprc_dev->dev, "dprc_assign() failed: %d\n", error); + goto error_destroy_dpmcp; + } + + (void)dpmcp_close(dprc_dev->mc_io, dpmcp_handle); + return 0; + +error_destroy_dpmcp: + (void)dpmcp_destroy(dprc_dev->mc_io, dpmcp_handle); + return error; +} + +/* + * Destroys the DPMCP for a DPRC's built-in MC portal + */ +static void dprc_destroy_dpmcp(struct fsl_mc_device *dprc_dev) +{ + int error; + uint16_t dpmcp_handle; + struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); + + if (WARN_ON(!dprc_dev->mc_io || dprc_dev->mc_io->dpmcp_dev)) + return; + + error = dpmcp_open(dprc_dev->mc_io, mc_bus->dprc_attr.portal_id, + &dpmcp_handle); + if (error < 0) { + dev_err(&dprc_dev->dev, "dpmcp_open() failed: %d\n", + error); + return; + } + + error = dpmcp_destroy(dprc_dev->mc_io, dpmcp_handle); + if (error < 0) { + dev_err(&dprc_dev->dev, "dpmcp_destroy() failed: %d\n", + error); + return; + } +} + /** * dprc_probe - callback invoked when a DPRC is being bound to this driver * @@ -633,7 +728,7 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) int error; size_t region_size; struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); - struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); + bool mc_io_created = false; if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) return -EINVAL; @@ -654,6 +749,8 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) NULL, 0, &mc_dev->mc_io); if (error < 0) return error; + + mc_io_created = true; } error = dprc_open(mc_dev->mc_io, mc_dev->obj_desc.id, @@ -663,16 +760,55 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) goto error_cleanup_mc_io; } + error = dprc_get_attributes(mc_dev->mc_io, mc_dev->mc_handle, + &mc_bus->dprc_attr); + if (error < 0) { + dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n", + error); + goto error_cleanup_open; + } + + if (fsl_mc_interrupts_supported()) { + /* + * Create DPMCP for the DPRC's built-in portal: + */ + error = dprc_create_dpmcp(mc_dev); + if (error < 0) + goto error_cleanup_open; + } + mutex_init(&mc_bus->scan_mutex); /* - * Discover MC objects in DPRC object: + * Discover MC objects in the DPRC object: */ error = dprc_scan_container(mc_dev); if (error < 0) - goto error_cleanup_open; + goto error_destroy_dpmcp; + + if (fsl_mc_interrupts_supported()) { + /* + * The fsl_mc_device object associated with the DPMCP object + * created above was created as part of the + * dprc_scan_container() call above: + */ + if (WARN_ON(!mc_dev->mc_io->dpmcp_dev)) { + error = 
-EINVAL; + goto error_cleanup_dprc_scan; + } + + /* + * Configure interrupt for the DPMCP object associated with the + * DPRC object's built-in portal: + * + * NOTE: We have to do this after calling dprc_scan_container(), + * since dprc_scan_container() will populate the IRQ pool for + * this DPRC. + */ + error = fsl_mc_io_setup_dpmcp_irq(mc_dev->mc_io); + if (error < 0) + goto error_cleanup_dprc_scan; - if (mc->gic_supported) { /* * Configure interrupts for the DPRC object associated with * this MC bus: @@ -686,15 +822,22 @@ static int dprc_probe(struct fsl_mc_device *mc_dev) return 0; error_cleanup_dprc_scan: + fsl_mc_io_unset_dpmcp(mc_dev->mc_io); device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); - if (mc->gic_supported) + if (fsl_mc_interrupts_supported()) fsl_mc_cleanup_irq_pool(mc_bus); +error_destroy_dpmcp: + dprc_destroy_dpmcp(mc_dev); + error_cleanup_open: (void)dprc_close(mc_dev->mc_io, mc_dev->mc_handle); error_cleanup_mc_io: - fsl_destroy_mc_io(mc_dev->mc_io); + if (mc_io_created) { + fsl_destroy_mc_io(mc_dev->mc_io); + mc_dev->mc_io = NULL; + } return error; } @@ -721,7 +864,6 @@ static int dprc_remove(struct fsl_mc_device *mc_dev) { int error; struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); - struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) return -EINVAL; @@ -731,15 +873,17 @@ static int dprc_remove(struct fsl_mc_device *mc_dev) if (WARN_ON(!mc_bus->irq_resources)) return -EINVAL; - if (mc->gic_supported) + if (fsl_mc_interrupts_supported()) dprc_teardown_irqs(mc_dev); + fsl_mc_io_unset_dpmcp(mc_dev->mc_io); device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); + dprc_destroy_dpmcp(mc_dev); error = dprc_close(mc_dev->mc_io, mc_dev->mc_handle); if (error < 0) dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); - if (mc->gic_supported) + if (fsl_mc_interrupts_supported()) fsl_mc_cleanup_irq_pool(mc_bus); dev_info(&mc_dev->dev, "DPRC device unbound from driver"); diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c index 3bdfefb..15409ff 100644 --- a/drivers/staging/fsl-mc/bus/mc-allocator.c +++ b/drivers/staging/fsl-mc/bus/mc-allocator.c @@ -109,7 +109,7 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device goto out; resource = mc_dev->resource; - if (WARN_ON(resource->data != mc_dev)) + if (WARN_ON(!resource || resource->data != mc_dev)) goto out; mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); @@ -281,7 +281,7 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, struct fsl_mc_bus *mc_bus; phys_addr_t mc_portal_phys_addr; size_t mc_portal_size; - struct fsl_mc_device *mc_adev; + struct fsl_mc_device *dpmcp_dev; int error = -EINVAL; struct fsl_mc_resource *resource = NULL; struct fsl_mc_io *mc_io = NULL; @@ -301,23 +301,24 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, if (error < 0) return error; - mc_adev = resource->data; - if (WARN_ON(!mc_adev)) + dpmcp_dev = resource->data; + if (WARN_ON(!dpmcp_dev || + strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) goto error_cleanup_resource; - if (WARN_ON(mc_adev->obj_desc.region_count == 0)) + if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0)) goto error_cleanup_resource; - mc_portal_phys_addr = mc_adev->regions[0].start; - mc_portal_size = mc_adev->regions[0].end - - mc_adev->regions[0].start + 1; + mc_portal_phys_addr = dpmcp_dev->regions[0].start; + mc_portal_size = 
dpmcp_dev->regions[0].end - + dpmcp_dev->regions[0].start + 1; if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size)) goto error_cleanup_resource; error = fsl_create_mc_io(&mc_bus_dev->dev, mc_portal_phys_addr, - mc_portal_size, resource, + mc_portal_size, dpmcp_dev, mc_io_flags, &mc_io); if (error < 0) goto error_cleanup_resource; @@ -339,12 +340,26 @@ EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate); */ void fsl_mc_portal_free(struct fsl_mc_io *mc_io) { + struct fsl_mc_device *dpmcp_dev; struct fsl_mc_resource *resource; - resource = mc_io->resource; - if (WARN_ON(resource->type != FSL_MC_POOL_DPMCP)) + /* + * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed + * to have a DPMCP object associated with. + */ + dpmcp_dev = mc_io->dpmcp_dev; + if (WARN_ON(!dpmcp_dev)) + return; + if (WARN_ON(strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) + return; + if (WARN_ON(dpmcp_dev->mc_io != mc_io)) + return; + + resource = dpmcp_dev->resource; + if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP)) return; - if (WARN_ON(!resource->data)) + + if (WARN_ON(resource->data != dpmcp_dev)) return; fsl_destroy_mc_io(mc_io); @@ -360,31 +375,14 @@ EXPORT_SYMBOL_GPL(fsl_mc_portal_free); int fsl_mc_portal_reset(struct fsl_mc_io *mc_io) { int error; - uint16_t token; - struct fsl_mc_resource *resource = mc_io->resource; - struct fsl_mc_device *mc_dev = resource->data; - - if (WARN_ON(resource->type != FSL_MC_POOL_DPMCP)) - return -EINVAL; + struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; - if (WARN_ON(!mc_dev)) + if (WARN_ON(!dpmcp_dev)) return -EINVAL; - error = dpmcp_open(mc_io, mc_dev->obj_desc.id, &token); + error = dpmcp_reset(mc_io, dpmcp_dev->mc_handle); if (error < 0) { - dev_err(&mc_dev->dev, "dpmcp_open() failed: %d\n", error); - return error; - } - - error = dpmcp_reset(mc_io, token); - if (error < 0) { - dev_err(&mc_dev->dev, "dpmcp_reset() failed: %d\n", error); - return error; - } - - error = dpmcp_close(mc_io, token); - if (error < 0) { - dev_err(&mc_dev->dev, "dpmcp_close() failed: %d\n", error); + dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error); return error; } @@ -599,13 +597,28 @@ static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev) goto error; mc_bus = to_fsl_mc_bus(mc_bus_dev); - error = object_type_to_pool_type(mc_dev->obj_desc.type, &pool_type); - if (error < 0) - goto error; - error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, mc_dev); - if (error < 0) - goto error; + /* + * If mc_dev is the DPMCP object for the parent DPRC's built-in + * portal, we don't add this DPMCP to the DPMCP object pool, + * but instead allocate it directly to the parent DPRC (mc_bus_dev): + */ + if (strcmp(mc_dev->obj_desc.type, "dpmcp") == 0 && + mc_dev->obj_desc.id == mc_bus->dprc_attr.portal_id) { + error = fsl_mc_io_set_dpmcp(mc_bus_dev->mc_io, mc_dev); + if (error < 0) + goto error; + } else { + error = object_type_to_pool_type(mc_dev->obj_desc.type, + &pool_type); + if (error < 0) + goto error; + + error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, + mc_dev); + if (error < 0) + goto error; + } dev_dbg(&mc_dev->dev, "Allocatable MC object device bound to fsl_mc_allocator driver"); @@ -621,20 +634,20 @@ error: */ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev) { - int error = -EINVAL; + int error; if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) - goto out; + return -EINVAL; - error = fsl_mc_resource_pool_remove_device(mc_dev); - if (error < 0) - goto out; + if (mc_dev->resource) { + error = 
fsl_mc_resource_pool_remove_device(mc_dev); + if (error < 0) + return error; + } dev_dbg(&mc_dev->dev, "Allocatable MC object device unbound from fsl_mc_allocator driver"); - error = 0; -out: - return error; + return 0; } static const struct fsl_mc_device_match_id match_id_table[] = { diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c index 36116d7..6e25fd9 100644 --- a/drivers/staging/fsl-mc/bus/mc-bus.c +++ b/drivers/staging/fsl-mc/bus/mc-bus.c @@ -601,6 +601,11 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) if (&mc_dev->dev == fsl_mc_bus_type.dev_root) fsl_mc_bus_type.dev_root = NULL; + } else if (strcmp(mc_dev->obj_desc.type, "dpmcp") == 0) { + if (mc_dev->mc_io) { + fsl_destroy_mc_io(mc_dev->mc_io); + mc_dev->mc_io = NULL; + } } kfree(mc_dev->driver_override); @@ -680,19 +685,23 @@ static void mc_bus_msi_domain_write_msg(struct irq_data *irq_data, struct fsl_mc_device_irq *irq_res = &mc_bus->irq_resources[msi_entry->msi_attrib.entry_nr]; + /* + * NOTE: This function is invoked with interrupts disabled + */ + if (irq_res->irq_number == irq_data->irq) { /* * write msg->address_hi/lo to irq_resource */ irq_res->msi_paddr = ((u64)msg->address_hi << 32) | msg->address_lo; + irq_res->msi_value = msg->data; /* - * NOTE: We cannot do the actual programming of the MSI - * in the MC, in this function, as the object-independent - * way of programming MSIs for MC objects is not reliable - * if objects are being added/removed dynamically. + * NOTE: We cannot do the actual programming of the MSI, + * in the MC, here, because calling dprc_obj_set_irq() + * is not reliable, as object indexes may change under us. */ } } @@ -848,7 +857,12 @@ int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, dev_err(&mc_bus_dev->dev, "Failed to allocate IRQs\n"); goto cleanup_msi_entries; } +#endif + /* + * FIXME: Enable this code when the GIC-ITS MC support patch is merged + */ +#ifdef GIC_ITS_MC_SUPPORT for_each_msi_entry(msi_entry, &mc_bus_dev->dev) { u32 irq_num = msi_entry->irq; @@ -1140,7 +1154,14 @@ error_cleanup_mc_io: fsl_destroy_mc_io(mc_io); error_cleanup_irq_domain: - irq_domain_remove(mc->irq_domain); + /* + * FIXME: Enable this code when the GIC-ITS MC support patch is merged + */ +#ifdef GIC_ITS_MC_SUPPORT + if (mc->gic_supported) + irq_domain_remove(mc->irq_domain); +#endif + return error; } @@ -1155,7 +1176,14 @@ static int fsl_mc_bus_remove(struct platform_device *pdev) if (WARN_ON(&mc->root_mc_bus_dev->dev != fsl_mc_bus_type.dev_root)) return -EINVAL; - irq_domain_remove(mc->irq_domain); + /* + * FIXME: Enable this code when the GIC-ITS MC support patch is merged + */ +#ifdef GIC_ITS_MC_SUPPORT + if (mc->gic_supported) + irq_domain_remove(mc->irq_domain); +#endif + fsl_mc_device_remove(mc->root_mc_bus_dev); dev_info(&pdev->dev, "Root MC bus device removed"); return 0; diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c index 2b3d18c..1af35a1 100644 --- a/drivers/staging/fsl-mc/bus/mc-sys.c +++ b/drivers/staging/fsl-mc/bus/mc-sys.c @@ -34,10 +34,13 @@ #include "../include/mc-sys.h" #include "../include/mc-cmd.h" +#include "../include/mc.h" #include #include #include #include +#include +#include "dpmcp.h" /** * Timeout in milliseconds to wait for the completion of an MC command @@ -55,6 +58,230 @@ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S)) /** + * dpmcp_irq0_handler - Regular ISR for DPMCP interrupt 0 + * + * @irq: IRQ number of the interrupt being handled + * @arg: Pointer to 
device structure + */ +static irqreturn_t dpmcp_irq0_handler(int irq_num, void *arg) +{ + struct device *dev = (struct device *)arg; + struct fsl_mc_device *dpmcp_dev = to_fsl_mc_device(dev); + + dev_dbg(dev, "DPMCP IRQ %d triggered on CPU %u\n", irq_num, + smp_processor_id()); + + if (WARN_ON(dpmcp_dev->irqs[0]->irq_number != (uint32_t)irq_num)) + goto out; + + if (WARN_ON(!dpmcp_dev->mc_io)) + goto out; + + /* + * NOTE: We cannot invoke MC flib function here + */ + + complete(&dpmcp_dev->mc_io->mc_command_done_completion); +out: + return IRQ_HANDLED; +} + +/* + * Disable and clear interrupts for a given DPMCP object + */ +static int disable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) +{ + int error; + + /* + * Disable generation of the DPMCP interrupt: + */ + error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, + dpmcp_dev->mc_handle, + DPMCP_IRQ_INDEX, 0); + if (error < 0) { + dev_err(&dpmcp_dev->dev, + "dpmcp_set_irq_enable() failed: %d\n", error); + + return error; + } + + /* + * Disable all DPMCP interrupt causes: + */ + error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, dpmcp_dev->mc_handle, + DPMCP_IRQ_INDEX, 0x0); + if (error < 0) { + dev_err(&dpmcp_dev->dev, + "dpmcp_set_irq_mask() failed: %d\n", error); + + return error; + } + + /* + * Clear any leftover interrupts: + */ + error = dpmcp_clear_irq_status(dpmcp_dev->mc_io, dpmcp_dev->mc_handle, + DPMCP_IRQ_INDEX, ~0x0U); + if (error < 0) { + dev_err(&dpmcp_dev->dev, + "dpmcp_clear_irq_status() failed: %d\n", + error); + return error; + } + + return 0; +} + +static void unregister_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) +{ + struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; + + devm_free_irq(&dpmcp_dev->dev, irq->irq_number, &dpmcp_dev->dev); +} + +static int register_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) +{ + int error; + struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; + + error = devm_request_irq(&dpmcp_dev->dev, + irq->irq_number, + dpmcp_irq0_handler, + IRQF_NO_SUSPEND | IRQF_ONESHOT, + "FSL MC DPMCP irq0", + &dpmcp_dev->dev); + if (error < 0) { + dev_err(&dpmcp_dev->dev, + "devm_request_irq() failed: %d\n", + error); + return error; + } + + error = dpmcp_set_irq(dpmcp_dev->mc_io, + dpmcp_dev->mc_handle, + DPMCP_IRQ_INDEX, + irq->msi_paddr, + irq->msi_value, + irq->irq_number); + if (error < 0) { + dev_err(&dpmcp_dev->dev, + "dpmcp_set_irq() failed: %d\n", error); + goto error_unregister_irq_handler; + } + + return 0; + +error_unregister_irq_handler: + devm_free_irq(&dpmcp_dev->dev, irq->irq_number, &dpmcp_dev->dev); + return error; +} + +static int enable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) +{ + int error; + + /* + * Enable MC command completion event to trigger DPMCP interrupt: + */ + error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, + dpmcp_dev->mc_handle, + DPMCP_IRQ_INDEX, + DPMCP_IRQ_EVENT_CMD_DONE); + if (error < 0) { + dev_err(&dpmcp_dev->dev, + "dpmcp_set_irq_mask() failed: %d\n", error); + + return error; + } + + /* + * Enable generation of the interrupt: + */ + error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, + dpmcp_dev->mc_handle, + DPMCP_IRQ_INDEX, 1); + if (error < 0) { + dev_err(&dpmcp_dev->dev, + "dpmcp_set_irq_enable() failed: %d\n", error); + + return error; + } + + return 0; +} + +/* + * Setup MC command completion interrupt for the DPMCP device associated with a + * given fsl_mc_io object + */ +int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io) +{ + int error; + struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; + + if (WARN_ON(!dpmcp_dev)) + return -EINVAL; 
+ + if (WARN_ON(!fsl_mc_interrupts_supported())) + return -EINVAL; + + if (WARN_ON(dpmcp_dev->obj_desc.irq_count != 1)) + return -EINVAL; + + if (WARN_ON(!dpmcp_dev->mc_io)) + return -EINVAL; + + error = fsl_mc_allocate_irqs(dpmcp_dev); + if (error < 0) + return error; + + error = disable_dpmcp_irq(dpmcp_dev); + if (error < 0) + goto error_free_irqs; + + error = register_dpmcp_irq_handler(dpmcp_dev); + if (error < 0) + goto error_free_irqs; + + error = enable_dpmcp_irq(dpmcp_dev); + if (error < 0) + goto error_unregister_irq_handler; + + mc_io->mc_command_done_irq_armed = true; + return 0; + +error_unregister_irq_handler: + unregister_dpmcp_irq_handler(dpmcp_dev); + +error_free_irqs: + fsl_mc_free_irqs(dpmcp_dev); + return error; +} +EXPORT_SYMBOL_GPL(fsl_mc_io_setup_dpmcp_irq); + +/* + * Tear down interrupts for the DPMCP device associated with a given fsl_mc_io + * object + */ +static void teardown_dpmcp_irq(struct fsl_mc_io *mc_io) +{ + struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; + + if (WARN_ON(!dpmcp_dev)) + return; + if (WARN_ON(!fsl_mc_interrupts_supported())) + return; + if (WARN_ON(!dpmcp_dev->irqs)) + return; + + mc_io->mc_command_done_irq_armed = false; + (void)disable_dpmcp_irq(dpmcp_dev); + unregister_dpmcp_irq_handler(dpmcp_dev); + fsl_mc_free_irqs(dpmcp_dev); +} + +/** * Creates an MC I/O object * * @dev: device to be associated with the MC I/O object @@ -70,9 +297,10 @@ int __must_check fsl_create_mc_io(struct device *dev, phys_addr_t mc_portal_phys_addr, uint32_t mc_portal_size, - struct fsl_mc_resource *resource, + struct fsl_mc_device *dpmcp_dev, uint32_t flags, struct fsl_mc_io **new_mc_io) { + int error; struct fsl_mc_io *mc_io; void __iomem *mc_portal_virt_addr; struct resource *res; @@ -85,11 +313,13 @@ int __must_check fsl_create_mc_io(struct device *dev, mc_io->flags = flags; mc_io->portal_phys_addr = mc_portal_phys_addr; mc_io->portal_size = mc_portal_size; - mc_io->resource = resource; - if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) + mc_io->mc_command_done_irq_armed = false; + if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) { spin_lock_init(&mc_io->spinlock); - else + } else { mutex_init(&mc_io->mutex); + init_completion(&mc_io->mc_command_done_completion); + } res = devm_request_mem_region(dev, mc_portal_phys_addr, @@ -113,8 +343,26 @@ int __must_check fsl_create_mc_io(struct device *dev, } mc_io->portal_virt_addr = mc_portal_virt_addr; + if (dpmcp_dev) { + error = fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev); + if (error < 0) + goto error_destroy_mc_io; + + if (!(flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) && + fsl_mc_interrupts_supported()) { + error = fsl_mc_io_setup_dpmcp_irq(mc_io); + if (error < 0) + goto error_destroy_mc_io; + } + } + *new_mc_io = mc_io; return 0; + +error_destroy_mc_io: + fsl_destroy_mc_io(mc_io); + return error; + } EXPORT_SYMBOL_GPL(fsl_create_mc_io); @@ -125,6 +373,11 @@ EXPORT_SYMBOL_GPL(fsl_create_mc_io); */ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) { + struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; + + if (dpmcp_dev) + fsl_mc_io_unset_dpmcp(mc_io); + devm_iounmap(mc_io->dev, mc_io->portal_virt_addr); devm_release_mem_region(mc_io->dev, mc_io->portal_phys_addr, @@ -135,6 +388,60 @@ void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) } EXPORT_SYMBOL_GPL(fsl_destroy_mc_io); +int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, + struct fsl_mc_device *dpmcp_dev) +{ + int error; + + if (WARN_ON(!dpmcp_dev)) + return -EINVAL; + + if (WARN_ON(mc_io->dpmcp_dev)) + return -EINVAL; + + if (WARN_ON(dpmcp_dev->mc_io)) + return -EINVAL; + + if 
(!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { + error = dpmcp_open(mc_io, dpmcp_dev->obj_desc.id, + &dpmcp_dev->mc_handle); + if (error < 0) + return error; + } + + mc_io->dpmcp_dev = dpmcp_dev; + dpmcp_dev->mc_io = mc_io; + return 0; +} +EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp); + +void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io) +{ + int error; + struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; + + if (WARN_ON(!dpmcp_dev)) + return; + + if (WARN_ON(dpmcp_dev->mc_io != mc_io)) + return; + + if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { + if (dpmcp_dev->irqs) + teardown_dpmcp_irq(mc_io); + + error = dpmcp_close(mc_io, dpmcp_dev->mc_handle); + if (error < 0) { + dev_err(&dpmcp_dev->dev, "dpmcp_close() failed: %d\n", + error); + } + } + + mc_io->dpmcp_dev = NULL; + dpmcp_dev->mc_io = NULL; +} +EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp); + static int mc_status_to_error(enum mc_cmd_status status) { static const int mc_status_to_error_map[] = { @@ -228,46 +535,51 @@ static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem * return status; } -/** - * Sends an command to the MC device using the given MC I/O object - * - * @mc_io: MC I/O object to be used - * @cmd: command to be sent - * - * Returns '0' on Success; Error code otherwise. - */ -int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) +static int mc_completion_wait(struct fsl_mc_io *mc_io, struct mc_command *cmd, + enum mc_cmd_status *mc_status) { - int error; enum mc_cmd_status status; - unsigned long jiffies_until_timeout = - jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); + unsigned long jiffies_left; + unsigned long timeout_jiffies = + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); - if (WARN_ON(in_irq())) + if (WARN_ON(!mc_io->dpmcp_dev)) return -EINVAL; - if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) - spin_lock(&mc_io->spinlock); - else - mutex_lock(&mc_io->mutex); + if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) + return -EINVAL; - /* - * Send command to the MC hardware: - */ - mc_write_command(mc_io->portal_virt_addr, cmd); + if (WARN_ON(!preemptible())) + return -EINVAL; + + for (;;) { + status = mc_read_response(mc_io->portal_virt_addr, cmd); + if (status != MC_CMD_STATUS_READY) + break; + + jiffies_left = wait_for_completion_timeout( + &mc_io->mc_command_done_completion, + timeout_jiffies); + if (jiffies_left == 0) + return -ETIMEDOUT; + } + + *mc_status = status; + return 0; +} + +static int mc_polling_wait(struct fsl_mc_io *mc_io, struct mc_command *cmd, + enum mc_cmd_status *mc_status) +{ + enum mc_cmd_status status; + unsigned long jiffies_until_timeout = + jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); - /* - * Wait for response from the MC hardware: - */ for (;;) { status = mc_read_response(mc_io->portal_virt_addr, cmd); if (status != MC_CMD_STATUS_READY) break; - /* - * TODO: When MC command completion interrupts are supported - * call wait function here instead of usleep_range() - */ if (preemptible()) { usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS, MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); @@ -283,11 +595,51 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) (unsigned int) MC_CMD_HDR_READ_CMDID(cmd->header)); - error = -ETIMEDOUT; - goto common_exit; + return -ETIMEDOUT; } } + *mc_status = status; + return 0; +} + +/** + * Sends a command to the MC device using the given MC I/O object + * + * @mc_io: MC I/O object to be used + * @cmd: command to be sent + * + * Returns '0' on Success; 
Error code otherwise. + */ +int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) +{ + int error; + enum mc_cmd_status status; + + if (WARN_ON(in_irq())) + return -EINVAL; + + if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) + spin_lock(&mc_io->spinlock); + else + mutex_lock(&mc_io->mutex); + + /* + * Send command to the MC hardware: + */ + mc_write_command(mc_io->portal_virt_addr, cmd); + + /* + * Wait for response from the MC hardware: + */ + if (mc_io->mc_command_done_irq_armed) + error = mc_completion_wait(mc_io, cmd, &status); + else + error = mc_polling_wait(mc_io, cmd, &status); + + if (error < 0) + goto common_exit; + if (status != MC_CMD_STATUS_OK) { pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", mc_io->portal_phys_addr, diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h index 0757258..90b059a 100644 --- a/drivers/staging/fsl-mc/include/mc-private.h +++ b/drivers/staging/fsl-mc/include/mc-private.h @@ -111,12 +111,14 @@ struct fsl_mc_resource_pool { * from the physical DPRC. * @irq_resources: Pointer to array of IRQ objects for the IRQ pool. * @scan_mutex: Serializes bus scanning + * @dprc_attr: DPRC attributes */ struct fsl_mc_bus { struct fsl_mc_device mc_dev; struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; struct fsl_mc_device_irq *irq_resources; struct mutex scan_mutex; /* serializes bus scanning */ + struct dprc_attributes dprc_attr; }; #define to_fsl_mc_bus(_mc_dev) \ @@ -134,10 +136,6 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, const char *driver_override, unsigned int *total_irq_count); -int dprc_lookup_object(struct fsl_mc_device *mc_bus_dev, - struct fsl_mc_device *child_dev, - uint32_t *child_obj_index); - int __init dprc_driver_init(void); void __exit dprc_driver_exit(void); diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h index 7ea12a6..fbf4b73 100644 --- a/drivers/staging/fsl-mc/include/mc-sys.h +++ b/drivers/staging/fsl-mc/include/mc-sys.h @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -57,9 +58,11 @@ struct mc_command; * @portal_size: MC command portal size in bytes * @portal_phys_addr: MC command portal physical address * @portal_virt_addr: MC command portal virtual address - * @resource: generic resource associated with the MC portal if - * the MC portal came from a resource pool, or NULL if the MC portal - * is permanently bound to a device (e.g., a DPRC) + * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal. + * @mc_command_done_irq_armed: Flag indicating that the MC command done IRQ + * is currently armed. + * @mc_command_done_completion: Completion variable to be signaled when an MC + * command sent to the MC fw is completed. * @mutex: Mutex to serialize mc_send_command() calls that use the same MC * portal, if the fsl_mc_io object was created with the * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. 
mc_send_command() calls for this
@@ -75,21 +78,41 @@ struct fsl_mc_io {
 	uint16_t portal_size;
 	phys_addr_t portal_phys_addr;
 	void __iomem *portal_virt_addr;
-	struct fsl_mc_resource *resource;
+	struct fsl_mc_device *dpmcp_dev;
 	union {
-		struct mutex mutex;	/* serializes mc_send_command() calls */
-		spinlock_t spinlock;	/* serializes mc_send_command() calls */
+		/*
+		 * These fields are only meaningful if the
+		 * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set
+		 */
+		struct {
+			struct mutex mutex;	/* serializes mc_send_command() */
+			struct completion mc_command_done_completion;
+			bool mc_command_done_irq_armed;
+		};
+
+		/*
+		 * This field is only meaningful if the
+		 * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set
+		 */
+		spinlock_t spinlock;	/* serializes mc_send_command() */
 	};
 };
 
 int __must_check fsl_create_mc_io(struct device *dev,
 				  phys_addr_t mc_portal_phys_addr,
 				  uint32_t mc_portal_size,
-				  struct fsl_mc_resource *resource,
+				  struct fsl_mc_device *dpmcp_dev,
 				  uint32_t flags, struct fsl_mc_io **new_mc_io);
 
 void fsl_destroy_mc_io(struct fsl_mc_io *mc_io);
 
+int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io,
+			struct fsl_mc_device *dpmcp_dev);
+
+void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io);
+
+int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io);
+
 int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd);
 
 #endif /* _FSL_MC_SYS_H */
-- 
2.3.3
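
For reviewers skimming the diff: the mc-sys.c changes boil down to one pattern — an IRQ handler that only signals a completion variable, and a waiter that sleeps with a timeout and re-checks the hardware state after each wakeup, falling back to polling when the IRQ is not armed. Below is a minimal sketch of that pattern against the stock Linux completion API. It is an editor's illustration only, not code from this patch; every my_* identifier and MY_CMD_TIMEOUT_MS is made up for the example.

#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/compiler.h>

/* Illustrative timeout; not the MC_CMD_COMPLETION_TIMEOUT_MS value */
#define MY_CMD_TIMEOUT_MS	500

struct my_cmd_ctx {
	struct completion cmd_done;	/* signaled from the IRQ handler */
	bool hw_done;			/* stands in for reading the MC response */
};

static void my_cmd_ctx_init(struct my_cmd_ctx *ctx)
{
	init_completion(&ctx->cmd_done);
	ctx->hw_done = false;
}

/* IRQ handler: just wake the waiter, do no command I/O here */
static irqreturn_t my_cmd_done_irq(int irq, void *arg)
{
	struct my_cmd_ctx *ctx = arg;

	WRITE_ONCE(ctx->hw_done, true);
	complete(&ctx->cmd_done);
	return IRQ_HANDLED;
}

/*
 * Sleep until the command completes or the timeout expires.  The loop
 * re-checks the completion state after every wakeup instead of trusting
 * a single wakeup.
 */
static int my_wait_for_cmd(struct my_cmd_ctx *ctx)
{
	unsigned long timeout = msecs_to_jiffies(MY_CMD_TIMEOUT_MS);

	while (!READ_ONCE(ctx->hw_done)) {
		if (!wait_for_completion_timeout(&ctx->cmd_done, timeout))
			return -ETIMEDOUT;
	}

	return 0;
}

Re-checking after each wakeup is the same reason mc_completion_wait() in this patch re-reads the MC response with mc_read_response() after wait_for_completion_timeout() returns, rather than assuming the command finished because the completion fired.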