From: Srinivasa Rao Mandadapu <quic_srivasam@quicinc.com>
Cc: Srinivasa Rao Mandadapu, Venkata Prasad Potturu
Subject: [RESEND v9 06/10] ASoC: qcom: Add regmap config support for codec dma driver
Date: Mon, 6 Dec 2021 19:52:43 +0530
Message-ID: <1638800567-27222-7-git-send-email-quic_srivasam@quicinc.com>
In-Reply-To: <1638800567-27222-1-git-send-email-quic_srivasam@quicinc.com>
References: <1638800567-27222-1-git-send-email-quic_srivasam@quicinc.com>
X-Mailer: git-send-email 2.7.4

Update the regmap configuration to support headset playback, headset capture
and DMIC capture through the codec DMA interface.

Signed-off-by: Srinivasa Rao Mandadapu
Co-developed-by: Venkata Prasad Potturu
Signed-off-by: Venkata Prasad Potturu
Reviewed-by: Srinivas Kandagatla
---
 sound/soc/qcom/lpass-cpu.c | 185 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 185 insertions(+)

diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
index 3bd9eb3..4fb9669 100644
--- a/sound/soc/qcom/lpass-cpu.c
+++ b/sound/soc/qcom/lpass-cpu.c
@@ -28,6 +28,8 @@
 #define LPASS_CPU_I2S_SD2_3_MASK GENMASK(3, 2)
 #define LPASS_CPU_I2S_SD0_1_2_MASK GENMASK(2, 0)
 #define LPASS_CPU_I2S_SD0_1_2_3_MASK GENMASK(3, 0)
+#define LPASS_REG_READ 1
+#define LPASS_REG_WRITE 0
 
 /*
  * Channel maps for Quad channel playbacks on MI2S Secondary
@@ -798,6 +800,189 @@ static struct regmap_config lpass_hdmi_regmap_config = {
         .cache_type = REGCACHE_FLAT,
 };
 
+static bool __lpass_rxtx_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
+{
+        struct lpass_data *drvdata = dev_get_drvdata(dev);
+        struct lpass_variant *v = drvdata->variant;
+        int i;
+
+        for (i = 0; i < v->rxtx_irq_ports; ++i) {
+                if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
+                        return true;
+                if (reg == LPAIF_RXTX_IRQEN_REG(v, i))
+                        return true;
+                if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
+                        return true;
+        }
+
+        for (i = 0; i < v->rxtx_rdma_channels; ++i) {
+                if (reg == LPAIF_CDC_RXTX_RDMACTL_REG(v, i, LPASS_CDC_DMA_RX0))
+                        return true;
+                if (reg == LPAIF_CDC_RXTX_RDMABASE_REG(v, i, LPASS_CDC_DMA_RX0))
+                        return true;
+                if (reg == LPAIF_CDC_RXTX_RDMABUFF_REG(v, i, LPASS_CDC_DMA_RX0))
+                        return true;
+                if (rw == LPASS_REG_READ) {
+                        if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
+                                return true;
+                }
+                if (reg == LPAIF_CDC_RXTX_RDMAPER_REG(v, i, LPASS_CDC_DMA_RX0))
+                        return true;
+                if (reg == LPAIF_CDC_RXTX_RDMA_INTF_REG(v, i, LPASS_CDC_DMA_RX0))
+                        return true;
+        }
+
+        for (i = 0; i < v->rxtx_wrdma_channels; ++i) {
+                if (reg == LPAIF_CDC_RXTX_WRDMACTL_REG(v, i + v->rxtx_wrdma_channel_start,
+                                                       LPASS_CDC_DMA_TX3))
+                        return true;
+                if (reg == LPAIF_CDC_RXTX_WRDMABASE_REG(v, i + v->rxtx_wrdma_channel_start,
+                                                        LPASS_CDC_DMA_TX3))
+                        return true;
+                if (reg == LPAIF_CDC_RXTX_WRDMABUFF_REG(v, i + v->rxtx_wrdma_channel_start,
+                                                        LPASS_CDC_DMA_TX3))
+                        return true;
+                if (rw == LPASS_REG_READ) {
+                        if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start, LPASS_CDC_DMA_TX3))
+                                return true;
+                }
+                if (reg == LPAIF_CDC_RXTX_WRDMAPER_REG(v, i + v->rxtx_wrdma_channel_start,
+                                                       LPASS_CDC_DMA_TX3))
+                        return true;
+                if (reg == LPAIF_CDC_RXTX_WRDMA_INTF_REG(v, i + v->rxtx_wrdma_channel_start,
+                                                         LPASS_CDC_DMA_TX3))
+                        return true;
+        }
+        return false;
+}
+
+static bool lpass_rxtx_regmap_writeable(struct device *dev, unsigned int reg)
+{
+        return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_WRITE);
+}
+
+static bool lpass_rxtx_regmap_readable(struct device *dev, unsigned int reg)
+{
+        return __lpass_rxtx_regmap_accessible(dev, reg, LPASS_REG_READ);
+}
+
+static bool lpass_rxtx_regmap_volatile(struct device *dev, unsigned int reg)
+{
+        struct lpass_data *drvdata = dev_get_drvdata(dev);
+        struct lpass_variant *v = drvdata->variant;
+        int i;
+
+        for (i = 0; i < v->rxtx_irq_ports; ++i) {
+                if (reg == LPAIF_RXTX_IRQCLEAR_REG(v, i))
+                        return true;
+                if (reg == LPAIF_RXTX_IRQSTAT_REG(v, i))
+                        return true;
+        }
+
+        for (i = 0; i < v->rxtx_rdma_channels; ++i)
+                if (reg == LPAIF_CDC_RXTX_RDMACURR_REG(v, i, LPASS_CDC_DMA_RX0))
+                        return true;
+
+        for (i = 0; i < v->rxtx_wrdma_channels; ++i)
+                if (reg == LPAIF_CDC_RXTX_WRDMACURR_REG(v, i + v->rxtx_wrdma_channel_start,
+                                                        LPASS_CDC_DMA_TX3))
+                        return true;
+
+        return false;
+}
+
+static bool __lpass_va_regmap_accessible(struct device *dev, unsigned int reg, bool rw)
+{
+        struct lpass_data *drvdata = dev_get_drvdata(dev);
+        struct lpass_variant *v = drvdata->variant;
+        int i;
+
+        for (i = 0; i < v->va_irq_ports; ++i) {
+                if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
+                        return true;
+                if (reg == LPAIF_VA_IRQEN_REG(v, i))
+                        return true;
+                if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
+                        return true;
+        }
+
+        for (i = 0; i < v->va_wrdma_channels; ++i) {
+                if (reg == LPAIF_CDC_VA_WRDMACTL_REG(v, i + v->va_wrdma_channel_start,
+                                                     LPASS_CDC_DMA_VA_TX0))
+                        return true;
+                if (reg == LPAIF_CDC_VA_WRDMABASE_REG(v, i + v->va_wrdma_channel_start,
+                                                      LPASS_CDC_DMA_VA_TX0))
+                        return true;
+                if (reg == LPAIF_CDC_VA_WRDMABUFF_REG(v, i + v->va_wrdma_channel_start,
+                                                      LPASS_CDC_DMA_VA_TX0))
+                        return true;
+                if (rw == LPASS_REG_READ) {
+                        if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
+                                                              LPASS_CDC_DMA_VA_TX0))
+                                return true;
+                }
+                if (reg == LPAIF_CDC_VA_WRDMAPER_REG(v, i + v->va_wrdma_channel_start,
+                                                     LPASS_CDC_DMA_VA_TX0))
+                        return true;
+                if (reg == LPAIF_CDC_VA_WRDMA_INTF_REG(v, i + v->va_wrdma_channel_start,
+                                                       LPASS_CDC_DMA_VA_TX0))
+                        return true;
+        }
+        return false;
+}
+
+static bool lpass_va_regmap_writeable(struct device *dev, unsigned int reg)
+{
+        return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_WRITE);
+}
+
+static bool lpass_va_regmap_readable(struct device *dev, unsigned int reg)
+{
+        return __lpass_va_regmap_accessible(dev, reg, LPASS_REG_READ);
+}
+
+static bool lpass_va_regmap_volatile(struct device *dev, unsigned int reg)
+{
+        struct lpass_data *drvdata = dev_get_drvdata(dev);
+        struct lpass_variant *v = drvdata->variant;
+        int i;
+
+        for (i = 0; i < v->va_irq_ports; ++i) {
+                if (reg == LPAIF_VA_IRQCLEAR_REG(v, i))
+                        return true;
+                if (reg == LPAIF_VA_IRQSTAT_REG(v, i))
+                        return true;
+        }
+
+        for (i = 0; i < v->va_wrdma_channels; ++i) {
+                if (reg == LPAIF_CDC_VA_WRDMACURR_REG(v, i + v->va_wrdma_channel_start,
+                                                      LPASS_CDC_DMA_VA_TX0))
+                        return true;
+        }
+
+        return false;
+}
+
+static struct regmap_config lpass_rxtx_regmap_config = {
+        .reg_bits = 32,
+        .reg_stride = 4,
+        .val_bits = 32,
+        .writeable_reg = lpass_rxtx_regmap_writeable,
+        .readable_reg = lpass_rxtx_regmap_readable,
+        .volatile_reg = lpass_rxtx_regmap_volatile,
+        .cache_type = REGCACHE_FLAT,
+};
+
+static struct regmap_config lpass_va_regmap_config = {
+        .reg_bits = 32,
+        .reg_stride = 4,
+        .val_bits = 32,
+        .writeable_reg = lpass_va_regmap_writeable,
+        .readable_reg = lpass_va_regmap_readable,
+        .volatile_reg = lpass_va_regmap_volatile,
+        .cache_type = REGCACHE_FLAT,
+};
+
 static unsigned int of_lpass_cpu_parse_sd_lines(struct device *dev,
                                                 struct device_node *node,
                                                 const char *name)
-- 
2.7.4
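
For context, below is a minimal sketch (not part of this patch) of how a regmap_config with readable/writeable/volatile callbacks and REGCACHE_FLAT, such as lpass_rxtx_regmap_config above, is typically bound to its memory-mapped register block with devm_regmap_init_mmio() in the platform probe path. The function name, the "lpass-rxtx-lpaif" resource name and the rxtx_lpaif_map field are illustrative assumptions, not taken from this series; it assumes the code lives in lpass-cpu.c so the LPASS headers are already included.

/*
 * Illustrative sketch only: bind lpass_rxtx_regmap_config to a memory-mapped
 * register region. The helper name, the "lpass-rxtx-lpaif" resource name and
 * the rxtx_lpaif_map drvdata field are hypothetical.
 */
static int lpass_rxtx_regmap_attach(struct platform_device *pdev,
                                    struct lpass_data *drvdata)
{
        struct lpass_variant *v = drvdata->variant;
        void __iomem *base;

        /* Map the codec DMA (RXTX) LPAIF register block described in DT */
        base = devm_platform_ioremap_resource_byname(pdev, "lpass-rxtx-lpaif");
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* REGCACHE_FLAT sizes its cache from max_register, so set it before init */
        lpass_rxtx_regmap_config.max_register = LPAIF_CDC_RXTX_WRDMAPER_REG(v,
                        v->rxtx_wrdma_channel_start + v->rxtx_wrdma_channels,
                        LPASS_CDC_DMA_TX3);

        drvdata->rxtx_lpaif_map = devm_regmap_init_mmio(&pdev->dev, base,
                                                        &lpass_rxtx_regmap_config);

        return PTR_ERR_OR_ZERO(drvdata->rxtx_lpaif_map);
}

The key point the sketch illustrates is that the accessibility callbacks defined in this patch are only consulted once the regmap is created over the mapped region, and that max_register must be known up front for the flat cache to cover the full codec DMA register range.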