From: David Brownell To: Andrew Morton Subject: [patch 2.6.18-git] SPI -- Freescale iMX SPI controller driver Cc: "Andrea Paterniani" , Linux Kernel list Date: Mon, 2 Oct 2006 08:16:58 -0700 Message-Id: <200610020816.58985.david-b@pacbell.net> Subject: SPI controller driver for Freescale iMX From: Andrea Paterniani This patch adds a SPI controller driver for the Freescale i.MX(S/L/1). The code is inspired by the pxa2xx_spi driver. Main features summary: - Per chip setup via board specific code and/or protocol driver. - Per transfer setup. - PIO transfers. - DMA transfers. - Handling of NULL tx / rx buffers for read only / write only transfers. Signed-off-by: Andrea Paterniani Signed-off-by: David Brownell Index: at91/drivers/spi/Kconfig =================================================================== --- at91.orig/drivers/spi/Kconfig 2006-09-30 19:05:12.000000000 -0700 +++ at91/drivers/spi/Kconfig 2006-10-01 23:45:10.000000000 -0700 @@ -75,6 +75,12 @@ config SPI_BUTTERFLY inexpensive battery powered microcontroller evaluation board. This same cable can be used to flash new firmware. +config SPI_IMX + tristate "Freescale iMX SPI controller driver" + depends on SPI_MASTER && ARCH_IMX && EXPERIMENTAL + help + This is the iMX SPI controller driver. 
+ config SPI_MPC83xx tristate "Freescale MPC83xx SPI controller" depends on SPI_MASTER && PPC_83xx && EXPERIMENTAL Index: at91/drivers/spi/Makefile =================================================================== --- at91.orig/drivers/spi/Makefile 2006-09-30 19:05:12.000000000 -0700 +++ at91/drivers/spi/Makefile 2006-10-01 20:06:53.000000000 -0700 @@ -13,6 +13,7 @@ obj-$(CONFIG_SPI_MASTER) += spi.o # SPI master controller drivers (bus) obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o +obj-$(CONFIG_SPI_IMX) += spi_imx.o obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o Index: at91/drivers/spi/spi_imx.c =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ at91/drivers/spi/spi_imx.c 2006-10-01 23:48:25.000000000 -0700 @@ -0,0 +1,1962 @@ +/* + * drivers/spi/spi_imx.c + * + * Copyright (C) 2006 SWAPP + * Andrea Paterniani + * + * Initial version inspired by: + * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include + +/*-------------------------------------------------------------------------*/ +/* SPI Control Register Bit Fields & Masks */ +#define SPI_CONTROL_BITCOUNT (0xF) /* Bit Count Mask */ +#define SPI_CONTROL_BITCOUNT_1 (0x0) /* Bit Count = 1 */ +#define SPI_CONTROL_BITCOUNT_2 (0x1) /* Bit Count = 2 */ +#define SPI_CONTROL_BITCOUNT_3 (0x2) /* Bit Count = 3 */ +#define SPI_CONTROL_BITCOUNT_4 (0x3) /* Bit Count = 4 */ +#define SPI_CONTROL_BITCOUNT_5 (0x4) /* Bit Count = 5 */ +#define SPI_CONTROL_BITCOUNT_6 (0x5) /* Bit Count = 6 */ +#define SPI_CONTROL_BITCOUNT_7 (0x6) /* Bit Count = 7 */ +#define SPI_CONTROL_BITCOUNT_8 (0x7) /* Bit Count = 8 */ +#define SPI_CONTROL_BITCOUNT_9 (0x8) /* Bit Count = 9 */ +#define SPI_CONTROL_BITCOUNT_10 (0x9) /* Bit Count = 10 */ +#define SPI_CONTROL_BITCOUNT_11 (0xA) /* Bit Count = 11 */ +#define SPI_CONTROL_BITCOUNT_12 (0xB) /* Bit Count = 12 */ +#define SPI_CONTROL_BITCOUNT_13 (0xC) /* Bit Count = 13 */ +#define SPI_CONTROL_BITCOUNT_14 (0xD) /* Bit Count = 14 */ +#define SPI_CONTROL_BITCOUNT_15 (0xE) /* Bit Count = 15 */ +#define SPI_CONTROL_BITCOUNT_16 (0xF) /* Bit Count = 16 */ +#define SPI_CONTROL_POL (0x1 << 4) /* Clock Polarity Mask */ +#define SPI_CONTROL_POL_0 (0x0 << 4) /* Active high pol. (0=idle) */ +#define SPI_CONTROL_POL_1 (0x1 << 4) /* Active low pol. 
(1=idle) */ +#define SPI_CONTROL_PHA (0x1 << 5) /* Clock Phase Mask */ +#define SPI_CONTROL_PHA_0 (0x0 << 5) /* Clock Phase 0 */ +#define SPI_CONTROL_PHA_1 (0x1 << 5) /* Clock Phase 1 */ +#define SPI_CONTROL_SSCTL (0x1 << 6) /* /SS Waveform Select Mask */ +#define SPI_CONTROL_SSCTL_0 (0x0 << 6) /* Master: /SS stays low + between SPI burst + Slave: RXFIFO advanced by + BIT_COUNT */ +#define SPI_CONTROL_SSCTL_1 (0x1 << 6) /* Master: /SS insert pulse + between SPI burst + Slave: RXFIFO advanced by + /SS rising edge */ +#define SPI_CONTROL_SSPOL (0x1 << 7) /* /SS Polarity Select Mask */ +#define SPI_CONTROL_SSPOL_0 (0x0 << 7) /* /SS Active low */ +#define SPI_CONTROL_SSPOL_1 (0x1 << 7) /* /SS Active high */ +#define SPI_CONTROL_XCH (0x1 << 8) /* Exchange */ +#define SPI_CONTROL_SPIEN (0x1 << 9) /* SPI Module Enable */ +#define SPI_CONTROL_MODE (0x1 << 10) /* SPI Mode Select Mask */ +#define SPI_CONTROL_MODE_SLAVE (0x0 << 10) /* SPI Mode Slave */ +#define SPI_CONTROL_MODE_MASTER (0x1 << 10) /* SPI Mode Master */ +#define SPI_CONTROL_DRCTL (0x3 << 11) /* /SPI_RDY Control Mask */ +#define SPI_CONTROL_DRCTL_0 (0x0 << 11) /* Ignore /SPI_RDY */ +#define SPI_CONTROL_DRCTL_1 (0x1 << 11) /* /SPI_RDY falling edge + triggers input */ +#define SPI_CONTROL_DRCTL_2 (0x2 << 11) /* /SPI_RDY active low level + triggers input */ +#define SPI_CONTROL_DATARATE (0x7 << 13) /* Data Rate Mask */ +#define SPI_PERCLK2_DIV_MIN (0) /* PERCLK2:4 */ +#define SPI_PERCLK2_DIV_MAX (7) /* PERCLK2:512 */ +#define SPI_CONTROL_DATARATE_MIN (SPI_PERCLK2_DIV_MAX << 13) +#define SPI_CONTROL_DATARATE_MAX (SPI_PERCLK2_DIV_MIN << 13) +#define SPI_CONTROL_DATARATE_BAD (SPI_CONTROL_DATARATE_MIN + 1) + +/* SPI Interrupt/Status Register Bit Fields & Masks */ +#define SPI_STATUS_TE (0x1 << 0) /* TXFIFO Empty Status */ +#define SPI_STATUS_TH (0x1 << 1) /* TXFIFO Half Status */ +#define SPI_STATUS_TF (0x1 << 2) /* TXFIFO Full Status */ +#define SPI_STATUS_RR (0x1 << 3) /* RXFIFO Data Ready Status */ +#define SPI_STATUS_RH (0x1 << 4) /* RXFIFO Half Status */ +#define SPI_STATUS_RF (0x1 << 5) /* RXFIFO Full Status */ +#define SPI_STATUS_RO (0x1 << 6) /* RXFIFO Overflow */ +#define SPI_STATUS_BO (0x1 << 7) /* Bit Count Overflow */ +#define SPI_STATUS (0xFF) /* SPI Status Mask */ +#define SPI_INTEN_TE (0x1 << 8) /* TXFIFO Empty Interrupt + Enable */ +#define SPI_INTEN_TH (0x1 << 9) /* TXFIFO Half Interrupt + Enable */ +#define SPI_INTEN_TF (0x1 << 10) /* TXFIFO Full Interrupt + Enable */ +#define SPI_INTEN_RE (0x1 << 11) /* RXFIFO Data Ready Interrupt + Enable */ +#define SPI_INTEN_RH (0x1 << 12) /* RXFIFO Half Interrupt + Enable */ +#define SPI_INTEN_RF (0x1 << 13) /* RXFIFO Full Interrupt + Enable */ +#define SPI_INTEN_RO (0x1 << 14) /* RXFIFO Overflow Interrupt + Enable */ +#define SPI_INTEN_BO (0x1 << 15) /* Bit Count Overflow + Interrupt Enable */ +#define SPI_INTEN (0xFF << 8) /* SPI Interrupt Enable Mask */ + +/* SPI Test Register Bit Fields & Masks */ +#define SPI_TEST_TXCNT (0xF << 0) /* TXFIFO Counter */ +#define SPI_TEST_RXCNT_LSB (4) /* RXFIFO Counter LSB */ +#define SPI_TEST_RXCNT (0xF << 4) /* RXFIFO Counter */ +#define SPI_TEST_SSTATUS (0xF << 8) /* State Machine Status */ +#define SPI_TEST_LBC (0x1 << 14) /* Loop Back Control */ + +/* SPI Period Register Bit Fields & Masks */ +#define SPI_PERIOD_WAIT (0x7FFF << 0) /* Wait Between Transactions */ +#define SPI_PERIOD_MAX_WAIT (0x7FFF) /* Max Wait Between + Transactions */ +#define SPI_PERIOD_CSRC (0x1 << 15) /* Period Clock Source Mask */ +#define SPI_PERIOD_CSRC_BCLK (0x0 << 15) 
/* Period Clock Source is + Bit Clock */ +#define SPI_PERIOD_CSRC_32768 (0x1 << 15) /* Period Clock Source is + 32.768 kHz Clock */ + +/* SPI DMA Register Bit Fields & Masks */ +#define SPI_DMA_RHDMA (0xF << 4) /* RXFIFO Half Status */ +#define SPI_DMA_RFDMA (0x1 << 5) /* RXFIFO Full Status */ +#define SPI_DMA_TEDMA (0x1 << 6) /* TXFIFO Empty Status */ +#define SPI_DMA_THDMA (0x1 << 7) /* TXFIFO Half Status */ +#define SPI_DMA_RHDEN (0x1 << 12) /* RXFIFO Half DMA Request + Enable */ +#define SPI_DMA_RFDEN (0x1 << 13) /* RXFIFO Full DMA Request + Enable */ +#define SPI_DMA_TEDEN (0x1 << 14) /* TXFIFO Empty DMA Request + Enable */ +#define SPI_DMA_THDEN (0x1 << 15) /* TXFIFO Half DMA Request + Enable */ + +/* SPI Soft Reset Register Bit Fields & Masks */ +#define SPI_RESET_START (0x1 << 0) /* Start */ + +/* Default SPI configuration values */ +#define SPI_DEFAULT_CONTROL \ +( \ + SPI_CONTROL_BITCOUNT_16 | \ + SPI_CONTROL_POL_0 | \ + SPI_CONTROL_PHA_0 | \ + SPI_CONTROL_SSCTL_0 | \ + SPI_CONTROL_MODE_MASTER | \ + SPI_CONTROL_DRCTL_0 | \ + SPI_CONTROL_DATARATE_MIN \ +) +#define SPI_DEFAULT_ENABLE_LOOPBACK (0) +#define SPI_DEFAULT_ENABLE_DMA (0) +/*-------------------------------------------------------------------------*/ + + +/*-------------------------------------------------------------------------*/ +/* TX/RX SPI FIFO size */ +#define SPI_FIFO_DEPTH (8) +#define SPI_FIFO_BYTE_WIDTH (2) +#define SPI_FIFO_OVERFLOW_MARGIN (2) + +/* DMA burst length for half full/empty request trigger */ +#define SPI_DMA_BLR (SPI_FIFO_DEPTH * SPI_FIFO_BYTE_WIDTH / 2) + +/* Dummy char output to achieve reads. + Choosing something different from all zeros or all ones may help + pattern recognition for oscilloscope analysis. */ +#define SPI_DUMMY_u8 ('@') +#define SPI_DUMMY_u16 ((SPI_DUMMY_u8 << 8) | SPI_DUMMY_u8) +#define SPI_DUMMY_u32 ((SPI_DUMMY_u16 << 16) | SPI_DUMMY_u16) + +/** + * Macro to change a u32 field: + * @r : register to edit + * @m : bit mask + * @v : new value for the field correctly bit-aligned +*/ +#define u32_EDIT(r, m, v) r = (r & ~(m)) | (v) + +/* Message state */ +#define START_STATE ((void*)0) +#define RUNNING_STATE ((void*)1) +#define DONE_STATE ((void*)2) +#define ERROR_STATE ((void*)-1) + +/* Queue state */ +#define QUEUE_RUNNING (0) +#define QUEUE_STOPPED (1) + +#define IS_DMA_ALIGNED(x) (((u32)(x) & 0x03) == 0) +/*-------------------------------------------------------------------------*/ + + +/*-------------------------------------------------------------------------*/ +/* Inline functions for SPI controller register access */ + +#define DEFINE_SPI_REG_RD(reg, off) \ + static inline u32 rd_##reg(void __iomem *p) \ + { \ + return readl(p + (off)); \ + } + +#define DEFINE_SPI_REG_WR(reg, off) \ + static inline void wr_##reg(u32 v, void __iomem *p) \ + { \ + writel(v, p + (off)); \ + } + +DEFINE_SPI_REG_RD(DATA, 0x00) +DEFINE_SPI_REG_WR(DATA, 0x04) +DEFINE_SPI_REG_RD(CONTROL, 0x08) +DEFINE_SPI_REG_WR(CONTROL, 0x08) +DEFINE_SPI_REG_RD(INT_STATUS, 0x0C) +DEFINE_SPI_REG_WR(INT_STATUS, 0x0C) +DEFINE_SPI_REG_RD(TEST, 0x10) +DEFINE_SPI_REG_WR(TEST, 0x10) +DEFINE_SPI_REG_RD(PERIOD, 0x14) +DEFINE_SPI_REG_WR(PERIOD, 0x14) +DEFINE_SPI_REG_RD(DMA, 0x18) +DEFINE_SPI_REG_WR(DMA, 0x18) +DEFINE_SPI_REG_WR(RESET, 0x1C) +/*-------------------------------------------------------------------------*/ + + +/*-------------------------------------------------------------------------*/ +/* Debug print macros */ + +#define PRINT_DMA_GLOBAL_REGS(dev) \ + dev_dbg(dev, \ + "DMA_GLOBAL\n" \ + " DCR = 0x%08X\n" \ + 
" DISR = 0x%08X\n" \ + " DIMR = 0x%08X\n" \ + " DBTOSR = 0x%08X\n" \ + " DRTOSR = 0x%08X\n" \ + " DSESR = 0x%08X\n" \ + " DBOSR = 0x%08X\n" \ + " DBTOCR = 0x%08X\n",\ + DCR, \ + DISR, \ + DIMR, \ + DBTOSR, \ + DRTOSR, \ + DSESR, \ + DBOSR, \ + DBTOCR) + +#define PRINT_DMA_CH_REGS(dev, channel) \ + dev_dbg(dev, \ + "DMA(%d)\n" \ + " SAR = 0x%08X\n" \ + " DAR = 0x%08X\n" \ + " CNTR = 0x%08X\n" \ + " CCR = 0x%08X\n" \ + " RSSR = 0x%08X\n" \ + " BLR = 0x%08X\n" \ + " RTOR = 0x%08X\n" \ + " BUCR = 0x%08X\n",\ + channel, \ + SAR(channel), \ + DAR(channel), \ + CNTR(channel), \ + CCR(channel), \ + RSSR(channel), \ + BLR(channel), \ + RTOR(channel), \ + BUCR(channel)) + +#define PRINT_SPI_REGS(dev, regs) \ + dev_dbg(dev, \ + "SPI_REGS\n" \ + " CONTROL = 0x%08X\n" \ + " INT_STATUS = 0x%08X\n" \ + " TEST = 0x%08X\n" \ + " PERIOD = 0x%08X\n" \ + " DMA = 0x%08X\n", \ + rd_CONTROL(regs), \ + rd_INT_STATUS(regs), \ + rd_TEST(regs), \ + rd_PERIOD(regs), \ + rd_DMA(regs)) +/*-------------------------------------------------------------------------*/ + + +/*-------------------------------------------------------------------------*/ +/* Driver data structs */ + +/* Context */ +struct driver_data { + /* Driver model hookup */ + struct platform_device *pdev; + + /* SPI framework hookup */ + struct spi_master *master; + + /* IMX hookup */ + struct spi_imx_master *master_info; + + /* Memory resources and SPI regs virtual address */ + struct resource *ioarea; + void __iomem *regs; + + /* SPI RX_DATA physical address */ + dma_addr_t rd_data_phys; + + /* Driver message queue */ + struct workqueue_struct *workqueue; + struct work_struct pump_messages; + spinlock_t lock; + struct list_head queue; + int busy; + int run; + + /* Message Transfer pump */ + struct tasklet_struct pump_transfers; + + /* Current message, transfer and state */ + struct spi_message *cur_msg; + struct spi_transfer *cur_transfer; + struct chip_data *cur_chip; + + /* Rd / Wr buffers pointers */ + size_t len; + void *tx; + void *tx_end; + void *rx; + void *rx_end; + + u8 n_bytes; + int cs_change; + + /* Function pointers */ + int (*write)(struct driver_data *drv_data); + int (*read)(struct driver_data *drv_data); + irqreturn_t (*transfer_handler)(struct driver_data *drv_data); + void (*cs_control)(u32 command); + + /* DMA setup */ + int rx_channel; + int tx_channel; + dma_addr_t rx_dma; + dma_addr_t tx_dma; + int rx_dma_needs_unmap; + int tx_dma_needs_unmap; + size_t tx_map_len; + u32 dummy_dma_buf ____cacheline_aligned; +}; + +/* Runtime state */ +struct chip_data { + u32 control; + u32 test; + + u8 enable_dma:1; + u8 bits_per_word; + u8 n_bytes; + u32 max_speed_hz; + + int (*write)(struct driver_data *drv_data); + int (*read)(struct driver_data *drv_data); + void (*cs_control)(u32 command); +}; +/*-------------------------------------------------------------------------*/ + + +static void pump_messages(void *data); + +static int flush(struct driver_data *drv_data) +{ + unsigned long limit = loops_per_jiffy << 1; + void __iomem *regs = drv_data->regs; + volatile u32 d; + + dev_dbg(&drv_data->pdev->dev, "flush\n"); + + do { + while (rd_INT_STATUS(regs) & SPI_STATUS_RR) + d = rd_DATA(regs); + } while ((rd_CONTROL(regs) & SPI_CONTROL_XCH) && limit--); + + return limit; +} + +static void restore_state(struct driver_data *drv_data) +{ + void __iomem *regs = drv_data->regs; + struct chip_data *chip = drv_data->cur_chip; + + /* Temporary disable SPI */ + wr_CONTROL(rd_CONTROL(regs) & ~SPI_CONTROL_SPIEN, regs); + + /* Load chip registers */ + 
dev_dbg(&drv_data->pdev->dev, + "restore_state\n" + " test = 0x%08X\n" + " control = 0x%08X\n", + chip->test, + chip->control); + wr_TEST(chip->test, regs); + wr_PERIOD(0, regs); + wr_INT_STATUS(0, regs); + wr_CONTROL(chip->control, regs); +} + +static void null_cs_control(u32 command) +{ +} + +static inline u32 data_to_write(struct driver_data *drv_data) +{ + return ((u32)(drv_data->tx_end - drv_data->tx)) / drv_data->n_bytes; +} + +static inline u32 data_to_read(struct driver_data *drv_data) +{ + return ((u32)(drv_data->rx_end - drv_data->rx)) / drv_data->n_bytes; +} + +static int dummy_writer(struct driver_data *drv_data) +{ + void __iomem *regs = drv_data->regs; + u8 *tx, *tx_end; + u32 remaining_data; + u32 fifo_avail_space; + u32 n; + + /* Compute how many fifo writes to do */ + tx = (u8*)drv_data->tx; + tx_end = (u8*)drv_data->tx_end; + remaining_data = (u32)(tx_end - tx) / drv_data->n_bytes; + fifo_avail_space = SPI_FIFO_DEPTH - (rd_TEST(regs) & SPI_TEST_TXCNT); + if (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN) + /* Fix misunderstood receive overflow */ + fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN; + n = min(remaining_data, fifo_avail_space); + dev_dbg(&drv_data->pdev->dev, + "dummy_writer\n" + " remaining data = %d\n" + " fifo avail space = %d\n" + " fifo writes = %d\n", + remaining_data, + fifo_avail_space, + n); + + if (n > 0) { + + /* Fill SPI TXFIFO */ + tx += n * drv_data->n_bytes; + while (n--) + wr_DATA(SPI_DUMMY_u16, regs); + + /* Trigger transfer */ + wr_CONTROL(rd_CONTROL(regs) | SPI_CONTROL_XCH, regs); + + /* Update tx pointer */ + drv_data->tx = tx; + } + + return (tx >= tx_end); +} + +static int u8_writer(struct driver_data *drv_data) +{ + void __iomem *regs = drv_data->regs; + u8 *tx, *tx_end; + u32 remaining_data; + u32 fifo_avail_space; + u32 n; + + /* Compute how many fifo writes to do */ + tx = (u8*)drv_data->tx; + tx_end = (u8*)drv_data->tx_end; + remaining_data = (u32)(tx_end - tx); + fifo_avail_space = SPI_FIFO_DEPTH - (rd_TEST(regs) & SPI_TEST_TXCNT); + if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN)) + /* Fix misunderstood receive overflow */ + fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN; + n = min(remaining_data, fifo_avail_space); + dev_dbg(&drv_data->pdev->dev, + "u8_writer\n" + " remaining data = %d\n" + " fifo avail space = %d\n" + " fifo writes = %d\n", + remaining_data, + fifo_avail_space, + n); + + if (n > 0) { + /* Fill SPI TXFIFO */ + while (n--) + wr_DATA(*tx++, regs); + + /* Trigger transfer */ + wr_CONTROL(rd_CONTROL(regs) | SPI_CONTROL_XCH, regs); + + /* Update tx pointer */ + drv_data->tx = tx; + } + + return (tx >= tx_end); +} + +static int u8_reader(struct driver_data *drv_data) +{ + void __iomem *regs = drv_data->regs; + u8 *rx, *rx_end; + u32 remaining_data; + u32 fifo_rxcnt; + u32 n; + + /* Compute how many fifo reads to do */ + rx = (u8*)drv_data->rx; + rx_end = (u8*)drv_data->rx_end; + remaining_data = (u32)(rx_end - rx); + fifo_rxcnt = (rd_TEST(regs) & SPI_TEST_RXCNT) >> SPI_TEST_RXCNT_LSB; + n = min(remaining_data, fifo_rxcnt); + dev_dbg(&drv_data->pdev->dev, + "u8_reader\n" + " remaining data = %d\n" + " fifo_rxcnt = %d\n" + " fifo reads = %d\n", + remaining_data, + fifo_rxcnt, + n); + + if (n > 0) { + /* Read SPI RXFIFO */ + while (n--) + *rx++ = rd_DATA(regs); + + /* Update rx pointer */ + drv_data->rx = rx; + } + + return (rx >= rx_end); +} + +static int u16_writer(struct driver_data *drv_data) +{ + void __iomem *regs = drv_data->regs; + u16 *tx, *tx_end; + u32 remaining_data; + u32 fifo_avail_space; + u32 n; 
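+ /* Same pattern as u8_writer, one 16 bit word per FIFO slot: free
+    TXFIFO space is derived from the TEST register, and when data is
+    also being read back SPI_FIFO_OVERFLOW_MARGIN slots are left
+    unused so the RXFIFO cannot overflow while the burst drains.
+    Writing XCH afterwards starts the exchange. */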
+ + /* Compute how many fifo writes to do */ + tx = (u16*)drv_data->tx; + tx_end = (u16*)drv_data->tx_end; + remaining_data = (u32)(tx_end - tx); + fifo_avail_space = SPI_FIFO_DEPTH - (rd_TEST(regs) & SPI_TEST_TXCNT); + if (drv_data->rx && (fifo_avail_space > SPI_FIFO_OVERFLOW_MARGIN)) + /* Fix misunderstood receive overflow */ + fifo_avail_space -= SPI_FIFO_OVERFLOW_MARGIN; + n = min(remaining_data, fifo_avail_space); + dev_dbg(&drv_data->pdev->dev, + "u16_writer\n" + " remaining data = %d\n" + " fifo avail space = %d\n" + " fifo writes = %d\n", + remaining_data, + fifo_avail_space, + n); + + if (n > 0) { + /* Fill SPI TXFIFO */ + while (n--) + wr_DATA(*tx++, regs); + + /* Trigger transfer */ + wr_CONTROL(rd_CONTROL(regs) | SPI_CONTROL_XCH, regs); + + /* Update tx pointer */ + drv_data->tx = tx; + } + + return (tx >= tx_end); +} + +static int u16_reader(struct driver_data *drv_data) +{ + void __iomem *regs; + u16 *rx, *rx_end; + u32 remaining_data; + u32 fifo_rxcnt; + u32 n; + + regs = drv_data->regs; + + /* Compute how many fifo reads to do */ + rx = (u16*)drv_data->rx; + rx_end = (u16*)drv_data->rx_end; + remaining_data = (u32)(rx_end - rx); + fifo_rxcnt = (rd_TEST(regs) & SPI_TEST_RXCNT) >> SPI_TEST_RXCNT_LSB; + n = min(remaining_data, fifo_rxcnt); + dev_dbg(&drv_data->pdev->dev, + "u16_reader\n" + " remaining data = %d\n" + " fifo_rxcnt = %d\n" + " fifo reads = %d\n", + remaining_data, + fifo_rxcnt, + n); + + if (n > 0) { + /* Read SPI RXFIFO */ + while (n--) + *rx++ = rd_DATA(regs); + + /* Update rx pointer */ + drv_data->rx = rx; + } + + return (rx >= rx_end); +} + +static void *next_transfer(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct spi_transfer *trans = drv_data->cur_transfer; + + /* Move to next transfer */ + if (trans->transfer_list.next != &msg->transfers) { + drv_data->cur_transfer = + list_entry(trans->transfer_list.next, + struct spi_transfer, + transfer_list); + return RUNNING_STATE; + } + + return DONE_STATE; +} + +static int map_dma_buffers(struct driver_data *drv_data) +{ + struct spi_message *msg; + struct device *dev; + void *buf; + + drv_data->rx_dma_needs_unmap = 0; + drv_data->tx_dma_needs_unmap = 0; + + if (!drv_data->master_info->enable_dma || + !drv_data->cur_chip->enable_dma) + return 0; + + msg = drv_data->cur_msg; + dev = &msg->spi->dev; + if (msg->is_dma_mapped) { + if (drv_data->tx_dma) + /* The caller provided at least dma and cpu virtual + address for write; pump_transfers() will consider the + transfer as write only if cpu rx virtual address is + NULL */ + return 1; + + if (drv_data->rx_dma) { + /* The caller provided dma and cpu virtual address to + perform a read only transfer --> + use drv_data->dummy_dma_buf for dummy writes to + achieve reads */ + buf = &drv_data->dummy_dma_buf; + drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf); + drv_data->tx_dma = dma_map_single(dev, + buf, + drv_data->tx_map_len, + DMA_TO_DEVICE); + if (dma_mapping_error(drv_data->tx_dma)) + return 0; + + drv_data->tx_dma_needs_unmap = 1; + + /* Flag the transfer as rd_only for pump_transfers() DMA + regs programming (should be redundant) */ + drv_data->tx = NULL; + + return 1; + } + } + + if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) + return 0; + + /* NULL rx means write-only transfer and no map needed + since rx DMA will not be used */ + if (drv_data->rx) { + buf = drv_data->rx; + drv_data->rx_dma = dma_map_single( + dev, + buf, + drv_data->len, + DMA_FROM_DEVICE); + if 
(dma_mapping_error(drv_data->rx_dma)) + return 0; + drv_data->rx_dma_needs_unmap = 1; + } + + if (drv_data->tx == NULL) { + /* Read only message --> use drv_data->dummy_dma_buf for dummy + writes to achieve reads */ + buf = &drv_data->dummy_dma_buf; + drv_data->tx_map_len = sizeof(drv_data->dummy_dma_buf); + } else { + buf = drv_data->tx; + drv_data->tx_map_len = drv_data->len; + } + drv_data->tx_dma = dma_map_single(dev, + buf, + drv_data->tx_map_len, + DMA_TO_DEVICE); + if (dma_mapping_error(drv_data->tx_dma)) { + if (drv_data->rx_dma) { + dma_unmap_single(dev, + drv_data->rx_dma, + drv_data->len, + DMA_FROM_DEVICE); + drv_data->rx_dma_needs_unmap = 0; + } + return 0; + } + drv_data->tx_dma_needs_unmap = 1; + + return 1; +} + +static void unmap_dma_buffers(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + struct device *dev = &msg->spi->dev; + + if (drv_data->rx_dma_needs_unmap) { + dma_unmap_single(dev, + drv_data->rx_dma, + drv_data->len, + DMA_FROM_DEVICE); + drv_data->rx_dma_needs_unmap = 0; + } + if (drv_data->tx_dma_needs_unmap) { + dma_unmap_single(dev, + drv_data->tx_dma, + drv_data->tx_map_len, + DMA_TO_DEVICE); + drv_data->tx_dma_needs_unmap = 0; + } +} + +/* Caller already set message->status (dma is already blocked) */ +static void giveback(struct spi_message *message, struct driver_data *drv_data) +{ + void __iomem *regs = drv_data->regs; + + /* Bring SPI to sleep; restore_state() and pump_transfer() + will do new setup */ + wr_CONTROL(0, regs); + wr_INT_STATUS(0, regs); + wr_DMA(0, regs); + + drv_data->cs_control(SPI_CS_DEASSERT); + + message->state = NULL; + if (message->complete) + message->complete(message->context); + + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + queue_work(drv_data->workqueue, &drv_data->pump_messages); +} + +static void dma_err_handler( + int channel, void *data, struct pt_regs *regs, int errcode) +{ + struct driver_data *drv_data = data; + struct spi_message *msg = drv_data->cur_msg; + + dev_dbg(&drv_data->pdev->dev, "dma_err_handler\n"); + + /* Disable both rx and tx dma channels */ + imx_dma_disable(drv_data->rx_channel); + imx_dma_disable(drv_data->tx_channel); + + if (flush(drv_data) == 0) + dev_err(&drv_data->pdev->dev, + "dma_err_handler - flush failed\n"); + + unmap_dma_buffers(drv_data); + + msg->state = ERROR_STATE; + tasklet_schedule(&drv_data->pump_transfers); +} + +static void dma_tx_handler(int channel, void *data, struct pt_regs *regs) +{ + struct driver_data *drv_data = data; + + dev_dbg(&drv_data->pdev->dev, "dma_tx_handler\n"); + + imx_dma_disable(channel); + + /* Now wait for TX FIFO empty */ + wr_INT_STATUS(rd_INT_STATUS(drv_data->regs) | SPI_INTEN_TE, + drv_data->regs); +} + +static irqreturn_t dma_transfer(struct driver_data *drv_data) +{ + u32 status; + struct spi_message *msg = drv_data->cur_msg; + void __iomem *regs = drv_data->regs; + unsigned long limit; + + status = rd_INT_STATUS(regs); + + if ((status & SPI_INTEN_RO) && (status & SPI_STATUS_RO)) { + wr_INT_STATUS(status & ~SPI_INTEN, regs); + + imx_dma_disable(drv_data->rx_channel); + unmap_dma_buffers(drv_data); + + if (flush(drv_data) == 0) + dev_err(&drv_data->pdev->dev, + "dma_transfer - flush failed\n"); + + dev_warn(&drv_data->pdev->dev, + "dma_transfer - fifo overrun\n"); + + msg->state = ERROR_STATE; + tasklet_schedule(&drv_data->pump_transfers); + + return IRQ_HANDLED; + } + + if (status & SPI_STATUS_TE) { + wr_INT_STATUS(status & ~SPI_INTEN_TE, regs); + + if (drv_data->rx) { + /* Wait 
end of transfer before reading trailing data */ + limit = loops_per_jiffy << 1; + while ((rd_CONTROL(regs) & SPI_CONTROL_XCH) && limit--); + + if (limit == 0) + dev_err(&drv_data->pdev->dev, + "dma_transfer - end of tx failed\n"); + else + dev_dbg(&drv_data->pdev->dev, + "dma_transfer - end of tx\n"); + + imx_dma_disable(drv_data->rx_channel); + unmap_dma_buffers(drv_data); + + /* Calculate number of trailing data and read them */ + dev_dbg(&drv_data->pdev->dev, + "dma_transfer - test = 0x%08X\n", + rd_TEST(regs)); + drv_data->rx = drv_data->rx_end - + ((rd_TEST(regs) & SPI_TEST_RXCNT) >> + SPI_TEST_RXCNT_LSB)*drv_data->n_bytes; + drv_data->read(drv_data); + } else { + /* Write only transfer */ + unmap_dma_buffers(drv_data); + + if (flush(drv_data) == 0) + dev_err(&drv_data->pdev->dev, + "dma_transfer - flush failed\n"); + } + + /* End of transfer, update total bytes transferred */ + msg->actual_length += drv_data->len; + + /* Release chip select if requested, transfer delays are + handled in pump_transfers() */ + if (drv_data->cs_change) + drv_data->cs_control(SPI_CS_DEASSERT); + + /* Move to next transfer */ + msg->state = next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + + return IRQ_HANDLED; + } + + /* Oops, problem detected */ + return IRQ_NONE; +} + +static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + void __iomem *regs = drv_data->regs; + u32 status; + irqreturn_t handled = IRQ_NONE; + + status = rd_INT_STATUS(regs); + + while (status & SPI_STATUS_TH) { + dev_dbg(&drv_data->pdev->dev, + "interrupt_wronly_transfer - status = 0x%08X\n", status); + + /* Pump data */ + if (drv_data->write(drv_data)) { + wr_INT_STATUS(rd_INT_STATUS(regs) & ~SPI_INTEN, regs); + + dev_dbg(&drv_data->pdev->dev, + "interrupt_wronly_transfer - end of tx\n"); + + if (flush(drv_data) == 0) + dev_err(&drv_data->pdev->dev, + "interrupt_wronly_transfer - " + "flush failed\n"); + + /* End of transfer, update total bytes transferred */ + msg->actual_length += drv_data->len; + + /* Release chip select if requested, transfer delays are + handled in pump_transfers */ + if (drv_data->cs_change) + drv_data->cs_control(SPI_CS_DEASSERT); + + /* Move to next transfer */ + msg->state = next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + + return IRQ_HANDLED; + } + + status = rd_INT_STATUS(regs); + + /* We did something */ + handled = IRQ_HANDLED; + } + + return handled; +} + +static irqreturn_t interrupt_transfer(struct driver_data *drv_data) +{ + struct spi_message *msg = drv_data->cur_msg; + void __iomem *regs = drv_data->regs; + u32 status; + irqreturn_t handled = IRQ_NONE; + unsigned long limit; + + status = rd_INT_STATUS(regs); + + while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) { + dev_dbg(&drv_data->pdev->dev, + "interrupt_transfer - status = 0x%08X\n", status); + + if (status & SPI_STATUS_RO) { + wr_INT_STATUS(rd_INT_STATUS(regs) & ~SPI_INTEN, regs); + + dev_warn(&drv_data->pdev->dev, + "interrupt_transfer - fifo overrun\n" + " data not yet written = %d\n" + " data not yet read = %d\n", + data_to_write(drv_data), + data_to_read(drv_data)); + + if (flush(drv_data) == 0) + dev_err(&drv_data->pdev->dev, + "interrupt_transfer - flush failed\n"); + + msg->state = ERROR_STATE; + tasklet_schedule(&drv_data->pump_transfers); + + return IRQ_HANDLED; + } + + /* Pump data */ + drv_data->read(drv_data); + if (drv_data->write(drv_data)) { + 
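+ /* Last word queued: mask all SPI interrupts, then poll the
+    reader for the data still in flight, bounded by roughly two
+    jiffies worth of loops. */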
wr_INT_STATUS(rd_INT_STATUS(regs) & ~SPI_INTEN, regs); + + dev_dbg(&drv_data->pdev->dev, + "interrupt_transfer - end of tx\n"); + + /* Read trailing bytes */ + limit = loops_per_jiffy << 1; + while ((drv_data->read(drv_data) == 0) && limit--); + + if (limit == 0) + dev_err(&drv_data->pdev->dev, + "interrupt_transfer - " + "trailing byte read failed\n"); + else + dev_dbg(&drv_data->pdev->dev, + "interrupt_transfer - end of rx\n"); + + /* End of transfer, update total bytes transferred */ + msg->actual_length += drv_data->len; + + /* Release chip select if requested, transfer delays are + handled in pump_transfers */ + if (drv_data->cs_change) + drv_data->cs_control(SPI_CS_DEASSERT); + + /* Move to next transfer */ + msg->state = next_transfer(drv_data); + + /* Schedule transfer tasklet */ + tasklet_schedule(&drv_data->pump_transfers); + + return IRQ_HANDLED; + } + + status = rd_INT_STATUS(regs); + + /* We did something */ + handled = IRQ_HANDLED; + } + + return handled; +} + +static irqreturn_t spi_int(int irq, void *dev_id, struct pt_regs *regs) +{ + struct driver_data *drv_data = (struct driver_data *)dev_id; + + if (!drv_data->cur_msg) { + dev_err(&drv_data->pdev->dev, + "spi_int - bad message state\n"); + /* Never fail */ + return IRQ_HANDLED; + } + + return drv_data->transfer_handler(drv_data); +} + +static inline u32 spi_speed_hz(u32 data_rate) +{ + return imx_get_perclk2() / (4 << ((data_rate) >> 13)); +} + +static u32 spi_data_rate(u32 speed_hz) +{ + u32 div; + u32 quantized_hz = imx_get_perclk2() >> 2; + + for (div = SPI_PERCLK2_DIV_MIN; + div <= SPI_PERCLK2_DIV_MAX; + div++, quantized_hz >>= 1) { + if (quantized_hz <= speed_hz) + /* Max available speed LEQ required speed */ + return div << 13; + } + return SPI_CONTROL_DATARATE_BAD; +} + +static void pump_transfers(unsigned long data) +{ + struct driver_data *drv_data = (struct driver_data *)data; + struct spi_message *message; + struct spi_transfer *transfer, *previous; + struct chip_data *chip; + void __iomem *regs; + u32 tmp, control; + + dev_dbg(&drv_data->pdev->dev, "pump_transfer\n"); + + message = drv_data->cur_msg; + + /* Handle for abort */ + if (message->state == ERROR_STATE) { + message->status = -EIO; + giveback(message, drv_data); + return; + } + + /* Handle end of message */ + if (message->state == DONE_STATE) { + message->status = 0; + giveback(message, drv_data); + return; + } + + chip = drv_data->cur_chip; + + /* Delay if requested at end of transfer */ + transfer = drv_data->cur_transfer; + if (message->state == RUNNING_STATE) { + previous = list_entry(transfer->transfer_list.prev, + struct spi_transfer, + transfer_list); + if (previous->delay_usecs) + udelay(previous->delay_usecs); + } else { + /* START_STATE */ + message->state = RUNNING_STATE; + drv_data->cs_control = chip->cs_control; + } + + transfer = drv_data->cur_transfer; + drv_data->tx = (void *)transfer->tx_buf; + drv_data->tx_end = drv_data->tx + transfer->len; + drv_data->rx = transfer->rx_buf; + drv_data->rx_end = drv_data->rx + transfer->len; + drv_data->rx_dma = transfer->rx_dma; + drv_data->tx_dma = transfer->tx_dma; + drv_data->len = transfer->len; + drv_data->cs_change = transfer->cs_change; + + regs = drv_data->regs; + control = rd_CONTROL(regs); + + /* Bits per word setup */ + tmp = transfer->bits_per_word; + if (tmp == 0) { + /* Use device setup */ + tmp = chip->bits_per_word; + drv_data->n_bytes = chip->n_bytes; + drv_data->write = drv_data->tx ? chip->write : dummy_writer; + drv_data->read = drv_data->rx ? 
chip->read : NULL; + } else { + /* Use per-transfer setup */ + if (tmp <= 8) { + drv_data->n_bytes = 1; + drv_data->write = drv_data->tx ? + u8_writer : dummy_writer; + drv_data->read = drv_data->rx ? u8_reader : NULL; + } else { + drv_data->n_bytes = 2; + drv_data->write = drv_data->tx ? + u16_writer : dummy_writer; + drv_data->read = drv_data->rx ? u16_reader : NULL; + } + } + u32_EDIT(control, SPI_CONTROL_BITCOUNT, tmp - 1); + + /* Speed setup (surely valid because already checked) */ + tmp = transfer->speed_hz; + if (tmp == 0) + tmp = chip->max_speed_hz; + tmp = spi_data_rate(tmp); + u32_EDIT(control, SPI_CONTROL_DATARATE, tmp); + + wr_CONTROL(control, regs); + + /* Assert device chip-select */ + drv_data->cs_control(SPI_CS_ASSERT); + + /* DMA cannot read/write the SPI FIFOs other than 16 bits at a time; + hence if bits_per_word is 8 or less, PIO transfers are performed. + Moreover DMA is only convenient for transfers longer than the + FIFO byte size. */ + if ((drv_data->n_bytes == 2) && + (drv_data->len > SPI_FIFO_DEPTH*SPI_FIFO_BYTE_WIDTH) && + map_dma_buffers(drv_data)) { + dev_dbg(&drv_data->pdev->dev, + "pump dma transfer\n" + " tx = 0x%08X\n" + " tx_dma = 0x%08X\n" + " rx = 0x%08X\n" + " rx_dma = 0x%08X\n" + " len = %d\n", + (u32)drv_data->tx, + (u32)drv_data->tx_dma, + (u32)drv_data->rx, + (u32)drv_data->rx_dma, + (u32)drv_data->len); + + /* Ensure we have the correct interrupt handler */ + drv_data->transfer_handler = dma_transfer; + + /* Enable SPI and arm transfer */ + wr_CONTROL(rd_CONTROL(regs) | + SPI_CONTROL_SPIEN | SPI_CONTROL_XCH, + regs); + + /* Setup tx DMA */ + if (drv_data->tx) + /* Linear source address */ + CCR(drv_data->tx_channel) = + CCR_DMOD_FIFO | + CCR_SMOD_LINEAR | + CCR_SSIZ_32 | CCR_DSIZ_16 | + CCR_REN; + else + /* Read only transfer -> fixed source address for + dummy write to achieve read */ + CCR(drv_data->tx_channel) = + CCR_DMOD_FIFO | + CCR_SMOD_FIFO | + CCR_SSIZ_32 | CCR_DSIZ_16 | + CCR_REN; + + imx_dma_setup_single( + drv_data->tx_channel, + drv_data->tx_dma, + drv_data->len, + drv_data->rd_data_phys + 4, + DMA_MODE_WRITE + ); + + if (drv_data->rx) { + /* Setup rx DMA for linear destination address */ + CCR(drv_data->rx_channel) = + CCR_DMOD_LINEAR | + CCR_SMOD_FIFO | + CCR_DSIZ_32 | CCR_SSIZ_16 | + CCR_REN; + imx_dma_setup_single( + drv_data->rx_channel, + drv_data->rx_dma, + drv_data->len, + drv_data->rd_data_phys, + DMA_MODE_READ); + imx_dma_enable(drv_data->rx_channel); + + /* Enable SPI interrupt */ + wr_INT_STATUS(SPI_INTEN_RO, regs); + + /* Set SPI to request DMA service on both + Rx and Tx half fifo watermark */ + wr_DMA(SPI_DMA_RHDEN | SPI_DMA_THDEN, regs); + } else + /* Write only access -> set SPI to request DMA + service on Tx half fifo watermark */ + wr_DMA(SPI_DMA_THDEN, regs); + + imx_dma_enable(drv_data->tx_channel); + } else { + dev_dbg(&drv_data->pdev->dev, + "pump pio transfer\n" + " tx = 0x%08X\n" + " rx = 0x%08X\n" + " len = %d\n", + (u32)drv_data->tx, + (u32)drv_data->rx, + (u32)drv_data->len); + + /* Ensure we have the correct interrupt handler */ + if (drv_data->rx) + drv_data->transfer_handler = interrupt_transfer; + else + drv_data->transfer_handler = interrupt_wronly_transfer; + + /* Enable SPI */ + wr_CONTROL(rd_CONTROL(regs) | SPI_CONTROL_SPIEN, regs); + + /* Enable SPI interrupt */ + if (drv_data->rx) + wr_INT_STATUS(SPI_INTEN_TH | SPI_INTEN_RO, regs); + else + wr_INT_STATUS(SPI_INTEN_TH, regs); + } +} + +static void pump_messages(void *data) +{ + struct driver_data *drv_data = data; + unsigned long flags; + + /* Lock 
queue and check for queue work */ + spin_lock_irqsave(&drv_data->lock, flags); + if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { + drv_data->busy = 0; + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Make sure we are not already running a message */ + if (drv_data->cur_msg) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return; + } + + /* Extract head of queue */ + drv_data->cur_msg = list_entry(drv_data->queue.next, + struct spi_message, queue); + list_del_init(&drv_data->cur_msg->queue); + drv_data->busy = 1; + spin_unlock_irqrestore(&drv_data->lock, flags); + + /* Initial message state */ + drv_data->cur_msg->state = START_STATE; + drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, + struct spi_transfer, + transfer_list); + + /* Setup the SPI using the per chip configuration */ + drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); + restore_state(drv_data); + + /* Mark as busy and launch transfers */ + tasklet_schedule(&drv_data->pump_transfers); +} + +static int transfer(struct spi_device *spi, struct spi_message *msg) +{ + struct driver_data *drv_data = spi_master_get_devdata(spi->master); + u32 min_speed_hz, max_speed_hz, tmp; + struct spi_transfer *trans; + unsigned long flags; + + msg->actual_length = 0; + + /* Per transfer setup check */ + min_speed_hz = spi_speed_hz(SPI_CONTROL_DATARATE_MIN); + max_speed_hz = spi->max_speed_hz; + list_for_each_entry(trans, &msg->transfers, transfer_list) { + tmp = trans->bits_per_word; + if (tmp > 16) { + dev_err(&drv_data->pdev->dev, + "message rejected : " + "invalid transfer bits_per_word (%d bits)\n", + tmp); + goto msg_rejected; + } + tmp = trans->speed_hz; + if (tmp) { + if (tmp < min_speed_hz) { + dev_err(&drv_data->pdev->dev, + "message rejected : " + "device min speed (%d Hz) exceeds " + "required transfer speed (%d Hz)\n", + min_speed_hz, + tmp); + goto msg_rejected; + } else if (tmp > max_speed_hz) { + dev_err(&drv_data->pdev->dev, + "message rejected : " + "transfer speed (%d Hz) exceeds " + "device max speed (%d Hz)\n", + tmp, + max_speed_hz); + goto msg_rejected; + } + } + } + + /* Message accepted */ + msg->status = -EINPROGRESS; + msg->state = START_STATE; + + spin_lock_irqsave(&drv_data->lock, flags); + if (drv_data->run == QUEUE_STOPPED) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -ESHUTDOWN; + } + + list_add_tail(&msg->queue, &drv_data->queue); + if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + spin_unlock_irqrestore(&drv_data->lock, flags); + return 0; + +msg_rejected: + /* Message rejected and not queued */ + msg->status = -EINVAL; + msg->state = ERROR_STATE; + if (msg->complete) + msg->complete(msg->context); + return -EINVAL; +} + +/* On first setup bad values must free chip_data memory since will cause + spi_new_device to fail. Bad value setup from protocol driver are simply not + applied and notified to the calling driver. 
*/ +static int setup(struct spi_device *spi) +{ + struct spi_imx_chip *chip_info; + struct chip_data *chip; + int first_setup = 0; + u32 tmp; + int status = 0; + + /* Get controller data */ + chip_info = spi->controller_data; + + /* Get controller_state */ + chip = spi_get_ctldata(spi); + if (chip == NULL) { + first_setup = 1; + + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) { + dev_err(&spi->dev, + "setup - cannot allocate controller state"); + return -ENOMEM; + } + chip->control = SPI_DEFAULT_CONTROL; + + if (chip_info == NULL) { + /* spi_board_info.controller_data not is supplied */ + chip_info = kzalloc(sizeof(struct spi_imx_chip), + GFP_KERNEL); + if (!chip_info) { + dev_err(&spi->dev, + "setup - " + "cannot allocate controller data"); + status = -ENOMEM; + goto err_first_setup; + } + /* Set controller data default value */ + chip_info->enable_loopback = + SPI_DEFAULT_ENABLE_LOOPBACK; + chip_info->enable_dma = SPI_DEFAULT_ENABLE_DMA; + chip_info->cs_control = null_cs_control; + } + } + + /* Now set controller state based on controller data */ + + /* SPI loopback */ + if (chip_info->enable_loopback) + chip->test = SPI_TEST_LBC; + else + chip->test = 0; + + /* SPI dma driven */ + chip->enable_dma = chip_info->enable_dma; + + /* SPI mode */ + tmp = spi->mode; + if (tmp & SPI_LSB_FIRST) { + status = -EINVAL; + if (first_setup) { + dev_err(&spi->dev, + "setup - " + "HW doesn't support LSB first transfer\n"); + goto err_first_setup; + } else { + dev_err(&spi->dev, + "setup - " + "HW doesn't support LSB first transfer, " + "default to MSB first\n"); + spi->mode &= ~SPI_LSB_FIRST; + } + } + if (tmp & SPI_CS_HIGH) { + u32_EDIT(chip->control, + SPI_CONTROL_SSPOL, SPI_CONTROL_SSPOL_1); + } + switch (tmp & SPI_MODE_3) { + case SPI_MODE_0: + tmp = 0; + break; + case SPI_MODE_1: + tmp = SPI_CONTROL_PHA_1; + break; + case SPI_MODE_2: + tmp = SPI_CONTROL_POL_1; + break; + default: + /* SPI_MODE_3 */ + tmp = SPI_CONTROL_PHA_1 | SPI_CONTROL_POL_1; + break; + } + u32_EDIT(chip->control, SPI_CONTROL_POL | SPI_CONTROL_PHA, tmp); + + /* SPI word width */ + tmp = spi->bits_per_word; + if (tmp == 0) { + tmp = 8; + spi->bits_per_word = 8; + } else if (tmp > 16) { + status = -EINVAL; + dev_err(&spi->dev, + "setup - " + "invalid bits_per_word (%d)\n", + tmp); + if (first_setup) + goto err_first_setup; + else { + /* Undo setup using chip as backup copy */ + tmp = chip->bits_per_word; + spi->bits_per_word = tmp; + } + } + chip->bits_per_word = tmp; + u32_EDIT(chip->control, SPI_CONTROL_BITCOUNT, tmp - 1); + if (tmp <= 8) { + chip->n_bytes = 1; + chip->write = u8_writer; + chip->read = u8_reader; + } else { + chip->n_bytes = 2; + chip->write = u16_writer; + chip->read = u16_reader; + } + + /* SPI datarate */ + tmp = spi_data_rate(spi->max_speed_hz); + if (tmp == SPI_CONTROL_DATARATE_BAD) { + status = -EINVAL; + dev_err(&spi->dev, + "setup - " + "HW min speed (%d Hz) exceeds required " + "max speed (%d Hz)\n", + spi_speed_hz(SPI_CONTROL_DATARATE_MIN), + spi->max_speed_hz); + if (first_setup) + goto err_first_setup; + else + /* Undo setup using chip as backup copy */ + spi->max_speed_hz = spi_data_rate(chip->max_speed_hz); + } else { + u32_EDIT(chip->control, SPI_CONTROL_DATARATE, tmp); + /* Actual rounded max_speed_hz */ + tmp = spi_speed_hz(tmp); + spi->max_speed_hz = tmp; + chip->max_speed_hz = tmp; + } + + /* SPI chip-select management */ + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + /* Save controller_state */ + spi_set_ctldata(spi, chip); + + /* Summary */ + 
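+ /* Everything validated above now lives in chip_data, so
+    restore_state() can reload the whole register image before each
+    queued message without re-deriving it from spi_device fields. */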
dev_dbg(&spi->dev, + "setup succeded\n" + " loopback enable = %s\n" + " dma enable = %s\n" + " mode = %d\n" + " bits per word = %d\n" + " min speed = %d Hz\n" + " rounded max speed = %d Hz\n", + chip->test & SPI_TEST_LBC ? "Yes" : "No", + chip->enable_dma ? "Yes" : "No", + spi->mode, + spi->bits_per_word, + spi_speed_hz(SPI_CONTROL_DATARATE_MIN), + spi->max_speed_hz); + return status; + +err_first_setup: + kfree(chip); + return status; +} + +static void cleanup(const struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); + kfree(chip); +} + +static int init_queue(struct driver_data *drv_data) +{ + INIT_LIST_HEAD(&drv_data->queue); + spin_lock_init(&drv_data->lock); + + drv_data->run = QUEUE_STOPPED; + drv_data->busy = 0; + + tasklet_init(&drv_data->pump_transfers, + pump_transfers, (unsigned long)drv_data); + + INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); + drv_data->workqueue = create_singlethread_workqueue( + drv_data->master->cdev.dev->bus_id); + if (drv_data->workqueue == NULL) + return -EBUSY; + + return 0; +} + +static int start_queue(struct driver_data *drv_data) +{ + unsigned long flags; + + spin_lock_irqsave(&drv_data->lock, flags); + + if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { + spin_unlock_irqrestore(&drv_data->lock, flags); + return -EBUSY; + } + + drv_data->run = QUEUE_RUNNING; + drv_data->cur_msg = NULL; + drv_data->cur_transfer = NULL; + drv_data->cur_chip = NULL; + spin_unlock_irqrestore(&drv_data->lock, flags); + + queue_work(drv_data->workqueue, &drv_data->pump_messages); + + return 0; +} + +static int stop_queue(struct driver_data *drv_data) +{ + unsigned long flags; + unsigned limit = 500; + int status = 0; + + spin_lock_irqsave(&drv_data->lock, flags); + + /* This is a bit lame, but is optimized for the common execution path. + * A wait_queue on the drv_data->busy could be used, but then the common + * execution path (pump_messages) would be required to call wake_up or + * friends on every SPI message. 
Do this instead */ + drv_data->run = QUEUE_STOPPED; + while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { + spin_unlock_irqrestore(&drv_data->lock, flags); + msleep(10); + spin_lock_irqsave(&drv_data->lock, flags); + } + + if (!list_empty(&drv_data->queue) || drv_data->busy) + status = -EBUSY; + + spin_unlock_irqrestore(&drv_data->lock, flags); + + return status; +} + +static int destroy_queue(struct driver_data *drv_data) +{ + int status; + + status = stop_queue(drv_data); + if (status != 0) + return status; + + destroy_workqueue(drv_data->workqueue); + + return 0; +} + +static int spi_imx_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct spi_imx_master *platform_info; + struct spi_master *master; + struct driver_data *drv_data = NULL; + struct resource *res; + int irq, status = 0; + + platform_info = dev->platform_data; + if (platform_info == NULL) { + dev_err(&pdev->dev, "probe - no platform data supplied\n"); + status = -ENODEV; + goto err_no_pdata; + } + + /* Allocate master with space for drv_data */ + master = spi_alloc_master(dev, sizeof(struct driver_data)); + if (!master) { + dev_err(&pdev->dev, "probe - cannot alloc spi_master\n"); + status = -ENOMEM; + goto err_no_mem; + } + drv_data = spi_master_get_devdata(master); + drv_data->master = master; + drv_data->master_info = platform_info; + drv_data->pdev = pdev; + + master->bus_num = pdev->id; + master->num_chipselect = platform_info->num_chipselect; + master->cleanup = cleanup; + master->setup = setup; + master->transfer = transfer; + + drv_data->dummy_dma_buf = SPI_DUMMY_u32; + + /* Find and map resources */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "probe - MEM resources not defined\n"); + status = -ENODEV; + goto err_no_iores; + } + drv_data->ioarea = request_mem_region(res->start, + res->end - res->start + 1, + pdev->name); + if (drv_data->ioarea == NULL) { + dev_err(&pdev->dev, "probe - cannot reserve region\n"); + status = -ENXIO; + goto err_no_iores; + } + drv_data->regs = ioremap(res->start, res->end - res->start + 1); + if (drv_data->regs == NULL) { + dev_err(&pdev->dev, "probe - cannot map IO\n"); + status = -ENXIO; + goto err_no_iomap; + } + drv_data->rd_data_phys = (dma_addr_t)res->start; + + /* Attach to IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "probe - IRQ resource not defined\n"); + status = -ENODEV; + goto err_no_irqres; + } + status = request_irq(irq, spi_int, SA_INTERRUPT, dev->bus_id, drv_data); + if (status < 0) { + dev_err(&pdev->dev, "probe - cannot get IRQ (%d)\n", status); + goto err_no_irqres; + } + + /* Setup DMA if requested */ + drv_data->tx_channel = -1; + drv_data->rx_channel = -1; + if (platform_info->enable_dma) { + /* Get rx DMA channel */ + status = imx_dma_request_by_prio(&drv_data->rx_channel, + "spi_imx_rx", DMA_PRIO_HIGH); + if (status < 0) { + dev_err(dev, + "probe - problem (%d) requesting rx channel\n", + status); + goto err_no_rxdma; + } else + imx_dma_setup_handlers(drv_data->rx_channel, NULL, + dma_err_handler, drv_data); + + /* Get tx DMA channel */ + status = imx_dma_request_by_prio(&drv_data->tx_channel, + "spi_imx_tx", DMA_PRIO_MEDIUM); + if (status < 0) { + dev_err(dev, + "probe - problem (%d) requesting tx channel\n", + status); + goto err_no_txdma; + } else + imx_dma_setup_handlers(drv_data->tx_channel, + dma_tx_handler, dma_err_handler, + drv_data); + + /* Set request source and burst length for allocated channels */ + switch 
(drv_data->pdev->id) { + case 1: + /* Using SPI1 */ + RSSR(drv_data->rx_channel) = DMA_REQ_SPI1_R; + RSSR(drv_data->tx_channel) = DMA_REQ_SPI1_T; + break; + case 2: + /* Using SPI2 */ + RSSR(drv_data->rx_channel) = DMA_REQ_SPI2_R; + RSSR(drv_data->tx_channel) = DMA_REQ_SPI2_T; + break; + default: + status = -ENODEV; + dev_err(dev, "probe - bad SPI Id\n"); + goto err_no_devid; + } + BLR(drv_data->rx_channel) = SPI_DMA_BLR; + BLR(drv_data->tx_channel) = SPI_DMA_BLR; + } + + /* Load default SPI configuration */ + wr_RESET(SPI_RESET_START, drv_data->regs); + wr_RESET(0, drv_data->regs); + wr_CONTROL(SPI_DEFAULT_CONTROL, drv_data->regs); + + /* Init and start queue */ + status = init_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "probe - problem initializing queue\n"); + goto err_init_queue; + } + status = start_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "probe - problem starting queue\n"); + goto err_start_queue; + } + + /* Register with the SPI framework */ + platform_set_drvdata(pdev, drv_data); + status = spi_register_master(master); + if (status != 0) { + dev_err(&pdev->dev, "probe - problem registering spi master\n"); + goto err_spi_register; + } + + dev_dbg(dev, "probe succeeded\n"); + return 0; + +err_init_queue: +err_start_queue: +err_spi_register: + destroy_queue(drv_data); + +err_no_devid: + imx_dma_free(drv_data->tx_channel); + +err_no_txdma: + imx_dma_free(drv_data->rx_channel); + +err_no_rxdma: + free_irq(irq, drv_data); + +err_no_irqres: + iounmap(drv_data->regs); + +err_no_iomap: + release_resource(drv_data->ioarea); + kfree(drv_data->ioarea); + +err_no_iores: + spi_master_put(master); + +err_no_pdata: +err_no_mem: + return status; +} + +static int __devexit spi_imx_remove(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int irq; + int status = 0; + + if (!drv_data) + return 0; + + /* Remove the queue */ + status = destroy_queue(drv_data); + if (status != 0) { + dev_err(&pdev->dev, "queue remove failed (%d)\n", status); + return status; + } + + /* Reset SPI */ + wr_RESET(SPI_RESET_START, drv_data->regs); + wr_RESET(0, drv_data->regs); + + /* Release DMA */ + if (drv_data->master_info->enable_dma) { + RSSR(drv_data->rx_channel) = 0; + RSSR(drv_data->tx_channel) = 0; + imx_dma_free(drv_data->tx_channel); + imx_dma_free(drv_data->rx_channel); + } + + /* Release IRQ */ + irq = platform_get_irq(pdev, 0); + if (irq >= 0) + free_irq(irq, drv_data); + + /* Release map resources */ + iounmap(drv_data->regs); + release_resource(drv_data->ioarea); + kfree(drv_data->ioarea); + + /* Disconnect from the SPI framework */ + spi_unregister_master(drv_data->master); + spi_master_put(drv_data->master); + + /* Prevent double remove */ + platform_set_drvdata(pdev, NULL); + + dev_dbg(&pdev->dev, "remove succeeded\n"); + + return 0; +} + +static void spi_imx_shutdown(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + + /* Reset SPI */ + wr_RESET(SPI_RESET_START, drv_data->regs); + wr_RESET(0, drv_data->regs); + + dev_dbg(&pdev->dev, "shutdown succeeded\n"); +} + +#ifdef CONFIG_PM +static int suspend_devices(struct device *dev, void *pm_message) +{ + pm_message_t *state = pm_message; + + if (dev->power.power_state.event != state->event) { + dev_warn(dev, "pm state does not match request\n"); + return -1; + } + + return 0; +} + +static int spi_imx_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 
0; + + status = stop_queue(drv_data); + if (status != 0) { + dev_warn(&pdev->dev, "suspend cannot stop queue\n"); + return status; + } + + dev_dbg(&pdev->dev, "suspended\n"); + + return 0; +} + +static int spi_imx_resume(struct platform_device *pdev) +{ + struct driver_data *drv_data = platform_get_drvdata(pdev); + int status = 0; + + /* Start the queue running */ + status = start_queue(drv_data); + if (status != 0) + dev_err(&pdev->dev, "problem starting queue (%d)\n", status); + else + dev_dbg(&pdev->dev, "resumed\n"); + + return status; +} +#else +#define spi_imx_suspend NULL +#define spi_imx_resume NULL +#endif /* CONFIG_PM */ + +static struct platform_driver driver = { + .driver = { + .name = "imx-spi", + .bus = &platform_bus_type, + .owner = THIS_MODULE, + }, + .probe = spi_imx_probe, + .remove = __devexit_p(spi_imx_remove), + .shutdown = spi_imx_shutdown, + .suspend = spi_imx_suspend, + .resume = spi_imx_resume, +}; + +static int __init spi_imx_init(void) +{ + return platform_driver_register(&driver); +} +module_init(spi_imx_init); + +static void __exit spi_imx_exit(void) +{ + platform_driver_unregister(&driver); +} +module_exit(spi_imx_exit); + +MODULE_AUTHOR("Andrea Paterniani, "); +MODULE_DESCRIPTION("iMX SPI Controller Driver"); +MODULE_LICENSE("GPL"); Index: at91/include/asm-arm/arch-imx/spi_imx.h =================================================================== --- /dev/null 1970-01-01 00:00:00.000000000 +0000 +++ at91/include/asm-arm/arch-imx/spi_imx.h 2006-10-01 20:06:53.000000000 -0700 @@ -0,0 +1,68 @@ +/* + * include/asm-arm/arch-imx/spi_imx.h + * + * Copyright (C) 2006 SWAPP + * Andrea Paterniani + * + * Initial version inspired by: + * linux-2.6.17-rc3-mm1/include/asm-arm/arch-pxa/pxa2xx_spi.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef SPI_IMX_H_ +#define SPI_IMX_H_ +#ifdef __KERNEL__ + +/** + * struct spi_imx_master - device.platform_data for SPI controller devices. + * @num_chipselect: chipselects are used to distinguish individual + * SPI slaves, and are numbered from zero to num_chipselects - 1. + * each slave has a chipselect signal, but it's common that not + * every chipselect is connected to a slave. + * @enable_dma: if true enables DMA driven transfers. +*/ +struct spi_imx_master { + u8 num_chipselect; + u8 enable_dma:1; +}; + +/** + * struct spi_imx_chip - spi_board_info.controller_data for SPI + * slave devices, copied to spi_device.controller_data. + * @enable_loopback : used for test purposes to internally connect RX and TX + * sections. + * @enable_dma : enables dma transfer (provided that controller driver has + * dma enabled too). + * @mode : defines clock phase and polarity (SPI_MODE_0..SPI_MODE_3). + * @bits_per_word : data transfer word size (1..16). + * @ins_ss_pulse : enable /SS pulse insertion between SPI bursts. 
+ * @period_bclk : used to define wait time (number of bit clocks) between data + * transactions (0..32767). + * @cs_control : function pointer to a board-specific function that asserts or + * deasserts an I/O port to control HW generation of the device's chip-select. +*/ +struct spi_imx_chip { + u8 enable_loopback:1; + u8 enable_dma:1; + void (*cs_control)(u32 control); +}; + +/* Chip-select state */ +#define SPI_CS_ASSERT (1 << 0) +#define SPI_CS_DEASSERT (1 << 1) + +#endif /* __KERNEL__ */ +#endif /* SPI_IMX_H_ */
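For readers wiring this up, a minimal board-file sketch (not part of the patch; imx_spi_cs(), the modalias and the speed are made-up examples, and the "imx-spi" platform device with its MEM/IRQ resources is assumed to be declared elsewhere in the machine code):

static struct spi_imx_master imx_spi1_master_info = {
	.num_chipselect	= 1,
	.enable_dma	= 1,
};

/* Hypothetical helper: drive whatever GPIO the board uses as /SS */
static void imx_spi_cs(u32 command)
{
	if (command == SPI_CS_ASSERT) {
		/* drive the chip-select line active here */
	} else {	/* SPI_CS_DEASSERT */
		/* drive the chip-select line inactive here */
	}
}

static struct spi_imx_chip imx_spi1_chip_info = {
	.enable_loopback	= 0,
	.enable_dma		= 1,
	.cs_control		= imx_spi_cs,
};

static struct spi_board_info imx_spi_board_info[] __initdata = {
	{
		.modalias	 = "my-spi-slave",	/* made-up protocol driver */
		.controller_data = &imx_spi1_chip_info,
		.max_speed_hz	 = 2000000,
		.bus_num	 = 1,	/* matches the "imx-spi" pdev .id (SPI1) */
		.chip_select	 = 0,
		.mode		 = SPI_MODE_0,
	},
};

/* ... in the board init code: */
spi_register_board_info(imx_spi_board_info, ARRAY_SIZE(imx_spi_board_info));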
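Protocol drivers then use the stock SPI core calls; the per-transfer overrides this controller honors (speed_hz, bits_per_word, a NULL tx_buf for read-only transfers, delay_usecs, cs_change) make half-duplex command/response sequences cheap. A sketch, again with made-up names, where the command goes out as 8 bit words and the response comes back as 16 bit words at 1 MHz (rx_buf should be kmalloc'd so the DMA path can map it):

static int my_cmd_read(struct spi_device *spi, u8 cmd, u16 *rx_buf, int nwords)
{
	struct spi_transfer t_cmd = {
		.tx_buf		= &cmd,
		.len		= 1,
	};
	struct spi_transfer t_rsp = {
		.rx_buf		= rx_buf,	/* tx_buf NULL: driver clocks out SPI_DUMMY_u16 */
		.len		= nwords * 2,	/* len is in bytes */
		.bits_per_word	= 16,
		.speed_hz	= 1000000,
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t_cmd, &m);
	spi_message_add_tail(&t_rsp, &m);
	return spi_sync(spi, &m);
}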