Skip to content
Permalink
Browse files
SPI DUAL QUAD mode fixes for Designware SPI
Add SPI DUAL and QUAD mode configuration and fixes to ensure that
transmit and receive work correctly in DUAL and QUAD modes.

DUAL and QUAD mode reception works correctly with SPI-NOR flash.
Transmission, however, cannot be exercised against SPI-NOR flash
because the MTD SPI-NOR driver has no write support yet;
transmission was therefore tested using SPI adapters.

This commit also includes a GPIO fix that resolves an issue with
the SPI GPIO chip-select line.

Signed-off-by: Vaidya, Mahesh R <mahesh.r.vaidya@intel.com>
  • Loading branch information
maheshv1203 authored and zhoufuro committed Jun 3, 2021
1 parent e85efaf commit de6f8390867e1796aa6ad8d0603a8c7676e54fac
Show file tree
Hide file tree
Showing 4 changed files with 123 additions and 41 deletions.
@@ -1226,7 +1226,7 @@ static int keembay_gpio_set_direction_in(struct gpio_chip *gc, unsigned int pin)

raw_spin_lock_irqsave(&kpc->lock, flags);
val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
val |= FIELD_PREP(KEEMBAY_GPIO_MODE_DIR_MASK, 1);
val |= KEEMBAY_GPIO_MODE_DIR;
keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
raw_spin_unlock_irqrestore(&kpc->lock, flags);

@@ -1242,7 +1242,7 @@ static int keembay_gpio_set_direction_out(struct gpio_chip *gc,

raw_spin_lock_irqsave(&kpc->lock, flags);
val = keembay_read_reg(kpc->base1 + KEEMBAY_GPIO_MODE, pin);
val &= FIELD_PREP(KEEMBAY_GPIO_MODE_DIR_MASK, 0);
val &= ~KEEMBAY_GPIO_MODE_DIR;
keembay_write_reg(val, kpc->base1 + KEEMBAY_GPIO_MODE, pin);
raw_spin_unlock_irqrestore(&kpc->lock, flags);
keembay_gpio_set(gc, pin, value);
@@ -57,6 +57,59 @@ static const struct debugfs_reg32 dw_spi_dbgfs_regs[] = {
DW_SPI_DBGFS_REG("RX_SAMPLE_DLY", DW_SPI_RX_SAMPLE_DLY),
};

static u8 dw_spi_update_tmode(struct dw_spi *dws)
{
if (!dws->tx)
return SPI_TMOD_RO;

if (!dws->rx)
return SPI_TMOD_TO;

return SPI_TMOD_TR;
}

/*
 * Map the transfer's bus width (tx_nbits/rx_nbits) to the DWC SSI
 * SPI frame-format encoding. When both buffers are present, the Rx
 * width takes precedence, matching the original assignment order.
 */
static u8 dw_spi_get_spimode(struct spi_transfer *transfer)
{
	u8 nbits;

	if (transfer->rx_buf)
		nbits = transfer->rx_nbits;
	else if (transfer->tx_buf)
		nbits = transfer->tx_nbits;
	else
		nbits = 1;

	if (nbits == SPI_NBITS_QUAD)
		return SSI_QUAD_SPI;
	if (nbits == SPI_NBITS_DUAL)
		return SSI_DUAL_SPI;

	return SSI_STD_SPI;
}

static void dw_spi_dual_quad_config(struct dw_spi *dws, struct spi_device *spi,
struct dw_spi_cfg *cfg, struct spi_transfer *transfer)
{
if (dws->caps & DW_SPI_CAP_DWC_SSI) {
struct chip_data *chip = spi_get_ctldata(spi);

/* Adjust Transfer Mode for DUAL and QUAD */
if (dw_spi_get_spimode(transfer)) {
cfg->tmode = dw_spi_update_tmode(dws);
/* Default values for Dual/Quad mode */
if (dws->tx)
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0x20A);
else
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0);
}

/* CTRLR0[23:22] SPI Frame Format */
chip->cr0 &= ~DWC_SSI_CTRLR0_SPI_FRF_MASK;
chip->cr0 |= dw_spi_get_spimode(transfer) << DWC_SSI_CTRLR0_SPI_FRF_OFFSET;
}

}

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
char name[32];
@@ -116,17 +169,20 @@ static inline u32 tx_max(struct dw_spi *dws)

tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

/*
* Another concern is about the tx/rx mismatch, we
* though to use (dws->fifo_len - rxflr - txflr) as
* one maximum value for tx, but it doesn't cover the
* data which is out of tx/rx fifo and inside the
* shift registers. So a control from sw point of
* view is taken.
*/
rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);
if (dws->rx) {
/*
* Another concern is about the tx/rx mismatch, we
* though to use (dws->fifo_len - rxflr - txflr) as
* one maximum value for tx, but it doesn't cover the
* data which is out of tx/rx fifo and inside the
* shift registers. So a control from sw point of
* view is taken.
*/
rxtx_gap = dws->fifo_len - (dws->rx_len - dws->tx_len);

return min3((u32)dws->tx_len, tx_room, rxtx_gap);
return min3((u32)dws->tx_len, tx_room, rxtx_gap);
} else
return min((u32)dws->tx_len, tx_room);
}

/* Return the max entries we should read out of rx fifo */
@@ -163,16 +219,14 @@ static void dw_reader(struct dw_spi *dws)

while (max--) {
rxw = dw_read_io_reg(dws, DW_SPI_DR);
if (dws->rx) {
if (dws->n_bytes == 1)
*(u8 *)(dws->rx) = rxw;
else if (dws->n_bytes == 2)
*(u16 *)(dws->rx) = rxw;
else
*(u32 *)(dws->rx) = rxw;

dws->rx += dws->n_bytes;
}
if (dws->n_bytes == 1)
*(u8 *)(dws->rx) = rxw;
else if (dws->n_bytes == 2)
*(u16 *)(dws->rx) = rxw;
else
*(u32 *)(dws->rx) = rxw;

dws->rx += dws->n_bytes;
--dws->rx_len;
}
}
@@ -213,6 +267,11 @@ int dw_spi_check_status(struct dw_spi *dws, bool raw)
}
EXPORT_SYMBOL_GPL(dw_spi_check_status);

/* Return true while the controller's status register reports BUSY. */
static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
}

static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
{
u16 irq_status = dw_readl(dws, DW_SPI_ISR);
@@ -229,23 +288,35 @@ static irqreturn_t dw_spi_transfer_handler(struct dw_spi *dws)
* final stage of the transfer. By doing so we'll get the next IRQ
* right when the leftover incoming data is received.
*/
dw_reader(dws);
if (!dws->rx_len) {
spi_mask_intr(dws, 0xff);
spi_finalize_current_transfer(dws->master);
} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR)) {
dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
if (dws->rx) {
dw_reader(dws);
if (!dws->rx_len) {
spi_mask_intr(dws, 0xff);
spi_finalize_current_transfer(dws->master);
} else if (dws->rx_len <= dw_readl(dws, DW_SPI_RXFTLR))
dw_writel(dws, DW_SPI_RXFTLR, dws->rx_len - 1);
}

/*
* Send data out if Tx FIFO Empty IRQ is received. The IRQ will be
* disabled after the data transmission is finished so not to
* have the TXE IRQ flood at the final stage of the transfer.
*/
if (irq_status & SPI_INT_TXEI) {
dw_writer(dws);
if (!dws->tx_len)
if ((irq_status & SPI_INT_TXEI)) {
if (dws->tx_len)
dw_writer(dws);
else {
spi_mask_intr(dws, SPI_INT_TXEI);
if ((!dws->rx)) {
/* Check for transfer completion in Transmit only mode */
if (!(dw_readl(dws, DW_SPI_TXFLR)) &&
(!(dw_spi_ctlr_busy(dws)))) {
/* Finalize transfer, if transfer is complete */
spi_finalize_current_transfer(dws->master);
} else
spi_umask_intr(dws, SPI_INT_TXEI);
}
}
}

return IRQ_HANDLED;
@@ -419,15 +490,22 @@ static int dw_spi_transfer_one(struct spi_controller *master,
dws->dma_mapped = 0;
dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
dws->tx = (void *)transfer->tx_buf;
dws->tx_len = transfer->len / dws->n_bytes;
dws->rx = transfer->rx_buf;
dws->rx_len = dws->tx_len;
dws->tx_len = transfer->len / dws->n_bytes;
if (dws->rx)
dws->rx_len = dws->tx_len;
else {
cfg.tmode = SPI_TMOD_TO;
dws->rx_len = 0;
}

/* Ensure the data above is visible for all CPUs */
smp_mb();

spi_enable_chip(dws, 0);

dw_spi_dual_quad_config(dws, spi, &cfg, transfer);

dw_spi_update_config(dws, spi, &cfg);

transfer->effective_speed_hz = dws->current_freq;
@@ -479,7 +557,7 @@ static int dw_spi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
static bool dw_spi_supports_mem_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (op->data.buswidth > 1 || op->addr.buswidth > 1 ||
if (op->data.buswidth > 4 || op->addr.buswidth > 1 ||
op->dummy.buswidth > 1 || op->cmd.buswidth > 1)
return false;

@@ -602,11 +680,6 @@ static int dw_spi_write_then_read(struct dw_spi *dws, struct spi_device *spi)
return 0;
}

/* Return true while the controller's status register reports BUSY. */
static inline bool dw_spi_ctlr_busy(struct dw_spi *dws)
{
return dw_readl(dws, DW_SPI_SR) & SR_BUSY;
}

static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
{
int retry = SPI_WAIT_RETRIES;
@@ -868,7 +941,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
dw_spi_init_mem_ops(dws);

master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP |
SPI_TX_DUAL | SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
@@ -64,6 +64,8 @@
#define SPI_CFS_OFFSET 12

/* Bit fields in CTRLR0 based on DWC_ssi_databook.pdf v1.01a */
#define DWC_SSI_CTRLR0_SPI_FRF_OFFSET 22
#define DWC_SSI_CTRLR0_SPI_FRF_MASK GENMASK(23, 22)
#define DWC_SSI_CTRLR0_SRL_OFFSET 13
#define DWC_SSI_CTRLR0_TMOD_OFFSET 10
#define DWC_SSI_CTRLR0_TMOD_MASK GENMASK(11, 10)
@@ -122,6 +124,12 @@ enum dw_ssi_type {
#define DW_SPI_CAP_KEEMBAY_MST BIT(1)
#define DW_SPI_CAP_DWC_SSI BIT(2)

/*
 * SPI frame-format encodings written to CTRLR0[23:22] (SPI_FRF) on
 * DWC SSI controllers: standard 1-line, dual 2-line, quad 4-line I/O.
 */
enum dw_ssi_spi_mode {
SSI_STD_SPI = 0, /* standard single-line SPI */
SSI_DUAL_SPI, /* dual I/O */
SSI_QUAD_SPI, /* quad I/O */
};

/* Slave spi_transfer/spi_mem_op related */
struct dw_spi_cfg {
u8 tmode;
@@ -295,7 +295,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;

if (ctlr->mem_ops) {
if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;

0 comments on commit de6f839

Please sign in to comment.