From 028f915b20ec343dda88f1bcc99f07f6b428b4aa Mon Sep 17 00:00:00 2001
From: Matthew McClintock
Date: Thu, 5 May 2016 10:07:11 -0500
Subject: [PATCH 13/69] spi: qup: allow multiple DMA transactions per spi xfer

Much like the block mode changes, we are breaking up DMA transactions
into 64K chunks so we can reset the QUP engine.

Signed-off-by: Matthew McClintock
---
 drivers/spi/spi-qup.c | 120 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 86 insertions(+), 34 deletions(-)

--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -566,6 +566,21 @@ static int spi_qup_io_config(struct spi_
 	return 0;
 }
 
+static unsigned int spi_qup_sgl_get_size(struct scatterlist *sgl, unsigned int nents)
+{
+	struct scatterlist *sg;
+	int i;
+	unsigned int length = 0;
+
+	if (!nents)
+		return 0;
+
+	for_each_sg(sgl, sg, nents, i)
+		length += sg_dma_len(sg);
+
+	return length;
+}
+
 static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 unsigned long timeout)
 {
@@ -573,53 +588,90 @@ unsigned long timeout)
 	struct spi_qup *qup = spi_master_get_devdata(master);
 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 	int ret;
+	struct scatterlist *tx_sgl, *rx_sgl;
 
-	ret = spi_qup_io_config(spi, xfer);
-	if (ret)
-		return ret;
-
-	/* before issuing the descriptors, set the QUP to run */
-	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
-	if (ret) {
-		dev_warn(qup->dev, "cannot set RUN state\n");
-		return ret;
-	}
-
-	if (!qup->qup_v1) {
-		if (xfer->rx_buf)
-			rx_done = spi_qup_dma_done;
-
-		if (xfer->tx_buf)
-			tx_done = spi_qup_dma_done;
-	}
-
-	if (xfer->rx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
-				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
-				      rx_done, &qup->done);
-		if (ret)
-			return ret;
+	rx_sgl = xfer->rx_sg.sgl;
+	tx_sgl = xfer->tx_sg.sgl;
 
-		dma_async_issue_pending(master->dma_rx);
-	}
+	do {
+		int rx_nents = 0, tx_nents = 0;
 
-	if (xfer->tx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
-				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
-				      tx_done, &qup->dma_tx_done);
+		if (rx_sgl) {
+			rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER);
+			if (rx_nents < 0)
+				rx_nents = sg_nents(rx_sgl);
+
+			qup->n_words = spi_qup_sgl_get_size(rx_sgl, rx_nents) /
+				qup->w_size;
+		}
+
+		if (tx_sgl) {
+			tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER);
+			if (tx_nents < 0)
+				tx_nents = sg_nents(tx_sgl);
+
+			qup->n_words = spi_qup_sgl_get_size(tx_sgl, tx_nents) /
+				qup->w_size;
+		}
+
+
+		ret = spi_qup_io_config(spi, xfer);
 		if (ret)
 			return ret;
 
-		dma_async_issue_pending(master->dma_tx);
-	}
+		/* before issuing the descriptors, set the QUP to run */
+		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+		if (ret) {
+			dev_warn(qup->dev, "cannot set RUN state\n");
+			return ret;
+		}
+
+		if (!qup->qup_v1) {
+			if (rx_sgl) {
+				rx_done = spi_qup_dma_done;
+			}
+
+			if (tx_sgl) {
+				tx_done = spi_qup_dma_done;
+			}
+		}
+
+		if (rx_sgl) {
+			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
+					      DMA_DEV_TO_MEM, rx_done,
+					      &qup->done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_rx);
+		}
+
+		if (tx_sgl) {
+			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
+					      DMA_MEM_TO_DEV, tx_done,
+					      &qup->dma_tx_done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_tx);
+		}
+
+		if (rx_sgl && !wait_for_completion_timeout(&qup->done, timeout)) {
+			pr_emerg(" rx timed out");
+			return -ETIMEDOUT;
+		}
+
+		if (tx_sgl && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) {
+			pr_emerg(" tx timed out\n");
+			return -ETIMEDOUT;
+		}
 
-	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
-		return -ETIMEDOUT;
+		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl));
+		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl));
 
-	if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
-		ret = -ETIMEDOUT;
+	} while (rx_sgl || tx_sgl);
 
-	return ret;
+	return 0;
 }
 
 static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,