From 921234e0c5d77b510ccc22d9fcfa844f20de970a Mon Sep 17 00:00:00 2001
From: Lars-Peter Clausen
Date: Fri, 8 Mar 2019 15:02:35 +0200
Subject: [PATCH] dmaengine: axi-dmac: Split too large segments

The axi-dmac driver currently rejects transfers with segments that are
larger than what the hardware can handle.

Re-work the driver so that these large segments are split into multiple
segments instead, where each segment is smaller than or equal to the
maximum segment size.

This allows the driver to handle transfers with segments of arbitrary
size.

Signed-off-by: Lars-Peter Clausen
Signed-off-by: Bogdan Togorean
Signed-off-by: Alexandru Ardelean
Signed-off-by: Vinod Koul
---
 drivers/dma/dma-axi-dmac.c | 81 ++++++++++++++++++++++++++++----------
 1 file changed, 61 insertions(+), 20 deletions(-)

diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index ffc0adc2f6ce44..0fe3a931d8d539 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -166,7 +166,7 @@ static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
 
 static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
 {
-	if (len == 0 || len > chan->max_length)
+	if (len == 0)
 		return false;
 	if ((len & chan->align_mask) != 0) /* Not aligned */
 		return false;
@@ -379,6 +379,49 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 	return desc;
 }
 
+static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
+	enum dma_transfer_direction direction, dma_addr_t addr,
+	unsigned int num_periods, unsigned int period_len,
+	struct axi_dmac_sg *sg)
+{
+	unsigned int num_segments, i;
+	unsigned int segment_size;
+	unsigned int len;
+
+	/* Split into multiple equally sized segments if necessary */
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+	segment_size = DIV_ROUND_UP(period_len, num_segments);
+	/* Take care of alignment */
+	segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+
+	for (i = 0; i < num_periods; i++) {
+		len = period_len;
+
+		while (len > segment_size) {
+			if (direction == DMA_DEV_TO_MEM)
+				sg->dest_addr = addr;
+			else
+				sg->src_addr = addr;
+			sg->x_len = segment_size;
+			sg->y_len = 1;
+			sg++;
+			addr += segment_size;
+			len -= segment_size;
+		}
+
+		if (direction == DMA_DEV_TO_MEM)
+			sg->dest_addr = addr;
+		else
+			sg->src_addr = addr;
+		sg->x_len = len;
+		sg->y_len = 1;
+		sg++;
+		addr += len;
+	}
+
+	return sg;
+}
+
 static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 	struct dma_chan *c, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
@@ -386,16 +429,24 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *dsg;
 	struct scatterlist *sg;
+	unsigned int num_sgs;
 	unsigned int i;
 
 	if (direction != chan->direction)
 		return NULL;
 
-	desc = axi_dmac_alloc_desc(sg_len);
+	num_sgs = 0;
+	for_each_sg(sgl, sg, sg_len, i)
+		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
+
+	desc = axi_dmac_alloc_desc(num_sgs);
 	if (!desc)
 		return NULL;
 
+	dsg = desc->sg;
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
 		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
@@ -403,12 +454,8 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 			return NULL;
 		}
 
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = sg_dma_address(sg);
-		else
-			desc->sg[i].src_addr = sg_dma_address(sg);
-		desc->sg[i].x_len = sg_dma_len(sg);
-		desc->sg[i].y_len = 1;
+		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
+			sg_dma_len(sg), dsg);
 	}
 
 	desc->cyclic = false;
@@ -423,7 +470,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
-	unsigned int num_periods, i;
+	unsigned int num_periods, num_segments;
 
 	if (direction != chan->direction)
 		return NULL;
@@ -436,20 +483,14 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 		return NULL;
 
 	num_periods = buf_len / period_len;
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
 
-	desc = axi_dmac_alloc_desc(num_periods);
+	desc = axi_dmac_alloc_desc(num_periods * num_segments);
 	if (!desc)
 		return NULL;
 
-	for (i = 0; i < num_periods; i++) {
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = buf_addr;
-		else
-			desc->sg[i].src_addr = buf_addr;
-		desc->sg[i].x_len = period_len;
-		desc->sg[i].y_len = 1;
-		buf_addr += period_len;
-	}
+	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
+		period_len, desc->sg);
 
 	desc->cyclic = true;
 
@@ -647,7 +688,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	of_node_put(of_channels);
 
 	pdev->dev.dma_parms = &dmac->dma_parms;
-	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	dma_dev = &dmac->dma_dev;
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
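
A note on the splitting arithmetic introduced by axi_dmac_fill_linear_sg(): the
number of segments and the per-segment size are derived only from period_len,
chan->max_length and chan->align_mask, and the per-period loop then emits
num_segments entries whose lengths sum to period_len. The stand-alone user-space
sketch below exercises the same arithmetic outside the driver. The period length,
maximum length and alignment mask used here are arbitrary example values, not
taken from any particular DMAC instance, and DIV_ROUND_UP is re-defined locally
to mirror the kernel macro.

	/*
	 * Stand-alone illustration of the segment splitting done by
	 * axi_dmac_fill_linear_sg(). Not driver code; the constants below
	 * are arbitrary examples.
	 */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* Example: 1 MiB + 4 byte period, 64 KiB hardware limit,
		 * 4-byte alignment (align_mask = 0x3). */
		unsigned int period_len = (1024 * 1024) + 4;
		unsigned int max_length = 64 * 1024;
		unsigned int align_mask = 0x3;
		unsigned int num_segments, segment_size, len;

		/* Same arithmetic as the patch: split into equally sized
		 * segments, then round the segment size up to the alignment. */
		num_segments = DIV_ROUND_UP(period_len, max_length);
		segment_size = DIV_ROUND_UP(period_len, num_segments);
		segment_size = ((segment_size - 1) | align_mask) + 1;

		printf("num_segments=%u segment_size=%u\n",
		       num_segments, segment_size);

		/* Mirrors the driver's per-period loop: full-size segments
		 * until the remainder fits in a single (possibly shorter) one. */
		for (len = period_len; len > segment_size; len -= segment_size)
			printf("segment: %u bytes\n", segment_size);
		printf("last segment: %u bytes\n", len);

		return 0;
	}

With these example values the sketch yields 17 segments of 61684 bytes plus a
final segment of 61636 bytes, each below the 64 KiB limit and 4-byte aligned,
which is the behaviour the patch relies on when sizing descriptors with
DIV_ROUND_UP(period_len, chan->max_length).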