path: root/drivers/dma/mmp_pdma.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2026-01-18 13:38:31 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2026-01-18 13:38:31 -0800
commit     e90b81e8ff298b4341d2ee340fe785a6c321bcdd (patch)
tree       72e2aa188160193ffb76724251dca1313f756007 /drivers/dma/mmp_pdma.c
parent     3271b25e3d127bb9f45bce1e71c0f8987486a070 (diff)
parent     76cba1e60b69c9cd53b9127d017a7dc5945455b1 (diff)
Merge tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes for:

   - dma mask fix for mmp pdma driver

   - Xilinx regmap max register and uninitialized addr_width fixes

   - device leak fixes for a bunch of drivers in the subsystem

   - stm32 dmamux and TI crossbar driver fixes for device & OF node
     leaks and route allocation cleanup

   - Tegra use-after-free fix

   - memory leak fixes in the Qualcomm gpi and omap-dma drivers

   - compatible fix for the apple driver"

* tag 'dmaengine-fix-6.19' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (25 commits)
  dmaengine: apple-admac: Add "apple,t8103-admac" compatible
  dmaengine: omap-dma: fix dma_pool resource leak in error paths
  dmaengine: qcom: gpi: Fix memory leak in gpi_peripheral_config()
  dmaengine: sh: rz-dmac: Fix rz_dmac_terminate_all()
  dmaengine: xilinx_dma: Fix uninitialized addr_width when "xlnx,addrwidth" property is missing
  dmaengine: tegra-adma: Fix use-after-free
  dmaengine: fsl-edma: Fix clk leak on alloc_chan_resources failure
  dmaengine: mmp_pdma: Fix race condition in mmp_pdma_residue()
  dmaengine: ti: k3-udma: fix device leak on udma lookup
  dmaengine: ti: dma-crossbar: clean up dra7x route allocation error paths
  dmaengine: ti: dma-crossbar: fix device leak on am335x route allocation
  dmaengine: ti: dma-crossbar: fix device leak on dra7x route allocation
  dmaengine: stm32: dmamux: clean up route allocation error labels
  dmaengine: stm32: dmamux: fix OF node leak on route allocation failure
  dmaengine: stm32: dmamux: fix device leak on route allocation
  dmaengine: sh: rz-dmac: fix device leak on probe failure
  dmaengine: lpc32xx-dmamux: fix device leak on route allocation
  dmaengine: lpc18xx-dmamux: fix device leak on route allocation
  dmaengine: idxd: fix device leaks on compat bind and unbind
  dmaengine: dw: dmamux: fix OF node leak on route allocation failure
  ...
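Note: the mmp_pdma "dma mask" fix listed above (and shown in the diff below) replaces a raw per-controller DMA mask with an addressing width and derives the mask from that width at probe time. A minimal sketch of the same pattern, under hypothetical names (my_ops, my_probe) rather than the driver's real ones:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

/* Hypothetical controller description: only the addressing width is kept. */
struct my_ops {
	u32 dma_width;	/* DMA addressing width in bits, e.g. 32 or 64 */
};

static int my_probe(struct platform_device *op, const struct my_ops *ops)
{
	/*
	 * dma_set_mask_and_coherent() sets both the streaming and the
	 * coherent DMA masks; DMA_BIT_MASK(n) builds an n-bit mask.
	 */
	return dma_set_mask_and_coherent(&op->dev, DMA_BIT_MASK(ops->dma_width));
}

Tying the mask to a fixed hardware width avoids depending on whatever coherent mask OF/platform code happened to leave behind, which is what the removed fallback chain in the diff below did.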
Diffstat (limited to 'drivers/dma/mmp_pdma.c')
-rw-r--r--  drivers/dma/mmp_pdma.c  |  26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index d07229a74886..d12e729ee12c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -152,8 +152,8 @@ struct mmp_pdma_phy {
*
* Controller Configuration:
* @run_bits: Control bits in DCSR register for channel start/stop
- * @dma_mask: DMA addressing capability of controller. 0 to use OF/platform
- * settings, or explicit mask like DMA_BIT_MASK(32/64)
+ * @dma_width: DMA addressing width in bits (32 or 64). Determines the
+ * DMA mask capability of the controller hardware.
*/
struct mmp_pdma_ops {
/* Hardware Register Operations */
@@ -173,7 +173,7 @@ struct mmp_pdma_ops {
/* Controller Configuration */
u32 run_bits;
- u64 dma_mask;
+ u32 dma_width;
};
struct mmp_pdma_device {
@@ -928,6 +928,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
{
struct mmp_pdma_desc_sw *sw;
struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
+ unsigned long flags;
u64 curr;
u32 residue = 0;
bool passed = false;
@@ -945,6 +946,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
else
curr = pdev->ops->read_src_addr(chan->phy);
+ spin_lock_irqsave(&chan->desc_lock, flags);
+
list_for_each_entry(sw, &chan->chain_running, node) {
u64 start, end;
u32 len;
@@ -989,6 +992,7 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
continue;
if (sw->async_tx.cookie == cookie) {
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
return residue;
} else {
residue = 0;
@@ -996,6 +1000,8 @@ static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
}
}
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+
/* We should only get here in case of cyclic transactions */
return residue;
}
@@ -1172,7 +1178,7 @@ static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
.get_desc_src_addr = get_desc_src_addr_32,
.get_desc_dst_addr = get_desc_dst_addr_32,
.run_bits = (DCSR_RUN),
- .dma_mask = 0, /* let OF/platform set DMA mask */
+ .dma_width = 32,
};
static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
@@ -1185,7 +1191,7 @@ static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
.get_desc_src_addr = get_desc_src_addr_64,
.get_desc_dst_addr = get_desc_dst_addr_64,
.run_bits = (DCSR_RUN | DCSR_LPAEEN),
- .dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
+ .dma_width = 64,
};
static const struct of_device_id mmp_pdma_dt_ids[] = {
@@ -1314,13 +1320,9 @@ static int mmp_pdma_probe(struct platform_device *op)
pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- /* Set DMA mask based on ops->dma_mask, or OF/platform */
- if (pdev->ops->dma_mask)
- dma_set_mask(pdev->dev, pdev->ops->dma_mask);
- else if (pdev->dev->coherent_dma_mask)
- dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
- else
- dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+ /* Set DMA mask based on controller hardware capabilities */
+ dma_set_mask_and_coherent(pdev->dev,
+ DMA_BIT_MASK(pdev->ops->dma_width));
ret = dma_async_device_register(&pdev->device);
if (ret) {
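For reference, the race fix in mmp_pdma_residue() above amounts to holding the channel's descriptor lock across the whole walk of chain_running and releasing it on every exit path, including the early return when the matching cookie is found. A stripped-down sketch of that locking pattern, with hypothetical names (my_chan, my_desc, my_residue) standing in for the driver's real structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the driver's channel/descriptor types. */
struct my_desc {
	struct list_head node;
	u32 len;
};

struct my_chan {
	spinlock_t desc_lock;		/* protects chain_running */
	struct list_head chain_running;
};

static u32 my_residue(struct my_chan *chan)
{
	struct my_desc *d;
	unsigned long flags;
	u32 residue = 0;

	/*
	 * Hold the lock for the whole list walk so descriptors cannot be
	 * completed and freed underneath us; the irqsave variant is used
	 * because the list is also manipulated from interrupt context.
	 */
	spin_lock_irqsave(&chan->desc_lock, flags);
	list_for_each_entry(d, &chan->chain_running, node)
		residue += d->len;
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return residue;
}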