mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-04-01 12:04:08 +00:00)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (63 commits)
  ARM: PL08x: cleanup comments
  Update CONFIG_MD_RAID6_PQ to CONFIG_RAID6_PQ in drivers/dma/iop-adma.c
  ARM: PL08x: fix a warning
  Fix dmaengine_submit() return type
  dmaengine: at_hdmac: fix race while monitoring channel status
  dmaengine: at_hdmac: flags located in first descriptor
  dmaengine: at_hdmac: use subsys_initcall instead of module_init
  dmaengine: at_hdmac: no need set ACK in new descriptor
  dmaengine: at_hdmac: trivial add precision to unmapping comment
  dmaengine: at_hdmac: use dma_address to program DMA hardware
  pch_dma: support new device ML7213 IOH
  ARM: PL08x: prevent dma_set_runtime_config() reconfiguring memcpy channels
  ARM: PL08x: allow dma_set_runtime_config() to return errors
  ARM: PL08x: fix locking between prepare function and submit function
  ARM: PL08x: introduce 'phychan_hold' to hold on to physical channels
  ARM: PL08x: put txd's on the pending list in pl08x_tx_submit()
  ARM: PL08x: rename 'desc_list' as 'pend_list'
  ARM: PL08x: implement unmapping of memcpy buffers
  ARM: PL08x: store prep_* flags in async_tx structure
  ARM: PL08x: shrink srcbus/dstbus in txd structure
  ...
This commit is contained in: commit e1288cd72f
13 changed files with 996 additions and 846 deletions
@@ -13,6 +13,14 @@
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>

+/*
+ * Maxium size for a single dma descriptor
+ * Size is limited to 16 bits.
+ * Size is in the units of addr-widths (1,2,4,8 bytes)
+ * Larger transfers will be split up to multiple linked desc
+ */
+#define STEDMA40_MAX_SEG_SIZE 0xFFFF
+
 /* dev types for memcpy */
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define STEDMA40_DEV_SRC_MEMORY (-1)
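The new STEDMA40_MAX_SEG_SIZE limit is what the d40_seg_size()/d40_size_2_dmalen() helpers added later in this merge enforce: a buffer longer than 0xFFFF elements has to be carried by several linked descriptors. A simplified, hypothetical sketch of the descriptor count for a single buffer (the real helper also handles differing src/dst widths and alignment):

/* Hypothetical helper, not part of the patch: how many linked descriptors
 * a buffer of 'size' bytes needs when one descriptor can move at most
 * STEDMA40_MAX_SEG_SIZE elements of (1 << data_width) bytes each.
 */
static int stedma40_seg_count(int size, unsigned int data_width)
{
    int seg_max = 0xFFFF << data_width; /* max bytes per descriptor */

    return (size + seg_max - 1) / seg_max; /* round up */
}

/* Example: a 200 KiB buffer with 32-bit elements (data_width = 2) fits in
 * one descriptor (seg_max = 262140 bytes); with byte-wide elements
 * (data_width = 0) it needs 204800 / 65535 rounded up = 4 descriptors.
 */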
@@ -200,11 +200,16 @@ config PL330_DMA
      platform_data for a dma-pl330 device.

 config PCH_DMA
-    tristate "Topcliff (Intel EG20T) PCH DMA support"
+    tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH DMA support"
     depends on PCI && X86
     select DMA_ENGINE
     help
-      Enable support for the Topcliff (Intel EG20T) PCH DMA engine.
+      Enable support for Intel EG20T PCH DMA engine.

+      This driver also can be used for OKI SEMICONDUCTOR ML7213 IOH(Input/
+      Output Hub) which is for IVI(In-Vehicle Infotainment) use.
+      ML7213 is companion chip for Intel Atom E6xx series.
+      ML7213 is completely compatible for Intel EG20T PCH.
+
 config IMX_SDMA
     tristate "i.MX SDMA support"
File diff suppressed because it is too large.
@@ -253,7 +253,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
     /* move myself to free_list */
     list_move(&desc->desc_node, &atchan->free_list);

-    /* unmap dma addresses */
+    /* unmap dma addresses (not on slave channels) */
     if (!atchan->chan_common.private) {
         struct device *parent = chan2parent(&atchan->chan_common);
         if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
@@ -583,7 +583,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
         desc->lli.ctrlb = ctrlb;

         desc->txd.cookie = 0;
-        async_tx_ack(&desc->txd);

         if (!first) {
             first = desc;
@@ -604,7 +603,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
     /* set end-of-link to the last link descriptor of list*/
     set_desc_eol(desc);

-    desc->txd.flags = flags; /* client is in control of this ack */
+    first->txd.flags = flags; /* client is in control of this ack */

     return &first->txd;

@@ -670,7 +669,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
             if (!desc)
                 goto err_desc_get;

-            mem = sg_phys(sg);
+            mem = sg_dma_address(sg);
             len = sg_dma_len(sg);
             mem_width = 2;
             if (unlikely(mem & 3 || len & 3))
@@ -712,7 +711,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
             if (!desc)
                 goto err_desc_get;

-            mem = sg_phys(sg);
+            mem = sg_dma_address(sg);
             len = sg_dma_len(sg);
             mem_width = 2;
             if (unlikely(mem & 3 || len & 3))
@@ -749,8 +748,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
     first->txd.cookie = -EBUSY;
     first->len = total_len;

-    /* last link descriptor of list is responsible of flags */
-    prev->txd.flags = flags; /* client is in control of this ack */
+    /* first link descriptor of list is responsible of flags */
+    first->txd.flags = flags; /* client is in control of this ack */

     return &first->txd;

@@ -854,11 +853,11 @@ static void atc_issue_pending(struct dma_chan *chan)

     dev_vdbg(chan2dev(chan), "issue_pending\n");

+    spin_lock_bh(&atchan->lock);
     if (!atc_chan_is_enabled(atchan)) {
-        spin_lock_bh(&atchan->lock);
         atc_advance_work(atchan);
-        spin_unlock_bh(&atchan->lock);
     }
+    spin_unlock_bh(&atchan->lock);
 }

 /**
@@ -1210,7 +1209,7 @@ static int __init at_dma_init(void)
 {
     return platform_driver_probe(&at_dma_driver, at_dma_probe);
 }
-module_init(at_dma_init);
+subsys_initcall(at_dma_init);

 static void __exit at_dma_exit(void)
 {
@@ -1,7 +1,7 @@
 /*
  * Freescale MPC85xx, MPC83xx DMA Engine support
  *
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author:
  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
@@ -1324,6 +1324,8 @@ static int __devinit fsldma_of_probe(struct platform_device *op,
     fdev->common.device_control = fsl_dma_device_control;
     fdev->common.dev = &op->dev;

+    dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
+
     dev_set_drvdata(&op->dev, fdev);

     /*
@@ -664,11 +664,20 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
     /*calculate CTL_LO*/
     ctl_lo.ctl_lo = 0;
     ctl_lo.ctlx.int_en = 1;
-    ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width;
-    ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width;
     ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst;
     ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst;

+    /*
+     * Here we need some translation from "enum dma_slave_buswidth"
+     * to the format for our dma controller
+     *    standard    intel_mid_dmac's format
+     *     1 Byte        0b000
+     *     2 Bytes       0b001
+     *     4 Bytes       0b010
+     */
+    ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2;
+    ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2;
+
     if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) {
         ctl_lo.ctlx.tt_fc = 0;
         ctl_lo.ctlx.sinc = 0;
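The translation comment above encodes the controller's transfer-width field simply by halving the dmaengine bus-width value. A sketch of the same mapping as a helper, assuming the standard enum dma_slave_buswidth values (1, 2 and 4 bytes); this is illustration only, not code from the patch:

static inline u32 buswidth_to_ctlx_width(enum dma_slave_buswidth width)
{
    return width / 2; /* 1 byte -> 0b000, 2 bytes -> 0b001, 4 bytes -> 0b010 */
}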
@@ -746,8 +755,18 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
     BUG_ON(!mids);

     if (!midc->dma->pimr_mask) {
-        pr_debug("MDMA: SG list is not supported by this controller\n");
-        return NULL;
+        /* We can still handle sg list with only one item */
+        if (sg_len == 1) {
+            txd = intel_mid_dma_prep_memcpy(chan,
+                        mids->dma_slave.dst_addr,
+                        mids->dma_slave.src_addr,
+                        sgl->length,
+                        flags);
+            return txd;
+        } else {
+            pr_warn("MDMA: SG list is not supported by this controller\n");
+            return NULL;
+        }
     }

     pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
@@ -758,6 +777,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
         pr_err("MDMA: Prep memcpy failed\n");
         return NULL;
     }

     desc = to_intel_mid_dma_desc(txd);
     desc->dirn = direction;
     ctl_lo.ctl_lo = desc->ctl_lo;
@@ -1021,11 +1041,6 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)

     /*DMA Interrupt*/
     pr_debug("MDMA:Got an interrupt on irq %d\n", irq);
-    if (!mid) {
-        pr_err("ERR_MDMA:null pointer mid\n");
-        return -EINVAL;
-    }
-
     pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask);
     tfr_status &= mid->intr_mask;
     if (tfr_status) {
@@ -1261,7 +1261,7 @@ out:
     return err;
 }

-#ifdef CONFIG_MD_RAID6_PQ
+#ifdef CONFIG_RAID6_PQ
 static int __devinit
 iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 {
@@ -1584,7 +1584,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)

     if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
         dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
-        #ifdef CONFIG_MD_RAID6_PQ
+        #ifdef CONFIG_RAID6_PQ
         ret = iop_adma_pq_zero_sum_self_test(adev);
         dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
         #else
@@ -1,6 +1,7 @@
 /*
  * Topcliff PCH DMA controller driver
  * Copyright (c) 2010 Intel Corporation
+ * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -921,12 +922,19 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 }

 /* PCI Device ID of DMA device */
-#define PCI_DEVICE_ID_PCH_DMA_8CH        0x8810
-#define PCI_DEVICE_ID_PCH_DMA_4CH        0x8815
+#define PCI_VENDOR_ID_ROHM               0x10DB
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH  0x8810
+#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH  0x8815
+#define PCI_DEVICE_ID_ML7213_DMA1_8CH    0x8026
+#define PCI_DEVICE_ID_ML7213_DMA2_8CH    0x802B
+#define PCI_DEVICE_ID_ML7213_DMA3_4CH    0x8034

 static const struct pci_device_id pch_dma_id_table[] = {
-    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_8CH), 8 },
-    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_DMA_4CH), 4 },
+    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
+    { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
+    { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
+    { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */
+    { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */
     { 0, },
 };

@@ -954,6 +962,7 @@ static void __exit pch_dma_exit(void)
 module_init(pch_dma_init);
 module_exit(pch_dma_exit);

-MODULE_DESCRIPTION("Topcliff PCH DMA controller driver");
+MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+                   "DMA controller driver");
 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
 MODULE_LICENSE("GPL v2");
@@ -1,5 +1,6 @@
 /*
- * Copyright (C) ST-Ericsson SA 2007-2010
+ * Copyright (C) Ericsson AB 2007-2008
+ * Copyright (C) ST-Ericsson SA 2008-2010
  * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
@@ -554,8 +555,66 @@ static struct d40_desc *d40_last_queued(struct d40_chan *d40c)
     return d;
 }

-/* Support functions for logical channels */
+static int d40_psize_2_burst_size(bool is_log, int psize)
+{
+    if (is_log) {
+        if (psize == STEDMA40_PSIZE_LOG_1)
+            return 1;
+    } else {
+        if (psize == STEDMA40_PSIZE_PHY_1)
+            return 1;
+    }
+
+    return 2 << psize;
+}
+
+/*
+ * The dma only supports transmitting packages up to
+ * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
+ * dma elements required to send the entire sg list
+ */
+static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
+{
+    int dmalen;
+    u32 max_w = max(data_width1, data_width2);
+    u32 min_w = min(data_width1, data_width2);
+    u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+    if (seg_max > STEDMA40_MAX_SEG_SIZE)
+        seg_max -= (1 << max_w);
+
+    if (!IS_ALIGNED(size, 1 << max_w))
+        return -EINVAL;
+
+    if (size <= seg_max)
+        dmalen = 1;
+    else {
+        dmalen = size / seg_max;
+        if (dmalen * seg_max < size)
+            dmalen++;
+    }
+    return dmalen;
+}
+
+static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
+                           u32 data_width1, u32 data_width2)
+{
+    struct scatterlist *sg;
+    int i;
+    int len = 0;
+    int ret;
+
+    for_each_sg(sgl, sg, sg_len, i) {
+        ret = d40_size_2_dmalen(sg_dma_len(sg),
+                                data_width1, data_width2);
+        if (ret < 0)
+            return ret;
+        len += ret;
+    }
+    return len;
+}
+
+/* Support functions for logical channels */

 static int d40_channel_execute_command(struct d40_chan *d40c,
                                        enum d40_command command)
@@ -1241,6 +1300,21 @@ static int d40_validate_conf(struct d40_chan *d40c,
         res = -EINVAL;
     }

+    if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
+        (1 << conf->src_info.data_width) !=
+        d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
+        (1 << conf->dst_info.data_width)) {
+        /*
+         * The DMAC hardware only supports
+         * src (burst x width) == dst (burst x width)
+         */
+
+        dev_err(&d40c->chan.dev->device,
+            "[%s] src (burst x width) != dst (burst x width)\n",
+            __func__);
+        res = -EINVAL;
+    }
+
     return res;
 }

@@ -1638,13 +1712,21 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
     if (d40d == NULL)
         goto err;

-    d40d->lli_len = sgl_len;
+    d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
+                                    d40c->dma_cfg.src_info.data_width,
+                                    d40c->dma_cfg.dst_info.data_width);
+    if (d40d->lli_len < 0) {
+        dev_err(&d40c->chan.dev->device,
+            "[%s] Unaligned size\n", __func__);
+        goto err;
+    }
+
     d40d->lli_current = 0;
     d40d->txd.flags = dma_flags;

     if (d40c->log_num != D40_PHY_CHAN) {

-        if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
+        if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
             dev_err(&d40c->chan.dev->device,
                 "[%s] Out of memory\n", __func__);
             goto err;
@@ -1654,15 +1736,17 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                      sgl_len,
                      d40d->lli_log.src,
                      d40c->log_def.lcsp1,
-                     d40c->dma_cfg.src_info.data_width);
+                     d40c->dma_cfg.src_info.data_width,
+                     d40c->dma_cfg.dst_info.data_width);

         (void) d40_log_sg_to_lli(sgl_dst,
                      sgl_len,
                      d40d->lli_log.dst,
                      d40c->log_def.lcsp3,
-                     d40c->dma_cfg.dst_info.data_width);
+                     d40c->dma_cfg.dst_info.data_width,
+                     d40c->dma_cfg.src_info.data_width);
     } else {
-        if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+        if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
             dev_err(&d40c->chan.dev->device,
                 "[%s] Out of memory\n", __func__);
             goto err;
@@ -1675,6 +1759,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                     virt_to_phys(d40d->lli_phy.src),
                     d40c->src_def_cfg,
                     d40c->dma_cfg.src_info.data_width,
+                    d40c->dma_cfg.dst_info.data_width,
                     d40c->dma_cfg.src_info.psize);

         if (res < 0)
@@ -1687,6 +1772,7 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
                     virt_to_phys(d40d->lli_phy.dst),
                     d40c->dst_def_cfg,
                     d40c->dma_cfg.dst_info.data_width,
+                    d40c->dma_cfg.src_info.data_width,
                     d40c->dma_cfg.dst_info.psize);

         if (res < 0)
@@ -1826,7 +1912,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
     struct d40_chan *d40c = container_of(chan, struct d40_chan,
                                          chan);
     unsigned long flags;
-    int err = 0;

     if (d40c->phy_chan == NULL) {
         dev_err(&d40c->chan.dev->device,
@@ -1844,6 +1929,15 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
     }

     d40d->txd.flags = dma_flags;
+    d40d->lli_len = d40_size_2_dmalen(size,
+                                      d40c->dma_cfg.src_info.data_width,
+                                      d40c->dma_cfg.dst_info.data_width);
+    if (d40d->lli_len < 0) {
+        dev_err(&d40c->chan.dev->device,
+            "[%s] Unaligned size\n", __func__);
+        goto err;
+    }
+

     dma_async_tx_descriptor_init(&d40d->txd, chan);

@@ -1851,37 +1945,40 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,

     if (d40c->log_num != D40_PHY_CHAN) {

-        if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
+        if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
             dev_err(&d40c->chan.dev->device,
                 "[%s] Out of memory\n", __func__);
             goto err;
         }
-        d40d->lli_len = 1;
         d40d->lli_current = 0;

-        d40_log_fill_lli(d40d->lli_log.src,
-                 src,
-                 size,
-                 d40c->log_def.lcsp1,
-                 d40c->dma_cfg.src_info.data_width,
-                 true);
+        if (d40_log_buf_to_lli(d40d->lli_log.src,
+                       src,
+                       size,
+                       d40c->log_def.lcsp1,
+                       d40c->dma_cfg.src_info.data_width,
+                       d40c->dma_cfg.dst_info.data_width,
+                       true) == NULL)
+            goto err;

-        d40_log_fill_lli(d40d->lli_log.dst,
-                 dst,
-                 size,
-                 d40c->log_def.lcsp3,
-                 d40c->dma_cfg.dst_info.data_width,
-                 true);
+        if (d40_log_buf_to_lli(d40d->lli_log.dst,
+                       dst,
+                       size,
+                       d40c->log_def.lcsp3,
+                       d40c->dma_cfg.dst_info.data_width,
+                       d40c->dma_cfg.src_info.data_width,
+                       true) == NULL)
+            goto err;

     } else {

-        if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
+        if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
             dev_err(&d40c->chan.dev->device,
                 "[%s] Out of memory\n", __func__);
             goto err;
         }

-        err = d40_phy_fill_lli(d40d->lli_phy.src,
+        if (d40_phy_buf_to_lli(d40d->lli_phy.src,
                        src,
                        size,
                        d40c->dma_cfg.src_info.psize,
@@ -1889,11 +1986,11 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
                        d40c->src_def_cfg,
                        true,
                        d40c->dma_cfg.src_info.data_width,
-                       false);
-        if (err)
-            goto err_fill_lli;
+                       d40c->dma_cfg.dst_info.data_width,
+                       false) == NULL)
+            goto err;

-        err = d40_phy_fill_lli(d40d->lli_phy.dst,
+        if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
                        dst,
                        size,
                        d40c->dma_cfg.dst_info.psize,
@@ -1901,10 +1998,9 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
                        d40c->dst_def_cfg,
                        true,
                        d40c->dma_cfg.dst_info.data_width,
-                       false);
-
-        if (err)
-            goto err_fill_lli;
+                       d40c->dma_cfg.src_info.data_width,
+                       false) == NULL)
+            goto err;

         (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
                       d40d->lli_pool.size, DMA_TO_DEVICE);
@@ -1913,9 +2009,6 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
     spin_unlock_irqrestore(&d40c->lock, flags);
     return &d40d->txd;

-err_fill_lli:
-    dev_err(&d40c->chan.dev->device,
-        "[%s] Failed filling in PHY LLI\n", __func__);
 err:
     if (d40d)
         d40_desc_free(d40c, d40d);
@@ -1945,13 +2038,21 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
     dma_addr_t dev_addr = 0;
     int total_size;

-    if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
+    d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
+                                    d40c->dma_cfg.src_info.data_width,
+                                    d40c->dma_cfg.dst_info.data_width);
+    if (d40d->lli_len < 0) {
+        dev_err(&d40c->chan.dev->device,
+            "[%s] Unaligned size\n", __func__);
+        return -EINVAL;
+    }
+
+    if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) {
         dev_err(&d40c->chan.dev->device,
             "[%s] Out of memory\n", __func__);
         return -ENOMEM;
     }

-    d40d->lli_len = sg_len;
     d40d->lli_current = 0;

     if (direction == DMA_FROM_DEVICE)
@@ -1993,13 +2094,21 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
     dma_addr_t dst_dev_addr;
     int res;

-    if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
+    d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
+                                    d40c->dma_cfg.src_info.data_width,
+                                    d40c->dma_cfg.dst_info.data_width);
+    if (d40d->lli_len < 0) {
+        dev_err(&d40c->chan.dev->device,
+            "[%s] Unaligned size\n", __func__);
+        return -EINVAL;
+    }
+
+    if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) {
         dev_err(&d40c->chan.dev->device,
             "[%s] Out of memory\n", __func__);
         return -ENOMEM;
     }

-    d40d->lli_len = sgl_len;
     d40d->lli_current = 0;

     if (direction == DMA_FROM_DEVICE) {
@@ -2024,6 +2133,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
                 virt_to_phys(d40d->lli_phy.src),
                 d40c->src_def_cfg,
                 d40c->dma_cfg.src_info.data_width,
+                d40c->dma_cfg.dst_info.data_width,
                 d40c->dma_cfg.src_info.psize);
     if (res < 0)
         return res;
@@ -2035,6 +2145,7 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
                 virt_to_phys(d40d->lli_phy.dst),
                 d40c->dst_def_cfg,
                 d40c->dma_cfg.dst_info.data_width,
+                d40c->dma_cfg.src_info.data_width,
                 d40c->dma_cfg.dst_info.psize);
     if (res < 0)
         return res;
@@ -2244,6 +2355,8 @@ static void d40_set_runtime_config(struct dma_chan *chan,
             psize = STEDMA40_PSIZE_PHY_8;
         else if (config_maxburst >= 4)
             psize = STEDMA40_PSIZE_PHY_4;
+        else if (config_maxburst >= 2)
+            psize = STEDMA40_PSIZE_PHY_2;
         else
             psize = STEDMA40_PSIZE_PHY_1;
     }
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) ST-Ericsson SA 2007-2010
- * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
  * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
  */
@@ -122,15 +122,15 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
     *dst_cfg = dst;
 }

-int d40_phy_fill_lli(struct d40_phy_lli *lli,
+static int d40_phy_fill_lli(struct d40_phy_lli *lli,
              dma_addr_t data,
              u32 data_size,
              int psize,
              dma_addr_t next_lli,
              u32 reg_cfg,
              bool term_int,
              u32 data_width,
              bool is_device)
 {
     int num_elems;

@@ -139,13 +139,6 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
     else
         num_elems = 2 << psize;

-    /*
-     * Size is 16bit. data_width is 8, 16, 32 or 64 bit
-     * Block large than 64 KiB must be split.
-     */
-    if (data_size > (0xffff << data_width))
-        return -EINVAL;
-
     /* Must be aligned */
     if (!IS_ALIGNED(data, 0x1 << data_width))
         return -EINVAL;
@@ -187,55 +180,118 @@ int d40_phy_fill_lli(struct d40_phy_lli *lli,
     return 0;
 }

+static int d40_seg_size(int size, int data_width1, int data_width2)
+{
+    u32 max_w = max(data_width1, data_width2);
+    u32 min_w = min(data_width1, data_width2);
+    u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
+
+    if (seg_max > STEDMA40_MAX_SEG_SIZE)
+        seg_max -= (1 << max_w);
+
+    if (size <= seg_max)
+        return size;
+
+    if (size <= 2 * seg_max)
+        return ALIGN(size / 2, 1 << max_w);
+
+    return seg_max;
+}
+
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
+                       dma_addr_t addr,
+                       u32 size,
+                       int psize,
+                       dma_addr_t lli_phys,
+                       u32 reg_cfg,
+                       bool term_int,
+                       u32 data_width1,
+                       u32 data_width2,
+                       bool is_device)
+{
+    int err;
+    dma_addr_t next = lli_phys;
+    int size_rest = size;
+    int size_seg = 0;
+
+    do {
+        size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+        size_rest -= size_seg;
+
+        if (term_int && size_rest == 0)
+            next = 0;
+        else
+            next = ALIGN(next + sizeof(struct d40_phy_lli),
+                         D40_LLI_ALIGN);
+
+        err = d40_phy_fill_lli(lli,
+                       addr,
+                       size_seg,
+                       psize,
+                       next,
+                       reg_cfg,
+                       !next,
+                       data_width1,
+                       is_device);
+
+        if (err)
+            goto err;
+
+        lli++;
+        if (!is_device)
+            addr += size_seg;
+    } while (size_rest);
+
+    return lli;
+
+ err:
+    return NULL;
+}
+
 int d40_phy_sg_to_lli(struct scatterlist *sg,
               int sg_len,
               dma_addr_t target,
-              struct d40_phy_lli *lli,
+              struct d40_phy_lli *lli_sg,
               dma_addr_t lli_phys,
               u32 reg_cfg,
-              u32 data_width,
+              u32 data_width1,
+              u32 data_width2,
               int psize)
 {
     int total_size = 0;
     int i;
     struct scatterlist *current_sg = sg;
-    dma_addr_t next_lli_phys;
     dma_addr_t dst;
-    int err = 0;
+    struct d40_phy_lli *lli = lli_sg;
+    dma_addr_t l_phys = lli_phys;

     for_each_sg(sg, current_sg, sg_len, i) {

         total_size += sg_dma_len(current_sg);

-        /* If this scatter list entry is the last one, no next link */
-        if (sg_len - 1 == i)
-            next_lli_phys = 0;
-        else
-            next_lli_phys = ALIGN(lli_phys + (i + 1) *
-                          sizeof(struct d40_phy_lli),
-                          D40_LLI_ALIGN);
-
         if (target)
             dst = target;
         else
             dst = sg_phys(current_sg);

-        err = d40_phy_fill_lli(&lli[i],
-                       dst,
-                       sg_dma_len(current_sg),
-                       psize,
-                       next_lli_phys,
-                       reg_cfg,
-                       !next_lli_phys,
-                       data_width,
-                       target == dst);
-        if (err)
-            goto err;
+        l_phys = ALIGN(lli_phys + (lli - lli_sg) *
+                   sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
+
+        lli = d40_phy_buf_to_lli(lli,
+                     dst,
+                     sg_dma_len(current_sg),
+                     psize,
+                     l_phys,
+                     reg_cfg,
+                     sg_len - 1 == i,
+                     data_width1,
+                     data_width2,
+                     target == dst);
+        if (lli == NULL)
+            return -EINVAL;
     }

     return total_size;
-err:
-    return err;
 }

@@ -315,17 +371,20 @@ void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
     writel(lli_dst->lcsp13, &lcla[1].lcsp13);
 }

-void d40_log_fill_lli(struct d40_log_lli *lli,
+static void d40_log_fill_lli(struct d40_log_lli *lli,
               dma_addr_t data, u32 data_size,
               u32 reg_cfg,
               u32 data_width,
               bool addr_inc)
 {
     lli->lcsp13 = reg_cfg;

     /* The number of elements to transfer */
     lli->lcsp02 = ((data_size >> data_width) <<
                D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
+
+    BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
+
     /* 16 LSBs address of the current element */
     lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
     /* 16 MSBs address of the current element */
@@ -348,55 +407,94 @@ int d40_log_sg_to_dev(struct scatterlist *sg,
     int total_size = 0;
     struct scatterlist *current_sg = sg;
     int i;
+    struct d40_log_lli *lli_src = lli->src;
+    struct d40_log_lli *lli_dst = lli->dst;

     for_each_sg(sg, current_sg, sg_len, i) {
         total_size += sg_dma_len(current_sg);

         if (direction == DMA_TO_DEVICE) {
-            d40_log_fill_lli(&lli->src[i],
-                     sg_phys(current_sg),
-                     sg_dma_len(current_sg),
-                     lcsp->lcsp1, src_data_width,
-                     true);
-            d40_log_fill_lli(&lli->dst[i],
-                     dev_addr,
-                     sg_dma_len(current_sg),
-                     lcsp->lcsp3, dst_data_width,
-                     false);
+            lli_src =
+                d40_log_buf_to_lli(lli_src,
+                           sg_phys(current_sg),
+                           sg_dma_len(current_sg),
+                           lcsp->lcsp1, src_data_width,
+                           dst_data_width,
+                           true);
+            lli_dst =
+                d40_log_buf_to_lli(lli_dst,
+                           dev_addr,
+                           sg_dma_len(current_sg),
+                           lcsp->lcsp3, dst_data_width,
+                           src_data_width,
+                           false);
         } else {
-            d40_log_fill_lli(&lli->dst[i],
-                     sg_phys(current_sg),
-                     sg_dma_len(current_sg),
-                     lcsp->lcsp3, dst_data_width,
-                     true);
-            d40_log_fill_lli(&lli->src[i],
-                     dev_addr,
-                     sg_dma_len(current_sg),
-                     lcsp->lcsp1, src_data_width,
-                     false);
+            lli_dst =
+                d40_log_buf_to_lli(lli_dst,
+                           sg_phys(current_sg),
+                           sg_dma_len(current_sg),
+                           lcsp->lcsp3, dst_data_width,
+                           src_data_width,
+                           true);
+            lli_src =
+                d40_log_buf_to_lli(lli_src,
+                           dev_addr,
+                           sg_dma_len(current_sg),
+                           lcsp->lcsp1, src_data_width,
+                           dst_data_width,
+                           false);
         }
     }
     return total_size;
 }

+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+                       dma_addr_t addr,
+                       int size,
+                       u32 lcsp13, /* src or dst*/
+                       u32 data_width1,
+                       u32 data_width2,
+                       bool addr_inc)
+{
+    struct d40_log_lli *lli = lli_sg;
+    int size_rest = size;
+    int size_seg = 0;
+
+    do {
+        size_seg = d40_seg_size(size_rest, data_width1, data_width2);
+        size_rest -= size_seg;
+
+        d40_log_fill_lli(lli,
+                 addr,
+                 size_seg,
+                 lcsp13, data_width1,
+                 addr_inc);
+        if (addr_inc)
+            addr += size_seg;
+        lli++;
+    } while (size_rest);
+
+    return lli;
+}
+
 int d40_log_sg_to_lli(struct scatterlist *sg,
               int sg_len,
               struct d40_log_lli *lli_sg,
               u32 lcsp13, /* src or dst*/
-              u32 data_width)
+              u32 data_width1, u32 data_width2)
 {
     int total_size = 0;
     struct scatterlist *current_sg = sg;
     int i;
+    struct d40_log_lli *lli = lli_sg;

     for_each_sg(sg, current_sg, sg_len, i) {
         total_size += sg_dma_len(current_sg);
-        d40_log_fill_lli(&lli_sg[i],
-                 sg_phys(current_sg),
-                 sg_dma_len(current_sg),
-                 lcsp13, data_width,
-                 true);
+        lli = d40_log_buf_to_lli(lli,
+                     sg_phys(current_sg),
+                     sg_dma_len(current_sg),
+                     lcsp13,
+                     data_width1, data_width2, true);
     }
     return total_size;
 }
@@ -292,18 +292,20 @@ int d40_phy_sg_to_lli(struct scatterlist *sg,
               struct d40_phy_lli *lli,
               dma_addr_t lli_phys,
               u32 reg_cfg,
-              u32 data_width,
+              u32 data_width1,
+              u32 data_width2,
               int psize);

-int d40_phy_fill_lli(struct d40_phy_lli *lli,
+struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
              dma_addr_t data,
              u32 data_size,
              int psize,
              dma_addr_t next_lli,
              u32 reg_cfg,
              bool term_int,
-             u32 data_width,
-             bool is_device);
+             u32 data_width1,
+             u32 data_width2,
+             bool is_device);

 void d40_phy_lli_write(void __iomem *virtbase,
                u32 phy_chan_num,
@@ -312,12 +314,12 @@ void d40_phy_lli_write(void __iomem *virtbase,

 /* Logical channels */

-void d40_log_fill_lli(struct d40_log_lli *lli,
-              dma_addr_t data,
-              u32 data_size,
-              u32 reg_cfg,
-              u32 data_width,
+struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
+              dma_addr_t addr,
+              int size,
+              u32 lcsp13, /* src or dst*/
+              u32 data_width1, u32 data_width2,
               bool addr_inc);

 int d40_log_sg_to_dev(struct scatterlist *sg,
               int sg_len,
@@ -332,7 +334,7 @@ int d40_log_sg_to_lli(struct scatterlist *sg,
               int sg_len,
               struct d40_log_lli *lli_sg,
               u32 lcsp13, /* src or dst*/
-              u32 data_width);
+              u32 data_width1, u32 data_width2);

 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
                 struct d40_log_lli *lli_dst,
@@ -12,7 +12,6 @@
  *
  * Please credit ARM.com
  * Documentation: ARM DDI 0196D
- *
  */

 #ifndef AMBA_PL08X_H
@@ -22,6 +21,15 @@
 #include <linux/dmaengine.h>
 #include <linux/interrupt.h>

+struct pl08x_lli;
+struct pl08x_driver_data;
+
+/* Bitmasks for selecting AHB ports for DMA transfers */
+enum {
+    PL08X_AHB1 = (1 << 0),
+    PL08X_AHB2 = (1 << 1)
+};
+
 /**
  * struct pl08x_channel_data - data structure to pass info between
  * platform and PL08x driver regarding channel configuration
@@ -46,8 +54,10 @@
  * @circular_buffer: whether the buffer passed in is circular and
  * shall simply be looped round round (like a record baby round
  * round round round)
- * @single: the device connected to this channel will request single
- * DMA transfers, not bursts. (Bursts are default.)
+ * @single: the device connected to this channel will request single DMA
+ * transfers, not bursts. (Bursts are default.)
+ * @periph_buses: the device connected to this channel is accessible via
+ * these buses (use PL08X_AHB1 | PL08X_AHB2).
  */
 struct pl08x_channel_data {
     char *bus_id;
@@ -55,10 +65,10 @@ struct pl08x_channel_data {
     int max_signal;
     u32 muxval;
     u32 cctl;
-    u32 ccfg;
     dma_addr_t addr;
     bool circular_buffer;
     bool single;
+    u8 periph_buses;
 };

 /**
@@ -67,24 +77,23 @@ struct pl08x_channel_data {
  * @addr: current address
  * @maxwidth: the maximum width of a transfer on this bus
  * @buswidth: the width of this bus in bytes: 1, 2 or 4
- * @fill_bytes: bytes required to fill to the next bus memory
- * boundary
+ * @fill_bytes: bytes required to fill to the next bus memory boundary
  */
 struct pl08x_bus_data {
     dma_addr_t addr;
     u8 maxwidth;
     u8 buswidth;
-    u32 fill_bytes;
+    size_t fill_bytes;
 };

 /**
  * struct pl08x_phy_chan - holder for the physical channels
  * @id: physical index to this channel
  * @lock: a lock to use when altering an instance of this struct
- * @signal: the physical signal (aka channel) serving this
- * physical channel right now
- * @serving: the virtual channel currently being served by this
- * physical channel
+ * @signal: the physical signal (aka channel) serving this physical channel
+ * right now
+ * @serving: the virtual channel currently being served by this physical
+ * channel
  */
 struct pl08x_phy_chan {
     unsigned int id;
@@ -92,11 +101,6 @@ struct pl08x_phy_chan {
     spinlock_t lock;
     int signal;
     struct pl08x_dma_chan *serving;
-    u32 csrc;
-    u32 cdst;
-    u32 clli;
-    u32 cctl;
-    u32 ccfg;
 };

 /**
@@ -108,26 +112,23 @@ struct pl08x_txd {
     struct dma_async_tx_descriptor tx;
     struct list_head node;
     enum dma_data_direction direction;
-    struct pl08x_bus_data srcbus;
-    struct pl08x_bus_data dstbus;
-    int len;
+    dma_addr_t src_addr;
+    dma_addr_t dst_addr;
+    size_t len;
     dma_addr_t llis_bus;
-    void *llis_va;
-    struct pl08x_channel_data *cd;
-    bool active;
+    struct pl08x_lli *llis_va;
+    /* Default cctl value for LLIs */
+    u32 cctl;
     /*
      * Settings to be put into the physical channel when we
-     * trigger this txd
+     * trigger this txd. Other registers are in llis_va[0].
      */
-    u32 csrc;
-    u32 cdst;
-    u32 clli;
-    u32 cctl;
+    u32 ccfg;
 };

 /**
- * struct pl08x_dma_chan_state - holds the PL08x specific virtual
- * channel states
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
  * @PL08X_CHAN_IDLE: the channel is idle
  * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
  * channel and is running a transfer on it
@@ -147,6 +148,8 @@ enum pl08x_dma_chan_state {
  * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
  * @chan: wrappped abstract channel
  * @phychan: the physical channel utilized by this channel, if there is one
+ * @phychan_hold: if non-zero, hold on to the physical channel even if we
+ * have no pending entries
  * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
  * @name: name of channel
  * @cd: channel platform data
@@ -154,53 +157,49 @@ enum pl08x_dma_chan_state {
  * @runtime_direction: current direction of this channel according to
  * runtime config
  * @lc: last completed transaction on this channel
- * @desc_list: queued transactions pending on this channel
+ * @pend_list: queued transactions pending on this channel
  * @at: active transaction on this channel
- * @lockflags: sometimes we let a lock last between two function calls,
- * especially prep/submit, and then we need to store the IRQ flags
- * in the channel state, here
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
- * @waiting: a TX descriptor on this channel which is waiting for
- * a physical channel to become available
+ * @waiting: a TX descriptor on this channel which is waiting for a physical
+ * channel to become available
 */
 struct pl08x_dma_chan {
     struct dma_chan chan;
     struct pl08x_phy_chan *phychan;
+    int phychan_hold;
     struct tasklet_struct tasklet;
     char *name;
     struct pl08x_channel_data *cd;
     dma_addr_t runtime_addr;
     enum dma_data_direction runtime_direction;
-    atomic_t last_issued;
     dma_cookie_t lc;
-    struct list_head desc_list;
+    struct list_head pend_list;
     struct pl08x_txd *at;
-    unsigned long lockflags;
     spinlock_t lock;
-    void *host;
+    struct pl08x_driver_data *host;
     enum pl08x_dma_chan_state state;
     bool slave;
     struct pl08x_txd *waiting;
 };

 /**
- * struct pl08x_platform_data - the platform configuration for the
- * PL08x PrimeCells.
+ * struct pl08x_platform_data - the platform configuration for the PL08x
+ * PrimeCells.
  * @slave_channels: the channels defined for the different devices on the
  * platform, all inclusive, including multiplexed channels. The available
- * physical channels will be multiplexed around these signals as they
- * are requested, just enumerate all possible channels.
- * @get_signal: request a physical signal to be used for a DMA
- * transfer immediately: if there is some multiplexing or similar blocking
- * the use of the channel the transfer can be denied by returning
- * less than zero, else it returns the allocated signal number
+ * physical channels will be multiplexed around these signals as they are
+ * requested, just enumerate all possible channels.
+ * @get_signal: request a physical signal to be used for a DMA transfer
+ * immediately: if there is some multiplexing or similar blocking the use
+ * of the channel the transfer can be denied by returning less than zero,
+ * else it returns the allocated signal number
  * @put_signal: indicate to the platform that this physical signal is not
  * running any DMA transfer and multiplexing can be recycled
- * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the
- * LLI addresses are on 0/1 Master 1/2.
+ * @lli_buses: buses which LLIs can be fetched from: PL08X_AHB1 | PL08X_AHB2
+ * @mem_buses: buses which memory can be accessed from: PL08X_AHB1 | PL08X_AHB2
  */
 struct pl08x_platform_data {
     struct pl08x_channel_data *slave_channels;
@@ -208,6 +207,8 @@ struct pl08x_platform_data {
     struct pl08x_channel_data memcpy_channel;
     int (*get_signal)(struct pl08x_dma_chan *);
     void (*put_signal)(struct pl08x_dma_chan *);
+    u8 lli_buses;
+    u8 mem_buses;
 };

 #ifdef CONFIG_AMBA_PL08X
@@ -532,7 +532,7 @@ static inline int dmaengine_resume(struct dma_chan *chan)
     return dmaengine_device_control(chan, DMA_RESUME, 0);
 }

-static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
+static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
 {
     return desc->tx_submit(desc);
 }
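The final hunk changes dmaengine_submit() to return dma_cookie_t rather than int, matching what the descriptor's tx_submit() actually hands back. A minimal, hypothetical caller sketch (the desc and chan variables are placeholders, not taken from the patch) showing how the cookie is then used with the standard completion helpers:

dma_cookie_t cookie;

cookie = dmaengine_submit(desc);        /* now typed as dma_cookie_t */
if (dma_submit_error(cookie))
    return -EIO;

dma_async_issue_pending(chan);

/* ... later, poll for completion ... */
if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_SUCCESS)
    /* transfer finished */;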