Merge branch 'remotes/lorenzo/pci/dwc'
  - Add Kirin MSI support (Xiaowei Song)

  - Drop unnecessary root_bus_nr setting from exynos, imx6, keystone,
    armada8k, artpec6, designware-plat, histb, qcom, spear13xx (Shawn Guo)

  - Move link notification settings from DesignWare core to individual
    drivers (Gustavo Pimentel)

  - Add endpoint library MSI-X interfaces (Gustavo Pimentel)

  - Correct signature of endpoint library IRQ interfaces (Gustavo Pimentel)

  - Add DesignWare endpoint library MSI-X callbacks (Gustavo Pimentel)

  - Add endpoint library MSI-X test support (Gustavo Pimentel)

* remotes/lorenzo/pci/dwc:
  PCI: endpoint: Add MSI set maximum restriction
  tools: PCI: Add MSI-X support
  pci_endpoint_test: Add 2 ioctl commands
  pci-epf-test/pci_endpoint_test: Add MSI-X support
  pci-epf-test/pci_endpoint_test: Use irq_type module parameter
  pci-epf-test/pci_endpoint_test: Cleanup PCI_ENDPOINT_TEST memspace
  PCI: dwc: Add legacy interrupt callback handler
  PCI: dwc: Rework MSI callbacks handler
  PCI: dwc: Add MSI-X callbacks handler
  PCI: Update xxx_pcie_ep_raise_irq() and pci_epc_raise_irq() signatures
  PCI: endpoint: Add MSI-X interfaces
  PCI: dwc: Fix EP link notification implementation
  PCI: spear13xx: Drop unnecessary root_bus_nr setting
  PCI: qcom: Drop unnecessary root_bus_nr setting
  PCI: histb: Drop unnecessary root_bus_nr setting
  PCI: designware-plat: Drop unnecessary root_bus_nr setting
  PCI: artpec6: Drop unnecessary root_bus_nr setting
  PCI: armada8k: Drop unnecessary root_bus_nr setting
  PCI: keystone: Drop unnecessary root_bus_nr setting
  PCI: imx6: Drop unnecessary root_bus_nr setting
  PCI: exynos: Drop unnecessary root_bus_nr setting
  PCI: kirin: Add MSI support
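On the endpoint side, the MSI-X support added here is reached through the generic pci_epc_* calls rather than driver internals. Below is a minimal sketch, not code from this merge, of how an endpoint function driver might raise an MSI-X vector with the interfaces introduced in this series; the helper name example_epf_raise_msix and its error handling are illustrative assumptions.

/*
 * Minimal sketch (assumed helper, not from this merge): raise an MSI-X
 * vector from an endpoint function driver via the EPC interfaces added
 * by this series.
 */
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int example_epf_raise_msix(struct pci_epf *epf, u16 vector)
{
	struct pci_epc *epc = epf->epc;
	int nr_vectors;

	/* Number of MSI-X vectors enabled by the host; 0 means MSI-X is off. */
	nr_vectors = pci_epc_get_msix(epc, epf->func_no);
	if (nr_vectors <= 0 || vector > nr_vectors)
		return -EINVAL;

	/* pci_epc_raise_irq() now takes a u16 interrupt number (was u8). */
	return pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, vector);
}

The pci-epf-test/pci_endpoint_test pair exercises this path when the irq_type module parameter selects MSI-X, per the commit list above.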
commit 0c38011aba
30 changed files with 759 additions and 155 deletions
@@ -370,7 +370,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
 }
 
 static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-				 enum pci_epc_irq_type type, u8 interrupt_num)
+				 enum pci_epc_irq_type type, u16 interrupt_num)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci);
@@ -421,7 +421,6 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep,
 		}
 	}
 
-	pp->root_bus_nr = -1;
 	pp->ops = &exynos_pcie_host_ops;
 
 	ret = dw_pcie_host_init(pp);
@@ -667,7 +667,6 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
 		}
 	}
 
-	pp->root_bus_nr = -1;
 	pp->ops = &imx6_pcie_host_ops;
 
 	ret = dw_pcie_host_init(pp);
@@ -347,7 +347,6 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 		}
 	}
 
-	pp->root_bus_nr = -1;
 	pp->ops = &keystone_pcie_host_ops;
 	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
 	if (ret) {
@@ -172,7 +172,6 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie,
 	struct device *dev = &pdev->dev;
 	int ret;
 
-	pp->root_bus_nr = -1;
 	pp->ops = &armada8k_pcie_host_ops;
 
 	pp->irq = platform_get_irq(pdev, 0);
@@ -399,7 +399,6 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
 		}
 	}
 
-	pp->root_bus_nr = -1;
 	pp->ops = &artpec6_pcie_host_ops;
 
 	ret = dw_pcie_host_init(pp);
@@ -428,7 +427,7 @@ static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
 }
 
 static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
-				  enum pci_epc_irq_type type, u8 interrupt_num)
+				  enum pci_epc_irq_type type, u16 interrupt_num)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
@@ -40,6 +40,39 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
 	__dw_pcie_ep_reset_bar(pci, bar, 0);
 }
 
+static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+				     u8 cap)
+{
+	u8 cap_id, next_cap_ptr;
+	u16 reg;
+
+	reg = dw_pcie_readw_dbi(pci, cap_ptr);
+	next_cap_ptr = (reg & 0xff00) >> 8;
+	cap_id = (reg & 0x00ff);
+
+	if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
+		return 0;
+
+	if (cap_id == cap)
+		return cap_ptr;
+
+	return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
+static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
+{
+	u8 next_cap_ptr;
+	u16 reg;
+
+	reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+	next_cap_ptr = (reg & 0x00ff);
+
+	if (!next_cap_ptr)
+		return 0;
+
+	return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+}
+
 static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
 				   struct pci_epf_header *hdr)
 {
@@ -213,36 +246,84 @@ static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no,
 
 static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no)
 {
-	int val;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
 
-	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
-	if (!(val & MSI_CAP_MSI_EN_MASK))
+	if (!ep->msi_cap)
 		return -EINVAL;
 
-	val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT;
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSI_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val = (val & PCI_MSI_FLAGS_QSIZE) >> 4;
+
 	return val;
 }
 
-static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
+static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts)
 {
-	int val;
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
 
-	val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
-	val &= ~MSI_CAP_MMC_MASK;
-	val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
+	if (!ep->msi_cap)
+		return -EINVAL;
+
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSI_FLAGS_QMASK;
+	val |= (interrupts << 1) & PCI_MSI_FLAGS_QMASK;
 	dw_pcie_dbi_ro_wr_en(pci);
-	dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
+	dw_pcie_writew_dbi(pci, reg, val);
 	dw_pcie_dbi_ro_wr_dis(pci);
 
 	return 0;
 }
 
+static int dw_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msix_cap)
+		return -EINVAL;
+
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	if (!(val & PCI_MSIX_FLAGS_ENABLE))
+		return -EINVAL;
+
+	val &= PCI_MSIX_FLAGS_QSIZE;
+
+	return val;
+}
+
+static int dw_pcie_ep_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts)
+{
+	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	u32 val, reg;
+
+	if (!ep->msix_cap)
+		return -EINVAL;
+
+	reg = ep->msix_cap + PCI_MSIX_FLAGS;
+	val = dw_pcie_readw_dbi(pci, reg);
+	val &= ~PCI_MSIX_FLAGS_QSIZE;
+	val |= interrupts;
+	dw_pcie_dbi_ro_wr_en(pci);
+	dw_pcie_writew_dbi(pci, reg, val);
+	dw_pcie_dbi_ro_wr_dis(pci);
+
+	return 0;
+}
+
 static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
-				enum pci_epc_irq_type type, u8 interrupt_num)
+				enum pci_epc_irq_type type, u16 interrupt_num)
 {
 	struct dw_pcie_ep *ep = epc_get_drvdata(epc);
 
@@ -282,32 +363,52 @@ static const struct pci_epc_ops epc_ops = {
 	.unmap_addr		= dw_pcie_ep_unmap_addr,
 	.set_msi		= dw_pcie_ep_set_msi,
 	.get_msi		= dw_pcie_ep_get_msi,
+	.set_msix		= dw_pcie_ep_set_msix,
+	.get_msix		= dw_pcie_ep_get_msix,
 	.raise_irq		= dw_pcie_ep_raise_irq,
 	.start			= dw_pcie_ep_start,
 	.stop			= dw_pcie_ep_stop,
 };
 
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct device *dev = pci->dev;
+
+	dev_err(dev, "EP cannot trigger legacy IRQs\n");
+
+	return -EINVAL;
+}
+
 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
 			     u8 interrupt_num)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 	struct pci_epc *epc = ep->epc;
 	u16 msg_ctrl, msg_data;
-	u32 msg_addr_lower, msg_addr_upper;
+	u32 msg_addr_lower, msg_addr_upper, reg;
 	u64 msg_addr;
 	bool has_upper;
 	int ret;
 
+	if (!ep->msi_cap)
+		return -EINVAL;
+
 	/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
-	msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
+	reg = ep->msi_cap + PCI_MSI_FLAGS;
+	msg_ctrl = dw_pcie_readw_dbi(pci, reg);
 	has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
-	msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
+	reg = ep->msi_cap + PCI_MSI_ADDRESS_LO;
+	msg_addr_lower = dw_pcie_readl_dbi(pci, reg);
 	if (has_upper) {
-		msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
-		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
+		reg = ep->msi_cap + PCI_MSI_ADDRESS_HI;
+		msg_addr_upper = dw_pcie_readl_dbi(pci, reg);
+		reg = ep->msi_cap + PCI_MSI_DATA_64;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
 	} else {
 		msg_addr_upper = 0;
-		msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
+		reg = ep->msi_cap + PCI_MSI_DATA_32;
+		msg_data = dw_pcie_readw_dbi(pci, reg);
 	}
 	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
 	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
@@ -322,6 +423,64 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
 	return 0;
 }
 
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+			      u16 interrupt_num)
+{
+	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
+	u16 tbl_offset, bir;
+	u32 bar_addr_upper, bar_addr_lower;
+	u32 msg_addr_upper, msg_addr_lower;
+	u32 reg, msg_data, vec_ctrl;
+	u64 tbl_addr, msg_addr, reg_u64;
+	void __iomem *msix_tbl;
+	int ret;
+
+	reg = ep->msix_cap + PCI_MSIX_TABLE;
+	tbl_offset = dw_pcie_readl_dbi(pci, reg);
+	bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
+	tbl_offset &= PCI_MSIX_TABLE_OFFSET;
+	tbl_offset >>= 3;
+
+	reg = PCI_BASE_ADDRESS_0 + (4 * bir);
+	bar_addr_upper = 0;
+	bar_addr_lower = dw_pcie_readl_dbi(pci, reg);
+	reg_u64 = (bar_addr_lower & PCI_BASE_ADDRESS_MEM_TYPE_MASK);
+	if (reg_u64 == PCI_BASE_ADDRESS_MEM_TYPE_64)
+		bar_addr_upper = dw_pcie_readl_dbi(pci, reg + 4);
+
+	tbl_addr = ((u64) bar_addr_upper) << 32 | bar_addr_lower;
+	tbl_addr += (tbl_offset + ((interrupt_num - 1) * PCI_MSIX_ENTRY_SIZE));
+	tbl_addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+	msix_tbl = ioremap_nocache(ep->phys_base + tbl_addr,
+				   PCI_MSIX_ENTRY_SIZE);
+	if (!msix_tbl)
+		return -EINVAL;
+
+	msg_addr_lower = readl(msix_tbl + PCI_MSIX_ENTRY_LOWER_ADDR);
+	msg_addr_upper = readl(msix_tbl + PCI_MSIX_ENTRY_UPPER_ADDR);
+	msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
+	msg_data = readl(msix_tbl + PCI_MSIX_ENTRY_DATA);
+	vec_ctrl = readl(msix_tbl + PCI_MSIX_ENTRY_VECTOR_CTRL);
+
+	iounmap(msix_tbl);
+
+	if (vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)
+		return -EPERM;
+
+	ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
+				  epc->mem->page_size);
+	if (ret)
+		return ret;
+
+	writel(msg_data, ep->msi_mem);
+
+	dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
+
+	return 0;
+}
+
 void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
 {
 	struct pci_epc *epc = ep->epc;
@@ -386,15 +545,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 		return -ENOMEM;
 	ep->outbound_addr = addr;
 
-	if (ep->ops->ep_init)
-		ep->ops->ep_init(ep);
-
 	epc = devm_pci_epc_create(dev, &epc_ops);
 	if (IS_ERR(epc)) {
 		dev_err(dev, "Failed to create epc device\n");
 		return PTR_ERR(epc);
 	}
 
+	ep->epc = epc;
+	epc_set_drvdata(epc, ep);
+
+	if (ep->ops->ep_init)
+		ep->ops->ep_init(ep);
+
 	ret = of_property_read_u8(np, "max-functions", &epc->max_functions);
 	if (ret < 0)
 		epc->max_functions = 1;
@@ -409,15 +571,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
 	ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
 					     epc->mem->page_size);
 	if (!ep->msi_mem) {
-		dev_err(dev, "Failed to reserve memory for MSI\n");
+		dev_err(dev, "Failed to reserve memory for MSI/MSI-X\n");
 		return -ENOMEM;
 	}
+	ep->msi_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSI);
 
-	epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER;
-	EPC_FEATURE_SET_BAR(epc->features, BAR_0);
+	ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX);
 
-	ep->epc = epc;
-	epc_set_drvdata(epc, ep);
 	dw_pcie_setup(pci);
 
 	return 0;
@@ -70,24 +70,29 @@ static const struct dw_pcie_ops dw_pcie_ops = {
 static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+	struct pci_epc *epc = ep->epc;
 	enum pci_barno bar;
 
 	for (bar = BAR_0; bar <= BAR_5; bar++)
 		dw_pcie_ep_reset_bar(pci, bar);
+
+	epc->features |= EPC_FEATURE_NO_LINKUP_NOTIFIER;
+	epc->features |= EPC_FEATURE_MSIX_AVAILABLE;
 }
 
 static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
 				     enum pci_epc_irq_type type,
-				     u8 interrupt_num)
+				     u16 interrupt_num)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
 
 	switch (type) {
 	case PCI_EPC_IRQ_LEGACY:
-		dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
-		return -EINVAL;
+		return dw_pcie_ep_raise_legacy_irq(ep, func_no);
 	case PCI_EPC_IRQ_MSI:
 		return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+	case PCI_EPC_IRQ_MSIX:
+		return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
 	default:
 		dev_err(pci->dev, "UNKNOWN IRQ type\n");
 	}
@@ -118,7 +123,6 @@ static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie,
 			return pp->msi_irq;
 	}
 
-	pp->root_bus_nr = -1;
 	pp->ops = &dw_plat_pcie_host_ops;
 
 	ret = dw_pcie_host_init(pp);
@@ -96,17 +96,6 @@
 #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \
 		((0x3 << 20) | ((region) << 9) | (0x1 << 8))
 
-#define MSI_MESSAGE_CONTROL		0x52
-#define MSI_CAP_MMC_SHIFT		1
-#define MSI_CAP_MMC_MASK		(7 << MSI_CAP_MMC_SHIFT)
-#define MSI_CAP_MME_SHIFT		4
-#define MSI_CAP_MSI_EN_MASK		0x1
-#define MSI_CAP_MME_MASK		(7 << MSI_CAP_MME_SHIFT)
-#define MSI_MESSAGE_ADDR_L32		0x54
-#define MSI_MESSAGE_ADDR_U32		0x58
-#define MSI_MESSAGE_DATA_32		0x58
-#define MSI_MESSAGE_DATA_64		0x5C
-
 #define MAX_MSI_IRQS			256
 #define MAX_MSI_IRQS_PER_CTRL		32
 #define MAX_MSI_CTRLS			(MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
@@ -191,7 +180,7 @@ enum dw_pcie_as_type {
 struct dw_pcie_ep_ops {
 	void	(*ep_init)(struct dw_pcie_ep *ep);
 	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
-			     enum pci_epc_irq_type type, u8 interrupt_num);
+			     enum pci_epc_irq_type type, u16 interrupt_num);
 };
 
 struct dw_pcie_ep {
@@ -208,6 +197,8 @@ struct dw_pcie_ep {
 	u32			num_ob_windows;
 	void __iomem		*msi_mem;
 	phys_addr_t		msi_mem_phys;
+	u8			msi_cap;	/* MSI capability offset */
+	u8			msix_cap;	/* MSI-X capability offset */
 };
 
 struct dw_pcie_ops {
@@ -357,8 +348,11 @@ static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
 void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
 int dw_pcie_ep_init(struct dw_pcie_ep *ep);
 void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
+int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no);
 int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
 			     u8 interrupt_num);
+int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+			      u16 interrupt_num);
 void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
 #else
 static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
@@ -374,12 +368,23 @@ static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
 {
 }
 
+static inline int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no)
+{
+	return 0;
+}
+
 static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
 					   u8 interrupt_num)
 {
 	return 0;
 }
 
+static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
+					    u16 interrupt_num)
+{
+	return 0;
+}
+
 static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
 {
 }
@@ -420,7 +420,6 @@ static int histb_pcie_probe(struct platform_device *pdev)
 		phy_init(hipcie->phy);
 	}
 
-	pp->root_bus_nr = -1;
 	pp->ops = &histb_pcie_host_ops;
 
 	platform_set_drvdata(pdev, hipcie);
@@ -430,6 +430,9 @@ static int kirin_pcie_host_init(struct pcie_port *pp)
 {
 	kirin_pcie_establish_link(pp);
 
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		dw_pcie_msi_init(pp);
+
 	return 0;
 }
 
@@ -445,9 +448,34 @@ static const struct dw_pcie_host_ops kirin_pcie_host_ops = {
 	.host_init = kirin_pcie_host_init,
 };
 
+static int kirin_pcie_add_msi(struct dw_pcie *pci,
+			      struct platform_device *pdev)
+{
+	int irq;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(&pdev->dev,
+				"failed to get MSI IRQ (%d)\n", irq);
+			return irq;
+		}
+
+		pci->pp.msi_irq = irq;
+	}
+
+	return 0;
+}
+
 static int __init kirin_add_pcie_port(struct dw_pcie *pci,
 				      struct platform_device *pdev)
 {
+	int ret;
+
+	ret = kirin_pcie_add_msi(pci, pdev);
+	if (ret)
+		return ret;
+
 	pci->pp.ops = &kirin_pcie_host_ops;
 
 	return dw_pcie_host_init(&pci->pp);
@@ -1251,7 +1251,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	pp->root_bus_nr = -1;
 	pp->ops = &qcom_pcie_dw_ops;
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -210,7 +210,6 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie,
 		return ret;
 	}
 
-	pp->root_bus_nr = -1;
 	pp->ops = &spear13xx_pcie_host_ops;
 
 	ret = dw_pcie_host_init(pp);