Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (27 commits)
  x86: allocate space within a region top-down
  x86: update iomem_resource end based on CPU physical address capabilities
  x86/PCI: allocate space from the end of a region, not the beginning
  PCI: allocate bus resources from the top down
  resources: support allocating space within a region from the top down
  resources: handle overflow when aligning start of available area
  resources: ensure callback doesn't allocate outside available space
  resources: factor out resource_clip() to simplify find_resource()
  resources: add a default alignf to simplify find_resource()
  x86/PCI: MMCONFIG: fix region end calculation
  PCI: Add support for polling PME state on suspended legacy PCI devices
  PCI: Export some PCI PM functionality
  PCI: fix message typo
  PCI: log vendor/device ID always
  PCI: update Intel chipset names and defines
  PCI: use new ccflags variable in Makefile
  PCI: add PCI_MSIX_TABLE/PBA defines
  PCI: add PCI vendor id for STmicroelectronics
  x86/PCI: irq and pci_ids patch for Intel Patsburg DeviceIDs
  PCI: OLPC: Only enable PCI configuration type override on XO-1
  ...
commit e9f29c9a56
27 changed files with 395 additions and 70 deletions
@@ -2175,6 +2175,11 @@ and is between 256 and 4096 characters. It is defined in the file
 	reset_devices	[KNL] Force drivers to reset the underlying device
 			during initialization.
 
+	resource_alloc_from_bottom
+			Allocate new resources from the beginning of available
+			space, not the end.  If you need to use this, please
+			report a bug.
+
 	resume=		[SWSUSP]
 			Specify the partition device for software suspend
@@ -769,6 +769,8 @@ void __init setup_arch(char **cmdline_p)
 
 	x86_init.oem.arch_setup();
 
+	resource_alloc_from_bottom = 0;
+	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
 	setup_memory_map();
 	parse_setup_data();
 	/* update the e820_saved too */
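For illustration only (not part of the diff): the new iomem_resource.end value is simply the highest physical address the CPU can generate. A standalone check of the arithmetic, using a made-up x86_phys_bits value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int x86_phys_bits = 36;    /* hypothetical CPU with 36 physical address bits */
    uint64_t end = (1ULL << x86_phys_bits) - 1;

    /* prints 0xfffffffff, the last byte the CPU can address */
    printf("iomem_resource.end would be %#llx\n", (unsigned long long)end);
    return 0;
}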
@@ -65,16 +65,21 @@ pcibios_align_resource(void *data, const struct resource *res,
 			resource_size_t size, resource_size_t align)
 {
 	struct pci_dev *dev = data;
-	resource_size_t start = res->start;
+	resource_size_t start = round_down(res->end - size + 1, align);
 
 	if (res->flags & IORESOURCE_IO) {
-		if (skip_isa_ioresource_align(dev))
-			return start;
-		if (start & 0x300)
-			start = (start + 0x3ff) & ~0x3ff;
+
+		/*
+		 * If we're avoiding ISA aliases, the largest contiguous I/O
+		 * port space is 256 bytes.  Clearing bits 9 and 10 preserves
+		 * all 256-byte and smaller alignments, so the result will
+		 * still be correctly aligned.
+		 */
+		if (!skip_isa_ioresource_align(dev))
+			start &= ~0x300;
 	} else if (res->flags & IORESOURCE_MEM) {
 		if (start < BIOS_END)
-			start = BIOS_END;
+			start = res->end;	/* fail; no space */
 	}
 	return start;
 }
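For illustration only (not part of the diff), a small standalone sketch of the two pieces of arithmetic the rewritten pcibios_align_resource() relies on: round_down() placing a candidate region as high as possible below res->end, and clearing bits 9-10 of an I/O port address to dodge ISA aliases while preserving any 256-byte-or-smaller alignment. The values are made up.

#include <stdio.h>
#include <stdint.h>

/* same arithmetic as the kernel's round_down() for a power-of-two alignment */
static uint64_t round_down_pow2(uint64_t x, uint64_t align)
{
    return x & ~(align - 1);
}

int main(void)
{
    uint64_t res_end = 0xdfff, size = 0x1000, align = 0x1000;

    /* highest aligned start that still leaves 'size' bytes below res_end */
    uint64_t start = round_down_pow2(res_end - size + 1, align);
    printf("top-down start: %#llx\n", (unsigned long long)start);  /* 0xd000 */

    /* ISA-alias avoidance: clear bits 9 and 10 of an I/O port address */
    uint64_t io = 0x0700;
    printf("ISA-safe start: %#llx\n", (unsigned long long)(io & ~0x300ULL)); /* 0x400 */
    return 0;
}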
@@ -584,27 +584,28 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route
 		case PCI_DEVICE_ID_INTEL_ICH9_3:
 		case PCI_DEVICE_ID_INTEL_ICH9_4:
 		case PCI_DEVICE_ID_INTEL_ICH9_5:
-		case PCI_DEVICE_ID_INTEL_TOLAPAI_0:
+		case PCI_DEVICE_ID_INTEL_EP80579_0:
 		case PCI_DEVICE_ID_INTEL_ICH10_0:
 		case PCI_DEVICE_ID_INTEL_ICH10_1:
 		case PCI_DEVICE_ID_INTEL_ICH10_2:
 		case PCI_DEVICE_ID_INTEL_ICH10_3:
+		case PCI_DEVICE_ID_INTEL_PATSBURG_LPC:
 			r->name = "PIIX/ICH";
 			r->get = pirq_piix_get;
 			r->set = pirq_piix_set;
 			return 1;
 	}
 
-	if ((device >= PCI_DEVICE_ID_INTEL_PCH_LPC_MIN) &&
-		(device <= PCI_DEVICE_ID_INTEL_PCH_LPC_MAX)) {
+	if ((device >= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN) &&
+		(device <= PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX)) {
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
 		return 1;
 	}
 
-	if ((device >= PCI_DEVICE_ID_INTEL_CPT_LPC_MIN) &&
-		(device <= PCI_DEVICE_ID_INTEL_CPT_LPC_MAX)) {
+	if ((device >= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN) &&
+		(device <= PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX)) {
 		r->name = "PIIX/ICH";
 		r->get = pirq_piix_get;
 		r->set = pirq_piix_set;
@@ -65,7 +65,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
 					       int end, u64 addr)
 {
 	struct pci_mmcfg_region *new;
-	int num_buses;
 	struct resource *res;
 
 	if (addr == 0)
@@ -82,10 +81,9 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
 
 	list_add_sorted(new);
 
-	num_buses = end - start + 1;
 	res = &new->res;
 	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
-	res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+	res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
 	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
 		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
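A standalone sketch (not from the patch) of why the end calculation changes: PCI_MMCFG_BUS_OFFSET() is one megabyte of ECAM configuration space per bus, so a region covering buses [start, end] must extend to the last byte of bus 'end'; using the bus count only happens to work when start is 0. The base address and bus range below are hypothetical.

#include <stdio.h>
#include <stdint.h>

/* 1 MiB of config space per bus, as in the kernel's PCI_MMCFG_BUS_OFFSET() */
#define MMCFG_BUS_OFFSET(bus)   ((uint64_t)(bus) << 20)

int main(void)
{
    uint64_t addr = 0xe0000000;     /* hypothetical MMCONFIG base */
    int start = 0x20, end = 0x3f;   /* region covers buses 0x20-0x3f */
    int num_buses = end - start + 1;

    uint64_t old_end = addr + MMCFG_BUS_OFFSET(num_buses) - 1; /* too small unless start == 0 */
    uint64_t new_end = addr + MMCFG_BUS_OFFSET(end + 1) - 1;   /* last byte of bus 'end' */

    printf("old end: %#llx  new end: %#llx\n",
           (unsigned long long)old_end, (unsigned long long)new_end);
    return 0;
}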
@@ -95,9 +95,9 @@ config I2C_I801
 	    ESB2
 	    ICH8
 	    ICH9
-	    Tolapai
+	    EP80579 (Tolapai)
 	    ICH10
-	    3400/5 Series (PCH)
+	    5/3400 Series (PCH)
 	    Cougar Point (PCH)
 
 	  This driver can also be built as a module.  If so, the module
@@ -38,10 +38,10 @@
   82801G (ICH7)        0x27da     32     hard     yes     yes     yes
   82801H (ICH8)        0x283e     32     hard     yes     yes     yes
   82801I (ICH9)        0x2930     32     hard     yes     yes     yes
-  Tolapai              0x5032     32     hard     yes     yes     yes
+  EP80579 (Tolapai)    0x5032     32     hard     yes     yes     yes
   ICH10                0x3a30     32     hard     yes     yes     yes
   ICH10                0x3a60     32     hard     yes     yes     yes
-  3400/5 Series (PCH)  0x3b30     32     hard     yes     yes     yes
+  5/3400 Series (PCH)  0x3b30     32     hard     yes     yes     yes
   Cougar Point (PCH)   0x1c22     32     hard     yes     yes     yes
 
   Features supported by this driver:
@@ -587,11 +587,11 @@ static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TOLAPAI_1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
 	{ 0, }
 };
 
@@ -65,6 +65,4 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
 
 obj-$(CONFIG_PCI_STUB) += pci-stub.o
 
-ifeq ($(CONFIG_PCI_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
@@ -64,6 +64,49 @@ void pci_bus_remove_resources(struct pci_bus *bus)
 	}
 }
 
+/*
+ * Find the highest-address bus resource below the cursor "res".  If the
+ * cursor is NULL, return the highest resource.
+ */
+static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus,
+						   unsigned int type,
+						   struct resource *res)
+{
+	struct resource *r, *prev = NULL;
+	int i;
+
+	pci_bus_for_each_resource(bus, r, i) {
+		if (!r)
+			continue;
+
+		if ((r->flags & IORESOURCE_TYPE_BITS) != type)
+			continue;
+
+		/* If this resource is at or past the cursor, skip it */
+		if (res) {
+			if (r == res)
+				continue;
+			if (r->end > res->end)
+				continue;
+			if (r->end == res->end && r->start > res->start)
+				continue;
+		}
+
+		if (!prev)
+			prev = r;
+
+		/*
+		 * A small resource is higher than a large one that ends at
+		 * the same address.
+		 */
+		if (r->end > prev->end ||
+		    (r->end == prev->end && r->start > prev->start))
+			prev = r;
+	}
+
+	return prev;
+}
+
 /**
  * pci_bus_alloc_resource - allocate a resource from a parent bus
  * @bus: PCI bus
@@ -89,9 +132,10 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 					  resource_size_t),
 		void *alignf_data)
 {
-	int i, ret = -ENOMEM;
+	int ret = -ENOMEM;
 	struct resource *r;
 	resource_size_t max = -1;
+	unsigned int type = res->flags & IORESOURCE_TYPE_BITS;
 
 	type_mask |= IORESOURCE_IO | IORESOURCE_MEM;
 
@@ -99,10 +143,9 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
 	if (!(res->flags & IORESOURCE_MEM_64))
 		max = PCIBIOS_MAX_MEM_32;
 
-	pci_bus_for_each_resource(bus, r, i) {
-		if (!r)
-			continue;
-
+	/* Look for space at highest addresses first */
+	r = pci_bus_find_resource_prev(bus, type, NULL);
+	for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) {
 		/* type_mask must match */
 		if ((res->flags ^ r->flags) & type_mask)
 			continue;
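For context (not part of the diff), a tiny userspace sketch of the ordering pci_bus_find_resource_prev() imposes so the allocator can walk bus windows from the highest address downward: a higher end address wins, and on a tie the window with the higher start (the smaller window) is treated as "higher". The windows below are invented.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Toy stand-in for a bus resource: just a [start, end] range. */
struct range { uint64_t start, end; };

/* Return the highest-ordered range strictly below the cursor (NULL cursor = highest overall). */
static struct range *find_prev(struct range *r, size_t n, struct range *cursor)
{
    struct range *prev = NULL;
    for (size_t i = 0; i < n; i++) {
        struct range *cur = &r[i];
        if (cursor) {
            if (cur == cursor)
                continue;
            if (cur->end > cursor->end)
                continue;
            if (cur->end == cursor->end && cur->start > cursor->start)
                continue;
        }
        if (!prev || cur->end > prev->end ||
            (cur->end == prev->end && cur->start > prev->start))
            prev = cur;
    }
    return prev;
}

int main(void)
{
    struct range win[] = {
        { 0x1000, 0x1fff }, { 0xc0000000, 0xdfffffff }, { 0xe0000000, 0xefffffff },
    };
    /* Walk the windows from highest to lowest, as the new allocator loop does. */
    for (struct range *r = find_prev(win, 3, NULL); r; r = find_prev(win, 3, r))
        printf("[%#llx-%#llx]\n", (unsigned long long)r->start,
               (unsigned long long)r->end);
    return 0;
}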
@@ -133,8 +133,8 @@ void __init ibmphp_hpc_initvars (void)
 	debug ("%s - Entry\n", __func__);
 
 	mutex_init(&sem_hpcaccess);
-	init_MUTEX (&semOperations);
-	init_MUTEX_LOCKED (&sem_exit);
+	sema_init(&semOperations, 1);
+	sema_init(&sem_exit, 0);
 	to_debug = 0;
 
 	debug ("%s - Exit\n", __func__);
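As background for the hunk above (a sketch, not taken from the patch): init_MUTEX() and init_MUTEX_LOCKED() were being removed from the kernel, and sema_init() with an explicit initial count is the drop-in replacement. A minimal kernel-context illustration, with hypothetical semaphore names:

#include <linux/semaphore.h>

static struct semaphore ops_sem;	/* starts available: what init_MUTEX() used to do        */
static struct semaphore exit_sem;	/* starts held:      what init_MUTEX_LOCKED() used to do */

static void example_init_sems(void)
{
	sema_init(&ops_sem, 1);		/* count 1: the first down() succeeds immediately */
	sema_init(&exit_sem, 0);	/* count 0: down() blocks until someone calls up() */
}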
@@ -22,8 +22,8 @@
 #define is_64bit_address(control)	(!!(control & PCI_MSI_FLAGS_64BIT))
 #define is_mask_bit_support(control)	(!!(control & PCI_MSI_FLAGS_MASKBIT))
 
-#define msix_table_offset_reg(base)	(base + 0x04)
-#define msix_pba_offset_reg(base)	(base + 0x08)
+#define msix_table_offset_reg(base)	(base + PCI_MSIX_TABLE)
+#define msix_pba_offset_reg(base)	(base + PCI_MSIX_PBA)
 #define msix_table_size(control)	((control & PCI_MSIX_FLAGS_QSIZE)+1)
 #define multi_msix_capable(control)	msix_table_size((control))
 
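As an aside (not from the patch), a minimal userspace sketch of how the dword at capability offset 4, which the new PCI_MSIX_TABLE name refers to, is typically decoded: the low three bits select the BAR holding the MSI-X table and the remaining bits give the offset within that BAR. The register value here is invented.

#include <stdio.h>
#include <stdint.h>

#define PCI_MSIX_TABLE          4       /* dword at cap + 4: table offset + BIR */
#define PCI_MSIX_PBA            8       /* dword at cap + 8: PBA offset + BIR   */
#define PCI_MSIX_FLAGS_BIRMASK  (7 << 0)

int main(void)
{
    uint32_t table_reg = 0x00002003;    /* hypothetical value read at cap + PCI_MSIX_TABLE */

    uint32_t bir    = table_reg & PCI_MSIX_FLAGS_BIRMASK;  /* which BAR holds the table */
    uint32_t offset = table_reg & ~PCI_MSIX_FLAGS_BIRMASK; /* offset into that BAR      */

    printf("MSI-X table: BAR %u, offset %#x\n", (unsigned)bir, (unsigned)offset);
    return 0;
}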
@@ -38,6 +38,19 @@ EXPORT_SYMBOL(pci_pci_problems);
 
 unsigned int pci_pm_d3_delay;
 
+static void pci_pme_list_scan(struct work_struct *work);
+
+static LIST_HEAD(pci_pme_list);
+static DEFINE_MUTEX(pci_pme_list_mutex);
+static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
+
+struct pci_pme_device {
+	struct list_head list;
+	struct pci_dev *dev;
+};
+
+#define PME_TIMEOUT 1000 /* How long between PME checks */
+
 static void pci_dev_d3_sleep(struct pci_dev *dev)
 {
 	unsigned int delay = dev->d3_delay;
@@ -1331,6 +1344,32 @@ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
 	return !!(dev->pme_support & (1 << state));
 }
 
+static void pci_pme_list_scan(struct work_struct *work)
+{
+	struct pci_pme_device *pme_dev;
+
+	mutex_lock(&pci_pme_list_mutex);
+	if (!list_empty(&pci_pme_list)) {
+		list_for_each_entry(pme_dev, &pci_pme_list, list)
+			pci_pme_wakeup(pme_dev->dev, NULL);
+		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
+	}
+	mutex_unlock(&pci_pme_list_mutex);
+}
+
+/**
+ * pci_external_pme - is a device an external PCI PME source?
+ * @dev: PCI device to check
+ *
+ */
+
+static bool pci_external_pme(struct pci_dev *dev)
+{
+	if (pci_is_pcie(dev) || dev->bus->number == 0)
+		return false;
+	return true;
+}
+
 /**
  * pci_pme_active - enable or disable PCI device's PME# function
  * @dev: PCI device to handle.
@@ -1354,6 +1393,44 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
 
 	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 
+	/* PCI (as opposed to PCIe) PME requires that the device have
+	   its PME# line hooked up correctly. Not all hardware vendors
+	   do this, so the PME never gets delivered and the device
+	   remains asleep. The easiest way around this is to
+	   periodically walk the list of suspended devices and check
+	   whether any have their PME flag set. The assumption is that
+	   we'll wake up often enough anyway that this won't be a huge
+	   hit, and the power savings from the devices will still be a
+	   win. */
+
+	if (pci_external_pme(dev)) {
+		struct pci_pme_device *pme_dev;
+		if (enable) {
+			pme_dev = kmalloc(sizeof(struct pci_pme_device),
+					  GFP_KERNEL);
+			if (!pme_dev)
+				goto out;
+			pme_dev->dev = dev;
+			mutex_lock(&pci_pme_list_mutex);
+			list_add(&pme_dev->list, &pci_pme_list);
+			if (list_is_singular(&pci_pme_list))
+				schedule_delayed_work(&pci_pme_work,
+						      msecs_to_jiffies(PME_TIMEOUT));
+			mutex_unlock(&pci_pme_list_mutex);
+		} else {
+			mutex_lock(&pci_pme_list_mutex);
+			list_for_each_entry(pme_dev, &pci_pme_list, list) {
+				if (pme_dev->dev == dev) {
+					list_del(&pme_dev->list);
+					kfree(pme_dev);
+					break;
+				}
+			}
+			mutex_unlock(&pci_pme_list_mutex);
+		}
+	}
+
+out:
 	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
 			enable ? "enabled" : "disabled");
 }
@@ -63,11 +63,8 @@ struct pci_platform_pm_ops {
 extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
 extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
 extern void pci_disable_enabled_device(struct pci_dev *dev);
-extern bool pci_check_pme_status(struct pci_dev *dev);
 extern int pci_finish_runtime_suspend(struct pci_dev *dev);
-extern void pci_wakeup_event(struct pci_dev *dev);
 extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
-extern void pci_pme_wakeup_bus(struct pci_bus *bus);
 extern void pci_pm_init(struct pci_dev *dev);
 extern void platform_pci_wakeup_init(struct pci_dev *dev);
 extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
@@ -416,7 +416,7 @@ static void aer_error_resume(struct pci_dev *dev)
  */
 static int __init aer_service_init(void)
 {
-	if (!pci_aer_available())
+	if (!pci_aer_available() || aer_acpi_firmware_first())
 		return -ENXIO;
 	return pcie_port_service_register(&aerdriver);
 }
@@ -132,6 +132,7 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
 
 #ifdef CONFIG_ACPI_APEI
 extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
+extern bool aer_acpi_firmware_first(void);
 #else
 static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
 {
@@ -139,6 +140,8 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
 		return pci_dev->__aer_firmware_first;
 	return 0;
 }
+
+static inline bool aer_acpi_firmware_first(void) { return false; }
 #endif
 
 static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
@@ -93,4 +93,38 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)
 		aer_set_firmware_first(dev);
 	return dev->__aer_firmware_first;
 }
+
+static bool aer_firmware_first;
+
+static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data)
+{
+	struct acpi_hest_aer_common *p;
+
+	if (aer_firmware_first)
+		return 0;
+
+	switch (hest_hdr->type) {
+	case ACPI_HEST_TYPE_AER_ROOT_PORT:
+	case ACPI_HEST_TYPE_AER_ENDPOINT:
+	case ACPI_HEST_TYPE_AER_BRIDGE:
+		p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
+		aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+	default:
+		return 0;
+	}
+}
+
+/**
+ * aer_acpi_firmware_first - Check if APEI should control AER.
+ */
+bool aer_acpi_firmware_first(void)
+{
+	static bool parsed = false;
+
+	if (!parsed) {
+		apei_hest_parse(aer_hest_parse_aff, NULL);
+		parsed = true;
+	}
+	return aer_firmware_first;
+}
 #endif
@@ -754,7 +754,7 @@ void aer_isr(struct work_struct *work)
 {
 	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
 	struct pcie_device *p_device = rpc->rpd;
-	struct aer_err_source e_src;
+	struct aer_err_source uninitialized_var(e_src);
 
 	mutex_lock(&rpc->rpc_mutex);
 	while (get_e_source(rpc, &e_src))
@@ -49,7 +49,7 @@ int pcie_port_acpi_setup(struct pci_dev *port, int *srv_mask)
 			| OSC_PCI_EXPRESS_PME_CONTROL;
 
 	if (pci_aer_available()) {
-		if (pcie_aer_get_firmware_first(port))
+		if (aer_acpi_firmware_first())
 			dev_dbg(&port->dev, "PCIe errors handled by BIOS.\n");
 		else
 			flags |= OSC_PCI_EXPRESS_AER_CONTROL;
@@ -961,8 +961,8 @@ int pci_setup_device(struct pci_dev *dev)
 	dev->class = class;
 	class >>= 8;
 
-	dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
-		 dev->vendor, dev->device, class, dev->hdr_type);
+	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %d class %#08x\n",
+		   dev->vendor, dev->device, dev->hdr_type, class);
 
 	/* need to have dev->class ready */
 	dev->cfg_size = pci_cfg_space_size(dev);
@@ -303,6 +303,7 @@ static const struct file_operations proc_bus_pci_operations = {
 	.read		= proc_bus_pci_read,
 	.write		= proc_bus_pci_write,
 	.unlocked_ioctl	= proc_bus_pci_ioctl,
+	.compat_ioctl	= proc_bus_pci_ioctl,
 #ifdef HAVE_PCI_MMAP
 	.open		= proc_bus_pci_open,
 	.release	= proc_bus_pci_release,
@@ -2297,6 +2297,37 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
 			PCI_DEVICE_ID_NVIDIA_NVENET_15,
 			nvenet_msi_disable);
 
+/*
+ * Some versions of the MCP55 bridge from nvidia have a legacy irq routing
+ * config register. This register controls the routing of legacy interrupts
+ * from devices that route through the MCP55. If this register is misprogramed
+ * interrupts are only sent to the bsp, unlike conventional systems where the
+ * irq is broadxast to all online cpus. Not having this register set
+ * properly prevents kdump from booting up properly, so lets make sure that
+ * we have it set correctly.
+ * Note this is an undocumented register.
+ */
+static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
+{
+	u32 cfg;
+
+	pci_read_config_dword(dev, 0x74, &cfg);
+
+	if (cfg & ((1 << 2) | (1 << 15))) {
+		printk(KERN_INFO "Rewriting irq routing register on MCP55\n");
+		cfg &= ~((1 << 2) | (1 << 15));
+		pci_write_config_dword(dev, 0x74, cfg);
+	}
+}
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
+			nvbridge_check_legacy_irq_routing);
+
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
+			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
+			nvbridge_check_legacy_irq_routing);
+
 static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
 {
 	int pos, ttl = 48;
@@ -85,7 +85,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
 		}
 	}
 	res->flags &= ~IORESOURCE_UNSET;
-	dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx]\n",
+	dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
 		 resno, res, (unsigned long long)region.start,
 		 (unsigned long long)region.end);
 }
@@ -112,6 +112,7 @@ struct resource_list {
 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
+extern int resource_alloc_from_bottom;
 
 extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
 extern int request_resource(struct resource *root, struct resource *new);
@@ -541,7 +541,7 @@ struct pci_error_handlers {
 struct module;
 struct pci_driver {
 	struct list_head node;
-	char *name;
+	const char *name;
 	const struct pci_device_id *id_table;	/* must be non-NULL for probe to be called */
 	int  (*probe)  (struct pci_dev *dev, const struct pci_device_id *id);	/* New device inserted */
 	void (*remove) (struct pci_dev *dev);	/* Device removed (NULL if not a hot-plug capable driver) */
@@ -819,6 +819,9 @@ pci_power_t pci_target_state(struct pci_dev *dev);
 int pci_prepare_to_sleep(struct pci_dev *dev);
 int pci_back_from_sleep(struct pci_dev *dev);
 bool pci_dev_run_wake(struct pci_dev *dev);
+bool pci_check_pme_status(struct pci_dev *dev);
+void pci_wakeup_event(struct pci_dev *dev);
+void pci_pme_wakeup_bus(struct pci_bus *bus);
 
 static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 				  bool enable)
@@ -767,6 +767,8 @@
 #define PCI_DEVICE_ID_ELSA_MICROLINK	0x1000
 #define PCI_DEVICE_ID_ELSA_QS3000	0x3000
 
+#define PCI_VENDOR_ID_STMICRO		0x104A
+
 #define PCI_VENDOR_ID_BUSLOGIC		0x104B
 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC	0x0140
 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER	0x1040
@@ -1251,6 +1253,8 @@
 #define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2	0x0348
 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000		0x034C
 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100		0x034E
+#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0		0x0360
+#define PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4		0x0364
 #define PCI_DEVICE_ID_NVIDIA_NVENET_15			0x0373
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA		0x03E7
 #define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS		0x03EB
@@ -2458,9 +2462,10 @@
 #define PCI_DEVICE_ID_INTEL_82840_HB	0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB	0x1a30
 #define PCI_DEVICE_ID_INTEL_IOAT	0x1a38
-#define PCI_DEVICE_ID_INTEL_CPT_SMBUS	0x1c22
-#define PCI_DEVICE_ID_INTEL_CPT_LPC_MIN	0x1c41
-#define PCI_DEVICE_ID_INTEL_CPT_LPC_MAX	0x1c5f
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS	0x1c22
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN	0x1c41
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX	0x1c5f
+#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC	0x1d40
 #define PCI_DEVICE_ID_INTEL_82801AA_0	0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1	0x2411
 #define PCI_DEVICE_ID_INTEL_82801AA_3	0x2413
@@ -2669,9 +2674,9 @@
 #define PCI_DEVICE_ID_INTEL_ICH10_3	0x3a1a
 #define PCI_DEVICE_ID_INTEL_ICH10_4	0x3a30
 #define PCI_DEVICE_ID_INTEL_ICH10_5	0x3a60
-#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN	0x3b00
-#define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX	0x3b1f
-#define PCI_DEVICE_ID_INTEL_PCH_SMBUS	0x3b30
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN	0x3b00
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX	0x3b1f
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS		0x3b30
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB	0x402f
 #define PCI_DEVICE_ID_INTEL_5100_16	0x65f0
 #define PCI_DEVICE_ID_INTEL_5100_21	0x65f5
@@ -2680,8 +2685,8 @@
 #define PCI_DEVICE_ID_INTEL_5400_FBD0	0x4035
 #define PCI_DEVICE_ID_INTEL_5400_FBD1	0x4036
 #define PCI_DEVICE_ID_INTEL_IOAT_SCNB	0x65ff
-#define PCI_DEVICE_ID_INTEL_TOLAPAI_0	0x5031
-#define PCI_DEVICE_ID_INTEL_TOLAPAI_1	0x5032
+#define PCI_DEVICE_ID_INTEL_EP80579_0	0x5031
+#define PCI_DEVICE_ID_INTEL_EP80579_1	0x5032
 #define PCI_DEVICE_ID_INTEL_82371SB_0	0x7000
 #define PCI_DEVICE_ID_INTEL_82371SB_1	0x7010
 #define PCI_DEVICE_ID_INTEL_82371SB_2	0x7020
@@ -300,11 +300,13 @@
 #define PCI_MSI_DATA_64		12	/* 16 bits of data for 64-bit devices */
 #define PCI_MSI_MASK_64		16	/* Mask bits register for 64-bit devices */
 
-/* MSI-X registers (these are at offset PCI_MSIX_FLAGS) */
+/* MSI-X registers */
 #define PCI_MSIX_FLAGS		2
 #define PCI_MSIX_FLAGS_QSIZE	0x7FF
 #define PCI_MSIX_FLAGS_ENABLE	(1 << 15)
 #define PCI_MSIX_FLAGS_MASKALL	(1 << 14)
+#define PCI_MSIX_TABLE		4
+#define PCI_MSIX_PBA		8
 #define PCI_MSIX_FLAGS_BIRMASK	(7 << 0)
 
 /* CompactPCI Hotswap Register */
@@ -40,6 +40,23 @@ EXPORT_SYMBOL(iomem_resource);
 
 static DEFINE_RWLOCK(resource_lock);
 
+/*
+ * By default, we allocate free space bottom-up.  The architecture can request
+ * top-down by clearing this flag.  The user can override the architecture's
+ * choice with the "resource_alloc_from_bottom" kernel boot option, but that
+ * should only be a debugging tool.
+ */
+int resource_alloc_from_bottom = 1;
+
+static __init int setup_alloc_from_bottom(char *s)
+{
+	printk(KERN_INFO
+	       "resource: allocating from bottom-up; please report a bug\n");
+	resource_alloc_from_bottom = 1;
+	return 0;
+}
+early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);
+
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct resource *p = v;
@@ -357,8 +374,97 @@ int __weak page_is_ram(unsigned long pfn)
 	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
 }
 
+static resource_size_t simple_align_resource(void *data,
+					     const struct resource *avail,
+					     resource_size_t size,
+					     resource_size_t align)
+{
+	return avail->start;
+}
+
+static void resource_clip(struct resource *res, resource_size_t min,
+			  resource_size_t max)
+{
+	if (res->start < min)
+		res->start = min;
+	if (res->end > max)
+		res->end = max;
+}
+
+static bool resource_contains(struct resource *res1, struct resource *res2)
+{
+	return res1->start <= res2->start && res1->end >= res2->end;
+}
+
+/*
+ * Find the resource before "child" in the sibling list of "root" children.
+ */
+static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
+{
+	struct resource *this;
+
+	for (this = root->child; this; this = this->sibling)
+		if (this->sibling == child)
+			return this;
+
+	return NULL;
+}
+
 /*
  * Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the end of the root resource first.
+ */
+static int find_resource_from_top(struct resource *root, struct resource *new,
+				  resource_size_t size, resource_size_t min,
+				  resource_size_t max, resource_size_t align,
+				  resource_size_t (*alignf)(void *,
+							    const struct resource *,
+							    resource_size_t,
+							    resource_size_t),
+				  void *alignf_data)
+{
+	struct resource *this;
+	struct resource tmp, avail, alloc;
+
+	tmp.start = root->end;
+	tmp.end = root->end;
+
+	this = find_sibling_prev(root, NULL);
+	for (;;) {
+		if (this) {
+			if (this->end < root->end)
+				tmp.start = this->end + 1;
+		} else
+			tmp.start = root->start;
+
+		resource_clip(&tmp, min, max);
+
+		/* Check for overflow after ALIGN() */
+		avail = *new;
+		avail.start = ALIGN(tmp.start, align);
+		avail.end = tmp.end;
+		if (avail.start >= tmp.start) {
+			alloc.start = alignf(alignf_data, &avail, size, align);
+			alloc.end = alloc.start + size - 1;
+			if (resource_contains(&avail, &alloc)) {
+				new->start = alloc.start;
+				new->end = alloc.end;
+				return 0;
+			}
+		}
+
+		if (!this || this->start == root->start)
+			break;
+
+		tmp.end = this->start - 1;
+		this = find_sibling_prev(root, this);
+	}
+	return -EBUSY;
+}
+
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the beginning of the root resource first.
  */
 static int find_resource(struct resource *root, struct resource *new,
 			 resource_size_t size, resource_size_t min,
@@ -370,36 +476,43 @@ static int find_resource(struct resource *root, struct resource *new,
 			 void *alignf_data)
 {
 	struct resource *this = root->child;
-	struct resource tmp = *new;
+	struct resource tmp = *new, avail, alloc;
 
 	tmp.start = root->start;
 	/*
-	 * Skip past an allocated resource that starts at 0, since the assignment
-	 * of this->start - 1 to tmp->end below would cause an underflow.
+	 * Skip past an allocated resource that starts at 0, since the
+	 * assignment of this->start - 1 to tmp->end below would cause an
+	 * underflow.
 	 */
 	if (this && this->start == 0) {
 		tmp.start = this->end + 1;
 		this = this->sibling;
 	}
-	for(;;) {
+	for (;;) {
 		if (this)
 			tmp.end = this->start - 1;
 		else
 			tmp.end = root->end;
-		if (tmp.start < min)
-			tmp.start = min;
-		if (tmp.end > max)
-			tmp.end = max;
-		tmp.start = ALIGN(tmp.start, align);
-		if (alignf)
-			tmp.start = alignf(alignf_data, &tmp, size, align);
-		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
-			new->start = tmp.start;
-			new->end = tmp.start + size - 1;
-			return 0;
+
+		resource_clip(&tmp, min, max);
+
+		/* Check for overflow after ALIGN() */
+		avail = *new;
+		avail.start = ALIGN(tmp.start, align);
+		avail.end = tmp.end;
+		if (avail.start >= tmp.start) {
+			alloc.start = alignf(alignf_data, &avail, size, align);
+			alloc.end = alloc.start + size - 1;
+			if (resource_contains(&avail, &alloc)) {
+				new->start = alloc.start;
+				new->end = alloc.end;
+				return 0;
+			}
 		}
+
 		if (!this)
 			break;
+
 		tmp.start = this->end + 1;
 		this = this->sibling;
 	}
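A standalone illustration (not from the patch) of the wraparound the new "Check for overflow after ALIGN()" guard defends against: near the top of the address space, rounding a start address up to the next alignment boundary can wrap to a smaller value, which the avail.start >= tmp.start comparison then rejects.

#include <stdio.h>
#include <stdint.h>

/* same arithmetic as the kernel's ALIGN() for a power-of-two alignment */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
    uint64_t start = 0xffffffffffffff01ULL;  /* hypothetical available start near the top */
    uint64_t aligned = ALIGN_UP(start, 0x1000);

    if (aligned < start)
        printf("ALIGN wrapped: %#llx -> %#llx, reject this window\n",
               (unsigned long long)start, (unsigned long long)aligned);
    return 0;
}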
@@ -428,8 +541,14 @@ int allocate_resource(struct resource *root, struct resource *new,
 {
 	int err;
 
+	if (!alignf)
+		alignf = simple_align_resource;
+
 	write_lock(&resource_lock);
-	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+	if (resource_alloc_from_bottom)
+		err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+	else
+		err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
 	if (err >= 0 && __request_resource(root, new))
 		err = -EBUSY;
 	write_unlock(&resource_lock);
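As a usage note (a sketch under assumptions, not taken from the patch): with simple_align_resource() installed as the default, callers of allocate_resource() that have no special placement constraints can pass a NULL alignf rather than supplying a trivial callback of their own. The region name, range, and alignment below are made up.

#include <linux/ioport.h>

static struct resource my_region = {
	.name  = "example",
	.flags = IORESOURCE_MEM,
};

static int reserve_example_window(void)
{
	/* 1 MiB, anywhere in [0x80000000, 0xffffffff], 64 KiB aligned, default alignf */
	return allocate_resource(&iomem_resource, &my_region, 0x100000,
				 0x80000000, 0xffffffff, 0x10000, NULL, NULL);
}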