Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6

* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (72 commits)
  Revert "x86/PCI: ACPI based PCI gap calculation"
  PCI: remove unnecessary volatile in PCIe hotplug struct controller
  x86/PCI: ACPI based PCI gap calculation
  PCI: include linux/pm_wakeup.h for device_set_wakeup_capable
  PCI PM: Fix pci_prepare_to_sleep
  x86/PCI: Fix PCI config space for domains > 0
  Fix acpi_pm_device_sleep_wake() by providing a stub for CONFIG_PM_SLEEP=n
  PCI: Simplify PCI device PM code
  PCI PM: Introduce pci_prepare_to_sleep and pci_back_from_sleep
  PCI ACPI: Rework PCI handling of wake-up
  ACPI: Introduce new device wakeup flag 'prepared'
  ACPI: Introduce acpi_device_sleep_wake function
  PCI: rework pci_set_power_state function to call platform first
  PCI: Introduce platform_pci_power_manageable function
  ACPI: Introduce acpi_bus_power_manageable function
  PCI: make pci_name use dev_name
  PCI: handle pci_name() being const
  PCI: add stub for pci_set_consistent_dma_mask()
  PCI: remove unused arch pcibios_update_resource() functions
  PCI: fix pci_setup_device()'s sprinting into a const buffer
  ...

Fixed up conflicts in various files (arch/x86/kernel/setup_64.c,
arch/x86/pci/irq.c, arch/x86/pci/pci.h, drivers/acpi/sleep/main.c,
drivers/pci/pci.c, drivers/pci/pci.h, include/acpi/acpi_bus.h) from x86
and ACPI updates manually.
commit dc7c65db28
Author: Linus Torvalds
Date:   2008-07-16 17:25:46 -07:00

84 changed files with 4005 additions and 1758 deletions


@@ -147,10 +147,14 @@ and is between 256 and 4096 characters. It is defined in the file
 			default: 0
 	acpi_sleep=	[HW,ACPI] Sleep options
-			Format: { s3_bios, s3_mode, s3_beep }
+			Format: { s3_bios, s3_mode, s3_beep, old_ordering }
 			See Documentation/power/video.txt for s3_bios and s3_mode.
 			s3_beep is for debugging; it makes the PC's speaker beep
 			as soon as the kernel's real-mode entry point is called.
+			old_ordering causes the ACPI 1.0 ordering of the _PTS
+			control method, wrt putting devices into low power
+			states, to be enforced (the ACPI 2.0 ordering of _PTS is
+			used by default).
 	acpi_sci=	[HW,ACPI] ACPI System Control Interrupt trigger mode
 			Format: { level | edge | high | low }
@@ -1537,6 +1541,9 @@ and is between 256 and 4096 characters. It is defined in the file
 			Use with caution as certain devices share
 			address decoders between ROMs and other
 			resources.
+		norom		[X86-32,X86_64] Do not assign address space to
+				expansion ROMs that do not already have
+				BIOS assigned address ranges.
 		irqmask=0xMMMM	[X86-32] Set a bit mask of IRQs allowed to be
 				assigned automatically to PCI devices. You can
 				make the kernel exclude IRQs of your ISA cards
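
(Illustration only, not part of the commit: a kernel command line exercising the two options documented above might look like the line below; combining them on one line is an assumption for the example, not something the patch requires.)

	acpi_sleep=s3_bios,old_ordering pci=norom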


@@ -248,7 +248,7 @@ S:	Supported
 ACPI PCI HOTPLUG DRIVER
 P:	Kristen Carlson Accardi
 M:	kristen.c.accardi@intel.com
-L:	pcihpd-discuss@lists.sourceforge.net
+L:	linux-pci@vger.kernel.org
 S:	Supported
 
 ACPI THERMAL DRIVER
@@ -1145,21 +1145,21 @@ COMPACTPCI HOTPLUG CORE
 P:	Scott Murray
 M:	scottm@somanetworks.com
 M:	scott@spiteful.org
-L:	pcihpd-discuss@lists.sourceforge.net
+L:	linux-pci@vger.kernel.org
 S:	Supported
 
 COMPACTPCI HOTPLUG ZIATECH ZT5550 DRIVER
 P:	Scott Murray
 M:	scottm@somanetworks.com
 M:	scott@spiteful.org
-L:	pcihpd-discuss@lists.sourceforge.net
+L:	linux-pci@vger.kernel.org
 S:	Supported
 
 COMPACTPCI HOTPLUG GENERIC DRIVER
 P:	Scott Murray
 M:	scottm@somanetworks.com
 M:	scott@spiteful.org
-L:	pcihpd-discuss@lists.sourceforge.net
+L:	linux-pci@vger.kernel.org
 S:	Supported
 
 COMPAL LAPTOP SUPPORT
@@ -3219,7 +3219,7 @@ S:	Supported
 PCIE HOTPLUG DRIVER
 P:	Kristen Carlson Accardi
 M:	kristen.c.accardi@intel.com
-L:	pcihpd-discuss@lists.sourceforge.net
+L:	linux-pci@vger.kernel.org
 S:	Supported
 
 PCMCIA SUBSYSTEM
@@ -3865,7 +3865,7 @@ S:	Maintained
 SHPC HOTPLUG DRIVER
 P:	Kristen Carlson Accardi
 M:	kristen.c.accardi@intel.com
-L:	pcihpd-discuss@lists.sourceforge.net
+L:	linux-pci@vger.kernel.org
 S:	Supported
 
 SECURE DIGITAL HOST CONTROLLER INTERFACE DRIVER


@@ -19,36 +19,6 @@
 #include "pci-frv.h"
 
-#if 0
-void
-pcibios_update_resource(struct pci_dev *dev, struct resource *root,
-			struct resource *res, int resource)
-{
-	u32 new, check;
-	int reg;
-
-	new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
-	if (resource < 6) {
-		reg = PCI_BASE_ADDRESS_0 + 4*resource;
-	} else if (resource == PCI_ROM_RESOURCE) {
-		res->flags |= IORESOURCE_ROM_ENABLE;
-		new |= PCI_ROM_ADDRESS_ENABLE;
-		reg = dev->rom_base_reg;
-	} else {
-		/* Somebody might have asked allocation of a non-standard resource */
-		return;
-	}
-	pci_write_config_dword(dev, reg, new);
-	pci_read_config_dword(dev, reg, &check);
-	if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
-		printk(KERN_ERR "PCI: Error while updating region "
-		       "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
-		       new, check);
-	}
-}
-#endif
-
 /*
  * We need to avoid collisions with `mirrored' VGA ports
  * and other strange ISA hardware, so we always want the


@@ -373,15 +373,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return(0);
 }
 
-/*****************************************************************************/
-
-void pcibios_update_resource(struct pci_dev *dev, struct resource *root, struct resource *r, int resource)
-{
-	printk(KERN_WARNING "%s(%d): no support for changing PCI resources...\n",
-		__FILE__, __LINE__);
-}
-
 /*****************************************************************************/
 
 /*


@@ -345,42 +345,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return pcibios_enable_resources(dev);
 }
 
-void pcibios_update_resource(struct pci_dev *dev, struct resource *root,
-			     struct resource *res, int resource)
-{
-	u32 new, check;
-	int reg;
-
-	return;
-
-	new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
-	if (resource < 6) {
-		reg = PCI_BASE_ADDRESS_0 + 4 * resource;
-	} else if (resource == PCI_ROM_RESOURCE) {
-		res->flags |= IORESOURCE_ROM_ENABLE;
-		reg = dev->rom_base_reg;
-	} else {
-		/*
-		 * Somebody might have asked allocation of a non-standard
-		 * resource
-		 */
-		return;
-	}
-
-	pci_write_config_dword(dev, reg, new);
-	pci_read_config_dword(dev, reg, &check);
-	if ((new ^ check) &
-	    ((new & PCI_BASE_ADDRESS_SPACE_IO) ? PCI_BASE_ADDRESS_IO_MASK :
-	     PCI_BASE_ADDRESS_MEM_MASK)) {
-		printk(KERN_ERR "PCI: Error while updating region "
-		       "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
-		       new, check);
-	}
-}
-
 void pcibios_align_resource(void *data, struct resource *res,
 			    resource_size_t size, resource_size_t align)
 {


@@ -76,38 +76,6 @@ void __devinit __weak pcibios_fixup_bus(struct pci_bus *bus)
 	pci_read_bridge_bases(bus);
 }
 
-void
-pcibios_update_resource(struct pci_dev *dev, struct resource *root,
-			struct resource *res, int resource)
-{
-	u32 new, check;
-	int reg;
-
-	new = res->start | (res->flags & PCI_REGION_FLAG_MASK);
-	if (resource < 6) {
-		reg = PCI_BASE_ADDRESS_0 + 4*resource;
-	} else if (resource == PCI_ROM_RESOURCE) {
-		res->flags |= IORESOURCE_ROM_ENABLE;
-		new |= PCI_ROM_ADDRESS_ENABLE;
-		reg = dev->rom_base_reg;
-	} else {
-		/*
-		 * Somebody might have asked allocation of a non-standard
-		 * resource
-		 */
-		return;
-	}
-	pci_write_config_dword(dev, reg, new);
-	pci_read_config_dword(dev, reg, &check);
-	if ((new ^ check) & ((new & PCI_BASE_ADDRESS_SPACE_IO) ?
-	    PCI_BASE_ADDRESS_IO_MASK : PCI_BASE_ADDRESS_MEM_MASK)) {
-		printk(KERN_ERR "PCI: Error while updating region "
-		       "%s/%d (%08x != %08x)\n", pci_name(dev), resource,
-		       new, check);
-	}
-}
-
 void pcibios_align_resource(void *data, struct resource *res,
 			    resource_size_t size, resource_size_t align)
 			    __attribute__ ((weak));


@@ -408,7 +408,7 @@ struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 	dev->class = class >> 8;
 	dev->revision = class & 0xff;
 
-	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus),
+	sprintf(dev->dev.bus_id, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
 		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
 	if (ofpci_verbose)


@@ -158,6 +158,8 @@ static int __init acpi_sleep_setup(char *str)
 			acpi_realmode_flags |= 2;
 		if (strncmp(str, "s3_beep", 7) == 0)
 			acpi_realmode_flags |= 4;
+		if (strncmp(str, "old_ordering", 12) == 0)
+			acpi_old_suspend_ordering();
 		str = strchr(str, ',');
 		if (str != NULL)
 			str += strspn(str, ", \t");


@@ -1213,9 +1213,9 @@ static int suspend(int vetoable)
 	if (err != APM_SUCCESS)
 		apm_error("suspend", err);
 	err = (err == APM_SUCCESS) ? 0 : -EIO;
-	device_power_up();
+	device_power_up(PMSG_RESUME);
 	local_irq_enable();
-	device_resume();
+	device_resume(PMSG_RESUME);
 	queue_event(APM_NORMAL_RESUME, NULL);
 	spin_lock(&user_list_lock);
 	for (as = user_list; as != NULL; as = as->next) {
@@ -1240,7 +1240,7 @@ static void standby(void)
 		apm_error("standby", err);
 
 	local_irq_disable();
-	device_power_up();
+	device_power_up(PMSG_RESUME);
 	local_irq_enable();
 }
 
@@ -1326,7 +1326,7 @@ static void check_events(void)
 			ignore_bounce = 1;
 			if ((event != APM_NORMAL_RESUME)
 			    || (ignore_normal_resume == 0)) {
-				device_resume();
+				device_resume(PMSG_RESUME);
 				queue_event(event, NULL);
 			}
 			ignore_normal_resume = 0;


@@ -120,7 +120,18 @@ static struct chipset early_qrk[] __initdata = {
 	{}
 };
 
-static void __init check_dev_quirk(int num, int slot, int func)
+/**
+ * check_dev_quirk - apply early quirks to a given PCI device
+ * @num: bus number
+ * @slot: slot number
+ * @func: PCI function
+ *
+ * Check the vendor & device ID against the early quirks table.
+ *
+ * If the device is single function, let early_quirks() know so we don't
+ * poke at this device again.
+ */
+static int __init check_dev_quirk(int num, int slot, int func)
 {
 	u16 class;
 	u16 vendor;
@@ -131,7 +142,7 @@ static void __init check_dev_quirk(int num, int slot, int func)
 	class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
 
 	if (class == 0xffff)
-		return;
+		return -1; /* no class, treat as single function */
 
 	vendor = read_pci_config_16(num, slot, func, PCI_VENDOR_ID);
 
@@ -154,7 +165,9 @@ static void __init check_dev_quirk(int num, int slot, int func)
 	type = read_pci_config_byte(num, slot, func,
 				    PCI_HEADER_TYPE);
 	if (!(type & 0x80))
-		return;
+		return -1;
+
+	return 0;
 }
 
 void __init early_quirks(void)
@@ -167,6 +180,9 @@ void __init early_quirks(void)
 	/* Poor man's PCI discovery */
 	for (num = 0; num < 32; num++)
 		for (slot = 0; slot < 32; slot++)
-			for (func = 0; func < 8; func++)
-				check_dev_quirk(num, slot, func);
+			for (func = 0; func < 8; func++) {
+				/* Only probe function 0 on single fn devices */
+				if (check_dev_quirk(num, slot, func))
+					break;
+			}
 }


@@ -684,6 +684,11 @@ void __init setup_arch(char **cmdline_p)
 		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
 	}
 
+#ifdef CONFIG_PCI
+	if (pci_early_dump_regs)
+		early_dump_pci_devices();
+#endif
+
 	finish_e820_parsing();
 
 #ifdef CONFIG_X86_32


@@ -20,6 +20,7 @@
 unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
 				PCI_PROBE_MMCONF;
 
+unsigned int pci_early_dump_regs;
 static int pci_bf_sort;
 int pci_routeirq;
 int pcibios_last_bus = -1;
@@ -31,7 +32,7 @@ struct pci_raw_ops *raw_pci_ext_ops;
 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
 						int reg, int len, u32 *val)
 {
-	if (reg < 256 && raw_pci_ops)
+	if (domain == 0 && reg < 256 && raw_pci_ops)
 		return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
 	if (raw_pci_ext_ops)
 		return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
@@ -41,7 +42,7 @@ int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
 						int reg, int len, u32 val)
 {
-	if (reg < 256 && raw_pci_ops)
+	if (domain == 0 && reg < 256 && raw_pci_ops)
 		return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
 	if (raw_pci_ext_ops)
 		return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
@@ -121,6 +122,21 @@ void __init dmi_check_skip_isa_align(void)
 	dmi_check_system(can_skip_pciprobe_dmi_table);
 }
 
+static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
+{
+	struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];
+
+	if (pci_probe & PCI_NOASSIGN_ROMS) {
+		if (rom_r->parent)
+			return;
+		if (rom_r->start) {
+			/* we deal with BIOS assigned ROM later */
+			return;
+		}
+		rom_r->start = rom_r->end = rom_r->flags = 0;
+	}
+}
+
 /*
  *  Called after each bus is probed, but before its children
  *  are examined.
@@ -128,7 +144,11 @@ void __init dmi_check_skip_isa_align(void)
 
 void __devinit pcibios_fixup_bus(struct pci_bus *b)
 {
+	struct pci_dev *dev;
+
 	pci_read_bridge_bases(b);
+	list_for_each_entry(dev, &b->devices, bus_list)
+		pcibios_fixup_device_resources(dev);
 }
 
 /*
@@ -481,12 +501,18 @@ char * __devinit pcibios_setup(char *str)
 	else if (!strcmp(str, "rom")) {
 		pci_probe |= PCI_ASSIGN_ROMS;
 		return NULL;
+	} else if (!strcmp(str, "norom")) {
+		pci_probe |= PCI_NOASSIGN_ROMS;
+		return NULL;
 	} else if (!strcmp(str, "assign-busses")) {
 		pci_probe |= PCI_ASSIGN_ALL_BUSSES;
 		return NULL;
 	} else if (!strcmp(str, "use_crs")) {
 		pci_probe |= PCI_USE__CRS;
 		return NULL;
+	} else if (!strcmp(str, "earlydump")) {
+		pci_early_dump_regs = 1;
+		return NULL;
 	} else if (!strcmp(str, "routeirq")) {
 		pci_routeirq = 1;
 		return NULL;
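
(Usage sketch, not part of the commit: pcibios_setup() above handles the x86 "pci=" sub-options, so the new switches would presumably be requested at boot as shown; the rest of the command line is omitted.)

	pci=earlydump	dump raw config space of all devices during early boot
	pci=norom	set PCI_NOASSIGN_ROMS: do not assign address space to expansion ROMs the BIOS left unassigned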


@@ -49,7 +49,14 @@ void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
 {
 	PDprintk("%x writing to %x: %x\n", slot, offset, val);
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
-	outb(val, 0xcfc);
+	outb(val, 0xcfc + (offset&3));
+}
+
+void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
+{
+	PDprintk("%x writing to %x: %x\n", slot, offset, val);
+	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
+	outw(val, 0xcfc + (offset&2));
 }
 
 int early_pci_allowed(void)
@@ -57,3 +64,54 @@ int early_pci_allowed(void)
 	return (pci_probe & (PCI_PROBE_CONF1|PCI_PROBE_NOEARLY)) ==
 			PCI_PROBE_CONF1;
 }
+
+void early_dump_pci_device(u8 bus, u8 slot, u8 func)
+{
+	int i;
+	int j;
+	u32 val;
+
+	printk("PCI: %02x:%02x:%02x", bus, slot, func);
+
+	for (i = 0; i < 256; i += 4) {
+		if (!(i & 0x0f))
+			printk("\n%04x:",i);
+
+		val = read_pci_config(bus, slot, func, i);
+		for (j = 0; j < 4; j++) {
+			printk(" %02x", val & 0xff);
+			val >>= 8;
+		}
+	}
+	printk("\n");
+}
+
+void early_dump_pci_devices(void)
+{
+	unsigned bus, slot, func;
+
+	if (!early_pci_allowed())
+		return;
+
+	for (bus = 0; bus < 256; bus++) {
+		for (slot = 0; slot < 32; slot++) {
+			for (func = 0; func < 8; func++) {
+				u32 class;
+				u8 type;
+				class = read_pci_config(bus, slot, func,
+							PCI_CLASS_REVISION);
+				if (class == 0xffffffff)
+					break;
+
+				early_dump_pci_device(bus, slot, func);
+
+				/* No multi-function device? */
+				type = read_pci_config_byte(bus, slot, func,
+							    PCI_HEADER_TYPE);
+				if (!(type & 0x80))
+					break;
+			}
+		}
+	}
+}


@ -45,7 +45,8 @@ struct irq_router {
char *name; char *name;
u16 vendor, device; u16 vendor, device;
int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq); int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new); int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq,
int new);
}; };
struct irq_router_handler { struct irq_router_handler {
@ -77,7 +78,8 @@ static inline struct irq_routing_table *pirq_check_routing_table(u8 *addr)
for (i = 0; i < rt->size; i++) for (i = 0; i < rt->size; i++)
sum += addr[i]; sum += addr[i];
if (!sum) { if (!sum) {
DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt); DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n",
rt);
return rt; return rt;
} }
return NULL; return NULL;
@ -183,7 +185,8 @@ static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset,
return (nr & 1) ? (x >> 4) : (x & 0xf); return (nr & 1) ? (x >> 4) : (x & 0xf);
} }
static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val) static void write_config_nybble(struct pci_dev *router, unsigned offset,
unsigned nr, unsigned int val)
{ {
u8 x; u8 x;
unsigned reg = offset + (nr >> 1); unsigned reg = offset + (nr >> 1);
@ -467,7 +470,8 @@ static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int
return inb(0xc01) & 0xf; return inb(0xc01) & 0xf;
} }
static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq) static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev,
int pirq, int irq)
{ {
outb(pirq, 0xc00); outb(pirq, 0xc00);
outb(irq, 0xc01); outb(irq, 0xc01);
@ -660,7 +664,8 @@ static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router
} }
static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device) static __init int serverworks_router_probe(struct irq_router *r,
struct pci_dev *router, u16 device)
{ {
switch (device) { switch (device) {
case PCI_DEVICE_ID_SERVERWORKS_OSB4: case PCI_DEVICE_ID_SERVERWORKS_OSB4:
@ -827,10 +832,12 @@ static void __init pirq_find_router(struct irq_router *r)
for (h = pirq_routers; h->vendor; h++) { for (h = pirq_routers; h->vendor; h++) {
/* First look for a router match */ /* First look for a router match */
if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device)) if (rt->rtr_vendor == h->vendor &&
h->probe(r, pirq_router_dev, rt->rtr_device))
break; break;
/* Fall back to a device match */ /* Fall back to a device match */
if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device)) if (pirq_router_dev->vendor == h->vendor &&
h->probe(r, pirq_router_dev, pirq_router_dev->device))
break; break;
} }
printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n", printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
@ -845,11 +852,13 @@ static void __init pirq_find_router(struct irq_router *r)
static struct irq_info *pirq_get_info(struct pci_dev *dev) static struct irq_info *pirq_get_info(struct pci_dev *dev)
{ {
struct irq_routing_table *rt = pirq_table; struct irq_routing_table *rt = pirq_table;
int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); int entries = (rt->size - sizeof(struct irq_routing_table)) /
sizeof(struct irq_info);
struct irq_info *info; struct irq_info *info;
for (info = rt->slots; entries--; info++) for (info = rt->slots; entries--; info++)
if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn)) if (info->bus == dev->bus->number &&
PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
return info; return info;
return NULL; return NULL;
} }
@ -890,7 +899,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
DBG(" -> not routed\n" KERN_DEBUG); DBG(" -> not routed\n" KERN_DEBUG);
return 0; return 0;
} }
DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs); DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask,
pirq_table->exclusive_irqs);
mask &= pcibios_irq_mask; mask &= pcibios_irq_mask;
/* Work around broken HP Pavilion Notebooks which assign USB to /* Work around broken HP Pavilion Notebooks which assign USB to
@ -903,7 +913,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
} }
/* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) { if (acer_tm360_irqrouting && dev->irq == 11 &&
dev->vendor == PCI_VENDOR_ID_O2) {
pirq = 0x68; pirq = 0x68;
mask = 0x400; mask = 0x400;
dev->irq = r->get(pirq_router_dev, dev, pirq); dev->irq = r->get(pirq_router_dev, dev, pirq);
@ -920,15 +931,16 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
newirq = 0; newirq = 0;
else else
printk("\n" KERN_WARNING printk("\n" KERN_WARNING
"PCI: IRQ %i for device %s doesn't match PIRQ mask " "PCI: IRQ %i for device %s doesn't match PIRQ mask - try pci=usepirqmask\n"
"- try pci=usepirqmask\n" KERN_DEBUG, newirq, KERN_DEBUG, newirq,
pci_name(dev)); pci_name(dev));
} }
if (!newirq && assign) { if (!newirq && assign) {
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
if (!(mask & (1 << i))) if (!(mask & (1 << i)))
continue; continue;
if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED)) if (pirq_penalty[i] < pirq_penalty[newirq] &&
can_request_irq(i, IRQF_SHARED))
newirq = i; newirq = i;
} }
} }
@ -944,7 +956,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
DBG(" -> got IRQ %d\n", irq); DBG(" -> got IRQ %d\n", irq);
msg = "Found"; msg = "Found";
eisa_set_level_irq(irq); eisa_set_level_irq(irq);
} else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) { } else if (newirq && r->set &&
(dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
DBG(" -> assigning IRQ %d", newirq); DBG(" -> assigning IRQ %d", newirq);
if (r->set(pirq_router_dev, dev, pirq, newirq)) { if (r->set(pirq_router_dev, dev, pirq, newirq)) {
eisa_set_level_irq(newirq); eisa_set_level_irq(newirq);
@ -962,7 +975,8 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
} else } else
return 0; return 0;
} }
printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev)); printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq,
pci_name(dev));
/* Update IRQ for all devices with the same pirq value */ /* Update IRQ for all devices with the same pirq value */
while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) { while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
@ -974,7 +988,10 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
if (!info) if (!info)
continue; continue;
if (info->irq[pin].link == pirq) { if (info->irq[pin].link == pirq) {
/* We refuse to override the dev->irq information. Give a warning! */ /*
* We refuse to override the dev->irq
* information. Give a warning!
*/
if (dev2->irq && dev2->irq != irq && \ if (dev2->irq && dev2->irq != irq && \
(!(pci_probe & PCI_USE_PIRQ_MASK) || \ (!(pci_probe & PCI_USE_PIRQ_MASK) || \
((1 << dev2->irq) & mask))) { ((1 << dev2->irq) & mask))) {
@ -987,7 +1004,9 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
dev2->irq = irq; dev2->irq = irq;
pirq_penalty[irq]++; pirq_penalty[irq]++;
if (dev != dev2) if (dev != dev2)
printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2)); printk(KERN_INFO
"PCI: Sharing IRQ %d with %s\n",
irq, pci_name(dev2));
} }
} }
return 1; return 1;
@ -1001,15 +1020,21 @@ static void __init pcibios_fixup_irqs(void)
DBG(KERN_DEBUG "PCI: IRQ fixup\n"); DBG(KERN_DEBUG "PCI: IRQ fixup\n");
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
/* /*
* If the BIOS has set an out of range IRQ number, just ignore it. * If the BIOS has set an out of range IRQ number, just
* Also keep track of which IRQ's are already in use. * ignore it. Also keep track of which IRQ's are
* already in use.
*/ */
if (dev->irq >= 16) { if (dev->irq >= 16) {
DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq); DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n",
pci_name(dev), dev->irq);
dev->irq = 0; dev->irq = 0;
} }
/* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */ /*
if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000) * If the IRQ is already assigned to a PCI device,
* ignore its ISA use penalty
*/
if (pirq_penalty[dev->irq] >= 100 &&
pirq_penalty[dev->irq] < 100000)
pirq_penalty[dev->irq] = 0; pirq_penalty[dev->irq] = 0;
pirq_penalty[dev->irq]++; pirq_penalty[dev->irq]++;
} }
@ -1025,8 +1050,13 @@ static void __init pcibios_fixup_irqs(void)
int irq; int irq;
if (pin) { if (pin) {
pin--; /* interrupt pins are numbered starting from 1 */ /*
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin); * interrupt pins are numbered starting
* from 1
*/
pin--;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
PCI_SLOT(dev->devfn), pin);
/* /*
* Busses behind bridges are typically not listed in the MP-table. * Busses behind bridges are typically not listed in the MP-table.
* In this case we have to look up the IRQ based on the parent bus, * In this case we have to look up the IRQ based on the parent bus,
@ -1067,7 +1097,8 @@ static int __init fix_broken_hp_bios_irq9(const struct dmi_system_id *d)
{ {
if (!broken_hp_bios_irq9) { if (!broken_hp_bios_irq9) {
broken_hp_bios_irq9 = 1; broken_hp_bios_irq9 = 1;
printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); printk(KERN_INFO "%s detected - fixing broken IRQ routing\n",
d->ident);
} }
return 0; return 0;
} }
@ -1080,7 +1111,8 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
{ {
if (!acer_tm360_irqrouting) { if (!acer_tm360_irqrouting) {
acer_tm360_irqrouting = 1; acer_tm360_irqrouting = 1;
printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident); printk(KERN_INFO "%s detected - fixing broken IRQ routing\n",
d->ident);
} }
return 0; return 0;
} }
@ -1092,7 +1124,8 @@ static struct dmi_system_id __initdata pciirq_dmi_table[] = {
.matches = { .matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"), DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"), DMI_MATCH(DMI_PRODUCT_VERSION,
"HP Pavilion Notebook Model GE"),
DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
}, },
}, },
@ -1131,7 +1164,10 @@ int __init pcibios_irq_init(void)
if (!(pirq_table->exclusive_irqs & (1 << i))) if (!(pirq_table->exclusive_irqs & (1 << i)))
pirq_penalty[i] += 100; pirq_penalty[i] += 100;
} }
/* If we're using the I/O APIC, avoid using the PCI IRQ routing table */ /*
* If we're using the I/O APIC, avoid using the PCI IRQ
* routing table
*/
if (io_apic_assign_pci_irqs) if (io_apic_assign_pci_irqs)
pirq_table = NULL; pirq_table = NULL;
} }
@ -1175,7 +1211,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
char *msg = ""; char *msg = "";
pin--; /* interrupt pins are numbered starting from 1 */ pin--; /* interrupt pins are numbered starting from 1 */
if (io_apic_assign_pci_irqs) { if (io_apic_assign_pci_irqs) {
int irq; int irq;
@ -1195,13 +1231,16 @@ static int pirq_enable_irq(struct pci_dev *dev)
irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number, irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
PCI_SLOT(bridge->devfn), pin); PCI_SLOT(bridge->devfn), pin);
if (irq >= 0) if (irq >= 0)
printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n", printk(KERN_WARNING
pci_name(bridge), 'A' + pin, irq); "PCI: using PPB %s[%c] to get irq %d\n",
pci_name(bridge),
'A' + pin, irq);
dev = bridge; dev = bridge;
} }
dev = temp_dev; dev = temp_dev;
if (irq >= 0) { if (irq >= 0) {
printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n", printk(KERN_INFO
"PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
pci_name(dev), 'A' + pin, irq); pci_name(dev), 'A' + pin, irq);
dev->irq = irq; dev->irq = irq;
return 0; return 0;
@ -1212,12 +1251,17 @@ static int pirq_enable_irq(struct pci_dev *dev)
else else
msg = " Please try using pci=biosirq."; msg = " Please try using pci=biosirq.";
/* With IDE legacy devices the IRQ lookup failure is not a problem.. */ /*
if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5)) * With IDE legacy devices the IRQ lookup failure is not
* a problem..
*/
if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE &&
!(dev->class & 0x5))
return 0; return 0;
printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n", printk(KERN_WARNING
'A' + pin, pci_name(dev), msg); "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
'A' + pin, pci_name(dev), msg);
} }
return 0; return 0;
} }


@@ -28,6 +28,7 @@
 #define PCI_USE__CRS		0x10000
 #define PCI_CHECK_ENABLE_AMD_MMCONF	0x20000
 #define PCI_HAS_IO_ECS		0x40000
+#define PCI_NOASSIGN_ROMS	0x80000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;


@@ -336,6 +336,15 @@ config ACPI_EC
 	  the battery and thermal drivers. If you are compiling for a
 	  mobile system, say Y.
 
+config ACPI_PCI_SLOT
+	tristate "PCI slot detection driver"
+	default n
+	help
+	  This driver will attempt to discover all PCI slots in your system,
+	  and creates entries in /sys/bus/pci/slots/. This feature can
+	  help you correlate PCI bus addresses with the physical geography
+	  of your slots. If you are unsure, say N.
+
 config ACPI_POWER
 	bool
 	default y
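
(Illustrative only, not part of the commit: with the new option built as a module, one would expect roughly the following; the slot names listed are hypothetical and depend on the platform's _SUN values.)

	CONFIG_ACPI_PCI_SLOT=m
	# modprobe pci_slot
	# ls /sys/bus/pci/slots/
	1  2  3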


@@ -48,6 +48,7 @@ obj-$(CONFIG_ACPI_DOCK)	+= dock.o
 obj-$(CONFIG_ACPI_BAY)		+= bay.o
 obj-$(CONFIG_ACPI_VIDEO)	+= video.o
 obj-y				+= pci_root.o pci_link.o pci_irq.o pci_bind.o
+obj-$(CONFIG_ACPI_PCI_SLOT)	+= pci_slot.o
 obj-$(CONFIG_ACPI_POWER)	+= power.o
 obj-$(CONFIG_ACPI_PROCESSOR)	+= processor.o
 obj-$(CONFIG_ACPI_CONTAINER)	+= container.o


@@ -295,6 +295,28 @@ int acpi_bus_set_power(acpi_handle handle, int state)
 
 EXPORT_SYMBOL(acpi_bus_set_power);
 
+bool acpi_bus_power_manageable(acpi_handle handle)
+{
+	struct acpi_device *device;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	return result ? false : device->flags.power_manageable;
+}
+
+EXPORT_SYMBOL(acpi_bus_power_manageable);
+
+bool acpi_bus_can_wakeup(acpi_handle handle)
+{
+	struct acpi_device *device;
+	int result;
+
+	result = acpi_bus_get_device(handle, &device);
+	return result ? false : device->wakeup.flags.valid;
+}
+
+EXPORT_SYMBOL(acpi_bus_can_wakeup);
+
 /* --------------------------------------------------------------------------
                                 Event Management
    -------------------------------------------------------------------------- */


@@ -166,6 +166,8 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
 					"firmware_node");
 		ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
 					"physical_node");
+		if (acpi_dev->wakeup.flags.valid)
+			device_set_wakeup_capable(dev, true);
 	}
 
 	return 0;

drivers/acpi/pci_slot.c (new file, 368 lines)

@ -0,0 +1,368 @@
/*
* pci_slot.c - ACPI PCI Slot Driver
*
* The code here is heavily leveraged from the acpiphp module.
* Thanks to Matthew Wilcox <matthew@wil.cx> for much guidance.
* Thanks to Kenji Kaneshige <kaneshige.kenji@jp.fujitsu.com> for code
* review and fixes.
*
* Copyright (C) 2007 Alex Chiang <achiang@hp.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
static int debug;
static int check_sta_before_sun;
#define DRIVER_VERSION "0.1"
#define DRIVER_AUTHOR "Alex Chiang <achiang@hp.com>"
#define DRIVER_DESC "ACPI PCI Slot Detection Driver"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
module_param(debug, bool, 0644);
#define _COMPONENT ACPI_PCI_COMPONENT
ACPI_MODULE_NAME("pci_slot");
#define MY_NAME "pci_slot"
#define err(format, arg...) printk(KERN_ERR "%s: " format , MY_NAME , ## arg)
#define info(format, arg...) printk(KERN_INFO "%s: " format , MY_NAME , ## arg)
#define dbg(format, arg...) \
do { \
if (debug) \
printk(KERN_DEBUG "%s: " format, \
MY_NAME , ## arg); \
} while (0)
#define SLOT_NAME_SIZE 20 /* Inspired by #define in acpiphp.h */
struct acpi_pci_slot {
acpi_handle root_handle; /* handle of the root bridge */
struct pci_slot *pci_slot; /* corresponding pci_slot */
struct list_head list; /* node in the list of slots */
};
static int acpi_pci_slot_add(acpi_handle handle);
static void acpi_pci_slot_remove(acpi_handle handle);
static LIST_HEAD(slot_list);
static DEFINE_MUTEX(slot_list_lock);
static struct acpi_pci_driver acpi_pci_slot_driver = {
.add = acpi_pci_slot_add,
.remove = acpi_pci_slot_remove,
};
static int
check_slot(acpi_handle handle, int *device, unsigned long *sun)
{
int retval = 0;
unsigned long adr, sta;
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
dbg("Checking slot on path: %s\n", (char *)buffer.pointer);
if (check_sta_before_sun) {
/* If SxFy doesn't have _STA, we just assume it's there */
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && !(sta & ACPI_STA_DEVICE_PRESENT)) {
retval = -1;
goto out;
}
}
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
dbg("_ADR returned %d on %s\n", status, (char *)buffer.pointer);
retval = -1;
goto out;
}
*device = (adr >> 16) & 0xffff;
/* No _SUN == not a slot == bail */
status = acpi_evaluate_integer(handle, "_SUN", NULL, sun);
if (ACPI_FAILURE(status)) {
dbg("_SUN returned %d on %s\n", status, (char *)buffer.pointer);
retval = -1;
goto out;
}
out:
kfree(buffer.pointer);
return retval;
}
struct callback_args {
acpi_walk_callback user_function; /* only for walk_p2p_bridge */
struct pci_bus *pci_bus;
acpi_handle root_handle;
};
/*
* register_slot
*
* Called once for each SxFy object in the namespace. Don't worry about
* calling pci_create_slot multiple times for the same pci_bus:device,
* since each subsequent call simply bumps the refcount on the pci_slot.
*
* The number of calls to pci_destroy_slot from unregister_slot is
* symmetrical.
*/
static acpi_status
register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int device;
unsigned long sun;
char name[SLOT_NAME_SIZE];
struct acpi_pci_slot *slot;
struct pci_slot *pci_slot;
struct callback_args *parent_context = context;
struct pci_bus *pci_bus = parent_context->pci_bus;
if (check_slot(handle, &device, &sun))
return AE_OK;
slot = kmalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
err("%s: cannot allocate memory\n", __func__);
return AE_OK;
}
snprintf(name, sizeof(name), "%u", (u32)sun);
pci_slot = pci_create_slot(pci_bus, device, name);
if (IS_ERR(pci_slot)) {
err("pci_create_slot returned %ld\n", PTR_ERR(pci_slot));
kfree(slot);
}
slot->root_handle = parent_context->root_handle;
slot->pci_slot = pci_slot;
INIT_LIST_HEAD(&slot->list);
mutex_lock(&slot_list_lock);
list_add(&slot->list, &slot_list);
mutex_unlock(&slot_list_lock);
dbg("pci_slot: %p, pci_bus: %x, device: %d, name: %s\n",
pci_slot, pci_bus->number, device, name);
return AE_OK;
}
/*
* walk_p2p_bridge - discover and walk p2p bridges
* @handle: points to an acpi_pci_root
* @context: p2p_bridge_context pointer
*
* Note that when we call ourselves recursively, we pass a different
* value of pci_bus in the child_context.
*/
static acpi_status
walk_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
{
int device, function;
unsigned long adr;
acpi_status status;
acpi_handle dummy_handle;
acpi_walk_callback user_function;
struct pci_dev *dev;
struct pci_bus *pci_bus;
struct callback_args child_context;
struct callback_args *parent_context = context;
pci_bus = parent_context->pci_bus;
user_function = parent_context->user_function;
status = acpi_get_handle(handle, "_ADR", &dummy_handle);
if (ACPI_FAILURE(status))
return AE_OK;
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status))
return AE_OK;
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
if (!dev || !dev->subordinate)
goto out;
child_context.pci_bus = dev->subordinate;
child_context.user_function = user_function;
child_context.root_handle = parent_context->root_handle;
dbg("p2p bridge walk, pci_bus = %x\n", dev->subordinate->number);
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
user_function, &child_context, NULL);
if (ACPI_FAILURE(status))
goto out;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
walk_p2p_bridge, &child_context, NULL);
out:
pci_dev_put(dev);
return AE_OK;
}
/*
* walk_root_bridge - generic root bridge walker
* @handle: points to an acpi_pci_root
* @user_function: user callback for slot objects
*
* Call user_function for all objects underneath this root bridge.
* Walk p2p bridges underneath us and call user_function on those too.
*/
static int
walk_root_bridge(acpi_handle handle, acpi_walk_callback user_function)
{
int seg, bus;
unsigned long tmp;
acpi_status status;
acpi_handle dummy_handle;
struct pci_bus *pci_bus;
struct callback_args context;
/* If the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
if (ACPI_SUCCESS(status)) {
status = acpi_evaluate_integer(handle, "_STA", NULL, &tmp);
if (ACPI_FAILURE(status)) {
info("%s: _STA evaluation failure\n", __func__);
return 0;
}
if ((tmp & ACPI_STA_DEVICE_FUNCTIONING) == 0)
/* don't register this object */
return 0;
}
status = acpi_evaluate_integer(handle, "_SEG", NULL, &tmp);
seg = ACPI_SUCCESS(status) ? tmp : 0;
status = acpi_evaluate_integer(handle, "_BBN", NULL, &tmp);
bus = ACPI_SUCCESS(status) ? tmp : 0;
pci_bus = pci_find_bus(seg, bus);
if (!pci_bus)
return 0;
context.pci_bus = pci_bus;
context.user_function = user_function;
context.root_handle = handle;
dbg("root bridge walk, pci_bus = %x\n", pci_bus->number);
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
user_function, &context, NULL);
if (ACPI_FAILURE(status))
return status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
walk_p2p_bridge, &context, NULL);
if (ACPI_FAILURE(status))
err("%s: walk_p2p_bridge failure - %d\n", __func__, status);
return status;
}
/*
* acpi_pci_slot_add
* @handle: points to an acpi_pci_root
*/
static int
acpi_pci_slot_add(acpi_handle handle)
{
acpi_status status;
status = walk_root_bridge(handle, register_slot);
if (ACPI_FAILURE(status))
err("%s: register_slot failure - %d\n", __func__, status);
return status;
}
/*
* acpi_pci_slot_remove
* @handle: points to an acpi_pci_root
*/
static void
acpi_pci_slot_remove(acpi_handle handle)
{
struct acpi_pci_slot *slot, *tmp;
mutex_lock(&slot_list_lock);
list_for_each_entry_safe(slot, tmp, &slot_list, list) {
if (slot->root_handle == handle) {
list_del(&slot->list);
pci_destroy_slot(slot->pci_slot);
kfree(slot);
}
}
mutex_unlock(&slot_list_lock);
}
static int do_sta_before_sun(const struct dmi_system_id *d)
{
info("%s detected: will evaluate _STA before calling _SUN\n", d->ident);
check_sta_before_sun = 1;
return 0;
}
static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
/*
* Fujitsu Primequest machines will return 1023 to indicate an
* error if the _SUN method is evaluated on SxFy objects that
* are not present (as indicated by _STA), so for those machines,
* we want to check _STA before evaluating _SUN.
*/
{
.callback = do_sta_before_sun,
.ident = "Fujitsu PRIMEQUEST",
.matches = {
DMI_MATCH(DMI_BIOS_VENDOR, "FUJITSU LIMITED"),
DMI_MATCH(DMI_BIOS_VERSION, "PRIMEQUEST"),
},
},
{}
};
static int __init
acpi_pci_slot_init(void)
{
dmi_check_system(acpi_pci_slot_dmi_table);
acpi_pci_register_driver(&acpi_pci_slot_driver);
return 0;
}
static void __exit
acpi_pci_slot_exit(void)
{
acpi_pci_unregister_driver(&acpi_pci_slot_driver);
}
module_init(acpi_pci_slot_init);
module_exit(acpi_pci_slot_exit);


@ -292,69 +292,135 @@ static int acpi_power_off_device(acpi_handle handle, struct acpi_device *dev)
return 0; return 0;
} }
/**
* acpi_device_sleep_wake - execute _DSW (Device Sleep Wake) or (deprecated in
* ACPI 3.0) _PSW (Power State Wake)
* @dev: Device to handle.
* @enable: 0 - disable, 1 - enable the wake capabilities of the device.
* @sleep_state: Target sleep state of the system.
* @dev_state: Target power state of the device.
*
* Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
* State Wake) for the device, if present. On failure reset the device's
* wakeup.flags.valid flag.
*
* RETURN VALUE:
* 0 if either _DSW or _PSW has been successfully executed
* 0 if neither _DSW nor _PSW has been found
* -ENODEV if the execution of either _DSW or _PSW has failed
*/
int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state)
{
union acpi_object in_arg[3];
struct acpi_object_list arg_list = { 3, in_arg };
acpi_status status = AE_OK;
/*
* Try to execute _DSW first.
*
* Three agruments are needed for the _DSW object:
* Argument 0: enable/disable the wake capabilities
* Argument 1: target system state
* Argument 2: target device state
* When _DSW object is called to disable the wake capabilities, maybe
* the first argument is filled. The values of the other two agruments
* are meaningless.
*/
in_arg[0].type = ACPI_TYPE_INTEGER;
in_arg[0].integer.value = enable;
in_arg[1].type = ACPI_TYPE_INTEGER;
in_arg[1].integer.value = sleep_state;
in_arg[2].type = ACPI_TYPE_INTEGER;
in_arg[2].integer.value = dev_state;
status = acpi_evaluate_object(dev->handle, "_DSW", &arg_list, NULL);
if (ACPI_SUCCESS(status)) {
return 0;
} else if (status != AE_NOT_FOUND) {
printk(KERN_ERR PREFIX "_DSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
/* Execute _PSW */
arg_list.count = 1;
in_arg[0].integer.value = enable;
status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
printk(KERN_ERR PREFIX "_PSW execution failed\n");
dev->wakeup.flags.valid = 0;
return -ENODEV;
}
return 0;
}
/* /*
* Prepare a wakeup device, two steps (Ref ACPI 2.0:P229): * Prepare a wakeup device, two steps (Ref ACPI 2.0:P229):
* 1. Power on the power resources required for the wakeup device * 1. Power on the power resources required for the wakeup device
* 2. Enable _PSW (power state wake) for the device if present * 2. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
* State Wake) for the device, if present
*/ */
int acpi_enable_wakeup_device_power(struct acpi_device *dev) int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{ {
union acpi_object arg = { ACPI_TYPE_INTEGER }; int i, err;
struct acpi_object_list arg_list = { 1, &arg };
acpi_status status = AE_OK;
int i;
int ret = 0;
if (!dev || !dev->wakeup.flags.valid) if (!dev || !dev->wakeup.flags.valid)
return -1; return -EINVAL;
/*
* Do not execute the code below twice in a row without calling
* acpi_disable_wakeup_device_power() in between for the same device
*/
if (dev->wakeup.flags.prepared)
return 0;
arg.integer.value = 1;
/* Open power resource */ /* Open power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) { for (i = 0; i < dev->wakeup.resources.count; i++) {
ret = acpi_power_on(dev->wakeup.resources.handles[i], dev); int ret = acpi_power_on(dev->wakeup.resources.handles[i], dev);
if (ret) { if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n"); printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0; dev->wakeup.flags.valid = 0;
return -1; return -ENODEV;
} }
} }
/* Execute PSW */ /*
status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); * Passing 3 as the third argument below means the device may be placed
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { * in arbitrary power state afterwards.
printk(KERN_ERR PREFIX "Evaluate _PSW\n"); */
dev->wakeup.flags.valid = 0; err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
ret = -1; if (!err)
} dev->wakeup.flags.prepared = 1;
return ret; return err;
} }
/* /*
* Shutdown a wakeup device, counterpart of above method * Shutdown a wakeup device, counterpart of above method
* 1. Disable _PSW (power state wake) * 1. Execute _DSW (Device Sleep Wake) or (deprecated in ACPI 3.0) _PSW (Power
* State Wake) for the device, if present
* 2. Shutdown down the power resources * 2. Shutdown down the power resources
*/ */
int acpi_disable_wakeup_device_power(struct acpi_device *dev) int acpi_disable_wakeup_device_power(struct acpi_device *dev)
{ {
union acpi_object arg = { ACPI_TYPE_INTEGER }; int i, ret;
struct acpi_object_list arg_list = { 1, &arg };
acpi_status status = AE_OK;
int i;
int ret = 0;
if (!dev || !dev->wakeup.flags.valid) if (!dev || !dev->wakeup.flags.valid)
return -1; return -EINVAL;
arg.integer.value = 0; /*
/* Execute PSW */ * Do not execute the code below twice in a row without calling
status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL); * acpi_enable_wakeup_device_power() in between for the same device
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) { */
printk(KERN_ERR PREFIX "Evaluate _PSW\n"); if (!dev->wakeup.flags.prepared)
dev->wakeup.flags.valid = 0; return 0;
return -1;
} dev->wakeup.flags.prepared = 0;
ret = acpi_device_sleep_wake(dev, 0, 0, 0);
if (ret)
return ret;
/* Close power resource */ /* Close power resource */
for (i = 0; i < dev->wakeup.resources.count; i++) { for (i = 0; i < dev->wakeup.resources.count; i++) {
@ -362,7 +428,7 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
if (ret) { if (ret) {
printk(KERN_ERR PREFIX "Transition power state\n"); printk(KERN_ERR PREFIX "Transition power state\n");
dev->wakeup.flags.valid = 0; dev->wakeup.flags.valid = 0;
return -1; return -ENODEV;
} }
} }


@ -703,9 +703,7 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
acpi_status status = 0; acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL; union acpi_object *package = NULL;
union acpi_object in_arg[3]; int psw_error;
struct acpi_object_list arg_list = { 3, in_arg };
acpi_status psw_status = AE_OK;
struct acpi_device_id button_device_ids[] = { struct acpi_device_id button_device_ids[] = {
{"PNP0C0D", 0}, {"PNP0C0D", 0},
@ -737,39 +735,11 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
* So it is necessary to call _DSW object first. Only when it is not * So it is necessary to call _DSW object first. Only when it is not
* present will the _PSW object used. * present will the _PSW object used.
*/ */
/* psw_error = acpi_device_sleep_wake(device, 0, 0, 0);
* Three agruments are needed for the _DSW object. if (psw_error)
* Argument 0: enable/disable the wake capabilities ACPI_DEBUG_PRINT((ACPI_DB_INFO,
* When _DSW object is called to disable the wake capabilities, maybe "error in _DSW or _PSW evaluation\n"));
* the first argument is filled. The value of the other two agruments
* is meaningless.
*/
in_arg[0].type = ACPI_TYPE_INTEGER;
in_arg[0].integer.value = 0;
in_arg[1].type = ACPI_TYPE_INTEGER;
in_arg[1].integer.value = 0;
in_arg[2].type = ACPI_TYPE_INTEGER;
in_arg[2].integer.value = 0;
psw_status = acpi_evaluate_object(device->handle, "_DSW",
&arg_list, NULL);
if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in evaluate _DSW\n"));
/*
* When the _DSW object is not present, OSPM will call _PSW object.
*/
if (psw_status == AE_NOT_FOUND) {
/*
* Only one agruments is required for the _PSW object.
* agrument 0: enable/disable the wake capabilities
*/
arg_list.count = 1;
in_arg[0].integer.value = 0;
psw_status = acpi_evaluate_object(device->handle, "_PSW",
&arg_list, NULL);
if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in "
"evaluate _PSW\n"));
}
/* Power button, Lid switch always enable wakeup */ /* Power button, Lid switch always enable wakeup */
if (!acpi_match_device_ids(device, button_device_ids)) if (!acpi_match_device_ids(device, button_device_ids))
device->wakeup.flags.run_wake = 1; device->wakeup.flags.run_wake = 1;


@ -24,10 +24,6 @@
u8 sleep_states[ACPI_S_STATE_COUNT]; u8 sleep_states[ACPI_S_STATE_COUNT];
#ifdef CONFIG_PM_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
#endif
static int acpi_sleep_prepare(u32 acpi_state) static int acpi_sleep_prepare(u32 acpi_state)
{ {
#ifdef CONFIG_ACPI_SLEEP #ifdef CONFIG_ACPI_SLEEP
@ -49,9 +45,96 @@ static int acpi_sleep_prepare(u32 acpi_state)
return 0; return 0;
} }
#ifdef CONFIG_SUSPEND #ifdef CONFIG_PM_SLEEP
static struct platform_suspend_ops acpi_suspend_ops; static u32 acpi_target_sleep_state = ACPI_STATE_S0;
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
* kernel command line option that causes the following variable to be set.
*/
static bool old_suspend_ordering;
void __init acpi_old_suspend_ordering(void)
{
old_suspend_ordering = true;
}
/**
* acpi_pm_disable_gpes - Disable the GPEs.
*/
static int acpi_pm_disable_gpes(void)
{
acpi_hw_disable_all_gpes();
return 0;
}
/**
* __acpi_pm_prepare - Prepare the platform to enter the target state.
*
* If necessary, set the firmware waking vector and do arch-specific
* nastiness to get the wakeup code to the waking vector.
*/
static int __acpi_pm_prepare(void)
{
int error = acpi_sleep_prepare(acpi_target_sleep_state);
if (error)
acpi_target_sleep_state = ACPI_STATE_S0;
return error;
}
/**
* acpi_pm_prepare - Prepare the platform to enter the target sleep
* state and disable the GPEs.
*/
static int acpi_pm_prepare(void)
{
int error = __acpi_pm_prepare();
if (!error)
acpi_hw_disable_all_gpes();
return error;
}
/**
* acpi_pm_finish - Instruct the platform to leave a sleep state.
*
* This is called after we wake back up (or if entering the sleep state
* failed).
*/
static void acpi_pm_finish(void)
{
u32 acpi_state = acpi_target_sleep_state;
if (acpi_state == ACPI_STATE_S0)
return;
printk(KERN_INFO PREFIX "Waking up from system sleep state S%d\n",
acpi_state);
acpi_disable_wakeup_device(acpi_state);
acpi_leave_sleep_state(acpi_state);
/* reset firmware waking vector */
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
acpi_target_sleep_state = ACPI_STATE_S0;
}
/**
* acpi_pm_end - Finish up suspend sequence.
*/
static void acpi_pm_end(void)
{
/*
* This is necessary in case acpi_pm_finish() is not called during a
* failing transition to a sleep state.
*/
acpi_target_sleep_state = ACPI_STATE_S0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
extern void do_suspend_lowlevel(void); extern void do_suspend_lowlevel(void);
static u32 acpi_suspend_states[] = { static u32 acpi_suspend_states[] = {
@ -65,7 +148,6 @@ static u32 acpi_suspend_states[] = {
* acpi_suspend_begin - Set the target system sleep state to the state * acpi_suspend_begin - Set the target system sleep state to the state
* associated with given @pm_state, if supported. * associated with given @pm_state, if supported.
*/ */
static int acpi_suspend_begin(suspend_state_t pm_state) static int acpi_suspend_begin(suspend_state_t pm_state)
{ {
u32 acpi_state = acpi_suspend_states[pm_state]; u32 acpi_state = acpi_suspend_states[pm_state];
@ -81,25 +163,6 @@ static int acpi_suspend_begin(suspend_state_t pm_state)
return error; return error;
} }
/**
* acpi_suspend_prepare - Do preliminary suspend work.
*
* If necessary, set the firmware waking vector and do arch-specific
* nastiness to get the wakeup code to the waking vector.
*/
static int acpi_suspend_prepare(void)
{
int error = acpi_sleep_prepare(acpi_target_sleep_state);
if (error) {
acpi_target_sleep_state = ACPI_STATE_S0;
return error;
}
return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT;
}
/** /**
* acpi_suspend_enter - Actually enter a sleep state. * acpi_suspend_enter - Actually enter a sleep state.
* @pm_state: ignored * @pm_state: ignored
@ -108,7 +171,6 @@ static int acpi_suspend_prepare(void)
* assembly, which in turn call acpi_enter_sleep_state(). * assembly, which in turn call acpi_enter_sleep_state().
* It's unfortunate, but it works. Please fix if you're feeling frisky. * It's unfortunate, but it works. Please fix if you're feeling frisky.
*/ */
static int acpi_suspend_enter(suspend_state_t pm_state) static int acpi_suspend_enter(suspend_state_t pm_state)
{ {
acpi_status status = AE_OK; acpi_status status = AE_OK;
@ -165,39 +227,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
return ACPI_SUCCESS(status) ? 0 : -EFAULT; return ACPI_SUCCESS(status) ? 0 : -EFAULT;
} }
/**
* acpi_suspend_finish - Instruct the platform to leave a sleep state.
*
* This is called after we wake back up (or if entering the sleep state
* failed).
*/
static void acpi_suspend_finish(void)
{
u32 acpi_state = acpi_target_sleep_state;
acpi_disable_wakeup_device(acpi_state);
acpi_leave_sleep_state(acpi_state);
/* reset firmware waking vector */
acpi_set_firmware_waking_vector((acpi_physical_address) 0);
acpi_target_sleep_state = ACPI_STATE_S0;
}
/**
* acpi_suspend_end - Finish up suspend sequence.
*/
static void acpi_suspend_end(void)
{
/*
* This is necessary in case acpi_suspend_finish() is not called during a
* failing transition to a sleep state.
*/
acpi_target_sleep_state = ACPI_STATE_S0;
}
static int acpi_suspend_state_valid(suspend_state_t pm_state) static int acpi_suspend_state_valid(suspend_state_t pm_state)
{ {
u32 acpi_state; u32 acpi_state;
@ -217,10 +246,39 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
static struct platform_suspend_ops acpi_suspend_ops = { static struct platform_suspend_ops acpi_suspend_ops = {
.valid = acpi_suspend_state_valid, .valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin, .begin = acpi_suspend_begin,
-    .prepare = acpi_suspend_prepare,
+    .prepare = acpi_pm_prepare,
     .enter = acpi_suspend_enter,
-    .finish = acpi_suspend_finish,
+    .finish = acpi_pm_finish,
-    .end = acpi_suspend_end,
+    .end = acpi_pm_end,
};
/**
* acpi_suspend_begin_old - Set the target system sleep state to the
* state associated with given @pm_state, if supported, and
* execute the _PTS control method. This function is used if the
* pre-ACPI 2.0 suspend ordering has been requested.
*/
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
int error = acpi_suspend_begin(pm_state);
if (!error)
error = __acpi_pm_prepare();
return error;
}
/*
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
* been requested.
*/
static struct platform_suspend_ops acpi_suspend_ops_old = {
.valid = acpi_suspend_state_valid,
.begin = acpi_suspend_begin_old,
.prepare = acpi_pm_disable_gpes,
.enter = acpi_suspend_enter,
.finish = acpi_pm_finish,
.end = acpi_pm_end,
.recover = acpi_pm_finish,
}; };
#endif /* CONFIG_SUSPEND */ #endif /* CONFIG_SUSPEND */
@ -228,22 +286,9 @@ static struct platform_suspend_ops acpi_suspend_ops = {
static int acpi_hibernation_begin(void) static int acpi_hibernation_begin(void)
{ {
acpi_target_sleep_state = ACPI_STATE_S4; acpi_target_sleep_state = ACPI_STATE_S4;
return 0; return 0;
} }
static int acpi_hibernation_prepare(void)
{
int error = acpi_sleep_prepare(ACPI_STATE_S4);
if (error) {
acpi_target_sleep_state = ACPI_STATE_S0;
return error;
}
return ACPI_SUCCESS(acpi_hw_disable_all_gpes()) ? 0 : -EFAULT;
}
static int acpi_hibernation_enter(void) static int acpi_hibernation_enter(void)
{ {
acpi_status status = AE_OK; acpi_status status = AE_OK;
@ -273,52 +318,55 @@ static void acpi_hibernation_leave(void)
acpi_leave_sleep_state_prep(ACPI_STATE_S4); acpi_leave_sleep_state_prep(ACPI_STATE_S4);
} }
-static void acpi_hibernation_finish(void)
-{
-    acpi_disable_wakeup_device(ACPI_STATE_S4);
-    acpi_leave_sleep_state(ACPI_STATE_S4);
-    /* reset firmware waking vector */
-    acpi_set_firmware_waking_vector((acpi_physical_address) 0);
-    acpi_target_sleep_state = ACPI_STATE_S0;
-}
-
-static void acpi_hibernation_end(void)
-{
-    /*
-     * This is necessary in case acpi_hibernation_finish() is not called
-     * during a failing transition to the sleep state.
-     */
-    acpi_target_sleep_state = ACPI_STATE_S0;
-}
-
-static int acpi_hibernation_pre_restore(void)
-{
-    acpi_status status;
-    status = acpi_hw_disable_all_gpes();
-    return ACPI_SUCCESS(status) ? 0 : -EFAULT;
-}
-
-static void acpi_hibernation_restore_cleanup(void)
+static void acpi_pm_enable_gpes(void)
 {
     acpi_hw_enable_all_runtime_gpes();
 }
 static struct platform_hibernation_ops acpi_hibernation_ops = {
     .begin = acpi_hibernation_begin,
-    .end = acpi_hibernation_end,
+    .end = acpi_pm_end,
-    .pre_snapshot = acpi_hibernation_prepare,
+    .pre_snapshot = acpi_pm_prepare,
-    .finish = acpi_hibernation_finish,
+    .finish = acpi_pm_finish,
-    .prepare = acpi_hibernation_prepare,
+    .prepare = acpi_pm_prepare,
     .enter = acpi_hibernation_enter,
     .leave = acpi_hibernation_leave,
-    .pre_restore = acpi_hibernation_pre_restore,
+    .pre_restore = acpi_pm_disable_gpes,
-    .restore_cleanup = acpi_hibernation_restore_cleanup,
+    .restore_cleanup = acpi_pm_enable_gpes,
 };
#endif /* CONFIG_HIBERNATION */
/**
* acpi_hibernation_begin_old - Set the target system sleep state to
* ACPI_STATE_S4 and execute the _PTS control method. This
* function is used if the pre-ACPI 2.0 suspend ordering has been
* requested.
*/
static int acpi_hibernation_begin_old(void)
{
int error = acpi_sleep_prepare(ACPI_STATE_S4);
if (!error)
acpi_target_sleep_state = ACPI_STATE_S4;
return error;
}
/*
* The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
* been requested.
*/
static struct platform_hibernation_ops acpi_hibernation_ops_old = {
.begin = acpi_hibernation_begin_old,
.end = acpi_pm_end,
.pre_snapshot = acpi_pm_disable_gpes,
.finish = acpi_pm_finish,
.prepare = acpi_pm_disable_gpes,
.enter = acpi_hibernation_enter,
.leave = acpi_hibernation_leave,
.pre_restore = acpi_pm_disable_gpes,
.restore_cleanup = acpi_pm_enable_gpes,
.recover = acpi_pm_finish,
};
#endif /* CONFIG_HIBERNATION */
int acpi_suspend(u32 acpi_state) int acpi_suspend(u32 acpi_state)
{ {
@ -419,6 +467,31 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
*d_min_p = d_min; *d_min_p = d_min;
return d_max; return d_max;
} }
/**
* acpi_pm_device_sleep_wake - enable or disable the system wake-up
* capability of given device
* @dev: device to handle
* @enable: 'true' - enable, 'false' - disable the wake-up capability
*/
int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
acpi_handle handle;
struct acpi_device *adev;
if (!device_may_wakeup(dev))
return -EINVAL;
handle = DEVICE_ACPI_HANDLE(dev);
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
printk(KERN_DEBUG "ACPI handle has no context!\n");
return -ENODEV;
}
return enable ?
acpi_enable_wakeup_device_power(adev, acpi_target_sleep_state) :
acpi_disable_wakeup_device_power(adev);
}
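A hypothetical caller, sketched only to show the intended use: a driver suspend routine arming the wake-up GPE for a device that userspace has marked wakeup-capable (example_suspend and the surrounding driver are invented):

static int example_suspend(struct device *dev)
{
    /* Arm the wake-up GPE only if userspace allowed it. */
    if (device_may_wakeup(dev))
        acpi_pm_device_sleep_wake(dev, true);
    /* ... save device state ... */
    return 0;
}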
#endif #endif
static void acpi_power_off_prepare(void) static void acpi_power_off_prepare(void)
@ -460,13 +533,15 @@ int __init acpi_sleep_init(void)
} }
} }
-    suspend_set_ops(&acpi_suspend_ops);
+    suspend_set_ops(old_suspend_ordering ?
+        &acpi_suspend_ops_old : &acpi_suspend_ops);
#endif #endif
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION
status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
-        hibernation_set_ops(&acpi_hibernation_ops);
+        hibernation_set_ops(old_suspend_ordering ?
+            &acpi_hibernation_ops_old : &acpi_hibernation_ops);
sleep_states[ACPI_STATE_S4] = 1; sleep_states[ACPI_STATE_S4] = 1;
printk(" S4"); printk(" S4");
} }

View file

@ -42,7 +42,7 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state)
continue; continue;
spin_unlock(&acpi_device_lock); spin_unlock(&acpi_device_lock);
-        acpi_enable_wakeup_device_power(dev);
+        acpi_enable_wakeup_device_power(dev, sleep_state);
spin_lock(&acpi_device_lock); spin_lock(&acpi_device_lock);
} }
spin_unlock(&acpi_device_lock); spin_unlock(&acpi_device_lock);
@ -66,13 +66,15 @@ void acpi_enable_wakeup_device(u8 sleep_state)
list_for_each_safe(node, next, &acpi_wakeup_device_list) { list_for_each_safe(node, next, &acpi_wakeup_device_list) {
struct acpi_device *dev = struct acpi_device *dev =
container_of(node, struct acpi_device, wakeup_list); container_of(node, struct acpi_device, wakeup_list);
if (!dev->wakeup.flags.valid) if (!dev->wakeup.flags.valid)
continue; continue;
/* If users want to disable run-wake GPE, /* If users want to disable run-wake GPE,
* we only disable it for wake and leave it for runtime * we only disable it for wake and leave it for runtime
*/ */
-        if (!dev->wakeup.state.enabled ||
-            sleep_state > (u32) dev->wakeup.sleep_state) {
+        if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+            || sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) { if (dev->wakeup.flags.run_wake) {
spin_unlock(&acpi_device_lock); spin_unlock(&acpi_device_lock);
/* set_gpe_type will disable GPE, leave it like that */ /* set_gpe_type will disable GPE, leave it like that */
@ -110,8 +112,9 @@ void acpi_disable_wakeup_device(u8 sleep_state)
if (!dev->wakeup.flags.valid) if (!dev->wakeup.flags.valid)
continue; continue;
-        if (!dev->wakeup.state.enabled ||
-            sleep_state > (u32) dev->wakeup.sleep_state) {
+        if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared)
+            || sleep_state > (u32) dev->wakeup.sleep_state) {
if (dev->wakeup.flags.run_wake) { if (dev->wakeup.flags.run_wake) {
spin_unlock(&acpi_device_lock); spin_unlock(&acpi_device_lock);
acpi_set_gpe_type(dev->wakeup.gpe_device, acpi_set_gpe_type(dev->wakeup.gpe_device,

View file

@ -453,6 +453,8 @@ int platform_driver_register(struct platform_driver *drv)
drv->driver.suspend = platform_drv_suspend; drv->driver.suspend = platform_drv_suspend;
if (drv->resume) if (drv->resume)
drv->driver.resume = platform_drv_resume; drv->driver.resume = platform_drv_resume;
if (drv->pm)
drv->driver.pm = &drv->pm->base;
return driver_register(&drv->driver); return driver_register(&drv->driver);
} }
EXPORT_SYMBOL_GPL(platform_driver_register); EXPORT_SYMBOL_GPL(platform_driver_register);
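A sketch of a platform driver filling in the new pm field wired up above; the foo_* names are invented and only the suspend/resume members are shown:

static int foo_suspend(struct device *dev)
{
    /* save controller context */
    return 0;
}

static int foo_resume(struct device *dev)
{
    /* restore controller context */
    return 0;
}

static struct pm_ext_ops foo_pm_ops = {
    .base = {
        .suspend = foo_suspend,
        .resume = foo_resume,
    },
};

static struct platform_driver foo_driver = {
    .driver = {
        .name = "foo",
        .owner = THIS_MODULE,
    },
    .pm = &foo_pm_ops,
};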
@ -560,7 +562,9 @@ static int platform_match(struct device *dev, struct device_driver *drv)
return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0); return (strncmp(pdev->name, drv->name, BUS_ID_SIZE) == 0);
} }
-static int platform_suspend(struct device *dev, pm_message_t mesg)
+#ifdef CONFIG_PM_SLEEP
+
+static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
{ {
int ret = 0; int ret = 0;
@ -570,7 +574,7 @@ static int platform_suspend(struct device *dev, pm_message_t mesg)
return ret; return ret;
} }
-static int platform_suspend_late(struct device *dev, pm_message_t mesg)
+static int platform_legacy_suspend_late(struct device *dev, pm_message_t mesg)
{ {
struct platform_driver *drv = to_platform_driver(dev->driver); struct platform_driver *drv = to_platform_driver(dev->driver);
struct platform_device *pdev; struct platform_device *pdev;
@ -583,7 +587,7 @@ static int platform_suspend_late(struct device *dev, pm_message_t mesg)
return ret; return ret;
} }
-static int platform_resume_early(struct device *dev)
+static int platform_legacy_resume_early(struct device *dev)
{ {
struct platform_driver *drv = to_platform_driver(dev->driver); struct platform_driver *drv = to_platform_driver(dev->driver);
struct platform_device *pdev; struct platform_device *pdev;
@ -596,7 +600,7 @@ static int platform_resume_early(struct device *dev)
return ret; return ret;
} }
-static int platform_resume(struct device *dev)
+static int platform_legacy_resume(struct device *dev)
{ {
int ret = 0; int ret = 0;
@ -606,15 +610,291 @@ static int platform_resume(struct device *dev)
return ret; return ret;
} }
static int platform_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm && drv->pm->prepare)
ret = drv->pm->prepare(dev);
return ret;
}
static void platform_pm_complete(struct device *dev)
{
struct device_driver *drv = dev->driver;
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
}
#ifdef CONFIG_SUSPEND
static int platform_pm_suspend(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm) {
if (drv->pm->suspend)
ret = drv->pm->suspend(dev);
} else {
ret = platform_legacy_suspend(dev, PMSG_SUSPEND);
}
return ret;
}
static int platform_pm_suspend_noirq(struct device *dev)
{
struct platform_driver *pdrv;
int ret = 0;
if (!dev->driver)
return 0;
pdrv = to_platform_driver(dev->driver);
if (pdrv->pm) {
if (pdrv->pm->suspend_noirq)
ret = pdrv->pm->suspend_noirq(dev);
} else {
ret = platform_legacy_suspend_late(dev, PMSG_SUSPEND);
}
return ret;
}
static int platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm) {
if (drv->pm->resume)
ret = drv->pm->resume(dev);
} else {
ret = platform_legacy_resume(dev);
}
return ret;
}
static int platform_pm_resume_noirq(struct device *dev)
{
struct platform_driver *pdrv;
int ret = 0;
if (!dev->driver)
return 0;
pdrv = to_platform_driver(dev->driver);
if (pdrv->pm) {
if (pdrv->pm->resume_noirq)
ret = pdrv->pm->resume_noirq(dev);
} else {
ret = platform_legacy_resume_early(dev);
}
return ret;
}
#else /* !CONFIG_SUSPEND */
#define platform_pm_suspend NULL
#define platform_pm_resume NULL
#define platform_pm_suspend_noirq NULL
#define platform_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
static int platform_pm_freeze(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (!drv)
return 0;
if (drv->pm) {
if (drv->pm->freeze)
ret = drv->pm->freeze(dev);
} else {
ret = platform_legacy_suspend(dev, PMSG_FREEZE);
}
return ret;
}
static int platform_pm_freeze_noirq(struct device *dev)
{
struct platform_driver *pdrv;
int ret = 0;
if (!dev->driver)
return 0;
pdrv = to_platform_driver(dev->driver);
if (pdrv->pm) {
if (pdrv->pm->freeze_noirq)
ret = pdrv->pm->freeze_noirq(dev);
} else {
ret = platform_legacy_suspend_late(dev, PMSG_FREEZE);
}
return ret;
}
static int platform_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm) {
if (drv->pm->thaw)
ret = drv->pm->thaw(dev);
} else {
ret = platform_legacy_resume(dev);
}
return ret;
}
static int platform_pm_thaw_noirq(struct device *dev)
{
struct platform_driver *pdrv;
int ret = 0;
if (!dev->driver)
return 0;
pdrv = to_platform_driver(dev->driver);
if (pdrv->pm) {
if (pdrv->pm->thaw_noirq)
ret = pdrv->pm->thaw_noirq(dev);
} else {
ret = platform_legacy_resume_early(dev);
}
return ret;
}
static int platform_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm) {
if (drv->pm->poweroff)
ret = drv->pm->poweroff(dev);
} else {
ret = platform_legacy_suspend(dev, PMSG_HIBERNATE);
}
return ret;
}
static int platform_pm_poweroff_noirq(struct device *dev)
{
struct platform_driver *pdrv;
int ret = 0;
if (!dev->driver)
return 0;
pdrv = to_platform_driver(dev->driver);
if (pdrv->pm) {
if (pdrv->pm->poweroff_noirq)
ret = pdrv->pm->poweroff_noirq(dev);
} else {
ret = platform_legacy_suspend_late(dev, PMSG_HIBERNATE);
}
return ret;
}
static int platform_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
int ret = 0;
if (drv && drv->pm) {
if (drv->pm->restore)
ret = drv->pm->restore(dev);
} else {
ret = platform_legacy_resume(dev);
}
return ret;
}
static int platform_pm_restore_noirq(struct device *dev)
{
struct platform_driver *pdrv;
int ret = 0;
if (!dev->driver)
return 0;
pdrv = to_platform_driver(dev->driver);
if (pdrv->pm) {
if (pdrv->pm->restore_noirq)
ret = pdrv->pm->restore_noirq(dev);
} else {
ret = platform_legacy_resume_early(dev);
}
return ret;
}
#else /* !CONFIG_HIBERNATION */
#define platform_pm_freeze NULL
#define platform_pm_thaw NULL
#define platform_pm_poweroff NULL
#define platform_pm_restore NULL
#define platform_pm_freeze_noirq NULL
#define platform_pm_thaw_noirq NULL
#define platform_pm_poweroff_noirq NULL
#define platform_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATION */
struct pm_ext_ops platform_pm_ops = {
.base = {
.prepare = platform_pm_prepare,
.complete = platform_pm_complete,
.suspend = platform_pm_suspend,
.resume = platform_pm_resume,
.freeze = platform_pm_freeze,
.thaw = platform_pm_thaw,
.poweroff = platform_pm_poweroff,
.restore = platform_pm_restore,
},
.suspend_noirq = platform_pm_suspend_noirq,
.resume_noirq = platform_pm_resume_noirq,
.freeze_noirq = platform_pm_freeze_noirq,
.thaw_noirq = platform_pm_thaw_noirq,
.poweroff_noirq = platform_pm_poweroff_noirq,
.restore_noirq = platform_pm_restore_noirq,
};
#define PLATFORM_PM_OPS_PTR &platform_pm_ops
#else /* !CONFIG_PM_SLEEP */
#define PLATFORM_PM_OPS_PTR NULL
#endif /* !CONFIG_PM_SLEEP */
struct bus_type platform_bus_type = { struct bus_type platform_bus_type = {
.name = "platform", .name = "platform",
.dev_attrs = platform_dev_attrs, .dev_attrs = platform_dev_attrs,
.match = platform_match, .match = platform_match,
.uevent = platform_uevent, .uevent = platform_uevent,
-    .suspend = platform_suspend,
-    .suspend_late = platform_suspend_late,
-    .resume_early = platform_resume_early,
-    .resume = platform_resume,
+    .pm = PLATFORM_PM_OPS_PTR,
}; };
EXPORT_SYMBOL_GPL(platform_bus_type); EXPORT_SYMBOL_GPL(platform_bus_type);

View file

@ -12,11 +12,9 @@
* and add it to the list of power-controlled devices. sysfs entries for * and add it to the list of power-controlled devices. sysfs entries for
* controlling device power management will also be added. * controlling device power management will also be added.
* *
- * A different set of lists than the global subsystem list are used to
- * keep track of power info because we use different lists to hold
- * devices based on what stage of the power management process they
- * are in. The power domain dependencies may also differ from the
- * ancestral dependencies that the subsystem list maintains.
+ * A separate list is used for keeping track of power info, because the power
+ * domain dependencies may differ from the ancestral dependencies that the
+ * subsystem list maintains.
*/ */
#include <linux/device.h> #include <linux/device.h>
@ -30,31 +28,40 @@
#include "power.h" #include "power.h"
/* /*
- * The entries in the dpm_active list are in a depth first order, simply
+ * The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and * because children are guaranteed to be discovered after parents, and
* are inserted at the back of the list on discovery. * are inserted at the back of the list on discovery.
* *
* All the other lists are kept in the same order, for consistency.
* However the lists aren't always traversed in the same order.
* Semaphores must be acquired from the top (i.e., front) down
* and released in the opposite order. Devices must be suspended
* from the bottom (i.e., end) up and resumed in the opposite order.
* That way no parent will be suspended while it still has an active
* child.
*
* Since device_pm_add() may be called with a device semaphore held, * Since device_pm_add() may be called with a device semaphore held,
* we must never try to acquire a device semaphore while holding * we must never try to acquire a device semaphore while holding
* dpm_list_mutex. * dpm_list_mutex.
*/ */
-LIST_HEAD(dpm_active);
-static LIST_HEAD(dpm_off);
-static LIST_HEAD(dpm_off_irq);
+LIST_HEAD(dpm_list);
 
 static DEFINE_MUTEX(dpm_list_mtx);
 
-/* 'true' if all devices have been suspended, protected by dpm_list_mtx */
-static bool all_sleeping;
+/*
+ * Set once the preparation of devices for a PM transition has started, reset
+ * before starting to resume devices. Protected by dpm_list_mtx.
+ */
+static bool transition_started;
/**
* device_pm_lock - lock the list of active devices used by the PM core
*/
void device_pm_lock(void)
{
mutex_lock(&dpm_list_mtx);
}
/**
* device_pm_unlock - unlock the list of active devices used by the PM core
*/
void device_pm_unlock(void)
{
mutex_unlock(&dpm_list_mtx);
}
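For illustration only (not part of the patch), code elsewhere in the PM core that walks dpm_list would be expected to bracket the traversal with these helpers:

static void example_walk_dpm_list(void)
{
    struct device *dev;

    device_pm_lock();
    list_for_each_entry(dev, &dpm_list, power.entry)
        dev_dbg(dev, "on the PM list\n");
    device_pm_unlock();
}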
/** /**
* device_pm_add - add a device to the list of active devices * device_pm_add - add a device to the list of active devices
@ -68,17 +75,25 @@ int device_pm_add(struct device *dev)
dev->bus ? dev->bus->name : "No Bus", dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj)); kobject_name(&dev->kobj));
mutex_lock(&dpm_list_mtx); mutex_lock(&dpm_list_mtx);
-    if ((dev->parent && dev->parent->power.sleeping) || all_sleeping) {
+    if (dev->parent) {
-        if (dev->parent->power.sleeping)
+        if (dev->parent->power.status >= DPM_SUSPENDING) {
-            dev_warn(dev, "parent %s is sleeping\n",
+            dev_warn(dev, "parent %s is sleeping, will not add\n",
                 dev->parent->bus_id);
-        else
+            WARN_ON(true);
-            dev_warn(dev, "all devices are sleeping\n");
+        }
+    } else if (transition_started) {
+        /*
+         * We refuse to register parentless devices while a PM
+         * transition is in progress in order to avoid leaving them
+         * unhandled down the road
+         */
         WARN_ON(true);
     }
     error = dpm_sysfs_add(dev);
-    if (!error)
+    if (!error) {
-        list_add_tail(&dev->power.entry, &dpm_active);
+        dev->power.status = DPM_ON;
+        list_add_tail(&dev->power.entry, &dpm_list);
+    }
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
return error; return error;
} }
@ -100,73 +115,243 @@ void device_pm_remove(struct device *dev)
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
} }
/**
* pm_op - execute the PM operation appropriate for given PM event
* @dev: Device.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
{
int error = 0;
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
if (ops->suspend) {
error = ops->suspend(dev);
suspend_report_result(ops->suspend, error);
}
break;
case PM_EVENT_RESUME:
if (ops->resume) {
error = ops->resume(dev);
suspend_report_result(ops->resume, error);
}
break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze) {
error = ops->freeze(dev);
suspend_report_result(ops->freeze, error);
}
break;
case PM_EVENT_HIBERNATE:
if (ops->poweroff) {
error = ops->poweroff(dev);
suspend_report_result(ops->poweroff, error);
}
break;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
if (ops->thaw) {
error = ops->thaw(dev);
suspend_report_result(ops->thaw, error);
}
break;
case PM_EVENT_RESTORE:
if (ops->restore) {
error = ops->restore(dev);
suspend_report_result(ops->restore, error);
}
break;
#endif /* CONFIG_HIBERNATION */
default:
error = -EINVAL;
}
return error;
}
/**
* pm_noirq_op - execute the PM operation appropriate for given PM event
* @dev: Device.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The operation is executed with interrupts disabled by the only remaining
* functional CPU in the system.
*/
static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
pm_message_t state)
{
int error = 0;
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
if (ops->suspend_noirq) {
error = ops->suspend_noirq(dev);
suspend_report_result(ops->suspend_noirq, error);
}
break;
case PM_EVENT_RESUME:
if (ops->resume_noirq) {
error = ops->resume_noirq(dev);
suspend_report_result(ops->resume_noirq, error);
}
break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
if (ops->freeze_noirq) {
error = ops->freeze_noirq(dev);
suspend_report_result(ops->freeze_noirq, error);
}
break;
case PM_EVENT_HIBERNATE:
if (ops->poweroff_noirq) {
error = ops->poweroff_noirq(dev);
suspend_report_result(ops->poweroff_noirq, error);
}
break;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
if (ops->thaw_noirq) {
error = ops->thaw_noirq(dev);
suspend_report_result(ops->thaw_noirq, error);
}
break;
case PM_EVENT_RESTORE:
if (ops->restore_noirq) {
error = ops->restore_noirq(dev);
suspend_report_result(ops->restore_noirq, error);
}
break;
#endif /* CONFIG_HIBERNATION */
default:
error = -EINVAL;
}
return error;
}
static char *pm_verb(int event)
{
switch (event) {
case PM_EVENT_SUSPEND:
return "suspend";
case PM_EVENT_RESUME:
return "resume";
case PM_EVENT_FREEZE:
return "freeze";
case PM_EVENT_QUIESCE:
return "quiesce";
case PM_EVENT_HIBERNATE:
return "hibernate";
case PM_EVENT_THAW:
return "thaw";
case PM_EVENT_RESTORE:
return "restore";
case PM_EVENT_RECOVER:
return "recover";
default:
return "(unknown PM event)";
}
}
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
", may wakeup" : "");
}
static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
int error)
{
printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}
/*------------------------- Resume routines -------------------------*/ /*------------------------- Resume routines -------------------------*/
/** /**
- * resume_device_early - Power on one device (early resume).
+ * resume_device_noirq - Power on one device (early resume).
  * @dev: Device.
+ * @state: PM transition of the system being carried out.
  *
  * Must be called with interrupts disabled.
  */
-static int resume_device_early(struct device *dev)
+static int resume_device_noirq(struct device *dev, pm_message_t state)
{ {
int error = 0; int error = 0;
TRACE_DEVICE(dev); TRACE_DEVICE(dev);
TRACE_RESUME(0); TRACE_RESUME(0);
-    if (dev->bus && dev->bus->resume_early) {
+    if (!dev->bus)
-        dev_dbg(dev, "EARLY resume\n");
+        goto End;
+
+    if (dev->bus->pm) {
+        pm_dev_dbg(dev, state, "EARLY ");
+        error = pm_noirq_op(dev, dev->bus->pm, state);
+    } else if (dev->bus->resume_early) {
+        pm_dev_dbg(dev, state, "legacy EARLY ");
         error = dev->bus->resume_early(dev);
     }
+End:
TRACE_RESUME(error); TRACE_RESUME(error);
return error; return error;
} }
/** /**
* dpm_power_up - Power on all regular (non-sysdev) devices. * dpm_power_up - Power on all regular (non-sysdev) devices.
* @state: PM transition of the system being carried out.
* *
- * Walk the dpm_off_irq list and power each device up. This
- * is used for devices that required they be powered down with
- * interrupts disabled. As devices are powered on, they are moved
- * to the dpm_off list.
+ * Execute the appropriate "noirq resume" callback for all devices marked
+ * as DPM_OFF_IRQ.
  *
  * Must be called with interrupts disabled and only one CPU running.
  */
-static void dpm_power_up(void)
+static void dpm_power_up(pm_message_t state)
{ {
struct device *dev;
while (!list_empty(&dpm_off_irq)) { list_for_each_entry(dev, &dpm_list, power.entry)
struct list_head *entry = dpm_off_irq.next; if (dev->power.status > DPM_OFF) {
struct device *dev = to_device(entry); int error;
list_move_tail(entry, &dpm_off); dev->power.status = DPM_OFF;
resume_device_early(dev); error = resume_device_noirq(dev, state);
} if (error)
pm_dev_err(dev, state, " early", error);
}
} }
/** /**
* device_power_up - Turn on all devices that need special attention. * device_power_up - Turn on all devices that need special attention.
* @state: PM transition of the system being carried out.
* *
* Power on system devices, then devices that required we shut them down * Power on system devices, then devices that required we shut them down
* with interrupts disabled. * with interrupts disabled.
* *
* Must be called with interrupts disabled. * Must be called with interrupts disabled.
*/ */
-void device_power_up(void)
+void device_power_up(pm_message_t state)
 {
     sysdev_resume();
-    dpm_power_up();
+    dpm_power_up(state);
 }
EXPORT_SYMBOL_GPL(device_power_up); EXPORT_SYMBOL_GPL(device_power_up);
/** /**
* resume_device - Restore state for one device. * resume_device - Restore state for one device.
* @dev: Device. * @dev: Device.
* * @state: PM transition of the system being carried out.
*/ */
-static int resume_device(struct device *dev)
+static int resume_device(struct device *dev, pm_message_t state)
{ {
int error = 0; int error = 0;
@ -175,21 +360,40 @@ static int resume_device(struct device *dev)
down(&dev->sem); down(&dev->sem);
if (dev->bus && dev->bus->resume) { if (dev->bus) {
dev_dbg(dev,"resuming\n"); if (dev->bus->pm) {
error = dev->bus->resume(dev); pm_dev_dbg(dev, state, "");
error = pm_op(dev, &dev->bus->pm->base, state);
} else if (dev->bus->resume) {
pm_dev_dbg(dev, state, "legacy ");
error = dev->bus->resume(dev);
}
if (error)
goto End;
} }
if (!error && dev->type && dev->type->resume) { if (dev->type) {
dev_dbg(dev,"resuming\n"); if (dev->type->pm) {
error = dev->type->resume(dev); pm_dev_dbg(dev, state, "type ");
error = pm_op(dev, dev->type->pm, state);
} else if (dev->type->resume) {
pm_dev_dbg(dev, state, "legacy type ");
error = dev->type->resume(dev);
}
if (error)
goto End;
} }
if (!error && dev->class && dev->class->resume) { if (dev->class) {
dev_dbg(dev,"class resume\n"); if (dev->class->pm) {
error = dev->class->resume(dev); pm_dev_dbg(dev, state, "class ");
error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->resume) {
pm_dev_dbg(dev, state, "legacy class ");
error = dev->class->resume(dev);
}
} }
End:
up(&dev->sem); up(&dev->sem);
TRACE_RESUME(error); TRACE_RESUME(error);
@ -198,78 +402,161 @@ static int resume_device(struct device *dev)
/** /**
* dpm_resume - Resume every device. * dpm_resume - Resume every device.
* @state: PM transition of the system being carried out.
* *
* Resume the devices that have either not gone through * Execute the appropriate "resume" callback for all devices the status of
* the late suspend, or that did go through it but also * which indicates that they are inactive.
* went through the early resume.
*
* Take devices from the dpm_off_list, resume them,
* and put them on the dpm_locked list.
*/ */
-static void dpm_resume(void)
+static void dpm_resume(pm_message_t state)
{ {
mutex_lock(&dpm_list_mtx); struct list_head list;
all_sleeping = false;
while(!list_empty(&dpm_off)) {
struct list_head *entry = dpm_off.next;
struct device *dev = to_device(entry);
list_move_tail(entry, &dpm_active); INIT_LIST_HEAD(&list);
dev->power.sleeping = false; mutex_lock(&dpm_list_mtx);
mutex_unlock(&dpm_list_mtx); transition_started = false;
resume_device(dev); while (!list_empty(&dpm_list)) {
mutex_lock(&dpm_list_mtx); struct device *dev = to_device(dpm_list.next);
get_device(dev);
if (dev->power.status >= DPM_OFF) {
int error;
dev->power.status = DPM_RESUMING;
mutex_unlock(&dpm_list_mtx);
error = resume_device(dev, state);
mutex_lock(&dpm_list_mtx);
if (error)
pm_dev_err(dev, state, "", error);
} else if (dev->power.status == DPM_SUSPENDING) {
/* Allow new children of the device to be registered */
dev->power.status = DPM_RESUMING;
}
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &list);
put_device(dev);
} }
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
}
/**
* complete_device - Complete a PM transition for given device
* @dev: Device.
* @state: PM transition of the system being carried out.
*/
static void complete_device(struct device *dev, pm_message_t state)
{
down(&dev->sem);
if (dev->class && dev->class->pm && dev->class->pm->complete) {
pm_dev_dbg(dev, state, "completing class ");
dev->class->pm->complete(dev);
}
if (dev->type && dev->type->pm && dev->type->pm->complete) {
pm_dev_dbg(dev, state, "completing type ");
dev->type->pm->complete(dev);
}
if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
pm_dev_dbg(dev, state, "completing ");
dev->bus->pm->base.complete(dev);
}
up(&dev->sem);
}
/**
* dpm_complete - Complete a PM transition for all devices.
* @state: PM transition of the system being carried out.
*
* Execute the ->complete() callbacks for all devices that are not marked
* as DPM_ON.
*/
static void dpm_complete(pm_message_t state)
{
struct list_head list;
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
get_device(dev);
if (dev->power.status > DPM_ON) {
dev->power.status = DPM_ON;
mutex_unlock(&dpm_list_mtx);
complete_device(dev, state);
mutex_lock(&dpm_list_mtx);
}
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
put_device(dev);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
} }
/** /**
* device_resume - Restore state of each device in system. * device_resume - Restore state of each device in system.
* @state: PM transition of the system being carried out.
* *
* Resume all the devices, unlock them all, and allow new * Resume all the devices, unlock them all, and allow new
* devices to be registered once again. * devices to be registered once again.
*/ */
-void device_resume(void)
+void device_resume(pm_message_t state)
 {
     might_sleep();
-    dpm_resume();
+    dpm_resume(state);
+    dpm_complete(state);
 }
EXPORT_SYMBOL_GPL(device_resume); EXPORT_SYMBOL_GPL(device_resume);
/*------------------------- Suspend routines -------------------------*/ /*------------------------- Suspend routines -------------------------*/
static inline char *suspend_verb(u32 event) /**
* resume_event - return a PM message representing the resume event
* corresponding to given sleep state.
* @sleep_state: PM message representing a sleep state.
*/
static pm_message_t resume_event(pm_message_t sleep_state)
{ {
switch (event) { switch (sleep_state.event) {
case PM_EVENT_SUSPEND: return "suspend"; case PM_EVENT_SUSPEND:
case PM_EVENT_FREEZE: return "freeze"; return PMSG_RESUME;
case PM_EVENT_PRETHAW: return "prethaw"; case PM_EVENT_FREEZE:
default: return "(unknown suspend event)"; case PM_EVENT_QUIESCE:
return PMSG_RECOVER;
case PM_EVENT_HIBERNATE:
return PMSG_RESTORE;
} }
} return PMSG_ON;
static void
suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
{
dev_dbg(dev, "%s%s%s\n", info, suspend_verb(state.event),
((state.event == PM_EVENT_SUSPEND) && device_may_wakeup(dev)) ?
", may wakeup" : "");
} }
/** /**
- * suspend_device_late - Shut down one device (late suspend).
+ * suspend_device_noirq - Shut down one device (late suspend).
  * @dev: Device.
- * @state: Power state device is entering.
+ * @state: PM transition of the system being carried out.
  *
  * This is called with interrupts off and only a single CPU running.
  */
-static int suspend_device_late(struct device *dev, pm_message_t state)
+static int suspend_device_noirq(struct device *dev, pm_message_t state)
{ {
int error = 0; int error = 0;
if (dev->bus && dev->bus->suspend_late) { if (!dev->bus)
suspend_device_dbg(dev, state, "LATE "); return 0;
if (dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
} else if (dev->bus->suspend_late) {
pm_dev_dbg(dev, state, "legacy LATE ");
error = dev->bus->suspend_late(dev, state); error = dev->bus->suspend_late(dev, state);
suspend_report_result(dev->bus->suspend_late, error); suspend_report_result(dev->bus->suspend_late, error);
} }
@ -278,37 +565,30 @@ static int suspend_device_late(struct device *dev, pm_message_t state)
/** /**
* device_power_down - Shut down special devices. * device_power_down - Shut down special devices.
- * @state: Power state to enter.
+ * @state: PM transition of the system being carried out.
  *
- * Power down devices that require interrupts to be disabled
- * and move them from the dpm_off list to the dpm_off_irq list.
+ * Power down devices that require interrupts to be disabled.
* Then power down system devices. * Then power down system devices.
* *
* Must be called with interrupts disabled and only one CPU running. * Must be called with interrupts disabled and only one CPU running.
*/ */
int device_power_down(pm_message_t state) int device_power_down(pm_message_t state)
{ {
struct device *dev;
int error = 0; int error = 0;
while (!list_empty(&dpm_off)) { list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
struct list_head *entry = dpm_off.prev; error = suspend_device_noirq(dev, state);
struct device *dev = to_device(entry);
error = suspend_device_late(dev, state);
if (error) { if (error) {
printk(KERN_ERR "Could not power down device %s: " pm_dev_err(dev, state, " late", error);
"error %d\n",
kobject_name(&dev->kobj), error);
break; break;
} }
if (!list_empty(&dev->power.entry)) dev->power.status = DPM_OFF_IRQ;
list_move(&dev->power.entry, &dpm_off_irq);
} }
if (!error) if (!error)
error = sysdev_suspend(state); error = sysdev_suspend(state);
if (error) if (error)
dpm_power_up(); dpm_power_up(resume_event(state));
return error; return error;
} }
EXPORT_SYMBOL_GPL(device_power_down); EXPORT_SYMBOL_GPL(device_power_down);
@ -316,7 +596,7 @@ EXPORT_SYMBOL_GPL(device_power_down);
/** /**
* suspend_device - Save state of one device. * suspend_device - Save state of one device.
* @dev: Device. * @dev: Device.
* @state: Power state device is entering. * @state: PM transition of the system being carried out.
*/ */
static int suspend_device(struct device *dev, pm_message_t state) static int suspend_device(struct device *dev, pm_message_t state)
{ {
@ -324,24 +604,43 @@ static int suspend_device(struct device *dev, pm_message_t state)
down(&dev->sem); down(&dev->sem);
if (dev->class && dev->class->suspend) { if (dev->class) {
suspend_device_dbg(dev, state, "class "); if (dev->class->pm) {
error = dev->class->suspend(dev, state); pm_dev_dbg(dev, state, "class ");
suspend_report_result(dev->class->suspend, error); error = pm_op(dev, dev->class->pm, state);
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = dev->class->suspend(dev, state);
suspend_report_result(dev->class->suspend, error);
}
if (error)
goto End;
} }
if (!error && dev->type && dev->type->suspend) { if (dev->type) {
suspend_device_dbg(dev, state, "type "); if (dev->type->pm) {
error = dev->type->suspend(dev, state); pm_dev_dbg(dev, state, "type ");
suspend_report_result(dev->type->suspend, error); error = pm_op(dev, dev->type->pm, state);
} else if (dev->type->suspend) {
pm_dev_dbg(dev, state, "legacy type ");
error = dev->type->suspend(dev, state);
suspend_report_result(dev->type->suspend, error);
}
if (error)
goto End;
} }
if (!error && dev->bus && dev->bus->suspend) { if (dev->bus) {
suspend_device_dbg(dev, state, ""); if (dev->bus->pm) {
error = dev->bus->suspend(dev, state); pm_dev_dbg(dev, state, "");
suspend_report_result(dev->bus->suspend, error); error = pm_op(dev, &dev->bus->pm->base, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy ");
error = dev->bus->suspend(dev, state);
suspend_report_result(dev->bus->suspend, error);
}
} }
End:
up(&dev->sem); up(&dev->sem);
return error; return error;
@ -349,67 +648,139 @@ static int suspend_device(struct device *dev, pm_message_t state)
/** /**
* dpm_suspend - Suspend every device. * dpm_suspend - Suspend every device.
- * @state: Power state to put each device in.
+ * @state: PM transition of the system being carried out.
  *
- * Walk the dpm_locked list. Suspend each device and move it
- * to the dpm_off list.
- *
- * (For historical reasons, if it returns -EAGAIN, that used to mean
- * that the device would be called again with interrupts disabled.
- * These days, we use the "suspend_late()" callback for that, so we
- * print a warning and consider it an error).
+ * Execute the appropriate "suspend" callbacks for all devices.
*/ */
static int dpm_suspend(pm_message_t state) static int dpm_suspend(pm_message_t state)
{ {
struct list_head list;
int error = 0; int error = 0;
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx); mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_active)) { while (!list_empty(&dpm_list)) {
struct list_head *entry = dpm_active.prev; struct device *dev = to_device(dpm_list.prev);
struct device *dev = to_device(entry);
WARN_ON(dev->parent && dev->parent->power.sleeping); get_device(dev);
dev->power.sleeping = true;
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
error = suspend_device(dev, state); error = suspend_device(dev, state);
mutex_lock(&dpm_list_mtx); mutex_lock(&dpm_list_mtx);
if (error) { if (error) {
printk(KERN_ERR "Could not suspend device %s: " pm_dev_err(dev, state, "", error);
"error %d%s\n", put_device(dev);
kobject_name(&dev->kobj),
error,
(error == -EAGAIN ?
" (please convert to suspend_late)" :
""));
dev->power.sleeping = false;
break; break;
} }
dev->power.status = DPM_OFF;
if (!list_empty(&dev->power.entry)) if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &dpm_off); list_move(&dev->power.entry, &list);
put_device(dev);
} }
if (!error) list_splice(&list, dpm_list.prev);
all_sleeping = true;
mutex_unlock(&dpm_list_mtx); mutex_unlock(&dpm_list_mtx);
return error;
}
/**
* prepare_device - Execute the ->prepare() callback(s) for given device.
* @dev: Device.
* @state: PM transition of the system being carried out.
*/
static int prepare_device(struct device *dev, pm_message_t state)
{
int error = 0;
down(&dev->sem);
if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
pm_dev_dbg(dev, state, "preparing ");
error = dev->bus->pm->base.prepare(dev);
suspend_report_result(dev->bus->pm->base.prepare, error);
if (error)
goto End;
}
if (dev->type && dev->type->pm && dev->type->pm->prepare) {
pm_dev_dbg(dev, state, "preparing type ");
error = dev->type->pm->prepare(dev);
suspend_report_result(dev->type->pm->prepare, error);
if (error)
goto End;
}
if (dev->class && dev->class->pm && dev->class->pm->prepare) {
pm_dev_dbg(dev, state, "preparing class ");
error = dev->class->pm->prepare(dev);
suspend_report_result(dev->class->pm->prepare, error);
}
End:
up(&dev->sem);
return error; return error;
} }
/** /**
- * device_suspend - Save state and stop all devices in system.
+ * dpm_prepare - Prepare all devices for a PM transition.
- * @state: new power management state
+ * @state: PM transition of the system being carried out.
  *
- * Prevent new devices from being registered, then lock all devices
- * and suspend them.
+ * Execute the ->prepare() callback for all devices.
  */
static int dpm_prepare(pm_message_t state)
{
struct list_head list;
int error = 0;
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
transition_started = true;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
get_device(dev);
dev->power.status = DPM_PREPARING;
mutex_unlock(&dpm_list_mtx);
error = prepare_device(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
dev->power.status = DPM_ON;
if (error == -EAGAIN) {
put_device(dev);
continue;
}
printk(KERN_ERR "PM: Failed to prepare device %s "
"for power transition: error %d\n",
kobject_name(&dev->kobj), error);
put_device(dev);
break;
}
dev->power.status = DPM_SUSPENDING;
if (!list_empty(&dev->power.entry))
list_move_tail(&dev->power.entry, &list);
put_device(dev);
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
return error;
}
/**
* device_suspend - Save state and stop all devices in system.
* @state: PM transition of the system being carried out.
*
* Prepare and suspend all devices.
*/ */
int device_suspend(pm_message_t state) int device_suspend(pm_message_t state)
{ {
int error; int error;
might_sleep(); might_sleep();
-    error = dpm_suspend(state);
-    if (error)
-        device_resume();
+    error = dpm_prepare(state);
+    if (!error)
+        error = dpm_suspend(state);
return error; return error;
} }
EXPORT_SYMBOL_GPL(device_suspend); EXPORT_SYMBOL_GPL(device_suspend);
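Roughly the call order the suspend core is expected to follow around these helpers; this is a sketch, and the platform sleep entry in the middle is hypothetical:

static int example_enter_state(void)
{
    int error = device_suspend(PMSG_SUSPEND);    /* prepare + suspend */

    if (error)
        return error;

    error = device_power_down(PMSG_SUSPEND);     /* noirq phase */
    if (!error) {
        /* platform-specific sleep entry would happen here */
        device_power_up(PMSG_RESUME);
    }
    device_resume(PMSG_RESUME);                  /* resume + complete */
    return error;
}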

View file

@ -4,7 +4,7 @@
* main.c * main.c
*/ */
-extern struct list_head dpm_active; /* The active device list */
+extern struct list_head dpm_list; /* The active device list */
static inline struct device *to_device(struct list_head *entry) static inline struct device *to_device(struct list_head *entry)
{ {

View file

@ -6,9 +6,6 @@
#include <linux/string.h> #include <linux/string.h>
#include "power.h" #include "power.h"
int (*platform_enable_wakeup)(struct device *dev, int is_on);
/* /*
* wakeup - Report/change current wakeup option for device * wakeup - Report/change current wakeup option for device
* *

View file

@ -188,9 +188,9 @@ static int show_file_hash(unsigned int value)
static int show_dev_hash(unsigned int value) static int show_dev_hash(unsigned int value)
{ {
int match = 0; int match = 0;
-    struct list_head * entry = dpm_active.prev;
+    struct list_head *entry = dpm_list.prev;
-    while (entry != &dpm_active) {
+    while (entry != &dpm_list) {
struct device * dev = to_device(entry); struct device * dev = to_device(entry);
unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH); unsigned int hash = hash_string(DEVSEED, dev->bus_id, DEVHASH);
if (hash == value) { if (hash == value) {

View file

@ -2,7 +2,7 @@
# Makefile for the PCI bus specific drivers. # Makefile for the PCI bus specific drivers.
# #
-obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
+obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o pci-driver.o search.o pci-sysfs.o rom.o setup-res.o
obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_PROC_FS) += proc.o

View file

@ -30,6 +30,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/pci_hotplug.h> #include <linux/pci_hotplug.h>
#include <linux/pci-acpi.h>
#include <acpi/acpi.h> #include <acpi/acpi.h>
#include <acpi/acpi_bus.h> #include <acpi/acpi_bus.h>
#include <acpi/actypes.h> #include <acpi/actypes.h>
@ -299,7 +300,7 @@ free_and_return:
* *
* @handle - the handle of the hotplug controller. * @handle - the handle of the hotplug controller.
*/ */
-acpi_status acpi_run_oshp(acpi_handle handle)
+static acpi_status acpi_run_oshp(acpi_handle handle)
{ {
acpi_status status; acpi_status status;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
@ -322,9 +323,6 @@ acpi_status acpi_run_oshp(acpi_handle handle)
kfree(string.pointer); kfree(string.pointer);
return status; return status;
} }
EXPORT_SYMBOL_GPL(acpi_run_oshp);
/* acpi_get_hp_params_from_firmware /* acpi_get_hp_params_from_firmware
* *
@ -374,6 +372,85 @@ acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
} }
EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware);
/**
* acpi_get_hp_hw_control_from_firmware
* @dev: the pci_dev of the bridge that has a hotplug controller
* @flags: requested control bits for _OSC
*
* Attempt to take hotplug control from firmware.
*/
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags)
{
acpi_status status;
acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
struct pci_dev *pdev = dev;
struct pci_bus *parent;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
flags &= (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
OSC_SHPC_NATIVE_HP_CONTROL |
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (!flags) {
err("Invalid flags %u specified!\n", flags);
return -EINVAL;
}
/*
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it. If
* an _OSC is missing, we look for an OSHP to do the same thing.
* To handle different BIOS behavior, we look for _OSC and OSHP
* within the scope of the hotplug controller and its parents,
* up to the host bridge under which this controller exists.
*/
while (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
* space at all. Try to get acpi handle of parent pci bus.
*/
if (!pdev || !pdev->bus->parent)
break;
parent = pdev->bus->parent;
dbg("Could not find %s in acpi namespace, trying parent\n",
pci_name(pdev));
if (!parent->self)
/* Parent must be a host bridge */
handle = acpi_get_pci_rootbridge_handle(
pci_domain_nr(parent),
parent->number);
else
handle = DEVICE_ACPI_HANDLE(&(parent->self->dev));
pdev = parent->self;
}
while (handle) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s \n",
(char *)string.pointer);
status = pci_osc_control_set(handle, flags);
if (status == AE_NOT_FOUND)
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status)) {
dbg("Gained control for hotplug HW for pci %s (%s)\n",
pci_name(dev), (char *)string.pointer);
kfree(string.pointer);
return 0;
}
if (acpi_root_bridge(handle))
break;
chandle = handle;
status = acpi_get_parent(chandle, &handle);
if (ACPI_FAILURE(status))
break;
}
dbg("Cannot get control of hotplug hardware for pci %s\n",
pci_name(dev));
kfree(string.pointer);
return -ENODEV;
}
EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware);
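A hypothetical hotplug-driver caller requesting native control before programming its controller (example_init_slot is invented; the flag names come from the function above):

static int example_init_slot(struct pci_dev *bridge)
{
    u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
            OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL;

    if (acpi_get_hp_hw_control_from_firmware(bridge, flags))
        return -ENODEV;    /* firmware kept control */

    /* safe to program the hotplug controller now */
    return 0;
}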
/* acpi_root_bridge - check to see if this acpi object is a root bridge /* acpi_root_bridge - check to see if this acpi object is a root bridge
* *

View file

@ -215,7 +215,6 @@ extern u8 acpiphp_get_power_status (struct acpiphp_slot *slot);
extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_attention_status (struct acpiphp_slot *slot);
extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot); extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
extern u32 acpiphp_get_address (struct acpiphp_slot *slot);
/* variables */ /* variables */
extern int acpiphp_debug; extern int acpiphp_debug;

View file

@ -70,7 +70,6 @@ static int disable_slot (struct hotplug_slot *slot);
static int set_attention_status (struct hotplug_slot *slot, u8 value); static int set_attention_status (struct hotplug_slot *slot, u8 value);
static int get_power_status (struct hotplug_slot *slot, u8 *value); static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_address (struct hotplug_slot *slot, u32 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
@ -83,7 +82,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
.get_attention_status = get_attention_status, .get_attention_status = get_attention_status,
.get_latch_status = get_latch_status, .get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status, .get_adapter_status = get_adapter_status,
.get_address = get_address,
}; };
@ -274,23 +272,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0; return 0;
} }
/**
* get_address - get pci address of a slot
* @hotplug_slot: slot to get status
* @value: pointer to struct pci_busdev (seg, bus, dev)
*/
static int get_address(struct hotplug_slot *hotplug_slot, u32 *value)
{
struct slot *slot = hotplug_slot->private;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
*value = acpiphp_get_address(slot->acpi_slot);
return 0;
}
static int __init init_acpi(void) static int __init init_acpi(void)
{ {
int retval; int retval;
@ -357,7 +338,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
acpiphp_slot->slot = slot; acpiphp_slot->slot = slot;
snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun);
-    retval = pci_hp_register(slot->hotplug_slot);
+    retval = pci_hp_register(slot->hotplug_slot,
+                    acpiphp_slot->bridge->pci_bus,
+                    acpiphp_slot->device);
+    if (retval == -EBUSY)
+        goto error_hpslot;
if (retval) { if (retval) {
err("pci_hp_register failed with error %d\n", retval); err("pci_hp_register failed with error %d\n", retval);
goto error_hpslot; goto error_hpslot;

View file

@ -258,7 +258,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
bridge->pci_bus->number, slot->device); bridge->pci_bus->number, slot->device);
retval = acpiphp_register_hotplug_slot(slot); retval = acpiphp_register_hotplug_slot(slot);
if (retval) { if (retval) {
-            warn("acpiphp_register_hotplug_slot failed(err code = 0x%x)\n", retval);
+            if (retval == -EBUSY)
+                warn("Slot %d already registered by another "
+                    "hotplug driver\n", slot->sun);
+            else
+                warn("acpiphp_register_hotplug_slot failed "
+                    "(err code = 0x%x)\n", retval);
goto err_exit; goto err_exit;
} }
} }
@ -1878,19 +1883,3 @@ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
return (sta == 0) ? 0 : 1; return (sta == 0) ? 0 : 1;
} }
/*
* pci address (seg/bus/dev)
*/
u32 acpiphp_get_address(struct acpiphp_slot *slot)
{
u32 address;
struct pci_bus *pci_bus = slot->bridge->pci_bus;
address = (pci_domain_nr(pci_bus) << 16) |
(pci_bus->number << 8) |
slot->device;
return address;
}

View file

@ -33,8 +33,10 @@
#include <linux/kobject.h> #include <linux/kobject.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/pci.h>
#include "acpiphp.h" #include "acpiphp.h"
#include "../pci.h"
#define DRIVER_VERSION "1.0.1" #define DRIVER_VERSION "1.0.1"
#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>" #define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
@ -430,7 +432,7 @@ static int __init ibm_acpiphp_init(void)
int retval = 0; int retval = 0;
acpi_status status; acpi_status status;
struct acpi_device *device; struct acpi_device *device;
-    struct kobject *sysdir = &pci_hotplug_slots_kset->kobj;
+    struct kobject *sysdir = &pci_slots_kset->kobj;
dbg("%s\n", __func__); dbg("%s\n", __func__);
@ -477,7 +479,7 @@ init_return:
static void __exit ibm_acpiphp_exit(void) static void __exit ibm_acpiphp_exit(void)
{ {
acpi_status status; acpi_status status;
-    struct kobject *sysdir = &pci_hotplug_slots_kset->kobj;
+    struct kobject *sysdir = &pci_slots_kset->kobj;
dbg("%s\n", __func__); dbg("%s\n", __func__);

View file

@ -285,7 +285,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
info->attention_status = cpci_get_attention_status(slot); info->attention_status = cpci_get_attention_status(slot);
dbg("registering slot %s", slot->hotplug_slot->name); dbg("registering slot %s", slot->hotplug_slot->name);
-        status = pci_hp_register(slot->hotplug_slot);
+        status = pci_hp_register(slot->hotplug_slot, bus, i);
if (status) { if (status) {
err("pci_hp_register failed with error %d", status); err("pci_hp_register failed with error %d", status);
goto error_name; goto error_name;

View file

@ -434,7 +434,9 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot->bus, slot->device, slot->bus, slot->device,
slot->number, ctrl->slot_device_offset, slot->number, ctrl->slot_device_offset,
slot_number); slot_number);
-        result = pci_hp_register(hotplug_slot);
+        result = pci_hp_register(hotplug_slot,
+                    ctrl->pci_dev->subordinate,
+                    slot->device);
if (result) { if (result) {
err("pci_hp_register failed with error %d\n", result); err("pci_hp_register failed with error %d\n", result);
goto error_name; goto error_name;

View file

@ -66,6 +66,7 @@ struct dummy_slot {
struct pci_dev *dev; struct pci_dev *dev;
struct work_struct remove_work; struct work_struct remove_work;
unsigned long removed; unsigned long removed;
char name[8];
}; };
static int debug; static int debug;
@ -100,6 +101,7 @@ static int add_slot(struct pci_dev *dev)
struct dummy_slot *dslot; struct dummy_slot *dslot;
struct hotplug_slot *slot; struct hotplug_slot *slot;
int retval = -ENOMEM; int retval = -ENOMEM;
static int count = 1;
slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
if (!slot) if (!slot)
@ -113,18 +115,18 @@ static int add_slot(struct pci_dev *dev)
slot->info->max_bus_speed = PCI_SPEED_UNKNOWN; slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
-    slot->name = &dev->dev.bus_id[0];
-    dbg("slot->name = %s\n", slot->name);
     dslot = kzalloc(sizeof(struct dummy_slot), GFP_KERNEL);
     if (!dslot)
         goto error_info;
+    slot->name = dslot->name;
+    snprintf(slot->name, sizeof(dslot->name), "fake%d", count++);
+    dbg("slot->name = %s\n", slot->name);
slot->ops = &dummy_hotplug_slot_ops; slot->ops = &dummy_hotplug_slot_ops;
slot->release = &dummy_release; slot->release = &dummy_release;
slot->private = dslot; slot->private = dslot;
retval = pci_hp_register(slot); retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn));
if (retval) { if (retval) {
err("pci_hp_register failed with error %d\n", retval); err("pci_hp_register failed with error %d\n", retval);
goto error_dslot; goto error_dslot;
@ -148,17 +150,17 @@ error:
static int __init pci_scan_buses(void) static int __init pci_scan_buses(void)
{ {
struct pci_dev *dev = NULL; struct pci_dev *dev = NULL;
int retval = 0; int lastslot = 0;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
retval = add_slot(dev); if (PCI_FUNC(dev->devfn) > 0 &&
if (retval) { lastslot == PCI_SLOT(dev->devfn))
pci_dev_put(dev); continue;
break; lastslot = PCI_SLOT(dev->devfn);
} add_slot(dev);
} }
return retval; return 0;
} }
static void remove_slot(struct dummy_slot *dslot) static void remove_slot(struct dummy_slot *dslot)
@ -296,23 +298,9 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
return 0; return 0;
} }
/* find the hotplug_slot for the pci_dev */
static struct hotplug_slot *get_slot_from_dev(struct pci_dev *dev)
{
struct dummy_slot *dslot;
list_for_each_entry(dslot, &slot_list, node) {
if (dslot->dev == dev)
return dslot->slot;
}
return NULL;
}
static int disable_slot(struct hotplug_slot *slot) static int disable_slot(struct hotplug_slot *slot)
{ {
struct dummy_slot *dslot; struct dummy_slot *dslot;
struct hotplug_slot *hslot;
struct pci_dev *dev; struct pci_dev *dev;
int func; int func;
@ -322,41 +310,27 @@ static int disable_slot(struct hotplug_slot *slot)
dbg("%s - physical_slot = %s\n", __func__, slot->name); dbg("%s - physical_slot = %s\n", __func__, slot->name);
/* don't disable bridged devices just yet, we can't handle them easily... */ for (func = 7; func >= 0; func--) {
if (dslot->dev->subordinate) { dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func);
err("Can't remove PCI devices with other PCI devices behind it yet.\n"); if (!dev)
return -ENODEV; continue;
}
if (test_and_set_bit(0, &dslot->removed)) { if (test_and_set_bit(0, &dslot->removed)) {
dbg("Slot already scheduled for removal\n"); dbg("Slot already scheduled for removal\n");
return -ENODEV; return -ENODEV;
}
/* search for subfunctions and disable them first */
if (!(dslot->dev->devfn & 7)) {
for (func = 1; func < 8; func++) {
dev = pci_get_slot(dslot->dev->bus,
dslot->dev->devfn + func);
if (dev) {
hslot = get_slot_from_dev(dev);
if (hslot)
disable_slot(hslot);
else {
err("Hotplug slot not found for subfunction of PCI device\n");
return -ENODEV;
}
pci_dev_put(dev);
} else
dbg("No device in slot found\n");
} }
/* queue work item to blow away this sysfs entry and other
* parts.
*/
INIT_WORK(&dslot->remove_work, remove_slot_worker);
queue_work(dummyphp_wq, &dslot->remove_work);
/* blow away this sysfs entry and other parts. */
remove_slot(dslot);
pci_dev_put(dev);
} }
/* remove the device from the pci core */
pci_remove_bus_device(dslot->dev);
/* queue work item to blow away this sysfs entry and other parts. */
INIT_WORK(&dslot->remove_work, remove_slot_worker);
queue_work(dummyphp_wq, &dslot->remove_work);
return 0; return 0;
} }


@@ -1001,7 +1001,8 @@ static int __init ebda_rsrc_controller (void)
                 tmp_slot = list_entry (list, struct slot, ibm_slot_list);
                 snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot));
-                pci_hp_register (tmp_slot->hotplug_slot);
+                pci_hp_register(tmp_slot->hotplug_slot,
+                        pci_find_bus(0, tmp_slot->bus), tmp_slot->device);
         }
         print_ebda_hpc ();


@ -40,6 +40,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/pci_hotplug.h> #include <linux/pci_hotplug.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "../pci.h"
#define MY_NAME "pci_hotplug" #define MY_NAME "pci_hotplug"
@ -60,41 +61,7 @@ static int debug;
////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////
static LIST_HEAD(pci_hotplug_slot_list); static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock);
struct kset *pci_hotplug_slots_kset;
static ssize_t hotplug_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct hotplug_slot *slot = to_hotplug_slot(kobj);
struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr);
return attribute->show ? attribute->show(slot, buf) : -EIO;
}
static ssize_t hotplug_slot_attr_store(struct kobject *kobj,
struct attribute *attr, const char *buf, size_t len)
{
struct hotplug_slot *slot = to_hotplug_slot(kobj);
struct hotplug_slot_attribute *attribute = to_hotplug_attr(attr);
return attribute->store ? attribute->store(slot, buf, len) : -EIO;
}
static struct sysfs_ops hotplug_slot_sysfs_ops = {
.show = hotplug_slot_attr_show,
.store = hotplug_slot_attr_store,
};
static void hotplug_slot_release(struct kobject *kobj)
{
struct hotplug_slot *slot = to_hotplug_slot(kobj);
if (slot->release)
slot->release(slot);
}
static struct kobj_type hotplug_slot_ktype = {
.sysfs_ops = &hotplug_slot_sysfs_ops,
.release = &hotplug_slot_release,
};
/* these strings match up with the values in pci_bus_speed */ /* these strings match up with the values in pci_bus_speed */
static char *pci_bus_speed_strings[] = { static char *pci_bus_speed_strings[] = {
@ -149,16 +116,15 @@ GET_STATUS(power_status, u8)
GET_STATUS(attention_status, u8) GET_STATUS(attention_status, u8)
GET_STATUS(latch_status, u8) GET_STATUS(latch_status, u8)
GET_STATUS(adapter_status, u8) GET_STATUS(adapter_status, u8)
GET_STATUS(address, u32)
GET_STATUS(max_bus_speed, enum pci_bus_speed) GET_STATUS(max_bus_speed, enum pci_bus_speed)
GET_STATUS(cur_bus_speed, enum pci_bus_speed) GET_STATUS(cur_bus_speed, enum pci_bus_speed)
static ssize_t power_read_file (struct hotplug_slot *slot, char *buf) static ssize_t power_read_file(struct pci_slot *slot, char *buf)
{ {
int retval; int retval;
u8 value; u8 value;
retval = get_power_status (slot, &value); retval = get_power_status(slot->hotplug, &value);
if (retval) if (retval)
goto exit; goto exit;
retval = sprintf (buf, "%d\n", value); retval = sprintf (buf, "%d\n", value);
@ -166,9 +132,10 @@ exit:
return retval; return retval;
} }
static ssize_t power_write_file (struct hotplug_slot *slot, const char *buf, static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count) size_t count)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
unsigned long lpower; unsigned long lpower;
u8 power; u8 power;
int retval = 0; int retval = 0;
@ -204,29 +171,30 @@ exit:
return count; return count;
} }
static struct hotplug_slot_attribute hotplug_slot_attr_power = { static struct pci_slot_attribute hotplug_slot_attr_power = {
.attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .attr = {.name = "power", .mode = S_IFREG | S_IRUGO | S_IWUSR},
.show = power_read_file, .show = power_read_file,
.store = power_write_file .store = power_write_file
}; };
static ssize_t attention_read_file (struct hotplug_slot *slot, char *buf) static ssize_t attention_read_file(struct pci_slot *slot, char *buf)
{ {
int retval; int retval;
u8 value; u8 value;
retval = get_attention_status (slot, &value); retval = get_attention_status(slot->hotplug, &value);
if (retval) if (retval)
goto exit; goto exit;
retval = sprintf (buf, "%d\n", value); retval = sprintf(buf, "%d\n", value);
exit: exit:
return retval; return retval;
} }
static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf, static ssize_t attention_write_file(struct pci_slot *slot, const char *buf,
size_t count) size_t count)
{ {
struct hotplug_slot_ops *ops = slot->hotplug->ops;
unsigned long lattention; unsigned long lattention;
u8 attention; u8 attention;
int retval = 0; int retval = 0;
@ -235,13 +203,13 @@ static ssize_t attention_write_file (struct hotplug_slot *slot, const char *buf,
attention = (u8)(lattention & 0xff); attention = (u8)(lattention & 0xff);
dbg (" - attention = %d\n", attention); dbg (" - attention = %d\n", attention);
if (!try_module_get(slot->ops->owner)) { if (!try_module_get(ops->owner)) {
retval = -ENODEV; retval = -ENODEV;
goto exit; goto exit;
} }
if (slot->ops->set_attention_status) if (ops->set_attention_status)
retval = slot->ops->set_attention_status(slot, attention); retval = ops->set_attention_status(slot->hotplug, attention);
module_put(slot->ops->owner); module_put(ops->owner);
exit: exit:
if (retval) if (retval)
@ -249,18 +217,18 @@ exit:
return count; return count;
} }
static struct hotplug_slot_attribute hotplug_slot_attr_attention = { static struct pci_slot_attribute hotplug_slot_attr_attention = {
.attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .attr = {.name = "attention", .mode = S_IFREG | S_IRUGO | S_IWUSR},
.show = attention_read_file, .show = attention_read_file,
.store = attention_write_file .store = attention_write_file
}; };
static ssize_t latch_read_file (struct hotplug_slot *slot, char *buf) static ssize_t latch_read_file(struct pci_slot *slot, char *buf)
{ {
int retval; int retval;
u8 value; u8 value;
retval = get_latch_status (slot, &value); retval = get_latch_status(slot->hotplug, &value);
if (retval) if (retval)
goto exit; goto exit;
retval = sprintf (buf, "%d\n", value); retval = sprintf (buf, "%d\n", value);
@ -269,17 +237,17 @@ exit:
return retval; return retval;
} }
static struct hotplug_slot_attribute hotplug_slot_attr_latch = { static struct pci_slot_attribute hotplug_slot_attr_latch = {
.attr = {.name = "latch", .mode = S_IFREG | S_IRUGO}, .attr = {.name = "latch", .mode = S_IFREG | S_IRUGO},
.show = latch_read_file, .show = latch_read_file,
}; };
static ssize_t presence_read_file (struct hotplug_slot *slot, char *buf) static ssize_t presence_read_file(struct pci_slot *slot, char *buf)
{ {
int retval; int retval;
u8 value; u8 value;
retval = get_adapter_status (slot, &value); retval = get_adapter_status(slot->hotplug, &value);
if (retval) if (retval)
goto exit; goto exit;
retval = sprintf (buf, "%d\n", value); retval = sprintf (buf, "%d\n", value);
@ -288,42 +256,20 @@ exit:
return retval; return retval;
} }
static struct hotplug_slot_attribute hotplug_slot_attr_presence = { static struct pci_slot_attribute hotplug_slot_attr_presence = {
.attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO}, .attr = {.name = "adapter", .mode = S_IFREG | S_IRUGO},
.show = presence_read_file, .show = presence_read_file,
}; };
static ssize_t address_read_file (struct hotplug_slot *slot, char *buf)
{
int retval;
u32 address;
retval = get_address (slot, &address);
if (retval)
goto exit;
retval = sprintf (buf, "%04x:%02x:%02x\n",
(address >> 16) & 0xffff,
(address >> 8) & 0xff,
address & 0xff);
exit:
return retval;
}
static struct hotplug_slot_attribute hotplug_slot_attr_address = {
.attr = {.name = "address", .mode = S_IFREG | S_IRUGO},
.show = address_read_file,
};
static char *unknown_speed = "Unknown bus speed"; static char *unknown_speed = "Unknown bus speed";
static ssize_t max_bus_speed_read_file (struct hotplug_slot *slot, char *buf) static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf)
{ {
char *speed_string; char *speed_string;
int retval; int retval;
enum pci_bus_speed value; enum pci_bus_speed value;
retval = get_max_bus_speed (slot, &value); retval = get_max_bus_speed(slot->hotplug, &value);
if (retval) if (retval)
goto exit; goto exit;
@ -338,18 +284,18 @@ exit:
return retval; return retval;
} }
static struct hotplug_slot_attribute hotplug_slot_attr_max_bus_speed = { static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = {
.attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO}, .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO},
.show = max_bus_speed_read_file, .show = max_bus_speed_read_file,
}; };
static ssize_t cur_bus_speed_read_file (struct hotplug_slot *slot, char *buf) static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf)
{ {
char *speed_string; char *speed_string;
int retval; int retval;
enum pci_bus_speed value; enum pci_bus_speed value;
retval = get_cur_bus_speed (slot, &value); retval = get_cur_bus_speed(slot->hotplug, &value);
if (retval) if (retval)
goto exit; goto exit;
@ -364,14 +310,15 @@ exit:
return retval; return retval;
} }
static struct hotplug_slot_attribute hotplug_slot_attr_cur_bus_speed = { static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = {
.attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO}, .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO},
.show = cur_bus_speed_read_file, .show = cur_bus_speed_read_file,
}; };
static ssize_t test_write_file (struct hotplug_slot *slot, const char *buf, static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count) size_t count)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
unsigned long ltest; unsigned long ltest;
u32 test; u32 test;
int retval = 0; int retval = 0;
@ -394,13 +341,14 @@ exit:
return count; return count;
} }
static struct hotplug_slot_attribute hotplug_slot_attr_test = { static struct pci_slot_attribute hotplug_slot_attr_test = {
.attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR}, .attr = {.name = "test", .mode = S_IFREG | S_IRUGO | S_IWUSR},
.store = test_write_file .store = test_write_file
}; };
static int has_power_file (struct hotplug_slot *slot) static int has_power_file(struct pci_slot *pci_slot)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops)) if ((!slot) || (!slot->ops))
return -ENODEV; return -ENODEV;
if ((slot->ops->enable_slot) || if ((slot->ops->enable_slot) ||
@ -410,8 +358,9 @@ static int has_power_file (struct hotplug_slot *slot)
return -ENOENT; return -ENOENT;
} }
static int has_attention_file (struct hotplug_slot *slot) static int has_attention_file(struct pci_slot *pci_slot)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops)) if ((!slot) || (!slot->ops))
return -ENODEV; return -ENODEV;
if ((slot->ops->set_attention_status) || if ((slot->ops->set_attention_status) ||
@ -420,8 +369,9 @@ static int has_attention_file (struct hotplug_slot *slot)
return -ENOENT; return -ENOENT;
} }
static int has_latch_file (struct hotplug_slot *slot) static int has_latch_file(struct pci_slot *pci_slot)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops)) if ((!slot) || (!slot->ops))
return -ENODEV; return -ENODEV;
if (slot->ops->get_latch_status) if (slot->ops->get_latch_status)
@ -429,8 +379,9 @@ static int has_latch_file (struct hotplug_slot *slot)
return -ENOENT; return -ENOENT;
} }
static int has_adapter_file (struct hotplug_slot *slot) static int has_adapter_file(struct pci_slot *pci_slot)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops)) if ((!slot) || (!slot->ops))
return -ENODEV; return -ENODEV;
if (slot->ops->get_adapter_status) if (slot->ops->get_adapter_status)
@ -438,17 +389,9 @@ static int has_adapter_file (struct hotplug_slot *slot)
return -ENOENT; return -ENOENT;
} }
static int has_address_file (struct hotplug_slot *slot) static int has_max_bus_speed_file(struct pci_slot *pci_slot)
{
if ((!slot) || (!slot->ops))
return -ENODEV;
if (slot->ops->get_address)
return 0;
return -ENOENT;
}
static int has_max_bus_speed_file (struct hotplug_slot *slot)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops)) if ((!slot) || (!slot->ops))
return -ENODEV; return -ENODEV;
if (slot->ops->get_max_bus_speed) if (slot->ops->get_max_bus_speed)
@ -456,8 +399,9 @@ static int has_max_bus_speed_file (struct hotplug_slot *slot)
return -ENOENT; return -ENOENT;
} }
static int has_cur_bus_speed_file (struct hotplug_slot *slot) static int has_cur_bus_speed_file(struct pci_slot *pci_slot)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops)) if ((!slot) || (!slot->ops))
return -ENODEV; return -ENODEV;
if (slot->ops->get_cur_bus_speed) if (slot->ops->get_cur_bus_speed)
@ -465,8 +409,9 @@ static int has_cur_bus_speed_file (struct hotplug_slot *slot)
return -ENOENT; return -ENOENT;
} }
static int has_test_file (struct hotplug_slot *slot) static int has_test_file(struct pci_slot *pci_slot)
{ {
struct hotplug_slot *slot = pci_slot->hotplug;
if ((!slot) || (!slot->ops)) if ((!slot) || (!slot->ops))
return -ENODEV; return -ENODEV;
if (slot->ops->hardware_test) if (slot->ops->hardware_test)
@ -474,7 +419,7 @@ static int has_test_file (struct hotplug_slot *slot)
return -ENOENT; return -ENOENT;
} }
static int fs_add_slot (struct hotplug_slot *slot) static int fs_add_slot(struct pci_slot *slot)
{ {
int retval = 0; int retval = 0;
@ -505,13 +450,6 @@ static int fs_add_slot (struct hotplug_slot *slot)
goto exit_adapter; goto exit_adapter;
} }
if (has_address_file(slot) == 0) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_address.attr);
if (retval)
goto exit_address;
}
if (has_max_bus_speed_file(slot) == 0) { if (has_max_bus_speed_file(slot) == 0) {
retval = sysfs_create_file(&slot->kobj, retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_max_bus_speed.attr); &hotplug_slot_attr_max_bus_speed.attr);
@ -544,10 +482,6 @@ exit_cur_speed:
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
exit_max_speed: exit_max_speed:
if (has_address_file(slot) == 0)
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr);
exit_address:
if (has_adapter_file(slot) == 0) if (has_adapter_file(slot) == 0)
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
@ -567,7 +501,7 @@ exit:
return retval; return retval;
} }
static void fs_remove_slot (struct hotplug_slot *slot) static void fs_remove_slot(struct pci_slot *slot)
{ {
if (has_power_file(slot) == 0) if (has_power_file(slot) == 0)
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr); sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_power.attr);
@ -581,9 +515,6 @@ static void fs_remove_slot (struct hotplug_slot *slot)
if (has_adapter_file(slot) == 0) if (has_adapter_file(slot) == 0)
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr); sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_presence.attr);
if (has_address_file(slot) == 0)
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_address.attr);
if (has_max_bus_speed_file(slot) == 0) if (has_max_bus_speed_file(slot) == 0)
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr); sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_max_bus_speed.attr);
@ -599,27 +530,33 @@ static struct hotplug_slot *get_slot_from_name (const char *name)
struct hotplug_slot *slot; struct hotplug_slot *slot;
struct list_head *tmp; struct list_head *tmp;
spin_lock(&pci_hotplug_slot_list_lock);
list_for_each (tmp, &pci_hotplug_slot_list) { list_for_each (tmp, &pci_hotplug_slot_list) {
slot = list_entry (tmp, struct hotplug_slot, slot_list); slot = list_entry (tmp, struct hotplug_slot, slot_list);
if (strcmp(slot->name, name) == 0) if (strcmp(slot->name, name) == 0)
return slot; goto out;
} }
return NULL; slot = NULL;
out:
spin_unlock(&pci_hotplug_slot_list_lock);
return slot;
} }
/** /**
* pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem * pci_hp_register - register a hotplug_slot with the PCI hotplug subsystem
* @bus: bus this slot is on
* @slot: pointer to the &struct hotplug_slot to register * @slot: pointer to the &struct hotplug_slot to register
* @slot_nr: slot number
* *
* Registers a hotplug slot with the pci hotplug subsystem, which will allow * Registers a hotplug slot with the pci hotplug subsystem, which will allow
* userspace interaction to the slot. * userspace interaction to the slot.
* *
* Returns 0 if successful, anything else for an error. * Returns 0 if successful, anything else for an error.
*/ */
int pci_hp_register (struct hotplug_slot *slot) int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr)
{ {
int result; int result;
struct hotplug_slot *tmp; struct pci_slot *pci_slot;
if (slot == NULL) if (slot == NULL)
return -ENODEV; return -ENODEV;
@ -632,57 +569,89 @@ int pci_hp_register (struct hotplug_slot *slot)
} }
/* Check if we have already registered a slot with the same name. */ /* Check if we have already registered a slot with the same name. */
tmp = get_slot_from_name(slot->name); if (get_slot_from_name(slot->name))
if (tmp)
return -EEXIST; return -EEXIST;
slot->kobj.kset = pci_hotplug_slots_kset; /*
result = kobject_init_and_add(&slot->kobj, &hotplug_slot_ktype, NULL, * No problems if we call this interface from both ACPI_PCI_SLOT
"%s", slot->name); * driver and call it here again. If we've already created the
if (result) { * pci_slot, the interface will simply bump the refcount.
err("Unable to register kobject '%s'", slot->name); */
return -EINVAL; pci_slot = pci_create_slot(bus, slot_nr, slot->name);
if (IS_ERR(pci_slot))
return PTR_ERR(pci_slot);
if (pci_slot->hotplug) {
dbg("%s: already claimed\n", __func__);
pci_destroy_slot(pci_slot);
return -EBUSY;
} }
list_add (&slot->slot_list, &pci_hotplug_slot_list); slot->pci_slot = pci_slot;
pci_slot->hotplug = slot;
/*
* Allow pcihp drivers to override the ACPI_PCI_SLOT name.
*/
if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) {
result = kobject_rename(&pci_slot->kobj, slot->name);
if (result) {
pci_destroy_slot(pci_slot);
return result;
}
}
spin_lock(&pci_hotplug_slot_list_lock);
list_add(&slot->slot_list, &pci_hotplug_slot_list);
spin_unlock(&pci_hotplug_slot_list_lock);
result = fs_add_slot(pci_slot);
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
dbg("Added slot %s to the list\n", slot->name);
result = fs_add_slot (slot);
kobject_uevent(&slot->kobj, KOBJ_ADD);
dbg ("Added slot %s to the list\n", slot->name);
return result; return result;
} }
/** /**
* pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem * pci_hp_deregister - deregister a hotplug_slot with the PCI hotplug subsystem
* @slot: pointer to the &struct hotplug_slot to deregister * @hotplug: pointer to the &struct hotplug_slot to deregister
* *
* The @slot must have been registered with the pci hotplug subsystem * The @slot must have been registered with the pci hotplug subsystem
* previously with a call to pci_hp_register(). * previously with a call to pci_hp_register().
* *
* Returns 0 if successful, anything else for an error. * Returns 0 if successful, anything else for an error.
*/ */
int pci_hp_deregister (struct hotplug_slot *slot) int pci_hp_deregister(struct hotplug_slot *hotplug)
{ {
struct hotplug_slot *temp; struct hotplug_slot *temp;
struct pci_slot *slot;
if (slot == NULL) if (!hotplug)
return -ENODEV; return -ENODEV;
temp = get_slot_from_name (slot->name); temp = get_slot_from_name(hotplug->name);
if (temp != slot) { if (temp != hotplug)
return -ENODEV; return -ENODEV;
}
list_del (&slot->slot_list);
fs_remove_slot (slot); spin_lock(&pci_hotplug_slot_list_lock);
dbg ("Removed slot %s from the list\n", slot->name); list_del(&hotplug->slot_list);
kobject_put(&slot->kobj); spin_unlock(&pci_hotplug_slot_list_lock);
slot = hotplug->pci_slot;
fs_remove_slot(slot);
dbg("Removed slot %s from the list\n", hotplug->name);
hotplug->release(hotplug);
slot->hotplug = NULL;
pci_destroy_slot(slot);
return 0; return 0;
} }
/** /**
* pci_hp_change_slot_info - changes the slot's information structure in the core * pci_hp_change_slot_info - changes the slot's information structure in the core
* @slot: pointer to the slot whose info has changed * @hotplug: pointer to the slot whose info has changed
* @info: pointer to the info copy into the slot's info structure * @info: pointer to the info copy into the slot's info structure
* *
* @slot must have been registered with the pci * @slot must have been registered with the pci
@ -690,13 +659,15 @@ int pci_hp_deregister (struct hotplug_slot *slot)
* *
* Returns 0 if successful, anything else for an error. * Returns 0 if successful, anything else for an error.
*/ */
int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot, int __must_check pci_hp_change_slot_info(struct hotplug_slot *hotplug,
struct hotplug_slot_info *info) struct hotplug_slot_info *info)
{ {
if ((slot == NULL) || (info == NULL)) struct pci_slot *slot;
if (!hotplug || !info)
return -ENODEV; return -ENODEV;
slot = hotplug->pci_slot;
memcpy (slot->info, info, sizeof (struct hotplug_slot_info)); memcpy(hotplug->info, info, sizeof(struct hotplug_slot_info));
return 0; return 0;
} }
@ -704,36 +675,22 @@ int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
static int __init pci_hotplug_init (void) static int __init pci_hotplug_init (void)
{ {
int result; int result;
struct kset *pci_bus_kset;
pci_bus_kset = bus_get_kset(&pci_bus_type);
pci_hotplug_slots_kset = kset_create_and_add("slots", NULL,
&pci_bus_kset->kobj);
if (!pci_hotplug_slots_kset) {
result = -ENOMEM;
err("Register subsys error\n");
goto exit;
}
result = cpci_hotplug_init(debug); result = cpci_hotplug_init(debug);
if (result) { if (result) {
err ("cpci_hotplug_init with error %d\n", result); err ("cpci_hotplug_init with error %d\n", result);
goto err_subsys; goto err_cpci;
} }
info (DRIVER_DESC " version: " DRIVER_VERSION "\n"); info (DRIVER_DESC " version: " DRIVER_VERSION "\n");
goto exit;
err_subsys: err_cpci:
kset_unregister(pci_hotplug_slots_kset);
exit:
return result; return result;
} }
static void __exit pci_hotplug_exit (void) static void __exit pci_hotplug_exit (void)
{ {
cpci_hotplug_exit(); cpci_hotplug_exit();
kset_unregister(pci_hotplug_slots_kset);
} }
module_init(pci_hotplug_init); module_init(pci_hotplug_init);
@ -745,7 +702,6 @@ MODULE_LICENSE("GPL");
module_param(debug, bool, 0644); module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
EXPORT_SYMBOL_GPL(pci_hotplug_slots_kset);
EXPORT_SYMBOL_GPL(pci_hp_register); EXPORT_SYMBOL_GPL(pci_hp_register);
EXPORT_SYMBOL_GPL(pci_hp_deregister); EXPORT_SYMBOL_GPL(pci_hp_deregister);
EXPORT_SYMBOL_GPL(pci_hp_change_slot_info); EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
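
The hunks above rework the registration interface: pci_hp_register() now takes the pci_bus and slot number so the core can create (or reuse) the matching struct pci_slot under /sys/bus/pci/slots/. A minimal caller sketch, not taken from the patch itself; the "demo" names, the trivial ops table and the error handling are illustrative assumptions only:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>

static struct hotplug_slot_ops demo_slot_ops = {
        .owner = THIS_MODULE,
};

static void demo_release(struct hotplug_slot *slot)
{
        kfree(slot->info);
        kfree(slot);
}

static int demo_add_slot(struct pci_dev *dev)
{
        static char demo_name[] = "demo0";      /* must stay unique across drivers */
        struct hotplug_slot *slot;
        int ret;

        slot = kzalloc(sizeof(*slot), GFP_KERNEL);
        if (!slot)
                return -ENOMEM;
        slot->info = kzalloc(sizeof(*slot->info), GFP_KERNEL);
        if (!slot->info) {
                kfree(slot);
                return -ENOMEM;
        }
        slot->name = demo_name;
        slot->ops = &demo_slot_ops;
        slot->release = demo_release;   /* invoked from pci_hp_deregister() */

        /* the bus and device number now accompany the registration */
        ret = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn));
        if (ret)
                demo_release(slot);
        return ret;
}

Tear-down is simply pci_hp_deregister(slot): as the new core code above shows, it unlinks the slot, removes the sysfs files, calls the release callback and drops the struct pci_slot reference.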


@@ -43,6 +43,7 @@ extern int pciehp_poll_mode;
 extern int pciehp_poll_time;
 extern int pciehp_debug;
 extern int pciehp_force;
+extern int pciehp_slot_with_bus;
 extern struct workqueue_struct *pciehp_wq;
 #define dbg(format, arg...) \
@@ -96,7 +97,7 @@ struct controller {
         u32 slot_cap;
         u8 cap_base;
         struct timer_list poll_timer;
-        volatile int cmd_busy;
+        int cmd_busy;
         unsigned int no_cmd_complete:1;
 };
@@ -156,10 +157,10 @@ extern u8 pciehp_handle_power_fault(struct slot *p_slot);
 extern int pciehp_configure_device(struct slot *p_slot);
 extern int pciehp_unconfigure_device(struct slot *p_slot);
 extern void pciehp_queue_pushbutton_work(struct work_struct *work);
-int pcie_init(struct controller *ctrl, struct pcie_device *dev);
+struct controller *pcie_init(struct pcie_device *dev);
 int pciehp_enable_slot(struct slot *p_slot);
 int pciehp_disable_slot(struct slot *p_slot);
-int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev);
+int pcie_enable_notification(struct controller *ctrl);
 static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
 {
@@ -202,8 +203,13 @@ struct hpc_ops {
 #include <acpi/actypes.h>
 #include <linux/pci-acpi.h>
-#define pciehp_get_hp_hw_control_from_firmware(dev) \
-        pciehp_acpi_get_hp_hw_control_from_firmware(dev)
+static inline int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
+{
+        u32 flags = (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
+                     OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
+        return acpi_get_hp_hw_control_from_firmware(dev, flags);
+}
 static inline int pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
                                                 struct hotplug_params *hpp)
 {


@ -72,7 +72,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static int get_address (struct hotplug_slot *slot, u32 *value);
static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
@ -85,7 +84,6 @@ static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
.get_attention_status = get_attention_status, .get_attention_status = get_attention_status,
.get_latch_status = get_latch_status, .get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status, .get_adapter_status = get_adapter_status,
.get_address = get_address,
.get_max_bus_speed = get_max_bus_speed, .get_max_bus_speed = get_max_bus_speed,
.get_cur_bus_speed = get_cur_bus_speed, .get_cur_bus_speed = get_cur_bus_speed,
}; };
@ -185,23 +183,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = {
*/ */
static void release_slot(struct hotplug_slot *hotplug_slot) static void release_slot(struct hotplug_slot *hotplug_slot)
{ {
struct slot *slot = hotplug_slot->private;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
kfree(slot->hotplug_slot->info); kfree(hotplug_slot->info);
kfree(slot->hotplug_slot); kfree(hotplug_slot);
kfree(slot);
}
static void make_slot_name(struct slot *slot)
{
if (pciehp_slot_with_bus)
snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
slot->bus, slot->number);
else
snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
slot->number);
} }
static int init_slots(struct controller *ctrl) static int init_slots(struct controller *ctrl)
@ -210,49 +195,34 @@ static int init_slots(struct controller *ctrl)
struct hotplug_slot *hotplug_slot; struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info; struct hotplug_slot_info *info;
int retval = -ENOMEM; int retval = -ENOMEM;
int i;
for (i = 0; i < ctrl->num_slots; i++) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
goto error;
list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL); hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
if (!hotplug_slot) if (!hotplug_slot)
goto error_slot; goto error;
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof(*info), GFP_KERNEL); info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info) if (!info)
goto error_hpslot; goto error_hpslot;
hotplug_slot->info = info;
hotplug_slot->name = slot->name;
slot->hp_slot = i;
slot->ctrl = ctrl;
slot->bus = ctrl->pci_dev->subordinate->number;
slot->device = ctrl->slot_device_offset + i;
slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot;
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
/* register this slot with the hotplug pci core */ /* register this slot with the hotplug pci core */
hotplug_slot->info = info;
hotplug_slot->name = slot->name;
hotplug_slot->private = slot; hotplug_slot->private = slot;
hotplug_slot->release = &release_slot; hotplug_slot->release = &release_slot;
make_slot_name(slot);
hotplug_slot->ops = &pciehp_hotplug_slot_ops; hotplug_slot->ops = &pciehp_hotplug_slot_ops;
get_power_status(hotplug_slot, &info->power_status); get_power_status(hotplug_slot, &info->power_status);
get_attention_status(hotplug_slot, &info->attention_status); get_attention_status(hotplug_slot, &info->attention_status);
get_latch_status(hotplug_slot, &info->latch_status); get_latch_status(hotplug_slot, &info->latch_status);
get_adapter_status(hotplug_slot, &info->adapter_status); get_adapter_status(hotplug_slot, &info->adapter_status);
slot->hotplug_slot = hotplug_slot;
dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
"slot_device_offset=%x\n", slot->bus, slot->device, "slot_device_offset=%x\n", slot->bus, slot->device,
slot->hp_slot, slot->number, ctrl->slot_device_offset); slot->hp_slot, slot->number, ctrl->slot_device_offset);
retval = pci_hp_register(hotplug_slot); retval = pci_hp_register(hotplug_slot,
ctrl->pci_dev->subordinate,
slot->device);
if (retval) { if (retval) {
err("pci_hp_register failed with error %d\n", retval); err("pci_hp_register failed with error %d\n", retval);
if (retval == -EEXIST) if (retval == -EEXIST)
@ -263,7 +233,7 @@ static int init_slots(struct controller *ctrl)
} }
/* create additional sysfs entries */ /* create additional sysfs entries */
if (EMI(ctrl)) { if (EMI(ctrl)) {
retval = sysfs_create_file(&hotplug_slot->kobj, retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj,
&hotplug_slot_attr_lock.attr); &hotplug_slot_attr_lock.attr);
if (retval) { if (retval) {
pci_hp_deregister(hotplug_slot); pci_hp_deregister(hotplug_slot);
@ -271,8 +241,6 @@ static int init_slots(struct controller *ctrl)
goto error_info; goto error_info;
} }
} }
list_add(&slot->slot_list, &ctrl->slot_list);
} }
return 0; return 0;
@ -280,27 +248,18 @@ error_info:
kfree(info); kfree(info);
error_hpslot: error_hpslot:
kfree(hotplug_slot); kfree(hotplug_slot);
error_slot:
kfree(slot);
error: error:
return retval; return retval;
} }
static void cleanup_slots(struct controller *ctrl) static void cleanup_slots(struct controller *ctrl)
{ {
struct list_head *tmp;
struct list_head *next;
struct slot *slot; struct slot *slot;
list_for_each_safe(tmp, next, &ctrl->slot_list) { list_for_each_entry(slot, &ctrl->slot_list, slot_list) {
slot = list_entry(tmp, struct slot, slot_list);
list_del(&slot->slot_list);
if (EMI(ctrl)) if (EMI(ctrl))
sysfs_remove_file(&slot->hotplug_slot->kobj, sysfs_remove_file(&slot->hotplug_slot->pci_slot->kobj,
&hotplug_slot_attr_lock.attr); &hotplug_slot_attr_lock.attr);
cancel_delayed_work(&slot->work);
flush_scheduled_work();
flush_workqueue(pciehp_wq);
pci_hp_deregister(slot->hotplug_slot); pci_hp_deregister(slot->hotplug_slot);
} }
} }
@ -398,19 +357,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0; return 0;
} }
static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
{ enum pci_bus_speed *value)
struct slot *slot = hotplug_slot->private;
struct pci_bus *bus = slot->ctrl->pci_dev->subordinate;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
*value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device;
return 0;
}
static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{ {
struct slot *slot = hotplug_slot->private; struct slot *slot = hotplug_slot->private;
int retval; int retval;
@ -444,34 +392,30 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
struct controller *ctrl; struct controller *ctrl;
struct slot *t_slot; struct slot *t_slot;
u8 value; u8 value;
struct pci_dev *pdev; struct pci_dev *pdev = dev->port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (pciehp_force)
dbg("Bypassing BIOS check for pciehp use on %s\n",
pci_name(pdev));
else if (pciehp_get_hp_hw_control_from_firmware(pdev))
goto err_out_none;
ctrl = pcie_init(dev);
if (!ctrl) { if (!ctrl) {
err("%s : out of memory\n", __func__); dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME);
goto err_out_none; goto err_out_none;
} }
INIT_LIST_HEAD(&ctrl->slot_list); set_service_data(dev, ctrl);
pdev = dev->port;
ctrl->pci_dev = pdev;
rc = pcie_init(ctrl, dev);
if (rc) {
dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME);
goto err_out_free_ctrl;
}
pci_set_drvdata(pdev, ctrl);
dbg("%s: ctrl bus=0x%x, device=%x, function=%x, irq=%x\n",
__func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn), pdev->irq);
/* Setup the slot information structures */ /* Setup the slot information structures */
rc = init_slots(ctrl); rc = init_slots(ctrl);
if (rc) { if (rc) {
err("%s: slot initialization failed\n", PCIE_MODULE_NAME); if (rc == -EBUSY)
warn("%s: slot already registered by another "
"hotplug driver\n", PCIE_MODULE_NAME);
else
err("%s: slot initialization failed\n",
PCIE_MODULE_NAME);
goto err_out_release_ctlr; goto err_out_release_ctlr;
} }
@ -495,20 +439,16 @@ err_out_free_ctrl_slot:
cleanup_slots(ctrl); cleanup_slots(ctrl);
err_out_release_ctlr: err_out_release_ctlr:
ctrl->hpc_ops->release_ctlr(ctrl); ctrl->hpc_ops->release_ctlr(ctrl);
err_out_free_ctrl:
kfree(ctrl);
err_out_none: err_out_none:
return -ENODEV; return -ENODEV;
} }
static void pciehp_remove (struct pcie_device *dev) static void pciehp_remove (struct pcie_device *dev)
{ {
struct pci_dev *pdev = dev->port; struct controller *ctrl = get_service_data(dev);
struct controller *ctrl = pci_get_drvdata(pdev);
cleanup_slots(ctrl); cleanup_slots(ctrl);
ctrl->hpc_ops->release_ctlr(ctrl); ctrl->hpc_ops->release_ctlr(ctrl);
kfree(ctrl);
} }
#ifdef CONFIG_PM #ifdef CONFIG_PM
@ -522,13 +462,12 @@ static int pciehp_resume (struct pcie_device *dev)
{ {
printk("%s ENTRY\n", __func__); printk("%s ENTRY\n", __func__);
if (pciehp_force) { if (pciehp_force) {
struct pci_dev *pdev = dev->port; struct controller *ctrl = get_service_data(dev);
struct controller *ctrl = pci_get_drvdata(pdev);
struct slot *t_slot; struct slot *t_slot;
u8 status; u8 status;
/* reinitialize the chipset's event detection logic */ /* reinitialize the chipset's event detection logic */
pcie_init_hardware_part2(ctrl, dev); pcie_enable_notification(ctrl);
t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); t_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
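
With the hotplug-specific kobject gone, driver-private sysfs files move into the shared slot directory. The pattern used above for the EMI "lock" attribute (and by sgi_hotplug further down) reduces to the following sketch; demo_add_slot_file()/demo_remove_slot_file() are made-up wrappers, and the attribute itself is whatever the driver already defines:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/sysfs.h>

/* attach a driver-private file under /sys/bus/pci/slots/<name>/ */
static int demo_add_slot_file(struct hotplug_slot *slot,
                              const struct attribute *attr)
{
        /* was &slot->kobj before the pci_slot rework */
        return sysfs_create_file(&slot->pci_slot->kobj, attr);
}

static void demo_remove_slot_file(struct hotplug_slot *slot,
                                  const struct attribute *attr)
{
        sysfs_remove_file(&slot->pci_slot->kobj, attr);
}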


@ -247,30 +247,32 @@ static inline void pciehp_free_irq(struct controller *ctrl)
free_irq(ctrl->pci_dev->irq, ctrl); free_irq(ctrl->pci_dev->irq, ctrl);
} }
static inline int pcie_poll_cmd(struct controller *ctrl) static int pcie_poll_cmd(struct controller *ctrl)
{ {
u16 slot_status; u16 slot_status;
int timeout = 1000; int timeout = 1000;
if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
if (slot_status & CMD_COMPLETED) if (slot_status & CMD_COMPLETED) {
goto completed; pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
for (timeout = 1000; timeout > 0; timeout -= 100) { return 1;
msleep(100); }
if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) }
if (slot_status & CMD_COMPLETED) while (timeout > 1000) {
goto completed; msleep(10);
timeout -= 10;
if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) {
if (slot_status & CMD_COMPLETED) {
pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
return 1;
}
}
} }
return 0; /* timeout */ return 0; /* timeout */
completed:
pciehp_writew(ctrl, SLOTSTATUS, CMD_COMPLETED);
return timeout;
} }
static inline int pcie_wait_cmd(struct controller *ctrl, int poll) static void pcie_wait_cmd(struct controller *ctrl, int poll)
{ {
int retval = 0;
unsigned int msecs = pciehp_poll_mode ? 2500 : 1000; unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
unsigned long timeout = msecs_to_jiffies(msecs); unsigned long timeout = msecs_to_jiffies(msecs);
int rc; int rc;
@ -278,16 +280,9 @@ static inline int pcie_wait_cmd(struct controller *ctrl, int poll)
if (poll) if (poll)
rc = pcie_poll_cmd(ctrl); rc = pcie_poll_cmd(ctrl);
else else
rc = wait_event_interruptible_timeout(ctrl->queue, rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
!ctrl->cmd_busy, timeout);
if (!rc) if (!rc)
dbg("Command not completed in 1000 msec\n"); dbg("Command not completed in 1000 msec\n");
else if (rc < 0) {
retval = -EINTR;
info("Command was interrupted by a signal\n");
}
return retval;
} }
/** /**
@ -342,10 +337,6 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
slot_ctrl &= ~mask; slot_ctrl &= ~mask;
slot_ctrl |= (cmd & mask); slot_ctrl |= (cmd & mask);
/* Don't enable command completed if caller is changing it. */
if (!(mask & CMD_CMPL_INTR_ENABLE))
slot_ctrl |= CMD_CMPL_INTR_ENABLE;
ctrl->cmd_busy = 1; ctrl->cmd_busy = 1;
smp_mb(); smp_mb();
retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
@ -365,7 +356,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
if (!(slot_ctrl & HP_INTR_ENABLE) || if (!(slot_ctrl & HP_INTR_ENABLE) ||
!(slot_ctrl & CMD_CMPL_INTR_ENABLE)) !(slot_ctrl & CMD_CMPL_INTR_ENABLE))
poll = 1; poll = 1;
retval = pcie_wait_cmd(ctrl, poll); pcie_wait_cmd(ctrl, poll);
} }
out: out:
mutex_unlock(&ctrl->ctrl_lock); mutex_unlock(&ctrl->ctrl_lock);
@ -614,23 +605,6 @@ static void hpc_set_green_led_blink(struct slot *slot)
__func__, ctrl->cap_base + SLOTCTRL, slot_cmd); __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
} }
static void hpc_release_ctlr(struct controller *ctrl)
{
/* Mask Hot-plug Interrupt Enable */
if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE))
err("%s: Cannot mask hotplut interrupt enable\n", __func__);
/* Free interrupt handler or interrupt polling timer */
pciehp_free_irq(ctrl);
/*
* If this is the last controller to be released, destroy the
* pciehp work queue
*/
if (atomic_dec_and_test(&pciehp_num_controllers))
destroy_workqueue(pciehp_wq);
}
static int hpc_power_on_slot(struct slot * slot) static int hpc_power_on_slot(struct slot * slot)
{ {
struct controller *ctrl = slot->ctrl; struct controller *ctrl = slot->ctrl;
@ -785,7 +759,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
intr_loc |= detected; intr_loc |= detected;
if (!intr_loc) if (!intr_loc)
return IRQ_NONE; return IRQ_NONE;
if (pciehp_writew(ctrl, SLOTSTATUS, detected)) { if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) {
err("%s: Cannot write to SLOTSTATUS\n", __func__); err("%s: Cannot write to SLOTSTATUS\n", __func__);
return IRQ_NONE; return IRQ_NONE;
} }
@ -797,25 +771,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
if (intr_loc & CMD_COMPLETED) { if (intr_loc & CMD_COMPLETED) {
ctrl->cmd_busy = 0; ctrl->cmd_busy = 0;
smp_mb(); smp_mb();
wake_up_interruptible(&ctrl->queue); wake_up(&ctrl->queue);
} }
if (!(intr_loc & ~CMD_COMPLETED)) if (!(intr_loc & ~CMD_COMPLETED))
return IRQ_HANDLED; return IRQ_HANDLED;
/*
* Return without handling events if this handler routine is
* called before controller initialization is done. This may
* happen if hotplug event or another interrupt that shares
* the IRQ with pciehp arrives before slot initialization is
* done after interrupt handler is registered.
*
* FIXME - Need more structural fixes. We need to be ready to
* handle the event before installing interrupt handler.
*/
p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset); p_slot = pciehp_find_slot(ctrl, ctrl->slot_device_offset);
if (!p_slot || !p_slot->hpc_ops)
return IRQ_HANDLED;
/* Check MRL Sensor Changed */ /* Check MRL Sensor Changed */
if (intr_loc & MRL_SENS_CHANGED) if (intr_loc & MRL_SENS_CHANGED)
@ -992,6 +954,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot,
return retval; return retval;
} }
static void pcie_release_ctrl(struct controller *ctrl);
static struct hpc_ops pciehp_hpc_ops = { static struct hpc_ops pciehp_hpc_ops = {
.power_on_slot = hpc_power_on_slot, .power_on_slot = hpc_power_on_slot,
.power_off_slot = hpc_power_off_slot, .power_off_slot = hpc_power_off_slot,
@ -1013,97 +976,11 @@ static struct hpc_ops pciehp_hpc_ops = {
.green_led_off = hpc_set_green_led_off, .green_led_off = hpc_set_green_led_off,
.green_led_blink = hpc_set_green_led_blink, .green_led_blink = hpc_set_green_led_blink,
.release_ctlr = hpc_release_ctlr, .release_ctlr = pcie_release_ctrl,
.check_lnk_status = hpc_check_lnk_status, .check_lnk_status = hpc_check_lnk_status,
}; };
#ifdef CONFIG_ACPI int pcie_enable_notification(struct controller *ctrl)
static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
acpi_status status;
acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
struct pci_dev *pdev = dev;
struct pci_bus *parent;
struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
/*
* Per PCI firmware specification, we should run the ACPI _OSC
* method to get control of hotplug hardware before using it.
* If an _OSC is missing, we look for an OSHP to do the same thing.
* To handle different BIOS behavior, we look for _OSC and OSHP
* within the scope of the hotplug controller and its parents, upto
* the host bridge under which this controller exists.
*/
while (!handle) {
/*
* This hotplug controller was not listed in the ACPI name
* space at all. Try to get acpi handle of parent pci bus.
*/
if (!pdev || !pdev->bus->parent)
break;
parent = pdev->bus->parent;
dbg("Could not find %s in acpi namespace, trying parent\n",
pci_name(pdev));
if (!parent->self)
/* Parent must be a host bridge */
handle = acpi_get_pci_rootbridge_handle(
pci_domain_nr(parent),
parent->number);
else
handle = DEVICE_ACPI_HANDLE(
&(parent->self->dev));
pdev = parent->self;
}
while (handle) {
acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
dbg("Trying to get hotplug control for %s \n",
(char *)string.pointer);
status = pci_osc_control_set(handle,
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL |
OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
if (status == AE_NOT_FOUND)
status = acpi_run_oshp(handle);
if (ACPI_SUCCESS(status)) {
dbg("Gained control for hotplug HW for pci %s (%s)\n",
pci_name(dev), (char *)string.pointer);
kfree(string.pointer);
return 0;
}
if (acpi_root_bridge(handle))
break;
chandle = handle;
status = acpi_get_parent(chandle, &handle);
if (ACPI_FAILURE(status))
break;
}
dbg("Cannot get control of hotplug hardware for pci %s\n",
pci_name(dev));
kfree(string.pointer);
return -1;
}
#endif
static int pcie_init_hardware_part1(struct controller *ctrl,
struct pcie_device *dev)
{
/* Clear all remaining event bits in Slot Status register */
if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) {
err("%s: Cannot write to SLOTSTATUS register\n", __func__);
return -1;
}
/* Mask Hot-plug Interrupt Enable */
if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) {
err("%s: Cannot mask hotplug interrupt enable\n", __func__);
return -1;
}
return 0;
}
int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
{ {
u16 cmd, mask; u16 cmd, mask;
@ -1115,30 +992,83 @@ int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
if (MRL_SENS(ctrl)) if (MRL_SENS(ctrl))
cmd |= MRL_DETECT_ENABLE; cmd |= MRL_DETECT_ENABLE;
if (!pciehp_poll_mode) if (!pciehp_poll_mode)
cmd |= HP_INTR_ENABLE; cmd |= HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE; PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
if (pcie_write_cmd(ctrl, cmd, mask)) { if (pcie_write_cmd(ctrl, cmd, mask)) {
err("%s: Cannot enable software notification\n", __func__); err("%s: Cannot enable software notification\n", __func__);
goto abort; return -1;
} }
if (pciehp_force)
dbg("Bypassing BIOS check for pciehp use on %s\n",
pci_name(ctrl->pci_dev));
else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev))
goto abort_disable_intr;
return 0; return 0;
}
/* We end up here for the many possible ways to fail this API. */ static void pcie_disable_notification(struct controller *ctrl)
abort_disable_intr: {
if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE)) u16 mask;
err("%s : disabling interrupts failed\n", __func__); mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE |
abort: PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE;
return -1; if (pcie_write_cmd(ctrl, 0, mask))
warn("%s: Cannot disable software notification\n", __func__);
}
static int pcie_init_notification(struct controller *ctrl)
{
if (pciehp_request_irq(ctrl))
return -1;
if (pcie_enable_notification(ctrl)) {
pciehp_free_irq(ctrl);
return -1;
}
return 0;
}
static void pcie_shutdown_notification(struct controller *ctrl)
{
pcie_disable_notification(ctrl);
pciehp_free_irq(ctrl);
}
static void make_slot_name(struct slot *slot)
{
if (pciehp_slot_with_bus)
snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d",
slot->bus, slot->number);
else
snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number);
}
static int pcie_init_slot(struct controller *ctrl)
{
struct slot *slot;
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
slot->hp_slot = 0;
slot->ctrl = ctrl;
slot->bus = ctrl->pci_dev->subordinate->number;
slot->device = ctrl->slot_device_offset + slot->hp_slot;
slot->hpc_ops = ctrl->hpc_ops;
slot->number = ctrl->first_slot;
make_slot_name(slot);
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
list_add(&slot->slot_list, &ctrl->slot_list);
return 0;
}
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot;
slot = list_first_entry(&ctrl->slot_list, struct slot, slot_list);
list_del(&slot->slot_list);
cancel_delayed_work(&slot->work);
flush_scheduled_work();
flush_workqueue(pciehp_wq);
kfree(slot);
} }
static inline void dbg_ctrl(struct controller *ctrl) static inline void dbg_ctrl(struct controller *ctrl)
@ -1176,15 +1106,23 @@ static inline void dbg_ctrl(struct controller *ctrl)
dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes"); dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? "no" : "yes");
pciehp_readw(ctrl, SLOTSTATUS, &reg16); pciehp_readw(ctrl, SLOTSTATUS, &reg16);
dbg("Slot Status : 0x%04x\n", reg16); dbg("Slot Status : 0x%04x\n", reg16);
pciehp_readw(ctrl, SLOTSTATUS, &reg16); pciehp_readw(ctrl, SLOTCTRL, &reg16);
dbg("Slot Control : 0x%04x\n", reg16); dbg("Slot Control : 0x%04x\n", reg16);
} }
int pcie_init(struct controller *ctrl, struct pcie_device *dev) struct controller *pcie_init(struct pcie_device *dev)
{ {
struct controller *ctrl;
u32 slot_cap; u32 slot_cap;
struct pci_dev *pdev = dev->port; struct pci_dev *pdev = dev->port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl) {
err("%s : out of memory\n", __func__);
goto abort;
}
INIT_LIST_HEAD(&ctrl->slot_list);
ctrl->pci_dev = pdev; ctrl->pci_dev = pdev;
ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!ctrl->cap_base) { if (!ctrl->cap_base) {
@ -1215,15 +1153,12 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
!(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
ctrl->no_cmd_complete = 1; ctrl->no_cmd_complete = 1;
info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", /* Clear all remaining event bits in Slot Status register */
pdev->vendor, pdev->device, if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f))
pdev->subsystem_vendor, pdev->subsystem_device); goto abort_ctrl;
if (pcie_init_hardware_part1(ctrl, dev)) /* Disable sotfware notification */
goto abort; pcie_disable_notification(ctrl);
if (pciehp_request_irq(ctrl))
goto abort;
/* /*
* If this is the first controller to be initialized, * If this is the first controller to be initialized,
@ -1231,18 +1166,39 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
*/ */
if (atomic_add_return(1, &pciehp_num_controllers) == 1) { if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
pciehp_wq = create_singlethread_workqueue("pciehpd"); pciehp_wq = create_singlethread_workqueue("pciehpd");
if (!pciehp_wq) { if (!pciehp_wq)
goto abort_free_irq; goto abort_ctrl;
}
} }
if (pcie_init_hardware_part2(ctrl, dev)) info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
goto abort_free_irq; pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
return 0; if (pcie_init_slot(ctrl))
goto abort_ctrl;
abort_free_irq: if (pcie_init_notification(ctrl))
pciehp_free_irq(ctrl); goto abort_slot;
return ctrl;
abort_slot:
pcie_cleanup_slot(ctrl);
abort_ctrl:
kfree(ctrl);
abort: abort:
return -1; return NULL;
}
void pcie_release_ctrl(struct controller *ctrl)
{
pcie_shutdown_notification(ctrl);
pcie_cleanup_slot(ctrl);
/*
* If this is the last controller to be released, destroy the
* pciehp work queue
*/
if (atomic_dec_and_test(&pciehp_num_controllers))
destroy_workqueue(pciehp_wq);
kfree(ctrl);
} }
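
Taken together, the pciehp_hpc.c changes fold interrupt and slot setup into pcie_init() and pair it with pcie_release_ctrl(). A simplified restatement of the resulting probe/remove flow (this mirrors pciehp_probe()/pciehp_remove() above rather than adding anything new; error handling and messages are trimmed, and the "demo" names are illustrative):

static int demo_probe(struct pcie_device *dev)
{
        struct controller *ctrl;

        if (!pciehp_force &&
            pciehp_get_hp_hw_control_from_firmware(dev->port))
                return -ENODEV;                 /* firmware kept control */

        ctrl = pcie_init(dev);                  /* ctrl + slot + notification */
        if (!ctrl)
                return -ENODEV;
        set_service_data(dev, ctrl);
        return init_slots(ctrl);                /* pci_hp_register() the slot */
}

static void demo_remove(struct pcie_device *dev)
{
        struct controller *ctrl = get_service_data(dev);

        cleanup_slots(ctrl);                    /* pci_hp_deregister() */
        ctrl->hpc_ops->release_ctlr(ctrl);      /* pcie_release_ctrl() */
}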


@@ -14,8 +14,10 @@
  */
 #include <linux/kobject.h>
 #include <linux/string.h>
+#include <linux/pci.h>
 #include <linux/pci_hotplug.h>
 #include "rpadlpar.h"
+#include "../pci.h"
 #define DLPAR_KOBJ_NAME "control"
@@ -27,7 +29,6 @@
 #define MAX_DRC_NAME_LEN 64
 static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr,
                               const char *buf, size_t nbytes)
 {
@@ -112,7 +113,7 @@ int dlpar_sysfs_init(void)
         int error;
         dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME,
-                                            &pci_hotplug_slots_kset->kobj);
+                                            &pci_slots_kset->kobj);
         if (!dlpar_kobj)
                 return -EINVAL;


@ -33,33 +33,6 @@
#include <asm/rtas.h> #include <asm/rtas.h>
#include "rpaphp.h" #include "rpaphp.h"
static ssize_t address_read_file (struct hotplug_slot *php_slot, char *buf)
{
int retval;
struct slot *slot = (struct slot *)php_slot->private;
struct pci_bus *bus;
if (!slot)
return -ENOENT;
bus = slot->bus;
if (!bus)
return -ENOENT;
if (bus->self)
retval = sprintf(buf, pci_name(bus->self));
else
retval = sprintf(buf, "%04x:%02x:00.0",
pci_domain_nr(bus), bus->number);
return retval;
}
static struct hotplug_slot_attribute php_attr_address = {
.attr = {.name = "address", .mode = S_IFREG | S_IRUGO},
.show = address_read_file,
};
/* free up the memory used by a slot */ /* free up the memory used by a slot */
static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot) static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot)
{ {
@ -135,9 +108,6 @@ int rpaphp_deregister_slot(struct slot *slot)
list_del(&slot->rpaphp_slot_list); list_del(&slot->rpaphp_slot_list);
/* remove "address" file */
sysfs_remove_file(&php_slot->kobj, &php_attr_address.attr);
retval = pci_hp_deregister(php_slot); retval = pci_hp_deregister(php_slot);
if (retval) if (retval)
err("Problem unregistering a slot %s\n", slot->name); err("Problem unregistering a slot %s\n", slot->name);
@ -151,6 +121,7 @@ int rpaphp_register_slot(struct slot *slot)
{ {
struct hotplug_slot *php_slot = slot->hotplug_slot; struct hotplug_slot *php_slot = slot->hotplug_slot;
int retval; int retval;
int slotno;
dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n",
__func__, slot->dn->full_name, slot->index, slot->name, __func__, slot->dn->full_name, slot->index, slot->name,
@ -162,19 +133,16 @@ int rpaphp_register_slot(struct slot *slot)
return -EAGAIN; return -EAGAIN;
} }
retval = pci_hp_register(php_slot); if (slot->dn->child)
slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn);
else
slotno = -1;
retval = pci_hp_register(php_slot, slot->bus, slotno);
if (retval) { if (retval) {
err("pci_hp_register failed with error %d\n", retval); err("pci_hp_register failed with error %d\n", retval);
return retval; return retval;
} }
/* create "address" file */
retval = sysfs_create_file(&php_slot->kobj, &php_attr_address.attr);
if (retval) {
err("sysfs_create_file failed with error %d\n", retval);
goto sysfs_fail;
}
/* add slot to our internal list */ /* add slot to our internal list */
list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head);
info("Slot [%s] registered\n", slot->name); info("Slot [%s] registered\n", slot->name);

View file

@ -197,13 +197,15 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
static struct hotplug_slot * sn_hp_destroy(void) static struct hotplug_slot * sn_hp_destroy(void)
{ {
struct slot *slot; struct slot *slot;
struct pci_slot *pci_slot;
struct hotplug_slot *bss_hotplug_slot = NULL; struct hotplug_slot *bss_hotplug_slot = NULL;
list_for_each_entry(slot, &sn_hp_list, hp_list) { list_for_each_entry(slot, &sn_hp_list, hp_list) {
bss_hotplug_slot = slot->hotplug_slot; bss_hotplug_slot = slot->hotplug_slot;
pci_slot = bss_hotplug_slot->pci_slot;
list_del(&((struct slot *)bss_hotplug_slot->private)-> list_del(&((struct slot *)bss_hotplug_slot->private)->
hp_list); hp_list);
sysfs_remove_file(&bss_hotplug_slot->kobj, sysfs_remove_file(&pci_slot->kobj,
&sn_slot_path_attr.attr); &sn_slot_path_attr.attr);
break; break;
} }
@ -614,6 +616,7 @@ static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
static int sn_hotplug_slot_register(struct pci_bus *pci_bus) static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
{ {
int device; int device;
struct pci_slot *pci_slot;
struct hotplug_slot *bss_hotplug_slot; struct hotplug_slot *bss_hotplug_slot;
int rc = 0; int rc = 0;
@ -650,11 +653,12 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
bss_hotplug_slot->ops = &sn_hotplug_slot_ops; bss_hotplug_slot->ops = &sn_hotplug_slot_ops;
bss_hotplug_slot->release = &sn_release_slot; bss_hotplug_slot->release = &sn_release_slot;
rc = pci_hp_register(bss_hotplug_slot); rc = pci_hp_register(bss_hotplug_slot, pci_bus, device);
if (rc) if (rc)
goto register_err; goto register_err;
rc = sysfs_create_file(&bss_hotplug_slot->kobj, pci_slot = bss_hotplug_slot->pci_slot;
rc = sysfs_create_file(&pci_slot->kobj,
&sn_slot_path_attr.attr); &sn_slot_path_attr.attr);
if (rc) if (rc)
goto register_err; goto register_err;
@ -664,7 +668,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
register_err: register_err:
dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n", dev_dbg(&pci_bus->self->dev, "bus failed to register with err = %d\n",
rc); rc);
alloc_err: alloc_err:
if (rc == -ENOMEM) if (rc == -ENOMEM)

View file

@ -170,6 +170,7 @@ extern void shpchp_queue_pushbutton_work(struct work_struct *work);
extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev);
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
#include <linux/pci-acpi.h>
static inline int get_hp_params_from_firmware(struct pci_dev *dev, static inline int get_hp_params_from_firmware(struct pci_dev *dev,
struct hotplug_params *hpp) struct hotplug_params *hpp)
{ {
@ -177,14 +178,15 @@ static inline int get_hp_params_from_firmware(struct pci_dev *dev,
return -ENODEV; return -ENODEV;
return 0; return 0;
} }
#define get_hp_hw_control_from_firmware(pdev) \
do { \ static inline int get_hp_hw_control_from_firmware(struct pci_dev *dev)
if (DEVICE_ACPI_HANDLE(&(pdev->dev))) \ {
acpi_run_oshp(DEVICE_ACPI_HANDLE(&(pdev->dev)));\ u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;
} while (0) return acpi_get_hp_hw_control_from_firmware(dev, flags);
}
#else #else
#define get_hp_params_from_firmware(dev, hpp) (-ENODEV) #define get_hp_params_from_firmware(dev, hpp) (-ENODEV)
#define get_hp_hw_control_from_firmware(dev) do { } while (0) #define get_hp_hw_control_from_firmware(dev) (0)
#endif #endif
struct ctrl_reg { struct ctrl_reg {
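For context, the CONFIG_ACPI branch above replaces the old OSHP macro with an inline wrapper around acpi_get_hp_hw_control_from_firmware(). A minimal, illustrative sketch of how a hotplug driver would ask firmware for native SHPC control (example_request_shpc_control is a hypothetical name; the helper and flag are taken from the hunk above):

static int example_request_shpc_control(struct pci_dev *pdev)
{
        u32 flags = OSC_SHPC_NATIVE_HP_CONTROL;

        /* Non-zero means firmware declined to hand over slot control. */
        return acpi_get_hp_hw_control_from_firmware(pdev, flags);
}

Note that shpchp now makes this check from is_shpc_capable() at probe time rather than from shpc_init(), as the hunks below show.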

View file

@ -39,7 +39,7 @@
int shpchp_debug; int shpchp_debug;
int shpchp_poll_mode; int shpchp_poll_mode;
int shpchp_poll_time; int shpchp_poll_time;
int shpchp_slot_with_bus; static int shpchp_slot_with_bus;
struct workqueue_struct *shpchp_wq; struct workqueue_struct *shpchp_wq;
#define DRIVER_VERSION "0.4" #define DRIVER_VERSION "0.4"
@ -68,7 +68,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value); static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value); static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value); static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
static int get_address (struct hotplug_slot *slot, u32 *value);
static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
@ -81,7 +80,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.get_attention_status = get_attention_status, .get_attention_status = get_attention_status,
.get_latch_status = get_latch_status, .get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status, .get_adapter_status = get_adapter_status,
.get_address = get_address,
.get_max_bus_speed = get_max_bus_speed, .get_max_bus_speed = get_max_bus_speed,
.get_cur_bus_speed = get_cur_bus_speed, .get_cur_bus_speed = get_cur_bus_speed,
}; };
@ -159,7 +157,8 @@ static int init_slots(struct controller *ctrl)
dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x "
"slot_device_offset=%x\n", slot->bus, slot->device, "slot_device_offset=%x\n", slot->bus, slot->device,
slot->hp_slot, slot->number, ctrl->slot_device_offset); slot->hp_slot, slot->number, ctrl->slot_device_offset);
retval = pci_hp_register(slot->hotplug_slot); retval = pci_hp_register(slot->hotplug_slot,
ctrl->pci_dev->subordinate, slot->device);
if (retval) { if (retval) {
err("pci_hp_register failed with error %d\n", retval); err("pci_hp_register failed with error %d\n", retval);
if (retval == -EEXIST) if (retval == -EEXIST)
@ -288,19 +287,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
return 0; return 0;
} }
static int get_address (struct hotplug_slot *hotplug_slot, u32 *value) static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
{ enum pci_bus_speed *value)
struct slot *slot = get_slot(hotplug_slot);
struct pci_bus *bus = slot->ctrl->pci_dev->subordinate;
dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name);
*value = (pci_domain_nr(bus) << 16) | (slot->bus << 8) | slot->device;
return 0;
}
static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
{ {
struct slot *slot = get_slot(hotplug_slot); struct slot *slot = get_slot(hotplug_slot);
int retval; int retval;
@ -330,13 +318,14 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp
static int is_shpc_capable(struct pci_dev *dev) static int is_shpc_capable(struct pci_dev *dev)
{ {
if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device == if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
PCI_DEVICE_ID_AMD_GOLAM_7450)) PCI_DEVICE_ID_AMD_GOLAM_7450))
return 1; return 1;
if (pci_find_capability(dev, PCI_CAP_ID_SHPC)) if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
return 1; return 0;
if (get_hp_hw_control_from_firmware(dev))
return 0; return 0;
return 1;
} }
static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

View file

@ -1084,7 +1084,6 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__,
pdev->bus->number, PCI_SLOT(pdev->devfn), pdev->bus->number, PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn), pdev->irq); PCI_FUNC(pdev->devfn), pdev->irq);
get_hp_hw_control_from_firmware(pdev);
/* /*
* If this is the first controller to be initialized, * If this is the first controller to be initialized,

View file

@ -1748,7 +1748,6 @@ int __init init_dmars(void)
deferred_flush = kzalloc(g_num_of_iommus * deferred_flush = kzalloc(g_num_of_iommus *
sizeof(struct deferred_flush_tables), GFP_KERNEL); sizeof(struct deferred_flush_tables), GFP_KERNEL);
if (!deferred_flush) { if (!deferred_flush) {
kfree(g_iommus);
ret = -ENOMEM; ret = -ENOMEM;
goto error; goto error;
} }

View file

@ -70,12 +70,10 @@ arch_teardown_msi_irqs(struct pci_dev *dev)
} }
} }
static void msi_set_enable(struct pci_dev *dev, int enable) static void __msi_set_enable(struct pci_dev *dev, int pos, int enable)
{ {
int pos;
u16 control; u16 control;
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos) { if (pos) {
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
control &= ~PCI_MSI_FLAGS_ENABLE; control &= ~PCI_MSI_FLAGS_ENABLE;
@ -85,6 +83,11 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
} }
} }
static void msi_set_enable(struct pci_dev *dev, int enable)
{
__msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable);
}
static void msix_set_enable(struct pci_dev *dev, int enable) static void msix_set_enable(struct pci_dev *dev, int enable)
{ {
int pos; int pos;
@ -141,7 +144,8 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
mask_bits |= flag & mask; mask_bits |= flag & mask;
pci_write_config_dword(entry->dev, pos, mask_bits); pci_write_config_dword(entry->dev, pos, mask_bits);
} else { } else {
msi_set_enable(entry->dev, !flag); __msi_set_enable(entry->dev, entry->msi_attrib.pos,
!flag);
} }
break; break;
case PCI_CAP_ID_MSIX: case PCI_CAP_ID_MSIX:
@ -561,9 +565,8 @@ int pci_enable_msi(struct pci_dev* dev)
/* Check whether driver already requested for MSI-X irqs */ /* Check whether driver already requested for MSI-X irqs */
if (dev->msix_enabled) { if (dev->msix_enabled) {
printk(KERN_INFO "PCI: %s: Can't enable MSI. " dev_info(&dev->dev, "can't enable MSI "
"Device already has MSI-X enabled\n", "(MSI-X already enabled)\n");
pci_name(dev));
return -EINVAL; return -EINVAL;
} }
status = msi_capability_init(dev); status = msi_capability_init(dev);
@ -686,9 +689,8 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
/* Check whether driver already requested for MSI irq */ /* Check whether driver already requested for MSI irq */
if (dev->msi_enabled) { if (dev->msi_enabled) {
printk(KERN_INFO "PCI: %s: Can't enable MSI-X. " dev_info(&dev->dev, "can't enable MSI-X "
"Device already has an MSI irq assigned\n", "(MSI IRQ already assigned)\n");
pci_name(dev));
return -EINVAL; return -EINVAL;
} }
status = msix_capability_init(dev, entries, nvec); status = msix_capability_init(dev, entries, nvec);

View file

@ -21,12 +21,19 @@
struct acpi_osc_data { struct acpi_osc_data {
acpi_handle handle; acpi_handle handle;
u32 ctrlset_buf[3]; u32 support_set;
u32 global_ctrlsets; u32 control_set;
int is_queried;
u32 query_result;
struct list_head sibiling; struct list_head sibiling;
}; };
static LIST_HEAD(acpi_osc_data_list); static LIST_HEAD(acpi_osc_data_list);
struct acpi_osc_args {
u32 capbuf[3];
u32 query_result;
};
static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
{ {
struct acpi_osc_data *data; struct acpi_osc_data *data;
@ -44,25 +51,80 @@ static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle)
return data; return data;
} }
static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40, 0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66}; static u8 OSC_UUID[16] = {0x5B, 0x4D, 0xDB, 0x33, 0xF7, 0x1F, 0x1C, 0x40,
0x96, 0x57, 0x74, 0x41, 0xC0, 0x3D, 0xD7, 0x66};
static acpi_status static acpi_status acpi_run_osc(acpi_handle handle,
acpi_query_osc ( struct acpi_osc_args *osc_args)
acpi_handle handle,
u32 level,
void *context,
void **retval )
{ {
acpi_status status; acpi_status status;
struct acpi_object_list input; struct acpi_object_list input;
union acpi_object in_params[4]; union acpi_object in_params[4];
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *out_obj; union acpi_object *out_obj;
u32 osc_dw0; u32 osc_dw0, flags = osc_args->capbuf[OSC_QUERY_TYPE];
acpi_status *ret_status = (acpi_status *)retval;
/* Setting up input parameters */
input.count = 4;
input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = OSC_UUID;
in_params[1].type = ACPI_TYPE_INTEGER;
in_params[1].integer.value = 1;
in_params[2].type = ACPI_TYPE_INTEGER;
in_params[2].integer.value = 3;
in_params[3].type = ACPI_TYPE_BUFFER;
in_params[3].buffer.length = 12;
in_params[3].buffer.pointer = (u8 *)osc_args->capbuf;
status = acpi_evaluate_object(handle, "_OSC", &input, &output);
if (ACPI_FAILURE(status))
return status;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
printk(KERN_DEBUG "Evaluate _OSC returns wrong type\n");
status = AE_TYPE;
goto out_kfree;
}
osc_dw0 = *((u32 *)out_obj->buffer.pointer);
if (osc_dw0) {
if (osc_dw0 & OSC_REQUEST_ERROR)
printk(KERN_DEBUG "_OSC request fails\n");
if (osc_dw0 & OSC_INVALID_UUID_ERROR)
printk(KERN_DEBUG "_OSC invalid UUID\n");
if (osc_dw0 & OSC_INVALID_REVISION_ERROR)
printk(KERN_DEBUG "_OSC invalid revision\n");
if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
if (flags & OSC_QUERY_ENABLE)
goto out_success;
printk(KERN_DEBUG "_OSC FW not grant req. control\n");
status = AE_SUPPORT;
goto out_kfree;
}
status = AE_ERROR;
goto out_kfree;
}
out_success:
if (flags & OSC_QUERY_ENABLE)
osc_args->query_result =
*((u32 *)(out_obj->buffer.pointer + 8));
status = AE_OK;
out_kfree:
kfree(output.pointer);
return status;
}
static acpi_status acpi_query_osc(acpi_handle handle,
u32 level, void *context, void **retval)
{
acpi_status status;
struct acpi_osc_data *osc_data; struct acpi_osc_data *osc_data;
u32 flags = (unsigned long)context, temp; u32 flags = (unsigned long)context, support_set;
acpi_handle tmp; acpi_handle tmp;
struct acpi_osc_args osc_args;
status = acpi_get_handle(handle, "_OSC", &tmp); status = acpi_get_handle(handle, "_OSC", &tmp);
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
@ -74,134 +136,19 @@ acpi_query_osc (
return AE_ERROR; return AE_ERROR;
} }
osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] |= (flags & OSC_SUPPORT_MASKS);
/* do _OSC query for all possible controls */ /* do _OSC query for all possible controls */
temp = osc_data->ctrlset_buf[OSC_CONTROL_TYPE]; support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS);
osc_data->ctrlset_buf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set;
osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS;
/* Setting up input parameters */ status = acpi_run_osc(handle, &osc_args);
input.count = 4; if (ACPI_SUCCESS(status)) {
input.pointer = in_params; osc_data->support_set = support_set;
in_params[0].type = ACPI_TYPE_BUFFER; osc_data->query_result = osc_args.query_result;
in_params[0].buffer.length = 16; osc_data->is_queried = 1;
in_params[0].buffer.pointer = OSC_UUID;
in_params[1].type = ACPI_TYPE_INTEGER;
in_params[1].integer.value = 1;
in_params[2].type = ACPI_TYPE_INTEGER;
in_params[2].integer.value = 3;
in_params[3].type = ACPI_TYPE_BUFFER;
in_params[3].buffer.length = 12;
in_params[3].buffer.pointer = (u8 *)osc_data->ctrlset_buf;
status = acpi_evaluate_object(handle, "_OSC", &input, &output);
if (ACPI_FAILURE(status))
goto out_nofree;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
printk(KERN_DEBUG
"Evaluate _OSC returns wrong type\n");
status = AE_TYPE;
goto query_osc_out;
}
osc_dw0 = *((u32 *) out_obj->buffer.pointer);
if (osc_dw0) {
if (osc_dw0 & OSC_REQUEST_ERROR)
printk(KERN_DEBUG "_OSC request fails\n");
if (osc_dw0 & OSC_INVALID_UUID_ERROR)
printk(KERN_DEBUG "_OSC invalid UUID\n");
if (osc_dw0 & OSC_INVALID_REVISION_ERROR)
printk(KERN_DEBUG "_OSC invalid revision\n");
if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
/* Update Global Control Set */
osc_data->global_ctrlsets =
*((u32 *)(out_obj->buffer.pointer + 8));
status = AE_OK;
goto query_osc_out;
}
status = AE_ERROR;
goto query_osc_out;
} }
/* Update Global Control Set */
osc_data->global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8));
status = AE_OK;
query_osc_out:
kfree(output.pointer);
out_nofree:
*ret_status = status;
osc_data->ctrlset_buf[OSC_QUERY_TYPE] = !OSC_QUERY_ENABLE;
osc_data->ctrlset_buf[OSC_CONTROL_TYPE] = temp;
if (ACPI_FAILURE(status)) {
/* no osc support at all */
osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] = 0;
}
return status;
}
static acpi_status
acpi_run_osc (
acpi_handle handle,
void *context)
{
acpi_status status;
struct acpi_object_list input;
union acpi_object in_params[4];
struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
union acpi_object *out_obj;
u32 osc_dw0;
/* Setting up input parameters */
input.count = 4;
input.pointer = in_params;
in_params[0].type = ACPI_TYPE_BUFFER;
in_params[0].buffer.length = 16;
in_params[0].buffer.pointer = OSC_UUID;
in_params[1].type = ACPI_TYPE_INTEGER;
in_params[1].integer.value = 1;
in_params[2].type = ACPI_TYPE_INTEGER;
in_params[2].integer.value = 3;
in_params[3].type = ACPI_TYPE_BUFFER;
in_params[3].buffer.length = 12;
in_params[3].buffer.pointer = (u8 *)context;
status = acpi_evaluate_object(handle, "_OSC", &input, &output);
if (ACPI_FAILURE (status))
return status;
out_obj = output.pointer;
if (out_obj->type != ACPI_TYPE_BUFFER) {
printk(KERN_DEBUG
"Evaluate _OSC returns wrong type\n");
status = AE_TYPE;
goto run_osc_out;
}
osc_dw0 = *((u32 *) out_obj->buffer.pointer);
if (osc_dw0) {
if (osc_dw0 & OSC_REQUEST_ERROR)
printk(KERN_DEBUG "_OSC request fails\n");
if (osc_dw0 & OSC_INVALID_UUID_ERROR)
printk(KERN_DEBUG "_OSC invalid UUID\n");
if (osc_dw0 & OSC_INVALID_REVISION_ERROR)
printk(KERN_DEBUG "_OSC invalid revision\n");
if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) {
printk(KERN_DEBUG "_OSC FW not grant req. control\n");
status = AE_SUPPORT;
goto run_osc_out;
}
status = AE_ERROR;
goto run_osc_out;
}
status = AE_OK;
run_osc_out:
kfree(output.pointer);
return status; return status;
} }
@ -215,15 +162,11 @@ run_osc_out:
**/ **/
acpi_status __pci_osc_support_set(u32 flags, const char *hid) acpi_status __pci_osc_support_set(u32 flags, const char *hid)
{ {
acpi_status retval = AE_NOT_FOUND; if (!(flags & OSC_SUPPORT_MASKS))
if (!(flags & OSC_SUPPORT_MASKS)) {
return AE_TYPE; return AE_TYPE;
}
acpi_get_devices(hid, acpi_get_devices(hid, acpi_query_osc,
acpi_query_osc, (void *)(unsigned long)flags, NULL);
(void *)(unsigned long)flags,
(void **) &retval );
return AE_OK; return AE_OK;
} }
@ -236,10 +179,11 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid)
**/ **/
acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
{ {
acpi_status status; acpi_status status;
u32 ctrlset; u32 ctrlset, control_set;
acpi_handle tmp; acpi_handle tmp;
struct acpi_osc_data *osc_data; struct acpi_osc_data *osc_data;
struct acpi_osc_args osc_args;
status = acpi_get_handle(handle, "_OSC", &tmp); status = acpi_get_handle(handle, "_OSC", &tmp);
if (ACPI_FAILURE(status)) if (ACPI_FAILURE(status))
@ -252,24 +196,25 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
} }
ctrlset = (flags & OSC_CONTROL_MASKS); ctrlset = (flags & OSC_CONTROL_MASKS);
if (!ctrlset) { if (!ctrlset)
return AE_TYPE; return AE_TYPE;
}
if (osc_data->ctrlset_buf[OSC_SUPPORT_TYPE] && if (osc_data->is_queried &&
((osc_data->global_ctrlsets & ctrlset) != ctrlset)) { ((osc_data->query_result & ctrlset) != ctrlset))
return AE_SUPPORT; return AE_SUPPORT;
}
osc_data->ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset; control_set = osc_data->control_set | ctrlset;
status = acpi_run_osc(handle, osc_data->ctrlset_buf); osc_args.capbuf[OSC_QUERY_TYPE] = 0;
if (ACPI_FAILURE (status)) { osc_args.capbuf[OSC_SUPPORT_TYPE] = osc_data->support_set;
osc_data->ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset; osc_args.capbuf[OSC_CONTROL_TYPE] = control_set;
} status = acpi_run_osc(handle, &osc_args);
if (ACPI_SUCCESS(status))
osc_data->control_set = control_set;
return status; return status;
} }
EXPORT_SYMBOL(pci_osc_control_set); EXPORT_SYMBOL(pci_osc_control_set);
#ifdef CONFIG_ACPI_SLEEP
/* /*
* _SxD returns the D-state with the highest power * _SxD returns the D-state with the highest power
* (lowest D-state number) supported in the S-state "x". * (lowest D-state number) supported in the S-state "x".
@ -313,7 +258,13 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
} }
return PCI_POWER_ERROR; return PCI_POWER_ERROR;
} }
#endif
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_power_manageable(handle) : false;
}
static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{ {
@ -326,12 +277,11 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
[PCI_D3hot] = ACPI_STATE_D3, [PCI_D3hot] = ACPI_STATE_D3,
[PCI_D3cold] = ACPI_STATE_D3 [PCI_D3cold] = ACPI_STATE_D3
}; };
int error = -EINVAL;
if (!handle)
return -ENODEV;
/* If the ACPI device has _EJ0, ignore the device */ /* If the ACPI device has _EJ0, ignore the device */
if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp))) if (!handle || ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
return 0; return -ENODEV;
switch (state) { switch (state) {
case PCI_D0: case PCI_D0:
@ -339,11 +289,41 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
case PCI_D2: case PCI_D2:
case PCI_D3hot: case PCI_D3hot:
case PCI_D3cold: case PCI_D3cold:
return acpi_bus_set_power(handle, state_conv[state]); error = acpi_bus_set_power(handle, state_conv[state]);
} }
return -EINVAL;
if (!error)
dev_printk(KERN_INFO, &dev->dev,
"power state changed by ACPI to D%d\n", state);
return error;
} }
static bool acpi_pci_can_wakeup(struct pci_dev *dev)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev);
return handle ? acpi_bus_can_wakeup(handle) : false;
}
static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
int error = acpi_pm_device_sleep_wake(&dev->dev, enable);
if (!error)
dev_printk(KERN_INFO, &dev->dev,
"wake-up capability %s by ACPI\n",
enable ? "enabled" : "disabled");
return error;
}
static struct pci_platform_pm_ops acpi_pci_platform_pm = {
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.choose_state = acpi_pci_choose_state,
.can_wakeup = acpi_pci_can_wakeup,
.sleep_wake = acpi_pci_sleep_wake,
};
/* ACPI bus type */ /* ACPI bus type */
static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
@ -395,10 +375,7 @@ static int __init acpi_pci_init(void)
ret = register_acpi_bus_type(&acpi_pci_bus); ret = register_acpi_bus_type(&acpi_pci_bus);
if (ret) if (ret)
return 0; return 0;
#ifdef CONFIG_ACPI_SLEEP pci_set_platform_pm(&acpi_pci_platform_pm);
platform_pci_choose_state = acpi_pci_choose_state;
#endif
platform_pci_set_power_state = acpi_pci_set_power_state;
return 0; return 0;
} }
arch_initcall(acpi_pci_init); arch_initcall(acpi_pci_init);
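A brief usage sketch of the reworked pci_osc_control_set() (illustrative only; example_claim_native_control is hypothetical, and OSC_SHPC_NATIVE_HP_CONTROL stands in for whichever OSC_CONTROL_MASKS bit a caller actually needs):

static acpi_status example_claim_native_control(acpi_handle handle)
{
        acpi_status status;

        status = pci_osc_control_set(handle, OSC_SHPC_NATIVE_HP_CONTROL);
        if (ACPI_FAILURE(status))
                printk(KERN_DEBUG "firmware retained hotplug control\n");
        return status;
}

Internally the call now reuses the cached support_set/query_result gathered by acpi_query_osc() and only re-runs _OSC with the accumulated control bits, as the rewritten function above shows.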

View file

@ -274,7 +274,57 @@ static int pci_device_remove(struct device * dev)
return 0; return 0;
} }
static int pci_device_suspend(struct device * dev, pm_message_t state) static void pci_device_shutdown(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
if (drv && drv->shutdown)
drv->shutdown(pci_dev);
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
}
#ifdef CONFIG_PM_SLEEP
/*
* Default "suspend" method for devices that have no driver provided suspend,
* or not even a driver at all.
*/
static void pci_default_pm_suspend(struct pci_dev *pci_dev)
{
pci_save_state(pci_dev);
/*
* mark its power state as "unknown", since we don't know if
* e.g. the BIOS will change its device state when we suspend.
*/
if (pci_dev->current_state == PCI_D0)
pci_dev->current_state = PCI_UNKNOWN;
}
/*
* Default "resume" method for devices that have no driver provided resume,
* or not even a driver at all.
*/
static int pci_default_pm_resume(struct pci_dev *pci_dev)
{
int retval = 0;
/* restore the PCI config space */
pci_restore_state(pci_dev);
/* if the device was enabled before suspend, reenable */
retval = pci_reenable_device(pci_dev);
/*
* if the device was busmaster before the suspend, make it busmaster
* again
*/
if (pci_dev->is_busmaster)
pci_set_master(pci_dev);
return retval;
}
static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{ {
struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver; struct pci_driver * drv = pci_dev->driver;
@ -284,18 +334,12 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
i = drv->suspend(pci_dev, state); i = drv->suspend(pci_dev, state);
suspend_report_result(drv->suspend, i); suspend_report_result(drv->suspend, i);
} else { } else {
pci_save_state(pci_dev); pci_default_pm_suspend(pci_dev);
/*
* mark its power state as "unknown", since we don't know if
* e.g. the BIOS will change its device state when we suspend.
*/
if (pci_dev->current_state == PCI_D0)
pci_dev->current_state = PCI_UNKNOWN;
} }
return i; return i;
} }
static int pci_device_suspend_late(struct device * dev, pm_message_t state) static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{ {
struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver; struct pci_driver * drv = pci_dev->driver;
@ -308,26 +352,7 @@ static int pci_device_suspend_late(struct device * dev, pm_message_t state)
return i; return i;
} }
/* static int pci_legacy_resume(struct device *dev)
* Default resume method for devices that have no driver provided resume,
* or not even a driver at all.
*/
static int pci_default_resume(struct pci_dev *pci_dev)
{
int retval = 0;
/* restore the PCI config space */
pci_restore_state(pci_dev);
/* if the device was enabled before suspend, reenable */
retval = pci_reenable_device(pci_dev);
/* if the device was busmaster before the suspend, make it busmaster again */
if (pci_dev->is_busmaster)
pci_set_master(pci_dev);
return retval;
}
static int pci_device_resume(struct device * dev)
{ {
int error; int error;
struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_dev * pci_dev = to_pci_dev(dev);
@ -336,34 +361,313 @@ static int pci_device_resume(struct device * dev)
if (drv && drv->resume) if (drv && drv->resume)
error = drv->resume(pci_dev); error = drv->resume(pci_dev);
else else
error = pci_default_resume(pci_dev); error = pci_default_pm_resume(pci_dev);
return error; return error;
} }
static int pci_device_resume_early(struct device * dev) static int pci_legacy_resume_early(struct device *dev)
{ {
int error = 0; int error = 0;
struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver; struct pci_driver * drv = pci_dev->driver;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (drv && drv->resume_early) if (drv && drv->resume_early)
error = drv->resume_early(pci_dev); error = drv->resume_early(pci_dev);
return error; return error;
} }
static void pci_device_shutdown(struct device *dev) static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
return error;
}
static void pci_pm_complete(struct device *dev)
{
struct device_driver *drv = dev->driver;
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
}
#ifdef CONFIG_SUSPEND
static int pci_pm_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->suspend) {
error = drv->pm->suspend(dev);
suspend_report_result(drv->pm->suspend, error);
} else {
pci_default_pm_suspend(pci_dev);
}
} else {
error = pci_legacy_suspend(dev, PMSG_SUSPEND);
}
pci_fixup_device(pci_fixup_suspend, pci_dev);
return error;
}
static int pci_pm_suspend_noirq(struct device *dev)
{ {
struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver; struct pci_driver *drv = pci_dev->driver;
int error = 0;
if (drv && drv->shutdown) if (drv && drv->pm) {
drv->shutdown(pci_dev); if (drv->pm->suspend_noirq) {
pci_msi_shutdown(pci_dev); error = drv->pm->suspend_noirq(dev);
pci_msix_shutdown(pci_dev); suspend_report_result(drv->pm->suspend_noirq, error);
}
} else {
error = pci_legacy_suspend_late(dev, PMSG_SUSPEND);
}
return error;
} }
static int pci_pm_resume(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (drv && drv->pm) {
error = drv->pm->resume ? drv->pm->resume(dev) :
pci_default_pm_resume(pci_dev);
} else {
error = pci_legacy_resume(dev);
}
return error;
}
static int pci_pm_resume_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_resume_early, pci_dev);
if (drv && drv->pm) {
if (drv->pm->resume_noirq)
error = drv->pm->resume_noirq(dev);
} else {
error = pci_legacy_resume_early(dev);
}
return error;
}
#else /* !CONFIG_SUSPEND */
#define pci_pm_suspend NULL
#define pci_pm_suspend_noirq NULL
#define pci_pm_resume NULL
#define pci_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
static int pci_pm_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->freeze) {
error = drv->pm->freeze(dev);
suspend_report_result(drv->pm->freeze, error);
} else {
pci_default_pm_suspend(pci_dev);
}
} else {
error = pci_legacy_suspend(dev, PMSG_FREEZE);
pci_fixup_device(pci_fixup_suspend, pci_dev);
}
return error;
}
static int pci_pm_freeze_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->freeze_noirq) {
error = drv->pm->freeze_noirq(dev);
suspend_report_result(drv->pm->freeze_noirq, error);
}
} else {
error = pci_legacy_suspend_late(dev, PMSG_FREEZE);
}
return error;
}
static int pci_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->thaw)
error = drv->pm->thaw(dev);
} else {
pci_fixup_device(pci_fixup_resume, to_pci_dev(dev));
error = pci_legacy_resume(dev);
}
return error;
}
static int pci_pm_thaw_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
} else {
pci_fixup_device(pci_fixup_resume_early, pci_dev);
error = pci_legacy_resume_early(dev);
}
return error;
}
static int pci_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev));
if (drv && drv->pm) {
if (drv->pm->poweroff) {
error = drv->pm->poweroff(dev);
suspend_report_result(drv->pm->poweroff, error);
}
} else {
error = pci_legacy_suspend(dev, PMSG_HIBERNATE);
}
return error;
}
static int pci_pm_poweroff_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
int error = 0;
if (drv && drv->pm) {
if (drv->pm->poweroff_noirq) {
error = drv->pm->poweroff_noirq(dev);
suspend_report_result(drv->pm->poweroff_noirq, error);
}
} else {
error = pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
}
return error;
}
static int pci_pm_restore(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct device_driver *drv = dev->driver;
int error;
if (drv && drv->pm) {
error = drv->pm->restore ? drv->pm->restore(dev) :
pci_default_pm_resume(pci_dev);
} else {
error = pci_legacy_resume(dev);
}
pci_fixup_device(pci_fixup_resume, pci_dev);
return error;
}
static int pci_pm_restore_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = pci_dev->driver;
int error = 0;
pci_fixup_device(pci_fixup_resume, pci_dev);
if (drv && drv->pm) {
if (drv->pm->restore_noirq)
error = drv->pm->restore_noirq(dev);
} else {
error = pci_legacy_resume_early(dev);
}
pci_fixup_device(pci_fixup_resume_early, pci_dev);
return error;
}
#else /* !CONFIG_HIBERNATION */
#define pci_pm_freeze NULL
#define pci_pm_freeze_noirq NULL
#define pci_pm_thaw NULL
#define pci_pm_thaw_noirq NULL
#define pci_pm_poweroff NULL
#define pci_pm_poweroff_noirq NULL
#define pci_pm_restore NULL
#define pci_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATION */
struct pm_ext_ops pci_pm_ops = {
.base = {
.prepare = pci_pm_prepare,
.complete = pci_pm_complete,
.suspend = pci_pm_suspend,
.resume = pci_pm_resume,
.freeze = pci_pm_freeze,
.thaw = pci_pm_thaw,
.poweroff = pci_pm_poweroff,
.restore = pci_pm_restore,
},
.suspend_noirq = pci_pm_suspend_noirq,
.resume_noirq = pci_pm_resume_noirq,
.freeze_noirq = pci_pm_freeze_noirq,
.thaw_noirq = pci_pm_thaw_noirq,
.poweroff_noirq = pci_pm_poweroff_noirq,
.restore_noirq = pci_pm_restore_noirq,
};
#define PCI_PM_OPS_PTR &pci_pm_ops
#else /* !CONFIG_PM_SLEEP */
#define PCI_PM_OPS_PTR NULL
#endif /* !CONFIG_PM_SLEEP */
/** /**
* __pci_register_driver - register a new pci driver * __pci_register_driver - register a new pci driver
* @drv: the driver structure to register * @drv: the driver structure to register
@ -386,6 +690,9 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner,
drv->driver.owner = owner; drv->driver.owner = owner;
drv->driver.mod_name = mod_name; drv->driver.mod_name = mod_name;
if (drv->pm)
drv->driver.pm = &drv->pm->base;
spin_lock_init(&drv->dynids.lock); spin_lock_init(&drv->dynids.lock);
INIT_LIST_HEAD(&drv->dynids.list); INIT_LIST_HEAD(&drv->dynids.list);
@ -511,12 +818,9 @@ struct bus_type pci_bus_type = {
.uevent = pci_uevent, .uevent = pci_uevent,
.probe = pci_device_probe, .probe = pci_device_probe,
.remove = pci_device_remove, .remove = pci_device_remove,
.suspend = pci_device_suspend,
.suspend_late = pci_device_suspend_late,
.resume_early = pci_device_resume_early,
.resume = pci_device_resume,
.shutdown = pci_device_shutdown, .shutdown = pci_device_shutdown,
.dev_attrs = pci_dev_attrs, .dev_attrs = pci_dev_attrs,
.pm = PCI_PM_OPS_PTR,
}; };
static int __init pci_driver_init(void) static int __init pci_driver_init(void)
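For illustration, a driver can now supply the new struct pm_ext_ops through its pci_driver and have the core wire it into driver.pm, as the __pci_register_driver() hunk above does. A minimal hedged sketch (the foo_* names are hypothetical; probe/remove/id_table are omitted for brevity, and the 'pm' member of struct pci_driver is assumed from the drv->pm use above):

static int foo_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);

        pci_save_state(pdev);
        return 0;
}

static struct pm_ext_ops foo_pm_ops = {
        .base = {
                .suspend = foo_suspend,
        },
};

static struct pci_driver foo_driver = {
        .name = "foo",
        .pm   = &foo_pm_ops,    /* picked up by __pci_register_driver() */
};

Drivers that do not provide a pm_ext_ops keep working through the pci_legacy_* wrappers above.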

View file

@ -1,6 +1,4 @@
/* /*
* $Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
*
* PCI Bus Services, see include/linux/pci.h for further explanation. * PCI Bus Services, see include/linux/pci.h for further explanation.
* *
* Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
@ -19,6 +17,7 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/log2.h> #include <linux/log2.h>
#include <linux/pci-aspm.h> #include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */ #include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h" #include "pci.h"
@ -378,74 +377,90 @@ pci_restore_bars(struct pci_dev *dev)
pci_update_resource(dev, &dev->resource[i], i); pci_update_resource(dev, &dev->resource[i], i);
} }
int (*platform_pci_set_power_state)(struct pci_dev *dev, pci_power_t t); static struct pci_platform_pm_ops *pci_platform_pm;
int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
if (!ops->is_manageable || !ops->set_state || !ops->choose_state
|| !ops->sleep_wake || !ops->can_wakeup)
return -EINVAL;
pci_platform_pm = ops;
return 0;
}
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}
static inline int platform_pci_set_power_state(struct pci_dev *dev,
pci_power_t t)
{
return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
return pci_platform_pm ?
pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}
static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}
static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
return pci_platform_pm ?
pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}
/** /**
* pci_set_power_state - Set the power state of a PCI device * pci_raw_set_power_state - Use PCI PM registers to set the power state of
* @dev: PCI device to be suspended * given PCI device
* @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering * @dev: PCI device to handle.
* * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
* Transition a device to a new power state, using the Power Management
* Capabilities in the device's config space.
* *
* RETURN VALUE: * RETURN VALUE:
* -EINVAL if trying to enter a lower state than we're already in. * -EINVAL if the requested state is invalid.
* 0 if we're already in the requested state. * -EIO if device does not support PCI PM or its PM capabilities register has a
* -EIO if device does not support PCI PM. * wrong version, or device doesn't support the requested state.
* 0 if we can successfully change the power state. * 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/ */
int static int
pci_set_power_state(struct pci_dev *dev, pci_power_t state) pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{ {
int pm, need_restore = 0; u16 pmcsr;
u16 pmcsr, pmc; bool need_restore = false;
/* bound the state we're entering */ if (!dev->pm_cap)
if (state > PCI_D3hot)
state = PCI_D3hot;
/*
* If the device or the parent bridge can't support PCI PM, ignore
* the request if we're doing anything besides putting it into D0
* (which would only happen on boot).
*/
if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
return 0;
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
/* abort if the device doesn't support PM capabilities */
if (!pm)
return -EIO; return -EIO;
if (state < PCI_D0 || state > PCI_D3hot)
return -EINVAL;
/* Validate current state: /* Validate current state:
* Can enter D0 from any state, but if we can only go deeper * Can enter D0 from any state, but if we can only go deeper
* to sleep if we're already in a low power state * to sleep if we're already in a low power state
*/ */
if (state != PCI_D0 && dev->current_state > state) { if (dev->current_state == state) {
printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n", /* we're already there */
__func__, pci_name(dev), state, dev->current_state); return 0;
} else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
&& dev->current_state > state) {
dev_err(&dev->dev, "invalid power transition "
"(from state %d to %d)\n", dev->current_state, state);
return -EINVAL; return -EINVAL;
} else if (dev->current_state == state)
return 0; /* we're already there */
pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
printk(KERN_DEBUG
"PCI: %s has unsupported PM cap regs version (%u)\n",
pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
return -EIO;
} }
/* check if this device supports the desired state */ /* check if this device supports the desired state */
if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1)) if ((state == PCI_D1 && !dev->d1_support)
return -EIO; || (state == PCI_D2 && !dev->d2_support))
else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
return -EIO; return -EIO;
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr); pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
/* If we're (effectively) in D3, force entire word to 0. /* If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and * This doesn't affect PME_Status, disables PME_En, and
@ -461,7 +476,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
case PCI_UNKNOWN: /* Boot-up */ case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = 1; need_restore = true;
/* Fall-through: force to D0 */ /* Fall-through: force to D0 */
default: default:
pmcsr = 0; pmcsr = 0;
@ -469,7 +484,7 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
} }
/* enter specified state */ /* enter specified state */
pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr); pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
/* Mandatory power management transition delays */ /* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */ /* see PCI PM 1.1 5.6.1 table 18 */
@ -478,13 +493,6 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
else if (state == PCI_D2 || dev->current_state == PCI_D2) else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(200); udelay(200);
/*
* Give firmware a chance to be called, such as ACPI _PRx, _PSx
* Firmware method after native method ?
*/
if (platform_pci_set_power_state)
platform_pci_set_power_state(dev, state);
dev->current_state = state; dev->current_state = state;
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
@ -508,7 +516,76 @@ pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return 0; return 0;
} }
pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev); /**
* pci_update_current_state - Read PCI power state of given device from its
* PCI PM registers and cache it
* @dev: PCI device to handle.
*/
static void pci_update_current_state(struct pci_dev *dev)
{
if (dev->pm_cap) {
u16 pmcsr;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}
}
/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to handle.
* @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
*
* Transition a device to a new power state, using the platform firmware and/or
* the device's PCI PM registers.
*
* RETURN VALUE:
* -EINVAL if the requested state is invalid.
* -EIO if device does not support PCI PM or its PM capabilities register has a
* wrong version, or device doesn't support the requested state.
* 0 if device already is in the requested state.
* 0 if device's power state has been successfully changed.
*/
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
int error;
/* bound the state we're entering */
if (state > PCI_D3hot)
state = PCI_D3hot;
else if (state < PCI_D0)
state = PCI_D0;
else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
/*
* If the device or the parent bridge do not support PCI PM,
* ignore the request if we're doing anything other than putting
* it into D0 (which would only happen on boot).
*/
return 0;
if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
/*
* Allow the platform to change the state, for example via ACPI
* _PR0, _PS0 and some such, but do not trust it.
*/
int ret = platform_pci_set_power_state(dev, PCI_D0);
if (!ret)
pci_update_current_state(dev);
}
error = pci_raw_set_power_state(dev, state);
if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
/* Allow the platform to finalize the transition */
int ret = platform_pci_set_power_state(dev, state);
if (!ret) {
pci_update_current_state(dev);
error = 0;
}
}
return error;
}
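A short usage sketch of the reworked pci_set_power_state() from a legacy driver's suspend/resume path (the mydev_* names are hypothetical):

static int mydev_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int mydev_resume(struct pci_dev *pdev)
{
        int error = pci_set_power_state(pdev, PCI_D0);

        pci_restore_state(pdev);
        return error;
}

The platform callback is consulted before a transition to D0 and after a transition to D1-D3hot, so ACPI _PS0/_PS3 and the PCI PM registers stay in agreement.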
/** /**
* pci_choose_state - Choose the power state of a PCI device * pci_choose_state - Choose the power state of a PCI device
@ -527,11 +604,9 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
if (!pci_find_capability(dev, PCI_CAP_ID_PM)) if (!pci_find_capability(dev, PCI_CAP_ID_PM))
return PCI_D0; return PCI_D0;
if (platform_pci_choose_state) { ret = platform_pci_choose_state(dev);
ret = platform_pci_choose_state(dev); if (ret != PCI_POWER_ERROR)
if (ret != PCI_POWER_ERROR) return ret;
return ret;
}
switch (state.event) { switch (state.event) {
case PM_EVENT_ON: case PM_EVENT_ON:
@ -543,7 +618,8 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
case PM_EVENT_HIBERNATE: case PM_EVENT_HIBERNATE:
return PCI_D3hot; return PCI_D3hot;
default: default:
printk("Unrecognized suspend event %d\n", state.event); dev_info(&dev->dev, "unrecognized suspend event %d\n",
state.event);
BUG(); BUG();
} }
return PCI_D0; return PCI_D0;
@ -568,7 +644,7 @@ static int pci_save_pcie_state(struct pci_dev *dev)
else else
found = 1; found = 1;
if (!save_state) { if (!save_state) {
dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
return -ENOMEM; return -ENOMEM;
} }
cap = (u16 *)&save_state->data[0]; cap = (u16 *)&save_state->data[0];
@ -619,7 +695,7 @@ static int pci_save_pcix_state(struct pci_dev *dev)
else else
found = 1; found = 1;
if (!save_state) { if (!save_state) {
dev_err(&dev->dev, "Out of memory in pci_save_pcie_state\n"); dev_err(&dev->dev, "out of memory in pci_save_pcie_state\n");
return -ENOMEM; return -ENOMEM;
} }
cap = (u16 *)&save_state->data[0]; cap = (u16 *)&save_state->data[0];
@ -685,10 +761,9 @@ pci_restore_state(struct pci_dev *dev)
for (i = 15; i >= 0; i--) { for (i = 15; i >= 0; i--) {
pci_read_config_dword(dev, i * 4, &val); pci_read_config_dword(dev, i * 4, &val);
if (val != dev->saved_config_space[i]) { if (val != dev->saved_config_space[i]) {
printk(KERN_DEBUG "PM: Writing back config space on " dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
"device %s at offset %x (was %x, writing %x)\n", "space at offset %#x (was %#x, writing %#x)\n",
pci_name(dev), i, i, val, (int)dev->saved_config_space[i]);
val, (int)dev->saved_config_space[i]);
pci_write_config_dword(dev,i * 4, pci_write_config_dword(dev,i * 4,
dev->saved_config_space[i]); dev->saved_config_space[i]);
} }
@ -960,6 +1035,46 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
return pcibios_set_pcie_reset_state(dev, state); return pcibios_set_pcie_reset_state(dev, state);
} }
/**
* pci_pme_capable - check the capability of PCI device to generate PME#
* @dev: PCI device to handle.
* @state: PCI state from which device will issue PME#.
*/
static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
if (!dev->pm_cap)
return false;
return !!(dev->pme_support & (1 << state));
}
/**
* pci_pme_active - enable or disable PCI device's PME# function
* @dev: PCI device to handle.
* @enable: 'true' to enable PME# generation; 'false' to disable it.
*
* The caller must verify that the device is capable of generating PME# before
* calling this function with @enable equal to 'true'.
*/
static void pci_pme_active(struct pci_dev *dev, bool enable)
{
u16 pmcsr;
if (!dev->pm_cap)
return;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
/* Clear PME_Status by writing 1 to it and enable PME# */
pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
if (!enable)
pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
/** /**
* pci_enable_wake - enable PCI device as wakeup event source * pci_enable_wake - enable PCI device as wakeup event source
* @dev: PCI device affected * @dev: PCI device affected
@ -971,66 +1086,173 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
* called automatically by this routine. * called automatically by this routine.
* *
* Devices with legacy power management (no standard PCI PM capabilities) * Devices with legacy power management (no standard PCI PM capabilities)
* always require such platform hooks. Depending on the platform, devices * always require such platform hooks.
* supporting the standard PCI PME# signal may require such platform hooks;
* they always update bits in config space to allow PME# generation.
* *
* -EIO is returned if the device can't ever be a wakeup event source. * RETURN VALUE:
* -EINVAL is returned if the device can't generate wakeup events from * 0 is returned on success
* the specified PCI state. Returns zero if the operation is successful. * -EINVAL is returned if device is not supposed to wake up the system
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/ */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{ {
int pm; int error = 0;
int status; bool pme_done = false;
u16 value;
/* Note that drivers should verify device_may_wakeup(&dev->dev) if (!device_may_wakeup(&dev->dev))
* before calling this function. Platform code should report return -EINVAL;
* errors when drivers try to enable wakeup on devices that
* can't issue wakeups, or on which wakeups were disabled by /*
* userspace updating the /sys/devices.../power/wakeup file. * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
* Anderson we should be doing PME# wake enable followed by ACPI wake
* enable. To disable wake-up we call the platform first, for symmetry.
*/ */
status = call_platform_enable_wakeup(&dev->dev, enable); if (!enable && platform_pci_can_wakeup(dev))
error = platform_pci_sleep_wake(dev, false);
if (!enable || pci_pme_capable(dev, state)) {
pci_pme_active(dev, enable);
pme_done = true;
}
if (enable && platform_pci_can_wakeup(dev))
error = platform_pci_sleep_wake(dev, true);
return pme_done ? 0 : error;
}
/**
* pci_prepare_to_sleep - prepare PCI device for system-wide transition into
* a sleep state
* @dev: Device to handle.
*
* Choose the power state appropriate for the device depending on whether
* it can wake up the system and/or is power manageable by the platform
* (PCI_D3hot is the default) and put the device into that state.
*/
int pci_prepare_to_sleep(struct pci_dev *dev)
{
pci_power_t target_state = PCI_D3hot;
int error;
if (platform_pci_power_manageable(dev)) {
/*
* Call the platform to choose the target state of the device
* and enable wake-up from this state if supported.
*/
pci_power_t state = platform_pci_choose_state(dev);
switch (state) {
case PCI_POWER_ERROR:
case PCI_UNKNOWN:
break;
case PCI_D1:
case PCI_D2:
if (pci_no_d1d2(dev))
break;
default:
target_state = state;
}
} else if (device_may_wakeup(&dev->dev)) {
/*
* Find the deepest state from which the device can generate
* wake-up events, make it the target state and enable device
* to generate PME#.
*/
if (!dev->pm_cap)
return -EIO;
if (dev->pme_support) {
while (target_state
&& !(dev->pme_support & (1 << target_state)))
target_state--;
}
}
pci_enable_wake(dev, target_state, true);
error = pci_set_power_state(dev, target_state);
if (error)
pci_enable_wake(dev, target_state, false);
return error;
}
/**
* pci_back_from_sleep - turn PCI device on during system-wide transition into
* the working state
* @dev: Device to handle.
*
* Disable device's system wake-up capability and put it into D0.
*/
int pci_back_from_sleep(struct pci_dev *dev)
{
pci_enable_wake(dev, PCI_D0, false);
return pci_set_power_state(dev, PCI_D0);
}
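A hedged sketch of how a driver's legacy suspend/resume hooks could use the two new helpers (the mydrv_* names are hypothetical):

static int mydrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        /* Chooses the target state, arms wake-up if allowed, and enters it. */
        return pci_prepare_to_sleep(pdev);
}

static int mydrv_resume(struct pci_dev *pdev)
{
        /* Disables wake-up and returns the device to D0. */
        pci_back_from_sleep(pdev);
        pci_restore_state(pdev);
        return 0;
}

Both helpers are exported at the bottom of this file.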
/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
*/
void pci_pm_init(struct pci_dev *dev)
{
int pm;
u16 pmc;
dev->pm_cap = 0;
/* find PCI PM capability in list */ /* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM); pm = pci_find_capability(dev, PCI_CAP_ID_PM);
/* If device doesn't support PM Capabilities, but caller wants to
* disable wake events, it's a NOP. Otherwise fail unless the
* platform hooks handled this legacy device already.
*/
if (!pm) if (!pm)
return enable ? status : 0; return;
/* Check device's ability to generate PME# */ /* Check device's ability to generate PME# */
pci_read_config_word(dev,pm+PCI_PM_PMC,&value); pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
value &= PCI_PM_CAP_PME_MASK; if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */ dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
pmc & PCI_PM_CAP_VER_MASK);
/* Check if it can generate PME# from requested state. */ return;
if (!value || !(value & (1 << state))) {
/* if it can't, revert what the platform hook changed,
* always reporting the base "EINVAL, can't PME#" error
*/
if (enable)
call_platform_enable_wakeup(&dev->dev, 0);
return enable ? -EINVAL : 0;
} }
pci_read_config_word(dev, pm + PCI_PM_CTRL, &value); dev->pm_cap = pm;
/* Clear PME_Status by writing 1 to it and enable PME# */ dev->d1_support = false;
value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; dev->d2_support = false;
if (!pci_no_d1d2(dev)) {
if (pmc & PCI_PM_CAP_D1) {
dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n");
dev->d1_support = true;
}
if (pmc & PCI_PM_CAP_D2) {
dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n");
dev->d2_support = true;
}
}
if (!enable) pmc &= PCI_PM_CAP_PME_MASK;
value &= ~PCI_PM_CTRL_PME_ENABLE; if (pmc) {
dev_printk(KERN_INFO, &dev->dev,
pci_write_config_word(dev, pm + PCI_PM_CTRL, value); "PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
return 0; (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
(pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
/*
* Make device's PM flags reflect the wake-up capability, but
* let the user space enable it to wake up the system as needed.
*/
device_set_wakeup_capable(&dev->dev, true);
device_set_wakeup_enable(&dev->dev, false);
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
} else {
dev->pme_support = 0;
}
} }
int int
@ -1116,13 +1338,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
return 0; return 0;
err_out: err_out:
printk (KERN_WARNING "PCI: Unable to reserve %s region #%d:%llx@%llx " dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n",
"for device %s\n", bar,
pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
bar + 1, /* PCI BAR # */ (unsigned long long)pci_resource_start(pdev, bar),
(unsigned long long)pci_resource_len(pdev, bar), (unsigned long long)pci_resource_end(pdev, bar));
(unsigned long long)pci_resource_start(pdev, bar),
pci_name(pdev));
return -EBUSY; return -EBUSY;
} }
@ -1214,7 +1434,7 @@ pci_set_master(struct pci_dev *dev)
pci_read_config_word(dev, PCI_COMMAND, &cmd); pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (! (cmd & PCI_COMMAND_MASTER)) { if (! (cmd & PCI_COMMAND_MASTER)) {
pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev)); dev_dbg(&dev->dev, "enabling bus mastering\n");
cmd |= PCI_COMMAND_MASTER; cmd |= PCI_COMMAND_MASTER;
pci_write_config_word(dev, PCI_COMMAND, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd);
} }
@ -1279,8 +1499,8 @@ pci_set_cacheline_size(struct pci_dev *dev)
if (cacheline_size == pci_cache_line_size) if (cacheline_size == pci_cache_line_size)
return 0; return 0;
printk(KERN_DEBUG "PCI: cache line size of %d is not supported " dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
"by device %s\n", pci_cache_line_size << 2, pci_name(dev)); "supported\n", pci_cache_line_size << 2);
return -EINVAL; return -EINVAL;
} }
@ -1305,8 +1525,7 @@ pci_set_mwi(struct pci_dev *dev)
pci_read_config_word(dev, PCI_COMMAND, &cmd); pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (! (cmd & PCI_COMMAND_INVALIDATE)) { if (! (cmd & PCI_COMMAND_INVALIDATE)) {
pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
pci_name(dev));
cmd |= PCI_COMMAND_INVALIDATE; cmd |= PCI_COMMAND_INVALIDATE;
pci_write_config_word(dev, PCI_COMMAND, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd);
} }
@ -1702,5 +1921,7 @@ EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state); EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state); EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake); EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

View file

@ -5,10 +5,36 @@ extern int pci_create_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev); extern void pci_remove_sysfs_dev_files(struct pci_dev *pdev);
extern void pci_cleanup_rom(struct pci_dev *dev); extern void pci_cleanup_rom(struct pci_dev *dev);
-/* Firmware callbacks */
-extern pci_power_t (*platform_pci_choose_state)(struct pci_dev *dev);
-extern int (*platform_pci_set_power_state)(struct pci_dev *dev,
-					    pci_power_t state);
/**
 * Firmware PM callbacks
 *
 * @is_manageable - returns 'true' if given device is power manageable by the
* platform firmware
*
* @set_state - invokes the platform firmware to set the device's power state
*
* @choose_state - returns PCI power state of given device preferred by the
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
*
* @can_wakeup - returns 'true' if given device is capable of waking up the
* system from a sleeping state
*
* @sleep_wake - enables/disables the system wake up capability of given device
*
* If given platform is generally capable of power managing PCI devices, all of
* these callbacks are mandatory.
*/
struct pci_platform_pm_ops {
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*choose_state)(struct pci_dev *dev);
bool (*can_wakeup)(struct pci_dev *dev);
int (*sleep_wake)(struct pci_dev *dev, bool enable);
};
extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
extern void pci_pm_init(struct pci_dev *dev);
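For illustration only: a minimal sketch of a platform backend filling in these callbacks and registering them with pci_set_platform_pm(). All example_* names are hypothetical; the real user of this interface in this series is the ACPI glue, not the code below.

#include <linux/init.h>
#include <linux/pci.h>
#include "pci.h"	/* struct pci_platform_pm_ops, pci_set_platform_pm() */

static bool example_is_manageable(struct pci_dev *dev)
{
	return true;	/* pretend firmware can manage every PCI device */
}

static int example_set_state(struct pci_dev *dev, pci_power_t state)
{
	return 0;	/* a real backend would call into firmware here */
}

static pci_power_t example_choose_state(struct pci_dev *dev)
{
	return PCI_D3hot;	/* deepest state for system-wide sleep */
}

static bool example_can_wakeup(struct pci_dev *dev)
{
	return false;
}

static int example_sleep_wake(struct pci_dev *dev, bool enable)
{
	return enable ? -ENODEV : 0;	/* no wake-up events available */
}

static struct pci_platform_pm_ops example_platform_pm = {
	.is_manageable	= example_is_manageable,
	.set_state	= example_set_state,
	.choose_state	= example_choose_state,
	.can_wakeup	= example_can_wakeup,
	.sleep_wake	= example_sleep_wake,
};

static int __init example_platform_pm_init(void)
{
	/* All callbacks are mandatory, as the comment above states. */
	return pci_set_platform_pm(&example_platform_pm);
}
arch_initcall(example_platform_pm_init);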
extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
@ -105,3 +131,16 @@ pci_match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
} }
struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
/* PCI slot sysfs helper code */
#define to_pci_slot(s) container_of(s, struct pci_slot, kobj)
extern struct kset *pci_slots_kset;
struct pci_slot_attribute {
struct attribute attr;
ssize_t (*show)(struct pci_slot *, char *);
ssize_t (*store)(struct pci_slot *, const char *, size_t);
};
#define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr)

View file

@ -26,6 +26,7 @@
#include <linux/pcieport_if.h> #include <linux/pcieport_if.h>
#include "aerdrv.h" #include "aerdrv.h"
#include "../../pci.h"
/* /*
* Version Information * Version Information
@ -219,8 +220,7 @@ static int __devinit aer_probe (struct pcie_device *dev,
/* Alloc rpc data structure */ /* Alloc rpc data structure */
if (!(rpc = aer_alloc_rpc(dev))) { if (!(rpc = aer_alloc_rpc(dev))) {
printk(KERN_DEBUG "%s: Alloc rpc fails on PCIE device[%s]\n", dev_printk(KERN_DEBUG, device, "alloc rpc failed\n");
__func__, device->bus_id);
aer_remove(dev); aer_remove(dev);
return -ENOMEM; return -ENOMEM;
} }
@ -228,8 +228,7 @@ static int __devinit aer_probe (struct pcie_device *dev,
/* Request IRQ ISR */ /* Request IRQ ISR */
if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", if ((status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv",
dev))) { dev))) {
printk(KERN_DEBUG "%s: Request ISR fails on PCIE device[%s]\n", dev_printk(KERN_DEBUG, device, "request IRQ failed\n");
__func__, device->bus_id);
aer_remove(dev); aer_remove(dev);
return status; return status;
} }
@ -273,7 +272,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
* to issue Configuration Requests to those devices. * to issue Configuration Requests to those devices.
*/ */
msleep(200); msleep(200);
printk(KERN_DEBUG "Complete link reset at Root[%s]\n", dev->dev.bus_id); dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
/* Enable Root Port's interrupt in response to error messages */ /* Enable Root Port's interrupt in response to error messages */
pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);

View file

@ -50,10 +50,10 @@ int aer_osc_setup(struct pcie_device *pciedev)
} }
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
printk(KERN_DEBUG "AER service couldn't init device %s - %s\n", dev_printk(KERN_DEBUG, &pciedev->device, "AER service couldn't "
pciedev->device.bus_id, "init device: %s\n",
(status == AE_SUPPORT || status == AE_NOT_FOUND) ? (status == AE_SUPPORT || status == AE_NOT_FOUND) ?
"no _OSC support" : "Run ACPI _OSC fails"); "no _OSC support" : "_OSC failed");
return -1; return -1;
} }

View file

@ -221,9 +221,9 @@ static void report_error_detected(struct pci_dev *dev, void *data)
* of a driver for this device is unaware of * of a driver for this device is unaware of
* its hw state. * its hw state.
*/ */
printk(KERN_DEBUG "Device ID[%s] has %s\n", dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
dev->dev.bus_id, (dev->driver) ? dev->driver ?
"no AER-aware driver" : "no driver"); "no AER-aware driver" : "no driver");
} }
return; return;
} }
@ -304,7 +304,7 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
{ {
struct aer_broadcast_data result_data; struct aer_broadcast_data result_data;
printk(KERN_DEBUG "Broadcast %s message\n", error_mesg); dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
result_data.state = state; result_data.state = state;
if (cb == report_error_detected) if (cb == report_error_detected)
result_data.result = PCI_ERS_RESULT_CAN_RECOVER; result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
@ -404,18 +404,16 @@ static pci_ers_result_t reset_link(struct pcie_device *aerdev,
data.aer_driver = data.aer_driver =
to_service_driver(aerdev->device.driver); to_service_driver(aerdev->device.driver);
} else { } else {
printk(KERN_DEBUG "No link-reset support to Device ID" dev_printk(KERN_DEBUG, &dev->dev, "no link-reset "
"[%s]\n", "support\n");
dev->dev.bus_id);
return PCI_ERS_RESULT_DISCONNECT; return PCI_ERS_RESULT_DISCONNECT;
} }
} }
status = data.aer_driver->reset_link(udev); status = data.aer_driver->reset_link(udev);
if (status != PCI_ERS_RESULT_RECOVERED) { if (status != PCI_ERS_RESULT_RECOVERED) {
printk(KERN_DEBUG "Link reset at upstream Device ID" dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream "
"[%s] failed\n", "device %s failed\n", pci_name(udev));
udev->dev.bus_id);
return PCI_ERS_RESULT_DISCONNECT; return PCI_ERS_RESULT_DISCONNECT;
} }
@ -511,10 +509,12 @@ static void handle_error_source(struct pcie_device * aerdev,
} else { } else {
status = do_recovery(aerdev, dev, info.severity); status = do_recovery(aerdev, dev, info.severity);
if (status == PCI_ERS_RESULT_RECOVERED) { if (status == PCI_ERS_RESULT_RECOVERED) {
printk(KERN_DEBUG "AER driver successfully recovered\n"); dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
"successfully recovered\n");
} else { } else {
/* TODO: Should kernel panic here? */ /* TODO: Should kernel panic here? */
printk(KERN_DEBUG "AER driver didn't recover\n"); dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't "
"recover\n");
} }
} }
} }

View file

@ -13,6 +13,7 @@
#include <linux/pm.h> #include <linux/pm.h>
#include <linux/pcieport_if.h> #include <linux/pcieport_if.h>
#include "portdrv.h"
static int pcie_port_bus_match(struct device *dev, struct device_driver *drv); static int pcie_port_bus_match(struct device *dev, struct device_driver *drv);
static int pcie_port_bus_suspend(struct device *dev, pm_message_t state); static int pcie_port_bus_suspend(struct device *dev, pm_message_t state);

View file

@ -23,20 +23,20 @@ static int pcie_port_probe_service(struct device *dev)
{ {
struct pcie_device *pciedev; struct pcie_device *pciedev;
struct pcie_port_service_driver *driver; struct pcie_port_service_driver *driver;
int status = -ENODEV; int status;
if (!dev || !dev->driver) if (!dev || !dev->driver)
return status; return -ENODEV;
driver = to_service_driver(dev->driver); driver = to_service_driver(dev->driver);
if (!driver || !driver->probe) if (!driver || !driver->probe)
return status; return -ENODEV;
pciedev = to_pcie_device(dev); pciedev = to_pcie_device(dev);
status = driver->probe(pciedev, driver->id_table); status = driver->probe(pciedev, driver->id_table);
if (!status) { if (!status) {
printk(KERN_DEBUG "Load service driver %s on pcie device %s\n", dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n",
driver->name, dev->bus_id); driver->name);
get_device(dev); get_device(dev);
} }
return status; return status;
@ -53,8 +53,8 @@ static int pcie_port_remove_service(struct device *dev)
pciedev = to_pcie_device(dev); pciedev = to_pcie_device(dev);
driver = to_service_driver(dev->driver); driver = to_service_driver(dev->driver);
if (driver && driver->remove) { if (driver && driver->remove) {
printk(KERN_DEBUG "Unload service driver %s on pcie device %s\n", dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
driver->name, dev->bus_id); driver->name);
driver->remove(pciedev); driver->remove(pciedev);
put_device(dev); put_device(dev);
} }
@ -103,7 +103,7 @@ static int pcie_port_resume_service(struct device *dev)
*/ */
static void release_pcie_device(struct device *dev) static void release_pcie_device(struct device *dev)
{ {
printk(KERN_DEBUG "Free Port Service[%s]\n", dev->bus_id); dev_printk(KERN_DEBUG, dev, "free port service\n");
kfree(to_pcie_device(dev)); kfree(to_pcie_device(dev));
} }
@ -150,7 +150,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
if (pos) { if (pos) {
struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] = struct msix_entry msix_entries[PCIE_PORT_DEVICE_MAXSERVICES] =
{{0, 0}, {0, 1}, {0, 2}, {0, 3}}; {{0, 0}, {0, 1}, {0, 2}, {0, 3}};
printk("%s Found MSIX capability\n", __func__); dev_info(&dev->dev, "found MSI-X capability\n");
status = pci_enable_msix(dev, msix_entries, nvec); status = pci_enable_msix(dev, msix_entries, nvec);
if (!status) { if (!status) {
int j = 0; int j = 0;
@ -165,7 +165,7 @@ static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask)
if (status) { if (status) {
pos = pci_find_capability(dev, PCI_CAP_ID_MSI); pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos) { if (pos) {
printk("%s Found MSI capability\n", __func__); dev_info(&dev->dev, "found MSI capability\n");
status = pci_enable_msi(dev); status = pci_enable_msi(dev);
if (!status) { if (!status) {
interrupt_mode = PCIE_PORT_MSI_MODE; interrupt_mode = PCIE_PORT_MSI_MODE;
@ -252,7 +252,7 @@ static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
return NULL; return NULL;
pcie_device_init(parent, device, port_type, service_type, irq,irq_mode); pcie_device_init(parent, device, port_type, service_type, irq,irq_mode);
printk(KERN_DEBUG "Allocate Port Service[%s]\n", device->device.bus_id); dev_printk(KERN_DEBUG, &device->device, "allocate port service\n");
return device; return device;
} }

View file

@ -91,9 +91,8 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
pci_set_master(dev); pci_set_master(dev);
if (!dev->irq && dev->pin) { if (!dev->irq && dev->pin) {
-		printk(KERN_WARNING
-		"%s->Dev[%04x:%04x] has invalid IRQ. Check vendor BIOS\n",
-		__func__, dev->vendor, dev->device);
		dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; "
			 "check vendor BIOS\n", dev->vendor, dev->device);
} }
if (pcie_port_device_register(dev)) { if (pcie_port_device_register(dev)) {
pci_disable_device(dev); pci_disable_device(dev);

View file

@ -277,8 +277,8 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
res->end = res->start + sz64; res->end = res->start + sz64;
#else #else
if (sz64 > 0x100000000ULL) { if (sz64 > 0x100000000ULL) {
printk(KERN_ERR "PCI: Unable to handle 64-bit " dev_err(&dev->dev, "BAR %d: can't handle 64-bit"
"BAR for device %s\n", pci_name(dev)); " BAR\n", pos);
res->start = 0; res->start = 0;
res->flags = 0; res->flags = 0;
} else if (lhi) { } else if (lhi) {
@ -329,7 +329,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
return; return;
if (dev->transparent) { if (dev->transparent) {
printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev)); dev_info(&dev->dev, "transparent bridge\n");
for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
child->resource[i] = child->parent->resource[i - 3]; child->resource[i] = child->parent->resource[i - 3];
} }
@ -392,7 +392,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
limit |= ((long) mem_limit_hi) << 32; limit |= ((long) mem_limit_hi) << 32;
#else #else
if (mem_base_hi || mem_limit_hi) { if (mem_base_hi || mem_limit_hi) {
printk(KERN_ERR "PCI: Unable to handle 64-bit address space for bridge %s\n", pci_name(dev)); dev_err(&dev->dev, "can't handle 64-bit "
"address space for bridge\n");
return; return;
} }
#endif #endif
@ -414,6 +415,7 @@ static struct pci_bus * pci_alloc_bus(void)
INIT_LIST_HEAD(&b->node); INIT_LIST_HEAD(&b->node);
INIT_LIST_HEAD(&b->children); INIT_LIST_HEAD(&b->children);
INIT_LIST_HEAD(&b->devices); INIT_LIST_HEAD(&b->devices);
INIT_LIST_HEAD(&b->slots);
} }
return b; return b;
} }
@ -511,8 +513,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
pr_debug("PCI: Scanning behind PCI bridge %s, config %06x, pass %d\n", dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
pci_name(dev), buses & 0xffffff, pass); buses & 0xffffff, pass);
/* Disable MasterAbortMode during probing to avoid reporting /* Disable MasterAbortMode during probing to avoid reporting
of bus errors (in some architectures) */ of bus errors (in some architectures) */
@ -535,8 +537,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
* ignore it. This can happen with the i450NX chipset. * ignore it. This can happen with the i450NX chipset.
*/ */
if (pci_find_bus(pci_domain_nr(bus), busnr)) { if (pci_find_bus(pci_domain_nr(bus), busnr)) {
printk(KERN_INFO "PCI: Bus %04x:%02x already known\n", dev_info(&dev->dev, "bus %04x:%02x already known\n",
pci_domain_nr(bus), busnr); pci_domain_nr(bus), busnr);
goto out; goto out;
} }
@ -711,8 +713,9 @@ static int pci_setup_device(struct pci_dev * dev)
{ {
u32 class; u32 class;
-	sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
-		dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
dev->revision = class & 0xff; dev->revision = class & 0xff;
@ -720,7 +723,7 @@ static int pci_setup_device(struct pci_dev * dev)
dev->class = class; dev->class = class;
class >>= 8; class >>= 8;
pr_debug("PCI: Found %s [%04x/%04x] %06x %02x\n", pci_name(dev), dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n",
dev->vendor, dev->device, class, dev->hdr_type); dev->vendor, dev->device, class, dev->hdr_type);
/* "Unknown power state" */ /* "Unknown power state" */
@ -788,13 +791,13 @@ static int pci_setup_device(struct pci_dev * dev)
break; break;
default: /* unknown header */ default: /* unknown header */
printk(KERN_ERR "PCI: device %s has unknown header type %02x, ignoring.\n", dev_err(&dev->dev, "unknown header type %02x, "
pci_name(dev), dev->hdr_type); "ignoring device\n", dev->hdr_type);
return -1; return -1;
bad: bad:
printk(KERN_ERR "PCI: %s: class %x doesn't match header type %02x. Ignoring class.\n", dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
pci_name(dev), class, dev->hdr_type); "type %02x)\n", class, dev->hdr_type);
dev->class = PCI_CLASS_NOT_DEFINED; dev->class = PCI_CLASS_NOT_DEFINED;
} }
@ -927,7 +930,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
return NULL; return NULL;
/* Card hasn't responded in 60 seconds? Must be stuck. */ /* Card hasn't responded in 60 seconds? Must be stuck. */
if (delay > 60 * 1000) { if (delay > 60 * 1000) {
printk(KERN_WARNING "Device %04x:%02x:%02x.%d not " printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
"responding\n", pci_domain_nr(bus), "responding\n", pci_domain_nr(bus),
bus->number, PCI_SLOT(devfn), bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn)); PCI_FUNC(devfn));
@ -984,6 +987,9 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
/* Fix up broken headers */ /* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev); pci_fixup_device(pci_fixup_header, dev);
/* Initialize power management of the device */
pci_pm_init(dev);
/* /*
* Add the device to our list of discovered devices * Add the device to our list of discovered devices
* and the bus list for fixup functions, etc. * and the bus list for fixup functions, etc.

View file

@ -1,6 +1,4 @@
/* /*
* $Id: proc.c,v 1.13 1998/05/12 07:36:07 mj Exp $
*
* Procfs interface for the PCI bus. * Procfs interface for the PCI bus.
* *
* Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz> * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz>
@ -482,5 +480,5 @@ static int __init pci_proc_init(void)
return 0; return 0;
} }
__initcall(pci_proc_init); device_initcall(pci_proc_init);

View file

@ -556,7 +556,7 @@ static void quirk_via_ioapic(struct pci_dev *dev)
pci_write_config_byte (dev, 0x58, tmp); pci_write_config_byte (dev, 0x58, tmp);
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
/* /*
* VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit.
@ -576,7 +576,7 @@ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
} }
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
/* /*
* The AMD io apic can hang the box when an apic irq is masked. * The AMD io apic can hang the box when an apic irq is masked.
@ -622,7 +622,7 @@ static void quirk_amd_8131_ioapic(struct pci_dev *dev)
} }
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic);
#endif /* CONFIG_X86_IO_APIC */ #endif /* CONFIG_X86_IO_APIC */
/* /*
@ -774,7 +774,7 @@ static void quirk_cardbus_legacy(struct pci_dev *dev)
pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
} }
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_RESUME(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy);
/* /*
* Following the PCI ordering rules is optional on the AMD762. I'm not * Following the PCI ordering rules is optional on the AMD762. I'm not
@ -797,7 +797,7 @@ static void quirk_amd_ordering(struct pci_dev *dev)
} }
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
/* /*
* DreamWorks provided workaround for Dunord I-3000 problem * DreamWorks provided workaround for Dunord I-3000 problem
@ -865,7 +865,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev)
} }
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
{ {
@ -885,9 +885,9 @@ static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
} }
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
/* /*
* Serverworks CSB5 IDE does not fully support native mode * Serverworks CSB5 IDE does not fully support native mode
@ -1054,6 +1054,20 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
* its on-board VGA controller */ * its on-board VGA controller */
asus_hides_smbus = 1; asus_hides_smbus = 1;
} }
else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_IG)
switch(dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
asus_hides_smbus = 1;
}
else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
switch (dev->subsystem_device) {
case 0x001A: /* Compaq Deskpro EN SSF P667 815E */
/* Motherboard doesn't have host bridge
* subvendor/subdevice IDs, therefore checking
* its on-board VGA controller */
asus_hides_smbus = 1;
}
} }
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
@ -1068,6 +1082,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, as
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_IG, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
static void asus_hides_smbus_lpc(struct pci_dev *dev) static void asus_hides_smbus_lpc(struct pci_dev *dev)
{ {
@ -1093,31 +1109,61 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asu
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
-static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
-{
-	u32 val, rcba;
-	void __iomem *base;
-
-	if (likely(!asus_hides_smbus))
-		return;
-	pci_read_config_dword(dev, 0xF0, &rcba);
-	base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);	/* use bits 31:14, 16 kB aligned */
-	if (base == NULL) return;
-	val=readl(base + 0x3418); /* read the Function Disable register, dword mode only */
-	writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */
-	iounmap(base);
-	dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
-}

/* It appears we just have one such device. If not, we have a warning */
static void __iomem *asus_rcba_base;

static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
	u32 rcba;

	if (likely(!asus_hides_smbus))
		return;
	WARN_ON(asus_rcba_base);

	pci_read_config_dword(dev, 0xF0, &rcba);
	/* use bits 31:14, 16 kB aligned */
	asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
	if (asus_rcba_base == NULL)
		return;
}

static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
	u32 val;

	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	/* read the Function Disable register, dword mode only */
	val = readl(asus_rcba_base + 0x3418);
	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);	/* enable the SMBus device */
}

static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	iounmap(asus_rcba_base);
	asus_rcba_base = NULL;
	dev_info(&dev->dev, "Enabled ICH6/i801 SMBus device\n");
}

static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
	asus_hides_smbus_lpc_ich6_suspend(dev);
	asus_hides_smbus_lpc_ich6_resume_early(dev);
	asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
-DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
/* /*
* SiS 96x south bridge: BIOS typically hides SMBus device... * SiS 96x south bridge: BIOS typically hides SMBus device...
@ -1135,10 +1181,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
/* /*
* ... This is further complicated by the fact that some SiS96x south * ... This is further complicated by the fact that some SiS96x south
@ -1172,7 +1218,7 @@ static void quirk_sis_503(struct pci_dev *dev)
quirk_sis_96x_smbus(dev); quirk_sis_96x_smbus(dev);
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
/* /*
@ -1205,7 +1251,7 @@ static void asus_hides_ac97_lpc(struct pci_dev *dev)
} }
} }
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
@ -1270,12 +1316,12 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, qui
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
#endif #endif
@ -1521,6 +1567,10 @@ extern struct pci_fixup __start_pci_fixups_enable[];
extern struct pci_fixup __end_pci_fixups_enable[]; extern struct pci_fixup __end_pci_fixups_enable[];
extern struct pci_fixup __start_pci_fixups_resume[]; extern struct pci_fixup __start_pci_fixups_resume[];
extern struct pci_fixup __end_pci_fixups_resume[]; extern struct pci_fixup __end_pci_fixups_resume[];
extern struct pci_fixup __start_pci_fixups_resume_early[];
extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
@ -1553,6 +1603,16 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
end = __end_pci_fixups_resume; end = __end_pci_fixups_resume;
break; break;
case pci_fixup_resume_early:
start = __start_pci_fixups_resume_early;
end = __end_pci_fixups_resume_early;
break;
case pci_fixup_suspend:
start = __start_pci_fixups_suspend;
end = __end_pci_fixups_suspend;
break;
default: default:
/* stupid compiler warning, you would think with an enum... */ /* stupid compiler warning, you would think with an enum... */
return; return;
@ -1629,7 +1689,7 @@ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
} }
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap); quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_pcie_aer_ext_cap); quirk_nvidia_ck804_pcie_aer_ext_cap);
static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
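A hedged illustration of the two new fixup passes added above: quirks can now be attached to the suspend path and to the early resume path with the matching DECLARE_PCI_FIXUP_* macros. The vendor/device ID 0x1234 and the quirk bodies below are made up for the example, not taken from this series.

static void example_quirk_save(struct pci_dev *dev)
{
	/* runs during the suspend path, via pci_fixup_device(pci_fixup_suspend, dev) */
	dev_dbg(&dev->dev, "suspend-time quirk\n");
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, 0x1234, example_quirk_save);

static void example_quirk_restore(struct pci_dev *dev)
{
	/* runs early in resume, before the regular pci_fixup_resume pass */
	dev_dbg(&dev->dev, "early-resume quirk\n");
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, 0x1234, example_quirk_restore);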

View file

@ -27,13 +27,6 @@
#include <linux/slab.h> #include <linux/slab.h>
#define DEBUG_CONFIG 1
#if DEBUG_CONFIG
#define DBG(x...) printk(x)
#else
#define DBG(x...)
#endif
static void pbus_assign_resources_sorted(struct pci_bus *bus) static void pbus_assign_resources_sorted(struct pci_bus *bus)
{ {
struct pci_dev *dev; struct pci_dev *dev;
@ -81,8 +74,8 @@ void pci_setup_cardbus(struct pci_bus *bus)
struct pci_dev *bridge = bus->self; struct pci_dev *bridge = bus->self;
struct pci_bus_region region; struct pci_bus_region region;
printk("PCI: Bus %d, cardbus bridge: %s\n", dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n",
bus->number, pci_name(bridge)); pci_domain_nr(bus), bus->number);
pcibios_resource_to_bus(bridge, &region, bus->resource[0]); pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
if (bus->resource[0]->flags & IORESOURCE_IO) { if (bus->resource[0]->flags & IORESOURCE_IO) {
@ -90,7 +83,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
* The IO resource is allocated a range twice as large as it * The IO resource is allocated a range twice as large as it
* would normally need. This allows us to set both IO regs. * would normally need. This allows us to set both IO regs.
*/ */
printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
(unsigned long)region.start, (unsigned long)region.start,
(unsigned long)region.end); (unsigned long)region.end);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
@ -101,7 +94,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
pcibios_resource_to_bus(bridge, &region, bus->resource[1]); pcibios_resource_to_bus(bridge, &region, bus->resource[1]);
if (bus->resource[1]->flags & IORESOURCE_IO) { if (bus->resource[1]->flags & IORESOURCE_IO) {
printk(KERN_INFO " IO window: 0x%08lx-0x%08lx\n", dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n",
(unsigned long)region.start, (unsigned long)region.start,
(unsigned long)region.end); (unsigned long)region.end);
pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
@ -112,7 +105,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
pcibios_resource_to_bus(bridge, &region, bus->resource[2]); pcibios_resource_to_bus(bridge, &region, bus->resource[2]);
if (bus->resource[2]->flags & IORESOURCE_MEM) { if (bus->resource[2]->flags & IORESOURCE_MEM) {
printk(KERN_INFO " PREFETCH window: 0x%08lx-0x%08lx\n", dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n",
(unsigned long)region.start, (unsigned long)region.start,
(unsigned long)region.end); (unsigned long)region.end);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
@ -123,7 +116,7 @@ void pci_setup_cardbus(struct pci_bus *bus)
pcibios_resource_to_bus(bridge, &region, bus->resource[3]); pcibios_resource_to_bus(bridge, &region, bus->resource[3]);
if (bus->resource[3]->flags & IORESOURCE_MEM) { if (bus->resource[3]->flags & IORESOURCE_MEM) {
printk(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
(unsigned long)region.start, (unsigned long)region.start,
(unsigned long)region.end); (unsigned long)region.end);
pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
@ -151,7 +144,8 @@ static void pci_setup_bridge(struct pci_bus *bus)
struct pci_bus_region region; struct pci_bus_region region;
u32 l, bu, lu, io_upper16; u32 l, bu, lu, io_upper16;
DBG(KERN_INFO "PCI: Bridge: %s\n", pci_name(bridge)); dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n",
pci_domain_nr(bus), bus->number);
/* Set up the top and bottom of the PCI I/O segment for this bus. */ /* Set up the top and bottom of the PCI I/O segment for this bus. */
pcibios_resource_to_bus(bridge, &region, bus->resource[0]); pcibios_resource_to_bus(bridge, &region, bus->resource[0]);
@ -162,7 +156,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
l |= region.end & 0xf000; l |= region.end & 0xf000;
/* Set up upper 16 bits of I/O base/limit. */ /* Set up upper 16 bits of I/O base/limit. */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
DBG(KERN_INFO " IO window: %04lx-%04lx\n", dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n",
(unsigned long)region.start, (unsigned long)region.start,
(unsigned long)region.end); (unsigned long)region.end);
} }
@ -170,7 +164,7 @@ static void pci_setup_bridge(struct pci_bus *bus)
/* Clear upper 16 bits of I/O base/limit. */ /* Clear upper 16 bits of I/O base/limit. */
io_upper16 = 0; io_upper16 = 0;
l = 0x00f0; l = 0x00f0;
DBG(KERN_INFO " IO window: disabled.\n"); dev_info(&bridge->dev, " IO window: disabled\n");
} }
/* Temporarily disable the I/O range before updating PCI_IO_BASE. */ /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@ -185,13 +179,13 @@ static void pci_setup_bridge(struct pci_bus *bus)
if (bus->resource[1]->flags & IORESOURCE_MEM) { if (bus->resource[1]->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0; l = (region.start >> 16) & 0xfff0;
l |= region.end & 0xfff00000; l |= region.end & 0xfff00000;
DBG(KERN_INFO " MEM window: 0x%08lx-0x%08lx\n", dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n",
(unsigned long)region.start, (unsigned long)region.start,
(unsigned long)region.end); (unsigned long)region.end);
} }
else { else {
l = 0x0000fff0; l = 0x0000fff0;
DBG(KERN_INFO " MEM window: disabled.\n"); dev_info(&bridge->dev, " MEM window: disabled\n");
} }
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
@ -208,13 +202,13 @@ static void pci_setup_bridge(struct pci_bus *bus)
l |= region.end & 0xfff00000; l |= region.end & 0xfff00000;
bu = upper_32_bits(region.start); bu = upper_32_bits(region.start);
lu = upper_32_bits(region.end); lu = upper_32_bits(region.end);
DBG(KERN_INFO " PREFETCH window: 0x%016llx-0x%016llx\n", dev_info(&bridge->dev, " PREFETCH window: %#016llx-%#016llx\n",
(unsigned long long)region.start, (unsigned long long)region.start,
(unsigned long long)region.end); (unsigned long long)region.end);
} }
else { else {
l = 0x0000fff0; l = 0x0000fff0;
DBG(KERN_INFO " PREFETCH window: disabled.\n"); dev_info(&bridge->dev, " PREFETCH window: disabled\n");
} }
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
@ -361,9 +355,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long
align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start;
order = __ffs(align) - 20; order = __ffs(align) - 20;
if (order > 11) { if (order > 11) {
printk(KERN_WARNING "PCI: region %s/%d " dev_warn(&dev->dev, "BAR %d too large: "
"too large: 0x%016llx-0x%016llx\n", "%#016llx-%#016llx\n", i,
pci_name(dev), i,
(unsigned long long)r->start, (unsigned long long)r->start,
(unsigned long long)r->end); (unsigned long long)r->end);
r->flags = 0; r->flags = 0;
@ -529,8 +522,8 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus)
break; break;
default: default:
printk(KERN_INFO "PCI: not setting up bridge %s " dev_info(&dev->dev, "not setting up bridge for bus "
"for bus %d\n", pci_name(dev), b->number); "%04x:%02x\n", pci_domain_nr(b), b->number);
break; break;
} }
} }

View file

@ -47,8 +47,7 @@ pdev_fixup_irq(struct pci_dev *dev,
} }
dev->irq = irq; dev->irq = irq;
pr_debug("PCI: fixup irq: (%s) got %d\n", dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq);
kobject_name(&dev->dev.kobj), dev->irq);
/* Always tell the device, so the driver knows what is /* Always tell the device, so the driver knows what is
the real IRQ to use; the device does not use it. */ the real IRQ to use; the device does not use it. */

View file

@ -26,8 +26,7 @@
#include "pci.h" #include "pci.h"
-void
-pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
{
struct pci_bus_region region; struct pci_bus_region region;
u32 new, check, mask; u32 new, check, mask;
@ -43,20 +42,20 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
/* /*
* Ignore non-moveable resources. This might be legacy resources for * Ignore non-moveable resources. This might be legacy resources for
* which no functional BAR register exists or another important * which no functional BAR register exists or another important
- * system resource we should better not move around in system address
- * space.
 * system resource we shouldn't move around.
*/ */
if (res->flags & IORESOURCE_PCI_FIXED) if (res->flags & IORESOURCE_PCI_FIXED)
return; return;
pcibios_resource_to_bus(dev, &region, res); pcibios_resource_to_bus(dev, &region, res);
pr_debug(" got res [%llx:%llx] bus [%llx:%llx] flags %lx for " dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] "
"BAR %d of %s\n", (unsigned long long)res->start, "flags %#lx\n", resno,
(unsigned long long)res->start,
(unsigned long long)res->end, (unsigned long long)res->end,
(unsigned long long)region.start, (unsigned long long)region.start,
(unsigned long long)region.end, (unsigned long long)region.end,
(unsigned long)res->flags, resno, pci_name(dev)); (unsigned long)res->flags);
new = region.start | (res->flags & PCI_REGION_FLAG_MASK); new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
if (res->flags & IORESOURCE_IO) if (res->flags & IORESOURCE_IO)
@ -81,9 +80,8 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
pci_read_config_dword(dev, reg, &check); pci_read_config_dword(dev, reg, &check);
if ((new ^ check) & mask) { if ((new ^ check) & mask) {
printk(KERN_ERR "PCI: Error while updating region " dev_err(&dev->dev, "BAR %d: error updating (%#08x != %#08x)\n",
"%s/%d (%08x != %08x)\n", pci_name(dev), resno, resno, new, check);
new, check);
} }
if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == if ((new & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
@ -92,15 +90,14 @@ pci_update_resource(struct pci_dev *dev, struct resource *res, int resno)
pci_write_config_dword(dev, reg + 4, new); pci_write_config_dword(dev, reg + 4, new);
pci_read_config_dword(dev, reg + 4, &check); pci_read_config_dword(dev, reg + 4, &check);
if (check != new) { if (check != new) {
printk(KERN_ERR "PCI: Error updating region " dev_err(&dev->dev, "BAR %d: error updating "
"%s/%d (high %08x != %08x)\n", "(high %#08x != %#08x)\n", resno, new, check);
pci_name(dev), resno, new, check);
} }
} }
res->flags &= ~IORESOURCE_UNSET; res->flags &= ~IORESOURCE_UNSET;
pr_debug("PCI: moved device %s resource %d (%lx) to %x\n", dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n",
pci_name(dev), resno, res->flags, resno, (unsigned long long)region.start,
new & ~PCI_REGION_FLAG_MASK); (unsigned long long)region.end, res->flags);
} }
int pci_claim_resource(struct pci_dev *dev, int resource) int pci_claim_resource(struct pci_dev *dev, int resource)
@ -117,10 +114,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
err = insert_resource(root, res); err = insert_resource(root, res);
if (err) { if (err) {
printk(KERN_ERR "PCI: %s region %d of %s %s [%llx:%llx]\n", dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n",
root ? "Address space collision on" : resource,
"No parent found for", root ? "address space collision on" :
resource, dtype, pci_name(dev), "no parent found for",
dtype,
(unsigned long long)res->start, (unsigned long long)res->start,
(unsigned long long)res->end); (unsigned long long)res->end);
} }
@ -140,11 +138,10 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
align = resource_alignment(res); align = resource_alignment(res);
if (!align) { if (!align) {
printk(KERN_ERR "PCI: Cannot allocate resource (bogus " dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus "
"alignment) %d [%llx:%llx] (flags %lx) of %s\n", "alignment) [%#llx-%#llx] flags %#lx\n",
resno, (unsigned long long)res->start, resno, (unsigned long long)res->start,
(unsigned long long)res->end, res->flags, (unsigned long long)res->end, res->flags);
pci_name(dev));
return -EINVAL; return -EINVAL;
} }
@ -165,11 +162,11 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
} }
if (ret) { if (ret) {
printk(KERN_ERR "PCI: Failed to allocate %s resource " dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
"#%d:%llx@%llx for %s\n", "[%#llx-%#llx]\n", resno,
res->flags & IORESOURCE_IO ? "I/O" : "mem", res->flags & IORESOURCE_IO ? "I/O" : "mem",
resno, (unsigned long long)size, (unsigned long long)res->start,
(unsigned long long)res->start, pci_name(dev)); (unsigned long long)res->end);
} else { } else {
res->flags &= ~IORESOURCE_STARTALIGN; res->flags &= ~IORESOURCE_STARTALIGN;
if (resno < PCI_BRIDGE_RESOURCES) if (resno < PCI_BRIDGE_RESOURCES)
@ -205,11 +202,11 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno)
} }
if (ret) { if (ret) {
printk(KERN_ERR "PCI: Failed to allocate %s resource " dev_err(&dev->dev, "BAR %d: can't allocate %s resource "
"#%d:%llx@%llx for %s\n", "[%#llx-%#llx\n]", resno,
res->flags & IORESOURCE_IO ? "I/O" : "mem", res->flags & IORESOURCE_IO ? "I/O" : "mem",
resno, (unsigned long long)(res->end - res->start + 1), (unsigned long long)res->start,
(unsigned long long)res->start, pci_name(dev)); (unsigned long long)res->end);
} else if (resno < PCI_BRIDGE_RESOURCES) { } else if (resno < PCI_BRIDGE_RESOURCES) {
pci_update_resource(dev, res, resno); pci_update_resource(dev, res, resno);
} }
@ -239,11 +236,10 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
r_align = resource_alignment(r); r_align = resource_alignment(r);
if (!r_align) { if (!r_align) {
printk(KERN_WARNING "PCI: bogus alignment of resource " dev_warn(&dev->dev, "BAR %d: bogus alignment "
"%d [%llx:%llx] (flags %lx) of %s\n", "[%#llx-%#llx] flags %#lx\n",
i, (unsigned long long)r->start, i, (unsigned long long)r->start,
(unsigned long long)r->end, r->flags, (unsigned long long)r->end, r->flags);
pci_name(dev));
continue; continue;
} }
for (list = head; ; list = list->next) { for (list = head; ; list = list->next) {
@ -291,7 +287,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
if (!r->parent) { if (!r->parent) {
dev_err(&dev->dev, "device not available because of " dev_err(&dev->dev, "device not available because of "
"BAR %d [%llx:%llx] collisions\n", i, "BAR %d [%#llx-%#llx] collisions\n", i,
(unsigned long long) r->start, (unsigned long long) r->start,
(unsigned long long) r->end); (unsigned long long) r->end);
return -EINVAL; return -EINVAL;

drivers/pci/slot.c (new file, 233 lines)
View file

@ -0,0 +1,233 @@
/*
* drivers/pci/slot.c
* Copyright (C) 2006 Matthew Wilcox <matthew@wil.cx>
* Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P.
* Alex Chiang <achiang@hp.com>
*/
#include <linux/kobject.h>
#include <linux/pci.h>
#include <linux/err.h>
#include "pci.h"
struct kset *pci_slots_kset;
EXPORT_SYMBOL_GPL(pci_slots_kset);
static ssize_t pci_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct pci_slot *slot = to_pci_slot(kobj);
struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
return attribute->show ? attribute->show(slot, buf) : -EIO;
}
static ssize_t pci_slot_attr_store(struct kobject *kobj,
struct attribute *attr, const char *buf, size_t len)
{
struct pci_slot *slot = to_pci_slot(kobj);
struct pci_slot_attribute *attribute = to_pci_slot_attr(attr);
return attribute->store ? attribute->store(slot, buf, len) : -EIO;
}
static struct sysfs_ops pci_slot_sysfs_ops = {
.show = pci_slot_attr_show,
.store = pci_slot_attr_store,
};
static ssize_t address_read_file(struct pci_slot *slot, char *buf)
{
if (slot->number == 0xff)
return sprintf(buf, "%04x:%02x\n",
pci_domain_nr(slot->bus),
slot->bus->number);
else
return sprintf(buf, "%04x:%02x:%02x\n",
pci_domain_nr(slot->bus),
slot->bus->number,
slot->number);
}
static void pci_slot_release(struct kobject *kobj)
{
struct pci_slot *slot = to_pci_slot(kobj);
pr_debug("%s: releasing pci_slot on %x:%d\n", __func__,
slot->bus->number, slot->number);
list_del(&slot->list);
kfree(slot);
}
static struct pci_slot_attribute pci_slot_attr_address =
__ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_address.attr,
NULL,
};
static struct kobj_type pci_slot_ktype = {
.sysfs_ops = &pci_slot_sysfs_ops,
.release = &pci_slot_release,
.default_attrs = pci_slot_default_attrs,
};
/**
* pci_create_slot - create or increment refcount for physical PCI slot
* @parent: struct pci_bus of parent bridge
* @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder
* @name: user visible string presented in /sys/bus/pci/slots/<name>
*
* PCI slots have first class attributes such as address, speed, width,
* and a &struct pci_slot is used to manage them. This interface will
* either return a new &struct pci_slot to the caller, or if the pci_slot
* already exists, its refcount will be incremented.
*
* Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple.
*
* Placeholder slots:
* In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify
* a slot. There is one notable exception - pSeries (rpaphp), where the
* @slot_nr cannot be determined until a device is actually inserted into
* the slot. In this scenario, the caller may pass -1 for @slot_nr.
*
* The following semantics are imposed when the caller passes @slot_nr ==
* -1. First, the check for existing %struct pci_slot is skipped, as the
* caller may know about several unpopulated slots on a given %struct
* pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for
* these slots is then determined by the @name parameter. We expect
* kobject_init_and_add() to warn us if the caller attempts to create
* multiple slots with the same name. The other change in semantics is
* user-visible, which is the 'address' parameter presented in sysfs will
* consist solely of a dddd:bb tuple, where dddd is the PCI domain of the
* %struct pci_bus and bb is the bus number. In other words, the devfn of
* the 'placeholder' slot will not be displayed.
*/
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name)
{
struct pci_slot *slot;
int err;
down_write(&pci_bus_sem);
if (slot_nr == -1)
goto placeholder;
/* If we've already created this slot, bump refcount and return. */
list_for_each_entry(slot, &parent->slots, list) {
if (slot->number == slot_nr) {
kobject_get(&slot->kobj);
pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n",
__func__,
atomic_read(&slot->kobj.kref.refcount),
pci_domain_nr(parent), parent->number,
slot_nr);
goto out;
}
}
placeholder:
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
slot = ERR_PTR(-ENOMEM);
goto out;
}
slot->bus = parent;
slot->number = slot_nr;
slot->kobj.kset = pci_slots_kset;
err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
"%s", name);
if (err) {
printk(KERN_ERR "Unable to register kobject %s\n", name);
goto err;
}
INIT_LIST_HEAD(&slot->list);
list_add(&slot->list, &parent->slots);
/* Don't care if debug printk has a -1 for slot_nr */
pr_debug("%s: created pci_slot on %04x:%02x:%02x\n",
__func__, pci_domain_nr(parent), parent->number, slot_nr);
out:
up_write(&pci_bus_sem);
return slot;
err:
kfree(slot);
slot = ERR_PTR(err);
goto out;
}
EXPORT_SYMBOL_GPL(pci_create_slot);
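A hedged usage sketch, not part of slot.c: a hotplug driver would typically create one slot object per device position on its secondary bus and drop the reference again at teardown. The bridge/name choices below are arbitrary assumptions.

static struct pci_slot *example_slot;

static int example_hotplug_register(struct pci_dev *bridge)
{
	/* "1" becomes the user-visible name under /sys/bus/pci/slots/ */
	example_slot = pci_create_slot(bridge->subordinate, PCI_SLOT(0), "1");
	if (IS_ERR(example_slot))
		return PTR_ERR(example_slot);
	return 0;
}

static void example_hotplug_unregister(void)
{
	pci_destroy_slot(example_slot);	/* drops the reference taken above */
}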
/**
* pci_update_slot_number - update %struct pci_slot -> number
* @slot - %struct pci_slot to update
* @slot_nr - new number for slot
*
* The primary purpose of this interface is to allow callers who earlier
* created a placeholder slot in pci_create_slot() by passing a -1 as
* slot_nr, to update their %struct pci_slot with the correct @slot_nr.
*/
void pci_update_slot_number(struct pci_slot *slot, int slot_nr)
{
int name_count = 0;
struct pci_slot *tmp;
down_write(&pci_bus_sem);
list_for_each_entry(tmp, &slot->bus->slots, list) {
WARN_ON(tmp->number == slot_nr);
if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj)))
name_count++;
}
if (name_count > 1)
printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj));
slot->number = slot_nr;
up_write(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_update_slot_number);
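For the placeholder case described above, the flow might look like the following sketch (illustrative only; the function name and probed_devfn parameter are assumptions): create the slot with -1, then fix up the number once a device is actually present.

static struct pci_slot *example_placeholder(struct pci_bus *bus, int probed_devfn)
{
	struct pci_slot *slot;

	/* slot number unknown until a card is inserted (the pSeries case) */
	slot = pci_create_slot(bus, -1, "placeholder-slot");
	if (IS_ERR(slot))
		return slot;

	/* later, once the devfn is known */
	pci_update_slot_number(slot, PCI_SLOT(probed_devfn));
	return slot;
}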
/**
* pci_destroy_slot - decrement refcount for physical PCI slot
* @slot: struct pci_slot to decrement
*
* %struct pci_slot is refcounted, so destroying them is really easy; we
* just call kobject_put on its kobj and let our release methods do the
* rest.
*/
void pci_destroy_slot(struct pci_slot *slot)
{
pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__,
atomic_read(&slot->kobj.kref.refcount) - 1,
pci_domain_nr(slot->bus), slot->bus->number, slot->number);
down_write(&pci_bus_sem);
kobject_put(&slot->kobj);
up_write(&pci_bus_sem);
}
EXPORT_SYMBOL_GPL(pci_destroy_slot);
static int pci_slot_init(void)
{
struct kset *pci_bus_kset;
pci_bus_kset = bus_get_kset(&pci_bus_type);
pci_slots_kset = kset_create_and_add("slots", NULL,
&pci_bus_kset->kobj);
if (!pci_slots_kset) {
printk(KERN_ERR "PCI: Slot initialization failure\n");
return -ENOMEM;
}
return 0;
}
subsys_initcall(pci_slot_init);

View file

@ -259,6 +259,7 @@ struct acpi_device_perf {
/* Wakeup Management */ /* Wakeup Management */
struct acpi_device_wakeup_flags { struct acpi_device_wakeup_flags {
u8 valid:1; /* Can successfully enable wakeup? */ u8 valid:1; /* Can successfully enable wakeup? */
u8 prepared:1; /* Has the wake-up capability been enabled? */
u8 run_wake:1; /* Run-Wake GPE devices */ u8 run_wake:1; /* Run-Wake GPE devices */
}; };
@ -335,6 +336,8 @@ void acpi_bus_data_handler(acpi_handle handle, u32 function, void *context);
int acpi_bus_get_status(struct acpi_device *device); int acpi_bus_get_status(struct acpi_device *device);
int acpi_bus_get_power(acpi_handle handle, int *state); int acpi_bus_get_power(acpi_handle handle, int *state);
int acpi_bus_set_power(acpi_handle handle, int state); int acpi_bus_set_power(acpi_handle handle, int state);
bool acpi_bus_power_manageable(acpi_handle handle);
bool acpi_bus_can_wakeup(acpi_handle handle);
#ifdef CONFIG_ACPI_PROC_EVENT #ifdef CONFIG_ACPI_PROC_EVENT
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data); int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data); int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
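A minimal sketch of how platform glue (for instance the PCI/ACPI binding code) might use the two new predicates before driving a device's ACPI power resources. The function name is hypothetical and 'handle' is assumed to be the acpi_handle already bound to the device.

static int example_acpi_power_off(acpi_handle handle)
{
	/* Bail out if the node has no _PSx/_PRx methods to work with. */
	if (!acpi_bus_power_manageable(handle))
		return -ENODEV;

	if (acpi_bus_can_wakeup(handle))
		pr_debug("ACPI node can wake the system from sleep\n");

	return acpi_bus_set_power(handle, ACPI_STATE_D3);
}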
@ -377,6 +380,7 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
int acpi_pm_device_sleep_state(struct device *, int *); int acpi_pm_device_sleep_state(struct device *, int *);
int acpi_pm_device_sleep_wake(struct device *, bool);
#else /* !CONFIG_PM_SLEEP */ #else /* !CONFIG_PM_SLEEP */
static inline int acpi_pm_device_sleep_state(struct device *d, int *p) static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
{ {
@ -384,6 +388,10 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
*p = ACPI_STATE_D0; *p = ACPI_STATE_D0;
return ACPI_STATE_D3; return ACPI_STATE_D3;
} }
static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
{
return -ENODEV;
}
#endif /* !CONFIG_PM_SLEEP */ #endif /* !CONFIG_PM_SLEEP */
#endif /* CONFIG_ACPI */ #endif /* CONFIG_ACPI */
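A sketch, under the assumption of a bus-level suspend path, of how the two helpers above could be combined; the wrapper name is hypothetical, and the real callers added by this series live in the PCI/ACPI glue.

static int example_acpi_suspend_device(struct device *dev, bool want_wakeup)
{
	int d_min, target, error;

	/* Ask ACPI which D-state suits the target system sleep state. */
	target = acpi_pm_device_sleep_state(dev, &d_min);
	if (target < 0)
		return target;

	/* Arm (or disarm) the wake-up GPE associated with this device. */
	error = acpi_pm_device_sleep_wake(dev, want_wakeup);
	if (error && want_wakeup)
		dev_dbg(dev, "cannot enable wake-up: %d\n", error);

	return 0;
}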

View file

@ -87,7 +87,9 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
#ifdef CONFIG_ACPI_POWER #ifdef CONFIG_ACPI_POWER
int acpi_enable_wakeup_device_power(struct acpi_device *dev); int acpi_device_sleep_wake(struct acpi_device *dev,
int enable, int sleep_state, int dev_state);
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev); int acpi_disable_wakeup_device_power(struct acpi_device *dev);
int acpi_power_get_inferred_state(struct acpi_device *device); int acpi_power_get_inferred_state(struct acpi_device *device);
int acpi_power_transition(struct acpi_device *device, int state); int acpi_power_transition(struct acpi_device *device, int state);

View file

@ -86,6 +86,12 @@
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \ VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
*(.pci_fixup_resume) \ *(.pci_fixup_resume) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \ VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
*(.pci_fixup_resume_early) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
*(.pci_fixup_suspend) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
} \ } \
\ \
/* Built-in firmware blobs */ \ /* Built-in firmware blobs */ \

View file

@ -11,7 +11,11 @@ extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset);
extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset);
extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val);
extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val); extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val);
extern void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val);
extern int early_pci_allowed(void); extern int early_pci_allowed(void);
extern unsigned int pci_early_dump_regs;
extern void early_dump_pci_device(u8 bus, u8 slot, u8 func);
extern void early_dump_pci_devices(void);
#endif #endif
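A sketch of how early x86 boot code might use the helpers declared above; the function is hypothetical, and only the declarations themselves come from this patch.

static void __init example_early_pci_setup(void)
{
	u16 vendor;

	if (!early_pci_allowed())
		return;

	/* Dump every device's config space if the early-dump option was set. */
	if (pci_early_dump_regs)
		early_dump_pci_devices();

	/* Poke a register on bus 0, device 0, function 0 (illustration only). */
	vendor = read_pci_config_16(0, 0, 0, PCI_VENDOR_ID);
	if (vendor != 0xffff)
		write_pci_config_16(0, 0, 0, PCI_COMMAND, PCI_COMMAND_MEMORY);
}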

View file

@ -235,6 +235,9 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
int acpi_check_mem_region(resource_size_t start, resource_size_t n, int acpi_check_mem_region(resource_size_t start, resource_size_t n,
const char *name); const char *name);
#ifdef CONFIG_PM_SLEEP
void __init acpi_old_suspend_ordering(void);
#endif /* CONFIG_PM_SLEEP */
#else /* CONFIG_ACPI */ #else /* CONFIG_ACPI */
static inline int early_acpi_boot_init(void) static inline int early_acpi_boot_init(void)

View file

@ -68,6 +68,8 @@ struct bus_type {
int (*resume_early)(struct device *dev); int (*resume_early)(struct device *dev);
int (*resume)(struct device *dev); int (*resume)(struct device *dev);
struct pm_ext_ops *pm;
struct bus_type_private *p; struct bus_type_private *p;
}; };
@ -131,6 +133,8 @@ struct device_driver {
int (*resume) (struct device *dev); int (*resume) (struct device *dev);
struct attribute_group **groups; struct attribute_group **groups;
struct pm_ops *pm;
struct driver_private *p; struct driver_private *p;
}; };
@ -197,6 +201,8 @@ struct class {
int (*suspend)(struct device *dev, pm_message_t state); int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev); int (*resume)(struct device *dev);
struct pm_ops *pm;
}; };
extern int __must_check class_register(struct class *class); extern int __must_check class_register(struct class *class);
@ -248,8 +254,11 @@ struct device_type {
struct attribute_group **groups; struct attribute_group **groups;
int (*uevent)(struct device *dev, struct kobj_uevent_env *env); int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
void (*release)(struct device *dev); void (*release)(struct device *dev);
int (*suspend)(struct device *dev, pm_message_t state); int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev); int (*resume)(struct device *dev);
struct pm_ops *pm;
}; };
/* interface for exporting device attributes */ /* interface for exporting device attributes */

View file

@ -17,8 +17,7 @@
#ifndef LINUX_PCI_H #ifndef LINUX_PCI_H
#define LINUX_PCI_H #define LINUX_PCI_H
/* Include the pci register defines */ #include <linux/pci_regs.h> /* The pci register defines */
#include <linux/pci_regs.h>
/* /*
* The PCI interface treats multi-function devices as independent * The PCI interface treats multi-function devices as independent
@ -49,12 +48,22 @@
#include <linux/list.h> #include <linux/list.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/errno.h> #include <linux/errno.h>
#include <linux/kobject.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#include <linux/device.h> #include <linux/device.h>
/* Include the ID list */ /* Include the ID list */
#include <linux/pci_ids.h> #include <linux/pci_ids.h>
/* pci_slot represents a physical slot */
struct pci_slot {
struct pci_bus *bus; /* The bus this slot is on */
struct list_head list; /* node in list of slots on this bus */
struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */
unsigned char number; /* PCI_SLOT(pci_dev->devfn) */
struct kobject kobj;
};
/* File state for mmap()s on /proc/bus/pci/X/Y */ /* File state for mmap()s on /proc/bus/pci/X/Y */
enum pci_mmap_state { enum pci_mmap_state {
pci_mmap_io, pci_mmap_io,
@ -142,6 +151,7 @@ struct pci_dev {
void *sysdata; /* hook for sys-specific extension */ void *sysdata; /* hook for sys-specific extension */
struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */
struct pci_slot *slot; /* Physical slot this device is in */
unsigned int devfn; /* encoded device & function index */ unsigned int devfn; /* encoded device & function index */
unsigned short vendor; unsigned short vendor;
@ -167,6 +177,13 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI-speak, pci_power_t current_state; /* Current operating state. In ACPI-speak,
this is D0-D3, D0 being fully functional, this is D0-D3, D0 being fully functional,
and D3 being off. */ and D3 being off. */
int pm_cap; /* PM capability offset in the
configuration space */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
#ifdef CONFIG_PCIEASPM #ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */ struct pcie_link_state *link_state; /* ASPM link state. */
@ -191,7 +208,6 @@ struct pci_dev {
unsigned int is_added:1; unsigned int is_added:1;
unsigned int is_busmaster:1; /* device is busmaster */ unsigned int is_busmaster:1; /* device is busmaster */
unsigned int no_msi:1; /* device may not use msi */ unsigned int no_msi:1; /* device may not use msi */
unsigned int no_d1d2:1; /* only allow d0 or d3 */
unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
unsigned int broken_parity_status:1; /* Device generates false positive parity */ unsigned int broken_parity_status:1; /* Device generates false positive parity */
unsigned int msi_enabled:1; unsigned int msi_enabled:1;
@ -267,6 +283,7 @@ struct pci_bus {
struct list_head children; /* list of child buses */ struct list_head children; /* list of child buses */
struct list_head devices; /* list of devices on this bus */ struct list_head devices; /* list of devices on this bus */
struct pci_dev *self; /* bridge device as seen by parent */ struct pci_dev *self; /* bridge device as seen by parent */
struct list_head slots; /* list of slots on this bus */
struct resource *resource[PCI_BUS_NUM_RESOURCES]; struct resource *resource[PCI_BUS_NUM_RESOURCES];
/* address space routed to this bus */ /* address space routed to this bus */
@ -328,7 +345,7 @@ struct pci_bus_region {
struct pci_dynids { struct pci_dynids {
spinlock_t lock; /* protects list, index */ spinlock_t lock; /* protects list, index */
struct list_head list; /* for IDs added at runtime */ struct list_head list; /* for IDs added at runtime */
unsigned int use_driver_data:1; /* pci_driver->driver_data is used */ unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
}; };
/* ---------------------------------------------------------------- */ /* ---------------------------------------------------------------- */
@ -390,7 +407,7 @@ struct pci_driver {
int (*resume_early) (struct pci_dev *dev); int (*resume_early) (struct pci_dev *dev);
int (*resume) (struct pci_dev *dev); /* Device woken up */ int (*resume) (struct pci_dev *dev); /* Device woken up */
void (*shutdown) (struct pci_dev *dev); void (*shutdown) (struct pci_dev *dev);
struct pm_ext_ops *pm;
struct pci_error_handlers *err_handler; struct pci_error_handlers *err_handler;
struct device_driver driver; struct device_driver driver;
struct pci_dynids dynids; struct pci_dynids dynids;
@ -489,6 +506,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata); struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr); int busnr);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name);
void pci_destroy_slot(struct pci_slot *slot);
void pci_update_slot_number(struct pci_slot *slot, int slot_nr);
int pci_scan_slot(struct pci_bus *bus, int devfn); int pci_scan_slot(struct pci_bus *bus, int devfn);
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@ -618,6 +639,8 @@ int pci_restore_state(struct pci_dev *dev);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
int pci_prepare_to_sleep(struct pci_dev *dev);
int pci_back_from_sleep(struct pci_dev *dev);
/* Functions for PCI Hotplug drivers to use */ /* Functions for PCI Hotplug drivers to use */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
@ -839,6 +862,11 @@ static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
return -EIO; return -EIO;
} }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
return -EIO;
}
static inline int pci_set_dma_max_seg_size(struct pci_dev *dev, static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
unsigned int size) unsigned int size)
{ {
@ -977,9 +1005,9 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data)
/* If you want to know what to call your pci_dev, ask this function. /* If you want to know what to call your pci_dev, ask this function.
* Again, it's a wrapper around the generic device. * Again, it's a wrapper around the generic device.
*/ */
static inline char *pci_name(struct pci_dev *pdev) static inline const char *pci_name(struct pci_dev *pdev)
{ {
return pdev->dev.bus_id; return dev_name(&pdev->dev);
} }
@ -1014,7 +1042,9 @@ enum pci_fixup_pass {
pci_fixup_header, /* After reading configuration header */ pci_fixup_header, /* After reading configuration header */
pci_fixup_final, /* Final phase of device fixups */ pci_fixup_final, /* Final phase of device fixups */
pci_fixup_enable, /* pci_enable_device() time */ pci_fixup_enable, /* pci_enable_device() time */
pci_fixup_resume, /* pci_enable_device() time */ pci_fixup_resume, /* pci_device_resume() */
pci_fixup_suspend, /* pci_device_suspend */
pci_fixup_resume_early, /* pci_device_resume_early() */
}; };
/* Anonymous variables would be nice... */ /* Anonymous variables would be nice... */
@ -1036,6 +1066,12 @@ enum pci_fixup_pass {
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
resume##vendor##device##hook, vendor, device, hook) resume##vendor##device##hook, vendor, device, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
resume_early##vendor##device##hook, vendor, device, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
suspend##vendor##device##hook, vendor, device, hook)
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
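A sketch of registering a quirk with one of the new sections; the vendor/device pair and the quirk body are made up, only the DECLARE_PCI_FIXUP_SUSPEND() mechanism comes from this patch.

static void quirk_example_suspend(struct pci_dev *dev)
{
	/* Runs from pci_fixup_device(pci_fixup_suspend, dev) at suspend time. */
	dev_dbg(&dev->dev, "applying hypothetical suspend quirk\n");
}
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example_suspend);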
@ -1060,7 +1096,10 @@ extern int pci_pci_problems;
extern unsigned long pci_cardbus_io_size; extern unsigned long pci_cardbus_io_size;
extern unsigned long pci_cardbus_mem_size; extern unsigned long pci_cardbus_mem_size;
extern int pcibios_add_platform_entries(struct pci_dev *dev); int pcibios_add_platform_entries(struct pci_dev *dev);
void pcibios_disable_device(struct pci_dev *dev);
int pcibios_set_pcie_reset_state(struct pci_dev *dev,
enum pcie_reset_state state);
#ifdef CONFIG_PCI_MMCONFIG #ifdef CONFIG_PCI_MMCONFIG
extern void __init pci_mmcfg_early_init(void); extern void __init pci_mmcfg_early_init(void);

View file

@ -95,9 +95,6 @@ struct hotplug_slot_attribute {
* @get_adapter_status: Called to get see if an adapter is present in the slot or not. * @get_adapter_status: Called to get see if an adapter is present in the slot or not.
* If this field is NULL, the value passed in the struct hotplug_slot_info * If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user. * will be used when this value is requested by a user.
* @get_address: Called to get pci address of a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
* @get_max_bus_speed: Called to get the max bus speed for a slot. * @get_max_bus_speed: Called to get the max bus speed for a slot.
* If this field is NULL, the value passed in the struct hotplug_slot_info * If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user. * will be used when this value is requested by a user.
@ -120,7 +117,6 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
int (*get_address) (struct hotplug_slot *slot, u32 *value);
int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value); int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
}; };
@ -140,7 +136,6 @@ struct hotplug_slot_info {
u8 attention_status; u8 attention_status;
u8 latch_status; u8 latch_status;
u8 adapter_status; u8 adapter_status;
u32 address;
enum pci_bus_speed max_bus_speed; enum pci_bus_speed max_bus_speed;
enum pci_bus_speed cur_bus_speed; enum pci_bus_speed cur_bus_speed;
}; };
@ -166,15 +161,14 @@ struct hotplug_slot {
/* Variables below this are for use only by the hotplug pci core. */ /* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list; struct list_head slot_list;
struct kobject kobj; struct pci_slot *pci_slot;
}; };
#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) #define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
extern int pci_hp_register (struct hotplug_slot *slot); extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr);
extern int pci_hp_deregister (struct hotplug_slot *slot); extern int pci_hp_deregister(struct hotplug_slot *slot);
extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
struct hotplug_slot_info *info); struct hotplug_slot_info *info);
extern struct kset *pci_hotplug_slots_kset;
/* PCI Setting Record (Type 0) */ /* PCI Setting Record (Type 0) */
struct hpp_type0 { struct hpp_type0 {
@ -227,9 +221,9 @@ struct hotplug_params {
#include <acpi/acpi.h> #include <acpi/acpi.h>
#include <acpi/acpi_bus.h> #include <acpi/acpi_bus.h>
#include <acpi/actypes.h> #include <acpi/actypes.h>
extern acpi_status acpi_run_oshp(acpi_handle handle);
extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus, extern acpi_status acpi_get_hp_params_from_firmware(struct pci_bus *bus,
struct hotplug_params *hpp); struct hotplug_params *hpp);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags);
int acpi_root_bridge(acpi_handle handle); int acpi_root_bridge(acpi_handle handle);
#endif #endif
#endif #endif

View file

@ -231,6 +231,7 @@
#define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */ #define PCI_PM_CAP_PME_D2 0x2000 /* PME# from D2 */
#define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */ #define PCI_PM_CAP_PME_D3 0x4000 /* PME# from D3 (hot) */
#define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */ #define PCI_PM_CAP_PME_D3cold 0x8000 /* PME# from D3 (cold) */
#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
#define PCI_PM_CTRL 4 /* PM control and status register */ #define PCI_PM_CTRL 4 /* PM control and status register */
#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ #define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */

View file

@ -53,6 +53,7 @@ struct platform_driver {
int (*suspend_late)(struct platform_device *, pm_message_t state); int (*suspend_late)(struct platform_device *, pm_message_t state);
int (*resume_early)(struct platform_device *); int (*resume_early)(struct platform_device *);
int (*resume)(struct platform_device *); int (*resume)(struct platform_device *);
struct pm_ext_ops *pm;
struct device_driver driver; struct device_driver driver;
}; };

View file

@ -112,7 +112,9 @@ typedef struct pm_message {
int event; int event;
} pm_message_t; } pm_message_t;
/* /**
* struct pm_ops - device PM callbacks
*
* Several driver power state transitions are externally visible, affecting * Several driver power state transitions are externally visible, affecting
* the state of pending I/O queues and (for drivers that touch hardware) * the state of pending I/O queues and (for drivers that touch hardware)
* interrupts, wakeups, DMA, and other hardware state. There may also be * interrupts, wakeups, DMA, and other hardware state. There may also be
@ -120,6 +122,284 @@ typedef struct pm_message {
* to the rest of the driver stack (such as a driver that's ON gating off * to the rest of the driver stack (such as a driver that's ON gating off
* clocks which are not in active use). * clocks which are not in active use).
* *
* The externally visible transitions are handled with the help of the following
* callbacks included in this structure:
*
* @prepare: Prepare the device for the upcoming transition, but do NOT change
* its hardware state. Prevent new children of the device from being
* registered after @prepare() returns (the driver's subsystem, and
* generally the rest of the kernel, is also expected to prevent new calls
* to the probe method once @prepare() has succeeded). If
* @prepare() detects a situation it cannot handle (e.g. registration of a
* child already in progress), it may return -EAGAIN, so that the PM core
* can execute it once again (e.g. after the new child has been registered)
* to recover from the race condition. This method is executed for all
* kinds of suspend transitions and is followed by one of the suspend
* callbacks: @suspend(), @freeze(), or @poweroff().
* The PM core executes @prepare() for all devices before starting to
* execute suspend callbacks for any of them, so drivers may assume all of
* the other devices to be present and functional while @prepare() is being
* executed. In particular, it is safe to make GFP_KERNEL memory
* allocations from within @prepare(). However, drivers may NOT assume
* anything about the availability of the user space at that time and it
* is not correct to request firmware from within @prepare() (it's too
* late to do that). [To work around this limitation, drivers may
* register suspend and hibernation notifiers that are executed before the
* freezing of tasks.]
*
* @complete: Undo the changes made by @prepare(). This method is executed for
* all kinds of resume transitions, following one of the resume callbacks:
* @resume(), @thaw(), @restore(). Also called if the state transition
* fails before the driver's suspend callback (@suspend(), @freeze(),
* @poweroff()) can be executed (e.g. if the suspend callback fails for one
* of the other devices that the PM core has unsuccessfully attempted to
* suspend earlier).
* The PM core executes @complete() after it has executed the appropriate
* resume callback for all devices.
*
* @suspend: Executed before putting the system into a sleep state in which the
* contents of main memory are preserved. Quiesce the device, put it into
* a low power state appropriate for the upcoming system state (such as
* PCI_D3hot), and enable wakeup events as appropriate.
*
* @resume: Executed after waking the system up from a sleep state in which the
* contents of main memory were preserved. Put the device into the
* appropriate state, according to the information saved in memory by the
* preceding @suspend(). The driver starts working again, responding to
* hardware events and software requests. The hardware may have gone
* through a power-off reset, or it may have maintained state from the
* previous suspend() which the driver may rely on while resuming. On most
* platforms, there are no restrictions on availability of resources like
* clocks during @resume().
*
* @freeze: Hibernation-specific, executed before creating a hibernation image.
* Quiesce operations so that a consistent image can be created, but do NOT
* otherwise put the device into a low power device state and do NOT emit
* system wakeup events. Save in main memory the device settings to be
* used by @restore() during the subsequent resume from hibernation or by
* the subsequent @thaw(), if the creation of the image or the restoration
* of main memory contents from it fails.
*
* @thaw: Hibernation-specific, executed after creating a hibernation image OR
* if the creation of the image fails. Also executed after a failing
* attempt to restore the contents of main memory from such an image.
* Undo the changes made by the preceding @freeze(), so the device can be
* operated in the same way as immediately before the call to @freeze().
*
* @poweroff: Hibernation-specific, executed after saving a hibernation image.
* Quiesce the device, put it into a low power state appropriate for the
* upcoming system state (such as PCI_D3hot), and enable wakeup events as
* appropriate.
*
* @restore: Hibernation-specific, executed after restoring the contents of main
* memory from a hibernation image. Driver starts working again,
* responding to hardware events and software requests. Drivers may NOT
* make ANY assumptions about the hardware state right prior to @restore().
* On most platforms, there are no restrictions on availability of
* resources like clocks during @restore().
*
* All of the above callbacks, except for @complete(), return error codes.
* However, the error codes returned by the resume operations, @resume(),
* @thaw(), and @restore(), do not cause the PM core to abort the resume
* transition during which they are returned. The error codes returned in
* that cases are only printed by the PM core to the system logs for debugging
* purposes. Still, it is recommended that drivers only return error codes
* from their resume methods in case of an unrecoverable failure (i.e. when the
* device being handled refuses to resume and becomes unusable) to allow us to
* modify the PM core in the future, so that it can avoid attempting to handle
* devices that failed to resume and their children.
*
* It is allowed to unregister devices while the above callbacks are being
* executed. However, it is not allowed to unregister a device from within any
* of its own callbacks.
*/
struct pm_ops {
int (*prepare)(struct device *dev);
void (*complete)(struct device *dev);
int (*suspend)(struct device *dev);
int (*resume)(struct device *dev);
int (*freeze)(struct device *dev);
int (*thaw)(struct device *dev);
int (*poweroff)(struct device *dev);
int (*restore)(struct device *dev);
};
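A sketch of a driver filling in the new structure; the callback bodies and the 'foo' prefix are hypothetical, only the fields come from this patch.

static int foo_suspend(struct device *dev)
{
	/* Quiesce the hardware; arm wake-up if device_may_wakeup(dev). */
	return 0;
}

static int foo_resume(struct device *dev)
{
	/* Re-initialize the hardware and restart I/O. */
	return 0;
}

static struct pm_ops foo_pm_ops = {
	.suspend	= foo_suspend,
	.resume		= foo_resume,
	/* This simple sketch reuses the same handlers for hibernation. */
	.freeze		= foo_suspend,
	.thaw		= foo_resume,
	.poweroff	= foo_suspend,
	.restore	= foo_resume,
};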
/**
* struct pm_ext_ops - extended device PM callbacks
*
* Some devices require certain operations related to suspend and hibernation
* to be carried out with interrupts disabled. Thus, 'struct pm_ext_ops' below
* is defined, adding callbacks to be executed with interrupts disabled to
* 'struct pm_ops'.
*
* The following callbacks included in 'struct pm_ext_ops' are executed with
* the nonboot CPUs switched off and with interrupts disabled on the only
* functional CPU. They also are executed with the PM core list of devices
* locked, so they must NOT unregister any devices.
*
* @suspend_noirq: Complete the operations of ->suspend() by carrying out any
* actions required for suspending the device that need interrupts to be
* disabled
*
* @resume_noirq: Prepare for the execution of ->resume() by carrying out any
* actions required for resuming the device that need interrupts to be
* disabled
*
* @freeze_noirq: Complete the operations of ->freeze() by carrying out any
* actions required for freezing the device that need interrupts to be
* disabled
*
* @thaw_noirq: Prepare for the execution of ->thaw() by carrying out any
* actions required for thawing the device that need interrupts to be
* disabled
*
* @poweroff_noirq: Complete the operations of ->poweroff() by carrying out any
* actions required for handling the device that need interrupts to be
* disabled
*
* @restore_noirq: Prepare for the execution of ->restore() by carrying out any
* actions required for restoring the operations of the device that need
* interrupts to be disabled
*
* All of the above callbacks return error codes, but the error codes returned
* by the resume operations, @resume_noirq(), @thaw_noirq(), and
* @restore_noirq(), do not cause the PM core to abort the resume transition
* during which they are returned. The error codes returned in those cases are
* only printed by the PM core to the system logs for debugging purposes.
* Still, as stated above, it is recommended that drivers only return error
* codes from their resume methods if the device being handled fails to resume
* and is not usable any more.
*/
struct pm_ext_ops {
struct pm_ops base;
int (*suspend_noirq)(struct device *dev);
int (*resume_noirq)(struct device *dev);
int (*freeze_noirq)(struct device *dev);
int (*thaw_noirq)(struct device *dev);
int (*poweroff_noirq)(struct device *dev);
int (*restore_noirq)(struct device *dev);
};
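Continuing the sketch above: how the extended structure embeds the base callbacks and adds a _noirq stage, ready to be pointed at from the new 'pm' member of struct pci_driver, bus_type and friends. The handler is again hypothetical and reuses foo_suspend/foo_resume from the previous sketch.

static int foo_suspend_noirq(struct device *dev)
{
	/* Final register save, run with interrupts off on the boot CPU. */
	return 0;
}

static struct pm_ext_ops foo_pm_ext_ops = {
	.base = {
		.suspend	= foo_suspend,
		.resume		= foo_resume,
	},
	.suspend_noirq	= foo_suspend_noirq,
};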
/**
* PM_EVENT_ messages
*
* The following PM_EVENT_ messages are defined for the internal use of the PM
* core, in order to provide a mechanism allowing the high level suspend and
* hibernation code to convey the necessary information to the device PM core
* code:
*
* ON No transition.
*
* FREEZE System is going to hibernate, call ->prepare() and ->freeze()
* for all devices.
*
* SUSPEND System is going to suspend, call ->prepare() and ->suspend()
* for all devices.
*
* HIBERNATE Hibernation image has been saved, call ->prepare() and
* ->poweroff() for all devices.
*
* QUIESCE Contents of main memory are going to be restored from a (loaded)
* hibernation image, call ->prepare() and ->freeze() for all
* devices.
*
* RESUME System is resuming, call ->resume() and ->complete() for all
* devices.
*
* THAW Hibernation image has been created, call ->thaw() and
* ->complete() for all devices.
*
* RESTORE Contents of main memory have been restored from a hibernation
* image, call ->restore() and ->complete() for all devices.
*
* RECOVER Creation of a hibernation image or restoration of the main
* memory contents from a hibernation image has failed, call
* ->thaw() and ->complete() for all devices.
*/
#define PM_EVENT_ON 0x0000
#define PM_EVENT_FREEZE 0x0001
#define PM_EVENT_SUSPEND 0x0002
#define PM_EVENT_HIBERNATE 0x0004
#define PM_EVENT_QUIESCE 0x0008
#define PM_EVENT_RESUME 0x0010
#define PM_EVENT_THAW 0x0020
#define PM_EVENT_RESTORE 0x0040
#define PM_EVENT_RECOVER 0x0080
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_QUIESCE ((struct pm_message){ .event = PM_EVENT_QUIESCE, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_RESUME ((struct pm_message){ .event = PM_EVENT_RESUME, })
#define PMSG_THAW ((struct pm_message){ .event = PM_EVENT_THAW, })
#define PMSG_RESTORE ((struct pm_message){ .event = PM_EVENT_RESTORE, })
#define PMSG_RECOVER ((struct pm_message){ .event = PM_EVENT_RECOVER, })
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
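A sketch of how a legacy ->suspend() callback might decode these messages; the driver body is hypothetical.

static int foo_legacy_suspend(struct device *dev, pm_message_t msg)
{
	if (msg.event & PM_EVENT_SLEEP) {
		/* SUSPEND or HIBERNATE: power down and arm wake-up. */
	} else if (msg.event == PM_EVENT_FREEZE ||
		   msg.event == PM_EVENT_QUIESCE) {
		/* Creating or restoring an image: quiesce only, keep power. */
	}
	return 0;
}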
/**
* Device power management states
*
* These state labels are used internally by the PM core to indicate the current
* status of a device with respect to the PM core operations.
*
* DPM_ON Device is regarded as operational. Set this way
* initially and when ->complete() is about to be called.
* Also set when ->prepare() fails.
*
* DPM_PREPARING Device is going to be prepared for a PM transition. Set
* when ->prepare() is about to be called.
*
* DPM_RESUMING Device is going to be resumed. Set when ->resume(),
* ->thaw(), or ->restore() is about to be called.
*
* DPM_SUSPENDING Device has been prepared for a power transition. Set
* when ->prepare() has just succeeded.
*
* DPM_OFF Device is regarded as inactive. Set immediately after
* ->suspend(), ->freeze(), or ->poweroff() has succeeded.
* Also set when ->resume_noirq(), ->thaw_noirq(), or
* ->restore_noirq() is about to be called.
*
* DPM_OFF_IRQ Device is in a "deep sleep". Set immediately after
* ->suspend_noirq(), ->freeze_noirq(), or
* ->poweroff_noirq() has just succeeded.
*/
enum dpm_state {
DPM_INVALID,
DPM_ON,
DPM_PREPARING,
DPM_RESUMING,
DPM_SUSPENDING,
DPM_OFF,
DPM_OFF_IRQ,
};
struct dev_pm_info {
pm_message_t power_state;
unsigned can_wakeup:1;
unsigned should_wakeup:1;
enum dpm_state status; /* Owned by the PM core */
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
#endif
};
/*
* The PM_EVENT_ messages are also used by drivers implementing the legacy
* suspend framework, based on the ->suspend() and ->resume() callbacks common
* for suspend and hibernation transitions, according to the rules below.
*/
/* Necessary, because several drivers use PM_EVENT_PRETHAW */
#define PM_EVENT_PRETHAW PM_EVENT_QUIESCE
/*
* One transition is triggered by resume(), after a suspend() call; the * One transition is triggered by resume(), after a suspend() call; the
* message is implicit: * message is implicit:
* *
@ -164,35 +444,13 @@ typedef struct pm_message {
* or from system low-power states such as standby or suspend-to-RAM. * or from system low-power states such as standby or suspend-to-RAM.
*/ */
#define PM_EVENT_ON 0
#define PM_EVENT_FREEZE 1
#define PM_EVENT_SUSPEND 2
#define PM_EVENT_HIBERNATE 4
#define PM_EVENT_PRETHAW 8
#define PM_EVENT_SLEEP (PM_EVENT_SUSPEND | PM_EVENT_HIBERNATE)
#define PMSG_FREEZE ((struct pm_message){ .event = PM_EVENT_FREEZE, })
#define PMSG_PRETHAW ((struct pm_message){ .event = PM_EVENT_PRETHAW, })
#define PMSG_SUSPEND ((struct pm_message){ .event = PM_EVENT_SUSPEND, })
#define PMSG_HIBERNATE ((struct pm_message){ .event = PM_EVENT_HIBERNATE, })
#define PMSG_ON ((struct pm_message){ .event = PM_EVENT_ON, })
struct dev_pm_info {
pm_message_t power_state;
unsigned can_wakeup:1;
unsigned should_wakeup:1;
bool sleeping:1; /* Owned by the PM core */
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
#endif
};
extern int device_power_down(pm_message_t state);
extern void device_power_up(void);
extern void device_resume(void);
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern void device_power_up(pm_message_t state);
extern void device_resume(pm_message_t state);
extern void device_pm_unlock(void);
extern int device_power_down(pm_message_t state);
extern int device_suspend(pm_message_t state); extern int device_suspend(pm_message_t state);
extern int device_prepare_suspend(pm_message_t state); extern int device_prepare_suspend(pm_message_t state);

View file

@ -35,6 +35,11 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = dev->power.should_wakeup = !!val; dev->power.can_wakeup = dev->power.should_wakeup = !!val;
} }
static inline void device_set_wakeup_capable(struct device *dev, int val)
{
dev->power.can_wakeup = !!val;
}
static inline int device_can_wakeup(struct device *dev) static inline int device_can_wakeup(struct device *dev)
{ {
return dev->power.can_wakeup; return dev->power.can_wakeup;
@ -47,21 +52,7 @@ static inline void device_set_wakeup_enable(struct device *dev, int val)
static inline int device_may_wakeup(struct device *dev) static inline int device_may_wakeup(struct device *dev)
{ {
return dev->power.can_wakeup & dev->power.should_wakeup; return dev->power.can_wakeup && dev->power.should_wakeup;
}
/*
* Platform hook to activate device wakeup capability, if that's not already
* handled by enable_irq_wake() etc.
* Returns zero on success, else negative errno
*/
extern int (*platform_enable_wakeup)(struct device *dev, int is_on);
static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
{
if (platform_enable_wakeup)
return (*platform_enable_wakeup)(dev, is_on);
return 0;
} }
#else /* !CONFIG_PM */ #else /* !CONFIG_PM */
@ -72,6 +63,8 @@ static inline void device_init_wakeup(struct device *dev, int val)
dev->power.can_wakeup = !!val; dev->power.can_wakeup = !!val;
} }
static inline void device_set_wakeup_capable(struct device *dev, int val) { }
static inline int device_can_wakeup(struct device *dev) static inline int device_can_wakeup(struct device *dev)
{ {
return dev->power.can_wakeup; return dev->power.can_wakeup;
@ -80,11 +73,6 @@ static inline int device_can_wakeup(struct device *dev)
#define device_set_wakeup_enable(dev, val) do {} while (0) #define device_set_wakeup_enable(dev, val) do {} while (0)
#define device_may_wakeup(dev) 0 #define device_may_wakeup(dev) 0
static inline int call_platform_enable_wakeup(struct device *dev, int is_on)
{
return 0;
}
#endif /* !CONFIG_PM */ #endif /* !CONFIG_PM */
#endif /* _LINUX_PM_WAKEUP_H */ #endif /* _LINUX_PM_WAKEUP_H */

View file

@ -86,6 +86,11 @@ typedef int __bitwise suspend_state_t;
* that implement @begin(), but platforms implementing @begin() should * that implement @begin(), but platforms implementing @begin() should
* also provide a @end() which cleans up transitions aborted before * also provide a @end() which cleans up transitions aborted before
* @enter(). * @enter().
*
* @recover: Recover the platform from a suspend failure.
* Called by the PM core if the suspending of devices fails.
* This callback is optional and should only be implemented by platforms
* which require special recovery actions in that situation.
*/ */
struct platform_suspend_ops { struct platform_suspend_ops {
int (*valid)(suspend_state_t state); int (*valid)(suspend_state_t state);
@ -94,6 +99,7 @@ struct platform_suspend_ops {
int (*enter)(suspend_state_t state); int (*enter)(suspend_state_t state);
void (*finish)(void); void (*finish)(void);
void (*end)(void); void (*end)(void);
void (*recover)(void);
}; };
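A sketch of a platform wiring up the new optional @recover() hook; 'foo' and its enter routine are hypothetical, suspend_valid_only_mem() is the existing generic helper.

static int foo_pm_enter(suspend_state_t state)
{
	/* Enter the platform low-power state; returns 0 after wake-up. */
	return 0;
}

static void foo_pm_recover(void)
{
	/* Undo platform-specific preparation after a failed device suspend. */
}

static struct platform_suspend_ops foo_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.enter		= foo_pm_enter,
	.recover	= foo_pm_recover,
};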
#ifdef CONFIG_SUSPEND #ifdef CONFIG_SUSPEND
@ -149,7 +155,7 @@ extern void mark_free_pages(struct zone *zone);
* The methods in this structure allow a platform to carry out special * The methods in this structure allow a platform to carry out special
* operations required by it during a hibernation transition. * operations required by it during a hibernation transition.
* *
* All the methods below must be implemented. * All the methods below, except for @recover(), must be implemented.
* *
* @begin: Tell the platform driver that we're starting hibernation. * @begin: Tell the platform driver that we're starting hibernation.
* Called right after shrinking memory and before freezing devices. * Called right after shrinking memory and before freezing devices.
@ -189,6 +195,11 @@ extern void mark_free_pages(struct zone *zone);
* @restore_cleanup: Clean up after a failing image restoration. * @restore_cleanup: Clean up after a failing image restoration.
* Called right after the nonboot CPUs have been enabled and before * Called right after the nonboot CPUs have been enabled and before
* thawing devices (runs with IRQs on). * thawing devices (runs with IRQs on).
*
* @recover: Recover the platform from a failure to suspend devices.
* Called by the PM core if the suspending of devices during hibernation
* fails. This callback is optional and should only be implemented by
* platforms which require special recovery actions in that situation.
*/ */
struct platform_hibernation_ops { struct platform_hibernation_ops {
int (*begin)(void); int (*begin)(void);
@ -200,6 +211,7 @@ struct platform_hibernation_ops {
void (*leave)(void); void (*leave)(void);
int (*pre_restore)(void); int (*pre_restore)(void);
void (*restore_cleanup)(void); void (*restore_cleanup)(void);
void (*recover)(void);
}; };
#ifdef CONFIG_HIBERNATION #ifdef CONFIG_HIBERNATION

View file

@ -179,6 +179,17 @@ static void platform_restore_cleanup(int platform_mode)
hibernation_ops->restore_cleanup(); hibernation_ops->restore_cleanup();
} }
/**
* platform_recover - recover the platform from a failure to suspend
* devices.
*/
static void platform_recover(int platform_mode)
{
if (platform_mode && hibernation_ops && hibernation_ops->recover)
hibernation_ops->recover();
}
/** /**
* create_image - freeze devices that need to be frozen with interrupts * create_image - freeze devices that need to be frozen with interrupts
* off, create the hibernation image and thaw those devices. Control * off, create the hibernation image and thaw those devices. Control
@ -193,6 +204,7 @@ static int create_image(int platform_mode)
if (error) if (error)
return error; return error;
device_pm_lock();
local_irq_disable(); local_irq_disable();
/* At this point, device_suspend() has been called, but *not* /* At this point, device_suspend() has been called, but *not*
* device_power_down(). We *must* call device_power_down() now. * device_power_down(). We *must* call device_power_down() now.
@ -224,9 +236,11 @@ static int create_image(int platform_mode)
/* NOTE: device_power_up() is just a resume() for devices /* NOTE: device_power_up() is just a resume() for devices
* that suspended with irqs off ... no overall powerup. * that suspended with irqs off ... no overall powerup.
*/ */
device_power_up(); device_power_up(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
Enable_irqs: Enable_irqs:
local_irq_enable(); local_irq_enable();
device_pm_unlock();
return error; return error;
} }
@ -255,10 +269,10 @@ int hibernation_snapshot(int platform_mode)
suspend_console(); suspend_console();
error = device_suspend(PMSG_FREEZE); error = device_suspend(PMSG_FREEZE);
if (error) if (error)
goto Resume_console; goto Recover_platform;
if (hibernation_test(TEST_DEVICES)) if (hibernation_test(TEST_DEVICES))
goto Resume_devices; goto Recover_platform;
error = platform_pre_snapshot(platform_mode); error = platform_pre_snapshot(platform_mode);
if (error || hibernation_test(TEST_PLATFORM)) if (error || hibernation_test(TEST_PLATFORM))
@ -280,12 +294,16 @@ int hibernation_snapshot(int platform_mode)
Finish: Finish:
platform_finish(platform_mode); platform_finish(platform_mode);
Resume_devices: Resume_devices:
device_resume(); device_resume(in_suspend ?
Resume_console: (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
resume_console(); resume_console();
Close: Close:
platform_end(platform_mode); platform_end(platform_mode);
return error; return error;
Recover_platform:
platform_recover(platform_mode);
goto Resume_devices;
} }
/** /**
@ -300,8 +318,9 @@ static int resume_target_kernel(void)
{ {
int error; int error;
device_pm_lock();
local_irq_disable(); local_irq_disable();
error = device_power_down(PMSG_PRETHAW); error = device_power_down(PMSG_QUIESCE);
if (error) { if (error) {
printk(KERN_ERR "PM: Some devices failed to power down, " printk(KERN_ERR "PM: Some devices failed to power down, "
"aborting resume\n"); "aborting resume\n");
@ -329,9 +348,10 @@ static int resume_target_kernel(void)
swsusp_free(); swsusp_free();
restore_processor_state(); restore_processor_state();
touch_softlockup_watchdog(); touch_softlockup_watchdog();
device_power_up(); device_power_up(PMSG_RECOVER);
Enable_irqs: Enable_irqs:
local_irq_enable(); local_irq_enable();
device_pm_unlock();
return error; return error;
} }
@ -350,7 +370,7 @@ int hibernation_restore(int platform_mode)
pm_prepare_console(); pm_prepare_console();
suspend_console(); suspend_console();
error = device_suspend(PMSG_PRETHAW); error = device_suspend(PMSG_QUIESCE);
if (error) if (error)
goto Finish; goto Finish;
@ -362,7 +382,7 @@ int hibernation_restore(int platform_mode)
enable_nonboot_cpus(); enable_nonboot_cpus();
} }
platform_restore_cleanup(platform_mode); platform_restore_cleanup(platform_mode);
device_resume(); device_resume(PMSG_RECOVER);
Finish: Finish:
resume_console(); resume_console();
pm_restore_console(); pm_restore_console();
@ -392,8 +412,11 @@ int hibernation_platform_enter(void)
suspend_console(); suspend_console();
error = device_suspend(PMSG_HIBERNATE); error = device_suspend(PMSG_HIBERNATE);
if (error) if (error) {
goto Resume_console; if (hibernation_ops->recover)
hibernation_ops->recover();
goto Resume_devices;
}
error = hibernation_ops->prepare(); error = hibernation_ops->prepare();
if (error) if (error)
@ -403,6 +426,7 @@ int hibernation_platform_enter(void)
if (error) if (error)
goto Finish; goto Finish;
device_pm_lock();
local_irq_disable(); local_irq_disable();
error = device_power_down(PMSG_HIBERNATE); error = device_power_down(PMSG_HIBERNATE);
if (!error) { if (!error) {
@ -411,6 +435,7 @@ int hibernation_platform_enter(void)
while (1); while (1);
} }
local_irq_enable(); local_irq_enable();
device_pm_unlock();
/* /*
* We don't need to reenable the nonboot CPUs or resume consoles, since * We don't need to reenable the nonboot CPUs or resume consoles, since
@ -419,8 +444,7 @@ int hibernation_platform_enter(void)
Finish: Finish:
hibernation_ops->finish(); hibernation_ops->finish();
Resume_devices: Resume_devices:
device_resume(); device_resume(PMSG_RESTORE);
Resume_console:
resume_console(); resume_console();
Close: Close:
hibernation_ops->end(); hibernation_ops->end();

View file

@ -228,6 +228,7 @@ static int suspend_enter(suspend_state_t state)
{ {
int error = 0; int error = 0;
device_pm_lock();
arch_suspend_disable_irqs(); arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled()); BUG_ON(!irqs_disabled());
@ -239,10 +240,11 @@ static int suspend_enter(suspend_state_t state)
if (!suspend_test(TEST_CORE)) if (!suspend_test(TEST_CORE))
error = suspend_ops->enter(state); error = suspend_ops->enter(state);
device_power_up(); device_power_up(PMSG_RESUME);
Done: Done:
arch_suspend_enable_irqs(); arch_suspend_enable_irqs();
BUG_ON(irqs_disabled()); BUG_ON(irqs_disabled());
device_pm_unlock();
return error; return error;
} }
@ -267,11 +269,11 @@ int suspend_devices_and_enter(suspend_state_t state)
error = device_suspend(PMSG_SUSPEND); error = device_suspend(PMSG_SUSPEND);
if (error) { if (error) {
printk(KERN_ERR "PM: Some devices failed to suspend\n"); printk(KERN_ERR "PM: Some devices failed to suspend\n");
goto Resume_console; goto Recover_platform;
} }
if (suspend_test(TEST_DEVICES)) if (suspend_test(TEST_DEVICES))
goto Resume_devices; goto Recover_platform;
if (suspend_ops->prepare) { if (suspend_ops->prepare) {
error = suspend_ops->prepare(); error = suspend_ops->prepare();
@ -291,13 +293,17 @@ int suspend_devices_and_enter(suspend_state_t state)
if (suspend_ops->finish) if (suspend_ops->finish)
suspend_ops->finish(); suspend_ops->finish();
Resume_devices: Resume_devices:
device_resume(); device_resume(PMSG_RESUME);
Resume_console:
resume_console(); resume_console();
Close: Close:
if (suspend_ops->end) if (suspend_ops->end)
suspend_ops->end(); suspend_ops->end();
return error; return error;
Recover_platform:
if (suspend_ops->recover)
suspend_ops->recover();
goto Resume_devices;
} }
/** /**

View file

@ -439,6 +439,7 @@ out:
return error; return error;
} }
EXPORT_SYMBOL_GPL(kobject_rename);
/** /**
* kobject_move - move object to another parent * kobject_move - move object to another parent