Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (92 commits)
  powerpc: Remove unused 'protect4gb' boot parameter
  powerpc: Build-in e1000e for pseries & ppc64_defconfig
  powerpc/pseries: Make request_ras_irqs() available to other pseries code
  powerpc/numa: Use ibm,architecture-vec-5 to detect form 1 affinity
  powerpc/numa: Set a smaller value for RECLAIM_DISTANCE to enable zone reclaim
  powerpc: Use smt_snooze_delay=-1 to always busy loop
  powerpc: Remove check of ibm,smt-snooze-delay OF property
  powerpc/kdump: Fix race in kdump shutdown
  powerpc/kexec: Fix race in kexec shutdown
  powerpc/kexec: Speedup kexec hash PTE tear down
  powerpc/pseries: Add hcall to read 4 ptes at a time in real mode
  powerpc: Use more accurate limit for first segment memory allocations
  powerpc/kdump: Use chip->shutdown to disable IRQs
  powerpc/kdump: CPUs assume the context of the oopsing CPU
  powerpc/crashdump: Do not fail on NULL pointer dereferencing
  powerpc/eeh: Fix oops when probing in early boot
  powerpc/pci: Check devices status property when scanning OF tree
  powerpc/vio: Switch VIO Bus PM to use generic helpers
  powerpc: Avoid bad relocations in iSeries code
  powerpc: Use common cpu_die (fixes SMP+SUSPEND build)
  ...
Linus Torvalds 2010-05-21 11:17:05 -07:00
commit 79c4581262
118 changed files with 4145 additions and 1140 deletions

@@ -0,0 +1,18 @@
Reboot property to control system reboot on PPC4xx systems:
By setting "reset_type" to one of the following values, the default
software reset mechanism may be overridden. Here the possible values of
"reset_type":
1 - PPC4xx core reset
2 - PPC4xx chip reset
3 - PPC4xx system reset (default)
Example:
cpu@0 {
device_type = "cpu";
model = "PowerPC,440SPe";
...
reset-type = <2>; /* Use chip-reset */
};

@@ -11,7 +11,7 @@ Required properties:
    83xx, "fsl,mpc8572-gpio" for 85xx and "fsl,mpc8610-gpio" for 86xx.
- #gpio-cells : Should be two. The first cell is the pin number and the
  second cell is used to specify optional parameters (currently unused).
-- interrupts : Interrupt mapping for GPIO IRQ (currently unused).
+- interrupts : Interrupt mapping for GPIO IRQ.
- interrupt-parent : Phandle for the interrupt controller that
  services interrupts for this device.
- gpio-controller : Marks the port as GPIO controller.
@@ -38,3 +38,23 @@ Example of gpio-controller nodes for a MPC8347 SoC:
See booting-without-of.txt for details of how to specify GPIO
information for devices.
To use GPIO pins as interrupt sources for peripherals, specify the
GPIO controller as the interrupt parent and define GPIO number +
trigger mode using the interrupts property, which is defined like
this:
interrupts = <number trigger>, where:
- number: GPIO pin (0..31)
- trigger: trigger mode:
2 = trigger on falling edge
3 = trigger on both edges
Example of device using this is:
funkyfpga@0 {
compatible = "funky-fpga";
...
interrupts = <4 3>;
interrupt-parent = <&gpio1>;
};

@@ -140,6 +140,7 @@ config PPC
	select HAVE_SYSCALL_WRAPPERS if PPC64
	select GENERIC_ATOMIC64 if PPC32
	select HAVE_PERF_EVENTS
+	select HAVE_REGS_AND_STACK_ACCESS_API

config EARLY_PRINTK
	bool

@@ -44,6 +44,18 @@ config DEBUG_STACK_USAGE
	  This option will slow down process creation somewhat.

config DEBUG_PER_CPU_MAPS
bool "Debug access to per_cpu maps"
depends on DEBUG_KERNEL
depends on SMP
default n
---help---
Say Y to verify that the per_cpu map being accessed has
been setup. Adds a fair amount of code to kernel memory
and decreases performance.
Say N if unsure.
config HCALL_STATS
	bool "Hypervisor call instrumentation"
	depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS

@@ -44,6 +44,7 @@ $(obj)/cuboot-taishan.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-katmai.o: BOOTCFLAGS += -mcpu=405
$(obj)/cuboot-acadia.o: BOOTCFLAGS += -mcpu=405
$(obj)/treeboot-walnut.o: BOOTCFLAGS += -mcpu=405
+$(obj)/treeboot-iss4xx.o: BOOTCFLAGS += -mcpu=405
$(obj)/virtex405-head.o: BOOTAFLAGS += -mcpu=405
@@ -77,7 +78,7 @@ src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c
		cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
		virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
		cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
-		gamecube-head.S gamecube.c wii-head.S wii.c
+		gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c

src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -169,7 +170,7 @@ quiet_cmd_wrap = WRAP $@
		$(if $3, -s $3)$(if $4, -d $4)$(if $5, -i $5) vmlinux

image-$(CONFIG_PPC_PSERIES)		+= zImage.pseries
-image-$(CONFIG_PPC_MAPLE)		+= zImage.pseries
+image-$(CONFIG_PPC_MAPLE)		+= zImage.maple
image-$(CONFIG_PPC_IBM_CELL_BLADE)	+= zImage.pseries
image-$(CONFIG_PPC_PS3)			+= dtbImage.ps3
image-$(CONFIG_PPC_CELLEB)		+= zImage.pseries
@@ -206,6 +207,8 @@ image-$(CONFIG_TAISHAN)			+= cuImage.taishan
image-$(CONFIG_KATMAI)			+= cuImage.katmai
image-$(CONFIG_WARP)			+= cuImage.warp
image-$(CONFIG_YOSEMITE)		+= cuImage.yosemite
+image-$(CONFIG_ISS4xx)			+= treeImage.iss4xx \
+					   treeImage.iss4xx-mpic

# Board ports in arch/powerpc/platform/8xx/Kconfig
image-$(CONFIG_MPC86XADS)		+= cuImage.mpc866ads
@@ -351,7 +354,7 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
	zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
	zImage.iseries zImage.miboot zImage.pmac zImage.pseries \
-	zImage.maple simpleImage.* otheros.bld *.dtb
+	simpleImage.* otheros.bld *.dtb

# clean up files cached by wrapper
clean-kernel := vmlinux.strip vmlinux.bin

@@ -0,0 +1,155 @@
/*
* Device Tree Source for IBM Embedded PPC 476 Platform
*
* Copyright 2010 Torez Smith, IBM Corporation.
*
* Based on earlier code:
* Copyright (c) 2006, 2007 IBM Corp.
* Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without
* any warranty of any kind, whether express or implied.
*/
/dts-v1/;
/memreserve/ 0x01f00000 0x00100000;
/ {
#address-cells = <2>;
#size-cells = <1>;
model = "ibm,iss-4xx";
compatible = "ibm,iss-4xx";
dcr-parent = <&{/cpus/cpu@0}>;
aliases {
serial0 = &UART0;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
model = "PowerPC,4xx"; // real CPU changed in sim
reg = <0>;
clock-frequency = <100000000>; // 100Mhz :-)
timebase-frequency = <100000000>;
i-cache-line-size = <32>;
d-cache-line-size = <32>;
i-cache-size = <32768>;
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
status = "ok";
};
cpu@1 {
device_type = "cpu";
model = "PowerPC,4xx"; // real CPU changed in sim
reg = <1>;
clock-frequency = <100000000>; // 100Mhz :-)
timebase-frequency = <100000000>;
i-cache-line-size = <32>;
d-cache-line-size = <32>;
i-cache-size = <32768>;
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
status = "disabled";
enable-method = "spin-table";
cpu-release-addr = <0 0x01f00100>;
};
cpu@2 {
device_type = "cpu";
model = "PowerPC,4xx"; // real CPU changed in sim
reg = <2>;
clock-frequency = <100000000>; // 100Mhz :-)
timebase-frequency = <100000000>;
i-cache-line-size = <32>;
d-cache-line-size = <32>;
i-cache-size = <32768>;
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
status = "disabled";
enable-method = "spin-table";
cpu-release-addr = <0 0x01f00200>;
};
cpu@3 {
device_type = "cpu";
model = "PowerPC,4xx"; // real CPU changed in sim
reg = <3>;
clock-frequency = <100000000>; // 100Mhz :-)
timebase-frequency = <100000000>;
i-cache-line-size = <32>;
d-cache-line-size = <32>;
i-cache-size = <32768>;
d-cache-size = <32768>;
dcr-controller;
dcr-access-method = "native";
status = "disabled";
enable-method = "spin-table";
cpu-release-addr = <0 0x01f00300>;
};
};
memory {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
};
MPIC: interrupt-controller {
compatible = "chrp,open-pic";
interrupt-controller;
dcr-reg = <0xffc00000 0x00030000>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
};
plb {
compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
#address-cells = <2>;
#size-cells = <1>;
ranges;
clock-frequency = <0>; // Filled in by zImage
POB0: opb {
compatible = "ibm,opb-4xx", "ibm,opb";
#address-cells = <1>;
#size-cells = <1>;
/* Wish there was a nicer way of specifying a full 32-bit
range */
ranges = <0x00000000 0x00000001 0x00000000 0x80000000
0x80000000 0x00000001 0x80000000 0x80000000>;
clock-frequency = <0>; // Filled in by zImage
UART0: serial@40000200 {
device_type = "serial";
compatible = "ns16550a";
reg = <0x40000200 0x00000008>;
virtual-reg = <0xe0000200>;
clock-frequency = <11059200>;
current-speed = <115200>;
interrupt-parent = <&MPIC>;
interrupts = <0x0 0x2>;
};
};
};
nvrtc {
compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
reg = <0 0xEF703000 0x2000>;
};
iss-block {
compatible = "ibm,iss-sim-block-device";
reg = <0 0xEF701000 0x1000>;
};
chosen {
linux,stdout-path = "/plb/opb/serial@40000200";
};
};

@@ -0,0 +1,116 @@
/*
* Device Tree Source for IBM Embedded PPC 476 Platform
*
* Copyright 2010 Torez Smith, IBM Corporation.
*
* Based on earlier code:
* Copyright (c) 2006, 2007 IBM Corp.
* Josh Boyer <jwboyer@linux.vnet.ibm.com>, David Gibson <dwg@au1.ibm.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without
* any warranty of any kind, whether express or implied.
*/
/dts-v1/;
/ {
#address-cells = <2>;
#size-cells = <1>;
model = "ibm,iss-4xx";
compatible = "ibm,iss-4xx";
dcr-parent = <&{/cpus/cpu@0}>;
aliases {
serial0 = &UART0;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
model = "PowerPC,4xx"; // real CPU changed in sim
reg = <0x00000000>;
clock-frequency = <100000000>; // 100Mhz :-)
timebase-frequency = <100000000>;
i-cache-line-size = <32>; // may need fixup in sim
d-cache-line-size = <32>; // may need fixup in sim
i-cache-size = <32768>; /* may need fixup in sim */
d-cache-size = <32768>; /* may need fixup in sim */
dcr-controller;
dcr-access-method = "native";
};
};
memory {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000>; // Filled in by zImage
};
UIC0: interrupt-controller0 {
compatible = "ibm,uic-4xx", "ibm,uic";
interrupt-controller;
cell-index = <0>;
dcr-reg = <0x0c0 0x009>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
};
UIC1: interrupt-controller1 {
compatible = "ibm,uic-4xx", "ibm,uic";
interrupt-controller;
cell-index = <1>;
dcr-reg = <0x0d0 0x009>;
#address-cells = <0>;
#size-cells = <0>;
#interrupt-cells = <2>;
interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */
interrupt-parent = <&UIC0>;
};
plb {
compatible = "ibm,plb-4xx", "ibm,plb4"; /* Could be PLB6, doesn't matter */
#address-cells = <2>;
#size-cells = <1>;
ranges;
clock-frequency = <0>; // Filled in by zImage
POB0: opb {
compatible = "ibm,opb-4xx", "ibm,opb";
#address-cells = <1>;
#size-cells = <1>;
/* Wish there was a nicer way of specifying a full 32-bit
range */
ranges = <0x00000000 0x00000001 0x00000000 0x80000000
0x80000000 0x00000001 0x80000000 0x80000000>;
clock-frequency = <0>; // Filled in by zImage
UART0: serial@40000200 {
device_type = "serial";
compatible = "ns16550a";
reg = <0x40000200 0x00000008>;
virtual-reg = <0xe0000200>;
clock-frequency = <11059200>;
current-speed = <115200>;
interrupt-parent = <&UIC0>;
interrupts = <0x0 0x4>;
};
};
};
nvrtc {
compatible = "ds1743-nvram", "ds1743", "rtc-ds1743";
reg = <0 0xEF703000 0x2000>;
};
iss-block {
compatible = "ibm,iss-sim-block-device";
reg = <0 0xEF701000 0x1000>;
};
chosen {
linux,stdout-path = "/plb/opb/serial@40000200";
};
};

@@ -292,7 +292,7 @@
			fsl,num-channels = <4>;
			fsl,channel-fifo-len = <24>;
			fsl,exec-units-mask = <0x97c>;
-			fsl,descriptor-types-mask = <0x3ab0abf>;
+			fsl,descriptor-types-mask = <0x3a30abf>;
		};

		sata@18000 {
@@ -463,4 +463,18 @@
			  0 0x00800000>;
		};
	};
leds {
compatible = "gpio-leds";
pwr {
gpios = <&mcu_pio 0 0>;
default-state = "on";
};
hdd {
gpios = <&mcu_pio 1 0>;
linux,default-trigger = "ide-disk";
};
};
};

@@ -486,4 +486,18 @@
			  0 0x00800000>;
		};
	};
leds {
compatible = "gpio-leds";
pwr {
gpios = <&mcu_pio 0 0>;
default-state = "on";
};
hdd {
gpios = <&mcu_pio 1 0>;
linux,default-trigger = "ide-disk";
};
};
};

@@ -470,4 +470,18 @@
			  0 0x00800000>;
		};
	};
leds {
compatible = "gpio-leds";
pwr {
gpios = <&mcu_pio 0 0>;
default-state = "on";
};
hdd {
gpios = <&mcu_pio 1 0>;
linux,default-trigger = "ide-disk";
};
};
};

@@ -436,4 +436,18 @@
		compatible = "fsl,mpc8349-pci";
		device_type = "pci";
	};
leds {
compatible = "gpio-leds";
pwr {
gpios = <&mcu_pio 0 0>;
default-state = "on";
};
hdd {
gpios = <&mcu_pio 1 0>;
linux,default-trigger = "ide-disk";
};
};
};

@@ -19,6 +19,9 @@
	aliases {
		serial0 = &serial0;
		serial1 = &serial1;
ethernet0 = &enet0;
ethernet1 = &enet1;
ethernet2 = &enet2;
		pci0 = &pci0;
		pci1 = &pci1;
	};
@@ -346,6 +349,122 @@
		};
	};

mdio@24000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,etsec2-mdio";
reg = <0x24000 0x1000 0xb0030 0x4>;
phy0: ethernet-phy@0 {
interrupt-parent = <&mpic>;
interrupts = <3 1>;
reg = <0x0>;
};
phy1: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <2 1>;
reg = <0x1>;
};
};
mdio@25000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "fsl,etsec2-tbi";
reg = <0x25000 0x1000 0xb1030 0x4>;
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
};
};
enet0: ethernet@b0000 {
#address-cells = <1>;
#size-cells = <1>;
device_type = "network";
model = "eTSEC";
compatible = "fsl,etsec2";
fsl,num_rx_queues = <0x8>;
fsl,num_tx_queues = <0x8>;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupt-parent = <&mpic>;
fixed-link = <1 1 1000 0 0>;
phy-connection-type = "rgmii-id";
queue-group@0 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0xb0000 0x1000>;
interrupts = <29 2 30 2 34 2>;
};
queue-group@1 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0xb4000 0x1000>;
interrupts = <17 2 18 2 24 2>;
};
};
enet1: ethernet@b1000 {
#address-cells = <1>;
#size-cells = <1>;
device_type = "network";
model = "eTSEC";
compatible = "fsl,etsec2";
fsl,num_rx_queues = <0x8>;
fsl,num_tx_queues = <0x8>;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupt-parent = <&mpic>;
phy-handle = <&phy0>;
tbi-handle = <&tbi0>;
phy-connection-type = "sgmii";
queue-group@0 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0xb1000 0x1000>;
interrupts = <35 2 36 2 40 2>;
};
queue-group@1 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0xb5000 0x1000>;
interrupts = <51 2 52 2 67 2>;
};
};
enet2: ethernet@b2000 {
#address-cells = <1>;
#size-cells = <1>;
device_type = "network";
model = "eTSEC";
compatible = "fsl,etsec2";
fsl,num_rx_queues = <0x8>;
fsl,num_tx_queues = <0x8>;
local-mac-address = [ 00 00 00 00 00 00 ];
interrupt-parent = <&mpic>;
phy-handle = <&phy1>;
phy-connection-type = "rgmii-id";
queue-group@0 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0xb2000 0x1000>;
interrupts = <31 2 32 2 33 2>;
};
queue-group@1 {
#address-cells = <1>;
#size-cells = <1>;
reg = <0xb6000 0x1000>;
interrupts = <25 2 26 2 27 2>;
};
};
	usb@22000 {
		#address-cells = <1>;
		#size-cells = <0>;
@@ -356,6 +475,11 @@
		phy_type = "ulpi";
	};

/* USB2 is shared with localbus, so it must be disabled
by default. We can't put 'status = "disabled";' here
since U-Boot doesn't clear the status property when
it enables USB2. OTOH, U-Boot does create a new node
when there isn't any. So, just comment it out.
	usb@23000 {
		#address-cells = <1>;
		#size-cells = <0>;
@@ -365,6 +489,7 @@
		interrupts = <46 0x2>;
		phy_type = "ulpi";
	};
*/

	sdhci@2e000 {
		compatible = "fsl,p1020-esdhc", "fsl,esdhc";

@@ -0,0 +1,56 @@
/*
* Copyright 2010 Ben. Herrenschmidt, IBM Corporation.
*
* Based on earlier code:
* Copyright (C) Paul Mackerras 1997.
*
* Matt Porter <mporter@kernel.crashing.org>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
* Copyright (c) 2003, 2004 Zultys Technologies
*
* Copyright 2007 David Gibson, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "elf.h"
#include "string.h"
#include "stdio.h"
#include "page.h"
#include "ops.h"
#include "reg.h"
#include "io.h"
#include "dcr.h"
#include "4xx.h"
#include "44x.h"
#include "libfdt.h"
BSS_STACK(4096);
static void iss_4xx_fixups(void)
{
ibm4xx_sdram_fixup_memsize();
}
#define SPRN_PIR 0x11E /* Processor Identification Register */
void platform_init(void)
{
unsigned long end_of_ram = 0x08000000;
unsigned long avail_ram = end_of_ram - (unsigned long)_end;
u32 pir_reg;
simple_alloc_init(_end, avail_ram, 128, 64);
platform_ops.fixups = iss_4xx_fixups;
platform_ops.exit = ibm44x_dbcr_reset;
pir_reg = mfspr(SPRN_PIR);
fdt_set_boot_cpuid_phys(_dtb_start, pir_reg);
fdt_init(_dtb_start);
serial_console_init();
}

@@ -149,6 +149,10 @@ pseries)
    platformo=$object/of.o
    link_address='0x4000000'
    ;;
+maple)
+    platformo=$object/of.o
+    link_address='0x400000'
+    ;;
pmac|chrp)
    platformo=$object/of.o
    ;;
@@ -237,6 +241,9 @@ gamecube|wii)
    link_address='0x600000'
    platformo="$object/$platform-head.o $object/$platform.o"
    ;;
+treeboot-iss4xx-mpic)
+    platformo="$object/treeboot-iss4xx.o"
+    ;;
esac

vmz="$tmpdir/`basename \"$kernel\"`.$ext"
@@ -321,7 +328,7 @@ fi
# post-processing needed for some platforms
case "$platform" in
-pseries|chrp)
+pseries|chrp|maple)
    $objbin/addnote "$ofile"
    ;;
coff)

File diff suppressed because it is too large.

@@ -988,7 +988,7 @@ CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
# CONFIG_IP1000 is not set
# CONFIG_IGB is not set
# CONFIG_NS83820 is not set

@@ -804,7 +804,7 @@ CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
CONFIG_E1000=y
-CONFIG_E1000E=m
+CONFIG_E1000E=y
# CONFIG_IP1000 is not set
# CONFIG_IGB is not set
# CONFIG_NS83820 is not set

@@ -12,8 +12,12 @@
#define L1_CACHE_SHIFT		6
#define MAX_COPY_PREFETCH	4
#elif defined(CONFIG_PPC32)
-#define L1_CACHE_SHIFT		5
#define MAX_COPY_PREFETCH	4
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT		7
+#else
+#define L1_CACHE_SHIFT		5
+#endif
#else /* CONFIG_PPC64 */
#define L1_CACHE_SHIFT		7
#endif

@@ -72,6 +72,7 @@ extern int machine_check_4xx(struct pt_regs *regs);
extern int machine_check_440A(struct pt_regs *regs);
extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
+extern int machine_check_47x(struct pt_regs *regs);

/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
struct cpu_spec {
@@ -365,6 +366,7 @@ extern const char *powerpc_base_platform;
#define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
#define CPU_FTRS_440x6	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
	    CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X	(CPU_FTRS_440x6)
#define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
	    CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
@@ -453,6 +455,9 @@ enum {
#ifdef CONFIG_44x
	    CPU_FTRS_44X | CPU_FTRS_440x6 |
#endif
+#ifdef CONFIG_PPC_47x
+	    CPU_FTRS_47X |
+#endif
#ifdef CONFIG_E200
	    CPU_FTRS_E200 |
#endif

@@ -228,6 +228,7 @@
#define H_JOIN			0x298
#define H_VASI_STATE		0x2A4
#define H_ENABLE_CRQ		0x2B0
+#define H_GET_EM_PARMS		0x2B8
#define H_SET_MPP		0x2D0
#define H_GET_MPP		0x2D4
#define MAX_HCALL_OPCODE	H_GET_MPP
@@ -281,6 +282,7 @@ long plpar_hcall_raw(unsigned long opcode, unsigned long *retbuf, ...);
 */
#define PLPAR_HCALL9_BUFSIZE 9
long plpar_hcall9(unsigned long opcode, unsigned long *retbuf, ...);
+long plpar_hcall9_raw(unsigned long opcode, unsigned long *retbuf, ...);

/* For hcall instrumentation. One structure per-hcall, per-CPU */
struct hcall_stats {

@@ -31,6 +31,10 @@
#define KEXEC_ARCH KEXEC_ARCH_PPC
#endif

+#define KEXEC_STATE_NONE 0
+#define KEXEC_STATE_IRQS_OFF 1
+#define KEXEC_STATE_REAL_MODE 2
+
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/reg.h>

@@ -40,7 +40,7 @@
#define PPC44x_TLB_I		0x00000400      /* Caching is inhibited */
#define PPC44x_TLB_M		0x00000200      /* Memory is coherent */
#define PPC44x_TLB_G		0x00000100      /* Memory is guarded */
-#define PPC44x_TLB_E		0x00000080      /* Memory is guarded */
+#define PPC44x_TLB_E		0x00000080      /* Memory is little endian */

#define PPC44x_TLB_PERM_MASK	0x0000003f
#define PPC44x_TLB_UX		0x00000020      /* User execution */
@@ -53,6 +53,52 @@
/* Number of TLB entries */
#define PPC44x_TLB_SIZE		64
/* 47x bits */
#define PPC47x_MMUCR_TID 0x0000ffff
#define PPC47x_MMUCR_STS 0x00010000
/* Page identification fields */
#define PPC47x_TLB0_EPN_MASK 0xfffff000 /* Effective Page Number */
#define PPC47x_TLB0_VALID 0x00000800 /* Valid flag */
#define PPC47x_TLB0_TS 0x00000400 /* Translation address space */
#define PPC47x_TLB0_4K 0x00000000
#define PPC47x_TLB0_16K 0x00000010
#define PPC47x_TLB0_64K 0x00000030
#define PPC47x_TLB0_1M 0x00000070
#define PPC47x_TLB0_16M 0x000000f0
#define PPC47x_TLB0_256M 0x000001f0
#define PPC47x_TLB0_1G 0x000003f0
#define PPC47x_TLB0_BOLTED_R 0x00000008 /* tlbre only */
/* Translation fields */
#define PPC47x_TLB1_RPN_MASK 0xfffff000 /* Real Page Number */
#define PPC47x_TLB1_ERPN_MASK 0x000003ff
/* Storage attribute and access control fields */
#define PPC47x_TLB2_ATTR_MASK 0x0003ff80
#define PPC47x_TLB2_IL1I 0x00020000 /* Memory is guarded */
#define PPC47x_TLB2_IL1D 0x00010000 /* Memory is guarded */
#define PPC47x_TLB2_U0 0x00008000 /* User 0 */
#define PPC47x_TLB2_U1 0x00004000 /* User 1 */
#define PPC47x_TLB2_U2 0x00002000 /* User 2 */
#define PPC47x_TLB2_U3 0x00001000 /* User 3 */
#define PPC47x_TLB2_W 0x00000800 /* Caching is write-through */
#define PPC47x_TLB2_I 0x00000400 /* Caching is inhibited */
#define PPC47x_TLB2_M 0x00000200 /* Memory is coherent */
#define PPC47x_TLB2_G 0x00000100 /* Memory is guarded */
#define PPC47x_TLB2_E 0x00000080 /* Memory is little endian */
#define PPC47x_TLB2_PERM_MASK 0x0000003f
#define PPC47x_TLB2_UX 0x00000020 /* User execution */
#define PPC47x_TLB2_UW 0x00000010 /* User write */
#define PPC47x_TLB2_UR 0x00000008 /* User read */
#define PPC47x_TLB2_SX 0x00000004 /* Super execution */
#define PPC47x_TLB2_SW 0x00000002 /* Super write */
#define PPC47x_TLB2_SR 0x00000001 /* Super read */
#define PPC47x_TLB2_U_RWX (PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
#define PPC47x_TLB2_S_RWX (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
#define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR)
#define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
#ifndef __ASSEMBLY__

extern unsigned int tlb_44x_hwater;
@@ -79,12 +125,15 @@ typedef struct {
#if (PAGE_SHIFT == 12)
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_4K
#define mmu_virtual_psize	MMU_PAGE_4K
#elif (PAGE_SHIFT == 14)
#define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_16K
#define mmu_virtual_psize	MMU_PAGE_16K
#elif (PAGE_SHIFT == 16)
#define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_64K
#define mmu_virtual_psize	MMU_PAGE_64K
#elif (PAGE_SHIFT == 18)
#define PPC44x_TLBE_SIZE	PPC44x_TLB_256K

@@ -18,6 +18,7 @@
#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020)
+#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000040)

/*
 * This is individual features

@@ -30,7 +30,7 @@ extern struct pglist_data *node_data[];
 */
extern int numa_cpu_lookup_table[];
-extern cpumask_t numa_cpumask_lookup_table[];
+extern cpumask_var_t node_to_cpumask_map[];

#ifdef CONFIG_MEMORY_HOTPLUG
extern unsigned long max_pfn;
#endif

@@ -463,9 +463,6 @@ extern void mpic_cpu_set_priority(int prio);
/* Request IPIs on primary mpic */
extern void mpic_request_ipis(void);

-/* Send an IPI (non offseted number 0..3) */
-extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask);
-
/* Send a message (IPI) to a given target (cpu number or MSG_*) */
void smp_mpic_message_pass(int target, int msg);

@@ -82,6 +82,7 @@ struct paca_struct {
	s16 hw_cpu_id;			/* Physical processor number */
	u8 cpu_start;			/* At startup, processor spins until */
					/* this becomes non-zero. */
+	u8 kexec_state;			/* set when kexec down has irqs off */
#ifdef CONFIG_PPC_STD_MMU_64
	struct slb_shadow *slb_shadow_ptr;

@@ -19,6 +19,8 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
	u32 io1, io2;
	int propsize;
	int count = 0;
+	int virq;

	for (np = NULL; (np = of_find_compatible_node(np,
						      "parallel",
						      "pnpPNP,400")) != NULL;) {
@@ -26,10 +28,13 @@ static int __devinit parport_pc_find_nonpci_ports (int autoirq, int autodma)
		if (!prop || propsize > 6*sizeof(u32))
			continue;
		io1 = prop[1]; io2 = prop[2];
-		prop = of_get_property(np, "interrupts", NULL);
-		if (!prop)
+
+		virq = irq_of_parse_and_map(np, 0);
+		if (virq == NO_IRQ)
			continue;
-		if (parport_pc_probe_port(io1, io2, prop[0], autodma, NULL, 0) != NULL)
+
+		if (parport_pc_probe_port(io1, io2, virq, autodma, NULL, 0)
+				!= NULL)
			count++;
	}
	return count;

@@ -11,6 +11,12 @@
#include <linux/cpumask.h>
#include <linux/percpu.h>

+struct vmemmap_backing {
+	struct vmemmap_backing *list;
+	unsigned long phys;
+	unsigned long virt_addr;
+};
+
/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to

@@ -287,7 +287,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
#define pmd_page_vaddr(pmd)	\
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd)		\
-	(mem_map + (pmd_val(pmd) >> PAGE_SHIFT))
+	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long) (pmd_val(pmd) & PAGE_MASK))

@@ -89,6 +89,7 @@ struct pt_regs {

#define instruction_pointer(regs) ((regs)->nip)
#define user_stack_pointer(regs) ((regs)->gpr[1])
+#define kernel_stack_pointer(regs) ((regs)->gpr[1])
#define regs_return_value(regs) ((regs)->gpr[3])

#ifdef CONFIG_SMP
@@ -141,6 +142,69 @@ do {									      \
#define arch_has_block_step()	(!cpu_has_feature(CPU_FTR_601))
#define ARCH_HAS_USER_SINGLE_STEP_INFO
/*
* kprobe-based event tracer support
*/
#include <linux/stddef.h>
#include <linux/thread_info.h>
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
#define MAX_REG_OFFSET (offsetof(struct pt_regs, dsisr))
/**
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten
* @offset: offset number of the register.
*
* regs_get_register returns the value of a register whose offset from @regs.
* The @offset is the offset of the register in struct pt_regs.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
return *(unsigned long *)((unsigned long)regs + offset);
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
static inline bool regs_within_kernel_stack(struct pt_regs *regs,
unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */
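The accessors added in this hunk are the hooks the kprobe-based event tracer uses to fetch register and stack values at a probe hit. As a rough usage sketch only (not part of this commit; the probe point "do_fork" and the register name "gpr3" are illustrative assumptions), a module could read a probed function's first argument register and one word of the kernel stack like this:

	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/kprobes.h>
	#include <asm/ptrace.h>

	/* Illustrative pre-handler: fetch one register and one stack slot. */
	static int pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		int off = regs_query_register_offset("gpr3");	/* assumed name */
		unsigned long arg0 = 0, stack1;

		if (off >= 0)
			arg0 = regs_get_register(regs, off);

		/* Entry 1 of the current kernel stack; returns 0 if out of range. */
		stack1 = regs_get_kernel_stack_nth(regs, 1);

		pr_info("arg0=%lx stack[1]=%lx\n", arg0, stack1);
		return 0;
	}

	static struct kprobe kp = {
		.symbol_name	= "do_fork",	/* illustrative probe point */
		.pre_handler	= pre_handler,
	};

	static int __init probe_init(void)
	{
		return register_kprobe(&kp);
	}

	static void __exit probe_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(probe_init);
	module_exit(probe_exit);
	MODULE_LICENSE("GPL");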

@@ -817,6 +817,7 @@
#define PVR_403GC	0x00200200
#define PVR_403GCX	0x00201400
#define PVR_405GP	0x40110000
+#define PVR_476		0x11a52000
#define PVR_STB03XXX	0x40310000
#define PVR_NP405H	0x41410000
#define PVR_NP405L	0x41610000
@@ -853,6 +854,9 @@
#define PVR_8245	0x80811014
#define PVR_8260	PVR_8240

+/* 476 Simulator seems to currently have the PVR of the 602... */
+#define PVR_476_ISS	0x00052000
+
/* 64-bit processors */
/* XXX the prefix should be PVR_, we'll do a global sweep to fix it one day */
#define PV_NORTHSTAR	0x0033

@@ -191,6 +191,10 @@
#define MCSR_DCFP	0x01000000 /* D-Cache Flush Parity Error */
#define MCSR_IMPE	0x00800000 /* Imprecise Machine Check Exception */

+#define PPC47x_MCSR_GPR	0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR	0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR	0x00400000 /* Imprecise Machine Check Exception */
+
#ifdef CONFIG_E500
#define MCSR_MCP	0x80000000UL /* Machine Check Input Pin */
#define MCSR_ICPERR	0x40000000UL /* I-Cache Parity Error */
@@ -604,5 +608,25 @@
#define DBCR_JOI	0x00000002	/* JTAG Serial Outbound Int. Enable */
#define DBCR_JII	0x00000001	/* JTAG Serial Inbound Int. Enable */
#endif /* 403GCX */
/* Some 476 specific registers */
#define SPRN_SSPCR 830
#define SPRN_USPCR 831
#define SPRN_ISPCR 829
#define SPRN_MMUBE0 820
#define MMUBE0_IBE0_SHIFT 24
#define MMUBE0_IBE1_SHIFT 16
#define MMUBE0_IBE2_SHIFT 8
#define MMUBE0_VBE0 0x00000004
#define MMUBE0_VBE1 0x00000002
#define MMUBE0_VBE2 0x00000001
#define SPRN_MMUBE1 821
#define MMUBE1_IBE3_SHIFT 24
#define MMUBE1_IBE4_SHIFT 16
#define MMUBE1_IBE5_SHIFT 8
#define MMUBE1_VBE3 0x00000004
#define MMUBE1_VBE4 0x00000002
#define MMUBE1_VBE5 0x00000001
#endif /* __ASM_POWERPC_REG_BOOKE_H__ */
#endif /* __KERNEL__ */

@@ -40,7 +40,7 @@ extern void smp_message_recv(int);
DECLARE_PER_CPU(unsigned int, cpu_pvr);

#ifdef CONFIG_HOTPLUG_CPU
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(const struct cpumask *map);
int generic_cpu_disable(void);
int generic_cpu_enable(unsigned int cpu);
void generic_cpu_die(unsigned int cpu);
@@ -68,8 +68,19 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
}
#endif

-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
-DECLARE_PER_CPU(cpumask_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
return per_cpu(cpu_sibling_map, cpu);
}
static inline struct cpumask *cpu_core_mask(int cpu)
{
return per_cpu(cpu_core_map, cpu);
}
extern int cpu_to_core_id(int cpu);

/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
@@ -93,7 +104,6 @@ void smp_init_pSeries(void);
void smp_init_cell(void);
void smp_init_celleb(void);
void smp_setup_cpu_maps(void);
-void smp_setup_cpu_sibling_map(void);

extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);

@@ -8,6 +8,26 @@ struct device_node;

#ifdef CONFIG_NUMA
/*
* Before going off node we want the VM to try and reclaim from the local
* node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
* With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
* 20, we never reclaim and go off node straight away.
*
* To fix this we choose a smaller value of RECLAIM_DISTANCE.
*/
#define RECLAIM_DISTANCE 10
/*
* Before going off node we want the VM to try and reclaim from the local
* node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
* With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
* 20, we never reclaim and go off node straight away.
*
* To fix this we choose a smaller value of RECLAIM_DISTANCE.
*/
#define RECLAIM_DISTANCE 10
#include <asm/mmzone.h>

static inline int cpu_to_node(int cpu)
@@ -19,7 +39,7 @@ static inline int cpu_to_node(int cpu)

#define cpumask_of_node(node) ((node) == -1 ?				\
			       cpu_all_mask :				\
-			       &numa_cpumask_lookup_table[node])
+			       node_to_cpumask_map[node])

int of_node_to_nid(struct device_node *device);
@@ -102,8 +122,8 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#ifdef CONFIG_PPC64
#include <asm/smp.h>

-#define topology_thread_cpumask(cpu)	(&per_cpu(cpu_sibling_map, cpu))
-#define topology_core_cpumask(cpu)	(&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
#define topology_core_id(cpu)		(cpu_to_core_id(cpu))
#endif
#endif

@@ -183,6 +183,7 @@ int main(void)
#endif /* CONFIG_PPC_STD_MMU_64 */
	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+	DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
	DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
	DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
	DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
@@ -447,6 +448,14 @@ int main(void)
	DEFINE(PGD_T_LOG2, PGD_T_LOG2);
	DEFINE(PTE_T_LOG2, PTE_T_LOG2);
#endif
#ifdef CONFIG_FSL_BOOKE
DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam));
DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0));
DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1));
DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2));
DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
#endif
#ifdef CONFIG_KVM_EXIT_TIMING
	DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,

@@ -1701,6 +1701,35 @@ static struct cpu_spec __initdata cpu_specs[] = {
		.machine_check		= machine_check_440A,
		.platform		= "ppc440",
	},
{ /* 476 core */
.pvr_mask = 0xffff0000,
.pvr_value = 0x11a50000,
.cpu_name = "476",
.cpu_features = CPU_FTRS_47X,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
.mmu_features = MMU_FTR_TYPE_47x |
MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
.icache_bsize = 32,
.dcache_bsize = 128,
.machine_check = machine_check_47x,
.platform = "ppc470",
},
{ /* 476 iss */
.pvr_mask = 0xffff0000,
.pvr_value = 0x00050000,
.cpu_name = "476",
.cpu_features = CPU_FTRS_47X,
.cpu_user_features = COMMON_USER_BOOKE |
PPC_FEATURE_HAS_FPU,
.cpu_user_features = COMMON_USER_BOOKE,
.mmu_features = MMU_FTR_TYPE_47x |
MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
.icache_bsize = 32,
.dcache_bsize = 128,
.machine_check = machine_check_47x,
.platform = "ppc470",
},
	{	/* default match */
		.pvr_mask		= 0x00000000,
		.pvr_value		= 0x00000000,

@@ -162,6 +162,32 @@ static void crash_kexec_prepare_cpus(int cpu)

	/* Leave the IPI callback set */
}
/* wait for all the CPUs to hit real mode but timeout if they don't come in */
static void crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
int i;
msecs = 10000;
for (i=0; i < NR_CPUS && msecs > 0; i++) {
if (i == cpu)
continue;
while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
barrier();
if (!cpu_possible(i)) {
break;
}
if (!cpu_online(i)) {
break;
}
msecs--;
mdelay(1);
}
}
mb();
}
/*
 * This function will be called by secondary cpus or by kexec cpu
 * if soft-reset is activated to stop some CPUs.
@@ -347,9 +373,11 @@ int crash_shutdown_unregister(crash_shutdown_t handler)
EXPORT_SYMBOL(crash_shutdown_unregister);

static unsigned long crash_shutdown_buf[JMP_BUF_LEN];
+static int crash_shutdown_cpu = -1;

static int handle_fault(struct pt_regs *regs)
{
+	if (crash_shutdown_cpu == smp_processor_id())
		longjmp(crash_shutdown_buf, 1);
	return 0;
}
@@ -375,11 +403,14 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
	for_each_irq(i) {
		struct irq_desc *desc = irq_to_desc(i);

+		if (!desc || !desc->chip || !desc->chip->eoi)
+			continue;
+
		if (desc->status & IRQ_INPROGRESS)
			desc->chip->eoi(i);

		if (!(desc->status & IRQ_DISABLED))
-			desc->chip->disable(i);
+			desc->chip->shutdown(i);
	}

	/*
@@ -388,6 +419,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
	 */
	old_handler = __debugger_fault_handler;
	__debugger_fault_handler = handle_fault;
+	crash_shutdown_cpu = smp_processor_id();
	for (i = 0; crash_shutdown_handles[i]; i++) {
		if (setjmp(crash_shutdown_buf) == 0) {
			/*
@@ -401,6 +433,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
			asm volatile("sync; isync");
		}
	}
+	crash_shutdown_cpu = -1;
	__debugger_fault_handler = old_handler;

	/*
@@ -412,6 +445,7 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
	crash_kexec_prepare_cpus(crashing_cpu);
	cpu_set(crashing_cpu, cpus_in_crash);
	crash_kexec_stop_spus();
+	crash_kexec_wait_realmode(crashing_cpu);
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(1, 0);
}

@@ -373,11 +373,13 @@ syscall_exit_cont:
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
@@ -848,6 +850,9 @@ resume_kernel:
	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+	b	1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0

@@ -735,8 +735,11 @@ _STATIC(do_hash_page)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)

-	andis.	r0,r4,0xa450		/* weird error? */
+	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if not, try to insert a HPTE */
+	andis.	r0,r4,DSISR_DABRMATCH@h
+	bne-	handle_dabr_fault
+
BEGIN_FTR_SECTION
	andis.	r0,r4,0x0020		/* Is it a segment table fault? */
	bne-	do_ste_alloc		/* If so handle it */
@@ -823,6 +826,14 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
	bl	.raw_local_irq_restore
	b	11f
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_dabr
b .ret_from_except_lite
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
	ENABLE_INTS

File diff suppressed because it is too large.

@@ -71,9 +71,6 @@ _ENTRY(_start);
 * in the first level table, but that would require many changes to the
 * Linux page directory/table functions that I don't want to do right now.
 *
- * I used to use SPRG2 for a temporary register in the TLB handler, but it
- * has since been put to other uses.  I now use a hack to save a register
- * and the CCR at memory location 0.....Someday I'll fix this.....
 *	-- Dan
 */
	.globl	__start
@@ -302,8 +299,13 @@ InstructionTLBMiss:
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
+#ifdef CONFIG_8xx_CPU6
	stw	r10, 0(r0)
	stw	r11, 4(r0)
+#else
+	mtspr	SPRN_DAR, r10
+	mtspr	SPRN_SPRG2, r11
+#endif
	mfspr	r10, SPRN_SRR0	/* Get effective address of fault */
#ifdef CONFIG_8xx_CPU15
	addi	r11, r10, 0x1000
@@ -318,12 +320,16 @@ InstructionTLBMiss:
	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
+#ifdef CONFIG_MODULES
+	/* Only modules will cause ITLB Misses as we always
+	 * pin the first 8MB of kernel memory */
	andi.	r11, r10, 0x0800	/* Address >= 0x80000000 */
	beq	3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l
	rlwimi	r10, r11, 0, 2, 19
3:
+#endif
	lwz	r11, 0(r10)	/* Get the level 1 entry */
	rlwinm.	r10, r11,0,0,19	/* Extract page descriptor page address */
	beq	2f		/* If zero, don't try to find a pte */
@@ -339,31 +345,35 @@ InstructionTLBMiss:
	mfspr	r11, SPRN_MD_TWC	/* ....and get the pte address */
	lwz	r10, 0(r11)	/* Get the pte */
#ifdef CONFIG_SWAP
	andi.	r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
	cmpwi	cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
	bne-	cr0, 2f
#endif
	/* Clear PP lsb, 0x400 */
	rlwinm	r10, r10, 0, 22, 20

	/* The Linux PTE won't go exactly into the MMU TLB.
-	 * Software indicator bits 22 and 28 must be clear.
+	 * Software indicator bits 21 and 28 must be clear.
	 * Software indicator bits 24, 25, 26, and 27 must be
	 * set.  All other Linux PTE bits control the behavior
	 * of the MMU.
	 */
	li	r11, 0x00f0
-	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
+	rlwimi	r10, r11, 0, 0x07f8	/* Set 24-27, clear 21-23,28 */
	DO_8xx_CPU6(0x2d80, r3)
	mtspr	SPRN_MI_RPN, r10	/* Update TLB entry */

-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
#ifndef CONFIG_8xx_CPU6
	mfspr	r10, SPRN_DAR
	mtcr	r10
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r11, SPRN_SPRG2
#else
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	mfspr	r10, SPRN_M_TW
	rfi
2:
	mfspr	r11, SPRN_SRR1
@@ -373,13 +383,20 @@ InstructionTLBMiss:
	rlwinm	r11, r11, 0, 0xffff
	mtspr	SPRN_SRR1, r11

-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
#ifndef CONFIG_8xx_CPU6
	mfspr	r10, SPRN_DAR
	mtcr	r10
	li	r11, 0x00f0
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r11, SPRN_SPRG2
#else
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	mfspr	r10, SPRN_M_TW
	b	InstructionAccess

	. = 0x1200
@@ -390,8 +407,13 @@ DataStoreTLBMiss:
	DO_8xx_CPU6(0x3f80, r3)
	mtspr	SPRN_M_TW, r10	/* Save a couple of working registers */
	mfcr	r10
#ifdef CONFIG_8xx_CPU6
	stw	r10, 0(r0)
	stw	r11, 4(r0)
#else
	mtspr	SPRN_DAR, r10
	mtspr	SPRN_SPRG2, r11
#endif
	mfspr	r10, SPRN_M_TWB	/* Get level 1 table entry address */

	/* If we are faulting a kernel address, we have to use the
@@ -438,15 +460,14 @@ DataStoreTLBMiss:
	 * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
	 * r10 = (r10 & ~PRESENT) | r11;
	 */
#ifdef CONFIG_SWAP
	rlwinm	r11, r10, 32-5, _PAGE_PRESENT
	and	r11, r11, r10
	rlwimi	r10, r11, 0, _PAGE_PRESENT
#endif
	/* Honour kernel RO, User NA */
	/* 0x200 == Extended encoding, bit 22 */
-	/* r11 =  (r10 & _PAGE_USER) >> 2 */
-	rlwinm	r11, r10, 32-2, 0x200
-	or	r10, r11, r10
+	rlwimi	r10, r10, 32-2, 0x200	/* Copy USER to bit 22, 0x200 */
	/* r11 =  (r10 & _PAGE_RW) >> 1 */
	rlwinm	r11, r10, 32-1, 0x200
	or	r10, r11, r10
@@ -460,18 +481,24 @@ DataStoreTLBMiss:
	 * of the MMU.
	 */
2:	li	r11, 0x00f0
	mtspr	SPRN_DAR,r11	/* Tag DAR */
	rlwimi	r10, r11, 0, 24, 28	/* Set 24-27, clear 28 */
	DO_8xx_CPU6(0x3d80, r3)
	mtspr	SPRN_MD_RPN, r10	/* Update TLB entry */

-	mfspr	r10, SPRN_M_TW	/* Restore registers */
+	/* Restore registers */
#ifndef CONFIG_8xx_CPU6
	mfspr	r10, SPRN_DAR
	mtcr	r10
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	mfspr	r11, SPRN_SPRG2
#else
	mtspr	SPRN_DAR, r11	/* Tag DAR */
	lwz	r11, 0(r0)
	mtcr	r11
	lwz	r11, 4(r0)
#ifdef CONFIG_8xx_CPU6
	lwz	r3, 8(r0)
#endif
	mfspr	r10, SPRN_M_TW
	rfi

/* This is an instruction TLB error on the MPC8xx.  This could be due
@@ -683,9 +710,6 @@ start_here:
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
-	li	r3,0
-	/* XXX What is that for ? SPRG2 appears otherwise unused on 8xx */
-	mtspr	SPRN_SPRG2,r3	/* 0 => r1 has kernel sp */

	/* stack */
	lis	r1,init_thread_union@ha

@@ -1,6 +1,7 @@
#ifndef __HEAD_BOOKE_H__
#define __HEAD_BOOKE_H__

+#include <asm/ptrace.h>	/* for STACK_FRAME_REGS_MARKER */
/*
 * Macros used for common Book-e exception handling
 */
@@ -48,6 +49,9 @@
	stw	r10,0(r11);						     \
	rlwinm	r9,r9,0,14,12;		/* clear MSR_WE (necessary?)	   */\
	stw	r0,GPR0(r11);						     \
+	lis	r10, STACK_FRAME_REGS_MARKER@ha; /* exception frame marker */\
+	addi	r10, r10, STACK_FRAME_REGS_MARKER@l;			     \
+	stw	r10, 8(r11);						     \
	SAVE_4GPRS(3, r11);						     \
	SAVE_2GPRS(7, r11)

View file

@ -639,6 +639,13 @@ interrupt_base:
rlwinm r12,r12,0,16,1 rlwinm r12,r12,0,16,1
mtspr SPRN_MAS1,r12 mtspr SPRN_MAS1,r12
/* Make up the required permissions for kernel code */
#ifdef CONFIG_PTE_64BIT
li r13,_PAGE_PRESENT | _PAGE_BAP_SX
oris r13,r13,_PAGE_ACCESSED@h
#else
li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif
b 4f b 4f
/* Get the PGD for the current thread */ /* Get the PGD for the current thread */
@ -646,15 +653,15 @@ interrupt_base:
mfspr r11,SPRN_SPRG_THREAD mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11) lwz r11,PGDIR(r11)
4: /* Make up the required permissions for user code */
/* Make up the required permissions */
#ifdef CONFIG_PTE_64BIT #ifdef CONFIG_PTE_64BIT
li r13,_PAGE_PRESENT | _PAGE_EXEC li r13,_PAGE_PRESENT | _PAGE_BAP_UX
oris r13,r13,_PAGE_ACCESSED@h oris r13,r13,_PAGE_ACCESSED@h
#else #else
li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#endif #endif
4:
FIND_PTE FIND_PTE
andc. r13,r13,r11 /* Check permission */ andc. r13,r13,r11 /* Check permission */

View file

@ -43,20 +43,9 @@
#define DBG(...) #define DBG(...)
static int novmerge; static int novmerge;
static int protect4gb = 1;
static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int); static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
static int __init setup_protect4gb(char *str)
{
if (strcmp(str, "on") == 0)
protect4gb = 1;
else if (strcmp(str, "off") == 0)
protect4gb = 0;
return 1;
}
static int __init setup_iommu(char *str) static int __init setup_iommu(char *str)
{ {
if (!strcmp(str, "novmerge")) if (!strcmp(str, "novmerge"))
@ -66,7 +55,6 @@ static int __init setup_iommu(char *str)
return 1; return 1;
} }
__setup("protect4gb=", setup_protect4gb);
__setup("iommu=", setup_iommu); __setup("iommu=", setup_iommu);
static unsigned long iommu_range_alloc(struct device *dev, static unsigned long iommu_range_alloc(struct device *dev,

View file

@ -284,30 +284,33 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map) void fixup_irqs(const struct cpumask *map)
{ {
struct irq_desc *desc; struct irq_desc *desc;
unsigned int irq; unsigned int irq;
static int warned; static int warned;
cpumask_var_t mask;
alloc_cpumask_var(&mask, GFP_KERNEL);
for_each_irq(irq) { for_each_irq(irq) {
cpumask_t mask;
desc = irq_to_desc(irq); desc = irq_to_desc(irq);
if (desc && desc->status & IRQ_PER_CPU) if (desc && desc->status & IRQ_PER_CPU)
continue; continue;
cpumask_and(&mask, desc->affinity, &map); cpumask_and(mask, desc->affinity, map);
if (any_online_cpu(mask) == NR_CPUS) { if (cpumask_any(mask) >= nr_cpu_ids) {
printk("Breaking affinity for irq %i\n", irq); printk("Breaking affinity for irq %i\n", irq);
mask = map; cpumask_copy(mask, map);
} }
if (desc->chip->set_affinity) if (desc->chip->set_affinity)
desc->chip->set_affinity(irq, &mask); desc->chip->set_affinity(irq, mask);
else if (desc->action && !(warned++)) else if (desc->action && !(warned++))
printk("Cannot set affinity for irq %i\n", irq); printk("Cannot set affinity for irq %i\n", irq);
} }
free_cpumask_var(mask);
local_irq_enable(); local_irq_enable();
mdelay(1); mdelay(1);
local_irq_disable(); local_irq_disable();
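
The fixup_irqs() hunk above replaces the on-stack cpumask_t with an allocated cpumask_var_t and falls back to the full map when the intersection is empty. A minimal kernel-style sketch of that allocate/intersect/fall-back pattern, assuming nothing beyond the standard cpumask API; pick_target_cpus() and its arguments are illustrative names, not part of the patch:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Sketch only: intersect a requested mask with the online CPUs and
 * fall back to the full online mask when nothing is left. */
static int pick_target_cpus(const struct cpumask *requested,
                            struct cpumask *result)
{
        cpumask_var_t tmp;

        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(tmp, requested, cpu_online_mask);
        if (cpumask_any(tmp) >= nr_cpu_ids)     /* empty intersection */
                cpumask_copy(tmp, cpu_online_mask);

        cpumask_copy(result, tmp);
        free_cpumask_var(tmp);
        return 0;
}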

View file

@ -114,6 +114,9 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS #ifdef CONFIG_PPC_ADV_DEBUG_REGS
regs->msr &= ~MSR_CE; regs->msr &= ~MSR_CE;
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM); mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#ifdef CONFIG_PPC_47x
isync();
#endif
#endif #endif
/* /*

View file

@ -38,7 +38,7 @@
#include <asm/vio.h> #include <asm/vio.h>
#include <asm/mmu.h> #include <asm/mmu.h>
#define MODULE_VERS "1.8" #define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg" #define MODULE_NAME "lparcfg"
/* #define LPARCFG_DEBUG */ /* #define LPARCFG_DEBUG */
@ -487,6 +487,14 @@ static void splpar_dispatch_data(struct seq_file *m)
seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions); seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
} }
static void parse_em_data(struct seq_file *m)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}
static int pseries_lparcfg_data(struct seq_file *m, void *v) static int pseries_lparcfg_data(struct seq_file *m, void *v)
{ {
int partition_potential_processors; int partition_potential_processors;
@ -541,6 +549,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "slb_size=%d\n", mmu_slb_size); seq_printf(m, "slb_size=%d\n", mmu_slb_size);
parse_em_data(m);
return 0; return 0;
} }
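
lparcfg 1.9 above adds a power_mode_data= line sourced from the H_GET_EM_PARMS hcall. A small userspace sketch that picks the new field out of /proc/ppc64/lparcfg; nothing below comes from the patch itself, and the field is simply absent when the hypervisor does not support the call:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/ppc64/lparcfg", "r");

        if (!f) {
                perror("lparcfg");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "power_mode_data=", 16))
                        fputs(line, stdout);    /* e.g. power_mode_data=0001... */
        fclose(f);
        return 0;
}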

View file

@ -155,33 +155,38 @@ void kexec_copy_flush(struct kimage *image)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* FIXME: we should schedule this function to be called on all cpus based static int kexec_all_irq_disabled = 0;
* on calling the interrupts, but we would like to call it off irq level
* so that the interrupt controller is clean.
*/
static void kexec_smp_down(void *arg) static void kexec_smp_down(void *arg)
{ {
local_irq_disable();
mb(); /* make sure our irqs are disabled before we say they are */
get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
while(kexec_all_irq_disabled == 0)
cpu_relax();
mb(); /* make sure all irqs are disabled before this */
/*
* Now every CPU has IRQs off, we can clear out any pending
* IPIs and be sure that no more will come in after this.
*/
if (ppc_md.kexec_cpu_down) if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 1); ppc_md.kexec_cpu_down(0, 1);
local_irq_disable();
kexec_smp_wait(); kexec_smp_wait();
/* NOTREACHED */ /* NOTREACHED */
} }
static void kexec_prepare_cpus(void) static void kexec_prepare_cpus_wait(int wait_state)
{ {
int my_cpu, i, notified=-1; int my_cpu, i, notified=-1;
smp_call_function(kexec_smp_down, NULL, /* wait */0);
my_cpu = get_cpu(); my_cpu = get_cpu();
/* Make sure each CPU has at least made it to the state we need */
/* check the others cpus are now down (via paca hw cpu id == -1) */
for (i=0; i < NR_CPUS; i++) { for (i=0; i < NR_CPUS; i++) {
if (i == my_cpu) if (i == my_cpu)
continue; continue;
while (paca[i].hw_cpu_id != -1) { while (paca[i].kexec_state < wait_state) {
barrier(); barrier();
if (!cpu_possible(i)) { if (!cpu_possible(i)) {
printk("kexec: cpu %d hw_cpu_id %d is not" printk("kexec: cpu %d hw_cpu_id %d is not"
@ -201,20 +206,35 @@ static void kexec_prepare_cpus(void)
} }
if (i != notified) { if (i != notified) {
printk( "kexec: waiting for cpu %d (physical" printk( "kexec: waiting for cpu %d (physical"
" %d) to go down\n", " %d) to enter %i state\n",
i, paca[i].hw_cpu_id); i, paca[i].hw_cpu_id, wait_state);
notified = i; notified = i;
} }
} }
} }
mb();
}
static void kexec_prepare_cpus(void)
{
smp_call_function(kexec_smp_down, NULL, /* wait */0);
local_irq_disable();
mb(); /* make sure IRQs are disabled before we say they are */
get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
/* we are sure every CPU has IRQs off at this point */
kexec_all_irq_disabled = 1;
/* after we tell the others to go down */ /* after we tell the others to go down */
if (ppc_md.kexec_cpu_down) if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0); ppc_md.kexec_cpu_down(0, 0);
put_cpu(); /* Before removing MMU mappings make sure all CPUs have entered real mode */
kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);
local_irq_disable(); put_cpu();
} }
#else /* ! SMP */ #else /* ! SMP */
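
The rework above turns kexec shutdown into a two-phase rendezvous: each secondary reports KEXEC_STATE_IRQS_OFF, spins on kexec_all_irq_disabled, then reports KEXEC_STATE_REAL_MODE, while the master waits for every CPU to reach each state in turn. A userspace model of that handshake, with pthreads standing in for CPUs and atomics for the paca fields; the thread count and names are made up for illustration:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4
enum { STATE_NONE, STATE_IRQS_OFF, STATE_REAL_MODE };

static _Atomic int cpu_state[NCPUS];
static _Atomic int all_irqs_off;

static void wait_for_all(int wanted)
{
        for (int i = 1; i < NCPUS; i++)
                while (atomic_load(&cpu_state[i]) < wanted)
                        ;       /* cpu_relax() equivalent */
}

static void *secondary(void *arg)
{
        int cpu = (int)(long)arg;

        atomic_store(&cpu_state[cpu], STATE_IRQS_OFF);  /* "my IRQs are off" */
        while (!atomic_load(&all_irqs_off))
                ;                                       /* wait for the master */
        atomic_store(&cpu_state[cpu], STATE_REAL_MODE); /* "entered real mode" */
        return NULL;
}

int main(void)
{
        pthread_t t[NCPUS];

        for (long i = 1; i < NCPUS; i++)
                pthread_create(&t[i], NULL, secondary, (void *)i);

        wait_for_all(STATE_IRQS_OFF);   /* phase 1: everyone has IRQs off */
        atomic_store(&all_irqs_off, 1);
        wait_for_all(STATE_REAL_MODE);  /* phase 2: everyone is in real mode */

        for (int i = 1; i < NCPUS; i++)
                pthread_join(t[i], NULL);
        puts("all secondaries quiesced");
        return 0;
}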

View file

@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
addi r3,r3,L1_CACHE_BYTES addi r3,r3,L1_CACHE_BYTES
bdnz 0b bdnz 0b
sync sync
#ifndef CONFIG_44x #ifdef CONFIG_44x
/* We don't flush the icache on 44x. Those have a virtual icache /* We don't flush the icache on 44x. Those have a virtual icache
* and we don't have access to the virtual address here (it's * and we don't have access to the virtual address here (it's
* not the page vaddr but where it's mapped in user space). The * not the page vaddr but where it's mapped in user space). The
@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
* a change in the address space occurs, before returning to * a change in the address space occurs, before returning to
* user space * user space
*/ */
BEGIN_MMU_FTR_SECTION
blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
mtctr r4 mtctr r4
1: icbi 0,r6 1: icbi 0,r6
addi r6,r6,L1_CACHE_BYTES addi r6,r6,L1_CACHE_BYTES
bdnz 1b bdnz 1b
sync sync
isync isync
#endif /* CONFIG_44x */
blr blr
#ifndef CONFIG_BOOKE
/* /*
* Flush a particular page from the data cache to RAM, identified * Flush a particular page from the data cache to RAM, identified
* by its physical address. We turn off the MMU so we can just use * by its physical address. We turn off the MMU so we can just use
@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
mtmsr r10 /* restore DR */ mtmsr r10 /* restore DR */
isync isync
blr blr
#endif /* CONFIG_BOOKE */
/* /*
* Clear pages using the dcbz instruction, which doesn't cause any * Clear pages using the dcbz instruction, which doesn't cause any

View file

@ -24,6 +24,7 @@
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/kexec.h>
.text .text
@ -471,6 +472,10 @@ _GLOBAL(kexec_wait)
1: mflr r5 1: mflr r5
addi r5,r5,kexec_flag-1b addi r5,r5,kexec_flag-1b
li r4,KEXEC_STATE_REAL_MODE
stb r4,PACAKEXECSTATE(r13)
SYNC
99: HMT_LOW 99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */ #ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5) lwz r4,0(r5)
@ -494,14 +499,11 @@ kexec_flag:
* note: this is a terminal routine, it does not save lr * note: this is a terminal routine, it does not save lr
* *
* get phys id from paca * get phys id from paca
* set paca id to -1 to say we got here
* switch to real mode * switch to real mode
* join other cpus in kexec_wait(phys_id) * join other cpus in kexec_wait(phys_id)
*/ */
_GLOBAL(kexec_smp_wait) _GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13) lhz r3,PACAHWCPUID(r13)
li r4,-1
sth r4,PACAHWCPUID(r13) /* let others know we left */
bl real_mode bl real_mode
b .kexec_wait b .kexec_wait

View file

@ -18,6 +18,7 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/iseries/lpar_map.h> #include <asm/iseries/lpar_map.h>
#include <asm/iseries/hv_types.h> #include <asm/iseries/hv_types.h>
#include <asm/kexec.h>
/* This symbol is provided by the linker - let it fill in the paca /* This symbol is provided by the linker - let it fill in the paca
* field correctly */ * field correctly */
@ -97,6 +98,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->kernelbase = (unsigned long) _stext; new_paca->kernelbase = (unsigned long) _stext;
new_paca->kernel_msr = MSR_KERNEL; new_paca->kernel_msr = MSR_KERNEL;
new_paca->hw_cpu_id = 0xffff; new_paca->hw_cpu_id = 0xffff;
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task; new_paca->__current = &init_task;
#ifdef CONFIG_PPC_STD_MMU_64 #ifdef CONFIG_PPC_STD_MMU_64
new_paca->slb_shadow_ptr = &slb_shadow[cpu]; new_paca->slb_shadow_ptr = &slb_shadow[cpu];

View file

@ -310,6 +310,8 @@ static void __devinit __of_scan_bus(struct device_node *node,
/* Scan direct children */ /* Scan direct children */
for_each_child_of_node(node, child) { for_each_child_of_node(node, child) {
pr_debug(" * %s\n", child->full_name); pr_debug(" * %s\n", child->full_name);
if (!of_device_is_available(child))
continue;
reg = of_get_property(child, "reg", &reglen); reg = of_get_property(child, "reg", &reglen);
if (reg == NULL || reglen < 20) if (reg == NULL || reglen < 20)
continue; continue;

View file

@ -371,6 +371,9 @@ int set_dabr(unsigned long dabr)
/* XXX should we have a CPU_FTR_HAS_DABR ? */ /* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS #ifdef CONFIG_PPC_ADV_DEBUG_REGS
mtspr(SPRN_DAC1, dabr); mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S) #elif defined(CONFIG_PPC_BOOK3S)
mtspr(SPRN_DABR, dabr); mtspr(SPRN_DABR, dabr);
#endif #endif

View file

@ -38,6 +38,109 @@
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/system.h> #include <asm/system.h>
/*
* The parameter save area on the stack is used to store arguments being passed
* to the callee function and is located at a fixed offset from the stack pointer.
*/
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET 24 /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET 48 /* bytes */
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define STR(s) #s /* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num) \
{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
GPR_OFFSET_NAME(0),
GPR_OFFSET_NAME(1),
GPR_OFFSET_NAME(2),
GPR_OFFSET_NAME(3),
GPR_OFFSET_NAME(4),
GPR_OFFSET_NAME(5),
GPR_OFFSET_NAME(6),
GPR_OFFSET_NAME(7),
GPR_OFFSET_NAME(8),
GPR_OFFSET_NAME(9),
GPR_OFFSET_NAME(10),
GPR_OFFSET_NAME(11),
GPR_OFFSET_NAME(12),
GPR_OFFSET_NAME(13),
GPR_OFFSET_NAME(14),
GPR_OFFSET_NAME(15),
GPR_OFFSET_NAME(16),
GPR_OFFSET_NAME(17),
GPR_OFFSET_NAME(18),
GPR_OFFSET_NAME(19),
GPR_OFFSET_NAME(20),
GPR_OFFSET_NAME(21),
GPR_OFFSET_NAME(22),
GPR_OFFSET_NAME(23),
GPR_OFFSET_NAME(24),
GPR_OFFSET_NAME(25),
GPR_OFFSET_NAME(26),
GPR_OFFSET_NAME(27),
GPR_OFFSET_NAME(28),
GPR_OFFSET_NAME(29),
GPR_OFFSET_NAME(30),
GPR_OFFSET_NAME(31),
REG_OFFSET_NAME(nip),
REG_OFFSET_NAME(msr),
REG_OFFSET_NAME(ctr),
REG_OFFSET_NAME(link),
REG_OFFSET_NAME(xer),
REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
REG_OFFSET_NAME(softe),
#else
REG_OFFSET_NAME(mq),
#endif
REG_OFFSET_NAME(trap),
REG_OFFSET_NAME(dar),
REG_OFFSET_NAME(dsisr),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL;
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
* offset in struct pt_regs. If the @offset is invalid, this returns NULL;
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
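
The lookup helpers above map register names to pt_regs offsets and back through a sentinel-terminated table. A short kernel-style sketch of how a caller might use them; regs_get_by_name() is a hypothetical helper written for illustration in the same file context, not something the patch adds:

/* Sketch only: fetch a register's value by name, e.g. "gpr3" or "nip". */
static unsigned long regs_get_by_name(struct pt_regs *regs, const char *name)
{
        int offset = regs_query_register_offset(name);

        if (offset < 0)
                return 0;       /* unknown register name */
        return *(unsigned long *)((char *)regs + offset);
}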
/* /*
* does not yet catch signals sent when the child dies. * does not yet catch signals sent when the child dies.
* in exit.c or in signal.c. * in exit.c or in signal.c.

View file

@ -691,10 +691,14 @@ void rtas_os_term(char *str)
{ {
int status; int status;
if (panic_timeout) /*
return; * Firmware with the ibm,extended-os-term property is guaranteed
* to always return from an ibm,os-term call. Earlier versions without
if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term")) * this property may terminate the partition which we want to avoid
* since it interferes with panic_timeout.
*/
if (RTAS_UNKNOWN_SERVICE == rtas_token("ibm,os-term") ||
RTAS_UNKNOWN_SERVICE == rtas_token("ibm,extended-os-term"))
return; return;
snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str); snprintf(rtas_os_term_buf, 2048, "OS panic: %s", str);
@ -705,8 +709,7 @@ void rtas_os_term(char *str)
} while (rtas_busy_delay(status)); } while (rtas_busy_delay(status));
if (status != 0) if (status != 0)
printk(KERN_EMERG "ibm,os-term call failed %d\n", printk(KERN_EMERG "ibm,os-term call failed %d\n", status);
status);
} }
static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE; static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;

View file

@ -411,9 +411,9 @@ static void rtas_event_scan(struct work_struct *w)
get_online_cpus(); get_online_cpus();
cpu = next_cpu(smp_processor_id(), cpu_online_map); cpu = cpumask_next(smp_processor_id(), cpu_online_mask);
if (cpu == NR_CPUS) { if (cpu >= nr_cpu_ids) {
cpu = first_cpu(cpu_online_map); cpu = cpumask_first(cpu_online_mask);
if (first_pass) { if (first_pass) {
first_pass = 0; first_pass = 0;
@ -466,8 +466,8 @@ static void start_event_scan(void)
/* Retreive errors from nvram if any */ /* Retreive errors from nvram if any */
retreive_nvram_error_log(); retreive_nvram_error_log();
schedule_delayed_work_on(first_cpu(cpu_online_map), &event_scan_work, schedule_delayed_work_on(cpumask_first(cpu_online_mask),
event_scan_delay); &event_scan_work, event_scan_delay);
} }
static int __init rtas_init(void) static int __init rtas_init(void)
@ -490,6 +490,12 @@ static int __init rtas_init(void)
return -ENODEV; return -ENODEV;
} }
if (!rtas_event_scan_rate) {
/* Broken firmware: take a rate of zero to mean don't scan */
printk(KERN_DEBUG "rtasd: scan rate is 0, not scanning\n");
return 0;
}
/* Make room for the sequence number */ /* Make room for the sequence number */
rtas_error_log_max = rtas_get_error_log_max(); rtas_error_log_max = rtas_get_error_log_max();
rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int); rtas_error_log_buffer_max = rtas_error_log_max + sizeof(int);

View file

@ -161,14 +161,8 @@ extern u32 cpu_temp_both(unsigned long cpu);
DEFINE_PER_CPU(unsigned int, cpu_pvr); DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif #endif
static int show_cpuinfo(struct seq_file *m, void *v) static void show_cpuinfo_summary(struct seq_file *m)
{ {
unsigned long cpu_id = (unsigned long)v - 1;
unsigned int pvr;
unsigned short maj;
unsigned short min;
if (cpu_id == NR_CPUS) {
struct device_node *root; struct device_node *root;
const char *model = NULL; const char *model = NULL;
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32) #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
@ -197,9 +191,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "Memory\t\t: %d MB\n", seq_printf(m, "Memory\t\t: %d MB\n",
(unsigned int)(total_memory / (1024 * 1024))); (unsigned int)(total_memory / (1024 * 1024)));
#endif #endif
}
return 0; static int show_cpuinfo(struct seq_file *m, void *v)
} {
unsigned long cpu_id = (unsigned long)v - 1;
unsigned int pvr;
unsigned short maj;
unsigned short min;
/* We only show online cpus: disable preempt (overzealous, I /* We only show online cpus: disable preempt (overzealous, I
* knew) to prevent cpu going down. */ * knew) to prevent cpu going down. */
@ -308,19 +307,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#endif #endif
preempt_enable(); preempt_enable();
/* If this is the last cpu, print the summary */
if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
show_cpuinfo_summary(m);
return 0; return 0;
} }
static void *c_start(struct seq_file *m, loff_t *pos) static void *c_start(struct seq_file *m, loff_t *pos)
{ {
unsigned long i = *pos; if (*pos == 0) /* just in case, cpu 0 is not the first */
*pos = cpumask_first(cpu_online_mask);
return i <= NR_CPUS ? (void *)(i + 1) : NULL; else
*pos = cpumask_next(*pos - 1, cpu_online_mask);
if ((*pos) < nr_cpu_ids)
return (void *)(unsigned long)(*pos + 1);
return NULL;
} }
static void *c_next(struct seq_file *m, void *v, loff_t *pos) static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{ {
++*pos; (*pos)++;
return c_start(m, pos); return c_start(m, pos);
} }
@ -386,14 +394,14 @@ static void __init cpu_init_thread_core_maps(int tpc)
/** /**
* setup_cpu_maps - initialize the following cpu maps: * setup_cpu_maps - initialize the following cpu maps:
* cpu_possible_map * cpu_possible_mask
* cpu_present_map * cpu_present_mask
* *
* Having the possible map set up early allows us to restrict allocations * Having the possible map set up early allows us to restrict allocations
* of things like irqstacks to num_possible_cpus() rather than NR_CPUS. * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
* *
* We do not initialize the online map here; cpus set their own bits in * We do not initialize the online map here; cpus set their own bits in
* cpu_online_map as they come up. * cpu_online_mask as they come up.
* *
* This function is valid only for Open Firmware systems. finish_device_tree * This function is valid only for Open Firmware systems. finish_device_tree
* must be called before using this. * must be called before using this.

View file

@ -424,9 +424,18 @@ void __init setup_system(void)
DBG(" <- setup_system()\n"); DBG(" <- setup_system()\n");
} }
static u64 slb0_limit(void)
{
if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
return 1UL << SID_SHIFT_1T;
}
return 1UL << SID_SHIFT;
}
#ifdef CONFIG_IRQSTACKS #ifdef CONFIG_IRQSTACKS
static void __init irqstack_early_init(void) static void __init irqstack_early_init(void)
{ {
u64 limit = slb0_limit();
unsigned int i; unsigned int i;
/* /*
@ -436,10 +445,10 @@ static void __init irqstack_early_init(void)
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
softirq_ctx[i] = (struct thread_info *) softirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE, __va(lmb_alloc_base(THREAD_SIZE,
THREAD_SIZE, 0x10000000)); THREAD_SIZE, limit));
hardirq_ctx[i] = (struct thread_info *) hardirq_ctx[i] = (struct thread_info *)
__va(lmb_alloc_base(THREAD_SIZE, __va(lmb_alloc_base(THREAD_SIZE,
THREAD_SIZE, 0x10000000)); THREAD_SIZE, limit));
} }
} }
#else #else
@ -470,7 +479,7 @@ static void __init exc_lvl_early_init(void)
*/ */
static void __init emergency_stack_init(void) static void __init emergency_stack_init(void)
{ {
unsigned long limit; u64 limit;
unsigned int i; unsigned int i;
/* /*
@ -482,7 +491,7 @@ static void __init emergency_stack_init(void)
* bringup, we need to get at them in real mode. This means they * bringup, we need to get at them in real mode. This means they
* must also be within the RMO region. * must also be within the RMO region.
*/ */
limit = min(0x10000000ULL, lmb.rmo_size); limit = min(slb0_limit(), lmb.rmo_size);
for_each_possible_cpu(i) { for_each_possible_cpu(i) {
unsigned long sp; unsigned long sp;
@ -573,12 +582,6 @@ void ppc64_boot_msg(unsigned int src, const char *msg)
printk("[boot]%04x %s\n", src, msg); printk("[boot]%04x %s\n", src, msg);
} }
void cpu_die(void)
{
if (ppc_md.cpu_die)
ppc_md.cpu_die();
}
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define PCPU_DYN_SIZE () #define PCPU_DYN_SIZE ()

View file

@ -59,8 +59,8 @@
struct thread_info *secondary_ti; struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_t, cpu_core_map) = CPU_MASK_NONE; DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map); EXPORT_PER_CPU_SYMBOL(cpu_core_map);
@ -271,6 +271,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
smp_store_cpu_info(boot_cpuid); smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1; cpu_callin_map[boot_cpuid] = 1;
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
}
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (smp_ops) if (smp_ops)
if (smp_ops->probe) if (smp_ops->probe)
max_cpus = smp_ops->probe(); max_cpus = smp_ops->probe();
@ -289,10 +299,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __devinit smp_prepare_boot_cpu(void) void __devinit smp_prepare_boot_cpu(void)
{ {
BUG_ON(smp_processor_id() != boot_cpuid); BUG_ON(smp_processor_id() != boot_cpuid);
set_cpu_online(boot_cpuid, true);
cpu_set(boot_cpuid, per_cpu(cpu_sibling_map, boot_cpuid));
cpu_set(boot_cpuid, per_cpu(cpu_core_map, boot_cpuid));
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
paca[boot_cpuid].__current = current; paca[boot_cpuid].__current = current;
#endif #endif
@ -313,7 +319,7 @@ int generic_cpu_disable(void)
set_cpu_online(cpu, false); set_cpu_online(cpu, false);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
vdso_data->processorCount--; vdso_data->processorCount--;
fixup_irqs(cpu_online_map); fixup_irqs(cpu_online_mask);
#endif #endif
return 0; return 0;
} }
@ -333,7 +339,7 @@ int generic_cpu_enable(unsigned int cpu)
cpu_relax(); cpu_relax();
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
fixup_irqs(cpu_online_map); fixup_irqs(cpu_online_mask);
/* counter the irq disable in fixup_irqs */ /* counter the irq disable in fixup_irqs */
local_irq_enable(); local_irq_enable();
#endif #endif
@ -462,7 +468,7 @@ out:
return id; return id;
} }
/* Must be called when no change can occur to cpu_present_map, /* Must be called when no change can occur to cpu_present_mask,
* i.e. during cpu online or offline. * i.e. during cpu online or offline.
*/ */
static struct device_node *cpu_to_l2cache(int cpu) static struct device_node *cpu_to_l2cache(int cpu)
@ -495,6 +501,14 @@ int __devinit start_secondary(void *unused)
current->active_mm = &init_mm; current->active_mm = &init_mm;
smp_store_cpu_info(cpu); smp_store_cpu_info(cpu);
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
/* Enable decrementer interrupt */
mtspr(SPRN_TCR, TCR_DIE);
#endif
set_dec(tb_ticks_per_jiffy); set_dec(tb_ticks_per_jiffy);
preempt_disable(); preempt_disable();
cpu_callin_map[cpu] = 1; cpu_callin_map[cpu] = 1;
@ -517,15 +531,15 @@ int __devinit start_secondary(void *unused)
for (i = 0; i < threads_per_core; i++) { for (i = 0; i < threads_per_core; i++) {
if (cpu_is_offline(base + i)) if (cpu_is_offline(base + i))
continue; continue;
cpu_set(cpu, per_cpu(cpu_sibling_map, base + i)); cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
cpu_set(base + i, per_cpu(cpu_sibling_map, cpu)); cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
/* cpu_core_map should be a superset of /* cpu_core_map should be a superset of
* cpu_sibling_map even if we don't have cache * cpu_sibling_map even if we don't have cache
* information, so update the former here, too. * information, so update the former here, too.
*/ */
cpu_set(cpu, per_cpu(cpu_core_map, base +i)); cpumask_set_cpu(cpu, cpu_core_mask(base + i));
cpu_set(base + i, per_cpu(cpu_core_map, cpu)); cpumask_set_cpu(base + i, cpu_core_mask(cpu));
} }
l2_cache = cpu_to_l2cache(cpu); l2_cache = cpu_to_l2cache(cpu);
for_each_online_cpu(i) { for_each_online_cpu(i) {
@ -533,8 +547,8 @@ int __devinit start_secondary(void *unused)
if (!np) if (!np)
continue; continue;
if (np == l2_cache) { if (np == l2_cache) {
cpu_set(cpu, per_cpu(cpu_core_map, i)); cpumask_set_cpu(cpu, cpu_core_mask(i));
cpu_set(i, per_cpu(cpu_core_map, cpu)); cpumask_set_cpu(i, cpu_core_mask(cpu));
} }
of_node_put(np); of_node_put(np);
} }
@ -554,19 +568,22 @@ int setup_profiling_timer(unsigned int multiplier)
void __init smp_cpus_done(unsigned int max_cpus) void __init smp_cpus_done(unsigned int max_cpus)
{ {
cpumask_t old_mask; cpumask_var_t old_mask;
/* We want the setup_cpu() here to be called from CPU 0, but our /* We want the setup_cpu() here to be called from CPU 0, but our
* init thread may have been "borrowed" by another CPU in the meantime * init thread may have been "borrowed" by another CPU in the meantime
* se we pin us down to CPU 0 for a short while * se we pin us down to CPU 0 for a short while
*/ */
old_mask = current->cpus_allowed; alloc_cpumask_var(&old_mask, GFP_NOWAIT);
set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); cpumask_copy(old_mask, &current->cpus_allowed);
set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
if (smp_ops && smp_ops->setup_cpu) if (smp_ops && smp_ops->setup_cpu)
smp_ops->setup_cpu(boot_cpuid); smp_ops->setup_cpu(boot_cpuid);
set_cpus_allowed(current, old_mask); set_cpus_allowed_ptr(current, old_mask);
free_cpumask_var(old_mask);
snapshot_timebases(); snapshot_timebases();
@ -591,10 +608,10 @@ int __cpu_disable(void)
/* Update sibling maps */ /* Update sibling maps */
base = cpu_first_thread_in_core(cpu); base = cpu_first_thread_in_core(cpu);
for (i = 0; i < threads_per_core; i++) { for (i = 0; i < threads_per_core; i++) {
cpu_clear(cpu, per_cpu(cpu_sibling_map, base + i)); cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i));
cpu_clear(base + i, per_cpu(cpu_sibling_map, cpu)); cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu));
cpu_clear(cpu, per_cpu(cpu_core_map, base +i)); cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
cpu_clear(base + i, per_cpu(cpu_core_map, cpu)); cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
} }
l2_cache = cpu_to_l2cache(cpu); l2_cache = cpu_to_l2cache(cpu);
@ -603,8 +620,8 @@ int __cpu_disable(void)
if (!np) if (!np)
continue; continue;
if (np == l2_cache) { if (np == l2_cache) {
cpu_clear(cpu, per_cpu(cpu_core_map, i)); cpumask_clear_cpu(cpu, cpu_core_mask(i));
cpu_clear(i, per_cpu(cpu_core_map, cpu)); cpumask_clear_cpu(i, cpu_core_mask(cpu));
} }
of_node_put(np); of_node_put(np);
} }
@ -631,4 +648,10 @@ void cpu_hotplug_driver_unlock()
{ {
mutex_unlock(&powerpc_cpu_hotplug_driver_mutex); mutex_unlock(&powerpc_cpu_hotplug_driver_mutex);
} }
void cpu_die(void)
{
if (ppc_md.cpu_die)
ppc_md.cpu_die();
}
#endif #endif

View file

@ -35,7 +35,7 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
/* Time in microseconds we delay before sleeping in the idle loop */ /* Time in microseconds we delay before sleeping in the idle loop */
DEFINE_PER_CPU(unsigned long, smt_snooze_delay) = { 100 }; DEFINE_PER_CPU(long, smt_snooze_delay) = { 100 };
static ssize_t store_smt_snooze_delay(struct sys_device *dev, static ssize_t store_smt_snooze_delay(struct sys_device *dev,
struct sysdev_attribute *attr, struct sysdev_attribute *attr,
@ -44,9 +44,9 @@ static ssize_t store_smt_snooze_delay(struct sys_device *dev,
{ {
struct cpu *cpu = container_of(dev, struct cpu, sysdev); struct cpu *cpu = container_of(dev, struct cpu, sysdev);
ssize_t ret; ssize_t ret;
unsigned long snooze; long snooze;
ret = sscanf(buf, "%lu", &snooze); ret = sscanf(buf, "%ld", &snooze);
if (ret != 1) if (ret != 1)
return -EINVAL; return -EINVAL;
@ -61,53 +61,23 @@ static ssize_t show_smt_snooze_delay(struct sys_device *dev,
{ {
struct cpu *cpu = container_of(dev, struct cpu, sysdev); struct cpu *cpu = container_of(dev, struct cpu, sysdev);
return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id)); return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
} }
static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay, static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
store_smt_snooze_delay); store_smt_snooze_delay);
/* Only parse OF options if the matching cmdline option was not specified */
static int smt_snooze_cmdline;
static int __init smt_setup(void)
{
struct device_node *options;
const unsigned int *val;
unsigned int cpu;
if (!cpu_has_feature(CPU_FTR_SMT))
return -ENODEV;
options = of_find_node_by_path("/options");
if (!options)
return -ENODEV;
val = of_get_property(options, "ibm,smt-snooze-delay", NULL);
if (!smt_snooze_cmdline && val) {
for_each_possible_cpu(cpu)
per_cpu(smt_snooze_delay, cpu) = *val;
}
of_node_put(options);
return 0;
}
__initcall(smt_setup);
static int __init setup_smt_snooze_delay(char *str) static int __init setup_smt_snooze_delay(char *str)
{ {
unsigned int cpu; unsigned int cpu;
int snooze; long snooze;
if (!cpu_has_feature(CPU_FTR_SMT)) if (!cpu_has_feature(CPU_FTR_SMT))
return 1; return 1;
smt_snooze_cmdline = 1; snooze = simple_strtol(str, NULL, 10);
if (get_option(&str, &snooze)) {
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
per_cpu(smt_snooze_delay, cpu) = snooze; per_cpu(smt_snooze_delay, cpu) = snooze;
}
return 1; return 1;
} }
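
With smt_snooze_delay now a signed long, a value of -1 requests the pure busy loop, matching the simple_strtol() parsing in setup_smt_snooze_delay() above. A userspace sketch that sets it for cpu0; the sysfs path is the usual sysdev location for this attribute and is an assumption here, not taken from the patch:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/devices/system/cpu/cpu0/smt_snooze_delay", "w");

        if (!f) {
                perror("smt_snooze_delay");
                return 1;
        }
        fprintf(f, "-1\n");     /* -1 => never snooze, always busy loop */
        return fclose(f) ? 1 : 0;
}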

View file

@ -380,6 +380,46 @@ int machine_check_440A(struct pt_regs *regs)
} }
return 0; return 0;
} }
int machine_check_47x(struct pt_regs *regs)
{
unsigned long reason = get_mc_reason(regs);
u32 mcsr;
printk(KERN_ERR "Machine check in kernel mode.\n");
if (reason & ESR_IMCP) {
printk(KERN_ERR
"Instruction Synchronous Machine Check exception\n");
mtspr(SPRN_ESR, reason & ~ESR_IMCP);
return 0;
}
mcsr = mfspr(SPRN_MCSR);
if (mcsr & MCSR_IB)
printk(KERN_ERR "Instruction Read PLB Error\n");
if (mcsr & MCSR_DRB)
printk(KERN_ERR "Data Read PLB Error\n");
if (mcsr & MCSR_DWB)
printk(KERN_ERR "Data Write PLB Error\n");
if (mcsr & MCSR_TLBP)
printk(KERN_ERR "TLB Parity Error\n");
if (mcsr & MCSR_ICP) {
flush_instruction_cache();
printk(KERN_ERR "I-Cache Parity Error\n");
}
if (mcsr & MCSR_DCSP)
printk(KERN_ERR "D-Cache Search Parity Error\n");
if (mcsr & PPC47x_MCSR_GPR)
printk(KERN_ERR "GPR Parity Error\n");
if (mcsr & PPC47x_MCSR_FPR)
printk(KERN_ERR "FPR Parity Error\n");
if (mcsr & PPC47x_MCSR_IPR)
printk(KERN_ERR "Machine Check exception is imprecise\n");
/* Clear MCSR */
mtspr(SPRN_MCSR, mcsr);
return 0;
}
#elif defined(CONFIG_E500) #elif defined(CONFIG_E500)
int machine_check_e500(struct pt_regs *regs) int machine_check_e500(struct pt_regs *regs)
{ {

View file

@ -645,8 +645,10 @@ void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
found = 1; found = 1;
break; break;
} }
if (!found) if (!found) {
spin_unlock_irqrestore(&vio_cmo.lock, flags);
return; return;
}
/* Increase/decrease in desired device entitlement */ /* Increase/decrease in desired device entitlement */
if (desired >= viodev->cmo.desired) { if (desired >= viodev->cmo.desired) {
@ -958,9 +960,12 @@ viodev_cmo_rd_attr(allocated);
static ssize_t name_show(struct device *, struct device_attribute *, char *); static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *); static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf);
static struct device_attribute vio_cmo_dev_attrs[] = { static struct device_attribute vio_cmo_dev_attrs[] = {
__ATTR_RO(name), __ATTR_RO(name),
__ATTR_RO(devspec), __ATTR_RO(devspec),
__ATTR_RO(modalias),
__ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH, __ATTR(cmo_desired, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
viodev_cmo_desired_show, viodev_cmo_desired_set), viodev_cmo_desired_show, viodev_cmo_desired_set),
__ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL), __ATTR(cmo_entitled, S_IRUGO, viodev_cmo_entitled_show, NULL),
@ -1320,9 +1325,27 @@ static ssize_t devspec_show(struct device *dev,
return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none"); return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
} }
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
const struct vio_dev *vio_dev = to_vio_dev(dev);
struct device_node *dn;
const char *cp;
dn = dev->archdata.of_node;
if (!dn)
return -ENODEV;
cp = of_get_property(dn, "compatible", NULL);
if (!cp)
return -ENODEV;
return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
}
static struct device_attribute vio_dev_attrs[] = { static struct device_attribute vio_dev_attrs[] = {
__ATTR_RO(name), __ATTR_RO(name),
__ATTR_RO(devspec), __ATTR_RO(devspec),
__ATTR_RO(modalias),
__ATTR_NULL __ATTR_NULL
}; };
@ -1365,6 +1388,7 @@ static struct bus_type vio_bus_type = {
.match = vio_bus_match, .match = vio_bus_match,
.probe = vio_bus_probe, .probe = vio_bus_probe,
.remove = vio_bus_remove, .remove = vio_bus_remove,
.pm = GENERIC_SUBSYS_PM_OPS,
}; };
/** /**

View file

@ -28,7 +28,7 @@ _GLOBAL(strcpy)
/* This clears out any unused part of the destination buffer, /* This clears out any unused part of the destination buffer,
just as the libc version does. -- paulus */ just as the libc version does. -- paulus */
_GLOBAL(strncpy) _GLOBAL(strncpy)
cmpwi 0,r5,0 PPC_LCMPI 0,r5,0
beqlr beqlr
mtctr r5 mtctr r5
addi r6,r3,-1 addi r6,r3,-1
@ -39,7 +39,7 @@ _GLOBAL(strncpy)
bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */ bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
bnelr /* if we didn't hit a null char, we're done */ bnelr /* if we didn't hit a null char, we're done */
mfctr r5 mfctr r5
cmpwi 0,r5,0 /* any space left in destination buffer? */ PPC_LCMPI 0,r5,0 /* any space left in destination buffer? */
beqlr /* we know r0 == 0 here */ beqlr /* we know r0 == 0 here */
2: stbu r0,1(r6) /* clear it out if so */ 2: stbu r0,1(r6) /* clear it out if so */
bdnz 2b bdnz 2b
@ -70,8 +70,8 @@ _GLOBAL(strcmp)
blr blr
_GLOBAL(strncmp) _GLOBAL(strncmp)
PPC_LCMPI r5,0 PPC_LCMPI 0,r5,0
beqlr beq- 2f
mtctr r5 mtctr r5
addi r5,r3,-1 addi r5,r3,-1
addi r4,r4,-1 addi r4,r4,-1
@ -82,6 +82,8 @@ _GLOBAL(strncmp)
beqlr 1 beqlr 1
bdnzt eq,1b bdnzt eq,1b
blr blr
2: li r3,0
blr
_GLOBAL(strlen) _GLOBAL(strlen)
addi r4,r3,-1 addi r4,r3,-1
@ -92,8 +94,8 @@ _GLOBAL(strlen)
blr blr
_GLOBAL(memcmp) _GLOBAL(memcmp)
cmpwi 0,r5,0 PPC_LCMPI 0,r5,0
ble- 2f beq- 2f
mtctr r5 mtctr r5
addi r6,r3,-1 addi r6,r3,-1
addi r4,r4,-1 addi r4,r4,-1
@ -106,8 +108,8 @@ _GLOBAL(memcmp)
blr blr
_GLOBAL(memchr) _GLOBAL(memchr)
cmpwi 0,r5,0 PPC_LCMPI 0,r5,0
ble- 2f beq- 2f
mtctr r5 mtctr r5
addi r3,r3,-1 addi r3,r3,-1
1: lbzu r0,1(r3) 1: lbzu r0,1(r3)

View file

@ -38,7 +38,9 @@ unsigned int tlb_44x_index; /* = 0 */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS; unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
int icache_44x_need_flush; int icache_44x_need_flush;
static void __init ppc44x_update_tlb_hwater(void) unsigned long tlb_47x_boltmap[1024/8];
static void __cpuinit ppc44x_update_tlb_hwater(void)
{ {
extern unsigned int tlb_44x_patch_hwater_D[]; extern unsigned int tlb_44x_patch_hwater_D[];
extern unsigned int tlb_44x_patch_hwater_I[]; extern unsigned int tlb_44x_patch_hwater_I[];
@ -59,7 +61,7 @@ static void __init ppc44x_update_tlb_hwater(void)
} }
/* /*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
*/ */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys) static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{ {
@ -67,12 +69,18 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
ppc44x_update_tlb_hwater(); ppc44x_update_tlb_hwater();
mtspr(SPRN_MMUCR, 0);
__asm__ __volatile__( __asm__ __volatile__(
"tlbwe %2,%3,%4\n" "tlbwe %2,%3,%4\n"
"tlbwe %1,%3,%5\n" "tlbwe %1,%3,%5\n"
"tlbwe %0,%3,%6\n" "tlbwe %0,%3,%6\n"
: :
#ifdef CONFIG_PPC47x
: "r" (PPC47x_TLB2_S_RWX),
#else
: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G), : "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
#endif
"r" (phys), "r" (phys),
"r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M), "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
"r" (entry), "r" (entry),
@ -81,8 +89,93 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
"i" (PPC44x_TLB_ATTRIB)); "i" (PPC44x_TLB_ATTRIB));
} }
static int __init ppc47x_find_free_bolted(void)
{
unsigned int mmube0 = mfspr(SPRN_MMUBE0);
unsigned int mmube1 = mfspr(SPRN_MMUBE1);
if (!(mmube0 & MMUBE0_VBE0))
return 0;
if (!(mmube0 & MMUBE0_VBE1))
return 1;
if (!(mmube0 & MMUBE0_VBE2))
return 2;
if (!(mmube1 & MMUBE1_VBE3))
return 3;
if (!(mmube1 & MMUBE1_VBE4))
return 4;
if (!(mmube1 & MMUBE1_VBE5))
return 5;
return -1;
}
static void __init ppc47x_update_boltmap(void)
{
unsigned int mmube0 = mfspr(SPRN_MMUBE0);
unsigned int mmube1 = mfspr(SPRN_MMUBE1);
if (mmube0 & MMUBE0_VBE0)
__set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube0 & MMUBE0_VBE1)
__set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube0 & MMUBE0_VBE2)
__set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube1 & MMUBE1_VBE3)
__set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube1 & MMUBE1_VBE4)
__set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
tlb_47x_boltmap);
if (mmube1 & MMUBE1_VBE5)
__set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
tlb_47x_boltmap);
}
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
*/
static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
unsigned int rA;
int bolted;
/* Base rA is HW way select, way 0, bolted bit set */
rA = 0x88000000;
/* Look for a bolted entry slot */
bolted = ppc47x_find_free_bolted();
BUG_ON(bolted < 0);
/* Insert bolted slot number */
rA |= bolted << 24;
pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
virt, phys, bolted);
mtspr(SPRN_MMUCR, 0);
__asm__ __volatile__(
"tlbwe %2,%3,0\n"
"tlbwe %1,%3,1\n"
"tlbwe %0,%3,2\n"
:
: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
PPC47x_TLB2_SX
#ifdef CONFIG_SMP
| PPC47x_TLB2_M
#endif
),
"r" (phys),
"r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
"r" (rA));
}
void __init MMU_init_hw(void) void __init MMU_init_hw(void)
{ {
/* This is not useful on 47x but won't hurt either */
ppc44x_update_tlb_hwater(); ppc44x_update_tlb_hwater();
flush_instruction_cache(); flush_instruction_cache();
@ -95,8 +188,51 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
/* Pin in enough TLBs to cover any lowmem not covered by the /* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S */ * initial 256M mapping established in head_44x.S */
for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr; for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
addr += PPC_PIN_SIZE) addr += PPC_PIN_SIZE) {
if (mmu_has_feature(MMU_FTR_TYPE_47x))
ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
else
ppc44x_pin_tlb(addr + PAGE_OFFSET, addr); ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
}
if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
ppc47x_update_boltmap();
#ifdef DEBUG
{
int i;
printk(KERN_DEBUG "bolted entries: ");
for (i = 0; i < 255; i++) {
if (test_bit(i, tlb_47x_boltmap))
printk("%d ", i);
}
printk("\n");
}
#endif /* DEBUG */
}
return total_lowmem; return total_lowmem;
} }
#ifdef CONFIG_SMP
void __cpuinit mmu_init_secondary(int cpu)
{
unsigned long addr;
/* Pin in enough TLBs to cover any lowmem not covered by the
* initial 256M mapping established in head_44x.S
*
* WARNING: This is called with only the first 256M of the
* linear mapping in the TLB and we can't take faults yet
* so beware of what this code uses. It runs off a temporary
* stack. current (r2) isn't initialized, smp_processor_id()
* will not work, current thread info isn't accessible, ...
*/
for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
addr += PPC_PIN_SIZE) {
if (mmu_has_feature(MMU_FTR_TYPE_47x))
ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
else
ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
}
}
#endif /* CONFIG_SMP */

View file

@ -151,13 +151,14 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
if (!user_mode(regs) && (address >= TASK_SIZE)) if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV; return SIGSEGV;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) { if (error_code & DSISR_DABRMATCH) {
/* DABR match */ /* DABR match */
do_dabr(regs, address, error_code); do_dabr(regs, address, error_code);
return 0; return 0;
} }
#endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ #endif
if (in_atomic() || mm == NULL) { if (in_atomic() || mm == NULL) {
if (!user_mode(regs)) if (!user_mode(regs))
@ -307,7 +308,6 @@ good_area:
* make sure we exit gracefully rather than endlessly redo * make sure we exit gracefully rather than endlessly redo
* the fault. * the fault.
*/ */
survive:
ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) { if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM) if (ret & VM_FAULT_OOM)
@ -359,15 +359,10 @@ bad_area_nosemaphore:
*/ */
out_of_memory: out_of_memory:
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
if (is_global_init(current)) { if (!user_mode(regs))
yield();
down_read(&mm->mmap_sem);
goto survive;
}
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_group_exit(SIGKILL);
return SIGKILL; return SIGKILL;
pagefault_out_of_memory();
return 0;
do_sigbus: do_sigbus:
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);

View file

@ -2,7 +2,7 @@
* Modifications by Kumar Gala (galak@kernel.crashing.org) to support * Modifications by Kumar Gala (galak@kernel.crashing.org) to support
* E500 Book E processors. * E500 Book E processors.
* *
* Copyright 2004 Freescale Semiconductor, Inc * Copyright 2004,2010 Freescale Semiconductor, Inc.
* *
* This file contains the routines for initializing the MMU * This file contains the routines for initializing the MMU
* on the 4xx series of chips. * on the 4xx series of chips.
@ -56,19 +56,13 @@
unsigned int tlbcam_index; unsigned int tlbcam_index;
#define NUM_TLBCAMS (64)
#if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS)
#error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS"
#endif #endif
struct tlbcam { #define NUM_TLBCAMS (64)
u32 MAS0; struct tlbcam TLBCAM[NUM_TLBCAMS];
u32 MAS1;
unsigned long MAS2;
u32 MAS3;
u32 MAS7;
} TLBCAM[NUM_TLBCAMS];
struct tlbcamrange { struct tlbcamrange {
unsigned long start; unsigned long start;
@ -109,19 +103,6 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa)
return 0; return 0;
} }
void loadcam_entry(int idx)
{
mtspr(SPRN_MAS0, TLBCAM[idx].MAS0);
mtspr(SPRN_MAS1, TLBCAM[idx].MAS1);
mtspr(SPRN_MAS2, TLBCAM[idx].MAS2);
mtspr(SPRN_MAS3, TLBCAM[idx].MAS3);
if (mmu_has_feature(MMU_FTR_BIG_PHYS))
mtspr(SPRN_MAS7, TLBCAM[idx].MAS7);
asm volatile("isync;tlbwe;isync" : : : "memory");
}
/* /*
* Set up one of the I/D BAT (block address translation) register pairs. * Set up one of the I/D BAT (block address translation) register pairs.
* The parameters are not checked; in particular size must be a power * The parameters are not checked; in particular size must be a power

View file

@ -252,6 +252,47 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
} }
#endif /* CONFIG_PPC_BOOK3E */ #endif /* CONFIG_PPC_BOOK3E */
struct vmemmap_backing *vmemmap_list;
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
static struct vmemmap_backing *next;
static int num_left;
/* allocate a page when required and hand out chunks */
if (!next || !num_left) {
next = vmemmap_alloc_block(PAGE_SIZE, node);
if (unlikely(!next)) {
WARN_ON(1);
return NULL;
}
num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
}
num_left--;
return next++;
}
static __meminit void vmemmap_list_populate(unsigned long phys,
unsigned long start,
int node)
{
struct vmemmap_backing *vmem_back;
vmem_back = vmemmap_list_alloc(node);
if (unlikely(!vmem_back)) {
WARN_ON(1);
return;
}
vmem_back->phys = phys;
vmem_back->virt_addr = start;
vmem_back->list = vmemmap_list;
vmemmap_list = vmem_back;
}
int __meminit vmemmap_populate(struct page *start_page, int __meminit vmemmap_populate(struct page *start_page,
unsigned long nr_pages, int node) unsigned long nr_pages, int node)
{ {
@ -276,6 +317,8 @@ int __meminit vmemmap_populate(struct page *start_page,
if (!p) if (!p)
return -ENOMEM; return -ENOMEM;
vmemmap_list_populate(__pa(p), start, node);
pr_debug(" * %016lx..%016lx allocated at %p\n", pr_debug(" * %016lx..%016lx allocated at %p\n",
start, start + page_size, p); start, start + page_size, p);
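
vmemmap_populate() now records every block it maps in the vmemmap_list chain (phys, virt_addr, with ->list as the next pointer). A sketch of how that list could be walked to translate a vmemmap address back to its backing physical block; vmemmap_backing_phys() and its block_size parameter are assumptions for illustration, not part of the patch:

/* Sketch only: find the physical backing for a vmemmap address. */
static unsigned long vmemmap_backing_phys(unsigned long addr,
                                          unsigned long block_size)
{
        struct vmemmap_backing *vmem_back;

        for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list)
                if (addr >= vmem_back->virt_addr &&
                    addr < vmem_back->virt_addr + block_size)
                        return vmem_back->phys + (addr - vmem_back->virt_addr);

        return 0;       /* not backed (yet) */
}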

View file

@ -395,10 +395,18 @@ void __init mmu_context_init(void)
* the PID/TID comparison is disabled, so we can use a TID of zero * the PID/TID comparison is disabled, so we can use a TID of zero
* to represent all kernel pages as shared among all contexts. * to represent all kernel pages as shared among all contexts.
* -- Dan * -- Dan
*
* The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
* should normally never have to steal though the facility is
* present if needed.
* -- BenH
*/ */
if (mmu_has_feature(MMU_FTR_TYPE_8xx)) { if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
first_context = 0; first_context = 0;
last_context = 15; last_context = 15;
} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
first_context = 1;
last_context = 65535;
} else { } else {
first_context = 1; first_context = 1;
last_context = 255; last_context = 255;

View file

@ -69,12 +69,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
} }
#endif /* CONIFG_8xx */ #endif /* CONIFG_8xx */
/* #if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
* As of today, we don't support tlbivax broadcast on any
* implementation. When that becomes the case, this will be
* an extern.
*/
#ifdef CONFIG_PPC_BOOK3E
extern void _tlbivax_bcast(unsigned long address, unsigned int pid, extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind); unsigned int tsize, unsigned int ind);
#else #else
@ -149,7 +144,15 @@ extern unsigned long mmu_mapin_ram(unsigned long top);
extern void MMU_init_hw(void); extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top); extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void); extern void adjust_total_lowmem(void);
extern void loadcam_entry(unsigned int index);
struct tlbcam {
u32 MAS0;
u32 MAS1;
unsigned long MAS2;
u32 MAS3;
u32 MAS7;
};
#elif defined(CONFIG_PPC32) #elif defined(CONFIG_PPC32)
/* anything 32-bit except 4xx or 8xx */ /* anything 32-bit except 4xx or 8xx */
extern void MMU_init_hw(void); extern void MMU_init_hw(void);

View file

@ -33,16 +33,41 @@ static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); } #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
int numa_cpu_lookup_table[NR_CPUS]; int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES]; cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES]; struct pglist_data *node_data[MAX_NUMNODES];
EXPORT_SYMBOL(numa_cpu_lookup_table); EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table); EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data); EXPORT_SYMBOL(node_data);
static int min_common_depth; static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells; static int n_mem_addr_cells, n_mem_size_cells;
/*
* Allocate node_to_cpumask_map based on number of available nodes
* Requires node_possible_map to be valid.
*
* Note: node_to_cpumask() is not valid until after this is done.
*/
static void __init setup_node_to_cpumask_map(void)
{
unsigned int node, num = 0;
/* setup nr_node_ids if not done yet */
if (nr_node_ids == MAX_NUMNODES) {
for_each_node_mask(node, node_possible_map)
num = node;
nr_node_ids = num + 1;
}
/* allocate the map */
for (node = 0; node < nr_node_ids; node++)
alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
/* cpumask_of_node() will now work */
dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn, static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid) unsigned int *nid)
{ {
@ -138,8 +163,8 @@ static void __cpuinit map_cpu_to_node(int cpu, int node)
dbg("adding cpu %d to node %d\n", cpu, node); dbg("adding cpu %d to node %d\n", cpu, node);
if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
cpu_set(cpu, numa_cpumask_lookup_table[node]); cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
} }
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
@ -149,8 +174,8 @@ static void unmap_cpu_from_node(unsigned long cpu)
dbg("removing cpu %lu from node %d\n", cpu, node); dbg("removing cpu %lu from node %d\n", cpu, node);
if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) { if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
cpu_clear(cpu, numa_cpumask_lookup_table[node]); cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
} else { } else {
printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
cpu, node); cpu, node);
@ -246,7 +271,8 @@ static int __init find_min_common_depth(void)
const unsigned int *ref_points; const unsigned int *ref_points;
struct device_node *rtas_root; struct device_node *rtas_root;
unsigned int len; unsigned int len;
struct device_node *options; struct device_node *chosen;
const char *vec5;
rtas_root = of_find_node_by_path("/rtas"); rtas_root = of_find_node_by_path("/rtas");
@ -264,15 +290,18 @@ static int __init find_min_common_depth(void)
"ibm,associativity-reference-points", &len); "ibm,associativity-reference-points", &len);
/* /*
* For type 1 affinity information we want the first field * For form 1 affinity information we want the first field
*/ */
options = of_find_node_by_path("/options"); #define VEC5_AFFINITY_BYTE 5
if (options) { #define VEC5_AFFINITY 0x80
const char *str; chosen = of_find_node_by_path("/chosen");
str = of_get_property(options, "ibm,associativity-form", NULL); if (chosen) {
if (str && !strcmp(str, "1")) vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
dbg("Using form 1 affinity\n");
index = 0; index = 0;
} }
}
if ((len >= 2 * sizeof(unsigned int)) && ref_points) { if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
depth = ref_points[index]; depth = ref_points[index];
@ -750,8 +779,9 @@ void __init dump_numa_cpu_topology(void)
* If we used a CPU iterator here we would miss printing * If we used a CPU iterator here we would miss printing
* the holes in the cpumap. * the holes in the cpumap.
*/ */
for (cpu = 0; cpu < NR_CPUS; cpu++) { for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) { if (cpumask_test_cpu(cpu,
node_to_cpumask_map[node])) {
if (count == 0) if (count == 0)
printk(" %u", cpu); printk(" %u", cpu);
++count; ++count;
@ -763,7 +793,7 @@ void __init dump_numa_cpu_topology(void)
} }
if (count > 1) if (count > 1)
printk("-%u", NR_CPUS - 1); printk("-%u", nr_cpu_ids - 1);
printk("\n"); printk("\n");
} }
} }
@ -939,10 +969,6 @@ void __init do_init_bootmem(void)
else else
dump_numa_memory_topology(); dump_numa_memory_topology();
register_cpu_notifier(&ppc64_numa_nb);
cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
(void *)(unsigned long)boot_cpuid);
for_each_online_node(nid) { for_each_online_node(nid) {
unsigned long start_pfn, end_pfn; unsigned long start_pfn, end_pfn;
void *bootmem_vaddr; void *bootmem_vaddr;
@ -996,6 +1022,16 @@ void __init do_init_bootmem(void)
} }
init_bootmem_done = 1; init_bootmem_done = 1;
/*
* Now bootmem is initialised we can create the node to cpumask
* lookup tables and setup the cpu callback to populate them.
*/
setup_node_to_cpumask_map();
register_cpu_notifier(&ppc64_numa_nb);
cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
(void *)(unsigned long)boot_cpuid);
} }
void __init paging_init(void) void __init paging_init(void)

View file

@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC); flags &= ~(_PAGE_USER | _PAGE_EXEC);
#ifdef _PAGE_BAP_SR
/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
* which means that we just cleared supervisor access... oops ;-) This
* restores it
*/
flags |= _PAGE_BAP_SR;
#endif
return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
} }
EXPORT_SYMBOL(ioremap_flags); EXPORT_SYMBOL(ioremap_flags);
@ -385,11 +393,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
return -EINVAL; return -EINVAL;
__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0); __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
wmb(); wmb();
#ifdef CONFIG_PPC_STD_MMU
flush_hash_pages(0, address, pmd_val(*kpmd), 1);
#else
flush_tlb_page(NULL, address); flush_tlb_page(NULL, address);
#endif
pte_unmap(kpte); pte_unmap(kpte);
return 0; return 0;


@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
flags &= ~(_PAGE_USER | _PAGE_EXEC); flags &= ~(_PAGE_USER | _PAGE_EXEC);
#ifdef _PAGE_BAP_SR
/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
* which means that we just cleared supervisor access... oops ;-) This
* restores it
*/
flags |= _PAGE_BAP_SR;
#endif
if (ppc_md.ioremap) if (ppc_md.ioremap)
return ppc_md.ioremap(addr, size, flags, caller); return ppc_md.ioremap(addr, size, flags, caller);
return __ioremap_caller(addr, size, flags, caller); return __ioremap_caller(addr, size, flags, caller);


@ -10,7 +10,7 @@
* - tlbil_va * - tlbil_va
* - tlbil_pid * - tlbil_pid
* - tlbil_all * - tlbil_all
* - tlbivax_bcast (not yet) * - tlbivax_bcast
* *
* Code mostly moved over from misc_32.S * Code mostly moved over from misc_32.S
* *
@ -33,6 +33,7 @@
#include <asm/ppc_asm.h> #include <asm/ppc_asm.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/bug.h>
#if defined(CONFIG_40x) #if defined(CONFIG_40x)
@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va)
* Nothing to do for 8xx, everything is inline * Nothing to do for 8xx, everything is inline
*/ */
#elif defined(CONFIG_44x) #elif defined(CONFIG_44x) /* Includes 47x */
/* /*
* 440 implementation uses tlbsx/we for tlbil_va and a full sweep * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va)
*/ */
_GLOBAL(__tlbil_va) _GLOBAL(__tlbil_va)
mfspr r5,SPRN_MMUCR mfspr r5,SPRN_MMUCR
rlwimi r5,r4,0,24,31 /* Set TID */ mfmsr r10
/*
* We write 16 bits of STID since 47x supports that much; we
* will never be passed out-of-bounds values on 440 (hopefully)
*/
rlwimi r5,r4,0,16,31
/* We have to run the search with interrupts disabled, otherwise /* We have to run the search with interrupts disabled, otherwise
* an interrupt which causes a TLB miss can clobber the MMUCR * an interrupt which causes a TLB miss can clobber the MMUCR
@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va)
* and restoring MMUCR, so only normal interrupts have to be * and restoring MMUCR, so only normal interrupts have to be
* taken care of. * taken care of.
*/ */
-	mfmsr	r4
 	wrteei	0
 	mtspr	SPRN_MMUCR,r5
-	tlbsx.	r3, 0, r3
-	wrtee	r4
-	bne	1f
+	tlbsx.	r6,0,r3
+	bne	10f
 	sync
-	/* There are only 64 TLB entries, so r3 < 64,
-	 * which means bit 22, is clear.  Since 22 is
-	 * the V bit in the TLB_PAGEID, loading this
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+	/* On 440 There are only 64 TLB entries, so r3 < 64, which means bit
+	 * 22, is clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
 	 * value will invalidate the TLB entry.
 	 */
-	tlbwe	r3, r3, PPC44x_TLB_PAGEID
+	tlbwe	r6,r6,PPC44x_TLB_PAGEID
 	isync
-1:	blr
+10:	wrtee	r10
+	blr
2:
#ifdef CONFIG_PPC_47x
oris r7,r6,0x8000 /* specify way explicitly */
clrrwi r4,r3,12 /* get an EPN for the hashing with V = 0 */
ori r4,r4,PPC47x_TLBE_SIZE
tlbwe r4,r7,0 /* write it */
isync
wrtee r10
blr
#else /* CONFIG_PPC_47x */
1: trap
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */
_GLOBAL(_tlbil_all) _GLOBAL(_tlbil_all)
_GLOBAL(_tlbil_pid) _GLOBAL(_tlbil_pid)
BEGIN_MMU_FTR_SECTION
b 2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
li r3,0 li r3,0
sync sync
@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid)
isync isync
blr blr
2:
#ifdef CONFIG_PPC_47x
/* 476 variant. There's no simple way to do this; hopefully we'll
* try to limit the number of such full invalidates
*/
mfmsr r11 /* Interrupts off */
wrteei 0
li r3,-1 /* Current set */
lis r10,tlb_47x_boltmap@h
ori r10,r10,tlb_47x_boltmap@l
lis r7,0x8000 /* Specify way explicitly */
b 9f /* For each set */
1: li r9,4 /* Number of ways */
li r4,0 /* Current way */
li r6,0 /* Default entry value 0 */
andi. r0,r8,1 /* Check if way 0 is bolted */
mtctr r9 /* Load way counter */
bne- 3f /* Bolted, skip loading it */
2: /* For each way */
or r5,r3,r4 /* Make way|index for tlbre */
rlwimi r5,r5,16,8,15 /* Copy index into position */
tlbre r6,r5,0 /* Read entry */
3: addis r4,r4,0x2000 /* Next way */
andi. r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
beq 4f /* Nope, skip it */
rlwimi r7,r5,0,1,2 /* Insert way number */
rlwinm r6,r6,0,21,19 /* Clear V */
tlbwe r6,r7,0 /* Write it */
4: bdnz 2b /* Loop for each way */
srwi r8,r8,1 /* Next boltmap bit */
9: cmpwi cr1,r3,255 /* Last set done ? */
addi r3,r3,1 /* Next set */
beq cr1,1f /* End of loop */
andi. r0,r3,0x1f /* Need to load a new boltmap word ? */
bne 1b /* No, loop */
lwz r8,0(r10) /* Load boltmap entry */
addi r10,r10,4 /* Next word */
b 1b /* Then loop */
1: isync /* Sync shadows */
wrtee r11
#else /* CONFIG_PPC_47x */
1: trap
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0;
#endif /* !CONFIG_PPC_47x */
blr
#ifdef CONFIG_PPC_47x
/*
* _tlbivax_bcast is only on 47x. We don't bother doing a runtime
* check though; it will blow up soon enough if we mistakenly try
* to use it on a 440.
*/
_GLOBAL(_tlbivax_bcast)
mfspr r5,SPRN_MMUCR
mfmsr r10
rlwimi r5,r4,0,16,31
wrteei 0
mtspr SPRN_MMUCR,r5
/* tlbivax 0,r3 - use .long to avoid binutils deps */
.long 0x7c000624 | (r3 << 11)
isync
eieio
tlbsync
sync
wrtee r10
blr
#endif /* CONFIG_PPC_47x */
#elif defined(CONFIG_FSL_BOOKE) #elif defined(CONFIG_FSL_BOOKE)
/* /*
@ -271,3 +365,31 @@ _GLOBAL(set_context)
#else #else
#error Unsupported processor type ! #error Unsupported processor type !
#endif #endif
#if defined(CONFIG_FSL_BOOKE)
/*
* extern void loadcam_entry(unsigned int index)
*
* Load TLBCAM[index] entry in to the L2 CAM MMU
*/
_GLOBAL(loadcam_entry)
LOAD_REG_ADDR(r4, TLBCAM)
mulli r5,r3,TLBCAM_SIZE
add r3,r5,r4
lwz r4,TLBCAM_MAS0(r3)
mtspr SPRN_MAS0,r4
lwz r4,TLBCAM_MAS1(r3)
mtspr SPRN_MAS1,r4
PPC_LL r4,TLBCAM_MAS2(r3)
mtspr SPRN_MAS2,r4
lwz r4,TLBCAM_MAS3(r3)
mtspr SPRN_MAS3,r4
BEGIN_MMU_FTR_SECTION
lwz r4,TLBCAM_MAS7(r3)
mtspr SPRN_MAS7,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
isync
tlbwe
isync
blr
#endif


@ -1,3 +1,12 @@
config PPC_47x
bool "Support for 47x variant"
depends on 44x
default n
select MPIC
help
This option enables support for the 47x family of processors and is
not currently compatible with other 44x or 46x variants.
config BAMBOO config BAMBOO
bool "Bamboo" bool "Bamboo"
depends on 44x depends on 44x
@ -151,6 +160,17 @@ config YOSEMITE
help help
This option enables support for the AMCC PPC440EP evaluation board. This option enables support for the AMCC PPC440EP evaluation board.
config ISS4xx
bool "ISS 4xx Simulator"
depends on (44x || 40x)
default n
select 405GP if 40x
select 440GP if 44x && !PPC_47x
select PPC_FPU
select OF_RTC
help
This option enables support for the IBM ISS simulation environment.
#config LUAN #config LUAN
# bool "Luan" # bool "Luan"
# depends on 44x # depends on 44x


@ -5,3 +5,4 @@ obj-$(CONFIG_SAM440EP) += sam440ep.o
obj-$(CONFIG_WARP) += warp.o obj-$(CONFIG_WARP) += warp.o
obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
obj-$(CONFIG_ISS4xx) += iss4xx.o


@ -0,0 +1,167 @@
/*
* PPC476 board specific routines
*
* Copyright 2010 Torez Smith, IBM Corporation.
*
* Based on earlier code:
* Matt Porter <mporter@kernel.crashing.org>
* Copyright 2002-2005 MontaVista Software Inc.
*
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
* Copyright (c) 2003-2005 Zultys Technologies
*
* Rewritten and ported to the merged powerpc tree:
* Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/rtc.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/time.h>
#include <asm/uic.h>
#include <asm/ppc4xx.h>
#include <asm/mpic.h>
#include <asm/mmu.h>
static __initdata struct of_device_id iss4xx_of_bus[] = {
{ .compatible = "ibm,plb4", },
{ .compatible = "ibm,plb6", },
{ .compatible = "ibm,opb", },
{ .compatible = "ibm,ebc", },
{},
};
static int __init iss4xx_device_probe(void)
{
of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
of_instantiate_rtc();
return 0;
}
machine_device_initcall(iss4xx, iss4xx_device_probe);
/* We can have either UICs or MPICs */
static void __init iss4xx_init_irq(void)
{
struct device_node *np;
/* Find top level interrupt controller */
for_each_node_with_property(np, "interrupt-controller") {
if (of_get_property(np, "interrupts", NULL) == NULL)
break;
}
if (np == NULL)
panic("Can't find top level interrupt controller");
/* Check type and do appropriate initialization */
if (of_device_is_compatible(np, "ibm,uic")) {
uic_init_tree();
ppc_md.get_irq = uic_get_irq;
#ifdef CONFIG_MPIC
} else if (of_device_is_compatible(np, "chrp,open-pic")) {
/* The MPIC driver will get everything it needs from the
* device-tree, just pass 0 to all arguments
*/
struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
" MPIC ");
BUG_ON(mpic == NULL);
mpic_init(mpic);
ppc_md.get_irq = mpic_get_irq;
#endif
} else
panic("Unrecognized top level interrupt controller");
}
#ifdef CONFIG_SMP
static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
{
mpic_setup_this_cpu();
}
static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
{
struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
const u64 *spin_table_addr_prop;
u32 *spin_table;
extern void start_secondary_47x(void);
BUG_ON(cpunode == NULL);
/* Assume spin table. We could test for the enable-method in
* the device-tree but currently there's little point as it's
* our only supported method
*/
spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
NULL);
if (spin_table_addr_prop == NULL) {
pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
return;
}
/* Assume it's mapped as part of the linear mapping. This is a bit
* fishy but will work fine for now
*/
spin_table = (u32 *)__va(*spin_table_addr_prop);
pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
spin_table[3] = cpu;
smp_wmb();
spin_table[1] = __pa(start_secondary_47x);
mb();
}
static struct smp_ops_t iss_smp_ops = {
.probe = smp_mpic_probe,
.message_pass = smp_mpic_message_pass,
.setup_cpu = smp_iss4xx_setup_cpu,
.kick_cpu = smp_iss4xx_kick_cpu,
.give_timebase = smp_generic_give_timebase,
.take_timebase = smp_generic_take_timebase,
};
static void __init iss4xx_smp_init(void)
{
if (mmu_has_feature(MMU_FTR_TYPE_47x))
smp_ops = &iss_smp_ops;
}
#else /* CONFIG_SMP */
static void __init iss4xx_smp_init(void) { }
#endif /* CONFIG_SMP */
static void __init iss4xx_setup_arch(void)
{
iss4xx_smp_init();
}
/*
* Called very early, MMU is off, device-tree isn't unflattened
*/
static int __init iss4xx_probe(void)
{
unsigned long root = of_get_flat_dt_root();
if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
return 0;
return 1;
}
define_machine(iss4xx) {
.name = "ISS-4xx",
.probe = iss4xx_probe,
.progress = udbg_progress,
.init_IRQ = iss4xx_init_irq,
.setup_arch = iss4xx_setup_arch,
.restart = ppc4xx_reset_system,
.calibrate_decr = generic_calibrate_decr,
};


@ -74,6 +74,7 @@ static int __init mpc831x_rdb_probe(void)
static struct of_device_id __initdata of_bus_ids[] = { static struct of_device_id __initdata of_bus_ids[] = {
{ .compatible = "simple-bus" }, { .compatible = "simple-bus" },
{ .compatible = "gianfar" }, { .compatible = "gianfar" },
{ .compatible = "gpio-leds", },
{}, {},
}; };


@ -72,6 +72,7 @@ static struct of_device_id mpc837x_ids[] = {
{ .compatible = "soc", }, { .compatible = "soc", },
{ .compatible = "simple-bus", }, { .compatible = "simple-bus", },
{ .compatible = "gianfar", }, { .compatible = "gianfar", },
{ .compatible = "gpio-leds", },
{}, {},
}; };


@ -83,7 +83,8 @@ static struct of_device_id __initdata mpc8610_ids[] = {
 	{ .compatible = "fsl,mpc8610-immr", },
 	{ .compatible = "fsl,mpc8610-guts", },
 	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
+	/* So that the DMA channel nodes can be probed individually: */
+	{ .compatible = "fsl,eloplus-dma", },
 	{}
 };


@ -43,7 +43,7 @@ config 40x
select PPC_PCI_CHOICE select PPC_PCI_CHOICE
config 44x config 44x
bool "AMCC 44x" bool "AMCC 44x, 46x or 47x"
select PPC_DCR_NATIVE select PPC_DCR_NATIVE
select PPC_UDBG_16550 select PPC_UDBG_16550
select 4xx_SOC select 4xx_SOC
@ -294,7 +294,7 @@ config PPC_PERF_CTRS
This enables the powerpc-specific perf_event back-end. This enables the powerpc-specific perf_event back-end.
config SMP config SMP
depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
bool "Symmetric multi-processing support" bool "Symmetric multi-processing support"
---help--- ---help---
This enables support for systems with more than one CPU. If you have This enables support for systems with more than one CPU. If you have
@ -322,6 +322,7 @@ config NR_CPUS
config NOT_COHERENT_CACHE config NOT_COHERENT_CACHE
bool bool
depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
default n if PPC_47x
default y default y
config CHECK_CACHE_COHERENCY config CHECK_CACHE_COHERENCY


@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->cur = cbe_freqs[cur_pmode].frequency; policy->cur = cbe_freqs[cur_pmode].frequency;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu)); cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
#endif #endif
cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);


@ -252,8 +252,8 @@ decrementer_iSeries_masked:
 	li	r11,1
 	ld	r12,PACALPPACAPTR(r13)
 	stb	r11,LPPACADECRINT(r12)
-	LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
-	lwz	r12,0(r12)
+	li	r12,-1
+	clrldi	r12,r12,33	/* set DEC to 0x7fffffff */
 	mtspr	SPRN_DEC,r12
 	/* fall through */
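Why li r12,-1 followed by clrldi r12,r12,33 yields 0x7fffffff: loading -1 sets all 64 bits, and clrldi clears the 33 most-significant ones, leaving the low 31 bits set (0x7fffffff, the largest positive 32-bit value). The same arithmetic in plain C:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t r12 = (uint64_t)-1;	/* li r12,-1: all 64 bits set */
	r12 = (r12 << 33) >> 33;	/* clrldi r12,r12,33: clear top 33 bits */
	printf("DEC = 0x%llx\n", (unsigned long long)r12);	/* prints 0x7fffffff */
	return 0;
}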


@ -32,6 +32,7 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/ratelimit.h>
#include <asm/types.h> #include <asm/types.h>
#include <asm/io.h> #include <asm/io.h>
@ -584,14 +585,9 @@ static inline struct device_node *xlate_iomm_address(
 	orig_addr = (unsigned long __force)addr;
 	if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
-		static unsigned long last_jiffies;
-		static int num_printed;
+		static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
 
-		if (time_after(jiffies, last_jiffies + 60 * HZ)) {
-			last_jiffies = jiffies;
-			num_printed = 0;
-		}
-		if (num_printed++ < 10)
+		if (__ratelimit(&ratelimit))
 			printk(KERN_ERR
 				"iSeries_%s: invalid access at IO address %p\n",
 				func, addr);
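The replacement relies on the generic ratelimit helpers from <linux/ratelimit.h>: DEFINE_RATELIMIT_STATE(name, interval, burst) declares the bookkeeping that the old last_jiffies/num_printed pair did by hand, and __ratelimit() returns true while the burst budget for the current interval lasts. A hedged kernel-style sketch of the same pattern (the function name and message are illustrative):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

/* Print at most 10 warnings per 60*HZ window, as in the hunk above. */
static void report_bad_io_access(const void __iomem *addr)
{
	static DEFINE_RATELIMIT_STATE(rs, 60 * HZ, 10);

	if (__ratelimit(&rs))
		printk(KERN_ERR "invalid access at IO address %p\n", addr);
}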


@ -83,7 +83,7 @@ static void smp_iSeries_message_pass(int target, int msg)
static int smp_iSeries_probe(void) static int smp_iSeries_probe(void)
{ {
return cpus_weight(cpu_possible_map); return cpumask_weight(cpu_possible_mask);
} }
static void smp_iSeries_kick_cpu(int nr) static void smp_iSeries_kick_cpu(int nr)


@ -213,7 +213,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
pr_debug("current astate is at %d\n",cur_astate); pr_debug("current astate is at %d\n",cur_astate);
policy->cur = pas_freqs[cur_astate].frequency; policy->cur = pas_freqs[cur_astate].frequency;
cpumask_copy(policy->cpus, &cpu_online_map); cpumask_copy(policy->cpus, cpu_online_mask);
ppc_proc_freq = policy->cur * 1000ul; ppc_proc_freq = policy->cur * 1000ul;


@ -362,7 +362,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the /* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that * cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */ * it actually must be one policy together with all others. */
cpumask_copy(policy->cpus, &cpu_online_map); cpumask_copy(policy->cpus, cpu_online_mask);
cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy, return cpufreq_frequency_table_cpuinfo(policy,


@ -592,7 +592,7 @@ static void __init kw_i2c_probe(void)
/* Probe keywest-i2c busses */ /* Probe keywest-i2c busses */
for_each_compatible_node(np, "i2c","keywest-i2c") { for_each_compatible_node(np, "i2c","keywest-i2c") {
struct pmac_i2c_host_kw *host; struct pmac_i2c_host_kw *host;
int multibus, chans, i; int multibus;
/* Found one, init a host structure */ /* Found one, init a host structure */
host = kw_i2c_host_init(np); host = kw_i2c_host_init(np);
@ -614,6 +614,8 @@ static void __init kw_i2c_probe(void)
* parent type * parent type
*/ */
if (multibus) { if (multibus) {
int chans, i;
parent = of_get_parent(np); parent = of_get_parent(np);
if (parent == NULL) if (parent == NULL)
continue; continue;
@ -1258,7 +1260,6 @@ static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
if (inst == NULL) if (inst == NULL)
return; return;
pmac_i2c_close(inst->bus); pmac_i2c_close(inst->bus);
if (inst)
kfree(inst); kfree(inst);
} }


@ -33,6 +33,8 @@ extern void pmac_setup_pci_dma(void);
extern void pmac_check_ht_link(void); extern void pmac_check_ht_link(void);
extern void pmac_setup_smp(void); extern void pmac_setup_smp(void);
extern void pmac32_cpu_die(void);
extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void); extern int pmac_nvram_init(void);
extern void pmac_pic_init(void); extern void pmac_pic_init(void);


@ -480,7 +480,7 @@ static void __init pmac_init_early(void)
#endif #endif
/* SMP Init has to be done early as we need to patch up /* SMP Init has to be done early as we need to patch up
* cpu_possible_map before interrupt stacks are allocated * cpu_possible_mask before interrupt stacks are allocated
* or kaboom... * or kaboom...
*/ */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
@ -646,7 +646,7 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
/* access per cpu vars from generic smp.c */ /* access per cpu vars from generic smp.c */
DECLARE_PER_CPU(int, cpu_state); DECLARE_PER_CPU(int, cpu_state);
static void pmac_cpu_die(void) static void pmac64_cpu_die(void)
{ {
/* /*
* turn off as much as possible, we'll be * turn off as much as possible, we'll be
@ -717,8 +717,13 @@ define_machine(powermac) {
.pcibios_after_init = pmac_pcibios_after_init, .pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot, .phys_mem_access_prot = pci_phys_mem_access_prot,
#endif #endif
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64) #ifdef CONFIG_HOTPLUG_CPU
.cpu_die = pmac_cpu_die, #ifdef CONFIG_PPC64
.cpu_die = pmac64_cpu_die,
#endif
#ifdef CONFIG_PPC32
.cpu_die = pmac32_cpu_die,
#endif
#endif #endif
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
.cpu_die = generic_mach_cpu_die, .cpu_die = generic_mach_cpu_die,


@ -53,6 +53,8 @@
#include <asm/pmac_low_i2c.h> #include <asm/pmac_low_i2c.h>
#include <asm/pmac_pfunc.h> #include <asm/pmac_pfunc.h>
#include "pmac.h"
#undef DEBUG #undef DEBUG
#ifdef DEBUG #ifdef DEBUG
@ -315,7 +317,7 @@ static int __init smp_psurge_probe(void)
/* This is necessary because OF doesn't know about the /* This is necessary because OF doesn't know about the
* secondary cpu(s), and thus there aren't nodes in the * secondary cpu(s), and thus there aren't nodes in the
* device tree for them, and smp_setup_cpu_maps hasn't * device tree for them, and smp_setup_cpu_maps hasn't
* set their bits in cpu_present_map. * set their bits in cpu_present_mask.
*/ */
if (ncpus > NR_CPUS) if (ncpus > NR_CPUS)
ncpus = NR_CPUS; ncpus = NR_CPUS;
@ -878,10 +880,9 @@ int smp_core99_cpu_disable(void)
return 0; return 0;
} }
extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
static int cpu_dead[NR_CPUS]; static int cpu_dead[NR_CPUS];
void cpu_die(void) void pmac32_cpu_die(void)
{ {
local_irq_disable(); local_irq_disable();
cpu_dead[smp_processor_id()] = 1; cpu_dead[smp_processor_id()] = 1;
@ -944,7 +945,7 @@ void __init pmac_setup_smp(void)
} }
#ifdef CONFIG_PPC32 #ifdef CONFIG_PPC32
else { else {
/* We have to set bits in cpu_possible_map here since the /* We have to set bits in cpu_possible_mask here since the
* secondary CPU(s) aren't in the device tree. Various * secondary CPU(s) aren't in the device tree. Various
* things won't be initialized for CPUs not in the possible * things won't be initialized for CPUs not in the possible
* map, so we really need to fix it up here. * map, so we really need to fix it up here.


@ -7,7 +7,7 @@ EXTRA_CFLAGS += -DDEBUG
endif endif
obj-y := lpar.o hvCall.o nvram.o reconfig.o \ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
setup.o iommu.o ras.o \ setup.o iommu.o event_sources.o ras.o \
firmware.o power.o dlpar.o firmware.o power.o dlpar.o
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_XICS) += xics.o obj-$(CONFIG_XICS) += xics.o


@ -79,13 +79,12 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
* prepend this to the full_name. * prepend this to the full_name.
*/ */
name = (char *)ccwa + ccwa->name_offset; name = (char *)ccwa + ccwa->name_offset;
dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL); dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
if (!dn->full_name) { if (!dn->full_name) {
kfree(dn); kfree(dn);
return NULL; return NULL;
} }
sprintf(dn->full_name, "/%s", name);
return dn; return dn;
} }
@ -410,15 +409,13 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
* directory of the device tree. CPUs actually live in the * directory of the device tree. CPUs actually live in the
* cpus directory so we need to fixup the full_name. * cpus directory so we need to fixup the full_name.
*/ */
cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1, cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
GFP_KERNEL);
if (!cpu_name) { if (!cpu_name) {
dlpar_free_cc_nodes(dn); dlpar_free_cc_nodes(dn);
rc = -ENOMEM; rc = -ENOMEM;
goto out; goto out;
} }
sprintf(cpu_name, "/cpus%s", dn->full_name);
kfree(dn->full_name); kfree(dn->full_name);
dn->full_name = cpu_name; dn->full_name = cpu_name;
@ -433,6 +430,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
if (rc) { if (rc) {
dlpar_release_drc(drc_index); dlpar_release_drc(drc_index);
dlpar_free_cc_nodes(dn); dlpar_free_cc_nodes(dn);
goto out;
} }
rc = dlpar_online_cpu(dn); rc = dlpar_online_cpu(dn);

View file

@ -749,7 +749,7 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn)
/* Determine type of EEH reset required by device, /* Determine type of EEH reset required by device,
* default hot reset or fundamental reset * default hot reset or fundamental reset
*/ */
if (dev->needs_freset) if (dev && dev->needs_freset)
rtas_pci_slot_reset(pdn, 3); rtas_pci_slot_reset(pdn, 3);
else else
rtas_pci_slot_reset(pdn, 1); rtas_pci_slot_reset(pdn, 1);


@ -0,0 +1,79 @@
/*
* Copyright (C) 2001 Dave Engebretsen IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/prom.h>
#include "pseries.h"
void request_event_sources_irqs(struct device_node *np,
irq_handler_t handler,
const char *name)
{
int i, index, count = 0;
struct of_irq oirq;
const u32 *opicprop;
unsigned int opicplen;
unsigned int virqs[16];
/* Check for obsolete "open-pic-interrupt" property. If present, then
* map those interrupts using the default interrupt host and default
* trigger
*/
opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
if (opicprop) {
opicplen /= sizeof(u32);
for (i = 0; i < opicplen; i++) {
if (count > 15)
break;
virqs[count] = irq_create_mapping(NULL, *(opicprop++));
if (virqs[count] == NO_IRQ)
printk(KERN_ERR "Unable to allocate interrupt "
"number for %s\n", np->full_name);
else
count++;
}
}
/* Else use normal interrupt tree parsing */
else {
/* First try to do a proper OF tree parsing */
for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
index++) {
if (count > 15)
break;
virqs[count] = irq_create_of_mapping(oirq.controller,
oirq.specifier,
oirq.size);
if (virqs[count] == NO_IRQ)
printk(KERN_ERR "Unable to allocate interrupt "
"number for %s\n", np->full_name);
else
count++;
}
}
/* Now request them */
for (i = 0; i < count; i++) {
if (request_irq(virqs[i], handler, 0, name, NULL)) {
printk(KERN_ERR "Unable to request interrupt %d for "
"%s\n", virqs[i], np->full_name);
return;
}
}
}


@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
for(;;); for(;;);
} }
static int qcss_tok; /* query-cpu-stopped-state token */
/* Get state of physical CPU.
* Return codes:
* 0 - The processor is in the RTAS stopped state
* 1 - stop-self is in progress
* 2 - The processor is not in the RTAS stopped state
* -1 - Hardware Error
* -2 - Hardware Busy, Try again later.
*/
static int query_cpu_stopped(unsigned int pcpu)
{
int cpu_status, status;
status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
if (status != 0) {
printk(KERN_ERR
"RTAS query-cpu-stopped-state failed: %i\n", status);
return status;
}
return cpu_status;
}
static int pseries_cpu_disable(void) static int pseries_cpu_disable(void)
{ {
int cpu = smp_processor_id(); int cpu = smp_processor_id();
@ -187,7 +163,7 @@ static int pseries_cpu_disable(void)
/*fix boot_cpuid here*/ /*fix boot_cpuid here*/
if (cpu == boot_cpuid) if (cpu == boot_cpuid)
boot_cpuid = any_online_cpu(cpu_online_map); boot_cpuid = cpumask_any(cpu_online_mask);
/* FIXME: abstract this to not be platform specific later on */ /* FIXME: abstract this to not be platform specific later on */
xics_migrate_irqs_away(); xics_migrate_irqs_away();
@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
for (tries = 0; tries < 25; tries++) { for (tries = 0; tries < 25; tries++) {
cpu_status = query_cpu_stopped(pcpu); cpu_status = smp_query_cpu_stopped(pcpu);
if (cpu_status == 0 || cpu_status == -1) if (cpu_status == QCSS_STOPPED ||
cpu_status == QCSS_HARDWARE_ERROR)
break; break;
cpu_relax(); cpu_relax();
} }
@ -245,7 +222,7 @@ static void pseries_cpu_die(unsigned int cpu)
} }
/* /*
* Update cpu_present_map and paca(s) for a new cpu node. The wrinkle * Update cpu_present_mask and paca(s) for a new cpu node. The wrinkle
* here is that a cpu device node may represent up to two logical cpus * here is that a cpu device node may represent up to two logical cpus
* in the SMT case. We must honor the assumption in other code that * in the SMT case. We must honor the assumption in other code that
* the logical ids for sibling SMT threads x and y are adjacent, such * the logical ids for sibling SMT threads x and y are adjacent, such
@ -254,7 +231,7 @@ static void pseries_cpu_die(unsigned int cpu)
static int pseries_add_processor(struct device_node *np) static int pseries_add_processor(struct device_node *np)
{ {
unsigned int cpu; unsigned int cpu;
cpumask_t candidate_map, tmp = CPU_MASK_NONE; cpumask_var_t candidate_mask, tmp;
int err = -ENOSPC, len, nthreads, i; int err = -ENOSPC, len, nthreads, i;
const u32 *intserv; const u32 *intserv;
@ -262,48 +239,53 @@ static int pseries_add_processor(struct device_node *np)
if (!intserv) if (!intserv)
return 0; return 0;
zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
zalloc_cpumask_var(&tmp, GFP_KERNEL);
nthreads = len / sizeof(u32); nthreads = len / sizeof(u32);
for (i = 0; i < nthreads; i++) for (i = 0; i < nthreads; i++)
cpu_set(i, tmp); cpumask_set_cpu(i, tmp);
cpu_maps_update_begin(); cpu_maps_update_begin();
BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map)); BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
/* Get a bitmap of unoccupied slots. */ /* Get a bitmap of unoccupied slots. */
cpus_xor(candidate_map, cpu_possible_map, cpu_present_map); cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
if (cpus_empty(candidate_map)) { if (cpumask_empty(candidate_mask)) {
/* If we get here, it most likely means that NR_CPUS is /* If we get here, it most likely means that NR_CPUS is
* less than the partition's max processors setting. * less than the partition's max processors setting.
*/ */
printk(KERN_ERR "Cannot add cpu %s; this system configuration" printk(KERN_ERR "Cannot add cpu %s; this system configuration"
" supports %d logical cpus.\n", np->full_name, " supports %d logical cpus.\n", np->full_name,
cpus_weight(cpu_possible_map)); cpumask_weight(cpu_possible_mask));
goto out_unlock; goto out_unlock;
} }
while (!cpus_empty(tmp)) while (!cpumask_empty(tmp))
if (cpus_subset(tmp, candidate_map)) if (cpumask_subset(tmp, candidate_mask))
/* Found a range where we can insert the new cpu(s) */ /* Found a range where we can insert the new cpu(s) */
break; break;
else else
cpus_shift_left(tmp, tmp, nthreads); cpumask_shift_left(tmp, tmp, nthreads);
if (cpus_empty(tmp)) { if (cpumask_empty(tmp)) {
printk(KERN_ERR "Unable to find space in cpu_present_map for" printk(KERN_ERR "Unable to find space in cpu_present_mask for"
" processor %s with %d thread(s)\n", np->name, " processor %s with %d thread(s)\n", np->name,
nthreads); nthreads);
goto out_unlock; goto out_unlock;
} }
for_each_cpu_mask(cpu, tmp) { for_each_cpu(cpu, tmp) {
BUG_ON(cpu_isset(cpu, cpu_present_map)); BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
set_cpu_present(cpu, true); set_cpu_present(cpu, true);
set_hard_smp_processor_id(cpu, *intserv++); set_hard_smp_processor_id(cpu, *intserv++);
} }
err = 0; err = 0;
out_unlock: out_unlock:
cpu_maps_update_done(); cpu_maps_update_done();
free_cpumask_var(candidate_mask);
free_cpumask_var(tmp);
return err; return err;
} }
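The conversion above swaps on-stack cpumask_t values for cpumask_var_t, which with CONFIG_CPUMASK_OFFSTACK is a pointer that must be allocated with zalloc_cpumask_var() and released with free_cpumask_var() (and degenerates to an ordinary embedded mask otherwise), so large NR_CPUS configurations stop burning stack. A hedged kernel-style sketch of the allocate/operate/free discipline (the helper itself is illustrative):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* How many possible-but-not-present CPU slots are currently free? */
static int count_unoccupied_slots(void)
{
	cpumask_var_t candidates;
	int n;

	if (!zalloc_cpumask_var(&candidates, GFP_KERNEL))
		return -ENOMEM;

	cpumask_xor(candidates, cpu_possible_mask, cpu_present_mask);
	n = cpumask_weight(candidates);

	free_cpumask_var(candidates);
	return n;
}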
@ -334,7 +316,7 @@ static void pseries_remove_processor(struct device_node *np)
set_hard_smp_processor_id(cpu, -1); set_hard_smp_processor_id(cpu, -1);
break; break;
} }
if (cpu == NR_CPUS) if (cpu >= nr_cpu_ids)
printk(KERN_WARNING "Could not find cpu to remove " printk(KERN_WARNING "Could not find cpu to remove "
"with physical id 0x%x\n", intserv[i]); "with physical id 0x%x\n", intserv[i]);
} }
@ -388,6 +370,7 @@ static int __init pseries_cpu_hotplug_init(void)
struct device_node *np; struct device_node *np;
const char *typep; const char *typep;
int cpu; int cpu;
int qcss_tok;
for_each_node_by_name(np, "interrupt-controller") { for_each_node_by_name(np, "interrupt-controller") {
typep = of_get_property(np, "compatible", NULL); typep = of_get_property(np, "compatible", NULL);


@ -228,3 +228,41 @@ _GLOBAL(plpar_hcall9)
mtcrf 0xff,r0 mtcrf 0xff,r0
blr /* return r3 = status */ blr /* return r3 = status */
/* See plpar_hcall_raw to see why this is needed */
_GLOBAL(plpar_hcall9_raw)
HMT_MEDIUM
mfcr r0
stw r0,8(r1)
std r4,STK_PARM(r4)(r1) /* Save ret buffer */
mr r4,r5
mr r5,r6
mr r6,r7
mr r7,r8
mr r8,r9
mr r9,r10
ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */
ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */
ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */
HVSC /* invoke the hypervisor */
mr r0,r12
ld r12,STK_PARM(r4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
std r7, 24(r12)
std r8, 32(r12)
std r9, 40(r12)
std r10,48(r12)
std r11,56(r12)
std r0, 64(r12)
lwz r0,8(r1)
mtcrf 0xff,r0
blr /* return r3 = status */


@ -367,21 +367,28 @@ static void pSeries_lpar_hptab_clear(void)
 {
 	unsigned long size_bytes = 1UL << ppc64_pft_size;
 	unsigned long hpte_count = size_bytes >> 4;
-	unsigned long dummy1, dummy2, dword0;
+	struct {
+		unsigned long pteh;
+		unsigned long ptel;
+	} ptes[4];
 	long lpar_rc;
-	int i;
+	int i, j;
 
-	/* TODO: Use bulk call */
-	for (i = 0; i < hpte_count; i++) {
-		/* dont remove HPTEs with VRMA mappings */
-		lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
-						&dummy1, &dummy2);
-		if (lpar_rc == H_NOT_FOUND) {
-			lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
-			if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
-				!= HPTE_V_VRMA_MASK))
-				/* Can be hpte for 1TB Seg. So remove it */
-				plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+	/* Read in batches of 4,
+	 * invalidate only valid entries not in the VRMA
+	 * hpte_count will be a multiple of 4
+	 */
+	for (i = 0; i < hpte_count; i += 4) {
+		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
+		if (lpar_rc != H_SUCCESS)
+			continue;
+		for (j = 0; j < 4; j++){
+			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
+				HPTE_V_VRMA_MASK)
+				continue;
+			if (ptes[j].pteh & HPTE_V_VALID)
+				plpar_pte_remove_raw(0, i + j, 0,
+					&(ptes[j].pteh), &(ptes[j].ptel));
 		}
 	}
 }


@ -4,6 +4,14 @@
#include <asm/hvcall.h> #include <asm/hvcall.h>
#include <asm/page.h> #include <asm/page.h>
/* Get state of physical CPU from query_cpu_stopped */
int smp_query_cpu_stopped(unsigned int pcpu);
#define QCSS_STOPPED 0
#define QCSS_STOPPING 1
#define QCSS_NOT_STOPPED 2
#define QCSS_HARDWARE_ERROR -1
#define QCSS_HARDWARE_BUSY -2
static inline long poll_pending(void) static inline long poll_pending(void)
{ {
return plpar_hcall_norets(H_POLL_PENDING); return plpar_hcall_norets(H_POLL_PENDING);
@ -183,6 +191,24 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
return rc; return rc;
} }
/*
* plpar_pte_read_4_raw can be called in real mode.
* ptes must be 8*sizeof(unsigned long)
*/
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
unsigned long *ptes)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
memcpy(ptes, retbuf, 8*sizeof(unsigned long));
return rc;
}
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex, static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
unsigned long avpn) unsigned long avpn)
{ {
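As pSeries_lpar_hptab_clear() above shows, a single H_READ with the H_READ_4 flag returns four (pteh, ptel) pairs packed into the nine-register hcall return buffer, which plpar_pte_read_4_raw() copies out as eight unsigned longs. A hedged sketch of a caller consuming one batch (the helper name is illustrative; error handling kept minimal):

/* Kernel-style sketch: count how many of HPTEs ptex..ptex+3 are valid. */
static int count_valid_hptes(unsigned long ptex)
{
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];
	long rc;
	int j, valid = 0;

	rc = plpar_pte_read_4_raw(0, ptex, (void *)ptes);
	if (rc != H_SUCCESS)
		return 0;

	for (j = 0; j < 4; j++)
		if (ptes[j].pteh & HPTE_V_VALID)
			valid++;
	return valid;
}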


@ -10,6 +10,13 @@
#ifndef _PSERIES_PSERIES_H #ifndef _PSERIES_PSERIES_H
#define _PSERIES_PSERIES_H #define _PSERIES_PSERIES_H
#include <linux/interrupt.h>
struct device_node;
extern void request_event_sources_irqs(struct device_node *np,
irq_handler_t handler, const char *name);
extern void __init fw_feature_init(const char *hypertas, unsigned long len); extern void __init fw_feature_init(const char *hypertas, unsigned long len);
struct pt_regs; struct pt_regs;


@ -67,63 +67,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id); static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
static void request_ras_irqs(struct device_node *np,
irq_handler_t handler,
const char *name)
{
int i, index, count = 0;
struct of_irq oirq;
const u32 *opicprop;
unsigned int opicplen;
unsigned int virqs[16];
/* Check for obsolete "open-pic-interrupt" property. If present, then
* map those interrupts using the default interrupt host and default
* trigger
*/
opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
if (opicprop) {
opicplen /= sizeof(u32);
for (i = 0; i < opicplen; i++) {
if (count > 15)
break;
virqs[count] = irq_create_mapping(NULL, *(opicprop++));
if (virqs[count] == NO_IRQ)
printk(KERN_ERR "Unable to allocate interrupt "
"number for %s\n", np->full_name);
else
count++;
}
}
/* Else use normal interrupt tree parsing */
else {
/* First try to do a proper OF tree parsing */
for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
index++) {
if (count > 15)
break;
virqs[count] = irq_create_of_mapping(oirq.controller,
oirq.specifier,
oirq.size);
if (virqs[count] == NO_IRQ)
printk(KERN_ERR "Unable to allocate interrupt "
"number for %s\n", np->full_name);
else
count++;
}
}
/* Now request them */
for (i = 0; i < count; i++) {
if (request_irq(virqs[i], handler, 0, name, NULL)) {
printk(KERN_ERR "Unable to request interrupt %d for "
"%s\n", virqs[i], np->full_name);
return;
}
}
}
/* /*
* Initialize handlers for the set of interrupts caused by hardware errors * Initialize handlers for the set of interrupts caused by hardware errors
* and power system events. * and power system events.
@ -138,14 +81,15 @@ static int __init init_ras_IRQ(void)
/* Internal Errors */ /* Internal Errors */
np = of_find_node_by_path("/event-sources/internal-errors"); np = of_find_node_by_path("/event-sources/internal-errors");
if (np != NULL) { if (np != NULL) {
request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR"); request_event_sources_irqs(np, ras_error_interrupt,
"RAS_ERROR");
of_node_put(np); of_node_put(np);
} }
/* EPOW Events */ /* EPOW Events */
np = of_find_node_by_path("/event-sources/epow-events"); np = of_find_node_by_path("/event-sources/epow-events");
if (np != NULL) { if (np != NULL) {
request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW"); request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
of_node_put(np); of_node_put(np);
} }

Some files were not shown because too many files have changed in this diff.