mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-06-20 13:41:30 +00:00
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/geneve.c

Here we had an overlapping change, where in 'net' the extraneous stats
bump was being removed whilst in 'net-next' the final argument to
udp_tunnel6_xmit_skb() was being changed.

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit b3e0d3d7ba

550 changed files with 4569 additions and 2912 deletions
@@ -22,8 +22,7 @@ Required properties:
 Optional properties:
 - ti,hwmods: Name of the hwmods associated to the eDMA CC
 - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
-		these channels will be SW triggered channels. The list must
-		contain 16 bits numbers, see example.
+		these channels will be SW triggered channels. See example.
 - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
 		the driver, they are allocated to be used by for example the
 		DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
 	ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
 
 	/* Channel 20 and 21 is allocated for memcpy */
-	ti,edma-memcpy-channels = /bits/ 16 <20 21>;
-	/* The following PaRAM slots are reserved: 35-45 and 100-110 */
-	ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
-				       /bits/ 16 <100 10>;
+	ti,edma-memcpy-channels = <20 21>;
+	/* The following PaRAM slots are reserved: 35-44 and 100-109 */
+	ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
 };
 
 edma_tptc0: tptc@49800000 {
@@ -11,6 +11,10 @@ Required properties:
 		0 = active high
 		1 = active low
 
+Optional properties:
+- little-endian : GPIO registers are used as little endian. If not
+		  present registers are used as big endian by default.
+
 Example:
 
 gpio0: gpio@1100 {
@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
 If an issue is identified with the released source code on the supported
 kernel with a supported adapter, email the specific information related to the
 issue to e1000-devel@lists.sourceforge.net.
-
-
-License
-=======
-
-This software program is released under the terms of a license agreement
-between you ('Licensee') and Intel. Do not use or load this software or any
-associated materials (collectively, the 'Software') until you have carefully
-read the full terms and conditions of the file COPYING located in this software
-package. By loading or using the Software, you agree to the terms of this
-Agreement. If you do not agree with the terms of this Agreement, do not install
-or use the Software.
-
-* Other names and brands may be claimed as the property of others.

18	MAINTAINERS
@@ -318,7 +318,7 @@ M:	Zhang Rui <rui.zhang@intel.com>
 L:	linux-acpi@vger.kernel.org
 W:	https://01.org/linux-acpi
 S:	Supported
-F:	drivers/acpi/video.c
+F:	drivers/acpi/acpi_video.c
 
 ACPI WMI DRIVER
 L:	platform-driver-x86@vger.kernel.org
@@ -2984,6 +2984,7 @@ F:	kernel/cpuset.c
 CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
 M:	Johannes Weiner <hannes@cmpxchg.org>
 M:	Michal Hocko <mhocko@kernel.org>
+M:	Vladimir Davydov <vdavydov@virtuozzo.com>
 L:	cgroups@vger.kernel.org
 L:	linux-mm@kvack.org
 S:	Maintained
@@ -5586,7 +5587,7 @@ R:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 R:	Shannon Nelson <shannon.nelson@intel.com>
 R:	Carolyn Wyborny <carolyn.wyborny@intel.com>
 R:	Don Skidmore <donald.c.skidmore@intel.com>
-R:	Matthew Vick <matthew.vick@intel.com>
+R:	Bruce Allan <bruce.w.allan@intel.com>
 R:	John Ronciak <john.ronciak@intel.com>
 R:	Mitch Williams <mitch.a.williams@intel.com>
 L:	intel-wired-lan@lists.osuosl.org
@@ -8302,7 +8303,7 @@ F:	include/linux/delayacct.h
 F:	kernel/delayacct.c
 
 PERFORMANCE EVENTS SUBSYSTEM
-M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
+M:	Peter Zijlstra <peterz@infradead.org>
 M:	Ingo Molnar <mingo@redhat.com>
 M:	Arnaldo Carvalho de Melo <acme@kernel.org>
 L:	linux-kernel@vger.kernel.org
@@ -8961,6 +8962,13 @@ F:	drivers/rpmsg/
 F:	Documentation/rpmsg.txt
 F:	include/linux/rpmsg.h
 
+RENESAS ETHERNET DRIVERS
+R:	Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+L:	netdev@vger.kernel.org
+L:	linux-sh@vger.kernel.org
+F:	drivers/net/ethernet/renesas/
+F:	include/linux/sh_eth.h
+
 RESET CONTROLLER FRAMEWORK
 M:	Philipp Zabel <p.zabel@pengutronix.de>
 S:	Maintained
@@ -9443,8 +9451,10 @@ F:	include/scsi/sg.h
 
 SCSI SUBSYSTEM
 M:	"James E.J. Bottomley" <JBottomley@odin.com>
-L:	linux-scsi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
+M:	"Martin K. Petersen" <martin.petersen@oracle.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
+L:	linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/
 F:	include/scsi/

2	Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
@@ -74,7 +74,7 @@
 			reg = <0x48240200 0x100>;
 			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-parent = <&gic>;
-			clocks = <&dpll_mpu_m2_ck>;
+			clocks = <&mpu_periphclk>;
 		};
 
 		local_timer: timer@48240600 {
@@ -82,7 +82,7 @@
 			reg = <0x48240600 0x100>;
 			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-parent = <&gic>;
-			clocks = <&dpll_mpu_m2_ck>;
+			clocks = <&mpu_periphclk>;
 		};
 
 		l2-cache-controller@48242000 {
@@ -259,6 +259,14 @@
 			ti,invert-autoidle-bit;
 		};
 
+		mpu_periphclk: mpu_periphclk {
+			#clock-cells = <0>;
+			compatible = "fixed-factor-clock";
+			clocks = <&dpll_mpu_m2_ck>;
+			clock-mult = <1>;
+			clock-div = <2>;
+		};
+
 		dpll_ddr_ck: dpll_ddr_ck {
 			#clock-cells = <0>;
 			compatible = "ti,am3-dpll-clock";
@@ -184,6 +184,7 @@
 				regulator-name = "VDD_SDHC_1V8";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
+				regulator-always-on;
 			};
 		};
 	};
@@ -118,7 +118,8 @@
 		sdhci0: sdhci@ab0000 {
 			compatible = "mrvl,pxav3-mmc";
 			reg = <0xab0000 0x200>;
-			clocks = <&chip_clk CLKID_SDIO1XIN>;
+			clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+			clock-names = "io", "core";
 			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
@@ -126,7 +127,8 @@
 		sdhci1: sdhci@ab0800 {
 			compatible = "mrvl,pxav3-mmc";
 			reg = <0xab0800 0x200>;
-			clocks = <&chip_clk CLKID_SDIO1XIN>;
+			clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
+			clock-names = "io", "core";
 			interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
 			status = "disabled";
 		};
@@ -135,7 +137,7 @@
 			compatible = "mrvl,pxav3-mmc";
 			reg = <0xab1000 0x200>;
 			interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
+			clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
 			clock-names = "io", "core";
 			status = "disabled";
 		};
@@ -218,6 +218,7 @@
 			reg = <0x480c8000 0x2000>;
 			interrupts = <77>;
 			ti,hwmods = "mailbox";
+			#mbox-cells = <1>;
 			ti,mbox-num-users = <4>;
 			ti,mbox-num-fifos = <12>;
 			mbox_dsp: mbox_dsp {
@@ -279,8 +280,11 @@
 			ti,spi-num-cs = <4>;
 			ti,hwmods = "mcspi1";
 			dmas = <&edma 16 &edma 17
-				&edma 18 &edma 19>;
-			dma-names = "tx0", "rx0", "tx1", "rx1";
+				&edma 18 &edma 19
+				&edma 20 &edma 21
+				&edma 22 &edma 23>;
+			dma-names = "tx0", "rx0", "tx1", "rx1",
+				    "tx2", "rx2", "tx3", "rx3";
 		};
 
 		mmc1: mmc@48060000 {
@@ -18,8 +18,3 @@
 		reg = <0x80000000 0x10000000>;
 	};
 };
-
-&L2 {
-	arm,data-latency = <2 1 2>;
-	arm,tag-latency = <3 2 3>;
-};
@@ -19,7 +19,7 @@
 			reg = <0x40006000 0x1000>;
 			cache-unified;
 			cache-level = <2>;
-			arm,data-latency = <1 1 1>;
+			arm,data-latency = <3 3 3>;
 			arm,tag-latency = <2 2 2>;
 		};
 	};
@@ -178,8 +178,10 @@
 			compatible = "fsl,vf610-sai";
 			reg = <0x40031000 0x1000>;
 			interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
-			clocks = <&clks VF610_CLK_SAI2>;
-			clock-names = "sai";
+			clocks = <&clks VF610_CLK_SAI2>,
+				 <&clks VF610_CLK_SAI2_DIV>,
+				 <&clks 0>, <&clks 0>;
+			clock-names = "bus", "mclk1", "mclk2", "mclk3";
 			dma-names = "tx", "rx";
 			dmas = <&edma0 0 21>,
 			       <&edma0 0 20>;
@@ -21,6 +21,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/io.h>
+#include <asm/barrier.h>
 
 #define __ACCESS_CP15(CRn, Op1, CRm, Op2)	p15, Op1, %0, CRn, CRm, Op2
 #define __ACCESS_CP15_64(Op1, CRm)		p15, Op1, %Q0, %R0, CRm
@@ -28,6 +28,18 @@
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
+static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
+					 u8 reg_num)
+{
+	return *vcpu_reg(vcpu, reg_num);
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	*vcpu_reg(vcpu, reg_num) = val;
+}
+
 bool kvm_condition_valid(struct kvm_vcpu *vcpu);
 void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
+#else
+	return arm_copy_to_user(to, from, n);
+#endif
 }
 
 extern unsigned long __must_check
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
 	char buf[64];
+#ifndef CONFIG_CPU_V7M
+	unsigned int domain;
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Get the domain register for the parent context. In user
+	 * mode, we don't save the DACR, so lets use what it should
+	 * be. For other modes, we place it after the pt_regs struct.
+	 */
+	if (user_mode(regs))
+		domain = DACR_UACCESS_ENABLE;
+	else
+		domain = *(unsigned int *)(regs + 1);
+#else
+	domain = get_domain();
+#endif
+#endif
 
 	show_regs_print_info(KERN_DEFAULT);
 
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
 
 #ifndef CONFIG_CPU_V7M
 	{
-		unsigned int domain = get_domain();
 		const char *segment;
 
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-		/*
-		 * Get the domain register for the parent context. In user
-		 * mode, we don't save the DACR, so lets use what it should
-		 * be. For other modes, we place it after the pt_regs struct.
-		 */
-		if (user_mode(regs))
-			domain = DACR_UACCESS_ENABLE;
-		else
-			domain = *(unsigned int *)(regs + 1);
-#endif
-
 		if ((domain & domain_mask(DOMAIN_USER)) ==
 		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
 			segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
 	buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 	{
-		unsigned int transbase, dac = get_domain();
+		unsigned int transbase;
 		asm("mrc p15, 0, %0, c2, c0\n\t"
 		    : "=r" (transbase));
 		snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
-			transbase, dac);
+			transbase, domain);
 	}
 #endif
 	asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
@@ -36,10 +36,10 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)	\
 	__asm__ __volatile__(				\
-	"	mov		%2, %1\n"		\
-	"0:	ldrex"B"	%1, [%3]\n"		\
-	"1:	strex"B"	%0, %2, [%3]\n"		\
+	"0:	ldrex"B"	%2, [%3]\n"		\
+	"1:	strex"B"	%0, %1, [%3]\n"		\
 	"	cmp		%0, #0\n"		\
+	"	moveq		%1, %2\n"		\
 	"	movne		%0, %4\n"		\
 	"2:\n"						\
 	"	.section	 .text.fixup,\"ax\"\n"	\
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
 			       data);
 		data = vcpu_data_host_to_guest(vcpu, data, len);
-		*vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data;
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
 	}
 
 	return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	rt = vcpu->arch.mmio_decode.rt;
 
 	if (is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
+					       len);
 
 		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
 		mmio_write_buf(data_buf, len, data);
@@ -218,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if (!kvm_is_device_pfn(__phys_to_pfn(addr)))
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -310,7 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) && !kvm_is_device_pfn(__phys_to_pfn(addr)))
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	unsigned long context_id;
 	phys_addr_t target_pc;
 
-	cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
+	cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
 	if (vcpu_mode_is_32bit(source_vcpu))
 		cpu_id &= ~((u32) 0);
 
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 		return PSCI_RET_INVALID_PARAMS;
 	}
 
-	target_pc = *vcpu_reg(source_vcpu, 2);
-	context_id = *vcpu_reg(source_vcpu, 3);
+	target_pc = vcpu_get_reg(source_vcpu, 2);
+	context_id = vcpu_get_reg(source_vcpu, 3);
 
 	kvm_reset_vcpu(vcpu);
 
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
 	 * the general puspose registers are undefined upon CPU_ON.
 	 */
-	*vcpu_reg(vcpu, 0) = context_id;
+	vcpu_set_reg(vcpu, 0, context_id);
 	vcpu->arch.power_off = false;
 	smp_mb();		/* Make sure the above is visible */
 
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_vcpu *tmp;
 
-	target_affinity = *vcpu_reg(vcpu, 1);
-	lowest_affinity_level = *vcpu_reg(vcpu, 2);
+	target_affinity = vcpu_get_reg(vcpu, 1);
+	lowest_affinity_level = vcpu_get_reg(vcpu, 2);
 
 	/* Determine target affinity mask */
 	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
 	int ret = 1;
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 {
-	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
 	switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
 		break;
 	}
 
-	*vcpu_reg(vcpu, 0) = val;
+	vcpu_set_reg(vcpu, 0, val);
 	return 1;
 }
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 static unsigned long noinline
 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
+	unsigned long ua_flags;
 	int atomic;
 
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memcpy((void *)to, from, tocopy);
+		uaccess_restore(ua_flags);
 		to += tocopy;
 		from += tocopy;
 		n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 	 * With frame pointer disabled, tail call optimization kicks in
 	 * as well making this test almost invisible.
 	 */
-	if (n < 64)
-		return __copy_to_user_std(to, from, n);
-	return __copy_to_user_memcpy(to, from, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __copy_to_user_std(to, from, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __copy_to_user_memcpy(to, from, n);
+	}
+	return n;
 }
 
 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
 {
+	unsigned long ua_flags;
+
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 		memset((void *)addr, 0, n);
 		return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memset((void *)addr, 0, tocopy);
+		uaccess_restore(ua_flags);
 		addr += tocopy;
 		n -= tocopy;
 
@@ -193,9 +205,14 @@ out:
 unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
-	if (n < 64)
-		return __clear_user_std(addr, n);
-	return __clear_user_memset(addr, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __clear_user_std(addr, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __clear_user_memset(addr, n);
+	}
+	return n;
 }
 
 #if 0
@@ -4,7 +4,6 @@ menuconfig ARCH_AT91
 	select ARCH_REQUIRE_GPIOLIB
 	select COMMON_CLK_AT91
 	select PINCTRL
-	select PINCTRL_AT91
 	select SOC_BUS
 
 if ARCH_AT91
@@ -17,6 +16,7 @@ config SOC_SAMA5D2
 	select HAVE_AT91_USB_CLK
 	select HAVE_AT91_H32MX
 	select HAVE_AT91_GENERATED_CLK
+	select PINCTRL_AT91PIO4
 	help
 	  Select this if ou are using one of Atmel's SAMA5D2 family SoC.
@@ -27,6 +27,7 @@ config SOC_SAMA5D3
 	select HAVE_AT91_UTMI
 	select HAVE_AT91_SMD
 	select HAVE_AT91_USB_CLK
+	select PINCTRL_AT91
 	help
 	  Select this if you are using one of Atmel's SAMA5D3 family SoC.
 	  This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@@ -40,6 +41,7 @@ config SOC_SAMA5D4
 	select HAVE_AT91_SMD
 	select HAVE_AT91_USB_CLK
 	select HAVE_AT91_H32MX
+	select PINCTRL_AT91
 	help
 	  Select this if you are using one of Atmel's SAMA5D4 family SoC.
@@ -50,6 +52,7 @@ config SOC_AT91RM9200
 	select CPU_ARM920T
 	select HAVE_AT91_USB_CLK
 	select MIGHT_HAVE_PCI
+	select PINCTRL_AT91
 	select SOC_SAM_V4_V5
 	select SRAM if PM
 	help
@@ -65,6 +68,7 @@ config SOC_AT91SAM9
 	select HAVE_AT91_UTMI
 	select HAVE_FB_ATMEL
 	select MEMORY
+	select PINCTRL_AT91
 	select SOC_SAM_V4_V5
 	select SRAM if PM
 	help
@@ -41,8 +41,10 @@
  * implementation should be moved down into the pinctrl driver and get
  * called as part of the generic suspend/resume path.
  */
+#ifdef CONFIG_PINCTRL_AT91
 extern void at91_pinctrl_gpio_suspend(void);
 extern void at91_pinctrl_gpio_resume(void);
+#endif
 
 static struct {
 	unsigned long uhp_udp_mask;
@@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
 
 static int at91_pm_enter(suspend_state_t state)
 {
+#ifdef CONFIG_PINCTRL_AT91
 	at91_pinctrl_gpio_suspend();
-
+#endif
 	switch (state) {
 	/*
 	 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
@@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
 error:
 	target_state = PM_SUSPEND_ON;
 
+#ifdef CONFIG_PINCTRL_AT91
 	at91_pinctrl_gpio_resume();
+#endif
 	return 0;
 }
@@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
 void exynos_sys_powerdown_conf(enum sys_powerdown mode)
 {
 	unsigned int i;
+	const struct exynos_pmu_data *pmu_data;
+
+	if (!pmu_context)
+		return;
 
-	const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
+	pmu_data = pmu_context->pmu_data;
 
 	if (pmu_data->powerdown_conf)
 		pmu_data->powerdown_conf(mode);
@@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
 		writel(*vaddr++, bus_addr);
 }
 
-static inline unsigned char __indirect_readb(const volatile void __iomem *p)
+static inline u8 __indirect_readb(const volatile void __iomem *p)
 {
 	u32 addr = (u32)p;
 	u32 n, byte_enables, data;
@@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
 		*vaddr++ = readb(bus_addr);
 }
 
-static inline unsigned short __indirect_readw(const volatile void __iomem *p)
+static inline u16 __indirect_readw(const volatile void __iomem *p)
 {
 	u32 addr = (u32)p;
 	u32 n, byte_enables, data;
@@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
 		*vaddr++ = readw(bus_addr);
 }
 
-static inline unsigned long __indirect_readl(const volatile void __iomem *p)
+static inline u32 __indirect_readl(const volatile void __iomem *p)
 {
 	u32 addr = (__force u32)p;
 	u32 data;
@@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
 					((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
 
 #define	ioread8(p)			ioread8(p)
-static inline unsigned int ioread8(const void __iomem *addr)
+static inline u8 ioread8(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
 }
 
 #define	ioread16(p)			ioread16(p)
-static inline unsigned int ioread16(const void __iomem *addr)
+static inline u16 ioread16(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
 }
 
 #define	ioread32(p)			ioread32(p)
-static inline unsigned int ioread32(const void __iomem *addr)
+static inline u32 ioread32(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
 	select NEON if CPU_V7
 	select PM
 	select REGULATOR
+	select REGULATOR_FIXED_VOLTAGE
 	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
 	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
 	select VFP
@@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CBB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_NOKIA_N810
 	bool
@@ -889,6 +889,7 @@ static void __init e680_init(void)
 
 	pxa_set_keypad_info(&e680_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e680_devices));
 }
@@ -956,6 +957,7 @@ static void __init a1200_init(void)
 
 	pxa_set_keypad_info(&a1200_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
 }
@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
 		platform_device_register(&a910_camera);
 	}
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a910_devices));
 }
@@ -1215,6 +1218,7 @@ static void __init e6_init(void)
 
 	pxa_set_keypad_info(&e6_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e6_devices));
 }
@@ -1256,6 +1260,7 @@ static void __init e2_init(void)
 
 	pxa_set_keypad_info(&e2_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e2_devices));
 }
@@ -20,7 +20,7 @@
 #include <plat/cpu.h>
 #include <plat/cpu-freq-core.h>
 
-static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_12[] = {
 	{ .frequency = 75000000,	.driver_data = PLLVAL(0x75, 3, 3),  },	/* FVco 600.000000 */
 	{ .frequency = 80000000,	.driver_data = PLLVAL(0x98, 4, 3),  },	/* FVco 640.000000 */
 	{ .frequency = 90000000,	.driver_data = PLLVAL(0x70, 2, 3),  },	/* FVco 720.000000 */
@@ -20,7 +20,7 @@
 #include <plat/cpu.h>
 #include <plat/cpu-freq-core.h>
 
-static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
 	{ .frequency = 78019200,	.driver_data = PLLVAL(121, 5, 3),  },	/* FVco 624.153600 */
 	{ .frequency = 84067200,	.driver_data = PLLVAL(131, 5, 3),  },	/* FVco 672.537600 */
 	{ .frequency = 90115200,	.driver_data = PLLVAL(141, 5, 3),  },	/* FVco 720.921600 */
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
 		__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
 	/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
 	cpumask_clear(mm_cpumask(mm));
-	return asid;
+	return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!is_coherent &&
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
  * safe to be called with preemption disabled, as under stop_machine().
  */
 static inline void section_update(unsigned long addr, pmdval_t mask,
-				  pmdval_t prot)
+				  pmdval_t prot, struct mm_struct *mm)
 {
-	struct mm_struct *mm;
 	pmd_t *pmd;
 
-	mm = current->active_mm;
 	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
 
 #ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
 	return !!(get_cr() & CR_XP);
 }
 
-#define set_section_perms(perms, field)	{			\
-	size_t i;						\
-	unsigned long addr;					\
-								\
-	if (!arch_has_strict_perms())				\
-		return;						\
-								\
-	for (i = 0; i < ARRAY_SIZE(perms); i++) {		\
-		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
-		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {	\
-			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
-				perms[i].start, perms[i].end,	\
-				SECTION_SIZE);			\
-			continue;				\
-		}						\
-								\
-		for (addr = perms[i].start;			\
-		     addr < perms[i].end;			\
-		     addr += SECTION_SIZE)			\
-			section_update(addr, perms[i].mask,	\
-				perms[i].field);		\
-	}							\
-}
+void set_section_perms(struct section_perm *perms, int n, bool set,
+		       struct mm_struct *mm)
+{
+	size_t i;
+	unsigned long addr;
+
+	if (!arch_has_strict_perms())
+		return;
+
+	for (i = 0; i < n; i++) {
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+				perms[i].start, perms[i].end,
+				SECTION_SIZE);
+			continue;
+		}
+
+		for (addr = perms[i].start;
+		     addr < perms[i].end;
+		     addr += SECTION_SIZE)
+			section_update(addr, perms[i].mask,
+				       set ? perms[i].prot : perms[i].clear, mm);
+	}
+}
 
-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
 {
-	set_section_perms(nx_perms, prot);
+	struct task_struct *t, *s;
+
+	read_lock(&tasklist_lock);
+	for_each_process(t) {
+		if (t->flags & PF_KTHREAD)
+			continue;
+		for_each_thread(t, s)
+			set_section_perms(perms, n, true, s->mm);
+	}
+	read_unlock(&tasklist_lock);
+	set_section_perms(perms, n, true, current->active_mm);
+	set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+	return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+	stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+	return 0;
+}
+
 void mark_rodata_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
-	set_section_perms(ro_perms, clear);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+			  current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+			  current->active_mm);
 }
 #endif /* CONFIG_DEBUG_RODATA */
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
 .equ	cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
-	stmfd	sp!, {r4 - r10, lr}
+	stmfd	sp!, {r4 - r11, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
 	stmia	r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
 	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
 	stmia	r0, {r5 - r11}
-	ldmfd	sp!, {r4 - r10, pc}
+	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)
 
ENTRY(cpu_v7_do_resume)
@@ -269,6 +269,7 @@
 			clock-frequency = <0>; /* Updated by bootloader */
 			voltage-ranges = <1800 1800 3300 3300>;
 			sdhci,auto-cmd12;
+			little-endian;
 			bus-width = <4>;
 		};
 
@@ -277,6 +278,7 @@
 			reg = <0x0 0x2300000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -287,6 +289,7 @@
 			reg = <0x0 0x2310000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -297,6 +300,7 @@
 			reg = <0x0 0x2320000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -307,6 +311,7 @@
 			reg = <0x0 0x2330000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -77,6 +77,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/stringify.h>
+#include <asm/barrier.h>
 
 /*
  * Low-level accessors
@@ -100,13 +100,21 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 }
 
 /*
- * vcpu_reg should always be passed a register number coming from a
- * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
- * with banked registers.
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
  */
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+					 u8 reg_num)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
+
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	if (reg_num != 31)
+		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
 }
 
 /* Get vcpu SPSR for current mode */
@@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
-	    pte_valid(*ptep)) {
-		BUG_ON(!pte_young(pte));
-		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep) && pte_valid(pte)) {
+		VM_WARN_ONCE(!pte_young(pte),
+			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
+		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
 	}
 
 	set_pte(ptep, pte);
@@ -5,6 +5,7 @@
 */
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
@@ -140,7 +141,7 @@ SECTIONS
 		ARM_EXIT_KEEP(EXIT_DATA)
 	}
 
-	PERCPU_SECTION(64)
+	PERCPU_SECTION(L1_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
@@ -158,7 +159,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	_data = .;
 	_sdata = .;
-	RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 	PECOFF_EDATA_PADDING
 	_edata = .;
@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
|
trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
|
||||||
kvm_vcpu_hvc_get_imm(vcpu));
|
kvm_vcpu_hvc_get_imm(vcpu));
|
||||||
|
|
||||||
ret = kvm_psci_call(vcpu);
|
ret = kvm_psci_call(vcpu);
|
||||||
|
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
 {
	if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
 * sys_regs and leave it in complete control of the caches.
 */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
-			  const struct sys_reg_params *p,
+			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
 {
-	unsigned long val;
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

-	val = *vcpu_reg(vcpu, p->Rt);
	if (!p->is_aarch32) {
-		vcpu_sys_reg(vcpu, r->reg) = val;
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
-			vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
-		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
+			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
+		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
 * for both AArch64 and AArch32 accesses.
 */
 static bool access_gic_sgi(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
 {
-	u64 val;
-
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

-	val = *vcpu_reg(vcpu, p->Rt);
-	vgic_v3_dispatch_sgi(vcpu, val);
+	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
 }

 static bool trap_raz_wi(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
 {
	if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
 }

 static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *p,
+			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
 {
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
-		*vcpu_reg(vcpu, p->Rt) = (1 << 3);
+		p->regval = (1 << 3);
		return true;
	}
 }

 static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
-				   const struct sys_reg_params *p,
+				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
 {
	if (p->is_write) {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
-		*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval = val;
		return true;
	}
 }
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
 * now use the debug registers.
 */
 static bool trap_debug_regs(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
 {
	if (p->is_write) {
-		vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
+		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

-	trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
 }
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
 * hyp.S code switches between host and guest values in future.
 */
 static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
			      u64 *dbg_reg)
 {
-	u64 val = *vcpu_reg(vcpu, p->Rt);
+	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
 }

 static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
-			      const struct sys_reg_params *p,
+			      struct sys_reg_params *p,
			      u64 *dbg_reg)
 {
-	u64 val = *dbg_reg;
+	p->regval = *dbg_reg;

	if (p->is_32bit)
-		val &= 0xffffffffUL;
-
-	*vcpu_reg(vcpu, p->Rt) = val;
+		p->regval &= 0xffffffffUL;
 }

 static inline bool trap_bvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
 {
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
 }

 static inline bool trap_bcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
 {
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
 }

 static inline bool trap_wvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
 {
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
 }

 static inline bool trap_wcr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
 {
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 };

 static bool trap_dbgidr(struct kvm_vcpu *vcpu,
-			const struct sys_reg_params *p,
+			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
 {
	if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);

-		*vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
-			      (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
-			      (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) |
-			      (6 << 16) | (el3 << 14) | (el3 << 12));
+		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
+			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
+			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
+			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
 }

 static bool trap_debug32(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
 {
	if (p->is_write) {
-		vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
+		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
-		*vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
+		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
 */

 static inline bool trap_xvr(struct kvm_vcpu *vcpu,
-			    const struct sys_reg_params *p,
+			    struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
 {
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
-		val |= *vcpu_reg(vcpu, p->Rt) << 32;
+		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
-		*vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
 * Return 0 if the access has been handled, and -1 if not.
 */
 static int emulate_cp(struct kvm_vcpu *vcpu,
-		      const struct sys_reg_params *params,
+		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
 {
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
 {
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (hsr >> 5) & 0xf;
	int Rt2 = (hsr >> 10) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
	params.CRn = 0;

	/*
-	 * Massive hack here. Store Rt2 in the top 32bits so we only
-	 * have one register to deal with. As we use the same trap
+	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val &= 0xffffffff;
-		val |= *vcpu_reg(vcpu, Rt2) << 32;
-		*vcpu_reg(vcpu, params.Rt) = val;
+		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
+		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
		unhandled_cp_access(vcpu, &params);

 out:
-	/* Do the opposite hack for the read side */
+	/* Split up the value between registers for the read side */
	if (!params.is_write) {
-		u64 val = *vcpu_reg(vcpu, params.Rt);
-		val >>= 32;
-		*vcpu_reg(vcpu, Rt2) = val;
+		vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
+		vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
	}

	return 1;
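The two kvm_handle_cp_64() hunks above retire the old in-place trick of stashing Rt2 in the top half of Rt: the 64-bit coprocessor value now travels in params.regval, packed before emulation and unpacked after. The packing itself is just (sketch restating the hunks, using the kernel's upper_32_bits()/lower_32_bits() helpers):

	u64 regval;

	regval  = vcpu_get_reg(vcpu, Rt) & 0xffffffff;	/* low word  */
	regval |= vcpu_get_reg(vcpu, Rt2) << 32;	/* high word */
	/* ... emulate ... */
	vcpu_set_reg(vcpu, Rt,  lower_32_bits(regval));
	vcpu_set_reg(vcpu, Rt2, upper_32_bits(regval));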
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
 {
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	int Rt  = (hsr >> 5) & 0xf;

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
-	params.Rt  = (hsr >> 5) & 0xf;
+	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

-	if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
-		return 1;
-	if (!emulate_cp(vcpu, &params, global, nr_global))
+	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
+	    !emulate_cp(vcpu, &params, global, nr_global)) {
+		if (!params.is_write)
+			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
+	}

	unhandled_cp_access(vcpu, &params);
	return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }

 static int emulate_sys_reg(struct kvm_vcpu *vcpu,
-			   const struct sys_reg_params *params)
+			   struct sys_reg_params *params)
 {
	size_t num;
	const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+	int Rt = (esr >> 5) & 0x1f;
+	int ret;

	trace_kvm_handle_sys_reg(esr);

@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
-	params.Rt = (esr >> 5) & 0x1f;
+	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

-	return emulate_sys_reg(vcpu, &params);
+	ret = emulate_sys_reg(vcpu, &params);
+
+	if (!params.is_write)
+		vcpu_set_reg(vcpu, Rt, params.regval);
+	return ret;
 }

 /******************************************************************************
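Taken together, the sys_regs.c hunks establish a single register-access protocol: the exception handler loads the trapped guest register into params.regval exactly once, every trap backend reads or rewrites only regval, and the handler stores it back for reads. That is what lets the zero-register rule live solely in vcpu_get_reg()/vcpu_set_reg(). Condensed (paraphrasing the code above, not a verbatim excerpt):

	params.regval = vcpu_get_reg(vcpu, Rt);		/* fetch once */
	ret = emulate_sys_reg(vcpu, &params);		/* backends touch regval only */
	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);	/* write back once */
	return ret;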
@@ -28,7 +28,7 @@ struct sys_reg_params {
	u8	CRn;
	u8	CRm;
	u8	Op2;
-	u8	Rt;
+	u64	regval;
	bool	is_write;
	bool	is_aarch32;
	bool	is_32bit;	/* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
-		       const struct sys_reg_params *,
+		       struct sys_reg_params *,
		       const struct sys_reg_desc *);

	/* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
 }

 static inline bool read_zero(struct kvm_vcpu *vcpu,
-			     const struct sys_reg_params *p)
+			     struct sys_reg_params *p)
 {
-	*vcpu_reg(vcpu, p->Rt) = 0;
+	p->regval = 0;
	return true;
 }
@@ -31,13 +31,13 @@
 #include "sys_regs.h"

 static bool access_actlr(struct kvm_vcpu *vcpu,
-			 const struct sys_reg_params *p,
+			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
 {
	if (p->is_write)
		return ignore_write(vcpu, p);

-	*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1);
+	p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
	return true;
 }
@@ -14,7 +14,7 @@
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
@@ -11,7 +11,7 @@


-#define NR_syscalls		322 /* length of syscall table */
+#define NR_syscalls		323 /* length of syscall table */

 /*
 * The following defines stop scripts/checksyscalls.sh from complaining about
@@ -335,5 +335,6 @@
 #define __NR_userfaultfd		1343
 #define __NR_membarrier			1344
 #define __NR_kcmp			1345
+#define __NR_mlock2			1346

 #endif /* _UAPI_ASM_IA64_UNISTD_H */
@@ -1771,5 +1771,6 @@ sys_call_table:
	data8 sys_userfaultfd
	data8 sys_membarrier
	data8 sys_kcmp				// 1345
+	data8 sys_mlock2

	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
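The three ia64 hunks above are the standard trio for wiring a syscall into an architecture: the __NR_ constant, the NR_syscalls bump, and the table entry, all in lockstep (the .org guard at the end of the table catches a forgotten bump at build time). Until libc grows a wrapper, userspace can still exercise the new entry point through syscall(2); a minimal sketch (my_mlock2 is a hypothetical helper; 1346 is the ia64 number from the hunk, and flags = 0 behaves like mlock()):

	#include <unistd.h>
	#include <sys/syscall.h>

	static long my_mlock2(const void *addr, size_t len, int flags)
	{
		return syscall(1346 /* __NR_mlock2 on ia64 */, addr, len, flags);
	}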
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
-		__dma_sync(sg_phys(sg), sg->length, direction);
+		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
+							sg->length, direction);
	}

	return nents;
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,

	gfp = massage_gfp_flags(dev, gfp);

-	if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
+	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev,
					count, get_order(size));
	if (!page)
@@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 */
 #ifdef CONFIG_HUGETLB_PAGE
 #define pte_huge(pte)           (pte_val(pte) & _PAGE_HUGE)
-#define pte_mkhuge(pte)         (__pte(pte_val(pte) | _PAGE_HUGE))
+#define pte_mkhuge(pte)         (__pte(pte_val(pte) | \
+				 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
 #else
 #define pte_huge(pte)           (0)
 #define pte_mkhuge(pte)         (pte)
@@ -360,8 +360,9 @@
 #define __NR_execveat		(__NR_Linux + 342)
 #define __NR_membarrier		(__NR_Linux + 343)
 #define __NR_userfaultfd	(__NR_Linux + 344)
+#define __NR_mlock2		(__NR_Linux + 345)

-#define __NR_Linux_syscalls	(__NR_userfaultfd + 1)
+#define __NR_Linux_syscalls	(__NR_mlock2 + 1)


 #define __IGNORE_select		/* newselect */
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
 }


-void __init pcibios_init_bus(struct pci_bus *bus)
-{
-	struct pci_dev *dev = bus->self;
-	unsigned short bridge_ctl;
-
-	/* We deal only with pci controllers and pci-pci bridges. */
-	if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
-		return;
-
-	/* PCI-PCI bridge - set the cache line and default latency
-	   (32) for primary and secondary buses. */
-	pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
-
-	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
-	bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
-	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
-}
-
 /*
 * pcibios align resources() is called every time generic PCI code
 * wants to generate a new address. The process of looking for
@@ -440,6 +440,7 @@
	ENTRY_COMP(execveat)
	ENTRY_SAME(membarrier)
	ENTRY_SAME(userfaultfd)
+	ENTRY_SAME(mlock2)		/* 345 */


	.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
@@ -227,23 +227,15 @@
			reg = <0x520 0x20>;

			phy0: ethernet-phy@1f {
-				interrupt-parent = <&mpic>;
-				interrupts = <10 1>;
				reg = <0x1f>;
			};
			phy1: ethernet-phy@0 {
-				interrupt-parent = <&mpic>;
-				interrupts = <10 1>;
				reg = <0>;
			};
			phy2: ethernet-phy@1 {
-				interrupt-parent = <&mpic>;
-				interrupts = <10 1>;
				reg = <1>;
			};
			phy3: ethernet-phy@2 {
-				interrupt-parent = <&mpic>;
-				interrupts = <10 1>;
				reg = <2>;
			};
			tbi0: tbi-phy@11 {
@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

-	/*
-	 * If it's PHB PE, the frozen state on all available PEs should have
-	 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
-	 * child PEs because they might be in frozen state.
-	 */
-	if (!(pe->type & EEH_PE_PHB)) {
-		rc = eeh_clear_pe_frozen_state(pe, false);
-		if (rc)
-			return rc;
-	}
+	/* Clear frozen state */
+	rc = eeh_clear_pe_frozen_state(pe, false);
+	if (rc)
+		return rc;

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet.  Yes,
@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
 static unsigned int *opal_irqs;

 static void opal_handle_irq_work(struct irq_work *work);
-static __be64 last_outstanding_events;
+static u64 last_outstanding_events;
 static struct irq_work opal_event_irq_work = {
	.func = opal_handle_irq_work,
 };

+void opal_handle_events(uint64_t events)
+{
+	int virq, hwirq = 0;
+	u64 mask = opal_event_irqchip.mask;
+
+	if (!in_irq() && (events & mask)) {
+		last_outstanding_events = events;
+		irq_work_queue(&opal_event_irq_work);
+		return;
+	}
+
+	while (events & mask) {
+		hwirq = fls64(events) - 1;
+		if (BIT_ULL(hwirq) & mask) {
+			virq = irq_find_mapping(opal_event_irqchip.domain,
+						hwirq);
+			if (virq)
+				generic_handle_irq(virq);
+		}
+		events &= ~BIT_ULL(hwirq);
+	}
+}
+
 static void opal_event_mask(struct irq_data *d)
 {
	clear_bit(d->hwirq, &opal_event_irqchip.mask);
@@ -55,12 +78,12 @@ static void opal_event_mask(struct irq_data *d)

 static void opal_event_unmask(struct irq_data *d)
 {
+	__be64 events;
+
	set_bit(d->hwirq, &opal_event_irqchip.mask);

-	opal_poll_events(&last_outstanding_events);
-	if (last_outstanding_events & opal_event_irqchip.mask)
-		/* Need to retrigger the interrupt */
-		irq_work_queue(&opal_event_irq_work);
+	opal_poll_events(&events);
+	opal_handle_events(be64_to_cpu(events));
 }

 static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
@@ -96,29 +119,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
	return 0;
 }

-void opal_handle_events(uint64_t events)
-{
-	int virq, hwirq = 0;
-	u64 mask = opal_event_irqchip.mask;
-
-	if (!in_irq() && (events & mask)) {
-		last_outstanding_events = events;
-		irq_work_queue(&opal_event_irq_work);
-		return;
-	}
-
-	while (events & mask) {
-		hwirq = fls64(events) - 1;
-		if (BIT_ULL(hwirq) & mask) {
-			virq = irq_find_mapping(opal_event_irqchip.domain,
-						hwirq);
-			if (virq)
-				generic_handle_irq(virq);
-		}
-		events &= ~BIT_ULL(hwirq);
-	}
-}
-
 static irqreturn_t opal_interrupt(int irq, void *data)
 {
	__be64 events;
@@ -131,7 +131,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)

 static void opal_handle_irq_work(struct irq_work *work)
 {
-	opal_handle_events(be64_to_cpu(last_outstanding_events));
+	opal_handle_events(last_outstanding_events);
 }

 static int opal_event_match(struct irq_domain *h, struct device_node *node,
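The net effect of the opal-irqchip hunks is a single endianness boundary: opal_poll_events() still fills in a __be64 from firmware, but it is converted with be64_to_cpu() right there, so last_outstanding_events and opal_handle_events() deal only in CPU-endian values (moving opal_handle_events() above its new caller avoids a forward declaration). The convention, reduced to its core (sketch with shortened names, not literal code from the file):

	__be64 raw;
	u64 events;

	opal_poll_events(&raw);			/* firmware data, big-endian */
	events = be64_to_cpu(raw);		/* convert exactly once */
	opal_handle_events(events);		/* CPU-endian from here on */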
@@ -278,7 +278,7 @@
 #define __NR_fsetxattr		256
 #define __NR_getxattr		257
 #define __NR_lgetxattr		258
-#define __NR_fgetxattr		269
+#define __NR_fgetxattr		259
 #define __NR_listxattr		260
 #define __NR_llistxattr		261
 #define __NR_flistxattr		262
@@ -10,7 +10,7 @@
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *
 * ppc:
@@ -9,7 +9,7 @@
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 */

 #include <linux/perf_event.h>
@@ -21,7 +21,7 @@
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 */
@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
 # The wrappers will select whether using "malloc" or the kernel allocator.
 LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc

-LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
+LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))

 # Used by link-vmlinux.sh which has special support for um link
 export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)

 char *split_if_spec(char *str, ...)
 {
-	char **arg, *end;
+	char **arg, *end, *ret = NULL;
	va_list ap;

	va_start(ap, str);
	while ((arg = va_arg(ap, char **)) != NULL) {
		if (*str == '\0')
-			return NULL;
+			goto out;
		end = strchr(str, ',');
		if (end != str)
			*arg = str;
		if (end == NULL)
-			return NULL;
+			goto out;
		*end++ = '\0';
		str = end;
	}
+	ret = str;
+out:
	va_end(ap);
-	return str;
+	return ret;
 }
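The split_if_spec() rework above fixes a va_list leak: C requires a matching va_end() for every va_start(), and the early `return NULL` paths skipped it. Routing all exits through one label is the usual shape of the fix (generic sketch; parse_failed and result are placeholders, not names from the file):

	char *ret = NULL;
	va_list ap;

	va_start(ap, str);
	if (parse_failed)
		goto out;	/* must not return before va_end() */
	ret = result;
out:
	va_end(ap);
	return ret;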
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
	struct ksignal ksig;
	int handled_sig = 0;

-	while (get_signal(&ksig)) {
+	if (get_signal(&ksig)) {
		handled_sig = 1;
		/* Whee!  Actually deliver the signal.  */
		handle_signal(&ksig, regs);
@@ -23,7 +23,6 @@
 #include <stdarg.h>
 #include <linux/types.h>
 #include <linux/edd.h>
-#include <asm/boot.h>
 #include <asm/setup.h>
 #include "bitops.h"
 #include "ctype.h"
@@ -19,6 +19,8 @@
 #include "video.h"
 #include "vesa.h"

+#include <uapi/asm/boot.h>
+
 /*
 * Common variables
 */
@@ -13,6 +13,8 @@
 * Select video mode
 */

+#include <uapi/asm/boot.h>
+
 #include "boot.h"
 #include "video.h"
 #include "vesa.h"
@@ -509,6 +509,17 @@ END(irq_entries_start)
	 * tracking that we're in kernel mode.
	 */
	SWAPGS

+	/*
+	 * We need to tell lockdep that IRQs are off.  We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
+	 * the simplest way to handle it is to just call it twice if
+	 * we enter from user mode.  There's no reason to optimize this since
+	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
+	 */
+	TRACE_IRQS_OFF
+
 #ifdef CONFIG_CONTEXT_TRACKING
	call enter_from_user_mode
 #endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
	SWAPGS

 .Lerror_entry_from_usermode_after_swapgs:
+	/*
+	 * We need to tell lockdep that IRQs are off.  We can't do this until
+	 * we fix gsbase, and we should do it before enter_from_user_mode
+	 * (which can take locks).
+	 */
+	TRACE_IRQS_OFF
 #ifdef CONFIG_CONTEXT_TRACKING
	call enter_from_user_mode
 #endif
+	ret

 .Lerror_entry_done:

	TRACE_IRQS_OFF
	ret
@@ -9,20 +9,22 @@
 #define PAGE_SIZE	(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))

-#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
-#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
-   virtual addresses are 32-bits but physical addresses are larger
-   (ie, 32-bit PAE). */
-#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
 #define PMD_PAGE_SIZE		(_AC(1, UL) << PMD_SHIFT)
 #define PMD_PAGE_MASK		(~(PMD_PAGE_SIZE-1))

 #define PUD_PAGE_SIZE		(_AC(1, UL) << PUD_SHIFT)
 #define PUD_PAGE_MASK		(~(PUD_PAGE_SIZE-1))

+#define __PHYSICAL_MASK		((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#define __VIRTUAL_MASK		((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
+   virtual addresses are 32-bits but physical addresses are larger
+   (ie, 32-bit PAE). */
+#define PHYSICAL_PAGE_MASK	(((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PMD_PAGE_MASK	(((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
+#define PHYSICAL_PUD_PAGE_MASK	(((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
+
 #define HPAGE_SHIFT		PMD_SHIFT
 #define HPAGE_SIZE		(_AC(1,UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
 static inline pudval_t pud_pfn_mask(pud_t pud)
 {
	if (native_pud_val(pud) & _PAGE_PSE)
-		return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
 }

 static inline pudval_t pud_flags_mask(pud_t pud)
 {
-	if (native_pud_val(pud) & _PAGE_PSE)
-		return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pud_pfn_mask(pud);
 }

 static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
 static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
 {
	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK;
+		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
 }

 static inline pmdval_t pmd_flags_mask(pmd_t pmd)
 {
-	if (native_pmd_val(pmd) & _PAGE_PSE)
-		return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
-	else
-		return ~PTE_PFN_MASK;
+	return ~pmd_pfn_mask(pmd);
 }

 static inline pmdval_t pmd_flags(pmd_t pmd)
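The new PHYSICAL_PMD_PAGE_MASK/PHYSICAL_PUD_PAGE_MASK definitions lean on the same signed-cast trick as PHYSICAL_PAGE_MASK: on 32-bit PAE, long is 32 bits while physical addresses are wider, and sign extension fills the high bits of the mask with ones. A standalone illustration (assumes a 32-bit long and a 64-bit physical type):

	/* PAGE_MASK == 0xfffff000 in a 32-bit long */
	unsigned long long zext = (unsigned long)0xfffff000UL;
	/* zext == 0x00000000fffff000: zero-extended, clips PAE address bits */
	unsigned long long sext = (signed long)0xfffff000UL;
	/* sext == 0xfffffffffffff000: sign-extended, keeps them intact */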
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_PLATFORM_H
 #define _ASM_X86_PLATFORM_H

-#include <asm/pgtable_types.h>
 #include <asm/bootparam.h>

 struct mpc_bus;
@@ -698,3 +698,4 @@ int __init microcode_init(void)
	return error;

 }
+late_initcall(microcode_init);
@@ -5,7 +5,7 @@
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
@@ -5,7 +5,7 @@
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
- * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
 /* Check flags and event code/umask, and set the HSW N/A flag */
 #define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
	__EVENT_CONSTRAINT(code, n, \
-			  INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
+			  INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
			  HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
	u64 lbr_from[MAX_LBR_ENTRIES];
	u64 lbr_to[MAX_LBR_ENTRIES];
	u64 lbr_info[MAX_LBR_ENTRIES];
+	int tos;
	int lbr_callstack_users;
	int lbr_stack_state;
 };
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
-	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
+	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
 static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
 {
	if (event->attach_state & PERF_ATTACH_TASK)
-		return perf_cgroup_from_task(event->hw.target);
+		return perf_cgroup_from_task(event->hw.target, event->ctx);

	return event->cgrp;
 }
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
	}

	mask = x86_pmu.lbr_nr - 1;
-	tos = intel_pmu_lbr_tos();
+	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
+	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
 }
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
+	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
 }
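The LBR hunks make the top-of-stack pointer part of the saved task context: __intel_pmu_lbr_save() records it, and __intel_pmu_lbr_restore() both replays the entries and rewrites the TOS MSR, instead of trusting whatever TOS the current CPU happens to have. The round trip, compressed (paraphrase of the hunks above, not a verbatim excerpt):

	/* save side */
	task_ctx->tos = tos;			/* remember where the stack top was */

	/* restore side */
	tos = task_ctx->tos;
	/* ... wrmsrl() each saved from/to/info entry at (tos - i) & mask ... */
	wrmsrl(x86_pmu.lbr_tos, tos);		/* and put the pointer back */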
@@ -1,7 +1,7 @@
 /*
 * x86 specific code for irq_work
 *
- * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 */

 #include <linux/kernel.h>
@@ -4,10 +4,22 @@
 */
 #include <linux/platform_device.h>
 #include <linux/module.h>
+#include <linux/ioport.h>
+
+static int found(u64 start, u64 end, void *data)
+{
+	return 1;
+}

 static __init int register_e820_pmem(void)
 {
+	char *pmem = "Persistent Memory (legacy)";
	struct platform_device *pdev;
+	int rc;
+
+	rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+	if (rc <= 0)
+		return 0;

	/*
	 * See drivers/nvdimm/e820.c for the implementation, this is
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
 #endif
-
-	microcode_init();
 }

 #ifdef CONFIG_X86_32
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
	signal_setup_done(failed, ksig, stepping);
 }

-#ifdef CONFIG_X86_32
-#define NR_restart_syscall	__NR_restart_syscall
-#else /* !CONFIG_X86_32 */
-#define NR_restart_syscall \
-	test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall
-#endif /* CONFIG_X86_32 */
+static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
+{
+#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+	return __NR_restart_syscall;
+#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
+	return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
+		__NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+}

 /*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
			break;

		case -ERESTART_RESTARTBLOCK:
-			regs->ax = NR_restart_syscall;
+			regs->ax = get_nr_restart_syscall(regs);
			regs->ip -= 2;
			break;
		}
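get_nr_restart_syscall() exists because an x32 task enters the kernel with __X32_SYSCALL_BIT set in orig_ax; restarting it with a plain __NR_restart_syscall would be decoded against the wrong syscall table. The selection logic, restated as a decision table (sketch; kernel_is_32bit and task_is_ia32 are stand-in predicates, not real symbols):

	nr = kernel_is_32bit ? __NR_restart_syscall :
	     task_is_ia32    ? __NR_ia32_restart_syscall :
	     /* native 64-bit or x32: keep the x32 bit from orig_ax */
	     __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);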
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
 */
 #define UDELAY_10MS_DEFAULT 10000

-static unsigned int init_udelay = INT_MAX;
+static unsigned int init_udelay = UINT_MAX;

 static int __init cpu_init_udelay(char *str)
 {
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
 static void __init smp_quirk_init_udelay(void)
 {
	/* if cmdline changed it from default, leave it alone */
-	if (init_udelay != INT_MAX)
+	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
-	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
-
+		return;
+	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
 }
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
	{ 0/* VMALLOC_START */, "vmalloc() Area" },
	{ 0/*VMALLOC_END*/,     "vmalloc() End" },
 # ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/,      "Persisent kmap() Area" },
+	{ 0/*PKMAP_BASE*/,      "Persistent kmap() Area" },
 # endif
	{ 0/*FIXADDR_START*/,   "Fixmap Area" },
 #endif
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
-		if (X86_REX_X(insn->rex_prefix.value) == 1)
+		if (X86_REX_X(insn->rex_prefix.value))
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
			regno += 8;
		break;

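The `== 1` comparisons above were broken because the REX extractors mask the prefix byte without shifting, so each returns either zero or the bit's own weight: X86_REX_B() yields 0/1, but X86_REX_X() yields 0/2, which never compares equal to 1. Truth-testing works for every bit position. A self-contained demonstration (the macro is reproduced here to match the insn.h definition, as an assumption rather than a quote):

	#define X86_REX_X(rex) ((rex) & 2)	/* mask only, no shift */

	int uses_extended_index(unsigned char rex)
	{
		return X86_REX_X(rex) != 0;	/* "== 1" would never be true */
	}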
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
	if (!found)
		pci_add_resource(resources, &info->busn);

-	list_for_each_entry(root_res, &info->resources, list) {
-		struct resource *res;
-		struct resource *root;
-
-		res = &root_res->res;
-		pci_add_resource(resources, res);
-		if (res->flags & IORESOURCE_IO)
-			root = &ioport_resource;
-		else
-			root = &iomem_resource;
-		insert_resource(root, res);
-	}
+	list_for_each_entry(root_res, &info->resources, list)
+		pci_add_resource(resources, &root_res->res);

	return;

 default_resources:
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
	if (err)
		return 1;

-	err = convert_fxsr_from_user(&fpx, sc.fpstate);
+	err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
	if (err)
		return 1;

@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 {
	struct user_i387_struct fp;

-	err = copy_from_user(&fp, sc.fpstate,
+	err = copy_from_user(&fp, (void *)sc.fpstate,
			     sizeof(struct user_i387_struct));
	if (err)
		return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
 #endif
 #undef PUTREG
	sc.oldmask = mask;
-	sc.fpstate = to_fp;
+	sc.fpstate = (unsigned long)to_fp;

	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
	if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
	sigset_t set;
	struct sigcontext __user *sc = &frame->sc;
-	unsigned long __user *oldmask = &sc->oldmask;
-	unsigned long __user *extramask = frame->extramask;
	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);

-	if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-	    copy_from_user(&set.sig[1], extramask, sig_size))
+	if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
+	    copy_from_user(&set.sig[1], frame->extramask, sig_size))
		goto segfault;

	set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 {
	struct rt_sigframe __user *frame;
	int err = 0, sig = ksig->sig;
+	unsigned long fp_to;

	frame = (struct rt_sigframe __user *)
		round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
	err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
	err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
			       set->sig[0]);
-	err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+	fp_to = (unsigned long)&frame->fpstate;
+
+	err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
	if (sizeof(*set) == 16) {
		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
|
|
|
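The common thread in these changes: sigcontext's fpstate member is now kept as an unsigned long holding a user-space address rather than a typed pointer (see "sc.fpstate = (unsigned long)to_fp;" above), so each use casts explicitly at the boundary. A hypothetical sketch of the shape involved (names mirror the hunks; the real struct sigcontext has many more fields):

	struct sigcontext_like {
		unsigned long oldmask;		/* saved data, stored by value */
		unsigned long fpstate;		/* user address of the FP save area */
	};

	static void *fpstate_ptr(const struct sigcontext_like *sc)
	{
		return (void *)sc->fpstate;	/* explicit cast, as in the hunks above */
	}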
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1127,15 +1127,15 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkcg_can_attach(struct cgroup_subsys_state *css,
-			    struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
+	struct cgroup_subsys_state *dst_css;
 	struct io_context *ioc;
 	int ret = 0;

 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, tset) {
+	cgroup_taskset_for_each(task, dst_css, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)

--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -3405,6 +3405,9 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 {
 	int ret = 0;

+	if (!q->dev)
+		return ret;
+
 	spin_lock_irq(q->queue_lock);
 	if (q->nr_pending) {
 		ret = -EBUSY;
@@ -3432,6 +3435,9 @@ EXPORT_SYMBOL(blk_pre_runtime_suspend);
  */
 void blk_post_runtime_suspend(struct request_queue *q, int err)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_SUSPENDED;
@@ -3456,6 +3462,9 @@ EXPORT_SYMBOL(blk_post_runtime_suspend);
  */
 void blk_pre_runtime_resume(struct request_queue *q)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	q->rpm_status = RPM_RESUMING;
 	spin_unlock_irq(q->queue_lock);
@@ -3478,6 +3487,9 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
  */
 void blk_post_runtime_resume(struct request_queue *q, int err)
 {
+	if (!q->dev)
+		return;
+
 	spin_lock_irq(q->queue_lock);
 	if (!err) {
 		q->rpm_status = RPM_ACTIVE;

--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,7 +91,8 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 	lim->virt_boundary_mask = 0;
 	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+	lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
+		BLK_SAFE_MAX_SECTORS;
 	lim->chunk_sectors = 0;
 	lim->max_write_same_sectors = 0;
 	lim->max_discard_sectors = 0;
@@ -127,6 +128,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	lim->max_hw_sectors = UINT_MAX;
 	lim->max_segment_size = UINT_MAX;
 	lim->max_sectors = UINT_MAX;
+	lim->max_dev_sectors = UINT_MAX;
 	lim->max_write_same_sectors = UINT_MAX;
 }
 EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -214,8 +216,8 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
 EXPORT_SYMBOL(blk_queue_bounce_limit);

 /**
- * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
- * @limits: the queue limits
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
  * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
@@ -224,13 +226,19 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
  *    the device driver based upon the capabilities of the I/O
  *    controller.
  *
+ *    max_dev_sectors is a hard limit imposed by the storage device for
+ *    READ/WRITE requests. It is set by the disk driver.
+ *
  *    max_sectors is a soft limit imposed by the block layer for
  *    filesystem type requests.  This value can be overridden on a
  *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
  *    The soft limit can not exceed max_hw_sectors.
  **/
-void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
+	struct queue_limits *limits = &q->limits;
+	unsigned int max_sectors;
+
 	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
 		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
@@ -238,22 +246,9 @@ void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_
 	}

 	limits->max_hw_sectors = max_hw_sectors;
-	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
-				    BLK_DEF_MAX_SECTORS);
-}
-EXPORT_SYMBOL(blk_limits_max_hw_sectors);
-
-/**
- * blk_queue_max_hw_sectors - set max sectors for a request for this queue
- * @q:  the request queue for the device
- * @max_hw_sectors:  max hardware sectors in the usual 512b unit
- *
- * Description:
- *    See description for blk_limits_max_hw_sectors().
- **/
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
-{
-	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
+	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
+	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
+	limits->max_sectors = max_sectors;
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);

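Net effect of the blk-settings changes: max_sectors is now clamped against an optional per-device hard limit (max_dev_sectors, with 0 meaning "unset") before the usual BLK_DEF_MAX_SECTORS cap. A standalone sketch of the clamping order (BLK_DEF_MAX_SECTORS value assumed for this kernel era; min_not_zero() reimplemented for the demo):

	/* clamp_demo.c */
	#include <stdio.h>

	#define BLK_DEF_MAX_SECTORS 2560u	/* assumed default cap */

	static unsigned int min_not_zero(unsigned int a, unsigned int b)
	{
		if (a == 0)
			return b;
		if (b == 0)
			return a;
		return a < b ? a : b;
	}

	static unsigned int clamp_max_sectors(unsigned int max_hw, unsigned int max_dev)
	{
		unsigned int max_sectors = min_not_zero(max_hw, max_dev);

		return max_sectors < BLK_DEF_MAX_SECTORS ?
		       max_sectors : BLK_DEF_MAX_SECTORS;
	}

	int main(void)
	{
		printf("%u\n", clamp_max_sectors(65535, 0));	/* no device limit -> 2560 */
		printf("%u\n", clamp_max_sectors(65535, 1024));	/* device limit wins -> 1024 */
		return 0;
	}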
@@ -527,6 +522,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,

 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
 	t->max_write_same_sectors = min(t->max_write_same_sectors,
 					b->max_write_same_sectors);
 	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -205,6 +205,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 	if (ret < 0)
 		return ret;

+	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
+					 q->limits.max_dev_sectors >> 1);
+
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;

--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;

+	walk->iv = req->info;
 	walk->nbytes = walk->total;
 	if (unlikely(!walk->total))
 		return 0;

 	walk->iv_buffer = NULL;
-	walk->iv = req->info;
 	if (unlikely(((unsigned long)walk->iv & alignmask))) {
 		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 	if (WARN_ON_ONCE(in_irq()))
 		return -EDEADLK;

+	walk->iv = desc->info;
 	walk->nbytes = walk->total;
 	if (unlikely(!walk->total))
 		return 0;

 	walk->buffer = NULL;
-	walk->iv = desc->info;
 	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
 		int err = blkcipher_copy_iv(walk);
 		if (err)

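Both hunks make the same ordering fix: the walk's IV pointer is now published before the zero-length early return, so a caller that consults walk->iv after walking a zero-byte request sees the request's IV rather than uninitialized memory. A standalone sketch of the pattern:

	/* iv_order_demo.c */
	#include <stdio.h>

	struct walk {
		const char *iv;
		unsigned int nbytes, total;
	};

	static int walk_first(struct walk *w, const char *req_iv)
	{
		w->iv = req_iv;		/* moved before the early return */
		w->nbytes = w->total;
		if (w->total == 0)
			return 0;	/* zero-length request: iv is still valid */
		/* ... buffer setup and IV alignment would follow ... */
		return 0;
	}

	int main(void)
	{
		struct walk w = { 0, 0, 0 };

		walk_first(&w, "request-iv");
		printf("iv = %s\n", w.iv);	/* set even though total == 0 */
		return 0;
	}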
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -58,10 +58,10 @@ config ACPI_CCA_REQUIRED
 	bool

 config ACPI_DEBUGGER
-	bool "In-kernel debugger (EXPERIMENTAL)"
+	bool "AML debugger interface (EXPERIMENTAL)"
 	select ACPI_DEBUG
 	help
-	  Enable in-kernel debugging facilities: statistics, internal
+	  Enable in-kernel debugging of AML facilities: statistics, internal
 	  object dump, single step control method execution.
 	  This is still under development, currently enabling this only
 	  results in the compilation of the ACPICA debugger files.

--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -233,11 +233,12 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_system_address *spa)
 {
+	size_t length = min_t(size_t, sizeof(*spa), spa->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_spa *nfit_spa;

 	list_for_each_entry(nfit_spa, &prev->spas, list) {
-		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+		if (memcmp(nfit_spa->spa, spa, length) == 0) {
 			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
 			return true;
 		}

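This same clamp is applied in each of the add_* helpers below: firmware reports every NFIT sub-table's size in its header, and comparing min(compiled size, reported length) bytes keeps the duplicate check working when the firmware's table revision is shorter or longer than the struct the kernel was built against. A standalone sketch of the comparison (struct layout is illustrative only):

	/* table_cmp_demo.c */
	#include <stdio.h>
	#include <string.h>

	/* Illustrative layout; real NFIT sub-tables have more fields. */
	struct hdr { unsigned short type, length; };
	struct spa { struct hdr header; unsigned int range_index; };

	static int same_table(const struct spa *a, const struct spa *b)
	{
		size_t length = b->header.length < sizeof(*b) ?
				b->header.length : sizeof(*b);

		return memcmp(a, b, length) == 0;
	}

	int main(void)
	{
		struct spa x = { { 0, sizeof(struct spa) }, 1 };
		struct spa y = x;

		printf("%d\n", same_table(&x, &y));	/* 1: identical tables match */
		return 0;
	}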
@@ -259,11 +260,12 @@ static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_memory_map *memdev)
 {
+	size_t length = min_t(size_t, sizeof(*memdev), memdev->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_memdev *nfit_memdev;

 	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
-		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
+		if (memcmp(nfit_memdev->memdev, memdev, length) == 0) {
 			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
 			return true;
 		}
@@ -284,11 +286,12 @@ static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_control_region *dcr)
 {
+	size_t length = min_t(size_t, sizeof(*dcr), dcr->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_dcr *nfit_dcr;

 	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
-		if (memcmp(nfit_dcr->dcr, dcr, sizeof(*dcr)) == 0) {
+		if (memcmp(nfit_dcr->dcr, dcr, length) == 0) {
 			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
 			return true;
 		}
@@ -308,11 +311,12 @@ static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_data_region *bdw)
 {
+	size_t length = min_t(size_t, sizeof(*bdw), bdw->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_bdw *nfit_bdw;

 	list_for_each_entry(nfit_bdw, &prev->bdws, list)
-		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
+		if (memcmp(nfit_bdw->bdw, bdw, length) == 0) {
 			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
 			return true;
 		}
@@ -332,11 +336,12 @@ static bool add_idt(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_interleave *idt)
 {
+	size_t length = min_t(size_t, sizeof(*idt), idt->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_idt *nfit_idt;

 	list_for_each_entry(nfit_idt, &prev->idts, list)
-		if (memcmp(nfit_idt->idt, idt, sizeof(*idt)) == 0) {
+		if (memcmp(nfit_idt->idt, idt, length) == 0) {
 			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
 			return true;
 		}
@@ -356,11 +361,12 @@ static bool add_flush(struct acpi_nfit_desc *acpi_desc,
 		struct nfit_table_prev *prev,
 		struct acpi_nfit_flush_address *flush)
 {
+	size_t length = min_t(size_t, sizeof(*flush), flush->header.length);
 	struct device *dev = acpi_desc->dev;
 	struct nfit_flush *nfit_flush;

 	list_for_each_entry(nfit_flush, &prev->flushes, list)
-		if (memcmp(nfit_flush->flush, flush, sizeof(*flush)) == 0) {
+		if (memcmp(nfit_flush->flush, flush, length) == 0) {
 			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
 			return true;
 		}

@@ -655,7 +661,7 @@ static ssize_t revision_show(struct device *dev,
 	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
 	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

-	return sprintf(buf, "%d\n", acpi_desc->nfit->header.revision);
+	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
 }
 static DEVICE_ATTR_RO(revision);

@@ -1652,7 +1658,6 @@ int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, acpi_size sz)

 	data = (u8 *) acpi_desc->nfit;
 	end = data + sz;
-	data += sizeof(struct acpi_table_nfit);
 	while (!IS_ERR_OR_NULL(data))
 		data = add_table(acpi_desc, &prev, data, end);

@@ -1748,13 +1753,29 @@ static int acpi_nfit_add(struct acpi_device *adev)
 		return PTR_ERR(acpi_desc);
 	}

-	acpi_desc->nfit = (struct acpi_table_nfit *) tbl;
+	/*
+	 * Save the acpi header for later and then skip it,
+	 * making nfit point to the first nfit table header.
+	 */
+	acpi_desc->acpi_header = *tbl;
+	acpi_desc->nfit = (void *) tbl + sizeof(struct acpi_table_nfit);
+	sz -= sizeof(struct acpi_table_nfit);

 	/* Evaluate _FIT and override with that if present */
 	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
 	if (ACPI_SUCCESS(status) && buf.length > 0) {
-		acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-		sz = buf.length;
+		union acpi_object *obj;
+		/*
+		 * Adjust for the acpi_object header of the _FIT
+		 */
+		obj = buf.pointer;
+		if (obj->type == ACPI_TYPE_BUFFER) {
+			acpi_desc->nfit =
+				(struct acpi_nfit_header *)obj->buffer.pointer;
+			sz = obj->buffer.length;
+		} else
+			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
+				 __func__, (int) obj->type);
 	}

 	rc = acpi_nfit_init(acpi_desc, sz);

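The layout being juggled here (my reading of the hunks, not guaranteed): the static ACPI NFIT starts with a struct acpi_table_header, while the buffer returned by the _FIT method starts directly at the first sub-table, so acpi_desc->nfit is normalized to always point at a sub-table header:

	/*
	 * ACPI NFIT table:  [ struct acpi_table_header ][ sub-table ][ sub-table ]...
	 *                                               ^ acpi_desc->nfit
	 * _FIT buffer:      [ sub-table ][ sub-table ]...
	 *                   ^ acpi_desc->nfit
	 *
	 * With both sources normalized, acpi_nfit_init() no longer needs its
	 * own "data += sizeof(struct acpi_table_nfit)" skip (removed above).
	 */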
@@ -1777,7 +1798,8 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 {
 	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev);
 	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
-	struct acpi_table_nfit *nfit_saved;
+	struct acpi_nfit_header *nfit_saved;
+	union acpi_object *obj;
 	struct device *dev = &adev->dev;
 	acpi_status status;
 	int ret;
@@ -1788,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 	if (!dev->driver) {
 		/* dev->driver may be null if we're being removed */
 		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
-		return;
+		goto out_unlock;
 	}

 	if (!acpi_desc) {
@@ -1808,12 +1830,19 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
 	}

 	nfit_saved = acpi_desc->nfit;
-	acpi_desc->nfit = (struct acpi_table_nfit *)buf.pointer;
-	ret = acpi_nfit_init(acpi_desc, buf.length);
-	if (!ret) {
-		/* Merge failed, restore old nfit, and exit */
-		acpi_desc->nfit = nfit_saved;
-		dev_err(dev, "failed to merge updated NFIT\n");
+	obj = buf.pointer;
+	if (obj->type == ACPI_TYPE_BUFFER) {
+		acpi_desc->nfit =
+			(struct acpi_nfit_header *)obj->buffer.pointer;
+		ret = acpi_nfit_init(acpi_desc, obj->buffer.length);
+		if (ret) {
+			/* Merge failed, restore old nfit, and exit */
+			acpi_desc->nfit = nfit_saved;
+			dev_err(dev, "failed to merge updated NFIT\n");
+		}
+	} else {
+		/* Bad _FIT, restore old nfit */
+		dev_err(dev, "Invalid _FIT\n");
 	}
 	kfree(buf.pointer);

--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -96,7 +96,8 @@ struct nfit_mem {

 struct acpi_nfit_desc {
 	struct nvdimm_bus_descriptor nd_desc;
-	struct acpi_table_nfit *nfit;
+	struct acpi_table_header acpi_header;
+	struct acpi_nfit_header *nfit;
 	struct mutex spa_map_mutex;
 	struct mutex init_mutex;
 	struct list_head spa_maps;

--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -768,6 +768,13 @@ static void pci_acpi_root_add_resources(struct acpi_pci_root_info *info)
 		else
 			continue;

+		/*
+		 * Some legacy x86 host bridge drivers use iomem_resource and
+		 * ioport_resource as default resource pool, skip it.
+		 */
+		if (res == root)
+			continue;
+
 		conflict = insert_resource_conflict(root, res);
 		if (conflict) {
 			dev_info(&info->bridge->dev,

--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -314,16 +314,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
 	{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
-	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
-	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
-	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
-	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
 	{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
 	{ PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -350,10 +340,22 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
 	{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
 	{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
+	{ PCI_VDEVICE(INTEL, 0xa102), board_ahci }, /* Sunrise Point-H AHCI */
 	{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
 	{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
 	{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
 	{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
+	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
+	{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
+	{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
+	{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/

 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,

--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -62,6 +62,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
 	writel(0x80, hpriv->mmio + AHCI_VENDOR_SPECIFIC_0_DATA);
 }

+#ifdef CONFIG_PM_SLEEP
 static int ahci_mvebu_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	return ahci_platform_suspend_host(&pdev->dev);
@@ -81,6 +82,10 @@ static int ahci_mvebu_resume(struct platform_device *pdev)

 	return ahci_platform_resume_host(&pdev->dev);
 }
+#else
+#define ahci_mvebu_suspend NULL
+#define ahci_mvebu_resume NULL
+#endif

 static const struct ata_port_info ahci_mvebu_port_info = {
 	.flags = AHCI_FLAG_COMMON,

--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1273,6 +1273,15 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
 	ata_tf_to_fis(tf, pmp, is_cmd, fis);
 	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

+	/* set port value for softreset of Port Multiplier */
+	if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
+		tmp = readl(port_mmio + PORT_FBS);
+		tmp &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+		tmp |= pmp << PORT_FBS_DEV_OFFSET;
+		writel(tmp, port_mmio + PORT_FBS);
+		pp->fbs_last_dev = pmp;
+	}
+
 	/* issue & wait */
 	writel(1, port_mmio + PORT_CMD_ISSUE);

--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1505,12 +1505,20 @@ static const char *ata_err_string(unsigned int err_mask)
 unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
 			       u8 page, void *buf, unsigned int sectors)
 {
+	unsigned long ap_flags = dev->link->ap->flags;
 	struct ata_taskfile tf;
 	unsigned int err_mask;
 	bool dma = false;

 	DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);

+	/*
+	 * Return error without actually issuing the command on controllers
+	 * which e.g. lockup on a read log page.
+	 */
+	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
+		return AC_ERR_DEV;
+
 retry:
 	ata_tf_init(dev, &tf);
 	if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&

--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -45,7 +45,8 @@ enum {
 	SATA_FSL_MAX_PRD_DIRECT	= 16,	/* Direct PRDT entries */

 	SATA_FSL_HOST_FLAGS	= (ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
-				ATA_FLAG_PMP | ATA_FLAG_NCQ | ATA_FLAG_AN),
+				ATA_FLAG_PMP | ATA_FLAG_NCQ |
+				ATA_FLAG_AN | ATA_FLAG_NO_LOG_PAGE),

 	SATA_FSL_MAX_CMDS	= SATA_FSL_QUEUE_DEPTH,
 	SATA_FSL_CMD_HDR_SIZE	= 16,	/* 4 DWORDS */

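These last two hunks work together: libata-eh gains a per-port opt-out flag for READ LOG PAGE, and sata_fsl sets it because the Freescale controller hangs on that command. A standalone sketch of the gate (the flag and error values here are assumptions for the demo, not the real libata constants):

	/* log_gate_demo.c */
	#include <stdio.h>

	#define ATA_FLAG_NO_LOG_PAGE (1u << 30)	/* bit position assumed */
	#define AC_ERR_DEV           (1u << 2)	/* value assumed */

	static unsigned int read_log_page(unsigned long ap_flags)
	{
		if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
			return AC_ERR_DEV;	/* refuse before touching hardware */
		/* ... would issue READ LOG (DMA) EXT here ... */
		return 0;
	}

	int main(void)
	{
		printf("fsl: %u, other: %u\n",
		       read_log_page(ATA_FLAG_NO_LOG_PAGE), read_log_page(0));
		return 0;
	}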
Some files were not shown because too many files have changed in this diff.