diff --git a/Makefile b/Makefile
index c156161..0ec4a35 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 4
-SUBLEVEL = 44
+SUBLEVEL = 45
EXTRAVERSION =
NAME = Saber-toothed Squirrel

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d81f994..762f7a6 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -45,6 +45,10 @@
#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
#define PPC_INST_SLBFEE 0x7c0007a7

#define PPC_INST_STRING 0x7c00042a
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ae0843f..3bb7197 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -960,7 +960,10 @@ static int emulate_instruction(struct pt_regs *regs)

#ifdef CONFIG_PPC64
/* Emulate the mfspr rD, DSCR. */
- if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
+ PPC_INST_MFSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
+ PPC_INST_MFSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mfdscr, regs);
rd = (instword >> 21) & 0x1f;
@@ -968,7 +971,10 @@ static int emulate_instruction(struct pt_regs *regs)
return 0;
}
/* Emulate the mtspr DSCR, rD. */
- if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
+ PPC_INST_MTSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
+ PPC_INST_MTSPR_DSCR)) &&
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
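
A minimal userspace sketch of the opcode matching this hunk performs, using only the user-DSCR encoding and mask shown above; the helper and the sample instruction word are illustrative, not kernel code:

/* Match an instruction word against the user DSCR mfspr encoding by
 * masking out the rD field, the same test the emulation code applies. */
#include <stdint.h>
#include <stdio.h>

#define INST_MFSPR_DSCR_USER      0x7c0302a6u   /* from the hunk above */
#define INST_MFSPR_DSCR_USER_MASK 0xfc1fffffu   /* ignores rD (bits 21-25) */

static int is_mfspr_dscr_user(uint32_t instword)
{
    return (instword & INST_MFSPR_DSCR_USER_MASK) == INST_MFSPR_DSCR_USER;
}

int main(void)
{
    uint32_t inst = 0x7c6302a6;   /* same encoding with rD = 3 */
    printf("rD = %u, matches = %d\n",
           (unsigned)((inst >> 21) & 0x1f), is_mfspr_dscr_user(inst));
    return 0;
}
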
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6e8f677..6130719 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -201,7 +201,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;

if (!form1_affinity)
- return distance;
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 520b426..fd1a099 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void)
* - in case there is no HW filter
* - in case the HW filter has errata or limitations
*/
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
+static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
u64 br_type = event->attr.branch_sample_type;
int mask = 0;
@@ -318,8 +318,11 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
if (br_type & PERF_SAMPLE_BRANCH_USER)
mask |= X86_BR_USER;

- if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
+ if (br_type & PERF_SAMPLE_BRANCH_KERNEL) {
+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
mask |= X86_BR_KERNEL;
+ }

/* we ignore BRANCH_HV here */

@@ -339,6 +342,8 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
* be used by fixup code for some CPU
*/
event->hw.branch_reg.reg = mask;
+
+ return 0;
}

/*
@@ -386,7 +391,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
/*
* setup SW LBR filter
*/
- intel_pmu_setup_sw_lbr_filter(event);
+ ret = intel_pmu_setup_sw_lbr_filter(event);
+ if (ret)
+ return ret;

/*
* setup HW LBR filter, if any
@@ -442,8 +449,18 @@ static int branch_type(unsigned long from, unsigned long to)
return X86_BR_NONE;

addr = buf;
- } else
- addr = (void *)from;
+ } else {
+ /*
+ * The LBR logs any address in the IP, even if the IP just
+ * faulted. This means userspace can control the from address.
+ * Ensure we don't blindy read any address by validating it is
+ * a known text address.
+ */
+ if (kernel_text_address(from))
+ addr = (void *)from;
+ else
+ return X86_BR_NONE;
+ }

/*
* decoder needs to know the ABI especially
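
A userspace sketch of the validate-before-dereference pattern used in the branch_type() hunk above, with a toy "known region" standing in for kernel_text_address(); all helpers here are illustrative, not the kernel implementation:

/* Refuse to read through an untrusted pointer unless it falls inside a
 * known, whitelisted region -- fail closed otherwise. */
#include <stdint.h>
#include <stdio.h>

static const char text[] = "known text region";

static int is_known_address(const void *p)
{
    uintptr_t a = (uintptr_t)p;
    return a >= (uintptr_t)text && a < (uintptr_t)text + sizeof(text);
}

static int read_byte(const void *untrusted, uint8_t *out)
{
    if (!is_known_address(untrusted))
        return -1;                      /* do not touch unknown memory */
    *out = *(const uint8_t *)untrusted;
    return 0;
}

int main(void)
{
    uint8_t b;
    printf("inside:  %d\n", read_byte(text + 6, &b));        /* 0 */
    printf("outside: %d\n", read_byte((void *)0x1234, &b));  /* -1 */
    return 0;
}
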
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fae7090..71d37f5 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -45,11 +45,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
int i;
unsigned long puds = 0, pmds = 0, ptes = 0, tables;
unsigned long start = 0, good_end;
+ unsigned long pgd_extra = 0;
phys_addr_t base;

for (i = 0; i < nr_range; i++) {
unsigned long range, extra;

+ if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
+ pgd_extra++;
+
range = mr[i].end - mr[i].start;
puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;

@@ -74,6 +78,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
+ tables += (pgd_extra * PAGE_SIZE);

#ifdef CONFIG_X86_32
/* for fixmap */
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 53ddbc7..0bf5bd1 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -77,7 +77,7 @@ static unsigned int longhaul_index;
static int scale_voltage;
static int disable_acpi_c3;
static int revid_errata;
-
+static int enable;

/* Clock ratios multiplied by 10 */
static int mults[32];
@@ -965,6 +965,10 @@ static int __init longhaul_init(void)
if (!x86_match_cpu(longhaul_id))
return -ENODEV;

+ if (!enable) {
+ printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
+ return -ENODEV;
+ }
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, "
@@ -1021,6 +1025,10 @@ MODULE_PARM_DESC(scale_voltage, "Scale voltage of processor");
* such. */
module_param(revid_errata, int, 0644);
MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
+/* By default driver is disabled to prevent incompatible
+ * system freeze. */
+module_param(enable, int, 0644);
+MODULE_PARM_DESC(enable, "Enable driver");

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
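
A minimal sketch of the opt-in pattern this hunk introduces, written as a hypothetical module rather than the longhaul driver itself: an "enable" parameter that defaults to 0, so init refuses to run unless the user explicitly requests it (for the real driver, e.g. "modprobe longhaul enable=1"):

/* Hypothetical example module: refuse to load unless enable=1 is given. */
#include <linux/init.h>
#include <linux/module.h>

static int enable;
module_param(enable, int, 0644);
MODULE_PARM_DESC(enable, "Enable driver");

static int __init example_init(void)
{
	if (!enable) {
		pr_err("example: option \"enable\" not set, aborting\n");
		return -ENODEV;
	}
	pr_info("example: loaded\n");
	return 0;
}

static void __exit example_exit(void)
{
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
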
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 020a7d7..69bea56 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -370,6 +370,7 @@ void intel_dvo_init(struct drm_device *dev)
const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
struct i2c_adapter *i2c;
int gpio;
+ bool dvoinit;

/* Allow the I2C driver info to specify the GPIO to be used in
* special cases, but otherwise default to what's defined
@@ -389,7 +390,17 @@ void intel_dvo_init(struct drm_device *dev)
i2c = &dev_priv->gmbus[gpio].adapter;

intel_dvo->dev = *dvo;
- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
continue;

intel_encoder->type = INTEL_OUTPUT_DVO;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a8b28c4..1ad5906 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -793,6 +793,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },

{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf5..43672b6 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1389,10 +1389,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
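
A small host-endian-safe sketch of why the le16_to_cpu()/le32_to_cpu() wrappers above matter: the firmware stores these fields little-endian, so they must be converted before use on big-endian hosts. le16_to_host() below is illustrative, not the kernel helper:

/* Read a little-endian 16-bit field byte by byte so the result is correct
 * regardless of host endianness. */
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_to_host(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
    const uint8_t fw_use_in_kb[2] = { 0x00, 0x01 };        /* 256 KiB, stored LE */
    uint32_t usage_bytes = (uint32_t)le16_to_host(fw_use_in_kb) * 1024;
    printf("%u bytes\n", usage_bytes);                     /* 262144 */
    return 0;
}
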
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index c62132c..e458acb 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -445,6 +445,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)

list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS avoid breaking the
+ * aux dp channel on imac and help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ continue;
+ }
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
@@ -1146,17 +1156,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
}
@@ -1168,6 +1177,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
} else {
save->crtc_enabled[i] = false;
}
@@ -1185,6 +1203,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
}
/* wait for the MC to settle */
udelay(100);
+
+ /* lock double buffered regs */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
+ tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (!(tmp & 1)) {
+ tmp |= 1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ }
+ }
}

void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1206,6 +1240,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);

+ /* unlock regs and wait for update */
+ for (i = 0; i < rdev->num_crtc; i++) {
+ if (save->crtc_enabled[i]) {
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
+ if ((tmp & 0x3) != 0) {
+ tmp &= ~0x3;
+ WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
+ tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
+ WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
+ }
+ tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
+ if (tmp & 1) {
+ tmp &= ~1;
+ WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
+ }
+ for (j = 0; j < rdev->usec_timeout; j++) {
+ tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
+ if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
+ break;
+ udelay(1);
+ }
+ }
+ }
+
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 34a0e85..e534e5d 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -225,6 +225,8 @@
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
+#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8

#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 7dffc57..d706da8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -668,7 +668,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990F) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917) ||
- (rdev->pdev->device == 0x9999)) {
+ (rdev->pdev->device == 0x9999) ||
+ (rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
@@ -677,7 +678,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x990D) ||
(rdev->pdev->device == 0x990E) ||
(rdev->pdev->device == 0x9913) ||
- (rdev->pdev->device == 0x9918)) {
+ (rdev->pdev->device == 0x9918) ||
+ (rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
@@ -911,6 +913,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ if (ASIC_IS_DCE6(rdev))
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);

/* primary versions */
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046..d90b8b7 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -42,6 +42,10 @@
#define CAYMAN_MAX_TCC_MASK 0xFF

#define DMIF_ADDR_CONFIG 0xBD4
+
+/* DCE6 only */
+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5e30e12..38d87e1 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1998,6 +1998,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ if (num_modes == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
@@ -2396,6 +2398,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
+ if (power_info->pplib.ucNumStates == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
power_info->pplib.ucNumStates, GFP_KERNEL);
if (!rdev->pm.power_state)
@@ -2478,6 +2482,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
u16 data_offset;
u8 frev, crev;
+ u8 *power_state_offset;

if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset))
@@ -2494,15 +2499,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+ if (state_array->ucNumEntries == 0)
+ return state_index;
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
state_array->ucNumEntries, GFP_KERNEL);
if (!rdev->pm.power_state)
return state_index;
+ power_state_offset = (u8 *)state_array->states;
for (i = 0; i < state_array->ucNumEntries; i++) {
mode_index = 0;
- power_state = (union pplib_power_state *)&state_array->states[i];
- /* XXX this might be an inagua bug... */
- non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
@@ -2514,9 +2521,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
- /* XXX this might be an inagua bug... */
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
@@ -2538,6 +2542,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
non_clock_info);
state_index++;
}
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
}
/* if multiple clock modes, mark the lowest as no display */
for (i = 0; i < state_index; i++) {
@@ -2584,7 +2589,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
default:
break;
}
- } else {
+ }
+
+ if (state_index == 0) {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index b8459bd..bf6ca2d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -872,7 +872,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct radeon_device *rdev = dev->dev_private;

seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
- seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+ /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
+ if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
+ seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
+ else
+ seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
if (rdev->asic->pm.get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 1197f21..5508ad7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1799,6 +1799,7 @@ static void si_gpu_init(struct radeon_device *rdev)
rdev->config.si.backend_map = gb_backend_map;
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
+ WREG32(DMIF_ADDR_CALC, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);

/* primary versions */
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 2c2bc63..45e240d 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -55,6 +55,8 @@

#define DMIF_ADDR_CONFIG 0xBD4

+#define DMIF_ADDR_CALC 0xC00
+
#define SRBM_STATUS 0xE50

#define CC_SYS_RB_BACKEND_DISABLE 0xe80
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b68d28a..33a1760 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1327,7 +1327,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
static int __devinit ibmveth_probe(struct vio_dev *dev,
const struct vio_device_id *id)
{
- int rc, i;
+ int rc, i, mac_len;
struct net_device *netdev;
struct ibmveth_adapter *adapter;
unsigned char *mac_addr_p;
@@ -1337,11 +1337,19 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
dev->unit_address);

mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
- NULL);
+ &mac_len);
if (!mac_addr_p) {
dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
return -EINVAL;
}
+ /* Workaround for old/broken pHyp */
+ if (mac_len == 8)
+ mac_addr_p += 2;
+ else if (mac_len != 6) {
+ dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
+ mac_len);
+ return -EINVAL;
+ }

mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
VETH_MCAST_FILTER_SIZE, NULL);
@@ -1366,17 +1374,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,

netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

- /*
- * Some older boxes running PHYP non-natively have an OF that returns
- * a 8-byte local-mac-address field (and the first 2 bytes have to be
- * ignored) while newer boxes' OF return a 6-byte field. Note that
- * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
- * The RPA doc specifies that the first byte must be 10b, so we'll
- * just look for it to solve this 8 vs. 6 byte field issue
- */
- if ((*mac_addr_p & 0x3) != 0x02)
- mac_addr_p += 2;
-
adapter->mac_addr = 0;
memcpy(&adapter->mac_addr, mac_addr_p, 6);

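A small userspace sketch of the length-based MAC normalization introduced above, assuming a firmware property blob passed in as bytes; extract_mac() and the sample data are illustrative only, not the driver code:

/* Accept a 6- or 8-byte local-mac-address property; with 8 bytes, the
 * first two are padding to skip, anything else is rejected. */
#include <stdio.h>
#include <string.h>

static int extract_mac(const unsigned char *prop, int len, unsigned char mac[6])
{
    if (len == 8)
        prop += 2;          /* old firmware: skip the 2 leading padding bytes */
    else if (len != 6)
        return -1;          /* malformed property */
    memcpy(mac, prop, 6);
    return 0;
}

int main(void)
{
    const unsigned char prop8[8] = { 0, 0, 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };
    unsigned char mac[6];
    if (extract_mac(prop8, sizeof(prop8), mac) == 0)
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}
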
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 978af21..dd037dd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5168,6 +5168,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
goto err_stop_0;
}

+ /* 8168evl does not automatically pad to minimum length. */
+ if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+ skb->len < ETH_ZLEN)) {
+ if (skb_padto(skb, ETH_ZLEN))
+ goto err_update_stats;
+ skb_put(skb, ETH_ZLEN - skb->len);
+ }
+
if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
goto err_stop_0;

@@ -5239,6 +5247,7 @@ err_dma_1:
rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
err_dma_0:
dev_kfree_skb(skb);
+err_update_stats:
dev->stats.tx_dropped++;
return NETDEV_TX_OK;

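A userspace sketch of the explicit minimum-length padding this hunk adds for 8168evl, assuming a caller-supplied buffer with room for 60 bytes; pad_to_min() is illustrative, not the driver's skb API:

/* Zero-pad a short Ethernet frame up to the 60-byte minimum (ETH_ZLEN)
 * before handing it to hardware that does not pad automatically. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_MIN_LEN 60   /* minimum frame length without FCS */

static size_t pad_to_min(uint8_t *buf, size_t len)
{
    if (len < ETH_MIN_LEN) {
        memset(buf + len, 0, ETH_MIN_LEN - len);   /* zero-fill the tail */
        len = ETH_MIN_LEN;
    }
    return len;
}

int main(void)
{
    uint8_t frame[ETH_MIN_LEN] = { 0xde, 0xad, 0xbe, 0xef };
    printf("padded length: %zu\n", pad_to_min(frame, 4));   /* 60 */
    return 0;
}
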
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a3c9374..843b3e8 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2459,14 +2459,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* TD list.
*/
if (list_empty(&ep_ring->td_list)) {
- xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
- "with no TDs queued?\n",
- TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
- ep_index);
- xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
- (le32_to_cpu(event->flags) &
- TRB_TYPE_BITMASK)>>10);
- xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ /*
+ * A stopped endpoint may generate an extra completion
+ * event if the device was suspended. Don't print
+ * warnings.
+ */
+ if (!(trb_comp_code == COMP_STOP ||
+ trb_comp_code == COMP_STOP_INVAL)) {
+ xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
+ TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
+ ep_index);
+ xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
+ (le32_to_cpu(event->flags) &
+ TRB_TYPE_BITMASK)>>10);
+ xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
+ }
if (ep->skip) {
ep->skip = false;
xhci_dbg(xhci, "td_list is empty while skip "
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 1feb68e..b1cdb0a 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
/* This is an autofs submount, we can't expire it */
if (autofs_type_indirect(sbi->type))
goto done;
-
- /*
- * Otherwise it's an offset mount and we need to check
- * if we can umount its mount, if there is one.
- */
- if (!d_mountpoint(path.dentry)) {
- status = 0;
- goto done;
- }
}

/* Update the expiry counter if fs is busy */
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 9202b22..86a8c88 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1630,6 +1630,10 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
return 0;

ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
+ if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+ ext4_warning(sb, "resize would cause inodes_count overflow");
+ return -EINVAL;
+ }
ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);

n_desc_blocks = (n_group + EXT4_DESC_PER_BLOCK(sb)) /
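A minimal sketch of the overflow guard added above, assuming 32-bit inode accounting; the helper below is illustrative, not ext4 code:

/* Reject a group count whose product with inodes-per-group would not fit
 * in a 32-bit counter, by comparing against UINT32_MAX / per_group first. */
#include <stdint.h>
#include <stdio.h>

static int would_overflow_u32(uint64_t groups, uint32_t inodes_per_group)
{
    return groups > 0xFFFFFFFFULL / inodes_per_group;
}

int main(void)
{
    printf("%d\n", would_overflow_u32(1000, 8192));     /* 0: fits */
    printf("%d\n", would_overflow_u32(600000, 8192));   /* 1: would overflow */
    return 0;
}
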
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index d9928c1..1a13caa 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -231,6 +231,7 @@
{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -238,11 +239,13 @@
{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
@@ -594,6 +597,8 @@
{0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 31fdc48..0caf1f8 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -608,9 +608,9 @@ void audit_trim_trees(void)
}
spin_unlock(&hash_lock);
trim_marked(tree);
- put_tree(tree);
drop_collected_mounts(root_mnt);
skip_it:
+ put_tree(tree);
mutex_lock(&audit_filter_mutex);
}
list_del(&cursor);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b29ebd3..75c11bf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4855,36 +4855,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
iter->cpu_file = TRACE_PIPE_ALL_CPU;
}

-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
+void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
- static arch_spinlock_t ftrace_dump_lock =
- (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
/* use static because iter can be a bit big for the stack */
static struct trace_iterator iter;
+ static atomic_t dump_running;
unsigned int old_userobj;
- static int dump_ran;
unsigned long flags;
int cnt = 0, cpu;

- /* only one dump */
- local_irq_save(flags);
- arch_spin_lock(&ftrace_dump_lock);
- if (dump_ran)
- goto out;
-
- dump_ran = 1;
+ /* Only allow one dump user at a time. */
+ if (atomic_inc_return(&dump_running) != 1) {
+ atomic_dec(&dump_running);
+ return;
+ }

+ /*
+ * Always turn off tracing when we dump.
+ * We don't need to show trace output of what happens
+ * between multiple crashes.
+ *
+ * If the user does a sysrq-z, then they can re-enable
+ * tracing with echo 1 > tracing_on.
+ */
tracing_off();

- /* Did function tracer already get disabled? */
- if (ftrace_is_dead()) {
- printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
- printk("# MAY BE MISSING FUNCTION EVENTS\n");
- }
-
- if (disable_tracing)
- ftrace_kill();
+ local_irq_save(flags);

trace_init_global_iter(&iter);

@@ -4917,6 +4913,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)

printk(KERN_TRACE "Dumping ftrace buffer:\n");

+ /* Did function tracer already get disabled? */
+ if (ftrace_is_dead()) {
+ printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
+ printk("# MAY BE MISSING FUNCTION EVENTS\n");
+ }
+
/*
* We need to stop all tracing on all CPUS to read the
* the next buffer. This is a bit expensive, but is
@@ -4956,26 +4958,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
printk(KERN_TRACE "---------------------------------\n");

 out_enable:
- /* Re-enable tracing if requested */
- if (!disable_tracing) {
- trace_flags |= old_userobj;
+ trace_flags |= old_userobj;

- for_each_tracing_cpu(cpu) {
- atomic_dec(&iter.tr->data[cpu]->disabled);
- }
- tracing_on();
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&iter.tr->data[cpu]->disabled);
}
-
- out:
- arch_spin_unlock(&ftrace_dump_lock);
+ atomic_dec(&dump_running);
local_irq_restore(flags);
}
-
-/* By default: disable tracing after the dump */
-void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
-{
- __ftrace_dump(true, oops_dump_mode);
-}
EXPORT_SYMBOL_GPL(ftrace_dump);

__init static int tracer_alloc_buffers(void)
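
A C11 userspace sketch of the single-user guard that replaces the dump spinlock above, using an atomic counter with the same increment-and-check shape as atomic_inc_return(); try_dump() and its callers are illustrative, not the tracer's API:

/* The first caller sees the counter reach 1 and proceeds; any concurrent
 * caller observes a larger value and backs out immediately. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running;

static int try_dump(void)
{
    if (atomic_fetch_add(&dump_running, 1) + 1 != 1) {
        atomic_fetch_sub(&dump_running, 1);   /* someone else is dumping */
        return -1;
    }
    puts("dumping...");                       /* exclusive section */
    atomic_fetch_sub(&dump_running, 1);
    return 0;
}

int main(void)
{
    printf("first:  %d\n", try_dump());   /* 0 */
    printf("second: %d\n", try_dump());   /* 0 again: the first has finished */
    return 0;
}
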
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 288541f..09fd98a 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -461,8 +461,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST 100000000

-static void
-__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
@@ -472,8 +470,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
ftrace_graph_stop();
printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
- if (ftrace_dump_on_oops)
- __ftrace_dump(false, DUMP_ALL);
+ if (ftrace_dump_on_oops) {
+ ftrace_dump(DUMP_ALL);
+ /* ftrace_dump() disables tracing */
+ tracing_on();
+ }
return 0;
}

diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 1aa5cac..55add93 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
if (ret > 0)
break;
if (!ret)
- return 0;
+ return -EINVAL;
dataoff += *matchoff;
}

- /* Empty callid is useless */
- if (!*matchlen)
- return -EINVAL;
-
/* Too large is useless */
if (*matchlen > IP_VS_PEDATA_MAXLEN)
return -EINVAL;