Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
commit 5047f09b56
David Woodhouse <dwmw2@infradead.org>, 2006-05-06 19:59:18 +01:00
211 changed files with 3295 additions and 1443 deletions

CREDITS

@@ -1194,15 +1194,9 @@ S: Brecksville, OH 44141-1334
 S: USA
 
 N: Tristan Greaves
-E: Tristan.Greaves@icl.com
-E: tmg296@ecs.soton.ac.uk
-W: http://www.ecs.soton.ac.uk/~tmg296
+E: tristan@extricate.org
+W: http://www.extricate.org/
 D: Miscellaneous ipv4 sysctl patches
-S: 15 Little Mead
-S: Denmead
-S: Hampshire
-S: PO7 6HS
-S: United Kingdom
 
 N: Michael A. Griffith
 E: grif@cs.ucr.edu


@@ -150,8 +150,6 @@ config ARCH_IOP3XX
 
 config ARCH_IXP4XX
 	bool "IXP4xx-based"
-	select DMABOUNCE
-	select PCI
 	help
 	  Support for Intel's IXP4XX (XScale) family of processors.
 


@@ -38,10 +38,10 @@ static void icedcc_putc(int ch)
 		if (--i < 0)
 			return;
 
-		asm("mrc p14, 0, %0, c0, c0, 0" : "=r" (status));
+		asm volatile ("mrc p14, 0, %0, c0, c0, 0" : "=r" (status));
 	} while (status & 2);
 
-	asm("mcr p15, 0, %0, c1, c0, 0" : : "r" (ch));
+	asm("mcr p14, 0, %0, c1, c0, 0" : : "r" (ch));
 }
 
 #define putc(ch) icedcc_putc(ch)


@@ -95,5 +95,11 @@ int main(void)
   DEFINE(SYS_ERROR0,		0x9f0000);
   BLANK();
   DEFINE(SIZEOF_MACHINE_DESC,	sizeof(struct machine_desc));
+  DEFINE(MACHINFO_TYPE,		offsetof(struct machine_desc, nr));
+  DEFINE(MACHINFO_NAME,		offsetof(struct machine_desc, name));
+  DEFINE(MACHINFO_PHYSIO,	offsetof(struct machine_desc, phys_io));
+  DEFINE(MACHINFO_PGOFFIO,	offsetof(struct machine_desc, io_pg_offst));
+  DEFINE(PROCINFO_INITFUNC,	offsetof(struct proc_info_list, __cpu_flush));
+  DEFINE(PROCINFO_MMUFLAGS,	offsetof(struct proc_info_list, __cpu_mmu_flags));
   return 0;
 }


@@ -20,12 +20,10 @@
 #include <asm/mach-types.h>
 #include <asm/procinfo.h>
 #include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/system.h>
 
-#define PROCINFO_INITFUNC	12
-#define MACHINFO_TYPE		0
-
 /*
  * Kernel startup entry point.
  * ---------------------------


@@ -24,14 +24,6 @@
 #include <asm/thread_info.h>
 #include <asm/system.h>
 
-#define PROCINFO_MMUFLAGS	8
-#define PROCINFO_INITFUNC	12
-
-#define MACHINFO_TYPE		0
-#define MACHINFO_PHYSIO		4
-#define MACHINFO_PGOFFIO	8
-#define MACHINFO_NAME		12
-
 #define KERNEL_RAM_ADDR	(PAGE_OFFSET + TEXT_OFFSET)
 
 /*


@@ -195,56 +195,6 @@ void __init imx_set_mmc_info(struct imxmmc_platform_data *info)
 }
 EXPORT_SYMBOL(imx_set_mmc_info);
 
-static struct resource imx_uart1_resources[] = {
-	[0] = {
-		.start	= 0x00206000,
-		.end	= 0x002060FF,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= (UART1_MINT_RX),
-		.end	= (UART1_MINT_RX),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start	= (UART1_MINT_TX),
-		.end	= (UART1_MINT_TX),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device imx_uart1_device = {
-	.name		= "imx-uart",
-	.id		= 0,
-	.num_resources	= ARRAY_SIZE(imx_uart1_resources),
-	.resource	= imx_uart1_resources,
-};
-
-static struct resource imx_uart2_resources[] = {
-	[0] = {
-		.start	= 0x00207000,
-		.end	= 0x002070FF,
-		.flags	= IORESOURCE_MEM,
-	},
-	[1] = {
-		.start	= (UART2_MINT_RX),
-		.end	= (UART2_MINT_RX),
-		.flags	= IORESOURCE_IRQ,
-	},
-	[2] = {
-		.start	= (UART2_MINT_TX),
-		.end	= (UART2_MINT_TX),
-		.flags	= IORESOURCE_IRQ,
-	},
-};
-
-static struct platform_device imx_uart2_device = {
-	.name		= "imx-uart",
-	.id		= 1,
-	.num_resources	= ARRAY_SIZE(imx_uart2_resources),
-	.resource	= imx_uart2_resources,
-};
-
 static struct imxfb_mach_info imx_fb_info;
 
 void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info)
@@ -283,8 +233,6 @@ static struct platform_device imxfb_device = {
 static struct platform_device *devices[] __initdata = {
 	&imx_mmc_device,
 	&imxfb_device,
-	&imx_uart1_device,
-	&imx_uart2_device,
 };
 
 static struct map_desc imx_io_desc[] __initdata = {


@@ -26,6 +26,7 @@
 #include <asm/mach/arch.h>
 #include <asm/arch/mmc.h>
+#include <asm/arch/imx-uart.h>
 #include <linux/interrupt.h>
 #include "generic.h"
@@ -48,8 +49,70 @@ static struct platform_device cs89x0_device = {
 	.resource	= cs89x0_resources,
 };
 
+static struct imxuart_platform_data uart_pdata = {
+	.flags = IMXUART_HAVE_RTSCTS,
+};
+
+static struct resource imx_uart1_resources[] = {
+	[0] = {
+		.start	= 0x00206000,
+		.end	= 0x002060FF,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= (UART1_MINT_RX),
+		.end	= (UART1_MINT_RX),
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		.start	= (UART1_MINT_TX),
+		.end	= (UART1_MINT_TX),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device imx_uart1_device = {
+	.name		= "imx-uart",
+	.id		= 0,
+	.num_resources	= ARRAY_SIZE(imx_uart1_resources),
+	.resource	= imx_uart1_resources,
+	.dev = {
+		.platform_data = &uart_pdata,
+	}
+};
+
+static struct resource imx_uart2_resources[] = {
+	[0] = {
+		.start	= 0x00207000,
+		.end	= 0x002070FF,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= (UART2_MINT_RX),
+		.end	= (UART2_MINT_RX),
+		.flags	= IORESOURCE_IRQ,
+	},
+	[2] = {
+		.start	= (UART2_MINT_TX),
+		.end	= (UART2_MINT_TX),
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device imx_uart2_device = {
+	.name		= "imx-uart",
+	.id		= 1,
+	.num_resources	= ARRAY_SIZE(imx_uart2_resources),
+	.resource	= imx_uart2_resources,
+	.dev = {
+		.platform_data = &uart_pdata,
+	}
+};
+
 static struct platform_device *devices[] __initdata = {
 	&cs89x0_device,
+	&imx_uart1_device,
+	&imx_uart2_device,
 };
 
 #ifdef CONFIG_MMC_IMX
@@ -75,6 +138,17 @@ mx1ads_init(void)
 	imx_gpio_mode(GPIO_PORTB | GPIO_GIUS | GPIO_IN | 20);
 	imx_set_mmc_info(&mx1ads_mmc_info);
 #endif
+
+	imx_gpio_mode(PC9_PF_UART1_CTS);
+	imx_gpio_mode(PC10_PF_UART1_RTS);
+	imx_gpio_mode(PC11_PF_UART1_TXD);
+	imx_gpio_mode(PC12_PF_UART1_RXD);
+
+	imx_gpio_mode(PB28_PF_UART2_CTS);
+	imx_gpio_mode(PB29_PF_UART2_RTS);
+	imx_gpio_mode(PB30_PF_UART2_TXD);
+	imx_gpio_mode(PB31_PF_UART2_RXD);
+
 	platform_add_devices(devices, ARRAY_SIZE(devices));
 }


@@ -11,6 +11,7 @@ comment "IXP4xx Platforms"
 config MACH_NSLU2
 	bool
 	prompt "Linksys NSLU2"
+	select PCI
 	help
 	  Say 'Y' here if you want your kernel to support Linksys's
 	  NSLU2 NAS device. For more information on this platform,
@@ -18,6 +19,7 @@ config MACH_NSLU2
 
 config ARCH_AVILA
 	bool "Avila"
+	select PCI
 	help
 	  Say 'Y' here if you want your kernel to support the Gateworks
 	  Avila Network Platform. For more information on this platform,
@@ -25,6 +27,7 @@ config ARCH_AVILA
 
 config ARCH_ADI_COYOTE
 	bool "Coyote"
+	select PCI
 	help
 	  Say 'Y' here if you want your kernel to support the ADI
 	  Engineering Coyote Gateway Reference Platform. For more
@@ -32,6 +35,7 @@ config ARCH_ADI_COYOTE
 
 config ARCH_IXDP425
 	bool "IXDP425"
+	select PCI
 	help
 	  Say 'Y' here if you want your kernel to support Intel's
 	  IXDP425 Development Platform (Also known as Richfield).
@@ -39,6 +43,7 @@ config ARCH_IXDP425
 
 config MACH_IXDPG425
 	bool "IXDPG425"
+	select PCI
 	help
 	  Say 'Y' here if you want your kernel to support Intel's
 	  IXDPG425 Development Platform (Also known as Montajade).
@@ -46,6 +51,7 @@ config MACH_IXDPG425
 
 config MACH_IXDP465
 	bool "IXDP465"
+	select PCI
 	help
 	  Say 'Y' here if you want your kernel to support Intel's
 	  IXDP465 Development Platform (Also known as BMP).
@@ -72,6 +78,7 @@ config ARCH_PRPMC1100
 config MACH_NAS100D
 	bool
 	prompt "NAS100D"
+	select PCI
 	help
 	  Say 'Y' here if you want your kernel to support Iomega's
 	  NAS 100d device. For more information on this platform,
@@ -96,6 +103,7 @@ config CPU_IXP46X
 
 config MACH_GTWX5715
 	bool "Gemtek WX5715 (Linksys WRV54G)"
 	depends on ARCH_IXP4XX
+	select PCI
 	help
 	  This board is currently inside the Linksys WRV54G Gateways.
@@ -110,11 +118,16 @@ config MACH_GTWX5715
 	  "High Speed" UART is n/c  (as far as I can tell)
 	  20 Pin ARM/Xscale JTAG interface on J2
 
 comment "IXP4xx Options"
 
+config DMABOUNCE
+	bool
+	default y
+	depends on PCI
+
 config IXP4XX_INDIRECT_PCI
 	bool "Use indirect PCI memory access"
+	depends on PCI
 	help
 	  IXP4xx provides two methods of accessing PCI memory space:


@@ -2,8 +2,9 @@
 # Makefile for the linux kernel.
 #
 
-obj-y	+= common.o common-pci.o
+obj-y	+= common.o
+obj-$(CONFIG_PCI)		+= common-pci.o
 
 obj-$(CONFIG_ARCH_IXDP4XX)	+= ixdp425-pci.o ixdp425-setup.o
 obj-$(CONFIG_MACH_IXDPG425)	+= ixdpg425-pci.o coyote-setup.o
 obj-$(CONFIG_ARCH_ADI_COYOTE)	+= coyote-pci.o coyote-setup.o


@@ -45,23 +45,16 @@ int pxa_request_dma (char *name, pxa_dma_prio prio,
 
 	local_irq_save(flags);
 
-	/* try grabbing a DMA channel with the requested priority */
-	for (i = prio; i < prio + PXA_DMA_NBCH(prio); i++) {
-		if (!dma_channels[i].name) {
-			found = 1;
-			break;
-		}
-	}
-
-	if (!found) {
-		/* requested prio group is full, try hier priorities */
-		for (i = prio-1; i >= 0; i--) {
-			if (!dma_channels[i].name) {
-				found = 1;
-				break;
-			}
-		}
-	}
+	do {
+		/* try grabbing a DMA channel with the requested priority */
+		pxa_for_each_dma_prio (i, prio) {
+			if (!dma_channels[i].name) {
+				found = 1;
+				break;
+			}
+		}
+		/* if requested prio group is full, try a hier priority */
+	} while (!found && prio--);
 
 	if (found) {
 		DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
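
The rewritten allocation loop above tries the requested priority group first and, via "while (!found && prio--)", falls back through the remaining groups. The stand-alone C sketch below illustrates only that retry pattern; the channel table, group sizes, NUM_PRIO/CH_PER_GROUP constants and the channel-to-group mapping are invented for illustration and are not the kernel's pxa_for_each_dma_prio() implementation.

#include <stdio.h>

#define NUM_PRIO      3		/* hypothetical number of priority groups */
#define CH_PER_GROUP  4		/* hypothetical channels per group        */

static const char *channels[NUM_PRIO * CH_PER_GROUP];	/* NULL = free */

/* Scan the requested priority group; if it is full, retry the next
 * (numerically lower) group, mirroring "while (!found && prio--)".
 * Returns a channel index, or -1 if every group is exhausted. */
static int request_channel(int prio, const char *name)
{
	int i, found = -1;

	do {
		for (i = prio * CH_PER_GROUP; i < (prio + 1) * CH_PER_GROUP; i++) {
			if (!channels[i]) {
				channels[i] = name;
				found = i;
				break;
			}
		}
	} while (found < 0 && prio--);

	return found;
}

int main(void)
{
	printf("got channel %d\n", request_channel(2, "demo"));
	return 0;
}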


@@ -245,7 +245,7 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 	 */
 	barrier();
 	trigger = fmrx(FPINST2);
-	fpscr = fmrx(FPSCR);
+	orig_fpscr = fpscr = fmrx(FPSCR);
 
  emulate:
 	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);


@@ -215,7 +215,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!phys_addr || !size || !cpu_has_apic)
+	if (!phys_addr || !size)
 		return -EINVAL;
 
 	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -1102,9 +1102,6 @@ int __init acpi_boot_table_init(void)
 	dmi_check_system(acpi_dmi_table);
 #endif
 
-	if (!cpu_has_apic)
-		return -ENODEV;
-
 	/*
 	 * If acpi_disabled, bail out
 	 * One exception: acpi=ht continues far enough to enumerate LAPICs


@@ -757,10 +757,6 @@ static int __init apic_set_verbosity(char *str)
 		apic_verbosity = APIC_DEBUG;
 	else if (strcmp("verbose", str) == 0)
 		apic_verbosity = APIC_VERBOSE;
-	else
-		printk(KERN_WARNING "APIC Verbosity level %s not recognised"
-			" use apic=verbose or apic=debug\n", str);
-
 	return 1;
 }
 


@@ -671,7 +671,7 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 	if (unlikely(current->audit_context)) {
 		if (entryexit)
-			audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
+			audit_syscall_exit(AUDITSC_RESULT(regs->eax),
 					   regs->eax);
 
 		/* Debug traps, when using PTRACE_SINGLESTEP, must be sent only
 		 * on the syscall exit path. Normally, when TIF_SYSCALL_AUDIT is
@@ -720,14 +720,13 @@ int do_syscall_trace(struct pt_regs *regs, int entryexit)
 		ret = is_sysemu;
 out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(current, AUDIT_ARCH_I386, regs->orig_eax,
+		audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_eax,
 				    regs->ebx, regs->ecx, regs->edx, regs->esi);
 	if (ret == 0)
 		return 0;
 
 	regs->orig_eax = -1; /* force skip of syscall restarting */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(regs->eax),
-				   regs->eax);
+		audit_syscall_exit(AUDITSC_RESULT(regs->eax), regs->eax);
 	return 1;
 }


@@ -970,8 +970,10 @@ efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
  * not-overlapping, which is the case
  */
 int __init
-e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
+e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
 {
+	u64 start = s;
+	u64 end = e;
 	int i;
 	for (i = 0; i < e820.nr_map; i++) {
 		struct e820entry *ei = &e820.map[i];
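
The hunk above keeps e820_all_mapped()'s prototype but widens the values it compares to u64, so ranges above 4 GB are not truncated when unsigned long is 32 bits wide. A small stand-alone illustration of that truncation follows; the addresses are arbitrary examples, not values from this commit.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t end64 = 0x120000000ULL;		/* 4.5 GB, e.g. the end of an e820 range */
	unsigned long end_ul = (unsigned long)end64;	/* truncates to 0x20000000 on a 32-bit build */

	/* Comparisons against the truncated value would pass or fail for
	 * the wrong reason on i386, which is what the u64 widening avoids. */
	printf("u64: %llx, unsigned long: %lx\n",
	       (unsigned long long)end64, end_ul);
	return 0;
}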


@@ -279,7 +279,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct cpufreq_freqs *freq = data;
 
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_seqlock_irq(&xtime_lock);
 	if (!ref_freq) {
 		if (!freq->old){
@@ -312,7 +312,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	}
 
 end:
-	if (val != CPUFREQ_RESUMECHANGE)
+	if (val != CPUFREQ_RESUMECHANGE && val != CPUFREQ_SUSPENDCHANGE)
 		write_sequnlock_irq(&xtime_lock);
 
 	return 0;


@@ -312,7 +312,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 	/*call audit_syscall_exit since we do not exit via the normal paths */
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current, AUDITSC_RESULT(eax), eax);
+		audit_syscall_exit(AUDITSC_RESULT(eax), eax);
 
 	__asm__ __volatile__(
 		"movl %0,%%esp\n\t"


@@ -1644,7 +1644,7 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
 			arch = AUDIT_ARCH_IA64;
 		}
 
-		audit_syscall_entry(current, arch, syscall, arg0, arg1, arg2, arg3);
+		audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
 	}
 }
 
@@ -1662,7 +1662,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 		if (success != AUDITSC_SUCCESS)
 			result = -result;
 
-		audit_syscall_exit(current, success, result);
+		audit_syscall_exit(success, result);
 	}
 
 	if (test_thread_flag(TIF_SYSCALL_TRACE)


@@ -483,7 +483,7 @@ static inline int audit_arch(void)
 asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
 	if (unlikely(current->audit_context) && entryexit)
-		audit_syscall_exit(current, AUDITSC_RESULT(regs->regs[2]),
+		audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
 		                   regs->regs[2]);
 
 	if (!(current->ptrace & PT_PTRACED))
@@ -507,7 +507,7 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 	}
 out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(current, audit_arch(), regs->regs[2],
+		audit_syscall_entry(audit_arch(), regs->regs[2],
 				    regs->regs[4], regs->regs[5],
 				    regs->regs[6], regs->regs[7]);
 }


@@ -90,14 +90,14 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
-	kprobe_opcode_t insn = *p->ainsn.insn;
-
 	regs->msr |= MSR_SE;
 
-	/* single step inline if it is a trap variant */
-	if (is_trap(insn))
-		regs->nip = (unsigned long)p->addr;
-	else
-		regs->nip = (unsigned long)p->ainsn.insn;
+	/*
+	 * On powerpc we should single step on the original
+	 * instruction even if the probed insn is a trap
+	 * variant as values in regs could play a part in
+	 * if the trap is taken or not
+	 */
+	regs->nip = (unsigned long)p->ainsn.insn;
 }


@ -885,6 +885,74 @@ void __init unflatten_device_tree(void)
DBG(" <- unflatten_device_tree()\n"); DBG(" <- unflatten_device_tree()\n");
} }
/*
* ibm,pa-features is a per-cpu property that contains a string of
* attribute descriptors, each of which has a 2 byte header plus up
* to 254 bytes worth of processor attribute bits. First header
* byte specifies the number of bytes following the header.
* Second header byte is an "attribute-specifier" type, of which
* zero is the only currently-defined value.
* Implementation: Pass in the byte and bit offset for the feature
* that we are interested in. The function will return -1 if the
* pa-features property is missing, or a 1/0 to indicate if the feature
* is supported/not supported. Note that the bit numbers are
* big-endian to match the definition in PAPR.
*/
static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
{0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
{0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
{CPU_FTR_SLB, 0, 0, 2, 0},
{CPU_FTR_CTRL, 0, 0, 3, 0},
{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
{CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
};
static void __init check_cpu_pa_features(unsigned long node)
{
unsigned char *pa_ftrs;
unsigned long len, tablelen, i, bit;
pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
if (pa_ftrs == NULL)
return;
/* find descriptor with type == 0 */
for (;;) {
if (tablelen < 3)
return;
len = 2 + pa_ftrs[0];
if (tablelen < len)
return; /* descriptor 0 not found */
if (pa_ftrs[1] == 0)
break;
tablelen -= len;
pa_ftrs += len;
}
/* loop over bits we know about */
for (i = 0; i < ARRAY_SIZE(ibm_pa_features); ++i) {
struct ibm_pa_feature *fp = &ibm_pa_features[i];
if (fp->pabyte >= pa_ftrs[0])
continue;
bit = (pa_ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
}
}
}
static int __init early_init_dt_scan_cpus(unsigned long node, static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth, const char *uname, int depth,
void *data) void *data)
@ -969,6 +1037,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
} }
#endif /* CONFIG_ALTIVEC */ #endif /* CONFIG_ALTIVEC */
check_cpu_pa_features(node);
#ifdef CONFIG_PPC_PSERIES #ifdef CONFIG_PPC_PSERIES
if (nthreads > 1) if (nthreads > 1)
cur_cpu_spec->cpu_features |= CPU_FTR_SMT; cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
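
The check_cpu_pa_features() code added above walks the ibm,pa-features descriptor (2-byte header, then attribute bytes) and tests one big-endian-numbered bit per table entry with "(pa_ftrs[2 + pabyte] >> (7 - pabit)) & 1". The stand-alone sketch below only illustrates that bit extraction; the sample property bytes are invented for illustration, not taken from real firmware.

#include <stdio.h>

/* Hypothetical ibm,pa-features descriptor: length = 2, type = 0,
 * followed by two attribute bytes.  0xf6 = 1111 0110b, so with
 * big-endian bit numbering bits 0-3, 5 and 6 of byte 0 are set. */
static const unsigned char pa_ftrs[] = { 0x02, 0x00, 0xf6, 0x40 };

/* Same extraction as the kernel loop: bit N of byte B, counted from
 * the most-significant bit, is reached by shifting right (7 - N). */
static int pa_feature_set(unsigned char byte, unsigned char bit)
{
	return (pa_ftrs[2 + byte] >> (7 - bit)) & 1;
}

int main(void)
{
	printf("byte 0, bit 0 (MMU): %d\n", pa_feature_set(0, 0)); /* 1 */
	printf("byte 0, bit 2 (SLB): %d\n", pa_feature_set(0, 2)); /* 1 */
	printf("byte 1, bit 1:       %d\n", pa_feature_set(1, 1)); /* 1 */
	return 0;
}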


@@ -538,7 +538,7 @@ void do_syscall_trace_enter(struct pt_regs *regs)
 		do_syscall_trace();
 
 	if (unlikely(current->audit_context))
-		audit_syscall_entry(current,
+		audit_syscall_entry(
 #ifdef CONFIG_PPC32
 				    AUDIT_ARCH_PPC,
 #else
@@ -556,8 +556,7 @@
 #endif
 
 	if (unlikely(current->audit_context))
-		audit_syscall_exit(current,
-				   (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
+		audit_syscall_exit((regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
 				   regs->result);
 
 	if ((test_thread_flag(TIF_SYSCALL_TRACE)


@ -322,13 +322,31 @@ static void register_nodes(void)
} }
} }
} }
int sysfs_add_device_to_node(struct sys_device *dev, int nid)
{
struct node *node = &node_devices[nid];
return sysfs_create_link(&node->sysdev.kobj, &dev->kobj,
kobject_name(&dev->kobj));
}
void sysfs_remove_device_from_node(struct sys_device *dev, int nid)
{
struct node *node = &node_devices[nid];
sysfs_remove_link(&node->sysdev.kobj, kobject_name(&dev->kobj));
}
#else #else
static void register_nodes(void) static void register_nodes(void)
{ {
return; return;
} }
#endif #endif
EXPORT_SYMBOL_GPL(sysfs_add_device_to_node);
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
/* Only valid if CPU is present. */ /* Only valid if CPU is present. */
static ssize_t show_physical_id(struct sys_device *dev, char *buf) static ssize_t show_physical_id(struct sys_device *dev, char *buf)
{ {


@@ -194,7 +194,7 @@ static int *of_get_associativity(struct device_node *dev)
 /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
  * info is found.
  */
-static int of_node_to_nid(struct device_node *device)
+static int of_node_to_nid_single(struct device_node *device)
 {
 	int nid = -1;
 	unsigned int *tmp;
@@ -216,6 +216,28 @@ out:
 	return nid;
 }
 
+/* Walk the device tree upwards, looking for an associativity id */
+int of_node_to_nid(struct device_node *device)
+{
+	struct device_node *tmp;
+	int nid = -1;
+
+	of_node_get(device);
+	while (device) {
+		nid = of_node_to_nid_single(device);
+		if (nid != -1)
+			break;
+
+		tmp = device;
+		device = of_get_parent(tmp);
+		of_node_put(tmp);
+	}
+	of_node_put(device);
+
+	return nid;
+}
+EXPORT_SYMBOL_GPL(of_node_to_nid);
+
 /*
  * In theory, the "ibm,associativity" property may contain multiple
  * associativity lists because a resource may be multiply connected
@@ -300,7 +322,7 @@ static int __cpuinit numa_setup_cpu(unsigned long lcpu)
 		goto out;
 	}
 
-	nid = of_node_to_nid(cpu);
+	nid = of_node_to_nid_single(cpu);
 
 	if (nid < 0 || !node_online(nid))
 		nid = any_online_node(NODE_MASK_ALL);
@@ -393,7 +415,7 @@ static int __init parse_numa_properties(void)
 
 		cpu = find_cpu_node(i);
 		BUG_ON(!cpu);
-		nid = of_node_to_nid(cpu);
+		nid = of_node_to_nid_single(cpu);
 		of_node_put(cpu);
 
 		/*
@@ -437,7 +459,7 @@ new_range:
 		 * have associativity properties. If none, then
 		 * everything goes to default_nid.
 		 */
-		nid = of_node_to_nid(memory);
+		nid = of_node_to_nid_single(memory);
 		if (nid < 0)
 			nid = default_nid;
 		node_set_online(nid);
@@ -776,7 +798,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 ha_new_range:
 	start = read_n_cells(n_mem_addr_cells, &memcell_buf);
 	size = read_n_cells(n_mem_size_cells, &memcell_buf);
-	nid = of_node_to_nid(memory);
+	nid = of_node_to_nid_single(memory);
 
 	/* Domains not present at boot default to 0 */
 	if (nid < 0 || !node_online(nid))


@@ -12,7 +12,8 @@ config SPU_FS
 
 config SPUFS_MMAP
 	bool
-	depends on SPU_FS && SPARSEMEM && !PPC_64K_PAGES
+	depends on SPU_FS && SPARSEMEM
+	select MEMORY_HOTPLUG
 	default y
 
 endmenu


@ -29,6 +29,8 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/root_dev.h> #include <linux/root_dev.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/mutex.h>
#include <linux/memory_hotplug.h>
#include <asm/mmu.h> #include <asm/mmu.h>
#include <asm/processor.h> #include <asm/processor.h>
@ -46,6 +48,7 @@
#include <asm/cputable.h> #include <asm/cputable.h>
#include <asm/ppc-pci.h> #include <asm/ppc-pci.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/spu.h>
#include "interrupt.h" #include "interrupt.h"
#include "iommu.h" #include "iommu.h"
@ -69,77 +72,6 @@ static void cell_show_cpuinfo(struct seq_file *m)
of_node_put(root); of_node_put(root);
} }
#ifdef CONFIG_SPARSEMEM
static int __init find_spu_node_id(struct device_node *spe)
{
unsigned int *id;
#ifdef CONFIG_NUMA
struct device_node *cpu;
cpu = spe->parent->parent;
id = (unsigned int *)get_property(cpu, "node-id", NULL);
#else
id = NULL;
#endif
return id ? *id : 0;
}
static void __init cell_spuprop_present(struct device_node *spe,
const char *prop, int early)
{
struct address_prop {
unsigned long address;
unsigned int len;
} __attribute__((packed)) *p;
int proplen;
unsigned long start_pfn, end_pfn, pfn;
int node_id;
p = (void*)get_property(spe, prop, &proplen);
WARN_ON(proplen != sizeof (*p));
node_id = find_spu_node_id(spe);
start_pfn = p->address >> PAGE_SHIFT;
end_pfn = (p->address + p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* We need to call memory_present *before* the call to sparse_init,
but we can initialize the page structs only *after* that call.
Thus, we're being called twice. */
if (early)
memory_present(node_id, start_pfn, end_pfn);
else {
/* As the pages backing SPU LS and I/O are outside the range
of regular memory, their page structs were not initialized
by free_area_init. Do it here instead. */
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
struct page *page = pfn_to_page(pfn);
set_page_links(page, ZONE_DMA, node_id, pfn);
init_page_count(page);
reset_page_mapcount(page);
SetPageReserved(page);
INIT_LIST_HEAD(&page->lru);
}
}
}
static void __init cell_spumem_init(int early)
{
struct device_node *node;
for (node = of_find_node_by_type(NULL, "spe");
node; node = of_find_node_by_type(node, "spe")) {
cell_spuprop_present(node, "local-store", early);
cell_spuprop_present(node, "problem", early);
cell_spuprop_present(node, "priv1", early);
cell_spuprop_present(node, "priv2", early);
}
}
#else
static void __init cell_spumem_init(int early)
{
}
#endif
static void cell_progress(char *s, unsigned short hex) static void cell_progress(char *s, unsigned short hex)
{ {
printk("*** %04x : %s\n", hex, s ? s : ""); printk("*** %04x : %s\n", hex, s ? s : "");
@ -172,8 +104,6 @@ static void __init cell_setup_arch(void)
#endif #endif
mmio_nvram_init(); mmio_nvram_init();
cell_spumem_init(0);
} }
/* /*
@ -189,8 +119,6 @@ static void __init cell_init_early(void)
ppc64_interrupt_controller = IC_CELL_PIC; ppc64_interrupt_controller = IC_CELL_PIC;
cell_spumem_init(1);
DBG(" <- cell_init_early()\n"); DBG(" <- cell_init_early()\n");
} }


@ -520,8 +520,50 @@ void spu_irq_setaffinity(struct spu *spu, int cpu)
} }
EXPORT_SYMBOL_GPL(spu_irq_setaffinity); EXPORT_SYMBOL_GPL(spu_irq_setaffinity);
static void __iomem * __init map_spe_prop(struct device_node *n, static int __init find_spu_node_id(struct device_node *spe)
const char *name) {
unsigned int *id;
struct device_node *cpu;
cpu = spe->parent->parent;
id = (unsigned int *)get_property(cpu, "node-id", NULL);
return id ? *id : 0;
}
static int __init cell_spuprop_present(struct spu *spu, struct device_node *spe,
const char *prop)
{
static DEFINE_MUTEX(add_spumem_mutex);
struct address_prop {
unsigned long address;
unsigned int len;
} __attribute__((packed)) *p;
int proplen;
unsigned long start_pfn, nr_pages;
struct pglist_data *pgdata;
struct zone *zone;
int ret;
p = (void*)get_property(spe, prop, &proplen);
WARN_ON(proplen != sizeof (*p));
start_pfn = p->address >> PAGE_SHIFT;
nr_pages = ((unsigned long)p->len + PAGE_SIZE - 1) >> PAGE_SHIFT;
pgdata = NODE_DATA(spu->nid);
zone = pgdata->node_zones;
/* XXX rethink locking here */
mutex_lock(&add_spumem_mutex);
ret = __add_pages(zone, start_pfn, nr_pages);
mutex_unlock(&add_spumem_mutex);
return ret;
}
static void __iomem * __init map_spe_prop(struct spu *spu,
struct device_node *n, const char *name)
{ {
struct address_prop { struct address_prop {
unsigned long address; unsigned long address;
@ -530,6 +572,8 @@ static void __iomem * __init map_spe_prop(struct device_node *n,
void *p; void *p;
int proplen; int proplen;
void* ret = NULL;
int err = 0;
p = get_property(n, name, &proplen); p = get_property(n, name, &proplen);
if (proplen != sizeof (struct address_prop)) if (proplen != sizeof (struct address_prop))
@ -537,7 +581,14 @@ static void __iomem * __init map_spe_prop(struct device_node *n,
prop = p; prop = p;
return ioremap(prop->address, prop->len); err = cell_spuprop_present(spu, n, name);
if (err && (err != -EEXIST))
goto out;
ret = ioremap(prop->address, prop->len);
out:
return ret;
} }
static void spu_unmap(struct spu *spu) static void spu_unmap(struct spu *spu)
@ -548,44 +599,45 @@ static void spu_unmap(struct spu *spu)
iounmap((u8 __iomem *)spu->local_store); iounmap((u8 __iomem *)spu->local_store);
} }
static int __init spu_map_device(struct spu *spu, struct device_node *spe) static int __init spu_map_device(struct spu *spu, struct device_node *node)
{ {
char *prop; char *prop;
int ret; int ret;
ret = -ENODEV; ret = -ENODEV;
prop = get_property(spe, "isrc", NULL); prop = get_property(node, "isrc", NULL);
if (!prop) if (!prop)
goto out; goto out;
spu->isrc = *(unsigned int *)prop; spu->isrc = *(unsigned int *)prop;
spu->name = get_property(spe, "name", NULL); spu->name = get_property(node, "name", NULL);
if (!spu->name) if (!spu->name)
goto out; goto out;
prop = get_property(spe, "local-store", NULL); prop = get_property(node, "local-store", NULL);
if (!prop) if (!prop)
goto out; goto out;
spu->local_store_phys = *(unsigned long *)prop; spu->local_store_phys = *(unsigned long *)prop;
/* we use local store as ram, not io memory */ /* we use local store as ram, not io memory */
spu->local_store = (void __force *)map_spe_prop(spe, "local-store"); spu->local_store = (void __force *)
map_spe_prop(spu, node, "local-store");
if (!spu->local_store) if (!spu->local_store)
goto out; goto out;
prop = get_property(spe, "problem", NULL); prop = get_property(node, "problem", NULL);
if (!prop) if (!prop)
goto out_unmap; goto out_unmap;
spu->problem_phys = *(unsigned long *)prop; spu->problem_phys = *(unsigned long *)prop;
spu->problem= map_spe_prop(spe, "problem"); spu->problem= map_spe_prop(spu, node, "problem");
if (!spu->problem) if (!spu->problem)
goto out_unmap; goto out_unmap;
spu->priv1= map_spe_prop(spe, "priv1"); spu->priv1= map_spe_prop(spu, node, "priv1");
/* priv1 is not available on a hypervisor */ /* priv1 is not available on a hypervisor */
spu->priv2= map_spe_prop(spe, "priv2"); spu->priv2= map_spe_prop(spu, node, "priv2");
if (!spu->priv2) if (!spu->priv2)
goto out_unmap; goto out_unmap;
ret = 0; ret = 0;
@ -597,17 +649,6 @@ out:
return ret; return ret;
} }
static int __init find_spu_node_id(struct device_node *spe)
{
unsigned int *id;
struct device_node *cpu;
cpu = spe->parent->parent;
id = (unsigned int *)get_property(cpu, "node-id", NULL);
return id ? *id : 0;
}
static int __init create_spu(struct device_node *spe) static int __init create_spu(struct device_node *spe)
{ {
struct spu *spu; struct spu *spu;
@ -624,6 +665,10 @@ static int __init create_spu(struct device_node *spe)
goto out_free; goto out_free;
spu->node = find_spu_node_id(spe); spu->node = find_spu_node_id(spe);
spu->nid = of_node_to_nid(spe);
if (spu->nid == -1)
spu->nid = 0;
spu->stop_code = 0; spu->stop_code = 0;
spu->slb_replace = 0; spu->slb_replace = 0;
spu->mm = NULL; spu->mm = NULL;


@@ -118,7 +118,15 @@ int eeh_send_failure_event (struct device_node *dn,
 {
 	unsigned long flags;
 	struct eeh_event *event;
+	char *location;
+	if (!mem_init_done) {
+		printk(KERN_ERR "EEH: event during early boot not handled\n");
+		location = (char *) get_property(dn, "ibm,loc-code", NULL);
+		printk(KERN_ERR "EEH: device node = %s\n", dn->full_name);
+		printk(KERN_ERR "EEH: PCI location = %s\n", location);
+		return 1;
+	}
 
 	event = kmalloc(sizeof(*event), GFP_ATOMIC);
 	if (event == NULL) {
 		printk (KERN_ERR "EEH: out of memory, event not handled\n");


@@ -378,7 +378,7 @@ int __init mpc866ads_init(void)
 	ppc_sys_device_setfunc(MPC8xx_CPM_SMC1, PPC_SYS_FUNC_UART);
 #endif
 
-#ifdef CONFIG_SERIAL_CPM_SMCer
+#ifdef CONFIG_SERIAL_CPM_SMC
 	ppc_sys_device_enable(MPC8xx_CPM_SMC2);
 	ppc_sys_device_setfunc(MPC8xx_CPM_SMC2, PPC_SYS_FUNC_UART);
 #endif


@@ -734,7 +734,7 @@ asmlinkage void
 syscall_trace(struct pt_regs *regs, int entryexit)
 {
 	if (unlikely(current->audit_context) && entryexit)
-		audit_syscall_exit(current, AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);
+		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);
 
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		goto out;
@@ -761,8 +761,7 @@
 	}
  out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(current,
-				    test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
+		audit_syscall_entry(test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
 				    regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
 				    regs->gprs[4], regs->gprs[5]);
 }


@@ -358,8 +358,9 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
 	} else {
 		regs->gprs[14] = (unsigned long)
 			frame->retcode | PSW_ADDR_AMODE;
-		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
-		                  (u16 __user *)(frame->retcode));
+		if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
+		               (u16 __user *)(frame->retcode)))
+			goto give_sigsegv;
 	}
 
 	/* Set up backchain. */


@@ -23,7 +23,7 @@ sys_call_table:
 /*10*/	.long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
 /*15*/	.long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek
 /*20*/	.long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
-/*25*/	.long sys_time, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
+/*25*/	.long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
 /*30*/	.long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
 /*35*/	.long sys_chown, sys_sync, sys_kill, sys_newstat, sys_sendfile
 /*40*/	.long sys_newlstat, sys_dup, sys_pipe, sys_times, sys_getuid


@@ -653,7 +653,7 @@ asmlinkage void syscall_trace(struct pt_regs *regs, int syscall_exit_p)
 		if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
 			result = AUDITSC_FAILURE;
 
-		audit_syscall_exit(current, result, regs->u_regs[UREG_I0]);
+		audit_syscall_exit(result, regs->u_regs[UREG_I0]);
 	}
 
 	if (!(current->ptrace & PT_PTRACED))
@@ -677,8 +677,7 @@
 
 out:
 	if (unlikely(current->audit_context) && !syscall_exit_p)
-		audit_syscall_entry(current,
-				    (test_thread_flag(TIF_32BIT) ?
+		audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
 				     AUDIT_ARCH_SPARC :
 				     AUDIT_ARCH_SPARC64),
 				    regs->u_regs[UREG_G1],


@@ -139,6 +139,7 @@ SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
 SIGN2(sys32_splice, sys_splice, %o0, %o1)
 SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
 SIGN2(sys32_tee, sys_tee, %o0, %o1)
+SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
 
 	.globl		sys32_mmap2
 sys32_mmap2:


@@ -25,7 +25,7 @@ sys_call_table32:
 /*10*/	.word sys_unlink, sunos_execv, sys_chdir, sys32_chown16, sys32_mknod
 /*15*/	.word sys_chmod, sys32_lchown16, sparc_brk, sys32_perfctr, sys32_lseek
 /*20*/	.word sys_getpid, sys_capget, sys_capset, sys32_setuid16, sys32_getuid16
-/*25*/	.word compat_sys_time, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
+/*25*/	.word sys32_vmsplice, sys_ptrace, sys_alarm, sys32_sigaltstack, sys32_pause
 /*30*/	.word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
 	.word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile
 /*40*/	.word compat_sys_newlstat, sys_dup, sys_pipe, compat_sys_times, sys_getuid
@@ -94,7 +94,7 @@ sys_call_table:
 /*10*/	.word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
 /*15*/	.word sys_chmod, sys_lchown, sparc_brk, sys_perfctr, sys_lseek
 /*20*/	.word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
-/*25*/	.word sys_nis_syscall, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
+/*25*/	.word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
 /*30*/	.word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
 	.word sys_nis_syscall, sys_sync, sys_kill, sys_newstat, sys_sendfile64
 /*40*/	.word sys_newlstat, sys_dup, sys_pipe, sys_times, sys_nis_syscall


@@ -8,6 +8,7 @@
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
+#include <linux/preempt.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -24,6 +25,8 @@
 {
 	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
 
+	preempt_disable();
+
 	if (mp->tlb_nr) {
 		flush_tsb_user(mp);
@@ -38,6 +41,8 @@
 		}
 		mp->tlb_nr = 0;
 	}
+
+	preempt_enable();
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)


@@ -57,20 +57,6 @@ config STATIC_LINK
 	  chroot, and you disable CONFIG_MODE_TT, you probably want to say Y
 	  here.
 
-config HOST_2G_2G
-	bool "2G/2G host address space split"
-	default n
-	depends on MODE_TT
-	help
-	  This is needed when the host on which you run has a 2G/2G memory
-	  split, instead of the customary 3G/1G.
-
-	  Note that to enable such a host
-	  configuration, which makes sense only in some cases, you need special
-	  host patches.
-
-	  So, if you do not know what to do here, say 'N'.
-
 config KERNEL_HALF_GIGS
 	int "Kernel address space size (in .5G units)"
 	default "1"


@@ -16,6 +16,19 @@ config SEMAPHORE_SLEEPERS
 	bool
 	default y
 
+config HOST_2G_2G
+	bool "2G/2G host address space split"
+	default n
+	help
+	  This is needed when the host on which you run has a 2G/2G memory
+	  split, instead of the customary 3G/1G.
+
+	  Note that to enable such a host
+	  configuration, which makes sense only in some cases, you need special
+	  host patches.
+
+	  So, if you do not know what to do here, say 'N'.
+
 config TOP_ADDR
 	hex
 	default 0xc0000000 if !HOST_2G_2G
@@ -35,11 +48,13 @@ config 3_LEVEL_PGTABLES
 
 config STUB_CODE
 	hex
-	default 0xbfffe000
+	default 0xbfffe000 if !HOST_2G_2G
+	default 0x7fffe000 if HOST_2G_2G
 
 config STUB_DATA
 	hex
-	default 0xbffff000
+	default 0xbffff000 if !HOST_2G_2G
+	default 0x7ffff000 if HOST_2G_2G
 
 config STUB_START
 	hex


@@ -96,7 +96,8 @@ PHONY += linux
 all: linux
 
 linux: vmlinux
-	ln -f $< $@
+	@echo '  LINK $@'
+	$(Q)ln -f $< $@
 
 define archhelp
   echo '* linux - Binary kernel image (./linux) - for backward'
@@ -117,6 +118,10 @@ prepare: $(ARCH_DIR)/include/kern_constants.h
 LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
 LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
 
+CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
+	$(call cc-option, -fno-stack-protector,) \
+	$(call cc-option, -fno-stack-protector-all,)
+
 CPP_MODE-$(CONFIG_MODE_TT) := -DMODE_TT
 CONFIG_KERNEL_STACK_ORDER ?= 2
 STACK_SIZE := $(shell echo $$[ 4096 * (1 << $(CONFIG_KERNEL_STACK_ORDER)) ] )
@@ -203,8 +208,8 @@ endef
 $(ARCH_DIR)/include/uml-config.h : include/linux/autoconf.h
 	$(call filechk,umlconfig)
 
-$(ARCH_DIR)/user-offsets.s: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.c
-	$(CC) $(USER_CFLAGS) -S -o $@ $<
+$(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.s: FORCE
+	$(Q)$(MAKE) $(build)=$(ARCH_DIR)/sys-$(SUBARCH) $@
 
 define filechk_gen-asm-offsets
   (set -e; \
@@ -219,13 +224,11 @@ define filechk_gen-asm-offsets
   echo ""; )
 endef
 
-$(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/user-offsets.s
+$(ARCH_DIR)/include/user_constants.h: $(ARCH_DIR)/sys-$(SUBARCH)/user-offsets.s
 	$(call filechk,gen-asm-offsets)
 
-CLEAN_FILES += $(ARCH_DIR)/user-offsets.s
-
 $(ARCH_DIR)/include/kern_constants.h: $(objtree)/$(ARCH_DIR)/include
 	@echo '  SYMLINK $@'
-	$(Q) ln -sf ../../../include/asm-um/asm-offsets.h $@
+	$(Q)ln -sf ../../../include/asm-um/asm-offsets.h $@
 
-export SUBARCH USER_CFLAGS OS
+export SUBARCH USER_CFLAGS CFLAGS_NO_HARDENING OS


@ -1,14 +1,13 @@
# #
# Automatically generated make config: don't edit # Automatically generated make config: don't edit
# Linux kernel version: 2.6.12-rc6-mm1 # Linux kernel version: 2.6.17-rc3
# Tue Jun 14 18:22:21 2005 # Fri Apr 28 09:31:20 2006
# #
CONFIG_GENERIC_HARDIRQS=y CONFIG_GENERIC_HARDIRQS=y
CONFIG_UML=y CONFIG_UML=y
CONFIG_MMU=y CONFIG_MMU=y
CONFIG_UID16=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_GENERIC_CALIBRATE_DELAY=y CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_IRQ_RELEASE_METHOD=y
# #
# UML-specific options # UML-specific options
@ -16,8 +15,50 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
# CONFIG_MODE_TT is not set # CONFIG_MODE_TT is not set
# CONFIG_STATIC_LINK is not set # CONFIG_STATIC_LINK is not set
CONFIG_MODE_SKAS=y CONFIG_MODE_SKAS=y
#
# Host processor type and features
#
# CONFIG_M386 is not set
# CONFIG_M486 is not set
# CONFIG_M586 is not set
# CONFIG_M586TSC is not set
# CONFIG_M586MMX is not set
CONFIG_M686=y
# CONFIG_MPENTIUMII is not set
# CONFIG_MPENTIUMIII is not set
# CONFIG_MPENTIUMM is not set
# CONFIG_MPENTIUM4 is not set
# CONFIG_MK6 is not set
# CONFIG_MK7 is not set
# CONFIG_MK8 is not set
# CONFIG_MCRUSOE is not set
# CONFIG_MEFFICEON is not set
# CONFIG_MWINCHIPC6 is not set
# CONFIG_MWINCHIP2 is not set
# CONFIG_MWINCHIP3D is not set
# CONFIG_MGEODEGX1 is not set
# CONFIG_MGEODE_LX is not set
# CONFIG_MCYRIXIII is not set
# CONFIG_MVIAC3_2 is not set
# CONFIG_X86_GENERIC is not set
CONFIG_X86_CMPXCHG=y
CONFIG_X86_XADD=y
CONFIG_X86_L1_CACHE_SHIFT=5
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_X86_PPRO_FENCE=y
CONFIG_X86_WP_WORKS_OK=y
CONFIG_X86_INVLPG=y
CONFIG_X86_BSWAP=y
CONFIG_X86_POPAD_OK=y
CONFIG_X86_CMPXCHG64=y
CONFIG_X86_GOOD_APIC=y
CONFIG_X86_USE_PPRO_CHECKSUM=y
CONFIG_X86_TSC=y
CONFIG_UML_X86=y CONFIG_UML_X86=y
# CONFIG_64BIT is not set # CONFIG_64BIT is not set
CONFIG_SEMAPHORE_SLEEPERS=y
# CONFIG_HOST_2G_2G is not set
CONFIG_TOP_ADDR=0xc0000000 CONFIG_TOP_ADDR=0xc0000000
# CONFIG_3_LEVEL_PGTABLES is not set # CONFIG_3_LEVEL_PGTABLES is not set
CONFIG_STUB_CODE=0xbfffe000 CONFIG_STUB_CODE=0xbfffe000
@ -25,22 +66,24 @@ CONFIG_STUB_DATA=0xbffff000
CONFIG_STUB_START=0xbfffe000 CONFIG_STUB_START=0xbfffe000
CONFIG_ARCH_HAS_SC_SIGNALS=y CONFIG_ARCH_HAS_SC_SIGNALS=y
CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA=y CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_SELECT_MEMORY_MODEL=y CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set # CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set # CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_LD_SCRIPT_DYN=y CONFIG_LD_SCRIPT_DYN=y
CONFIG_NET=y CONFIG_NET=y
CONFIG_BINFMT_ELF=y CONFIG_BINFMT_ELF=y
CONFIG_BINFMT_MISC=m CONFIG_BINFMT_MISC=m
# CONFIG_HOSTFS is not set # CONFIG_HOSTFS is not set
# CONFIG_HPPFS is not set
CONFIG_MCONSOLE=y CONFIG_MCONSOLE=y
# CONFIG_MAGIC_SYSRQ is not set # CONFIG_MAGIC_SYSRQ is not set
# CONFIG_HOST_2G_2G is not set
CONFIG_NEST_LEVEL=0 CONFIG_NEST_LEVEL=0
CONFIG_KERNEL_HALF_GIGS=1
# CONFIG_HIGHMEM is not set # CONFIG_HIGHMEM is not set
CONFIG_KERNEL_STACK_ORDER=2 CONFIG_KERNEL_STACK_ORDER=2
CONFIG_UML_REAL_TIME_CLOCK=y CONFIG_UML_REAL_TIME_CLOCK=y
@ -49,7 +92,6 @@ CONFIG_UML_REAL_TIME_CLOCK=y
# Code maturity level options # Code maturity level options
# #
CONFIG_EXPERIMENTAL=y CONFIG_EXPERIMENTAL=y
CONFIG_CLEAN_COMPILE=y
CONFIG_BROKEN_ON_SMP=y CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_INIT_ENV_ARG_LIMIT=32
@ -57,6 +99,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
# General setup # General setup
# #
CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION=""
CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y CONFIG_SWAP=y
CONFIG_SYSVIPC=y CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y CONFIG_POSIX_MQUEUE=y
@ -64,26 +107,28 @@ CONFIG_BSD_PROCESS_ACCT=y
# CONFIG_BSD_PROCESS_ACCT_V3 is not set # CONFIG_BSD_PROCESS_ACCT_V3 is not set
CONFIG_SYSCTL=y CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set # CONFIG_AUDIT is not set
# CONFIG_HOTPLUG is not set
CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y CONFIG_IKCONFIG_PROC=y
# CONFIG_RELAY is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_UID16=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_EMBEDDED is not set # CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set # CONFIG_KALLSYMS_ALL is not set
CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_KALLSYMS_EXTRA_PASS=y
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y CONFIG_PRINTK=y
CONFIG_BUG=y CONFIG_BUG=y
CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y CONFIG_BASE_FULL=y
CONFIG_FUTEX=y CONFIG_FUTEX=y
CONFIG_EPOLL=y CONFIG_EPOLL=y
CONFIG_SHMEM=y CONFIG_SHMEM=y
CONFIG_CC_ALIGN_FUNCTIONS=0 CONFIG_SLAB=y
CONFIG_CC_ALIGN_LABELS=0
CONFIG_CC_ALIGN_LOOPS=0
CONFIG_CC_ALIGN_JUMPS=0
# CONFIG_TINY_SHMEM is not set # CONFIG_TINY_SHMEM is not set
CONFIG_BASE_SMALL=0 CONFIG_BASE_SMALL=0
# CONFIG_SLOB is not set
# #
# Loadable module support # Loadable module support
@ -91,18 +136,43 @@ CONFIG_BASE_SMALL=0
CONFIG_MODULES=y CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODULE_FORCE_UNLOAD is not set
CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODVERSIONS is not set # CONFIG_MODVERSIONS is not set
# CONFIG_MODULE_SRCVERSION_ALL is not set # CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_KMOD=y CONFIG_KMOD=y
# #
# Generic Driver Options # Block layer
# #
CONFIG_STANDALONE=y # CONFIG_LBD is not set
CONFIG_PREVENT_FIRMWARE_BUILD=y # CONFIG_BLK_DEV_IO_TRACE is not set
# CONFIG_FW_LOADER is not set # CONFIG_LSF is not set
# CONFIG_DEBUG_DRIVER is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# Block devices
#
CONFIG_BLK_DEV_UBD=y
# CONFIG_BLK_DEV_UBD_SYNC is not set
CONFIG_BLK_DEV_COW_COMMON=y
# CONFIG_MMAPPER is not set
CONFIG_BLK_DEV_LOOP=m
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_BLK_DEV_INITRD is not set
# CONFIG_ATA_OVER_ETH is not set
# #
# Character Devices # Character Devices
@ -127,50 +197,23 @@ CONFIG_UML_SOUND=m
CONFIG_SOUND=m CONFIG_SOUND=m
CONFIG_HOSTAUDIO=m CONFIG_HOSTAUDIO=m
CONFIG_UML_RANDOM=y CONFIG_UML_RANDOM=y
# CONFIG_MMAPPER is not set
# #
# Block devices # Generic Driver Options
# #
CONFIG_BLK_DEV_UBD=y CONFIG_STANDALONE=y
CONFIG_BLK_DEV_UBD_SYNC=y CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_BLK_DEV_COW_COMMON=y # CONFIG_FW_LOADER is not set
CONFIG_BLK_DEV_LOOP=m # CONFIG_DEBUG_DRIVER is not set
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
CONFIG_BLK_DEV_NBD=m
# CONFIG_BLK_DEV_RAM is not set
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_INITRAMFS_SOURCE=""
# CONFIG_LBD is not set
# #
# IO Schedulers # Networking
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
CONFIG_NETDEVICES=y
#
# UML Network Devices
#
CONFIG_UML_NET=y
CONFIG_UML_NET_ETHERTAP=y
CONFIG_UML_NET_TUNTAP=y
CONFIG_UML_NET_SLIP=y
CONFIG_UML_NET_DAEMON=y
CONFIG_UML_NET_MCAST=y
CONFIG_UML_NET_SLIRP=y
#
# Networking support
# #
# #
# Networking options # Networking options
# #
# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y CONFIG_PACKET=y
CONFIG_PACKET_MMAP=y CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y CONFIG_UNIX=y
@ -178,6 +221,7 @@ CONFIG_UNIX=y
CONFIG_INET=y CONFIG_INET=y
# CONFIG_IP_MULTICAST is not set # CONFIG_IP_MULTICAST is not set
# CONFIG_IP_ADVANCED_ROUTER is not set # CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_FIB_HASH=y
# CONFIG_IP_PNP is not set # CONFIG_IP_PNP is not set
# CONFIG_NET_IPIP is not set # CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set # CONFIG_NET_IPGRE is not set
@ -186,27 +230,31 @@ CONFIG_INET=y
# CONFIG_INET_AH is not set # CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set # CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set # CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set # CONFIG_INET_TUNNEL is not set
CONFIG_IP_TCPDIAG=y CONFIG_INET_DIAG=y
# CONFIG_IP_TCPDIAG_IPV6 is not set CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
# CONFIG_INET6_XFRM_TUNNEL is not set
# CONFIG_INET6_TUNNEL is not set
# CONFIG_NETFILTER is not set
# #
# TCP congestion control # DCCP Configuration (EXPERIMENTAL)
# #
CONFIG_TCP_CONG_BIC=y # CONFIG_IP_DCCP is not set
CONFIG_TCP_CONG_WESTWOOD=y
CONFIG_TCP_CONG_HTCP=y
# CONFIG_TCP_CONG_HSTCP is not set
# CONFIG_TCP_CONG_HYBLA is not set
# CONFIG_TCP_CONG_VEGAS is not set
# CONFIG_TCP_CONG_SCALABLE is not set
# CONFIG_IPV6 is not set
# CONFIG_NETFILTER is not set
# #
# SCTP Configuration (EXPERIMENTAL) # SCTP Configuration (EXPERIMENTAL)
# #
# CONFIG_IP_SCTP is not set # CONFIG_IP_SCTP is not set
#
# TIPC Configuration (EXPERIMENTAL)
#
# CONFIG_TIPC is not set
# CONFIG_ATM is not set # CONFIG_ATM is not set
# CONFIG_BRIDGE is not set # CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set # CONFIG_VLAN_8021Q is not set
@ -224,26 +272,46 @@ CONFIG_TCP_CONG_HTCP=y
# QoS and/or fair queueing # QoS and/or fair queueing
# #
# CONFIG_NET_SCHED is not set # CONFIG_NET_SCHED is not set
# CONFIG_NET_CLS_ROUTE is not set
# #
# Network testing # Network testing
# #
# CONFIG_NET_PKTGEN is not set # CONFIG_NET_PKTGEN is not set
# CONFIG_KGDBOE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NETPOLL_RX is not set
# CONFIG_NETPOLL_TRAP is not set
# CONFIG_NET_POLL_CONTROLLER is not set
# CONFIG_HAMRADIO is not set # CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set # CONFIG_IRDA is not set
# CONFIG_BT is not set # CONFIG_BT is not set
# CONFIG_IEEE80211 is not set # CONFIG_IEEE80211 is not set
#
# UML Network Devices
#
CONFIG_UML_NET=y
CONFIG_UML_NET_ETHERTAP=y
CONFIG_UML_NET_TUNTAP=y
CONFIG_UML_NET_SLIP=y
CONFIG_UML_NET_DAEMON=y
CONFIG_UML_NET_MCAST=y
# CONFIG_UML_NET_PCAP is not set
CONFIG_UML_NET_SLIRP=y
#
# Network device support
#
CONFIG_NETDEVICES=y
CONFIG_DUMMY=m CONFIG_DUMMY=m
# CONFIG_BONDING is not set # CONFIG_BONDING is not set
# CONFIG_EQUALIZER is not set # CONFIG_EQUALIZER is not set
CONFIG_TUN=m CONFIG_TUN=m
#
# PHY device support
#
#
# Wireless LAN (non-hamradio)
#
# CONFIG_NET_RADIO is not set
# #
# Wan interfaces # Wan interfaces
# #
@ -263,6 +331,13 @@ CONFIG_SLIP=m
# CONFIG_SLIP_MODE_SLIP6 is not set # CONFIG_SLIP_MODE_SLIP6 is not set
# CONFIG_SHAPER is not set # CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set # CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
#
# Connector - unified userspace <-> kernelspace linker
#
# CONFIG_CONNECTOR is not set
# #
# File systems # File systems
@ -274,17 +349,14 @@ CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set # CONFIG_EXT3_FS_XATTR is not set
CONFIG_JBD=y CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set # CONFIG_JBD_DEBUG is not set
# CONFIG_REISER4_FS is not set
CONFIG_REISERFS_FS=y CONFIG_REISERFS_FS=y
# CONFIG_REISERFS_CHECK is not set # CONFIG_REISERFS_CHECK is not set
# CONFIG_REISERFS_PROC_INFO is not set # CONFIG_REISERFS_PROC_INFO is not set
# CONFIG_REISERFS_FS_XATTR is not set # CONFIG_REISERFS_FS_XATTR is not set
# CONFIG_JFS_FS is not set # CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
#
# XFS support
#
# CONFIG_XFS_FS is not set # CONFIG_XFS_FS is not set
# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set # CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set # CONFIG_ROMFS_FS is not set
CONFIG_INOTIFY=y CONFIG_INOTIFY=y
@ -295,11 +367,6 @@ CONFIG_QUOTACTL=y
CONFIG_DNOTIFY=y CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=m CONFIG_AUTOFS_FS=m
CONFIG_AUTOFS4_FS=m CONFIG_AUTOFS4_FS=m
#
# Caches
#
# CONFIG_FSCACHE is not set
# CONFIG_FUSE_FS is not set # CONFIG_FUSE_FS is not set
# #
@ -323,14 +390,10 @@ CONFIG_JOLIET=y
CONFIG_PROC_FS=y CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y CONFIG_PROC_KCORE=y
CONFIG_SYSFS=y CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y CONFIG_TMPFS=y
# CONFIG_TMPFS_XATTR is not set
# CONFIG_HUGETLB_PAGE is not set # CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set # CONFIG_CONFIGFS_FS is not set
# CONFIG_RELAYFS_FS is not set
# #
# Miscellaneous filesystems # Miscellaneous filesystems
@ -430,6 +493,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# Library routines # Library routines
# #
# CONFIG_CRC_CCITT is not set # CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
CONFIG_CRC32=m CONFIG_CRC32=m
# CONFIG_LIBCRC32C is not set # CONFIG_LIBCRC32C is not set
@ -448,12 +512,18 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_DETECT_SOFTLOCKUP=y CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_SCHEDSTATS is not set # CONFIG_SCHEDSTATS is not set
CONFIG_DEBUG_SLAB=y CONFIG_DEBUG_SLAB=y
# CONFIG_DEBUG_SLAB_LEAK is not set
# CONFIG_DEBUG_MUTEXES is not set
# CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_SPINLOCK is not set
# CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set
# CONFIG_DEBUG_KOBJECT is not set # CONFIG_DEBUG_KOBJECT is not set
CONFIG_DEBUG_INFO=y CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_FS is not set # CONFIG_DEBUG_FS is not set
# CONFIG_DEBUG_VM is not set
CONFIG_FRAME_POINTER=y CONFIG_FRAME_POINTER=y
# CONFIG_UNWIND_INFO is not set
CONFIG_FORCED_INLINING=y
# CONFIG_RCU_TORTURE_TEST is not set
# CONFIG_GPROF is not set # CONFIG_GPROF is not set
# CONFIG_GCOV is not set # CONFIG_GCOV is not set
# CONFIG_SYSCALL_DEBUG is not set # CONFIG_SYSCALL_DEBUG is not set

View file

@ -100,7 +100,7 @@ struct cow_header_v3_broken {
__u32 alignment; __u32 alignment;
__u32 cow_format; __u32 cow_format;
char backing_file[PATH_LEN_V3]; char backing_file[PATH_LEN_V3];
} __attribute__((packed)); };
/* COW format definitions - for now, we have only the usual COW bitmap */ /* COW format definitions - for now, we have only the usual COW bitmap */
#define COW_BITMAP 0 #define COW_BITMAP 0
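The change above drops `__attribute__((packed))` from the broken v3 header struct. Packing removes the padding the compiler would otherwise insert, so it changes both `sizeof` and the field offsets; a minimal standalone sketch (hypothetical struct names, not the UML COW types):

```c
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical example structs, not taken from the COW header. */
struct hdr_packed {
	uint32_t magic;
	uint64_t offset;
	char     tag[3];
} __attribute__((packed));

struct hdr_natural {
	uint32_t magic;
	uint64_t offset;
	char     tag[3];
};

int main(void)
{
	/* On a typical 64-bit build: packed is 15 bytes with offset at 4,
	 * natural is 24 bytes with offset at 8. */
	printf("packed:  size=%zu, offsetof(offset)=%zu\n",
	       sizeof(struct hdr_packed), offsetof(struct hdr_packed, offset));
	printf("natural: size=%zu, offsetof(offset)=%zu\n",
	       sizeof(struct hdr_natural), offsetof(struct hdr_natural, offset));
	return 0;
}
```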

View file

@ -89,16 +89,18 @@ void sigio_handler(int sig, union uml_pt_regs *regs)
struct irq_fd *irq_fd; struct irq_fd *irq_fd;
int n; int n;
if(smp_sigio_handler()) return; if (smp_sigio_handler())
while(1){ return;
while (1) {
n = os_waiting_for_events(active_fds); n = os_waiting_for_events(active_fds);
if (n <= 0) { if (n <= 0) {
if(n == -EINTR) continue; if(n == -EINTR) continue;
else break; else break;
} }
for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
if(irq_fd->current_events != 0){ if (irq_fd->current_events != 0) {
irq_fd->current_events = 0; irq_fd->current_events = 0;
do_IRQ(irq_fd->irq, regs); do_IRQ(irq_fd->irq, regs);
} }
@ -110,19 +112,17 @@ void sigio_handler(int sig, union uml_pt_regs *regs)
static void maybe_sigio_broken(int fd, int type) static void maybe_sigio_broken(int fd, int type)
{ {
if(os_isatty(fd)){ if (os_isatty(fd)) {
if((type == IRQ_WRITE) && !pty_output_sigio){ if ((type == IRQ_WRITE) && !pty_output_sigio) {
write_sigio_workaround(); write_sigio_workaround();
add_sigio_fd(fd, 0); add_sigio_fd(fd, 0);
} } else if ((type == IRQ_READ) && !pty_close_sigio) {
else if((type == IRQ_READ) && !pty_close_sigio){
write_sigio_workaround(); write_sigio_workaround();
add_sigio_fd(fd, 1); add_sigio_fd(fd, 1);
} }
} }
} }
int activate_fd(int irq, int fd, int type, void *dev_id) int activate_fd(int irq, int fd, int type, void *dev_id)
{ {
struct pollfd *tmp_pfd; struct pollfd *tmp_pfd;
@ -132,16 +132,18 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
pid = os_getpid(); pid = os_getpid();
err = os_set_fd_async(fd, pid); err = os_set_fd_async(fd, pid);
if(err < 0) if (err < 0)
goto out; goto out;
new_fd = um_kmalloc(sizeof(*new_fd)); new_fd = um_kmalloc(sizeof(*new_fd));
err = -ENOMEM; err = -ENOMEM;
if(new_fd == NULL) if (new_fd == NULL)
goto out; goto out;
if(type == IRQ_READ) events = UM_POLLIN | UM_POLLPRI; if (type == IRQ_READ)
else events = UM_POLLOUT; events = UM_POLLIN | UM_POLLPRI;
else
events = UM_POLLOUT;
*new_fd = ((struct irq_fd) { .next = NULL, *new_fd = ((struct irq_fd) { .next = NULL,
.id = dev_id, .id = dev_id,
.fd = fd, .fd = fd,
@ -165,8 +167,8 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
* a semaphore. * a semaphore.
*/ */
flags = irq_lock(); flags = irq_lock();
for(irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next){ for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
if((irq_fd->fd == fd) && (irq_fd->type == type)){ if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
printk("Registering fd %d twice\n", fd); printk("Registering fd %d twice\n", fd);
printk("Irqs : %d, %d\n", irq_fd->irq, irq); printk("Irqs : %d, %d\n", irq_fd->irq, irq);
printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id); printk("Ids : 0x%p, 0x%p\n", irq_fd->id, dev_id);
@ -175,13 +177,13 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
} }
/*-------------*/ /*-------------*/
if(type == IRQ_WRITE) if (type == IRQ_WRITE)
fd = -1; fd = -1;
tmp_pfd = NULL; tmp_pfd = NULL;
n = 0; n = 0;
while(1){ while (1) {
n = os_create_pollfd(fd, events, tmp_pfd, n); n = os_create_pollfd(fd, events, tmp_pfd, n);
if (n == 0) if (n == 0)
break; break;
@ -198,10 +200,8 @@ int activate_fd(int irq, int fd, int type, void *dev_id)
* then we free the buffer tmp_fds and try again. * then we free the buffer tmp_fds and try again.
*/ */
irq_unlock(flags); irq_unlock(flags);
if (tmp_pfd != NULL) {
kfree(tmp_pfd); kfree(tmp_pfd);
tmp_pfd = NULL; tmp_pfd = NULL;
}
tmp_pfd = um_kmalloc(n); tmp_pfd = um_kmalloc(n);
if (tmp_pfd == NULL) if (tmp_pfd == NULL)
@ -249,7 +249,7 @@ static int same_irq_and_dev(struct irq_fd *irq, void *d)
{ {
struct irq_and_dev *data = d; struct irq_and_dev *data = d;
return((irq->irq == data->irq) && (irq->id == data->dev)); return ((irq->irq == data->irq) && (irq->id == data->dev));
} }
void free_irq_by_irq_and_dev(unsigned int irq, void *dev) void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
@ -262,7 +262,7 @@ void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
static int same_fd(struct irq_fd *irq, void *fd) static int same_fd(struct irq_fd *irq, void *fd)
{ {
return(irq->fd == *((int *) fd)); return (irq->fd == *((int *)fd));
} }
void free_irq_by_fd(int fd) void free_irq_by_fd(int fd)
@ -276,16 +276,17 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
int i = 0; int i = 0;
int fdi; int fdi;
for(irq=active_fds; irq != NULL; irq = irq->next){ for (irq = active_fds; irq != NULL; irq = irq->next) {
if((irq->fd == fd) && (irq->irq == irqnum)) break; if ((irq->fd == fd) && (irq->irq == irqnum))
break;
i++; i++;
} }
if(irq == NULL){ if (irq == NULL) {
printk("find_irq_by_fd doesn't have descriptor %d\n", fd); printk("find_irq_by_fd doesn't have descriptor %d\n", fd);
goto out; goto out;
} }
fdi = os_get_pollfd(i); fdi = os_get_pollfd(i);
if((fdi != -1) && (fdi != fd)){ if ((fdi != -1) && (fdi != fd)) {
printk("find_irq_by_fd - mismatch between active_fds and " printk("find_irq_by_fd - mismatch between active_fds and "
"pollfds, fd %d vs %d, need %d\n", irq->fd, "pollfds, fd %d vs %d, need %d\n", irq->fd,
fdi, fd); fdi, fd);
@ -294,7 +295,7 @@ static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
} }
*index_out = i; *index_out = i;
out: out:
return(irq); return irq;
} }
void reactivate_fd(int fd, int irqnum) void reactivate_fd(int fd, int irqnum)
@ -305,7 +306,7 @@ void reactivate_fd(int fd, int irqnum)
flags = irq_lock(); flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i); irq = find_irq_by_fd(fd, irqnum, &i);
if(irq == NULL){ if (irq == NULL) {
irq_unlock(flags); irq_unlock(flags);
return; return;
} }
@ -326,7 +327,7 @@ void deactivate_fd(int fd, int irqnum)
flags = irq_lock(); flags = irq_lock();
irq = find_irq_by_fd(fd, irqnum, &i); irq = find_irq_by_fd(fd, irqnum, &i);
if(irq == NULL) if (irq == NULL)
goto out; goto out;
os_set_pollfd(i, -1); os_set_pollfd(i, -1);
out: out:
@ -338,15 +339,15 @@ int deactivate_all_fds(void)
struct irq_fd *irq; struct irq_fd *irq;
int err; int err;
for(irq=active_fds;irq != NULL;irq = irq->next){ for (irq = active_fds; irq != NULL; irq = irq->next) {
err = os_clear_fd_async(irq->fd); err = os_clear_fd_async(irq->fd);
if(err) if (err)
return(err); return err;
} }
/* If there is a signal already queued, after unblocking ignore it */ /* If there is a signal already queued, after unblocking ignore it */
os_set_ioignore(); os_set_ioignore();
return(0); return 0;
} }
void forward_interrupts(int pid) void forward_interrupts(int pid)
@ -356,9 +357,9 @@ void forward_interrupts(int pid)
int err; int err;
flags = irq_lock(); flags = irq_lock();
for(irq=active_fds;irq != NULL;irq = irq->next){ for (irq = active_fds; irq != NULL; irq = irq->next) {
err = os_set_owner(irq->fd, pid); err = os_set_owner(irq->fd, pid);
if(err < 0){ if (err < 0) {
/* XXX Just remove the irq rather than /* XXX Just remove the irq rather than
* print out an infinite stream of these * print out an infinite stream of these
*/ */
@ -379,7 +380,7 @@ void forward_interrupts(int pid)
unsigned int do_IRQ(int irq, union uml_pt_regs *regs) unsigned int do_IRQ(int irq, union uml_pt_regs *regs)
{ {
irq_enter(); irq_enter();
__do_IRQ(irq, (struct pt_regs *) regs); __do_IRQ(irq, (struct pt_regs *)regs);
irq_exit(); irq_exit();
return 1; return 1;
} }
@ -392,12 +393,12 @@ int um_request_irq(unsigned int irq, int fd, int type,
int err; int err;
err = request_irq(irq, handler, irqflags, devname, dev_id); err = request_irq(irq, handler, irqflags, devname, dev_id);
if(err) if (err)
return(err); return err;
if(fd != -1) if (fd != -1)
err = activate_fd(irq, fd, type, dev_id); err = activate_fd(irq, fd, type, dev_id);
return(err); return err;
} }
EXPORT_SYMBOL(um_request_irq); EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd); EXPORT_SYMBOL(reactivate_fd);
@ -409,7 +410,7 @@ unsigned long irq_lock(void)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&irq_spinlock, flags); spin_lock_irqsave(&irq_spinlock, flags);
return(flags); return flags;
} }
void irq_unlock(unsigned long flags) void irq_unlock(unsigned long flags)
@ -452,7 +453,7 @@ void __init init_IRQ(void)
irq_desc[TIMER_IRQ].depth = 1; irq_desc[TIMER_IRQ].depth = 1;
irq_desc[TIMER_IRQ].handler = &SIGVTALRM_irq_type; irq_desc[TIMER_IRQ].handler = &SIGVTALRM_irq_type;
enable_irq(TIMER_IRQ); enable_irq(TIMER_IRQ);
for(i=1;i<NR_IRQS;i++){ for (i = 1; i < NR_IRQS; i++) {
irq_desc[i].status = IRQ_DISABLED; irq_desc[i].status = IRQ_DISABLED;
irq_desc[i].action = NULL; irq_desc[i].action = NULL;
irq_desc[i].depth = 1; irq_desc[i].depth = 1;
@ -467,7 +468,7 @@ int init_aio_irq(int irq, char *name, irqreturn_t (*handler)(int, void *,
int fds[2], err; int fds[2], err;
err = os_pipe(fds, 1, 1); err = os_pipe(fds, 1, 1);
if(err){ if (err) {
printk("init_aio_irq - os_pipe failed, err = %d\n", -err); printk("init_aio_irq - os_pipe failed, err = %d\n", -err);
goto out; goto out;
} }
@ -475,7 +476,7 @@ int init_aio_irq(int irq, char *name, irqreturn_t (*handler)(int, void *,
err = um_request_irq(irq, fds[0], IRQ_READ, handler, err = um_request_irq(irq, fds[0], IRQ_READ, handler,
SA_INTERRUPT | SA_SAMPLE_RANDOM, name, SA_INTERRUPT | SA_SAMPLE_RANDOM, name,
(void *) (long) fds[0]); (void *) (long) fds[0]);
if(err){ if (err) {
printk("init_aio_irq - : um_request_irq failed, err = %d\n", printk("init_aio_irq - : um_request_irq failed, err = %d\n",
err); err);
goto out_close; goto out_close;
@ -488,5 +489,5 @@ int init_aio_irq(int irq, char *name, irqreturn_t (*handler)(int, void *,
os_close_file(fds[0]); os_close_file(fds[0]);
os_close_file(fds[1]); os_close_file(fds[1]);
out: out:
return(err); return err;
} }
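Among the style cleanups above, one hunk drops the `if (tmp_pfd != NULL)` guard around `kfree(tmp_pfd)`; that is safe because kfree() treats a NULL pointer as a no-op. A short sketch of the same idea (hypothetical helper, not from this file):

```c
#include <linux/slab.h>
#include <linux/errno.h>

/* kfree(NULL) simply returns, so error paths can free unconditionally
 * without an explicit NULL check. */
static int demo_alloc_pair(void **a, void **b)
{
	*a = kmalloc(64, GFP_KERNEL);
	*b = kmalloc(64, GFP_KERNEL);
	if (*a == NULL || *b == NULL) {
		kfree(*a);	/* safe even if this allocation failed */
		kfree(*b);
		*a = *b = NULL;
		return -ENOMEM;
	}
	return 0;
}
```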

View file

@ -407,6 +407,8 @@ unsigned long find_iomem(char *driver, unsigned long *len_out)
*len_out = region->size; *len_out = region->size;
return(region->virt); return(region->virt);
} }
region = region->next;
} }
return(0); return(0);
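The added `region = region->next;` restores the advance step of what is evidently a loop over a singly linked region list; without it the walk would spin forever once the first entry failed to match. A minimal sketch of the same walk, with illustrative types rather than the UML ones:

```c
#include <stddef.h>
#include <string.h>

struct region {				/* illustrative stand-in */
	const char *driver;
	unsigned long virt;
	unsigned long size;
	struct region *next;
};

static unsigned long find_region(struct region *head, const char *driver,
				 unsigned long *len_out)
{
	struct region *region = head;

	while (region != NULL) {
		if (strcmp(region->driver, driver) == 0) {
			*len_out = region->size;
			return region->virt;
		}
		region = region->next;	/* the step the fix adds back */
	}
	return 0;
}
```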

View file

@ -275,15 +275,13 @@ void syscall_trace(union uml_pt_regs *regs, int entryexit)
if (unlikely(current->audit_context)) { if (unlikely(current->audit_context)) {
if (!entryexit) if (!entryexit)
audit_syscall_entry(current, audit_syscall_entry(HOST_AUDIT_ARCH,
HOST_AUDIT_ARCH,
UPT_SYSCALL_NR(regs), UPT_SYSCALL_NR(regs),
UPT_SYSCALL_ARG1(regs), UPT_SYSCALL_ARG1(regs),
UPT_SYSCALL_ARG2(regs), UPT_SYSCALL_ARG2(regs),
UPT_SYSCALL_ARG3(regs), UPT_SYSCALL_ARG3(regs),
UPT_SYSCALL_ARG4(regs)); UPT_SYSCALL_ARG4(regs));
else audit_syscall_exit(current, else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)),
AUDITSC_RESULT(UPT_SYSCALL_RET(regs)),
UPT_SYSCALL_RET(regs)); UPT_SYSCALL_RET(regs));
} }

View file

@ -6,9 +6,11 @@
obj-y := clone.o exec_kern.o mem.o mmu.o process_kern.o \ obj-y := clone.o exec_kern.o mem.o mmu.o process_kern.o \
syscall.o tlb.o uaccess.o syscall.o tlb.o uaccess.o
USER_OBJS := clone.o # clone.o is in the stub, so it can't be built with profiling
# GCC hardened also auto-enables -fpic, but we need %ebx so it can't work ->
# disable it
CFLAGS_clone.o := $(CFLAGS_NO_HARDENING)
UNPROFILE_OBJS := clone.o
include arch/um/scripts/Makefile.rules include arch/um/scripts/Makefile.rules
# clone.o is in the stub, so it can't be built with profiling
$(obj)/clone.o : c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS))

View file

@ -209,4 +209,4 @@ int __init timer_init(void)
return(0); return(0);
} }
__initcall(timer_init); arch_initcall(timer_init);
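Switching from `__initcall()` to `arch_initcall()` moves the timer setup to an earlier initcall level: `__initcall` is an alias for `device_initcall` (level 6), while `arch_initcall` runs at level 3, ahead of the subsystem and device initialisation that may depend on a working timer. A tiny built-in example of the ordering (illustrative functions, not from this patch):

```c
#include <linux/init.h>
#include <linux/kernel.h>

static int __init early_setup(void)
{
	printk(KERN_INFO "arch_initcall (level 3) runs first\n");
	return 0;
}
arch_initcall(early_setup);

static int __init later_setup(void)
{
	printk(KERN_INFO "__initcall == device_initcall (level 6) runs later\n");
	return 0;
}
__initcall(later_setup);
```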

View file

@ -171,7 +171,7 @@ int os_sigio_async(int master, int slave)
flags = fcntl(master, F_GETFL); flags = fcntl(master, F_GETFL);
if(flags < 0) if(flags < 0)
return errno; return -errno;
if((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) || if((fcntl(master, F_SETFL, flags | O_NONBLOCK | O_ASYNC) < 0) ||
(fcntl(master, F_SETOWN, os_getpid()) < 0)) (fcntl(master, F_SETOWN, os_getpid()) < 0))
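The one-character fix above returns `-errno` instead of the raw positive `errno`, matching the convention that these os-layer helpers report failure as a negative error code the caller can test with `< 0`. A hedged userspace sketch of the pattern (function name is illustrative):

```c
#include <errno.h>
#include <fcntl.h>

/* Return the descriptor flags on success, or a negative errno value on
 * failure, so callers can simply check "if (ret < 0)". */
static int get_fd_flags(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -errno;	/* negative, not the raw errno */
	return flags;
}
```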

View file

@ -29,21 +29,21 @@ int os_waiting_for_events(struct irq_fd *active_fds)
int i, n, err; int i, n, err;
n = poll(pollfds, pollfds_num, 0); n = poll(pollfds, pollfds_num, 0);
if(n < 0){ if (n < 0) {
err = -errno; err = -errno;
if(errno != EINTR) if (errno != EINTR)
printk("sigio_handler: os_waiting_for_events:" printk("sigio_handler: os_waiting_for_events:"
" poll returned %d, errno = %d\n", n, errno); " poll returned %d, errno = %d\n", n, errno);
return err; return err;
} }
if(n == 0) if (n == 0)
return 0; return 0;
irq_fd = active_fds; irq_fd = active_fds;
for(i = 0; i < pollfds_num; i++){ for (i = 0; i < pollfds_num; i++) {
if(pollfds[i].revents != 0){ if (pollfds[i].revents != 0) {
irq_fd->current_events = pollfds[i].revents; irq_fd->current_events = pollfds[i].revents;
pollfds[i].fd = -1; pollfds[i].fd = -1;
} }
@ -54,7 +54,7 @@ int os_waiting_for_events(struct irq_fd *active_fds)
int os_isatty(int fd) int os_isatty(int fd)
{ {
return(isatty(fd)); return isatty(fd);
} }
int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds) int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
@ -65,7 +65,7 @@ int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
return((pollfds_size + 1) * sizeof(pollfds[0])); return((pollfds_size + 1) * sizeof(pollfds[0]));
} }
if(pollfds != NULL){ if (pollfds != NULL) {
memcpy(tmp_pfd, pollfds, memcpy(tmp_pfd, pollfds,
sizeof(pollfds[0]) * pollfds_size); sizeof(pollfds[0]) * pollfds_size);
/* remove old pollfds */ /* remove old pollfds */
@ -73,18 +73,15 @@ int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds)
} }
pollfds = tmp_pfd; pollfds = tmp_pfd;
pollfds_size++; pollfds_size++;
} else { } else
/* remove not used tmp_pfd */ kfree(tmp_pfd); /* remove not used tmp_pfd */
if (tmp_pfd != NULL)
kfree(tmp_pfd);
}
pollfds[pollfds_num] = ((struct pollfd) { .fd = fd, pollfds[pollfds_num] = ((struct pollfd) { .fd = fd,
.events = events, .events = events,
.revents = 0 }); .revents = 0 });
pollfds_num++; pollfds_num++;
return(0); return 0;
} }
void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg, void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
@ -94,11 +91,11 @@ void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
int i = 0; int i = 0;
prev = &active_fds; prev = &active_fds;
while(*prev != NULL){ while (*prev != NULL) {
if((*test)(*prev, arg)){ if ((*test)(*prev, arg)) {
struct irq_fd *old_fd = *prev; struct irq_fd *old_fd = *prev;
if((pollfds[i].fd != -1) && if ((pollfds[i].fd != -1) &&
(pollfds[i].fd != (*prev)->fd)){ (pollfds[i].fd != (*prev)->fd)) {
printk("os_free_irq_by_cb - mismatch between " printk("os_free_irq_by_cb - mismatch between "
"active_fds and pollfds, fd %d vs %d\n", "active_fds and pollfds, fd %d vs %d\n",
(*prev)->fd, pollfds[i].fd); (*prev)->fd, pollfds[i].fd);
@ -110,7 +107,6 @@ void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
/* This moves the *whole* array after pollfds[i] /* This moves the *whole* array after pollfds[i]
* (though it doesn't spot as such)! * (though it doesn't spot as such)!
*/ */
memmove(&pollfds[i], &pollfds[i + 1], memmove(&pollfds[i], &pollfds[i + 1],
(pollfds_num - i) * sizeof(pollfds[0])); (pollfds_num - i) * sizeof(pollfds[0]));
if(*last_irq_ptr2 == &old_fd->next) if(*last_irq_ptr2 == &old_fd->next)
@ -129,10 +125,9 @@ void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg,
return; return;
} }
int os_get_pollfd(int i) int os_get_pollfd(int i)
{ {
return(pollfds[i].fd); return pollfds[i].fd;
} }
void os_set_pollfd(int i, int fd) void os_set_pollfd(int i, int fd)
@ -151,8 +146,10 @@ void init_irq_signals(int on_sigstack)
int flags; int flags;
flags = on_sigstack ? SA_ONSTACK : 0; flags = on_sigstack ? SA_ONSTACK : 0;
if(timer_irq_inited) h = (__sighandler_t) alarm_handler; if (timer_irq_inited)
else h = boot_timer_handler; h = (__sighandler_t)alarm_handler;
else
h = boot_timer_handler;
set_handler(SIGVTALRM, h, flags | SA_RESTART, set_handler(SIGVTALRM, h, flags | SA_RESTART,
SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1); SIGUSR1, SIGIO, SIGWINCH, SIGALRM, -1);

View file

@ -74,6 +74,34 @@ static void last_ditch_exit(int sig)
exit(1); exit(1);
} }
#define UML_LIB_PATH ":/usr/lib/uml"
static void setup_env_path(void)
{
char *new_path = NULL;
char *old_path = NULL;
int path_len = 0;
old_path = getenv("PATH");
/* if no PATH variable is set or it has an empty value
* just use the default + /usr/lib/uml
*/
if (!old_path || (path_len = strlen(old_path)) == 0) {
putenv("PATH=:/bin:/usr/bin/" UML_LIB_PATH);
return;
}
/* append /usr/lib/uml to the existing path */
path_len += strlen("PATH=" UML_LIB_PATH) + 1;
new_path = malloc(path_len);
if (!new_path) {
perror("coudn't malloc to set a new PATH");
return;
}
snprintf(new_path, path_len, "PATH=%s" UML_LIB_PATH, old_path);
putenv(new_path);
}
extern int uml_exitcode; extern int uml_exitcode;
extern void scan_elf_aux( char **envp); extern void scan_elf_aux( char **envp);
@ -114,6 +142,8 @@ int main(int argc, char **argv, char **envp)
set_stklim(); set_stklim();
setup_env_path();
new_argv = malloc((argc + 1) * sizeof(char *)); new_argv = malloc((argc + 1) * sizeof(char *));
if(new_argv == NULL){ if(new_argv == NULL){
perror("Mallocing argv"); perror("Mallocing argv");

View file

@ -206,29 +206,36 @@ int os_drop_memory(void *addr, int length)
int can_drop_memory(void) int can_drop_memory(void)
{ {
void *addr; void *addr;
int fd; int fd, ok = 0;
printk("Checking host MADV_REMOVE support..."); printk("Checking host MADV_REMOVE support...");
fd = create_mem_file(UM_KERN_PAGE_SIZE); fd = create_mem_file(UM_KERN_PAGE_SIZE);
if(fd < 0){ if(fd < 0){
printk("Creating test memory file failed, err = %d\n", -fd); printk("Creating test memory file failed, err = %d\n", -fd);
return 0; goto out;
} }
addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE, addr = mmap64(NULL, UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, 0); MAP_SHARED, fd, 0);
if(addr == MAP_FAILED){ if(addr == MAP_FAILED){
printk("Mapping test memory file failed, err = %d\n", -errno); printk("Mapping test memory file failed, err = %d\n", -errno);
return 0; goto out_close;
} }
if(madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0){ if(madvise(addr, UM_KERN_PAGE_SIZE, MADV_REMOVE) != 0){
printk("MADV_REMOVE failed, err = %d\n", -errno); printk("MADV_REMOVE failed, err = %d\n", -errno);
return 0; goto out_unmap;
} }
printk("OK\n"); printk("OK\n");
return 1; ok = 1;
out_unmap:
munmap(addr, UM_KERN_PAGE_SIZE);
out_close:
close(fd);
out:
return ok;
} }
void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int)) void init_new_thread_stack(void *sig_stack, void (*usr1_handler)(int))
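The rework of can_drop_memory() above replaces the early `return 0` exits with a goto chain so the probe unmaps and closes its scratch file on every path. A userspace sketch of probing MADV_REMOVE the same way (plain mkstemp/mmap here; create_mem_file() and the UM_* constants are UML-specific, and the result also depends on whether the temp file lives on a filesystem that supports hole punching, e.g. tmpfs):

```c
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>

/* Probe whether MADV_REMOVE works, cleaning up with a goto chain. */
static int can_drop_memory(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char path[] = "/tmp/madv-probe-XXXXXX";
	void *addr;
	int fd, ok = 0;

	fd = mkstemp(path);
	if (fd < 0)
		goto out;
	unlink(path);
	if (ftruncate(fd, page) < 0)
		goto out_close;

	addr = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		goto out_close;

	if (madvise(addr, page, MADV_REMOVE) == 0)
		ok = 1;

	munmap(addr, page);
out_close:
	close(fd);
out:
	return ok;
}

int main(void)
{
	printf("MADV_REMOVE %s\n", can_drop_memory() ? "supported" : "not supported");
	return 0;
}
```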

View file

@ -344,12 +344,12 @@ int copy_context_skas0(unsigned long new_stack, int pid)
err = ptrace_setregs(pid, regs); err = ptrace_setregs(pid, regs);
if(err < 0) if(err < 0)
panic("copy_context_skas0 : PTRACE_SETREGS failed, " panic("copy_context_skas0 : PTRACE_SETREGS failed, "
"pid = %d, errno = %d\n", pid, errno); "pid = %d, errno = %d\n", pid, -err);
err = ptrace_setfpregs(pid, fp_regs); err = ptrace_setfpregs(pid, fp_regs);
if(err < 0) if(err < 0)
panic("copy_context_skas0 : PTRACE_SETFPREGS failed, " panic("copy_context_skas0 : PTRACE_SETFPREGS failed, "
"pid = %d, errno = %d\n", pid, errno); "pid = %d, errno = %d\n", pid, -err);
/* set a well known return code for detection of child write failure */ /* set a well known return code for detection of child write failure */
child_data->err = 12345678; child_data->err = 12345678;
@ -362,7 +362,7 @@ int copy_context_skas0(unsigned long new_stack, int pid)
pid = data->err; pid = data->err;
if(pid < 0) if(pid < 0)
panic("copy_context_skas0 - stub-parent reports error %d\n", panic("copy_context_skas0 - stub-parent reports error %d\n",
pid); -pid);
/* Wait, until child has finished too: read child's result from /* Wait, until child has finished too: read child's result from
* child's stack and check it. * child's stack and check it.

View file

@ -104,7 +104,7 @@ void init_registers(int pid)
err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs); err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs);
if(err) if(err)
panic("check_ptrace : PTRACE_GETREGS failed, errno = %d", panic("check_ptrace : PTRACE_GETREGS failed, errno = %d",
err); errno);
errno = 0; errno = 0;
err = ptrace(PTRACE_GETFPXREGS, pid, 0, exec_fpx_regs); err = ptrace(PTRACE_GETFPXREGS, pid, 0, exec_fpx_regs);
@ -119,7 +119,7 @@ void init_registers(int pid)
err = ptrace(PTRACE_GETFPREGS, pid, 0, exec_fp_regs); err = ptrace(PTRACE_GETFPREGS, pid, 0, exec_fp_regs);
if(err) if(err)
panic("check_ptrace : PTRACE_GETFPREGS failed, errno = %d", panic("check_ptrace : PTRACE_GETFPREGS failed, errno = %d",
err); errno);
} }
void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)

View file

@ -62,12 +62,12 @@ void init_registers(int pid)
err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs); err = ptrace(PTRACE_GETREGS, pid, 0, exec_regs);
if(err) if(err)
panic("check_ptrace : PTRACE_GETREGS failed, errno = %d", panic("check_ptrace : PTRACE_GETREGS failed, errno = %d",
err); errno);
err = ptrace(PTRACE_GETFPREGS, pid, 0, exec_fp_regs); err = ptrace(PTRACE_GETFPREGS, pid, 0, exec_fp_regs);
if(err) if(err)
panic("check_ptrace : PTRACE_GETFPREGS failed, errno = %d", panic("check_ptrace : PTRACE_GETFPREGS failed, errno = %d",
err); errno);
} }
void get_safe_registers(unsigned long *regs, unsigned long *fp_regs) void get_safe_registers(unsigned long *regs, unsigned long *fp_regs)

View file

@ -178,14 +178,14 @@ static void __init create_pid_file(void)
fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644); fd = open(file, O_RDWR | O_CREAT | O_EXCL, 0644);
if(fd < 0){ if(fd < 0){
printk("Open of machine pid file \"%s\" failed: %s\n", printk("Open of machine pid file \"%s\" failed: %s\n",
file, strerror(-fd)); file, strerror(errno));
return; return;
} }
snprintf(pid, sizeof(pid), "%d\n", getpid()); snprintf(pid, sizeof(pid), "%d\n", getpid());
n = write(fd, pid, strlen(pid)); n = write(fd, pid, strlen(pid));
if(n != strlen(pid)) if(n != strlen(pid))
printk("Write of pid file failed - err = %d\n", -n); printk("Write of pid file failed - err = %d\n", errno);
close(fd); close(fd);
} }

View file

@ -96,6 +96,13 @@ EXPORT_SYMBOL_PROTO(getuid);
EXPORT_SYMBOL_PROTO(fsync); EXPORT_SYMBOL_PROTO(fsync);
EXPORT_SYMBOL_PROTO(fdatasync); EXPORT_SYMBOL_PROTO(fdatasync);
/* Export symbols used by GCC for the stack protector. */
extern void __stack_smash_handler(void *) __attribute__((weak));
EXPORT_SYMBOL(__stack_smash_handler);
extern long __guard __attribute__((weak));
EXPORT_SYMBOL(__guard);
/* /*
* Overrides for Emacs so that we follow Linus's tabbing style. * Overrides for Emacs so that we follow Linus's tabbing style.
* Emacs will notice this stuff at the end of the file and automatically * Emacs will notice this stuff at the end of the file and automatically
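The exports above declare `__guard` and `__stack_smash_handler` as weak so the file still links when the toolchain does not provide the stack-protector symbols; an unresolved weak reference simply ends up with address 0. A small userspace illustration of that linker behaviour (placeholder symbol, not the GCC ones):

```c
#include <stdio.h>

/* Declared weak and defined nowhere: the link still succeeds, and the
 * symbol's address compares equal to 0 at run time. */
extern void optional_hook(void) __attribute__((weak));

int main(void)
{
	if (optional_hook)
		optional_hook();
	else
		printf("optional_hook not provided by this build\n");
	return 0;
}
```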

View file

@ -7,11 +7,19 @@ USER_SINGLE_OBJS := \
USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS)) USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS))
USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file)) USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
$(USER_OBJS) $(USER_OBJS:.o=.i) $(USER_OBJS:.o=.s) $(USER_OBJS:.o=.lst): \ $(USER_OBJS:.o=.%): \
c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(notdir $@)) c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) $(CFLAGS_$(*F).o)
$(USER_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \ $(USER_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
-Dunix -D__unix__ -D__$(SUBARCH)__ -Dunix -D__unix__ -D__$(SUBARCH)__
# These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of
# using it directly.
UNPROFILE_OBJS := $(foreach file,$(UNPROFILE_OBJS),$(obj)/$(file))
$(UNPROFILE_OBJS:.o=.%): \
c_flags = -Wp,-MD,$(depfile) $(call unprofile,$(USER_CFLAGS)) $(CFLAGS_$(*F).o)
$(UNPROFILE_OBJS) : CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ \
-Dunix -D__unix__ -D__$(SUBARCH)__
# The stubs and unmap.o can't try to call mcount or update basic block data # The stubs and unmap.o can't try to call mcount or update basic block data
define unprofile define unprofile

View file

@ -8,11 +8,16 @@ subarch-obj-y = lib/bitops.o kernel/semaphore.o
subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem.o subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem.o
subarch-obj-$(CONFIG_MODULES) += kernel/module.o subarch-obj-$(CONFIG_MODULES) += kernel/module.o
USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o stub_segv.o USER_OBJS := bugs.o ptrace_user.o sigcontext.o fault.o
include arch/um/scripts/Makefile.rules USER_OBJS += user-offsets.s
extra-y += user-offsets.s
extra-$(CONFIG_MODE_TT) += unmap.o extra-$(CONFIG_MODE_TT) += unmap.o
$(obj)/stub_segv.o $(obj)/unmap.o: \ UNPROFILE_OBJS := stub_segv.o
_c_flags = $(call unprofile,$(CFLAGS)) CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
include arch/um/scripts/Makefile.rules
$(obj)/unmap.%: _c_flags = $(call unprofile,$(CFLAGS))

View file

@ -16,11 +16,16 @@ subarch-obj-$(CONFIG_MODULES) += kernel/module.o
ldt-y = ../sys-i386/ldt.o ldt-y = ../sys-i386/ldt.o
USER_OBJS := ptrace_user.o sigcontext.o stub_segv.o USER_OBJS := ptrace_user.o sigcontext.o
include arch/um/scripts/Makefile.rules USER_OBJS += user-offsets.s
extra-y += user-offsets.s
extra-$(CONFIG_MODE_TT) += unmap.o extra-$(CONFIG_MODE_TT) += unmap.o
$(obj)/stub_segv.o $(obj)/unmap.o: \ UNPROFILE_OBJS := stub_segv.o
_c_flags = $(call unprofile,$(CFLAGS)) CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
include arch/um/scripts/Makefile.rules
$(obj)/unmap.%: _c_flags = $(call unprofile,$(CFLAGS))

View file

@ -695,4 +695,5 @@ ia32_sys_call_table:
.quad sys_splice .quad sys_splice
.quad sys_sync_file_range .quad sys_sync_file_range
.quad sys_tee .quad sys_tee
.quad compat_sys_vmsplice
ia32_syscall_end: ia32_syscall_end:

View file

@ -600,12 +600,12 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
if (unlikely(current->audit_context)) { if (unlikely(current->audit_context)) {
if (test_thread_flag(TIF_IA32)) { if (test_thread_flag(TIF_IA32)) {
audit_syscall_entry(current, AUDIT_ARCH_I386, audit_syscall_entry(AUDIT_ARCH_I386,
regs->orig_rax, regs->orig_rax,
regs->rbx, regs->rcx, regs->rbx, regs->rcx,
regs->rdx, regs->rsi); regs->rdx, regs->rsi);
} else { } else {
audit_syscall_entry(current, AUDIT_ARCH_X86_64, audit_syscall_entry(AUDIT_ARCH_X86_64,
regs->orig_rax, regs->orig_rax,
regs->rdi, regs->rsi, regs->rdi, regs->rsi,
regs->rdx, regs->r10); regs->rdx, regs->r10);
@ -616,7 +616,7 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
asmlinkage void syscall_trace_leave(struct pt_regs *regs) asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{ {
if (unlikely(current->audit_context)) if (unlikely(current->audit_context))
audit_syscall_exit(current, AUDITSC_RESULT(regs->rax), regs->rax); audit_syscall_exit(AUDITSC_RESULT(regs->rax), regs->rax);
if ((test_thread_flag(TIF_SYSCALL_TRACE) if ((test_thread_flag(TIF_SYSCALL_TRACE)
|| test_thread_flag(TIF_SINGLESTEP)) || test_thread_flag(TIF_SINGLESTEP))

View file

@ -1426,3 +1426,22 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo, .show = show_cpuinfo,
}; };
#ifdef CONFIG_INPUT_PCSPKR
#include <linux/platform_device.h>
static __init int add_pcspkr(void)
{
struct platform_device *pd;
int ret;
pd = platform_device_alloc("pcspkr", -1);
if (!pd)
return -ENOMEM;
ret = platform_device_add(pd);
if (ret)
platform_device_put(pd);
return ret;
}
device_initcall(add_pcspkr);
#endif
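The built-in initcall above uses the alloc/add pattern for a platform device with no resources: platform_device_alloc() creates a refcounted device, platform_device_add() registers it, and on failure platform_device_put() drops the reference instead of a plain kfree(). A hedged sketch of the same pattern as a loadable module, which also needs the teardown half ("demo-device" is a made-up name):

```c
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	int ret;

	demo_pdev = platform_device_alloc("demo-device", -1);
	if (!demo_pdev)
		return -ENOMEM;

	ret = platform_device_add(demo_pdev);
	if (ret)
		platform_device_put(demo_pdev);	/* never added: just drop the ref */
	return ret;
}

static void __exit demo_exit(void)
{
	platform_device_unregister(demo_pdev);	/* added: unregister on unload */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```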

View file

@ -182,6 +182,7 @@ static int exact_lock(dev_t dev, void *data)
*/ */
void add_disk(struct gendisk *disk) void add_disk(struct gendisk *disk)
{ {
get_device(disk->driverfs_dev);
disk->flags |= GENHD_FL_UP; disk->flags |= GENHD_FL_UP;
blk_register_region(MKDEV(disk->major, disk->first_minor), blk_register_region(MKDEV(disk->major, disk->first_minor),
disk->minors, NULL, exact_match, exact_lock, disk); disk->minors, NULL, exact_match, exact_lock, disk);
@ -427,6 +428,7 @@ static struct attribute * default_attrs[] = {
static void disk_release(struct kobject * kobj) static void disk_release(struct kobject * kobj)
{ {
struct gendisk *disk = to_disk(kobj); struct gendisk *disk = to_disk(kobj);
put_device(disk->driverfs_dev);
kfree(disk->random); kfree(disk->random);
kfree(disk->part); kfree(disk->part);
free_disk_stats(disk); free_disk_stats(disk);

View file

@ -200,13 +200,13 @@ static ssize_t gen_rtc_read(struct file *file, char __user *buf,
/* first test allows optimizer to nuke this case for 32-bit machines */ /* first test allows optimizer to nuke this case for 32-bit machines */
if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) { if (sizeof (int) != sizeof (long) && count == sizeof (unsigned int)) {
unsigned int uidata = data; unsigned int uidata = data;
retval = put_user(uidata, (unsigned long __user *)buf); retval = put_user(uidata, (unsigned int __user *)buf) ?:
sizeof(unsigned int);
} }
else { else {
retval = put_user(data, (unsigned long __user *)buf); retval = put_user(data, (unsigned long __user *)buf) ?:
sizeof(unsigned long);
} }
if (!retval)
retval = sizeof(unsigned long);
out: out:
current->state = TASK_RUNNING; current->state = TASK_RUNNING;
remove_wait_queue(&gen_rtc_wait, &wait); remove_wait_queue(&gen_rtc_wait, &wait);
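The rewrite above leans on GCC's `?:` extension: `put_user(x, p) ?: sizeof(x)` evaluates to put_user()'s nonzero error code when the copy to userspace fails and to the byte count otherwise, and the first branch also fixes the cast so a 4-byte value is no longer stored through an `unsigned long` pointer. A tiny userspace sketch of the operator (the helper is made up, not the driver code):

```c
#include <stdio.h>

/* Stand-in for put_user(): 0 on success, -14 (-EFAULT) on failure. */
static int fake_put_user(int fail) { return fail ? -14 : 0; }

int main(void)
{
	/* GCC extension: "a ?: b" means "a ? a : b" with a evaluated once. */
	long ok   = fake_put_user(0) ?: (long)sizeof(unsigned int);
	long fail = fake_put_user(1) ?: (long)sizeof(unsigned int);

	printf("success path yields %ld (byte count)\n", ok);
	printf("failure path yields %ld (error code)\n", fail);
	return 0;
}
```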

View file

@ -25,6 +25,8 @@
#include <linux/slab.h> #include <linux/slab.h>
#include "edac_mc.h" #include "edac_mc.h"
static int force_function_unhide;
#define e752x_printk(level, fmt, arg...) \ #define e752x_printk(level, fmt, arg...) \
edac_printk(level, "e752x", fmt, ##arg) edac_printk(level, "e752x", fmt, ##arg)
@ -782,8 +784,16 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
debugf0("%s(): mci\n", __func__); debugf0("%s(): mci\n", __func__);
debugf0("Starting Probe1\n"); debugf0("Starting Probe1\n");
/* enable device 0 function 1 */ /* check to see if device 0 function 1 is enabled; if it isn't, we
* assume the BIOS has reserved it for a reason and is expecting
* exclusive access; we take care not to violate that assumption and
* fail the probe. */
pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8); pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
if (!force_function_unhide && !(stat8 & (1 << 5))) {
printk(KERN_INFO "Contact your BIOS vendor to see if the "
"E752x error registers can be safely un-hidden\n");
goto fail;
}
stat8 |= (1 << 5); stat8 |= (1 << 5);
pci_write_config_byte(pdev, E752X_DEVPRES1, stat8); pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);
@ -1063,3 +1073,8 @@ module_exit(e752x_exit);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
MODULE_DESCRIPTION("MC support for Intel e752x memory controllers"); MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
module_param(force_function_unhide, int, 0444);
MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
" 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");

View file

@ -60,11 +60,11 @@
#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */ #define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */ #define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */
#define __IPATH_SMADBG 0x8000 /* sma packet debug */ #define __IPATH_SMADBG 0x8000 /* sma packet debug */
#define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) general debug on */ #define __IPATH_IPATHDBG 0x10000 /* Ethernet (IPATH) gen debug */
#define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings on */ #define __IPATH_IPATHWARN 0x20000 /* Ethernet (IPATH) warnings */
#define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors on */ #define __IPATH_IPATHERR 0x40000 /* Ethernet (IPATH) errors */
#define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump on */ #define __IPATH_IPATHPD 0x80000 /* Ethernet (IPATH) packet dump */
#define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump on */ #define __IPATH_IPATHTABLE 0x100000 /* Ethernet (IPATH) table dump */
#else /* _IPATH_DEBUGGING */ #else /* _IPATH_DEBUGGING */
@ -79,11 +79,12 @@
#define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */ #define __IPATH_TRSAMPLE 0x0 /* generate trace buffer sample entries */
#define __IPATH_VERBDBG 0x0 /* very verbose debug */ #define __IPATH_VERBDBG 0x0 /* very verbose debug */
#define __IPATH_PKTDBG 0x0 /* print packet data */ #define __IPATH_PKTDBG 0x0 /* print packet data */
#define __IPATH_PROCDBG 0x0 /* print process startup (init)/exit messages */ #define __IPATH_PROCDBG 0x0 /* process startup (init)/exit messages */
/* print mmap/nopage stuff, not using VDBG any more */ /* print mmap/nopage stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x0 #define __IPATH_MMDBG 0x0
#define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */ #define __IPATH_EPKTDBG 0x0 /* print ethernet packet data */
#define __IPATH_SMADBG 0x0 /* print process startup (init)/exit messages */#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */ #define __IPATH_SMADBG 0x0 /* process startup (init)/exit messages */
#define __IPATH_IPATHDBG 0x0 /* Ethernet (IPATH) table dump on */
#define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */ #define __IPATH_IPATHWARN 0x0 /* Ethernet (IPATH) warnings on */
#define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */ #define __IPATH_IPATHERR 0x0 /* Ethernet (IPATH) errors on */
#define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */ #define __IPATH_IPATHPD 0x0 /* Ethernet (IPATH) packet dump on */

View file

@ -277,13 +277,14 @@ static int ipath_diag_open(struct inode *in, struct file *fp)
bail: bail:
spin_unlock_irqrestore(&ipath_devs_lock, flags); spin_unlock_irqrestore(&ipath_devs_lock, flags);
mutex_unlock(&ipath_mutex);
/* Only expose a way to reset the device if we /* Only expose a way to reset the device if we
make it into diag mode. */ make it into diag mode. */
if (ret == 0) if (ret == 0)
ipath_expose_reset(&dd->pcidev->dev); ipath_expose_reset(&dd->pcidev->dev);
mutex_unlock(&ipath_mutex);
return ret; return ret;
} }

View file

@ -417,11 +417,21 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
} }
ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
if (ret) {
/*
* if the 64 bit setup fails, try 32 bit. Some systems
* do not set up 64 bit maps on systems with 2GB or less
* memory installed.
*/
ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (ret) { if (ret) {
dev_info(&pdev->dev, "pci_set_dma_mask unit %u " dev_info(&pdev->dev, "pci_set_dma_mask unit %u "
"fails: %d\n", dd->ipath_unit, ret); "fails: %d\n", dd->ipath_unit, ret);
goto bail_regions; goto bail_regions;
} }
else
ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
}
pci_set_master(pdev); pci_set_master(pdev);
@ -1949,7 +1959,7 @@ int ipath_reset_device(int unit)
} }
if (dd->ipath_pd) if (dd->ipath_pd)
for (i = 1; i < dd->ipath_portcnt; i++) { for (i = 1; i < dd->ipath_cfgports; i++) {
if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) { if (dd->ipath_pd[i] && dd->ipath_pd[i]->port_cnt) {
ipath_dbg("unit %u port %d is in use " ipath_dbg("unit %u port %d is in use "
"(PID %u cmd %s), can't reset\n", "(PID %u cmd %s), can't reset\n",

View file

@ -53,13 +53,19 @@ MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
/* /*
* Number of buffers reserved for driver (layered drivers and SMA * Number of buffers reserved for driver (layered drivers and SMA
* send). Reserved at end of buffer list. * send). Reserved at end of buffer list. Initialized based on
* number of PIO buffers if not set via module interface.
* The problem with this is that it's global, but we'll use different
* numbers for different chip types. So the default value is not
* very useful. I've redefined it for the 1.3 release so that it's
* zero unless set by the user to something else, in which case we
* try to respect it.
*/ */
static ushort ipath_kpiobufs = 32; static ushort ipath_kpiobufs;
static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp); static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_uint, module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
&ipath_kpiobufs, S_IWUSR | S_IRUGO); &ipath_kpiobufs, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver"); MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
@ -531,8 +537,11 @@ static int init_housekeeping(struct ipath_devdata *dd,
* Don't clear ipath_flags as 8bit mode was set before * Don't clear ipath_flags as 8bit mode was set before
* entering this func. However, we do set the linkstate to * entering this func. However, we do set the linkstate to
* unknown, so we can watch for a transition. * unknown, so we can watch for a transition.
* PRESENT is set because we want register reads to work,
* and the kernel infrastructure saw it in config space;
* We clear it if we have failures.
*/ */
dd->ipath_flags |= IPATH_LINKUNK; dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED | dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
IPATH_LINKDOWN | IPATH_LINKINIT); IPATH_LINKDOWN | IPATH_LINKINIT);
@ -560,6 +569,7 @@ static int init_housekeeping(struct ipath_devdata *dd,
|| (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) { || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
ipath_dev_err(dd, "Register read failures from chip, " ipath_dev_err(dd, "Register read failures from chip, "
"giving up initialization\n"); "giving up initialization\n");
dd->ipath_flags &= ~IPATH_PRESENT;
ret = -ENODEV; ret = -ENODEV;
goto done; goto done;
} }
@ -682,16 +692,14 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
*/ */
dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2) dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
/ (sizeof(u64) * BITS_PER_BYTE / 2); / (sizeof(u64) * BITS_PER_BYTE / 2);
if (!ipath_kpiobufs) /* have to have at least 1, for SMA */ if (ipath_kpiobufs == 0) {
kpiobufs = ipath_kpiobufs = 1; /* not set by user, or set explicitly to default */
else if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) < if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
(dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT)) { kpiobufs = 32;
dev_info(&dd->pcidev->dev, "Too few PIO buffers (%u) " else
"for %u ports to have %u each!\n", kpiobufs = 16;
dd->ipath_piobcnt2k + dd->ipath_piobcnt4k, }
dd->ipath_cfgports, IPATH_MIN_USER_PORT_BUFCNT); else
kpiobufs = 1; /* reserve just the minimum for SMA/ether */
} else
kpiobufs = ipath_kpiobufs; kpiobufs = ipath_kpiobufs;
if (kpiobufs > if (kpiobufs >

View file

@ -665,14 +665,14 @@ static void handle_layer_pioavail(struct ipath_devdata *dd)
ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE); ret = __ipath_layer_intr(dd, IPATH_LAYER_INT_SEND_CONTINUE);
if (ret > 0) if (ret > 0)
goto clear; goto set;
ret = __ipath_verbs_piobufavail(dd); ret = __ipath_verbs_piobufavail(dd);
if (ret > 0) if (ret > 0)
goto clear; goto set;
return; return;
clear: set:
set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl); set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
dd->ipath_sendctrl); dd->ipath_sendctrl);
@ -719,11 +719,24 @@ static void handle_rcv(struct ipath_devdata *dd, u32 istat)
irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs) irqreturn_t ipath_intr(int irq, void *data, struct pt_regs *regs)
{ {
struct ipath_devdata *dd = data; struct ipath_devdata *dd = data;
u32 istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus); u32 istat;
ipath_err_t estat = 0; ipath_err_t estat = 0;
static unsigned unexpected = 0; static unsigned unexpected = 0;
irqreturn_t ret; irqreturn_t ret;
if(!(dd->ipath_flags & IPATH_PRESENT)) {
/* this is mostly so we don't try to touch the chip while
* it is being reset */
/*
* This return value is perhaps odd, but we do not want the
* interrupt core code to remove our interrupt handler
* because we don't appear to be handling an interrupt
* during a chip reset.
*/
return IRQ_HANDLED;
}
istat = ipath_read_kreg32(dd, dd->ipath_kregs->kr_intstatus);
if (unlikely(!istat)) { if (unlikely(!istat)) {
ipath_stats.sps_nullintr++; ipath_stats.sps_nullintr++;
ret = IRQ_NONE; /* not our interrupt, or already handled */ ret = IRQ_NONE; /* not our interrupt, or already handled */

View file

@ -731,7 +731,7 @@ u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd, static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
ipath_ureg regno, int port) ipath_ureg regno, int port)
{ {
if (!dd->ipath_kregbase) if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0; return 0;
return readl(regno + (u64 __iomem *) return readl(regno + (u64 __iomem *)
@ -762,7 +762,7 @@ static inline void ipath_write_ureg(const struct ipath_devdata *dd,
static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd, static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
ipath_kreg regno) ipath_kreg regno)
{ {
if (!dd->ipath_kregbase) if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return -1; return -1;
return readl((u32 __iomem *) & dd->ipath_kregbase[regno]); return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
} }
@ -770,7 +770,7 @@ static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd, static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
ipath_kreg regno) ipath_kreg regno)
{ {
if (!dd->ipath_kregbase) if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return -1; return -1;
return readq(&dd->ipath_kregbase[regno]); return readq(&dd->ipath_kregbase[regno]);
@ -786,7 +786,7 @@ static inline void ipath_write_kreg(const struct ipath_devdata *dd,
static inline u64 ipath_read_creg(const struct ipath_devdata *dd, static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
ipath_sreg regno) ipath_sreg regno)
{ {
if (!dd->ipath_kregbase) if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0; return 0;
return readq(regno + (u64 __iomem *) return readq(regno + (u64 __iomem *)
@ -797,7 +797,7 @@ static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
static inline u32 ipath_read_creg32(const struct ipath_devdata *dd, static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
ipath_sreg regno) ipath_sreg regno)
{ {
if (!dd->ipath_kregbase) if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
return 0; return 0;
return readl(regno + (u64 __iomem *) return readl(regno + (u64 __iomem *)
(dd->ipath_cregbase + (dd->ipath_cregbase +

View file

@ -46,13 +46,15 @@
/* Acquire before ipath_devs_lock. */ /* Acquire before ipath_devs_lock. */
static DEFINE_MUTEX(ipath_layer_mutex); static DEFINE_MUTEX(ipath_layer_mutex);
static int ipath_verbs_registered;
u16 ipath_layer_rcv_opcode; u16 ipath_layer_rcv_opcode;
static int (*layer_intr)(void *, u32); static int (*layer_intr)(void *, u32);
static int (*layer_rcv)(void *, void *, struct sk_buff *); static int (*layer_rcv)(void *, void *, struct sk_buff *);
static int (*layer_rcv_lid)(void *, void *); static int (*layer_rcv_lid)(void *, void *);
static int (*verbs_piobufavail)(void *); static int (*verbs_piobufavail)(void *);
static void (*verbs_rcv)(void *, void *, void *, u32); static void (*verbs_rcv)(void *, void *, void *, u32);
static int ipath_verbs_registered;
static void *(*layer_add_one)(int, struct ipath_devdata *); static void *(*layer_add_one)(int, struct ipath_devdata *);
static void (*layer_remove_one)(void *); static void (*layer_remove_one)(void *);
@ -586,6 +588,8 @@ void ipath_verbs_unregister(void)
verbs_rcv = NULL; verbs_rcv = NULL;
verbs_timer_cb = NULL; verbs_timer_cb = NULL;
ipath_verbs_registered = 0;
mutex_unlock(&ipath_layer_mutex); mutex_unlock(&ipath_layer_mutex);
} }

View file

@ -972,6 +972,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
/* Use ERROR so it shows up in logs, etc. */ /* Use ERROR so it shows up in logs, etc. */
ipath_dev_err(dd, "Resetting PE-800 unit %u\n", ipath_dev_err(dd, "Resetting PE-800 unit %u\n",
dd->ipath_unit); dd->ipath_unit);
/* keep chip from being accessed in a few places */
dd->ipath_flags &= ~(IPATH_INITTED|IPATH_PRESENT);
val = dd->ipath_control | INFINIPATH_C_RESET; val = dd->ipath_control | INFINIPATH_C_RESET;
ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, val);
mb(); mb();
@ -997,6 +999,8 @@ static int ipath_setup_pe_reset(struct ipath_devdata *dd)
if ((r = pci_enable_device(dd->pcidev))) if ((r = pci_enable_device(dd->pcidev)))
ipath_dev_err(dd, "pci_enable_device failed after " ipath_dev_err(dd, "pci_enable_device failed after "
"reset: %d\n", r); "reset: %d\n", r);
/* whether it worked or not, mark as present, again */
dd->ipath_flags |= IPATH_PRESENT;
val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
if (val == dd->ipath_revision) { if (val == dd->ipath_revision) {
ipath_cdbg(VERBOSE, "Got matching revision " ipath_cdbg(VERBOSE, "Got matching revision "

View file

@ -34,8 +34,9 @@
#define _IPATH_REGISTERS_H #define _IPATH_REGISTERS_H
/* /*
* This file should only be included by kernel source, and by the diags. * This file should only be included by kernel source, and by the diags. It
* It defines the registers, and their contents, for the InfiniPath HT-400 chip * defines the registers, and their contents, for the InfiniPath HT-400
* chip.
*/ */
/* /*
@ -156,8 +157,10 @@
#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8 #define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL #define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1 #define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
#define INFINIPATH_IBCC_LINKINITCMD_POLL 2 /* cycle through TS1/TS2 till OK */ /* cycle through TS1/TS2 till OK */
#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3 /* wait for TS1, then go on */ #define INFINIPATH_IBCC_LINKINITCMD_POLL 2
/* wait for TS1, then go on */
#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16 #define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL #define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
#define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */ #define INFINIPATH_IBCC_LINKCMD_INIT 1 /* move to 0x11 */
@ -182,7 +185,8 @@
#define INFINIPATH_IBCS_LINKSTATE_SHIFT 4 #define INFINIPATH_IBCS_LINKSTATE_SHIFT 4
#define INFINIPATH_IBCS_TXREADY 0x40000000 #define INFINIPATH_IBCS_TXREADY 0x40000000
#define INFINIPATH_IBCS_TXCREDITOK 0x80000000 #define INFINIPATH_IBCS_TXCREDITOK 0x80000000
/* link training states (shift by INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */ /* link training states (shift by
INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
#define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00 #define INFINIPATH_IBCS_LT_STATE_DISABLED 0x00
#define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01 #define INFINIPATH_IBCS_LT_STATE_LINKUP 0x01
#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02 #define INFINIPATH_IBCS_LT_STATE_POLLACTIVE 0x02
@ -267,10 +271,12 @@
/* kr_serdesconfig0 bits */ /* kr_serdesconfig0 bits */
#define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */ #define INFINIPATH_SERDC0_RESET_MASK 0xfULL /* overal reset bits */
#define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */ #define INFINIPATH_SERDC0_RESET_PLL 0x10000000ULL /* pll reset */
#define INFINIPATH_SERDC0_TXIDLE 0xF000ULL /* tx idle enables (per lane) */ /* tx idle enables (per lane) */
#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL /* rx detect enables (per lane) */ #define INFINIPATH_SERDC0_TXIDLE 0xF000ULL
#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL /* L1 Power down; use with RXDETECT, /* rx detect enables (per lane) */
Otherwise not used on IB side */ #define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
/* L1 Power down; use with RXDETECT, Otherwise not used on IB side */
#define INFINIPATH_SERDC0_L1PWR_DN 0xF0ULL
/* kr_xgxsconfig bits */ /* kr_xgxsconfig bits */
#define INFINIPATH_XGXS_RESET 0x7ULL #define INFINIPATH_XGXS_RESET 0x7ULL
@ -390,12 +396,13 @@ struct ipath_kregs {
ipath_kreg kr_txintmemsize; ipath_kreg kr_txintmemsize;
ipath_kreg kr_xgxsconfig; ipath_kreg kr_xgxsconfig;
ipath_kreg kr_ibpllcfg; ipath_kreg kr_ibpllcfg;
/* use these two (and the following N ports) only with ipath_k*_kreg64_port(); /* use these two (and the following N ports) only with
* not *kreg64() */ * ipath_k*_kreg64_port(); not *kreg64() */
ipath_kreg kr_rcvhdraddr; ipath_kreg kr_rcvhdraddr;
ipath_kreg kr_rcvhdrtailaddr; ipath_kreg kr_rcvhdrtailaddr;
/* remaining registers are not present on all types of infinipath chips */ /* remaining registers are not present on all types of infinipath
chips */
ipath_kreg kr_rcvpktledcnt; ipath_kreg kr_rcvpktledcnt;
ipath_kreg kr_pcierbuftestreg0; ipath_kreg kr_pcierbuftestreg0;
ipath_kreg kr_pcierbuftestreg1; ipath_kreg kr_pcierbuftestreg1;

View file

@ -531,19 +531,12 @@ int ipath_post_rc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
} }
wqe->wr.num_sge = j; wqe->wr.num_sge = j;
qp->s_head = next; qp->s_head = next;
/*
* Wake up the send tasklet if the QP is not waiting
* for an RNR timeout.
*/
next = qp->s_rnr_timeout;
spin_unlock_irqrestore(&qp->s_lock, flags); spin_unlock_irqrestore(&qp->s_lock, flags);
if (next == 0) {
if (qp->ibqp.qp_type == IB_QPT_UC) if (qp->ibqp.qp_type == IB_QPT_UC)
ipath_do_uc_send((unsigned long) qp); ipath_do_uc_send((unsigned long) qp);
else else
ipath_do_rc_send((unsigned long) qp); ipath_do_rc_send((unsigned long) qp);
}
ret = 0; ret = 0;

View file

@ -711,10 +711,22 @@ static struct attribute_group dev_attr_group = {
* enters diag mode. A device reset is quite likely to crash the * enters diag mode. A device reset is quite likely to crash the
* machine entirely, so we don't want to normally make it * machine entirely, so we don't want to normally make it
* available. * available.
*
* Called with ipath_mutex held.
*/ */
int ipath_expose_reset(struct device *dev) int ipath_expose_reset(struct device *dev)
{ {
return device_create_file(dev, &dev_attr_reset); static int exposed;
int ret;
if (!exposed) {
ret = device_create_file(dev, &dev_attr_reset);
exposed = 1;
}
else
ret = 0;
return ret;
} }
int ipath_driver_create_group(struct device_driver *drv) int ipath_driver_create_group(struct device_driver *drv)

View file

@ -46,8 +46,10 @@
* This is called from ipath_post_ud_send() to forward a WQE addressed * This is called from ipath_post_ud_send() to forward a WQE addressed
* to the same HCA. * to the same HCA.
*/ */
static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_sge_state *ss, static void ipath_ud_loopback(struct ipath_qp *sqp,
u32 length, struct ib_send_wr *wr, struct ib_wc *wc) struct ipath_sge_state *ss,
u32 length, struct ib_send_wr *wr,
struct ib_wc *wc)
{ {
struct ipath_ibdev *dev = to_idev(sqp->ibqp.device); struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
struct ipath_qp *qp; struct ipath_qp *qp;

View file

@ -449,7 +449,6 @@ static void ipath_ib_timer(void *arg)
{ {
struct ipath_ibdev *dev = (struct ipath_ibdev *) arg; struct ipath_ibdev *dev = (struct ipath_ibdev *) arg;
struct ipath_qp *resend = NULL; struct ipath_qp *resend = NULL;
struct ipath_qp *rnr = NULL;
struct list_head *last; struct list_head *last;
struct ipath_qp *qp; struct ipath_qp *qp;
unsigned long flags; unsigned long flags;
@ -465,32 +464,18 @@ static void ipath_ib_timer(void *arg)
last = &dev->pending[dev->pending_index]; last = &dev->pending[dev->pending_index];
while (!list_empty(last)) { while (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait); qp = list_entry(last->next, struct ipath_qp, timerwait);
if (last->next == LIST_POISON1 ||
last->next != &qp->timerwait ||
qp->timerwait.prev != last) {
INIT_LIST_HEAD(last);
} else {
list_del(&qp->timerwait); list_del(&qp->timerwait);
qp->timerwait.prev = (struct list_head *) resend; qp->timer_next = resend;
resend = qp; resend = qp;
atomic_inc(&qp->refcount); atomic_inc(&qp->refcount);
} }
}
last = &dev->rnrwait; last = &dev->rnrwait;
if (!list_empty(last)) { if (!list_empty(last)) {
qp = list_entry(last->next, struct ipath_qp, timerwait); qp = list_entry(last->next, struct ipath_qp, timerwait);
if (--qp->s_rnr_timeout == 0) { if (--qp->s_rnr_timeout == 0) {
do { do {
if (last->next == LIST_POISON1 ||
last->next != &qp->timerwait ||
qp->timerwait.prev != last) {
INIT_LIST_HEAD(last);
break;
}
list_del(&qp->timerwait); list_del(&qp->timerwait);
qp->timerwait.prev = tasklet_hi_schedule(&qp->s_task);
(struct list_head *) rnr;
rnr = qp;
if (list_empty(last)) if (list_empty(last))
break; break;
qp = list_entry(last->next, struct ipath_qp, qp = list_entry(last->next, struct ipath_qp,
@ -530,8 +515,7 @@ static void ipath_ib_timer(void *arg)
spin_unlock_irqrestore(&dev->pending_lock, flags); spin_unlock_irqrestore(&dev->pending_lock, flags);
/* XXX What if timer fires again while this is running? */ /* XXX What if timer fires again while this is running? */
for (qp = resend; qp != NULL; for (qp = resend; qp != NULL; qp = qp->timer_next) {
qp = (struct ipath_qp *) qp->timerwait.prev) {
struct ib_wc wc; struct ib_wc wc;
spin_lock_irqsave(&qp->s_lock, flags); spin_lock_irqsave(&qp->s_lock, flags);
@ -545,9 +529,6 @@ static void ipath_ib_timer(void *arg)
if (atomic_dec_and_test(&qp->refcount)) if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait); wake_up(&qp->wait);
} }
for (qp = rnr; qp != NULL;
qp = (struct ipath_qp *) qp->timerwait.prev)
tasklet_hi_schedule(&qp->s_task);
} }
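The timer rework above stops overloading qp->timerwait.prev and instead chains QPs that need a resend through the new timer_next pointer: entries are collected under the pending lock and the chain is walked afterwards. A simplified user-space sketch of that collect-then-process pattern, with invented struct and function names:

#include <stdio.h>
#include <stddef.h>

struct qp {
        int id;
        int needs_resend;
        struct qp *timer_next;   /* link for the resend chain */
};

/* Walk the array "under the lock" and chain the entries that need work. */
static struct qp *collect_resends(struct qp *qps, size_t n)
{
        struct qp *resend = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                if (qps[i].needs_resend) {
                        qps[i].timer_next = resend;  /* push onto chain */
                        resend = &qps[i];
                }
        }
        return resend;
}

int main(void)
{
        struct qp qps[4] = { {1, 1}, {2, 0}, {3, 1}, {4, 1} };
        struct qp *qp;

        /* "Outside the lock": process the chain without touching the array. */
        for (qp = collect_resends(qps, 4); qp != NULL; qp = qp->timer_next)
                printf("resend QP %d\n", qp->id);
        return 0;
}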
/** /**
@ -556,9 +537,9 @@ static void ipath_ib_timer(void *arg)
* *
* This is called from ipath_intr() at interrupt level when a PIO buffer is * This is called from ipath_intr() at interrupt level when a PIO buffer is
* available after ipath_verbs_send() returned an error that no buffers were * available after ipath_verbs_send() returned an error that no buffers were
* available. Return 0 if we consumed all the PIO buffers and we still have * available. Return 1 if we consumed all the PIO buffers and we still have
* QPs waiting for buffers (for now, just do a tasklet_hi_schedule and * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and
* return one). * return zero).
*/ */
static int ipath_ib_piobufavail(void *arg) static int ipath_ib_piobufavail(void *arg)
{ {
@ -579,7 +560,7 @@ static int ipath_ib_piobufavail(void *arg)
spin_unlock_irqrestore(&dev->pending_lock, flags); spin_unlock_irqrestore(&dev->pending_lock, flags);
bail: bail:
return 1; return 0;
} }
static int ipath_query_device(struct ib_device *ibdev, static int ipath_query_device(struct ib_device *ibdev,
@ -1159,7 +1140,7 @@ static ssize_t show_stats(struct class_device *cdev, char *buf)
len = sprintf(buf, len = sprintf(buf,
"RC resends %d\n" "RC resends %d\n"
"RC QACKs %d\n" "RC no QACK %d\n"
"RC ACKs %d\n" "RC ACKs %d\n"
"RC SEQ NAKs %d\n" "RC SEQ NAKs %d\n"
"RC RDMA seq %d\n" "RC RDMA seq %d\n"

View file

@ -283,6 +283,7 @@ struct ipath_srq {
struct ipath_qp { struct ipath_qp {
struct ib_qp ibqp; struct ib_qp ibqp;
struct ipath_qp *next; /* link list for QPN hash table */ struct ipath_qp *next; /* link list for QPN hash table */
struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */
struct list_head piowait; /* link for wait PIO buf */ struct list_head piowait; /* link for wait PIO buf */
struct list_head timerwait; /* link for waiting for timeouts */ struct list_head timerwait; /* link for waiting for timeouts */
struct ib_ah_attr remote_ah_attr; struct ib_ah_attr remote_ah_attr;

View file

@ -95,7 +95,7 @@ struct ether_header {
__u8 seq_num; __u8 seq_num;
__le32 len; __le32 len;
/* MUST be of word size due to PIO write requirements */ /* MUST be of word size due to PIO write requirements */
__u32 csum; __le32 csum;
__le16 csum_offset; __le16 csum_offset;
__le16 flags; __le16 flags;
__u16 first_2_bytes; __u16 first_2_bytes;

View file

@ -306,7 +306,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
goto out; goto out;
} }
memcpy(gid->raw + 8, out_mad->data + (index % 8) * 16, 8); memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
out: out:
kfree(in_mad); kfree(in_mad);

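The one-line mthca fix above changes the copy stride from 16 to 8 bytes: assuming each GuidInfo MAD block carries eight 8-byte GUIDs, entry index sits at byte (index % 8) * 8 within the block. A tiny arithmetic check of that assumption:

#include <stdio.h>

int main(void)
{
        int index;

        /* Eight 8-byte GUIDs per MAD block; offset of each entry in the block. */
        for (index = 0; index < 10; index++)
                printf("index %d -> block %d, byte offset %d\n",
                       index, index / 8, (index % 8) * 8);
        return 0;
}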
View file

@ -17,7 +17,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <asm/irq.h> //#include <asm/irq.h>
#include <asm/arch/sharpsl.h> #include <asm/arch/sharpsl.h>
#include <asm/arch/hardware.h> #include <asm/arch/hardware.h>

View file

@ -315,10 +315,11 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
if (r1_bio->bios[mirror] == bio) if (r1_bio->bios[mirror] == bio)
break; break;
if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) { if (error == -EOPNOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags); set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
set_bit(R1BIO_BarrierRetry, &r1_bio->state); set_bit(R1BIO_BarrierRetry, &r1_bio->state);
r1_bio->mddev->barriers_work = 0; r1_bio->mddev->barriers_work = 0;
/* Don't rdev_dec_pending in this branch - keep it for the retry */
} else { } else {
/* /*
* this branch is our 'one mirror IO has finished' event handler: * this branch is our 'one mirror IO has finished' event handler:
@ -365,6 +366,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
} }
} }
} }
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
} }
/* /*
* *
@ -374,11 +376,9 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
if (atomic_dec_and_test(&r1_bio->remaining)) { if (atomic_dec_and_test(&r1_bio->remaining)) {
if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
reschedule_retry(r1_bio); reschedule_retry(r1_bio);
/* Don't dec_pending yet, we want to hold
* the reference over the retry
*/
goto out; goto out;
} }
/* it really is the end of this request */
if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
/* free extra copy of the data pages */ /* free extra copy of the data pages */
int i = bio->bi_vcnt; int i = bio->bi_vcnt;
@ -393,8 +393,6 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
md_write_end(r1_bio->mddev); md_write_end(r1_bio->mddev);
raid_end_bio_io(r1_bio); raid_end_bio_io(r1_bio);
} }
rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev);
out: out:
if (to_put) if (to_put)
bio_put(to_put); bio_put(to_put);
@ -753,18 +751,24 @@ static int make_request(request_queue_t *q, struct bio * bio)
const int rw = bio_data_dir(bio); const int rw = bio_data_dir(bio);
int do_barriers; int do_barriers;
if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
return 0;
}
/* /*
* Register the new request and wait if the reconstruction * Register the new request and wait if the reconstruction
* thread has put up a barrier for new requests. * thread has put up a barrier for new requests.
* Continue immediately if no resync is active currently. * Continue immediately if no resync is active currently.
* We test barriers_work *after* md_write_start as md_write_start
* may cause the first superblock write, and that will check out
* if barriers work.
*/ */
md_write_start(mddev, bio); /* wait on superblock update early */ md_write_start(mddev, bio); /* wait on superblock update early */
if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
if (rw == WRITE)
md_write_end(mddev);
bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
return 0;
}
wait_barrier(conf); wait_barrier(conf);
disk_stat_inc(mddev->gendisk, ios[rw]); disk_stat_inc(mddev->gendisk, ios[rw]);
@ -1404,10 +1408,11 @@ static void raid1d(mddev_t *mddev)
unplug = 1; unplug = 1;
} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
/* some requests in the r1bio were BIO_RW_BARRIER /* some requests in the r1bio were BIO_RW_BARRIER
* requests which failed with -ENOTSUPP. Hohumm.. * requests which failed with -EOPNOTSUPP. Hohumm..
* Better resubmit without the barrier. * Better resubmit without the barrier.
* We know which devices to resubmit for, because * We know which devices to resubmit for, because
* all others have had their bios[] entry cleared. * all others have had their bios[] entry cleared.
* We already have a nr_pending reference on these rdevs.
*/ */
int i; int i;
clear_bit(R1BIO_BarrierRetry, &r1_bio->state); clear_bit(R1BIO_BarrierRetry, &r1_bio->state);

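The raid1 hunks above move rdev_dec_pending() so the nr_pending reference taken for a write is kept across a barrier retry and dropped exactly once when the request really finishes. A hedged user-space sketch of that rule, with purely illustrative names:

#include <stdio.h>

static int nr_pending;                       /* stand-in for rdev->nr_pending */

static void get_ref(void)  { nr_pending++; }
static void put_ref(void)  { nr_pending--; }

/* Complete one write attempt.  If the barrier was rejected we keep the
 * reference for the retry; otherwise the reference is dropped here. */
static void end_write(int barrier_rejected)
{
        if (barrier_rejected) {
                puts("barrier rejected: keep reference, schedule retry");
                return;
        }
        put_ref();
        puts("write done: reference dropped");
}

int main(void)
{
        get_ref();
        end_write(1);           /* first attempt fails with -EOPNOTSUPP */
        end_write(0);           /* retry without the barrier completes */
        printf("nr_pending = %d\n", nr_pending);   /* expect 0 */
        return 0;
}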
View file

@ -1407,43 +1407,54 @@ static void raid10d(mddev_t *mddev)
if (s > (PAGE_SIZE>>9)) if (s > (PAGE_SIZE>>9))
s = PAGE_SIZE >> 9; s = PAGE_SIZE >> 9;
rcu_read_lock();
do { do {
int d = r10_bio->devs[sl].devnum; int d = r10_bio->devs[sl].devnum;
rdev = conf->mirrors[d].rdev; rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev && if (rdev &&
test_bit(In_sync, &rdev->flags) && test_bit(In_sync, &rdev->flags)) {
sync_page_io(rdev->bdev, atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
success = sync_page_io(rdev->bdev,
r10_bio->devs[sl].addr + r10_bio->devs[sl].addr +
sect + rdev->data_offset, sect + rdev->data_offset,
s<<9, s<<9,
conf->tmppage, READ)) conf->tmppage, READ);
success = 1; rdev_dec_pending(rdev, mddev);
else { rcu_read_lock();
if (success)
break;
}
sl++; sl++;
if (sl == conf->copies) if (sl == conf->copies)
sl = 0; sl = 0;
}
} while (!success && sl != r10_bio->read_slot); } while (!success && sl != r10_bio->read_slot);
rcu_read_unlock();
if (success) { if (success) {
int start = sl; int start = sl;
/* write it back and re-read */ /* write it back and re-read */
rcu_read_lock();
while (sl != r10_bio->read_slot) { while (sl != r10_bio->read_slot) {
int d; int d;
if (sl==0) if (sl==0)
sl = conf->copies; sl = conf->copies;
sl--; sl--;
d = r10_bio->devs[sl].devnum; d = r10_bio->devs[sl].devnum;
rdev = conf->mirrors[d].rdev; rdev = rcu_dereference(conf->mirrors[d].rdev);
atomic_add(s, &rdev->corrected_errors);
if (rdev && if (rdev &&
test_bit(In_sync, &rdev->flags)) { test_bit(In_sync, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
atomic_add(s, &rdev->corrected_errors);
if (sync_page_io(rdev->bdev, if (sync_page_io(rdev->bdev,
r10_bio->devs[sl].addr + r10_bio->devs[sl].addr +
sect + rdev->data_offset, sect + rdev->data_offset,
s<<9, conf->tmppage, WRITE) == 0) s<<9, conf->tmppage, WRITE) == 0)
/* Well, this device is dead */ /* Well, this device is dead */
md_error(mddev, rdev); md_error(mddev, rdev);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
} }
} }
sl = start; sl = start;
@ -1453,17 +1464,22 @@ static void raid10d(mddev_t *mddev)
sl = conf->copies; sl = conf->copies;
sl--; sl--;
d = r10_bio->devs[sl].devnum; d = r10_bio->devs[sl].devnum;
rdev = conf->mirrors[d].rdev; rdev = rcu_dereference(conf->mirrors[d].rdev);
if (rdev && if (rdev &&
test_bit(In_sync, &rdev->flags)) { test_bit(In_sync, &rdev->flags)) {
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
if (sync_page_io(rdev->bdev, if (sync_page_io(rdev->bdev,
r10_bio->devs[sl].addr + r10_bio->devs[sl].addr +
sect + rdev->data_offset, sect + rdev->data_offset,
s<<9, conf->tmppage, READ) == 0) s<<9, conf->tmppage, READ) == 0)
/* Well, this device is dead */ /* Well, this device is dead */
md_error(mddev, rdev); md_error(mddev, rdev);
rdev_dec_pending(rdev, mddev);
rcu_read_lock();
} }
} }
rcu_read_unlock();
} else { } else {
/* Cannot read from anywhere -- bye bye array */ /* Cannot read from anywhere -- bye bye array */
md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev); md_error(mddev, conf->mirrors[r10_bio->devs[r10_bio->read_slot].devnum].rdev);

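The raid10d changes above follow one ordering throughout: look the mirror up under rcu_read_lock(), pin it with nr_pending, drop the RCU read lock for the blocking sync_page_io(), then release the pin and re-enter RCU before the next lookup. The sketch below only traces that ordering; the stand-in functions merely print what the real primitives would do:

#include <stdio.h>

/* Stand-ins for the kernel primitives used in the hunk above. */
static void rcu_read_lock(void)    { puts("rcu_read_lock()"); }
static void rcu_read_unlock(void)  { puts("rcu_read_unlock()"); }
static void get_pending(void)      { puts("atomic_inc(&rdev->nr_pending)"); }
static void put_pending(void)      { puts("rdev_dec_pending(rdev, mddev)"); }
static void blocking_io(void)      { puts("sync_page_io(...)  /* may sleep */"); }

int main(void)
{
        int copies = 2, sl;

        rcu_read_lock();
        for (sl = 0; sl < copies; sl++) {
                /* rdev = rcu_dereference(conf->mirrors[d].rdev); */
                get_pending();          /* pin the device ...               */
                rcu_read_unlock();      /* ... so we may sleep without RCU  */
                blocking_io();
                put_pending();
                rcu_read_lock();        /* re-enter before the next lookup  */
        }
        rcu_read_unlock();
        return 0;
}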
View file

@ -621,9 +621,6 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct at91mci_host *host = mmc_priv(mmc); struct at91mci_host *host = mmc_priv(mmc);
unsigned long at91_master_clock = clk_get_rate(mci_clk); unsigned long at91_master_clock = clk_get_rate(mci_clk);
DBG("Clock %uHz, busmode %u, powermode %u, Vdd %u\n",
ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
if (host) if (host)
host->bus_mode = ios->bus_mode; host->bus_mode = ios->bus_mode;
else else

View file

@ -720,10 +720,6 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
{ {
struct au1xmmc_host *host = mmc_priv(mmc); struct au1xmmc_host *host = mmc_priv(mmc);
DBG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
host->id, ios->power_mode, ios->clock, ios->vdd,
ios->bus_mode);
if (ios->power_mode == MMC_POWER_OFF) if (ios->power_mode == MMC_POWER_OFF)
au1xmmc_set_power(host, 0); au1xmmc_set_power(host, 0);
else if (ios->power_mode == MMC_POWER_ON) { else if (ios->power_mode == MMC_POWER_ON) {

View file

@ -102,6 +102,7 @@ struct imxmci_host {
#define IMXMCI_PEND_CPU_DATA_b 5 #define IMXMCI_PEND_CPU_DATA_b 5
#define IMXMCI_PEND_CARD_XCHG_b 6 #define IMXMCI_PEND_CARD_XCHG_b 6
#define IMXMCI_PEND_SET_INIT_b 7 #define IMXMCI_PEND_SET_INIT_b 7
#define IMXMCI_PEND_STARTED_b 8
#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b) #define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b) #define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
@ -111,6 +112,7 @@ struct imxmci_host {
#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b) #define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b) #define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b) #define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
static void imxmci_stop_clock(struct imxmci_host *host) static void imxmci_stop_clock(struct imxmci_host *host)
{ {
@ -131,23 +133,52 @@ static void imxmci_stop_clock(struct imxmci_host *host)
dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n"); dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
} }
static void imxmci_start_clock(struct imxmci_host *host) static int imxmci_start_clock(struct imxmci_host *host)
{ {
int i = 0; unsigned int trials = 0;
unsigned int delay_limit = 128;
unsigned long flags;
MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK; MMC_STR_STP_CLK &= ~STR_STP_CLK_STOP_CLK;
while(i < 0x1000) {
if(!(i & 0x7f)) clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
/*
* Command to start the clock: this usually succeeds in fewer
* than 6 delay loops, but during card detection (low clock rate)
* it takes up to 5000 delay loops and sometimes fails the first time
*/
MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK; MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) { do {
unsigned int delay = delay_limit;
while(delay--){
if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
/* Check twice before cut */ /* Check twice before cut */
if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN) if(MMC_STATUS & STATUS_CARD_BUS_CLK_RUN)
return; return 0;
if(test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
return 0;
} }
i++; local_irq_save(flags);
} /*
dev_dbg(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); * Ensure, that request is not doubled under all possible circumstances.
* It is possible, that cock running state is missed, because some other
* IRQ or schedule delays this function execution and the clocks has
* been already stopped by other means (response processing, SDHC HW)
*/
if(!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
MMC_STR_STP_CLK |= STR_STP_CLK_START_CLK;
local_irq_restore(flags);
} while(++trials<256);
dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");
return -1;
} }
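The rewritten imxmci_start_clock() above bounds the busy-wait (a delay_limit poll per trial, at most 256 trials) and, in the driver, re-issues the start bit with interrupts disabled unless the IRQ path already flagged the clock as started. A simplified sketch of that bounded-retry shape with the hardware poll replaced by a counter; every name here is illustrative:

#include <stdio.h>

static int poll_count;

/* Pretend the clock only reports "running" after a number of polls. */
static int clock_running(void)
{
        return ++poll_count > 300;
}

static int start_clock(void)
{
        unsigned int trials = 0;
        const unsigned int delay_limit = 128;

        do {
                unsigned int delay = delay_limit;

                /* issue the start command, then poll for a bounded time */
                while (delay--) {
                        if (clock_running() && clock_running())
                                return 0;       /* check twice before cut */
                }
                /* in the driver the re-issue is done with IRQs disabled,
                 * and skipped if the IRQ handler saw the clock start */
        } while (++trials < 256);

        return -1;
}

int main(void)
{
        printf("start_clock() = %d after %d polls\n", start_clock(), poll_count);
        return 0;
}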
static void imxmci_softreset(void) static void imxmci_softreset(void)
@ -498,7 +529,7 @@ static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
data_error = imxmci_finish_data(host, stat); data_error = imxmci_finish_data(host, stat);
if (host->req->stop && (data_error == MMC_ERR_NONE)) { if (host->req->stop) {
imxmci_stop_clock(host); imxmci_stop_clock(host);
imxmci_start_cmd(host, host->req->stop, 0); imxmci_start_cmd(host, host->req->stop, 0);
} else { } else {
@ -622,6 +653,7 @@ static irqreturn_t imxmci_irq(int irq, void *devid, struct pt_regs *regs)
atomic_set(&host->stuck_timeout, 0); atomic_set(&host->stuck_timeout, 0);
host->status_reg = stat; host->status_reg = stat;
set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
tasklet_schedule(&host->tasklet); tasklet_schedule(&host->tasklet);
return IRQ_RETVAL(handled);; return IRQ_RETVAL(handled);;
@ -775,10 +807,6 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct imxmci_host *host = mmc_priv(mmc); struct imxmci_host *host = mmc_priv(mmc);
int prescaler; int prescaler;
dev_dbg(mmc_dev(host->mmc), "clock %u power %u vdd %u width %u\n",
ios->clock, ios->power_mode, ios->vdd,
(ios->bus_width==MMC_BUS_WIDTH_4)?4:1);
if( ios->bus_width==MMC_BUS_WIDTH_4 ) { if( ios->bus_width==MMC_BUS_WIDTH_4 ) {
host->actual_bus_width = MMC_BUS_WIDTH_4; host->actual_bus_width = MMC_BUS_WIDTH_4;
imx_gpio_mode(PB11_PF_SD_DAT3); imx_gpio_mode(PB11_PF_SD_DAT3);

View file

@ -59,21 +59,23 @@ static const unsigned int tacc_mant[] = {
/** /**
* mmc_request_done - finish processing an MMC command * mmc_request_done - finish processing an MMC request
* @host: MMC host which completed command * @host: MMC host which completed request
* @mrq: MMC request which completed * @mrq: MMC request which completed
* *
* MMC drivers should call this function when they have completed * MMC drivers should call this function when they have completed
* their processing of a command. This should be called before the * their processing of a request.
* data part of the command has completed.
*/ */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq) void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{ {
struct mmc_command *cmd = mrq->cmd; struct mmc_command *cmd = mrq->cmd;
int err = mrq->cmd->error; int err = cmd->error;
pr_debug("MMC: req done (%02x): %d: %08x %08x %08x %08x\n",
cmd->opcode, err, cmd->resp[0], cmd->resp[1], pr_debug("%s: req done (CMD%u): %d/%d/%d: %08x %08x %08x %08x\n",
cmd->resp[2], cmd->resp[3]); mmc_hostname(host), cmd->opcode, err,
mrq->data ? mrq->data->error : 0,
mrq->stop ? mrq->stop->error : 0,
cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
if (err && cmd->retries) { if (err && cmd->retries) {
cmd->retries--; cmd->retries--;
@ -97,8 +99,9 @@ EXPORT_SYMBOL(mmc_request_done);
void void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{ {
pr_debug("MMC: starting cmd %02x arg %08x flags %08x\n", pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); mmc_hostname(host), mrq->cmd->opcode,
mrq->cmd->arg, mrq->cmd->flags);
WARN_ON(host->card_busy == NULL); WARN_ON(host->card_busy == NULL);
@ -312,6 +315,18 @@ void mmc_release_host(struct mmc_host *host)
EXPORT_SYMBOL(mmc_release_host); EXPORT_SYMBOL(mmc_release_host);
static inline void mmc_set_ios(struct mmc_host *host)
{
struct mmc_ios *ios = &host->ios;
pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width);
host->ops->set_ios(host, ios);
}
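The new mmc_set_ios() helper centralizes the debug print that the per-driver set_ios() hooks above used to duplicate, and every core caller now funnels through it. A small stand-alone sketch of such a log-and-forward wrapper (core_set_ios() is an invented name, not the MMC core API):

#include <stdio.h>

struct ios  { unsigned clock, vdd; };
struct host {
        struct ios ios;
        void (*set_ios)(struct host *h, struct ios *ios);   /* driver hook */
};

static void driver_set_ios(struct host *h, struct ios *ios)
{
        /* a real driver would program the controller here */
        (void)h; (void)ios;
}

/* Log the requested state once, centrally, then call the driver hook. */
static void core_set_ios(struct host *h)
{
        printf("set_ios: clock %uHz vdd %u\n", h->ios.clock, h->ios.vdd);
        h->set_ios(h, &h->ios);
}

int main(void)
{
        struct host h = { { 400000, 21 }, driver_set_ios };

        h.ios.clock = 25000000;
        core_set_ios(&h);        /* every caller funnels through the wrapper */
        return 0;
}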
static int mmc_select_card(struct mmc_host *host, struct mmc_card *card) static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{ {
int err; int err;
@ -364,7 +379,7 @@ static int mmc_select_card(struct mmc_host *host, struct mmc_card *card)
} }
} }
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
return MMC_ERR_NONE; return MMC_ERR_NONE;
} }
@ -415,7 +430,7 @@ static u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
ocr = 3 << bit; ocr = 3 << bit;
host->ios.vdd = bit; host->ios.vdd = bit;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
} else { } else {
ocr = 0; ocr = 0;
} }
@ -549,6 +564,7 @@ static void mmc_decode_csd(struct mmc_card *card)
csd->read_partial = UNSTUFF_BITS(resp, 79, 1); csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1); csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
} else { } else {
@ -583,6 +599,7 @@ static void mmc_decode_csd(struct mmc_card *card)
csd->read_partial = UNSTUFF_BITS(resp, 79, 1); csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd->write_misalign = UNSTUFF_BITS(resp, 78, 1); csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd->read_misalign = UNSTUFF_BITS(resp, 77, 1); csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4); csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd->write_partial = UNSTUFF_BITS(resp, 21, 1); csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
} }
@ -666,7 +683,7 @@ static void mmc_idle_cards(struct mmc_host *host)
struct mmc_command cmd; struct mmc_command cmd;
host->ios.chip_select = MMC_CS_HIGH; host->ios.chip_select = MMC_CS_HIGH;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
mmc_delay(1); mmc_delay(1);
@ -679,7 +696,7 @@ static void mmc_idle_cards(struct mmc_host *host)
mmc_delay(1); mmc_delay(1);
host->ios.chip_select = MMC_CS_DONTCARE; host->ios.chip_select = MMC_CS_DONTCARE;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
mmc_delay(1); mmc_delay(1);
} }
@ -704,13 +721,13 @@ static void mmc_power_up(struct mmc_host *host)
host->ios.chip_select = MMC_CS_DONTCARE; host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_UP; host->ios.power_mode = MMC_POWER_UP;
host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
mmc_delay(1); mmc_delay(1);
host->ios.clock = host->f_min; host->ios.clock = host->f_min;
host->ios.power_mode = MMC_POWER_ON; host->ios.power_mode = MMC_POWER_ON;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
mmc_delay(2); mmc_delay(2);
} }
@ -723,7 +740,7 @@ static void mmc_power_off(struct mmc_host *host)
host->ios.chip_select = MMC_CS_DONTCARE; host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.power_mode = MMC_POWER_OFF; host->ios.power_mode = MMC_POWER_OFF;
host->ios.bus_width = MMC_BUS_WIDTH_1; host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
} }
static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) static int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
@ -971,7 +988,8 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host)
if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr) if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
max_dtr = card->csd.max_dtr; max_dtr = card->csd.max_dtr;
pr_debug("MMC: selected %d.%03dMHz transfer rate\n", pr_debug("%s: selected %d.%03dMHz transfer rate\n",
mmc_hostname(host),
max_dtr / 1000000, (max_dtr / 1000) % 1000); max_dtr / 1000000, (max_dtr / 1000) % 1000);
return max_dtr; return max_dtr;
@ -1046,7 +1064,7 @@ static void mmc_setup(struct mmc_host *host)
} else { } else {
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.clock = host->f_min; host->ios.clock = host->f_min;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
/* /*
* We should remember the OCR mask from the existing * We should remember the OCR mask from the existing
@ -1082,7 +1100,7 @@ static void mmc_setup(struct mmc_host *host)
* Ok, now switch to push-pull mode. * Ok, now switch to push-pull mode.
*/ */
host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
mmc_read_csds(host); mmc_read_csds(host);
@ -1128,7 +1146,7 @@ static void mmc_rescan(void *data)
* attached cards and the host support. * attached cards and the host support.
*/ */
host->ios.clock = mmc_calculate_clock(host); host->ios.clock = mmc_calculate_clock(host);
host->ops->set_ios(host, &host->ios); mmc_set_ios(host);
} }
mmc_release_host(host); mmc_release_host(host);

View file

@ -187,6 +187,12 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
brq.cmd.opcode = MMC_WRITE_BLOCK; brq.cmd.opcode = MMC_WRITE_BLOCK;
brq.data.flags |= MMC_DATA_WRITE; brq.data.flags |= MMC_DATA_WRITE;
brq.data.blocks = 1; brq.data.blocks = 1;
/*
* Scale up the timeout by the r2w factor
*/
brq.data.timeout_ns <<= card->csd.r2w_factor;
brq.data.timeout_clks <<= card->csd.r2w_factor;
} }
if (brq.data.blocks > 1) { if (brq.data.blocks > 1) {

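The block-driver hunk above scales the write timeout by the CSD R2W_FACTOR field; assuming the card encodes the factor as a power of two, a left shift applies it to both the nanosecond and clock-count timeouts. A small arithmetic sketch under that assumption:

#include <stdio.h>

int main(void)
{
        unsigned long long timeout_ns = 100000000ULL;   /* 100 ms read timeout */
        unsigned int r2w_factor = 3;                    /* CSD: writes take 2^3 longer */

        timeout_ns <<= r2w_factor;
        printf("write timeout = %llu ns (factor %u)\n",
               timeout_ns, 1u << r2w_factor);
        return 0;
}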
View file

@ -402,9 +402,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmci_host *host = mmc_priv(mmc); struct mmci_host *host = mmc_priv(mmc);
u32 clk = 0, pwr = 0; u32 clk = 0, pwr = 0;
DBG(host, "clock %uHz busmode %u powermode %u Vdd %u\n",
ios->clock, ios->bus_mode, ios->power_mode, ios->vdd);
if (ios->clock) { if (ios->clock) {
if (ios->clock >= host->mclk) { if (ios->clock >= host->mclk) {
clk = MCI_CLK_BYPASS; clk = MCI_CLK_BYPASS;

View file

@ -198,7 +198,6 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq) static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{ {
pr_debug("PXAMCI: request done\n");
host->mrq = NULL; host->mrq = NULL;
host->cmd = NULL; host->cmd = NULL;
host->data = NULL; host->data = NULL;
@ -291,7 +290,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
pxamci_disable_irq(host, DATA_TRAN_DONE); pxamci_disable_irq(host, DATA_TRAN_DONE);
host->data = NULL; host->data = NULL;
if (host->mrq->stop && data->error == MMC_ERR_NONE) { if (host->mrq->stop) {
pxamci_stop_clock(host); pxamci_stop_clock(host);
pxamci_start_cmd(host, host->mrq->stop, 0); pxamci_start_cmd(host, host->mrq->stop, 0);
} else { } else {
@ -309,12 +308,10 @@ static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
ireg = readl(host->base + MMC_I_REG); ireg = readl(host->base + MMC_I_REG);
pr_debug("PXAMCI: irq %08x\n", ireg);
if (ireg) { if (ireg) {
unsigned stat = readl(host->base + MMC_STAT); unsigned stat = readl(host->base + MMC_STAT);
pr_debug("PXAMCI: stat %08x\n", stat); pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
if (ireg & END_CMD_RES) if (ireg & END_CMD_RES)
handled |= pxamci_cmd_done(host, stat); handled |= pxamci_cmd_done(host, stat);
@ -368,10 +365,6 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{ {
struct pxamci_host *host = mmc_priv(mmc); struct pxamci_host *host = mmc_priv(mmc);
pr_debug("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
ios->clock, ios->power_mode, ios->vdd / 100,
ios->vdd % 100);
if (ios->clock) { if (ios->clock) {
unsigned int clk = CLOCKRATE / ios->clock; unsigned int clk = CLOCKRATE / ios->clock;
if (CLOCKRATE / clk > ios->clock) if (CLOCKRATE / clk > ios->clock)
@ -397,7 +390,7 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat |= CMDAT_INIT; host->cmdat |= CMDAT_INIT;
} }
pr_debug("pxamci_set_ios: clkrt = %x cmdat = %x\n", pr_debug("PXAMCI: clkrt = %x cmdat = %x\n",
host->clkrt, host->cmdat); host->clkrt, host->cmdat);
} }

View file

@ -570,10 +570,6 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_lock_irqsave(&host->lock, flags); spin_lock_irqsave(&host->lock, flags);
DBG("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
ios->vdd, ios->bus_width);
/* /*
* Reset the chip on each power off. * Reset the chip on each power off.
* Should clear out any weird states. * Should clear out any weird states.

View file

@ -931,10 +931,6 @@ static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct wbsd_host *host = mmc_priv(mmc); struct wbsd_host *host = mmc_priv(mmc);
u8 clk, setup, pwr; u8 clk, setup, pwr;
DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
ios->vdd, ios->bus_width);
spin_lock_bh(&host->lock); spin_lock_bh(&host->lock);
/* /*

View file

@ -106,6 +106,7 @@
* 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings. * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
* 0.52: 20 Jan 2006: Add MSI/MSIX support. * 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset. * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
* *
* Known bugs: * Known bugs:
* We suspect that on some hardware no TX done interrupts are generated. * We suspect that on some hardware no TX done interrupts are generated.
@ -117,7 +118,7 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic. * superfluous timer interrupts from the nic.
*/ */
#define FORCEDETH_VERSION "0.53" #define FORCEDETH_VERSION "0.54"
#define DRV_NAME "forcedeth" #define DRV_NAME "forcedeth"
#include <linux/module.h> #include <linux/module.h>
@ -710,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
} }
} }
static int using_multi_irqs(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
((np->msi_flags & NV_MSI_X_ENABLED) &&
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
return 0;
else
return 1;
}
static void nv_enable_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
enable_irq(dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
static void nv_disable_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (!using_multi_irqs(dev)) {
if (np->msi_flags & NV_MSI_X_ENABLED)
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
disable_irq(dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
}
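The new forcedeth helpers above fold the repeated "MSI-X with more than one vector?" test into using_multi_irqs() and pair the enable/disable of whichever interrupt source is active. The sketch below mirrors only the flag test; the constants are illustrative placeholders, not the driver's NV_* values:

#include <stdio.h>

#define MSIX_ENABLED    0x0080   /* placeholder flag bit */
#define VECTORS_MASK    0x000f   /* placeholder vector-count mask */

/* Single IRQ path unless MSI-X is enabled with more than one vector. */
static int multi_irqs(unsigned int msi_flags)
{
        if (!(msi_flags & MSIX_ENABLED) ||
            ((msi_flags & MSIX_ENABLED) &&
             ((msi_flags & VECTORS_MASK) == 0x1)))
                return 0;
        return 1;
}

int main(void)
{
        printf("%d %d %d\n",
               multi_irqs(0x0000),                 /* legacy INTx   -> 0 */
               multi_irqs(MSIX_ENABLED | 0x01),    /* one vector    -> 0 */
               multi_irqs(MSIX_ENABLED | 0x03));   /* three vectors -> 1 */
        return 0;
}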
/* In MSIX mode, a write to irqmask behaves as XOR */
static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
{
u8 __iomem *base = get_hwbase(dev);
writel(mask, base + NvRegIrqMask);
}
static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
if (np->msi_flags & NV_MSI_X_ENABLED) {
writel(mask, base + NvRegIrqMask);
} else {
if (np->msi_flags & NV_MSI_ENABLED)
writel(0, base + NvRegMSIIrqMask);
writel(0, base + NvRegIrqMask);
}
}
#define MII_READ (-1) #define MII_READ (-1)
/* mii_rw: read/write a register on the PHY. /* mii_rw: read/write a register on the PHY.
* *
@ -1019,23 +1086,24 @@ static void nv_do_rx_refill(unsigned long data)
struct net_device *dev = (struct net_device *) data; struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev); struct fe_priv *np = netdev_priv(dev);
if (!using_multi_irqs(dev)) {
if (!(np->msi_flags & NV_MSI_X_ENABLED) || if (np->msi_flags & NV_MSI_X_ENABLED)
((np->msi_flags & NV_MSI_X_ENABLED) && disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { else
disable_irq(dev->irq); disable_irq(dev->irq);
} else { } else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
} }
if (nv_alloc_rx(dev)) { if (nv_alloc_rx(dev)) {
spin_lock(&np->lock); spin_lock_irq(&np->lock);
if (!np->in_shutdown) if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL); mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
} }
if (!(np->msi_flags & NV_MSI_X_ENABLED) || if (!using_multi_irqs(dev)) {
((np->msi_flags & NV_MSI_X_ENABLED) && if (np->msi_flags & NV_MSI_X_ENABLED)
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
enable_irq(dev->irq); enable_irq(dev->irq);
} else { } else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
@ -1668,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
* guessed, there is probably a simpler approach. * guessed, there is probably a simpler approach.
* Changing the MTU is a rare event, it shouldn't matter. * Changing the MTU is a rare event, it shouldn't matter.
*/ */
if (!(np->msi_flags & NV_MSI_X_ENABLED) || nv_disable_irq(dev);
((np->msi_flags & NV_MSI_X_ENABLED) &&
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
disable_irq(dev->irq);
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
spin_lock_bh(&dev->xmit_lock); spin_lock_bh(&dev->xmit_lock);
spin_lock(&np->lock); spin_lock(&np->lock);
/* stop engines */ /* stop engines */
@ -1709,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
nv_start_tx(dev); nv_start_tx(dev);
spin_unlock(&np->lock); spin_unlock(&np->lock);
spin_unlock_bh(&dev->xmit_lock); spin_unlock_bh(&dev->xmit_lock);
if (!(np->msi_flags & NV_MSI_X_ENABLED) || nv_enable_irq(dev);
((np->msi_flags & NV_MSI_X_ENABLED) &&
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
enable_irq(dev->irq);
} else {
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
}
} }
return 0; return 0;
} }
@ -2108,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
if (!(events & np->irqmask)) if (!(events & np->irqmask))
break; break;
spin_lock(&np->lock); spin_lock_irq(&np->lock);
nv_tx_done(dev); nv_tx_done(dev);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
if (events & (NVREG_IRQ_TX_ERR)) { if (events & (NVREG_IRQ_TX_ERR)) {
dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n", dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
dev->name, events); dev->name, events);
} }
if (i > max_interrupt_work) { if (i > max_interrupt_work) {
spin_lock(&np->lock); spin_lock_irq(&np->lock);
/* disable interrupts on the nic */ /* disable interrupts on the nic */
writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask); writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
pci_push(base); pci_push(base);
@ -2127,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT); mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
} }
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
break; break;
} }
@ -2157,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
nv_rx_process(dev); nv_rx_process(dev);
if (nv_alloc_rx(dev)) { if (nv_alloc_rx(dev)) {
spin_lock(&np->lock); spin_lock_irq(&np->lock);
if (!np->in_shutdown) if (!np->in_shutdown)
mod_timer(&np->oom_kick, jiffies + OOM_REFILL); mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
} }
if (i > max_interrupt_work) { if (i > max_interrupt_work) {
spin_lock(&np->lock); spin_lock_irq(&np->lock);
/* disable interrupts on the nic */ /* disable interrupts on the nic */
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask); writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
pci_push(base); pci_push(base);
@ -2174,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT); mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
} }
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
break; break;
} }
@ -2203,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
break; break;
if (events & NVREG_IRQ_LINK) { if (events & NVREG_IRQ_LINK) {
spin_lock(&np->lock); spin_lock_irq(&np->lock);
nv_link_irq(dev); nv_link_irq(dev);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
} }
if (np->need_linktimer && time_after(jiffies, np->link_timeout)) { if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
spin_lock(&np->lock); spin_lock_irq(&np->lock);
nv_linkchange(dev); nv_linkchange(dev);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
np->link_timeout = jiffies + LINK_TIMEOUT; np->link_timeout = jiffies + LINK_TIMEOUT;
} }
if (events & (NVREG_IRQ_UNKNOWN)) { if (events & (NVREG_IRQ_UNKNOWN)) {
@ -2218,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
dev->name, events); dev->name, events);
} }
if (i > max_interrupt_work) { if (i > max_interrupt_work) {
spin_lock(&np->lock); spin_lock_irq(&np->lock);
/* disable interrupts on the nic */ /* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask); writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base); pci_push(base);
@ -2228,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
mod_timer(&np->nic_poll, jiffies + POLL_WAIT); mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
} }
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
spin_unlock(&np->lock); spin_unlock_irq(&np->lock);
break; break;
} }
@ -2251,9 +2303,10 @@ static void nv_do_nic_poll(unsigned long data)
* nv_nic_irq because that may decide to do otherwise * nv_nic_irq because that may decide to do otherwise
*/ */
if (!(np->msi_flags & NV_MSI_X_ENABLED) || if (!using_multi_irqs(dev)) {
((np->msi_flags & NV_MSI_X_ENABLED) && if (np->msi_flags & NV_MSI_X_ENABLED)
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) { disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
disable_irq(dev->irq); disable_irq(dev->irq);
mask = np->irqmask; mask = np->irqmask;
} else { } else {
@ -2277,10 +2330,11 @@ static void nv_do_nic_poll(unsigned long data)
writel(mask, base + NvRegIrqMask); writel(mask, base + NvRegIrqMask);
pci_push(base); pci_push(base);
if (!(np->msi_flags & NV_MSI_X_ENABLED) || if (!using_multi_irqs(dev)) {
((np->msi_flags & NV_MSI_X_ENABLED) &&
((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL); nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
if (np->msi_flags & NV_MSI_X_ENABLED)
enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
else
enable_irq(dev->irq); enable_irq(dev->irq);
} else { } else {
if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) { if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@ -2628,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
} }
static int nv_request_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
int ret = 1;
int i;
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
np->msi_x_entry[i].entry = i;
}
if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
/* Request irq for rx handling */
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
}
/* Request irq for tx handling */
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_rx;
}
/* Request irq for link and timer handling */
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_free_tx;
}
/* map interrupts to their respective vector */
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
} else {
/* Request irq for all interrupts */
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
goto out_err;
}
/* map interrupts to vector 0 */
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
}
}
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
np->msi_flags |= NV_MSI_ENABLED;
if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
goto out_err;
}
/* map interrupts to vector 0 */
writel(0, base + NvRegMSIMap0);
writel(0, base + NvRegMSIMap1);
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
}
}
if (ret != 0) {
if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
goto out_err;
}
return 0;
out_free_tx:
free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
out_free_rx:
free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
out_err:
return 1;
}
static void nv_free_irq(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
int i;
if (np->msi_flags & NV_MSI_X_ENABLED) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
free_irq(np->msi_x_entry[i].vector, dev);
}
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
} else {
free_irq(np->pci_dev->irq, dev);
if (np->msi_flags & NV_MSI_ENABLED) {
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
}
}
}
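nv_request_irq() above tries MSI-X first (split vectors in throughput mode, otherwise one combined vector), falls back to MSI, and finally to the legacy PCI interrupt, unwinding each step on failure. A compact sketch of that fallback ladder with the PCI calls stubbed out; the stub names are invented:

#include <stdio.h>

/* Stubs standing in for pci_enable_msix()/pci_enable_msi()/request_irq().
 * Flip the return values to exercise the different fallback paths. */
static int try_msix(void)   { return -1; }   /* pretend MSI-X is unavailable */
static int try_msi(void)    { return 0; }
static int try_legacy(void) { return 0; }

static int setup_irq(void)
{
        if (try_msix() == 0) {
                puts("using MSI-X vectors");
                return 0;
        }
        if (try_msi() == 0) {
                puts("falling back to MSI");
                return 0;
        }
        if (try_legacy() == 0) {
                puts("falling back to legacy INTx");
                return 0;
        }
        return 1;                /* mirror the driver's "1 on failure" */
}

int main(void)
{
        return setup_irq();
}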
static int nv_open(struct net_device *dev) static int nv_open(struct net_device *dev)
{ {
struct fe_priv *np = netdev_priv(dev); struct fe_priv *np = netdev_priv(dev);
@ -2720,12 +2881,16 @@ static int nv_open(struct net_device *dev)
udelay(10); udelay(10);
writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState); writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
writel(0, base + NvRegIrqMask); nv_disable_hw_interrupts(dev, np->irqmask);
pci_push(base); pci_push(base);
writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus); writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
pci_push(base); pci_push(base);
if (nv_request_irq(dev)) {
goto out_drain;
}
if (np->msi_flags & NV_MSI_X_CAPABLE) { if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
np->msi_x_entry[i].entry = i; np->msi_x_entry[i].entry = i;
@ -2799,7 +2964,7 @@ static int nv_open(struct net_device *dev)
} }
/* ask for interrupts */ /* ask for interrupts */
writel(np->irqmask, base + NvRegIrqMask); nv_enable_hw_interrupts(dev, np->irqmask);
spin_lock_irq(&np->lock); spin_lock_irq(&np->lock);
writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA); writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@ -2843,7 +3008,6 @@ static int nv_close(struct net_device *dev)
{ {
struct fe_priv *np = netdev_priv(dev); struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base; u8 __iomem *base;
int i;
spin_lock_irq(&np->lock); spin_lock_irq(&np->lock);
np->in_shutdown = 1; np->in_shutdown = 1;
@ -2861,31 +3025,13 @@ static int nv_close(struct net_device *dev)
/* disable interrupts on the nic or we will lock up */ /* disable interrupts on the nic or we will lock up */
base = get_hwbase(dev); base = get_hwbase(dev);
if (np->msi_flags & NV_MSI_X_ENABLED) { nv_disable_hw_interrupts(dev, np->irqmask);
writel(np->irqmask, base + NvRegIrqMask);
} else {
if (np->msi_flags & NV_MSI_ENABLED)
writel(0, base + NvRegMSIIrqMask);
writel(0, base + NvRegIrqMask);
}
pci_push(base); pci_push(base);
dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name); dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
spin_unlock_irq(&np->lock); spin_unlock_irq(&np->lock);
if (np->msi_flags & NV_MSI_X_ENABLED) { nv_free_irq(dev);
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
free_irq(np->msi_x_entry[i].vector, dev);
}
pci_disable_msix(np->pci_dev);
np->msi_flags &= ~NV_MSI_X_ENABLED;
} else {
free_irq(np->pci_dev->irq, dev);
if (np->msi_flags & NV_MSI_ENABLED) {
pci_disable_msi(np->pci_dev);
np->msi_flags &= ~NV_MSI_ENABLED;
}
}
drain_ring(dev); drain_ring(dev);
@ -2974,20 +3120,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
if (id->driver_data & DEV_HAS_HIGH_DMA) { if (id->driver_data & DEV_HAS_HIGH_DMA) {
/* packet format 3: supports 40-bit addressing */ /* packet format 3: supports 40-bit addressing */
np->desc_ver = DESC_VER_3; np->desc_ver = DESC_VER_3;
np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) { if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
pci_name(pci_dev)); pci_name(pci_dev));
} else {
if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
pci_name(pci_dev));
goto out_relreg;
} else { } else {
dev->features |= NETIF_F_HIGHDMA; dev->features |= NETIF_F_HIGHDMA;
printk(KERN_INFO "forcedeth: using HIGHDMA\n"); printk(KERN_INFO "forcedeth: using HIGHDMA\n");
} }
if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
pci_name(pci_dev));
} }
np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
} else if (id->driver_data & DEV_HAS_LARGEDESC) { } else if (id->driver_data & DEV_HAS_LARGEDESC) {
/* packet format 2: supports jumbo frames */ /* packet format 2: supports jumbo frames */
np->desc_ver = DESC_VER_2; np->desc_ver = DESC_VER_2;

View file

@ -582,7 +582,6 @@ static int __init setup_adapter(int card_base, int type, int n)
INIT_WORK(&priv->rx_work, rx_bh, priv); INIT_WORK(&priv->rx_work, rx_bh, priv);
dev->priv = priv; dev->priv = priv;
sprintf(dev->name, "dmascc%i", 2 * n + i); sprintf(dev->name, "dmascc%i", 2 * n + i);
SET_MODULE_OWNER(dev);
dev->base_addr = card_base; dev->base_addr = card_base;
dev->irq = irq; dev->irq = irq;
dev->open = scc_open; dev->open = scc_open;

View file

@ -1550,7 +1550,6 @@ static unsigned char ax25_nocall[AX25_ADDR_LEN] =
static void scc_net_setup(struct net_device *dev) static void scc_net_setup(struct net_device *dev)
{ {
SET_MODULE_OWNER(dev);
dev->tx_queue_len = 16; /* should be enough... */ dev->tx_queue_len = 16; /* should be enough... */
dev->open = scc_net_open; dev->open = scc_net_open;

View file

@ -1098,7 +1098,6 @@ static void yam_setup(struct net_device *dev)
dev->base_addr = yp->iobase; dev->base_addr = yp->iobase;
dev->irq = yp->irq; dev->irq = yp->irq;
SET_MODULE_OWNER(dev);
dev->open = yam_open; dev->open = yam_open;
dev->stop = yam_close; dev->stop = yam_close;

View file

@ -1419,6 +1419,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mv643xx_eth_update_pscr(dev, &cmd); mv643xx_eth_update_pscr(dev, &cmd);
mv643xx_set_settings(dev, &cmd); mv643xx_set_settings(dev, &cmd);
SET_MODULE_OWNER(dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_netdev(dev); err = register_netdev(dev);
if (err) if (err)
goto out; goto out;

View file

@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3" #define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": " #define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.56" #define DRV_MODULE_VERSION "3.57"
#define DRV_MODULE_RELDATE "Apr 1, 2006" #define DRV_MODULE_RELDATE "Apr 28, 2006"
#define TG3_DEF_MAC_MODE 0 #define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0 #define TG3_DEF_RX_MODE 0
@ -974,6 +974,8 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
return err; return err;
} }
static void tg3_link_report(struct tg3 *);
/* This will reset the tigon3 PHY if there is no valid /* This will reset the tigon3 PHY if there is no valid
* link unless the FORCE argument is non-zero. * link unless the FORCE argument is non-zero.
*/ */
@ -987,6 +989,11 @@ static int tg3_phy_reset(struct tg3 *tp)
if (err != 0) if (err != 0)
return -EBUSY; return -EBUSY;
if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
netif_carrier_off(tp->dev);
tg3_link_report(tp);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 || if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 || GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) { GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
@ -1023,6 +1030,12 @@ out:
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
} }
else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
}
/* Set Extended packet length bit (bit 14) on all chips that */ /* Set Extended packet length bit (bit 14) on all chips that */
/* support jumbo frames */ /* support jumbo frames */
if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) { if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
@ -3531,7 +3544,7 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
return IRQ_RETVAL(0); return IRQ_RETVAL(0);
} }
static int tg3_init_hw(struct tg3 *); static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int); static int tg3_halt(struct tg3 *, int, int);
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
@ -3567,7 +3580,7 @@ static void tg3_reset_task(void *_data)
tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER; tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0); tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
tg3_init_hw(tp); tg3_init_hw(tp, 1);
tg3_netif_start(tp); tg3_netif_start(tp);
@ -4042,7 +4055,7 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_set_mtu(dev, tp, new_mtu); tg3_set_mtu(dev, tp, new_mtu);
tg3_init_hw(tp); tg3_init_hw(tp, 0);
tg3_netif_start(tp); tg3_netif_start(tp);
@ -5719,9 +5732,23 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!netif_running(dev)) if (!netif_running(dev))
return 0; return 0;
if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
/* Reset chip so that ASF can re-init any MAC addresses it
* needs.
*/
tg3_netif_stop(tp);
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_init_hw(tp, 0);
tg3_netif_start(tp);
tg3_full_unlock(tp);
} else {
spin_lock_bh(&tp->lock); spin_lock_bh(&tp->lock);
__tg3_set_mac_addr(tp); __tg3_set_mac_addr(tp);
spin_unlock_bh(&tp->lock); spin_unlock_bh(&tp->lock);
}
return 0; return 0;
} }
@ -5771,7 +5798,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
} }
/* tp->lock is held. */ /* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp) static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{ {
u32 val, rdmac_mode; u32 val, rdmac_mode;
int i, err, limit; int i, err, limit;
@ -5786,7 +5813,7 @@ static int tg3_reset_hw(struct tg3 *tp)
tg3_abort_hw(tp, 1); tg3_abort_hw(tp, 1);
} }
if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && reset_phy)
tg3_phy_reset(tp); tg3_phy_reset(tp);
err = tg3_chip_reset(tp); err = tg3_chip_reset(tp);
@ -6327,7 +6354,7 @@ static int tg3_reset_hw(struct tg3 *tp)
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
} }
err = tg3_setup_phy(tp, 1); err = tg3_setup_phy(tp, reset_phy);
if (err) if (err)
return err; return err;
@ -6400,7 +6427,7 @@ static int tg3_reset_hw(struct tg3 *tp)
/* Called at device open time to get the chip ready for /* Called at device open time to get the chip ready for
* packet processing. Invoked with tp->lock held. * packet processing. Invoked with tp->lock held.
*/ */
static int tg3_init_hw(struct tg3 *tp) static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{ {
int err; int err;
@ -6413,7 +6440,7 @@ static int tg3_init_hw(struct tg3 *tp)
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
err = tg3_reset_hw(tp); err = tg3_reset_hw(tp, reset_phy);
out: out:
return err; return err;
@ -6683,7 +6710,7 @@ static int tg3_test_msi(struct tg3 *tp)
tg3_full_lock(tp, 1); tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
err = tg3_init_hw(tp); err = tg3_init_hw(tp, 1);
tg3_full_unlock(tp); tg3_full_unlock(tp);
@ -6748,7 +6775,7 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0); tg3_full_lock(tp, 0);
err = tg3_init_hw(tp); err = tg3_init_hw(tp, 1);
if (err) { if (err) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp); tg3_free_rings(tp);
@ -7839,7 +7866,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) { if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_init_hw(tp); tg3_init_hw(tp, 1);
tg3_netif_start(tp); tg3_netif_start(tp);
} }
@ -7884,7 +7911,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) { if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_init_hw(tp); tg3_init_hw(tp, 1);
tg3_netif_start(tp); tg3_netif_start(tp);
} }
@ -8522,7 +8549,7 @@ static int tg3_test_loopback(struct tg3 *tp)
if (!netif_running(tp->dev)) if (!netif_running(tp->dev))
return TG3_LOOPBACK_FAILED; return TG3_LOOPBACK_FAILED;
tg3_reset_hw(tp); tg3_reset_hw(tp, 1);
if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK)) if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
err |= TG3_MAC_LOOPBACK_FAILED; err |= TG3_MAC_LOOPBACK_FAILED;
@ -8596,7 +8623,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) { if (netif_running(dev)) {
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
tg3_init_hw(tp); tg3_init_hw(tp, 1);
tg3_netif_start(tp); tg3_netif_start(tp);
} }
@ -9377,7 +9404,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
if ((page_off == 0) || (i == 0)) if ((page_off == 0) || (i == 0))
nvram_cmd |= NVRAM_CMD_FIRST; nvram_cmd |= NVRAM_CMD_FIRST;
else if (page_off == (tp->nvram_pagesize - 4)) if (page_off == (tp->nvram_pagesize - 4))
nvram_cmd |= NVRAM_CMD_LAST; nvram_cmd |= NVRAM_CMD_LAST;
if (i == (len - 4)) if (i == (len - 4))
@ -10353,10 +10380,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) && if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
else
tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
}
tp->coalesce_mode = 0; tp->coalesce_mode = 0;
if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX && if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
@ -11569,7 +11599,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
tg3_full_lock(tp, 0); tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
tg3_init_hw(tp); tg3_init_hw(tp, 1);
tp->timer.expires = jiffies + tp->timer_offset; tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer); add_timer(&tp->timer);
@ -11603,7 +11633,7 @@ static int tg3_resume(struct pci_dev *pdev)
tg3_full_lock(tp, 0); tg3_full_lock(tp, 0);
tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE; tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
tg3_init_hw(tp); tg3_init_hw(tp, 1);
tp->timer.expires = jiffies + tp->timer_offset; tp->timer.expires = jiffies + tp->timer_offset;
add_timer(&tp->timer); add_timer(&tp->timer);

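Across the tg3 hunks above, tg3_init_hw() and tg3_reset_hw() gain a reset_phy argument so callers such as open, the reset task, self-test and resume force a PHY reset while the MTU-change and ASF MAC-address paths skip it; the flag is forwarded down to tg3_setup_phy(). A tiny sketch of threading such a flag through an init path, with illustrative names only:

#include <stdio.h>

static void setup_phy(int force_reset)
{
        printf("setup_phy(reset=%d)\n", force_reset);
}

static int reset_hw(int reset_phy)
{
        if (reset_phy)
                puts("phy reset");
        setup_phy(reset_phy);    /* the flag rides along to the PHY setup */
        return 0;
}

static int init_hw(int reset_phy)
{
        return reset_hw(reset_phy);
}

int main(void)
{
        init_hw(1);     /* e.g. device open: full PHY reset */
        init_hw(0);     /* e.g. MTU change: keep the link up */
        return 0;
}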
View file

@ -2215,6 +2215,7 @@ struct tg3 {
#define TG3_FLG2_HW_TSO_2 0x08000000 #define TG3_FLG2_HW_TSO_2 0x08000000
#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) #define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
#define TG3_FLG2_1SHOT_MSI 0x10000000 #define TG3_FLG2_1SHOT_MSI 0x10000000
#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
u32 split_mode_max_reqs; u32 split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ 3 #define SPLIT_MODE_5704_MAX_REQ 3

Some files were not shown because too many files have changed in this diff.