Merge branch 'akpm' (Andrew's patch-bomb)

Merge third batch of patches from Andrew Morton:
 - Some MM stragglers
 - core SMP library cleanups (on_each_cpu_mask; a usage sketch follows below)
 - Some IPI optimisations
 - kexec
 - kdump
 - IPMI
 - the radix-tree iterator work
 - various other misc bits.

 "That'll do for -rc1.  I still have ~10 patches for 3.4, will send
  those along when they've baked a little more."
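
As a quick illustration of the on_each_cpu_mask() cleanup mentioned in the list
above: the helper runs a function on every CPU set in a caller-supplied mask,
calling it directly on the local CPU and via IPI on the remote ones, instead of
each caller open-coding smp_call_function_many() plus a separate local call.
A minimal sketch of the call pattern; the callback and wrapper names here are
invented purely for illustration:

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    /* Hypothetical per-CPU callback; on_each_cpu_mask() runs it on the
     * local CPU directly and on the remote CPUs in the mask via IPI. */
    static void drain_local_queue(void *info)
    {
            /* per-CPU work would go here */
    }

    /* Hypothetical caller: run the callback on every CPU in 'mask' and,
     * because wait == true, return only after all of them have finished. */
    static void drain_queues(const struct cpumask *mask)
    {
            on_each_cpu_mask(mask, drain_local_queue, NULL, true);
    }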

* emailed from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
  backlight: fix typo in tosa_lcd.c
  crc32: add help text for the algorithm select option
  mm: move hugepage test examples to tools/testing/selftests/vm
  mm: move slabinfo.c to tools/vm
  mm: move page-types.c from Documentation to tools/vm
  selftests/Makefile: make `run_tests' depend on `all'
  selftests: launch individual selftests from the main Makefile
  radix-tree: use iterators in find_get_pages* functions
  radix-tree: rewrite gang lookup using iterator
  radix-tree: introduce bit-optimized iterator
  fs/proc/namespaces.c: prevent crash when ns_entries[] is empty
  nbd: rename the nbd_device variable from lo to nbd
  pidns: add reboot_pid_ns() to handle the reboot syscall
  sysctl: use bitmap library functions
  ipmi: use locks on watchdog timeout set on reboot
  ipmi: simplify locking
  ipmi: fix message handling during panics
  ipmi: use a tasklet for handling received messages
  ipmi: increase KCS timeouts
  ipmi: decrease the IPMI message transaction time in interrupt mode
  ...
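
The radix-tree commits above ("introduce bit-optimized iterator", "rewrite gang
lookup using iterator", "use iterators in find_get_pages* functions") add
struct radix_tree_iter and the radix_tree_for_each_slot() family, which walk the
populated slots directly instead of repeated radix_tree_gang_lookup() calls.
A minimal sketch of the loop shape, assuming a caller-owned tree; real users such
as find_get_pages() also handle radix_tree_deref_retry() and restart under RCU:

    #include <linux/radix-tree.h>
    #include <linux/rcupdate.h>

    /* Illustrative only: count populated slots at index >= start. */
    static unsigned int count_items(struct radix_tree_root *tree,
                                    unsigned long start)
    {
            struct radix_tree_iter iter;
            void **slot;
            unsigned int nr = 0;

            rcu_read_lock();
            radix_tree_for_each_slot(slot, tree, &iter, start) {
                    if (radix_tree_deref_slot(slot))
                            nr++;
            }
            rcu_read_unlock();
            return nr;
    }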
commit 532bfc851a
Linus Torvalds, 2012-03-28 17:19:27 -07:00
55 changed files with 1225 additions and 768 deletions

@@ -400,7 +400,7 @@ smp_callin (void)
         /* Setup the per cpu irq handling data structures */
         __setup_vector_irq(cpuid);
         notify_cpu_starting(cpuid);
-        cpu_set(cpuid, cpu_online_map);
+        set_cpu_online(cpuid, true);
         per_cpu(cpu_state, cpuid) = CPU_ONLINE;
         spin_unlock(&vector_lock);
         ipi_call_unlock_irq();
@@ -547,7 +547,7 @@ do_rest:
         if (!cpu_isset(cpu, cpu_callin_map)) {
                 printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
                 ia64_cpu_to_sapicid[cpu] = -1;
-                cpu_clear(cpu, cpu_online_map);  /* was set in smp_callin() */
+                set_cpu_online(cpu, false);  /* was set in smp_callin() */
                 return -EINVAL;
         }
         return 0;
@@ -577,8 +577,7 @@ smp_build_cpu_map (void)
         }

         ia64_cpu_to_sapicid[0] = boot_cpu_id;
-        cpus_clear(cpu_present_map);
-        set_cpu_present(0, true);
+        init_cpu_present(cpumask_of(0));
         set_cpu_possible(0, true);
         for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
                 sapicid = smp_boot_data.cpu_phys_id[i];
@@ -605,10 +604,6 @@ smp_prepare_cpus (unsigned int max_cpus)

         smp_setup_percpu_timer();

-        /*
-         * We have the boot CPU online for sure.
-         */
-        cpu_set(0, cpu_online_map);
         cpu_set(0, cpu_callin_map);

         local_cpu_data->loops_per_jiffy = loops_per_jiffy;
@@ -632,7 +627,7 @@ smp_prepare_cpus (unsigned int max_cpus)

 void __devinit smp_prepare_boot_cpu(void)
 {
-        cpu_set(smp_processor_id(), cpu_online_map);
+        set_cpu_online(smp_processor_id(), true);
         cpu_set(smp_processor_id(), cpu_callin_map);
         set_numa_node(cpu_to_node_map[smp_processor_id()]);
         per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -689,7 +684,7 @@ int migrate_platform_irqs(unsigned int cpu)
                         /*
                          * Now re-target the CPEI to a different processor
                          */
-                        new_cpei_cpu = any_online_cpu(cpu_online_map);
+                        new_cpei_cpu = cpumask_any(cpu_online_mask);
                         mask = cpumask_of(new_cpei_cpu);
                         set_cpei_target_cpu(new_cpei_cpu);
                         data = irq_get_irq_data(ia64_cpe_irq);
@@ -731,10 +726,10 @@ int __cpu_disable(void)
                         return -EBUSY;
         }

-        cpu_clear(cpu, cpu_online_map);
+        set_cpu_online(cpu, false);

         if (migrate_platform_irqs(cpu)) {
-                cpu_set(cpu, cpu_online_map);
+                set_cpu_online(cpu, true);
                 return -EBUSY;
         }
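
The hunks above all follow one pattern: direct bit operations on the old
cpu_online_map / cpu_present_map bitmaps (cpu_set(), cpu_clear(), cpus_clear(),
any_online_cpu()) are replaced by the cpumask accessor helpers. A small sketch of
the new-style calls; the function names below are invented purely for illustration:

    #include <linux/cpumask.h>

    /* Hypothetical arch-style hotplug helpers using the accessor API. */
    static void example_mark_cpu(unsigned int cpu, bool up)
    {
            set_cpu_online(cpu, up);          /* replaces cpu_set()/cpu_clear() on cpu_online_map */
    }

    static void example_boot_cpu_setup(void)
    {
            init_cpu_present(cpumask_of(0));  /* replaces cpus_clear() + set_cpu_present(0, true) */
            set_cpu_possible(0, true);
    }

    static unsigned int example_pick_online_cpu(void)
    {
            return cpumask_any(cpu_online_mask);  /* replaces any_online_cpu(cpu_online_map) */
    }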